code stringclasses 18 values | apis sequence | extract_api stringclasses 18 values |
|---|---|---|
"""Normality model of DFKDE."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import math
import torch
from torch import Tensor, nn
from anomalib.models.components import PCA, DynamicBufferModule
class SingleClassGaussian(DynamicBufferModule):
    """Gaussian density model fitted to a single class of feature vectors."""

    def __init__(self):
        super().__init__()
        # Register the fitted statistics as buffers so they follow the module
        # across devices and are included in the state dict.
        for buffer_name in ("mean_vec", "u_mat", "sigma_mat"):
            self.register_buffer(buffer_name, Tensor())

        self.mean_vec: Tensor
        self.u_mat: Tensor
        self.sigma_mat: Tensor

    def fit(self, dataset: Tensor) -> None:
        """Fit a Gaussian model to dataset X.

        The covariance matrix ``C = X.X^T`` is never formed explicitly.
        Instead the SVD of the centered data is stored:

            ``X = U.S.V^T``  =>  ``C = U.S^2.U^T``

        which lets the log-likelihood be evaluated without a full matrix
        inversion.

        Args:
            dataset (Tensor): Input dataset to fit the model. Samples are
                expected along dimension 1 (one column per sample).
        """
        n_samples = dataset.shape[1]
        self.mean_vec = torch.mean(dataset, dim=1)
        # Center and scale so that the singular values of the result are the
        # square roots of the covariance eigenvalues.
        centered = (dataset - self.mean_vec.reshape(-1, 1)) / math.sqrt(n_samples)
        self.u_mat, self.sigma_mat, _ = torch.linalg.svd(centered, full_matrices=False)

    def score_samples(self, features: Tensor) -> Tensor:
        """Compute the NLL (negative log likelihood) scores.

        Args:
            features (Tensor): Semantic features on which density modeling is
                performed.

        Returns:
            Tensor: NLL score per sample.
        """
        # Whiten the centered features: dividing U by the singular values is
        # equivalent to projecting onto U and scaling by 1/S.
        whitened = torch.matmul(features - self.mean_vec, self.u_mat / self.sigma_mat)
        log_det_term = 2 * torch.sum(torch.log(self.sigma_mat))
        return torch.sum(whitened * whitened, dim=1) + log_det_term

    def forward(self, dataset: Tensor) -> None:
        """Provides the same functionality as `fit`.

        Args:
            dataset (Tensor): Input dataset.
        """
        self.fit(dataset)
class DFMModel(nn.Module):
    """Model for the DFM algorithm.

    A PCA projection followed by a single-class Gaussian density model.

    Args:
        n_comps (float, optional): Ratio from which number of components for PCA are calculated. Defaults to 0.97.
        score_type (str, optional): Scoring type. Options are `fre` and `nll`. Defaults to "fre".
    """

    def __init__(self, n_comps: float = 0.97, score_type: str = "fre"):
        super().__init__()
        self.n_components = n_comps
        self.pca_model = PCA(n_components=self.n_components)
        self.gaussian_model = SingleClassGaussian()
        self.score_type = score_type

    def fit(self, dataset: Tensor) -> None:
        """Fit a PCA transformation and a Gaussian model to ``dataset``.

        Args:
            dataset (Tensor): Input dataset to fit the model.
        """
        self.pca_model.fit(dataset)
        reduced = self.pca_model.transform(dataset)
        # The Gaussian model expects one sample per column, hence the transpose.
        self.gaussian_model.fit(reduced.T)

    def score(self, features: Tensor) -> Tensor:
        """Compute anomaly scores.

        Scores are either PCA-based feature reconstruction error (FRE) scores
        or the Gaussian density-based NLL scores, depending on ``score_type``.

        Args:
            features (torch.Tensor): Semantic features on which PCA and density
                modeling is performed.

        Returns:
            Tensor: Per-sample anomaly scores.

        Raises:
            ValueError: If ``score_type`` is neither ``fre`` nor ``nll``.
        """
        projected = self.pca_model.transform(features)
        if self.score_type == "fre":
            reconstructed = self.pca_model.inverse_transform(projected)
            return torch.sum(torch.square(features - reconstructed), dim=1)
        if self.score_type == "nll":
            return self.gaussian_model.score_samples(projected)
        raise ValueError(f"unsupported score type: {self.score_type}")

    def forward(self, dataset: Tensor) -> None:
        """Provides the same functionality as `fit`.

        Args:
            dataset (Tensor): Input dataset.
        """
        self.fit(dataset)
| [
"anomalib.models.components.PCA"
] | [((1718, 1744), 'torch.mean', 'torch.mean', (['dataset'], {'dim': '(1)'}), '(dataset, dim=1)\n', (1728, 1744), False, 'import torch\n'), ((1875, 1927), 'torch.linalg.svd', 'torch.linalg.svd', (['data_centered'], {'full_matrices': '(False)'}), '(data_centered, full_matrices=False)\n', (1891, 1927), False, 'import torch\n'), ((2261, 2328), 'torch.matmul', 'torch.matmul', (['(features - self.mean_vec)', '(self.u_mat / self.sigma_mat)'], {}), '(features - self.mean_vec, self.u_mat / self.sigma_mat)\n', (2273, 2328), False, 'import torch\n'), ((3207, 3242), 'anomalib.models.components.PCA', 'PCA', ([], {'n_components': 'self.n_components'}), '(n_components=self.n_components)\n', (3210, 3242), False, 'from anomalib.models.components import PCA, DynamicBufferModule\n'), ((938, 946), 'torch.Tensor', 'Tensor', ([], {}), '()\n', (944, 946), False, 'from torch import Tensor, nn\n'), ((986, 994), 'torch.Tensor', 'Tensor', ([], {}), '()\n', (992, 994), False, 'from torch import Tensor, nn\n'), ((1038, 1046), 'torch.Tensor', 'Tensor', ([], {}), '()\n', (1044, 1046), False, 'from torch import Tensor, nn\n'), ((1812, 1834), 'math.sqrt', 'math.sqrt', (['num_samples'], {}), '(num_samples)\n', (1821, 1834), False, 'import math\n'), ((2343, 2404), 'torch.sum', 'torch.sum', (['(features_transformed * features_transformed)'], {'dim': '(1)'}), '(features_transformed * features_transformed, dim=1)\n', (2352, 2404), False, 'import torch\n'), ((2421, 2446), 'torch.log', 'torch.log', (['self.sigma_mat'], {}), '(self.sigma_mat)\n', (2430, 2446), False, 'import torch\n'), ((4410, 4454), 'torch.square', 'torch.square', (['(features - feats_reconstructed)'], {}), '(features - feats_reconstructed)\n', (4422, 4454), False, 'import torch\n')] |
from typing import Union
import pytorch_lightning as pl
import torch.nn.functional as F
from omegaconf import DictConfig, ListConfig
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import FakeData
from anomalib.utils.callbacks.visualizer_callback import VisualizerCallback
from anomalib.utils.metrics import AdaptiveThreshold, AnomalyScoreDistribution, MinMax
class FakeDataModule(pl.LightningDataModule):
    """Lightning data module serving torchvision ``FakeData`` images."""

    def __init__(self, batch_size: int = 32):
        super().__init__()
        self.batch_size = batch_size
        # Normalization constants match the MNIST convention used elsewhere
        # in these tests.
        self.pre_process = transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
        )

    def _fake_loader(self, size: int) -> DataLoader:
        """Build a loader over ``size`` random 3x32x32 images in 10 classes."""
        dataset = FakeData(
            size=size,
            num_classes=10,
            transform=self.pre_process,
            image_size=(3, 32, 32),
        )
        return DataLoader(dataset, batch_size=self.batch_size)

    def train_dataloader(self):
        return self._fake_loader(1000)

    def test_dataloader(self):
        return self._fake_loader(100)
class DummyModel(nn.Module):
    """Creates a very basic CNN model to fit image data for classification task.

    The test uses this to check if this model is converted to OpenVINO IR.
    """

    def __init__(self, hparams: Union[DictConfig, ListConfig]):
        super().__init__()
        self.hparams = hparams
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.conv2 = nn.Conv2d(32, 32, 5)
        self.conv3 = nn.Conv2d(32, 1, 7)
        self.fc1 = nn.Linear(400, 256)
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        batch_size = x.size(0)
        features = self.conv3(self.conv2(self.conv1(x)))
        flattened = features.view(batch_size, -1)
        # Dropout rate comes from the hparams config used by the test harness.
        hidden = F.dropout(self.fc1(flattened), p=self.hparams.model.dropout)
        return F.log_softmax(self.fc2(hidden), dim=1)
class DummyLightningModule(pl.LightningModule):
    """A dummy model which fits the torchvision FakeData dataset."""

    def __init__(self, hparams: Union[DictConfig, ListConfig]):
        super().__init__()
        self.save_hyperparameters(hparams)
        self.loss_fn = nn.NLLLoss()
        self.callbacks = [VisualizerCallback(task="segmentation")]  # test if this is removed

        # Thresholds and distribution statistics are kept on CPU, mirroring
        # the anomalib model interface the tests exercise.
        self.image_threshold = AdaptiveThreshold(hparams.model.threshold.image_default).cpu()
        self.pixel_threshold = AdaptiveThreshold(hparams.model.threshold.pixel_default).cpu()
        self.training_distribution = AnomalyScoreDistribution().cpu()
        self.min_max = MinMax().cpu()

        self.model = DummyModel(hparams)

    def training_step(self, batch, _):
        images, targets = batch
        predictions = self.model(images)
        return {"loss": self.loss_fn(predictions, targets)}

    def validation_step(self, batch, _):
        images, targets = batch
        predictions = self.model(images)
        loss = self.loss_fn(predictions, targets)
        self.log(name="loss", value=loss.item(), prog_bar=True)

    def configure_optimizers(self):
        """Plain SGD over all parameters, configured from hparams."""
        return optim.SGD(
            self.parameters(),
            lr=self.hparams.model.lr,
            momentum=self.hparams.model.momentum,
            weight_decay=self.hparams.model.weight_decay,
        )
| [
"anomalib.utils.metrics.AdaptiveThreshold",
"anomalib.utils.callbacks.visualizer_callback.VisualizerCallback",
"anomalib.utils.metrics.MinMax",
"anomalib.utils.metrics.AnomalyScoreDistribution"
] | [((1647, 1666), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(32)', '(3)'], {}), '(3, 32, 3)\n', (1656, 1666), False, 'from torch import nn, optim\n'), ((1688, 1708), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)', '(5)'], {}), '(32, 32, 5)\n', (1697, 1708), False, 'from torch import nn, optim\n'), ((1730, 1749), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(1)', '(7)'], {}), '(32, 1, 7)\n', (1739, 1749), False, 'from torch import nn, optim\n'), ((1769, 1788), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(256)'], {}), '(400, 256)\n', (1778, 1788), False, 'from torch import nn, optim\n'), ((1808, 1826), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(10)'], {}), '(256, 10)\n', (1817, 1826), False, 'from torch import nn, optim\n'), ((2042, 2084), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.hparams.model.dropout'}), '(x, p=self.hparams.model.dropout)\n', (2051, 2084), True, 'import torch.nn.functional as F\n'), ((2121, 2144), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (2134, 2144), True, 'import torch.nn.functional as F\n'), ((2439, 2451), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (2449, 2451), False, 'from torch import nn, optim\n'), ((808, 900), 'torchvision.datasets.FakeData', 'FakeData', ([], {'size': '(1000)', 'num_classes': '(10)', 'transform': 'self.pre_process', 'image_size': '(3, 32, 32)'}), '(size=1000, num_classes=10, transform=self.pre_process, image_size=\n (3, 32, 32))\n', (816, 900), False, 'from torchvision.datasets import FakeData\n'), ((1097, 1188), 'torchvision.datasets.FakeData', 'FakeData', ([], {'size': '(100)', 'num_classes': '(10)', 'transform': 'self.pre_process', 'image_size': '(3, 32, 32)'}), '(size=100, num_classes=10, transform=self.pre_process, image_size=(\n 3, 32, 32))\n', (1105, 1188), False, 'from torchvision.datasets import FakeData\n'), ((2478, 2517), 'anomalib.utils.callbacks.visualizer_callback.VisualizerCallback', 'VisualizerCallback', ([], {'task': 
'"""segmentation"""'}), "(task='segmentation')\n", (2496, 2517), False, 'from anomalib.utils.callbacks.visualizer_callback import VisualizerCallback\n'), ((668, 689), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (687, 689), False, 'from torchvision import transforms\n'), ((691, 733), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (711, 733), False, 'from torchvision import transforms\n'), ((2578, 2634), 'anomalib.utils.metrics.AdaptiveThreshold', 'AdaptiveThreshold', (['hparams.model.threshold.image_default'], {}), '(hparams.model.threshold.image_default)\n', (2595, 2634), False, 'from anomalib.utils.metrics import AdaptiveThreshold, AnomalyScoreDistribution, MinMax\n'), ((2672, 2728), 'anomalib.utils.metrics.AdaptiveThreshold', 'AdaptiveThreshold', (['hparams.model.threshold.pixel_default'], {}), '(hparams.model.threshold.pixel_default)\n', (2689, 2728), False, 'from anomalib.utils.metrics import AdaptiveThreshold, AnomalyScoreDistribution, MinMax\n'), ((2773, 2799), 'anomalib.utils.metrics.AnomalyScoreDistribution', 'AnomalyScoreDistribution', ([], {}), '()\n', (2797, 2799), False, 'from anomalib.utils.metrics import AdaptiveThreshold, AnomalyScoreDistribution, MinMax\n'), ((2829, 2837), 'anomalib.utils.metrics.MinMax', 'MinMax', ([], {}), '()\n', (2835, 2837), False, 'from anomalib.utils.metrics import AdaptiveThreshold, AnomalyScoreDistribution, MinMax\n')] |
"""MVTec Dataset.
MVTec This script contains PyTorch Dataset, Dataloader and PyTorch
Lightning DataModule for the MVTec dataset.
If the dataset is not on the file system, the script downloads and
extracts the dataset and create PyTorch data objects.
"""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import logging
import random
import tarfile
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
from urllib.request import urlretrieve
import albumentations as A
import cv2
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
from pytorch_lightning.core.datamodule import LightningDataModule
from torch import Tensor
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torchvision.datasets.folder import VisionDataset
from anomalib.data.transforms import PreProcessor
from anomalib.data.utils import read_image
from anomalib.utils.download_progress_bar import DownloadProgressBar
# Module-level logger used for download/extraction progress messages.
logger = logging.getLogger(name="Dataset: MVTec")
logger.setLevel(logging.DEBUG)

# Public API of this module.
__all__ = ["MVTec", "MVTecDataModule"]
def split_normal_images_in_train_set(samples: DataFrame, split_ratio: float = 0.1, seed: int = 0) -> DataFrame:
    """Move a random share of normal training images into the test split.

    This is needed when the test split contains no normal images at all:
    AUC computation fails with a single class, so part of the normal
    training set is reassigned to the test set.

    Args:
        samples (DataFrame): Dataframe containing dataset info such as filenames, splits etc.
        split_ratio (float, optional): Train-Test normal image split ratio. Defaults to 0.1.
        seed (int, optional): Random seed to ensure reproducibility. Defaults to 0.

    Returns:
        DataFrame: The input dataframe with part of the training set reassigned to test.
    """
    if seed > 0:
        random.seed(seed)

    normal_train_indices = samples.index[(samples.split == "train") & (samples.label == "good")].to_list()
    num_to_move = int(len(normal_train_indices) * split_ratio)
    moved_indices = random.sample(population=normal_train_indices, k=num_to_move)
    samples.loc[moved_indices, "split"] = "test"

    return samples
def create_validation_set_from_test_set(samples: DataFrame, seed: int = 0) -> DataFrame:
    """Create a validation set from the test set.

    Half of the normal and half of the anomalous test images are
    reassigned to a ``val`` split.

    Args:
        samples (DataFrame): Dataframe containing dataset info such as filenames, splits etc.
        seed (int, optional): Random seed to ensure reproducibility. Defaults to 0.

    Returns:
        DataFrame: The input dataframe with a validation split assigned.
    """
    if seed > 0:
        random.seed(seed)

    # Process normal images first, then anomalous ones, so the RNG draws
    # stay reproducible for a given seed.
    for normal in (True, False):
        label_mask = (samples.label == "good") if normal else (samples.label != "good")
        candidates = samples.index[(samples.split == "test") & label_mask].to_list()
        chosen = random.sample(population=candidates, k=len(candidates) // 2)
        samples.loc[chosen, "split"] = "val"

    return samples
def make_mvtec_dataset(
    path: Path,
    split: Optional[str] = None,
    split_ratio: float = 0.1,
    seed: int = 0,
    create_validation_set: bool = False,
) -> DataFrame:
    """Create MVTec samples by parsing the MVTec data file structure.

    The files are expected to follow the structure:
        path/to/dataset/split/category/image_filename.png
        path/to/dataset/ground_truth/category/mask_filename.png

    This function creates a dataframe to store the parsed information based on the following format:
    |---|---------------|-------|---------|---------------|---------------------------------------|-------------|
    |   | path          | split | label   | image_path    | mask_path                             | label_index |
    |---|---------------|-------|---------|---------------|---------------------------------------|-------------|
    | 0 | datasets/name | test  | defect  | filename.png  | ground_truth/defect/filename_mask.png | 1           |
    |---|---------------|-------|---------|---------------|---------------------------------------|-------------|

    Args:
        path (Path): Path to dataset
        split (str, optional): Dataset split (ie., either train or test). Defaults to None.
        split_ratio (float, optional): Ratio to split normal training images and add to the
            test set in case test set doesn't contain any normal images.
            Defaults to 0.1.
        seed (int, optional): Random seed to ensure reproducibility when splitting. Defaults to 0.
        create_validation_set (bool, optional): Boolean to create a validation set from the test set.
            MVTec dataset does not contain a validation set. Those wanting to create a validation set
            could set this flag to ``True``.

    Example:
        The following example shows how to get training samples from MVTec bottle category:

        >>> root = Path('./MVTec')
        >>> category = 'bottle'
        >>> path = root / category
        >>> samples = make_mvtec_dataset(path, split='train', split_ratio=0.1, seed=0)
        >>> samples.head()
           path         split label image_path                       mask_path                                    label_index
        0  MVTec/bottle train good  MVTec/bottle/train/good/105.png MVTec/bottle/ground_truth/good/105_mask.png  0

    Returns:
        DataFrame: an output dataframe containing samples for the requested split (ie., train or test)

    Raises:
        RuntimeError: If no png images are found under ``path``.
    """
    samples_list = [(str(path),) + filename.parts[-3:] for filename in path.glob("**/*.png")]
    if len(samples_list) == 0:
        raise RuntimeError(f"Found 0 images in {path}")

    samples = pd.DataFrame(samples_list, columns=["path", "split", "label", "image_path"])
    samples = samples[samples.split != "ground_truth"]

    # Create mask_path column. The ".png" extension is removed by slicing
    # rather than ``str.rstrip("png")``: rstrip strips *characters* from the
    # set {p, n, g}, which would corrupt stems ending in those letters or in
    # dots (flake8-bugbear B005). The glob above guarantees a ".png" suffix.
    samples["mask_path"] = (
        samples.path
        + "/ground_truth/"
        + samples.label
        + "/"
        + samples.image_path.str[: -len(".png")]
        + "_mask.png"
    )

    # Modify image_path column by converting to absolute path
    samples["image_path"] = samples.path + "/" + samples.split + "/" + samples.label + "/" + samples.image_path

    # Split the normal images in training set if test set doesn't
    # contain any normal images. This is needed because AUC score
    # cannot be computed based on 1-class
    if sum((samples.split == "test") & (samples.label == "good")) == 0:
        samples = split_normal_images_in_train_set(samples, split_ratio, seed)

    # Good images don't have mask
    samples.loc[(samples.split == "test") & (samples.label == "good"), "mask_path"] = ""

    # Create label index for normal (0) and anomalous (1) images.
    samples.loc[(samples.label == "good"), "label_index"] = 0
    samples.loc[(samples.label != "good"), "label_index"] = 1
    samples.label_index = samples.label_index.astype(int)

    if create_validation_set:
        samples = create_validation_set_from_test_set(samples, seed=seed)

    # Get the data frame for the requested split, if any.
    if split is not None and split in ["train", "val", "test"]:
        samples = samples[samples.split == split]
        samples = samples.reset_index(drop=True)

    return samples
class MVTec(VisionDataset):
    """MVTec PyTorch Dataset.

    Wraps one MVTec category directory and serves per-sample dictionaries
    whose keys depend on ``split`` and ``task`` (see ``__getitem__``).
    """

    def __init__(
        self,
        root: Union[Path, str],
        category: str,
        pre_process: PreProcessor,
        split: str,
        task: str,
        download: bool = False,
        seed: int = 0,
        create_validation_set: bool = False,
    ) -> None:
        """Mvtec Dataset class.

        Args:
            root: Path to the MVTec dataset
            category: Name of the MVTec category.
            pre_process: List of pre_processing object containing albumentation compose.
            split: 'train', 'val' or 'test'
            task: ``classification`` or ``segmentation``
            download: Boolean to download the MVTec dataset.
            seed: seed used for the random subset splitting
            create_validation_set: Create a validation subset in addition to the train and test subsets

        Examples:
            >>> from anomalib.data.mvtec import MVTec
            >>> from anomalib.data.transforms import PreProcessor
            >>> pre_process = PreProcessor(image_size=256)
            >>> dataset = MVTec(
            ...     root='./datasets/MVTec',
            ...     category='leather',
            ...     pre_process=pre_process,
            ...     task="classification",
            ...     is_train=True,
            ... )
            >>> dataset[0].keys()
            dict_keys(['image'])

            >>> dataset.split = "test"
            >>> dataset[0].keys()
            dict_keys(['image', 'image_path', 'label'])

            >>> dataset.task = "segmentation"
            >>> dataset.split = "train"
            >>> dataset[0].keys()
            dict_keys(['image'])

            >>> dataset.split = "test"
            >>> dataset[0].keys()
            dict_keys(['image_path', 'label', 'mask_path', 'image', 'mask'])

            >>> dataset[0]["image"].shape, dataset[0]["mask"].shape
            (torch.Size([3, 256, 256]), torch.Size([256, 256]))
        """
        super().__init__(root)
        self.root = Path(root) if isinstance(root, str) else root
        self.category: str = category
        self.split = split
        self.task = task
        self.pre_process = pre_process

        # Download must happen before the sample index is built below.
        if download:
            self._download()

        self.samples = make_mvtec_dataset(
            path=self.root / category, split=self.split, seed=seed, create_validation_set=create_validation_set
        )

    def _download(self) -> None:
        """Download the MVTec dataset.

        Skipped (with a warning) when the category directory already exists;
        otherwise downloads the archive, extracts it, and removes the tar file.
        """
        if (self.root / self.category).is_dir():
            logger.warning("Dataset directory exists.")
        else:
            self.root.mkdir(parents=True, exist_ok=True)
            dataset_name = "mvtec_anomaly_detection.tar.xz"
            self.filename = self.root / dataset_name

            logger.info("Downloading MVTec Dataset")
            # NOTE(review): the FTP URL below contains a redacted credential
            # placeholder (`<EMAIL>`) — confirm the real guest address before use.
            with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc=dataset_name) as progress_bar:
                urlretrieve(  # nosec
                    url=f"ftp://guest:<EMAIL>/mvtec_anomaly_detection/{dataset_name}",
                    filename=self.filename,
                    reporthook=progress_bar.update_to,
                )  # nosec

            self._extract()
            self._clean()

    def _extract(self) -> None:
        """Extract MVTec Dataset."""
        logger.info("Extracting MVTec dataset")
        # NOTE(review): extractall() on a downloaded archive performs no member
        # sanitization — a crafted tar could write outside ``self.root``
        # (path traversal); consider validating members or using an
        # extraction filter.
        with tarfile.open(self.filename) as file:
            file.extractall(self.root)

    def _clean(self) -> None:
        """Cleanup MVTec Dataset tar file."""
        logger.info("Cleaning up the tar file")
        self.filename.unlink()

    def __len__(self) -> int:
        """Get length of the dataset."""
        return len(self.samples)

    def __getitem__(self, index: int) -> Dict[str, Union[str, Tensor]]:
        """Get dataset item for the index ``index``.

        Args:
            index (int): Index to get the item.

        Returns:
            Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: Dict of image tensor during training.
                Otherwise, Dict containing image path, target path, image tensor, label and transformed bounding box.
        """
        item: Dict[str, Union[str, Tensor]] = {}

        image_path = self.samples.image_path[index]
        image = read_image(image_path)

        # Training items carry only the pre-processed image tensor.
        if self.split == "train":# or self.task == "classification":
            pre_processed = self.pre_process(image=image)
            item = {"image": pre_processed["image"]}
        elif self.split in ["val", "test"]:
            label_index = self.samples.label_index[index]

            item["image_path"] = image_path
            item["label"] = label_index

            pre_processed = self.pre_process(image=image)
            item["image"] = pre_processed["image"]

            if self.task == "segmentation":
                mask_path = self.samples.mask_path[index]

                # Only Anomalous (1) images has masks in MVTec dataset.
                # Therefore, create empty mask for Normal (0) images.
                if label_index == 0:
                    mask = np.zeros(shape=image.shape[:2])
                else:
                    # flags=0 loads the mask as grayscale; normalize to [0, 1].
                    mask = cv2.imread(mask_path, flags=0) / 255.0

                # Re-run pre-processing with the mask so both receive the
                # same spatial transforms.
                pre_processed = self.pre_process(image=image, mask=mask)

                item["mask_path"] = mask_path
                item["image"] = pre_processed["image"]
                item["mask"] = pre_processed["mask"]

        return item
class MVTecDataModule(LightningDataModule):
    """MVTec Lightning Data Module.

    Builds train/val/test ``MVTec`` datasets for one category and exposes
    the corresponding dataloaders.
    """

    def __init__(
        self,
        root: str,
        category: str,
        task: str,
        # TODO: Remove default values. IAAALD-211
        image_size: Optional[Union[int, Tuple[int, int]]] = None,
        train_batch_size: int = 32,
        test_batch_size: int = 32,
        num_workers: int = 8,
        transform_config: Optional[Union[str, A.Compose]] = None,
        seed: int = 0,
        create_validation_set: bool = False,
    ) -> None:
        """Mvtec Lightning Data Module.

        Args:
            root: Path to the MVTec dataset
            category: Name of the MVTec category.
            task: ``classification`` or ``segmentation``
            image_size: Variable to which image is resized.
            train_batch_size: Training batch size.
            test_batch_size: Testing batch size.
            num_workers: Number of workers.
            transform_config: Config for pre-processing.
            seed: seed used for the random subset splitting
            create_validation_set: Create a validation subset in addition to the train and test subsets

        Examples
            >>> from anomalib.data import MVTecDataModule
            >>> datamodule = MVTecDataModule(
            ...     root="./datasets/MVTec",
            ...     category="leather",
            ...     image_size=256,
            ...     train_batch_size=32,
            ...     test_batch_size=32,
            ...     num_workers=8,
            ...     transform_config=None,
            ... )
            >>> datamodule.setup()

            >>> i, data = next(enumerate(datamodule.train_dataloader()))
            >>> data["image"].shape
            torch.Size([32, 3, 256, 256])

            >>> i, data = next(enumerate(datamodule.val_dataloader()))
            >>> data["image"].shape, data["mask"].shape
            (torch.Size([32, 3, 256, 256]), torch.Size([32, 256, 256]))
        """
        super().__init__()

        # Path(Path) is a no-op, so this normalizes both str and Path input.
        self.root = root if isinstance(root, Path) else Path(root)
        self.category = category
        self.task = task
        self.dataset_path = self.root / self.category
        self.pre_process = PreProcessor(config=transform_config, image_size=image_size)

        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
        self.num_workers = num_workers

        self.create_validation_set = create_validation_set
        self.seed = seed

        self.train_data: Dataset
        self.test_data: Dataset
        if create_validation_set:
            self.val_data: Dataset

    def _make_dataset(self, split: str) -> "MVTec":
        """Instantiate the MVTec dataset for the given split."""
        return MVTec(
            root=self.root,
            category=self.category,
            pre_process=self.pre_process,
            split=split,
            task=self.task,
            seed=self.seed,
            create_validation_set=self.create_validation_set,
        )

    def setup(self, stage: Optional[str] = None) -> None:
        """Setup train, validation and test data.

        Args:
            stage: Optional[str]: Train/Val/Test stages. (Default value = None)
        """
        if self.create_validation_set:
            self.val_data = self._make_dataset("val")
        self.test_data = self._make_dataset("test")
        if stage in (None, "fit"):
            self.train_data = self._make_dataset("train")

    def train_dataloader(self) -> DataLoader:
        """Get train dataloader."""
        return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batch_size, num_workers=self.num_workers)

    def val_dataloader(self) -> DataLoader:
        """Get validation dataloader.

        Falls back to the test set when no validation set was created.
        """
        if self.create_validation_set:
            dataset = self.val_data
        else:
            dataset = self.test_data
        return DataLoader(dataset=dataset, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)

    def test_dataloader(self) -> DataLoader:
        """Get test dataloader."""
        return DataLoader(self.test_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)
| [
"anomalib.data.utils.read_image",
"anomalib.utils.download_progress_bar.DownloadProgressBar",
"anomalib.data.transforms.PreProcessor"
] | [((1516, 1556), 'logging.getLogger', 'logging.getLogger', ([], {'name': '"""Dataset: MVTec"""'}), "(name='Dataset: MVTec')\n", (1533, 1556), False, 'import logging\n'), ((2871, 2950), 'random.sample', 'random.sample', ([], {'population': 'normal_train_image_indices', 'k': 'num_normal_valid_images'}), '(population=normal_train_image_indices, k=num_normal_valid_images)\n', (2884, 2950), False, 'import random\n'), ((3760, 3838), 'random.sample', 'random.sample', ([], {'population': 'normal_test_image_indices', 'k': 'num_normal_valid_images'}), '(population=normal_test_image_indices, k=num_normal_valid_images)\n', (3773, 3838), False, 'import random\n'), ((4129, 4216), 'random.sample', 'random.sample', ([], {'population': 'abnormal_test_image_indices', 'k': 'num_abnormal_valid_images'}), '(population=abnormal_test_image_indices, k=\n num_abnormal_valid_images)\n', (4142, 4216), False, 'import random\n'), ((7438, 7514), 'pandas.DataFrame', 'pd.DataFrame', (['samples_list'], {'columns': "['path', 'split', 'label', 'image_path']"}), "(samples_list, columns=['path', 'split', 'label', 'image_path'])\n", (7450, 7514), True, 'import pandas as pd\n'), ((2565, 2582), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2576, 2582), False, 'import random\n'), ((3512, 3529), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3523, 3529), False, 'import random\n'), ((13321, 13343), 'anomalib.data.utils.read_image', 'read_image', (['image_path'], {}), '(image_path)\n', (13331, 13343), False, 'from anomalib.data.utils import read_image\n'), ((16853, 16913), 'anomalib.data.transforms.PreProcessor', 'PreProcessor', ([], {'config': 'transform_config', 'image_size': 'image_size'}), '(config=transform_config, image_size=image_size)\n', (16865, 16913), False, 'from anomalib.data.transforms import PreProcessor\n'), ((18605, 18714), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_data'], {'shuffle': '(True)', 'batch_size': 'self.train_batch_size', 'num_workers': 
'self.num_workers'}), '(self.train_data, shuffle=True, batch_size=self.train_batch_size,\n num_workers=self.num_workers)\n', (18615, 18714), False, 'from torch.utils.data import DataLoader\n'), ((18894, 19003), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset', 'shuffle': '(False)', 'batch_size': 'self.test_batch_size', 'num_workers': 'self.num_workers'}), '(dataset=dataset, shuffle=False, batch_size=self.test_batch_size,\n num_workers=self.num_workers)\n', (18904, 19003), False, 'from torch.utils.data import DataLoader\n'), ((19096, 19204), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_data'], {'shuffle': '(False)', 'batch_size': 'self.test_batch_size', 'num_workers': 'self.num_workers'}), '(self.test_data, shuffle=False, batch_size=self.test_batch_size,\n num_workers=self.num_workers)\n', (19106, 19204), False, 'from torch.utils.data import DataLoader\n'), ((11061, 11071), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (11065, 11071), False, 'from pathlib import Path\n'), ((12422, 12449), 'tarfile.open', 'tarfile.open', (['self.filename'], {}), '(self.filename)\n', (12434, 12449), False, 'import tarfile\n'), ((16702, 16712), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (16706, 16712), False, 'from pathlib import Path\n'), ((11890, 11967), 'anomalib.utils.download_progress_bar.DownloadProgressBar', 'DownloadProgressBar', ([], {'unit': '"""B"""', 'unit_scale': '(True)', 'miniters': '(1)', 'desc': 'dataset_name'}), "(unit='B', unit_scale=True, miniters=1, desc=dataset_name)\n", (11909, 11967), False, 'from anomalib.utils.download_progress_bar import DownloadProgressBar\n'), ((12001, 12148), 'urllib.request.urlretrieve', 'urlretrieve', ([], {'url': 'f"""ftp://guest:<EMAIL>/mvtec_anomaly_detection/{dataset_name}"""', 'filename': 'self.filename', 'reporthook': 'progress_bar.update_to'}), "(url=\n f'ftp://guest:<EMAIL>/mvtec_anomaly_detection/{dataset_name}', filename\n =self.filename, reporthook=progress_bar.update_to)\n", 
(12012, 12148), False, 'from urllib.request import urlretrieve\n'), ((14144, 14175), 'numpy.zeros', 'np.zeros', ([], {'shape': 'image.shape[:2]'}), '(shape=image.shape[:2])\n', (14152, 14175), True, 'import numpy as np\n'), ((14225, 14255), 'cv2.imread', 'cv2.imread', (['mask_path'], {'flags': '(0)'}), '(mask_path, flags=0)\n', (14235, 14255), False, 'import cv2\n')] |
"""PyTorch model for CFlow model implementation."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from typing import List, Tuple
import einops
import torch
import torchvision
from torch import nn
from anomalib.models.cflow.anomaly_map import AnomalyMapGenerator
from anomalib.models.cflow.utils import cflow_head, get_logp, positional_encoding_2d
from anomalib.models.components import FeatureExtractor
class CflowModel(nn.Module):
    """CFLOW: Conditional Normalizing Flows.

    Pairs a frozen CNN feature encoder with one normalizing-flow decoder per
    pooled layer, conditioned on a 2D positional encoding, and turns the
    resulting per-location log-likelihoods into an anomaly map.
    """
    def __init__(
        self,
        input_size: Tuple[int, int],
        backbone: str,
        layers: List[str],
        fiber_batch_size: int = 64,
        decoder: str = "freia-cflow",
        condition_vector: int = 128,
        coupling_blocks: int = 8,
        clamp_alpha: float = 1.9,
        permute_soft: bool = False,
    ):
        """Build encoder, per-layer flow decoders and the anomaly-map generator.

        Args:
            input_size (Tuple[int, int]): Spatial size of the input images.
            backbone (str): Name of a torchvision model used as the feature extractor.
            layers (List[str]): Encoder layers whose activations are modeled.
            fiber_batch_size (int): Number of feature "fibers" (spatial positions)
                passed through a decoder per call.
            decoder (str): Decoder architecture identifier (stored in ``dec_arch``;
                not otherwise used in this class).
            condition_vector (int): Length of the positional-encoding condition vector.
            coupling_blocks (int): Number of coupling blocks in each flow head.
            clamp_alpha (float): Clamp value passed to each flow head.
            permute_soft (bool): Whether the flow heads use soft permutations.
        """
        super().__init__()
        self.backbone = getattr(torchvision.models, backbone)
        self.fiber_batch_size = fiber_batch_size
        self.condition_vector: int = condition_vector
        self.dec_arch = decoder
        self.pool_layers = layers
        self.encoder = FeatureExtractor(backbone=self.backbone(pretrained=True), layers=self.pool_layers)
        self.pool_dims = self.encoder.out_dims
        # One flow decoder per pooled encoder layer, sized to that layer's channel count.
        self.decoders = nn.ModuleList(
            [
                cflow_head(
                    condition_vector=self.condition_vector,
                    coupling_blocks=coupling_blocks,
                    clamp_alpha=clamp_alpha,
                    n_features=pool_dim,
                    permute_soft=permute_soft,
                )
                for pool_dim in self.pool_dims
            ]
        )
        # encoder model is fixed
        for parameters in self.encoder.parameters():
            parameters.requires_grad = False
        self.anomaly_map_generator = AnomalyMapGenerator(image_size=tuple(input_size), pool_layers=self.pool_layers)
    def forward(self, images):
        """Forward-pass images into the network to extract encoder features and compute probability.
        Args:
            images: Batch of images.
        Returns:
            Predicted anomaly maps.
        """
        self.encoder.eval()
        self.decoders.eval()
        # Feature extraction only; gradients through the frozen encoder are never needed.
        with torch.no_grad():
            activation = self.encoder(images)
        # Per-layer accumulators for the fiber log-likelihoods (start empty, grow via cat).
        distribution = [torch.Tensor(0).to(images.device) for _ in self.pool_layers]
        height: List[int] = []
        width: List[int] = []
        for layer_idx, layer in enumerate(self.pool_layers):
            encoder_activations = activation[layer]  # BxCxHxW
            batch_size, dim_feature_vector, im_height, im_width = encoder_activations.size()
            image_size = im_height * im_width
            embedding_length = batch_size * image_size  # number of rows in the conditional vector
            height.append(im_height)
            width.append(im_width)
            # repeats positional encoding for the entire batch 1 C H W to B C H W
            pos_encoding = einops.repeat(
                positional_encoding_2d(self.condition_vector, im_height, im_width).unsqueeze(0),
                "b c h w-> (tile b) c h w",
                tile=batch_size,
            ).to(images.device)
            # Flatten spatial positions: one row ("fiber") per (batch, h, w) location.
            c_r = einops.rearrange(pos_encoding, "b c h w -> (b h w) c")  # BHWxP
            e_r = einops.rearrange(encoder_activations, "b c h w -> (b h w) c")  # BHWxC
            decoder = self.decoders[layer_idx].to(images.device)
            # Sometimes during validation, the last batch E / N is not a whole number. Hence we need to add 1.
            # It is assumed that during training that E / N is a whole number as no errors were discovered during
            # testing. In case it is observed in the future, we can use only this line and ensure that FIB is at
            # least 1 or set `drop_last` in the dataloader to drop the last non-full batch.
            fiber_batches = embedding_length // self.fiber_batch_size + int(
                embedding_length % self.fiber_batch_size > 0
            )
            for batch_num in range(fiber_batches):  # per-fiber processing
                if batch_num < (fiber_batches - 1):
                    idx = torch.arange(batch_num * self.fiber_batch_size, (batch_num + 1) * self.fiber_batch_size)
                else:  # When non-full batch is encountered batch_num+1 * N will go out of bounds
                    idx = torch.arange(batch_num * self.fiber_batch_size, embedding_length)
                c_p = c_r[idx]  # NxP
                e_p = e_r[idx]  # NxC
                # decoder returns the transformed variable z and the log Jacobian determinant
                with torch.no_grad():
                    p_u, log_jac_det = decoder(e_p, [c_p])
                #
                decoder_log_prob = get_logp(dim_feature_vector, p_u, log_jac_det)
                log_prob = decoder_log_prob / dim_feature_vector  # likelihood per dim
                distribution[layer_idx] = torch.cat((distribution[layer_idx], log_prob))
        output = self.anomaly_map_generator(distribution=distribution, height=height, width=width)
        # Restore decoder training mode (the encoder stays frozen/eval).
        self.decoders.train()
        return output.to(images.device)
| [
"anomalib.models.cflow.utils.get_logp",
"anomalib.models.cflow.utils.cflow_head",
"anomalib.models.cflow.utils.positional_encoding_2d"
] | [((2761, 2776), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2774, 2776), False, 'import torch\n'), ((3756, 3810), 'einops.rearrange', 'einops.rearrange', (['pos_encoding', '"""b c h w -> (b h w) c"""'], {}), "(pos_encoding, 'b c h w -> (b h w) c')\n", (3772, 3810), False, 'import einops\n'), ((3838, 3899), 'einops.rearrange', 'einops.rearrange', (['encoder_activations', '"""b c h w -> (b h w) c"""'], {}), "(encoder_activations, 'b c h w -> (b h w) c')\n", (3854, 3899), False, 'import einops\n'), ((1845, 2010), 'anomalib.models.cflow.utils.cflow_head', 'cflow_head', ([], {'condition_vector': 'self.condition_vector', 'coupling_blocks': 'coupling_blocks', 'clamp_alpha': 'clamp_alpha', 'n_features': 'pool_dim', 'permute_soft': 'permute_soft'}), '(condition_vector=self.condition_vector, coupling_blocks=\n coupling_blocks, clamp_alpha=clamp_alpha, n_features=pool_dim,\n permute_soft=permute_soft)\n', (1855, 2010), False, 'from anomalib.models.cflow.utils import cflow_head, get_logp, positional_encoding_2d\n'), ((5310, 5356), 'anomalib.models.cflow.utils.get_logp', 'get_logp', (['dim_feature_vector', 'p_u', 'log_jac_det'], {}), '(dim_feature_vector, p_u, log_jac_det)\n', (5318, 5356), False, 'from anomalib.models.cflow.utils import cflow_head, get_logp, positional_encoding_2d\n'), ((5486, 5532), 'torch.cat', 'torch.cat', (['(distribution[layer_idx], log_prob)'], {}), '((distribution[layer_idx], log_prob))\n', (5495, 5532), False, 'import torch\n'), ((2849, 2864), 'torch.Tensor', 'torch.Tensor', (['(0)'], {}), '(0)\n', (2861, 2864), False, 'import torch\n'), ((4711, 4804), 'torch.arange', 'torch.arange', (['(batch_num * self.fiber_batch_size)', '((batch_num + 1) * self.fiber_batch_size)'], {}), '(batch_num * self.fiber_batch_size, (batch_num + 1) * self.\n fiber_batch_size)\n', (4723, 4804), False, 'import torch\n'), ((4924, 4989), 'torch.arange', 'torch.arange', (['(batch_num * self.fiber_batch_size)', 'embedding_length'], {}), '(batch_num * 
self.fiber_batch_size, embedding_length)\n', (4936, 4989), False, 'import torch\n'), ((5181, 5196), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5194, 5196), False, 'import torch\n'), ((3548, 3614), 'anomalib.models.cflow.utils.positional_encoding_2d', 'positional_encoding_2d', (['self.condition_vector', 'im_height', 'im_width'], {}), '(self.condition_vector, im_height, im_width)\n', (3570, 3614), False, 'from anomalib.models.cflow.utils import cflow_head, get_logp, positional_encoding_2d\n')] |
"""Custom Folder Dataset.
This script creates a custom dataset from a folder.
"""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
import albumentations as A
import cv2
import numpy as np
from pandas.core.frame import DataFrame
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from torch import Tensor
from torch.utils.data import DataLoader, Dataset
from torchvision.datasets.folder import IMG_EXTENSIONS
from anomalib.data.inference import InferenceDataset
from anomalib.data.utils import read_image
from anomalib.data.utils.split import (
create_validation_set_from_test_set,
split_normal_images_in_train_set,
)
from anomalib.pre_processing import PreProcessor
# Module-level logger; DEBUG level is set explicitly so dataset creation and
# split details are visible during development runs.
logger = logging.getLogger(name="Dataset: Folder Dataset")
logger.setLevel(logging.DEBUG)
def _check_and_convert_path(path: Union[str, Path]) -> Path:
"""Check an input path, and convert to Pathlib object.
Args:
path (Union[str, Path]): Input path.
Returns:
Path: Output path converted to pathlib object.
"""
if not isinstance(path, Path):
path = Path(path)
return path
def make_dataset(
    normal_dir: Union[str, Path],
    abnormal_dir: Union[str, Path],
    mask_dir: Optional[Union[str, Path]] = None,
    split: Optional[str] = None,
    split_ratio: float = 0.2,
    seed: int = 0,
    create_validation_set: bool = True,
    extensions: Optional[Tuple[str, ...]] = None,
):
    """Make Folder Dataset.

    Args:
        normal_dir (Union[str, Path]): Path to the directory containing normal images.
        abnormal_dir (Union[str, Path]): Path to the directory containing abnormal images.
        mask_dir (Optional[Union[str, Path]], optional): Path to the directory containing
            the mask annotations. Defaults to None.
        split (Optional[str], optional): Dataset split (ie., either train or test). Defaults to None.
        split_ratio (float, optional): Ratio to split normal training images and add to the
            test set in case test set doesn't contain any normal images.
            Defaults to 0.2.
        seed (int, optional): Random seed to ensure reproducibility when splitting. Defaults to 0.
        create_validation_set (bool, optional): Boolean to create a validation set from the test set.
            Those wanting to create a validation set could set this flag to ``True``.
        extensions (Optional[Tuple[str, ...]], optional): Type of the image extensions to read from the
            directory.

    Raises:
        RuntimeError: When no image with a matching extension is found in
            ``normal_dir`` or ``abnormal_dir``.

    Returns:
        DataFrame: an output dataframe containing samples for the requested split (ie., train or test)
    """
    normal_dir = _check_and_convert_path(normal_dir)
    abnormal_dir = _check_and_convert_path(abnormal_dir)
    if extensions is None:
        extensions = IMG_EXTENSIONS
    # Get filenames from normal and abnormal directory.
    normal_filenames = [f for f in normal_dir.glob(r"**/*") if f.suffix in extensions]
    abnormal_filenames = [f for f in abnormal_dir.glob(r"**/*") if f.suffix in extensions]
    filenames = normal_filenames + abnormal_filenames
    if len(normal_filenames) == 0:
        raise RuntimeError(f"Found 0 normal images in {normal_dir}")
    if len(abnormal_filenames) == 0:
        # Fixed typo in the error message: "annormal" -> "abnormal".
        raise RuntimeError(f"Found 0 abnormal images in {abnormal_dir}")
    # Add normal and abnormal labels to the samples as `label` column.
    normal_labels = ["normal"] * len(normal_filenames)
    abnormal_labels = ["abnormal"] * len(abnormal_filenames)
    labels = normal_labels + abnormal_labels
    samples = DataFrame({"image_path": filenames, "label": labels})
    # Create label index for normal (0) and abnormal (1) images.
    samples.loc[(samples.label == "normal"), "label_index"] = 0
    samples.loc[(samples.label == "abnormal"), "label_index"] = 1
    samples.label_index = samples.label_index.astype(int)
    # If a path to mask is provided, add it to the sample dataframe.
    if mask_dir is not None:
        mask_dir = _check_and_convert_path(mask_dir)
        # Normal images carry no annotation; an empty-string placeholder keeps the
        # column defined for every row. Mask filenames are assumed to match the
        # abnormal image filenames.
        normal_gt = [""] * len(normal_filenames)
        abnormal_gt = [str(mask_dir / f.name) for f in abnormal_filenames]
        samples["mask_path"] = normal_gt + abnormal_gt
    # Ensure the pathlib objects are converted to str.
    # This is because torch dataloader doesn't like pathlib.
    samples = samples.astype({"image_path": "str"})
    # Create train/test split.
    # By default, all the normal samples are assigned as train.
    # and all the abnormal samples are test.
    samples.loc[(samples.label == "normal"), "split"] = "train"
    samples.loc[(samples.label == "abnormal"), "split"] = "test"
    samples = split_normal_images_in_train_set(
        samples=samples, split_ratio=split_ratio, seed=seed, normal_label="normal"
    )
    # If `create_validation_set` is set to True, the test set is split into half.
    if create_validation_set:
        samples = create_validation_set_from_test_set(samples, seed=seed, normal_label="normal")
    # Get the data frame for the split.
    if split is not None and split in ["train", "val", "test"]:
        samples = samples[samples.split == split]
        samples = samples.reset_index(drop=True)
    return samples
class FolderDataset(Dataset):
    """Folder Dataset.

    Map-style dataset over a folder of normal/abnormal images, built from the
    samples dataframe produced by :func:`make_dataset`.
    """
    def __init__(
        self,
        normal_dir: Union[Path, str],
        abnormal_dir: Union[Path, str],
        split: str,
        pre_process: PreProcessor,
        split_ratio: float = 0.2,
        mask_dir: Optional[Union[Path, str]] = None,
        extensions: Optional[Tuple[str, ...]] = None,
        task: Optional[str] = None,
        seed: int = 0,
        create_validation_set: bool = False,
    ) -> None:
        """Create Folder Folder Dataset.
        Args:
            normal_dir (Union[str, Path]): Path to the directory containing normal images.
            abnormal_dir (Union[str, Path]): Path to the directory containing abnormal images.
            split (Optional[str], optional): Dataset split (ie., either train or test). Defaults to None.
            pre_process (Optional[PreProcessor], optional): Image Pro-processor to apply transform.
                Defaults to None.
            split_ratio (float, optional): Ratio to split normal training images and add to the
                test set in case test set doesn't contain any normal images.
                Defaults to 0.2.
            mask_dir (Optional[Union[str, Path]], optional): Path to the directory containing
                the mask annotations. Defaults to None.
            extensions (Optional[Tuple[str, ...]], optional): Type of the image extensions to read from the
                directory.
            task (Optional[str], optional): Task type. (classification or segmentation) Defaults to None.
            seed (int, optional): Random seed to ensure reproducibility when splitting. Defaults to 0.
            create_validation_set (bool, optional):Boolean to create a validation set from the test set.
                Those wanting to create a validation set could set this flag to ``True``.
        Raises:
            ValueError: When task is set to classification and `mask_dir` is provided. When `mask_dir` is
                provided, `task` should be set to `segmentation`.
        """
        self.split = split
        if task == "classification" and mask_dir:
            raise ValueError(
                "Classification task is requested, but mask directory is provided. "
                "Segmentation task is to be chosen if mask directory is provided."
            )
        # Without masks (or without an explicit task) the dataset degrades to classification.
        if task is None or mask_dir is None:
            self.task = "classification"
        else:
            self.task = task
        self.pre_process = pre_process
        self.samples = make_dataset(
            normal_dir=normal_dir,
            abnormal_dir=abnormal_dir,
            mask_dir=mask_dir,
            split=split,
            split_ratio=split_ratio,
            seed=seed,
            create_validation_set=create_validation_set,
            extensions=extensions,
        )
    def __len__(self) -> int:
        """Get length of the dataset."""
        return len(self.samples)
    def __getitem__(self, index: int) -> Dict[str, Union[str, Tensor]]:
        """Get dataset item for the index ``index``.
        Args:
            index (int): Index to get the item.
        Returns:
            Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: Dict of image tensor during training.
            Otherwise, Dict containing image path, target path, image tensor, label and transformed bounding box.
        """
        item: Dict[str, Union[str, Tensor]] = {}
        image_path = self.samples.image_path[index]
        image = read_image(image_path)
        pre_processed = self.pre_process(image=image)
        item = {"image": pre_processed["image"]}
        # Labels (and masks for segmentation) are only attached for evaluation splits.
        if self.split in ["val", "test"]:
            label_index = self.samples.label_index[index]
            item["image_path"] = image_path
            item["label"] = label_index
            if self.task == "segmentation":
                mask_path = self.samples.mask_path[index]
                # Only Anomalous (1) images has masks in MVTec AD dataset.
                # Therefore, create empty mask for Normal (0) images.
                if label_index == 0:
                    mask = np.zeros(shape=image.shape[:2])
                else:
                    # Read as single-channel (flags=0) and scale to [0, 1].
                    mask = cv2.imread(mask_path, flags=0) / 255.0
                pre_processed = self.pre_process(image=image, mask=mask)
                item["mask_path"] = mask_path
                item["image"] = pre_processed["image"]
                item["mask"] = pre_processed["mask"]
        return item
class FolderDataModule(LightningDataModule):
    """Folder Lightning Data Module.

    Wires :class:`FolderDataset` splits into PyTorch Lightning dataloaders.
    """
    def __init__(
        self,
        root: Union[str, Path],
        normal: str = "normal",
        abnormal: str = "abnormal",
        task: str = "classification",
        mask_dir: Optional[Union[Path, str]] = None,
        extensions: Optional[Tuple[str, ...]] = None,
        split_ratio: float = 0.2,
        seed: int = 0,
        image_size: Optional[Union[int, Tuple[int, int]]] = None,
        train_batch_size: int = 32,
        test_batch_size: int = 32,
        num_workers: int = 8,
        transform_config: Optional[Union[str, A.Compose]] = None,
        create_validation_set: bool = False,
    ) -> None:
        """Folder Dataset PL Datamodule.
        Args:
            root (Union[str, Path]): Path to the root folder containing normal and abnormal dirs.
            normal (str, optional): Name of the directory containing normal images.
                Defaults to "normal".
            abnormal (str, optional): Name of the directory containing abnormal images.
                Defaults to "abnormal".
            task (str, optional): Task type. Could be either classification or segmentation.
                Defaults to "classification".
            mask_dir (Optional[Union[str, Path]], optional): Path to the directory containing
                the mask annotations. Defaults to None.
            extensions (Optional[Tuple[str, ...]], optional): Type of the image extensions to read from the
                directory. Defaults to None.
            split_ratio (float, optional): Ratio to split normal training images and add to the
                test set in case test set doesn't contain any normal images.
                Defaults to 0.2.
            seed (int, optional): Random seed to ensure reproducibility when splitting. Defaults to 0.
            image_size (Optional[Union[int, Tuple[int, int]]], optional): Size of the input image.
                Defaults to None.
            train_batch_size (int, optional): Training batch size. Defaults to 32.
            test_batch_size (int, optional): Test batch size. Defaults to 32.
            num_workers (int, optional): Number of workers. Defaults to 8.
            transform_config (Optional[Union[str, A.Compose]], optional): Config for pre-processing.
                Defaults to None.
            create_validation_set (bool, optional):Boolean to create a validation set from the test set.
                Those wanting to create a validation set could set this flag to ``True``.
        Examples:
            Assume that we use Folder Dataset for the MVTec/bottle/broken_large category. We would do:
            >>> from anomalib.data import FolderDataModule
            >>> datamodule = FolderDataModule(
            ...     root="./datasets/MVTec/bottle/test",
            ...     normal="good",
            ...     abnormal="broken_large",
            ...     image_size=256
            ... )
            >>> datamodule.setup()
            >>> i, data = next(enumerate(datamodule.train_dataloader()))
            >>> data["image"].shape
            torch.Size([16, 3, 256, 256])
            >>> i, test_data = next(enumerate(datamodule.test_dataloader()))
            >>> test_data.keys()
            dict_keys(['image'])
            We could also create a Folder DataModule for datasets containing mask annotations.
            The dataset expects that mask annotation filenames must be same as the original filename.
            To this end, we modified mask filenames in MVTec AD bottle category.
            Now we could try folder data module using the mvtec bottle broken large category
            >>> datamodule = FolderDataModule(
            ...     root="./datasets/bottle/test",
            ...     normal="good",
            ...     abnormal="broken_large",
            ...     mask_dir="./datasets/bottle/ground_truth/broken_large",
            ...     image_size=256
            ... )
            >>> i , train_data = next(enumerate(datamodule.train_dataloader()))
            >>> train_data.keys()
            dict_keys(['image'])
            >>> train_data["image"].shape
            torch.Size([16, 3, 256, 256])
            >>> i, test_data = next(enumerate(datamodule.test_dataloader()))
            dict_keys(['image_path', 'label', 'mask_path', 'image', 'mask'])
            >>> print(test_data["image"].shape, test_data["mask"].shape)
            torch.Size([24, 3, 256, 256]) torch.Size([24, 256, 256])
            By default, Folder Data Module does not create a validation set. If a validation set
            is needed it could be set as follows:
            >>> datamodule = FolderDataModule(
            ...     root="./datasets/bottle/test",
            ...     normal="good",
            ...     abnormal="broken_large",
            ...     mask_dir="./datasets/bottle/ground_truth/broken_large",
            ...     image_size=256,
            ...     create_validation_set=True,
            ... )
            >>> i, val_data = next(enumerate(datamodule.val_dataloader()))
            >>> val_data.keys()
            dict_keys(['image_path', 'label', 'mask_path', 'image', 'mask'])
            >>> print(val_data["image"].shape, val_data["mask"].shape)
            torch.Size([12, 3, 256, 256]) torch.Size([12, 256, 256])
            >>> i, test_data = next(enumerate(datamodule.test_dataloader()))
            >>> print(test_data["image"].shape, test_data["mask"].shape)
            torch.Size([12, 3, 256, 256]) torch.Size([12, 256, 256])
        """
        super().__init__()
        self.root = _check_and_convert_path(root)
        self.normal_dir = self.root / normal
        self.abnormal_dir = self.root / abnormal
        self.mask_dir = mask_dir
        self.extensions = extensions
        self.split_ratio = split_ratio
        # Masks imply a segmentation task; reject the inconsistent combination early.
        if task == "classification" and mask_dir is not None:
            raise ValueError(
                "Classification type is set but mask_dir provided. "
                "If mask_dir is provided task type must be segmentation. "
                "Check your configuration."
            )
        self.task = task
        self.transform_config = transform_config
        self.image_size = image_size
        self.pre_process = PreProcessor(config=self.transform_config, image_size=self.image_size)
        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
        self.num_workers = num_workers
        self.create_validation_set = create_validation_set
        self.seed = seed
        # Declared here; populated by `setup()` depending on the stage.
        self.train_data: Dataset
        self.test_data: Dataset
        if create_validation_set:
            self.val_data: Dataset
        self.inference_data: Dataset
    def setup(self, stage: Optional[str] = None) -> None:
        """Setup train, validation and test data.
        Args:
            stage: Optional[str]: Train/Val/Test stages. (Default value = None)
        """
        if stage in (None, "fit"):
            self.train_data = FolderDataset(
                normal_dir=self.normal_dir,
                abnormal_dir=self.abnormal_dir,
                split="train",
                split_ratio=self.split_ratio,
                mask_dir=self.mask_dir,
                pre_process=self.pre_process,
                extensions=self.extensions,
                task=self.task,
                seed=self.seed,
                create_validation_set=self.create_validation_set,
            )
        if self.create_validation_set:
            self.val_data = FolderDataset(
                normal_dir=self.normal_dir,
                abnormal_dir=self.abnormal_dir,
                split="val",
                split_ratio=self.split_ratio,
                mask_dir=self.mask_dir,
                pre_process=self.pre_process,
                extensions=self.extensions,
                task=self.task,
                seed=self.seed,
                create_validation_set=self.create_validation_set,
            )
        # The test split is always created, regardless of stage.
        self.test_data = FolderDataset(
            normal_dir=self.normal_dir,
            abnormal_dir=self.abnormal_dir,
            split="test",
            split_ratio=self.split_ratio,
            mask_dir=self.mask_dir,
            pre_process=self.pre_process,
            extensions=self.extensions,
            task=self.task,
            seed=self.seed,
            create_validation_set=self.create_validation_set,
        )
        if stage == "predict":
            self.inference_data = InferenceDataset(
                path=self.root, image_size=self.image_size, transform_config=self.transform_config
            )
    def train_dataloader(self) -> TRAIN_DATALOADERS:
        """Get train dataloader."""
        return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batch_size, num_workers=self.num_workers)
    def val_dataloader(self) -> EVAL_DATALOADERS:
        """Get validation dataloader."""
        # Fall back to the test split when no dedicated validation set exists.
        dataset = self.val_data if self.create_validation_set else self.test_data
        return DataLoader(dataset=dataset, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)
    def test_dataloader(self) -> EVAL_DATALOADERS:
        """Get test dataloader."""
        return DataLoader(self.test_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)
    def predict_dataloader(self) -> EVAL_DATALOADERS:
        """Get predict dataloader."""
        return DataLoader(
            self.inference_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers
        )
| [
"anomalib.data.utils.read_image",
"anomalib.data.utils.split.create_validation_set_from_test_set",
"anomalib.pre_processing.PreProcessor",
"anomalib.data.inference.InferenceDataset",
"anomalib.data.utils.split.split_normal_images_in_train_set"
] | [((1406, 1455), 'logging.getLogger', 'logging.getLogger', ([], {'name': '"""Dataset: Folder Dataset"""'}), "(name='Dataset: Folder Dataset')\n", (1423, 1455), False, 'import logging\n'), ((4253, 4306), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'image_path': filenames, 'label': labels}"], {}), "({'image_path': filenames, 'label': labels})\n", (4262, 4306), False, 'from pandas.core.frame import DataFrame\n'), ((5386, 5498), 'anomalib.data.utils.split.split_normal_images_in_train_set', 'split_normal_images_in_train_set', ([], {'samples': 'samples', 'split_ratio': 'split_ratio', 'seed': 'seed', 'normal_label': '"""normal"""'}), "(samples=samples, split_ratio=split_ratio,\n seed=seed, normal_label='normal')\n", (5418, 5498), False, 'from anomalib.data.utils.split import create_validation_set_from_test_set, split_normal_images_in_train_set\n'), ((1792, 1802), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1796, 1802), False, 'from pathlib import Path\n'), ((5640, 5718), 'anomalib.data.utils.split.create_validation_set_from_test_set', 'create_validation_set_from_test_set', (['samples'], {'seed': 'seed', 'normal_label': '"""normal"""'}), "(samples, seed=seed, normal_label='normal')\n", (5675, 5718), False, 'from anomalib.data.utils.split import create_validation_set_from_test_set, split_normal_images_in_train_set\n'), ((9461, 9483), 'anomalib.data.utils.read_image', 'read_image', (['image_path'], {}), '(image_path)\n', (9471, 9483), False, 'from anomalib.data.utils import read_image\n'), ((16760, 16830), 'anomalib.pre_processing.PreProcessor', 'PreProcessor', ([], {'config': 'self.transform_config', 'image_size': 'self.image_size'}), '(config=self.transform_config, image_size=self.image_size)\n', (16772, 16830), False, 'from anomalib.pre_processing import PreProcessor\n'), ((19228, 19337), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_data'], {'shuffle': '(True)', 'batch_size': 'self.train_batch_size', 'num_workers': 'self.num_workers'}), 
'(self.train_data, shuffle=True, batch_size=self.train_batch_size,\n num_workers=self.num_workers)\n', (19238, 19337), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((19523, 19632), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset', 'shuffle': '(False)', 'batch_size': 'self.test_batch_size', 'num_workers': 'self.num_workers'}), '(dataset=dataset, shuffle=False, batch_size=self.test_batch_size,\n num_workers=self.num_workers)\n', (19533, 19632), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((19731, 19839), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_data'], {'shuffle': '(False)', 'batch_size': 'self.test_batch_size', 'num_workers': 'self.num_workers'}), '(self.test_data, shuffle=False, batch_size=self.test_batch_size,\n num_workers=self.num_workers)\n', (19741, 19839), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((19944, 20058), 'torch.utils.data.DataLoader', 'DataLoader', (['self.inference_data'], {'shuffle': '(False)', 'batch_size': 'self.test_batch_size', 'num_workers': 'self.num_workers'}), '(self.inference_data, shuffle=False, batch_size=self.\n test_batch_size, num_workers=self.num_workers)\n', (19954, 20058), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((18992, 19096), 'anomalib.data.inference.InferenceDataset', 'InferenceDataset', ([], {'path': 'self.root', 'image_size': 'self.image_size', 'transform_config': 'self.transform_config'}), '(path=self.root, image_size=self.image_size,\n transform_config=self.transform_config)\n', (19008, 19096), False, 'from anomalib.data.inference import InferenceDataset\n'), ((10087, 10118), 'numpy.zeros', 'np.zeros', ([], {'shape': 'image.shape[:2]'}), '(shape=image.shape[:2])\n', (10095, 10118), True, 'import numpy as np\n'), ((10168, 10198), 'cv2.imread', 'cv2.imread', (['mask_path'], {'flags': '(0)'}), '(mask_path, flags=0)\n', (10178, 10198), False, 'import cv2\n')] |
"""Base Inferencer for Torch and OpenVINO."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, Optional, Tuple, Union, cast
import cv2
import numpy as np
from omegaconf import DictConfig, OmegaConf
from skimage.morphology import dilation
from skimage.segmentation import find_boundaries
from torch import Tensor
from anomalib.data.utils import read_image
from anomalib.post_processing import compute_mask, superimpose_anomaly_map
from anomalib.post_processing.normalization.cdf import normalize as normalize_cdf
from anomalib.post_processing.normalization.cdf import standardize
from anomalib.post_processing.normalization.min_max import (
normalize as normalize_min_max,
)
class Inferencer(ABC):
"""Abstract class for the inference.
This is used by both Torch and OpenVINO inference.
"""
@abstractmethod
def load_model(self, path: Union[str, Path]):
"""Load Model."""
raise NotImplementedError
@abstractmethod
def pre_process(self, image: np.ndarray) -> Union[np.ndarray, Tensor]:
"""Pre-process."""
raise NotImplementedError
@abstractmethod
def forward(self, image: Union[np.ndarray, Tensor]) -> Union[np.ndarray, Tensor]:
"""Forward-Pass input to model."""
raise NotImplementedError
@abstractmethod
def post_process(
self, predictions: Union[np.ndarray, Tensor], meta_data: Optional[Dict]
) -> Tuple[np.ndarray, float]:
"""Post-Process."""
raise NotImplementedError
def predict(
self,
image: Union[str, np.ndarray, Path],
superimpose: bool = True,
meta_data: Optional[dict] = None,
overlay_mask: bool = False,
) -> Tuple[np.ndarray, float]:
"""Perform a prediction for a given input image.
The main workflow is (i) pre-processing, (ii) forward-pass, (iii) post-process.
Args:
image (Union[str, np.ndarray]): Input image whose output is to be predicted.
It could be either a path to image or numpy array itself.
superimpose (bool): If this is set to True, output predictions
will be superimposed onto the original image. If false, `predict`
method will return the raw heatmap.
overlay_mask (bool): If this is set to True, output segmentation mask on top of image.
Returns:
np.ndarray: Output predictions to be visualized.
"""
if meta_data is None:
if hasattr(self, "meta_data"):
meta_data = getattr(self, "meta_data")
else:
meta_data = {}
if isinstance(image, (str, Path)):
image_arr: np.ndarray = read_image(image)
else: # image is already a numpy array. Kept for mypy compatibility.
image_arr = image
meta_data["image_shape"] = image_arr.shape[:2]
processed_image = self.pre_process(image_arr)
predictions = self.forward(processed_image)
anomaly_map, pred_scores = self.post_process(predictions, meta_data=meta_data)
# Overlay segmentation mask using raw predictions
if overlay_mask and meta_data is not None:
image_arr = self._superimpose_segmentation_mask(meta_data, anomaly_map, image_arr)
if superimpose is True:
anomaly_map = superimpose_anomaly_map(anomaly_map, image_arr)
return anomaly_map, pred_scores
def _superimpose_segmentation_mask(self, meta_data: dict, anomaly_map: np.ndarray, image: np.ndarray):
"""Superimpose segmentation mask on top of image.
Args:
meta_data (dict): Metadata of the image which contains the image size.
anomaly_map (np.ndarray): Anomaly map which is used to extract segmentation mask.
image (np.ndarray): Image on which segmentation mask is to be superimposed.
Returns:
np.ndarray: Image with segmentation mask superimposed.
"""
pred_mask = compute_mask(anomaly_map, 0.5) # assumes predictions are normalized.
image_height = meta_data["image_shape"][0]
image_width = meta_data["image_shape"][1]
pred_mask = cv2.resize(pred_mask, (image_width, image_height))
boundaries = find_boundaries(pred_mask)
outlines = dilation(boundaries, np.ones((7, 7)))
image[outlines] = [255, 0, 0]
return image
def __call__(self, image: np.ndarray) -> Tuple[np.ndarray, float]:
"""Call predict on the Image.
Args:
image (np.ndarray): Input Image
Returns:
np.ndarray: Output predictions to be visualized
"""
return self.predict(image)
def _normalize(
self,
anomaly_maps: Union[Tensor, np.ndarray],
pred_scores: Union[Tensor, np.float32],
meta_data: Union[Dict, DictConfig],
) -> Tuple[Union[np.ndarray, Tensor], float]:
"""Applies normalization and resizes the image.
Args:
anomaly_maps (Union[Tensor, np.ndarray]): Predicted raw anomaly map.
pred_scores (Union[Tensor, np.float32]): Predicted anomaly score
meta_data (Dict): Meta data. Post-processing step sometimes requires
additional meta data such as image shape. This variable comprises such info.
Returns:
Tuple[Union[np.ndarray, Tensor], float]: Post processed predictions that are ready to be visualized and
predicted scores.
"""
# min max normalization
if "min" in meta_data and "max" in meta_data:
anomaly_maps = normalize_min_max(
anomaly_maps, meta_data["pixel_threshold"], meta_data["min"], meta_data["max"]
)
pred_scores = normalize_min_max(
pred_scores, meta_data["image_threshold"], meta_data["min"], meta_data["max"]
)
# standardize pixel scores
if "pixel_mean" in meta_data.keys() and "pixel_std" in meta_data.keys():
anomaly_maps = standardize(
anomaly_maps, meta_data["pixel_mean"], meta_data["pixel_std"], center_at=meta_data["image_mean"]
)
anomaly_maps = normalize_cdf(anomaly_maps, meta_data["pixel_threshold"])
# standardize image scores
if "image_mean" in meta_data.keys() and "image_std" in meta_data.keys():
pred_scores = standardize(pred_scores, meta_data["image_mean"], meta_data["image_std"])
pred_scores = normalize_cdf(pred_scores, meta_data["image_threshold"])
return anomaly_maps, float(pred_scores)
def _load_meta_data(
self, path: Optional[Union[str, Path]] = None
) -> Union[DictConfig, Dict[str, Union[float, np.ndarray, Tensor]]]:
"""Loads the meta data from the given path.
Args:
path (Optional[Union[str, Path]], optional): Path to JSON file containing the metadata.
If no path is provided, it returns an empty dict. Defaults to None.
Returns:
Union[DictConfig, Dict]: Dictionary containing the metadata.
"""
meta_data: Union[DictConfig, Dict[str, Union[float, np.ndarray, Tensor]]] = {}
if path is not None:
config = OmegaConf.load(path)
meta_data = cast(DictConfig, config)
return meta_data
| [
"anomalib.data.utils.read_image",
"anomalib.post_processing.normalization.cdf.standardize",
"anomalib.post_processing.normalization.cdf.normalize",
"anomalib.post_processing.normalization.min_max.normalize",
"anomalib.post_processing.compute_mask",
"anomalib.post_processing.superimpose_anomaly_map"
] | [((4618, 4648), 'anomalib.post_processing.compute_mask', 'compute_mask', (['anomaly_map', '(0.5)'], {}), '(anomaly_map, 0.5)\n', (4630, 4648), False, 'from anomalib.post_processing import compute_mask, superimpose_anomaly_map\n'), ((4809, 4859), 'cv2.resize', 'cv2.resize', (['pred_mask', '(image_width, image_height)'], {}), '(pred_mask, (image_width, image_height))\n', (4819, 4859), False, 'import cv2\n'), ((4881, 4907), 'skimage.segmentation.find_boundaries', 'find_boundaries', (['pred_mask'], {}), '(pred_mask)\n', (4896, 4907), False, 'from skimage.segmentation import find_boundaries\n'), ((3327, 3344), 'anomalib.data.utils.read_image', 'read_image', (['image'], {}), '(image)\n', (3337, 3344), False, 'from anomalib.data.utils import read_image\n'), ((3966, 4013), 'anomalib.post_processing.superimpose_anomaly_map', 'superimpose_anomaly_map', (['anomaly_map', 'image_arr'], {}), '(anomaly_map, image_arr)\n', (3989, 4013), False, 'from anomalib.post_processing import compute_mask, superimpose_anomaly_map\n'), ((4948, 4963), 'numpy.ones', 'np.ones', (['(7, 7)'], {}), '((7, 7))\n', (4955, 4963), True, 'import numpy as np\n'), ((6243, 6345), 'anomalib.post_processing.normalization.min_max.normalize', 'normalize_min_max', (['anomaly_maps', "meta_data['pixel_threshold']", "meta_data['min']", "meta_data['max']"], {}), "(anomaly_maps, meta_data['pixel_threshold'], meta_data[\n 'min'], meta_data['max'])\n", (6260, 6345), True, 'from anomalib.post_processing.normalization.min_max import normalize as normalize_min_max\n'), ((6397, 6498), 'anomalib.post_processing.normalization.min_max.normalize', 'normalize_min_max', (['pred_scores', "meta_data['image_threshold']", "meta_data['min']", "meta_data['max']"], {}), "(pred_scores, meta_data['image_threshold'], meta_data[\n 'min'], meta_data['max'])\n", (6414, 6498), True, 'from anomalib.post_processing.normalization.min_max import normalize as normalize_min_max\n'), ((6668, 6781), 
'anomalib.post_processing.normalization.cdf.standardize', 'standardize', (['anomaly_maps', "meta_data['pixel_mean']", "meta_data['pixel_std']"], {'center_at': "meta_data['image_mean']"}), "(anomaly_maps, meta_data['pixel_mean'], meta_data['pixel_std'],\n center_at=meta_data['image_mean'])\n", (6679, 6781), False, 'from anomalib.post_processing.normalization.cdf import standardize\n'), ((6835, 6892), 'anomalib.post_processing.normalization.cdf.normalize', 'normalize_cdf', (['anomaly_maps', "meta_data['pixel_threshold']"], {}), "(anomaly_maps, meta_data['pixel_threshold'])\n", (6848, 6892), True, 'from anomalib.post_processing.normalization.cdf import normalize as normalize_cdf\n'), ((7036, 7109), 'anomalib.post_processing.normalization.cdf.standardize', 'standardize', (['pred_scores', "meta_data['image_mean']", "meta_data['image_std']"], {}), "(pred_scores, meta_data['image_mean'], meta_data['image_std'])\n", (7047, 7109), False, 'from anomalib.post_processing.normalization.cdf import standardize\n'), ((7136, 7192), 'anomalib.post_processing.normalization.cdf.normalize', 'normalize_cdf', (['pred_scores', "meta_data['image_threshold']"], {}), "(pred_scores, meta_data['image_threshold'])\n", (7149, 7192), True, 'from anomalib.post_processing.normalization.cdf import normalize as normalize_cdf\n'), ((7886, 7906), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['path'], {}), '(path)\n', (7900, 7906), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((7931, 7955), 'typing.cast', 'cast', (['DictConfig', 'config'], {}), '(DictConfig, config)\n', (7935, 7955), False, 'from typing import Dict, Optional, Tuple, Union, cast\n')] |
from pytorch_lightning import Trainer, seed_everything
from anomalib.config import get_configurable_parameters
from anomalib.data import get_datamodule
from anomalib.models import get_model
from anomalib.utils.callbacks import get_callbacks
from tests.helpers.dataset import TestDataset, get_dataset_path
def run_train_test(config):
model = get_model(config)
datamodule = get_datamodule(config)
callbacks = get_callbacks(config)
trainer = Trainer(**config.trainer, callbacks=callbacks)
trainer.fit(model=model, datamodule=datamodule)
results = trainer.test(model=model, datamodule=datamodule)
return results
@TestDataset(num_train=200, num_test=30, path=get_dataset_path(), seed=42)
def test_normalizer(path=get_dataset_path(), category="shapes"):
config = get_configurable_parameters(config_path="anomalib/models/padim/config.yaml")
config.dataset.path = path
config.dataset.category = category
config.metrics.threshold.adaptive = True
config.project.log_images_to = []
config.metrics.image = ["F1Score", "AUROC"]
# run without normalization
config.model.normalization_method = "none"
seed_everything(42)
results_without_normalization = run_train_test(config)
# run with cdf normalization
config.model.normalization_method = "cdf"
seed_everything(42)
results_with_cdf_normalization = run_train_test(config)
# run without normalization
config.model.normalization_method = "min_max"
seed_everything(42)
results_with_minmax_normalization = run_train_test(config)
# performance should be the same
for metric in ["image_AUROC", "image_F1Score"]:
assert round(results_without_normalization[0][metric], 3) == round(results_with_cdf_normalization[0][metric], 3)
assert round(results_without_normalization[0][metric], 3) == round(
results_with_minmax_normalization[0][metric], 3
)
| [
"anomalib.models.get_model",
"anomalib.data.get_datamodule",
"anomalib.config.get_configurable_parameters",
"anomalib.utils.callbacks.get_callbacks"
] | [((348, 365), 'anomalib.models.get_model', 'get_model', (['config'], {}), '(config)\n', (357, 365), False, 'from anomalib.models import get_model\n'), ((383, 405), 'anomalib.data.get_datamodule', 'get_datamodule', (['config'], {}), '(config)\n', (397, 405), False, 'from anomalib.data import get_datamodule\n'), ((422, 443), 'anomalib.utils.callbacks.get_callbacks', 'get_callbacks', (['config'], {}), '(config)\n', (435, 443), False, 'from anomalib.utils.callbacks import get_callbacks\n'), ((459, 505), 'pytorch_lightning.Trainer', 'Trainer', ([], {'callbacks': 'callbacks'}), '(**config.trainer, callbacks=callbacks)\n', (466, 505), False, 'from pytorch_lightning import Trainer, seed_everything\n'), ((742, 760), 'tests.helpers.dataset.get_dataset_path', 'get_dataset_path', ([], {}), '()\n', (758, 760), False, 'from tests.helpers.dataset import TestDataset, get_dataset_path\n'), ((795, 871), 'anomalib.config.get_configurable_parameters', 'get_configurable_parameters', ([], {'config_path': '"""anomalib/models/padim/config.yaml"""'}), "(config_path='anomalib/models/padim/config.yaml')\n", (822, 871), False, 'from anomalib.config import get_configurable_parameters\n'), ((1157, 1176), 'pytorch_lightning.seed_everything', 'seed_everything', (['(42)'], {}), '(42)\n', (1172, 1176), False, 'from pytorch_lightning import Trainer, seed_everything\n'), ((1320, 1339), 'pytorch_lightning.seed_everything', 'seed_everything', (['(42)'], {}), '(42)\n', (1335, 1339), False, 'from pytorch_lightning import Trainer, seed_everything\n'), ((1487, 1506), 'pytorch_lightning.seed_everything', 'seed_everything', (['(42)'], {}), '(42)\n', (1502, 1506), False, 'from pytorch_lightning import Trainer, seed_everything\n'), ((688, 706), 'tests.helpers.dataset.get_dataset_path', 'get_dataset_path', ([], {}), '()\n', (704, 706), False, 'from tests.helpers.dataset import TestDataset, get_dataset_path\n')] |
"""Base Inferencer for Torch and OpenVINO."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, Optional, Tuple, Union, cast
import numpy as np
from omegaconf import DictConfig, OmegaConf
from torch import Tensor
from anomalib.data.utils import read_image
from anomalib.utils.normalization.cdf import normalize as normalize_cdf
from anomalib.utils.normalization.cdf import standardize
from anomalib.utils.normalization.min_max import normalize as normalize_min_max
from anomalib.utils.post_process import superimpose_anomaly_map
class Inferencer(ABC):
"""Abstract class for the inference.
This is used by both Torch and OpenVINO inference.
"""
@abstractmethod
def load_model(self, path: Union[str, Path]):
"""Load Model."""
raise NotImplementedError
@abstractmethod
def pre_process(self, image: np.ndarray) -> Union[np.ndarray, Tensor]:
"""Pre-process."""
raise NotImplementedError
@abstractmethod
def forward(self, image: Union[np.ndarray, Tensor]) -> Union[np.ndarray, Tensor]:
"""Forward-Pass input to model."""
raise NotImplementedError
@abstractmethod
def post_process(
self, predictions: Union[np.ndarray, Tensor], meta_data: Optional[Dict]
) -> Tuple[np.ndarray, float]:
"""Post-Process."""
raise NotImplementedError
def predict(
self, image: Union[str, np.ndarray, Path], superimpose: bool = True, meta_data: Optional[dict] = None
) -> Tuple[np.ndarray, float]:
"""Perform a prediction for a given input image.
The main workflow is (i) pre-processing, (ii) forward-pass, (iii) post-process.
Args:
image (Union[str, np.ndarray]): Input image whose output is to be predicted.
It could be either a path to image or numpy array itself.
superimpose (bool): If this is set to True, output predictions
will be superimposed onto the original image. If false, `predict`
method will return the raw heatmap.
Returns:
np.ndarray: Output predictions to be visualized.
"""
if meta_data is None:
if hasattr(self, "meta_data"):
meta_data = getattr(self, "meta_data")
else:
meta_data = {}
if isinstance(image, (str, Path)):
image = read_image(image)
meta_data["image_shape"] = image.shape[:2]
processed_image = self.pre_process(image)
predictions = self.forward(processed_image)
anomaly_map, pred_scores = self.post_process(predictions, meta_data=meta_data)
if superimpose is True:
anomaly_map = superimpose_anomaly_map(anomaly_map, image)
return anomaly_map, pred_scores
def __call__(self, image: np.ndarray) -> Tuple[np.ndarray, float]:
"""Call predict on the Image.
Args:
image (np.ndarray): Input Image
Returns:
np.ndarray: Output predictions to be visualized
"""
return self.predict(image)
def _normalize(
self,
anomaly_maps: Union[Tensor, np.ndarray],
pred_scores: Union[Tensor, np.float32],
meta_data: Union[Dict, DictConfig],
) -> Tuple[Union[np.ndarray, Tensor], float]:
"""Applies normalization and resizes the image.
Args:
anomaly_maps (Union[Tensor, np.ndarray]): Predicted raw anomaly map.
pred_scores (Union[Tensor, np.float32]): Predicted anomaly score
meta_data (Dict): Meta data. Post-processing step sometimes requires
additional meta data such as image shape. This variable comprises such info.
Returns:
Tuple[Union[np.ndarray, Tensor], float]: Post processed predictions that are ready to be visualized and
predicted scores.
"""
# min max normalization
if "min" in meta_data and "max" in meta_data:
anomaly_maps = normalize_min_max(
anomaly_maps, meta_data["pixel_threshold"], meta_data["min"], meta_data["max"]
)
pred_scores = normalize_min_max(
pred_scores, meta_data["image_threshold"], meta_data["min"], meta_data["max"]
)
# standardize pixel scores
if "pixel_mean" in meta_data.keys() and "pixel_std" in meta_data.keys():
anomaly_maps = standardize(
anomaly_maps, meta_data["pixel_mean"], meta_data["pixel_std"], center_at=meta_data["image_mean"]
)
anomaly_maps = normalize_cdf(anomaly_maps, meta_data["pixel_threshold"])
# standardize image scores
if "image_mean" in meta_data.keys() and "image_std" in meta_data.keys():
pred_scores = standardize(pred_scores, meta_data["image_mean"], meta_data["image_std"])
pred_scores = normalize_cdf(pred_scores, meta_data["image_threshold"])
return anomaly_maps, float(pred_scores)
def _load_meta_data(
self, path: Optional[Union[str, Path]] = None
) -> Union[DictConfig, Dict[str, Union[float, np.ndarray, Tensor]]]:
"""Loads the meta data from the given path.
Args:
path (Optional[Union[str, Path]], optional): Path to JSON file containing the metadata.
If no path is provided, it returns an empty dict. Defaults to None.
Returns:
Union[DictConfig, Dict]: Dictionary containing the metadata.
"""
meta_data: Union[DictConfig, Dict[str, Union[float, np.ndarray, Tensor]]] = {}
if path is not None:
config = OmegaConf.load(path)
meta_data = cast(DictConfig, config)
return meta_data
| [
"anomalib.utils.post_process.superimpose_anomaly_map",
"anomalib.data.utils.read_image",
"anomalib.utils.normalization.min_max.normalize",
"anomalib.utils.normalization.cdf.standardize",
"anomalib.utils.normalization.cdf.normalize"
] | [((3000, 3017), 'anomalib.data.utils.read_image', 'read_image', (['image'], {}), '(image)\n', (3010, 3017), False, 'from anomalib.data.utils import read_image\n'), ((3318, 3361), 'anomalib.utils.post_process.superimpose_anomaly_map', 'superimpose_anomaly_map', (['anomaly_map', 'image'], {}), '(anomaly_map, image)\n', (3341, 3361), False, 'from anomalib.utils.post_process import superimpose_anomaly_map\n'), ((4622, 4724), 'anomalib.utils.normalization.min_max.normalize', 'normalize_min_max', (['anomaly_maps', "meta_data['pixel_threshold']", "meta_data['min']", "meta_data['max']"], {}), "(anomaly_maps, meta_data['pixel_threshold'], meta_data[\n 'min'], meta_data['max'])\n", (4639, 4724), True, 'from anomalib.utils.normalization.min_max import normalize as normalize_min_max\n'), ((4776, 4877), 'anomalib.utils.normalization.min_max.normalize', 'normalize_min_max', (['pred_scores', "meta_data['image_threshold']", "meta_data['min']", "meta_data['max']"], {}), "(pred_scores, meta_data['image_threshold'], meta_data[\n 'min'], meta_data['max'])\n", (4793, 4877), True, 'from anomalib.utils.normalization.min_max import normalize as normalize_min_max\n'), ((5047, 5160), 'anomalib.utils.normalization.cdf.standardize', 'standardize', (['anomaly_maps', "meta_data['pixel_mean']", "meta_data['pixel_std']"], {'center_at': "meta_data['image_mean']"}), "(anomaly_maps, meta_data['pixel_mean'], meta_data['pixel_std'],\n center_at=meta_data['image_mean'])\n", (5058, 5160), False, 'from anomalib.utils.normalization.cdf import standardize\n'), ((5214, 5271), 'anomalib.utils.normalization.cdf.normalize', 'normalize_cdf', (['anomaly_maps', "meta_data['pixel_threshold']"], {}), "(anomaly_maps, meta_data['pixel_threshold'])\n", (5227, 5271), True, 'from anomalib.utils.normalization.cdf import normalize as normalize_cdf\n'), ((5415, 5488), 'anomalib.utils.normalization.cdf.standardize', 'standardize', (['pred_scores', "meta_data['image_mean']", "meta_data['image_std']"], {}), "(pred_scores, 
meta_data['image_mean'], meta_data['image_std'])\n", (5426, 5488), False, 'from anomalib.utils.normalization.cdf import standardize\n'), ((5515, 5571), 'anomalib.utils.normalization.cdf.normalize', 'normalize_cdf', (['pred_scores', "meta_data['image_threshold']"], {}), "(pred_scores, meta_data['image_threshold'])\n", (5528, 5571), True, 'from anomalib.utils.normalization.cdf import normalize as normalize_cdf\n'), ((6265, 6285), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['path'], {}), '(path)\n', (6279, 6285), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((6310, 6334), 'typing.cast', 'cast', (['DictConfig', 'config'], {}), '(DictConfig, config)\n', (6314, 6334), False, 'from typing import Dict, Optional, Tuple, Union, cast\n')] |
"""Anomaly Score Normalization Callback."""
from typing import Any, Dict, Optional
import pytorch_lightning as pl
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities.types import STEP_OUTPUT
from torch.distributions import LogNormal
from anomalib.models import get_model
from anomalib.utils.normalization.cdf import normalize, standardize
class CdfNormalizationCallback(Callback):
"""Callback that standardizes the image-level and pixel-level anomaly scores."""
def __init__(self):
self.image_dist: Optional[LogNormal] = None
self.pixel_dist: Optional[LogNormal] = None
def on_test_start(self, _trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
"""Called when the test begins."""
pl_module.image_metrics.F1.threshold = 0.5
pl_module.pixel_metrics.F1.threshold = 0.5
def on_train_epoch_end(
self, trainer: pl.Trainer, pl_module: pl.LightningModule, _unused: Optional[Any] = None
) -> None:
"""Called when the train epoch ends.
Use the current model to compute the anomaly score distributions
of the normal training data. This is needed after every epoch, because the statistics must be
stored in the state dict of the checkpoint file.
"""
self._collect_stats(trainer, pl_module)
def on_validation_batch_end(
self,
_trainer: pl.Trainer,
pl_module: pl.LightningModule,
outputs: Optional[STEP_OUTPUT],
_batch: Any,
_batch_idx: int,
_dataloader_idx: int,
) -> None:
"""Called when the validation batch ends, standardizes the predicted scores and anomaly maps."""
self._standardize_batch(outputs, pl_module)
def on_test_batch_end(
self,
_trainer: pl.Trainer,
pl_module: pl.LightningModule,
outputs: Optional[STEP_OUTPUT],
_batch: Any,
_batch_idx: int,
_dataloader_idx: int,
) -> None:
"""Called when the test batch ends, normalizes the predicted scores and anomaly maps."""
self._standardize_batch(outputs, pl_module)
self._normalize_batch(outputs, pl_module)
def on_predict_batch_end(
self,
_trainer: pl.Trainer,
pl_module: pl.LightningModule,
outputs: Dict,
_batch: Any,
_batch_idx: int,
_dataloader_idx: int,
) -> None:
"""Called when the predict batch ends, normalizes the predicted scores and anomaly maps."""
self._standardize_batch(outputs, pl_module)
self._normalize_batch(outputs, pl_module)
outputs["pred_labels"] = outputs["pred_scores"] >= 0.5
def _collect_stats(self, trainer, pl_module):
"""Collect the statistics of the normal training data.
Create a trainer and use it to predict the anomaly maps and scores of the normal training data. Then
estimate the distribution of anomaly scores for normal data at the image and pixel level by computing
the mean and standard deviations. A dictionary containing the computed statistics is stored in self.stats.
"""
predictions = Trainer(gpus=trainer.gpus).predict(
model=self._create_inference_model(pl_module), dataloaders=trainer.datamodule.train_dataloader()
)
pl_module.training_distribution.reset()
for batch in predictions:
if "pred_scores" in batch.keys():
pl_module.training_distribution.update(anomaly_scores=batch["pred_scores"])
if "anomaly_maps" in batch.keys():
pl_module.training_distribution.update(anomaly_maps=batch["anomaly_maps"])
pl_module.training_distribution.compute()
@staticmethod
def _create_inference_model(pl_module):
"""Create a duplicate of the PL module that can be used to perform inference on the training set."""
new_model = get_model(pl_module.hparams)
new_model.load_state_dict(pl_module.state_dict())
return new_model
@staticmethod
def _standardize_batch(outputs: STEP_OUTPUT, pl_module) -> None:
stats = pl_module.training_distribution.to(outputs["pred_scores"].device)
outputs["pred_scores"] = standardize(outputs["pred_scores"], stats.image_mean, stats.image_std)
if "anomaly_maps" in outputs.keys():
outputs["anomaly_maps"] = standardize(
outputs["anomaly_maps"], stats.pixel_mean, stats.pixel_std, center_at=stats.image_mean
)
@staticmethod
def _normalize_batch(outputs: STEP_OUTPUT, pl_module: pl.LightningModule) -> None:
outputs["pred_scores"] = normalize(outputs["pred_scores"], pl_module.image_threshold.value)
if "anomaly_maps" in outputs.keys():
outputs["anomaly_maps"] = normalize(outputs["anomaly_maps"], pl_module.pixel_threshold.value)
| [
"anomalib.utils.normalization.cdf.normalize",
"anomalib.models.get_model",
"anomalib.utils.normalization.cdf.standardize"
] | [((3921, 3949), 'anomalib.models.get_model', 'get_model', (['pl_module.hparams'], {}), '(pl_module.hparams)\n', (3930, 3949), False, 'from anomalib.models import get_model\n'), ((4236, 4306), 'anomalib.utils.normalization.cdf.standardize', 'standardize', (["outputs['pred_scores']", 'stats.image_mean', 'stats.image_std'], {}), "(outputs['pred_scores'], stats.image_mean, stats.image_std)\n", (4247, 4306), False, 'from anomalib.utils.normalization.cdf import normalize, standardize\n'), ((4659, 4725), 'anomalib.utils.normalization.cdf.normalize', 'normalize', (["outputs['pred_scores']", 'pl_module.image_threshold.value'], {}), "(outputs['pred_scores'], pl_module.image_threshold.value)\n", (4668, 4725), False, 'from anomalib.utils.normalization.cdf import normalize, standardize\n'), ((4390, 4493), 'anomalib.utils.normalization.cdf.standardize', 'standardize', (["outputs['anomaly_maps']", 'stats.pixel_mean', 'stats.pixel_std'], {'center_at': 'stats.image_mean'}), "(outputs['anomaly_maps'], stats.pixel_mean, stats.pixel_std,\n center_at=stats.image_mean)\n", (4401, 4493), False, 'from anomalib.utils.normalization.cdf import normalize, standardize\n'), ((4809, 4876), 'anomalib.utils.normalization.cdf.normalize', 'normalize', (["outputs['anomaly_maps']", 'pl_module.pixel_threshold.value'], {}), "(outputs['anomaly_maps'], pl_module.pixel_threshold.value)\n", (4818, 4876), False, 'from anomalib.utils.normalization.cdf import normalize, standardize\n'), ((3166, 3192), 'pytorch_lightning.Trainer', 'Trainer', ([], {'gpus': 'trainer.gpus'}), '(gpus=trainer.gpus)\n', (3173, 3192), False, 'from pytorch_lightning import Callback, Trainer\n')] |
"""Inference Dataset."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from pathlib import Path
from typing import Any, Optional, Tuple, Union
import albumentations as A
from torch.utils.data.dataset import Dataset
from anomalib.data.utils import get_image_filenames, read_image
from anomalib.pre_processing import PreProcessor
class InferenceDataset(Dataset):
"""Inference Dataset to perform prediction."""
def __init__(
self,
path: Union[str, Path],
pre_process: Optional[PreProcessor] = None,
image_size: Optional[Union[int, Tuple[int, int]]] = None,
transform_config: Optional[Union[str, A.Compose]] = None,
) -> None:
"""Inference Dataset to perform prediction.
Args:
path (Union[str, Path]): Path to an image or image-folder.
pre_process (Optional[PreProcessor], optional): Pre-Processing transforms to
pre-process the input dataset. Defaults to None.
image_size (Optional[Union[int, Tuple[int, int]]], optional): Target image size
to resize the original image. Defaults to None.
transform_config (Optional[Union[str, A.Compose]], optional): Configuration file
parse the albumentation transforms. Defaults to None.
"""
super().__init__()
self.image_filenames = get_image_filenames(path)
if pre_process is None:
self.pre_process = PreProcessor(transform_config, image_size)
else:
self.pre_process = pre_process
def __len__(self) -> int:
"""Get the number of images in the given path."""
return len(self.image_filenames)
def __getitem__(self, index: int) -> Any:
"""Get the image based on the `index`."""
image_filename = self.image_filenames[index]
image = read_image(path=image_filename)
pre_processed = self.pre_process(image=image)
return pre_processed
| [
"anomalib.data.utils.get_image_filenames",
"anomalib.data.utils.read_image",
"anomalib.pre_processing.PreProcessor"
] | [((1899, 1924), 'anomalib.data.utils.get_image_filenames', 'get_image_filenames', (['path'], {}), '(path)\n', (1918, 1924), False, 'from anomalib.data.utils import get_image_filenames, read_image\n'), ((2385, 2416), 'anomalib.data.utils.read_image', 'read_image', ([], {'path': 'image_filename'}), '(path=image_filename)\n', (2395, 2416), False, 'from anomalib.data.utils import get_image_filenames, read_image\n'), ((1989, 2031), 'anomalib.pre_processing.PreProcessor', 'PreProcessor', (['transform_config', 'image_size'], {}), '(transform_config, image_size)\n', (2001, 2031), False, 'from anomalib.pre_processing import PreProcessor\n')] |
"""Common helpers for both nightly and pre-merge model tests."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import os
from typing import Dict, List, Tuple, Union
import numpy as np
from omegaconf import DictConfig, ListConfig
from pytorch_lightning import LightningDataModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from anomalib.config import get_configurable_parameters, update_nncf_config
from anomalib.data import get_datamodule
from anomalib.models import get_model
from anomalib.models.components import AnomalyModule
from anomalib.utils.callbacks import VisualizerCallback, get_callbacks
def setup_model_train(
model_name: str,
dataset_path: str,
project_path: str,
nncf: bool,
category: str,
score_type: str = None,
weight_file: str = "weights/model.ckpt",
fast_run: bool = False,
device: Union[List[int], int] = [0],
) -> Tuple[Union[DictConfig, ListConfig], LightningDataModule, AnomalyModule, Trainer]:
"""Train the model based on the parameters passed.
Args:
model_name (str): Name of the model to train.
dataset_path (str): Location of the dataset.
project_path (str): Path to temporary project folder.
nncf (bool): Add nncf callback.
category (str): Category to train on.
score_type (str, optional): Only used for DFM. Defaults to None.
weight_file (str, optional): Path to weight file.
fast_run (bool, optional): If set to true, the model trains for only 1 epoch. We train for one epoch as
this ensures that both anomalous and non-anomalous images are present in the validation step.
device (List[int], int, optional): Select which device you want to train the model on. Defaults to first GPU.
Returns:
Tuple[DictConfig, LightningDataModule, AnomalyModule, Trainer]: config, datamodule, trained model, trainer
"""
config = get_configurable_parameters(model_name=model_name)
if score_type is not None:
config.model.score_type = score_type
config.project.seed = 42
config.dataset.category = category
config.dataset.path = dataset_path
config.project.log_images_to = []
config.trainer.gpus = device
# If weight file is empty, remove the key from config
if "weight_file" in config.model.keys() and weight_file == "":
config.model.pop("weight_file")
else:
config.model.weight_file = weight_file if not fast_run else "weights/last.ckpt"
if nncf:
config.optimization.nncf.apply = True
config = update_nncf_config(config)
config.init_weights = None
# reassign project path as config is updated in `update_config_for_nncf`
config.project.path = project_path
datamodule = get_datamodule(config)
model = get_model(config)
callbacks = get_callbacks(config)
# Force model checkpoint to create checkpoint after first epoch
if fast_run == True:
for index, callback in enumerate(callbacks):
if isinstance(callback, ModelCheckpoint):
callbacks.pop(index)
break
model_checkpoint = ModelCheckpoint(
dirpath=os.path.join(config.project.path, "weights"),
filename="last",
monitor=None,
mode="max",
save_last=True,
auto_insert_metric_name=False,
)
callbacks.append(model_checkpoint)
for index, callback in enumerate(callbacks):
if isinstance(callback, VisualizerCallback):
callbacks.pop(index)
break
# Train the model.
if fast_run:
config.trainer.max_epochs = 1
trainer = Trainer(callbacks=callbacks, **config.trainer)
trainer.fit(model=model, datamodule=datamodule)
return config, datamodule, model, trainer
def model_load_test(config: Union[DictConfig, ListConfig], datamodule: LightningDataModule, results: Dict):
"""Create a new model based on the weights specified in config.
Args:
config ([Union[DictConfig, ListConfig]): Model config.
datamodule (LightningDataModule): Dataloader
results (Dict): Results from original model.
"""
loaded_model = get_model(config) # get new model
callbacks = get_callbacks(config)
for index, callback in enumerate(callbacks):
# Remove visualizer callback as saving results takes time
if isinstance(callback, VisualizerCallback):
callbacks.pop(index)
break
# create new trainer object with LoadModel callback (assumes it is present)
trainer = Trainer(callbacks=callbacks, **config.trainer)
# Assumes the new model has LoadModel callback and the old one had ModelCheckpoint callback
new_results = trainer.test(model=loaded_model, datamodule=datamodule)[0]
assert np.isclose(
results["image_AUROC"], new_results["image_AUROC"]
), "Loaded model does not yield close performance results"
if config.dataset.task == "segmentation":
assert np.isclose(
results["pixel_AUROC"], new_results["pixel_AUROC"]
), "Loaded model does not yield close performance results"
| [
"anomalib.data.get_datamodule",
"anomalib.config.update_nncf_config",
"anomalib.models.get_model",
"anomalib.config.get_configurable_parameters",
"anomalib.utils.callbacks.get_callbacks"
] | [((2457, 2507), 'anomalib.config.get_configurable_parameters', 'get_configurable_parameters', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (2484, 2507), False, 'from anomalib.config import get_configurable_parameters, update_nncf_config\n'), ((3300, 3322), 'anomalib.data.get_datamodule', 'get_datamodule', (['config'], {}), '(config)\n', (3314, 3322), False, 'from anomalib.data import get_datamodule\n'), ((3335, 3352), 'anomalib.models.get_model', 'get_model', (['config'], {}), '(config)\n', (3344, 3352), False, 'from anomalib.models import get_model\n'), ((3370, 3391), 'anomalib.utils.callbacks.get_callbacks', 'get_callbacks', (['config'], {}), '(config)\n', (3383, 3391), False, 'from anomalib.utils.callbacks import VisualizerCallback, get_callbacks\n'), ((4213, 4259), 'pytorch_lightning.Trainer', 'Trainer', ([], {'callbacks': 'callbacks'}), '(callbacks=callbacks, **config.trainer)\n', (4220, 4259), False, 'from pytorch_lightning import LightningDataModule, Trainer\n'), ((4744, 4761), 'anomalib.models.get_model', 'get_model', (['config'], {}), '(config)\n', (4753, 4761), False, 'from anomalib.models import get_model\n'), ((4796, 4817), 'anomalib.utils.callbacks.get_callbacks', 'get_callbacks', (['config'], {}), '(config)\n', (4809, 4817), False, 'from anomalib.utils.callbacks import VisualizerCallback, get_callbacks\n'), ((5133, 5179), 'pytorch_lightning.Trainer', 'Trainer', ([], {'callbacks': 'callbacks'}), '(callbacks=callbacks, **config.trainer)\n', (5140, 5179), False, 'from pytorch_lightning import LightningDataModule, Trainer\n'), ((5364, 5426), 'numpy.isclose', 'np.isclose', (["results['image_AUROC']", "new_results['image_AUROC']"], {}), "(results['image_AUROC'], new_results['image_AUROC'])\n", (5374, 5426), True, 'import numpy as np\n'), ((3103, 3129), 'anomalib.config.update_nncf_config', 'update_nncf_config', (['config'], {}), '(config)\n', (3121, 3129), False, 'from anomalib.config import get_configurable_parameters, 
update_nncf_config\n'), ((5559, 5621), 'numpy.isclose', 'np.isclose', (["results['pixel_AUROC']", "new_results['pixel_AUROC']"], {}), "(results['pixel_AUROC'], new_results['pixel_AUROC'])\n", (5569, 5621), True, 'import numpy as np\n'), ((3716, 3760), 'os.path.join', 'os.path.join', (['config.project.path', '"""weights"""'], {}), "(config.project.path, 'weights')\n", (3728, 3760), False, 'import os\n')] |
"""BTech Dataset.
This script contains PyTorch Lightning DataModule for the BTech dataset.
If the dataset is not on the file system, the script downloads and
extracts the dataset and create PyTorch data objects.
"""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import logging
import shutil
import zipfile
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
from urllib.request import urlretrieve
import albumentations as A
import cv2
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from torch import Tensor
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torchvision.datasets.folder import VisionDataset
from tqdm import tqdm
from anomalib.data.inference import InferenceDataset
from anomalib.data.utils import DownloadProgressBar, read_image
from anomalib.data.utils.split import (
create_validation_set_from_test_set,
split_normal_images_in_train_set,
)
from anomalib.pre_processing import PreProcessor
logger = logging.getLogger(name="Dataset: BTech")
logger.setLevel(logging.DEBUG)
def make_btech_dataset(
path: Path,
split: Optional[str] = None,
split_ratio: float = 0.1,
seed: int = 0,
create_validation_set: bool = False,
) -> DataFrame:
"""Create BTech samples by parsing the BTech data file structure.
The files are expected to follow the structure:
path/to/dataset/split/category/image_filename.png
path/to/dataset/ground_truth/category/mask_filename.png
Args:
path (Path): Path to dataset
split (str, optional): Dataset split (ie., either train or test). Defaults to None.
split_ratio (float, optional): Ratio to split normal training images and add to the
test set in case test set doesn't contain any normal images.
Defaults to 0.1.
seed (int, optional): Random seed to ensure reproducibility when splitting. Defaults to 0.
create_validation_set (bool, optional): Boolean to create a validation set from the test set.
BTech dataset does not contain a validation set. Those wanting to create a validation set
could set this flag to ``True``.
Example:
The following example shows how to get training samples from BTech 01 category:
>>> root = Path('./BTech')
>>> category = '01'
>>> path = root / category
>>> path
PosixPath('BTech/01')
>>> samples = make_btech_dataset(path, split='train', split_ratio=0.1, seed=0)
>>> samples.head()
path split label image_path mask_path label_index
0 BTech/01 train 01 BTech/01/train/ok/105.bmp BTech/01/ground_truth/ok/105.png 0
1 BTech/01 train 01 BTech/01/train/ok/017.bmp BTech/01/ground_truth/ok/017.png 0
...
Returns:
DataFrame: an output dataframe containing samples for the requested split (ie., train or test)
"""
samples_list = [
(str(path),) + filename.parts[-3:] for filename in path.glob("**/*") if filename.suffix in (".bmp", ".png")
]
if len(samples_list) == 0:
raise RuntimeError(f"Found 0 images in {path}")
samples = pd.DataFrame(samples_list, columns=["path", "split", "label", "image_path"])
samples = samples[samples.split != "ground_truth"]
# Create mask_path column
samples["mask_path"] = (
samples.path
+ "/ground_truth/"
+ samples.label
+ "/"
+ samples.image_path.str.rstrip("png").str.rstrip(".")
+ ".png"
)
# Modify image_path column by converting to absolute path
samples["image_path"] = samples.path + "/" + samples.split + "/" + samples.label + "/" + samples.image_path
# Split the normal images in training set if test set doesn't
# contain any normal images. This is needed because AUC score
# cannot be computed based on 1-class
if sum((samples.split == "test") & (samples.label == "ok")) == 0:
samples = split_normal_images_in_train_set(samples, split_ratio, seed)
# Good images don't have mask
samples.loc[(samples.split == "test") & (samples.label == "ok"), "mask_path"] = ""
# Create label index for normal (0) and anomalous (1) images.
samples.loc[(samples.label == "ok"), "label_index"] = 0
samples.loc[(samples.label != "ok"), "label_index"] = 1
samples.label_index = samples.label_index.astype(int)
if create_validation_set:
samples = create_validation_set_from_test_set(samples, seed=seed)
# Get the data frame for the split.
if split is not None and split in ["train", "val", "test"]:
samples = samples[samples.split == split]
samples = samples.reset_index(drop=True)
return samples
class BTech(VisionDataset):
"""BTech PyTorch Dataset."""
def __init__(
self,
root: Union[Path, str],
category: str,
pre_process: PreProcessor,
split: str,
task: str = "segmentation",
seed: int = 0,
create_validation_set: bool = False,
) -> None:
"""Btech Dataset class.
Args:
root: Path to the BTech dataset
category: Name of the BTech category.
pre_process: List of pre_processing object containing albumentation compose.
split: 'train', 'val' or 'test'
task: ``classification`` or ``segmentation``
seed: seed used for the random subset splitting
create_validation_set: Create a validation subset in addition to the train and test subsets
Examples:
>>> from anomalib.data.btech import BTech
>>> from anomalib.data.transforms import PreProcessor
>>> pre_process = PreProcessor(image_size=256)
>>> dataset = BTech(
... root='./datasets/BTech',
... category='leather',
... pre_process=pre_process,
... task="classification",
... is_train=True,
... )
>>> dataset[0].keys()
dict_keys(['image'])
>>> dataset.split = "test"
>>> dataset[0].keys()
dict_keys(['image', 'image_path', 'label'])
>>> dataset.task = "segmentation"
>>> dataset.split = "train"
>>> dataset[0].keys()
dict_keys(['image'])
>>> dataset.split = "test"
>>> dataset[0].keys()
dict_keys(['image_path', 'label', 'mask_path', 'image', 'mask'])
>>> dataset[0]["image"].shape, dataset[0]["mask"].shape
(torch.Size([3, 256, 256]), torch.Size([256, 256]))
"""
super().__init__(root)
self.root = Path(root) if isinstance(root, str) else root
self.category: str = category
self.split = split
self.task = task
self.pre_process = pre_process
self.samples = make_btech_dataset(
path=self.root / category,
split=self.split,
seed=seed,
create_validation_set=create_validation_set,
)
def __len__(self) -> int:
"""Get length of the dataset."""
return len(self.samples)
def __getitem__(self, index: int) -> Dict[str, Union[str, Tensor]]:
"""Get dataset item for the index ``index``.
Args:
index (int): Index to get the item.
Returns:
Union[Dict[str, Tensor], Dict[str, Union[str, Tensor]]]: Dict of image tensor during training.
Otherwise, Dict containing image path, target path, image tensor, label and transformed bounding box.
"""
item: Dict[str, Union[str, Tensor]] = {}
image_path = self.samples.image_path[index]
image = read_image(image_path)
if self.split == "train" or self.task == "classification":
pre_processed = self.pre_process(image=image)
item = {"image": pre_processed["image"]}
elif self.split in ["val", "test"]:
label_index = self.samples.label_index[index]
item["image_path"] = image_path
item["label"] = label_index
if self.task == "segmentation":
mask_path = self.samples.mask_path[index]
# Only Anomalous (1) images has masks in BTech dataset.
# Therefore, create empty mask for Normal (0) images.
if label_index == 0:
mask = np.zeros(shape=image.shape[:2])
else:
mask = cv2.imread(mask_path, flags=0) / 255.0
pre_processed = self.pre_process(image=image, mask=mask)
item["mask_path"] = mask_path
item["image"] = pre_processed["image"]
item["mask"] = pre_processed["mask"]
return item
class BTechDataModule(LightningDataModule):
"""BTechDataModule Lightning Data Module."""
def __init__(
self,
root: str,
category: str,
# TODO: Remove default values. IAAALD-211
image_size: Optional[Union[int, Tuple[int, int]]] = None,
train_batch_size: int = 32,
test_batch_size: int = 32,
num_workers: int = 8,
transform_config: Optional[Union[str, A.Compose]] = None,
seed: int = 0,
create_validation_set: bool = False,
) -> None:
"""Instantiate BTech Lightning Data Module.
Args:
root: Path to the BTech dataset
category: Name of the BTech category.
image_size: Variable to which image is resized.
train_batch_size: Training batch size.
test_batch_size: Testing batch size.
num_workers: Number of workers.
transform_config: Config for pre-processing.
seed: seed used for the random subset splitting
create_validation_set: Create a validation subset in addition to the train and test subsets
Examples
>>> from anomalib.data import BTechDataModule
>>> datamodule = BTechDataModule(
... root="./datasets/BTech",
... category="leather",
... image_size=256,
... train_batch_size=32,
... test_batch_size=32,
... num_workers=8,
... transform_config=None,
... )
>>> datamodule.setup()
>>> i, data = next(enumerate(datamodule.train_dataloader()))
>>> data.keys()
dict_keys(['image'])
>>> data["image"].shape
torch.Size([32, 3, 256, 256])
>>> i, data = next(enumerate(datamodule.val_dataloader()))
>>> data.keys()
dict_keys(['image_path', 'label', 'mask_path', 'image', 'mask'])
>>> data["image"].shape, data["mask"].shape
(torch.Size([32, 3, 256, 256]), torch.Size([32, 256, 256]))
"""
super().__init__()
self.root = root if isinstance(root, Path) else Path(root)
self.category = category
self.dataset_path = self.root / self.category
self.transform_config = transform_config
self.image_size = image_size
self.pre_process = PreProcessor(config=self.transform_config, image_size=self.image_size)
self.train_batch_size = train_batch_size
self.test_batch_size = test_batch_size
self.num_workers = num_workers
self.create_validation_set = create_validation_set
self.seed = seed
self.train_data: Dataset
self.test_data: Dataset
if create_validation_set:
self.val_data: Dataset
self.inference_data: Dataset
def prepare_data(self) -> None:
"""Download the dataset if not available."""
if (self.root / self.category).is_dir():
logging.info("Found the dataset.")
else:
zip_filename = self.root.parent / "btad.zip"
logging.info("Downloading the BTech dataset.")
with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc="BTech") as progress_bar:
urlretrieve(
url="https://avires.dimi.uniud.it/papers/btad/btad.zip",
filename=zip_filename,
reporthook=progress_bar.update_to,
) # nosec
logging.info("Extracting the dataset.")
with zipfile.ZipFile(zip_filename, "r") as zip_file:
zip_file.extractall(self.root.parent)
logging.info("Renaming the dataset directory")
shutil.move(src=str(self.root.parent / "BTech_Dataset_transformed"), dst=str(self.root))
# NOTE: Each BTech category has different image extension as follows
# | Category | Image | Mask |
# |----------|-------|------|
# | 01 | bmp | png |
# | 02 | png | png |
# | 03 | bmp | bmp |
# To avoid any conflict, the following script converts all the extensions to png.
# This solution works fine, but it's also possible to properly ready the bmp and
# png filenames from categories in `make_btech_dataset` function.
logging.info("Convert the bmp formats to png to have consistent image extensions")
for filename in tqdm(self.root.glob("**/*.bmp"), desc="Converting bmp to png"):
image = cv2.imread(str(filename))
cv2.imwrite(str(filename.with_suffix(".png")), image)
filename.unlink()
logging.info("Cleaning the tar file")
zip_filename.unlink()
def setup(self, stage: Optional[str] = None) -> None:
"""Setup train, validation and test data.
BTech dataset uses BTech dataset structure, which is the reason for
using `anomalib.data.btech.BTech` class to get the dataset items.
Args:
stage: Optional[str]: Train/Val/Test stages. (Default value = None)
"""
if stage in (None, "fit"):
self.train_data = BTech(
root=self.root,
category=self.category,
pre_process=self.pre_process,
split="train",
seed=self.seed,
create_validation_set=self.create_validation_set,
)
if self.create_validation_set:
self.val_data = BTech(
root=self.root,
category=self.category,
pre_process=self.pre_process,
split="val",
seed=self.seed,
create_validation_set=self.create_validation_set,
)
self.test_data = BTech(
root=self.root,
category=self.category,
pre_process=self.pre_process,
split="test",
seed=self.seed,
create_validation_set=self.create_validation_set,
)
if stage == "predict":
self.inference_data = InferenceDataset(
path=self.root, image_size=self.image_size, transform_config=self.transform_config
)
def train_dataloader(self) -> TRAIN_DATALOADERS:
"""Get train dataloader."""
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batch_size, num_workers=self.num_workers)
def val_dataloader(self) -> EVAL_DATALOADERS:
"""Get validation dataloader."""
dataset = self.val_data if self.create_validation_set else self.test_data
return DataLoader(dataset=dataset, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)
def test_dataloader(self) -> EVAL_DATALOADERS:
"""Get test dataloader."""
return DataLoader(self.test_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)
def predict_dataloader(self) -> EVAL_DATALOADERS:
"""Get predict dataloader."""
return DataLoader(
self.inference_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers
)
| [
"anomalib.data.inference.InferenceDataset",
"anomalib.data.utils.split.split_normal_images_in_train_set",
"anomalib.pre_processing.PreProcessor",
"anomalib.data.utils.read_image",
"anomalib.data.utils.split.create_validation_set_from_test_set",
"anomalib.data.utils.DownloadProgressBar"
] | [((1707, 1747), 'logging.getLogger', 'logging.getLogger', ([], {'name': '"""Dataset: BTech"""'}), "(name='Dataset: BTech')\n", (1724, 1747), False, 'import logging\n'), ((3930, 4006), 'pandas.DataFrame', 'pd.DataFrame', (['samples_list'], {'columns': "['path', 'split', 'label', 'image_path']"}), "(samples_list, columns=['path', 'split', 'label', 'image_path'])\n", (3942, 4006), True, 'import pandas as pd\n'), ((4732, 4792), 'anomalib.data.utils.split.split_normal_images_in_train_set', 'split_normal_images_in_train_set', (['samples', 'split_ratio', 'seed'], {}), '(samples, split_ratio, seed)\n', (4764, 4792), False, 'from anomalib.data.utils.split import create_validation_set_from_test_set, split_normal_images_in_train_set\n'), ((5209, 5264), 'anomalib.data.utils.split.create_validation_set_from_test_set', 'create_validation_set_from_test_set', (['samples'], {'seed': 'seed'}), '(samples, seed=seed)\n', (5244, 5264), False, 'from anomalib.data.utils.split import create_validation_set_from_test_set, split_normal_images_in_train_set\n'), ((8510, 8532), 'anomalib.data.utils.read_image', 'read_image', (['image_path'], {}), '(image_path)\n', (8520, 8532), False, 'from anomalib.data.utils import DownloadProgressBar, read_image\n'), ((11982, 12052), 'anomalib.pre_processing.PreProcessor', 'PreProcessor', ([], {'config': 'self.transform_config', 'image_size': 'self.image_size'}), '(config=self.transform_config, image_size=self.image_size)\n', (11994, 12052), False, 'from anomalib.pre_processing import PreProcessor\n'), ((16048, 16157), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_data'], {'shuffle': '(True)', 'batch_size': 'self.train_batch_size', 'num_workers': 'self.num_workers'}), '(self.train_data, shuffle=True, batch_size=self.train_batch_size,\n num_workers=self.num_workers)\n', (16058, 16157), False, 'from torch.utils.data import DataLoader\n'), ((16343, 16452), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset', 'shuffle': 
'(False)', 'batch_size': 'self.test_batch_size', 'num_workers': 'self.num_workers'}), '(dataset=dataset, shuffle=False, batch_size=self.test_batch_size,\n num_workers=self.num_workers)\n', (16353, 16452), False, 'from torch.utils.data import DataLoader\n'), ((16551, 16659), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_data'], {'shuffle': '(False)', 'batch_size': 'self.test_batch_size', 'num_workers': 'self.num_workers'}), '(self.test_data, shuffle=False, batch_size=self.test_batch_size,\n num_workers=self.num_workers)\n', (16561, 16659), False, 'from torch.utils.data import DataLoader\n'), ((16764, 16878), 'torch.utils.data.DataLoader', 'DataLoader', (['self.inference_data'], {'shuffle': '(False)', 'batch_size': 'self.test_batch_size', 'num_workers': 'self.num_workers'}), '(self.inference_data, shuffle=False, batch_size=self.\n test_batch_size, num_workers=self.num_workers)\n', (16774, 16878), False, 'from torch.utils.data import DataLoader\n'), ((7464, 7474), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (7468, 7474), False, 'from pathlib import Path\n'), ((11770, 11780), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (11774, 11780), False, 'from pathlib import Path\n'), ((12597, 12631), 'logging.info', 'logging.info', (['"""Found the dataset."""'], {}), "('Found the dataset.')\n", (12609, 12631), False, 'import logging\n'), ((12716, 12762), 'logging.info', 'logging.info', (['"""Downloading the BTech dataset."""'], {}), "('Downloading the BTech dataset.')\n", (12728, 12762), False, 'import logging\n'), ((13114, 13153), 'logging.info', 'logging.info', (['"""Extracting the dataset."""'], {}), "('Extracting the dataset.')\n", (13126, 13153), False, 'import logging\n'), ((13286, 13332), 'logging.info', 'logging.info', (['"""Renaming the dataset directory"""'], {}), "('Renaming the dataset directory')\n", (13298, 13332), False, 'import logging\n'), ((14033, 14120), 'logging.info', 'logging.info', (['"""Convert the bmp formats to png to have 
consistent image extensions"""'], {}), "(\n 'Convert the bmp formats to png to have consistent image extensions')\n", (14045, 14120), False, 'import logging\n'), ((14375, 14412), 'logging.info', 'logging.info', (['"""Cleaning the tar file"""'], {}), "('Cleaning the tar file')\n", (14387, 14412), False, 'import logging\n'), ((15812, 15916), 'anomalib.data.inference.InferenceDataset', 'InferenceDataset', ([], {'path': 'self.root', 'image_size': 'self.image_size', 'transform_config': 'self.transform_config'}), '(path=self.root, image_size=self.image_size,\n transform_config=self.transform_config)\n', (15828, 15916), False, 'from anomalib.data.inference import InferenceDataset\n'), ((12780, 12852), 'anomalib.data.utils.DownloadProgressBar', 'DownloadProgressBar', ([], {'unit': '"""B"""', 'unit_scale': '(True)', 'miniters': '(1)', 'desc': '"""BTech"""'}), "(unit='B', unit_scale=True, miniters=1, desc='BTech')\n", (12799, 12852), False, 'from anomalib.data.utils import DownloadProgressBar, read_image\n'), ((12886, 13016), 'urllib.request.urlretrieve', 'urlretrieve', ([], {'url': '"""https://avires.dimi.uniud.it/papers/btad/btad.zip"""', 'filename': 'zip_filename', 'reporthook': 'progress_bar.update_to'}), "(url='https://avires.dimi.uniud.it/papers/btad/btad.zip',\n filename=zip_filename, reporthook=progress_bar.update_to)\n", (12897, 13016), False, 'from urllib.request import urlretrieve\n'), ((13171, 13205), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_filename', '"""r"""'], {}), "(zip_filename, 'r')\n", (13186, 13205), False, 'import zipfile\n'), ((9209, 9240), 'numpy.zeros', 'np.zeros', ([], {'shape': 'image.shape[:2]'}), '(shape=image.shape[:2])\n', (9217, 9240), True, 'import numpy as np\n'), ((9290, 9320), 'cv2.imread', 'cv2.imread', (['mask_path'], {'flags': '(0)'}), '(mask_path, flags=0)\n', (9300, 9320), False, 'import cv2\n')] |
"""Anomalib Gradio Script.
This script provide a gradio web interface
"""
from argparse import ArgumentParser, Namespace
from importlib import import_module
from pathlib import Path
from typing import Optional, Tuple
import gradio as gr
import gradio.inputs
import gradio.outputs
import numpy as np
from skimage.segmentation import mark_boundaries
from anomalib.config import get_configurable_parameters
from anomalib.deploy.inferencers.base import Inferencer
from anomalib.post_processing import compute_mask, superimpose_anomaly_map
def infer(
image: np.ndarray, inferencer: Inferencer, threshold: float = 50.0
) -> Tuple[np.ndarray, float, np.ndarray, np.ndarray, np.ndarray]:
"""Inference function, return anomaly map, score, heat map, prediction mask ans visualisation.
Args:
image (np.ndarray): image to compute
inferencer (Inferencer): model inferencer
threshold (float, optional): threshold between 0 and 100. Defaults to 50.0.
Returns:
Tuple[np.ndarray, float, np.ndarray, np.ndarray, np.ndarray]:
anomaly_map, anomaly_score, heat_map, pred_mask, vis_img
"""
# Perform inference for the given image.
threshold = threshold / 100
anomaly_map, anomaly_score = inferencer.predict(image=image, superimpose=False)
heat_map = superimpose_anomaly_map(anomaly_map, image)
pred_mask = compute_mask(anomaly_map, threshold)
vis_img = mark_boundaries(image, pred_mask, color=(1, 0, 0), mode="thick")
return anomaly_map, anomaly_score, heat_map, pred_mask, vis_img
def get_args() -> Namespace:
"""Get command line arguments.
Example:
>>> python tools/inference_gradio.py \
--config_path ./anomalib/models/padim/config.yaml \
--weight_path ./results/padim/mvtec/bottle/weights/model.ckpt
Returns:
Namespace: List of arguments.
"""
parser = ArgumentParser()
parser.add_argument("--config_path", type=Path, required=True, help="Path to a model config file")
parser.add_argument("--weight_path", type=Path, required=True, help="Path to a model weights")
parser.add_argument(
"--meta_data_path", type=Path, required=False, help="Path to JSON file containing the metadata."
)
parser.add_argument(
"--threshold",
type=float,
required=False,
default=75.0,
help="Value to threshold anomaly scores into 0-100 range",
)
parser.add_argument("--share", type=bool, required=False, default=False, help="Share Gradio `share_url`")
args = parser.parse_args()
return args
def get_inferencer(config_path: Path, weight_path: Path, meta_data_path: Optional[Path] = None) -> Inferencer:
"""Parse args and open inferencer.
Args:
config_path (Path): Path to model configuration file or the name of the model.
weight_path (Path): Path to model weights.
meta_data_path (Optional[Path], optional): Metadata is required for OpenVINO models. Defaults to None.
Raises:
ValueError: If unsupported model weight is passed.
Returns:
Inferencer: Torch or OpenVINO inferencer.
"""
config = get_configurable_parameters(config_path=config_path)
# Get the inferencer. We use .ckpt extension for Torch models and (onnx, bin)
# for the openvino models.
extension = weight_path.suffix
inferencer: Inferencer
if extension in (".ckpt"):
module = import_module("anomalib.deploy.inferencers.torch")
TorchInferencer = getattr(module, "TorchInferencer")
inferencer = TorchInferencer(config=config, model_source=weight_path, meta_data_path=meta_data_path)
elif extension in (".onnx", ".bin", ".xml"):
module = import_module("anomalib.deploy.inferencers.openvino")
OpenVINOInferencer = getattr(module, "OpenVINOInferencer")
inferencer = OpenVINOInferencer(config=config, path=weight_path, meta_data_path=meta_data_path)
else:
raise ValueError(
f"Model extension is not supported. Torch Inferencer exptects a .ckpt file,"
f"OpenVINO Inferencer expects either .onnx, .bin or .xml file. Got {extension}"
)
return inferencer
if __name__ == "__main__":
session_args = get_args()
gradio_inferencer = get_inferencer(session_args.config_path, session_args.weight_path, session_args.meta_data_path)
interface = gr.Interface(
fn=lambda image, threshold: infer(image, gradio_inferencer, threshold),
inputs=[
gradio.inputs.Image(
shape=None, image_mode="RGB", source="upload", tool="editor", type="numpy", label="Image"
),
gradio.inputs.Slider(default=session_args.threshold, label="threshold", optional=False),
],
outputs=[
gradio.outputs.Image(type="numpy", label="Anomaly Map"),
gradio.outputs.Textbox(type="number", label="Anomaly Score"),
gradio.outputs.Image(type="numpy", label="Predicted Heat Map"),
gradio.outputs.Image(type="numpy", label="Predicted Mask"),
gradio.outputs.Image(type="numpy", label="Segmentation Result"),
],
title="Anomalib",
description="Anomalib Gradio",
)
interface.launch(share=session_args.share)
| [
"anomalib.post_processing.compute_mask",
"anomalib.config.get_configurable_parameters",
"anomalib.post_processing.superimpose_anomaly_map"
] | [((1312, 1355), 'anomalib.post_processing.superimpose_anomaly_map', 'superimpose_anomaly_map', (['anomaly_map', 'image'], {}), '(anomaly_map, image)\n', (1335, 1355), False, 'from anomalib.post_processing import compute_mask, superimpose_anomaly_map\n'), ((1372, 1408), 'anomalib.post_processing.compute_mask', 'compute_mask', (['anomaly_map', 'threshold'], {}), '(anomaly_map, threshold)\n', (1384, 1408), False, 'from anomalib.post_processing import compute_mask, superimpose_anomaly_map\n'), ((1423, 1487), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['image', 'pred_mask'], {'color': '(1, 0, 0)', 'mode': '"""thick"""'}), "(image, pred_mask, color=(1, 0, 0), mode='thick')\n", (1438, 1487), False, 'from skimage.segmentation import mark_boundaries\n'), ((1897, 1913), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1911, 1913), False, 'from argparse import ArgumentParser, Namespace\n'), ((3169, 3221), 'anomalib.config.get_configurable_parameters', 'get_configurable_parameters', ([], {'config_path': 'config_path'}), '(config_path=config_path)\n', (3196, 3221), False, 'from anomalib.config import get_configurable_parameters\n'), ((3446, 3496), 'importlib.import_module', 'import_module', (['"""anomalib.deploy.inferencers.torch"""'], {}), "('anomalib.deploy.inferencers.torch')\n", (3459, 3496), False, 'from importlib import import_module\n'), ((3734, 3787), 'importlib.import_module', 'import_module', (['"""anomalib.deploy.inferencers.openvino"""'], {}), "('anomalib.deploy.inferencers.openvino')\n", (3747, 3787), False, 'from importlib import import_module\n')] |
"""CFLOW: Real-Time Unsupervised Anomaly Detection via Conditional Normalizing Flows.
https://arxiv.org/pdf/2107.12571v1.pdf
"""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import logging
from typing import List, Tuple, Union
import einops
import torch
import torch.nn.functional as F
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.utilities.cli import MODEL_REGISTRY
from torch import optim
from anomalib.models.cflow.torch_model import CflowModel
from anomalib.models.cflow.utils import get_logp, positional_encoding_2d
from anomalib.models.components import AnomalyModule
logger = logging.getLogger(__name__)
__all__ = ["Cflow", "CflowLightning"]
@MODEL_REGISTRY
class Cflow(AnomalyModule):
"""PL Lightning Module for the CFLOW algorithm."""
def __init__(
self,
input_size: Tuple[int, int],
backbone: str,
layers: List[str],
fiber_batch_size: int = 64,
decoder: str = "freia-cflow",
condition_vector: int = 128,
coupling_blocks: int = 8,
clamp_alpha: float = 1.9,
permute_soft: bool = False,
):
super().__init__()
logger.info("Initializing Cflow Lightning model.")
self.model: CflowModel = CflowModel(
input_size=input_size,
backbone=backbone,
layers=layers,
fiber_batch_size=fiber_batch_size,
decoder=decoder,
condition_vector=condition_vector,
coupling_blocks=coupling_blocks,
clamp_alpha=clamp_alpha,
permute_soft=permute_soft,
)
self.loss_val = 0
self.automatic_optimization = False
def training_step(self, batch, _): # pylint: disable=arguments-differ
"""Training Step of CFLOW.
For each batch, decoder layers are trained with a dynamic fiber batch size.
Training step is performed manually as multiple training steps are involved
per batch of input images
Args:
batch: Input batch
_: Index of the batch.
Returns:
Loss value for the batch
"""
opt = self.optimizers()
self.model.encoder.eval()
images = batch["image"]
activation = self.model.encoder(images)
avg_loss = torch.zeros([1], dtype=torch.float64).to(images.device)
height = []
width = []
for layer_idx, layer in enumerate(self.model.pool_layers):
encoder_activations = activation[layer].detach() # BxCxHxW
batch_size, dim_feature_vector, im_height, im_width = encoder_activations.size()
image_size = im_height * im_width
embedding_length = batch_size * image_size # number of rows in the conditional vector
height.append(im_height)
width.append(im_width)
# repeats positional encoding for the entire batch 1 C H W to B C H W
pos_encoding = einops.repeat(
positional_encoding_2d(self.model.condition_vector, im_height, im_width).unsqueeze(0),
"b c h w-> (tile b) c h w",
tile=batch_size,
).to(images.device)
c_r = einops.rearrange(pos_encoding, "b c h w -> (b h w) c") # BHWxP
e_r = einops.rearrange(encoder_activations, "b c h w -> (b h w) c") # BHWxC
perm = torch.randperm(embedding_length) # BHW
decoder = self.model.decoders[layer_idx].to(images.device)
fiber_batches = embedding_length // self.model.fiber_batch_size # number of fiber batches
assert fiber_batches > 0, "Make sure we have enough fibers, otherwise decrease N or batch-size!"
for batch_num in range(fiber_batches): # per-fiber processing
opt.zero_grad()
if batch_num < (fiber_batches - 1):
idx = torch.arange(
batch_num * self.model.fiber_batch_size, (batch_num + 1) * self.model.fiber_batch_size
)
else: # When non-full batch is encountered batch_num * N will go out of bounds
idx = torch.arange(batch_num * self.model.fiber_batch_size, embedding_length)
# get random vectors
c_p = c_r[perm[idx]] # NxP
e_p = e_r[perm[idx]] # NxC
# decoder returns the transformed variable z and the log Jacobian determinant
p_u, log_jac_det = decoder(e_p, [c_p])
#
decoder_log_prob = get_logp(dim_feature_vector, p_u, log_jac_det)
log_prob = decoder_log_prob / dim_feature_vector # likelihood per dim
loss = -F.logsigmoid(log_prob)
self.manual_backward(loss.mean())
opt.step()
avg_loss += loss.sum()
return {"loss": avg_loss}
def validation_step(self, batch, _): # pylint: disable=arguments-differ
"""Validation Step of CFLOW.
Similar to the training step, encoder features
are extracted from the CNN for each batch, and anomaly
map is computed.
Args:
batch: Input batch
_: Index of the batch.
Returns:
Dictionary containing images, anomaly maps, true labels and masks.
These are required in `validation_epoch_end` for feature concatenation.
"""
batch["anomaly_maps"] = self.model(batch["image"])
return batch
class CflowLightning(Cflow):
"""PL Lightning Module for the CFLOW algorithm.
Args:
hparams (Union[DictConfig, ListConfig]): Model params
"""
def __init__(self, hparams: Union[DictConfig, ListConfig]) -> None:
super().__init__(
input_size=hparams.model.input_size,
backbone=hparams.model.backbone,
layers=hparams.model.layers,
fiber_batch_size=hparams.dataset.fiber_batch_size,
decoder=hparams.model.decoder,
condition_vector=hparams.model.condition_vector,
coupling_blocks=hparams.model.coupling_blocks,
clamp_alpha=hparams.model.clamp_alpha,
permute_soft=hparams.model.soft_permutation,
)
self.hparams: Union[DictConfig, ListConfig] # type: ignore
self.save_hyperparameters(hparams)
def configure_callbacks(self):
"""Configure model-specific callbacks.
Note:
This method is used for the existing CLI.
When PL CLI is introduced, configure callback method will be
deprecated, and callbacks will be configured from either
config.yaml file or from CLI.
"""
early_stopping = EarlyStopping(
monitor=self.hparams.model.early_stopping.metric,
patience=self.hparams.model.early_stopping.patience,
mode=self.hparams.model.early_stopping.mode,
)
return [early_stopping]
def configure_optimizers(self) -> torch.optim.Optimizer:
"""Configures optimizers for each decoder.
Note:
This method is used for the existing CLI.
When PL CLI is introduced, configure optimizers method will be
deprecated, and optimizers will be configured from either
config.yaml file or from CLI.
Returns:
Optimizer: Adam optimizer for each decoder
"""
decoders_parameters = []
for decoder_idx in range(len(self.model.pool_layers)):
decoders_parameters.extend(list(self.model.decoders[decoder_idx].parameters()))
optimizer = optim.Adam(
params=decoders_parameters,
lr=self.hparams.model.lr,
)
return optimizer
| [
"anomalib.models.cflow.utils.positional_encoding_2d",
"anomalib.models.cflow.torch_model.CflowModel",
"anomalib.models.cflow.utils.get_logp"
] | [((1203, 1230), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1220, 1230), False, 'import logging\n'), ((1833, 2086), 'anomalib.models.cflow.torch_model.CflowModel', 'CflowModel', ([], {'input_size': 'input_size', 'backbone': 'backbone', 'layers': 'layers', 'fiber_batch_size': 'fiber_batch_size', 'decoder': 'decoder', 'condition_vector': 'condition_vector', 'coupling_blocks': 'coupling_blocks', 'clamp_alpha': 'clamp_alpha', 'permute_soft': 'permute_soft'}), '(input_size=input_size, backbone=backbone, layers=layers,\n fiber_batch_size=fiber_batch_size, decoder=decoder, condition_vector=\n condition_vector, coupling_blocks=coupling_blocks, clamp_alpha=\n clamp_alpha, permute_soft=permute_soft)\n', (1843, 2086), False, 'from anomalib.models.cflow.torch_model import CflowModel\n'), ((7325, 7496), 'pytorch_lightning.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': 'self.hparams.model.early_stopping.metric', 'patience': 'self.hparams.model.early_stopping.patience', 'mode': 'self.hparams.model.early_stopping.mode'}), '(monitor=self.hparams.model.early_stopping.metric, patience=\n self.hparams.model.early_stopping.patience, mode=self.hparams.model.\n early_stopping.mode)\n', (7338, 7496), False, 'from pytorch_lightning.callbacks import EarlyStopping\n'), ((8237, 8301), 'torch.optim.Adam', 'optim.Adam', ([], {'params': 'decoders_parameters', 'lr': 'self.hparams.model.lr'}), '(params=decoders_parameters, lr=self.hparams.model.lr)\n', (8247, 8301), False, 'from torch import optim\n'), ((3790, 3844), 'einops.rearrange', 'einops.rearrange', (['pos_encoding', '"""b c h w -> (b h w) c"""'], {}), "(pos_encoding, 'b c h w -> (b h w) c')\n", (3806, 3844), False, 'import einops\n'), ((3872, 3933), 'einops.rearrange', 'einops.rearrange', (['encoder_activations', '"""b c h w -> (b h w) c"""'], {}), "(encoder_activations, 'b c h w -> (b h w) c')\n", (3888, 3933), False, 'import einops\n'), ((3962, 3994), 'torch.randperm', 'torch.randperm', 
(['embedding_length'], {}), '(embedding_length)\n', (3976, 3994), False, 'import torch\n'), ((2889, 2926), 'torch.zeros', 'torch.zeros', (['[1]'], {'dtype': 'torch.float64'}), '([1], dtype=torch.float64)\n', (2900, 2926), False, 'import torch\n'), ((5140, 5186), 'anomalib.models.cflow.utils.get_logp', 'get_logp', (['dim_feature_vector', 'p_u', 'log_jac_det'], {}), '(dim_feature_vector, p_u, log_jac_det)\n', (5148, 5186), False, 'from anomalib.models.cflow.utils import get_logp, positional_encoding_2d\n'), ((4472, 4576), 'torch.arange', 'torch.arange', (['(batch_num * self.model.fiber_batch_size)', '((batch_num + 1) * self.model.fiber_batch_size)'], {}), '(batch_num * self.model.fiber_batch_size, (batch_num + 1) *\n self.model.fiber_batch_size)\n', (4484, 4576), False, 'import torch\n'), ((4741, 4812), 'torch.arange', 'torch.arange', (['(batch_num * self.model.fiber_batch_size)', 'embedding_length'], {}), '(batch_num * self.model.fiber_batch_size, embedding_length)\n', (4753, 4812), False, 'import torch\n'), ((5298, 5320), 'torch.nn.functional.logsigmoid', 'F.logsigmoid', (['log_prob'], {}), '(log_prob)\n', (5310, 5320), True, 'import torch.nn.functional as F\n'), ((3576, 3648), 'anomalib.models.cflow.utils.positional_encoding_2d', 'positional_encoding_2d', (['self.model.condition_vector', 'im_height', 'im_width'], {}), '(self.model.condition_vector, im_height, im_width)\n', (3598, 3648), False, 'from anomalib.models.cflow.utils import get_logp, positional_encoding_2d\n')] |
"""Test Config Getter."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import pytest
from anomalib.config import get_configurable_parameters
class TestConfig:
"""Test Config Getter."""
def test_get_configurable_parameters_return_correct_model_name(self):
"""Configurable parameter should return the correct model name."""
model_name = "stfpm"
configurable_parameters = get_configurable_parameters(model_name)
assert configurable_parameters.model.name == model_name
def test_get_configurable_parameter_fails_with_none_arguments(self):
"""Configurable parameter should raise an error with none arguments."""
with pytest.raises(ValueError):
get_configurable_parameters()
| [
"anomalib.config.get_configurable_parameters"
] | [((943, 982), 'anomalib.config.get_configurable_parameters', 'get_configurable_parameters', (['model_name'], {}), '(model_name)\n', (970, 982), False, 'from anomalib.config import get_configurable_parameters\n'), ((1214, 1239), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1227, 1239), False, 'import pytest\n'), ((1253, 1282), 'anomalib.config.get_configurable_parameters', 'get_configurable_parameters', ([], {}), '()\n', (1280, 1282), False, 'from anomalib.config import get_configurable_parameters\n')] |
"""Anomalib Inferencer Script.
This script performs inference by reading a model config file from
command line, and show the visualization results.
"""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from argparse import ArgumentParser, Namespace
from importlib import import_module
from pathlib import Path
import cv2
import numpy as np
from anomalib.config import get_configurable_parameters
from anomalib.deploy.inferencers.base import Inferencer
def get_args() -> Namespace:
"""Get command line arguments.
Returns:
Namespace: List of arguments.
"""
parser = ArgumentParser()
parser.add_argument("--model_config_path", type=Path, required=True, help="Path to a model config file")
parser.add_argument("--weight_path", type=Path, required=True, help="Path to a model weights")
parser.add_argument("--image_path", type=Path, required=True, help="Path to an image to infer.")
parser.add_argument("--save_path", type=Path, required=False, help="Path to save the output image.")
parser.add_argument("--meta_data", type=Path, required=False, help="Path to JSON file containing the metadata.")
return parser.parse_args()
def add_label(prediction: np.ndarray, scores: float, font: int = cv2.FONT_HERSHEY_PLAIN) -> np.ndarray:
"""If the model outputs score, it adds the score to the output image.
Args:
prediction (np.ndarray): Resized anomaly map.
scores (float): Confidence score.
Returns:
np.ndarray: Image with score text.
"""
text = f"Confidence Score {scores:.0%}"
font_size = prediction.shape[1] // 1024 + 1 # Text scale is calculated based on the reference size of 1024
(width, height), baseline = cv2.getTextSize(text, font, font_size, thickness=font_size // 2)
label_patch = np.zeros((height + baseline, width + baseline, 3), dtype=np.uint8)
label_patch[:, :] = (225, 252, 134)
cv2.putText(label_patch, text, (0, baseline // 2 + height), font, font_size, 0)
prediction[: baseline + height, : baseline + width] = label_patch
return prediction
def infer() -> None:
"""Perform inference on an input image."""
# Get the command line arguments, and config from the config.yaml file.
# This config file is also used for training and contains all the relevant
# information regarding the data, model, train and inference details.
args = get_args()
config = get_configurable_parameters(model_config_path=args.model_config_path)
# Get the inferencer. We use .ckpt extension for Torch models and (onnx, bin)
# for the openvino models.
extension = args.weight_path.suffix
inference: Inferencer
if extension in (".ckpt"):
module = import_module("anomalib.deploy.inferencers.torch")
TorchInferencer = getattr(module, "TorchInferencer") # pylint: disable=invalid-name
inference = TorchInferencer(config=config, model_source=args.weight_path, meta_data_path=args.meta_data)
elif extension in (".onnx", ".bin", ".xml"):
module = import_module("anomalib.deploy.inferencers.openvino")
OpenVINOInferencer = getattr(module, "OpenVINOInferencer") # pylint: disable=invalid-name
inference = OpenVINOInferencer(config=config, path=args.weight_path, meta_data_path=args.meta_data)
else:
raise ValueError(
f"Model extension is not supported. Torch Inferencer exptects a .ckpt file,"
f"OpenVINO Inferencer expects either .onnx, .bin or .xml file. Got {extension}"
)
# Perform inference for the given image or image path. if image
# path is provided, `predict` method will read the image from
# file for convenience. We set the superimpose flag to True
# to overlay the predicted anomaly map on top of the input image.
output = inference.predict(image=args.image_path, superimpose=True)
# Incase both anomaly map and scores are returned add scores to the image.
if isinstance(output, tuple):
anomaly_map, score = output
output = add_label(anomaly_map, score)
# Show or save the output image, depending on what's provided as
# the command line argument.
output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
if args.save_path is None:
cv2.imshow("Anomaly Map", output)
else:
cv2.imwrite(filename=str(args.save_path), img=output)
if __name__ == "__main__":
infer()
| [
"anomalib.config.get_configurable_parameters"
] | [((1127, 1143), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1141, 1143), False, 'from argparse import ArgumentParser, Namespace\n'), ((2247, 2311), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'font_size'], {'thickness': '(font_size // 2)'}), '(text, font, font_size, thickness=font_size // 2)\n', (2262, 2311), False, 'import cv2\n'), ((2330, 2396), 'numpy.zeros', 'np.zeros', (['(height + baseline, width + baseline, 3)'], {'dtype': 'np.uint8'}), '((height + baseline, width + baseline, 3), dtype=np.uint8)\n', (2338, 2396), True, 'import numpy as np\n'), ((2441, 2520), 'cv2.putText', 'cv2.putText', (['label_patch', 'text', '(0, baseline // 2 + height)', 'font', 'font_size', '(0)'], {}), '(label_patch, text, (0, baseline // 2 + height), font, font_size, 0)\n', (2452, 2520), False, 'import cv2\n'), ((2948, 3017), 'anomalib.config.get_configurable_parameters', 'get_configurable_parameters', ([], {'model_config_path': 'args.model_config_path'}), '(model_config_path=args.model_config_path)\n', (2975, 3017), False, 'from anomalib.config import get_configurable_parameters\n'), ((4713, 4752), 'cv2.cvtColor', 'cv2.cvtColor', (['output', 'cv2.COLOR_RGB2BGR'], {}), '(output, cv2.COLOR_RGB2BGR)\n', (4725, 4752), False, 'import cv2\n'), ((3246, 3296), 'importlib.import_module', 'import_module', (['"""anomalib.deploy.inferencers.torch"""'], {}), "('anomalib.deploy.inferencers.torch')\n", (3259, 3296), False, 'from importlib import import_module\n'), ((4792, 4825), 'cv2.imshow', 'cv2.imshow', (['"""Anomaly Map"""', 'output'], {}), "('Anomaly Map', output)\n", (4802, 4825), False, 'import cv2\n'), ((3570, 3623), 'importlib.import_module', 'import_module', (['"""anomalib.deploy.inferencers.openvino"""'], {}), "('anomalib.deploy.inferencers.openvino')\n", (3583, 3623), False, 'from importlib import import_module\n')] |
"""Anomaly Classification Task."""
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from anomalib.utils.callbacks import MinMaxNormalizationCallback
from ote_anomalib import AnomalyInferenceTask
from ote_anomalib.callbacks import ProgressCallback
from ote_anomalib.data import OTEAnomalyDataModule
from ote_anomalib.logging import get_logger
from ote_sdk.entities.datasets import DatasetEntity
from ote_sdk.entities.model import ModelEntity
from ote_sdk.entities.train_parameters import TrainParameters
from ote_sdk.usecases.tasks.interfaces.training_interface import ITrainingTask
from pytorch_lightning import Trainer
logger = get_logger(__name__)
class AnomalyTrainingTask(AnomalyInferenceTask, ITrainingTask):
"""Base Anomaly Task."""
def train(
self,
dataset: DatasetEntity,
output_model: ModelEntity,
train_parameters: TrainParameters,
) -> None:
"""Train the anomaly classification model.
Args:
dataset (DatasetEntity): Input dataset.
output_model (ModelEntity): Output model to save the model weights.
train_parameters (TrainParameters): Training parameters
"""
logger.info("Training the model.")
config = self.get_config()
logger.info("Training Configs '%s'", config)
datamodule = OTEAnomalyDataModule(config=config, dataset=dataset, task_type=self.task_type)
callbacks = [ProgressCallback(parameters=train_parameters), MinMaxNormalizationCallback()]
self.trainer = Trainer(**config.trainer, logger=False, callbacks=callbacks)
self.trainer.fit(model=self.model, datamodule=datamodule)
self.save_model(output_model)
logger.info("Training completed.")
| [
"anomalib.utils.callbacks.MinMaxNormalizationCallback"
] | [((1164, 1184), 'ote_anomalib.logging.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (1174, 1184), False, 'from ote_anomalib.logging import get_logger\n'), ((1867, 1945), 'ote_anomalib.data.OTEAnomalyDataModule', 'OTEAnomalyDataModule', ([], {'config': 'config', 'dataset': 'dataset', 'task_type': 'self.task_type'}), '(config=config, dataset=dataset, task_type=self.task_type)\n', (1887, 1945), False, 'from ote_anomalib.data import OTEAnomalyDataModule\n'), ((2069, 2129), 'pytorch_lightning.Trainer', 'Trainer', ([], {'logger': '(False)', 'callbacks': 'callbacks'}), '(**config.trainer, logger=False, callbacks=callbacks)\n', (2076, 2129), False, 'from pytorch_lightning import Trainer\n'), ((1967, 2012), 'ote_anomalib.callbacks.ProgressCallback', 'ProgressCallback', ([], {'parameters': 'train_parameters'}), '(parameters=train_parameters)\n', (1983, 2012), False, 'from ote_anomalib.callbacks import ProgressCallback\n'), ((2014, 2043), 'anomalib.utils.callbacks.MinMaxNormalizationCallback', 'MinMaxNormalizationCallback', ([], {}), '()\n', (2041, 2043), False, 'from anomalib.utils.callbacks import MinMaxNormalizationCallback\n')] |
"""Test This script performs inference on the test dataset and saves the output visualizations into a directory."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from argparse import ArgumentParser, Namespace
from pytorch_lightning import Trainer
from anomalib.config import get_configurable_parameters
from anomalib.data import get_datamodule
from anomalib.models import get_model
from anomalib.utils.callbacks import get_callbacks
def get_args() -> Namespace:
"""Get CLI arguments.
Returns:
Namespace: CLI arguments.
"""
parser = ArgumentParser()
parser.add_argument("--model", type=str, default="stfpm", help="Name of the algorithm to train/test")
parser.add_argument("--model_config_path", type=str, required=False, help="Path to a model config file")
parser.add_argument("--weight_file", type=str, default="weights/model.ckpt")
parser.add_argument("--openvino", type=bool, default=False)
return parser.parse_args()
def test():
"""Test an anomaly classification and segmentation model that is initially trained via `tools/train.py`.
The script is able to write the results into both filesystem and a logger such as Tensorboard.
"""
args = get_args()
config = get_configurable_parameters(
model_name=args.model,
model_config_path=args.model_config_path,
weight_file=args.weight_file,
openvino=args.openvino,
)
datamodule = get_datamodule(config)
model = get_model(config)
callbacks = get_callbacks(config)
trainer = Trainer(callbacks=callbacks, **config.trainer)
trainer.test(model=model, datamodule=datamodule)
if __name__ == "__main__":
test()
| [
"anomalib.config.get_configurable_parameters",
"anomalib.utils.callbacks.get_callbacks",
"anomalib.data.get_datamodule",
"anomalib.models.get_model"
] | [((1098, 1114), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1112, 1114), False, 'from argparse import ArgumentParser, Namespace\n'), ((1773, 1924), 'anomalib.config.get_configurable_parameters', 'get_configurable_parameters', ([], {'model_name': 'args.model', 'model_config_path': 'args.model_config_path', 'weight_file': 'args.weight_file', 'openvino': 'args.openvino'}), '(model_name=args.model, model_config_path=args.\n model_config_path, weight_file=args.weight_file, openvino=args.openvino)\n', (1800, 1924), False, 'from anomalib.config import get_configurable_parameters\n'), ((1977, 1999), 'anomalib.data.get_datamodule', 'get_datamodule', (['config'], {}), '(config)\n', (1991, 1999), False, 'from anomalib.data import get_datamodule\n'), ((2012, 2029), 'anomalib.models.get_model', 'get_model', (['config'], {}), '(config)\n', (2021, 2029), False, 'from anomalib.models import get_model\n'), ((2047, 2068), 'anomalib.utils.callbacks.get_callbacks', 'get_callbacks', (['config'], {}), '(config)\n', (2060, 2068), False, 'from anomalib.utils.callbacks import get_callbacks\n'), ((2084, 2130), 'pytorch_lightning.Trainer', 'Trainer', ([], {'callbacks': 'callbacks'}), '(callbacks=callbacks, **config.trainer)\n', (2091, 2130), False, 'from pytorch_lightning import Trainer\n')] |
README.md exists but content is empty.
- Downloads last month
- 5