| """Calculates the Frechet Inception Distance (FID) to evalulate GANs |
| |
| The FID metric calculates the distance between two distributions of images. |
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the second distribution is given by a GAN.
| |
| When run as a stand-alone program, it compares the distribution of |
| images that are stored as PNG/JPEG at a specified location with a |
| distribution given by summary statistics (in pickle format). |
| |
| The FID is calculated by assuming that X_1 and X_2 are the activations of |
| the pool_3 layer of the inception net for generated samples and real world |
| samples respectively. |
| |
| See --help to see further details. |
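
A typical invocation looks like this (the script filename and both paths are
placeholders; adapt them to your setup):

    python fid_score.py path/to/real_images path/to/generated_images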
| |
Code adapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of TensorFlow.
| |
| Copyright 2018 Institute of Bioinformatics, JKU Linz |
| |
| Licensed under the Apache License, Version 2.0 (the "License"); |
| you may not use this file except in compliance with the License. |
| You may obtain a copy of the License at |
| |
| http://www.apache.org/licenses/LICENSE-2.0 |
| |
| Unless required by applicable law or agreed to in writing, software |
| distributed under the License is distributed on an "AS IS" BASIS, |
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| See the License for the specific language governing permissions and |
| limitations under the License. |
| """ |
| import os |
| import pathlib |
| from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser |
|
|
| import numpy as np |
| import torch |
| import torchvision.transforms as TF |
| from PIL import Image |
| from scipy import linalg |
| from torch.nn.functional import adaptive_avg_pool2d |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import torchvision |
|
|
| try: |
| from tqdm import tqdm |
| except ImportError: |
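    # Fall back to a no-op wrapper if tqdm is not installed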
| |
| def tqdm(x): |
| return x |
|
|
|
|
| IMAGE_EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'pgm', 'png', 'ppm', |
| 'tif', 'tiff', 'webp'} |
|
|
|
|
| try: |
| from torchvision.models.utils import load_state_dict_from_url |
| except ImportError: |
| from torch.utils.model_zoo import load_url as load_state_dict_from_url |
|
|
| |
| |
| FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' |
|
|
|
|
| class InceptionV3(nn.Module): |
| """Pretrained InceptionV3 network returning feature maps""" |
|
|
| |
| |
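    # Index of the default block of inception to return;
    # corresponds to the output of the final average pooling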
| DEFAULT_BLOCK_INDEX = 3 |
|
|
| |
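    # Maps feature dimensionality to their output block indices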
| BLOCK_INDEX_BY_DIM = { |
| 64: 0, |
| 192: 1, |
| 768: 2, |
| 2048: 3 |
| } |
|
|
| def __init__(self, |
| output_blocks=(DEFAULT_BLOCK_INDEX,), |
| resize_input=True, |
| normalize_input=True, |
| requires_grad=False, |
| use_fid_inception=True): |
| """Build pretrained InceptionV3 |
| |
| Parameters |
| ---------- |
| output_blocks : list of int |
| Indices of blocks to return features of. Possible values are: |
| - 0: corresponds to output of first max pooling |
| - 1: corresponds to output of second max pooling |
| - 2: corresponds to output which is fed to aux classifier |
| - 3: corresponds to output of final average pooling |
| resize_input : bool |
| If true, bilinearly resizes input to width and height 299 before |
| feeding input to model. As the network without fully connected |
| layers is fully convolutional, it should be able to handle inputs |
| of arbitrary size, so resizing might not be strictly needed |
| normalize_input : bool |
| If true, scales the input from range (0, 1) to the range the |
| pretrained Inception network expects, namely (-1, 1) |
| requires_grad : bool |
| If true, parameters of the model require gradients. Possibly useful |
| for finetuning the network |
| use_fid_inception : bool |
| If true, uses the pretrained Inception model used in Tensorflow's |
| FID implementation. If false, uses the pretrained Inception model |
| available in torchvision. The FID Inception model has different |
| weights and a slightly different structure from torchvision's |
| Inception model. If you want to compute FID scores, you are |
| strongly advised to set this parameter to true to get comparable |
| results. |
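
        A minimal usage sketch (the input tensor is a placeholder; pretrained
        weights are downloaded on first use):

            model = InceptionV3([3]).eval()
            with torch.no_grad():
                feats = model(torch.rand(2, 3, 299, 299))[0]
            # feats has shape (2, 2048, 1, 1)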
| """ |
| super(InceptionV3, self).__init__() |
|
|
| self.resize_input = resize_input |
| self.normalize_input = normalize_input |
| self.output_blocks = sorted(output_blocks) |
| self.last_needed_block = max(output_blocks) |
|
|
| assert self.last_needed_block <= 3, \ |
| 'Last possible output block index is 3' |
|
|
| self.blocks = nn.ModuleList() |
|
|
| if use_fid_inception: |
| inception = fid_inception_v3() |
| else: |
| inception = _inception_v3(weights='DEFAULT') |
|
|
| |
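        # Block 0: input to maxpool1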
| block0 = [ |
| inception.Conv2d_1a_3x3, |
| inception.Conv2d_2a_3x3, |
| inception.Conv2d_2b_3x3, |
| nn.MaxPool2d(kernel_size=3, stride=2) |
| ] |
| self.blocks.append(nn.Sequential(*block0)) |
|
|
| |
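        # Block 1: maxpool1 to maxpool2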
| if self.last_needed_block >= 1: |
| block1 = [ |
| inception.Conv2d_3b_1x1, |
| inception.Conv2d_4a_3x3, |
| nn.MaxPool2d(kernel_size=3, stride=2) |
| ] |
| self.blocks.append(nn.Sequential(*block1)) |
|
|
| |
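        # Block 2: maxpool2 to aux classifier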
| if self.last_needed_block >= 2: |
| block2 = [ |
| inception.Mixed_5b, |
| inception.Mixed_5c, |
| inception.Mixed_5d, |
| inception.Mixed_6a, |
| inception.Mixed_6b, |
| inception.Mixed_6c, |
| inception.Mixed_6d, |
| inception.Mixed_6e, |
| ] |
| self.blocks.append(nn.Sequential(*block2)) |
|
|
| |
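        # Block 3: aux classifier to final avgpool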
| if self.last_needed_block >= 3: |
| block3 = [ |
| inception.Mixed_7a, |
| inception.Mixed_7b, |
| inception.Mixed_7c, |
| nn.AdaptiveAvgPool2d(output_size=(1, 1)) |
| ] |
| self.blocks.append(nn.Sequential(*block3)) |
|
|
| for param in self.parameters(): |
| param.requires_grad = requires_grad |
|
|
| def forward(self, inp): |
| """Get Inception feature maps |
| |
| Parameters |
| ---------- |
        inp : torch.Tensor
| Input tensor of shape Bx3xHxW. Values are expected to be in |
| range (0, 1) |
| |
| Returns |
| ------- |
        List of torch.Tensor, corresponding to the selected output
        blocks, sorted ascending by index
| """ |
| outp = [] |
| x = inp |
|
|
| if self.resize_input: |
| x = F.interpolate(x, |
| size=(299, 299), |
| mode='bilinear', |
| align_corners=False) |
|
|
| if self.normalize_input: |
| x = 2 * x - 1 |
|
|
| for idx, block in enumerate(self.blocks): |
| x = block(x) |
| if idx in self.output_blocks: |
| outp.append(x) |
|
|
| if idx == self.last_needed_block: |
| break |
|
|
| return outp |
|
|
|
|
| def _inception_v3(*args, **kwargs): |
| """Wraps `torchvision.models.inception_v3`""" |
| try: |
| version = tuple(map(int, torchvision.__version__.split('.')[:2])) |
| except ValueError: |
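        # Just a caution against weird version strings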
| |
| version = (0,) |
|
|
| |
| |
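    # Skips default weight initialization if supported by the installed
    # torchvision version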
| if version >= (0, 6): |
| kwargs['init_weights'] = False |
|
|
| |
| |
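    # Map the 'weights' argument to the older 'pretrained' argument for
    # torchvision versions that do not support it yet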
| if version < (0, 13) and 'weights' in kwargs: |
| if kwargs['weights'] == 'DEFAULT': |
| kwargs['pretrained'] = True |
| elif kwargs['weights'] is None: |
| kwargs['pretrained'] = False |
| else: |
| raise ValueError( |
| 'weights=={} not supported in torchvision {}'.format( |
| kwargs['weights'], torchvision.__version__ |
| ) |
| ) |
| del kwargs['weights'] |
|
|
| return torchvision.models.inception_v3(*args, **kwargs) |
|
|
|
|
| def fid_inception_v3(): |
| """Build pretrained Inception model for FID computation |
| |
| The Inception model for FID computation uses a different set of weights |
| and has a slightly different structure than torchvision's Inception. |
| |
| This method first constructs torchvision's Inception and then patches the |
| necessary parts that are different in the FID Inception model. |
| """ |
| inception = _inception_v3(num_classes=1008, |
| aux_logits=False, |
| weights=None) |
| inception.Mixed_5b = FIDInceptionA(192, pool_features=32) |
| inception.Mixed_5c = FIDInceptionA(256, pool_features=64) |
| inception.Mixed_5d = FIDInceptionA(288, pool_features=64) |
| inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128) |
| inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160) |
| inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160) |
| inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192) |
| inception.Mixed_7b = FIDInceptionE_1(1280) |
| inception.Mixed_7c = FIDInceptionE_2(2048) |
|
|
| state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True) |
| inception.load_state_dict(state_dict) |
| return inception |
|
|
|
|
| class FIDInceptionA(torchvision.models.inception.InceptionA): |
| """InceptionA block patched for FID computation""" |
| def __init__(self, in_channels, pool_features): |
| super(FIDInceptionA, self).__init__(in_channels, pool_features) |
|
|
| def forward(self, x): |
| branch1x1 = self.branch1x1(x) |
|
|
| branch5x5 = self.branch5x5_1(x) |
| branch5x5 = self.branch5x5_2(branch5x5) |
|
|
| branch3x3dbl = self.branch3x3dbl_1(x) |
| branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) |
| branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) |
|
|
| |
| |
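        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation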
| branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, |
| count_include_pad=False) |
| branch_pool = self.branch_pool(branch_pool) |
|
|
| outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] |
| return torch.cat(outputs, 1) |
|
|
|
|
| class FIDInceptionC(torchvision.models.inception.InceptionC): |
| """InceptionC block patched for FID computation""" |
| def __init__(self, in_channels, channels_7x7): |
| super(FIDInceptionC, self).__init__(in_channels, channels_7x7) |
|
|
| def forward(self, x): |
| branch1x1 = self.branch1x1(x) |
|
|
| branch7x7 = self.branch7x7_1(x) |
| branch7x7 = self.branch7x7_2(branch7x7) |
| branch7x7 = self.branch7x7_3(branch7x7) |
|
|
| branch7x7dbl = self.branch7x7dbl_1(x) |
| branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) |
| branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) |
| branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) |
| branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) |
|
|
| |
| |
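        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation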
| branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, |
| count_include_pad=False) |
| branch_pool = self.branch_pool(branch_pool) |
|
|
| outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] |
| return torch.cat(outputs, 1) |
|
|
|
|
| class FIDInceptionE_1(torchvision.models.inception.InceptionE): |
| """First InceptionE block patched for FID computation""" |
| def __init__(self, in_channels): |
| super(FIDInceptionE_1, self).__init__(in_channels) |
|
|
| def forward(self, x): |
| branch1x1 = self.branch1x1(x) |
|
|
| branch3x3 = self.branch3x3_1(x) |
| branch3x3 = [ |
| self.branch3x3_2a(branch3x3), |
| self.branch3x3_2b(branch3x3), |
| ] |
| branch3x3 = torch.cat(branch3x3, 1) |
|
|
| branch3x3dbl = self.branch3x3dbl_1(x) |
| branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) |
| branch3x3dbl = [ |
| self.branch3x3dbl_3a(branch3x3dbl), |
| self.branch3x3dbl_3b(branch3x3dbl), |
| ] |
| branch3x3dbl = torch.cat(branch3x3dbl, 1) |
|
|
| |
| |
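        # Patch: TensorFlow's average pool does not use the padded zeros in
        # its average calculation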
| branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, |
| count_include_pad=False) |
| branch_pool = self.branch_pool(branch_pool) |
|
|
| outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] |
| return torch.cat(outputs, 1) |
|
|
|
|
| class FIDInceptionE_2(torchvision.models.inception.InceptionE): |
| """Second InceptionE block patched for FID computation""" |
| def __init__(self, in_channels): |
| super(FIDInceptionE_2, self).__init__(in_channels) |
|
|
| def forward(self, x): |
| branch1x1 = self.branch1x1(x) |
|
|
| branch3x3 = self.branch3x3_1(x) |
| branch3x3 = [ |
| self.branch3x3_2a(branch3x3), |
| self.branch3x3_2b(branch3x3), |
| ] |
| branch3x3 = torch.cat(branch3x3, 1) |
|
|
| branch3x3dbl = self.branch3x3dbl_1(x) |
| branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) |
| branch3x3dbl = [ |
| self.branch3x3dbl_3a(branch3x3dbl), |
| self.branch3x3dbl_3b(branch3x3dbl), |
| ] |
| branch3x3dbl = torch.cat(branch3x3dbl, 1) |
|
|
| |
| |
| |
| |
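        # Patch: The FID Inception model uses max pooling instead of average
        # pooling. This is likely an error in this specific Inception
        # implementation, as other Inception models use average pooling here
        # (which matches the description in the paper).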
| branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1) |
| branch_pool = self.branch_pool(branch_pool) |
|
|
| outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] |
| return torch.cat(outputs, 1) |
|
|
class ImagePathDataset(torch.utils.data.Dataset):
    """Dataset yielding images loaded from a list of file paths"""

    def __init__(self, files, transforms=None):
        self.files = files
        self.transforms = transforms
|
|
| def __len__(self): |
| return len(self.files) |
|
|
| def __getitem__(self, i): |
| path = self.files[i] |
| img = Image.open(path).convert('RGB') |
| if self.transforms is not None: |
| img = self.transforms(img) |
| return img |
|
|
|
|
| def get_activations(files, model, batch_size=50, dims=2048, device='cpu', |
| num_workers=1, resize=0): |
| """Calculates the activations of the pool_3 layer for all images. |
| |
| Params: |
| -- files : List of image files paths |
| -- model : Instance of inception model |
| -- batch_size : Batch size of images for the model to process at once. |
| Make sure that the number of samples is a multiple of |
| the batch size, otherwise some samples are ignored. This |
| behavior is retained to match the original FID score |
| implementation. |
    -- dims        : Dimensionality of features returned by Inception
    -- device      : Device to run calculations
    -- num_workers : Number of parallel dataloader workers
    -- resize      : If > 0, images are resized to (resize, resize) before
                     being fed to the model; 0 keeps the original size
| |
| Returns: |
| -- A numpy array of dimension (num images, dims) that contains the |
| activations of the given tensor when feeding inception with the |
| query tensor. |
| """ |
| model.eval() |
|
|
| if batch_size > len(files): |
| print(('Warning: batch size is bigger than the data size. ' |
| 'Setting batch size to data size')) |
| batch_size = len(files) |
| if resize > 0: |
| tform = TF.Compose([TF.Resize((resize, resize)), TF.ToTensor()]) |
| else: |
| tform = TF.ToTensor() |
| dataset = ImagePathDataset(files, transforms=tform) |
| dataloader = torch.utils.data.DataLoader(dataset, |
| batch_size=batch_size, |
| shuffle=False, |
| drop_last=False, |
| num_workers=num_workers) |
|
|
| pred_arr = np.empty((len(files), dims)) |
|
|
| start_idx = 0 |
|
|
| for batch in tqdm(dataloader): |
| batch = batch.to(device) |
|
|
| with torch.no_grad(): |
| pred = model(batch)[0] |
|
|
| |
| |
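        # If model output is not scalar, apply global spatial average pooling.
        # This happens if you choose a dimensionality not equal to 2048.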
| if pred.size(2) != 1 or pred.size(3) != 1: |
| pred = adaptive_avg_pool2d(pred, output_size=(1, 1)) |
|
|
| pred = pred.squeeze(3).squeeze(2).cpu().numpy() |
|
|
| pred_arr[start_idx:start_idx + pred.shape[0]] = pred |
|
|
| start_idx = start_idx + pred.shape[0] |
|
|
| return pred_arr |
|
|
|
|
| def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): |
| """Numpy implementation of the Frechet Distance. |
| The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) |
| and X_2 ~ N(mu_2, C_2) is |
| d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). |
| |
| Stable version by Dougal J. Sutherland. |
| |
| Params: |
    -- mu1   : Numpy array containing the activations of a layer of the
               inception net (like returned by the function 'get_activations')
               for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.
| |
| Returns: |
| -- : The Frechet Distance. |
| """ |
|
|
| mu1 = np.atleast_1d(mu1) |
| mu2 = np.atleast_1d(mu2) |
|
|
| sigma1 = np.atleast_2d(sigma1) |
| sigma2 = np.atleast_2d(sigma2) |
|
|
| assert mu1.shape == mu2.shape, \ |
| 'Training and test mean vectors have different lengths' |
| assert sigma1.shape == sigma2.shape, \ |
| 'Training and test covariances have different dimensions' |
|
|
| diff = mu1 - mu2 |
|
|
| |
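    # Product might be almost singular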
| covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) |
| if not np.isfinite(covmean).all(): |
| msg = ('fid calculation produces singular product; ' |
| 'adding %s to diagonal of cov estimates') % eps |
| print(msg) |
| offset = np.eye(sigma1.shape[0]) * eps |
| covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) |
|
|
| |
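    # Numerical error might give slight imaginary component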
| if np.iscomplexobj(covmean): |
| if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): |
| m = np.max(np.abs(covmean.imag)) |
| raise ValueError('Imaginary component {}'.format(m)) |
| covmean = covmean.real |
|
|
| tr_covmean = np.trace(covmean) |
|
|
| return (diff.dot(diff) + np.trace(sigma1) |
| + np.trace(sigma2) - 2 * tr_covmean) |
|
|
|
|
| def calculate_activation_statistics(files, model, batch_size=50, dims=2048, |
| device='cpu', num_workers=1, resize=0): |
| """Calculation of the statistics used by the FID. |
| Params: |
| -- files : List of image files paths |
| -- model : Instance of inception model |
| -- batch_size : The images numpy array is split into batches with |
| batch size batch_size. A reasonable batch size |
| depends on the hardware. |
    -- dims        : Dimensionality of features returned by Inception
    -- device      : Device to run calculations
    -- num_workers : Number of parallel dataloader workers
    -- resize      : If > 0, images are resized to (resize, resize) before
                     being fed to the model; 0 keeps the original size
| |
| Returns: |
| -- mu : The mean over samples of the activations of the pool_3 layer of |
| the inception model. |
| -- sigma : The covariance matrix of the activations of the pool_3 layer of |
| the inception model. |
| """ |
    act = get_activations(files, model, batch_size, dims, device,
                          num_workers, resize)
| mu = np.mean(act, axis=0) |
| sigma = np.cov(act, rowvar=False) |
| return mu, sigma |
|
|
|
|
| def compute_statistics_of_path(path, model, batch_size, dims, device, |
| num_workers=1, nimages=None, resize=0): |
| if path.endswith('.npz'): |
| with np.load(path) as f: |
| m, s = f['mu'][:], f['sigma'][:] |
| else: |
| path = pathlib.Path(path) |
| |
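        # Collect image files recursively, in a deterministic order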
| files = sorted([file for ext in IMAGE_EXTENSIONS |
| for file in path.glob('**/*.{}'.format(ext))]) |
| nfiles = len(files) |
| n = nfiles if nimages is None else min(nimages, nfiles) |
| print(f'Found {nfiles} images. Computing FID with {n} images.') |
| files = files[:n] |
| m, s = calculate_activation_statistics(files, model, batch_size, |
| dims, device, num_workers, resize) |
|
|
| return m, s |
|
|
|
|
def calculate_fid_given_paths(paths, batch_size, device, dims, num_workers=1,
                              nimages=None, resize=0):
| """Calculates the FID of two paths""" |
| for p in paths: |
| if not os.path.exists(p): |
| raise RuntimeError('Invalid path: %s' % p) |
|
|
| block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] |
|
|
| model = InceptionV3([block_idx]).to(device) |
|
|
| m1, s1 = compute_statistics_of_path(paths[0], model, batch_size, |
| dims, device, num_workers, nimages, resize) |
| m2, s2 = compute_statistics_of_path(paths[1], model, batch_size, |
| dims, device, num_workers, nimages, resize) |
| fid_value = calculate_frechet_distance(m1, s1, m2, s2) |
|
|
| return fid_value |
|
|
|
|
def save_fid_stats(paths, batch_size, device, dims, num_workers=1,
                   nimages=None, resize=0):
    """Saves FID statistics of one path to an .npz file"""
| if not os.path.exists(paths[0]): |
| raise RuntimeError('Invalid path: %s' % paths[0]) |
|
|
| if os.path.exists(paths[1]): |
| raise RuntimeError('Existing output file: %s' % paths[1]) |
|
|
| block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] |
|
|
| model = InceptionV3([block_idx]).to(device) |
|
|
| print(f"Saving statistics for {paths[0]}") |
|
|
    m1, s1 = compute_statistics_of_path(paths[0], model, batch_size,
                                        dims, device, num_workers, nimages,
                                        resize)
|
|
| np.savez_compressed(paths[1], mu=m1, sigma=s1) |
|
|
|
|
| def main(): |
| parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) |
| parser.add_argument('--batch-size', type=int, default=20, |
| help='Batch size to use') |
| parser.add_argument('--num-workers', type=int, |
| help=('Number of processes to use for data loading. ' |
| 'Defaults to `min(8, num_cpus)`')) |
    parser.add_argument('--device', type=str, default=None,
                        help=('Device to use. Like cuda, cuda:0 or cpu. '
                              'Defaults to cuda if available'))
| parser.add_argument('--dims', type=int, default=2048, |
| choices=list(InceptionV3.BLOCK_INDEX_BY_DIM), |
| help=('Dimensionality of Inception features to use. ' |
| 'By default, uses pool3 features')) |
    parser.add_argument('--nimages', type=int, default=50000,
                        help='Maximum number of images to use')
    parser.add_argument('--resize', type=int, default=0,
                        help=('Resize images to this size before computing '
                              'features; 0 means keep the original size'))
| parser.add_argument('--save-stats', action='store_true', |
| help=('Generate an npz archive from a directory of samples. ' |
| 'The first path is used as input and the second as output.')) |
| parser.add_argument('path', type=str, nargs=2, |
| help=('Paths to the generated images or ' |
| 'to .npz statistic files')) |
| args = parser.parse_args() |
|
|
| if args.device is None: |
| device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu') |
| else: |
| device = torch.device(args.device) |
|
|
| if args.num_workers is None: |
| try: |
| num_cpus = len(os.sched_getaffinity(0)) |
| except AttributeError: |
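            # os.sched_getaffinity is not available under Windows, use
            # os.cpu_count instead (which may not return the *available*
            # number of CPUs)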
| |
| |
| |
| num_cpus = os.cpu_count() |
|
|
| num_workers = min(num_cpus, 8) if num_cpus is not None else 0 |
| else: |
| num_workers = args.num_workers |
|
|
| if args.save_stats: |
        save_fid_stats(args.path, args.batch_size, device, args.dims,
                       num_workers, args.nimages, args.resize)
| return |
|
|
| fid_value = calculate_fid_given_paths(args.path, |
| args.batch_size, |
| device, |
| args.dims, |
| num_workers, |
| args.nimages, |
| args.resize) |
| print('FID: ', fid_value) |
|
|
|
|
| if __name__ == '__main__': |
| main() |