repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
AgML | AgML-main/experiments/benchmarking/classification_distributed.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import agml
import albumentations as A
import torch
import torch.nn as nn
import pytorch_lightning as pl
from torchvision.models import efficientnet_b4
class EfficientNetB4Transfer(nn.Module):
    """Transfer-learning classifier built on EfficientNetB4.

    The torchvision EfficientNetB4 backbone (retaining its 1000-way
    ImageNet head) feeds a small fully-connected classification head:
    1000 -> 256 -> `num_classes`, with ReLU and dropout in between.
    """

    def __init__(self, num_classes, pretrained = True):
        super(EfficientNetB4Transfer, self).__init__()
        self.base = efficientnet_b4(pretrained = pretrained)
        self.l1 = nn.Linear(1000, 256)
        self.dropout = nn.Dropout(0.1)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(256, num_classes)

    def forward(self, x, **kwargs):  # noqa
        # Backbone logits -> flatten -> dropout(relu(fc)) -> output layer.
        features = self.base(x)
        features = features.view(features.size(0), -1)
        hidden = self.dropout(self.relu(self.l1(features)))
        return self.l2(hidden)
def get_num_gpus(args):
    """Resolves the number of GPUs to use.

    Accepts either a plain integer GPU count, or an object (such as an
    ``argparse.Namespace``) carrying a ``gpus`` attribute. Falls back to
    every visible CUDA device when no explicit count is provided.
    """
    # Allow callers to pass the count directly: `train` calls this with
    # the raw integer `gpus` argument rather than the parsed namespace,
    # which previously fell through to `torch.cuda.device_count()` and
    # silently ignored the user's explicit count.
    if isinstance(args, int):
        return args
    arg_gpus = getattr(args, 'gpus', None)
    if isinstance(arg_gpus, int):
        return arg_gpus
    return torch.cuda.device_count()
class ClassificationBenchmark(pl.LightningModule):
    """Represents an image classification benchmark model.

    Wraps an `EfficientNetB4Transfer` network whose output size is read
    from the AgML data source metadata for `dataset`.
    """

    def __init__(self, dataset, pretrained = False):
        # Initialize the module.
        super(ClassificationBenchmark, self).__init__()
        # Construct the network; `num_classes` comes from the dataset's
        # AgML source metadata.
        self._source = agml.data.source(dataset)
        self._pretrained = pretrained
        self.net = EfficientNetB4Transfer(
            self._source.num_classes,
            self._pretrained
        )
        # Construct the loss for training. The loaders emit one-hot
        # labels (see `build_loaders`), which CrossEntropyLoss accepts
        # as class probabilities in recent PyTorch versions.
        self.loss = nn.CrossEntropyLoss()

    def forward(self, x):
        return self.net.forward(x)

    def training_step(self, batch, *args, **kwargs):  # noqa
        x, y = batch
        y_pred = self(x)
        loss = self.loss(y_pred, y)
        # `accuracy` is the module-level helper defined below; labels
        # are one-hot, so argmax recovers the integer class indices.
        acc = accuracy(y_pred, torch.argmax(y, 1)).item()
        self.log('accuracy', acc, prog_bar = True, sync_dist = True)
        return {
            'loss': loss,
            'accuracy': acc
        }

    def validation_step(self, batch, *args, **kwargs):  # noqa
        x, y = batch
        y_pred = self(x)
        val_loss = self.loss(y_pred, y)
        val_acc = accuracy(y_pred, torch.argmax(y, 1))
        self.log('val_loss', val_loss.item(), prog_bar = True, sync_dist = True)
        self.log('val_accuracy', val_acc.item(), prog_bar = True, sync_dist = True)
        return {
            'val_loss': val_loss,
            'val_accuracy': val_acc
        }

    def configure_optimizers(self):
        # Adam with its default learning rate.
        return torch.optim.Adam(self.parameters())

    def get_progress_bar_dict(self):
        # Strip the version number from the progress bar display.
        # NOTE(review): this hook only exists in older PyTorch Lightning
        # releases — confirm against the pinned `pytorch_lightning` version.
        tqdm_dict = super(ClassificationBenchmark, self) \
            .get_progress_bar_dict()
        tqdm_dict.pop('v_num', None)
        return tqdm_dict
def accuracy(output, target):
    """Computes the top-1 accuracy (as a percentage) between `output` and `target`."""
    with torch.no_grad():
        n = target.size(0)
        # Indices of the highest-scoring class per sample, shaped (1, N).
        top_pred = torch.topk(output, 1, 1)[1].t()
        # Compare predictions against the targets broadcast to match.
        hits = top_pred.eq(target.view(1, -1).expand_as(top_pred))
        num_correct = hits[:1].reshape(-1).float().sum(0, keepdim = True)
        return num_correct.mul_(100.0 / n)
# Build the data loaders.
def build_loaders(name):
    """Builds (train, val, test) torch datasets for the AgML dataset `name`."""
    loader = agml.data.AgMLDataLoader(name)
    loader.split(train = 0.8, val = 0.1, test = 0.1)
    loader.batch(batch_size = 16)
    loader.resize_images('imagenet')
    loader.normalize_images('imagenet')
    loader.labels_to_one_hot()
    # Only the training split receives augmentation.
    train_split = loader.train_data
    train_split.transform(transform = A.RandomRotate90())
    train_ds = train_split.copy().as_torch_dataset()
    val_ds = loader.val_data.as_torch_dataset()
    val_ds.shuffle_data = False
    test_ds = loader.test_data.as_torch_dataset()
    return train_ds, val_ds, test_ds
# Main training loop method.
def train(dataset, pretrained, epochs, gpus, save_dir = None):
    """Constructs the training loop and trains a model.

    Parameters
    ----------
    dataset : str
        The name of the AgML dataset to benchmark.
    pretrained : bool
        Whether the EfficientNetB4 backbone starts from ImageNet weights.
    epochs : int
        The maximum number of epochs to train for.
    gpus : int or None
        The number of GPUs to train on; defaults to all visible devices.
    save_dir : str, optional
        Where checkpoints are written; defaults to a per-dataset directory.
    """
    if save_dir is None:
        # The f-string is already a complete path; the original wrapped it
        # in a no-op single-argument `os.path.join`.
        save_dir = f"/data2/amnjoshi/checkpoints/{dataset}"
    os.makedirs(save_dir, exist_ok = True)

    # Set up the checkpoint-saving and early-stopping callbacks.
    callbacks = [
        pl.callbacks.ModelCheckpoint(
            dirpath = save_dir, mode = 'min',
            filename = f"{dataset}" + "-epoch{epoch:02d}-val_loss_{val_loss:.2f}",
            monitor = 'val_loss',
            save_top_k = 3,
            auto_insert_metric_name = False
        ),
        pl.callbacks.EarlyStopping(
            monitor = 'val_loss',
            min_delta = 0.001,
            patience = 3,
        )
    ]

    # Construct the model.
    model = ClassificationBenchmark(
        dataset = dataset, pretrained = pretrained)

    # Construct the data loaders.
    train_ds, val_ds, test_ds = build_loaders(dataset)

    # Resolve the device count. The original passed the raw `gpus` integer
    # through `get_num_gpus`, which only inspects a `gpus` *attribute* and
    # therefore silently ignored an explicit count.
    num_gpus = gpus if isinstance(gpus, int) else torch.cuda.device_count()

    # Create the trainer and train the model.
    trainer = pl.Trainer(
        max_epochs = epochs,
        gpus = num_gpus,
        callbacks = callbacks)
    trainer.fit(
        model = model,
        train_dataloaders = train_ds,
        val_dataloaders = val_ds
    )
if __name__ == '__main__':
    # Parse input arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataset', type = str, help = "The name of the dataset.")
    parser.add_argument(
        '--not-pretrained', action = 'store_false',
        default = True, help = "Whether to load a pretrained model.")
    parser.add_argument(
        '--checkpoint_dir', type = str, default = None,
        help = "The checkpoint directory to save to.")
    parser.add_argument(
        '--epochs', type = int, default = 50,
        help = "How many epochs to train for. Default is 50.")
    parser.add_argument(
        '--gpus', type = int, default = None,
        help = "How many GPUs to use when training.")
    args = parser.parse_args()

    # Train the model. `not_pretrained` stores False when the flag is
    # passed, so its value is exactly the desired `pretrained` setting.
    train(args.dataset,
          args.not_pretrained,
          epochs = args.epochs,
          gpus = args.gpus,
          save_dir = args.checkpoint_dir)
| 6,726 | 31.814634 | 83 | py |
AgML | AgML-main/experiments/benchmarking/detection_lightning_local.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Some of the training code in this file is adapted from the following sources:
1. https://github.com/rwightman/efficientdet-pytorch
2. https://gist.github.com/Chris-hughes10/73628b1d8d6fc7d359b3dcbbbb8869d7
"""
import os
import argparse
from typing import List, Union
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
from mean_average_precision import MeanAveragePrecision
import agml
import albumentations as A
from albumentations.pytorch import ToTensorV2
from agml.models.training_resources.model import get_efficientdet_config, HeadNet, create_model_from_config
from agml.models.training_resources.bench import DetBenchTrain, DetBenchPredict
from ensemble_boxes import ensemble_boxes_wbf
from tools import gpus, checkpoint_dir, MetricLogger, auto_move_data
# Constants
IMAGE_SIZE = 512
def create_model(num_classes = 1, architecture = "tf_efficientdet_d4", pretrained = (False, False)):
    """Builds an EfficientDet training bench for `num_classes` classes.

    `pretrained` may be a bool, a tuple whose first element is a flag, or
    a string path; a string is delegated to `create_model_from_pretrained`.
    """
    # NOTE(review): this normalization looks inverted — a `False` bool (or
    # a falsy first tuple element) is rewritten to `True`, so `pretrained`
    # ends up truthy in every non-string case. Confirm the intended logic.
    if isinstance(pretrained, bool):
        if pretrained is False:
            pretrained = True
    else:
        if not pretrained[0]:
            pretrained = True
    if isinstance(pretrained, str):
        # NOTE(review): `create_model_from_pretrained` indexes its third
        # argument as a (path, num_classes) pair, but would receive a
        # plain string here — verify against the callers.
        return create_model_from_pretrained(num_classes, architecture, pretrained)
    config = get_efficientdet_config(architecture)
    config.update({'image_size': (IMAGE_SIZE, IMAGE_SIZE)})
    net = create_model_from_config(config, pretrained = pretrained, num_classes = num_classes)
    # Replace the classification head so it outputs `num_classes` values.
    net.class_net = HeadNet(
        config,
        num_outputs = num_classes,
    )
    return DetBenchTrain(net, config)
# Modification of the above to load pretrained weights from a path.
def create_model_from_pretrained(
        num_classes = 1,
        architecture = "tf_efficientdet_d4",
        pretrained_path = None):
    """Builds an EfficientDet bench initialized from a saved state dict.

    `pretrained_path` is a (weights_path, num_classes) pair describing
    the checkpoint being loaded.
    """
    config = get_efficientdet_config(architecture)
    config.update({'image_size': (IMAGE_SIZE, IMAGE_SIZE)})
    # Build the network with the checkpoint's class count so the saved
    # weights line up with the architecture.
    net = create_model_from_config(
        config, num_classes = pretrained_path[1], pretrained = False)
    if net.num_classes != num_classes:
        net.reset_head(num_classes = num_classes)
    # NOTE(review): the state dict is loaded *after* the head reset; if
    # the checkpoint head shape differs from `num_classes`, this load may
    # fail — confirm the intended ordering.
    net.load_state_dict(
        torch.load(pretrained_path[0], map_location = 'cpu'))
    return DetBenchTrain(net, config)
class AgMLDatasetAdaptor(object):
    """Adapts an AgML dataset for use in a `LightningDataModule`."""

    def __init__(self, loader, adapt_class = False):
        self.loader = loader
        self.adapt_class = adapt_class

    def __len__(self) -> int:
        return len(self.loader)

    def get_image_and_labels_by_idx(self, index):
        """Returns (PIL image, xyxy boxes, class labels, index) for one sample."""
        image, annotation = self.loader[index]
        image = Image.fromarray(image)
        # COCO-style boxes are (x, y, w, h); convert to corner format.
        raw = np.array(annotation['bbox']).astype(np.int32)
        left, top = raw[:, 0], raw[:, 1]
        right, bottom = raw[:, 2] + left, raw[:, 3] + top
        # Clamp every coordinate into the image bounds.
        left, top = np.clip(left, 0, image.width), np.clip(top, 0, image.height)
        right, bottom = np.clip(right, 0, image.width), np.clip(bottom, 0, image.height)
        bboxes = np.dstack((left, top, right, bottom)).squeeze(axis = 0)
        class_labels = np.array(annotation['category_id']).squeeze()
        if self.adapt_class:
            # Collapse every category into a single positive class.
            class_labels = np.ones_like(class_labels)
        return image, bboxes, class_labels, index
def get_transforms(mode = 'inference'):
    """Returns the albumentations pipeline corresponding to `mode`.

    Parameters
    ----------
    mode : str
        One of 'train', 'val'/'validation', or 'inference'.

    Raises
    ------
    ValueError
        If `mode` is not a recognized mode (the original implementation
        silently returned None in that case).
    """
    # The train and validation pipelines share identical bbox handling.
    bbox_params = A.BboxParams(
        format = "pascal_voc", min_area = 0,
        min_visibility = 0, label_fields = ["labels"])
    if mode == 'train':
        # Training adds a random horizontal flip on top of the resize.
        return A.Compose(
            [A.HorizontalFlip(p = 0.5),
             A.Resize(height = IMAGE_SIZE, width = IMAGE_SIZE, p = 1),
             ToTensorV2(p = 1)], p = 1.0,
            bbox_params = bbox_params)
    elif mode in ['val', 'validation']:
        return A.Compose(
            [A.Resize(height = IMAGE_SIZE, width = IMAGE_SIZE, p = 1),
             ToTensorV2(p = 1)], p = 1.0,
            bbox_params = bbox_params)
    elif mode == 'inference':
        # Inference has no box targets, so no bbox params are attached.
        return A.Compose(
            [A.Resize(height = IMAGE_SIZE, width = IMAGE_SIZE, p = 1),
             ToTensorV2(p = 1)], p = 1.0)
    raise ValueError(f"Unknown transform mode: {mode!r}")
class EfficientDetDataset(Dataset):
    """A torch `Dataset` yielding (image, target, image_id) for EfficientDet.

    Wraps an `AgMLDatasetAdaptor`, applies the configured albumentations
    pipeline, and emits EfficientDet-format targets (yxyx boxes).
    """

    def __init__(self, adaptor, transforms = None):
        self.ds = adaptor
        if transforms is None:
            transforms = get_transforms('val')
        self.transforms = transforms

    def __len__(self):
        return len(self.ds)

    def __getitem__(self, index):
        image, pascal_bboxes, class_labels, image_id = \
            self.ds.get_image_and_labels_by_idx(index)
        # Add a label dimension for consistency.
        if class_labels.ndim == 0:
            class_labels = np.expand_dims(class_labels, axis = 0)
        # Construct the sample.
        sample = {
            "image": np.array(image, dtype = np.float32),
            "bboxes": pascal_bboxes, "labels": class_labels}
        try:
            sample = self.transforms(**sample)
        except Exception as e:
            # Chain the underlying error so the real cause is preserved
            # (the original re-raised a bare Exception, discarding the
            # original traceback).
            raise RuntimeError(f"Failed sample: {sample}") from e
        sample["bboxes"] = np.array(sample["bboxes"])
        image = sample["image"]
        labels = sample["labels"]
        # Convert 1-channel and 4-channel images to 3-channel.
        if image.shape[0] == 1:
            image = torch.tile(image, (3, 1, 1))
        if image.shape[0] == 4:
            image = image[:3]
        # Convert boxes to yxyx from xyxy (EfficientDet's expected order).
        _, new_h, new_w = image.shape
        sample["bboxes"][:, [0, 1, 2, 3]] = \
            sample["bboxes"][:, [1, 0, 3, 2]]
        # Create the target from the annotations.
        target = {
            "bboxes": torch.as_tensor(sample["bboxes"], dtype = torch.float32),
            "labels": torch.as_tensor(labels), "image_id": torch.tensor([image_id]),
            "img_size": (new_h, new_w), "img_scale": torch.tensor([1.0])}
        return image, target, image_id
class EfficientDetDataModule(pl.LightningDataModule):
    """A `LightningDataModule` for the `LightningModule`."""

    def __init__(self,
                 train_dataset_adaptor,
                 validation_dataset_adaptor,
                 train_transforms = None,
                 val_transforms = None,
                 num_workers = 4,
                 batch_size = 8):
        self.train_ds = train_dataset_adaptor
        self.valid_ds = validation_dataset_adaptor
        # Fall back to the default pipelines when none are supplied.
        self.train_tfms = (train_transforms if train_transforms is not None
                           else get_transforms('train'))
        self.val_tfms = (val_transforms if val_transforms is not None
                         else get_transforms('val'))
        self.num_workers = num_workers
        self.batch_size = batch_size
        super().__init__()

    def train_dataset(self) -> EfficientDetDataset:
        return EfficientDetDataset(
            adaptor = self.train_ds, transforms = self.train_tfms)

    def train_dataloader(self) -> DataLoader:
        return DataLoader(
            self.train_dataset(),
            batch_size = self.batch_size,
            shuffle = True,
            pin_memory = True,
            drop_last = True,
            num_workers = self.num_workers,
            collate_fn = self.collate_fn,
        )

    def val_dataset(self) -> EfficientDetDataset:
        return EfficientDetDataset(
            adaptor = self.valid_ds, transforms = self.val_tfms)

    def val_dataloader(self) -> DataLoader:
        return DataLoader(
            self.val_dataset(),
            batch_size = self.batch_size,
            shuffle = False,
            pin_memory = True,
            drop_last = True,
            num_workers = self.num_workers,
            collate_fn = self.collate_fn,
        )

    @staticmethod
    def collate_fn(batch):
        """Stacks images and regroups per-image targets into batch dicts."""
        images, targets, image_ids = tuple(zip(*batch))
        stacked = torch.stack(images).float()
        annotations = {
            "bbox": [t["bboxes"].float() for t in targets],
            "cls": [t["labels"].float() for t in targets],
            "img_size": torch.tensor(
                [t["img_size"] for t in targets]).float(),
            "img_scale": torch.tensor(
                [t["img_scale"] for t in targets]).float()}
        return stacked, annotations, targets, image_ids
class EfficientDetModel(pl.LightningModule):
    """A LightningModule wrapping an EfficientDet training bench.

    Handles training/validation steps, post-processing of raw detections
    (confidence filtering followed by weighted boxes fusion), and
    inference on arbitrary images via `predict`.
    """

    def __init__(self,
                 num_classes = 1,
                 confidence_threshold = 0.3,
                 learning_rate = 0.0002,
                 wbf_iou_threshold = 0.44,
                 inference_transforms = None,
                 architecture = 'efficientdet_d4',
                 save_dir = None,
                 pretrained = False,
                 validation_dataset_adaptor = None):
        super().__init__()
        self.model = create_model(
            num_classes, architecture = architecture,
            pretrained = pretrained)
        self.confidence_threshold = confidence_threshold
        self.lr = learning_rate
        self.wbf_iou_threshold = wbf_iou_threshold
        if inference_transforms is None:
            inference_transforms = get_transforms('inference')
        self.inference_tfms = inference_transforms
        # Construct the metric. `self.map` and `self.metric_logger` only
        # exist when a validation adaptor is supplied; downstream code
        # guards on `val_dataset_adaptor` / `hasattr` accordingly.
        self.val_dataset_adaptor = None
        if validation_dataset_adaptor is not None:
            # Add a metric calculator.
            # NOTE(review): `save_dir` must be non-None on this path, and
            # `self._version` is presumably provided by Lightning/logger
            # attachment — confirm both before relying on this branch.
            self.val_dataset_adaptor = AgMLDatasetAdaptor(
                validation_dataset_adaptor)
            self.map = MeanAveragePrecision()
            self.metric_logger = DetectionMetricLogger({
                'map': MeanAveragePrecision()},
                os.path.join(save_dir, f'logs-{self._version}.csv'))
        # Tracks whether Lightning's validation sanity check has completed.
        self._sanity_check_passed = False

    @auto_move_data
    def forward(self, images, targets):
        return self.model(images, targets)

    def configure_optimizers(self):
        return torch.optim.AdamW(self.model.parameters(), lr = self.lr)

    def training_step(self, batch, batch_idx):
        # Run a forward pass through the model.
        images, annotations, _, _ = batch
        losses = self.model(images, annotations)
        # Calculate and log losses.
        self.log("train_loss", losses["loss"], on_step = True,
                 on_epoch = True, prog_bar = True, logger = True)
        self.log("train_class_loss", losses["class_loss"],
                 on_step = True, on_epoch = True, logger = True)
        self.log("train_box_loss", losses["box_loss"], on_step = True,
                 on_epoch = True, logger = True)
        return losses['loss']

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        images, annotations, targets, image_ids = batch
        outputs = self.model(images, annotations)
        detections = outputs["detections"]
        # Update the metric (skipped during the sanity-check pass).
        if self.val_dataset_adaptor is not None and self._sanity_check_passed:
            predicted_bboxes, predicted_class_confidences, predicted_class_labels = \
                self.post_process_detections(detections)
            for idx, pred_box, pred_conf, pred_labels in zip(
                    image_ids, predicted_bboxes,
                    predicted_class_confidences,
                    predicted_class_labels):
                # Fetch the ground truth for this image to compare against.
                image, truth_boxes, truth_cls, _ = \
                    self.val_dataset_adaptor.get_image_and_labels_by_idx(idx)
                metric_update_values = \
                    [[pred_box, pred_labels, pred_conf], [truth_boxes, truth_cls]]
                self.metric_logger.update_metrics(*metric_update_values)
                self.map.update(*metric_update_values)
            # Compute the metric result.
            self.log("map", self.map.compute(), on_step = True, on_epoch = True,
                     prog_bar = True, logger = True, sync_dist = True)
        batch_predictions = {
            "predictions": detections,
            "targets": targets,
            "image_ids": image_ids,
        }
        logging_losses = {
            "class_loss": outputs["class_loss"].detach(),
            "box_loss": outputs["box_loss"].detach(),
        }
        self.log("valid_loss", outputs["loss"], on_step = True, on_epoch = True,
                 prog_bar = True, logger = True, sync_dist = True)
        self.log("valid_class_loss", logging_losses["class_loss"],
                 on_step = True, on_epoch = True,
                 logger = True, sync_dist = True)
        self.log("valid_box_loss", logging_losses["box_loss"],
                 on_step = True, on_epoch = True,
                 logger = True, sync_dist = True)
        return {'loss': outputs["loss"], 'batch_predictions': batch_predictions}

    def predict(self, images: Union[torch.Tensor, List]):
        """Runs inference on a set of images.

        Parameters
        ----------
        images : {torch.Tensor, list}
            Either a list of images (which can be numpy arrays, tensors, or
            another type), or a torch.Tensor returned from a DataLoader.

        Returns
        -------
        A tuple containing bounding boxes, confidence scores, and class labels.
        """
        if isinstance(images, list):
            # List inputs are transformed individually then stacked;
            # `image.size` here implies PIL-style images — see the (w, h)
            # -> (h, w) flip below.
            image_sizes = [(image.size[1], image.size[0]) for image in images]
            images_tensor = torch.stack([
                self.inference_tfms(
                    image = np.array(image, dtype = np.float32),
                )["image"] for image in images])
            return self._run_inference(images_tensor, image_sizes)
        elif isinstance(images, torch.Tensor):
            image_tensor = images
            if image_tensor.ndim == 3:
                image_tensor = image_tensor.unsqueeze(0)
            # Tensor inputs must already be resized to the model's size.
            if image_tensor.shape[-1] != IMAGE_SIZE \
                    or image_tensor.shape[-2] != IMAGE_SIZE:
                raise ValueError(
                    f"Input tensors must be of shape "
                    f"(N, 3, {IMAGE_SIZE}, {IMAGE_SIZE})")
            num_images = image_tensor.shape[0]
            image_sizes = [(IMAGE_SIZE, IMAGE_SIZE)] * num_images
            return self._run_inference(image_tensor, image_sizes)
        else:
            raise TypeError(
                "Expected either a list of images or a "
                "torch.Tensor of images for `predict()`.")

    def _run_inference(self, images_tensor, image_sizes):
        # The DetBench interface requires targets; feed placeholders.
        dummy_targets = self._create_dummy_inference_targets(
            images_tensor.shape[0], self.device, IMAGE_SIZE)
        detections = self.model(
            images_tensor.to(self.device), dummy_targets)["detections"]
        predicted_bboxes, predicted_class_confidences, predicted_class_labels = \
            self.post_process_detections(detections)
        # Map boxes back from model coordinates to each original size.
        scaled_bboxes = self._rescale_bboxes(
            predicted_bboxes = predicted_bboxes,
            image_sizes = image_sizes)
        return scaled_bboxes, predicted_class_labels, predicted_class_confidences

    @staticmethod
    def _create_dummy_inference_targets(num_images, device, size):
        # Minimal target dict satisfying the bench's forward signature.
        return {
            "bbox": [
                torch.tensor([[0.0, 0.0, 0.0, 0.0]], device = device)
                for _ in range(num_images)
            ],
            "cls": [torch.tensor([1.0], device = device) for _ in range(num_images)],
            "img_size": torch.tensor(
                [(size, size)] * num_images, device = device).float(),
            "img_scale": torch.ones(num_images, device = device).float(),
        }

    def post_process_detections(self, detections):
        """Filters raw detections, then fuses overlapping boxes via WBF."""
        predictions = [self._postprocess_single_prediction_detections(d) for d in detections]
        predicted_bboxes, predicted_class_confidences, predicted_class_labels = self.run_wbf(
            predictions, image_size = IMAGE_SIZE, iou_thr = self.wbf_iou_threshold)
        return predicted_bboxes, predicted_class_confidences, predicted_class_labels

    def _postprocess_single_prediction_detections(self, detections):
        # Extract the bounding boxes, confidence scores,
        # and class labels from the output detections
        # (columns 0-3 are the box, 4 the score, 5 the class).
        boxes = detections.detach().cpu().numpy()[:, :4]
        scores = detections.detach().cpu().numpy()[:, 4]
        classes = detections.detach().cpu().numpy()[:, 5]
        # Only return boxes which are above the confidence threshold.
        valid_indexes = np.where(scores > self.confidence_threshold)[0]
        boxes = boxes[valid_indexes]
        scores = scores[valid_indexes]
        classes = classes[valid_indexes]
        return {"boxes": boxes, "scores": scores, "classes": classes}

    @staticmethod
    def _rescale_bboxes(predicted_bboxes, image_sizes):
        """Scales boxes from IMAGE_SIZE coordinates into each image's size."""
        scaled_bboxes = []
        for bboxes, img_dims in zip(predicted_bboxes, image_sizes):
            im_h, im_w = img_dims
            if len(bboxes) > 0:
                scaled_bboxes.append(
                    (np.array(bboxes) * [
                        im_w / IMAGE_SIZE, im_h / IMAGE_SIZE,
                        im_w / IMAGE_SIZE, im_h / IMAGE_SIZE
                    ]).tolist())
            else:
                # No detections: pass the empty container through as-is.
                scaled_bboxes.append(bboxes)
        return scaled_bboxes

    @staticmethod
    def run_wbf(predictions, image_size = 512, iou_thr = 0.44, skip_box_thr = 0.43, weights = None):
        """Applies weighted boxes fusion to each image's predictions."""
        bboxes, confidences, class_labels = [], [], []
        for prediction in predictions:
            # WBF expects normalized [0, 1] coordinates.
            boxes = [(prediction["boxes"] / image_size).tolist()]
            scores = [prediction["scores"].tolist()]
            labels = [prediction["classes"].tolist()]
            boxes, scores, labels = ensemble_boxes_wbf.weighted_boxes_fusion(
                boxes, scores, labels, weights = weights,
                iou_thr = iou_thr, skip_box_thr = skip_box_thr)
            # De-normalize back into pixel coordinates.
            boxes = boxes * (image_size - 1)
            bboxes.append(boxes.tolist())
            confidences.append(scores.tolist())
            class_labels.append(labels.tolist())
        return bboxes, confidences, class_labels

    def on_validation_epoch_end(self) -> None:
        # The first validation pass is Lightning's sanity check; mark it
        # done and skip metric compilation for that pass.
        if not self._sanity_check_passed:
            self._sanity_check_passed = True
            return
        if hasattr(self, 'metric_logger'):
            self.metric_logger.compile_epoch()

    def on_fit_end(self) -> None:
        if hasattr(self, 'metric_logger'):
            self.metric_logger.save()

    def get_progress_bar_dict(self):
        # Strip the version number from the progress bar display.
        # NOTE(review): this hook only exists in older PyTorch Lightning
        # releases — confirm against the pinned version.
        p_bar = super(EfficientDetModel, self).get_progress_bar_dict()
        p_bar.pop('v_num', None)
        return p_bar
# Calculate and log the metrics.
class DetectionMetricLogger(MetricLogger):
    """A `MetricLogger` that forwards prediction/truth pairs to its mAP metric."""

    def update_metrics(self, y_pred, y_true) -> None:
        # `self.metrics` is populated by the `MetricLogger` base class with
        # the dict passed at construction (see `EfficientDetModel.__init__`).
        self.metrics['map'].update(y_pred, y_true)
def train(dataset, epochs, save_dir = None,
          overwrite = None, pretrained_path = None):
    """Constructs the training loop and trains a model.

    Parameters
    ----------
    dataset : str
        The AgML object detection dataset to train on.
    epochs : int
        Maximum number of training epochs.
    save_dir : str, optional
        Checkpoint directory (resolved via `checkpoint_dir`).
    overwrite : bool, optional
        Whether to re-train when checkpoints already exist.
    pretrained_path : tuple, optional
        NOTE(review): accepted but never used below — the model is always
        built with `pretrained = True`. Confirm whether this should be
        forwarded to `EfficientDetModel`.
    """
    save_dir = checkpoint_dir(save_dir, dataset)
    log_dir = save_dir.replace('checkpoints', 'logs')
    # Check if the dataset already has benchmarks.
    if os.path.exists(save_dir) and os.path.isdir(save_dir):
        if not overwrite and len(os.listdir(save_dir)) >= 4:
            print(f"Checkpoints already exist for {dataset} "
                  f"at {save_dir}, skipping generation.")
            return
    # Set up the checkpoint saving callback.
    callbacks = [
        pl.callbacks.ModelCheckpoint(
            dirpath = save_dir, mode = 'min',
            filename = f"{dataset}" + "-epoch{epoch:02d}-valid_loss_{valid_loss:.2f}",
            monitor = 'valid_loss',
            save_top_k = 3,
            auto_insert_metric_name = False
        )
    ]
    # Create the loggers.
    loggers = [
        CSVLogger(log_dir),
        TensorBoardLogger(log_dir)
    ]
    # Construct the data. Seeding makes the train/val/test split
    # reproducible across runs.
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(dataset)
    loader.shuffle()
    loader.split(train = 0.8, val = 0.1, test = 0.1)
    dm = EfficientDetDataModule(
        train_dataset_adaptor = AgMLDatasetAdaptor(loader.train_data),
        validation_dataset_adaptor = AgMLDatasetAdaptor(loader.val_data),
        num_workers = 12, batch_size = 4)
    # Construct the model.
    model = EfficientDetModel(
        num_classes = loader.info.num_classes,
        architecture = 'tf_efficientdet_d4',
        pretrained = True)
    # Create the trainer and train the model.
    msg = f"Training dataset {dataset}!"
    print("\n" + "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg) + "\n")
    trainer = pl.Trainer(
        max_epochs = epochs, gpus = gpus(None),
        callbacks = callbacks, logger = loggers)
    trainer.fit(model, dm)
    # Save the final state.
    torch.save(model.state_dict(), os.path.join(save_dir, 'final_model.pth'))
def train_per_class(dataset, epochs, save_dir = None, overwrite = None):
    """Constructs the training loop and trains one model per class.

    For every class in `dataset`, builds a single-class loader (labels
    collapsed to 1 via `adapt_class`), fine-tunes from a fixed checkpoint,
    and saves per-class checkpoints/logs in class-named subdirectories.
    """
    save_dir = checkpoint_dir(save_dir, dataset)
    log_dir = save_dir.replace('checkpoints', 'logs')
    # Check if the dataset already has benchmarks.
    if os.path.exists(save_dir) and os.path.isdir(save_dir):
        if not overwrite and len(os.listdir(save_dir)) >= 4:
            print(f"Checkpoints already exist for {dataset} "
                  f"at {save_dir}, skipping generation.")
            return
    # Construct the loader (seeded for a reproducible shuffle).
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(dataset)
    loader.shuffle()
    # Create the loop for each class.
    for cl in range(agml.data.source(dataset).num_classes):
        # Create the data module with the new, reduced class.
        # Class IDs are 1-indexed here.
        cls = cl + 1
        new_loader = loader.take_class(cls)
        new_loader.split(train = 0.8, val = 0.1, test = 0.1)
        dm = EfficientDetDataModule(
            train_dataset_adaptor = AgMLDatasetAdaptor(
                new_loader.train_data, adapt_class = True),
            validation_dataset_adaptor = AgMLDatasetAdaptor(
                new_loader.val_data, adapt_class = True),
            num_workers = 12, batch_size = 4)
        this_save_dir = os.path.join(
            save_dir, f'{new_loader.num_to_class[cls]}-{cls}')
        this_log_dir = this_save_dir.replace('checkpoints', 'logs')
        # Set up the checkpoint saving callback.
        callbacks = [
            pl.callbacks.ModelCheckpoint(
                dirpath = this_save_dir, mode = 'min',
                filename = f"{dataset}" + "-epoch{epoch:02d}-valid_loss_{valid_loss:.2f}",
                monitor = 'valid_loss',
                save_top_k = 3,
                auto_insert_metric_name = False
            )
        ]
        # Create the loggers.
        loggers = [
            CSVLogger(this_log_dir),
            TensorBoardLogger(this_log_dir)
        ]
        # Construct the model (single positive class).
        model = EfficientDetModel(
            num_classes = 1,
            architecture = 'tf_efficientdet_d4',
            pretrained = True)
        # NOTE(review): fine-tuning always starts from this hard-coded
        # machine-specific checkpoint — consider parameterizing the path.
        model.load_state_dict(
            torch.load('/data2/amnjoshi/full_grape/checkpoints/final_model.pth',
                       map_location = 'cpu'))
        # Create the trainer and train the model.
        msg = f"Training dataset {dataset} for class {cls}: {loader.num_to_class[cls]}!"
        print("\n" + "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg) + "\n")
        trainer = pl.Trainer(
            max_epochs = epochs, gpus = gpus(None),
            callbacks = callbacks, logger = loggers)
        trainer.fit(model, dm)
        # Save the final state.
        torch.save(model.state_dict(), os.path.join(this_save_dir, 'final_model.pth'))
if __name__ == '__main__':
    # Parse input arguments.
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dataset', type = str, nargs = '+', help = "The name of the dataset.")
    ap.add_argument(
        '--regenerate-existing', action = 'store_true',
        default = False, help = "Whether to re-generate existing benchmarks.")
    ap.add_argument(
        '--checkpoint_dir', type = str, default = None,
        help = "The checkpoint directory to save to.")
    ap.add_argument(
        '--epochs', type = int, default = 50,
        help = "How many epochs to train for. Default is 50.")
    ap.add_argument(
        '--per-class-for-dataset', action = 'store_true',
        default = False, help = "Whether to generate benchmarks per class.")
    ap.add_argument(
        '--pretrained-model-path', type = str, default = None,
        help = "The path to a set of pretrained weights for the model.")
    ap.add_argument(
        # Fixed: this value is used as a class *count* when rebuilding a
        # pretrained head (see `create_model_from_pretrained`), so it must
        # parse as an integer — it was previously `type = str`.
        '--pretrained-num-classes', type = int, default = None,
        help = "The number of classes in the pretrained model.")
    args = ap.parse_args()

    # Train the model.
    if args.per_class_for_dataset:
        train_per_class(args.dataset[0],
                        epochs = args.epochs,
                        save_dir = args.checkpoint_dir)
    elif args.dataset[0] in agml.data.public_data_sources(ml_task = 'object_detection') \
            and len(args.dataset) > 1:
        # NOTE(review): this branch passes the full dataset *list* to
        # `train`, and the membership test compares a string against the
        # objects `public_data_sources` yields — confirm intended behavior.
        train(args.dataset,
              epochs = args.epochs,
              save_dir = args.checkpoint_dir,
              pretrained_path = (args.pretrained_model_path,
                                 args.pretrained_num_classes))
    else:
        if args.dataset[0] == 'all':
            datasets = [ds for ds in agml.data.public_data_sources(
                ml_task = 'object_detection')]
        else:
            datasets = args.dataset
        for ds in datasets:
            train(ds,
                  epochs = args.epochs,
                  save_dir = args.checkpoint_dir,
                  overwrite = args.regenerate_existing,
                  pretrained_path = (args.pretrained_model_path,
                                     args.pretrained_num_classes))
| 26,638 | 37.607246 | 107 | py |
AgML | AgML-main/experiments/benchmarking/miou_evaluation.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import pandas as pd
from tqdm import tqdm
import torch
import pytorch_lightning as pl
import agml
from torchmetrics import IoU
from segmentation_lightning import SegmentationBenchmark
# Define device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def run_evaluation(model, name):
    """Runs evaluation for mean intersection over union.

    Builds the (seeded, deterministic) test split for `name`, runs the
    model over every test image, and returns the resulting IoU value as
    a numpy array.
    """
    # Load the test dataset. The seed matches training so the split is
    # identical to the one the model was validated against.
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(name)
    loader.split(train = 0.8, val = 0.1, test = 0.1)
    loader.batch(batch_size = 2)
    loader.resize_images((512, 512))
    loader.normalize_images('imagenet')
    loader.mask_to_channel_basis()
    ds = loader.test_data.as_torch_dataset()
    # Create the metric (+1 accounts for the background class).
    iou = IoU(num_classes = ds.num_classes + 1)
    # Run inference for all of the images in the test dataset.
    for i in tqdm(range(len(ds)), leave = False):
        image, annotation = ds[i]
        y_pred = model(image.to(device)).float().squeeze()
        iou(y_pred.detach().cpu(), annotation.int().cpu())
    # Compute the final score once (the original called `iou.compute()`
    # twice — once for printing and once for the return value).
    result = iou.compute().detach().cpu().numpy()
    print(result)
    return result
def make_checkpoint(name):
    """Loads the final trained segmentation model for `name` onto `device`."""
    weights_path = os.path.join(
        "/data2/amnjoshi/final/segmentation_checkpoints", name, "final_model.pth")
    # Load weights on CPU first, then move the assembled model to `device`.
    state_dict = torch.load(weights_path, map_location = 'cpu')
    model = SegmentationBenchmark(dataset = name)
    model.load_state_dict(state_dict)
    model.eval().to(device)
    return model
def evaluate(names, log_file = None):
    """Runs the evaluation and saves results to a file."""
    print(f"Running mIoU evaluation for {names}.")
    # Default the log file into the working directory.
    if log_file is None:
        log_file = os.path.join(os.getcwd(), 'miou_evaluation.csv')
    # Evaluate each requested dataset in turn.
    results = {}
    bar = tqdm(names)
    for name in bar:
        ckpt = make_checkpoint(name)
        bar.set_description(f"Evaluating {name}")
        # Data source objects expose their string name via `.name`.
        if hasattr(name, 'name'):
            name = name.name
        results[name] = run_evaluation(ckpt, name)
    # Save the results.
    df = pd.DataFrame(columns = ('name', 'miou'))
    for dataset_name, miou in results.items():
        df.loc[len(df.index)] = [dataset_name, miou]
    df.to_csv(log_file)
if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dataset', type = str, nargs = '+', help = "The name of the dataset.")
    ap.add_argument(
        '--log_file', type = str, default = None,
        help = "The name of the output log file.")
    args = ap.parse_args()
    # Resolve the dataset list, then run the evaluation (this script only
    # evaluates trained checkpoints; it does not train).
    # NOTE(review): `args.dataset[0]` is a string while
    # `public_data_sources()` likely yields source objects — confirm this
    # membership test ever succeeds.
    if args.dataset[0] in agml.data.public_data_sources(ml_task = 'semantic_segmentation'):
        datasets = args.dataset[0]
    else:
        if args.dataset[0] == 'all':
            datasets = [ds for ds in agml.data.public_data_sources(
                ml_task = 'semantic_segmentation')]
        elif args.dataset[0] == 'except':
            # 'except' mode: evaluate everything but the listed datasets.
            exclude_datasets = args.dataset[1:]
            datasets = [
                dataset for dataset in agml.data.public_data_sources(
                    ml_task = 'semantic_segmentation')
                if dataset.name not in exclude_datasets]
        else:
            datasets = args.dataset
    evaluate(datasets, args.log_file)
| 3,988 | 31.169355 | 91 | py |
AgML | AgML-main/experiments/benchmarking/detection_modeling.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Union
import torch
import numpy as np
from effdet import get_efficientdet_config, DetBenchTrain, create_model_from_config
from agml.models.detection import DetectionModel
from experiments.benchmarking.mean_average_precision_torch import MeanAveragePrecision
class DetectionTrainingModel(DetectionModel):
    """Wraps an `EfficientDet` model for a training experiment.

    This subclass of `DetectionModel` adds the training-time pieces used
    by the AgML detection benchmarking experiments: model construction
    (optionally from pretrained weights), the Lightning training and
    validation steps, and mean-average-precision tracking.
    """

    def __init__(self,
                 num_classes: int = 1,
                 pretrained_weights: str = None,
                 confidence_threshold: float = 0.3,
                 learning_rate: float = 0.0002,
                 wbf_iou_threshold: float = 0.44,
                 **kwargs):
        # Initialize the super module.
        super(DetectionTrainingModel, self).__init__(model_initialized = True)
        # Initialize the model using the provided arguments for customizability.
        self.model = self.make_model(
            num_classes = num_classes,
            pretrained_weights = pretrained_weights,
            architecture = kwargs.get('architecture', 'tf_efficientdet_d4'))
        # Set the training parameters.
        self._confidence_threshold = confidence_threshold
        self.lr = learning_rate
        self.wbf_iou_threshold = wbf_iou_threshold
        # Construct the metric.
        self.val_dataset_adaptor = None
        self.map = MeanAveragePrecision(self.model.num_classes)
        # Tracks whether Lightning's sanity-check validation pass has
        # finished; metric updates are skipped until it has.
        self._sanity_check_passed = False

    @staticmethod
    def make_model(num_classes: int,
                   pretrained_weights: str,
                   image_size: Union[int, tuple] = 512,
                   architecture: str = 'tf_efficientdet_d4'):
        """Constructs the `EfficientDet` model from the provided parameters.

        `pretrained_weights` may be None/False (train from scratch), the
        string 'coco' (use effdet's COCO-pretrained weights), or a path
        to a checkpoint file on disk.
        """
        # Parse the input arguments.
        if isinstance(image_size, int): image_size = (image_size, image_size)
        if pretrained_weights is None or pretrained_weights is False: pretrained_weights = ''
        # Construct the configuration.
        cfg = get_efficientdet_config(architecture)
        cfg.update({'image_size': image_size})
        # Build the model.
        model_default_pretrained = False
        if pretrained_weights == 'coco':
            model_default_pretrained = True
        net = create_model_from_config(
            cfg, pretrained = model_default_pretrained,
            num_classes = num_classes)
        # Load the pretrained weights if they are provided.
        if os.path.exists(pretrained_weights):
            # Auto-inference the number of classes from the state dict.
            state = torch.load(pretrained_weights, map_location = 'cpu')
            weight_key = [i for i in state.keys() if 'class_net.predict.conv_pw.weight' in i][0]
            # NOTE(review): the divisor 9 presumably corresponds to the
            # anchors-per-location of the EfficientDet head -- confirm
            # against the effdet configuration.
            pretrained_num_classes = int(state[weight_key].shape[0] / 9)
            # Load the pretrained weights.
            net.reset_head(num_classes = pretrained_num_classes)
            net.load_state_dict(state)
            # Restore the number of classes.
            if num_classes != pretrained_num_classes:
                net.reset_head(num_classes = num_classes)
        # Return the network.
        return DetBenchTrain(net)

    @staticmethod
    def _rescale_bboxes_xyxy(predicted_bboxes, image_sizes):
        """Re-scales output bounding boxes to the original image sizes.

        This is re-written in this training subclass since annotations here are
        also mapped to the YXYX format, and that needs to be accounted for.
        """
        scaled_boxes = []
        for bboxes, img_dims in zip(predicted_bboxes, image_sizes):
            h, w = img_dims
            if len(bboxes) > 0:
                # Re-scale the bounding box to the appropriate format.
                # 512 is the fixed model input size; these ratios map
                # boxes from model space back to the original image.
                scale_ratio = [h / 512, w / 512, h / 512, w / 512]
                scaled = (np.array(bboxes.detach().cpu()) * scale_ratio).astype(np.int32)
                # Convert the Pascal-VOC (yxyx) format to COCO (xywh).
                y, x = scaled[:, 0], scaled[:, 1]
                h, w = scaled[:, 2] - y, scaled[:, 3] - x
                scaled_boxes.append(np.dstack((x, y, w, h)))
                continue
            # Otherwise, there is no prediction for this image.
            scaled_boxes.append(np.array([]))
        return scaled_boxes

    def configure_optimizers(self):
        # Plain Adam over all model parameters at the configured rate.
        return torch.optim.Adam(self.model.parameters(), lr = self.lr)

    def training_step(self, batch, batch_idx):
        """Runs a forward pass and logs the training losses."""
        # Run a forward pass through the model.
        images, annotations, _ = batch
        losses = self.model(images, annotations)
        # Calculate and log losses.
        self.log("train_loss", losses["loss"], on_step = True,
                 on_epoch = True, prog_bar = True, logger = True)
        self.log("train_class_loss", losses["class_loss"],
                 on_step = True, on_epoch = True, logger = True)
        self.log("train_box_loss", losses["box_loss"], on_step = True,
                 on_epoch = True, logger = True)
        return losses['loss']

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Computes validation losses and updates the running mAP metric."""
        images, annotations, targets = batch
        outputs = self.model(images, annotations)
        detections = outputs["detections"]
        # Calculate the mean average precision.
        if self._sanity_check_passed:
            boxes, confidences, labels = self._process_detections(self._to_out(detections))
            boxes = self._rescale_bboxes(boxes, [[512, 512]] * len(images))
            annotations['bbox'] = self._rescale_bboxes_xyxy(
                annotations['bbox'], [[512, 512, ]] * len(images))
            for pred_box, pred_label, pred_conf, true_box, true_label in zip(
                    boxes, labels, confidences, annotations['bbox'], annotations['cls']):
                # Build the (prediction, ground truth) pair expected by
                # the MeanAveragePrecision metric's `update` method.
                metric_update_values = \
                    dict(boxes = self._to_out(torch.tensor(pred_box, dtype = torch.float32)),
                         labels = self._to_out(torch.tensor(pred_label, dtype = torch.int32)),
                         scores = self._to_out(torch.tensor(pred_conf))), \
                    dict(boxes = self._to_out(torch.tensor(true_box, dtype = torch.float32)),
                         labels = self._to_out(torch.tensor(true_label, dtype = torch.int32)))
                self.map.update(*metric_update_values)
            # Log the MAP values.
            map_ = self.map.compute().detach().cpu().numpy().item()
            self.log("map", map_, prog_bar = True,
                     on_epoch = True, logger = True, sync_dist = True)
        batch_predictions = {
            "predictions": detections,
            "targets": targets,
        }
        logging_losses = {
            "class_loss": outputs["class_loss"].detach(),
            "box_loss": outputs["box_loss"].detach(),
        }
        self.log("valid_loss", outputs["loss"], on_step = True, on_epoch = True,
                 prog_bar = True, logger = True, sync_dist = True)
        self.log("valid_class_loss", logging_losses["class_loss"],
                 on_step = True, on_epoch = True,
                 logger = True, sync_dist = True)
        self.log("valid_box_loss", logging_losses["box_loss"],
                 on_step = True, on_epoch = True,
                 logger = True, sync_dist = True)
        return {'loss': outputs["loss"], 'batch_predictions': batch_predictions}

    def on_validation_epoch_end(self) -> None:
        """Log mean average precision at the end of each epoch."""
        # No validation should be run during the sanity check step.
        if not self._sanity_check_passed:
            self._sanity_check_passed = True
            return
        # Compute the mean average precision and reset it.
        if hasattr(self, 'map'):
            map_ = self.map.compute().detach().cpu().numpy().item()
            self.log("map_epoch", map_, prog_bar = True,
                     on_epoch = True, logger = True, sync_dist = True)
            self.map.reset()

    def get_progress_bar_dict(self) -> dict:
        """Remove the `v_num` from the bar; it takes away valuable space."""
        p_bar = super(DetectionTrainingModel, self).get_progress_bar_dict()
        p_bar.pop('v_num', None)
        return p_bar
| 8,868 | 41.845411 | 96 | py |
AgML | AgML-main/experiments/benchmarking/classification_lightning_resnet50.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
from torchmetrics.classification import Precision, Recall, Accuracy
from torchvision.models import resnet50
import agml
import albumentations as A
from tools import gpus, checkpoint_dir, MetricLogger
class ResNet50Transfer(nn.Module):
    """A ResNet50 backbone with a two-layer classification head.

    The torchvision ResNet50 (optionally ImageNet-pretrained) emits a
    1000-dimensional feature vector, which is passed through a hidden
    linear layer (ReLU + dropout) and a final linear layer producing
    the class logits. This is the base benchmarking model for image
    classification.
    """

    def __init__(self, num_classes, pretrained = True):
        super(ResNet50Transfer, self).__init__()
        # Attribute names are kept stable so existing checkpoints load
        # without any state-dict key remapping.
        self.base = resnet50(pretrained = pretrained)
        self.l1 = nn.Linear(1000, 256)
        self.dropout = nn.Dropout(0.1)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(256, num_classes)

    def forward(self, x, **kwargs):  # noqa
        """Returns class logits of shape [batch, num_classes]."""
        features = self.base(x)
        features = features.view(features.size(0), -1)
        hidden = self.dropout(self.relu(self.l1(features)))
        return self.l2(hidden)
class ClassificationBenchmark(pl.LightningModule):
    """Represents an image classification benchmark model.

    Wraps a `ResNet50Transfer` network in a PyTorch Lightning module,
    providing the training/validation steps, metric accumulation, and
    optimizer configuration used by the AgML classification benchmarks.
    """

    def __init__(self, dataset, pretrained = False, save_dir = None):
        # Initialize the module.
        super(ClassificationBenchmark, self).__init__()
        # Construct the network.
        self._source = agml.data.source(dataset)
        self._pretrained = pretrained
        self.net = ResNet50Transfer(
            self._source.num_classes,
            self._pretrained
        )
        # Construct the loss for training.
        self.loss = nn.CrossEntropyLoss()
        # Add a metric calculator.
        # NOTE(review): `self._version` is not defined anywhere in this
        # class -- presumably it is supplied by the Lightning internals;
        # confirm it resolves before the log file name is constructed.
        self.metric_logger = ClassificationMetricLogger({
            'accuracy': Accuracy(num_classes = self._source.num_classes),
            'precision': Precision(num_classes = self._source.num_classes),
            'recall': Recall(num_classes = self._source.num_classes)},
            os.path.join(save_dir, f'logs-{self._version}.csv'))
        # Tracks whether Lightning's sanity-check validation pass has
        # finished; metric updates are skipped until it has.
        self._sanity_check_passed = False

    def forward(self, x):
        return self.net.forward(x)

    def training_step(self, batch, *args, **kwargs): # noqa
        """Runs one training step, logging loss and accuracy."""
        x, y = batch
        y_pred = self(x)
        loss = self.loss(y_pred, y)
        # Labels are one-hot encoded; argmax recovers integer indices.
        acc = accuracy(y_pred, torch.argmax(y, 1)).item()
        self.log('accuracy', acc, prog_bar = True, logger = True)
        self.log('loss', loss, logger = True)
        return {
            'loss': loss,
            'accuracy': acc
        }

    def validation_step(self, batch, *args, **kwargs): # noqa
        """Runs one validation step, logging loss/accuracy and metrics."""
        x, y = batch
        y_pred = self(x)
        val_loss = self.loss(y_pred, y)
        val_acc = accuracy(y_pred, torch.argmax(y, 1))
        # Do not accumulate metrics during the sanity-check pass.
        if self._sanity_check_passed:
            self.metric_logger.update(y_pred, torch.argmax(y, 1))
        self.log('val_loss', val_loss.item(), prog_bar = True, logger = True)
        self.log('val_accuracy', val_acc.item(), prog_bar = True, logger = True)
        return {
            'val_loss': val_loss,
            'val_accuracy': val_acc
        }

    def configure_optimizers(self):
        # Adam with default hyperparameters over all model parameters.
        return torch.optim.Adam(self.parameters())

    def get_progress_bar_dict(self):
        # Remove the version number from the progress bar to save space.
        tqdm_dict = super(ClassificationBenchmark, self)\
            .get_progress_bar_dict()
        tqdm_dict.pop('v_num', None)
        return tqdm_dict

    def on_validation_epoch_end(self) -> None:
        # The first validation pass is Lightning's sanity check; skip
        # compiling metrics for it.
        if not self._sanity_check_passed:
            self._sanity_check_passed = True
            return
        self.metric_logger.compile_epoch()

    def on_fit_end(self) -> None:
        # Persist all accumulated metrics once training completes.
        self.metric_logger.save()
# Calculate and log the metrics.
class ClassificationMetricLogger(MetricLogger):
    """Metric logger that feeds CPU-moved predictions to each metric."""

    def update_metrics(self, y_pred, y_true) -> None:
        # Move both tensors off the accelerator once, then update every
        # registered metric with the same pair.
        y_pred, y_true = y_pred.cpu(), y_true.cpu()
        for metric in self.metrics.values():
            metric.update(y_pred, y_true)
def accuracy(output, target):
    """Computes the top-1 accuracy between `output` and `target`.

    `output` holds per-class scores of shape [batch, classes] and
    `target` holds integer labels of shape [batch]. Returns a float
    tensor of shape [1] with the accuracy as a percentage.
    """
    with torch.no_grad():
        batch_size = target.size(0)
        # Top-1 prediction per sample via argmax over the class axis.
        predicted = torch.argmax(output, dim = 1)
        hits = predicted.eq(target).float()
        correct_k = hits.sum(0, keepdim = True)
        return correct_k.mul_(100.0 / batch_size)
# Build the data loaders.
def build_loaders(name):
    """Builds the train/val/test datasets for the given AgML dataset.

    The loader is split 80/10/10, batched at 16, resized and normalized
    to ImageNet conventions, and labels are one-hot encoded. A random
    90-degree rotation augmentation is applied to the training split only.
    """
    # Seed everything so the train/val/test split is reproducible.
    pl.seed_everything(2499751)
    loader = agml.data.AgMLDataLoader(name)
    loader.split(train = 0.8, val = 0.1, test = 0.1)
    loader.batch(batch_size = 16)
    loader.resize_images('imagenet')
    loader.normalize_images('imagenet')
    loader.labels_to_one_hot()
    train_data = loader.train_data
    train_data.transform(transform = A.RandomRotate90())
    # Copy the training split so later mutations of the parent loader
    # do not affect the training dataset object.
    train_ds = train_data.copy().as_torch_dataset()
    val_ds = loader.val_data.as_torch_dataset()
    # Keep the validation ordering deterministic across epochs.
    val_ds.shuffle_data = False
    test_ds = loader.test_data.as_torch_dataset()
    return train_ds, val_ds, test_ds
def train(dataset, pretrained, epochs, save_dir = None, overwrite = None):
    """Constructs the training loop and trains a model.

    Parameters
    ----------
    dataset : str
        The name of the AgML dataset to train on.
    pretrained : bool
        Whether to start from an ImageNet-pretrained backbone.
    epochs : int
        The maximum number of epochs to train for.
    save_dir : str
        The directory to save checkpoints to. Falls back to the default
        benchmark checkpoint directory when not provided.
    overwrite : bool
        Whether to re-generate benchmarks that already exist.
    """
    # Fall back to the default benchmark directory only when no explicit
    # checkpoint directory is given. (Previously the parameter was
    # unconditionally overwritten, silently ignoring `--checkpoint_dir`.)
    if save_dir is None:
        save_dir = "/data2/amnjoshi/resnet50_pretrained/checkpoints"
    os.makedirs(save_dir, exist_ok = True)
    # Logs live in a sibling `logs` directory next to `checkpoints`.
    log_dir = save_dir.replace('checkpoints', 'logs')
    os.makedirs(log_dir, exist_ok = True)
    # Check if the dataset already has benchmarks.
    if os.path.exists(save_dir) and os.path.isdir(save_dir):
        if not overwrite and len(os.listdir(save_dir)) >= 4:
            print(f"Checkpoints already exist for {dataset} "
                  f"at {save_dir}, skipping generation.")
            return
    # Set up the checkpoint saving callback (top-3 by validation loss)
    # and early stopping on a stagnant validation loss.
    callbacks = [
        pl.callbacks.ModelCheckpoint(
            dirpath = save_dir, mode = 'min',
            filename = f"{dataset}" + "-epoch{epoch:02d}-val_loss_{val_loss:.2f}",
            monitor = 'val_loss',
            save_top_k = 3,
            auto_insert_metric_name = False
        ),
        pl.callbacks.EarlyStopping(
            monitor = 'val_loss',
            min_delta = 0.0001,
            patience = 10,
        )
    ]
    # Construct the model.
    model = ClassificationBenchmark(
        dataset = dataset, pretrained = pretrained, save_dir = save_dir)
    # Construct the data loaders.
    train_ds, val_ds, test_ds = build_loaders(dataset)
    # Create the loggers.
    loggers = [
        CSVLogger(log_dir),
        TensorBoardLogger(log_dir)
    ]
    # Create the trainer and train the model.
    msg = f"Training dataset {dataset}!"
    print("\n" + "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg) + "\n")
    trainer = pl.Trainer(
        max_epochs = epochs, gpus = gpus(None),
        callbacks = callbacks, logger = loggers,
        log_every_n_steps = 5)
    trainer.fit(
        model = model,
        train_dataloaders = train_ds,
        val_dataloaders = val_ds,
    )
if __name__ == '__main__':
    # Parse input arguments.
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '--dataset', type = str, nargs = '+', help = "The name of the dataset.")
    ap.add_argument(
        '--regenerate-existing', action = 'store_true',
        default = False, help = "Whether to re-generate existing benchmarks.")
    ap.add_argument(
        '--not-pretrained', action = 'store_false',
        default = True, help = "Whether to load a pretrained model.")
    ap.add_argument(
        '--checkpoint_dir', type = str, default = None,
        help = "The checkpoint directory to save to.")
    ap.add_argument(
        '--epochs', type = int, default = 20,
        help = "How many epochs to train for. Default is 20.")
    args = ap.parse_args()
    # Train the model.
    if args.dataset[0] in agml.data.public_data_sources(ml_task = 'image_classification'):
        # `--dataset` uses `nargs = '+'` and thus parses to a list: pass
        # the single dataset name, not the list (previously the whole
        # list was handed to `train`, which expects one dataset name).
        # The `overwrite` flag is now forwarded here too, consistent
        # with the multi-dataset branch below.
        train(args.dataset[0],
              args.not_pretrained,
              epochs = args.epochs,
              save_dir = args.checkpoint_dir,
              overwrite = args.regenerate_existing)
    else:
        if args.dataset[0] == 'all':
            datasets = [ds for ds in agml.data.public_data_sources(
                ml_task = 'image_classification')]
        else:
            datasets = args.dataset
        for dataset in datasets:
            train(dataset,
                  args.not_pretrained,
                  epochs = args.epochs,
                  save_dir = args.checkpoint_dir,
                  overwrite = args.regenerate_existing)
| 9,011 | 33.136364 | 90 | py |
AgML | AgML-main/agml/models/base.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import abc
from typing import List, Union, overload
import cv2
import numpy as np
import torch
from pytorch_lightning import LightningModule
from agml.framework import AgMLSerializable
from agml.backend.config import model_save_path
from agml.backend.tftorch import is_array_like
from agml.utils.image import imread_context
from agml.utils.downloads import download_model
from agml.models.benchmarks import BenchmarkMetadata
class AgMLModelBase(AgMLSerializable, LightningModule):
    """Base class for all AgML pretrained models.

    All pretrained model variants in AgML inherit from this base class,
    which provides common methods which each use, such as weight loading
    and image input preprocessing, as well as other stubs for common methods.
    """

    def __init__(self):
        # Benchmark metadata is populated when a benchmark is loaded.
        self._benchmark = BenchmarkMetadata(None)
        super(AgMLModelBase, self).__init__()

    @property
    def original(self):
        """Returns the original model architecture (without weights)."""
        return self.net.base

    @overload
    def preprocess_input(self, images: str) -> "torch.Tensor":
        ...

    @overload
    def preprocess_input(self, images: List[str]) -> "torch.Tensor":
        ...

    @overload
    def preprocess_input(self, images: Union[np.ndarray, torch.Tensor]) -> "torch.Tensor":
        ...

    @overload
    def preprocess_input(self, images: List[Union[np.ndarray, torch.Tensor]]) -> "torch.Tensor":
        ...

    @abc.abstractmethod
    def preprocess_input(self, *args, **kwargs):
        """Preprocesses input images to model specifications."""
        raise NotImplementedError

    @overload
    def predict(self, images: str) -> "torch.Tensor":
        ...

    @overload
    def predict(self, images: List[str]) -> "torch.Tensor":
        ...

    @overload
    def predict(self, images: Union[np.ndarray, torch.Tensor]) -> "torch.Tensor":
        ...

    @overload
    def predict(self, images: List[Union[np.ndarray, torch.Tensor]]) -> "torch.Tensor":
        ...

    @abc.abstractmethod
    def predict(self, *args, **kwargs):
        """Runs model inference on input image(s)."""
        raise NotImplementedError

    @staticmethod
    def _expand_input_images(images):
        """Expands the input list of images to a specification.

        This is particularly useful because the model accepts numerous
        inputs, ranging from a list of image paths all the way to an
        already pre-processed image batch. This method standardizes all
        inputs to a common format, in particular, a list of all of the
        images that are going to be then passed to the input preprocessing
        method, before being passed through the model for inference.
        """
        # First check for a path or a list of paths, for speed.
        if isinstance(images, str):
            with imread_context(images) as image:
                # OpenCV reads BGR; convert to RGB for the models.
                return [cv2.cvtColor(image, cv2.COLOR_BGR2RGB), ]
        elif isinstance(images, list) and isinstance(images[0], str):
            parsed_images = []
            for path in images:
                with imread_context(path) as image:
                    parsed_images.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            return parsed_images
        # Then check if we already have read-in images, either one
        # single image or a batch of images.
        if is_array_like(images):
            # Check if there is one single input image, or a batch of input
            # images. If there is a single input image, just return this.
            if images.ndim == 3:
                return [images, ]
            # Check if we have a batch of images first. This check is
            # done by seeing if the input is 4-dimensional.
            if images.ndim == 4:
                # If so, unstack the images along the first dimension.
                return [i for i in images]
        # Finally, the only remaining viable input type is a list of images.
        if isinstance(images, list) and is_array_like(images[0]):
            return images
        # Otherwise, we need to raise an error.
        raise TypeError(
            "Expected an input of a list of paths or images, a "
            "single path or image, or a batched image tensor for "
            f"preprocessing inputs, instead got {type(images)}.")

    @staticmethod
    def _to_out(tensor: "torch.Tensor") -> "torch.Tensor":
        # Detaches from the graph and moves to host memory. (Despite the
        # annotation, `.numpy()` makes the returned value an ndarray.)
        return tensor.detach().cpu().numpy()

    @staticmethod
    def _get_shapes(images: list) -> list:
        """Gets the height and width of each of the input images."""
        shapes = []
        for image in images:
            if image.ndim == 2:
                # Grayscale image: shape is already (height, width).
                shapes.append(image.shape)
                continue
            if image.shape[0] <= 3: # channels first
                shapes.append(image.shape[1:])
            else: # channels last
                shapes.append(image.shape[:2])
        return shapes

    @property
    def benchmark(self):
        """Information about the loaded benchmark."""
        return self._benchmark

    @benchmark.setter
    def benchmark(self, value):
        self._benchmark = value

    @staticmethod
    def _get_benchmark(name):
        """Returns the `state_dict` for a pretrained model benchmark."""
        # Check if the benchmark exists; if not, download it.
        benchmark_path = os.path.join(model_save_path(), name + '.pth')
        if not os.path.exists(benchmark_path):
            download_model(name, os.path.dirname(benchmark_path))
        # Load the benchmark.
        return torch.load(benchmark_path, map_location = 'cpu')

    @abc.abstractmethod
    def load_benchmark(self, dataset: str):
        """Loads a benchmark for the given AgML dataset."""
        raise NotImplementedError

    @abc.abstractmethod
    def evaluate(self, loader, **kwargs):
        """Evaluates the model on the given loader."""
        raise NotImplementedError
| 6,551 | 33.851064 | 96 | py |
AgML | AgML-main/agml/models/classification.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tqdm import tqdm
import torch
import torch.nn as nn
try:
from torchvision.models import efficientnet_b4
except ImportError:
raise ImportError("To use image classification models in `agml.models`, you "
"need to install Torchvision first. You can do this by "
"running `pip install torchvision`.")
from agml.models.base import AgMLModelBase
from agml.models.tools import auto_move_data, imagenet_style_process
from agml.models.metrics.accuracy import Accuracy
class EfficientNetB4Transfer(nn.Module):
    """Wraps an EfficientNetB4 backbone with a two-layer classification head.

    The backbone emits a 1000-dimensional feature vector, which is run
    through a hidden linear layer (ReLU + dropout) and a final linear
    layer producing the class logits.
    """

    def __init__(self, num_classes):
        super(EfficientNetB4Transfer, self).__init__()
        # Attribute names are kept stable so pretrained benchmark
        # checkpoints load without state-dict key remapping.
        self.base = efficientnet_b4(pretrained = False)
        self.l1 = nn.Linear(1000, 256)
        self.dropout = nn.Dropout(0.1)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(256, num_classes)

    def forward(self, x, **kwargs):  # noqa
        """Returns class logits of shape [batch, num_classes]."""
        features = self.base(x)
        features = features.view(features.size(0), -1)
        hidden = self.dropout(self.relu(self.l1(features)))
        return self.l2(hidden)
class ClassificationModel(AgMLModelBase):
    """Wraps an `EfficientNetB4` model for agricultural image classification.

    When using the model for inference, you should use the `predict()` method
    on any set of input images. This method wraps the `forward()` call with
    additional steps which perform the necessary preprocessing on the inputs,
    including resizing, normalization, and batching, as well as additional
    post-processing on the outputs (such as converting one-hot labels to
    integer labels), which will allow for a streamlined inference pipeline.

    If you want to use the `forward()` method directly, so that you can just
    call `model(inputs)`, then make sure your inputs are properly processed.
    This can be done by calling `model.preprocess_input(inputs)` on the
    input list of images/single image and then passing that result to the model.
    This will also return a one-hot label feature vector instead of integer
    labels, in the case that you want further customization of the outputs.

    This model can be subclassed in order to run a full training job; the
    actual transfer `EfficientNetB4` model can be accessed through the
    parameter `net`, and you'll need to implement methods like `training_step`,
    `configure_optimizers`, etc. See PyTorch Lightning for more information.
    """
    # Serialization configuration for `AgMLSerializable`.
    serializable = frozenset(("model", "regression"))
    state_override = frozenset(("model",))

    def __init__(self, num_classes = None, regression = False, **kwargs):
        # Construct the network and load in pretrained weights.
        super(ClassificationModel, self).__init__()
        # In regression mode, raw outputs are returned from `predict()`
        # instead of argmax-ed integer class labels.
        self._regression = regression
        if not kwargs.get('model_initialized', False):
            self._num_classes = num_classes
            self.net = self._construct_sub_net(num_classes)

    @auto_move_data
    def forward(self, batch):
        return self.net(batch)

    @staticmethod
    def _construct_sub_net(num_classes):
        # One-line factory so subclasses can swap out the architecture.
        return EfficientNetB4Transfer(num_classes)

    @staticmethod
    def _preprocess_image(image, **kwargs):
        """Preprocesses a single input image to EfficientNet standards.

        The preprocessing steps are applied logically; if the images
        are passed with preprocessing already having been applied, for
        instance, the images are already resized or they are already been
        normalized, the operation is not applied again, for efficiency.

        Preprocessing includes the following steps:

        1. Resizing the image to size (224, 224).
        2. Performing normalization with ImageNet parameters.
        3. Converting the image into a PyTorch tensor format.

        as well as other intermediate steps such as adding a channel
        dimension for two-channel inputs, for example.
        """
        return imagenet_style_process(image, **kwargs)

    @staticmethod
    def preprocess_input(images = None, **kwargs) -> "torch.Tensor":
        """Preprocesses the input image to the specification of the model.

        This method takes in a set of inputs and preprocesses them into the
        expected format for the `EfficientNetB4` image classification model.
        There are a variety of inputs which are accepted, including images,
        image paths, as well as fully-processed image tensors. The inputs
        are expanded and standardized, then run through a preprocessing
        pipeline which formats them into a single tensor ready for the model.

        Preprocessing steps include normalization, resizing, and converting
        to the channels-first format used by PyTorch models. The output
        of this method will be a single `torch.Tensor`, which has shape
        [N, C, H, W], where `N` is the batch dimension. If only a single
        image is passed, this will have a value of 1.

        This method is largely beneficial when you just want to preprocess
        images into the specification of the model, without getting the output.
        Namely, `predict()` is essentially just a wrapper around this method
        and `forward()`, so you can run this externally and then run `forward()`
        to get the original model outputs, without any extra post-processing.

        Parameters
        ----------
        images : Any
            One of the following formats (and types):
                1. A single image path (str)
                2. A list of image paths (List[str])
                3. A single image (np.ndarray, torch.Tensor)
                4. A list of images (List[np.ndarray, torch.Tensor])
                5. A batched tensor of images (np.ndarray, torch.Tensor)

        Returns
        -------
        A 4-dimensional, preprocessed `torch.Tensor`.
        """
        images = ClassificationModel._expand_input_images(images)
        return torch.stack(
            [ClassificationModel._preprocess_image(
                image, **kwargs) for image in images], dim = 0)

    @torch.no_grad()
    def predict(self, images, **kwargs):
        """Runs `EfficientNetB4` inference on the input image(s).

        This method is the primary inference method for the model; it
        accepts a set of input images (see `preprocess_input()` for a
        detailed specification on the allowed input parameters), then
        preprocesses them to the model specifications, forward passes
        them through the model, and finally returns the predictions.

        In essence, this method is a wrapper for `forward()` that allows
        for passing a variety of inputs. If, on the other hand, you
        have pre-processed inputs and simply want to forward pass through
        the model without having to spend computational time on what
        is now unnecessary preprocessing, you can simply call `forward()`
        and then run `torch.argmax()` on the outputs to get predictions.

        Parameters
        ----------
        images : Any
            See `preprocess_input()` for the allowed input images.

        Returns
        -------
        A `np.ndarray` with integer labels for each image.
        """
        images = self.preprocess_input(images, **kwargs)
        out = self.forward(images)
        if not self._regression: # standard classification
            out = torch.argmax(out, 1)
        return self._to_out(torch.squeeze(out))

    def evaluate(self, loader, **kwargs):
        """Runs an accuracy evaluation on the given loader.

        This method will loop over the `AgMLDataLoader` and compute accuracy.

        Parameters
        ----------
        loader : AgMLDataLoader
            A semantic segmentation loader with the dataset you want to evaluate.

        Returns
        -------
        The final calculated accuracy.
        """
        # Construct the metric and run the calculations.
        acc = Accuracy()
        bar = tqdm(loader, desc = "Calculating Accuracy")
        for sample in bar:
            image, truth = sample
            pred_label = self.predict(image, **kwargs)
            acc.update([pred_label], [truth])
            bar.set_postfix({'accuracy': acc.compute().numpy().item()})
        # Compute the final accuracy.
        return acc.compute().numpy().item()
| 8,921 | 40.887324 | 81 | py |
AgML | AgML-main/agml/models/losses.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of useful loss functions for agricultural ML tasks.
Some of these have been put into use for benchmarking (see the
`training` directory for examples of usage in training scripts).
"""
import torch
import torch.nn as nn
def dice_loss(y_pred, y):
    """Calculates `dice loss` for semantic segmentation.

    See https://arxiv.org/abs/1707.03237 for an in-depth explanation.

    `y_pred` holds raw logits (sigmoid is applied here); `y` is the
    ground truth mask of shape [N, C, H, W] (multi-class) or [N, H, W]
    (binary). Returns a scalar tensor with the mean dice loss.
    """
    # Convert ground truth to float for compatibility in operations.
    y = y.float()
    # Determine whether this is a multi-class or binary task from the
    # rank of the ground truth. (Previously this used unpacking inside a
    # bare `except:`, which also silently swallowed unrelated errors.)
    if y.ndim == 4: # Multi-class segmentation: [N, C, H, W]
        c, h, w = y.shape[1:]
    else: # Binary segmentation: [N, H, W], one implicit channel
        h, w = y.shape[1:]
        c = 1
    # Sigmoid for the outputs (since this is automatically done by binary
    # cross-entropy loss with logits, the actual base model for semantic
    # segmentation does not include any sigmoid activation in it, only the
    # `predict()` function wraps it. So, we do it here as well.
    y_pred = torch.sigmoid(y_pred)
    # Run the dice loss calculations. The 1e-6 terms guard against
    # division by zero for empty masks.
    pred_flat = torch.reshape(y_pred, [-1, c * h * w])
    y_flat = torch.reshape(y, [-1, c * h * w])
    intersection = 2.0 * torch.sum(pred_flat * y_flat, dim = 1) + 1e-6
    denominator = torch.sum(pred_flat, dim = 1) \
                  + torch.sum(y_flat, dim = 1) + 1e-6
    return 1. - torch.mean(intersection / denominator)
class DiceLoss(nn.Module):
    """Module wrapper around `dice_loss` for use as a criterion object."""
    def forward(self, x, target, **kwargs): # noqa
        # Extra keyword arguments are accepted (and ignored) so this can
        # be called interchangeably with other criteria.
        return dice_loss(x, target)
| 2,104 | 34.677966 | 74 | py |
AgML | AgML-main/agml/models/preprocessing.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A set of data preprocessing functions for `AgMLDataLoaders`.
"""
import inspect
import torch
import numpy as np
from PIL import Image
import albumentations as A
from albumentations.pytorch import ToTensorV2
from agml.models.tools import imagenet_style_process as _isp
def imagenet_preprocess(image, size = None):
    """Preprocesses a single input image to ImageNet standards.

    The preprocessing steps are applied logically; if the images
    are passed with preprocessing already having been applied, for
    instance, the images are already resized or they are already been
    normalized, the operation is not applied again, for efficiency.

    Preprocessing includes the following steps:

    1. Resizing the image to size (224, 224).
    2. Performing normalization with ImageNet parameters.
    3. Converting the image into a PyTorch tensor format.

    as well as other intermediate steps such as adding a channel
    dimension for two-channel inputs, for example.

    Parameters
    ----------
    image : Any
        The input image to preprocess.
    size : tuple
        An optional (height, width) override for the resize step;
        forwarded directly to the underlying processing routine.

    Returns
    -------
    The preprocessed image.
    """
    return _isp(image, size = size)
class EfficientDetPreprocessor(object):
    """A preprocessor which prepares a data sample for `EfficientDet`.

    This class can be used to construct a preprocessing pipeline which
    auto-formats the data in an `AgMLDataLoader` for object detection into
    the format necessary for training an `EfficientDet` model. By default,
    this includes resizing images, converting bounding boxes to `yxyx`, and
    finally preparing the image and annotation for PyTorch.

    Using this class supersedes the need for any other transformations or
    even image resizing. It can be used as follows:

    > loader = agml.data.AgMLDataLoader('grape_detection_californiaday')
    > processor = agml.models.EfficientDetPreprocessor(
    >     image_size = 512, augmentation = [A.HorizontalFlip(0.5)])
    > loader.transform(dual_transform = processor)

    Parameters
    ----------
    image_size : int
        The size to which images will be resized (default of 512). May also
        be a sequence of two values, `(height, width)`.
    augmentation : Any
        Either a list of albumentations transforms (without being wrapped
        into a Compose object), or a custom method which accepts three
        arguments: `image`, `bboxes`, and `labels`, and returns the same.
        The `bboxes` will be in the XYXY format.

    Raises
    ------
    ValueError
        If `image_size` is a sequence whose length is not 2, or if the
        provided `augmentation` is of an invalid format.
    TypeError
        If `image_size` is neither an integer nor a sequence.

    Notes
    -----
    - Passing `augmentation = None`, the default, is equivalent to preparing
      a validation or test loader: preprocessing is applied, but no transforms.
    - Note that if you pass a custom augmentation method, the resulting output
      is expected to be in PyTorch's format (image should be a tensor with its
      first dimension being the image's channels, for example).
    """

    def __init__(self, image_size = 512, augmentation = None):
        # Parse the image size into an `(height, width)` pair.
        if isinstance(image_size, int):
            image_size = (image_size, image_size)
        elif isinstance(image_size, (list, tuple)):
            if not len(image_size) == 2:
                # Bug fix: this message previously lacked the `f` prefix,
                # so the literal text "{image_size}" was shown to the user.
                raise ValueError(
                    "Expected either an integer or sequence of 2 values "
                    f"for `image_size`, instead got ({image_size}).")
        else:
            # Previously an invalid type slipped through here and failed
            # later with an unrelated indexing error; fail fast instead.
            raise TypeError(
                "Expected either an integer or sequence of 2 values for "
                f"`image_size`, instead got type {type(image_size)}.")

        # Construct the applied input augmentation. The standard transforms
        # (resize + tensor conversion) are always applied last.
        self._albumentations_params = dict(
            bbox_params = A.BboxParams(
                format = "pascal_voc", min_area = 0,
                min_visibility = 0, label_fields = ["labels"]),
            standard_augmentations = [
                A.Resize(height = image_size[0],
                         width = image_size[1], p = 1),
                ToTensorV2(p = 1)])
        self._check_and_make_augmentation(augmentation)

    def _check_and_make_augmentation(self, augmentation):
        """Constructs the applied augmentation from the inputs."""
        # If no augmentation is provided, then use the defaults.
        if augmentation is None:
            self._augmentation = A.Compose([
                *self._albumentations_params['standard_augmentations']
            ], p = 1.0, bbox_params = self._albumentations_params['bbox_params'])

        # If a list of augmentations are provided, then use those augmentations
        # wrapped alongside the default ones in a `Compose` object.
        elif isinstance(augmentation, list):
            if not all(isinstance(a, A.BasicTransform) for a in augmentation):
                raise ValueError(
                    "If providing a list of transforms, all of them must be "
                    f"albumentations augmentations, instead got {augmentation} "
                    f"of types {[type(i) for i in augmentation]}.")
            self._augmentation = A.Compose([
                *augmentation, *self._albumentations_params['standard_augmentations']
            ], p = 1.0, bbox_params = self._albumentations_params['bbox_params'])

        # Otherwise, the augmentation should be a method with three input
        # arguments, so check it and then wrap it into an application method.
        else:
            # NOTE(review): this only verifies the callable has a nonzero
            # number of parameters, not exactly three; a stricter check
            # would break callables using *args, so it is left as-is.
            if not len(inspect.signature(augmentation).parameters):
                raise ValueError(
                    f"The input augmentation should have three input arguments, "
                    f"instead got {inspect.signature(augmentation).parameters}.")
            self._method_augmentation = augmentation
            self._augmentation = self._apply_method_augmentation

    def _apply_method_augmentation(self, image, bboxes, labels):
        # Wrapper method to apply a user-provided method augmentation,
        # normalizing its output into the albumentations dict format.
        image, bboxes, labels = self._method_augmentation(image, bboxes, labels)
        return {'image': image, 'bboxes': bboxes, 'labels': labels}

    def __call__(self, image, annotation):
        # Convert the image type.
        image = Image.fromarray(image)

        # The incoming boxes are COCO-style `[x, y, width, height]` (the
        # width/height are added to the mins below); convert to Pascal-VOC
        # `[x1, y1, x2, y2]` and clip to the image bounds to prevent errors.
        bboxes = np.array(annotation['bbox']).astype(np.int32)
        x_min = bboxes[:, 0]
        y_min = bboxes[:, 1]
        x_max = bboxes[:, 2] + x_min
        y_max = bboxes[:, 3] + y_min
        x_min, y_min = np.clip(x_min, 0, image.width), \
                       np.clip(y_min, 0, image.height)
        x_max, y_max = np.clip(x_max, 0, image.width), \
                       np.clip(y_max, 0, image.height)

        # Reconstruct the boxes and get the class labels.
        bboxes = np.dstack((x_min, y_min, x_max, y_max)).squeeze(axis = 0)
        class_labels = np.array(annotation['category_id']).squeeze()

        # Add an extra dimension to labels for consistency.
        if class_labels.ndim == 0:
            class_labels = np.expand_dims(class_labels, axis = 0)

        # Apply the albumentations (or wrapped custom) augmentation.
        sample = {'image': np.array(image, dtype = np.float32),
                  'bboxes': bboxes, 'labels': class_labels}
        sample = self._augmentation(**sample)
        image = sample['image']
        bboxes = np.array(sample['bboxes'])
        labels = sample['labels']

        # Convert 1-channel and 4-channel to 3-channel.
        if image.shape[0] == 1:
            image = torch.tile(image, (3, 1, 1))
        if image.shape[0] == 4:
            image = image[:3]

        # Convert to yxyx from xyxy (the box ordering EfficientDet expects).
        _, new_h, new_w = image.shape
        if bboxes.ndim == 1:
            bboxes = np.expand_dims(bboxes, axis = 0)
        bboxes[:, [0, 1, 2, 3]] = bboxes[:, [1, 0, 3, 2]]

        # Create the target from the annotations.
        target = {
            "bboxes": torch.as_tensor(
                bboxes, dtype = torch.float32),
            "labels": torch.as_tensor(labels),
            "img_size": torch.tensor([new_h, new_w]),
            "img_scale": torch.tensor([1.0])}
        return image, target
| 8,321 | 41.676923 | 85 | py |
AgML | AgML-main/agml/models/segmentation.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from torchvision.models.segmentation import deeplabv3_resnet50
except ImportError:
raise ImportError("To use image classification models in `agml.models`, you "
"need to install Torchvision first. You can do this by "
"running `pip install torchvision`.")
from agml.models.base import AgMLModelBase
from agml.models.benchmarks import BenchmarkMetadata
from agml.models.tools import auto_move_data, imagenet_style_process
from agml.data.public import source
from agml.utils.general import resolve_list_value
from agml.utils.image import resolve_image_size
from agml.viz.masks import show_image_and_overlaid_mask, show_image_and_mask
# This is last since `agml.models.base` will check for PyTorch Lightning,
# and PyTorch Lightning automatically installed torchmetrics with it.
from torchmetrics import JaccardIndex as IoU
class DeepLabV3Transfer(nn.Module):
    """Wraps a DeepLabV3 model with the right number of classes."""

    def __init__(self, num_classes):
        super(DeepLabV3Transfer, self).__init__()
        # Build an untrained DeepLabV3 (ResNet-50 backbone) whose head
        # produces `num_classes` output channels.
        self.base = deeplabv3_resnet50(
            pretrained = False, num_classes = num_classes)

    def forward(self, x, **kwargs):  # noqa
        # The torchvision model returns a dict of heads; only the
        # primary segmentation output is used.
        outputs = self.base(x)
        return outputs['out']
class SegmentationModel(AgMLModelBase):
    """Wraps a `DeepLabV3` model for agricultural semantic segmentation.

    When using the model for inference, you should use the `predict()` method
    on any set of input images. This method wraps the `forward()` call with
    additional steps which perform the necessary preprocessing on the inputs,
    including resizing, normalization, and batching, as well as additional
    post-processing on the outputs (such as converting one-hot labels to
    integer labels), which will allow for a streamlined inference pipeline.

    If you want to use the `forward()` method directly, so that you can just
    call `model(inputs)`, then make sure your inputs are properly processed.
    This can be done by calling `model.preprocess_input(inputs)` on the
    input list of images/single image and then passing that result to the model.
    This will also return a one-hot label feature vector instead of integer
    labels, in the case that you want further customization of the outputs.

    This model can be subclassed in order to run a full training job; the
    actual transfer `DeepLabV3` model can be accessed through the
    parameter `net`, and you'll need to implement methods like `training_step`,
    `configure_optimizers`, etc. See PyTorch Lightning for more information.
    """

    # NOTE(review): presumably consumed by `AgMLModelBase` when saving and
    # restoring model state — confirm against the base class.
    serializable = frozenset((
        "net", "num_classes", "conf_thresh", "image_size"))
    state_override = frozenset(("net",))

    def __init__(self,
                 num_classes = 1,
                 image_size = 512,
                 **kwargs):
        """Builds a `SegmentationModel`.

        Parameters
        ----------
        num_classes : int
            The number of segmentation classes. A value of 1 denotes binary
            segmentation, with outputs thresholded by `conf_threshold`.
        image_size : Any
            The input image size (resolved through `resolve_image_size`).
        kwargs
            May include `conf_threshold` (a float strictly between 0 and 1,
            default 0.2, only used when `num_classes == 1`), and
            `model_initialized` (set by subclasses which construct the
            network themselves, skipping construction here).
        """
        # Construct the network (no pretrained weights are loaded here;
        # use `load_benchmark()` for pretrained weights).
        super(SegmentationModel, self).__init__()

        # If being initialized by a subclass, then don't do any of
        # model construction logic (since that's already been done).
        if not kwargs.get('model_initialized', False):
            self._num_classes = num_classes
            self._image_size = resolve_image_size(image_size)
            self.net = self._construct_sub_net(num_classes)
            if self._num_classes == 1:
                conf_threshold = kwargs.get('conf_threshold', 0.2)
                if not 0 < conf_threshold < 1:
                    raise ValueError(
                        "The given confidence threshold "
                        "must be between 0 and 1.")
                self._conf_thresh = conf_threshold

        # By default, the model starts in inference mode.
        self.eval()

    @auto_move_data
    def forward(self, batch):
        # Raw forward pass; `auto_move_data` moves `batch` to the model's
        # device first. Inputs must already be preprocessed.
        return self.net(batch)

    @staticmethod
    def _construct_sub_net(num_classes):
        # Builds the underlying DeepLabV3 network with the given head size.
        return DeepLabV3Transfer(num_classes)

    @staticmethod
    def _preprocess_image(image, image_size, **kwargs):
        """Preprocesses a single input image to ImageNet standards.

        The preprocessing steps are applied logically; if the images
        are passed with preprocessing already having been applied, for
        instance, the images are already resized or they are already been
        normalized, the operation is not applied again, for efficiency.

        Preprocessing includes the following steps:

        1. Resizing the image to the given `image_size`.
        2. Performing normalization with ImageNet parameters.
        3. Converting the image into a PyTorch tensor format.

        as well as other intermediate steps such as adding a channel
        dimension for two-channel inputs, for example.
        """
        return imagenet_style_process(image, size = image_size, **kwargs)

    def preprocess_input(self, images, return_shapes = False, **kwargs):
        """Preprocesses the input image to the specification of the model.

        This method takes in a set of inputs and preprocesses them into the
        expected format for the `DeepLabV3` semantic segmentation model.
        There are a variety of inputs which are accepted, including images,
        image paths, as well as fully-processed image tensors. The inputs
        are expanded and standardized, then run through a preprocessing
        pipeline which formats them into a single tensor ready for the model.

        Preprocessing steps include normalization, resizing, and converting
        to the channels-first format used by PyTorch models. The output
        of this method will be a single `torch.Tensor`, which has shape
        [N, C, H, W], where `N` is the batch dimension. If only a single
        image is passed, this will have a value of 1.

        This method is largely beneficial when you just want to preprocess
        images into the specification of the model, without getting the output.
        Namely, `predict()` is essentially just a wrapper around this method
        and `forward()`, so you can run this externally and then run `forward()`
        to get the original model outputs, without any extra post-processing.

        Parameters
        ----------
        images : Any
            One of the following formats (and types):
            1. A single image path (str)
            2. A list of image paths (List[str])
            3. A single image (np.ndarray, torch.Tensor)
            4. A list of images (List[np.ndarray, torch.Tensor])
            5. A batched tensor of images (np.ndarray, torch.Tensor)
        return_shapes : bool
            Whether to return the original shapes of the input images.

        Returns
        -------
        A 4-dimensional, preprocessed `torch.Tensor`. If `return_shapes`
        is set to True, it also returns the original shapes of the images.
        """
        images = self._expand_input_images(images)
        shapes = self._get_shapes(images)
        images = torch.stack(
            [self._preprocess_image(
                image, self._image_size, **kwargs) for image in images], dim = 0)
        if return_shapes:
            return images, shapes
        return images

    @torch.no_grad()
    def predict(self, images, **kwargs):
        """Runs `DeepLabV3` inference on the input image(s).

        This method is the primary inference method for the model; it
        accepts a set of input images (see `preprocess_input()` for a
        detailed specification on the allowed input parameters), then
        preprocesses them to the model specifications, forward passes
        them through the model, and finally returns the predictions.

        In essence, this method is a wrapper for `forward()` that allows
        for passing a variety of inputs. If, on the other hand, you
        have pre-processed inputs and simply want to forward pass through
        the model without having to spend computational time on what
        is now unnecessary preprocessing, you can simply call `forward()`
        and then run `torch.argmax()` on the outputs to get predictions.

        Parameters
        ----------
        images : Any
            See `preprocess_input()` for the allowed input images.

        Returns
        -------
        A list of `np.ndarray`s with resized output masks.
        """
        # Process the images and run inference.
        images, shapes = self.preprocess_input(images, return_shapes = True, **kwargs)
        out = torch.sigmoid(self.forward(images))

        # Post-process the output masks to a valid format.
        if out.shape[1] == 1:  # binary class predictions
            # Threshold sigmoid scores into a {0, 1} mask.
            out[out >= self._conf_thresh] = 1
            out[out != 1] = 0
            out = torch.squeeze(out, dim = 1)
        else:  # multi-class predictions to integer labels
            out = torch.argmax(out, 1)

        # Resize each mask back to its original image shape via
        # nearest-style interpolation on an int mask.
        masks = []
        for mask, shape in zip(
                torch.index_select(out, 0, torch.arange(len(out))), shapes):
            masks.append(self._to_out(torch.squeeze(F.interpolate(
                torch.unsqueeze(torch.unsqueeze(mask, 0), 0).float(),
                size = shape).int())))
        return resolve_list_value(masks)

    def show_prediction(self, image, overlay = False, **kwargs):
        """Shows the output predictions for one input image.

        This method is useful for instantly visualizing the predictions
        for a single input image. It accepts a single input image (or
        any type of valid 'image' input, as described in the method
        `preprocess_input()`), and then runs inference on that input
        image and displays its predictions in a matplotlib window.

        Parameters
        ----------
        image : Any
            See `preprocess_input()` for allowed types of inputs.
        overlay : bool
            Either `True` for overlaid masks, or `False` for
            visualizing the mask separately from the image.
        kwargs
            Visualization keyword arguments.

        Returns
        -------
        The matplotlib figure containing the image.
        """
        image = self._expand_input_images(image)[0]
        mask = self.predict(image, **kwargs)
        if overlay:
            return show_image_and_overlaid_mask(image, mask, **kwargs)
        return show_image_and_mask(image, mask, **kwargs)

    def load_benchmark(self, dataset):
        """Loads a benchmark for the given semantic segmentation dataset.

        This method is used to load pretrained weights for a specific AgML dataset.
        In essence, it serves as a wrapper for `load_state_dict`, directly getting
        the model from its save path in the AWS storage bucket. You can then use the
        `benchmark` property to access the metric value of the benchmark, as well as
        additional training parameters which you can use to train your own models.

        Parameters
        ----------
        dataset : str
            The name of the semantic segmentation benchmark to load.

        Raises
        ------
        ValueError
            If `dataset` is not a semantic segmentation dataset, or its
            number of classes does not match this model's.

        Notes
        -----
        If the benchmark has a different number of classes than this input model, then
        this method will raise an error. This issue may be adapted in the future.
        """
        if source(dataset).tasks.ml != 'semantic_segmentation':
            raise ValueError(
                f"You are trying to load a benchmark for a "
                f"{source(dataset).tasks.ml} task ({dataset}) "
                f"in a semantic segmentation model.")

        # Number of classes must be the same for semantic segmentation.
        if source(dataset).num_classes != self._num_classes:
            raise ValueError(
                f"You cannot load a benchmark for a dataset '{dataset}' "
                f"with {source(dataset).num_classes} classes, while your "
                f"model has {self._num_classes} classes.")

        # Load the benchmark.
        state = self._get_benchmark(dataset)
        self.load_state_dict(state)
        self._benchmark = BenchmarkMetadata(dataset)

    def evaluate(self, loader, **kwargs):
        """Runs a mean intersection over union evaluation on the given loader.

        This method will loop over the provided `AgMLDataLoader` and compute
        the mean intersection over union (mIOU).

        Parameters
        ----------
        loader : AgMLDataLoader
            A semantic segmentation loader with the dataset you want to evaluate.

        Returns
        -------
        The final calculated mIoU.
        """
        # Construct the metric and run the calculations.
        iou = IoU(num_classes = self._num_classes + 1)
        bar = tqdm(loader, desc = "Calculating Mean Intersection Over Union")
        for sample in bar:
            image, truth = sample
            pred_mask = self.predict(image, **kwargs)
            # Multi-channel masks are moved to channels-first for the metric.
            if pred_mask.ndim == 3:
                pred_mask = np.transpose(pred_mask, (2, 0, 1))
                truth = np.transpose(truth, (2, 0, 1))
            iou(torch.from_numpy(pred_mask).int().unsqueeze(0),
                torch.from_numpy(truth).unsqueeze(0))
            bar.set_postfix({'miou': iou.compute().numpy().item()})

        # Compute the final mIoU.
        return iou.compute().numpy().item()
| 14,006 | 42.231481 | 86 | py |
AgML | AgML-main/agml/models/tools.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from functools import wraps
import torch
import numpy as np
import albumentations as A
from agml.backend.tftorch import is_array_like
def imagenet_style_process(image, size = None, **kwargs):
    """Preprocesses a single input image to ImageNet standards.

    The preprocessing steps are applied logically; if the images
    are passed with preprocessing already having been applied, for
    instance, the images are already resized or they are already been
    normalized, the operation is not applied again, for efficiency.

    Preprocessing includes the following steps:

    1. Resizing the image to `size` (default (224, 224)).
    2. Performing normalization with ImageNet parameters.
    3. Converting the image into a PyTorch tensor format.

    as well as other intermediate steps such as adding a channel
    dimension for two-channel inputs, for example.
    """
    # Framework tensors are first converted into NumPy arrays.
    if is_array_like(image) and hasattr(image, 'numpy'):
        image = image.numpy()

    # Grayscale imagery gains a trailing channel dimension.
    if image.ndim == 2:
        image = np.expand_dims(image, axis = -1)

    # Channels-first inputs are flipped to channels-last until
    # preprocessing has finished.
    if image.shape[0] <= 3:
        image = np.transpose(image, (1, 2, 0))

    # Resize only when the spatial dimensions differ from the target.
    target_h, target_w = (224, 224) if size is None else size
    resizer = A.Resize(height = target_h, width = target_w)
    if image.shape[0] != target_h or image.shape[1] != target_w:
        image = resizer(image = image)['image']

    if kwargs.get('normalize', True):
        # Scale [0, 255] pixels to [0, 1], then apply the standard
        # ImageNet channel-wise mean/std normalization.
        if 1 <= image.max() <= 255:
            image = image.astype(np.float32) / 255.
            channel_mean = np.array(
                [0.485, 0.456, 0.406], dtype = np.float32)
            channel_std = np.array(
                [0.229, 0.224, 0.225], dtype = np.float32)
            inv_std = np.reciprocal(channel_std, dtype = np.float32)
            image = (image - channel_mean) * inv_std
    else:
        # Otherwise, just scale the image to [0, 1].
        if 1 <= image.max() <= 255:
            image = image.astype(np.float32) / 255.0

    # Hand back a channels-first PyTorch tensor.
    return torch.from_numpy(image).permute(2, 0, 1)
# Ported from PyTorch Lightning v1.3.0.
def auto_move_data(fn: Callable) -> Callable:
    """
    Decorator for :class:`~pytorch_lightning.core.lightning.LightningModule` methods for which
    input arguments should be moved automatically to the correct device.
    It has no effect if applied to a method of an object that is not an instance of
    :class:`~pytorch_lightning.core.lightning.LightningModule` and is typically applied to ``__call__``
    or ``forward``.

    Args:
        fn: A LightningModule method for which the arguments should be moved to the device
            the parameters are on.

    Example::

        # directly in the source code
        class LitModel(LightningModule):
            @auto_move_data
            def forward(self, x):
                return x

        # or outside
        LitModel.forward = auto_move_data(LitModel.forward)
        model = LitModel()
        model = model.to('cuda')
        model(torch.zeros(1, 3))
        # input gets moved to device
        # tensor([[0., 0., 0.]], device='cuda:0')
    """
    @wraps(fn)
    def auto_transfer_args(self, *args, **kwargs):
        # Import lazily so the decorator itself does not require
        # pytorch_lightning at definition time.
        from pytorch_lightning import LightningModule
        if isinstance(self, LightningModule):
            # Move all positional and keyword arguments onto the
            # module's device before invoking the wrapped method.
            args, kwargs = self.transfer_batch_to_device(
                (args, kwargs), device = self.device, dataloader_idx = None)  # noqa
        return fn(self, *args, **kwargs)

    return auto_transfer_args
AgML | AgML-main/agml/models/__init__.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains PyTorch pre-trained weights and benchmarks for
commonly used deep learning models on agricultural datasets within AgML.
"""
# Before anything can be imported, we need to run checks for PyTorch and
# PyTorch Lightning, as these are not imported on their own.
try:
import torch
except ImportError:
raise ImportError('Could not find a PyTorch installation. If you want to use '
'the models in `agml.models`, you will need to install PyTorch '
'first. Try `pip install torch` to do so.')
try:
import pytorch_lightning
except ImportError:
raise ImportError('Could not find a PyTorch Lightning installation. If you want to '
'use the models in `agml.models`, you will need to install PyTorch '
'Lightning first. Try `pip install pytorch-lightning` to do so.')
from .classification import ClassificationModel
from .segmentation import SegmentationModel
from .detection import DetectionModel
from . import metrics
from . import losses
from . import preprocessing
| 1,690 | 37.431818 | 90 | py |
AgML | AgML-main/agml/models/detection.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import torch
import numpy as np
import albumentations as A
from tqdm import tqdm
try:
from ensemble_boxes.ensemble_boxes_wbf import weighted_boxes_fusion
except ImportError:
raise ImportError(
"Could not find an installation of the `ensemble_boxes` package. "
"Try `pip install ensemble-boxes` to install it."
)
try:
from effdet import (
create_model_from_config,
get_efficientdet_config,
DetBenchPredict,
DetBenchTrain
)
except ImportError:
raise ImportError(
"Could not find an installation of the `effdet` package. "
"Try `pip install effdet==0.2.4` to install it (note that "
"the version is important for proper functionality)."
)
from agml.models.base import AgMLModelBase
from agml.models.benchmarks import BenchmarkMetadata
from agml.models.tools import auto_move_data
from agml.models.metrics.map import MeanAveragePrecision
from agml.data.public import source
from agml.backend.tftorch import is_array_like
from agml.utils.image import resolve_image_size
from agml.utils.logging import log
from agml.viz.boxes import show_image_and_boxes
class DetectionModel(AgMLModelBase):
"""Wraps an `EfficientDetD4` model for agricultural object detection.
When using the model for inference, you should use the `predict()` method
on any set of input images. This method wraps the `forward()` call with
additional steps which perform the necessary preprocessing on the inputs,
including resizing, normalization, and batching, as well as additional
post-processing on the outputs (such as converting one-hot labels to
integer labels), which will allow for a streamlined inference pipeline.
If you want to use the `forward()` method directly, so that you can just
call `model(inputs)`, then make sure your inputs are properly processed.
This can be done by calling `model.preprocess_input(inputs)` on the
input list of images/single image and then passing that result to the model.
This will also return a one-hot label feature vector instead of integer
labels, in the case that you want further customization of the outputs.
By default, when instantiating a `DetectionModel`, it is prepared in
inference mode. In order to use this model for training, you need to convert
it to training mode by using `DetectionModel.switch_train()`. To convert
back to inference mode, use `DetectionModel.switch_predict()`.
If you want to use your own custom model and/or training pipeline, without the
existing input restrictions, then you can subclass this model. In the `super`
call in the `__init__` method, pass the parameter `model_initialized = True`,
which will enable you to initialize the model in your own format.
Parameters
----------
num_classes : int
The number of classes for the `EfficientDet` model.
image_size : int, tuple
The shape of image inputs to the model.
conf_threshold : float
Filters bounding boxes by their level of confidence based on this threshold.
"""
serializable = frozenset((
"model", "num_classes", "conf_thresh", "image_size"))
state_override = frozenset(("model",))
def __init__(self,
num_classes = 1,
image_size = 512,
conf_threshold = 0.3,
**kwargs):
# Initialize the base modules.
super(DetectionModel, self).__init__()
# If being initialized by a subclass, then don't do any of
# model construction logic (since that's already been done).
if not kwargs.get('model_initialized', False):
# Construct the network and load in pretrained weights.
self._image_size = resolve_image_size(image_size)
self._confidence_threshold = conf_threshold
self._num_classes = num_classes
self.model = self._construct_sub_net(
self._num_classes, self._image_size)
# Filter out unnecessary warnings.
warnings.filterwarnings(
'ignore', category = UserWarning, module = 'ensemble_boxes')
warnings.filterwarnings(
'ignore', category = UserWarning, module = 'effdet.bench') # noqa
    @auto_move_data
    def forward(self, batch):
        """Forward passes a preprocessed batch through the detection bench.

        `batch` must already be preprocessed (see `preprocess_input()`);
        the `auto_move_data` decorator moves it onto the model's device.
        """
        return self.model(batch)
@staticmethod
def _construct_sub_net(num_classes, image_size):
cfg = get_efficientdet_config('tf_efficientdet_d4')
cfg.update({"image_size": image_size})
model = create_model_from_config(
cfg, pretrained = False,
num_classes = num_classes)
return DetBenchPredict(model)
def switch_predict(self):
"""Prepares the model for evaluation mode."""
state = self.model.state_dict()
if not isinstance(self.model, DetBenchPredict):
self.model = DetBenchPredict(self.model.model)
self.model.load_state_dict(state)
def switch_train(self):
"""Prepares the model for training mode."""
state = self.model.state_dict()
if not isinstance(self.model, DetBenchTrain):
self.model = DetBenchTrain(self.model.model)
self.model.load_state_dict(state)
    @property
    def original(self):  # override for detection models.
        # The raw `EfficientDet` network, unwrapped from its
        # train/predict bench.
        return self.model.model
@torch.jit.ignore()
def reset_class_net(self, num_classes = 1):
"""Reconfigures the output class net for a new number of classes.
Parameters
----------
num_classes : int
The number of classes to reconfigure the output net to use.
"""
if num_classes != self._num_classes:
self.model.model.reset_head(num_classes = num_classes)
@staticmethod
def _preprocess_image(image, image_size):
"""Preprocesses a single input image to EfficientNet standards.
The preprocessing steps are applied logically; if the images
are passed with preprocessing already having been applied, for
instance, the images are already resized or they are already been
normalized, the operation is not applied again, for efficiency.
Preprocessing includes the following steps:
1. Resizing the image to size (224, 224).
2. Performing normalization with ImageNet parameters.
3. Converting the image into a PyTorch tensor format.
as well as other intermediate steps such as adding a channel
dimension for two-channel inputs, for example.
"""
# Convert the image to a NumPy array.
if is_array_like(image) and hasattr(image, 'numpy'):
image = image.numpy()
# Add a channel dimension for grayscale imagery.
if image.ndim == 2:
image = np.expand_dims(image, axis = -1)
# If the image is already in channels-first format, convert
# it back temporarily until preprocessing has concluded.
if image.shape[0] <= 3:
image = np.transpose(image, (1, 2, 0))
# Resize the image to ImageNet standards.
(w, h) = image_size
rz = A.Resize(height = h, width = w)
if image.shape[0] != h or image.shape[1] != w:
image = rz(image = image)['image']
# Normalize the image to ImageNet standards.
if 1 <= image.max() <= 255:
image = image.astype(np.float32) / 255.
# Convert the image into a PyTorch tensor.
image = torch.from_numpy(image).permute(2, 0, 1)
# Return the processed image.
return image
@torch.jit.ignore()
def preprocess_input(self, images, return_shapes = False):
"""Preprocesses the input image to the specification of the model.
This method takes in a set of inputs and preprocesses them into the
expected format for the `EfficientDetD4` object detection model.
There are a variety of inputs which are accepted, including images,
image paths, as well as fully-processed image tensors. The inputs
are expanded and standardized, then run through a preprocessing
pipeline which formats them into a single tensor ready for the model.
Preprocessing steps include normalization, resizing, and converting
to the channels-first format used by PyTorch models. The output
of this method will be a single `torch.Tensor`, which has shape
[N, C, H, W], where `N` is the batch dimension. If only a single
image is passed, this will have a value of 1.
This method is largely beneficial when you just want to preprocess
images into the specification of the model, without getting the output.
Namely, `predict()` is essentially just a wrapper around this method
and `forward()`, so you can run this externally and then run `forward()`
to get the original model outputs, without any extra post-processing.
Parameters
----------
images : Any
One of the following formats (and types):
1. A single image path (str)
2. A list of image paths (List[str])
3. A single image (np.ndarray, torch.Tensor)
4. A list of images (List[np.ndarray, torch.Tensor])
5. A batched tensor of images (np.ndarray, torch.Tensor)
return_shapes : bool
Whether to return the original shapes of the input images.
Returns
-------
A 4-dimensional, preprocessed `torch.Tensor`. If `return_shapes`
is set to True, it also returns the original shapes of the images.
"""
images = self._expand_input_images(images)
shapes = self._get_shapes(images)
images = torch.stack(
[self._preprocess_image(
image, self._image_size) for image in images], dim = 0)
if return_shapes:
return images, shapes
return images
def _process_detections(self, detections):
"""Post-processes the output detections (boxes, labels) from the model."""
predictions = []
# Convert all of the output detections into predictions. This involves
# selecting the bounding boxes, confidence scores, and classes, and then
# dropping any of them which do not have a confidence score about the
# threshold as determined when the class is initialized.
for d in detections:
# Extract the bounding boxes, confidence scores,
# and class labels from the output detections.
boxes, scores, classes = d[:, :4], d[:, 4], d[:, 5]
# Only return boxes which are above the confidence threshold.
valid_indexes = np.where(scores > self._confidence_threshold)[0]
boxes = boxes[valid_indexes]
scores = scores[valid_indexes]
classes = classes[valid_indexes]
predictions.append({"boxes": boxes, "scores": scores, "classes": classes})
# Run weighted boxes fusion. For an exact description of how this
# works, see the paper: https://arxiv.org/pdf/1910.13302.pdf.
(predicted_bboxes,
predicted_class_confidences,
predicted_class_labels) = self._wbf(predictions)
return predicted_bboxes, predicted_class_confidences, predicted_class_labels
@staticmethod
def _rescale_bboxes(predicted_bboxes, image_sizes):
"""Re-scales output bounding boxes to the original image sizes."""
scaled_boxes = []
for bboxes, img_dims in zip(predicted_bboxes, image_sizes):
h, w = img_dims
if len(bboxes) > 0:
# Re-scale the bounding box to the appropriate format.
scale_ratio = [w / 512, h / 512, w / 512, h / 512]
scaled = (np.array(bboxes) * scale_ratio).astype(np.int32)
# Convert the Pascal-VOC (xyxy) format to COCO (xywh).
x, y = scaled[:, 0], scaled[:, 1]
w, h = scaled[:, 2] - x, scaled[:, 3] - y
scaled_boxes.append(np.dstack((x, y, w, h)))
continue
# Otherwise, there is no prediction for this image.
scaled_boxes.append(np.array([]))
return scaled_boxes
@staticmethod
def _wbf(predictions):
"""Runs weighted boxes fusion on the output predictions."""
bboxes, confidences, class_labels = [], [], []
# Fuse the predictions for each in the batch.
for prediction in predictions:
boxes = [(prediction["boxes"] / 512).tolist()]
scores = [prediction["scores"].tolist()]
labels = [prediction["classes"].tolist()]
# Run the actual fusion and update the containers.
boxes, scores, labels = weighted_boxes_fusion(
boxes, scores, labels,
iou_thr = 0.44, skip_box_thr = 0.43)
boxes = boxes * (512 - 1)
bboxes.append(boxes)
confidences.append(scores)
class_labels.append(labels.astype(np.int32))
return bboxes, confidences, class_labels
@staticmethod
def _remap_outputs(boxes, labels, confidences):
"""Remaps the outputs to the format described in `predict()`."""
squeeze = lambda *args: tuple(list(
np.squeeze(a).tolist() for a in args))
return [squeeze(b, l, c)
for b, l, c in zip(boxes, labels, confidences)]
    def _to_out(self, tensor: "torch.Tensor") -> "torch.Tensor":
        # The underlying model may wrap its raw output in a dict; pull out
        # the 'detections' tensor before deferring to the parent class.
        if isinstance(tensor, dict):
            tensor = tensor['detections']
        return super()._to_out(tensor)
@torch.no_grad()
def predict(self, images):
"""Runs `EfficientNetD4` inference on the input image(s).
This method is the primary inference method for the model; it
accepts a set of input images (see `preprocess_input()` for a
detailed specification on the allowed input parameters), then
preprocesses them to the model specifications, forward passes
them through the model, and finally returns the predictions.
In essence, this method is a wrapper for `forward()` that allows
for passing a variety of inputs. If, on the other hand, you
have pre-processed inputs and simply want to forward pass through
the model without having to spend computational time on what
is now unnecessary preprocessing, you can simply call `forward()`
and then run the post-processing as described in this method.
Parameters
----------
images : Any
See `preprocess_input()` for the allowed input images.
Returns
-------
A tuple of `n` lists, where `n` is the number of input images.
Each of the `n` lists will contain three values consisting of
the bounding boxes, class labels, and prediction confidences
for the corresponding input image.
"""
# Process the images and run inference.
images, shapes = self.preprocess_input(images, return_shapes = True)
out = self._to_out(self.forward(images))
# Post-process the output detections.
boxes, confidences, labels = self._process_detections(out)
boxes = self._rescale_bboxes(boxes, shapes)
ret = self._remap_outputs(boxes, labels, confidences)
return ret[0] if len(ret) == 1 else ret
def show_prediction(self, image):
"""Shows the output predictions for one input image.
This method is useful for instantly visualizing the predictions
for a single input image. It accepts a single input image (or
any type of valid 'image' input, as described in the method
`preprocess_input()`), and then runs inference on that input
image and displays its predictions in a matplotlib window.
Parameters
----------
image : Any
See `preprocess_input()` for allowed types of inputs.
Returns
-------
The matplotlib figure containing the image.
"""
image = self._expand_input_images(image)[0]
bboxes, labels, _ = self.predict(image)
if isinstance(labels, int):
bboxes, labels = [bboxes], [labels]
return show_image_and_boxes(image, bboxes, labels)
def load_benchmark(self, dataset, strict = False):
"""Loads a benchmark for the given semantic segmentation dataset.
This method is used to load pretrained weights for a specific AgML dataset.
In essence, it serves as a wrapper for `load_state_dict`, directly getting
the model from its save path in the AWS storage bucket. You can then use the
`benchmark` property to access the metric value of the benchmark, as well as
additional training parameters which you can use to train your own models.
Parameters
----------
dataset : str
The name of the object detection benchmark to load.
strict : bool
Whether to require the same number of classes.
Notes
-----
If the given benchmark has a different number of classes than this input model,
then the class network will be loaded with random weights, while the remaining
network weights (backbone, box network, etc.) will use the pretrained weights
for the benchmark. This can be disabled by setting `strict = True`.
"""
if source(dataset).tasks.ml != 'object_detection':
raise ValueError(
f"You are trying to load a benchmark for a "
f"{source(dataset).tasks.ml} task ({dataset}) "
f"in an object detection model.")
# Check loading strictness.
cs = source(dataset).num_classes == self._num_classes
if strict:
if not cs:
raise ValueError(
f"You cannot load a benchmark for a dataset '{dataset}' "
f"with {source(dataset).num_classes} classes, while your "
f"model has {self._num_classes} classes. If you want to, "
f"then you need to set `strict = False`.")
# Load the benchmark.
state = self._get_benchmark(dataset)
if not strict and not cs:
self.reset_class_net(source(dataset).num_classes)
log(f"Loading a state dict for {dataset} with "
f"{source(dataset).num_classes}, while your "
f"model has {self._num_classes} classes. The "
f"class network will use random weights.")
self.load_state_dict(state)
if not strict and not cs:
self.reset_class_net(self._num_classes)
self._benchmark = BenchmarkMetadata(dataset)
def evaluate(self, loader, iou_threshold = 0.5, method = 'accumulate'):
"""Runs a mean average precision evaluation on the given loader.
This method will loop over the provided `AgMLDataLoader` and compute
the mean average precision at the provided `iou_threshold`. This can
be done using two methods. Using the method `average` will compute
the mean average precision for each individual sample and then average
over all of the samples, while using the method `accumulate` will
compute the mean average precision over the entire dataset.
Parameters
----------
loader : AgMLDataLoader
An object detection loader with the dataset you want to evaluate.
iou_threshold : float
The IoU threshold between a ground truth and predicted bounding
box at which point they are considered the same.
method : str
The method to use, as described above.
Returns
-------
The final calculated mean average precision.
"""
if not 0 < iou_threshold < 1:
raise ValueError(
f"The `iou_threshold` must be between 0 and 1, got {iou_threshold}.")
if method not in ['accumulate', 'average']:
raise ValueError(
f"Method must be either `accumulate` or `average`, got {method}.")
# Construct the mean average precision accumulator and run the calculations.
mean_ap = MeanAveragePrecision(
num_classes = self._num_classes, iou_threshold = iou_threshold)
bar = tqdm(loader, desc = "Calculating Mean Average Precision")
if method == 'average':
cumulative_maps = []
for sample in bar:
image, truth = sample
true_box, true_label = truth['bbox'], truth['category_id']
bboxes, labels, conf = self.predict(image)
mean_ap.update(*(
dict(boxes = bboxes, labels = labels, scores = conf),
dict(boxes = true_box, labels = true_label)))
# If averaging, then calculate and reset the mAP, otherwise continue.
if method == 'average':
res = mean_ap.compute()
cumulative_maps.append(res) # noqa
bar.set_postfix({'map': float(res)})
mean_ap.reset()
# Compute the final mAP.
if method == 'average':
result = sum(cumulative_maps) / len(cumulative_maps)
else:
result = mean_ap.compute()
return result
| 22,088 | 41.397313 | 87 | py |
AgML | AgML-main/agml/models/metrics/accuracy.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
@torch.jit.script
def accuracy(output, target):
    """Computes the top-1 accuracy (in percent) between `output` and `target`.

    `output` is expected to be a 2-D tensor of per-class scores (one row per
    sample), while `target` holds the corresponding integer class labels.
    """
    batch_size = target.size(0)
    # Index of the highest-scoring class for each sample.
    _, pred = torch.topk(output, 1, 1)
    pred = pred.t()
    # Compare predictions against the labels, broadcast to the same shape.
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    correct_k = correct[:1].reshape(-1).float().sum(0, keepdim = True)
    # Scale the correct count into a percentage of the batch.
    return correct_k.mul_(100.0 / batch_size)
class Accuracy(nn.Module):
    """A metric to compute accuracy for image classification tasks.

    This class is used as a wrapper around accuracy calculations, which allows for
    accumulation of predictions over time. The `update` method can be used to update
    data, then `compute` to get the calculated accuracy, and finally `reset` can be
    used to reset the accumulators to an empty state, allowing new calculations.
    """

    def __init__(self):
        # Construct the data accumulators.
        super(Accuracy, self).__init__()
        self._prediction_data, self._truth_data = [], []

    def update(self, pred_data, gt_data):
        """Updates the state of the accuracy metric.

        `pred_data` may be either a sequence of integer labels or a sequence of
        per-class score rows (logits/one-hot); `gt_data` should be a sequence of
        integer labels. Any activation or softmax operations must be applied
        before inputting data into the accuracy metric.

        Raises
        ------
        ValueError
            If the predictions and truths have different lengths.
        """
        if not len(pred_data) == len(gt_data):
            raise ValueError("Predictions and truths should be the same length.")
        self._prediction_data.extend(pred_data)
        self._truth_data.extend(gt_data)

    def compute(self):
        """Computes the accuracy (in percent) between predictions and truths."""
        preds = torch.tensor(self._prediction_data)
        truths = torch.tensor(self._truth_data)
        # Bug fix: the documented input format (integer labels) produces a
        # 1-D tensor, but `accuracy()` requires 2-D score rows for `topk`.
        # Handle integer labels directly; defer to `accuracy()` for scores.
        if preds.ndim == 1:
            correct_k = preds.eq(truths).float().sum(0, keepdim = True)
            return correct_k.mul_(100.0 / truths.size(0))
        return accuracy(preds, truths)

    def reset(self):
        """Resets the accumulator states."""
        del self._prediction_data, self._truth_data
        self._prediction_data, self._truth_data = [], []
| 2,632 | 38.298507 | 84 | py |
AgML | AgML-main/agml/models/metrics/map.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn as nn
from agml.utils.logging import log
@torch.jit.script
def bbox_iou(predicted_box, truth_box):
    """Calculates the IOU of predicted and truth bounding boxes.

    Boxes are expected in (x, y, w, h) format: the bottom-right corner is
    reconstructed below as (x + w, y + h).
    """
    # Reconstruct corner coordinates from the (x, y, w, h) boxes.
    box1_x1 = predicted_box[..., 0:1]
    box1_y1 = predicted_box[..., 1:2]
    box1_x2 = predicted_box[..., 0:1] + predicted_box[..., 2:3]
    box1_y2 = predicted_box[..., 1:2] + predicted_box[..., 3:4]
    box2_x1 = truth_box[..., 0:1]
    box2_y1 = truth_box[..., 1:2]
    box2_x2 = truth_box[..., 0:1] + truth_box[..., 2:3]
    box2_y2 = truth_box[..., 1:2] + truth_box[..., 3:4]
    # Coordinates of the intersection rectangle.
    x1 = torch.max(box1_x1, box2_x1)
    y1 = torch.max(box1_y1, box2_y1)
    x2 = torch.min(box1_x2, box2_x2)
    y2 = torch.min(box1_y2, box2_y2)
    # Get the area of the union. Clamping to zero handles disjoint boxes.
    intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
    box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
    box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
    union_area = (box1_area + box2_area - intersection)
    # Return the intersection over union (epsilon avoids division by zero).
    return intersection / (union_area + 1e-6)
@torch.no_grad()
def mean_average_precision(
        predicted_boxes, truth_boxes, num_classes = 1, iou_thresh = 0.5):
    """Calculates the mean average precision for predicted and true boxes.

    Each row of `predicted_boxes` is expected to be formatted as
    [sample_index, class, score, x, y, w, h] (ground truth rows may omit
    the score, in which case a score of 1.0 is inserted below). The AP is
    computed per class from the precision-recall curve and then averaged.
    """
    average_precisions = []
    # Check whether to add confidence scores to the ground truth boxes.
    if len(truth_boxes[0]) == 6:
        out_boxes = []
        for a in truth_boxes:
            out_boxes.append([*a[:2], 1.0, *a[2:]])
        truth_boxes = out_boxes.copy()
    # Calculate average precision for each class.
    pred_boxes, true_boxes = torch.tensor(predicted_boxes), torch.tensor(truth_boxes)
    for c in range(num_classes):
        # If there are no predictions, then the per-class AP is 0.
        if len(pred_boxes) == 0:
            average_precisions.append(torch.tensor(0.0))
            continue
        # Get the predictions and targets corresponding to this class
        # (column 1 holds the class index).
        detections = pred_boxes[torch.where(pred_boxes[:, 1] == c)[0]].tolist()
        ground_truths = true_boxes[torch.where(true_boxes[:, 1] == c)[0]].tolist()
        torch_gt = torch.tensor(ground_truths)
        # If there are no ground truths, then the per-class AP is 0.
        if len(ground_truths) == 0:
            average_precisions.append(torch.tensor(0.0))
            continue
        # Track, per sample index, which ground truth boxes have already
        # been matched to a detection (0 = unmatched, 1 = matched).
        numpy_gt = torch.tensor(ground_truths)
        amount_bboxes = {int(k.numpy().item()): torch.zeros(v) for k, v in zip(
            *torch.unique(numpy_gt[:, 0], return_counts = True))}
        # Sort the boxes by probabilities (highest confidence first).
        detections.sort(key = lambda x: x[2], reverse = True)
        tp = torch.zeros((len(detections)))
        fp = torch.zeros((len(detections)))
        total_true_bboxes = len(ground_truths)
        # If there are no boxes for this class, then there are
        # no calculations to do, so skip it.
        if total_true_bboxes == 0:
            continue
        for detection_idx, detection in enumerate(detections):
            # Get the update (sample) number for this detection.
            update_num = int(detection[0])
            # Only take out the ground_truths that have the same
            # training idx as the detection.
            ground_truth_img = torch_gt[
                torch.where(torch_gt[:, 0] == update_num)[0]]
            # Get the bounding box with the highest IoU (columns 3+ hold
            # the box coordinates).
            ious = torch.tensor([bbox_iou(
                torch.tensor(detection[3:]), gt[3:].clone())
                for gt in ground_truth_img])
            best_iou = torch.max(ious)
            best_gt_idx = torch.argmax(ious)
            # If the IoU is above the threshold, then it may be a true positive.
            if best_iou > iou_thresh:
                # This should be the first time the box is detected. Otherwise,
                # that would mean that there are multiple predicted bounding
                # boxes for the same object, which is a false positive.
                try:
                    if amount_bboxes[update_num][best_gt_idx] == 0:
                        tp[detection_idx] = 1
                        amount_bboxes[update_num][best_gt_idx] = 1
                    else:
                        fp[detection_idx] = 1
                except KeyError:
                    # A false detection.
                    fp[detection_idx] = 1
            # If the IoU is below the threshold, then it is a false positive.
            else:
                fp[detection_idx] = 1
        # Calculate the precision/recall curve and integrate it (with the
        # conventional (recall=0, precision=1) starting point prepended).
        tp_cumsum = torch.cumsum(tp, dim = 0)
        recalls = tp_cumsum / (total_true_bboxes + 1e-6)
        precisions = tp_cumsum / (tp_cumsum + torch.cumsum(fp, dim = 0) + 1e-6)
        precisions = torch.cat((torch.tensor([1]), precisions))
        recalls = torch.cat((torch.tensor([0]), recalls))
        average_precisions.append(torch.trapz(precisions, recalls))
    # Calculate the mean of all of the average precisions.
    return sum(average_precisions) / len(average_precisions)
class MeanAveragePrecision(nn.Module):
    """A metric to calculate mean average precision for object detection tasks.

    This class is used as a wrapper around mean average precision calculations,
    which allows for accumulation of predictions over time. The `update` method
    can be used to add data to the class, then `compute` to get the calculated
    mean average precision, and finally `reset` to restore the accumulators to
    an empty state, allowing from-scratch calculations.
    """

    def __init__(self, num_classes = 1, iou_threshold = 0.5):
        # Set base parameters.
        super(MeanAveragePrecision, self).__init__()
        self._num_classes = num_classes
        self._iou_threshold = iou_threshold

        # Store the truth and prediction data in containers. Each entry is
        # formatted as [sample_index, class, score, x, y, w, h].
        self._prediction_data, self._truth_data = [], []
        self._num_updates = 0

    @staticmethod
    def _scalar_to_array(*args):
        """Converts 0-dimensional scalar arrays to 1-d arrays."""
        cvt = lambda x: np.expand_dims(x, 0) if x.ndim == 0 else x
        outs = [cvt(arg) for arg in args]
        return outs[0] if len(args) == 1 else outs

    def update(self, pred_data, gt_data):
        """Update the tracker with prediction and ground truth data.

        The arguments `pred_data` and `gt_data` should be either dictionaries
        (with the following keys), or lists of values which correspond in order
        to the same keys listed below:

        - `pred_data`: `boxes`, `labels`, and `scores`.
        - `gt_data`: `boxes` and `labels`.

        Note: To update a batch of data, use `batch_update()`.
        """
        # Get the relevant data from the input arguments.
        if isinstance(pred_data, dict):
            pred_boxes, pred_labels, pred_scores = \
                pred_data['boxes'], pred_data['labels'], pred_data['scores']
        else:
            pred_boxes, pred_labels, pred_scores = pred_data
        if isinstance(gt_data, dict):
            gt_boxes, gt_labels = \
                gt_data['boxes'], gt_data['labels']
        else:
            gt_boxes, gt_labels = gt_data

        # Format the data: squeeze out singleton dimensions, promote
        # scalars back to 1-d arrays, and make the box arrays 2-d.
        # (Bug fix: the original duplicated the `pred_boxes.ndim == 1`
        # expansion a second time further below; the copy is removed.)
        pred_boxes = np.squeeze(pred_boxes)
        pred_labels = np.squeeze(pred_labels)
        pred_scores = np.squeeze(pred_scores)
        pred_labels, gt_labels, pred_scores = \
            self._scalar_to_array(pred_labels, gt_labels, pred_scores)
        gt_boxes = np.squeeze(gt_boxes)
        if pred_boxes.ndim == 1:
            pred_boxes = np.expand_dims(pred_boxes, axis = 0)
        if gt_boxes.ndim == 1:
            gt_boxes = np.expand_dims(gt_boxes, axis = 0)

        # Create the data in the proper format. Labels are shifted to be
        # zero-indexed; ground truths get an implicit confidence of 1.0.
        for bbox, label in zip(gt_boxes, gt_labels):
            self._truth_data.append(
                [self._num_updates, int(label - 1), 1.0, *bbox])
        for bbox, label, score in zip(pred_boxes, pred_labels, pred_scores):
            self._prediction_data.append(
                [self._num_updates, int(label - 1), score, *bbox])

        # Increment the number of updates.
        self._num_updates += 1

    def compute(self):
        """Computes the mean average precision with the given data."""
        if self._num_updates == 0:
            log("Tried to compute mean average precision "
                "without any data updates; returning 0.0.")
            return 0.0
        return mean_average_precision(
            predicted_boxes = self._prediction_data,
            truth_boxes = self._truth_data,
            num_classes = self._num_classes,
            iou_thresh = self._iou_threshold,
        )

    def reset(self):
        """Resets the mean average precision accumulators."""
        del self._prediction_data, self._truth_data
        self._prediction_data, self._truth_data = [], []
        self._num_updates = 0
| 9,795 | 39.147541 | 85 | py |
AgML | AgML-main/agml/backend/random.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from agml.backend.tftorch import torch, tf, get_backend
def set_seed(seed = None):
    """Sets a new random seed. If None, a fresh random seed is generated.

    Seeds Python's `random`, NumPy, and the active deep learning backend
    (PyTorch or TensorFlow) with the same value.
    """
    import random
    if seed is None:
        # Bug fix: `torch.random.manual_seed` and `tf.random.set_seed`
        # require an integer, so a `None` seed previously crashed when a
        # backend was active. Draw a concrete seed instead.
        seed = random.randint(0, 2 ** 32 - 1)
    random.seed(seed)
    import numpy as np
    np.random.seed(seed)
    if get_backend() == 'torch':
        torch.random.manual_seed(seed)
    elif get_backend() in ('tf', 'tensorflow'):
        # Bug fix: the backend module stores the TensorFlow backend as
        # 'tf', so the original comparison to 'tensorflow' never matched.
        tf.random.set_seed(seed)
| 983 | 32.931034 | 74 | py |
AgML | AgML-main/agml/backend/tftorch.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This part of the backend controls the AgML methods where either
TensorFlow or PyTorch methods can be used, and prevents unnecessary
importing of either library (which takes a significant amount of time).
"""
import types
import inspect
import logging
import importlib
import functools
import numpy as np
from agml.utils.logging import log
from agml.utils.image import consistent_shapes
# Suppress any irrelevant warnings which will pop up from either backend.
import warnings
warnings.filterwarnings(
'ignore', category = UserWarning, message = '.*Named tensors.*Triggered internally.*')
class StrictBackendError(ValueError):
    """Raised when an object from one backend arrives while another is locked in."""

    def __init__(self, message = None, change = None, obj = None):
        # Build a default message describing the backend conflict when a
        # custom one is not provided.
        if message is None:
            message = (f"Backend was manually set to "
                       f"'{get_backend()}', but got an object "
                       f"from backend '{change}': {obj}.")
        super().__init__(message)
# Check if TensorFlow and PyTorch exist in the environment.
_HAS_TENSORFLOW: bool
_HAS_TORCH: bool
@functools.lru_cache(maxsize = None)
def _check_tf_torch():
global _HAS_TENSORFLOW, _HAS_TORCH
try:
import tensorflow
except ImportError:
_HAS_TENSORFlOW = False
else:
_HAS_TENSORFLOW = True
try:
import torch
except ImportError:
_HAS_TORCH = False
else:
_HAS_TORCH = True
# Default backend is PyTorch.
_BACKEND = None
_USER_SET_BACKEND = False
def get_backend():
    """Returns the current AgML backend ('tf', 'torch', or None if unset)."""
    return _BACKEND
def set_backend(backend):
    """Change the AgML backend for the current session.

    By default, AgML uses PyTorch as a backend, but it is
    compatible with both TensorFlow and PyTorch. AgML can
    also automatically inference the backend from the
    different parameters passed into `AgMLDataLoader` and
    other internal library methods.

    This method allows a user to manually set the backend; accepted
    values are 'tensorflow'/'tf' and 'torch'/'pytorch'.
    """
    global _USER_SET_BACKEND, _BACKEND

    # Check whether the user (any caller outside of AgML) has
    # modified the backend.
    mod = inspect.getmodule(inspect.stack()[1][0])
    if mod is None: # IPython shell
        _USER_SET_BACKEND = True
    elif 'agml.' not in mod.__name__:
        _USER_SET_BACKEND = True

    _check_tf_torch()
    if backend not in ['tensorflow', 'tf', 'torch', 'pytorch']:
        raise ValueError(f"Invalid backend: {backend}.")

    # Bug fix: normalize aliases to the canonical internal names before
    # comparing. Previously the TensorFlow branch compared `_BACKEND` to
    # 'tensorflow', a value `_BACKEND` never holds (it stores 'tf'), so
    # calling `set_backend('tensorflow')` twice re-ran the switch and
    # logged spuriously.
    backend = 'tf' if backend in ['tensorflow', 'tf'] else 'torch'

    # If the backend is the same, don't do anything.
    if backend == _BACKEND:
        return

    if backend == 'tf':
        if not _HAS_TENSORFLOW:
            raise ImportError(
                "TensorFlow not found on system, cannot be used as "
                "backend. Try running `pip install tensorflow`.")
        _BACKEND = 'tf'
        log("Switched backend to TensorFlow.", level = logging.INFO)
    else:
        if not _HAS_TORCH:
            raise ImportError(
                "PyTorch not found on system, cannot be used as "
                "backend. Try running `pip install torch`.")
        _BACKEND = 'torch'
        log("Switched backend to PyTorch.", level = logging.INFO)
def user_changed_backend():
    """Returns whether the backend has been manually changed by the user."""
    return _USER_SET_BACKEND
# Ported from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/util/lazy_loader.py
class LazyLoader(types.ModuleType):
    """Lazily import a module, mainly to avoid pulling in large dependencies."""

    def __init__(self, local_name, parent_module_globals, name):
        self._local_name = local_name
        self._parent_module_globals = parent_module_globals
        super(LazyLoader, self).__init__(name)

    def _load(self):
        """Import the real module and graft it into the parent's globals."""
        module = importlib.import_module(self.__name__)
        self._parent_module_globals[self._local_name] = module
        # Copy the module's namespace onto this proxy so later attribute
        # lookups hit `__dict__` directly instead of `__getattr__`.
        self.__dict__.update(module.__dict__)
        return module

    def __getattr__(self, item):
        return getattr(self._load(), item)

    def __dir__(self):
        return dir(self._load())
# Load TensorFlow and PyTorch lazily to prevent pulling them in when unnecessary.
torch = LazyLoader('torch', globals(), 'torch')
torch_data = LazyLoader('torch_data', globals(), 'torch.utils.data')
torchvision = LazyLoader('torchvision', globals(), 'torchvision')
tf = LazyLoader('tensorflow', globals(), 'tensorflow')
######### GENERAL METHODS #########
def _convert_image_to_torch(image):
"""Converts an image (np.ndarray) to a torch Tensor."""
if isinstance(image, (list, tuple)):
return torch.tensor(image)
if isinstance(image, torch.Tensor) or image.ndim == 4:
if image.shape[0] == 1 and image.shape[-1] <= 3 and image.ndim == 4:
return torch.from_numpy(image).permute(0, 3, 1, 2).float()
return image
if image.shape[0] > image.shape[-1]:
return torch.from_numpy(image).permute(2, 0, 1).float()
return torch.from_numpy(image)
def _postprocess_torch_annotation(image):
"""Post-processes a spatially augmented torch annotation."""
try:
if image.dtype.is_floating_point:
image = (image * 255).int()
except AttributeError:
pass
return image
def as_scalar(inp):
    """Converts an input value to a Python scalar."""
    # Plain Python numbers need no conversion.
    if isinstance(inp, (int, float)):
        return inp
    # NumPy scalars/arrays and torch tensors all expose `.item()`.
    if np.isscalar(inp) or isinstance(inp, (np.ndarray, torch.Tensor)):
        return inp.item()
    # TensorFlow tensors convert through `.numpy()`.
    if isinstance(inp, tf.Tensor):
        return inp.numpy()
    raise TypeError(f"Unsupported variable type {type(inp)}.")
def scalar_unpack(inp):
    """Unpacks a 1-d array into a list of scalars."""
    return list(map(as_scalar, inp))
def is_array_like(inp, no_list = False):
    """Determines if an input is a np.ndarray, torch.Tensor, or tf.Tensor."""
    # Lists/tuples count as array-like unless explicitly excluded; testing
    # them first avoids touching a backend unnecessarily.
    if isinstance(inp, (list, tuple)):
        return not no_list
    if isinstance(inp, np.ndarray):
        return True
    if isinstance(inp, torch.Tensor):
        return True
    return isinstance(inp, tf.Tensor)
def convert_to_batch(images):
    """Converts a set of images to a batch."""
    # Already-batched inputs (arrays/tensors) pass straight through.
    if is_array_like(images):
        return images

    # Shared warning for batches whose members have differing shapes.
    ragged_warning = ("Created a batch of images with different "
                      "shapes. If you want the shapes to be consistent, "
                      "run `loader.resize_images('auto')`.")

    first = images[0]
    # NumPy Arrays.
    if isinstance(first, np.ndarray):
        if consistent_shapes(images):
            return np.array(images)
        batch = np.array(images, dtype = object)
        log(ragged_warning)
        return batch
    # Torch Tensors.
    if isinstance(first, torch.Tensor):
        if consistent_shapes(images):
            return torch.stack(images)
        batch = np.array([image.numpy() for image in images], dtype = object)
        log(ragged_warning)
        return batch
    # TensorFlow Tensors.
    if isinstance(first, tf.Tensor):
        if consistent_shapes(images):
            return tf.stack(images)
        batch = tf.ragged.stack(images)
        log(ragged_warning)
        return batch
######### AGMLDATALOADER METHODS #########
class AgMLObject(object):
    """Base class for the `AgMLDataLoader` to enable inheritance.

    This class solves a bug which arises when trying to dynamically
    inherit from `tf.keras.utils.Sequence` and/or `torch.utils.data.Dataset`.
    The fact that the `AgMLDataLoader` has this `AgMLObject` as a subclass
    enables it to be able to handle dynamic inheritance (see
    `_add_dataset_to_mro`, which appends backend base classes at runtime).
    This is the sole purpose of this subclass, it does not have any features.
    """
def _add_dataset_to_mro(inst, mode):
    """Adds the relevant backend class to the `AgMLDataLoader` MRO.

    This allows for the loader to dynamically inherit from
    `tf.keras.utils.Sequence` or `torch.utils.data.Dataset` depending on
    the requested `mode` ('tf' or 'torch'). If the backend was manually
    pinned by the user to a different value, a `StrictBackendError` is
    raised instead of switching.
    """
    if mode == 'tf':
        if not get_backend() == 'tf':
            if user_changed_backend():
                raise StrictBackendError(change = 'tf', obj = inst)
            set_backend('tf')
        if tf.keras.utils.Sequence not in inst.__class__.__bases__:
            inst.__class__.__bases__ += (tf.keras.utils.Sequence, )
    if mode == 'torch':
        if not get_backend() == 'torch':
            if user_changed_backend():
                raise StrictBackendError(change = 'torch', obj = inst)
            # Consistency fix: mirror the 'tf' branch by actually switching
            # the backend once the strictness check passes (the original
            # left the backend unchanged here).
            set_backend('torch')
        if torch_data.Dataset not in inst.__class__.__bases__:
            inst.__class__.__bases__ += (torch_data.Dataset,)
| 10,087 | 33.081081 | 104 | py |
AgML | AgML-main/agml/backend/__init__.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Controls the AgML backend system.
The first part of the backend is the backend deep learning library.
The backend, one of {TensorFlow, PyTorch}, primarily exists for internal
purposes, e.g. figuring out which methods to use in the data module or
the actual model configuring/training modules.
Secondly, the backend controls the loading/saving procedure for files within
AgML, specifically data loaded or generated from the data module.
"""
from .config import (
data_save_path,
set_data_save_path,
synthetic_data_save_path,
set_synthetic_save_path,
model_save_path,
set_model_save_path,
clear_all_datasets,
downloaded_datasets
)
from .tftorch import get_backend, set_backend
from .random import set_seed
from . import experimental
| 1,371 | 33.3 | 76 | py |
AgML | AgML-main/agml/viz/labels.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import matplotlib.pyplot as plt
from agml.backend.tftorch import as_scalar, is_array_like
from agml.viz.tools import format_image, _inference_best_shape, convert_figure_to_image
from agml.viz.display import display_image
def show_images_and_labels(images,
                           labels = None,
                           info = None,
                           shape = None,
                           **kwargs):
    """Visualizes a set of images with their classification labels.

    Given a set of images and their corresponding labels, this method
    will generate a grid for the images and display them with their
    image classification labels displayed underneath them. The shape of
    the grid will by default be inferenced to be the two closest factors
    of the number of images (to be as close to square as possible).

    If you provide an `info` parameter, which will consist of the `info`
    property of an AgMLDataLoader (literally pass `loader.info`), then the
    method will convert the classification numbers to their label names.

    If you don't want to display the image (and just get the output), pass
    `no_show` as true in order to bypass this.

    Parameters
    ----------
    images : Any
        Either a list of images, a tuple of images and labels, or a list
        of image/label pairs (like you would get as the output of a dataset).
    labels : Any
        A list or array of classification labels (optional if the labels
        are already bundled inside `images`).
    info : DatasetMetadata
        The `loader.info` attribute of a dataloader, used to map numeric
        labels to human-readable class names.
    shape : Any
        The (rows, columns) shape of the display grid.

    Returns
    -------
    The rendered figure as an image array.
    """
    # Disentangle `images`/`labels`: the pairs may arrive bundled inside
    # `images` in several different formats.
    if images is not None and labels is None:
        if is_array_like(images[0], no_list = True):
            # A (batched_images, batched_labels) pair of arrays.
            if images[0].ndim >= 3:
                images, labels = images[0], images[1]
            else:
                raise ValueError(
                    "If passing a numpy array for `images`, expected at "
                    "least three dimensions: (batch, height, width).")
        elif isinstance(images[0], (tuple, list)):
            if isinstance(images[0][0], np.ndarray):
                if len(images[0]) == 2:
                    # A list of (image, label) pairs: split into two lists.
                    _images, _labels = [], []
                    for content in images:
                        _images.append(content[0])
                        _labels.append(content[1])
                    images, labels = _images, _labels
                else:
                    images, labels = images[0], images[1]
        if labels is None:
            raise TypeError(
                "Invalid format for `images` and `labels`, see documentation.")
    # NOTE(review): this wraps a single very large array into one-element
    # lists, presumably so it is displayed as a single image — confirm intent.
    if isinstance(images, np.ndarray) and images.shape[0] > 100:
        images, labels = [images], [labels]
    # Check if the labels are converted to one-hot, and re-convert them back.
    if is_array_like(labels):
        if not isinstance(labels, (list, tuple)):
            if labels.ndim == 2: # noqa
                labels = np.argmax(labels, axis = -1)
    # If a prime number is passed, e.g. 23, then the `_inference_best_shape`
    # method will return the shape of (23, 1). Likely, the user is expecting
    # a non-rectangular shape such as (6, 4), where the bottom right axis is
    # empty. This method does not support such computations (yet).
    if shape is None:
        shape = _inference_best_shape(len(images))
    if max(shape) > 20:
        raise NotImplementedError(
            "Length of maximum shape length is greater than 20. "
            "This method does not support non-rectangular shapes.")
    fig, axes = plt.subplots(
        shape[0], shape[1], figsize = (shape[1] * 2, shape[0] * 2))
    try:
        iter_ax = axes.flat
    except AttributeError: # If showing only a single image.
        iter_ax = [axes]
    for image, label, ax in zip(images, labels, iter_ax):
        ax.imshow(format_image(image))
        ax.set_aspect(1)
        label = as_scalar(label)
        if info is not None:
            # Map the numeric label to its class name for display.
            label = info.num_to_class[label]
        # Strip ticks and spines so only the image and its label show.
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.tick_params(axis = 'both', which = 'both', bottom = False,
                       top = False, left = False, right = False)
        plt.setp(ax.spines.values(), visible = False)
        ax.set_xlabel(label)
    # Display and return the image.
    image = convert_figure_to_image()
    if not kwargs.get('no_show', False):
        _ = display_image(image)
    return image
| 5,095 | 38.8125 | 87 | py |
AgML | AgML-main/agml/viz/tools.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A tools module for `agml.viz`, which also serves as almost a
mini-backend to control ops such as the colormap being used.
"""
import os
import io
import json
import functools
import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from agml.backend.tftorch import tf, torch
from agml.backend.config import _get_config, _update_config
from agml.utils.logging import log
# Sets the colormaps used in the other `agml.viz` methods.
@functools.lru_cache(maxsize = None)
def _load_colormaps():
    """Loads the AgML colormaps from the packaged JSON asset.

    Each colormap's color list is tiled five times so that cycling
    through colors never runs out when many categories are drawn.
    """
    asset_path = os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        '_assets', 'viz_colormaps.json')
    with open(asset_path, 'r') as f:
        cmaps = json.load(f)
    return {name: colors * 5 for name, colors in cmaps.items()}
# The cached colormap table and the key of the currently-active colormap.
_COLORMAPS: dict = _load_colormaps()
_COLORMAP_CHOICE: str = 'default'
# Sets the visualization backend: either `matplotlib` or `cv2`.
@functools.lru_cache(maxsize = None)
def _load_backend():
    """Resolves the visualization backend from the saved configuration.

    Defaults to (and persists) 'matplotlib' if no backend is configured.
    """
    current = _get_config('viz_backend')
    if current is not None:
        return current
    _update_config('viz_backend', 'matplotlib')
    return _get_config('viz_backend')
# The active visualization backend, resolved once at import time.
_BACKEND: str = _load_backend()
def get_colormap():
    """Returns the list of RGB colors for the active AgML colormap."""
    # Reading module-level state needs no `global` declaration.
    return _COLORMAPS[_COLORMAP_CHOICE]
def set_colormap(colormap):
    """Sets the current AgML colormap used in color displays.

    This method accepts one argument, `colormap`, which can be
    any of the colormaps listed in `_assets/viz_colormaps.json`,
    namely one of the following:

    1. "default": Traditional matplotlib RGB colors.
    2. "agriculture": Various shades of green (for agriculture).

    If you want to set a custom colormap, then pass a list of RGB
    values which will be used as the colormap.

    Parameters
    ----------
    colormap : {str, list}
        Either the name of a built-in colormap, or a list of RGB
        triples for a custom colormap.

    Raises
    ------
    ValueError
        If a custom list contains non-RGB entries, or a string does
        not name a known colormap.
    TypeError
        If `colormap` is neither a string nor a list.
    """
    global _COLORMAP_CHOICE, _COLORMAPS
    if isinstance(colormap, list):
        if not all(len(i) == 3 for i in colormap):
            raise ValueError(
                "If you want a custom colormap, then pass a list of RGB values.")
        # Register the custom color list and make it active.
        _COLORMAPS['custom'] = colormap
        _COLORMAP_CHOICE = 'custom'
    elif isinstance(colormap, str):
        colormap = colormap.lower()
        if colormap not in _COLORMAPS.keys():
            raise ValueError(f"Invalid colormap {colormap} received.")
        # Bugfix: previously the *name string* itself was stored as the
        # 'custom' colormap, so `get_colormap()` returned e.g. the
        # string "agriculture" instead of that colormap's color list.
        # Selecting a named built-in colormap just switches the choice.
        _COLORMAP_CHOICE = colormap
    else:
        raise TypeError(f"Invalid colormap of type {type(colormap)}.")
def get_viz_backend():
    """Returns the name of the current AgML visualization backend."""
    # Reading module-level state needs no `global` declaration.
    return _BACKEND
def set_viz_backend(backend):
    """Sets the global AgML visualization backend.

    The backend must be either 'matplotlib' or 'cv2'; the choice is
    persisted to the AgML configuration.
    """
    global _BACKEND
    if backend not in ('matplotlib', 'cv2'):
        raise ValueError(f"Invalid backend {backend} received.")
    _update_config('viz_backend', backend)
    _BACKEND = backend
def format_image(img, mask = False):
    """Formats an image to be used in a Matplotlib visualization.

    This method takes in one of a number of common image/array types
    (np.ndarray, torch.Tensor, tf.Tensor, or PIL.Image) and returns a
    NumPy array formatted as matplotlib expects it: channels-last and,
    for displayable images, with a `uint8` dtype.

    This method is primarily necessary to serve as convenience
    in a few situations: converting images from PyTorch's channels
    first format to channels last, or removing the extra grayscale
    dimension in the case of grayscale images.

    Parameters
    ----------
    img : Any
        An np.ndarray, torch.Tensor, tf.Tensor, or PIL.Image.
    mask : bool
        Whether the image is a segmentation mask. Masks are cast to
        `uint8` without any value rescaling.

    Returns
    -------
    An np.ndarray formatted correctly for a Matplotlib visualization.
    """
    # Get the numpy array from the image type.
    if isinstance(img, np.ndarray):
        img = img
    elif isinstance(img, Image.Image):
        img = np.array(img).reshape((img.height, img.width, len(img.getbands())))
    elif isinstance(img, torch.Tensor):
        # Bugfix: the original converted CUDA tensors to numpy and then
        # called `.numpy()` on the result a second time (AttributeError),
        # and never detached CPU tensors that require gradients (for
        # which `.numpy()` raises). Move to CPU first, then detach once.
        if img.is_cuda:
            img = img.cpu()
        img = img.detach().numpy()
    elif isinstance(img, tf.Tensor):
        img = img.numpy()
    else:
        raise TypeError(
            f"Expected either an np.ndarray, torch.Tensor, "
            f"tf.Tensor, or PIL.Image, got {type(img)}.")
    # Convert channels_first to channels_last.
    if img.ndim == 4:
        if img.shape[0] > 1:
            raise ValueError(
                f"Got a batch of images with shape {img.shape}, "
                f"expected at most a batch of one image.")
        img = np.squeeze(img)
    if img.shape[0] <= 3:
        img = np.transpose(img, (1, 2, 0))
    # Remove the grayscale axis.
    if img.shape[-1] == 1:
        img = np.squeeze(img)
    # Float images in the [0, 1] range are rescaled up to [0, 255];
    # floats already beyond 1 are assumed to be 0-255 and just cast.
    if mask:
        img = img.astype(np.uint8)
    else:
        if np.issubdtype(img.dtype, np.inexact):
            if not img.max() <= 1: # noqa
                img = img.astype(np.uint8)
            else:
                img = (img * 255).astype(np.uint8)
    # Convert 64-bit integer to unsigned 8-bit.
    if img.dtype == np.int64:
        log("Converting image of dtype `np.int64` to `np.uint8` for display. "
            "This may cause a loss in precision/invalid result.")
        img = img.astype(np.uint8)
    # Return the formatted image.
    return img
def convert_figure_to_image(fig = None):
    """Converts a Matplotlib figure to an image array.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        The figure to render; defaults to the current figure.

    Returns
    -------
    The rendered figure as a BGR `np.ndarray` (decoded by OpenCV).
    """
    # Render the figure to an in-memory PNG, then decode it to an array.
    buf = io.BytesIO()
    fig = fig if fig is not None else plt.gcf()
    fig.savefig(buf, format = 'png')
    buf.seek(0)
    # Bugfix: `np.fromstring` is deprecated and removed in NumPy >= 2.0;
    # `np.frombuffer` is the supported equivalent for raw bytes.
    arr = np.frombuffer(buf.read(), dtype = np.uint8)
    return cv2.imdecode(arr, cv2.IMREAD_COLOR)
def _inference_best_shape(n_images):
"""Inferences the best matplotlib row/column layout.
This method searches for the two closest factors of the number
`n_images`, and returns this tuple as the best shape, since this
is the closest to a square grid as possible.
"""
a, b, i = 1, n_images, 0
while a < b:
i += 1
if n_images % i == 0:
a = i
b = n_images // a
return [b, a]
| 6,828 | 31.061033 | 82 | py |
AgML | AgML-main/agml/viz/general.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
from agml.backend.tftorch import is_array_like
from agml.viz.boxes import show_image_and_boxes
from agml.viz.masks import show_image_and_overlaid_mask
from agml.viz.labels import show_images_and_labels
from agml.viz.tools import format_image, _inference_best_shape, convert_figure_to_image
from agml.viz.display import display_image
def show_sample(loader, image_only = False, **kwargs):
    """A simplified convenience method that visualizes a sample from a loader.

    This method works for every annotation type: it picks the visualization
    routine matching `loader.task` and calls it on a sample drawn from the
    loader (or on the sample passed via the `sample` keyword argument).
    To customize the output, call the task-specific methods directly.

    Parameters
    ----------
    loader : AgMLDataLoader
        An AgMLDataLoader of any annotation type.
    image_only : bool
        Whether to only display the image.

    Returns
    -------
    The matplotlib figure.
    """
    sample = kwargs.get('sample', None)
    if sample is None:
        sample = loader[0]
    if image_only:
        return show_images(sample[0])
    no_show = kwargs.get('no_show', False)
    task = loader.task
    if task == 'object_detection':
        return show_image_and_boxes(sample, info = loader.info, no_show = no_show)
    if task == 'semantic_segmentation':
        return show_image_and_overlaid_mask(sample, no_show = no_show)
    if task == 'image_classification':
        return show_images_and_labels(sample, info = loader.info, no_show = no_show)
def show_images(images,
                shape = None,
                **kwargs):
    """Shows multiple images in a grid format with the given shape.

    Given a set of images, this method lays them out in a grid and
    displays them. When no `shape` is given, the grid shape is inferred
    as the two closest factors of the number of images, so the layout
    is as close to square as possible.

    Pass `no_show = True` to skip displaying the figure and just get
    the rendered output back.

    Parameters
    ----------
    images : Any
        Either a list of images, a tuple of images and labels, or a list
        of image/label pairs (like you would get as the output of a dataset).
    shape : Any
        The shape of the display grid.

    Returns
    -------
    The rendered figure as an image array.
    """
    # A single array input is displayed directly, without a grid.
    if not isinstance(images, (list, tuple)):
        if is_array_like(images):
            images = format_image(images)
            if not kwargs.get('no_show', False):
                display_image(images)
            return images
    # If a prime number is passed, e.g. 23, then `_inference_best_shape`
    # returns (23, 1). The user likely expects a non-rectangular shape
    # such as (6, 4) with an empty bottom-right axis, but such layouts
    # are not supported (yet).
    if shape is None:
        shape = _inference_best_shape(len(images))
    if max(shape) > 20:
        raise NotImplementedError(
            "Length of maximum shape length is greater than 20. "
            "This method does not support non-rectangular shapes.")
    n_rows, n_cols = shape
    fig, axes = plt.subplots(
        n_rows, n_cols, figsize = (n_cols * 2, n_rows * 2))
    # `axes` is a bare Axes object (no `.flat`) for a 1x1 grid.
    if hasattr(axes, 'flat'):
        axis_iter = axes.flat
    else:
        axis_iter = [axes]
    for image, ax in zip(images, axis_iter):
        ax.imshow(format_image(image))
        ax.set_aspect(1)
        # Strip ticks and spines so only the images are visible.
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.tick_params(
            axis = 'both', which = 'both', bottom = False,
            top = False, left = False, right = False
        )
        plt.setp(ax.spines.values(), visible = False)
    fig.tight_layout()
    # Render the figure, optionally display it, and return it.
    image = convert_figure_to_image()
    if not kwargs.get('no_show', False):
        display_image(image)
    return image
| 4,786 | 35.823077 | 87 | py |
AgML | AgML-main/agml/utils/general.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import cv2
import glob
import math
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from math import pi, floor
from scipy import signal
import numpy as np
# Represents an empty object, but allows passing `None`
# as an independent object in certain cases (a sentinel that
# distinguishes "no argument given" from an explicit `None`).
NoArgument = object()
def placeholder(obj):
    """Equivalent of lambda x: x, but enables pickling.

    A module-level identity function (unlike a lambda) can be
    serialized by `pickle`, so it is usable as a default transform.
    """
    return obj
def to_camel_case(s):
    """Converts a given snake/kebab-case string `s` to camel case.

    Runs of underscores and hyphens are treated as word separators;
    each word is title-cased and the separators are removed.
    """
    # `re.sub(...).title().replace(...)` already yields the final string,
    # so the original's trailing ''.join(s) was redundant and is removed.
    return re.sub(r"[_-]+", " ", s).title().replace(" ", "")
def resolve_list_value(val):
    """Collapses a one-element sequence to its single value.

    Returns `val[0]` when `val` holds exactly one element, and the
    sequence itself otherwise.
    """
    return val[0] if len(val) == 1 else val
def resolve_tuple_values(*inputs, custom_error = None):
    """Determines whether values are distributed amongst the values in `inputs`.

    If the first argument is a sequence bundling all of the expected
    values (and every other argument is `None`), the bundle is returned
    unpacked; otherwise the arguments are returned unchanged. As a
    special case, a two-element (image, COCO JSON dict) pair passed
    where three values are expected is expanded into
    (image, bounding boxes, labels).

    Raises
    ------
    ValueError
        If the bundled sequence holds the wrong number of values
        (`custom_error`, when given, is used as the message).
    """
    first, rest = inputs[0], inputs[1:]
    bundled = isinstance(first, (list, tuple)) and all(c is None for c in rest)
    if not bundled:
        return inputs
    if len(first) == len(inputs):
        return first
    # special case for COCO JSON
    if len(inputs) == 3 and len(first) == 2 and isinstance(first[1], dict):
        annotation = first[1]
        try:
            return first[0], annotation['bbox'], annotation['category_id']
        except KeyError:
            return first[0], annotation['bboxes'], annotation['labels']
    if custom_error is not None:
        raise ValueError(custom_error)
    raise ValueError(f"Expected either a tuple with {len(inputs)} values "
                     f"or {len(inputs)} values across two arguments.")
def resolve_tuple(sequence):
    """Converts any sequence (including numpy arrays) into a tuple."""
    if isinstance(sequence, np.ndarray):
        # Convert array elements to native Python scalars first.
        sequence = sequence.tolist()
    return tuple(sequence)
def has_nested_dicts(obj):
    """Returns whether any value of the dictionary `obj` is itself a dict."""
    for value in obj.values():
        if isinstance(value, dict):
            return True
    return False
def as_scalar(inp):
    """Converts an input value to a Python scalar.

    Supports Python numbers, numpy scalars and single-element arrays,
    and (via lazy imports) PyTorch and TensorFlow tensors.

    Raises
    ------
    TypeError
        If the input type cannot be converted to a scalar.
    """
    if isinstance(inp, (int, float)):
        return inp
    # Bugfix: `np.isscalar` is also True for plain Python strings and
    # complex numbers, which have no `.item()` and crashed with an
    # AttributeError instead of the intended TypeError below.
    # `np.generic` precisely captures numpy scalar types.
    if isinstance(inp, np.generic):
        return inp.item()
    if isinstance(inp, np.ndarray):
        return inp.item()
    # torch/tf are imported lazily so neither is a hard dependency.
    from agml.backend.tftorch import torch
    if isinstance(inp, torch.Tensor):
        return inp.item()
    from agml.backend.tftorch import tf
    if isinstance(inp, tf.Tensor):
        return inp.numpy()
    raise TypeError(f"Unsupported variable type {type(inp)}.")
def scalar_unpack(inp):
    """Unpacks a 1-d array into a list of Python scalars."""
    return list(map(as_scalar, inp))
def is_array_like(inp):
    """Determines if an input is a np.ndarray, torch.Tensor, or tf.Tensor."""
    if isinstance(inp, np.ndarray):
        return True
    # torch/tf are imported lazily so that neither framework becomes a
    # hard dependency of this module.
    from agml.backend.tftorch import torch
    if isinstance(inp, torch.Tensor):
        return True
    from agml.backend.tftorch import tf
    return isinstance(inp, tf.Tensor)
def shapes(seq):
    """Returns the shapes (or lengths) of all of the objects in the sequence.

    For each object, its `shape` attribute is used when present (e.g.
    numpy arrays); otherwise its `len()` is used.

    Raises
    ------
    ValueError
        If any object has neither a shape nor a length.
    """
    try:
        return [getattr(obj, 'shape', len(obj)) for obj in seq]
    except TypeError as exc:
        # Narrowed from a bare `except:` (which also swallowed e.g.
        # KeyboardInterrupt); chained for easier debugging. Only
        # `len()` on a length-less object can raise here.
        raise ValueError(
            f"One or more of the objects has no shape or length: {seq}.") from exc
def weak_squeeze(arr, ndims = 2):
    """Performs a 'weak squeeze', adding dimensions back if necessary.

    All singleton dimensions of a numpy array are removed first, then
    leading singleton axes are prepended until the result has at least
    `ndims` dimensions.
    """
    if isinstance(arr, np.ndarray):
        arr = np.squeeze(arr)
    # Re-add leading singleton axes until the minimum rank is reached.
    while arr.ndim < ndims:
        arr = arr[np.newaxis, ...]
    return arr
def is_float(num):
    """Determines if a value is (or can be converted to) a float.

    Returns True for float types (including numpy floats) and any value
    accepted by the `float()` constructor; False otherwise.
    """
    if isinstance(num, (float, np.float32, np.float64)):
        return True
    try:
        float(num)
    except (ValueError, TypeError):
        # Bugfix: TypeError is caught too — e.g. `is_float(None)` or
        # `is_float([1])` previously raised instead of returning False.
        return False
    return True
def is_int(num):
    """Determines if a value is (or can be converted to) an int.

    Returns True for integer types (including numpy integers) and any
    value accepted by the `int()` constructor; False otherwise.
    """
    if isinstance(num, (int, np.int32, np.int64)):
        return True
    try:
        int(num)
    except (ValueError, TypeError):
        # Bugfix: TypeError is caught too — e.g. `is_int(None)` or
        # `is_int([1])` previously raised instead of returning False.
        return False
    return True
| 4,734 | 29.352564 | 94 | py |
AgML | AgML-main/agml/data/object.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import abc
from functools import wraps
import cv2
import numpy as np
from agml.framework import AgMLSerializable
from agml.utils.image import imread_context
from agml.backend.tftorch import scalar_unpack
class DataObject(AgMLSerializable):
    """Stores a single piece of data and its corresponding annotation.

    This class stores an image and its corresponding annotation
    depending on the task, such as a label for a image classification
    task, a COCO JSON dictionary for an object detection task, or
    a mask for a semantic segmentation task.

    Fundamentally, it works as a two-object list, but the data is stored
    internally in a specific representation and images are only loaded
    lazily when necessary. This object is used internally in the
    `AgMLDataLoader`, and objects returned from it are returned as their
    expected contents (NumPy arrays/dictionaries/integers) when necessary.
    """
    # State attributes copied when (de)serializing this object.
    serializable = frozenset(
        ('image_object', 'annotation_obj', 'dataset_root'))
    # Abstract methods every subclass must implement; their docstrings
    # are copied over from this class by `__init_subclass__` below.
    _abstract = frozenset(('_load_image_input', '_parse_annotation'))
    def __init__(self, image, annotation, root):
        # The `image` parameter is constant among different tasks.
        self._image_object = image
        # The `annotation` parameter varies with the task.
        self._annotation_obj = annotation
        # The `root` is the local root of the dataset. This is used
        # for object detection datasets primarily, whose COCO JSON
        # dictionary doesn't contain the full path, only the base.
        self._dataset_root = root
    def __len__(self):
        # A `DataObject` always behaves as an (image, annotation) pair.
        return 2
    def __getitem__(self, i):
        # Indexing loads the data and indexes into the loaded pair.
        return self.get()[i]
    def __repr__(self):
        return f"<DataObject: {self._image_object}, {self._annotation_obj}>"
    @staticmethod
    def _parse_image(path):
        # Loads an RGB image from a local path (OpenCV reads BGR, so
        # the channel order is converted here).
        with imread_context(os.path.abspath(path)) as image:
            return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    @staticmethod
    def _parse_depth_image(path):
        # Loads a depth image unchanged (`flags = -1`) as 32-bit ints.
        with imread_context(os.path.abspath(path), flags = -1) as image:
            return image.astype(np.int32)
    @staticmethod
    def _parse_spectral_image(path):
        None # noqa, prevents `all abstract methods must be implemented`
        raise NotImplementedError("Multi/Hyperspectral images are not yet supported.")
    def get(self):
        """Returns the image and annotation pair with applied transforms.

        This method is the main exposed method to process the data. It loads
        the image and processes the annotation, then applies transformations.
        """
        return self._load()
    def _load(self):
        """Loads the image and annotation and returns them."""
        image = self._load_image_input(self._image_object)
        annotation = self._parse_annotation(self._annotation_obj)
        return image, annotation
    def __init_subclass__(cls, **kwargs):
        # Wraps derived abstract methods with the corresponding
        # docstring from this `DataObject` class and updates
        # the derived class' dictionary.
        wrapped_updates = {}
        self = super(DataObject, cls).__thisclass__ # noqa
        for name, method in cls.__dict__.items():
            if name in cls._abstract:
                wrapped = wraps(getattr(self, name))(method)
                wrapped_updates[name] = wrapped
        # Apply outside the first loop: the class dict must not be
        # mutated while it is being iterated.
        for name, method in wrapped_updates.items(): # noqa
            setattr(cls, name, method)
    @staticmethod
    def create(contents, task, root):
        """Creates a new `DataObject` subclass instance for the given task.

        Raises a `ValueError` for any task other than image
        classification, image regression, object detection, or
        semantic segmentation.
        """
        if task == 'image_classification':
            cls = ImageClassificationDataObject
        elif task == 'image_regression':
            cls = ImageRegressionDataObject
        elif task == 'object_detection':
            cls = ObjectDetectionDataObject
        elif task == 'semantic_segmentation':
            cls = SemanticSegmentationDataObject
        else:
            raise ValueError(f"Unsupported task {task}.")
        return cls(*contents, root)
    # The following methods are used to load the image and annotation.
    @abc.abstractmethod
    def _load_image_input(self, path):
        """Loads image inputs based on the task. Derived by subclasses."""
        raise NotImplementedError()
    @abc.abstractmethod
    def _parse_annotation(self, obj):
        """Parses an annotation based on the task. Derived by subclasses."""
        raise NotImplementedError()
    # The following methods function independently whenever an
    # annotation is loaded. Depending on the task, the appropriate
    # method is called and a corresponding piece of data is returned.
    @staticmethod
    def _parse_label(obj):
        """Parses a label for an image classification task."""
        try:
            obj = int(obj)
        except TypeError:
            raise Exception(f"Could not convert object {obj} of "
                            f"type {type(obj)} to a scalar integer "
                            f"for image classification.")
        return obj
    @staticmethod
    def _parse_coco(obj):
        """Parses a COCO JSON dictionary for an object detection task.

        Merges the per-object COCO entries into a single dictionary of
        arrays keyed by annotation field.
        """
        annotation = {'bbox': [], 'category_id': [], 'area': [],
                      'image_id': "", 'iscrowd': [], 'segmentation': []}
        for a_set in obj:
            x, y, w, h = a_set['bbox']
            # Clip negative box origins to zero before collecting.
            x = int(np.clip(x, 0, None))
            y = int(np.clip(y, 0, None))
            a_set['bbox'] = [x, y, w, h]
            annotation['bbox'].append(a_set['bbox'])
            annotation['category_id'].append(a_set['category_id'])
            annotation['iscrowd'].append(a_set['iscrowd'])
            annotation['segmentation'].append(a_set['segmentation'])
            annotation['area'].append(a_set['area'])
            annotation['image_id'] = a_set['image_id']
        for key, value in annotation.items():
            # Creating nested sequences from ragged arrays (see numpy).
            if key in ['segmentation']:
                out = np.array(value, dtype = object)
            else:
                out = np.array(value)
            if np.isscalar(out):
                out = scalar_unpack(out)
            annotation[key] = out
        return annotation
    @staticmethod
    def _parse_mask(obj):
        """Parses a mask for a semantic segmentation task."""
        with imread_context(os.path.realpath(obj)) as image:
            if image.ndim == 3:
                # A 3-channel mask must have identical channels;
                # keep just the first one.
                if not np.all(image[:, :, 0] == image[:, :, 1]):
                    raise TypeError(
                        f"Invalid annotation mask of shape {image.shape}.")
                image = image[:, :, 0]
            return np.squeeze(image)
class ImageClassificationDataObject(DataObject):
    """Serves as a `DataObject` for image classification tasks."""
    def _load_image_input(self, path):
        # Classification inputs are plain RGB images.
        return self._parse_image(path)
    def _parse_annotation(self, obj):
        # Annotations are scalar integer class labels.
        return self._parse_label(obj)
class ImageRegressionDataObject(DataObject):
    """Serves as a `DataObject` for image regression tasks."""
    def _load_image_input(self, contents):
        # The simple case: a single input image given by its path.
        if isinstance(contents, str) and os.path.exists(contents):
            return self._parse_image(contents)
        # Otherwise `contents` maps input types to paths; each input is
        # loaded independently with the parser matching its type.
        loaded = {}
        for input_type, input_path in contents.items():
            if input_type == 'image':
                loaded[input_type] = self._parse_image(input_path)
            elif input_type == 'depth_image':
                loaded[input_type] = self._parse_depth_image(input_path)
            else:
                loaded[input_type] = self._parse_spectral_image(input_path)
        return loaded
    def _parse_annotation(self, obj):
        # Regression targets are returned unchanged.
        return obj
class ObjectDetectionDataObject(DataObject):
    """Serves as a `DataObject` from object detection tasks."""
    def _load_image_input(self, path):
        # COCO JSON stores only the image basename; join it with the
        # dataset root's `images` directory to form the full path.
        path = os.path.join(self._dataset_root, 'images', path)
        return self._parse_image(path)
    def _parse_annotation(self, obj):
        # Annotations are COCO JSON entries merged into arrays.
        return self._parse_coco(obj)
class SemanticSegmentationDataObject(DataObject):
    """Serves as a `DataObject` for semantic segmentation tasks."""
    def _load_image_input(self, path):
        # Segmentation inputs are plain RGB images.
        return self._parse_image(path)
    def _parse_annotation(self, obj):
        # Annotations are single-channel class masks loaded from disk.
        return self._parse_mask(obj)
AgML | AgML-main/agml/data/tools.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from agml.backend.tftorch import is_array_like
from agml.utils.general import resolve_list_value
def _resolve_coco_annotations(annotations):
    """Resolves COCO annotations to a standard format.

    COCO annotations may arrive either as one dictionary whose keys hold
    multiple elements (e.g. several bounding boxes and areas), or as a
    list of per-object COCO dictionaries. This method normalizes the
    latter case into the former.
    """
    # Tensors (torch/tf) are converted to numpy; numpy and dicts pass
    # straight through.
    if is_array_like(annotations) and not isinstance(annotations, np.ndarray):
        return annotations.numpy()
    if isinstance(annotations, np.ndarray):
        return annotations
    if isinstance(annotations, dict):
        return annotations
    if not isinstance(annotations, list):
        raise TypeError(
            "Expected either a single COCO annotation "
            "dictionary or a list of multiple dictionaries.")
    # Lists of non-dict entries (raw boxes) and one-element lists are
    # returned unchanged.
    if not isinstance(annotations[0], dict) or len(annotations) == 1:
        return annotations
    # Merge the per-object dictionaries into one dictionary of arrays.
    annotation = {'bboxes': [], 'labels': [], 'area': [],
                  'image_id': "", 'iscrowd': [], 'segmentation': []}
    for a_set in annotations:
        annotation['bboxes'].append(a_set['bbox'])
        annotation['labels'].append(a_set['category_id'])
        annotation['iscrowd'].append(a_set['iscrowd'])
        annotation['segmentation'].append(a_set['segmentation'])
        annotation['area'].append(a_set['area'])
        annotation['image_id'] = a_set['image_id']
    for key, value in annotation.items():
        out = np.array(value)
        if np.isscalar(out):
            out = out.item()
        annotation[key] = out
    return annotation
def coco_to_bboxes(annotations):
    """Extracts the bounding boxes and labels from COCO JSON annotations.

    Given either a list of COCO JSON annotations or a single dictionary
    with multiple values, this method extracts only the bounding boxes
    and category labels and returns them as two arrays.

    Parameters
    ----------
    annotations : {list, dict}
        The COCO JSON annotations in list/dict format.

    Returns
    -------
    Two arrays consisting of the bounding boxes and labels.
    """
    resolved = _resolve_coco_annotations(annotations)
    return resolved['bboxes'], resolved['labels']
def convert_bbox_format(annotations_or_bboxes, fmt):
    """Converts bounding box formats for COCO JSON and others.

    This method converts the format of bounding boxes as specified
    in the 'fmt' argument, which describes the format of the bounding
    boxes passed to the 'annotations_or_bboxes' argument. From there,
    it will convert to the standard COCO JSON bounding box format,
    namely ('x1', 'y1', 'width', 'height'). This method supports the
    following conversions (note that 'x1' and 'y1' are the top-left
    coordinates, and 'x2' and 'y2' are top-right):

    1. ('x1', 'x2', 'y1', 'y2') to COCO JSON.
    2. ('x_min', 'y_min', 'x_max', 'y_max') to COCO JSON. This format
       can also be passed with the simple string `pascal_voc`.
    3. ('x_min', 'y_min', 'width', 'height') to COCO JSON.

    Note that the variables in the bounding boxes in the above format
    can be in any order, this just needs to be reflected in the 'fmt'
    argument, and it should contain some combination of the above.

    Parameters
    ----------
    annotations_or_bboxes : {np.ndarray, list, dict}
        Either a COCO JSON annotation dictionary, or a numpy array/list
        with all of the annotations.
    fmt : {list, tuple, str}
        A list/tuple with one of the above formats, a space- or
        comma-separated string of field names, or a preset name
        ('pascal_voc', 'efficientdet').

    Returns
    -------
    The initial argument type (either dict or array) with the bounding
    boxes formatted in the COCO JSON format.
    """
    annotations_or_bboxes = _resolve_coco_annotations(annotations_or_bboxes)
    if isinstance(annotations_or_bboxes, dict):
        annotations = annotations_or_bboxes['bboxes']
    else:
        annotations = annotations_or_bboxes
    # A single flat box is wrapped so the conversion loop below works.
    if isinstance(annotations[0], (int, float)):
        annotations = [annotations]
    # Expand preset names, then split a string spec into field names.
    if isinstance(fmt, str):
        if 'voc' in fmt or 'pascal' in fmt:
            fmt = 'x_min y_min x_max y_max'
        elif 'efficientdet' in fmt or 'effdet' in fmt:
            fmt = 'y_min x_min y_max x_max'
        if ',' in fmt:
            fmt = fmt.split(',')
        else:
            fmt = fmt.split(' ')
    if len(fmt) != 4:
        raise ValueError(f"Argument 'fmt' should contain 4 values, got {len(fmt)}.")
    # Define all of the intermediate conversion methods. Each receives
    # its coordinates already reordered into the base format's order.
    def _x1_x2_y1_y2_to_coco(annotation): # noqa
        x1, x2, y1, y2 = annotation
        width, height = abs(x2 - x1), abs(y2 - y1)
        return [x1, y1, width, height]
    def _xmin_ymin_xmax_ymax_to_coco(annotation): # noqa
        xmin, ymin, xmax, ymax = annotation
        width, height = abs(xmax - xmin), abs(ymax - ymin)
        return [xmin, ymin, width, height]
    def _xmin_ymin_width_height_to_coco(annotation): # noqa
        # NOTE(review): this treats `y_min` as the *bottom* edge of the
        # box (subtracting the height to reach the top-left corner) —
        # confirm that callers pass bottom-origin coordinates here.
        xmin, ymin, width, height = annotation
        x1, y1 = xmin, ymin - height
        return [x1, y1, width, height]
    def _x1_y1_width_height_to_coco(annotation): # noqa
        return annotation # This is just here for reordering.
    # Resolve the format
    fmt_bases = [['x1', 'x2', 'y1', 'y2'],
                 ['x_min', 'y_min', 'x_max', 'y_max'],
                 ['x_min', 'y_min', 'width', 'height'],
                 ['x1', 'y1', 'width', 'height']]
    fmt_map = {0: _x1_x2_y1_y2_to_coco,
               1: _xmin_ymin_xmax_ymax_to_coco,
               2: _xmin_ymin_width_height_to_coco,
               3: _x1_y1_width_height_to_coco}
    fmt_found = False
    map_fmt, select_order = None, None
    for indx, base in enumerate(fmt_bases):
        if all(i in base for i in fmt):
            fmt_found, map_fmt = True, fmt_map[indx]
            # Bugfix: reordering a box from the user's `fmt` order into
            # the base order requires the *inverse* permutation, i.e.
            # position j of the base-ordered box comes from the position
            # of base[j] inside `fmt`. The previous expression
            # ([base.index(i) for i in fmt]) applied the forward
            # permutation, which silently produced wrong coordinates
            # for cyclic orderings such as 'y1 x1 x2 y2' (involutions
            # like plain swaps happened to work, masking the bug).
            select_order = [fmt.index(i) for i in base]
    if not fmt_found:
        raise ValueError(
            f"Invalid format {fmt}, see `convert_bbox_format` "
            f"for information about valid formats.")
    # Convert the formats
    formatted_annotations = []
    for bbox in annotations:
        sorted_bbox = np.array(bbox)[select_order]
        formatted_annotations.append(map_fmt(sorted_bbox))
    formatted_annotations = np.array(formatted_annotations)
    if isinstance(annotations_or_bboxes, dict):
        res = annotations_or_bboxes.copy()
        res['bboxes'] = formatted_annotations
        return res
    return resolve_list_value(formatted_annotations)
| 7,351 | 39.395604 | 84 | py |
AgML | AgML-main/agml/data/experimental.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Experimental data utilities that are in development.
"""
import functools
import numpy as np
__all__ = ['generate_keras_segmentation_dual_transform']
def generate_keras_segmentation_dual_transform(*layers):
    """Generates a `dual_transform` pipeline from Keras preprocessing layers.

    Builds a transform for the `dual_transform` argument of *semantic
    segmentation* loaders, applying each given Keras preprocessing layer
    identically to both the image and its annotation mask. Because
    TensorFlow's operation-level random states differ from its
    module-level random state, each seeded layer is re-instantiated at
    call time with an explicitly shared random seed, so the image and the
    annotation receive the exact same random transformation.

    Parameters
    ----------
    layers : Any
        Either a single `tf.keras.Sequential` model holding preprocessing
        layers, or a set of instantiated preprocessing layers.

    Returns
    -------
    A callable `(image, annotation) -> (image, annotation)` applying the
    full preprocessing pipeline to both inputs in lockstep.
    """
    import tensorflow as tf

    # Unwrap a single `Sequential` model into its constituent layers.
    if len(layers) == 1 and isinstance(layers[0], tf.keras.Sequential):
        layers = layers[0].layers

    # For each layer, record its class, its build configuration (minus
    # any `seed`, which is injected freshly at call time), and whether
    # the layer accepts a seed at all.
    layer_specs = []
    for keras_layer in layers:
        build_config = keras_layer.get_config()
        seeded = 'seed' in build_config
        if seeded:
            build_config.pop('seed')
        layer_specs.append((keras_layer.__class__, build_config, seeded))

    def _apply_dual(layer_cls, build_config, image, annotation, seed):
        # Construct two fresh layer instances sharing the same seed so
        # the image and the annotation undergo an identical transform.
        build_kwargs = dict(build_config)
        if seed is not None:
            build_kwargs['seed'] = seed
        image = layer_cls(**build_kwargs)(image)
        annotation = layer_cls(**build_kwargs)(annotation)
        return image, annotation

    def _dual_transform(image, annotation):
        for layer_cls, build_config, seeded in layer_specs:
            # Draw a new seed per layer, per call (up to int32 max).
            seed = np.random.randint(2147483647) if seeded else None
            image, annotation = _apply_dual(
                layer_cls, build_config, image, annotation, seed)
        return image, annotation

    return _dual_transform
| 3,748 | 34.704762 | 77 | py |
AgML | AgML-main/agml/data/manager.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from agml.framework import AgMLSerializable
from agml.data.object import DataObject
from agml.data.builder import DataBuilder
from agml.data.metadata import DatasetMetadata
from agml.data.managers.transforms import TransformManager
from agml.data.managers.resize import ImageResizeManager
from agml.data.managers.training import TrainingManager
from agml.utils.general import NoArgument
from agml.backend.tftorch import convert_to_batch, is_array_like
from agml.utils.random import seed_context
from agml.utils.image import consistent_shapes
from agml.utils.logging import log
class DataManager(AgMLSerializable):
    """Manages the data for a dataset loaded in an `AgMLDataLoader`.

    The `DataManager` is the core internal object which, as its name
    suggests, manages the data being used in an `AgMLDataLoader`. Upon
    instantiation of an `AgMLDataLoader`, the internal data, which is
    represented generally as image paths and corresponding annotations,
    is placed into a `DataManager` container.

    Accessing data from an `AgMLDataLoader` calls the `DataManager`,
    which takes into account all of the potential splits, transforms,
    and batching that may have been applied to the loader.

    This enables more streamlined processing and transforming of data,
    as well as parallelizing data loading as desired (in the future).
    Another way to think about the `DataManager` is it being a list
    of `DataObjects`, with extra logic for processing.
    """
    # Attribute names that survive serialization of this object.
    serializable = frozenset((
        'data_objects', 'resize_manager', 'accessors', 'task',
        'dataset_name', 'shuffle', 'batch_size', 'dataset_root',
        'transform_manager', 'builder', 'train_manager'))

    def __init__(self, builder, task, name, root, **kwargs):
        # Set the basic class information.
        self._task = task
        self._dataset_name = name
        self._dataset_root = root
        # Create the `DataObject`s from the `DataBuilder`. Raw contents
        # (anything that is not already a `DataBuilder`) are first wrapped.
        if not isinstance(builder, DataBuilder):
            builder = DataBuilder.from_data(
                contents = builder,
                info = DatasetMetadata(name),
                root = root)
        self._builder = builder
        self._create_objects(self._builder, task)
        # Set up the internal transform managers. These control
        # the application of transformations to the input data.
        self._transform_manager = TransformManager(task = task)
        self._resize_manager = ImageResizeManager(
            task = task, dataset = name, root = root
        )
        # The transform and resize managers are wrapped inside of a
        # `TrainingManager`, which controls the application of
        # preprocessing to the data based on the class state.
        self._train_manager = TrainingManager(
            transform_manager = self._transform_manager,
            resize_manager = self._resize_manager,
            task = task
        )
        # While we store data in the list of `DataObject`s, the actual
        # accessing of the data by index doesn't happen by directly
        # accessing that list, but instead by accessing the array of
        # indices below. This is useful for two reasons.
        #
        # It allows a much more simpler way to store and access the
        # state of the data. E.g., when splitting the data or accessing
        # it, we only need to check through this array rather than
        # searching through or shuffling the actual list of objects.
        #
        # Secondly, it makes batching data much more straightforward and
        # allows for storing representations of the data in different
        # formats, e.g., shuffling, to be done without having to interfere
        # with the actual `DataObject`s, which is much more convenient.
        self._accessors = np.arange(len(self._data_objects))
        # The following parameters store various parameters which are
        # used internally or accessed by the `AgMLDataLoader` externally.
        self._batch_size = None
        self._shuffle = kwargs.get('shuffle', True)
        self._maybe_shuffle()

    def data_length(self) -> int:
        """Calculates the length of the data based on the batching state."""
        # When batched, `_accessors` is an array of batches, so this is
        # the number of batches; otherwise it is the number of samples.
        return len(self._accessors)

    def _get_random_index(self):
        """Returns a random accessor index to get a specific sample."""
        return np.random.choice(self._accessors)

    def _create_objects(self, builder, task):
        """Creates `DataObject`s from the provided content.

        Here, `content` is a dictionary mapping an input data piece,
        an image, with its corresponding expected output, its annotation.
        """
        self._data_objects = []
        contents = builder.get_contents()
        # Contents may either be split into `inputs`/`outputs` lists or
        # stored as a direct input -> output mapping.
        if 'inputs' in contents.keys():
            contents = zip(tuple(contents['inputs']),
                           tuple(contents['outputs']))
        else:
            contents = contents.items()
        for content in list(contents):
            self._data_objects.append(DataObject.create(
                contents = content, task = task,
                root = self._dataset_root))

    def _maybe_shuffle(self, seed = None):
        """Wraps automatic shuffling to see if it is enabled or not."""
        if self._shuffle:
            self.shuffle(seed = seed)

    def update_train_state(self, state):
        """Updates the training state in the `TrainingManager`."""
        self._train_manager.update_state(state)

    def shuffle(self, seed = None):
        """Shuffles the contents of the `DataManager`.

        This method simply shuffles the order of the `DataObject`s
        which are stored inside this `DataManager`. Optionally, a seed
        can be provided to shuffle them inside of a specific context.
        """
        # Only the accessor index array is shuffled; the underlying
        # `DataObject` list itself is never reordered.
        if seed is None:
            np.random.shuffle(self._accessors)
        else:
            with seed_context(seed):
                np.random.shuffle(self._accessors)

    def generate_split_contents(self, splits):
        """Generates split contents given a dictionary of the split indexes.

        This method, given a set of data split indexes, applies the indexing
        to the original content and gets a mapping of images and annotations
        which are returned back to the `AgMLDataLoader` to be constructed into
        `DataBuilder`s and wrapped into new `DataManager`s.
        """
        # Object detection contents hold COCO JSON dictionaries of
        # inconsistent shapes, so they must be wrapped as `dtype = object`.
        if self._task == 'object_detection':
            contents = np.array(list(
                self._builder.get_contents().items()), dtype = object)
        else:
            contents = np.array(list(self._builder.get_contents().items()))
        try:
            return {k: dict(contents[v]) for k, v in splits.items()}
        except IndexError:
            raise Exception(
                f"Could not generate split contents, likely due to an error "
                f"with the metadata for the dataset `{self._dataset_name}`. "
                f"Please raise this error with the AgML team.")

    def batch_data(self, batch_size):
        """Batches the data into consistent groups.

        The batched data is stored inside of this manager, as a set of
        indexes which are read and loaded when the data is accessed.
        See the information above the `_accessors` parameter above.
        """
        # If the data is already batched and a new batch size is called,
        # then update the existing batch sizes. For unbatching the data,
        # update the batch state and then flatten the accessor array.
        if self._batch_size is not None:
            try:
                self._accessors = np.concatenate(self._accessors).ravel()
            except ValueError:
                # The array is currently 0-dimensional.
                pass
        if batch_size is None or batch_size == 0:
            self._batch_size = None
            return
        # If we have a batch size of `1`, then don't do anything
        # since this doesn't really mean to do anything.
        if batch_size == 1:
            return
        # Otherwise, calculate the actual batches and the overflow
        # of the contents, and then update the accessor.
        num_splits = len(self._accessors) // batch_size
        data_items = np.array(self._accessors)
        overflow = len(self._accessors) - num_splits * batch_size
        # NOTE(review): when the data divides evenly, `overflow` is 0 and
        # `data_items[-0:]` is the *entire* array; the
        # `len(extra_items) < batch_size` guard below is what keeps it
        # from being appended as a spurious batch — confirm intended.
        extra_items = data_items[-overflow:]
        try:
            batches = np.array_split(
                np.array(self._accessors
                         [:num_splits * batch_size]), num_splits)
        except ValueError:
            log(f"There is less data ({len(self._accessors)}) than the provided "
                f"batch size ({batch_size}). Consider using a smaller batch size.")
            batches = [self._accessors]
        else:
            if len(extra_items) < batch_size:
                batches.append(extra_items)
        self._accessors = np.array(batches, dtype = object)
        self._batch_size = batch_size

    def assign_resize(self, image_size, method):
        """Assigns a resizing factor for the image and annotation data."""
        if image_size is None:
            image_size = 'default'
        self._resize_manager.assign(image_size, method)

    def push_transforms(self, **transform_dict):
        """Pushes a transformation to the data transform pipeline."""
        # Check if any transforms are being reset and assign them as such.
        # NOTE(review): this iterates the dict *keys* (strings), so the
        # condition only holds for an empty kwargs dict — possibly
        # `transform_dict.values()` was intended; confirm against callers.
        if all(i is NoArgument for i in transform_dict):
            transform_dict = {
                'transform': 'reset',
                'target_transform': 'reset',
                'dual_transform': 'reset'
            }
        else:
            empty_keys, reset_keys = [], []
            for key, value in transform_dict.items():
                if value is NoArgument:
                    empty_keys.append(key)
                if value is None:
                    reset_keys.append(key)
            if len(empty_keys) != 0:
                # `None` arguments mean a reset, while omitted arguments
                # (`NoArgument`) are mapped to `None` (leave unchanged).
                for key, value in transform_dict.items():
                    if value is None:
                        transform_dict[key] = 'reset'
                    elif value is NoArgument:
                        transform_dict[key] = None
        # There is no `dual_transform` object for image classification.
        if self._task == 'image_classification':
            if transform_dict['dual_transform'] is None:
                transform_dict['dual_transform'] = 'reset'
        # Assign the transforms to the manager in order.
        for key in ['transform', 'target_transform', 'dual_transform']:
            self._transform_manager.assign(key, transform_dict[key])

    def _load_one_image_and_annotation(self, obj):
        """Loads one image and annotation from a `DataObject`."""
        return self._train_manager.apply(
            obj = obj, batch_state = self._batch_size is not None
        )

    def _load_multiple_items(self, indexes):
        """Loads multiple images and annotations from a set of `DataObject`s."""
        # Either we're getting multiple batches, or just multiple items.
        contents = []
        if self._batch_size is not None:
            for i in indexes:
                contents.append(self._load_batch(self._accessors[i]))
        else:
            for i in indexes:
                contents.append(self._load_one_image_and_annotation(
                    self._data_objects[self._accessors[i]]))
        return contents

    def _batch_multi_image_inputs(self, images):
        """Converts either a list of images or multiple input types into a batch."""
        # If the input images are just a simple batch.
        if is_array_like(images[0]):
            return convert_to_batch(images)
        # Otherwise, convert all of them independently: each sample is a
        # dict of input types, batched recursively per key.
        keys = images[0].keys()
        batches = {k: [] for k in keys}
        for sample in images:
            for key in sample:
                batches[key].append(sample[key])
        return {k: self._batch_multi_image_inputs(i) for k, i in batches.items()}

    def _batch_multi_output_annotations(self, annotations):
        """Converts either a list of annotations or multiple annotation types into a batch."""
        # If the output annotations are simple objects.
        # NOTE(review): due to operator precedence this reads as
        # `A or (B and C)` — confirm that grouping is intentional.
        if (isinstance(annotations[0], (list, np.ndarray))
                or isinstance(annotations, (list, np.ndarray))
                and isinstance(annotations[0], (int, float))):
            # Ragged annotation shapes require an object-dtype array.
            if not consistent_shapes(annotations):
                annotations = np.array(annotations, dtype = object)
            else:
                annotations = np.array(annotations)
            return annotations
        # For object detection, just return the COCO JSON dictionaries.
        if self._task == 'object_detection':
            return annotations
        # Otherwise, convert all of them independently (dicts of
        # annotation types, batched recursively per key).
        keys = annotations[0].keys()
        batches = {k: [] for k in keys}
        for sample in annotations:
            for key in sample:
                batches[key].append(sample[key])
        return {k: self._batch_multi_output_annotations(i) for k, i in batches.items()}

    def _load_batch(self, batch_indexes):
        """Gets a batch of data from the dataset.

        This differs from simply getting multiple pieces of data from the
        dataset, such as a slice, in that it also stacks the data together
        into a valid batch and returns it as such.
        """
        # Get the images and annotations from the data objects.
        images, annotations = [], []
        for index in batch_indexes:
            image, annotation = self._load_one_image_and_annotation(
                self._data_objects[index])
            images.append(image)
            annotations.append(annotation)
        # Attempt to create batched image arrays.
        images = self._batch_multi_image_inputs(images)
        # Attempt the same for the annotation arrays. This is more complex
        # since there are many different types of annotations, namely labels,
        # annotation masks, COCO JSON dictionaries, etc. We need to properly
        # create a batch in each of these cases.
        annotations = self._batch_multi_output_annotations(annotations)
        # Return the batches.
        return self._train_manager.make_batch(
            images = images,
            annotations = annotations
        )

    def get(self, indexes):
        """Loads and processes a piece (or pieces) of data from the dataset.

        This is the actual accessor method that performs the loading of data
        and the relevant processing as dictated by loading, image resizing,
        transform application, and other internal processing methods such as
        creating batches. This is called by the `AgMLDataLoader` to get data.
        """
        # If there is only one index and the data is not batched,
        # then we just need to return a single `DataObject`.
        if isinstance(indexes, int) and self._batch_size is None:
            return self._load_one_image_and_annotation(
                self._data_objects[self._accessors[indexes]]
            )
        # If we have a batch of images, then return the batch.
        if isinstance(indexes, int) and self._batch_size is not None:
            return self._load_batch(self._accessors[indexes])
        # Otherwise, if there are multiple indexes (e.g., an unstacked
        # slice or just a tuple of integers), then we get multiple images.
        if isinstance(indexes, (list, tuple)):
            return self._load_multiple_items(indexes)
        # NOTE(review): any other index type falls through and implicitly
        # returns `None` — confirm whether an error should be raised here.
| 16,197 | 41.626316 | 94 | py |
AgML | AgML-main/agml/data/loader.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import copy
import glob
from typing import Union
from collections.abc import Sequence
from decimal import getcontext, Decimal
import numpy as np
from agml.framework import AgMLSerializable
from agml.data.manager import DataManager
from agml.data.builder import DataBuilder
from agml.data.metadata import DatasetMetadata, make_metadata
from agml.utils.logging import log
from agml.utils.io import get_file_list, get_dir_list
from agml.utils.data import load_public_sources
from agml.utils.general import NoArgument, resolve_list_value
from agml.utils.random import inject_random_state
from agml.backend.config import data_save_path, synthetic_data_save_path
from agml.backend.experimental import AgMLExperimentalFeatureWrapper
from agml.backend.tftorch import (
get_backend, set_backend,
user_changed_backend, StrictBackendError,
_add_dataset_to_mro, # noqa
)
from agml.viz.general import show_sample
class AgMLDataLoaderMeta(type):
    """Metaclass that widens `isinstance` checks for `AgMLDataLoader`.

    This override allows objects of type `AgMLMultiDatasetLoader` to be
    treated as an `AgMLDataLoader` when running the command
    `isinstance(a, AgMLDataLoader)` (hacky fix, essentially).
    """

    def __instancecheck__(self, instance):
        # First run the regular check; only fall back to the
        # multi-dataset loader check if that fails.
        if isinstance(instance, self.__class__):
            return True
        from agml.data.multi_loader import AgMLMultiDatasetLoader
        return isinstance(instance, AgMLMultiDatasetLoader)
class AgMLDataLoader(AgMLSerializable, metaclass = AgMLDataLoaderMeta):
"""Loads and provides a processing interface for a dataset.
The `AgMLDataLoader` is the main interface to AgML's public dataset
interface, and exposes an API which enables the downloading and
subsequent local loading of a public dataset, as well as various
preprocessing functions and hooks to integrate into existing pipelines.
Methods provided include splitting the dataset into train/val/test sets,
batching the data, applying transforms, and more. All of the processing
code is contained internally, so all you need to do is instantiate the
loader and call the relevant methods to apply the preprocessing methods.
`AgMLDataLoader` supports both TensorFlow and PyTorch as backends, and
can automatically perform tensor conversion and batching to enable
seamless usage in training or inference pipelines. Data can also be
exported into native TensorFlow and PyTorch objects.
There is also support for using custom datasets outside of the AgML
public data repository. To do this, you need to pass an extra argument
containing metadata for the dataset, after which point the loader
will work as normal (and all interfaces, except for the info parameters
which are not provided, will also be available for standard use).
Parameters
----------
dataset : str
The name of the public dataset you want to load. See the helper
method `agml.data.public_data_sources()` for a list of datasets.
If using a custom dataset, this can be any valid string.
kwargs : dict, optional
dataset_path : str, optional
A custom path to download and load the dataset from.
overwrite : bool, optional
Whether to rewrite and re-install the dataset.
meta : dict, optional
A dictionary consisting of metadata properties, if you want
to create a custom loader. At minimum, this needs to contain
two parameters: `task`, indicating the type of machine learning
task that the dataset is for, and `classes`, a list of the
classes that the dataset contains.
Notes
-----
See the methods for examples on how to use an `AgMLDataLoader` effectively.
"""
serializable = frozenset((
'info', 'builder', 'manager', 'train_data',
'val_data', 'test_data', 'is_split', 'meta_properties'))
def __new__(cls, dataset, **kwargs):
# If a single dataset is passed, then we use the base `AgMLDataLoader`.
# However, if an iterable of datasets is passed, then we need to
# dispatch to the subclass `AgMLMultiDatasetLoader` for them.
if isinstance(dataset, (str, DatasetMetadata)):
return super(AgMLDataLoader, cls).__new__(cls)
elif isinstance(dataset, Sequence):
if len(dataset) == 1:
log("Received a sequence with only one element when "
"instantiating an `AgMLDataLoader`. Returning "
"a regular, single-element data loader.")
return super(AgMLDataLoader, cls).__new__(cls)
from agml.data.multi_loader import AgMLMultiDatasetLoader
return AgMLMultiDatasetLoader(dataset, **kwargs)
raise TypeError(
f"Expected either a single dataset name (or metadata), or"
f"a list of dataset names/metadata when instantiating an "
f"`AgMLDataLoader`. Got {dataset} of type {type(dataset)}.")
def __getnewargs__(self):
return self._info.name,
    def __init__(self, dataset, **kwargs):
        """Instantiates an `AgMLDataLoader` with the dataset.

        Parameters
        ----------
        dataset : str, DatasetMetadata
            The name (or metadata) of the dataset to load.
        kwargs : dict
            May contain `meta` (custom dataset metadata), `dataset_path`
            (a custom local path), and `overwrite` (re-install the data).
        """
        # Set up the dataset and its associated metadata.
        self._info = make_metadata(dataset, kwargs.get('meta', None))
        # The data for the class is constructed in two stages. First, the
        # internal contents are constructed using a `DataBuilder`, which
        # finds and wraps the local data in a proper format.
        self._builder = DataBuilder(
            info = self._info,
            dataset_path = kwargs.get('dataset_path', None),
            overwrite = kwargs.get('overwrite', False)
        )
        # These contents are then passed to a `DataManager`, which conducts
        # the actual loading and processing of the data when called.
        self._manager = DataManager(
            builder = self._builder,
            task = self._info.tasks.ml,
            name = self._info.name,
            root = self._builder.dataset_root
        )
        # If the dataset is split, then the `AgMLDataLoader`s with the
        # split and reduced data are stored as accessible class properties.
        self._train_data = None
        self._val_data = None
        self._test_data = None
        self._is_split = False
        # Set the direct access metadata properties like `num_images` and
        # `classes`, since these can be modified depending on the state of
        # the loader, whilst the `info` parameter attributes cannot.
        self._meta_properties = {
            'num_images': self._info.num_images,
            'classes': self._info.classes,
            'num_classes': self._info.num_classes,
            'num_to_class': self._info.num_to_class,
            'class_to_num': self._info.class_to_num,
            'data_distributions': {self.name: self._info.num_images}}
@classmethod
def custom(cls, name, dataset_path = None, classes = None, **kwargs):
"""Creates an `AgMLDataLoader` with a set of custom data.
If you have a custom dataset that you want to use in an `AgMLDataLoader`,
this method constructs the loader using similar semantics to the regular
loader instantiation. It is a wrapper around using the `meta` argument to
provide dataset properties that provides additional convenience for some
circumstances, as summarized below.
Functionally, this method is equivalent to instantiating `AgMLDataLoader`
with an extra argument `meta` that contains metadata for the dataset, with
the `task` and `classes` keys required and the others not necessary. This
would look like follows:
> loader = AgMLDataLoader('name', meta = {'task': task, 'classes': classes})
This method replaces the meta dictionary with keyword arguments to allow
for a more Pythonic construction of the custom loader. This method, however
includes additional optimizations which allow for a more convenient way
to instantiate the loader:
1. It automatically inferences the task from the structure which the data is
in, so you don't need to provide the task at all to this method.
2. For image classification and object detection task, this method will
attempt to automatically inference the classes in the loader (by searching
for the image directories for image classification tasks, and searching
in the COCO JSON file for object detection). Semantic segmentation tasks,
however, still require the list of classes to be passed.
This makes it so that in a variety of cases, the loader can be instantiated
without even requiring any metadata, as most of it can be inferred directly
by this method and thus streamlines the procedure for using custom data.
If you want to cache the metadata, rather than constantly putting them as
arguments, then create a file `.meta.json` at the path `/root/.meta.json`
with the parameters that you want.
Parameters
----------
name : str
A name for the custom dataset (this can be any valid string).
dataset_path : str, optional
A custom path to load the dataset from. If this is not passed,
we will assume that the dataset is at the traditional path:
`~/.agml/datasets/<name>` (or the changed default data path).
Otherwise, the dataset can be passed as a path such as `/root/name`,
or `/root`; in the latter case the method will check for `/root/name`.
classes : list, tuple
A list of string-labels for the classes of the dataset, in order.
This is not required for image classification/object detection.
kwargs : dict
Any other metadata for the dataset, this is not required.
Returns
-------
An `AgMLDataLoader` outfitted with the custom dataset.
"""
# Check the name and ensure that no dataset with that name exists.
if name in load_public_sources().keys() or not isinstance(name, str):
raise ValueError(f"Invalid name '{name}', the name should be "
f"a string that is not an existing dataset in "
f"the AgML public data source repository.")
# Locate the path to the dataset.
if dataset_path is None:
dataset_path = os.path.abspath(os.path.join(data_save_path(), name))
if not os.path.exists(dataset_path):
raise NotADirectoryError(
f"Existing directory '{dataset_path}' for dataset of name "
f"{name} not found, pass a custom path if you want to use "
f"a custom dataset path for the dataset.")
else:
dataset_path = os.path.abspath(os.path.expanduser(dataset_path))
if not os.path.exists(dataset_path):
if not os.path.exists(dataset_path):
raise NotADirectoryError(
f"Could not find a directory for dataset '{name}' at the "
f"provided dataset path: {dataset_path}.")
if not dataset_path.endswith(name):
dataset_path = os.path.join(dataset_path, name)
if not os.path.exists(dataset_path):
raise NotADirectoryError(
f"Could not find a directory for dataset '{name}' at the "
f"provided dataset path: {dataset_path}.")
# Infer the task based on the provided dataset path.
if os.path.exists(os.path.join(dataset_path, 'annotations.json')):
task = 'object_detection'
elif os.path.exists(os.path.join(dataset_path, 'images')) and \
os.path.exists(os.path.join(dataset_path, 'annotations')):
task = 'semantic_segmentation'
elif len(get_file_list(dataset_path)) == 0 and \
len(get_dir_list(dataset_path)) != 0:
task = 'image_classification'
else:
raise TypeError("Unrecognized dataset annotation format.")
# Check if there is a metadata file.
kwargs['classes'] = classes
if os.path.exists(os.path.join(dataset_path, '.meta.json')):
with open(os.path.join(dataset_path, '.meta.json'), 'r') as f:
kwargs.update(json.load(f))
# Infer the classes for image classification/object detection.
classes = kwargs.pop('classes')
if classes is None:
if task == 'semantic_segmentation':
raise ValueError(
"Classes are required for a semantic segmentation task.")
elif task == 'image_classification':
classes = get_dir_list(dataset_path)
else: # object detection
with open(os.path.join(dataset_path, 'annotations.json'), 'r') as f:
classes = [c['name'] for c in json.load(f)['categories']]
# Construct and return the `AgMLDataLoader`.
return cls(name, dataset_path = dataset_path,
meta = {'task': task, 'classes': classes, **kwargs})
@classmethod
def helios(cls, name, dataset_path = None):
"""Creates an `AgMLDataLoader` from a Helios-generated dataset.
Given the path to a Helios-generated (and converted) dataset, this method
will generate an `AgMLDataLoader` which is constructed using similar
semantics to the regular instantiation. This method is largely similar to
`AgMLDataLoader.custom()`, but also takes into account the extra
information which is provided in the `.metadata` directory of the Helios
generated dataset, allowing it to contain potentially even more info.
"""
# Instantiate from a list of datasets.
if isinstance(name, (list, tuple)):
if dataset_path is None:
dataset_path = [None] * len(name)
elif isinstance(dataset_path, str):
dataset_path = [dataset_path] * len(name)
else:
if not len(dataset_path) == len(name):
raise ValueError("The number of dataset paths must be "
"the same as the number of dataset names.")
datasets = [cls.helios(n, dataset_path = dp)
for n, dp in zip(name, dataset_path)]
return cls.merge(*datasets)
# Instantiate from a wildcard pattern.
if isinstance(name, str) and '*' in name:
if dataset_path is None:
dataset_path = os.path.abspath(synthetic_data_save_path())
elif not os.path.exists(dataset_path):
raise NotADirectoryError(
f"Existing directory '{dataset_path}' for dataset of name "
f"{name} not found, pass a custom path if you want to use "
f"a custom dataset path for the dataset.")
# Get the list of datasets.
possible_datasets = glob.glob(os.path.join(dataset_path, name))
if len(possible_datasets) == 0:
raise ValueError(f"No datasets found for pattern: {name}.")
datasets = [cls.helios(os.path.basename(p), dataset_path = dataset_path)
for p in sorted(possible_datasets)]
return cls.merge(*datasets)
# Locate the path to the dataset, using synthetic semantics.
if dataset_path is None:
dataset_path = os.path.abspath(
os.path.join(synthetic_data_save_path(), name))
if not os.path.exists(dataset_path):
raise NotADirectoryError(
f"Existing directory '{dataset_path}' for dataset of name "
f"{name} not found, pass a custom path if you want to use "
f"a custom dataset path for the dataset.")
else:
dataset_path = os.path.abspath(os.path.expanduser(dataset_path))
if not os.path.exists(dataset_path):
if not os.path.exists(dataset_path):
raise NotADirectoryError(
f"Could not find a directory for Helios dataset '{name}' "
f"at the provided dataset path: {dataset_path}.")
if not dataset_path.endswith(name):
dataset_path = os.path.join(dataset_path, name)
if not os.path.exists(dataset_path):
raise NotADirectoryError(
f"Could not find a directory for Helios dataset '{name}' "
f"at the provided dataset path: {dataset_path}.")
# Load the information file.
info_file = os.path.join(dataset_path, '.metadata', 'agml_info.json')
if not os.path.exists(info_file):
raise FileNotFoundError(f"The information file at '{info_file}' for the "
f"Helios dataset {name} could not be found.")
with open(info_file, 'r') as f:
meta = json.load(f)
# Construct the loader.
return cls.custom(name, dataset_path, **meta)
@staticmethod
def merge(*loaders, classes = None):
"""Merges a set of `AgMLDataLoader`s into a single loader.
Given a set of input `AgMLDataLoader`s, this method will return a single
`AgMLDataLoader` which is capable of returning data from any and every one
of the input loaders. The resultant loader is functionally equivalent to
the `AgMLDataLoader` returned by instantiating an `AgMLDataLoader` from a
sequence of AgML public data sources, except that in this case, the input
loaders may be subject to a number of input modifications before merging.
This also allows the usage of both an AgML public data source and a custom
dataset together in a single multi-dataset loader. As such, this method
should be used with caution, as since input loaders may be allowed to have
any modification, certain methods may not function as expected. For instance,
if one of the passed loaders has already been split, then the overall new
multi-loader cannot be split as a whole. Similarly, if also using a custom
dataset, then any properties of the `info` parameter which are not passed
to the dataset cannot be used, even if the other datasets have them.
Parameters
----------
loaders : Tuple[AgMLDataLoader]
A collection of `AgMLDataLoader`s (but not any `AgMLDataLoader`s
which are already holding a collection of datasets).
classes : list
A list of classes in the new loader. This argument can be used to
construct a custom ordering (non-alphabetical) of classes in the loader.
Returns
-------
A new `AgMLDataLoader` wrapping the input datasets.
"""
# Validate the input loaders.
from agml.data.multi_loader import AgMLMultiDatasetLoader
if len(loaders) == 1:
raise ValueError("There should be at least two inputs to the `merge` method.")
for loader in loaders:
if isinstance(loader, AgMLMultiDatasetLoader):
raise TypeError("Cannot merge datasets which already hold a "
"collection of multiple datasets.")
# Instantiate the `AgMLMultiDatasetLoader`.
return AgMLMultiDatasetLoader._instantiate_from_collection(
*loaders, classes = classes)
def __add__(self, other):
if not isinstance(other, AgMLDataLoader):
return NotImplemented
return AgMLDataLoader.merge(self, other)
def __len__(self):
return self._manager.data_length()
def __getitem__(self, indexes: Union[list, int, slice]):
if isinstance(indexes, slice):
data = np.arange(self._manager.data_length())
indexes = data[indexes].tolist()
if isinstance(indexes, int):
indexes = [indexes]
if np.isscalar(indexes):
indexes = [indexes.item()] # noqa
for idx in indexes:
if idx not in range(len(self)):
if idx not in [-i for i in range(1, len(self) + 1, 1)]:
raise IndexError(
f"Index {idx} out of range of "
f"AgMLDataLoader length: {len(self)}.")
return self._manager.get(resolve_list_value(indexes))
def __iter__(self):
for indx in range(len(self)):
yield self[indx]
def __repr__(self):
out = f"<AgMLDataLoader: (dataset={self.name}"
out += f", task={self.task}"
out += f", images={self.num_images}"
out += f") at {hex(id(self))}>"
return out
def __str__(self):
return repr(self)
    def __copy__(self):
        """Copies the loader and updates its state.

        Delegates the raw copy to the base class's `__copy__` and then
        re-synchronizes the managers via `copy_state`, so the copy does
        not share transform/resize/train state with this loader.
        """
        # `super().__copy__()` is provided by the (unseen) serializable base
        # class — presumably it duplicates the serialized attributes.
        cp = super(AgMLDataLoader, self).__copy__()
        # Rebuild the internal managers so they are independent of `self`.
        cp.copy_state(self)
        return cp
    def copy(self):
        """Returns a copy of the data loader's contents.

        NOTE(review): this delegates to `__copy__`, which copies manager
        state rather than performing a full `copy.deepcopy` — confirm
        before relying on deep-copy semantics.
        """
        return self.__copy__()
def copy_state(self, loader):
"""Copies the state of another `AgMLDataLoader` into this loader.
This method copies the state of another `AgMLDataLoader` into this
loader, including its transforms, resizing, and training state. Other
general parameters such as batch size and shuffling are left intact.
Parameters
----------
loader : AgMLDataLoader
The data loader from which the state should be copied.
Returns
-------
This `AgMLDataLoader`.
"""
# Re-construct the training manager.
new_train_manager = loader._manager._train_manager.__copy__()
self._manager._train_manager = new_train_manager
# Re-construct the transform manager.
new_transform_manager = loader._manager._transform_manager.__copy__()
self._manager._transform_manager = new_transform_manager
self._manager._train_manager._transform_manager = new_transform_manager
# Re-construct the resizing manager.
new_resize_manager = loader._manager._resize_manager.__copy__()
self._manager._resize_manager = new_resize_manager
self._manager._train_manager._resize_manager = new_resize_manager
@property
def name(self):
"""Returns the name of the dataset in the loader."""
return self._info.name
@property
def dataset_root(self):
"""Returns the local path to the dataset being used."""
return self._builder.dataset_root
@property
def info(self):
"""Returns a `DatasetMetadata` object containing dataset info.
The contents returned in the `DatasetMetadata` object can be used
to inspect dataset metadata, such as the location the data was
captured, the data formats, and the license/copyright information.
See the `DatasetMetadata` class for more information.
"""
return self._info
@property
def task(self):
"""Returns the ML task that this dataset is constructed for."""
return self._info.tasks.ml
@property
def num_images(self):
"""Returns the number of images in the dataset."""
return self._meta_properties.get('num_images')
@property
def classes(self):
"""Returns the classes that the dataset is predicting."""
return self._meta_properties.get('classes')
@property
def num_classes(self):
"""Returns the number of classes in the dataset."""
return self._meta_properties.get('num_classes')
@property
def num_to_class(self):
"""Returns a mapping from a number to a class label."""
return self._meta_properties.get('num_to_class')
@property
def class_to_num(self):
"""Returns a mapping from a class label to a number."""
return self._meta_properties.get('class_to_num')
@property
def data_distributions(self):
"""Displays the distribution of images from each source."""
return self._meta_properties.get('data_distributions')
@property
def image_size(self):
"""Returns the determined image size for the loader.
This is primarily useful when using auto shape inferencing, to
access what the final result ends up being. Otherwise, it may
just return `None` or the shape that the user has set.
"""
return self._manager._resize_manager.size
    def _generate_split_loader(
            self, contents, split, meta_properties = None, **kwargs):
        """Generates a split `AgMLDataLoader`.

        Builds a new loader over `contents` (the raw data mapping for the
        requested `split`), duplicating this loader's manager state —
        transforms, resizing, and training mode — so the split loader
        behaves like its parent.

        Parameters
        ----------
        contents : Any
            The raw data mapping for the split; `None` means the split
            was never generated (an error).
        split : str
            The name of the split (e.g. 'train', 'val', 'test').
        meta_properties : dict, optional
            Pre-computed metadata for the new loader; derived from this
            loader's metadata when not given.

        Returns
        -------
        A new, already-split `AgMLDataLoader` holding only `contents`.
        """
        # Check if the data split exists.
        if contents is None:
            raise ValueError(
                f"Attempted to access '{split}' split when "
                f"the data has not been split for '{split}'.")
        # Load a new `DataManager` and update its internal managers
        # using the state of the existing loader's `DataManager`.
        builder = DataBuilder.from_data(
            contents = [contents, kwargs.get('labels_for_image', None)],
            info = self.info,
            root = self.dataset_root,
            builder = self._builder)
        current_manager = copy.deepcopy(self._manager.__getstate__())
        current_manager.pop('builder')
        current_manager['builder'] = builder
        # Build the new accessors and construct the `DataManager`.
        accessors = np.arange(len(builder.get_contents()))
        if self._manager._shuffle:
            np.random.shuffle(accessors)
        current_manager['accessors'] = accessors
        # Batching is deferred: the new manager is created un-batched here
        # and re-batched below, after its data objects are rebuilt.
        batch_size = current_manager.pop('batch_size')
        current_manager['batch_size'] = None
        new_manager = DataManager.__new__(DataManager)
        new_manager.__setstate__(current_manager)
        # After the builder and accessors have been generated, we need
        # to generate a new list of `DataObject`s.
        new_manager._create_objects(
            new_manager._builder, self.task)
        # Update the `TransformManager` and `ResizeManager` of the
        # `TrainManager` in the `DataManager` (they need to be synchronized).
        new_manager._train_manager._transform_manager = \
            new_manager._transform_manager
        new_manager._train_manager._resize_manager = \
            new_manager._resize_manager
        # Batching data needs to be done independently.
        if batch_size is not None:
            new_manager.batch_data(batch_size = batch_size)
        # Update the metadata parameters.
        if meta_properties is None:
            meta_properties = self._meta_properties.copy()
            meta_properties['num_images'] = len(contents)
            meta_properties['data_distributions'] = {
                self.name: len(contents)}
        # Instantiate a new `AgMLDataLoader` from the contents.
        loader_state = self.copy().__getstate__()
        loader_state['builder'] = builder
        loader_state['manager'] = new_manager
        loader_state['meta_properties'] = meta_properties
        cls = super(AgMLDataLoader, self).__new__(AgMLDataLoader)
        cls.__setstate__(loader_state)
        # The new loader starts with no nested splits of its own, and is
        # marked as already split so it cannot be re-split by default.
        for attr in ['train', 'val', 'test']:
            setattr(cls, f'_{attr}_data', None)
        cls._is_split = True
        return cls
@property
def train_data(self):
"""Stores the `train` split of the data in the loader."""
if isinstance(self._train_data, AgMLDataLoader):
return self._train_data
self._train_data = self._generate_split_loader(
self._train_data, split = 'train')
return self._train_data
@property
def val_data(self):
"""Stores the `val` split of the data in the loader."""
if isinstance(self._val_data, AgMLDataLoader):
return self._val_data
self._val_data = self._generate_split_loader(
self._val_data, split = 'val')
return self._val_data
@property
def test_data(self):
"""Stores the `test` split of the data in the loader."""
if isinstance(self._test_data, AgMLDataLoader):
return self._test_data
self._test_data = self._generate_split_loader(
self._test_data, split = 'test')
return self._test_data
def eval(self) -> "AgMLDataLoader":
"""Sets the `AgMLDataLoader` in evaluation mode.
Evaluation mode disables transforms, and only keeps the loader applying
resizing to the contents. If the loader was previously set into TensorFlow
or PyTorch mode, however, it will also keep up tensor conversion and
potential batch adding (see `as_keras_sequence()` and `as_torch_dataset()`
methods for more information on the exact operations).
This method does not completely disable preprocessing, to completely
disable preprocessing, use `loader.disable_preprocessing()`. Additionally,
if you want to keep only the resizing but not the implicit tensor
conversions based on the backend, then run:
> loader.disable_preprocessing() # or loader.reset_preprocessing()
> loader.eval()
This will refresh the backend conversions and return it to `eval` mode.
Returns
-------
The `AgMLDataLoader` object.
"""
self._manager.update_train_state('eval')
return self
def disable_preprocessing(self) -> "AgMLDataLoader":
"""Disables all preprocessing on the `AgMLDataLoader`.
This sets the loader in a no-preprocessing mode (represented internally as
`False`), where only the raw data is returned: no transforms, resizing, or
any conversion to any type of backend. This can be used to test or inspect
the original data contents of the loader before processing.
The loader can be set into any mode from here, for instance see `eval()`,
`as_keras_sequence()`, and `as_torch_dataset()` for specific examples on
the different potential training and evaluation states. If you just want
to reset the loader to its default state, which applies only transforms
and resizing, then use `loader.reset_preprocessing()`.
Returns
-------
The `AgMLDataLoader` object.
"""
self._manager.update_train_state(False)
return self
def reset_preprocessing(self) -> "AgMLDataLoader":
"""Re-enables preprocessing on the `AgMLDataLoader`.
This resets the loader back to its default train state, namely where it
applies just the given transforms and content resizing. This is a consistent
method, meaning that regardless of the prior train state of the loader
before running this method, it will hard reset it to its original state
(similar to `disable_preprocessing()`, but it keeps some preprocessing).
Returns
-------
The `AgMLDataLoader` object.
"""
self._manager.update_train_state(None)
return self
def on_epoch_end(self):
"""Shuffles the dataset on the end of an epoch for a Keras sequence.
If `as_keras_sequence()` is called and the `AgMLDataLoader` inherits
from `tf.keras.utils.Sequence`, then this method will shuffle the
dataset on the end of each epoch to improve training.
"""
self._manager._maybe_shuffle()
def as_keras_sequence(self) -> "AgMLDataLoader":
"""Sets the `DataLoader` in TensorFlow mode.
This TensorFlow extension converts the loader into a TensorFlow mode,
adding inheritance from the superclass `keras.utils.Sequence` to enable
it to be used directly in a Keras pipeline, and adding extra preprocessing
to the images and annotations to make them compatible with TensorFlow.
The main features added on enabling this include:
1. Conversion of output images and annotations to `tf.Tensor`s.
2. Adding an implicit batch size dimension to images even when the
data is not batched (for compatibility in `Model.fit()`).
3. Adding inheritance from `keras.utils.Sequence` so that any
`AgMLDataLoader` object can be used directly in `Model.fit()`.
4. Setting the data loader to use a constant image shape, namely
`auto` (which will default to (512, 512) if none is found).
This can be overridden by manually setting the image shape
parameter back after running this method. Note that this may
result in errors when attempting implicit tensor conversion.
Returns
-------
The `AgMLDataLoader` object.
"""
_add_dataset_to_mro(self, 'tf')
self._manager.update_train_state('tf')
return self
def as_torch_dataset(self) -> "AgMLDataLoader":
"""Sets the `DataLoader` in PyTorch mode.
This PyTorch extension converts the loader into a PyTorch mode, adding
inheritance from th superclass `torch.utils.data.Dataset` to enable it to
be used directly in a PyTorch pipeline, and adding extra preprocessing to
the images and annotations to make them compatible with PyTorch.
The main features added on enabling this include:
1. Conversion of output images and annotations to `torch.Tensor`s.
2. Converting the channel format of the input images from the default,
channels_last, into channels_first (NHWC -> NCHW).
3. Adding inheritance from `torch.utils.data.Dataset` so that any
`AgMLDataLoader` object can be used with a `torch.utils.data.DataLoader`.
4. Setting the data loader to use a constant image shape, namely
`auto` (which will default to (512, 512) if none is found).
This can be overridden by manually setting the image shape
parameter back after running this method. Note that this may
result in errors when attempting implicit tensor conversion.
Returns
-------
The `AgMLDataLoader` object.
"""
_add_dataset_to_mro(self, 'torch')
self._manager.update_train_state('torch')
return self
@property
def shuffle_data(self):
"""Returns whether the loader is set to shuffle data or not.
By default, if no value is passed in initialization, this is set to
`True`. It can be manually toggled to `False` using this property.
"""
return self._manager._shuffle
@shuffle_data.setter
def shuffle_data(self, value):
"""Set whether the loader should shuffle data or not.
This can be used to enable/disable shuffling, by passing
either `True` or `False`, respectively.
"""
if not isinstance(value, bool):
raise TypeError("Expected either `True` or `False` for 'shuffle_data'.")
self._manager._shuffle = value
def shuffle(self, seed = None):
"""Potentially shuffles the contents of the loader.
If shuffling is enabled on this loader (`shuffle = False` has
not been passed to the instantiation), then this method will
shuffle the order of contents in it. A seed can be provided to
shuffle the dataset to an expected order.
If the data is already batched, then the batch contents will be
shuffled. For instance, if we have data batches [[1, 2], [3, 4]],
then the shuffling result will be [[3, 4], [1, 2]]. If you want
all of the contents to be shuffled, call `shuffle` before batching.
Note that the data is automatically shuffled upon instantiation,
unless the `shuffle = False` parameter is passed at instantiation.
However, this disables automatic shuffling for the class
permanently, and this method must be called to shuffle the data.
Parameters
----------
seed : int, optional
A pre-determined seed for shuffling.
Returns
-------
The `AgMLDataLoader` object.
"""
self._manager.shuffle(seed = seed)
return self
def take_dataset(self, name) -> "AgMLDataLoader":
"""Takes one of the datasets in a multi-dataset loader.
This method selects one of the datasets (as denoted by `name`)
in this multi-dataset collection and returns an `AgMLDataLoader`
with its contents. These contents will be subject to any transforms
and modifications as applied by the main loader, but the returned
loader will be a copy, such that any new changes made to the main
multi-dataset loader will not affect the new loader.
Note that this method only works for multi-dataset collections.
Parameters
----------
name : str
The name of one of the sub-datasets of the loader.
Returns
-------
An `AgMLDataLoader`.
"""
raise ValueError(
"The `loader.take_dataset` method only works for multi-dataset loaders.")
    def take_class(self, classes, reindex = True) -> "AgMLDataLoader":
        """Reduces the dataset to a subset of class labels.

        This method, given a set of either integer or string class labels,
        will return a new `AgMLDataLoader` containing a subset of the
        original dataset, where the only classes in the dataset are those
        specified in the `classes` argument.

        The new loader will have info parameters like `num_classes` and
        `class_to_num` updated for the new set of classes; however, the
        original `info` metadata will remain the same as the original.

        Note that if the dataset contains images which have bounding boxes
        corresponding to multiple classes, this method will not work.

        Parameters
        ----------
        classes : list, int, str
            Either a single integer/string for a single class, or a list
            of integers or strings for multiple classes. Integers should
            be one-indexed for object detection.
        reindex : bool
            Re-indexes all of the new classes starting from 1, in ascending
            order based on their number in the original dataset.

        Returns
        -------
        A new `AgMLDataLoader` restricted to the requested classes.

        Raises
        ------
        RuntimeError
            If the loader's task is not object detection.
        ValueError
            If a requested class is unknown, or if any image contains
            boxes from multiple categories.

        Notes
        -----
        This method only works for object detection datasets.
        """
        if self._info.tasks.ml != 'object_detection':
            raise RuntimeError("The `take_class` method can only be "
                               "used for object detection datasets.")
        # Parse the provided classes and determine their numerical labels.
        # Each branch normalizes `classes` into a list of integer labels.
        if isinstance(classes, str):
            if classes not in self.classes:
                raise ValueError(
                    f"Received a class '{classes}' for `loader.take_class`, "
                    f"which is not in the classes for {self.name}: {self.classes}")
            classes = [self.class_to_num[classes]]
        elif isinstance(classes, int):
            try: self.num_to_class[classes]
            except IndexError:
                raise ValueError(
                    f"The provided class number {classes} is out of "
                    f"range for {self.num_classes} classes. Make sure "
                    f"you are using zero-indexing.")
            classes = [classes]
        else:
            # A sequence: the first element's type decides whether the
            # whole sequence is treated as strings or as integers.
            parsed_classes = []
            if isinstance(classes[0], str):
                for cls in classes:
                    if cls not in self.classes:
                        raise ValueError(
                            f"Received a class '{cls}' for `loader.take_class`, which "
                            f"is not in the classes for {self.name}: {self.classes}")
                    parsed_classes.append(self.class_to_num[cls])
            elif isinstance(classes[0], int):
                for cls in classes:
                    try:
                        self.num_to_class[cls]
                    except IndexError:
                        raise ValueError(
                            f"The provided class number {cls} is out of "
                            f"range for {self.num_classes} classes. Make "
                            f"sure you are using zero-indexing.")
                    parsed_classes.append(cls)
            classes = parsed_classes.copy()
        # Ensure that there are no images with multi-category boxes.
        categories = self._builder._labels_for_image
        if not all(len(np.unique(c)) == 1 for c in categories.values()):
            raise ValueError(
                f"Dataset {self.name} has images with multiple categories for "
                f"bounding boxes, cannot take an individual set of classes.")
        # Get the new data which will go in the loader. The `DataBuilder`
        # stores a mapping of category IDs corresponding to the bounding
        # boxes in each image, so we use these to determine the new boxes.
        new_category_map = {
            k: v for k, v in categories.items() if v[0] in classes}
        new_coco_map = {
            k: v for k, v in self._builder._data.items()
            if k in new_category_map.keys()}
        # Create the new info parameters for the class. If reindexing
        # is requested, then we re-index the classes based on the order
        # in which they are given, and then create a new dictionary
        # to map the original annotations to the new ones (used later).
        if reindex:
            old_to_new = {cls: idx + 1 for idx, cls in enumerate(classes)}
            new_classes = [self.num_to_class[c] for c in classes]
            new_properties = {
                'num_images': len(new_coco_map.keys()),
                'classes': new_classes,
                'num_classes': len(new_classes),
                'num_to_class': {i + 1: c for i, c in enumerate(new_classes)},
                'class_to_num': {c: i + 1 for i, c in enumerate(new_classes)}}
        else:
            new_classes = [self.num_to_class[c] for c in classes]
            new_properties = {
                'num_images': len(new_coco_map.keys()),
                'classes': new_classes,
                'num_classes': len(classes),
                'num_to_class': {c: self.num_to_class[c] for c in classes},
                'class_to_num': {self.num_to_class[c]: c for c in classes}}
        # Create the new loader.
        obj = self._generate_split_loader(
            new_coco_map, 'train',
            meta_properties = new_properties,
            labels_for_image = new_category_map)
        obj._is_split = False
        # Re-index the loader if requested to.
        if reindex:
            class AnnotationRemap(AgMLSerializable):
                """A helper class to remap annotation labels for multiple datasets."""
                serializable = frozenset(("map",))
                def __init__(self, o2n):
                    self._map = o2n
                def __call__(self, contents, name):
                    """Re-maps the annotation for the new, multi-dataset mapping."""
                    image, annotations = contents
                    # Re-map the annotation ID.
                    category_ids = annotations['category_id']
                    category_ids[np.where(category_ids == 0)[0]] = 1 # fix
                    new_ids = np.array([self._map[c]
                                        for c in category_ids])
                    annotations['category_id'] = new_ids
                    return image, annotations
            # Maps the annotations.
            obj._manager._train_manager._set_annotation_remap_hook(
                AnnotationRemap(old_to_new)) # noqa
        # Return the loader.
        return obj
    @inject_random_state
    def take_random(self, k) -> "AgMLDataLoader":
        """Takes a random set of contents from the loader.

        This method selects a sub-sample of the contents in the loader,
        based on the provided number of (or proportion of) elements `k`.
        It then returns a new loader with just this reduced number of
        elements. The new loader is functionally similar to the original
        loader, and contains all of the transforms/batching/other settings
        which have been applied to it up until this method is called.

        Note that the data which is sampled as part of this new loader
        is not removed from the original loader; this simply serves as an
        interface to use a random set of images from the full dataset.

        Parameters
        ----------
        k : int, float
            Either an integer specifying the number of samples or a float
            specifying the proportion of images from the total to take.
        {random_state}

        Returns
        -------
        A reduced `AgMLDataLoader` with the new data.
        """
        # Parse the input to an integer.
        if isinstance(k, float):
            # Check that 0.0 <= k <= 1.0.
            # NOTE(review): the message mentions `take_class` — likely a
            # copy-paste from the sibling method's error text.
            if not 0.0 <= k <= 1.0:
                raise ValueError(
                    "If passing a proportion to `take_class`, "
                    "it should be in range [0.0, 1.0].")
            # Convert the proportion float to an absolute int. Note that
            # the method used is rounding up to the nearest int for cases
            # where there is not an exact proportional equivalent.
            getcontext().prec = 4 # noqa
            proportion = Decimal(k) / Decimal(1)
            num_images = self.num_images
            k = int(proportion * num_images)
        # If the input is an integer (or the float is converted to an int
        # above), then select a random sampling of images from the dataset.
        if isinstance(k, int):
            # Check that `k` is valid for the number of images in the dataset.
            if not 0 <= k <= self.num_images:
                raise ValueError(
                    f"Received a request to take a random sampling of "
                    f"{k} images, when the dataset has {self.num_images}.")
            # We use a similar functionality to the `split` method here,
            # essentially choosing a random sampling up until `k` and then
            # using the `DataManager` to access the reduced data.
            split = np.arange(0, self.num_images)
            np.random.shuffle(split)
            indices = split[:k]
            content = list(self._manager.generate_split_contents(
                {'content': indices}).values())[0]
            # Create a new `AgMLDataLoader` from the new contents. The
            # result is not marked as split, so it can be split further.
            obj = self._generate_split_loader(content, 'train')
            obj._is_split = False
            return obj
        # Otherwise, raise an error.
        else:
            raise TypeError(
                f"Expected only an int or a float when "
                f"taking a random split, got {type(k)}.")
    @inject_random_state
    def split(self, train = None, val = None, test = None, shuffle = True):
        """Splits the data into train, val and test splits.

        By default, this method does nothing (or if the data has been
        split into sets, it resets them all to one set). Setting the
        `train`, `val`, and `test` parameters randomly divides the
        data into train, validation, and/or test sets, depending on
        which ones are provided and their values.

        Values can either be passed as exact numbers or as proportions,
        e.g. either `train = 80, test = 20` in a 100-value dataset, or
        as `train = 0.8, test = 0.2`. Whichever value is not passed,
        e.g. `val` in this case, has no value in the loader.

        Parameters
        ----------
        train : int, float
            The split for training data.
        val : int, float
            The split for validation data.
        test : int, float
            The split for testing data.
        shuffle : bool
            Whether to shuffle the split data.
        {random_state}

        Raises
        ------
        ValueError
            If the loader is already split or batched, or if the given
            values do not sum to 1.0 (floats) / the dataset size (ints).
        TypeError
            If the values mix ints and floats.

        Notes
        -----
        Any processing applied to this `AgMLDataLoader` will also be present
        in the split loaders until they are accessed from the class. If you
        don't want these to be applied, access them right after splitting.
        """
        # Check if the data is already split or batched.
        if not AgMLExperimentalFeatureWrapper.nested_splitting():
            if self._is_split:
                raise ValueError("Cannot split already split data.")
            elif self._manager._batch_size is not None:
                raise ValueError("Cannot split already batched data. "
                                 "Split the data before batching.")
        # If no parameters are passed, then don't do anything.
        arg_dict = {'train': train, 'val': val, 'test': test}
        valid_args = {k: v for k, v in arg_dict.items() if v is not None}
        if all(i is None for i in arg_dict.values()):
            return None
        # There are two valid ways to pass splits. The first involves
        # passing the split values as floats, the second as ints. If we
        # receive the splits as floats, then we convert them to ints
        # in order to maintain maximum precision if we do manage to get
        # them as ints. Then the procedure is the same.
        if all(isinstance(i, float) for i in valid_args.values()):
            # To prevent potential precision errors, we need to convert the
            # splits to `Decimal` objects and then set the decimal precision.
            getcontext().prec = 4 # noqa
            valid_args = {k: Decimal(v) / Decimal(1) for k, v in valid_args.items()}
            if not sum(valid_args.values()) == Decimal(1):
                raise ValueError(f"Got floats for input splits and expected a sum "
                                 f"of 1, instead got {sum(valid_args.values())}.")
            # Convert the splits from floats to ints. If the sum of the int
            # splits are greater than the total number of data, then the largest
            # split is decreased in order to keep compatibility in usage.
            num_images = self.num_images
            proportions = {k: int(v * Decimal(num_images)) for k, v in valid_args.items()}
            if sum(proportions.values()) != num_images:
                diff = sum(proportions.values()) - num_images
                largest_split = list(proportions.keys())[
                    list(proportions.values()).index(
                        max(proportions.values()))]
                proportions[largest_split] = proportions[largest_split] - diff
            valid_args = proportions.copy()
        # Create the actual data splits.
        if all(isinstance(i, int) for i in valid_args.values()):
            # Ensure that the sum of the splits is the length of the dataset.
            if not sum(valid_args.values()) == self.num_images:
                raise ValueError(f"Got ints for input splits and expected a sum "
                                 f"equal to the dataset length, {self.num_images},"
                                 f"but instead got {sum(valid_args.values())}.")
            # The splits will be generated as sequences of indices.
            generated_splits = {}
            split = np.arange(0, self.num_images)
            names, splits = list(valid_args.keys()), list(valid_args.values())
            # Shuffling of the indexes will occur first, such that there is an even
            # shuffle across the whole sample and not just in the individual splits.
            if shuffle:
                np.random.shuffle(split)
            # Create the list of split indexes. One, two, or three splits
            # partition the (possibly shuffled) index array contiguously.
            if len(valid_args) == 1:
                generated_splits[names[0]] = split
            elif len(valid_args) == 2:
                generated_splits = {k: v for k, v in zip(
                    names, [split[:splits[0]], split[splits[0]:]])}
            else:
                split_1 = split[:splits[0]]
                split_2 = split[splits[0]: splits[0] + splits[1]]
                split_3 = split[splits[0] + splits[1]:]
                generated_splits = {k: v for k, v in zip(
                    names, [split_1, split_2, split_3])}
            # Get the actual split contents from the manager. These contents are
            # not `DataObject`s, rather they are simply the actual mapping of
            # data to be passed to a `DataBuilder` when constructing the splits.
            contents = self._manager.generate_split_contents(generated_splits)
            # Build new `DataBuilder`s and `DataManager`s for the split data.
            for split, content in contents.items():
                setattr(self, f'_{split}_data', content)
        # Otherwise, raise an error for an invalid type.
        else:
            raise TypeError(
                "Expected either only ints or only floats when generating "
                f"a data split, got {[type(i) for i in arg_dict.values()]}.")
def batch(self, batch_size = None):
"""Batches sets of image and annotation data according to a size.
This method will group sets of data together into batches of size
`batch_size`. In turn, items gathered from the loader will, rather
than being an image and annotation, be an array of images and an
array of annotations (not an array of image/annotation pairs).
Batching data will include a `batch` dimension for the images and
annotations that are returned (e.g., the image array will have
dimensions NHWC instead of HWC). If the data is not batched, this
dimension will not be present unless the loader is in training mode.
The data can be un-batched by passing `None` to batch size (or
calling the method with no arguments).
Parameters
----------
batch_size : int, None
The number of groups to batch data together into.
Notes
-----
The last batch will be of size <= `batch_size`.
"""
self._manager.batch_data(
batch_size = batch_size
)
def resize_images(self, image_size = None, method = 'bilinear'):
"""Resizes images within the loader to a specified size.
This method applies a resizing parameter for images before they are
returned from the data loader. The default starting point, if this
method is never called, is to apply no resizing. If the loader is set
in "training" mode and no size is specified, it defaults to (512, 512).
Image resizing contains a few modes:
1. `default` or `None`: No resizing, leaves images in default size.
This is the default parameter if nothing is passed.
2. `train`: This will set a default training size of (512, 512).
3. `imagenet`: This will set a default size of (224, 224).
4. custom size: Resizes the images to the provided size.
5. `auto`: Dynamically selects an image size based on a few factors.
For example, if there are certain outliers in a dataset which are
of a different size while the majority remain the same, then, the
behavior of this method is chosen by a majority threshold of the
sizes of all the images in the dataset. If no shape can be
inferenced, it returns a default size of (512, 512).
The resizing also applies to the annotation in certain cases,
depending on the task and the actual content of the annotation:
- For object detection, the bounding box coordinates will
be resized and the area of the box will in turn be recomputed.
- For semantic segmentation, the annotation mask will be resized,
using a nearest-neighbor interpolation to keep it as similar
as possible to the original mask (preventing data loss).
Parameters
----------
image_size : optional
The resizing parameter for the image.
method : optional
The method to resize the images. Should be one of 'nearest',
'bilinear', 'linear', or 'cubic'. Defaults to 'bilinear'.
Notes
-----
If a transform pipeline is provided, images will be resized
*before* being passed into the transform pipeline.
"""
self._manager.assign_resize(
image_size = image_size,
method = method
)
def transform(self,
transform = NoArgument,
target_transform = NoArgument,
dual_transform = NoArgument):
"""Applies vision transforms to the input image and annotation data.
This method applies transformations to the image and annotation data
in the dataset. Transforms include augmentations and other processing
methods, and can be applied independently to the image and annotation,
or together to both (`transform`, `target_transform`, `dual_transform`).
The hierarchy in which transforms are applied is:
transform -> --------|
|-----> dual_transform
target_transform -> --|
The `transform` and `target_transform` argument are used for methods
which act independently on the image and the annotation, respectively.
The values passed to these arguments can be:
- An `albumentations` transform pipeline.
- A `keras.Sequential` model (or preprocessing layer) or a
set of `torchvision.transform`s.
- A method which accepts one input and returns one output.
The `dual_transform` argument is used for non-image-classification
tasks. The following describe the types of arguments that can be
passed to `dual_transform`, depending on the task:
Object Detection:
- An `albumentations` transform pipeline with `bbox_params` in
to be applied to both the image and the bounding boxes.
- A method (not a torchvision or Keras preprocessing pipeline)
that accepts two inputs and returns two outputs.
Semantic Segmentation:
- An `albumentations` transform pipeline that may include
spatial and/or visual augmentation.
- A method to independently or dually apply transformations
to the image and annotation mask.
- A `torchvision.transforms` or `tf.keras.Sequential` pipeline
which will be applied to the image and mask using the same
random seed, for reproducibility. Use the provided method
`generate_keras_segmentation_dual_transform` for this.
If you want to reset the transforms, then simply call this method
with no arguments. Alternatively, to reset just a single transform,
pass the value of that argument as `None`.
Parameters
----------
transform : optional
A transform to be applied independently to the input image.
target_transform : optional
A transform to be applied independently to the annotation.
dual_transform : optional
A transform to be applied to both the input and annotation.
Notes
-----
- Image resizing takes place before any transformations are applied.
After the transforms are applied in this order, they returned and
if passed again, they will have a different transform applied to
them. The state is independent of the images passed.
- Albumentations transforms are special in that even transforms which
would normally be passed to `dual_transform` (e.g., they act on the
input image and the output annotation) can simply be passed to the
`transform` argument and they will automatically be applied.
"""
self._manager.push_transforms(
transform = transform,
target_transform = target_transform,
dual_transform = dual_transform
)
def normalize_images(self, method = 'scale'):
"""Converts images from 0-255 integers to 0-1 floats and normalizes.
This is a convenience method to convert all images from integer-valued
arrays into float-valued arrays, and normalizes them (using shifting
and scaling from mean and std). This is useful for training in order
to reduce computational complexity (instead of large-valued integer
multiplication, only float multiplication), and for extracting the
most information out of different types of imagery.
There are three different 'normalization' modes that can be initialized
with this method, as described below:
1. `scale`: This simply scales images from the 0-255 pixel range to
the 0-1 range (and converts them to floats as such).
2. `imagenet`: This performs normalization using the traditional ImageNet
mean and standard deviation:
(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])
This is useful when trying to conduct transfer learning, for instance.
3. `standard`: This performs normalization using a pre-calculated mean
and standard deviation for the dataset (see the public sources JSON).
To remove normalization altogether, pass `None` as a parameter.
Parameters
----------
method : str
The method by which to normalize the images.
Notes
-----
This method is not implicitly called when converting to PyTorch/TensorFlow
mode, it needs to be manually called even if you just want 0-1 scaled images.
"""
if method not in ['scale', 'imagenet', 'standard', None]:
raise ValueError(f"Received invalid normalization method: '{method}'.")
if method == 'scale':
normalization_params = 'scale'
elif method == 'imagenet':
normalization_params = 'imagenet'
elif method == 'standard':
normalization_params = self._info.image_stats
else:
normalization_params = None
self.transform(
transform = ('normalize', normalization_params)
)
def labels_to_one_hot(self, add = True):
"""Converts image classification numerical labels to one-hot labels.
This is a convenience method to apply one-hot vector transformations
to the output labels for image classification. Essentially, if we have
a set of labels, [1, 2], it will convert it to [[0, 1, 0], [0, 0, 1]].
This is a more commonly used format for image classification.
Parameters
----------
add : bool
If set to `None` or `False`, this will remove the one-hot
label transformation from the manager. This is `True` by default,
which adds the one-hot label transformation.
"""
if self._info.tasks.ml != 'image_classification':
raise RuntimeError("The `one_hot` label transformation can only "
"be used for image classification tasks.")
self.transform(
target_transform = ('one_hot', self._info.num_classes, add)
)
def mask_to_channel_basis(self, add = True):
"""Converts semantic segmentation masks to channel-wise.
This is a convenience method to convert integer-labeled semantic
segmentation masks into channel-by-channel masks, essentially
one-hot vector transformation but for semantic segmentation. Note
that if the task is binary segmentation, e.g. there is only one
class, then this method will do nothing.
This method should traditionally be called *after* applying general
transformations to the loader, in order to prevent any issues.
Parameters
----------
add : bool
If set to `None` or `False`, this will remove the one-hot
label transformation from the manager. This is `True` by default,
which adds the one-hot label transformation.
"""
if self._info.tasks.ml != 'semantic_segmentation':
raise ValueError("The `mask_to_channel_basis` transformation "
"can only be used for semantic segmentation tasks.")
# Warn about binary segmentation tasks.
if self._info.num_classes == 1:
log(f"No mask-to-channel transformation will be applied for "
f"a binary segmentation task (dataset {self.name}).")
return
self.transform(
target_transform = ('channel_basis', self._info.num_classes, add)
)
def generalize_class_detections(self):
"""Generalizes object detection classes to a single class.
This is a convenience method for object detection tasks, and
converts all of the individual class labels in the task into
a single class, essentially allowing the model to purely
focus on detection of objects and fine-tuning bounding boxes,
with no focus on differentiating classes of different boxes.
This method is intended to be used for multi-dataset loaders,
and will raise an error if using with a single-dataset loader.
"""
raise ValueError(
"This method can only be used with multi-dataset loaders.")
def export_contents(self, export_format = None):
"""Exports the internal contents of the `AgMLDataLoader`.
This method serves as a hook for high-level users who simply want
to download and get the data, by exporting the unprocessed metadata
of the actual dataset, with the following formats:
Image Classification: A mapping between the local image paths and
the numerical labels.
Object Detection: A mapping between the local image paths (the full
path, not just the file name), and the COCO JSON annotations
corresponding to each of the images. To get the original COCO
JSON annotation file contents, use `export_format = 'coco'`.
Semantic Segmentation: A mapping between the local image paths
and the local annotation mask paths.
The `export_format` argument can be used to customize what this method
returns. By default, it is set to `None`, and returns dictionaries with
the above specified mappings. However, setting `export_format = 'arrays'`
will return two arrays, with the first array containing the image paths
and the second array containing the annotation data.
Parameters
----------
export_format : optional
The format to export the data in. Defaults to a mapping.
Returns
-------
The raw contents of the dataset.
"""
return self._builder.export_contents(
export_format = export_format
)
    def export_tensorflow(self):
        """Exports the contents of the loader in a native TensorFlow dataset.
        This method constructs a `tf.data.Dataset` from the contents of the
        dataset. The dataset maintains the same loading and preprocessing
        pipeline as the actual `DataLoader`, but allows for faster computation
        and integration into a TensorFlow pipeline.
        When constructing the `tf.Data.Dataset`, the `AgMLDataLoader` uses
        the pre-set parameters of the class, including the transforms, image
        resizing, and the training mode. In particular, the tensor conversion
        and automatic batching is done inherently by the `tf.data.Dataset`, but
        transforms can be disabled in the same way they would be in a normal
        `AgMLDataLoader` by running
        > loader.disable_preprocessing()
        > loader.export_tensorflow()
        or potentially
        > loader.eval()
        > loader.export_tensorflow()
        Image resizing is an exception in that if a specific size is not set,
        then it will automatically be set to (512, 512) to prevent errors.
        The same behavior of transforms and image resizing applies also to
        batching. Calling the relevant methods before exporting the dataset
        will result in those methods being applied to the result. The only
        exception is shuffling, since the data is always shuffled upon being
        exported to a `tf.data.Dataset`. This enables better computation.
        Note that if the data is batched, then it is also prefetched.
        Please note, transforms will not be applied if exporting an object
        detection loader. This is due to the impossibility of applying
        transforms to COCO JSON dictionaries and passing them in TensorFlow's
        graph mode. Use `as_keras_sequence` if you want to use transforms.
        Returns
        -------
        A `tf.data.Dataset` enabled to function like the `AgMLDataLoader`, but
        as a native TensorFlow object for TensorFlow pipelines.
        """
        # Update the backend management system. If the user explicitly chose
        # a different backend, raise rather than silently overriding it.
        from agml.data.exporters.tensorflow import TFExporter
        if get_backend() != 'tf':
            if user_changed_backend():
                raise StrictBackendError(
                    change = 'tf', obj = self.export_tensorflow)
            set_backend('tf')
        # Build the exporter around the loader's internal data builder.
        exporter = TFExporter(
            task = self.info.tasks.ml,
            builder = self._builder
        )
        # Update the current state of the loader (train/eval/etc.) so the
        # exported dataset mirrors this loader's mode.
        exporter.assign_state(state = self._manager._train_manager.state)
        # Parse the transforms and resizing for the class so the exported
        # pipeline replicates the loader's preprocessing.
        transforms = self._manager._transform_manager.get_transform_states()
        resizing = self._manager._resize_manager.size
        exporter.digest_transforms(
            transforms = transforms,
            resizing = resizing
        )
        # Construct and return the loader, batched (and thus prefetched)
        # with the loader's current batch size.
        return exporter.build(batch_size = self._manager._batch_size)
def export_torch(self, **loader_kwargs):
"""Exports the contents of the loader in a native PyTorch loader.
This method wraps the contents of this data loader inside of a
`torch.utils.data.DataLoader`. This method differs from the
`export_tensorflow()` method in that there is no need to convert
directly to a `tf.data.Dataset`, rather if this `AgMLDataLoader`
inherits from `torch.utils.data.Dataset`, it can just be directly
wrapped into a `torch.utils.data.DataLoader`.
The returned `DataLoader` is functionally similar to the
`AgMLDataLoader` in terms of preprocessing and transforming. You
can pass arguments to the `DataLoader` instantiation as keyword
arguments to this method.
Note that the `AgMLDataLoader` which this method encloses is
instead a copy of the instance the method is run on, so that any
changes to the loader afterwards don't affect the exported loader.
Parameters
----------
loader_kwargs : optional
A set of keyword arguments for the `torch.utils.data.DataLoader`.
See the documentation for the loader for more information.
Returns
-------
A `torch.utils.data.DataLoader` enclosing a copy of this loader.
"""
from agml.backend.tftorch import torch
from torch.utils.data import DataLoader
if get_backend() != 'torch':
if user_changed_backend():
raise StrictBackendError(
change = 'torch', obj = self.export_torch)
set_backend('torch')
# Make a copy of the `AgMLDataLoader` so the following changes
# don't affect the original loader, just the new one.
obj = self.copy()
# Convert to a PyTorch dataset.
obj.as_torch_dataset()
# The `DataLoader` automatically batches objects using its
# own mechanism, so we remove batching from this DataLoader.
batch_size = loader_kwargs.pop(
'batch_size', obj._manager._batch_size)
obj.batch(None)
shuffle = loader_kwargs.pop(
'shuffle', obj._manager._shuffle)
# The `collate_fn` for object detection is different because
# the COCO JSON dictionaries each have different formats. So,
# we need to replace it with a custom function.
collate_fn = loader_kwargs.pop('collate_fn')
if obj.task == 'object_detection' and collate_fn is None:
def collate_fn(batch):
images = torch.stack(
[i[0] for i in batch], dim = 0)
coco = tuple(zip(*[i[1] for i in batch]))
return images, coco
# Return the DataLoader with a copy of this AgMLDataLoader, so
# that changes to this will not affect the returned loader.
return DataLoader(
obj,
batch_size = batch_size,
shuffle = shuffle,
collate_fn = collate_fn,
**loader_kwargs
)
def show_sample(self, image_only = False, no_show = False):
"""Shows a single data sample from the dataset.
This method generates a data sample from the dataset with an image and
its corresponding annotation (or, if `image_only` is True, then only the
image itself). This data sample is then displayed, unless `no_show` is
True in which case the processed sample will simply be returned.
Parameters
----------
image_only : optional
Whether to show only the image or the image and the annotation.
no_show : optional
Whether to display the sample or not.
Returns
-------
The data sample with/without annotation.
"""
# Get the sample (and take only the first one in a batch if batched).
image, annotations = self[self._manager._get_random_index()]
if len(image.shape) == 4:
image = image[0]
annotations = annotations[0]
# Show the sample.
show_sample(self,
image_only = image_only,
no_show = no_show,
sample = (image, annotations))
# Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import collections
from typing import Union
from decimal import Decimal, getcontext
import numpy as np
from agml.framework import AgMLSerializable
from agml.data.metadata import DatasetMetadata
from agml.data.loader import AgMLDataLoader
from agml.utils.general import (
resolve_list_value, NoArgument
)
from agml.utils.random import seed_context, inject_random_state
from agml.utils.image import consistent_shapes
from agml.utils.logging import log
from agml.backend.tftorch import (
get_backend, set_backend,
user_changed_backend, StrictBackendError,
is_array_like, convert_to_batch
)
class CollectionWrapper(AgMLSerializable):
    """Wraps a collection of items and calls their attributes and methods."""
    serializable = frozenset(('collection', 'keys'))
    def __init__(self, collection, keys = None, ignore_types = False):
        # Store the items and (unless disabled) enforce that all of the
        # items share the type of the first one.
        self._collection = collection
        if not ignore_types:
            expected = type(collection[0])
            if any(not isinstance(item, expected) for item in collection):
                raise TypeError(
                    f"Items in a collection should all be of the "
                    f"same type, got {[type(i) for i in collection]}")
        self._keys = keys
    def __len__(self):
        return len(self._collection)
    def __getitem__(self, item):
        # String access resolves through the parallel `keys` list;
        # everything else indexes the collection directly.
        if isinstance(item, str):
            item = self._keys.index(item)
        return self._collection[item]
    @property
    def keys(self):
        return self._keys
    def get_attributes(self, attr):
        """Returns the value of `attr` from every item in the collection."""
        return [getattr(item, attr) for item in self._collection]
    def call_method(self, method, args = None, kwargs = None):
        """Calls `method` on every item, gathering the return values.

        A tuple `args` supplies one positional argument per item (and must
        match the collection length); any other `args` value is splatted
        into every call identically.
        """
        kwargs = {} if kwargs is None else kwargs
        if args is None:
            args = ()
        elif isinstance(args, tuple):
            if len(args) != len(self._collection):
                raise IndexError(
                    f"Got {len(args)} unique arguments for a "
                    f"collection of length {len(self._collection)}.")
            return [getattr(item, method)(arg, **kwargs)
                    for item, arg in zip(self._collection, args)]
        return [getattr(item, method)(*args, **kwargs)
                for item in self._collection]
    def apply(self, method, args = None):
        """Applies a callable to each item (optionally zipped with `args`)."""
        if args is None:
            for item in self._collection:
                method(item)
        else:
            for item, arg in zip(self._collection, args):
                method(item, arg)
class MultiDatasetMetadata(AgMLSerializable):
    """Stores metadata for a collection of AgML datasets.
    Functionally, this class is just a wrapper around multiple
    `DatasetMetadata` objects, and for each of the traditional
    attributes in the original `DatasetMetadata`, returns a
    dictionary with all of the values for the corresponding
    datasets rather than just a single value.
    """
    serializable = frozenset(("names", "metas", "task"))
    def __init__(self, datasets):
        # Build a collection of metadata objects, one per dataset name.
        self._names = datasets
        self._metas = CollectionWrapper(
            [DatasetMetadata(d) for d in datasets])
        self._validate_tasks()
    @classmethod
    def _from_collection(cls, metas):
        """Instantiates a `MultiDatasetMetadata` object from a collection.

        Unlike `__init__`, this receives already-constructed metadata
        objects rather than dataset names, so `__new__` is used to bypass
        the regular constructor.
        """
        obj = MultiDatasetMetadata.__new__(MultiDatasetMetadata)
        obj._names = [meta.name for meta in metas]
        # We need to ignore types since some metadata objects might be
        # regular `DatasetMetadata`, but some might be `CustomDatasetMetadata`.
        obj._metas = CollectionWrapper(metas, ignore_types = True)
        obj._validate_tasks()
        return obj
    def _validate_tasks(self):
        """Ensures every dataset in the collection shares the same ML task."""
        # For a collection of datasets to work, they all need
        # to be of the same task. This is a check of this.
        tasks = self._metas.get_attributes('tasks')
        if not all(tasks[0].ml == t.ml for t in tasks):
            raise ValueError("To use a collection of datasets, all of them "
                             f"must be of the same task. Got tasks {tasks} "
                             f"for the provided datasets {self._names}.")
        self._task = tasks[0]
    def __getattr__(self, attr):
        # This is the main functionality of the `DatasetMetadata`
        # class. Rather than re-writing each of the attributes,
        # this method simply checks whether the requested attribute
        # or method exists in the original `DatasetMetadata` class,
        # then calls the relevant method on each of the stored
        # metadata objects and returns them.
        if hasattr(DatasetMetadata, attr):
            obj = getattr(DatasetMetadata, attr)
            # If it is a property, then we need to return something:
            # a dict mapping each dataset name to its attribute value.
            if isinstance(obj, property):
                return {k: v for k, v in zip(
                    self._names, self._metas.get_attributes(attr))}
            # If it is a method, then it is just printing.
            # NOTE(review): attributes that are neither a property nor a
            # plain function fall through and implicitly return None.
            if isinstance(obj, types.FunctionType):
                self._metas.call_method(attr)
                return lambda: None # To act like a function.
        # Otherwise, raise an error for an invalid argument.
        else:
            raise AttributeError(f"Invalid attribute {attr} for `DatasetMetadata`.")
class AnnotationRemap(AgMLSerializable):
    """A helper class to remap annotation labels for multiple datasets.

    Given the unified class-to-number mapping built across all datasets
    in a multi-dataset loader, calling an instance converts one dataset's
    local annotation IDs into the unified (global) IDs. It is installed
    as a pre-transform hook so it runs before any user transforms.
    """
    serializable = frozenset((
        "general_class_to_num", "num_to_class", "task",
        "generalize_class_detections"))
    def __init__(self, general_class_to_num, num_to_class,
                 task, generalize_class_detections = False):
        self._task = task
        self._num_to_class = num_to_class
        self._general_class_to_num = general_class_to_num
        self._generalize_class_detections = generalize_class_detections
    def __call__(self, contents, name):
        """Re-maps the annotation for the new, multi-dataset mapping.

        Parameters
        ----------
        contents : tuple
            The `(image, annotations)` pair for a single sample.
        name : str
            The name of the dataset the sample was drawn from.

        Returns
        -------
        The `(image, annotations)` pair with remapped annotations.
        """
        image, annotations = contents
        # For image classification, simply re-map the label number.
        if self._task == 'image_classification':
            annotations = self._general_class_to_num[
                self._num_to_class[name][annotations].lower()]
        # For semantic segmentation, re-map the mask IDs.
        if self._task == 'semantic_segmentation':
            unique_values = np.unique(annotations)[1:] # skip background
            # BUGFIX: compute a boolean mask for every old ID *before* any
            # reassignment. The previous `annotations[np.where(...)[0]]`
            # indexing selected entire rows of a 2-D mask (`np.where` on a
            # 2-D array returns per-axis index arrays), and remapping
            # in-place could collide when a new ID equaled a later old ID.
            masks = [(annotations == u) for u in unique_values]
            for u, mask in zip(unique_values, masks):
                annotations[mask] = self._general_class_to_num[
                    self._num_to_class[name][u].lower()]
        # For object detection, also just remap the annotation ID.
        if self._task == 'object_detection':
            if self._generalize_class_detections:
                annotations['category_id'] = np.ones_like(
                    annotations['category_id'])
            else:
                category_ids = annotations['category_id']
                category_ids[np.where(category_ids == 0)[0]] = 1 # fix
                new_ids = np.array([self._general_class_to_num[
                    self._num_to_class[name][c].lower()]
                    for c in category_ids])
                annotations['category_id'] = new_ids
        return image, annotations
class AgMLMultiDatasetLoader(AgMLSerializable):
"""Loads and holds a collection of multiple datasets.
This class serves as an interface for the `AgMLDataLoader` when
using multiple datasets together, and enables similar seamless
usage as with the traditional `AgMLDataLoader`, with additional
features to support the usage of multiple datasets.
Functionally, this class acts as a wrapper around multiple
`AgMLDataLoader` objects, and draws upon similar functionality
to the `DataManager` in order to access data from multiple objects.
"""
serializable = frozenset(
('info', 'loaders', 'loader_accessors', 'class_meta',
'set_to_keys', 'bounds', 'batch_size', 'shuffle_data',
'data_distributions', 'is_split', 'train_data',
'val_data', 'test_data'))
    def __init__(self, datasets, **kwargs):
        """Instantiates an `AgMLDataLoader` with multiple datasets.

        Parameters
        ----------
        datasets : list
            The names of the AgML datasets to load together. Order does
            not matter: the list is sorted internally so the same set of
            datasets always produces the same class mapping.
        **kwargs : optional
            Keyword arguments forwarded to every sub-`AgMLDataLoader`
            (e.g. `dataset_path`). `shuffle` is also read here to control
            high-level shuffling of the accessor array.
        """
        # The order of the dataset images and classes should be
        # agnostic to the order which they are passed in. So, the
        # same datasets but in a different order should yield the
        # same class mapping and internal representation. We sort
        # the datasets in order to ensure the same order every time.
        datasets = sorted(datasets)
        # Set up the datasets and their associated metadata. Since
        # there are multiple datasets, the `info` parameter here
        # will return a `MultipleDatasetMetadata` object which
        # is functionally similar to the `DatasetMetadata` object,
        # but returns dictionaries with the relevant parameters for
        # each of the different datasets, rather than just for one.
        self._info = MultiDatasetMetadata(datasets)
        # Create a set of `AgMLDataLoader` objects for each of the
        # datasets which are provided to the loader. These will
        # be the primary access interface for the objects.
        self._make_loaders(datasets, **kwargs)
        # Adapt all of the individual class types in the sub-datasets,
        # such that if there are two classes which are the same, then
        # the `class` that they represent should be the same.
        self._adapt_classes()
        # Similar to how the internal `DataManager` works, this wrapper
        # will access data from the internal `AgMLDataLoader` using an
        # accessor array, which will be the length of all of the datasets
        # in the loader combined. It will go the length of the datasets
        # in the order provided, where index 0 represents the first item
        # in the first dataset, and index 1 represents the last item in
        # the last dataset. Batching works in a similar way.
        self._loader_accessors = np.arange(
            sum(v for v in self._info.num_images.values()))
        # Map every global index to the dataset it belongs to, using the
        # cumulative image counts as dataset boundaries.
        sets = self._info.num_images.keys()
        bounds = np.cumsum(list(self._info.num_images.values())).tolist()
        bounds = (0, ) + (*bounds, )
        self._set_to_keys = {}
        self._bounds = {s: b for s, b in zip(sets, bounds)}
        for i, set_ in enumerate(sets):
            value = 0 if i == 0 else 1
            self._set_to_keys.update(dict.fromkeys(
                np.arange(bounds[i] - value,
                          bounds[i + 1] + 1), set_))
        # The batch size is modified similarly like the `DataManager`.
        # Since all of the data loaders should have the same properties,
        # transforms, and other values, we get the `make_batch` method
        # from the first loader and bound that to make batches.
        self._batch_size = None
        self._make_batch = self._loaders[0]._manager._train_manager.make_batch
        # Shuffling in a multi-dataset loader takes place on a high-level.
        # The contents of the actual datasets themselves are not shuffled.
        # Instead, the accessor array is shuffled (like the DataManager).
        self._shuffle_data = kwargs.get('shuffle', True)
        if self._shuffle_data:
            self.shuffle(self._shuffle_data)
        # We need to transform the class annotations before they are sent
        # through any other transforms, since those other transforms may
        # modify the value of the annotation and potentially cause issues
        # with the output annotation. However, we need this to happen
        # even when transforms are disabled. So, we modify each of the
        # loaders' `TrainingManager`s with a special argument to call an
        # extra helper method which in turn modifies the class annotation
        # before any other transforms are applied, and in any case.
        self._loaders.apply(
            lambda x: x._manager._train_manager._set_annotation_remap_hook(
                AnnotationRemap(
                    self.class_to_num, self._info.num_to_class, self.task)))
        # We can't use the `auto` resizing mode for the resizing manager,
        # because of the complexity of trying to make it work with multiple
        # different data types. So, disable auto mode to prevent errors.
        self._loaders.apply(
            lambda x: x._manager._resize_manager.disable_auto())
        # The data is not split to begin with. So, we set the split
        # parameter to false and store all of the split datasets themselves
        # as empty variables (which will be updated if and when it is split).
        self._is_split = False
        self._train_data = None
        self._val_data = None
        self._test_data = None
        # Calculate the distribution of the data.
        self._data_distributions = self._info.num_images
        # Total number of images in the entire dataset.
        self._num_images = sum(self._info.num_images.values())
    @classmethod
    def _instantiate_from_collection(cls, *loaders, classes):
        """Instantiates an `AgMLMultiDatasetLoader` directly from a collection.
        This method is, in essence, a wrapper around the actual `__init__`
        method for the multi-loader, but one which takes into account the fact
        that the loaders are already instantiated, and thus works around those
        already-provided parameters, rather than starting from scratch.

        Parameters
        ----------
        loaders : AgMLDataLoader
            The already-instantiated loaders to merge.
        classes : list
            The expected unified class list (validated in `_adapt_classes`).

        Returns
        -------
        A new `AgMLMultiDatasetLoader` wrapping the given loaders.
        """
        obj = AgMLMultiDatasetLoader.__new__(AgMLMultiDatasetLoader)
        # Create the custom dataset metadata wrapper.
        obj._info = MultiDatasetMetadata._from_collection([
            loader.info for loader in loaders])
        # Add the loaders and adapt classes.
        obj._loaders = CollectionWrapper(
            loaders, keys = [loader.info.name for loader in loaders])
        obj._adapt_classes(cls = classes)
        # The remaining contents here are directly copied from the above
        # `__init__` method, without comments (see above for information):
        # Construct the accessor array.
        obj._loader_accessors = np.arange(
            sum(v for v in obj._info.num_images.values()))
        sets = obj._info.num_images.keys()
        bounds = np.cumsum(list(obj._info.num_images.values())).tolist()
        bounds = (0, ) + (*bounds, )
        obj._set_to_keys = {}
        obj._bounds = {s: b for s, b in zip(sets, bounds)}
        for i, set_ in enumerate(sets):
            value = 0 if i == 0 else 1
            obj._set_to_keys.update(dict.fromkeys(
                np.arange(bounds[i] - value,
                          bounds[i + 1] + 1), set_))
        # Set the batch size and shuffling.
        obj._batch_size = None
        obj._make_batch = obj._loaders[0]._manager._train_manager.make_batch
        obj._shuffle_data = loaders[0].shuffle_data
        if obj._shuffle_data:
            obj.shuffle(obj._shuffle_data)
        # Transform annotations and resizing.
        obj._loaders.apply(
            lambda x: x._manager._train_manager._set_annotation_remap_hook(
                AnnotationRemap(
                    obj.class_to_num, obj._info.num_to_class, obj.task)))
        obj._loaders.apply(
            lambda x: x._manager._resize_manager.disable_auto())
        # Finalize parameters.
        obj._is_split = False
        obj._train_data = None
        obj._val_data = None
        obj._test_data = None
        obj._data_distributions = obj._info.num_images
        obj._num_images = sum(obj._info.num_images.values())
        # Return the object.
        return obj
def __len__(self):
# Return the length of the data, subject to batching.
return self._data_length()
def __getitem__(self, indexes: Union[int, slice, tuple, list]):
# The `__getitem__` logic adopts the `DataManager` approach
# towards getting multiple items, wrapped into this class.
if isinstance(indexes, slice):
data = np.arange(self._data_length())
indexes = data[indexes].tolist()
if isinstance(indexes, int):
indexes = [indexes]
for idx in indexes:
if idx not in range(len(self)):
raise IndexError(
f"Index {idx} out of range of "
f"AgMLDataLoader length: {len(self)}.")
return self._get_item_impl(resolve_list_value(indexes))
def __iter__(self):
for indx in range(len(self)):
yield self[indx]
def __repr__(self):
dsp = ", "
out = f"<AgMLDataLoader: (datasets=[{dsp.join(self._info.name)}]"
out += f", task={self.task}"
out += f") at {hex(id(self))}>"
return out
def __str__(self):
return repr(self)
def copy(self):
"""Returns a deep copy of the data loader's contents."""
return self.__copy__()
def __copy__(self):
"""Copies the loader and updates its state."""
cp = super(AgMLMultiDatasetLoader, self).__copy__()
cp.copy_state(self)
return cp
def copy_state(self, loader):
"""Copies the state of another `AgMLDataLoader` into this loader.
This method copies the state of another `AgMLDataLoader` into this
loader, including its transforms, resizing, and training state. Other
general parameters such as batch size and shuffling are left intact.
Parameters
----------
loader : AgMLDataLoader
The data loader from which the state should be copied.
Returns
-------
This `AgMLDataLoader`.
"""
# Copy the state for all the sub-loaders. If the loader state is of
# a multi-loader, then only copy the state of its first loader.
if isinstance(loader, AgMLMultiDatasetLoader):
loader = loader._loaders[0]
self._loaders.apply(
lambda x: x.copy_state(loader)
)
def _make_loaders(self, datasets, **kwargs):
"""Constructs the loaders for the datasets in the collection."""
# Get and validate the `dataset_path` argument.
if 'dataset_path' in kwargs:
dataset_path = kwargs.get('dataset_path')
if isinstance(dataset_path, collections.Sequence):
if not len(datasets) == len(dataset_path):
raise IndexError(
f"Got a sequence for the `dataset_path` of a "
f"multi-dataset `AgMLDataLoader`, but it is not "
f"the same length as the number of datasets: "
f"{len(datasets)} datasets ({datasets}) but "
f"{len(dataset_path)} paths ({dataset_path}).")
elif isinstance(dataset_path, str):
dataset_path = [dataset_path] * len(datasets)
else:
dataset_path = False
kwargs.update({'dataset_path': dataset_path})
# Create all of the loaders.
self._loaders = CollectionWrapper([
AgMLDataLoader(dataset, **kwargs) for dataset in datasets],
keys = datasets)
def _adapt_classes(self, cls = None):
"""Adapts the classes in the loader."""
# Get all of the unique classes in the loader.
classes = self._info.classes.values()
class_values = [[o.lower() for o in c] for c in classes]
class_values = [i for s in class_values for i in s]
unique_classes = np.unique(class_values).tolist()
# Check that they match the given classes, if such a list is passed.
if cls is not None:
if not set(cls) == set(unique_classes): # noqa
raise ValueError(
f"Given list of classes {cls} to `AgMLDataLoader.merge`, "
f"but calculated classes {unique_classes}. Check that the "
f"given classes match the actual classes in the given datasets.")
unique_classes = cls
# Create a class metadata storing all of the unique
# classes belonging to this loader and their mappings.
self._class_meta = {
'classes': unique_classes,
'num_classes': len(unique_classes),
'class_to_num': {
v: k + 1 for k, v in enumerate(unique_classes)},
'num_to_class': {
k + 1: v for k, v in enumerate(unique_classes)}
}
def _data_length(self):
"""Calculates the length of the data from the different datasets."""
return len(self._loader_accessors)
@property
def info(self):
"""Returns a `DatasetMetadata` object containing dataset info.
The contents returned in the `DatasetMetadata` object can be used
to inspect dataset metadata, such as the location the data was
captured, the data formats, and the license/copyright information.
See the `DatasetMetadata` class for more information.
"""
return self._info
@property
def task(self):
"""Returns the ML task that this dataset is constructed for."""
return self._info._task.ml
@property
def num_images(self):
"""Returns the number of images in the entire dataset."""
return sum(self.data_distributions.values())
@property
def classes(self):
"""Returns the classes that the dataset is predicting."""
return self._class_meta['classes']
@property
def num_classes(self):
"""Returns the number of classes in the dataset."""
return self._class_meta['num_classes']
@property
def num_to_class(self):
"""Returns a mapping from a number to a class label."""
return self._class_meta['num_to_class']
@property
def class_to_num(self):
"""Returns a mapping from a class label to a number."""
return self._class_meta['class_to_num']
@property
def data_distributions(self):
"""Returns a distribution of data in the loader.
This property returns a dictionary which provides information
about the data within the loader; e.g., how many images are
from each of the datasets in the collection.
"""
return self._data_distributions
    def _generate_split_loader(self, loaders, split):
        """Generates a split `AgMLDataLoader`.

        Builds a new multi-loader from the per-dataset split loaders (e.g.
        each sub-loader's `train_data`) by copying this loader's state dict,
        swapping in the split collection, and recomputing accessors, bounds,
        and data distributions.

        Parameters
        ----------
        loaders : list
            The per-dataset split loaders; `None` entries mean the parent
            was never split, which raises a `ValueError`.
        split : str
            The name of the split (used only in the error message).

        Returns
        -------
        A new, already-split `AgMLMultiDatasetLoader`.
        """
        # Check if the data split exists.
        if any(loader is None for loader in loaders):
            raise ValueError(
                f"Attempted to access split loader {split} when "
                f"parent loader has not been split.")
        # Create a new `CollectionWrapper` around the datasets.
        new_collection = CollectionWrapper(
            loaders, keys = [l_.name for l_ in loaders])
        # Get the state of the current loader and update it
        # with the new collection of loaders. Then, update
        # the accessors and number of images with the newly
        # reduced quantity (due to the splitting of data).
        loader_state = self.copy().__getstate__()
        loader_state['loaders'] = new_collection
        total_num_images = sum(len(loader) for loader in loaders)
        data_distributions = {
            loader.name: len(loader) for loader in loaders}
        loader_state['data_distributions'] = data_distributions
        accessors = np.arange(0, total_num_images)
        if self._shuffle_data:
            np.random.shuffle(accessors)
        loader_state['loader_accessors'] = accessors
        # Batching is deliberately dropped from the copied state and
        # re-applied at the end, after the new loader is constructed.
        batch_size = loader_state.pop('batch_size')
        loader_state['batch_size'] = None
        # Re-generate the mapping for bounds.
        # NOTE(review): this index-to-dataset mapping extends the upper
        # bound (`bound_ranges[i + 1] + value`), whereas `__init__` extends
        # the lower bound (`bounds[i] - value`) — confirm the asymmetry is
        # intentional.
        sets = self._info.num_images.keys()
        bound_ranges = np.cumsum([len(loader) for loader in loaders]).tolist()
        bound_ranges = (0, ) + (*bound_ranges, )
        set_to_keys = {}
        bounds = {s: b for s, b in zip(sets, bound_ranges)}
        for i, set_ in enumerate(sets):
            value = 0 if i == 0 else 1
            set_to_keys.update(dict.fromkeys(
                np.arange(bound_ranges[i],
                          bound_ranges[i + 1] + value), set_))
        loader_state['set_to_keys'] = set_to_keys
        loader_state['bounds'] = bounds
        # Create the new loader from the updated state.
        new_loader = AgMLMultiDatasetLoader.__new__(AgMLMultiDatasetLoader)
        new_loader.__setstate__(loader_state)
        # Batching data should be re-done independently.
        if batch_size is not None:
            new_loader.batch(batch_size = batch_size)
        # Block out all of the splits of the already split
        # loader and set the `_is_split` attribute to True,
        # preventing future splits, and return.
        for attr in ['train', 'val', 'test']:
            setattr(new_loader, f'_{attr}_data', None)
        new_loader._is_split = True
        return new_loader
@property
def train_data(self):
"""Stores the `train` split of the data in the loader."""
if isinstance(self._train_data, AgMLMultiDatasetLoader):
return self._train_data
self._train_data = self._generate_split_loader(
self._loaders.get_attributes('train_data'), split = 'train')
return self._train_data
@property
def val_data(self):
"""Stores the `val` split of the data in the loader."""
if isinstance(self._val_data, AgMLMultiDatasetLoader):
return self._val_data
self._val_data = self._generate_split_loader(
self._loaders.get_attributes('val_data'), split = 'val')
return self._val_data
@property
def test_data(self):
"""Stores the `test` split of the data in the loader."""
if isinstance(self._test_data, AgMLMultiDatasetLoader):
return self._test_data
self._test_data = self._generate_split_loader(
self._loaders.get_attributes('test_data'), split = 'test')
return self._test_data
def eval(self):
"""Sets the `AgMLDataLoader` in evaluation mode.
Evaluation mode disables transforms, and only keeps the loader applying
resizing to the contents. If the loader was previously set into TensorFlow
or PyTorch mode, however, it will also keep up tensor conversion and
potential batch adding (see `as_keras_sequence()` and `as_torch_dataset()`
methods for more information on the exact operations).
This method does not completely disable preprocessing, to completely
disable preprocessing, use `loader.disable_preprocessing()`. Additionally,
if you want to keep only the resizing but not the implicit tensor
conversions based on the backend, then run:
> loader.disable_preprocessing() # or loader.reset_preprocessing()
> loader.eval()
This will refresh the backend conversions and return it to `eval` mode.
Returns
-------
The `AgMLDataLoader` object.
"""
self._loaders.call_method('eval')
return self
def disable_preprocessing(self):
"""Disables all preprocessing on the `AgMLDataLoader`.
This sets the loader in a no-preprocessing mode (represented internally as
`False`), where only the raw data is returned: no transforms, resizing, or
any conversion to any type of backend. This can be used to test or inspect
the original data contents of the loader before processing.
The loader can be set into any mode from here, for instance see `eval()`,
`as_keras_sequence()`, and `as_torch_dataset()` for specific examples on
the different potential training and evaluation states. If you just want
to reset the loader to its default state, which applies only transforms
and resizing, then use `loader.reset_preprocessing()`.
Returns
-------
The `AgMLDataLoader` object.
"""
self._loaders.call_method('disable_preprocessing')
return self
def reset_preprocessing(self):
"""Re-enables preprocessing on the `AgMLDataLoader`.
This resets the loader back to its default train state, namely where it
applies just the given transforms and content resizing. This is a consistent
method, meaning that regardless of the prior train state of the loader
before running this method, it will hard reset it to its original state
(similar to `disable_preprocessing()`, but it keeps some preprocessing).
Returns
-------
The `AgMLDataLoader` object.
"""
self._loaders.call_method('reset_preprocessing')
return self
def on_epoch_end(self):
"""Shuffles the dataset on the end of an epoch for a Keras sequence.
If `as_keras_sequence()` is called and the `AgMLDataLoader` inherits
from `tf.keras.utils.Sequence`, then this method will shuffle the
dataset on the end of each epoch to improve training.
"""
self._loaders.call_method('on_epoch_end')
def as_keras_sequence(self):
"""Sets the `DataLoader` in TensorFlow mode.
This TensorFlow extension converts the loader into a TensorFlow mode,
adding inheritance from the superclass `keras.utils.Sequence` to enable
it to be used directly in a Keras pipeline, and adding extra preprocessing
to the images and annotations to make them compatible with TensorFlow.
The main features added on enabling this include:
1. Conversion of output images and annotations to `tf.Tensor`s.
2. Adding an implicit batch size dimension to images even when the
data is not batched (for compatibility in `Model.fit()`).
3. Adding inheritance from `keras.utils.Sequence` so that any
`AgMLDataLoader` object can be used directly in `Model.fit()`.
4. Setting the data loader to use a constant image shape, namely
`auto` (which will default to (512, 512) if none is found).
This can be overridden by manually setting the image shape
parameter back after running this method. Note that this may
result in errors when attempting implicit tensor conversion.
Returns
-------
The `AgMLDataLoader` object.
"""
self._loaders.call_method('as_keras_sequence')
return self
def as_torch_dataset(self):
"""Sets the `DataLoader` in PyTorch mode.
This PyTorch extension converts the loader into a PyTorch mode, adding
inheritance from th superclass `torch.utils.data.Dataset` to enable it to
be used directly in a PyTorch pipeline, and adding extra preprocessing to
the images and annotations to make them compatible with PyTorch.
The main features added on enabling this include:
1. Conversion of output images and annotations to `torch.Tensor`s.
2. Converting the channel format of the input images from the default,
channels_last, into channels_first (NHWC -> NCHW).
3. Adding inheritance from `torch.utils.data.Dataset` so that any
`AgMLDataLoader` object can be used with a `torch.utils.data.DataLoader`.
4. Setting the data loader to use a constant image shape, namely
`auto` (which will default to (512, 512) if none is found).
This can be overridden by manually setting the image shape
parameter back after running this method. Note that this may
result in errors when attempting implicit tensor conversion.
Returns
-------
The `AgMLDataLoader` object.
"""
self._loaders.call_method('as_torch_dataset')
@property
def shuffle_data(self):
"""Returns whether the loader is set to shuffle data or not.
By default, if no value is passed in initialization, this is set to
`True`. It can be manually toggled to `False` using this property.
"""
return self._shuffle_data
@shuffle_data.setter
def shuffle_data(self, value):
"""Set whether the loader should shuffle data or not.
This can be used to enable/disable shuffling, by passing
either `True` or `False`, respectively.
"""
if not isinstance(value, bool):
raise TypeError("Expected either `True` or `False` for 'shuffle_data'.")
self._shuffle_data = value
def shuffle(self, seed = None):
"""Potentially shuffles the contents of the loader.
If shuffling is enabled on this loader (`shuffle = False` has
not been passed to the instantiation), then this method will
shuffle the order of contents in it. A seed can be provided to
shuffle the dataset to an expected order.
If the data is already batched, then the batch contents will be
shuffled. For instance, if we have data batches [[1, 2], [3, 4]],
then the shuffling result will be [[3, 4], [1, 2]]. If you want
all of the contents to be shuffled, call `shuffle` before batching.
Note that the data is automatically shuffled upon instantiation,
unless the `shuffle = False` parameter is passed at instantiation.
However, this disables automatic shuffling for the class
permanently, and this method must be called to shuffle the data.
Parameters
----------
seed : int, optional
A pre-determined seed for shuffling.
Returns
-------
The `AgMLDataLoader` object.
"""
if seed is None:
np.random.shuffle(self._loader_accessors)
else:
with seed_context(seed):
np.random.shuffle(self._loader_accessors)
return self
def take_dataset(self, name) -> "AgMLDataLoader":
"""Takes one of the datasets in the multi-dataset collection.
This method selects one of the datasets (as denoted by `name`)
in this multi-dataset collection and returns an `AgMLDataLoader`
with its contents. These contents will be subject to any transforms
and modifications as applied by the main loader, but the returned
loader will be a copy, such that any new changes made to the main
multi-dataset loader will not affect the new loader.
Parameters
----------
name : str
The name of one of the sub-datasets of the loader.
Returns
-------
An `AgMLDataLoader`.
"""
return self._loaders[name].copy()
@inject_random_state
def take_random(self, k) -> "AgMLMultiDatasetLoader":
"""Takes a random set of contents from the loader.
This method selects a sub-sample of the contents in the loader,
based on the provided number of (or proportion of) elements `k`.
It then returns a new loader with just this reduced number of
elements. The new loader is functionally similar to the original
loader, and contains all of the transforms/batching/other settings
which have been applied to it up until this method is called.
Note that the data which is sampled as part of this new loader
is not removed from the original loader; this simply serves as an
interface to use a random set of images from the full dataset.
For a multi-dataset loader, data is sampled from each of the
sub-datasets proportionally, e.g., the proportion of images
in the new dataset (per each sub-dataset) will be the same as
in the original dataset.
Parameters
----------
k : {int, float}
Either an integer specifying the number of samples or a float
specifying the proportion of images from the total to take.
Returns
-------
A reduced `AgMLDataLoader` with the new data.
"""
# Parse the input to an integer.
if isinstance(k, float):
# Check that 0.0 <= k <= 1.0.
if not 0.0 <= k <= 1.0:
raise ValueError(
"If passing a proportion to `take_class`, "
"it should be in range [0.0, 1.0].")
# Convert the proportion float to an absolute int. Note that
# the method used is rounding up to the nearest int for cases
# where there is not an exact proportional equivalent.
getcontext().prec = 4 # noqa
proportion = Decimal(k) / Decimal(1)
num_images = self.num_images
k = int(proportion * num_images)
# If the input is an integer (or the float is converted to an int
# above), then select a random sampling of images from the dataset.
if isinstance(k, int):
# Check that `k` is valid for the number of images in the dataset.
if not 0 <= k <= self.num_images:
raise ValueError(
f"Received a request to take a random sampling of "
f"{k} images, when the dataset has {self.num_images}.")
# Calculate the proportions. If the total sum is less than `k`,
# add 1 to the dataset with the lowest number of images.
getcontext().prec = 4 # noqa
num_images = self.num_images
proportions = {key: int((Decimal(val) / Decimal(num_images)) * k)
for key, val in self._data_distributions.items()}
if sum(proportions.values()) != num_images:
diff = sum(proportions.values()) - k
smallest_split = list(proportions.keys())[
list(proportions.values()).index(
min(proportions.values()))]
proportions[smallest_split] = proportions[smallest_split] - diff
return self._generate_split_loader(
self._loaders.call_method(
'take_random', tuple(proportions.values())), 'train')
# Otherwise, raise an error.
else:
raise TypeError(
f"Expected only an int or a float when "
f"taking a random split, got {type(k)}.")
@inject_random_state
def split(self, train = None, val = None, test = None, shuffle = True):
"""Splits the data into train, val and test splits.
For this multi-dataset loader, an even split of data will
be selected from each dataset. E.g., if you have a loader
of two datasets, each with 100 images, and want a train/test
split of 0.9/0.1, then 90 images from each dataset will form the
new training set, and 10 images from each will form the new
test set. This is to ensure consistency in training.
By default, this method does nothing (or if the data has been
split into sets, it resets them all to one set). Setting the
`train`, `val`, and `test` parameters randomly divides the
data into train, validation, and/or test sets, depending on
which ones are provided and their values.
Values can either be passed as exact numbers or as proportions,
e.g. either `train = 80, test = 20` in a 100-value dataset, or
as `train = 0.8, test = 0.2`. Whichever value is not passed,
e.g. `val` in this case, has no value in the loader.
Parameters
----------
train : {int, float}
The split for training data.
val : {int, float}
The split for validation data.
test : {int, float}
The split for testing data.
shuffle : bool
Whether to shuffle the split data.
Notes
-----
Any processing applied to this `AgMLDataLoader` will also be present
in the split loaders until they are accessed from the class. If you
don't want these to be applied, access them right after splitting.
"""
# We run this by applying dataset splitting to all of the individual
# loaders, which then create and get their own even split of data
# for the dataset they are loading. This will first ensure an even
# split of data from each of the different datasets.
#
# Similar to the `AgMLDataLoader` itself, we will not access the
# `train/val/test_data` parameters instantly, as this will allow
# any new transforms or other parameters which are applied to the
# parent loader to be also applied to all of the child split loaders
# until they are actually accessed in this overhead multi-dataset
# loader class. Then, they will be unset and created.
self._loaders.apply(
lambda x: x.split(
train = train, val = val, test = test, shuffle = shuffle))
    def batch(self, batch_size = None):
        """Batches sets of image and annotation data according to a size.

        Groups sets of data together into batches of size `batch_size`:
        items gathered from the loader become an array of images and an
        array of annotations (not an array of image/annotation pairs),
        with a leading `batch` dimension (e.g., NHWC instead of HWC).
        If the data is not batched, this dimension is absent unless the
        loader is in training mode. Pass `None` (or call with no
        arguments) to un-batch the data.

        Parameters
        ----------
        batch_size : int, None
            The number of groups to batch data together into.

        Notes
        -----
        The last batch will be of size <= `batch_size`.
        """
        # If the data is already batched and a new batch size is called,
        # then update the existing batch sizes. For unbatching the data,
        # update the batch state and then flatten the accessor array.
        if self._batch_size is not None:
            try:
                self._loader_accessors = np.concatenate(self._loader_accessors).ravel()
            except ValueError:
                # The array is currently 0-dimensional.
                pass
        if batch_size is None or batch_size == 0:
            self._batch_size = None
            return
        # If we have a batch size of `1`, then don't do anything
        # since this doesn't really mean to do anything.
        if batch_size == 1:
            return
        # Otherwise, calculate the actual batches and the overflow
        # of the contents, and then update the accessor.
        num_splits = len(self._loader_accessors) // batch_size
        data_items = np.array(self._loader_accessors)
        overflow = len(self._loader_accessors) - num_splits * batch_size
        # NOTE(review): when `overflow` is 0, `data_items[-0:]` is the
        # *entire* array; it is only skipped below because a full array is
        # never shorter than `batch_size` — confirm this is intentional.
        extra_items = data_items[-overflow:]
        try:
            batches = np.array_split(
                np.array(self._loader_accessors
                [:num_splits * batch_size]), num_splits)
        except ValueError:
            # `num_splits` is 0 here (np.array_split rejects 0 sections):
            # there is less data than one batch, so keep everything as a
            # single undersized batch.
            log(f"There is less data ({len(self._loader_accessors)}) than the provided "
                f"batch size ({batch_size}). Consider using a smaller batch size.")
            batches = [self._loader_accessors]
        else:
            # Append the leftover (partial) batch at the end.
            if len(extra_items) < batch_size:
                batches.append(extra_items)
        self._loader_accessors = np.array(batches, dtype = object)
        self._batch_size = batch_size
        # Update the batch creation method.
        self._make_batch = self._loaders[0]._manager._train_manager.make_batch
def resize_images(self, image_size = None, method = 'bilinear'):
"""Resizes images within the loader to a specified size.
This method applies a resizing parameter for images before they are
returned from the data loader. The default starting point, if this
method is never called, is to apply no resizing. If the loader is set
in "training" mode and no size is specified, it defaults to (512, 512).
Image resizing contains a few modes:
1. `default` or `None`: No resizing, leaves images in default size.
This is the default parameter if nothing is passed.
2. `train`: This will set a default training size of (512, 512).
3. `imagenet`: This will set a default size of (224, 224).
4. custom size: Resizes the images to the provided size.
The resizing also applies to the annotation in certain cases,
depending on the task and the actual content of the annotation:
- For object detection, the bounding box coordinates will
be resized and the area of the box will in turn be recomputed.
- For semantic segmentation, the annotation mask will be resized,
using a nearest-neighbor interpolation to keep it as similar
as possible to the original mask (preventing data loss).
Parameters
----------
image_size : optional
The resizing parameter for the image.
method : optional
The method to resize the images. Should be one of 'nearest',
'bilinear', 'linear', or 'cubic'. Defaults to 'bilinear'.
Notes
-----
- There is no `auto` parameter for resizing images when using
multiple data loaders. If auto is passed, it will warn the
user and switch to `default`.
- If a transform pipeline is provided, images will be resized
*before* being passed into the transform pipeline.
"""
if image_size == 'auto':
log("There is no `auto` parameter for resizing images when using"
"multiple datasets in a loader. Switching to `default`.")
self._loaders.apply(
lambda x: x.resize_images(
image_size = image_size, method = method
)
)
def transform(self,
transform = NoArgument,
target_transform = NoArgument,
dual_transform = NoArgument):
"""Applies vision transforms to the input image and annotation data.
This method applies transformations to the image and annotation data
in the dataset. Transforms include augmentations and other processing
methods, and can be applied independently to the image and annotation,
or together to both (`transform`, `target_transform`, `dual_transform`).
The hierarchy in which transforms are applied is:
transform -> --------|
|-----> dual_transform
target_transform -> --|
The `transform` and `target_transform` argument are used for methods
which act independently on the image and the annotation, respectively.
The values passed to these arguments can be:
- An `albumentations` transform pipeline.
- A `keras.Sequential` model (or preprocessing layer) or a
set of `torchvision.transform`s.
- A method which accepts one input and returns one output.
The `dual_transform` argument is used for non-image-classification
tasks. The following describe the types of arguments that can be
passed to `dual_transform`, depending on the task:
Object Detection:
- An `albumentations` transform pipeline with `bbox_params` in
to be applied to both the image and the bounding boxes.
- A method (not a torchvision or Keras preprocessing pipeline)
that accepts two inputs and returns two outputs.
Semantic Segmentation:
- An `albumentations` transform pipeline that may include
spatial and/or visual augmentation.
- A method to independently or dually apply transformations
to the image and annotation mask.
- A `torchvision.transforms` or `tf.keras.Sequential` pipeline
which will be applied to the image and mask using the same
random seed, for reproducibility. Use the provided method
`generate_keras_segmentation_dual_transform` for this.
If you want to reset the transforms, then simply call this method
with no arguments. Alternatively, to reset just a single transform,
pass the value of that argument as `None`.
Parameters
----------
transform : optional
A transform to be applied independently to the input image.
target_transform : optional
A transform to be applied independently to the annotation.
dual_transform : optional
A transform to be applied to both the input and annotation.
Notes
-----
- Image resizing takes place before any transformations are applied.
After the transforms are applied in this order, they returned and
if passed again, they will have a different transform applied to
them. The state is independent of the images passed.
- Albumentations transforms are special in that even transforms which
would normally be passed to `dual_transform` (e.g., they act on the
input image and the output annotation) can simply be passed to the
`transform` argument and they will automatically be applied.
"""
self._loaders.apply(
lambda x: x._manager.push_transforms(
transform = transform,
target_transform = target_transform,
dual_transform = dual_transform
)
)
def normalize_images(self, method = 'scale'):
"""Converts images from 0-255 integers to 0-1 floats and normalizes.
This is a convenience method to convert all images from integer-valued
arrays into float-valued arrays, and normalizes them (using shifting
and scaling from mean and std). This is useful for training in order
to reduce computational complexity (instead of large-valued integer
multiplication, only float multiplication), and for extracting the
most information out of different types of imagery.
There are three different 'normalization' modes that can be initialized
with this method, as described below:
1. `scale`: This simply scales images from the 0-255 pixel range to
the 0-1 range (and converts them to floats as such).
2. `imagenet`: This performs normalization using the traditional ImageNet
mean and standard deviation:
(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])
This is useful when trying to conduct transfer learning, for instance.
3. `standard`: This performs normalization using a pre-calculated mean
and standard deviation for the dataset (see the public sources JSON).
To remove normalization altogether, pass `None` as a parameter.
Parameters
----------
method : str
The method by which to normalize the images.
Notes
-----
This method is not implicitly called when converting to PyTorch/TensorFlow
mode, it needs to be manually called even if you just want 0-1 scaled images.
"""
if method not in ['scale', 'imagenet', 'standard', None]:
raise ValueError(f"Received invalid normalization method: '{method}'.")
if method == 'scale':
normalization_params = 'scale'
elif method == 'imagenet':
normalization_params = 'imagenet'
elif method == 'standard':
normalization_params = self._info.image_stats
else:
normalization_params = None
# Normalization parameters may be specific to each dataset in
# the loader, so we need to make sure we account for this.
self._loaders.apply(
lambda x, transform: x._manager.push_transforms(
transform = transform,
target_transform = NoArgument,
dual_transform = NoArgument),
args = [('normalize', normalization_params[key])
for key in self._loaders.keys]
)
def labels_to_one_hot(self):
"""Converts image classification numerical labels to one-hot labels.
This is a convenience method to apply one-hot vector transformations
to the output labels for image classification. Essentially, if we have
a set of labels, [1, 2], it will convert it to [[0, 1, 0], [0, 0, 1]].
This is a more commonly used format for image classification.
"""
if self.task != 'image_classification':
raise RuntimeError("The `one_hot` label transformation can only "
"be used for image classification tasks.")
self.transform(
target_transform = ('one_hot', self.num_classes)
)
def mask_to_channel_basis(self):
"""Converts semantic segmentation masks to channel-wise.
This is a convenience method to convert integer-labeled semantic
segmentation masks into channel-by-channel masks, essentially
one-hot vector transformation but for semantic segmentation. Note
that if the task is binary segmentation, e.g. there is only one
class, then this method will do nothing.
This method should traditionally be called *after* applying general
transformations to the loader, in order to prevent any issues.
"""
if self.task != 'semantic_segmentation':
raise ValueError("The `mask_to_channel_basis` transformation "
"can only be used for semantic segmentation tasks.")
self.transform(
target_transform = ('channel_basis', self._info.num_classes)
)
def generalize_class_detections(self):
"""Generalizes object detection classes to a single class.
This is a convenience method for object detection tasks, and
converts all of the individual class labels in the task into
a single class, essentially allowing the model to purely
focus on detection of objects and fine-tuning bounding boxes,
with no focus on differentiating classes of different boxes.
"""
if self.task != 'object_detection':
raise ValueError("The `generalize_class_detections` transformation"
"can only be used for object detection tasks.")
self._loaders.apply(
lambda x: x._manager._train_manager._set_annotation_remap_hook(
AnnotationRemap(
self.class_to_num, self._info.num_to_class,
self.task, generalize_class_detections = True)))
def export_contents(self, export_format = None):
"""Exports the internal contents of the `AgMLDataLoader`s.
This method serves as a hook for high-level users who simply want
to download and get the data, by exporting the unprocessed metadata
of the actual dataset, with the following formats:
Image Classification: A mapping between the local image paths and
the numerical labels.
Object Detection: A mapping between the local image paths (the full
path, not just the file name), and the COCO JSON annotations
corresponding to each of the images. To get the original COCO
JSON annotation file contents, use `export_format = 'coco'`.
Semantic Segmentation: A mapping between the local image paths
and the local annotation mask paths.
The `export_format` argument can be used to customize what this method
returns. By default, it is set to `None`, and returns dictionaries with
the above specified mappings. However, setting `export_format = 'arrays'`
will return two arrays, with the first array containing the image paths
and the second array containing the annotation data.
Parameters
----------
export_format : optional
The format to export the data in. Defaults to a mapping.
Returns
-------
The raw contents of the datasets.
"""
return {
k: self._loaders[k].export_contents(
export_format = export_format)
for k in self._loaders.keys
}
@staticmethod
def _calculate_data_and_loader_index(index, bound_map, set_map):
loader_idx = set_map[index]
is_equal = np.where(index == np.array(list(bound_map.values())))[0]
if is_equal.size != 0:
data_idx = 0
else:
data_idx = int(index - list(bound_map.values())[int(
np.searchsorted(np.array(
list(bound_map.values())), index) - 1)])
if data_idx < 0:
data_idx = 0
return loader_idx, data_idx
def _load_one_image_and_annotation(self, index):
"""Loads one image and annotation from a `DataObject`."""
# Get the image and annotation from the corresponding loader.
loader, data_idx = self._calculate_data_and_loader_index(
index, self._bounds, self._set_to_keys)
return self._loaders[loader][data_idx]
def _load_multiple_items(self, indexes):
"""Loads multiple images and annotations from a set of `DataObject`s."""
# Either we're getting multiple batches, or just multiple items.
contents = []
if self._batch_size is not None:
for i in indexes:
contents.append(self._load_batch(self._loader_accessors[i]))
else:
for i in indexes:
contents.append(self._load_one_image_and_annotation(
self._loader_accessors[i]))
return contents
def _batch_multi_image_inputs(self, images):
"""Converts either a list of images or multiple input types into a batch."""
# If the input images are just a simple batch.
if is_array_like(images[0]):
return convert_to_batch(images)
# Otherwise, convert all of them independently.
keys = images[0].keys()
batches = {k: [] for k in keys}
for sample in images:
for key in sample:
batches[key].append(sample[key])
return {k: self._batch_multi_image_inputs(i) for k, i in batches.items()}
    def _batch_multi_output_annotations(self, annotations):
        """Converts either a list of annotations or multiple annotation types into a batch.

        Simple annotations (labels, masks) are stacked into NumPy arrays
        (falling back to an object dtype when per-sample shapes differ);
        COCO JSON dictionaries for object detection are returned as-is;
        dicts of named outputs are grouped per key and batched recursively.
        """
        # If the output annotations are simple objects.
        # NOTE(review): by operator precedence this condition parses as
        # `A or (B and C)`, so it passes whenever the *first* annotation
        # is a list/ndarray regardless of the later clauses — confirm
        # the grouping is intentional.
        if (isinstance(annotations[0], (list, np.ndarray))
                or isinstance(annotations, (list, np.ndarray))
                and isinstance(annotations[0], (int, float))):
            if not consistent_shapes(annotations):
                # Ragged shapes cannot be stacked into a regular array.
                annotations = np.array(annotations, dtype = object)
            else:
                annotations = np.array(annotations)
            return annotations
        # For object detection, just return the COCO JSON dictionaries.
        if self.task == 'object_detection':
            return annotations
        # Otherwise, convert all of them independently.
        keys = annotations[0].keys()
        batches = {k: [] for k in keys}
        for sample in annotations:
            for key in sample:
                batches[key].append(sample[key])
        return {k: self._batch_multi_output_annotations(i) for k, i in batches.items()}
def _load_batch(self, batch_indexes):
"""Gets a batch of data from the dataset.
This differs from simply getting multiple pieces of data from the
dataset, such as a slice, in that it also stacks the data together
into a valid batch and returns it as such.
"""
# Get the images and annotations from the data objects.
images, annotations = [], []
for index in batch_indexes:
image, annotation = self._load_one_image_and_annotation(index)
images.append(image)
annotations.append(annotation)
# Attempt to create batched image arrays.
images = self._batch_multi_image_inputs(images)
# Attempt the same for the annotation arrays. This is more complex
# since there are many different types of annotations, namely labels,
# annotation masks, COCO JSON dictionaries, etc. We need to properly
# create a batch in each of these cases.
annotations = self._batch_multi_output_annotations(annotations)
# Return the batches.
return self._make_batch(
images = images,
annotations = annotations
)
def _get_item_impl(self, indexes):
"""Loads and processes a piece (or pieces) of data from the dataset.
This is the actual accessor method that performs the loading of data
and the relevant processing as dictated by loading, image resizing,
transform application, and other internal processing methods such as
creating batches. This is called by the `AgMLDataLoader` to get data.
"""
# If there is only one index and the data is not batched,
# then we just need to return a single `DataObject`.
if isinstance(indexes, int) and self._batch_size is None:
return self._load_one_image_and_annotation(
self._loader_accessors[indexes])
# If we have a batch of images, then return the batch.
if isinstance(indexes, int) and self._batch_size is not None:
return self._load_batch(self._loader_accessors[indexes])
# Otherwise, if there are multiple indexes (e.g., an unstacked
# slice or just a tuple of integers), then we get multiple images.
if isinstance(indexes, (list, tuple)):
return self._load_multiple_items(indexes)
def export_torch(self, **loader_kwargs):
"""Exports the contents of the loader in a native PyTorch loader.
This method wraps the contents of this data loader inside of a
`torch.utils.data.DataLoader`. This method differs from the
`export_tensorflow()` method in that there is no need to convert
directly to a `tf.data.Dataset`, rather if this `AgMLDataLoader`
inherits from `torch.utils.data.Dataset`, it can just be directly
wrapped into a `torch.utils.data.DataLoader`.
The returned `DataLoader` is functionally similar to the
`AgMLDataLoader` in terms of preprocessing and transforming. You
can pass arguments to the `DataLoader` instantiation as keyword
arguments to this method.
Note that the `AgMLDataLoader` which this method encloses is
instead a copy of the instance the method is run on, so that any
changes to the loader afterwards don't affect the exported loader.
Parameters
----------
loader_kwargs : optional
A set of keyword arguments for the `torch.utils.data.DataLoader`.
See the documentation for the loader for more information.
Returns
-------
A `torch.utils.data.DataLoader` enclosing a copy of this loader.
"""
from agml.backend.tftorch import torch
from torch.utils.data import DataLoader
if get_backend() != 'torch':
if user_changed_backend():
raise StrictBackendError(
change = 'torch', obj = self.export_torch)
set_backend('torch')
# Make a copy of the `AgMLDataLoader` so the following changes
# don't affect the original loader, just the new one.
obj = self.copy()
# Convert to a PyTorch dataset.
obj.as_torch_dataset()
# The `DataLoader` automatically batches objects using its
# own mechanism, so we remove batching from this DataLoader.
batch_size = loader_kwargs.pop(
'batch_size', obj._batch_size)
obj.batch(None)
shuffle = loader_kwargs.pop(
'shuffle', obj._shuffle_data)
# The `collate_fn` for object detection is different because
# the COCO JSON dictionaries each have different formats. So,
# we need to replace it with a custom function.
collate_fn = loader_kwargs.pop('collate_fn')
if obj.task == 'object_detection' and collate_fn is None:
def collate_fn(batch):
images = torch.stack(
[i[0] for i in batch], dim = 0)
coco = tuple([i[1] for i in batch])
return images, coco
# Return the DataLoader with a copy of this AgMLDataLoader, so
# that changes to this will not affect the returned loader.
return DataLoader(
obj,
batch_size = batch_size,
shuffle = shuffle,
collate_fn = collate_fn,
**loader_kwargs
)
| 66,025 | 42.639128 | 94 | py |
AgML | AgML-main/agml/data/managers/transform_helpers.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import numpy as np
from agml.backend.tftorch import torch
from agml.framework import AgMLSerializable
from agml.utils.random import seed_context
class TransformApplierBase(AgMLSerializable):
    """Base wrapper that makes any transform usable as a plain callable.

    Some transforms cannot be invoked positionally out of the box — for
    example, `albumentations` pipelines expect keyword arguments. Each
    subclass implements `apply` to adapt one such calling convention, so
    that every transform in the pipeline can be applied uniformly via
    `wrapper(*data)`.
    """
    serializable = frozenset(('transform', ))
    def __init__(self, transform):
        self._transform = transform
    @abc.abstractmethod
    def apply(self, *args, **kwargs):
        """Applies the wrapped transformation to the input data."""
        return
    def __call__(self, *args):
        # Delegate straight to the subclass-specific implementation.
        return self.apply(*args)
    def __str__(self):
        return f"{self.__class__.__name__}: {self._transform}"
class AlbumentationsTransformSingle(TransformApplierBase):
    """Applies an `albumentations` transform to a lone image."""
    def apply(self, image):
        # Albumentations pipelines take and return keyword-based dicts.
        return self._transform(image = image)['image']
class AlbumentationsTransformMask(TransformApplierBase):
    """Applies an `albumentations` transform to an image/mask pair."""
    def apply(self, image, mask):
        result = self._transform(image = image, mask = mask)
        return result['image'], result['mask']
class AlbumentationsTransformCOCO(TransformApplierBase):
    """Applies an `albumentations` transform to an image and COCO dict.

    The transform cannot be applied to the COCO JSON dictionary directly:
    the bounding boxes and category IDs are extracted, transformed
    alongside the image, and then re-inserted into the dictionary, with
    the box areas recomputed afterwards.
    """
    def apply(self, image, coco):
        coco_boxes = coco['bbox']
        coco_labels = coco['category_id']
        # Stack the labels onto the boxes so albumentations keeps each
        # box associated with its category through the transform.
        bboxes = np.c_[coco_boxes, coco_labels]
        transform = self._transform(image = image, bboxes = bboxes)
        image = transform['image']
        bboxes = np.array(transform['bboxes'])
        if bboxes.size == 0:
            # All boxes may have been dropped by the transform (e.g., by
            # a crop); guard against indexing into an empty 1-d array.
            bboxes = np.zeros(shape = (0, 5))
        bboxes = bboxes[:, :-1]
        # NOTE(review): `category_id` is not updated here, so it may go
        # stale if the transform drops boxes — confirm against callers.
        coco['bbox'] = bboxes
        # COCO boxes are in `[x, y, width, height]` format, so the area
        # of each box is `width * height`. (Bug fix: this was previously
        # computed as `(x + w) * (y + h)`, i.e. the product of the box's
        # bottom-right corner coordinates, not its area.)
        coco['area'] = [box[2] * box[3] for box in bboxes]
        return image, coco
class SameStateImageMaskTransform(TransformApplierBase):
    """Applies the same random transform to an image and its mask.

    Spatial augmentations must modify an image and its segmentation mask
    identically, so the random seed is fixed before each of the two calls
    to the wrapped transform (and restored to its prior state afterwards
    by the seed context).
    """
    def apply(self, image, mask):
        # This method applies a transformation to the image and mask.
        # Essentially, we need to set the random seed before applying
        # it to the image and mask to get the same results for both
        # the image and the mask. So, we change the seed each time, but
        # we also reset the seed so that it goes back to normal afterwards.
        seed = np.random.randint((1 << 31) - 1) # default is 32-bit systems
        with seed_context(seed) as context:
            image = self._transform(image)
            # Re-seed so the mask sees the exact same random stream
            # that the image did.
            context.reset()
            mask = self._transform(mask)
        return image, mask
class NormalizationTransformBase(TransformApplierBase, abc.ABC):
    """Marker base class identifying image normalization transforms."""
    pass
class ScaleTransform(NormalizationTransformBase):
    """Scales integer or 0-255 images into the 0-1 float range."""
    def apply(self, image):
        # Already-scaled float images (max < 1) are passed through as-is.
        needs_scaling = (
            image.max() >= 1 or np.issubdtype(image.dtype, np.integer))
        if needs_scaling:
            return (image / 255).astype(np.float32)
        return image
class NormalizationTransform(NormalizationTransformBase):
    """Normalizes images by a stored `(mean, std)` pair.

    Images are first brought into a 0-1 float range if necessary, then
    shifted by the mean and divided by the standard deviation (applied
    channel-wise through broadcasting).
    """
    def apply(self, image):
        if image.max() >= 1 or np.issubdtype(image.dtype, np.integer):
            image = (image / 255).astype(np.float32)
        mean, std = self._transform
        mean = np.asarray(mean, dtype = np.float32)
        # Multiplying by the reciprocal avoids a per-pixel division.
        inv_std = np.reciprocal(
            np.asarray(std, dtype = np.float32), dtype = np.float32)
        return (image - mean) * inv_std
class OneHotLabelTransform(TransformApplierBase):
    """Converts integer class labels into one-hot encoded vectors.

    The wrapped `_transform` value is simply the total number of classes.
    """
    def apply(self, labels):
        encoded = np.zeros(shape = (self._transform, ), dtype = np.float32)
        encoded[labels] = 1
        return encoded
class MaskToChannelBasisTransform(TransformApplierBase):
    """Expands a 2-d label mask into per-class binary channels.

    A mask of `n` labels (background excluded) with shape `(h, w)` is
    converted into an `(h, w, n)` mask where each channel is the binary
    mask of one label. The background (label 0) channel is dropped.
    """
    def apply(self, mask):
        original_shape = mask.shape
        flat = np.asarray(mask, dtype = np.int32).ravel()
        num_pixels = flat.shape[0]
        one_hot = np.zeros(shape = (num_pixels, self._transform + 1))
        one_hot[np.arange(num_pixels), flat] = 1
        one_hot = one_hot.reshape(original_shape + (self._transform + 1,))
        # Drop the background channel (label 0).
        return one_hot[..., 1:].astype(np.int32)
class ToTensorSeg(TransformApplierBase):
    """Converts an image/mask pair to PyTorch tensors.

    This is essentially the same as `torchvision.transforms.ToTensor`
    for the image (HWC uint8 -> CHW float in 0-1), except the values of
    the segmentation mask are NOT scaled from 0-255 to 0-1, as scaling
    label values would break the segmentation pipeline.
    """
    def apply(self, image, mask):
        if image.ndim == 2:
            # Give grayscale images an explicit channel dimension.
            image = image[:, :, None]
        image = torch.from_numpy(image).permute(2, 0, 1).contiguous()
        if isinstance(image, torch.ByteTensor):
            image = image.to(dtype = torch.float32).div(255)
        mask = torch.from_numpy(mask)
        # Bug fix: previously only `mask` was returned, silently dropping
        # the converted image. Dual transforms are unpacked as
        # `image, annotation = transform(...)`, so both must be returned.
        return image, mask
| 6,074 | 36.5 | 76 | py |
AgML | AgML-main/agml/data/managers/training.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import numpy as np
from agml.framework import AgMLSerializable
from agml.utils.image import needs_batch_dim
from agml.backend.tftorch import (
tf, torch, set_backend, get_backend,
user_changed_backend, StrictBackendError,
_convert_image_to_torch, is_array_like
)
class TrainState(Enum):
    """Enumerates the valid training states of the `TrainingManager`.

    See `TrainingManager.update_state()` for the preprocessing behavior
    that each state implies (transform application, tensor conversion,
    automatic batching, and image formatting).
    """
    NONE = None
    TF = 'tf'
    TORCH = 'torch'
    EVAL = 'eval'
    EVAL_TF = 'eval-tf'
    EVAL_TORCH = 'eval-torch'
    FALSE = False
# Shorthand form of the enum for testing explicit values.
t_ = TrainState
class TrainingManager(AgMLSerializable):
    """Controls the training state for the `AgMLDataLoader`.
    This manager is responsible for managing the backend system and
    training parameters of the `AgMLDataLoader`. In particular, it
    manages the data in different circumstances, such as when the
    data is in "train" and "eval" modes, or when it is set to be in
    a TensorFlow/PyTorch-compatible mode.
    This allows for more efficient compatibility management with the
    different backends, for instance dynamic tensor conversion and
    image formatting to account for channels_first vs. channels_last,
    as well as being able to manage whether certain preprocessing steps
    should be applied, allowing for independent train and eval modes.
    """
    serializable = frozenset((
        'transform_manager', 'resize_manager',
        'state', 'task', 'remap_hook', 'name'))
    def __init__(self, transform_manager, resize_manager, task = None):
        # Update the general parameters for the loader.
        self._task = task
        self._name = resize_manager._dataset_name
        # The `TrainingManager` is responsible for applying the
        # actual transforms, and thus controls the `TransformManager`
        # and the `ImageResizeManager` and their relevant states.
        self._transform_manager = transform_manager
        self._resize_manager = resize_manager
        # The `state` that the loader is in is variable, and can be
        # changed during program execution (e.g., between 'train'
        # and 'eval', or even into a specific backend).
        #
        # The `state` determines the actual preprocessing functions
        # that it should apply, as it controls the managers.
        # This variable tracks that state and uses it to determine
        # which steps that it should apply.
        #
        # See the `update_state()` method to see the valid states.
        self._state: "TrainState" = TrainState.NONE
        # A hook for multi-dataset loaders (set later through
        # `_set_annotation_remap_hook`); `False` means no hook.
        self._remap_hook = False
    @property
    def state(self):
        """Exposes the internal state."""
        return self._state
    def update_state(self, state):
        """Updates the state of the training manager.
        This updates the state which is used to determine what preprocessing
        steps, if any, are applied to the data. Valid states include:
        1. `None`: This means that all of the transforms and image resizing
           will be applied to the data, but no automatic batching or tensor
           conversion. This is also the default state if none is specified.
        2. 'tf': This is one of the two training modes. This applies transforms,
           image resizing, as well as automatic batching and tensor conversion.
        3. 'torch': This is one of the two training modes. This applies
           transforms, image resizing, tensor conversion, as well as image
           formatting, but no automatic batching.
        4. 'eval': By default, this only enables image resizing. However, if
           the loader is set to a 'tf' or a 'torch' state, and from here it
           is converted to 'eval', then it also keeps potential tensor conversion,
           automatic batching, and image formatting.
        5. `False`: This disables all preprocessing and simply returns the raw
           loaded images and annotations.
        There are certain caveats here. Specifically, if you want to use 'eval'
        mode but maintain the tensor conversion and other related features, you
        need to use the following order of methods.
        > loader.as_keras_sequence() # or loader.as_torch_dataset()
        > loader.eval()
        If you want to completely disable preprocessing but keep the image
        resizing, then you need to first disable all preprocessing, then
        set the loader in 'eval' mode.
        > loader.as_keras_sequence() # or loader.as_torch_dataset()
        > loader.disable_preprocessing()
        > loader.eval()
        To re-enable preprocessing at this point, e.g., just the transforms
        and resizing but no tensor conversion or automatic batching, then use
        the following, which resets the train state of the loader.
        > loader.reset_processing()
        This enables the loader to track multiple states.
        """
        # Fully disable all preprocessing.
        if t_(state) == TrainState.FALSE:
            self._state = TrainState.FALSE
        # Set the correct 'eval' mode, based on the prior state: an
        # 'eval' request on top of a backend state keeps that backend's
        # tensor conversion behavior.
        elif t_(state) == TrainState.EVAL:
            if self._state == TrainState.TF:
                self._state = TrainState.EVAL_TF
            elif self._state == TrainState.TORCH:
                self._state = TrainState.EVAL_TORCH
            else:
                self._state = TrainState.EVAL
        # Apply a 'tf' or 'torch' backend conversion. Switching to the
        # other backend than the one active is only allowed if the user
        # did not explicitly pick the current backend.
        elif t_(state) == TrainState.TORCH:
            self._state = TrainState.TORCH
            if get_backend() == 'tf':
                if user_changed_backend():
                    raise StrictBackendError(
                        change = 'torch', obj = t_(state))
                set_backend('torch')
            self._resize_manager.assign('train-auto')
        elif t_(state) == TrainState.TF:
            self._state = TrainState.TF
            if get_backend() == 'torch':
                if user_changed_backend():
                    raise StrictBackendError(
                        change = 'tf', obj = t_(state))
                set_backend('tf')
            self._resize_manager.assign('train-auto')
        # Set the default conversion (`None`).
        elif t_(state) == TrainState.NONE:
            self._state = TrainState.NONE
    def _set_annotation_remap_hook(self, hook):
        """Used to modify class annotations for multi-dataset loaders."""
        self._remap_hook = hook
    def apply(self, obj, batch_state):
        """Applies preprocessing and conversions to the data contents.
        This method is responsible for actually loading and processing
        the data according to the training state, including loading the
        data, applying transforms and resizing, as well as the training
        management as described in the class. This is called by an
        enclosing `DataManager`.
        See the `TransformManager` and the `ImageResizeManager` for more
        information on the specific preprocessing applied there, and the
        `update_state()` method for more information on the training.
        """
        # Extract the raw contents from the `DataObject`.
        contents = obj.get()
        # If there is a hook to apply (for multi-dataset loaders),
        # then apply the hook before doing anything else.
        if self._remap_hook:
            contents = self._remap_hook(contents, self._name) # noqa
        # If the state is set to `False`, then just return the raw contents.
        if self._state is TrainState.FALSE:
            return contents
        # In any other case other than `False`, we resize the images.
        contents = self._resize_manager.apply(contents)
        # If we are in a training state or `None`, (so not an evaluation
        # state or `False`), then we apply the transforms to the images.
        if self._state not in [TrainState.EVAL,
                               TrainState.EVAL_TF,
                               TrainState.EVAL_TORCH]:
            contents = self._transform_manager.apply(contents)
        # If the images are not in a batch, then we convert them to tensors
        # here, otherwise, they will be converted when the batch is created.
        if not batch_state:
            contents = self._train_state_apply(contents)
        # Return the processed contents.
        return contents
    def _train_state_apply(self, contents):
        """Preprocesses the data according to the class's training state."""
        if self._state is TrainState.NONE:
            return contents
        elif self._state in [TrainState.TF, TrainState.EVAL_TF]:
            return self._tf_tensor_convert(contents, self._task)
        elif self._state in [TrainState.TORCH, TrainState.EVAL_TORCH]:
            return self._torch_tensor_convert(contents, self._task)
        return contents
    def make_batch(self, images, annotations):
        """Creates a batch of data out of processed images and annotations."""
        # Non-backend states return the contents as-is; backend states
        # stack the contents into backend-native tensor batches.
        if self._state in [TrainState.NONE, TrainState.FALSE, TrainState.EVAL]:
            return images, annotations
        elif self._state in [TrainState.TF, TrainState.EVAL_TF]:
            return self._tf_tensor_batch_convert(
                (images, annotations), self._task)
        elif self._state in [TrainState.TORCH, TrainState.EVAL_TORCH]:
            return self._torch_tensor_batch_convert(
                (images, annotations), self._task)
        return images, annotations
    @staticmethod
    def _tf_tensor_image_convert(image):
        """Converts potential multi-image input dicts to tensors."""
        if isinstance(image, np.ndarray):
            return tf.constant(image)
        return {k: tf.constant(i) for k, i in image.items()}
    @staticmethod
    def _tf_tensor_image_batch_convert(batch):
        """Converts potential multi-image input batch dicts to tensor dicts."""
        if isinstance(batch, np.ndarray):
            return tf.stack(batch)
        return {k: tf.stack(b) for k, b in batch.items()}
    @staticmethod
    def _tf_tensor_convert(contents, task):
        """Converts contents to `tf.Tensor`s where possible."""
        # Convert the image and annotation to `tf.Tensor`s.
        image, annotation = contents
        image = TrainingManager._tf_tensor_image_convert(image)
        if task in ['image_classification',
                    'image_regression',
                    'semantic_segmentation']:
            if isinstance(annotation, (int, np.ndarray)):
                annotation = tf.constant(annotation)
            else:
                # Multi-output annotations arrive as dicts; convert each
                # value in-place.
                for k, v in annotation.items():
                    annotation[k] = tf.constant(v)
        elif task == 'object_detection':
            annotation = TrainingManager._tf_tensor_coco_convert(
                annotation)
        # Add a first-dimension batch to the image.
        if isinstance(image, tf.Tensor):
            if needs_batch_dim(image):
                image = tf.expand_dims(image, axis = 0)
        else:
            for k, v in image.items():
                if needs_batch_dim(v):
                    image[k] = tf.expand_dims(v, axis = 0)
        return image, annotation
    @staticmethod
    def _tf_tensor_batch_convert(contents, task):
        """Converts batch contents to `tf.Tensor`s where possible."""
        # This stacks the images and annotations together.
        images, annotations = contents
        images = TrainingManager._tf_tensor_image_batch_convert(images)
        if task == 'image_classification':
            annotations = tf.constant(annotations)
        elif task == 'image_regression':
            if isinstance(annotations, np.ndarray):
                annotations = tf.constant(annotations)
            else:
                for k, v in annotations.items():
                    annotations[k] = tf.constant(v)
        elif task == 'semantic_segmentation':
            annotations = tf.stack(annotations, axis = 0)
        elif task == 'object_detection':
            # COCO dicts have varying shapes, so they stay a list of
            # per-sample tensor dicts rather than a stacked tensor.
            annotations = [TrainingManager._tf_tensor_coco_convert(
                a_set) for a_set in annotations]
        return images, annotations
    @staticmethod
    def _tf_tensor_coco_convert(contents):
        """Converts a COCO JSON dictionary to a `tf.Tensor`."""
        coco_tensor = {}
        for key, value in contents.items():
            coco_tensor[key] = tf.constant(value)
        return coco_tensor
    @staticmethod
    def _torch_tensor_image_convert(image):
        """Converts potential multi-image input dicts to tensors."""
        if is_array_like(image):
            return _convert_image_to_torch(image)
        return {k: _convert_image_to_torch(i) for k, i in image.items()}
    @staticmethod
    def _torch_tensor_image_batch_convert(batch):
        """Converts potential multi-image input batch dicts to tensor dicts."""
        if is_array_like(batch):
            return torch.stack([
                _convert_image_to_torch(image) for image in batch])
        return {k: TrainingManager.
                _torch_tensor_image_batch_convert(b) for k, b in batch.items()}
    @staticmethod
    def _torch_tensor_convert(contents, task):
        """Converts contents to `torch.Tensor`s where possible."""
        image, annotation = contents
        image = TrainingManager._torch_tensor_image_convert(image)
        if task in ['image_classification',
                    'image_regression']:
            if isinstance(annotation, (int, np.ndarray)):
                annotation = torch.tensor(annotation)
            else:
                for k, v in annotation.items():
                    annotation[k] = torch.tensor(v)
        elif task == 'semantic_segmentation':
            annotation = _convert_image_to_torch(annotation)
        elif task == 'object_detection':
            annotation = TrainingManager._torch_tensor_coco_convert(
                annotation)
        return image, annotation
    @staticmethod
    def _torch_tensor_batch_convert(contents, task):
        """Converts batch contents to `torch.Tensor`s where possible."""
        images, annotations = contents
        images = TrainingManager._torch_tensor_image_batch_convert(images)
        if task in ['image_classification',
                    'image_regression']:
            if isinstance(annotations, (int, np.ndarray)):
                annotations = torch.tensor(annotations)
            else:
                for k, v in annotations.items():
                    annotations[k] = torch.tensor(v)
        elif task == 'semantic_segmentation':
            annotations = torch.stack([
                _convert_image_to_torch(a) for a in annotations])
        elif task == 'object_detection':
            annotations = [TrainingManager._torch_tensor_coco_convert(
                a_set) for a_set in annotations]
        return images, annotations
    @staticmethod
    def _torch_tensor_coco_convert(contents):
        """Converts a COCO JSON dictionary to a `torch.Tensor`."""
        coco_tensor = {}
        for key, value in contents.items():
            if key == 'segmentation':
                # NOTE(review): segmentation polygons are ragged and cannot
                # be converted to a tensor, so they are replaced with an
                # empty array here — confirm no caller relies on the
                # original polygon data downstream.
                value = np.empty(0)
            if not isinstance(value, torch.Tensor):
                coco_tensor[key] = torch.tensor(value)
            else:
                coco_tensor[key] = value
        return coco_tensor
| 15,928 | 40.481771 | 82 | py |
AgML | AgML-main/agml/data/managers/transforms.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import inspect
import functools
from enum import Enum
from agml.framework import AgMLSerializable
from agml.backend.tftorch import (
get_backend, set_backend, user_changed_backend, StrictBackendError
)
from agml.data.managers.transform_helpers import (
AlbumentationsTransformSingle,
AlbumentationsTransformMask,
AlbumentationsTransformCOCO,
SameStateImageMaskTransform,
NormalizationTransformBase,
ScaleTransform,
NormalizationTransform,
OneHotLabelTransform,
MaskToChannelBasisTransform,
ToTensorSeg
)
from agml.utils.logging import log
class TransformKind(Enum):
    """Enumerates the kinds of transforms tracked by `TransformManager`.

    A 'transform' is applied only to the input image, a 'target_transform'
    only to the annotation, and a 'dual_transform' to both together.
    """
    Transform = 'transform'
    TargetTransform = 'target_transform'
    DualTransform = 'dual_transform'
# Shorthand form of the enum for testing explicit values.
t_ = TransformKind
class TransformManager(AgMLSerializable):
"""Manages the validation and application of transformations.
This class serves as a helper for the `DataManager` class that
focuses solely on applying transforms to the input images and
annotations.
There are three main transforms stored internally.
1. `transform`: A transform which is applied to only the input image.
2. `target_transform`: A transform which is applied only to the output.
3. `dual_transform`: A transform which is applied to both.
Note that the values of these transforms don't always directly match
the provided value to the corresponding argument in the method
`AgMLDataLoader.transform()`. An example is an albumentations
transform, which, while being passed to `transform`, ends up being
stored as a `dual_transform` internally. The above case applies to
semantic segmentation and object detection transformations, but
there is no `dual_transform` for image classification.
"""
serializable = frozenset(
('task', 'transforms', 'time_inserted_transforms',
'contains_tf_transforms', 'contains_torch_transforms'))
def __init__(self, task):
self._task = task
self._transforms = dict()
# A user might want to apply a certain set of transforms first,
# which are then followed by a different set of transforms. E.g.,
# calling `loader.transform()` with one set of transforms and then
# following with another call to `loader.transform()` with the
# intention that the transforms in the second call will only be
# applied after all of the transforms in the first call are.
#
# So, while the transform types are tracked in the `_transforms`
# attribute, we track the moment that transforms are inserted
# using this attribute, which is a list of different transforms.
# The `apply()` method loops sequentially through each transform
# in this list and applies them as required.
self._time_inserted_transforms = []
# Check if the loader already contains TensorFlow/PyTorch transforms.
# This is to track whether there are issues when a new transform is added
# or if there already exists one, so we whether to apply transforms.
self._contains_tf_transforms = False
self._contains_torch_transforms = False
def get_transform_states(self):
"""Returns a copy of the existing transforms."""
transform_dict = {}
for name, state in self._transforms.items():
transform_dict[name] = state.copy()
return transform_dict
def _pop_transform(self, t_type, search_param):
"""Removes a certain type of transform from the manager."""
for i, tfm in enumerate(self._transforms[search_param]):
if isinstance(tfm, t_type):
self._transforms[search_param].pop(i)
break
for i, tfm in enumerate(self._time_inserted_transforms):
if isinstance(tfm[1], t_type):
self._time_inserted_transforms.pop(i)
break
    def assign(self, kind, transform):
        """Assigns a new transform to the manager.

        Validates and wraps `transform` according to the loader's task and
        the given `kind` ('transform', 'target_transform' or
        'dual_transform'), then registers it both in the per-kind registry
        and the time-ordered insertion list. Passing 'reset' removes all
        transforms of the given kind; passing `None` is a no-op.
        """
        # Determine if the transform is being reset or unchanged.
        if transform == 'reset':
            self._transforms.pop(kind, None)
            new_time_transforms = []
            for tfm in self._time_inserted_transforms:
                if tfm[0] == kind:
                    continue
                new_time_transforms.append(tfm)
            self._time_inserted_transforms = new_time_transforms.copy()
            return
        elif transform is None:
            return
        # If an `albumentations` transform is passed to the `transform`
        # argument, then it is checked first to see if it is potentially
        # just processing the input image, and then passed as just a
        # `transform` (or else it will clash in object detection tasks).
        # Otherwise, it is stored internally as a `dual_transform`.
        if transform is not None:
            try:
                if 'albumentations' in transform.__module__:
                    if t_(kind) == TransformKind.Transform:
                        # A non-empty `processors` list means the pipeline
                        # also touches bboxes/masks, so it must run on both
                        # the image and the annotation.
                        if len(transform.processors) != 0:
                            kind = 'dual_transform'
                        if self._task == 'semantic_segmentation':
                            kind = 'dual_transform'
            except AttributeError:
                # Some type of object that doesn't have `__module__`.
                pass
        # We can only do this after the albumentations check, to ensure
        # that we are adding the transforms to the correct location.
        prev = self._transforms.get(kind, None)
        # Validate the transformation based on the task and kind.
        if self._task == 'image_classification':
            if t_(kind) == TransformKind.Transform:
                transform = self._maybe_normalization_or_regular_transform(transform)
            elif t_(kind) == TransformKind.TargetTransform:
                if isinstance(transform, tuple): # a special convenience case
                    if transform[0] == 'one_hot':
                        if transform[2] is not True: # removing the transform
                            self._pop_transform(OneHotLabelTransform, kind)
                            return
                        transform = OneHotLabelTransform(transform[1])
            else:
                raise ValueError("There is no `dual_transform` for image "
                                 "classification tasks. Please pass the "
                                 "input as a `transform` or `target_transform`.")
        elif self._task == 'image_regression':
            if t_(kind) == TransformKind.Transform:
                transform = self._maybe_normalization_or_regular_transform(transform)
            elif t_(kind) == TransformKind.TargetTransform:
                if isinstance(transform, tuple): # a special convenience case
                    if transform[0] == 'one_hot':
                        if transform[2] is not True: # removing the transform
                            self._pop_transform(OneHotLabelTransform, kind)
                            return
                        transform = OneHotLabelTransform(transform[1])
            else:
                pass
        elif self._task == 'semantic_segmentation':
            if t_(kind) == TransformKind.Transform:
                transform = self._maybe_normalization_or_regular_transform(transform)
            elif t_(kind) == TransformKind.TargetTransform:
                if isinstance(transform, tuple): # a special convenience case
                    if transform[0] == 'channel_basis':
                        if transform[2] is not True: # removing the transform
                            self._pop_transform(MaskToChannelBasisTransform, kind)
                        # NOTE(review): unlike the classification branches
                        # above, there is no `return` after the removal, so
                        # a new transform is re-created even when removing
                        # — confirm this asymmetry is intentional.
                        transform = MaskToChannelBasisTransform(transform[1])
                else:
                    transform = self._maybe_normalization_or_regular_transform(transform)
            else:
                transform = self._construct_image_and_mask_transform(transform)
        elif self._task == 'object_detection':
            if t_(kind) == TransformKind.Transform:
                transform = self._maybe_normalization_or_regular_transform(transform)
            elif t_(kind) == TransformKind.TargetTransform:
                pass
            else:
                transform = self._construct_image_and_coco_transform(transform)
        # Add the transformation to the internal storage.
        if transform is not None:
            if prev is not None:
                self._transforms[kind].append(transform)
            else:
                self._transforms[kind] = [transform]
            self._time_inserted_transforms.append((kind, transform))
def apply(self, contents):
"""Applies a transform to a set of input data.
This method controls the application of the actual transforms. It
does this inside of a context that can control the application
of the transform and manage errors more effectively.
The method of application is mostly similar across the different
tasks, as the differentiation of the methods is taken care of
in the `assign` method and each transform is converted to act
like a generic method with simple input arguments.
The hierarchy in which transforms are applied is:
transform -> --------|
|-----> dual_transform
target_transform -> --|
Furthermore, image resizing takes place before any transformations
are applied. After the transforms are applied in this order, they
returned and if passed again, they will have a different transform
applied to them. The state is independent of the images passed.
"""
image, annotation = contents
# Iterate through the different transforms.
for (kind, transform) in self._time_inserted_transforms:
if t_(kind) == TransformKind.Transform:
image = self._apply_to_objects(
transform, (image, ), kind)
if t_(kind) == TransformKind.TargetTransform:
annotation = self._apply_to_objects(
transform, (annotation, ), kind)
if t_(kind) == TransformKind.DualTransform:
image, annotation = self._apply_to_objects(
transform, (image, annotation), kind)
# Return the processed image and annotation.
return image, annotation
@staticmethod
def _apply_to_objects(transform, contents, kind):
"""Applies the actual transformations in a context."""
try:
return transform(*contents)
except Exception as e:
default_msg = (f"Encountered an error when attempting to apply "
f"a transform ({transform}) of kind '{kind}' to "
f"objects: {contents}. See the above traceback.")
# A specific case of exception where the image first needs
# to be converted to a PIL image before being used in a
# general `torchvision.transforms` pipeline.
if "PIL" in str(e):
raise TypeError("If using a `torchvision.transforms` pipeline "
"when not in PyTorch training mode, you need "
"to include `ToTensor()` in the pipeline.")
# Otherwise, raise the default exception.
raise Exception(default_msg)
def _maybe_normalization_or_regular_transform(self, transform):
"""Dispatches to the correct single-image transform construction."""
if isinstance(transform, tuple):
if transform[0] == 'normalize':
return self._build_normalization_transform(transform)
return self._construct_single_image_transform(transform)
def _build_normalization_transform(self, transform):
"""Constructs a normalization transform if passed.
This is a special case for transforms passed by the `normalize_images`
'method of the `AgMLDataLoader`, since these are treated almost as
their own independent management system in terms of resetting or
applying them in a different method. This is called by `assign`.
"""
# First, we check if a normalization transform already exists
# within the transform dict, and then we get its location.
norm_transform_index, norm_transform_index_time = -1, -1
try:
for i, t in enumerate(self._transforms['transform']):
if isinstance(t, NormalizationTransformBase):
norm_transform_index = i
break
for i, (_, t) in enumerate(self._time_inserted_transforms):
if isinstance(t, NormalizationTransformBase):
norm_transform_index_time = i
break
except:
self._transforms['transform'] = []
if transform[1] == 'scale':
tfm = ScaleTransform(None)
if norm_transform_index != -1:
self._transforms['transform'][norm_transform_index] = tfm
self._time_inserted_transforms[norm_transform_index_time] \
= ('transform', tfm)
else:
self._transforms['transform'].append(tfm)
self._time_inserted_transforms.append(('transform', tfm))
elif hasattr(transform[1], 'mean') or transform[1] == 'imagenet':
try:
mean, std = transform[1].mean, transform[1].std
except AttributeError:
# Default ImageNet mean and std.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
tfm = NormalizationTransform((mean, std))
if norm_transform_index != -1:
self._transforms['transform'][norm_transform_index] = tfm
self._time_inserted_transforms[norm_transform_index_time] \
= ('transform', tfm)
else:
self._transforms['transform'].append(tfm)
self._time_inserted_transforms.append(('transform', tfm))
elif transform[1] == 'reset':
if norm_transform_index != -1:
self._transforms['transform'].pop(norm_transform_index)
self._time_inserted_transforms.pop(norm_transform_index_time)
return None
def _transform_update_and_check(self, tfm_type):
"""Checks whether a TensorFlow/PyTorch transform has been added."""
# Check for transform conflicts.
if tfm_type == 'torch' and self._contains_tf_transforms:
raise TypeError("Received a PyTorch-type transform, yet the loader "
"already contains TensorFlow/Keras transforms. This "
"will cause an error, please only pass one format. If "
"you want to remove a transform, pass a value of `None` "
"to reset all of the transforms for a certain type.")
if tfm_type == 'tf' and self._contains_torch_transforms:
raise TypeError("Received a TensorFlow/Keras-type transform, yet the "
"loader already contains PyTorch transforms. This "
"will cause an error, please only pass one format. If "
"you want to remove a transform, pass a value of `None` "
"to reset all of the transforms for a certain type.")
# Update the transform type.
if tfm_type == 'torch':
self._contains_torch_transforms = True
if tfm_type == 'tf':
self._contains_tf_transforms = True
# The following methods implement different checks which validate
# as well as process input transformations, and manage the backend.
# The transforms here will be also checked to match a specific
# backend. Alternatively, the backend will dynamically be switched.
def _construct_single_image_transform(self, transform):
"""Validates a transform which is applied to a single image.
This is used for image classification transforms, which only
apply to the input image, as well as other general tasks whenever
a transform that is only applied to the image is passed, e.g.,
a visual augmentation like random contrasting.
"""
# This case is used for clearing a transformation.
if transform is None:
return None
# A general functional transformation. We don't verify what happens in
# the function. The only check which occurs here that the signature of
# the function is valid. Note that this also includes partial methods.
elif isinstance(transform, (types.FunctionType, functools.partial)):
sig = inspect.signature(transform).parameters
if not len(sig) == 1:
raise TypeError("Expected a single-image transform passed "
"to `transform` to accept one input image, "
f"instead got {len(sig)} parameters.")
return transform
# An `albumentations` transform to be applied to the image. This
# wraps the transform into a method which treats it as a regular
# functional transform, e.g. no keyword arguments (for easy use).
elif 'albumentations' in transform.__module__:
return AlbumentationsTransformSingle(transform)
# A set of `torchvision` transforms wrapped into a `T.Compose` object
# or just a single transformation. This simply confirms the backend.
elif 'torchvision' in transform.__module__:
if get_backend() != 'torch':
if user_changed_backend():
raise StrictBackendError(change = 'tf', obj = transform)
set_backend('torch')
self._transform_update_and_check('torch')
return transform
# A `tf.keras.Sequential` preprocessing model or an individual
# Keras preprocessing layer. This simply confirms the backend.
elif 'keras' in transform.__module__ or 'tensorflow' in transform.__module__:
if get_backend() != 'tf':
if user_changed_backend():
raise StrictBackendError(change = 'torch', obj = transform)
set_backend('tf')
self._transform_update_and_check('tf')
return transform
# Otherwise, it may be a transform from a (lesser-known) third-party
# library, in which case we just return it as a callable. Transforms
# which are used in a more complex manner should be passed as decorators.
return transform
    def _construct_image_and_mask_transform(self, transform):
        """Validates a transform for an image and annotation mask.

        This is used for a semantic segmentation transform. Such
        transformations should be passed as the following:

        - An `albumentations` transform pipeline that may include
          spatial and/or visual augmentation.
        - A method to independently or dually apply transformations
          to the image and annotation mask.
        - A `torchvision.transforms` or `tf.keras.Sequential` pipeline
          which will be applied to the image and mask using the same
          random seed, for reproducibility. Use the provided method
          `generate_keras_segmentation_dual_transform` for this.

        Returns the (possibly wrapped) transform, or None to clear.
        """
        # This case is used for clearing a transformation.
        if transform is None:
            return None
        # A general functional transformation. We don't verify what happens in
        # the function. The only check which occurs here that the signature of
        # the function is valid (two arguments: image and mask). Note that
        # this also includes partial methods.
        elif isinstance(transform, (types.FunctionType, functools.partial)):
            sig = inspect.signature(transform).parameters
            if not len(sig) == 2:
                raise TypeError(f"Expected a semantic segmentation transform "
                                f"passed to `transform` to accept two args: "
                                f"an input image and an annotation mask, "
                                f"instead got {len(sig)} parameters.")
            return transform
        # An `albumentations` transform to be applied to the image. This
        # wraps the transform into a method which treats it as a regular
        # functional transform, e.g. no keyword arguments (for easy use).
        elif 'albumentations' in transform.__module__:
            return AlbumentationsTransformMask(transform)
        # If we have the case of a transform that needs to be applied to
        # both the input and the output mask simultaneously, then we wrap
        # that into a class which undertakes that behavior. This happens
        # when the signature of the input function accepts only one input
        # parameter or it belongs to `torchvision` transform (not Keras).
        if len(inspect.signature(transform).parameters) == 1:
            if 'torchvision' in transform.__module__:
                if get_backend() != 'torch':
                    if user_changed_backend():
                        raise StrictBackendError(
                            change = 'tf', obj = transform)
                    set_backend('torch')
                # Update `torchvision.transforms.ToTensor` to a custom
                # updated class as this will modify the mask incorrectly.
                import torchvision
                if isinstance(transform, torchvision.transforms.ToTensor):
                    transform = ToTensorSeg(None)
                    log("Updated `ToTensor` transform in the provided pipeline "
                        f"{transform} to an updated transform which does not "
                        f"modify the mask. If you want to change this behaviour, "
                        f"please raise an error with the AgML team.")
                elif isinstance(transform, torchvision.transforms.Compose):
                    # Swap any `ToTensor` inside the composition for the
                    # mask-safe replacement, then rebuild the pipeline.
                    tfm_list = transform.transforms.copy()
                    for i, compose_tfm in enumerate(transform.transforms):
                        if isinstance(compose_tfm, torchvision.transforms.ToTensor):
                            tfm_list[i] = ToTensorSeg(None)
                    transform = torchvision.transforms.Compose(tfm_list)
                    log("Updated `ToTensor` transform in the provided pipeline "
                        f"{transform} to an updated transform which does not "
                        f"modify the mask. If you want to change this behaviour, "
                        f"please raise an error with the AgML team.")
                self._transform_update_and_check('torch')
            elif 'keras.layers' in transform.__module__:
                if get_backend() != 'tf':
                    if user_changed_backend():
                        raise StrictBackendError(
                            change = 'torch', obj = transform)
                    set_backend('tf')
                log('Got a Keras transformation for a dual image and '
                    'mask transform. If you are passing preprocessing '
                    'layers to this method, then use `agml.data.experimental'
                    '.generate_keras_segmentation_dual_transform` in order '
                    'for the random state to be applied properly.', 'warning')
                self._transform_update_and_check('tf')
            # Apply the single-argument transform to image and mask with
            # the same random state for spatial consistency.
            return SameStateImageMaskTransform(transform)
        # Another type of transform, most likely some form of transform
        # class. No checks are applied here, since we can't account for
        # each of the potential cases of the transformations.
        return transform
@staticmethod
def _construct_image_and_coco_transform(transform):
"""Validates a transform for an image and COCO JSON dictionary.
This is used for object detection transforms. Such transformations
should be wrapped into a method (unless they are albumentations
transforms). The method should accept two input arguments, the image
and the COCO JSON dictionary, and return the two respectively.
"""
# This case is used for clearing a transformation.
if transform is None:
return None
# A general functional transformation. We don't verify what happens in
# the function. The only check which occurs here that the signature of
# the function is valid. Note that this also includes partial methods.
elif isinstance(transform, (types.FunctionType, functools.partial)):
sig = inspect.signature(transform).parameters
if not len(sig) == 2:
raise TypeError(f"Expected a object detection transform passed "
f"to `transform` to accept two args: an input "
f"image and a COCO JSON dictionary, instead "
f"got {len(sig)} parameters.")
return transform
# An `albumentations` transform to be applied to the image. This
# wraps the transform into a method which treats it as a regular
# functional transform, e.g. no keyword arguments (for easy use).
elif 'albumentations' in transform.__module__:
return AlbumentationsTransformCOCO(transform)
# Another type of transform, most likely some form of transform
# class. No checks are applied here, since we can't account for
# each of the potential cases of the transformations.
return transform
| 26,516 | 48.288104 | 89 | py |
AgML | AgML-main/agml/data/exporters/tensorflow.py | # Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from agml.backend.tftorch import tf
from agml.data.managers.training import TrainState
from agml.data.object import DataObject
from agml.utils.logging import log
class TFExporter(object):
    """Exports an `AgMLDataLoader` as a `tf.data.Dataset`.

    This class manages the methods and conversions between `AgMLDataLoader`
    parameters, managers, and methods, into TensorFlow native methods.
    """
    def __init__(self, task, builder):
        # `task` selects the loading/resizing pipeline ('image_classification',
        # 'semantic_segmentation', or object detection); `builder` exposes
        # `export_contents()` for the raw image/annotation contents.
        self._task = task
        self._builder = builder
    def digest_transforms(self, transforms, resizing):
        """Parses the transforms for the `AgMLDataLoader`."""
        self._transforms = {
            k: state for k, state in transforms.items()}
        # Fall back to a 512x512 output size when no resizing is configured.
        self._size = resizing if resizing is not None else (512, 512)
    def assign_state(self, state):
        """Updates the training state for the `tf.data.Dataset`."""
        if state in [TrainState.EVAL,
                     TrainState.EVAL_TF,
                     TrainState.EVAL_TORCH,
                     TrainState.FALSE]:
            self._state = 'eval'
        else:
            self._state = 'train'
    def _build_from_contents_by_type(self, builder):
        """Parses the provided mapping into a valid set of contents."""
        if self._task == 'image_classification':
            ds = self._build_image_classification(builder)
            return ds.map(self._image_classification_load)
        elif self._task == 'semantic_segmentation':
            ds = self._build_semantic_segmentation(builder)
            return ds.map(self._semantic_segmentation_load)
        else:
            ds = self._build_object_detection(builder)
            return ds.map(self._object_detection_load)
    def _apply_resizing_by_type(self, ds):
        """Applies resizing based on the task."""
        if self._task == 'image_classification':
            return ds.map(self._image_classification_resize)
        elif self._task == 'semantic_segmentation':
            return ds.map(self._semantic_segmentation_resize)
        else:
            return ds.map(self._object_detection_resize)
    def build(self, batch_size = None):
        """Builds the `tf.data.Dataset` using the provided parameters.

        Returns the dataset with loading, resizing, (optionally) transforms,
        and (optionally) batching/prefetching applied.
        """
        # Construct the dataset from the contents.
        ds = self._build_from_contents_by_type(self._builder)
        # Apply the digested transforms and resizing.
        ds = self._apply_resizing_by_type(ds)
        if self._state != 'eval':
            # No transforms for object detection, since it is near impossible
            # for TensorFlow's graph mode to use COCO JSON dictionaries.
            if self._task == 'object_detection':
                if len(self._transforms) != 0:
                    log("Got transforms when exporting an `AgMLDataLoader`"
                        "to a `tf.data.Dataset`. These transforms will not be "
                        "applied. To use transforms in TensorFlow, use the "
                        "`as_keras_sequence()` method instead.")
            else:
                tfm = self._apply_transforms
                if len(self._transforms) != 0:
                    ds = ds.map(tfm)
        # Apply batching and prefetching, then return the dataset.
        if batch_size is not None:
            ds = ds.batch(batch_size)
            ds = ds.prefetch(batch_size)
        return ds
    # The following methods are used to parse the input
    # contents into valid methods for the loaders.
    @staticmethod
    def _build_image_classification(builder):
        """Builds a shuffled (image path, label) dataset."""
        images, labels = builder.export_contents(
            export_format = 'arrays')
        images, labels = tf.constant(images), tf.constant(labels)
        ds = tf.data.Dataset.from_tensor_slices((images, labels))
        return ds.shuffle(len(images))
    @staticmethod
    def _build_semantic_segmentation(builder):
        """Builds a shuffled (image path, mask path) dataset."""
        images, masks = builder.export_contents(
            export_format = 'arrays')
        images, masks = tf.constant(images), tf.constant(masks)
        ds = tf.data.Dataset.from_tensor_slices((images, masks))
        return ds.shuffle(len(images))
    @staticmethod
    def _build_object_detection(builder):
        """Builds a shuffled (image path, COCO feature dict) dataset."""
        images, annotations = builder.export_contents(
            export_format = 'arrays')
        images = tf.constant(images)
        processed_annotations = [
            DataObject._parse_coco(a) for a in annotations]
        features = {'bbox': [], 'category_id': [], 'area': [],
                    'image_id': [], 'iscrowd': [], 'segmentation': []}
        for a_set in processed_annotations:
            for feature in features.keys():
                features[feature].append(a_set[feature]) # noqa
        # Ragged tensors are needed since each image may carry a
        # different number of annotations.
        for feature in features.keys():
            features[feature] = tf.ragged.constant(features[feature])
        feature_ds = tf.data.Dataset.from_tensor_slices(features)
        ds = tf.data.Dataset.zip((
            tf.data.Dataset.from_tensor_slices(images), feature_ds))
        return ds.shuffle(len(images))
    # The following methods are used for loading images and
    # annotations for each of the different types of tasks.
    @staticmethod
    @tf.function
    def _image_classification_load(image, label):
        """Reads a JPEG from `image` path and scales it to [0, 1]."""
        image = tf.cast(tf.image.decode_jpeg(
            tf.io.read_file(image)), tf.float32) / 255.
        return image, tf.convert_to_tensor(label)
    @staticmethod
    @tf.function
    def _semantic_segmentation_load(image, mask):
        """Reads the image (scaled to [0, 1]) and the raw annotation mask."""
        image = tf.cast(tf.image.decode_jpeg(
            tf.io.read_file(image)), tf.float32) / 255.
        mask = tf.image.decode_jpeg(tf.io.read_file(mask))
        return image, mask
    @staticmethod
    @tf.function
    def _object_detection_load(image, coco):
        """Reads the image and converts ragged COCO features to tensors."""
        image = tf.cast(tf.image.decode_jpeg(
            tf.io.read_file(image)), tf.float32) / 255.
        ret_coco = coco.copy()
        # Ragged values that cannot be densified are left unchanged.
        for key in coco.keys():
            try: ret_coco[key] = coco[key].to_tensor()
            except: pass
        return image, ret_coco
    # The following methods apply resizing to the data.
    def _image_classification_resize(self, image, label):
        """Resizes a classification image to the configured size."""
        # Bug fix: this previously built a tuple `(resized, tf.float32)`
        # instead of casting; `tf.cast` mirrors the segmentation resize.
        image = tf.cast(tf.image.resize(
            image, self._size, method = 'nearest'), tf.float32)
        return image, label
    def _semantic_segmentation_resize(self, image, mask):
        """Resizes the image and its annotation mask to the configured size."""
        image = tf.cast(tf.image.resize(
            image, self._size, method = 'nearest'), tf.float32)
        mask = tf.cast(tf.image.resize(
            mask, self._size, method = 'nearest'), tf.float32)
        return image, mask
    def _object_detection_resize(self, image, coco):
        """Resizes the image and rescales its COCO bounding boxes to match."""
        # Helper for the `tf.py_function` for object detection.
        def _resize_image_and_bboxes(image, coco_boxes):
            nonlocal size
            y_scale, x_scale = image.shape[0:2]
            # Normalize boxes by the original image size ...
            stack_boxes = tf.stack(
                [coco_boxes[:, 0] / x_scale,
                 coco_boxes[:, 1] / y_scale,
                 coco_boxes[:, 2] / x_scale,
                 coco_boxes[:, 3] / y_scale], axis = -1)
            image = tf.cast(tf.image.resize(
                image, size), tf.float32)
            y_new, x_new = image.shape[0:2]
            # ... then scale them back up by the resized image size.
            new_stack = tf.cast(tf.stack(
                [stack_boxes[:, 0] * x_new,
                 stack_boxes[:, 1] * y_new,
                 stack_boxes[:, 2] * x_new,
                 stack_boxes[:, 3] * y_new], axis = -1
            ), tf.int32)
            areas = new_stack[:, 2] * new_stack[:, 3]
            return image, new_stack, areas
        # The actual resizing can't take place in graph mode, so we
        # dispatch to a `tf.py_function` to do the resizing, then
        # re-assign the values back to the COCO JSON dictionary.
        size = self._size
        image, ret_coco_boxes, ret_areas = tf.py_function(
            _resize_image_and_bboxes,
            [image, coco['bbox']],
            [tf.float32, tf.int32, tf.int32])
        coco['bbox'] = ret_coco_boxes
        coco['area'] = ret_areas
        return image, coco
    # The following method manages the application of transforms.
    def _apply_transforms(self, image, annotation):
        """Applies the digested transforms through a `tf.py_function`."""
        # Helper for the `tf.py_function` for most transforms.
        def _py_apply(img, ann):
            nonlocal transforms
            img, ann = img.numpy(), ann.numpy()
            for key, state in transforms.items():
                if key == 'transform':
                    # Image-only transforms.
                    for t in state:
                        img = t(img)
                elif key == 'target_transform':
                    # Annotation-only transforms.
                    for t in state:
                        ann = t(ann)
                else:
                    # Dual image-and-annotation transforms.
                    for t in state:
                        img, ann = t(img, ann)
            return img, ann
        # The actual transforming can't take place in graph mode
        # (in most cases), so we dispatch and reassign.
        transforms = self._transforms
        image, annotation = tf.py_function(
            _py_apply,
            [image, annotation],
            [tf.float32, tf.int32])
        return image, annotation
| 9,639 | 38.02834 | 79 | py |
NeLLoC | NeLLoC-main/utils.py | '''
Code by Hrituraj Singh
Indian Institute of Technology Roorkee
'''
from torchvision import datasets, transforms
import configparser
import os
import torch.nn.functional as F
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable
from torch.nn.utils import weight_norm as wn
from PIL import *
import os
import pickle
normalize=lambda x: x/255.
def unpickle(file):
    """Deserialize and return the object stored in the pickle file `file`."""
    with open(file, 'rb') as handle:
        return pickle.load(handle)
class CustomDataSet(torch.utils.data.Dataset):
    """Dataset yielding transformed RGB images found anywhere under `main_dir`."""

    def __init__(self, main_dir, transform):
        self.main_dir = main_dir
        self.transform = transform
        # Recursively collect every file path beneath the root directory,
        # sorted for a deterministic ordering across runs.
        found = []
        for dirpath, _, filenames in os.walk(self.main_dir):
            found.extend(os.path.join(dirpath, name) for name in filenames)
        self.all_imgs = sorted(found)

    def __len__(self):
        return len(self.all_imgs)

    def __getitem__(self, idx):
        # Open as RGB and apply the user-supplied transform pipeline.
        path = self.all_imgs[idx]
        image = Image.open(path).convert("RGB")
        return self.transform(image)
def parse_value_from_string(val_str):
    """Parse a configuration string into int, float, list, or bool.

    Falls back to returning the string unchanged when no other type matches.
    """
    if(is_int(val_str)):
        val = int(val_str)
    elif(is_float(val_str)):
        val = float(val_str)
    elif(is_list(val_str)):
        val = parse_list(val_str)
    elif(is_bool(val_str)):
        val = parse_bool(val_str)
    else:
        val = val_str
    return val


def is_int(val_str):
    """Return True if `val_str` is a (possibly negative) decimal int literal.

    Bug fix: the original indexed `val_str[0]` unguarded, so an empty string
    raised IndexError (e.g. via `is_float('.5')`). A lone '-' still returns
    True, preserving the original behavior that lets '-.5' parse as a float.
    """
    if not val_str:
        return False
    start_digit = 1 if val_str[0] == '-' else 0
    return all('0' <= ch <= '9' for ch in val_str[start_digit:])


def is_float(val_str):
    """Return True for decimal ('1.5') or scientific ('1e-5') float literals.

    Each side of the '.' or 'e' must itself pass `is_int`.
    """
    if '.' in val_str and len(val_str.split('.')) == 2:
        whole, frac = val_str.split('.')
        return is_int(whole) and is_int(frac)
    if 'e' in val_str and len(val_str.split('e')) == 2:
        mantissa, exponent = val_str.split('e')
        return is_int(mantissa) and is_int(exponent)
    return False


def is_bool(var_str):
    """Return True if the string spells a boolean (True/true/False/false)."""
    return var_str in ('True', 'true', 'False', 'false')


def parse_bool(var_str):
    """Map 'True'/'true' to True; anything else to False."""
    return var_str in ('True', 'true')


def is_list(val_str):
    """Return True if the (non-empty) string is wrapped in square brackets."""
    if not val_str:
        return False  # guard: empty string previously raised IndexError
    return val_str[0] == '[' and val_str[-1] == ']'


def parse_list(val_str):
    """Parse '[a, b, c]' into a list, converting each comma-separated
    element to int/float/bool where possible (strings otherwise)."""
    output = []
    for item in val_str[1:-1].split(','):
        item = item.strip()
        if(is_int(item)):
            output.append(int(item))
        elif(is_float(item)):
            output.append(float(item))
        elif(is_bool(item)):
            output.append(parse_bool(item))
        else:
            output.append(item)
    return output
def show_many(image, number_sqrt, dim=32, channels=3):
    """Tile `number_sqrt**2` images into a square grid and display it.

    Accepts a batch in either (N, C, H, W) or (N, H, W, C) layout; a
    channels-first batch is permuted to channels-last before tiling.
    """
    if image.size(1) == 3:
        image = image.permute(0, 2, 3, 1)
    canvas = np.empty((dim * number_sqrt, dim * number_sqrt, channels))
    idx = 0
    for row in range(number_sqrt):
        for col in range(number_sqrt):
            # Paste image `idx` into tile (row, col) of the canvas.
            canvas[row * dim:(row + 1) * dim, col * dim:(col + 1) * dim, :] = \
                image[idx]
            idx += 1
    plt.rcParams["axes.grid"] = False
    plt.figure(figsize=(number_sqrt, number_sqrt))
    plt.axis('off')
    plt.imshow(canvas)
    plt.show()
def grey_show_many(image,number_sqrt):
    """Tile `number_sqrt**2` greyscale 28x28 images into a grid and display it."""
    canvas_recon = np.empty((28 * number_sqrt, 28 * number_sqrt))
    count=0
    for i in range(number_sqrt):
        for j in range(number_sqrt):
            # Paste image `count` into tile (i, j); each element of `image`
            # is reshaped to 28x28 (MNIST-sized) before pasting.
            canvas_recon[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = \
            image[count].reshape([28, 28])
            count+=1
    plt.rcParams["axes.grid"] = False
    plt.figure(figsize=(number_sqrt, number_sqrt))
    plt.axis('off')
    plt.imshow(canvas_recon, origin="upper", cmap="gray")
    plt.show()
def color_sample(net, device,num_sqrt=10):
    """Autoregressively sample `num_sqrt**2` 32x32 RGB images from `net`
    and display them as a grid via `show_many`.

    Assumes `net` outputs 256*3 logits per pixel (256-way softmax per
    channel) — TODO confirm against the trained model's head.
    """
    sample = torch.Tensor(num_sqrt**2, 3, 32, 32).to(device)
    sample.fill_(0)
    with torch.no_grad():
        #Generating images pixel by pixel
        for i in range(32):
            for j in range(32):
                # Reshape the output to (batch, 256 logits, 3 channels, H, W),
                # keep only pixel (i, j), and move channels before logits.
                out = net(sample).view(-1,256,3,32,32)[:,:,:,i,j].permute(0,2,1).contiguous()
                probs = F.softmax(out.view(-1,256), dim=-1).data
                # Sample one intensity per channel and write it back in [0, 1].
                sample[:,:,i,j] = (torch.multinomial(probs, 1).float() / 255.0).view(-1,3)
        #Saving images row wise
        show_many(sample.detach().cpu(),num_sqrt)
def discretized_mix_logistic_uniform(x, l, alpha=0.0001):
    """Negative log-likelihood under a discretized mixture of logistics
    smoothed with a uniform component.

    Args:
        x: images rescaled to [-1, 1], shape (B, 3, H, W).
        l: network output, shape (B, 10*(1+3*3), H, W): 10 mixture logits
           plus, per RGB channel, 10 means, 10 log-scales, 10 coefficients.
        alpha: weight of the uniform smoothing distribution.

    Returns:
        A scalar tensor: the summed negative log-probability.
    """
    # Move channels last: x -> (B, H, W, 3), l -> (B, H, W, C).
    x = x.permute(0, 2, 3, 1)
    l = l.permute(0, 2, 3, 1)
    xs = [int(y) for y in x.size()]
    ls = [int(y) for y in l.size()]
    nr_mix = int(ls[-1] / 10)
    # First `nr_mix` channels are the mixture logits; the remainder hold
    # per-channel means, log-scales and autoregressive RGB coefficients.
    logit_probs = l[:, :, :, :nr_mix]
    l = l[:, :, :, nr_mix:].contiguous().view(xs + [nr_mix * 3])
    means = l[:, :, :, :, :nr_mix]
    log_scales = torch.clamp(l[:, :, :, :, nr_mix:2 * nr_mix], min=-7.)
    # `F.tanh`/`F.sigmoid` are deprecated; use the `torch.*` equivalents.
    coeffs = torch.tanh(l[:, :, :, :, 2 * nr_mix:3 * nr_mix])
    x = x.contiguous()
    # Broadcast each pixel value across the mixture components. (The old
    # `Variable(...)` wrapper is a no-op in modern PyTorch and was dropped.)
    x = x.unsqueeze(-1) + torch.zeros(xs + [nr_mix], device=x.device)
    # Condition the G mean on R, and the B mean on R and G.
    m2 = (means[:, :, :, 1, :] + coeffs[:, :, :, 0, :]
          * x[:, :, :, 0, :]).view(xs[0], xs[1], xs[2], 1, nr_mix)
    m3 = (means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * x[:, :, :, 0, :] +
          coeffs[:, :, :, 2, :] * x[:, :, :, 1, :]).view(xs[0], xs[1], xs[2], 1, nr_mix)
    means = torch.cat((means[:, :, :, 0, :].unsqueeze(3), m2, m3), dim=3)
    centered_x = x - means
    inv_stdv = torch.exp(-log_scales)
    # Probability mass of the discretization bin [x - 1/255, x + 1/255].
    plus_in = inv_stdv * (centered_x + 1. / 255.)
    cdf_plus = torch.sigmoid(plus_in)
    min_in = inv_stdv * (centered_x - 1. / 255.)
    cdf_min = torch.sigmoid(min_in)
    # Saturate the edge bins so they absorb the distribution tails.
    cdf_plus = torch.where(x > 0.999, torch.tensor(1.0).to(x.device), cdf_plus)
    cdf_min = torch.where(x < -0.999, torch.tensor(0.0).to(x.device), cdf_min)
    # Bin mass under the uniform smoothing distribution over 256 levels.
    uniform_cdf_min = ((x + 1.) / 2 * 255) / 256.
    uniform_cdf_plus = ((x + 1.) / 2 * 255 + 1) / 256.
    pi = torch.softmax(logit_probs, -1).unsqueeze(-2).repeat(1, 1, 1, 3, 1)
    mix_cdf_plus = ((1 - alpha) * pi * cdf_plus + (alpha / nr_mix) * uniform_cdf_plus).sum(-1)
    mix_cdf_min = ((1 - alpha) * pi * cdf_min + (alpha / nr_mix) * uniform_cdf_min).sum(-1)
    log_probs = torch.log(mix_cdf_plus - mix_cdf_min)
    return -log_probs.sum()
| 6,542 | 29.863208 | 96 | py |
NeLLoC | NeLLoC-main/model.py | import torch.nn as nn
import torch
import torch.nn.functional as F
class MaskedCNN(nn.Conv2d):
    """Conv2d whose kernel is masked for autoregressive pixel ordering.

    Mask type 'A' zeroes the centre tap and everything after it in raster
    order (used for the first layer so a pixel never sees itself); type 'B'
    keeps the centre tap (used for all subsequent layers).
    """

    def __init__(self, mask_type, *args, **kwargs):
        self.mask_type = mask_type
        assert mask_type in ['A', 'B'], "Unknown Mask Type"
        super(MaskedCNN, self).__init__(*args, **kwargs)
        # The mask is a buffer (not a parameter) shaped like the weight.
        self.register_buffer('mask', self.weight.data.clone())
        _, _, height, width = self.weight.size()
        self.mask.fill_(1)
        centre = width // 2
        # Zero out taps at/after the current pixel in the centre row, and
        # every row below it.
        if mask_type == 'A':
            self.mask[:, :, height // 2, centre:] = 0
        else:
            self.mask[:, :, height // 2, centre + 1:] = 0
        self.mask[:, :, height // 2 + 1:, :] = 0

    def forward(self, x):
        # Re-apply the mask each call so optimizer updates never leak into
        # the forbidden taps.
        self.weight.data *= self.mask
        return super(MaskedCNN, self).forward(x)
class LocalPixelCNN(nn.Module):
    """
    Network of PixelCNN as described in A Oord et. al.

    A type-'A' masked input convolution followed by `res_num` residual
    stacks of 1x1 type-'B' masked convolutions and a two-layer 1x1 head.
    """
    def __init__(self, res_num=10, in_kernel = 7, in_channels=3, channels=256, out_channels=256, device=None):
        super(LocalPixelCNN, self).__init__()
        self.channels = channels
        self.layers = {}
        self.device = device
        self.res_num = res_num
        # Type-'A' masked convolution: the current pixel is hidden.
        self.in_cnn = MaskedCNN('A', in_channels, channels, in_kernel, 1, in_kernel // 2, bias=False)
        self.activation = nn.ReLU()
        # Three parallel lists of 1x1 type-'B' masked convolutions; each
        # residual branch chains one conv from each list.
        # NOTE(review): `resnet_cnn3` is also 1x1 despite its name — looks
        # like a 3x3 was intended; kept as-is to stay checkpoint-compatible.
        self.resnet_cnn11 = torch.nn.ModuleList(
            [MaskedCNN('B', channels, channels, 1, 1, 0) for _ in range(0, res_num)])
        self.resnet_cnn3 = torch.nn.ModuleList(
            [MaskedCNN('B', channels, channels, 1, 1, 0) for _ in range(0, res_num)])
        self.resnet_cnn12 = torch.nn.ModuleList(
            [MaskedCNN('B', channels, channels, 1, 1, 0) for _ in range(0, res_num)])
        self.out_cnn1 = nn.Conv2d(channels, channels, 1)
        self.out_cnn2 = nn.Conv2d(channels, out_channels, 1)

    def forward(self, x):
        x = self.activation(self.in_cnn(x))
        for i in range(0, self.res_num):
            # Residual branch: three 1x1 masked convs, each ReLU-activated.
            branch = self.activation(self.resnet_cnn11[i](x))
            branch = self.activation(self.resnet_cnn3[i](branch))
            branch = self.activation(self.resnet_cnn12[i](branch))
            x = x + branch
        x = self.activation(self.out_cnn1(x))
        return self.out_cnn2(x)
| 2,366 | 31.875 | 116 | py |
NeLLoC | NeLLoC-main/train.py | import os
import torch
from torch import optim
from torch.utils import data
import torch.nn as nn
from model import *
import numpy as np
import torchvision
import torch.nn.functional as F
from torch.optim import lr_scheduler
from torchvision import datasets, transforms
from torch.autograd import Variable
from utils import *
# Training pipeline: random horizontal flip augmentation, then conversion
# to a float tensor in [0, 1].
transform_train = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ToTensor()
])
# NOTE(review): download=False assumes CIFAR-10 already exists at this path.
trainset = torchvision.datasets.CIFAR10(root='../data/cifar10', train=True, download=False, transform=transform_train)
train = data.DataLoader(trainset, batch_size=100, shuffle=True, num_workers=3)
testset = torchvision.datasets.CIFAR10(root='../data/cifar10', train=False, download=False, transform=transform_train)
test = data.DataLoader(testset, batch_size=1000, shuffle=True, num_workers=3)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# res_num=0: only the masked input convolution and the 1x1 head are used.
# out_channels=100 = 10 mixture logits + 3 channels * 30 mixture params.
net = LocalPixelCNN(res_num=0, in_kernel = 7, in_channels=3, channels=256, out_channels=100).to(device)
optimizer = optim.Adam(net.parameters(), lr=3e-4)
# Decay the LR slightly after every scheduler step (called per batch below).
scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99995)
criterion = lambda real, fake : discretized_mix_logistic_uniform(real, fake, alpha=0.0001)
# Map [0, 1] pixels to [-1, 1] and back.
rescaling = lambda x : (x - .5) * 2.
rescaling_inv = lambda x : .5 * x + .5
for e in range(1001):
    print('epoch',e)
    net.train()
    for images, labels in train:
        images = rescaling(images).to(device)
        optimizer.zero_grad()
        output = net(images)
        loss = criterion(images, output)
        loss.backward()
        optimizer.step()
        scheduler.step()
    with torch.no_grad():
        net.eval()
        bpd_cifar_sum=0.
        for i, (images, labels) in enumerate(test):
            images = rescaling(images).to(device)
            output = net(images)
            loss = criterion(images, output).item()
            # Convert summed NLL (nats) to bits-per-dimension for a batch
            # of 1000 CIFAR images (32*32*3 dimensions each).
            bpd_cifar_sum+=loss/(np.log(2.)*(1000*32*32*3))
        # Average over the 10 test batches.
        bpd_cifar=bpd_cifar_sum/10
        print('bpd_cifar',bpd_cifar)
    # Overwrite the checkpoint after every epoch.
    save_path='./model_save/'
    torch.save(net.state_dict(), save_path+'rs0_cifar_ks7.pt')
| 2,146 | 26.883117 | 118 | py |
NeLLoC | NeLLoC-main/coder/distributions.py | import torch
import numpy as np
rescaling = lambda x : (x - .5) * 2.
rescaling_inv = lambda x : .5 * x + .5


def discretized_mix_logistic_cdftable(means, log_scales, pi, alpha=0.0001):
    """Build CDF tables over all 256 pixel values for one colour channel.

    Returns (cdf_plus, cdf_min): the mixture CDF evaluated at the upper and
    lower edge of every pixel bin, each of shape (256,), including a
    uniform smoothing component weighted by `alpha`.
    """
    device = means.device
    # All 256 pixel intensities rescaled to [-1, 1] and tiled across the
    # 10 mixture components: shape (256, 10).
    levels = rescaling(torch.arange(0, 256) / 255.).view(256, 1).repeat(1, 10).to(device)
    centered = levels - means
    inv_std = torch.exp(-log_scales)
    half_bin = 1. / 255.
    cdf_plus = torch.sigmoid(inv_std * (centered + half_bin))
    cdf_min = torch.sigmoid(inv_std * (centered - half_bin))
    # Saturate the edge bins so the table spans the full real line.
    cdf_plus = torch.where(levels > 0.999, torch.tensor(1.0).to(device), cdf_plus)
    cdf_min = torch.where(levels < -0.999, torch.tensor(0.0).to(device), cdf_min)
    # Matching bin edges under the uniform smoothing distribution.
    uniform_min = ((levels + 1.) / 2 * 255) / 256.
    uniform_plus = ((levels + 1.) / 2 * 255 + 1) / 256.
    mix_plus = ((1 - alpha) * pi * cdf_plus + (alpha / 10) * uniform_plus).sum(-1)
    mix_min = ((1 - alpha) * pi * cdf_min + (alpha / 10) * uniform_min).sum(-1)
    return mix_plus, mix_min
def compute_stats(l):
    """Split a raw 100-dim network output vector into mixture parameters.

    Layout: 10 mixture logits, then per RGB channel 10 means, 10 log-scales
    (clamped at -7) and 10 tanh-squashed RGB coefficients.

    Returns (means, coeffs, log_scales, pi) with shapes (3,10)x3 and (10,).
    """
    nr_mix = 10
    pi = torch.softmax(l[:nr_mix], -1)
    params = l[nr_mix:].view(3, 30)
    means = params[:, :nr_mix]
    log_scales = torch.clamp(params[:, nr_mix:2 * nr_mix], min=-7.)
    coeffs = torch.tanh(params[:, 2 * nr_mix:3 * nr_mix])
    return means, coeffs, log_scales, pi
def cdf_table_processing(cdf_plus, cdf_min, p_prec):
    """Quantise CDF tables into integer frequencies for the ANS coder.

    Returns (cumulative_starts, freqs) as numpy arrays: per-symbol integer
    frequencies summing exactly to 2**p_prec, and their cumulative starts.
    """
    p_total = 1 << p_prec
    lo = np.rint(cdf_min.numpy() * p_total)
    hi = np.rint(cdf_plus.numpy() * p_total)
    freqs = hi - lo
    # Every symbol needs a non-zero frequency to remain decodable.
    freqs[freqs == 0] = 1
    # Absorb the rounding surplus/deficit into the most likely symbol so
    # the total is exactly p_total.
    freqs[np.argmax(freqs)] += (p_total - np.sum(freqs))
    starts = np.concatenate(([0], np.cumsum(freqs)[:-1]))
    return starts, freqs
| 1,553 | 33.533333 | 83 | py |
NeLLoC | NeLLoC-main/coder/ans_coder.py | from coder.distributions import *
import numpy as np
class ANSStack(object):
    """Asymmetric numeral system (rANS) coder state with a spill stack.

    `s` is the working state, kept within [s_min, s_max); `t_stack` holds
    `t_prec`-bit words spilled during renormalisation. Symbols are coded by
    their cumulative count `c_min` and frequency `p` (out of 2**p_prec).
    """

    def __init__(self, s_prec, t_prec, p_prec):
        self.s_prec = s_prec
        self.t_prec = t_prec
        self.p_prec = p_prec
        self.t_mask = (1 << t_prec) - 1
        self.s_min = 1 << s_prec - t_prec
        self.s_max = 1 << s_prec
        self.s, self.t_stack = self.s_min, []

    def push(self, c_min, p):
        """Encode one symbol with cumulative count `c_min`, frequency `p`."""
        # Renormalise: spill low bits while the state is too large for `p`.
        limit = p << (self.s_prec - self.p_prec)
        while self.s >= limit:
            self.t_stack.append(self.s & self.t_mask)
            self.s = self.s >> self.t_prec
        self.s = (self.s // p << self.p_prec) + self.s % p + c_min
        assert self.s_min <= self.s < self.s_max

    def pop(self):
        """Peek the coded slot: the low `p_prec` bits of the state."""
        return self.s & ((1 << self.p_prec) - 1)

    def update(self, s_bar, c_min, p):
        """Consume the decoded symbol `s_bar` and refill from the stack."""
        self.s = p * (self.s >> self.p_prec) + s_bar - c_min
        while self.s < self.s_min:
            self.s = (self.s << self.t_prec) + self.t_stack.pop()
        assert self.s_min <= self.s < self.s_max

    def get_length(self):
        """Approximate bit length of the current encoded stream."""
        return len(self.t_stack) * self.t_prec + len(bin(self.s))
def get_length(s, t_stack):
    """Bits-per-dimension estimate of an ANS stream for a 32x32x3 image."""
    total_bits = len(t_stack) * 16 + len(bin(s))
    return total_bits / (32 * 32 * 3)
def cpu_ans_compression(model, img, h, w, k, p_prec=16):
    """Losslessly compress an h x w RGB image with `model` + ANS coding.

    `img` holds integer pixel values in [0, 255], shape (1, 3, h, w);
    `k` is the model's receptive-field kernel size. Returns the filled
    `ANSStack`. Symbols are first collected in raster order, then pushed
    in reverse so the decoder pops them forward.
    """
    c_list = []
    p_list = []
    rf = int(k / 2)
    with torch.no_grad():
        for i in range(0, h):
            for j in range(0, w):
                # Crop the local context patch around pixel (i, j); rows
                # above get `rf` extra columns to the right.
                up = max(0, i - rf)
                left = max(0, j - rf)
                down = i + 1
                right = j + 1 + int(i > 0) * rf
                m, n = min(rf, i), min(rf, j)
                patch_int = img[:, :, up:down, left:right]
                model_output = model(rescaling(patch_int / 255.))
                means, coeffs, log_scales, pi = compute_stats(model_output[0, :, m, n].view(-1))
                c_0 = rescaling(int(img[0, 0, i, j]) / 255.)
                c_1 = rescaling(int(img[0, 1, i, j]) / 255.)
                for c in range(0, 3):
                    # Condition G on R, and B on R and G (true pixel values).
                    if c == 0:
                        mean = means[0:1, :]
                    elif c == 1:
                        mean = means[1:2, :] + coeffs[0:1, :] * c_0
                    else:
                        mean = means[2:3, :] + coeffs[1:2, :] * c_0 + coeffs[2:3, :] * c_1
                    # Quantised cumulative-start and frequency tables.
                    cdf_min_table, probs_table = cdf_table_processing(*discretized_mix_logistic_cdftable(mean, log_scales[c:c+1], pi), p_prec)
                    c_list.append(int(cdf_min_table[patch_int[0, c, m, n]]))
                    p_list.append(int(probs_table[patch_int[0, c, m, n]]))
    # Push symbols last-to-first: ANS is a stack (LIFO) coder.
    ans_stack = ANSStack(s_prec = 32, t_prec = 16, p_prec=p_prec)
    for i in np.arange(len(c_list) - 1, -1, -1):
        c_min, p = c_list[i], p_list[i]
        ans_stack.push(c_min, p)
    return ans_stack
def cpu_ans_decompression(model, ans_stack, h, w, k, p_prec=16):
    """Decode an h x w RGB image from `ans_stack` using the same `model`.

    Mirrors `cpu_ans_compression`: pixels are reconstructed in raster
    order, each channel conditioned on the already-decoded ones. Returns
    the decoded image tensor of shape (3, h, w) with values in [0, 255].
    """
    with torch.no_grad():
        rf = int(k / 2)
        decode_img = torch.zeros([1, 3, h, w])
        for i in range(0, h):
            for j in range(0, w):
                # Same local context window as used during compression.
                up = max(0, i - rf)
                left = max(0, j - rf)
                down = i + 1
                right = j + 1 + int(i > 0) * rf
                patch = decode_img[:, :, up:down, left:right]
                m, n = min(rf, i), min(rf, j)
                model_output = model(rescaling(patch / 255.))
                means, coeffs, log_scales, pi = compute_stats(model_output[0, :, m, n].view(-1))
                c_vector = [0, 0, 0]
                for c in range(0, 3):
                    if c == 0:
                        mean = means[0:1, :]
                    elif c == 1:
                        mean = means[1:2, :] + coeffs[0:1, :] * c_vector[0]
                    else:
                        mean = means[2:3, :] + coeffs[1:2, :] * c_vector[0] + coeffs[2:3, :] * c_vector[1]
                    cdf_min_table, probs_table = cdf_table_processing(*discretized_mix_logistic_cdftable(mean, log_scales[c:c+1], pi), p_prec)
                    s_bar = ans_stack.pop()
                    # Locate the symbol whose cumulative interval holds s_bar.
                    pt = np.searchsorted(cdf_min_table, s_bar, side='right', sorter=None) - 1
                    decode_img[0, c, i, j] = pt
                    c_vector[c] = torch.tensor(rescaling(pt / 255.))
                    # patch[0,c,m,n]=pt/255.
                    # NOTE(review): this rebinds the loop variable `c`;
                    # harmless since `for` resets it, but easy to misread.
                    c, p = int(cdf_min_table[pt]), int(probs_table[pt])
                    ans_stack.update(s_bar, c, p)
    return decode_img[0]
NeLLoC | NeLLoC-main/coder/ac_coder.py | from model import *
from decimal import *
from coder.distributions import *
tensor2decimal= lambda x : Decimal(str(x.cpu().item()))
def bin_2_float(binary):
    """Interpret a bit sequence as the binary fraction 0.b1b2... (Decimal)."""
    value = Decimal('0.0')
    weight = Decimal('0.5')
    for bit in binary:
        value += weight * int(bit)
        weight *= Decimal('0.5')
    return value
def range_2_bin(low, high):
    """Emit a bit string whose binary fraction lands inside [low, high].

    Greedily appends bits: a 1 is taken whenever adding the current weight
    stays at or below `high`; stops once the accumulated value reaches `low`.
    """
    code = []
    acc = Decimal('0.0')
    weight = Decimal('0.5')
    while acc < low:
        candidate = acc + weight
        if candidate > high:
            code.append(0)
        else:
            code.append(1)
            acc = candidate
        weight *= Decimal('0.5')
    return code
def cpu_ac_compression(model, img, k):
    """Losslessly compress an RGB image with `model` + arithmetic coding.

    `img` holds integer pixel values in [0, 255], shape (1, 3, H, W); `k`
    is the model's receptive-field kernel size. Narrows the [low, high)
    interval one channel at a time using exact `Decimal` arithmetic and
    returns the final bit string.
    """
    with torch.no_grad():
        model.eval()
        device = next(model.parameters()).device
        rf = int(k / 2)
        size = img.size()
        total_size = size[1] * size[2] * size[3]
        old_low = Decimal('0.0')
        old_high = Decimal('1.0')
        _range = Decimal('1.0')
        for i in range(0, size[2]):
            for j in range(0, size[3]):
                # Crop the local context patch around pixel (i, j); rows
                # above get `rf` extra columns to the right.
                up = max(0, i - rf)
                left = max(0, j - rf)
                down = i + 1
                right = j + 1 + int(i > 0) * rf
                patch = rescaling(img[:, :, up:down, left:right] / 255.)
                m, n = min(rf, i), min(rf, j)
                model_output = model(patch.to(device))
                means, coeffs, log_scales, pi = compute_stats(model_output[0, :, m, n].view(-1))
                c_0 = rescaling(int(img[0, 0, i, j]) / 255.)
                c_1 = rescaling(int(img[0, 1, i, j]) / 255.)
                for c in range(0, 3):
                    # Condition G on R, and B on R and G (true pixel values).
                    if c == 0:
                        mean = means[0:1, :]
                    elif c == 1:
                        mean = means[1:2, :] + coeffs[0:1, :] * c_0
                    else:
                        mean = means[2:3, :] + coeffs[1:2, :] * c_0 + coeffs[2:3, :] * c_1
                    cdf_plus, cdf_min = discretized_mix_logistic_cdftable(mean, log_scales[c:c+1], pi)
                    # Narrow the coding interval to this symbol's CDF slice.
                    low = old_low + _range * tensor2decimal(cdf_min[int(img[0, c, i, j])])
                    high = old_low + _range * tensor2decimal(cdf_plus[int(img[0, c, i, j])])
                    _range = high - low
                    old_low = low
                    old_high = high
    # Shortest bit string identifying a fraction inside the final interval.
    code = range_2_bin(low, high)
    return code
def cpu_ac_decompression(model,code,h,w,k):
    """Invert ``cpu_ac_compression``: decode a bit list back into a 3 x h x w image.

    Each symbol is recovered by binary-searching the 256-entry channel CDF
    for the current code value, then rescaling the remaining code into the
    chosen symbol's interval.
    """
    model.eval()
    device=next(model.parameters()).device
    with torch.no_grad():
        prob = bin_2_float(code)
        low = Decimal(0.0)
        high = Decimal(1.0)
        _range = Decimal(1.0)
        rf=int(k/2)
        decode_img=torch.zeros([1,3,h,w])
        for i in range(0,h):
            for j in range(0,w):
                # Context patch around (i, j), clipped at the border.
                up=max(0,i-rf)
                left=max(0,j-rf)
                down=i+1
                right=j+1+int(i>0)*rf
                patch=decode_img[:,:,up:down,left:right].clone()
                m,n=min(rf,i),min(rf,j)
                model_output=model(rescaling(patch/255.).to(device))
                means,coeffs,log_scales, pi=compute_stats(model_output[0,:,m,n].view(-1))
                # Rescaled channel values decoded so far at this pixel.
                c_vector=[0,0,0]
                for c in range(0,3):
                    if c==0:
                        mean=means[0:1, :]
                    elif c==1:
                        mean=means[1:2, :] + coeffs[0:1, :]* c_vector[0]
                    else:
                        mean=means[2:3, :] + coeffs[1:2, :]* c_vector[0] +coeffs[2:3, :] * c_vector[1]
                    cdf_plus,cdf_min= discretized_mix_logistic_cdftable(mean,log_scales[c:c+1],pi)
                    # Binary search over the 256 symbols (2**8, so <= 9 steps).
                    s=128
                    bl=0
                    br=256
                    for bs in range(0,9):
                        if tensor2decimal(cdf_min[s])>prob:
                            br=s
                            s=int((s+bl)/2)
                        elif tensor2decimal(cdf_plus[s])<prob:
                            bl=s
                            s=int((s+br)/2)
                        else:
                            decode_img[0,c,i,j]=s
                            low=tensor2decimal(cdf_min[s])
                            high=tensor2decimal(cdf_plus[s])
                            c_vector[c]=torch.tensor(rescaling(s/255.))
                            # Rescale the code into the symbol's interval.
                            _range=high-low
                            prob=(prob-low)/_range
                            break
    return decode_img[0]
| 4,462 | 33.596899 | 103 | py |
NeLLoC | NeLLoC-main/batch_coder/distributions.py | import torch
import numpy as np
# Map pixel values from [0, 1] to [-1, 1], and back.
rescaling = lambda x : (x - .5) * 2.
rescaling_inv = lambda x : .5 * x + .5
def discretized_mix_logistic_cdftable(means, log_scales,pi, alpha=0.0001):
    """Build per-symbol CDF tables for a 10-component discretized logistic mixture.

    ``means``/``log_scales`` broadcast against a (bs, 256, 10) grid of the 256
    pixel values rescaled to [-1, 1]; ``pi`` holds per-row mixture weights.
    Returns (cdf_plus, cdf_min): the mixture CDF at the upper and lower edge
    of each pixel bin, blended with a uniform CDF of weight ``alpha`` so that
    no symbol ends up with zero probability.
    """
    bs=means.size(0)
    pi=pi.unsqueeze(1)
    # All 256 symbol values rescaled to [-1, 1], tiled per batch row and component.
    x=rescaling(torch.arange(0,256)/255.).view(1,256,1).repeat(bs,1,10)
    centered_x = x - means
    inv_stdv = torch.exp(-log_scales)
    # Logistic CDF at the upper/lower edge of each 1/255-wide bin.
    plus_in = inv_stdv * (centered_x + 1. / 255.)
    cdf_plus = torch.sigmoid(plus_in)
    min_in = inv_stdv * (centered_x - 1. / 255.)
    cdf_min = torch.sigmoid(min_in)
    # Pin the extreme bins so the table spans the full [0, 1] range.
    cdf_plus=torch.where(x > 0.999, torch.tensor(1.0).to(x.device),cdf_plus)
    cdf_min=torch.where(x <- 0.999, torch.tensor(0.0).to(x.device),cdf_min)
    uniform_cdf_min = ((x+1.)/2*255)/256.
    uniform_cdf_plus = ((x+1.)/2*255+1)/256.
    # Blend in the uniform CDF (weight alpha) and sum out the mixture dimension.
    mix_cdf_plus=((1-alpha)*pi*cdf_plus+(alpha/10)*uniform_cdf_plus).sum(-1)
    mix_cdf_min=((1-alpha)*pi*cdf_min+(alpha/10)*uniform_cdf_min).sum(-1)
    return mix_cdf_plus,mix_cdf_min
def compute_stats(l):
    """Split raw network output ``l`` of shape (batch, 100) into mixture parameters.

    Per-row layout: 10 mixture logits, then for each of the 3 channels
    10 means, 10 log-scales and 10 linear coefficients.
    Returns (means, coeffs, log_scales, pi) with shapes (batch, 3, 10) for
    the first three and (batch, 10) for ``pi``; log-scales are clamped at -7
    and the coefficients are squashed through tanh.
    """
    n_mix = 10
    batch = l.size(0)
    pi = torch.softmax(l[:, :n_mix], -1)
    per_channel = l[:, n_mix:].view(batch, 3, 3 * n_mix)
    means = per_channel[:, :, :n_mix]
    log_scales = torch.clamp(per_channel[:, :, n_mix : 2 * n_mix], min=-7.0)
    coeffs = torch.tanh(per_channel[:, :, 2 * n_mix : 3 * n_mix])
    return means, coeffs, log_scales, pi
def get_mean_c1(means, mean_linear, x):
    """Condition channel-1 mixture means on the (rescaled) channel-0 value.

    ``x`` gains a trailing axis so it broadcasts against the mixture axis.
    """
    shift = x.unsqueeze(-1) * mean_linear
    return means + shift
def get_mean_c2(means, mean_linear, x):
    """Condition channel-2 mixture means on the (rescaled) channel-0/1 values.

    Batched (1 x 2) @ (2 x 10) products yield one offset per mixture component.
    """
    contribution = torch.bmm(x.view(-1, 1, 2), mean_linear.view(-1, 2, 10))
    return means + contribution.view(-1, 1, 10)
def cdf_table_processing(cdf_plus, cdf_min, p_prec):
    """Quantize floating-point CDF tables into integer rANS frequency tables.

    Both inputs are (batch, 256) tensors holding the upper/lower bin CDFs.
    Returns (cumulative, freqs) uint32 arrays where every row of ``freqs``
    sums exactly to ``2**p_prec`` and ``cumulative`` is its exclusive prefix sum.
    """
    total = np.asarray((1 << p_prec), dtype='uint32')
    batch = cdf_plus.size(0)
    lo = np.rint(cdf_min.numpy() * total).astype('uint32')
    hi = np.rint(cdf_plus.numpy() * total).astype('uint32')
    freqs = hi - lo
    # Every symbol keeps a nonzero frequency so it remains decodable.
    freqs[freqs == 0] = 1
    # Fold the rounding error into the most frequent symbol so each row
    # sums exactly to 2**p_prec.
    top = np.argmax(freqs, axis=1).reshape(-1, 1)
    error = total - np.sum(freqs, -1, keepdims=True)
    np.put_along_axis(freqs, top, error + np.take_along_axis(freqs, top, axis=-1), axis=-1)
    cumulative = np.concatenate(
        (np.zeros((batch, 1), dtype='uint32'),
         np.cumsum(freqs[:, :-1], axis=-1, dtype='uint32')),
        1,
    )
    return cumulative, freqs
| 2,159 | 35 | 115 | py |
NeLLoC | NeLLoC-main/batch_coder/ians_coder.py | import torch
import numpy as np
from batch_coder.distributions import *
# rANS coder precision constants.
s_prec = 64  # total bits of the coder state
t_prec = 32  # bits flushed to / read from the stream per renormalization
t_mask = (1 << t_prec) - 1
# The state is kept inside [s_min, s_max) so renormalization is invertible.
s_min = 1 << s_prec - t_prec
s_max = 1 << s_prec
s_prec_u, t_prec_u = np.uint8(s_prec), np.uint8(t_prec)


def get_length(s, t_stack):
    """Return the compressed size in bits per subpixel for a 32x32 RGB image.

    ``s`` is the iterable of final coder states and ``t_stack`` the stream of
    emitted 32-bit words.

    Bug fix: the previous version measured each state with ``len(bin(i))``,
    which also counts the two characters of the ``'0b'`` prefix and thus
    overstated the size by 2 bits per state; ``int.bit_length`` is exact.
    """
    state_bits = sum(int(i).bit_length() for i in s)
    return (len(t_stack) * t_prec + state_bits) / (32 * 32 * 3)
def cpu_ans_compression(model,img,h=32,w=32,k=7,p_prec=20):
    """rANS-encode a batch of images (indexed as bs x 3 x h x w, values 0..255).

    First pass: walk pixels in raster order and collect, per subpixel, the
    quantized cumulative frequency (``c_list``) and frequency (``p_list``)
    under the model's mixture-of-logistics distribution at ``p_prec`` bits.
    Second pass: fold the symbols into the rANS states in reverse order so
    that decoding can replay them forwards.
    Returns (s, t_stack): final uint64 state per image plus the stream of
    emitted 32-bit words.
    """
    p_prec_u=np.uint8(p_prec)
    c_list=[]
    p_list=[]
    rf=int(k/2)
    with torch.no_grad():
        for i in range(0,h):
            for j in range(0,w):
                # Context patch clipped at the border; (m, n) addresses
                # pixel (i, j) inside the patch.
                up=max(0,i-rf)
                left=max(0,j-rf)
                down=i+1
                right=j+1+int(i>0)*rf
                m,n=min(rf,i),min(rf,j)
                patch_int=img[:,:,up:down,left:right]
                patch=rescaling(patch_int/255.)
                model_output=model(patch)
                means,coeffs,log_scales, pi=compute_stats(model_output[:,:,m,n].view(img.size(0),-1))
                # Conditioning inputs: c_0 = channel 0, c_1 = channels 0 and 1.
                c_0=rescaling(img[:,0:1,i,j]/255.)
                c_1=rescaling(img[:,0:2,i,j]/255.)
                for c in range(0,3):
                    if c==0:
                        mean=means[:,0:1, :]
                    elif c==1:
                        mean=get_mean_c1(means[:,1:2, :], coeffs[:,0:1, :],c_0)
                    else:
                        mean=get_mean_c2(means[:,2:3, :], coeffs[:,1:3, :],c_1)
                    cdf_min_table,probs_table= cdf_table_processing(*discretized_mix_logistic_cdftable(mean,log_scales[:,c:c+1],pi),p_prec)
                    # Record this subpixel's cumulative/frequency table entries.
                    c_list.append(np.take_along_axis(cdf_min_table,patch_int[:,c,m,n].numpy().reshape(-1,1),axis=-1).reshape(-1))
                    p_list.append(np.take_along_axis(probs_table,patch_int[:,c,m,n].numpy().reshape(-1,1),axis=-1).reshape(-1))
    # rANS encoding runs backwards over the recorded symbols.
    s, t_stack = np.asarray([s_min]*img.size(0),dtype='uint64'), []
    for i in np.arange(len(c_list)-1,-1,-1):
        c_min,p=c_list[i],p_list[i]
        # Renormalize: flush the low 32 bits of any state that would overflow.
        pos= s>>(s_prec - p_prec) >= p
        while True in pos:
            t_stack.extend(np.uint32(s[pos]))
            s[pos]>>= t_prec_u
            pos= s>>(s_prec_u - p_prec_u) >= p
        s = (s//p << p_prec_u) + s%p + c_min
    return s,t_stack
def cpu_ans_decompression(model,s,t_stack,bs,h=32,w=32,k=7,p_prec=20):
    """Invert ``cpu_ans_compression``: decode ``bs`` images of shape 3 x h x w.

    ``s`` holds the final uint64 coder states and ``t_stack`` the emitted
    32-bit words; both are consumed as symbols are decoded in raster order.
    """
    p_prec_u=np.uint8(p_prec)
    with torch.no_grad():
        # device=next(model.parameters()).device
        rf=int(k/2)
        decode_img=torch.zeros([bs,3,h,w])
        for i in range(0,h):
            for j in range(0,w):
                # Same context patch construction as the encoder.
                up=max(0,i-rf)
                left=max(0,j-rf)
                down=i+1
                right=j+1+int(i>0)*rf
                patch=rescaling(decode_img[:,:,up:down,left:right]/255.)
                m,n=min(rf,i),min(rf,j)
                model_output=model(patch)
                means,coeffs,log_scales, pi=compute_stats(model_output[:,:,m,n].view(bs,-1))
                for c in range(0,3):
                    if c==0:
                        mean=means[:,0:1, :]
                    elif c==1:
                        mean=get_mean_c1(means[:,1:2, :], coeffs[:,0:1, :],rescaling(decode_img[:,0:1,i,j]/255.))
                    else:
                        mean=get_mean_c2(means[:,2:3, :], coeffs[:,1:3, :],rescaling(decode_img[:,0:2,i,j]/255.))
                    cdf_min_table,probs_table= cdf_table_processing(*discretized_mix_logistic_cdftable(mean,log_scales[:,c:c+1],pi),p_prec)
                    # The low p_prec bits of each state select the symbol slot.
                    s_bar = s & np.uint64(((1 << p_prec) - 1))
                    pt=np.asarray([np.searchsorted(cdf_min_table[i], s_bar[i], side='right', sorter=None)-1 for i in range(0,bs)])
                    decode_img[:,c,i,j]=torch.tensor(pt)
                    # NOTE(review): `patch` is rebuilt from decode_img at the next
                    # pixel and is not read again before that, so this write looks
                    # dead (it also skips `rescaling`) — confirm before relying on it.
                    patch[:,c,m,n]=torch.tensor(pt/255. )
                    c_min=np.take_along_axis(cdf_min_table,pt.reshape(-1,1),axis=-1).reshape(-1)
                    p=np.take_along_axis(probs_table,pt.reshape(-1,1),axis=-1).reshape(-1)
                    s = p * (s >> np.uint8(p_prec)) + s_bar - c_min
                    # Renormalize
                    pos= s < s_min
                    while True in pos:
                        t_top=t_stack[-sum(pos):]
                        del t_stack[-sum(pos):]
                        s[pos] = (s[pos] << t_prec_u) + t_top
                        pos= s < s_min
                    # for yo in s:
                    #     assert s_min <= yo < s_max
    return decode_img
| 4,562 | 37.025 | 150 | py |
onnx | onnx-main/docs/docsgen/source/conf.py | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=W0622
# type: ignore
import os
import sys
import warnings
import onnx
# Make modules that sit next to this file importable (presumably the
# "onnx_sphinx" extension listed below lives here — confirm).
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
# -- Project information -----------------------------------------------------
author = "ONNX"
copyright = "2023"
project = "ONNX"
release = onnx.__version__
version = onnx.__version__
# define the latest opset to document,
# this is meant to avoid documenting opset not released yet
max_opset = onnx.helper.VERSION_TABLE[-1][2]
# define the latest opset to document for every opset
_opsets = [t for t in onnx.helper.VERSION_TABLE if t[2] == max_opset][-1]
# Highest opset to document per domain ('' is the default ai.onnx domain);
# indexes 3/4 follow the row layout of onnx.helper.VERSION_TABLE.
max_opsets = {
    '': max_opset,
    'ai.onnx.ml': _opsets[3],
    'ai.onnx.training': _opsets[4],
}
# -- General configuration ---------------------------------------------------
extensions = [
    "myst_parser",
    "onnx_sphinx",
    "sphinx_copybutton",
    "sphinx_exec_code",
    "sphinx_tabs.tabs",
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.doctest",
    "sphinx.ext.githubpages",
    "sphinx.ext.graphviz",
    "sphinx.ext.ifconfig",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
]
myst_enable_extensions = [
    "amsmath",
    "attrs_inline",
    "colon_fence",
    "deflist",
    "dollarmath",
    "fieldlist",
    "html_admonition",
    "html_image",
    "linkify",
    "replacements",
    "smartquotes",
    "strikethrough",
    "substitution",
    "tasklist",
]
coverage_show_missing_items = True
exclude_patterns = []
graphviz_output_format = "svg"
html_css_files = ["css/custom.css"]
html_favicon = "onnx-favicon.png"
html_sidebars = {}
html_static_path = ["_static"]
html_theme = "furo"
language = "en"
mathdef_link_only = True
master_doc = "index"
# Generated operator pages go next to this file under "operators/".
onnx_doc_folder = os.path.join(os.path.abspath(os.path.dirname(__file__)), "operators")
pygments_style = "default"
source_suffix = [".rst", ".md"]
templates_path = ["_templates"]
html_context = {
    "default_mode": "auto",  # auto: the documentation theme will follow the system default that you have set (light or dark)
}
html_theme_options = {
    "light_logo": "onnx-horizontal-color.png",
    "dark_logo": "onnx-horizontal-white.png",
}
intersphinx_mapping = {
    "numpy": ("https://numpy.org/doc/stable/", None),
    "python": (f"https://docs.python.org/{sys.version_info.major}/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/", None),
    "torch": ("https://pytorch.org/docs/stable/", None),
}
sphinx_gallery_conf = {
    "examples_dirs": ["examples"],
    "gallery_dirs": ["auto_examples", "auto_tutorial"],
    "capture_repr": ("_repr_html_", "__repr__"),
    "ignore_repr_types": r"matplotlib.text|matplotlib.axes",
    "binder": {
        "org": "onnx",
        "repo": ".",
        "notebooks_dir": "auto_examples",
        "binderhub_url": "https://mybinder.org",
        "branch": "master",
        "dependencies": "./requirements.txt",
    },
}
# Keep FutureWarnings raised while importing documented modules out of the build log.
warnings.filterwarnings("ignore", category=FutureWarning)
| 3,121 | 24.590164 | 125 | py |
onnx | onnx-main/onnx/tools/net_drawer.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# A library and utility for drawing ONNX nets. Most of this implementation has
# been borrowed from the caffe2 implementation
# https://github.com/pytorch/pytorch/blob/master/caffe2/python/net_drawer.py
#
# The script takes two required arguments:
# -input: a path to a serialized ModelProto .pb file.
# -output: a path to write a dot file representation of the graph
#
# Given this dot file representation, you can-for example-export this to svg
# with the graphviz `dot` utility, like so:
#
# $ dot -Tsvg my_output.dot -o my_output.svg
import argparse
import json
from collections import defaultdict
from typing import Any, Callable, Dict, Optional
import pydot
from onnx import GraphProto, ModelProto, NodeProto
# Default graphviz attributes for operator nodes (filled green boxes).
OP_STYLE = {
    "shape": "box",
    "color": "#0F9D58",
    "style": "filled",
    "fontcolor": "#FFFFFF",
}
# Default graphviz attributes for tensor ("blob") nodes.
BLOB_STYLE = {"shape": "octagon"}
# Callback rendering a NodeProto (plus its index) as a pydot.Node.
_NodeProducer = Callable[[NodeProto, int], pydot.Node]
def _escape_label(name: str) -> str:
# json.dumps is poor man's escaping
return json.dumps(name)
def _form_and_sanitize_docstring(s: str) -> str:
url = "javascript:alert("
url += _escape_label(s).replace('"', "'").replace("<", "").replace(">", "")
url += ")"
return url
def GetOpNodeProducer(  # noqa: N802
    embed_docstring: bool = False, **kwargs: Any
) -> _NodeProducer:
    """Return a factory that renders a NodeProto as a pydot.Node.

    ``kwargs`` are forwarded to ``pydot.Node`` as graphviz attributes; when
    *embed_docstring* is set, the node's doc_string is attached as a
    javascript: alert URL (useful in SVG output).
    """
    def really_get_op_node(op: NodeProto, op_id: int) -> pydot.Node:
        # Label: "<name>/<op_type> (op#<id>)" plus one line per input/output.
        if op.name:
            node_name = f"{op.name}/{op.op_type} (op#{op_id})"
        else:
            node_name = f"{op.op_type} (op#{op_id})"
        for i, input_ in enumerate(op.input):
            node_name += "\n input" + str(i) + " " + input_
        for i, output in enumerate(op.output):
            node_name += "\n output" + str(i) + " " + output
        node = pydot.Node(node_name, **kwargs)
        if embed_docstring:
            url = _form_and_sanitize_docstring(op.doc_string)
            node.set_URL(url)
        return node
    return really_get_op_node
def GetPydotGraph(  # noqa: N802
    graph: GraphProto,
    name: Optional[str] = None,
    rankdir: str = "LR",
    node_producer: Optional[_NodeProducer] = None,
    embed_docstring: bool = False,
) -> pydot.Dot:
    """Convert a GraphProto into a pydot.Dot graph.

    Operator nodes come from *node_producer*; tensors appear as octagon
    "blob" nodes. A tensor name that is produced more than once gets a fresh
    pydot node per producer (``pydot_node_counts`` disambiguates the node
    ids) so edges always attach to the latest version of the tensor.
    """
    if node_producer is None:
        node_producer = GetOpNodeProducer(embed_docstring=embed_docstring, **OP_STYLE)
    pydot_graph = pydot.Dot(name, rankdir=rankdir)
    pydot_nodes: Dict[str, pydot.Node] = {}
    pydot_node_counts: Dict[str, int] = defaultdict(int)
    for op_id, op in enumerate(graph.node):
        op_node = node_producer(op, op_id)
        pydot_graph.add_node(op_node)
        for input_name in op.input:
            # Reuse the blob node for a tensor we have already seen.
            if input_name not in pydot_nodes:
                input_node = pydot.Node(
                    _escape_label(input_name + str(pydot_node_counts[input_name])),
                    label=_escape_label(input_name),
                    **BLOB_STYLE,
                )
                pydot_nodes[input_name] = input_node
            else:
                input_node = pydot_nodes[input_name]
            pydot_graph.add_node(input_node)
            pydot_graph.add_edge(pydot.Edge(input_node, op_node))
        for output_name in op.output:
            # A re-produced tensor gets a new node with a bumped suffix.
            if output_name in pydot_nodes:
                pydot_node_counts[output_name] += 1
            output_node = pydot.Node(
                _escape_label(output_name + str(pydot_node_counts[output_name])),
                label=_escape_label(output_name),
                **BLOB_STYLE,
            )
            pydot_nodes[output_name] = output_node
            pydot_graph.add_node(output_node)
            pydot_graph.add_edge(pydot.Edge(op_node, output_node))
    return pydot_graph
def main() -> None:
    """Command-line entry point: read a serialized ModelProto, write a dot file.

    Render the result with graphviz, e.g. ``dot -Tsvg out.dot -o out.svg``.
    """
    parser = argparse.ArgumentParser(description="ONNX net drawer")
    parser.add_argument(
        "--input",
        type=str,
        required=True,
        help="The input protobuf file.",
    )
    parser.add_argument(
        "--output",
        type=str,
        required=True,
        # Fixed help text: this script emits a graphviz dot file, not a
        # protobuf file (see the module header).
        help="The output dot file.",
    )
    parser.add_argument(
        "--rankdir",
        type=str,
        default="LR",
        help="The rank direction of the pydot graph.",
    )
    parser.add_argument(
        "--embed_docstring",
        action="store_true",
        help="Embed docstring as javascript alert. Useful for SVG format.",
    )
    args = parser.parse_args()
    model = ModelProto()
    with open(args.input, "rb") as fid:
        content = fid.read()
        model.ParseFromString(content)
    pydot_graph = GetPydotGraph(
        model.graph,
        name=model.graph.name,
        rankdir=args.rankdir,
        node_producer=GetOpNodeProducer(
            embed_docstring=args.embed_docstring, **OP_STYLE
        ),
    )
    pydot_graph.write_dot(args.output)


if __name__ == "__main__":
    main()
| 4,901 | 30.625806 | 86 | py |
onnx | onnx-main/onnx/reference/ops/op_lp_pool.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=W0221,R0913,R0914
import numpy as np
from onnx.reference.ops._op_common_pool import CommonPool
class LpPool(CommonPool):
    """Lp pooling: ``(sum over each window of |x| ** p) ** (1 / p)``.

    Implemented via the shared AvgPool machinery: average |x|**p over the
    window, multiply back by the window size, then take the p-th root.
    """

    def _run(  # type: ignore
        self,
        x,
        auto_pad=None,
        ceil_mode=None,
        dilations=None,
        kernel_shape=None,
        p=2,
        pads=None,
        strides=None,
        count_include_pad=None,
    ):
        # utilize AvgPool the same fashion Pytorch does. Note that there is a difference in computation.
        # it needs another PR to address.
        # https://github.com/pytorch/pytorch/blob/f58ba553b78db7f88477f9ba8c9333bd1590e30a/torch/nn/functional.py#L1015
        power_average = CommonPool._run(
            self,
            "AVG",
            count_include_pad,
            np.power(np.absolute(x), p),
            auto_pad=auto_pad,
            ceil_mode=ceil_mode,
            dilations=dilations,
            kernel_shape=kernel_shape,
            pads=pads,
            strides=strides,
        )
        # Turn the window mean back into a window sum before the p-th root.
        kernel_element_count = np.prod(kernel_shape)
        return (np.power(kernel_element_count * power_average[0], 1.0 / p),)
| 1,216 | 27.97619 | 119 | py |
onnx | onnx-main/onnx/reference/ops/op_hamming_window.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=W0221
import numpy as np
from onnx.reference.ops._op_common_window import _CommonWindow
class HammingWindow(_CommonWindow):
    """
    Returns
    :math:`\\omega_n = \\alpha - \\beta \\cos \\left( \\frac{2 \\pi n}{N'} \\right)`
    where *N* is the window length and ``N'`` is the denominator supplied by
    ``_CommonWindow._begin`` (presumably N - 1 for a symmetric window and N
    for a periodic one — confirm there).
    See `hamming_window
    <https://pytorch.org/docs/stable/generated/torch.hamming_window.html>`_.
    Uses the exact coefficients ``alpha = 25/46`` (~0.5435) and
    ``beta = 1 - alpha``, not the rounded 0.54/0.46 pair.
    """
    def _run(self, size, output_datatype=None, periodic=None):  # type: ignore
        # ni: sample indexes; N_1: effective denominator from _begin.
        ni, N_1 = self._begin(size, periodic, output_datatype)
        alpha = 25.0 / 46.0
        beta = 1 - alpha
        res = alpha - np.cos(ni * np.pi * 2 / N_1) * beta
        return self._end(size, res, output_datatype)
| 811 | 29.074074 | 83 | py |
onnx | onnx-main/onnx/reference/ops/op_hann_window.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=W0221
import numpy as np
from onnx.reference.ops._op_common_window import _CommonWindow
class HannWindow(_CommonWindow):
    """
    Returns
    :math:`\\omega_n = \\sin^2\\left( \\frac{\\pi n}{N'} \\right)`
    where *N* is the window length and ``N'`` is the denominator supplied by
    ``_CommonWindow._begin`` (presumably N - 1 for a symmetric window and N
    for a periodic one — confirm there).
    See `hann_window
    <https://pytorch.org/docs/stable/generated/torch.hann_window.html>`_
    """
    def _run(self, size, output_datatype=None, periodic=None):  # type: ignore
        # ni: sample indexes; N_1: effective denominator from _begin.
        ni, N_1 = self._begin(size, periodic, output_datatype)
        res = np.sin(ni * np.pi / N_1) ** 2
        return self._end(size, res, output_datatype)
| 690 | 27.791667 | 78 | py |
onnx | onnx-main/onnx/reference/ops/op_col2im.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=R0913,R0914,W0221
import numpy as np
from onnx.reference.op_run import OpRun
from onnx.reference.ops._op_common_indices import _get_indices, _is_out
def _col2im_shape_check_2d(X, output_shape, kernel_shape, dilations, pads, strides): # type: ignore
output_height, output_width = output_shape
kernel_height, kernel_width = kernel_shape
dilation_height, dilation_width = dilations
stride_height, stride_width = strides
ndim = len(X.shape)
if not (
(ndim == 2 and X.shape[0] != 0 and X.shape[1] != 0)
or (ndim == 3 and X.shape[1] != 0 and X.shape[2] != 0)
):
raise ValueError(
"Expected 2D or 3D (batch mode) tensor for input with possibly 0 batch size and non-zero dimensions for input."
)
batch_dim = 0 if len(X.shape) == 3 else -1
n_input_plane = X.shape[batch_dim + 1]
if n_input_plane % (kernel_width * kernel_height) != 0:
raise ValueError(
f"Expected size of input's dimension 1 to be divisible by the "
f"product of kernel_size, but got input.size(1)={n_input_plane} "
f"and kernel_size={kernel_shape}."
)
input_length = X.shape[batch_dim + 2]
n_blocks_height = (
output_height + pads[0, :].sum() - dilation_height * (kernel_height - 1) - 1
) // stride_height + 1
n_blocks_width = (
output_width + pads[1, :].sum() - dilation_width * (kernel_width - 1) - 1
) // stride_width + 1
if input_length != (n_blocks_height * n_blocks_width):
raise ValueError(
f"Given batch_dim={batch_dim}, n_input_plane={n_input_plane}, X.shape={X.shape}, "
f"output_shape={output_shape}, kernel_shape={kernel_shape}, "
f"dilations={dilations}, pads={pads}, strides={strides}, "
f"expected size of input's dimension 2 to match the calculated number of ",
f"sliding blocks {n_blocks_height} * {n_blocks_width} = {n_blocks_height * n_blocks_width}, "
f"but got input.size(2)={input_length}.",
)
if not (n_blocks_height >= 1 and n_blocks_width >= 1):
raise ValueError(
f"Given batch_dim={batch_dim}, n_input_plane={n_input_plane}, X.shape={X.shape}, "
f"output_shape={output_shape}, kernel_shape={kernel_shape}, "
f"dilations={dilations}, pads={pads}, strides={strides}, "
f"calculated shape of the array of sliding blocks as ({n_blocks_height}, {n_blocks_width}), "
f"which is too small (non-positive)."
)
def _col2im_naive_implementation_2d(res, image_shape, kernel_shape, dilations, pads, strides):  # type: ignore
    """Scatter-add 2-D column data ``res`` back into an image of ``image_shape``.

    ``pads`` is the flattened [begin_h, begin_w, end_h, end_w] list; values
    from overlapping windows are accumulated.
    """
    # source: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/im2col.h
    n_dims = len(pads) // 2
    # Regroup pads as (begin, end) pairs per spatial axis.
    new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)])
    _col2im_shape_check_2d(res, image_shape, kernel_shape, dilations, new_pads, strides)
    data_col = res.ravel()
    data_im = np.zeros(image_shape, dtype=res.dtype).flatten()
    kernel_h, kernel_w = kernel_shape
    channels_col = kernel_h * kernel_w
    stride_h, stride_w = strides
    dilation_h, dilation_w = dilations
    pad_h, pad_w = new_pads[:, 0]
    height, width = image_shape
    output_height, output_width = image_shape
    # Number of sliding-window positions along each axis.
    height_col = (
        output_height + new_pads[0, :].sum() - (dilation_h * (kernel_h - 1) + 1)
    ) // stride_h + 1
    width_col = (
        output_width + new_pads[1, :].sum() - (dilation_w * (kernel_w - 1) + 1)
    ) // stride_w + 1
    for c_col in range(channels_col):
        # Decompose the column channel into its (h, w) offset in the kernel.
        w_offset = c_col % kernel_w
        h_offset = (c_col // kernel_w) % kernel_h
        c_im = c_col // (kernel_h * kernel_w)
        for h_col in range(height_col):
            h_im = h_col * stride_h - pad_h + h_offset * dilation_h
            for w_col in range(width_col):
                w_im = w_col * stride_w - pad_w + w_offset * dilation_w
                # Only positions landing inside the image accumulate.
                if 0 <= h_im < height and 0 <= w_im < width:
                    i_im = (c_im * height + h_im) * width + w_im
                    i_col = (c_col * height_col + h_col) * width_col + w_col
                    if 0 <= i_col < data_col.shape[0]:
                        data_im[i_im] += data_col[i_col]
    return data_im.reshape(image_shape)
def _col2im_shape_check(X, output_shape, kernel_shape, dilations, pads, strides): # type: ignore
n_input_plane = X.shape[0]
kernel_size = np.prod(kernel_shape)
if n_input_plane % kernel_size != 0:
raise ValueError(
f"Expected size of input's dimension 1 to be divisible by the "
f"product of kernel_size={kernel_size}, "
f"but got input.size(1)={n_input_plane} "
f"and kernel_shape={kernel_shape}, X.shape={X.shape}, output_shape={output_shape}."
)
input_length = X.shape[1]
n_dims = len(output_shape)
n_blocks = []
for i in range(n_dims):
n_block = (
output_shape[i]
+ pads[i, :].sum()
- dilations[i] * (kernel_shape[i] - 1)
- 1
) // strides[i] + 1
n_blocks.append(n_block)
block_size = np.prod(n_blocks)
if input_length != block_size:
raise ValueError(
f"Given n_input_plane={n_input_plane}, X.shape={X.shape}, "
f"output_shape={output_shape}, kernel_shape={kernel_shape}, "
f"dilations={dilations}, pads={pads}, strides={strides}, "
f"expected size of input's dimension 2 to match the calculated number of "
f"sliding blocks {n_blocks} = {block_size}, "
f"but got input.size(2)={input_length}.",
)
def col2im_naive_implementation(data, image_shape, kernel_shape, dilations, pads, strides):  # type: ignore
    """
    Naive implementation for `col2im`.

    ``data`` is indexed as (prod(kernel_shape), L); every column is scattered
    back to its image positions and overlapping contributions are summed.
    ``pads`` is the flattened [begin_1..begin_n, end_1..end_n] list.
    """
    n_dims = len(pads) // 2
    # Regroup pads as (begin, end) pairs per spatial axis.
    new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)])
    _col2im_shape_check(data, image_shape, kernel_shape, dilations, new_pads, strides)
    data_col = data
    data_im = np.zeros(image_shape, dtype=data.dtype)
    # Number of sliding-window positions along each axis.
    dim_col = []
    for i in range(n_dims):
        col = (
            image_shape[i]
            + new_pads[i, :].sum()
            - (dilations[i] * (kernel_shape[i] - 1) + 1)
        ) // strides[i] + 1
        dim_col.append(col)
    kernel_size = np.prod(kernel_shape)
    col_size = np.prod(dim_col)
    for c_col in range(kernel_size):
        # Kernel-relative offset for this row of the column matrix.
        offset = _get_indices(c_col, kernel_shape)
        for col in range(col_size):
            ind_col = _get_indices(col, dim_col)
            ind_im = []
            for i in range(n_dims):
                ind = (
                    ind_col[i] * strides[i] - new_pads[i, 0] + offset[i] * dilations[i]
                )
                ind_im.append(ind)
            # Skip positions that fall into the padding region.
            if not _is_out(ind_im, data_im.shape):
                data_im[tuple(ind_im)] += data_col[c_col, col]
    return data_im
class Col2Im(OpRun):
    """Reference implementation of the ONNX ``Col2Im`` operator.

    Rearranges sliding-window columns back into images, summing values where
    windows overlap (the inverse of Im2Col up to overlap counts).
    """

    def _run(self, data, image_shape, block_shape, dilations=None, pads=None, strides=None):  # type: ignore
        # Defaults per the ONNX spec: unit dilations/strides, zero padding.
        if dilations is None:
            dilations = [1 for s in image_shape]
        if pads is None:
            pads = [0 for s in image_shape] * 2
        if strides is None:
            strides = [1 for s in image_shape]
        # Split dim 1 (C * prod(block_shape)) into channel and block dims.
        bl = np.prod(block_shape)
        C = data.shape[1] // bl
        data = data.reshape(data.shape[:1] + (C,) + (bl,) + data.shape[2:])
        ks = tuple(block_shape)
        res = None
        for n in range(data.shape[0]):
            for c in range(data.shape[1]):
                out = col2im_naive_implementation(
                    data[n, c, ...], image_shape, ks, dilations, pads, strides
                )
                if res is None:
                    # Allocate the (N, C) + image_shape output lazily.
                    new_shape = data.shape[:2] + out.shape
                    res = np.empty(new_shape, dtype=data.dtype)
                res[n, c, ...] = out
        return (res,)  # type: ignore
| 8,046 | 37.319048 | 123 | py |
onnx | onnx-main/onnx/reference/ops/op_resize.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=C0123,C3001,R0912,R0913,R0914,R1730,W0221,W0613
from typing import Any, Callable, List, Optional, Tuple
import numpy as np
from onnx.reference.op_run import OpRun
def _cartesian(
    arrays: List[np.ndarray], out: Optional[np.ndarray] = None
) -> np.ndarray:
    """
    From https://stackoverflow.com/a/1235363
    Generate a cartesian product of input arrays.
    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.
    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.
    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype
    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)
    # Each value of arrays[0] heads a contiguous group of m rows.
    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        # Fill the first group's remaining columns recursively, then copy
        # that group's tail columns into every other group.
        _cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j * m : (j + 1) * m, 1:] = out[0:m, 1:]
    return out
def _nearest_coeffs(ratio: float, mode: str = "round_prefer_floor") -> np.ndarray:
if type(ratio) == int or ratio.is_integer():
return np.array([0, 1])
if mode == "round_prefer_floor":
return np.array([ratio <= 0.5, ratio > 0.5])
if mode == "round_prefer_ceil":
return np.array([ratio < 0.5, ratio >= 0.5])
if mode == "floor":
return np.array([1, 0])
if mode == "ceil":
return np.array([0, 1])
raise ValueError(f"Unexpected value {mode!r}.")
def _cubic_coeffs(
ratio: float, scale: Optional[float] = None, A: float = -0.75
) -> np.ndarray:
# scale is unused
coeffs = [
((A * (ratio + 1) - 5 * A) * (ratio + 1) + 8 * A) * (ratio + 1) - 4 * A,
((A + 2) * ratio - (A + 3)) * ratio * ratio + 1,
((A + 2) * (1 - ratio) - (A + 3)) * (1 - ratio) * (1 - ratio) + 1,
((A * ((1 - ratio) + 1) - 5 * A) * ((1 - ratio) + 1) + 8 * A)
* ((1 - ratio) + 1)
- 4 * A,
]
return np.array(coeffs)
def _cubic_coeffs_antialias(ratio: float, scale: float, A: float = -0.75) -> np.ndarray:
if scale > 1.0: # Antialias is applied when downsampling
scale = 1.0
def compute_coeff(x: float) -> float:
x = abs(x)
x_2 = x * x
x_3 = x * x_2
if x <= 1:
return (A + 2) * x_3 - (A + 3) * x_2 + 1
if x < 2:
return A * x_3 - 5 * A * x_2 + 8 * A * x - 4 * A
return 0.0
i_start = int(np.floor(-2 / scale) + 1)
i_end = 2 - i_start
args = [scale * (i - ratio) for i in range(i_start, i_end)]
coeffs = [compute_coeff(x) for x in args]
return np.array(coeffs) / sum(coeffs)
def _linear_coeffs(ratio: float, scale: Optional[float] = None) -> np.ndarray:
# scale is unused
return np.array([1 - ratio, ratio])
def _linear_coeffs_antialias(ratio: float, scale: float) -> np.ndarray:
if scale > 1.0: # Antialias is applied when downsampling
scale = 1.0
start = int(np.floor(-1 / scale) + 1)
footprint = 2 - 2 * start
args = (np.arange(start, start + footprint) - ratio) * scale
coeffs = np.clip(1 - np.abs(args), 0, 1)
return np.array(coeffs) / sum(coeffs) # type: ignore[no-any-return]
def _get_neighbor_idxes(x: float, n: int, limit: int) -> np.ndarray:
"""
Return the n nearest indexes to x among `[0, limit)`,
prefer the indexes smaller than x.
As a result, the ratio must be in `(0, 1]`.
Examples::
get_neighbor_idxes(4, 2, 10) == [3, 4]
get_neighbor_idxes(4, 3, 10) == [3, 4, 5]
get_neighbor_idxes(4.4, 3, 10) == [3, 4, 5]
get_neighbor_idxes(4.5, 3, 10) == [3, 4, 5]
get_neighbor_idxes(4.6, 3, 10) == [4, 5, 6]
get_neighbor_idxes(4.4, 1, 10) == [4]
get_neighbor_idxes(4.6, 1, 10) == [5]
:param x:
:param n: the number of the wanted indexes
:param limit: the maximum value of index
:return: An np.array containing n nearest indexes in ascending order
"""
idxes = sorted(range(limit), key=lambda idx: (abs(x - idx), idx))[:n]
idxes = sorted(idxes)
return np.array(idxes)
def _get_neighbor(x: float, n: int, data: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Edge-pad *data* and return the indexes and values of the *n* samples nearest *x*.

    The returned indexes are expressed in the unpadded coordinate system, so
    they may fall below 0 or beyond ``len(data)``; the values come from the
    edge-padded copy.
    """
    pad = np.ceil(n / 2).astype(int)
    padded = np.pad(data, pad, mode="edge")
    # Search in padded coordinates, then shift the indexes back.
    idxes = _get_neighbor_idxes(x + pad, n, len(padded))
    return idxes - pad, padded[idxes]
def _interpolate_1d_with_x(
    data: np.ndarray,
    scale_factor: float,
    output_width_int: int,
    x: float,
    get_coeffs: Callable[[float, float], np.ndarray],
    roi: Optional[np.ndarray] = None,
    extrapolation_value: float = 0.0,
    coordinate_transformation_mode: str = "half_pixel",
    exclude_outside: bool = False,
) -> np.ndarray:
    """Interpolate 1-D *data* at output coordinate *x*.

    The output coordinate is mapped to an input-space coordinate ``x_ori``
    according to *coordinate_transformation_mode* (per the ONNX Resize spec),
    then a weighted sum of the neighboring samples — with weights produced by
    *get_coeffs* — is returned as a 0-d value.
    """
    input_width = len(data)
    output_width = scale_factor * input_width
    if coordinate_transformation_mode == "align_corners":
        if output_width == 1:
            x_ori = 0.0
        else:
            x_ori = x * (input_width - 1) / (output_width - 1)
    elif coordinate_transformation_mode == "asymmetric":
        x_ori = x / scale_factor
    elif coordinate_transformation_mode == "tf_crop_and_resize":
        if roi is None:
            raise ValueError("roi cannot be None.")
        if output_width == 1:
            x_ori = (roi[1] - roi[0]) * (input_width - 1) / 2
        else:
            x_ori = x * (roi[1] - roi[0]) * (input_width - 1) / (output_width - 1)
        x_ori += roi[0] * (input_width - 1)
        # Return extrapolation_value directly as what TF CropAndResize does
        if x_ori < 0 or x_ori > input_width - 1:
            return np.array(extrapolation_value)
    elif coordinate_transformation_mode == "pytorch_half_pixel":
        if output_width == 1:
            x_ori = -0.5
        else:
            x_ori = (x + 0.5) / scale_factor - 0.5
    elif coordinate_transformation_mode == "half_pixel":
        x_ori = (x + 0.5) / scale_factor - 0.5
    elif coordinate_transformation_mode == "half_pixel_symmetric":
        # Maps the center of the implicit ROI to the center of the output canvas.
        # The difference with `half_pixel` will be only relevant
        # when output_width_int != output_width
        adjustment = output_width_int / output_width
        center = input_width / 2
        offset = center * (1 - adjustment)
        x_ori = offset + (x + 0.5) / scale_factor - 0.5
    else:
        raise ValueError(
            f"Invalid coordinate_transformation_mode: {coordinate_transformation_mode!r}."
        )
    x_ori_int = np.floor(x_ori).astype(int).item()
    # ratio must be in (0, 1] since we prefer the pixel on the left of `x_ori`
    if x_ori.is_integer():
        ratio = 1
    else:
        ratio = x_ori - x_ori_int
    coeffs = get_coeffs(ratio, scale_factor)
    n = len(coeffs)
    idxes, points = _get_neighbor(x_ori, n, data)
    if exclude_outside:
        # Zero out taps that fall outside the input, then renormalize.
        for i, idx in enumerate(idxes):
            if idx < 0 or idx >= input_width:
                coeffs[i] = 0
        coeffs /= sum(coeffs)
    return np.dot(coeffs, points).item()  # type: ignore[no-any-return]
def _interpolate_nd_with_x(
    data: np.ndarray,
    n: int,
    scale_factors: List[float],
    output_size: List[int],
    x: List[float],
    get_coeffs: Callable[[float, float], np.ndarray],
    roi: Optional[np.ndarray] = None,
    exclude_outside: bool = False,
    **kwargs: Any,
) -> np.ndarray:
    """Interpolate n-dimensional *data* at the (possibly fractional) point *x*.

    Recurses over axis 0: each sub-array is interpolated over the remaining
    axes, then the per-slice results are interpolated along axis 0. ``roi``
    is laid out as [start_1..start_n, end_1..end_n], hence the concatenated
    slices that drop axis 0's start/end for the recursive call.
    """
    if n == 1:
        return _interpolate_1d_with_x(
            data,
            scale_factors[0],
            output_size[0],
            x[0],
            get_coeffs,
            roi=roi,
            exclude_outside=exclude_outside,
            **kwargs,
        )
    res1d = []
    for i in range(data.shape[0]):
        # Interpolate the trailing axes of every axis-0 slice.
        r = _interpolate_nd_with_x(
            data[i],
            n - 1,
            scale_factors[1:],
            output_size[1:],
            x[1:],
            get_coeffs,
            roi=None if roi is None else np.concatenate([roi[1:n], roi[n + 1 :]]),
            exclude_outside=exclude_outside,
            **kwargs,
        )
        res1d.append(r)
    # Finally interpolate across axis 0 itself.
    return _interpolate_1d_with_x(
        res1d,  # type: ignore[arg-type]  # FIXME
        scale_factors[0],
        output_size[0],
        x[0],
        get_coeffs,
        roi=None if roi is None else [roi[0], roi[n]],  # type: ignore[arg-type]  # FIXME
        exclude_outside=exclude_outside,
        **kwargs,
    )
def _get_all_coords(data: np.ndarray) -> np.ndarray:
    """Enumerate every index tuple of *data* as the rows of a 2-D array."""
    axes = [list(range(dim)) for dim in data.shape]
    return _cartesian(axes)  # type: ignore[arg-type,misc]
def _interpolate_nd(
    data: np.ndarray,
    get_coeffs: Callable[[float, float], np.ndarray],
    output_size: Optional[List[int]] = None,
    scale_factors: Optional[List[float]] = None,
    axes: Optional[List[int]] = None,
    roi: Optional[np.ndarray] = None,
    keep_aspect_ratio_policy: Optional[str] = "stretch",
    exclude_outside: bool = False,
    **kwargs: Any,
) -> np.ndarray:
    """Resize *data* by interpolating every output coordinate independently.

    At least one of *output_size* / *scale_factors* must be given. When
    *axes* is provided, the size/scale/roi arguments are specified only for
    those axes and are expanded to full rank here. *keep_aspect_ratio_policy*
    follows the ONNX Resize spec ("stretch", "not_larger", "not_smaller").
    """
    if output_size is None and scale_factors is None:
        raise ValueError("output_size is None and scale_factors is None.")
    r = len(data.shape)
    if axes is not None:
        # Expand the per-axis arguments to cover all r axes.
        if scale_factors is not None:
            new_scale_factors = [1.0] * r
            for i, d in enumerate(axes):
                new_scale_factors[d] = scale_factors[i]
            scale_factors = new_scale_factors
        if output_size is not None:
            new_output_size = [data.shape[i] for i in range(r)]
            for i, d in enumerate(axes):
                new_output_size[d] = output_size[i]
            output_size = new_output_size
        if roi is not None:
            # roi layout: [start_1..start_r, end_1..end_r].
            new_roi = ([0.0] * r) + ([1.0] * r)
            naxes = len(axes)
            for i, d in enumerate(axes):
                new_roi[d] = roi[i]
                new_roi[r + d] = roi[naxes + i]
            roi = new_roi  # type: ignore[assignment]  # FIXME
    else:
        axes = list(range(r))
    if output_size is not None:
        scale_factors = [output_size[i] / data.shape[i] for i in range(r)]
        if keep_aspect_ratio_policy != "stretch":
            # Pick one common scale for the resized axes, then recompute sizes.
            if keep_aspect_ratio_policy == "not_larger":
                scale = np.array(scale_factors)[axes].min()
            elif keep_aspect_ratio_policy == "not_smaller":
                scale = np.array(scale_factors)[axes].max()
            else:
                raise ValueError(
                    f"Invalid keep_aspect_ratio_policy={keep_aspect_ratio_policy!r}"
                )
            scale_factors = [scale if i in axes else 1.0 for i in range(r)]

            def round_half_up(x: float) -> int:
                return int(x + 0.5)

            output_size = [
                round_half_up(scale * data.shape[i]) if i in axes else data.shape[i]
                for i in range(r)
            ]
    else:
        output_size = (scale_factors * np.array(data.shape)).astype(int)  # type: ignore[union-attr]
    if scale_factors is None:
        raise ValueError("scale_factors is None.")
    if output_size is None:
        raise ValueError("output_size is None.")
    ret = np.zeros(output_size)
    # Naive: interpolate each output element independently.
    for x in _get_all_coords(ret):
        ret[tuple(x)] = _interpolate_nd_with_x(
            data,
            len(data.shape),
            scale_factors,
            output_size,
            x,
            get_coeffs,
            roi=roi,
            exclude_outside=exclude_outside,
            **kwargs,
        )
    return ret
class Resize(OpRun):
    """Reference implementation of the ONNX ``Resize`` operator.

    Dispatches on ``mode`` to the matching 1-D coefficient function
    (nearest / linear / cubic, optionally antialiased) and delegates the
    resampling to ``_interpolate_nd``.  When ``axes`` is given, the
    non-resized axes are folded into one batch dimension, each slice is
    resized independently, and the result is transposed back.
    """

    def _run(  # type: ignore
        self,
        X,
        roi,
        scales=None,
        sizes=None,
        antialias=None,
        axes=None,
        coordinate_transformation_mode=None,
        cubic_coeff_a=None,
        exclude_outside=None,
        extrapolation_value=None,
        keep_aspect_ratio_policy=None,
        mode=None,
        nearest_mode=None,
    ):
        # Select the coefficient function for the interpolation mode.
        if mode == "nearest":  # type: ignore
            if antialias:
                # NOTE: error message previously misspelled "antilias".
                raise RuntimeError(
                    f"antialias={antialias!r} is not supported for mode={mode!r}."
                )
            if nearest_mode is not None:
                def fct(x, scale_factor):
                    return _nearest_coeffs(x, mode=nearest_mode)
            else:
                fct = _nearest_coeffs
        elif mode == "cubic":
            fct_ = _cubic_coeffs_antialias if antialias else _cubic_coeffs
            def fct(x, scale):
                return fct_(x, scale, A=cubic_coeff_a)
        elif mode == "linear":
            fct = _linear_coeffs_antialias if antialias else _linear_coeffs
        else:
            raise ValueError(f"Unexpected value {mode!r} for mode.")
        if axes is None:
            # Resize over every axis in one pass.
            output = _interpolate_nd(
                X,
                fct,
                scale_factors=scales,
                output_size=sizes,
                roi=roi,
                keep_aspect_ratio_policy=keep_aspect_ratio_policy,
                exclude_outside=exclude_outside,
                coordinate_transformation_mode=coordinate_transformation_mode,  # type: ignore
                extrapolation_value=extrapolation_value,  # type: ignore
            ).astype(X.dtype)
            return (output,)
        # axes is not None: bring the untouched axes to the front, flatten
        # them into a single batch dimension and resize each slice alone.
        not_axes = [a for a in range(len(X.shape)) if a not in axes]
        perm = tuple(not_axes + axes)
        permuted = np.transpose(X, perm)
        new_shape = (-1, *tuple(X.shape[a] for a in axes))
        reshaped = permuted.reshape(new_shape)
        res = None
        for i in range(reshaped.shape[0]):
            output = _interpolate_nd(
                reshaped[i],
                fct,
                scale_factors=scales,
                output_size=sizes,
                roi=roi,
                keep_aspect_ratio_policy=keep_aspect_ratio_policy,
                exclude_outside=exclude_outside,
                coordinate_transformation_mode=coordinate_transformation_mode,  # type: ignore
                extrapolation_value=extrapolation_value,  # type: ignore
            ).astype(X.dtype)
            if res is None:
                # Allocate once the per-slice output shape is known.
                res = np.empty((reshaped.shape[0], *output.shape), dtype=output.dtype)
            res[i] = output
        # Undo the flattening, then apply the inverse permutation so the
        # axes come back in their original order.
        res_reshaped = res.reshape(tuple(X.shape[a] for a in not_axes) + res[0].shape)  # type: ignore
        new_perm = list(perm)
        for i, a in enumerate(perm):
            new_perm[a] = i
        final = np.transpose(res_reshaped, tuple(new_perm))
        return (final,)
| 15,638 | 32.274468 | 102 | py |
onnx | onnx-main/onnx/reference/ops/op_stft.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=R0913,R0914,R0915,W0613,W0221
import numpy as np
from onnx.reference.op_run import OpRun
from onnx.reference.ops.op_concat_from_sequence import _concat_from_sequence
from onnx.reference.ops.op_dft import _cfft as _dft
from onnx.reference.ops.op_slice import _slice
def _concat(*args, axis=0):  # type: ignore
    """Concatenate all positional arrays along *axis* (default 0)."""
    pieces = tuple(args)
    return np.concatenate(pieces, axis=axis)
def _unsqueeze(a, axis):  # type: ignore
    """Insert size-1 dimensions into *a* at the requested axis/axes.

    Tries ``np.expand_dims`` directly; NumPy versions that reject a
    sequence of axes raise TypeError, in which case the axes are
    expanded one at a time, from the last requested axis to the first.
    """
    try:
        return np.expand_dims(a, axis=axis)
    except TypeError:
        # numpy 1.18 supports axes as a tuple
        if len(axis) == 1:
            return np.expand_dims(a, axis=tuple(axis)[0])
        result = a
        for single_axis in reversed(axis):
            result = np.expand_dims(result, axis=single_axis)
        return result
def _stft(x, fft_length, hop_length, n_frames, window, onesided=False):  # type: ignore
    """
    Applies one dimensional FFT with window weights.

    Slices the signal into *n_frames* overlapping frames (stride
    *hop_length*, length ``window.shape[0]``, zero-padded when a frame
    runs past the end), multiplies each frame by *window*, then runs the
    DFT over the frames.

    torch defines the number of frames as:
    `n_frames = 1 + (len - n_fft) // hop_length`.
    """
    last_axis = len(x.shape) - 1  # op.Sub(op.Shape(op.Shape(x)), one)
    axis = [-2]
    axis2 = [-3]
    window_size = window.shape[0]
    # building frames
    seq = []
    for fs in range(n_frames):
        begin = fs * hop_length
        end = begin + window_size
        sliced_x = _slice(x, np.array([begin]), np.array([end]), axis)  # type: ignore
        # sliced_x may be smaller than window_size near the end of the
        # signal: zero-pad it back to the full frame length.
        new_dim = sliced_x.shape[-2:-1]
        missing = (window_size - new_dim[0],)
        new_shape = sliced_x.shape[:-2] + missing + sliced_x.shape[-1:]
        cst = np.zeros(new_shape, dtype=x.dtype)
        pad_sliced_x = _concat(sliced_x, cst, axis=-2)
        # same size
        un_sliced_x = _unsqueeze(pad_sliced_x, axis2)
        seq.append(un_sliced_x)
    # concatenation of all frames along the new frame axis
    new_x = _concat_from_sequence(seq, axis=-3, new_axis=0)
    # calling weighted dft with weights=window; the window is broadcast
    # over every leading dimension.
    shape_x = new_x.shape
    shape_x_short = shape_x[:-2]
    shape_x_short_one = tuple(1 for _ in shape_x_short)
    window_shape = (*shape_x_short_one, window_size, 1)
    weights = np.reshape(window, window_shape)
    weighted_new_x = new_x * weights
    result = _dft(
        weighted_new_x, fft_length, last_axis, onesided=onesided
    )  # normalize=False
    return result
def _istft(x, fft_length, hop_length, window, onesided=False):  # type: ignore
    """
    Reverses of `stft`.

    Inverse-transforms every frame, overlap-adds the frames at stride
    *hop_length*, and divides by the summed window weights so the
    windowing applied during the forward STFT is undone.  Returns the
    result with real/imaginary parts stacked in a trailing dimension
    of size 2.
    """
    zero = [0]
    one = [1]
    two = [2]
    axisf = [-2]
    n_frames = x.shape[-2]
    expected_signal_len = fft_length[0] + hop_length * (n_frames - 1)
    # building frames
    seqr = []
    seqi = []
    seqc = []
    for fs in range(n_frames):
        begin = fs
        end = fs + 1
        frame_x = np.squeeze(
            _slice(x, np.array([begin]), np.array([end]), axisf), axis=axisf[0]  # type: ignore
        )
        # ifft of a single frame
        ift = _dft(frame_x, fft_length, axis=-1, onesided=onesided, normalize=True)
        n_dims = len(ift.shape)
        # real part
        n_dims_1 = n_dims - 1
        sliced = _slice(ift, np.array(zero), np.array(one), [n_dims_1])  # type: ignore
        ytmp = np.squeeze(sliced, axis=n_dims_1)
        # window-weight accumulator for this frame
        ctmp = np.full(ytmp.shape, fill_value=1, dtype=x.dtype) * window
        # zero-pad each frame to the full output length so overlap-add
        # reduces to a plain sum along the frame axis.
        shape_begin = ytmp.shape[:-1]
        n_left = fs * hop_length
        size = ytmp.shape[-1]
        n_right = expected_signal_len - (n_left + size)
        left_shape = (*shape_begin, n_left)
        right_shape = (*shape_begin, n_right)
        right = np.zeros(right_shape, dtype=x.dtype)
        left = np.zeros(left_shape, dtype=x.dtype)
        y = _concat(left, ytmp, right, axis=-1)
        yc = _concat(left, ctmp, right, axis=-1)
        # imaginary part
        sliced = _slice(ift, np.array(one), np.array(two), [n_dims_1])  # type: ignore
        itmp = np.squeeze(sliced, axis=n_dims_1)
        yi = _concat(left, itmp, right, axis=-1)
        # append
        seqr.append(_unsqueeze(y, axis=-1))
        seqi.append(_unsqueeze(yi, axis=-1))
        seqc.append(_unsqueeze(yc, axis=-1))
    # concatenation
    redr = _concat_from_sequence(seqr, axis=-1, new_axis=0)
    redi = _concat_from_sequence(seqi, axis=-1, new_axis=0)
    redc = _concat_from_sequence(seqc, axis=-1, new_axis=0)
    # unweight: overlap-add, then normalize by the accumulated window
    resr = redr.sum(axis=-1, keepdims=0)  # type: ignore
    resi = redi.sum(axis=-1, keepdims=0)  # type: ignore
    resc = redc.sum(axis=-1, keepdims=0)  # type: ignore
    rr = resr / resc
    ri = resi / resc
    # Make complex: stack real/imag as a leading axis of size 2
    rr0 = np.expand_dims(rr, axis=0)
    ri0 = np.expand_dims(ri, axis=0)
    conc = _concat(rr0, ri0, axis=0)
    # rotation, bring first dimension to the last position
    result_shape = conc.shape
    reshaped_result = conc.reshape((2, -1))
    transposed = np.transpose(reshaped_result, (1, 0))
    other_dimensions = result_shape[1:]
    final_shape = _concat(other_dimensions, two, axis=0)
    final = transposed.reshape(final_shape)
    return final
class STFT(OpRun):
    """Reference implementation of the ONNX ``STFT`` operator."""

    def _run(self, x, frame_step, window=None, frame_length=None, onesided=None):  # type: ignore
        # Default frame length: the window size when a window is given,
        # otherwise the full signal length.
        if frame_length is None:
            if window is None:
                frame_length = x.shape[-2]
            else:
                frame_length = window.shape[0]
        hop_length = frame_step
        if window is None:
            # No window provided: use a rectangular (all-ones) window.
            window = np.ones((frame_length,), dtype=x.dtype)
        # torch convention: n_frames = 1 + (len - n_fft) // hop_length
        n_frames = 1 + (x.shape[-2] - frame_length) // frame_step
        res = _stft(x, [frame_length], hop_length, n_frames, window, onesided=onesided)
        return (res.astype(x.dtype),)
| 5,591 | 31.511628 | 97 | py |
onnx | onnx-main/onnx/reference/ops/op_blackman_window.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=W0221
import numpy as np
from onnx.reference.ops._op_common_window import _CommonWindow
class BlackmanWindow(_CommonWindow):
    """Blackman window generator.

    Computes
    :math:`\\omega_n = 0.42 - 0.5 \\cos \\left( \\frac{2\\pi n}{N-1} \\right) +
    0.08 \\cos \\left( \\frac{4\\pi n}{N-1} \\right)`
    for a window of length *N*, mirroring `blackman_window
    <https://pytorch.org/docs/stable/generated/torch.blackman_window.html>`_.
    """

    def _run(self, size, output_datatype=None, periodic=None):  # type: ignore
        indices = np.arange(size)
        # A symmetric (non-periodic) window divides by N-1 instead of N.
        denominator = size - 1 if periodic == 0 else size
        window = np.cos((indices * (np.pi * 2)) / denominator) * (-0.5)
        window += np.cos((indices * (np.pi * 4)) / denominator) * 0.08
        window += 0.42
        return self._end(size, window, output_datatype)
| 932 | 27.272727 | 79 | py |
onnx | onnx-main/onnx/reference/ops/op_grid_sample.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=R0912,R0913,R0914,R0915,R1702,R1716,W0221
import numbers
from typing import List
import numpy as np
from onnx.reference.op_run import OpRun
from onnx.reference.ops.op_resize import _get_all_coords
class GridSample(OpRun):
    """Reference implementation of the ONNX ``GridSample`` operator.

    Supports arbitrary spatial rank: for each output coordinate, the
    normalized grid value is denormalized into input coordinates, then
    the input is sampled with ``nearest``, ``linear`` or ``cubic``
    interpolation under the chosen ``padding_mode`` and
    ``align_corners`` setting.
    """

    # https://github.com/pytorch/pytorch/blob/v2.0.0/aten/src/ATen/native/GridSampler.h#L26
    def _gs_denormalize(self, n, length: int, align_corners: bool):  # type: ignore
        """Map one normalized coordinate in [-1, 1] to input space."""
        # n is the normalized coordinate (float)
        # x is the unormalized coordinate (float)
        if align_corners:
            # Align to corners
            # x_min = 0
            # x_max = d-1
            # Linear mapping from [x_min, x_max] to [-1, 1]
            # Solving linear equation n = ax + b
            # a = 2/(d-1)
            # b = -1
            # n = 2/(d-1) x - 1
            # n(d-1) = 2x - (d-1)
            # x = (n+1)(d-1) / 2
            x = (n + 1) / 2.0 * (length - 1)
        else:
            # Not align to corners
            # x_min = -0.5
            # x_max = d-0.5
            # Linear mapping from [x_min, x_max] to [-1, 1]
            # Solving linear equation n = ax + b
            # a = 2/d
            # b = 1/d - 1
            # n = 2/d x + 1/d - 1
            # nd = 2x + 1 - d
            # x = (nd + d - 1) / 2
            # x = ((n + 1) d - 1) / 2
            x = ((n + 1) * length - 1) / 2.0
        return x

    def _gs_denormalize_coordinates(self, n, dims, align_corners: bool):
        """Denormalize every coordinate of *n* against the matching dim size."""
        x = np.zeros(len(n), dtype=np.float32)
        for i, (v, dim) in enumerate(zip(n, dims)):
            x[i] = self._gs_denormalize(n=v, length=dim, align_corners=align_corners)
        return x

    def _gs_reflect(self, x, x_min, x_max):  # type: ignore
        """
        Reflect by the near border till within the borders
        Use float for borders to avoid potential issues with integer T
        """
        fx = x
        rng = x_max - x_min
        if fx < x_min:
            dx = x_min - fx
            n = int(dx / rng)
            r = dx - n * rng
            if n % 2 == 0:
                fx = x_min + r
            else:
                fx = x_max - r
        elif fx > x_max:
            dx = fx - x_max
            n = int(dx / rng)
            r = dx - n * rng
            if n % 2 == 0:
                fx = x_max - r
            else:
                fx = x_min + r
        return fx

    def _gs_get_cubic_coeffs(self, x, coeffs):  # type: ignore
        """
        Calculate cubic convolution interpolation coefficients
        ROBERT G. KEYS https://ieeexplore.ieee.org/document/1163711
        Use float to avoid potential issues with integer.

        Writes the four weights into *coeffs* in place.
        """
        cubic_alpha = -0.75
        x = abs(x)
        coeffs[0] = (
            (cubic_alpha * (x + 1) - 5 * cubic_alpha) * (x + 1) + 8 * cubic_alpha
        ) * (x + 1) - 4 * cubic_alpha
        coeffs[1] = ((cubic_alpha + 2) * x - (cubic_alpha + 3)) * x * x + 1
        coeffs[2] = ((cubic_alpha + 2) * (1 - x) - (cubic_alpha + 3)) * (1 - x) * (
            1 - x
        ) + 1
        coeffs[3] = (
            (cubic_alpha * (2 - x) - 5 * cubic_alpha) * (2 - x) + 8 * cubic_alpha
        ) * (2 - x) - 4 * cubic_alpha

    def _gs_get_linear_coeffs(self, x, coeffs):
        """Write the two linear interpolation weights into *coeffs* in place."""
        x = abs(x)
        coeffs[0] = 1 - x
        coeffs[1] = x

    def _gs_bicubic_interpolate(self, p, x, y):  # type: ignore
        """Bicubic interpolation over a 4x4 patch *p* at offsets (x, y)."""
        v = np.empty((4,), dtype=p.dtype)
        coeffs = np.empty((4,), dtype=p.dtype)
        self._gs_get_cubic_coeffs(x, coeffs)
        for i in range(4):
            v[i] = coeffs @ p[i, :]
        self._gs_get_cubic_coeffs(y, coeffs)
        return coeffs @ v

    def _gs_cubic_interpolation_1d_with_x(self, data, x, border, padding_mode):
        """Cubic interpolation of 1-D *data* at fractional position *x*."""
        v = np.empty((4,), dtype=data.dtype)
        coeffs = np.empty((4,), dtype=data.dtype)
        x_0 = int(np.floor(x))
        x_1 = x_0 + 1
        x_2 = x_0 + 2
        x_minus_1 = x_0 - 1
        self._gs_get_cubic_coeffs(x - x_0, coeffs)
        v[0] = self._pixel_at_array(
            array=data, i=x_minus_1, border=border, padding_mode=padding_mode
        )
        v[1] = self._pixel_at_array(
            array=data, i=x_0, border=border, padding_mode=padding_mode
        )
        v[2] = self._pixel_at_array(
            array=data, i=x_1, border=border, padding_mode=padding_mode
        )
        v[3] = self._pixel_at_array(
            array=data, i=x_2, border=border, padding_mode=padding_mode
        )
        return coeffs @ v

    def _gs_linear_interpolation_1d_with_x(self, data, x, border, padding_mode):
        """Linear interpolation of 1-D *data* at fractional position *x*."""
        v = np.empty((2,), dtype=data.dtype)
        coeffs = np.empty((2,), dtype=data.dtype)
        x_0 = int(np.floor(x))
        x_1 = x_0 + 1
        self._gs_get_linear_coeffs(x - x_0, coeffs)
        v[0] = self._pixel_at_array(
            array=data, i=x_0, border=border, padding_mode=padding_mode
        )
        v[1] = self._pixel_at_array(
            array=data, i=x_1, border=border, padding_mode=padding_mode
        )
        return coeffs @ v

    def _gs_linear_interpolation_nd_with_x(self, data, x, border, padding_mode):
        """N-d linear interpolation: recurse over the leading axis, then
        interpolate the per-slice results along that axis."""
        num_dims = data.ndim
        assert num_dims == len(x) == int(len(border) / 2)
        if num_dims == 1:
            return self._gs_linear_interpolation_1d_with_x(
                data=data, x=x[0], border=border, padding_mode=padding_mode
            )
        res1d = []
        for i in range(data.shape[0]):
            r = self._gs_linear_interpolation_nd_with_x(
                data=data[i],
                x=x[1:],
                border=list(border[1:num_dims])
                + list(border[1 + num_dims : 2 * num_dims]),
                padding_mode=padding_mode,
            )
            res1d.append(r)
        res1d = np.array(res1d)
        return self._gs_linear_interpolation_1d_with_x(
            data=res1d,
            x=x[0],
            border=[border[0], border[num_dims]],
            padding_mode=padding_mode,
        )

    def _gs_cubic_interpolation_nd_with_x(self, data, x, border, padding_mode):
        """N-d cubic interpolation: same recursion as the linear variant."""
        num_dims = data.ndim
        assert num_dims == len(x) == int(len(border) / 2)
        if num_dims == 1:
            return self._gs_cubic_interpolation_1d_with_x(
                data=data, x=x[0], border=border, padding_mode=padding_mode
            )
        res1d = []
        for i in range(data.shape[0]):
            r = self._gs_cubic_interpolation_nd_with_x(
                data=data[i],
                x=x[1:],
                border=list(border[1:num_dims])
                + list(border[1 + num_dims : 2 * num_dims]),
                padding_mode=padding_mode,
            )
            res1d.append(r)
        res1d = np.array(res1d)
        return self._gs_cubic_interpolation_1d_with_x(
            data=res1d,
            x=x[0],
            border=[border[0], border[num_dims]],
            padding_mode=padding_mode,
        )

    def _clamp(self, val, lo, hi):  # type: ignore
        """Clamp *val* into the inclusive range [lo, hi]."""
        if val < lo:
            return lo
        if val > hi:
            return hi
        return val

    def _pixel_at_ndarray(self, ndarray, x: List, border, padding_mode):  # type: ignore
        """Read one pixel of an n-d array at integer coordinates *x*,
        applying *padding_mode* when a coordinate is out of bounds."""
        # boarder: [x_1_min, x_2_min, ..., x_1_max, x_2_max, ...]
        num_dims = ndarray.ndim
        assert num_dims == len(x) == int(len(border) / 2)
        if num_dims == 1:
            return self._pixel_at_array(
                array=ndarray, i=x[0], border=border, padding_mode=padding_mode
            )
        i = x[0]
        d = ndarray.shape[0]
        if padding_mode == "zeros":
            if i >= 0 and i < d:
                ndarray = ndarray[i]
            else:
                # Trick: recurse into an all-zero slice so the remaining
                # coordinates still walk a correctly-shaped array.
                i = 0
                ndarray = np.zeros_like(ndarray[i])
        elif padding_mode == "border":
            i = self._clamp(i, 0, d - 1)
            ndarray = ndarray[i]
        else:  # padding_mode == "reflection"
            i = int(self._gs_reflect(i, border[0], border[num_dims]))
            ndarray = ndarray[i]
        return self._pixel_at_ndarray(
            ndarray=ndarray,
            x=x[1:],
            border=list(border[1:num_dims]) + list(border[1 + num_dims : 2 * num_dims]),
            padding_mode=padding_mode,
        )

    def _pixel_at_array(self, array, i: int, border, padding_mode):  # type: ignore
        """Read element *i* of a 1-D array, applying *padding_mode* when
        *i* is out of bounds."""
        assert array.ndim == 1
        d = array.shape[0]
        if padding_mode == "zeros":
            if i >= 0 and i < d:
                pixel = array[i]
            else:
                pixel = 0
        elif padding_mode == "border":
            i = self._clamp(i, 0, d - 1)
            pixel = array[i]
        else:  # padding_mode == "reflection"
            i = int(self._gs_reflect(i, border[0], border[1]))
            pixel = array[i]
        return pixel

    def _prepare_border(self, dims, align_corners: bool):
        """Build the min/max border values per dimension."""
        # boarder: [x_1_min, x_2_min, ..., x_1_max, x_2_max, ...]
        num_dims = len(dims)
        borders = np.zeros(num_dims * 2)
        for i in range(num_dims):
            # min
            borders[i] = -0.5
            # max
            borders[i + num_dims] = dims[i] - 0.5
            if align_corners:
                # min
                borders[i] = 0.0
                # max
                borders[i + num_dims] = dims[i] - 1.0
        return borders

    def _cpp_std_round(self, x):
        """Round halfway cases away from zero, like C++ ``std::round``."""
        # https://en.cppreference.com/w/cpp/numeric/math/round
        def round_single_value(v):
            if v >= 0.0:
                return np.floor(v + 0.5)
            else:
                return np.ceil(v - 0.5)

        if isinstance(x, numbers.Number):
            return round_single_value(x)
        else:
            assert x.ndim == 1
            x_rounded = np.zeros_like(x)
            for i in range(x.shape[0]):
                x_rounded[i] = round_single_value(x[i])
            x_rounded = x_rounded.astype(np.int32)
            return x_rounded

    def _run(self, X, grid, mode=None, padding_mode=None, align_corners=None):
        """Sample *X* at the positions given by *grid* (one normalized
        coordinate vector per output element)."""
        # This implementation supports GridSample arbitrary dimensions.
        mode = mode or self.mode  # type: ignore
        padding_mode = padding_mode or self.padding_mode  # type: ignore
        align_corners = align_corners or self.align_corners  # type: ignore
        x_dims = X.shape
        grid_dims = grid.shape
        N = x_dims[0]
        C = x_dims[1]
        y_dims = (N, C, *grid_dims[1:-1])
        if np.prod(y_dims) == 0:
            # NOTE(review): every other exit of _run returns a 1-tuple of
            # outputs; this path returns a bare ndarray — looks like a
            # bug, confirm against the OpRun calling convention.
            return np.array([], dtype=X.dtype)
        Y = np.empty(y_dims, dtype=X.dtype)
        for n in range(N):
            grid_data = grid[n]
            for c in range(C):
                # Because the indices in the grid_data are always in the "reverse" dimensional order.
                # To interpolate for certain positions, we either have to transpose the X_data or
                # reverse the indices.
                # In this implementation, we took the latter approach.
                X_data = X[n, c]
                num_dims = len(x_dims[2:])
                dims = x_dims[2:]
                # Prepare borders.
                border = self._prepare_border(dims, align_corners=align_corners)
                for ox in _get_all_coords(Y[n, c]):
                    # normalized coordinates.
                    nx = grid_data[tuple(ox)]
                    nx = nx[::-1]
                    # denormalized coordinates.
                    x = self._gs_denormalize_coordinates(
                        n=nx, dims=dims, align_corners=align_corners
                    )
                    if mode == "nearest":
                        # PyTorch round the index to nearest even.
                        # https://github.com/pytorch/pytorch/pull/97000
                        x = np.rint(x)
                    # https://github.com/pytorch/pytorch/blob/v2.0.0/aten/src/ATen/native/GridSampler.h#L142
                    for i, v in enumerate(x):
                        x_min = border[i]
                        x_max = border[i + num_dims]
                        if v < x_min or v > x_max:
                            if padding_mode == "border":
                                x[i] = self._clamp(v, 0, dims[i] - 1)
                            elif padding_mode == "reflection":
                                x[i] = self._gs_reflect(v, x_min, x_max)
                    if mode == "nearest":
                        x = x.astype(np.int32)
                        Y[n][c][tuple(ox)] = self._pixel_at_ndarray(
                            ndarray=X_data,
                            x=x,
                            border=border,
                            padding_mode=padding_mode,
                        )
                    elif mode == "linear":
                        Y[n][c][tuple(ox)] = self._gs_linear_interpolation_nd_with_x(
                            data=X_data, x=x, border=border, padding_mode=padding_mode
                        )
                    elif mode == "cubic":
                        Y[n][c][tuple(ox)] = self._gs_cubic_interpolation_nd_with_x(
                            data=X_data, x=x, border=border, padding_mode=padding_mode
                        )
                    else:
                        raise RuntimeError(
                            "GridSample interpolation only supports nearest, linear, and cubic modes."
                        )
        return (Y.astype(X.dtype),)
| 13,472 | 35.217742 | 108 | py |
onnx | onnx-main/onnx/backend/test/case/node/gridsample.py | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
class GridSample(Base):
    """Backend test-case exporters for the ONNX ``GridSample`` operator."""

    @staticmethod
    def export_gridsample() -> None:
        """Basic 2D linear GridSample (zeros padding, align_corners=0)."""
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            padding_mode="zeros",
            align_corners=0,
        )
        # X shape, [N, C, H, W] - [1, 1, 4, 4]
        X = np.array(
            [
                [
                    [
                        [0.0, 1.0, 2.0, 3.0],
                        [4.0, 5.0, 6.0, 7.0],
                        [8.0, 9.0, 10.0, 11.0],
                        [12.0, 13.0, 14.0, 15.0],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        # Grid shape, [N, H_out, W_out, 2] - [1, 6, 6, 2]
        Grid = np.array(
            [
                [
                    [
                        [-1.0000, -1.0000],
                        [-0.6000, -1.0000],
                        [-0.2000, -1.0000],
                        [0.2000, -1.0000],
                        [0.6000, -1.0000],
                        [1.0000, -1.0000],
                    ],
                    [
                        [-1.0000, -0.6000],
                        [-0.6000, -0.6000],
                        [-0.2000, -0.6000],
                        [0.2000, -0.6000],
                        [0.6000, -0.6000],
                        [1.0000, -0.6000],
                    ],
                    [
                        [-1.0000, -0.2000],
                        [-0.6000, -0.2000],
                        [-0.2000, -0.2000],
                        [0.2000, -0.2000],
                        [0.6000, -0.2000],
                        [1.0000, -0.2000],
                    ],
                    [
                        [-1.0000, 0.2000],
                        [-0.6000, 0.2000],
                        [-0.2000, 0.2000],
                        [0.2000, 0.2000],
                        [0.6000, 0.2000],
                        [1.0000, 0.2000],
                    ],
                    [
                        [-1.0000, 0.6000],
                        [-0.6000, 0.6000],
                        [-0.2000, 0.6000],
                        [0.2000, 0.6000],
                        [0.6000, 0.6000],
                        [1.0000, 0.6000],
                    ],
                    [
                        [-1.0000, 1.0000],
                        [-0.6000, 1.0000],
                        [-0.2000, 1.0000],
                        [0.2000, 1.0000],
                        [0.6000, 1.0000],
                        [1.0000, 1.0000],
                    ],
                ]
            ],
            dtype=np.float32,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 6, 6]
        Y = np.array(
            [
                [
                    [
                        [0.0000, 0.1500, 0.5500, 0.9500, 1.3500, 0.7500],
                        [0.6000, 1.5000, 2.3000, 3.1000, 3.9000, 2.1000],
                        [2.2000, 4.7000, 5.5000, 6.3000, 7.1000, 3.7000],
                        [3.8000, 7.9000, 8.7000, 9.5000, 10.3000, 5.3000],
                        [5.4000, 11.1000, 11.9000, 12.7000, 13.5000, 6.9000],
                        [3.0000, 6.1500, 6.5500, 6.9500, 7.3500, 3.7500],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        expect(node, inputs=[X, Grid], outputs=[Y], name="test_gridsample")

    @staticmethod
    def export_gridsample_paddingmode() -> None:
        """Exercise all three padding modes with out-of-range grid values."""
        # X shape, [N, C, H, W] - [1, 1, 3, 2]
        X = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
            dtype=np.float32,
        )
        # Grid shape, [N, H_out, W_out, 2] - [1, 2, 4, 2]
        Grid = np.array(
            [
                [
                    [
                        [-10.0000, -10.0000],
                        [-5.0000, -5.0000],
                        [-0.2000, -0.2000],
                        [10.0000, 10.0000],
                    ],
                    [
                        [10.0000, 10.0000],
                        [-0.2000, -0.2000],
                        [5.0000, 5.0000],
                        [10.0000, 10.0000],
                    ],
                ]
            ],
            dtype=np.float32,
        )
        # setting padding_mode = 'zeros'
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            padding_mode="zeros",
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_zeros = np.array(
            [[[[0.0000, 0.0000, 1.7000, 0.0000], [0.0000, 1.7000, 0.0000, 0.0000]]]],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_zeros],
            name="test_gridsample_zeros_padding",
        )
        # setting padding_mode = 'border'
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            padding_mode="border",
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_border = np.array(
            [[[[0.0000, 0.0000, 1.7000, 5.0000], [5.0000, 1.7000, 5.0000, 5.0000]]]],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_border],
            name="test_gridsample_border_padding",
        )
        # setting padding_mode = 'reflection'
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            padding_mode="reflection",
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_reflection = np.array(
            [[[[2.5000, 0.0000, 1.7000, 2.5000], [2.5000, 1.7000, 5.0000, 2.5000]]]],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_reflection],
            name="test_gridsample_reflection_padding",
        )

    @staticmethod
    def export_gridsample_mode_aligncorners() -> None:
        """Cover every interpolation mode with and without align_corners.

        The reference output tensors of the additional cases at the end
        were generated using PyTorch 2.0 (see in-line note below).
        """
        # X shape, [N, C, H, W] - [1, 1, 3, 2]
        X = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
            dtype=np.float32,
        )
        # Grid shape, [N, H_out, W_out, 2] - [1, 2, 4, 2]
        Grid = np.array(
            [
                [
                    [
                        [-1.0000, -1.0000],
                        [-0.5000, -0.5000],
                        [-0.2000, -0.2000],
                        [0.0000, 0.0000],
                    ],
                    [
                        [0.0000, 0.0000],
                        [-0.2000, -0.2000],
                        [0.5000, 0.5000],
                        [1.0000, 1.0000],
                    ],
                ]
            ],
            dtype=np.float32,
        )
        # setting mode = 'bilinear', default align_corners = 0
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bilinear = np.array(
            [[[[0.0000, 0.5000, 1.7000, 2.5000], [2.5000, 1.7000, 4.5000, 1.2500]]]],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bilinear],
            name="test_gridsample_bilinear",
        )
        # setting mode = 'bilinear', align_corners = 1
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            align_corners=1,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_align_corners = np.array(
            [[[[0.0000, 1.2500, 2.0000, 2.5000], [2.5000, 2.0000, 3.7500, 5.0000]]]],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_align_corners],
            name="test_gridsample_aligncorners_true",
        )
        # setting mode = 'nearest'
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="nearest",
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_nearest = np.array(
            [[[[0.0, 0.0, 2.0, 2.0], [2.0, 2.0, 5.0, 0.0]]]],
            dtype=np.float32,
        )
        expect(
            node, inputs=[X, Grid], outputs=[Y_nearest], name="test_gridsample_nearest"
        )
        # setting mode = 'bicubic'
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="cubic",
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bicubic = np.array(
            [[[[-0.1406, 0.3828, 1.7556, 2.9688], [2.9688, 1.7556, 5.1445, 1.3906]]]],
            dtype=np.float32,
        )
        expect(
            node, inputs=[X, Grid], outputs=[Y_bicubic], name="test_gridsample_bicubic"
        )
        # ============================================================================
        # Additional tests
        # The reference output tensors were generated using PyTorch 2.0.
        Grid = np.array(
            [
                [
                    [[-1.0, -0.8], [-0.6, -0.5], [-0.1, -0.2], [0.7, 0.0]],
                    [[0.0, 0.4], [0.2, -0.2], [-0.3, 0.5], [-1.0, 1.0]],
                ]
            ],
            dtype=np.float32,
        )
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="nearest",
            align_corners=0,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_nearest = np.array(
            [[[[0.0, 0.0, 2.0, 3.0], [4.0, 3.0, 4.0, 4.0]]]],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_nearest],
            name="test_gridsample_nearest_align_corners_0_additional_1",
        )
        # setting mode = 'nearest'
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="nearest",
            align_corners=1,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_nearest = np.array(
            [[[[0.0, 0.0, 2.0, 3.0], [2.0, 3.0, 4.0, 4.0]]]],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_nearest],
            name="test_gridsample_nearest_align_corners_1_additional_1",
        )
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            align_corners=0,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bilinear = np.array(
            [[[[0.0000, 0.4500, 1.8000, 2.4000], [3.7000, 2.1000, 3.7000, 1.0000]]]],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bilinear],
            name="test_gridsample_bilinear_align_corners_0_additional_1",
        )
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            align_corners=1,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bilinear = np.array(
            [[[[0.4000, 1.2000, 2.0500, 2.8500], [3.3000, 2.2000, 3.3500, 4.0000]]]],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bilinear],
            name="test_gridsample_bilinear_align_corners_1_additional_1",
        )
        # These two new bicubic tests produces slightly higher error ~5e-5
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="cubic",
            align_corners=0,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bicubic = np.array(
            [
                [
                    [
                        [-0.173250, 0.284265, 1.923106, 2.568000],
                        [5.170375, 2.284414, 4.744844, 1.046875],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bicubic],
            name="test_gridsample_bicubic_align_corners_0_additional_1",
        )
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="cubic",
            align_corners=1,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bicubic = np.array(
            [
                [
                    [
                        [0.304001, 1.128750, 2.266270, 3.144844],
                        [4.531500, 2.455360, 4.599819, 4.000000],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bicubic],
            name="test_gridsample_bicubic_align_corners_1_additional_1",
        )

    # NOTE(review): "volumeetric" looks like a typo for "volumetric";
    # renaming the method would change the exporter's public name, so it
    # is left as-is and only flagged here.
    @staticmethod
    def export_volumeetric_gridsample_mode_aligncorners() -> None:
        """5-D (volumetric) GridSample cases for nearest and linear modes."""
        X = np.array(
            [
                [
                    [
                        [[1.0, 2.0], [3.0, 4.0]],
                        [[5.0, 6.0], [7.0, 8.0]],
                        [[9.0, 10.0], [11.0, 12.0]],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        Grid = np.array(
            [
                [
                    [
                        [[-1.0, -1.0, -1.0], [-1.0, -0.5, 0.3]],
                        [[-0.5, -0.5, -0.5], [1.0, -0.6, -1.0]],
                        [[-0.2, -0.2, -0.2], [0.4, 0.2, 0.6]],
                        [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]],
                    ],
                    [
                        [[0.0, 0.0, 0.0], [-1.0, 1.0, 0.0]],
                        [[-0.2, -0.2, -0.2], [1.0, 0.4, -0.2]],
                        [[0.5, 0.5, 0.5], [-1.0, -0.8, 0.8]],
                        [[1.0, 1.0, 1.0], [0.4, 0.6, -0.3]],
                    ],
                ]
            ],
            dtype=np.float32,
        )
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="nearest",
            align_corners=0,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_nearest = np.array(
            [
                [
                    [
                        [[1.0, 5.0], [1.0, 0.0], [5.0, 12.0], [5.0, 5.0]],
                        [[5.0, 0.0], [5.0, 0.0], [12.0, 9.0], [0.0, 8.0]],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_nearest],
            name="test_gridsample_volumetric_nearest_align_corners_0",
        )
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="nearest",
            align_corners=1,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_nearest = np.array(
            [
                [
                    [
                        [[1.0, 5.0], [1.0, 2.0], [5.0, 12.0], [5.0, 5.0]],
                        [[5.0, 7.0], [5.0, 8.0], [12.0, 9.0], [12.0, 8.0]],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_nearest],
            name="test_gridsample_volumetric_nearest_align_corners_1",
        )
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            align_corners=0,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bilinear = np.array(
            [
                [
                    [
                        [
                            [0.1250, 3.4000],
                            [2.0000, 0.4500],
                            [4.7000, 10.9000],
                            [6.5000, 3.0000],
                        ],
                        [
                            [6.5000, 1.7500],
                            [4.7000, 3.3000],
                            [11.0000, 2.5200],
                            [1.5000, 5.4900],
                        ],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bilinear],
            name="test_gridsample_volumetric_bilinear_align_corners_0",
        )
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            align_corners=1,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bilinear = np.array(
            [
                [
                    [
                        [
                            [1.0000, 6.7000],
                            [3.7500, 2.4000],
                            [5.4000, 9.3000],
                            [6.5000, 6.0000],
                        ],
                        [
                            [6.5000, 7.0000],
                            [5.4000, 6.6000],
                            [9.2500, 8.4000],
                            [12.0000, 6.1000],
                        ],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bilinear],
            name="test_gridsample_volumetric_bilinear_align_corners_1",
        )

    # The string below is intentionally kept as a literal: it documents a
    # manual torch-based cross-check that cannot run in CI (no torch).
    """
    For someone who want to test by script. Comment it cause github ONNX CI
    do not have the torch python package.
    @staticmethod
    def export_gridsample_torch():  # type: () -> None
        node = onnx.helper.make_node(
            'GridSample',
            inputs=['X', 'Grid'],
            outputs=['Y'],
            mode='bilinear',
            padding_mode='zeros',
            align_corners=0,
        )
        # X shape, [N, C, H, W] - [1, 1, 4, 4]
        # Grid shape, [N, H_out, W_out, 2] - [1, 6, 6, 2]
        # Y shape, [N, C, H_out, W_out] - [1, 1, 6, 6]
        import torch
        X = torch.arange(3 * 3).view(1, 1, 3, 3).float()
        d = torch.linspace(-1, 1, 6)
        meshx, meshy = torch.meshgrid((d, d))
        grid = torch.stack((meshy, meshx), 2)
        Grid = grid.unsqueeze(0)
        Y = torch.nn.functional.grid_sample(X, Grid, mode='bilinear',
                                            padding_mode='zeros', align_corners=False)
        expect(node, inputs=[X.numpy(), Grid.numpy()], outputs=[Y.numpy()],
               name='test_gridsample_torch')
    """
| 19,682 | 29.658879 | 87 | py |
onnx | onnx-main/onnx/backend/test/case/node/resize.py | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
from onnx.reference.ops.op_resize import _cubic_coeffs as cubic_coeffs
from onnx.reference.ops.op_resize import (
_cubic_coeffs_antialias as cubic_coeffs_antialias,
)
from onnx.reference.ops.op_resize import _interpolate_nd as interpolate_nd
from onnx.reference.ops.op_resize import _linear_coeffs as linear_coeffs
from onnx.reference.ops.op_resize import (
_linear_coeffs_antialias as linear_coeffs_antialias,
)
from onnx.reference.ops.op_resize import _nearest_coeffs as nearest_coeffs
class Resize(Base):
@staticmethod
def export_resize_upsample_scales_nearest() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="nearest",
)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
# [[[[1. 1. 1. 2. 2. 2.]
# [1. 1. 1. 2. 2. 2.]
# [3. 3. 3. 4. 4. 4.]
# [3. 3. 3. 4. 4. 4.]]]]
output = interpolate_nd(
data, lambda x, _: nearest_coeffs(x), scale_factors=scales
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_upsample_scales_nearest",
)
@staticmethod
def export_resize_downsample_scales_nearest() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="nearest",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
# [[[[1. 3.]]]]
output = interpolate_nd(
data, lambda x, _: nearest_coeffs(x), scale_factors=scales
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_downsample_scales_nearest",
)
@staticmethod
def export_resize_upsample_sizes_nearest() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 1, 7, 8], dtype=np.int64)
# [[[[1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [3. 3. 3. 3. 4. 4. 4. 4.]
# [3. 3. 3. 3. 4. 4. 4. 4.]
# [3. 3. 3. 3. 4. 4. 4. 4.]]]]
output = interpolate_nd(
data, lambda x, _: nearest_coeffs(x), output_size=sizes
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_upsample_sizes_nearest",
)
@staticmethod
def export_resize_downsample_sizes_nearest() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 1, 1, 3], dtype=np.int64)
# [[[[1. 2. 4.]]]]
output = interpolate_nd(
data, lambda x, _: nearest_coeffs(x), output_size=sizes
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_downsample_sizes_nearest",
)
@staticmethod
def export_resize_upsample_scales_linear() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="linear",
)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
# [[[[1. 1.25 1.75 2. ]
# [1.5 1.75 2.25 2.5 ]
# [2.5 2.75 3.25 3.5 ]
# [3. 3.25 3.75 4. ]]]]
output = interpolate_nd(
data, lambda x, _: linear_coeffs(x), scale_factors=scales
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_upsample_scales_linear",
)
@staticmethod
def export_resize_upsample_scales_linear_align_corners() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="linear",
coordinate_transformation_mode="align_corners",
)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
# [[[[1. 1.33333333 1.66666667 2. ]
# [1.66666667 2. 2.33333333 2.66666667]
# [2.33333333 2.66666667 3. 3.33333333]
# [3. 3.33333333 3.66666667 4. ]]]]
output = interpolate_nd(
data,
lambda x, _: linear_coeffs(x),
scale_factors=scales,
coordinate_transformation_mode="align_corners",
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_upsample_scales_linear_align_corners",
)
@staticmethod
def export_resize_downsample_scales_linear() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="linear",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
# [[[[2.6666665 4.3333331]]]]
output = interpolate_nd(
data, lambda x, _: linear_coeffs(x), scale_factors=scales
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_downsample_scales_linear",
)
@staticmethod
def export_resize_downsample_scales_linear_align_corners() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="linear",
coordinate_transformation_mode="align_corners",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
# [[[[1. 3.142857]]]]
output = interpolate_nd(
data,
lambda x, _: linear_coeffs(x),
scale_factors=scales,
coordinate_transformation_mode="align_corners",
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_downsample_scales_linear_align_corners",
)
@staticmethod
def export_resize_upsample_scales_cubic() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="cubic",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
# [[[[ 0.47265625 0.76953125 1.24609375 1.875 2.28125
# 2.91015625 3.38671875 3.68359375]
# [ 1.66015625 1.95703125 2.43359375 3.0625 3.46875
# 4.09765625 4.57421875 4.87109375]
# [ 3.56640625 3.86328125 4.33984375 4.96875 5.375
# 6.00390625 6.48046875 6.77734375]
# [ 6.08203125 6.37890625 6.85546875 7.484375 7.890625
# 8.51953125 8.99609375 9.29296875]
# [ 7.70703125 8.00390625 8.48046875 9.109375 9.515625
# 10.14453125 10.62109375 10.91796875]
# [10.22265625 10.51953125 10.99609375 11.625 12.03125
# 12.66015625 13.13671875 13.43359375]
# [12.12890625 12.42578125 12.90234375 13.53125 13.9375
# 14.56640625 15.04296875 15.33984375]
# [13.31640625 13.61328125 14.08984375 14.71875 15.125
# 15.75390625 16.23046875 16.52734375]]]]
output = interpolate_nd(
data, lambda x, _: cubic_coeffs(x), scale_factors=scales
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_upsample_scales_cubic",
)
@staticmethod
def export_resize_upsample_scales_cubic_align_corners() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="cubic",
coordinate_transformation_mode="align_corners",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
# [[[[ 1. 1.34110787 1.80029155 2.32944606 2.67055394
# 3.19970845 3.65889213 4. ]
# [ 2.36443149 2.70553936 3.16472303 3.69387755 4.03498542
# 4.56413994 5.02332362 5.36443149]
# [ 4.20116618 4.54227405 5.00145773 5.53061224 5.87172012
# 6.40087464 6.86005831 7.20116618]
# [ 6.31778426 6.65889213 7.1180758 7.64723032 7.98833819
# 8.51749271 8.97667638 9.31778426]
# [ 7.68221574 8.02332362 8.48250729 9.01166181 9.35276968
# 9.8819242 10.34110787 10.68221574]
# [ 9.79883382 10.13994169 10.59912536 11.12827988 11.46938776
# 11.99854227 12.45772595 12.79883382]
# [11.63556851 11.97667638 12.43586006 12.96501458 13.30612245
# 13.83527697 14.29446064 14.63556851]
# [13. 13.34110787 13.80029155 14.32944606 14.67055394
# 15.19970845 15.65889213 16. ]]]]
output = interpolate_nd(
data,
lambda x, _: cubic_coeffs(x),
scale_factors=scales,
coordinate_transformation_mode="align_corners",
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_upsample_scales_cubic_align_corners",
)
@staticmethod
def export_resize_downsample_scales_cubic() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="cubic",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
# [[[[ 1.47119141 2.78125 4.08251953]
# [ 6.71142578 8.02148438 9.32275391]
# [11.91650391 13.2265625 14.52783203]]]]
output = interpolate_nd(
data, lambda x, _: cubic_coeffs(x), scale_factors=scales
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_downsample_scales_cubic",
)
@staticmethod
def export_resize_downsample_scales_cubic_align_corners() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="cubic",
coordinate_transformation_mode="align_corners",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
# [[[[ 1. 2.39519159 3.79038317]
# [ 6.58076634 7.97595793 9.37114951]
# [12.16153268 13.55672427 14.95191585]]]]
output = interpolate_nd(
data,
lambda x, _: cubic_coeffs(x),
scale_factors=scales,
coordinate_transformation_mode="align_corners",
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_downsample_scales_cubic_align_corners",
)
@staticmethod
def export_resize_upsample_sizes_cubic() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="cubic",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 1, 9, 10], dtype=np.int64)
# [[[[ 0.45507922 0.64057922 0.97157922 1.42257922 1.90732922
# 2.22332922 2.70807922 3.15907922 3.49007922 3.67557922]
# [ 1.39437963 1.57987963 1.91087963 2.36187963 2.84662963
# 3.16262963 3.64737963 4.09837963 4.42937963 4.61487963]
# [ 2.95130693 3.13680693 3.46780693 3.91880693 4.40355693
# 4.71955693 5.20430693 5.65530693 5.98630693 6.17180693]
# [ 5.20525069 5.39075069 5.72175069 6.17275069 6.65750069
# 6.97350069 7.45825069 7.90925069 8.24025069 8.42575069]
# [ 6.88975 7.07525 7.40625 7.85725 8.342
# 8.658 9.14275 9.59375 9.92475 10.11025 ]
# [ 8.57424931 8.75974931 9.09074931 9.54174931 10.02649931
# 10.34249931 10.82724931 11.27824931 11.60924931 11.79474931]
# [10.82819307 11.01369307 11.34469307 11.79569307 12.28044307
# 12.59644307 13.08119307 13.53219307 13.86319307 14.04869307]
# [12.38512037 12.57062037 12.90162037 13.35262037 13.83737037
# 14.15337037 14.63812037 15.08912037 15.42012037 15.60562037]
# [13.32442078 13.50992078 13.84092078 14.29192078 14.77667078
# 15.09267078 15.57742078 16.02842078 16.35942078 16.54492078]]]]
output = interpolate_nd(
data, lambda x, _: cubic_coeffs(x), output_size=sizes
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_upsample_sizes_cubic",
)
@staticmethod
def export_resize_downsample_sizes_cubic() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="cubic",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 1, 3, 3], dtype=np.int64)
# [[[[ 1.63078704 3.00462963 4.37847222]
# [ 7.12615741 8.5 9.87384259]
# [12.62152778 13.99537037 15.36921296]]]]
output = interpolate_nd(
data, lambda x, _: cubic_coeffs(x), output_size=sizes
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_downsample_sizes_cubic",
)
# TensorFlow v1 bicubic with half_pixel_centers=True
@staticmethod
def export_resize_upsample_scales_cubic_A_n0p5_exclude_outside() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="cubic",
cubic_coeff_a=-0.5,
exclude_outside=True,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
# [[[[ 0.55882353 0.81494204 1.35698249 1.89705882 2.39705882
# 2.93713516 3.47917561 3.73529412]
# [ 1.58329755 1.83941606 2.38145651 2.92153285 3.42153285
# 3.96160918 4.50364964 4.75976814]
# [ 3.75145936 4.00757787 4.54961832 5.08969466 5.58969466
# 6.12977099 6.67181144 6.92792995]
# [ 5.91176471 6.16788321 6.70992366 7.25 7.75
# 8.29007634 8.83211679 9.08823529]
# [ 7.91176471 8.16788321 8.70992366 9.25 9.75
# 10.29007634 10.83211679 11.08823529]
# [10.07207005 10.32818856 10.87022901 11.41030534 11.91030534
# 12.45038168 12.99242213 13.24854064]
# [12.24023186 12.49635036 13.03839082 13.57846715 14.07846715
# 14.61854349 15.16058394 15.41670245]
# [13.26470588 13.52082439 14.06286484 14.60294118 15.10294118
# 15.64301751 16.18505796 16.44117647]]]]
output = interpolate_nd(
data,
lambda x, _: cubic_coeffs(x, A=-0.5),
scale_factors=scales,
exclude_outside=True,
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_upsample_scales_cubic_A_n0p5_exclude_outside",
)
@staticmethod
def export_resize_downsample_scales_cubic_A_n0p5_exclude_outside() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="cubic",
cubic_coeff_a=-0.5,
exclude_outside=True,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
# [[[[ 1.36812675 2.6695014 4.0133367 ]
# [ 6.57362535 7.875 9.2188353 ]
# [11.94896657 13.25034122 14.59417652]]]]
output = interpolate_nd(
data,
lambda x, _: cubic_coeffs(x, A=-0.5),
scale_factors=scales,
exclude_outside=True,
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_downsample_scales_cubic_A_n0p5_exclude_outside",
)
# TensorFlow v1 bicubic with half_pixel_centers=False
@staticmethod
def export_resize_upsample_scales_cubic_asymmetric() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="cubic",
coordinate_transformation_mode="asymmetric",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
# [[[[ 1. 1.40625 2. 2.5 3. 3.59375 4.
# 4.09375]
# [ 2.625 3.03125 3.625 4.125 4.625 5.21875 5.625
# 5.71875]
# [ 5. 5.40625 6. 6.5 7. 7.59375 8.
# 8.09375]
# [ 7. 7.40625 8. 8.5 9. 9.59375 10.
# 10.09375]
# [ 9. 9.40625 10. 10.5 11. 11.59375 12.
# 12.09375]
# [11.375 11.78125 12.375 12.875 13.375 13.96875 14.375
# 14.46875]
# [13. 13.40625 14. 14.5 15. 15.59375 16.
# 16.09375]
# [13.375 13.78125 14.375 14.875 15.375 15.96875 16.375
# 16.46875]]]]
output = interpolate_nd(
data,
lambda x, _: cubic_coeffs(x, A=-0.75),
scale_factors=scales,
coordinate_transformation_mode="asymmetric",
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_upsample_scales_cubic_asymmetric",
)
@staticmethod
def export_resize_tf_crop_and_resize() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "roi", "", "sizes"],
outputs=["Y"],
mode="linear",
coordinate_transformation_mode="tf_crop_and_resize",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
# Note: for some rois, the result may be different with that of TF for inaccurate floating point
roi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)
sizes = np.array([1, 1, 3, 3], dtype=np.int64)
# [[[[ 7.6000004 7.9 8.2 ]
# [ 8.8 9.1 9.400001 ]
# [10. 10.3 10.6 ]]]]
output = interpolate_nd(
data,
lambda x, _: linear_coeffs(x),
output_size=sizes,
roi=roi,
coordinate_transformation_mode="tf_crop_and_resize",
).astype(np.float32)
expect(
node,
inputs=[data, roi, sizes],
outputs=[output],
name="test_resize_tf_crop_and_resize",
)
@staticmethod
def export_resize_tf_crop_and_resize_extrapolation_value() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "roi", "", "sizes"],
outputs=["Y"],
mode="linear",
coordinate_transformation_mode="tf_crop_and_resize",
extrapolation_value=10.0,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
# Note: for some rois, the result may be different with that of TF for inaccurate floating point
roi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)
sizes = np.array([1, 1, 3, 3], dtype=np.int64)
# [[[[ 7.6000004 10. 10. ]
# [12.400001 10. 10. ]
# [10. 10. 10. ]]]]
output = interpolate_nd(
data,
lambda x, _: linear_coeffs(x),
output_size=sizes,
roi=roi,
coordinate_transformation_mode="tf_crop_and_resize",
extrapolation_value=10.0,
).astype(np.float32)
expect(
node,
inputs=[data, roi, sizes],
outputs=[output],
name="test_resize_tf_crop_and_resize",
)
@staticmethod
def export_resize_downsample_sizes_linear_pytorch_half_pixel() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="linear",
coordinate_transformation_mode="pytorch_half_pixel",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 1, 3, 1], dtype=np.int64)
# [[[[ 1.6666666]
# [ 7. ]
# [12.333333 ]]]]
output = interpolate_nd(
data,
lambda x, _: linear_coeffs(x),
output_size=sizes,
coordinate_transformation_mode="pytorch_half_pixel",
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_downsample_sizes_linear_pytorch_half_pixel",
)
@staticmethod
def export_resize_upsample_sizes_nearest_floor_align_corners() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
coordinate_transformation_mode="align_corners",
nearest_mode="floor",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 1, 8, 8], dtype=np.int64)
# [[[[ 1. 1. 1. 2. 2. 3. 3. 4.]
# [ 1. 1. 1. 2. 2. 3. 3. 4.]
# [ 1. 1. 1. 2. 2. 3. 3. 4.]
# [ 5. 5. 5. 6. 6. 7. 7. 8.]
# [ 5. 5. 5. 6. 6. 7. 7. 8.]
# [ 9. 9. 9. 10. 10. 11. 11. 12.]
# [ 9. 9. 9. 10. 10. 11. 11. 12.]
# [13. 13. 13. 14. 14. 15. 15. 16.]]]]
output = interpolate_nd(
data,
lambda x, _: nearest_coeffs(x, mode="floor"),
output_size=sizes,
coordinate_transformation_mode="align_corners",
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_upsample_sizes_nearest_floor_align_corners",
)
@staticmethod
def export_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
coordinate_transformation_mode="asymmetric",
nearest_mode="round_prefer_ceil",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 1, 8, 8], dtype=np.int64)
# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]
# [ 5. 6. 6. 7. 7. 8. 8. 8.]
# [ 5. 6. 6. 7. 7. 8. 8. 8.]
# [ 9. 10. 10. 11. 11. 12. 12. 12.]
# [ 9. 10. 10. 11. 11. 12. 12. 12.]
# [13. 14. 14. 15. 15. 16. 16. 16.]
# [13. 14. 14. 15. 15. 16. 16. 16.]
# [13. 14. 14. 15. 15. 16. 16. 16.]]]]
output = interpolate_nd(
data,
lambda x, _: nearest_coeffs(x, mode="round_prefer_ceil"),
output_size=sizes,
coordinate_transformation_mode="asymmetric",
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric",
)
@staticmethod
def export_resize_upsample_sizes_nearest_ceil_half_pixel() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
coordinate_transformation_mode="half_pixel",
nearest_mode="ceil",
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 1, 8, 8], dtype=np.int64)
# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]
# [ 5. 6. 6. 7. 7. 8. 8. 8.]
# [ 5. 6. 6. 7. 7. 8. 8. 8.]
# [ 9. 10. 10. 11. 11. 12. 12. 12.]
# [ 9. 10. 10. 11. 11. 12. 12. 12.]
# [13. 14. 14. 15. 15. 16. 16. 16.]
# [13. 14. 14. 15. 15. 16. 16. 16.]
# [13. 14. 14. 15. 15. 16. 16. 16.]]]]
output = interpolate_nd(
data, lambda x, _: nearest_coeffs(x, mode="ceil"), output_size=sizes
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_upsample_sizes_nearest_ceil_half_pixel",
)
@staticmethod
def export_resize_downsample_scales_linear_antialias() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="linear",
antialias=1,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
# [[[[ 2.875 4.5 ]
# [ 9.375 11. ]]]]
output = interpolate_nd(
data, linear_coeffs_antialias, scale_factors=scales
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_downsample_scales_linear_antialias",
)
@staticmethod
def export_resize_downsample_sizes_linear_antialias() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="linear",
antialias=1,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 1, 3, 3], dtype=np.int64)
# [[[[ 2.3636363 3.590909 4.818182 ]
# [ 7.2727275 8.5 9.727273 ]
# [12.181818 13.409091 14.636364 ]]]]
output = interpolate_nd(
data, linear_coeffs_antialias, output_size=sizes
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_downsample_sizes_linear_antialias",
)
@staticmethod
def export_resize_downsample_scales_cubic_antialias() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="cubic",
antialias=1,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
# [[[[ 2.5180721 4.2858863]
# [ 9.589329 11.357142 ]]]]
output = interpolate_nd(
data, cubic_coeffs_antialias, scale_factors=scales
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_downsample_scales_cubic_antialias",
)
@staticmethod
def export_resize_downsample_sizes_cubic_antialias() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="cubic",
antialias=1,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 1, 3, 3], dtype=np.int64)
# [[[[ 1.7750092 3.1200073 4.4650054]
# [ 7.1550016 8.5 9.844998 ]
# [12.534994 13.8799925 15.224991 ]]]]
output = interpolate_nd(data, cubic_coeffs_antialias, output_size=sizes).astype(
np.float32
)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_downsample_sizes_cubic_antialias",
)
@staticmethod
def export_resize_upsample_scales_nearest_axes_2_3() -> None:
axes = [2, 3]
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="nearest",
axes=axes,
)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
scales = np.array([2.0, 3.0], dtype=np.float32)
# [[[[1. 1. 1. 2. 2. 2.]
# [1. 1. 1. 2. 2. 2.]
# [3. 3. 3. 4. 4. 4.]
# [3. 3. 3. 4. 4. 4.]]]]
output = interpolate_nd(
data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_upsample_scales_nearest_axes_2_3",
)
@staticmethod
def export_resize_upsample_scales_nearest_axes_3_2() -> None:
axes = [3, 2]
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="nearest",
axes=axes,
)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
scales = np.array([3.0, 2.0], dtype=np.float32)
# [[[[1. 1. 1. 2. 2. 2.]
# [1. 1. 1. 2. 2. 2.]
# [3. 3. 3. 4. 4. 4.]
# [3. 3. 3. 4. 4. 4.]]]]
output = interpolate_nd(
data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_upsample_scales_nearest_axes_3_2",
)
@staticmethod
def export_resize_upsample_sizes_nearest_axes_2_3() -> None:
axes = [2, 3]
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
axes=axes,
)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
sizes = np.array([7, 8], dtype=np.int64)
# [[[[1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [3. 3. 3. 3. 4. 4. 4. 4.]
# [3. 3. 3. 3. 4. 4. 4. 4.]
# [3. 3. 3. 3. 4. 4. 4. 4.]]]]
output = interpolate_nd(
data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_upsample_sizes_nearest_axes_2_3",
)
@staticmethod
def export_resize_upsample_sizes_nearest_axes_3_2() -> None:
axes = [3, 2]
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
axes=axes,
)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
sizes = np.array([8, 7], dtype=np.int64)
# [[[[1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [3. 3. 3. 3. 4. 4. 4. 4.]
# [3. 3. 3. 3. 4. 4. 4. 4.]
# [3. 3. 3. 3. 4. 4. 4. 4.]]]]
output = interpolate_nd(
data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_upsample_sizes_nearest_axes_3_2",
)
@staticmethod
def export_resize_tf_crop_and_resize_axes_2_3() -> None:
axes = [2, 3]
node = onnx.helper.make_node(
"Resize",
inputs=["X", "roi", "", "sizes"],
outputs=["Y"],
mode="linear",
coordinate_transformation_mode="tf_crop_and_resize",
axes=axes,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
# Note: for some rois, the result may be different with that of TF for inaccurate floating point
roi = np.array([0.4, 0.6, 0.6, 0.8], dtype=np.float32)
sizes = np.array([3, 3], dtype=np.int64)
# [[[[ 7.6000004 7.9 8.2 ]
# [ 8.8 9.1 9.400001 ]
# [10. 10.3 10.6 ]]]]
output = interpolate_nd(
data,
lambda x, _: linear_coeffs(x),
output_size=sizes,
roi=roi,
axes=axes,
coordinate_transformation_mode="tf_crop_and_resize",
).astype(np.float32)
expect(
node,
inputs=[data, roi, sizes],
outputs=[output],
name="test_resize_tf_crop_and_resize_axes_2_3",
)
@staticmethod
def export_resize_tf_crop_and_resize_axes_3_2() -> None:
axes = [3, 2]
node = onnx.helper.make_node(
"Resize",
inputs=["X", "roi", "", "sizes"],
outputs=["Y"],
mode="linear",
coordinate_transformation_mode="tf_crop_and_resize",
axes=axes,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
],
dtype=np.float32,
)
# Note: for some rois, the result may be different with that of TF for inaccurate floating point
roi = np.array([0.6, 0.4, 0.8, 0.6], dtype=np.float32)
sizes = np.array([3, 3], dtype=np.int64)
# [[[[ 7.6000004 7.9 8.2 ]
# [ 8.8 9.1 9.400001 ]
# [10. 10.3 10.6 ]]]]
output = interpolate_nd(
data,
lambda x, _: linear_coeffs(x),
output_size=sizes,
roi=roi,
axes=axes,
coordinate_transformation_mode="tf_crop_and_resize",
).astype(np.float32)
expect(
node,
inputs=[data, roi, sizes],
outputs=[output],
name="test_resize_tf_crop_and_resize_axes_3_2",
)
@staticmethod
def export_resize_upsample_sizes_nearest_not_larger() -> None:
keep_aspect_ratio_policy = "not_larger"
axes = [2, 3]
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
axes=axes,
keep_aspect_ratio_policy=keep_aspect_ratio_policy,
)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
sizes = np.array([7, 8], dtype=np.int64) # Results in 7x7
# [[[[1. 1. 1. 1. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2.]
# [3. 3. 3. 3. 4. 4. 4.]
# [3. 3. 3. 3. 4. 4. 4.]
# [3. 3. 3. 3. 4. 4. 4.]]]]
output = interpolate_nd(
data,
lambda x, _: nearest_coeffs(x),
output_size=sizes,
axes=axes,
keep_aspect_ratio_policy=keep_aspect_ratio_policy,
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_upsample_sizes_nearest_not_larger",
)
@staticmethod
def export_resize_upsample_sizes_nearest_not_smaller() -> None:
keep_aspect_ratio_policy = "not_smaller"
axes = [2, 3]
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
axes=axes,
keep_aspect_ratio_policy=keep_aspect_ratio_policy,
)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
sizes = np.array([7, 8], dtype=np.int64) # Results in 8x8
# [[[[1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [1. 1. 1. 1. 2. 2. 2. 2.]
# [3. 3. 3. 3. 4. 4. 4. 4.]
# [3. 3. 3. 3. 4. 4. 4. 4.]
# [3. 3. 3. 3. 4. 4. 4. 4.]]]]
output = interpolate_nd(
data,
lambda x, _: nearest_coeffs(x),
output_size=sizes,
axes=axes,
keep_aspect_ratio_policy=keep_aspect_ratio_policy,
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_upsample_sizes_nearest_not_larger",
)
@staticmethod
def export_resize_downsample_sizes_nearest_not_larger() -> None:
keep_aspect_ratio_policy = "not_larger"
axes = [2, 3]
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
axes=axes,
keep_aspect_ratio_policy=keep_aspect_ratio_policy,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 3], dtype=np.int64) # Results in 1x2
# [[[[1. 3.]]]]
output = interpolate_nd(
data,
lambda x, _: nearest_coeffs(x),
output_size=sizes,
axes=axes,
keep_aspect_ratio_policy=keep_aspect_ratio_policy,
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_downsample_sizes_nearest_not_larger",
)
@staticmethod
def export_resize_downsample_sizes_nearest_not_smaller() -> None:
keep_aspect_ratio_policy = "not_smaller"
axes = [2, 3]
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "", "sizes"],
outputs=["Y"],
mode="nearest",
axes=axes,
keep_aspect_ratio_policy=keep_aspect_ratio_policy,
)
data = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
]
]
],
dtype=np.float32,
)
sizes = np.array([1, 3], dtype=np.int64) # Results in 2x3
# [[[[1. 2. 4.]
# [5. 6. 8.]]]]
output = interpolate_nd(
data,
lambda x, _: nearest_coeffs(x),
output_size=sizes,
axes=axes,
keep_aspect_ratio_policy=keep_aspect_ratio_policy,
).astype(np.float32)
expect(
node,
inputs=[data, sizes],
outputs=[output],
name="test_resize_downsample_sizes_nearest_not_smaller",
)
    @staticmethod
    def export_resize_downsample_scales_linear_half_pixel_symmetric() -> None:
        """Linear Resize downsampling a 1x1x1x4 input with the
        ``half_pixel_symmetric`` coordinate transformation (scale 0.6 on W)."""
        node = onnx.helper.make_node(
            "Resize",
            inputs=["X", "", "scales"],
            outputs=["Y"],
            mode="linear",
            coordinate_transformation_mode="half_pixel_symmetric",
        )
        data = np.array([[[[1, 2, 3, 4]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 1.0, 0.6], dtype=np.float32)
        # Expected output:
        # [[[[1.6666667, 3.3333333]]]]
        output = interpolate_nd(
            data,
            lambda x, _: linear_coeffs(x),
            scale_factors=scales,
            coordinate_transformation_mode="half_pixel_symmetric",
        ).astype(np.float32)
        expect(
            node,
            inputs=[data, scales],
            outputs=[output],
            name="test_resize_downsample_scales_linear_half_pixel_symmetric",
        )
@staticmethod
def export_resize_upsample_scales_linear_half_pixel_symmetric() -> None:
node = onnx.helper.make_node(
"Resize",
inputs=["X", "", "scales"],
outputs=["Y"],
mode="linear",
coordinate_transformation_mode="half_pixel_symmetric",
)
data = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
scales = np.array([1.0, 1.0, 2.3, 2.94], dtype=np.float32)
# [[[[1. , 1.15986395, 1.5 , 1.84013605, 2. ],
# [1.56521738, 1.72508133, 2.06521738, 2.40535343, 2.56521738],
# [2.43478262, 2.59464657, 2.93478262, 3.27491867, 3.43478262],
# [3. , 3.15986395, 3.5 , 3.84013605, 4. ]]]]
output = interpolate_nd(
data,
lambda x, _: linear_coeffs(x),
scale_factors=scales,
coordinate_transformation_mode="half_pixel_symmetric",
).astype(np.float32)
expect(
node,
inputs=[data, scales],
outputs=[output],
name="test_resize_upsample_scales_linear_half_pixel_symmetric",
)
| 51,616 | 29.114936 | 104 | py |
onnx | onnx-main/onnx/backend/test/runner/__init__.py | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import functools
import glob
import os
import re
import shutil
import sys
import tarfile
import tempfile
import time
import unittest
from collections import defaultdict
from typing import Any, Callable, Iterable, Pattern, Sequence
from urllib.request import urlretrieve
import numpy as np
import onnx
import onnx.reference
from onnx import ONNX_ML, ModelProto, NodeProto, TypeProto, ValueInfoProto, numpy_helper
from onnx.backend.base import Backend
from onnx.backend.test.case.test_case import TestCase
from onnx.backend.test.loader import load_model_tests
from onnx.backend.test.runner.item import TestItem
class BackendIsNotSupposedToImplementIt(unittest.SkipTest):
    """Raised by a backend to skip a test for an op it deliberately does not implement."""
    pass
def retry_execute(times: int) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
    """Decorator factory: retry the wrapped callable up to ``times`` attempts.

    On failure, prints the attempt count and sleeps ``5 * attempt`` seconds
    before retrying; the exception from the final attempt is re-raised.
    """
    assert times >= 1

    def wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
        @functools.wraps(func)
        def wrapped(*args: Any, **kwargs: Any) -> Any:
            attempt = 0
            while True:
                attempt += 1
                try:
                    return func(*args, **kwargs)
                except Exception:
                    print(f"{attempt} times tried")
                    if attempt >= times:
                        raise
                    time.sleep(5 * attempt)

        return wrapped

    return wrapper
class Runner:
    """Collects ONNX backend-conformance tests for a given ``Backend`` and
    exposes them as unittest test cases, a test suite, or one merged TestCase.

    Tests are loaded once in ``__init__`` and stored in ``_test_items``;
    ``include``/``exclude``/``xfail`` register regex filters that are applied
    lazily in ``_filtered_test_items``.
    """

    def __init__(
        self, backend: type[Backend], parent_module: str | None = None
    ) -> None:
        self.backend = backend
        self._parent_module = parent_module
        self._include_patterns: set[Pattern[str]] = set()
        self._exclude_patterns: set[Pattern[str]] = set()
        self._xfail_patterns: set[Pattern[str]] = set()
        # This is the source of the truth of all test functions.
        # Properties `test_cases`, `test_suite` and `tests` will be
        # derived from it.
        # {category: {name: func}}
        self._test_items: dict[str, dict[str, TestItem]] = defaultdict(dict)
        for rt in load_model_tests(kind="node"):
            self._add_model_test(rt, "Node")
        for rt in load_model_tests(kind="real"):
            self._add_model_test(rt, "Real")
        for rt in load_model_tests(kind="simple"):
            self._add_model_test(rt, "Simple")
        for ct in load_model_tests(kind="pytorch-converted"):
            self._add_model_test(ct, "PyTorchConverted")
        for ot in load_model_tests(kind="pytorch-operator"):
            self._add_model_test(ot, "PyTorchOperator")

    def _get_test_case(self, name: str) -> type[unittest.TestCase]:
        """Create an empty TestCase subclass named *name*, re-homed to
        ``_parent_module`` (if set) so test discovery attributes it there."""
        test_case = type(str(name), (unittest.TestCase,), {})
        if self._parent_module:
            test_case.__module__ = self._parent_module
        return test_case

    def include(self, pattern: str) -> Runner:
        """Only run tests whose name matches *pattern* (chainable)."""
        self._include_patterns.add(re.compile(pattern))
        return self

    def exclude(self, pattern: str) -> Runner:
        """Skip tests whose name matches *pattern* (chainable)."""
        self._exclude_patterns.add(re.compile(pattern))
        return self

    def xfail(self, pattern: str) -> Runner:
        """Mark tests whose name matches *pattern* as expected failures."""
        self._xfail_patterns.add(re.compile(pattern))
        return self

    def enable_report(self) -> Runner:
        """Wrap every test function with pytest's ``onnx_coverage`` marker so
        an operator-coverage report can be produced."""
        import pytest

        for category, items_map in self._test_items.items():
            for item in items_map.values():
                item.func = pytest.mark.onnx_coverage(item.proto, category)(item.func)
        return self

    @property
    def _filtered_test_items(self) -> dict[str, dict[str, TestItem]]:
        """Apply include/exclude/xfail filters to ``_test_items``.

        NOTE(review): this mutates ``item.func`` in place, so the wrapping is
        cumulative if the property is evaluated more than once — confirm
        callers only read it a single time.
        """
        filtered: dict[str, dict[str, TestItem]] = {}
        for category, items_map in self._test_items.items():
            filtered[category] = {}
            for name, item in items_map.items():
                if self._include_patterns and (
                    not any(include.search(name) for include in self._include_patterns)
                ):
                    item.func = unittest.skip("no matched include pattern")(item.func)
                for exclude in self._exclude_patterns:
                    if exclude.search(name):
                        item.func = unittest.skip(
                            f'matched exclude pattern "{exclude.pattern}"'
                        )(item.func)
                for xfail in self._xfail_patterns:
                    if xfail.search(name):
                        item.func = unittest.expectedFailure(item.func)
                filtered[category][name] = item
        return filtered

    @property
    def test_cases(self) -> dict[str, type[unittest.TestCase]]:
        """
        List of test cases to be applied on the parent scope
        Example usage:
            globals().update(BackendTest(backend).test_cases)
        """
        test_cases = {}
        for category, items_map in self._filtered_test_items.items():
            test_case_name = f"OnnxBackend{category}Test"
            test_case = self._get_test_case(test_case_name)
            for name, item in sorted(items_map.items()):
                setattr(test_case, name, item.func)
            test_cases[test_case_name] = test_case
        return test_cases

    @property
    def test_suite(self) -> unittest.TestSuite:
        """
        TestSuite that can be run by TestRunner
        Example usage:
            unittest.TextTestRunner().run(BackendTest(backend).test_suite)
        """
        suite = unittest.TestSuite()
        # NOTE(review): `cl` is a class, so `cl.__class__.__name__` is the
        # metaclass name ("type") for every element — the sort key is
        # constant. `cl.__name__` was probably intended; confirm upstream.
        for case in sorted(
            self.test_cases.values(), key=lambda cl: cl.__class__.__name__
        ):
            suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(case))
        return suite

    # For backward compatibility (we used to expose `.tests`)
    @property
    def tests(self) -> type[unittest.TestCase]:
        """
        One single unittest.TestCase that hosts all the test functions
        Example usage:
            onnx_backend_tests = BackendTest(backend).tests
        """
        tests = self._get_test_case("OnnxBackendTest")
        # NOTE(review): the elements here are dicts, so the key
        # `cl.__class__.__name__` is "dict" for all of them — effectively an
        # unsorted iteration over dict values; confirm whether a meaningful
        # ordering was intended.
        for items_map in sorted(
            self._filtered_test_items.values(), key=lambda cl: cl.__class__.__name__
        ):
            for name, item in sorted(items_map.items()):
                setattr(tests, name, item.func)
        return tests

    @classmethod
    def assert_similar_outputs(
        cls,
        ref_outputs: Sequence[Any],
        outputs: Sequence[Any],
        rtol: float,
        atol: float,
    ) -> None:
        """Recursively compare backend *outputs* to *ref_outputs*: exact
        equality for object dtypes, allclose(rtol, atol) for numeric arrays."""
        np.testing.assert_equal(len(outputs), len(ref_outputs))
        for i in range(len(outputs)):
            if isinstance(outputs[i], (list, tuple)):
                # Sequence output: compare element-wise.
                for j in range(len(outputs[i])):
                    cls.assert_similar_outputs(
                        ref_outputs[i][j], outputs[i][j], rtol, atol
                    )
            else:
                np.testing.assert_equal(outputs[i].dtype, ref_outputs[i].dtype)
                if ref_outputs[i].dtype == object:  # type: ignore[attr-defined]
                    np.testing.assert_array_equal(outputs[i], ref_outputs[i])
                else:
                    np.testing.assert_allclose(
                        outputs[i], ref_outputs[i], rtol=rtol, atol=atol
                    )

    @classmethod
    @retry_execute(3)
    def download_model(
        cls, model_test: TestCase, model_dir: str, models_dir: str
    ) -> None:
        """Download ``model_test.url`` and extract the archive into
        *models_dir*; retried up to 3 times via ``retry_execute``.

        NOTE(review): the *model_dir* parameter is unused here — extraction
        goes to *models_dir* and the archive is expected to contain the model
        directory. Also, ``extractall`` on an untrusted tar is vulnerable to
        path traversal; confirm the download source is trusted.
        """
        # On Windows, NamedTemporaryFile can not be opened for a
        # second time
        download_file = tempfile.NamedTemporaryFile(delete=False)
        try:
            download_file.close()
            assert model_test.url
            print(
                f"Start downloading model {model_test.model_name} from {model_test.url}"
            )
            urlretrieve(model_test.url, download_file.name)
            print("Done")
            with tarfile.open(download_file.name) as t:
                t.extractall(models_dir)
        except Exception as e:
            print(f"Failed to prepare data for model {model_test.model_name}: {e}")
            raise
        finally:
            os.remove(download_file.name)

    @classmethod
    def prepare_model_data(cls, model_test: TestCase) -> str:
        """Return the local directory containing ``model.onnx`` for
        *model_test*, downloading it (and archiving any stale copy as
        ``<dir>.old.N``) when it is not already cached under ONNX_HOME."""
        onnx_home = os.path.expanduser(
            os.getenv("ONNX_HOME", os.path.join("~", ".onnx"))
        )
        models_dir = os.getenv("ONNX_MODELS", os.path.join(onnx_home, "models"))
        model_dir: str = os.path.join(models_dir, model_test.model_name)
        if not os.path.exists(os.path.join(model_dir, "model.onnx")):
            if os.path.exists(model_dir):
                # Cached copy exists but is incomplete: move it aside to the
                # first free ".old.N" suffix instead of deleting it.
                bi = 0
                while True:
                    dest = f"{model_dir}.old.{bi}"
                    if os.path.exists(dest):
                        bi += 1
                        continue
                    shutil.move(model_dir, dest)
                    break
            os.makedirs(model_dir)
            cls.download_model(
                model_test=model_test, model_dir=model_dir, models_dir=models_dir
            )
        return model_dir

    def _add_test(
        self,
        category: str,
        test_name: str,
        test_func: Callable[..., Any],
        report_item: list[ModelProto | NodeProto | None],
        devices: Iterable[str] = ("CPU", "CUDA"),
    ) -> None:
        """Register *test_func* once per device, skipping devices the backend
        does not support; names become ``<test_name>_<device>``."""
        # We don't prepend the 'test_' prefix to improve greppability
        if not test_name.startswith("test_"):
            raise ValueError(f"Test name must start with test_: {test_name}")

        def add_device_test(device: str) -> None:
            device_test_name = f"{test_name}_{device.lower()}"
            if device_test_name in self._test_items[category]:
                raise ValueError(
                    f'Duplicated test name "{device_test_name}" in category "{category}"'
                )

            @unittest.skipIf(  # type: ignore
                not self.backend.supports_device(device),
                f"Backend doesn't support device {device}",
            )
            @functools.wraps(test_func)
            def device_test_func(*args: Any, **kwargs: Any) -> Any:
                try:
                    return test_func(*args, device=device, **kwargs)
                except BackendIsNotSupposedToImplementIt as e:
                    # hacky verbose reporting
                    if "-v" in sys.argv or "--verbose" in sys.argv:
                        print(f"Test {device_test_name} is effectively skipped: {e}")

            self._test_items[category][device_test_name] = TestItem(
                device_test_func, report_item
            )

        for device in devices:
            add_device_test(device)

    @staticmethod
    def generate_dummy_data(
        x: ValueInfoProto, seed: int = 0, name: str = "", random: bool = False
    ) -> np.ndarray:
        """
        Generates a random tensor based on the input definition.

        Only float (elem_type == 1) tensors are supported. With
        ``random=False`` the data is a deterministic ramp in [0, 1);
        unknown (symbolic) dimensions are replaced by 1.
        """
        # NOTE(review): an unset protobuf submessage is falsy, which is what
        # this truthiness check relies on — confirm it catches non-tensor
        # types as intended.
        if not x.type.tensor_type:
            raise NotImplementedError(
                f"Input expected to have tensor type. "
                f"Unable to generate random data for model {name!r} and input {x}."
            )
        if x.type.tensor_type.elem_type != 1:
            raise NotImplementedError(
                f"Currently limited to float tensors. "
                f"Unable to generate random data for model {name!r} and input {x}."
            )
        shape = tuple(
            d.dim_value if d.HasField("dim_value") else 1
            for d in x.type.tensor_type.shape.dim
        )
        if random:
            gen = np.random.default_rng(seed=seed)
            return gen.random(shape, np.float32)
        n = np.prod(shape)
        return (np.arange(n).reshape(shape) / n).astype(np.float32)

    def _add_model_test(self, model_test: TestCase, kind: str) -> None:
        """Build and register the runtime test function for *model_test*.

        Handles three sources of data: lightweight models stored in the repo
        (inputs/outputs are generated or loaded next to the model), legacy
        ``.npz`` test data, and ``test_data_set*`` protobuf directories.
        """
        # model is loaded at runtime, note sometimes it could even
        # never loaded if the test skipped
        model_marker: list[ModelProto | NodeProto | None] = [None]

        def run(test_self: Any, device: str) -> None:
            if model_test.url is not None and model_test.url.startswith(
                "onnx/backend/test/data/light/"
            ):
                # testing local files
                model_pb_path = os.path.normpath(
                    os.path.join(
                        os.path.dirname(__file__),
                        "..",
                        "..",
                        "..",
                        "..",
                        model_test.url,
                    )
                )
                if not os.path.exists(model_pb_path):
                    raise FileNotFoundError(f"Unable to find model {model_pb_path!r}.")
                onnx_home = os.path.expanduser(
                    os.getenv("ONNX_HOME", os.path.join("~", ".onnx"))
                )
                models_dir = os.getenv(
                    "ONNX_MODELS", os.path.join(onnx_home, "models", "light")
                )
                model_dir: str = os.path.join(models_dir, model_test.model_name)
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                use_dummy = True
            else:
                if model_test.model_dir is None:
                    model_dir = self.prepare_model_data(model_test)
                else:
                    model_dir = model_test.model_dir
                model_pb_path = os.path.join(model_dir, "model.onnx")
                use_dummy = False
            # ai.onnx.ml models cannot run when onnx was built without ML support.
            if not ONNX_ML and "ai_onnx_ml" in model_dir:
                return
            model = onnx.load(model_pb_path)
            model_marker[0] = model
            if (
                hasattr(self.backend, "is_compatible")
                and callable(self.backend.is_compatible)
                and not self.backend.is_compatible(model)
            ):
                raise unittest.SkipTest("Not compatible with backend")
            prepared_model = self.backend.prepare(model, device)
            assert prepared_model is not None
            if use_dummy:
                # When the backend test goes through a test involving a
                # model stored in onnx/backend/test/data/light,
                # this function generates expected output coming from
                # from ReferenceEvaluator run with random inputs.
                # A couple of models include many Conv operators and the
                # python implementation is slow (such as test_bvlc_alexnet).
                with open(model_pb_path, "rb") as f:
                    onx = onnx.load(f)
                test_data_set = os.path.join(model_dir, "test_data_set_0")
                if not os.path.exists(test_data_set):
                    os.mkdir(test_data_set)
                feeds = {}
                inits = {i.name for i in onx.graph.initializer}
                n_input = 0
                inputs = []
                for i in range(len(onx.graph.input)):
                    # Graph inputs that are also initializers are optional;
                    # do not generate feeds for them.
                    if onx.graph.input[i].name in inits:
                        continue
                    name = os.path.join(test_data_set, f"input_{n_input}.pb")
                    inputs.append(name)
                    n_input += 1
                    x = onx.graph.input[i]
                    value = self.generate_dummy_data(
                        x, seed=0, name=model_test.model_name, random=False
                    )
                    feeds[x.name] = value
                    with open(name, "wb") as f:
                        f.write(onnx.numpy_helper.from_array(value).SerializeToString())
                # loads expected output if any available
                prefix = os.path.splitext(model_pb_path)[0]
                expected_outputs = []
                for i in range(len(onx.graph.output)):
                    name = f"{prefix}_output_{i}.pb"
                    if os.path.exists(name):
                        expected_outputs.append(name)
                        continue
                    # All outputs must be present; otherwise fall back to
                    # computing them with the reference evaluator.
                    expected_outputs = None
                    break
                if expected_outputs is None:
                    ref = onnx.reference.ReferenceEvaluator(onx)
                    outputs = ref.run(None, feeds)
                    for i, o in enumerate(outputs):
                        name = os.path.join(test_data_set, f"output_{i}.pb")
                        with open(name, "wb") as f:
                            f.write(onnx.numpy_helper.from_array(o).SerializeToString())
                else:
                    for i, o in enumerate(expected_outputs):
                        name = os.path.join(test_data_set, f"output_{i}.pb")
                        shutil.copy(o, name)
            else:
                # TODO after converting all npz files to protobuf, we can delete this.
                for test_data_npz in glob.glob(
                    os.path.join(model_dir, "test_data_*.npz")
                ):
                    test_data = np.load(test_data_npz, encoding="bytes")
                    inputs = list(test_data["inputs"])
                    outputs = list(prepared_model.run(inputs))
                    ref_outputs = test_data["outputs"]
                    self.assert_similar_outputs(
                        ref_outputs, outputs, rtol=model_test.rtol, atol=model_test.atol
                    )
            # Protobuf-based test data: one directory per data set.
            for test_data_dir in glob.glob(os.path.join(model_dir, "test_data_set*")):
                inputs = []
                inputs_num = len(glob.glob(os.path.join(test_data_dir, "input_*.pb")))
                for i in range(inputs_num):
                    input_file = os.path.join(test_data_dir, f"input_{i}.pb")
                    self._load_proto(input_file, inputs, model.graph.input[i].type)
                ref_outputs = []
                ref_outputs_num = len(
                    glob.glob(os.path.join(test_data_dir, "output_*.pb"))
                )
                for i in range(ref_outputs_num):
                    output_file = os.path.join(test_data_dir, f"output_{i}.pb")
                    self._load_proto(
                        output_file, ref_outputs, model.graph.output[i].type
                    )
                outputs = list(prepared_model.run(inputs))
                self.assert_similar_outputs(
                    ref_outputs, outputs, rtol=model_test.rtol, atol=model_test.atol
                )

        self._add_test(kind + "Model", model_test.name, run, model_marker)

    def _load_proto(
        self,
        proto_filename: str,
        target_list: list[np.ndarray | list[Any]],
        model_type_proto: TypeProto,
    ) -> None:
        """Deserialize *proto_filename* according to *model_type_proto*
        (sequence, tensor, or optional) and append the decoded value to
        *target_list*. Map/sparse-tensor types are not supported."""
        with open(proto_filename, "rb") as f:
            protobuf_content = f.read()
            if model_type_proto.HasField("sequence_type"):
                sequence = onnx.SequenceProto()
                sequence.ParseFromString(protobuf_content)
                target_list.append(numpy_helper.to_list(sequence))
            elif model_type_proto.HasField("tensor_type"):
                tensor = onnx.TensorProto()
                tensor.ParseFromString(protobuf_content)
                target_list.append(numpy_helper.to_array(tensor))
            elif model_type_proto.HasField("optional_type"):
                optional = onnx.OptionalProto()
                optional.ParseFromString(protobuf_content)
                target_list.append(numpy_helper.to_optional(optional))  # type: ignore[arg-type]
            else:
                print(
                    "Loading proto of that specific type (Map/Sparse Tensor) is currently not supported"
                )
| 19,487 | 39.181443 | 104 | py |
onnx | onnx-main/onnx/test/reference_evaluator_test.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# type: ignore
# pylint: disable=C3001,C0302,C0415,R0904,R0913,R0914,R0915,W0221,W0707
"""
You can run a specific test by using the following syntax.
::
python onnx/test/reference_evaluator_test.py TestReferenceEvaluator.test_function_attribute_nested_graph
"""
import itertools
import math
import unittest
from contextlib import redirect_stdout
from functools import wraps
from io import StringIO
from os import getenv
from textwrap import dedent
from typing import Sequence, Tuple
import numpy as np
import parameterized
from numpy.testing import assert_allclose
from onnx import AttributeProto, FunctionProto, ModelProto, TensorProto, checker, parser
from onnx.backend.test.case.node.roialign import get_roi_align_input_values
from onnx.checker import check_model
from onnx.defs import onnx_opset_version
from onnx.helper import (
float32_to_bfloat16,
float32_to_float8e4m3,
float32_to_float8e5m2,
make_function,
make_graph,
make_model,
make_model_gen_version,
make_node,
make_opsetid,
make_sequence_type_proto,
make_tensor,
make_tensor_sequence_value_info,
make_tensor_value_info,
make_value_info,
)
from onnx.numpy_helper import float8e4m3_to_float32, float8e5m2_to_float32, from_array
from onnx.reference import ReferenceEvaluator
from onnx.reference.op_run import OpRun, OpRunExpand
from onnx.reference.ops import load_op
from onnx.reference.ops._op_common_indices import _get_indices, _is_out
from onnx.reference.ops._op_list import Celu
from onnx.reference.ops.aionnx_preview_training._op_list import Adam
from onnx.reference.ops.op_celu import _vcelu1
from onnx.reference.ops.op_col2im import (
_col2im_naive_implementation_2d,
col2im_naive_implementation,
)
from onnx.reference.ops.op_conv import Conv, _conv_implementation
from onnx.reference.ops_optimized import Conv as ConvOptimized
from onnx.reference.ops_optimized.op_conv_optimized import _conv_implementation_im2col
# TODO (https://github.com/microsoft/onnxruntime/issues/14932): Get max supported version from onnxruntime directly
# For now, bump the version in CIs whenever there is a new onnxruntime release
# Highest IR / default-domain opset versions onnxruntime is assumed to
# support; ORT-based cross-checks are skipped for newer models.
# Overridable via environment variables for newer onnxruntime releases.
ORT_MAX_IR_SUPPORTED_VERSION = int(getenv("ORT_MAX_IR_SUPPORTED_VERSION", "8"))
ORT_MAX_ONNX_OPSET_SUPPORTED_VERSION = int(
    getenv("ORT_MAX_ONNX_OPSET_SUPPORTED_VERSION", "18")
)
def skip_if_no_onnxruntime(fn):
    """Decorator: skip the wrapped test when onnxruntime is not importable."""

    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            import onnxruntime  # pylint: disable=W0611
        except ImportError:
            raise unittest.SkipTest("onnxruntime not installed") from None
        else:
            # Only needed the import check; drop the module binding.
            del onnxruntime
        fn(*args, **kwargs)

    return wrapper
def skip_if_no_torch(fn):
    """Decorator: skip the wrapped test when torch is not importable."""

    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            import torch  # pylint: disable=W0611
        except ImportError:
            raise unittest.SkipTest("torch not installed") from None
        else:
            # Only needed the import check; drop the module binding.
            del torch
        fn(*args, **kwargs)

    return wrapper
def skip_if_no_torchvision(fn):
    """Decorator: skip the wrapped test when torchvision is not importable."""

    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            import torchvision  # pylint: disable=W0611
        except ImportError:
            raise unittest.SkipTest("torchvision not installed") from None
        else:
            # Only needed the import check; drop the module binding.
            del torchvision
        fn(*args, **kwargs)

    return wrapper
def make_sequence_value_info(name, elem_type, shape):
    """Build a sequence ValueInfoProto; *elem_type* is either an integer
    tensor dtype or a TypeProto describing the sequence elements."""
    if not isinstance(elem_type, int):
        return make_value_info(name, make_sequence_type_proto(elem_type), shape)
    return make_tensor_sequence_value_info(name, elem_type, shape)
def run_ort_inference(onnx_model):
    """Create an onnxruntime CPU InferenceSession for *onnx_model*, or
    return None when the model's IR or default-domain opset version exceeds
    what the installed onnxruntime supports."""
    import onnxruntime as ort

    # Default-domain opset version, falling back to the supported maximum
    # when the model declares none.
    domain_version = next(
        (
            opset.version
            for opset in onnx_model.opset_import
            if opset.domain in ("", "ai.onnx")
        ),
        ORT_MAX_ONNX_OPSET_SUPPORTED_VERSION,
    )
    # The new IR or opset version is not supported by onnxruntime yet
    too_new = (
        onnx_model.ir_version > ORT_MAX_IR_SUPPORTED_VERSION
        or domain_version > ORT_MAX_ONNX_OPSET_SUPPORTED_VERSION
    )
    if too_new:
        return None
    return ort.InferenceSession(
        onnx_model.SerializeToString(), providers=["CPUExecutionProvider"]
    )
def im2col_naive_implementation(data, kernel_shape, dilations, pads, strides):  # type: ignore
    """
    Naive implementation for `im2col`.
    :param data: input array (float), a single image without batch/channel axes
    :param kernel_shape: kernel shape (must be a tuple, same rank as data)
    :param dilations: dilations
    :param pads: pads, laid out as [begin_0..begin_{n-1}, end_0..end_{n-1}]
    :param strides: strides
    :return: array of shape output_spatial_shape + kernel_shape, with
        out-of-image positions zero-filled
    """
    if not isinstance(kernel_shape, tuple):
        raise TypeError(f"Unexpected type {type(kernel_shape)!r} for kernel_shape.")
    if len(data.shape) != len(kernel_shape):
        raise ValueError(f"Shape mismatch {data.shape!r} and {kernel_shape!r}.")
    n_dims = len(pads) // 2
    # (begin, end) padding pairs per spatial dimension.
    new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)])
    list_output_shape = list(data.shape + kernel_shape)
    for d in range(n_dims):
        # Effective (dilated) kernel extent along dimension d.
        kd = kernel_shape[d] + (kernel_shape[d] - 1) * (dilations[d] - 1)
        nd = int(
            ((list_output_shape[d] - kd + new_pads[d][0] + new_pads[d][1]) / strides[d])
            + 1
        )
        list_output_shape[d] = nd
    output_shape = tuple(list_output_shape)
    res = np.zeros(output_shape, dtype=data.dtype)
    kernel_size = np.prod(kernel_shape)
    res_size = np.prod(res.shape[:-n_dims])
    # Enumerate every (output position, kernel offset) pair and copy the
    # corresponding input element (or 0 when it falls in the padding).
    for i in range(res_size):
        i_res = _get_indices(i, res.shape[:-n_dims])
        t_res = tuple(i_res)
        for j in range(kernel_size):
            i_kernel = _get_indices(j, kernel_shape)
            t_kernel = tuple(i_kernel)
            # Map output position + kernel offset back to an input coordinate.
            i_img = i_res * strides - new_pads[:, 0] + i_kernel * dilations
            t_img = tuple(i_img)
            if _is_out(t_img, data.shape):
                res[t_res + t_kernel] = 0
            else:
                res[t_res + t_kernel] = data[tuple(t_img)]
    return res
def im2col(
    img: np.ndarray,
    kernel_shape: Tuple[int, ...],
    dilations: Sequence[int],
    pads: Sequence[int],
    strides: Sequence[int],
) -> np.ndarray:
    """Apply the naive im2col to every (batch, channel) slice of *img* and
    flatten the trailing kernel axes into a single column axis."""
    res = None
    for batch, channel in itertools.product(range(img.shape[0]), range(img.shape[1])):
        patch = im2col_naive_implementation(
            img[batch, channel, ...], kernel_shape, dilations, pads, strides
        )
        if res is None:
            # Allocate once, now that the per-slice output shape is known.
            res = np.empty(img.shape[:2] + patch.shape, dtype=img.dtype)
        res[batch, channel, ...] = patch
    flat_shape = res.shape[: -len(kernel_shape)] + (-1,)  # type: ignore
    return res.reshape(flat_shape)  # type: ignore
class TestReferenceEvaluator(unittest.TestCase):
m2_def = """
<
ir_version: 7,
opset_import: [ "": 10, "com.microsoft": 1]
>
agraph (float[N, M] B01, float[N, M] B11, float[N, M] B21) => (float[N, M] D0)
{
C0 = Add(B01, B11)
C1 = Sub(B11, B21)
D0 = Mul(C0, C1)
}
"""
@staticmethod
def _load_model(m_def: str) -> ModelProto:
"""
Parses a model from a string representation, including checking
the model for correctness
"""
m = parser.parse_model(m_def)
checker.check_model(m)
return m
    @staticmethod
    def _linear_regression(clip=False, opset=None, min_value=-1.0, max_value=1.0):
        """Build a MatMul+Add (optionally Clip) model and a numpy reference
        lambda. With ``opset < 11`` Clip bounds are attributes; otherwise
        they are optional initializer inputs. Returns ``(model, f)``."""
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        A = make_tensor_value_info("A", TensorProto.FLOAT, [None, None])
        B = make_tensor_value_info("B", TensorProto.FLOAT, [None, None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        node1 = make_node("MatMul", ["X", "A"], ["XA"])
        if clip:
            node2 = make_node("Add", ["XA", "B"], ["Y_clip"])
            if opset is not None and opset < 11:
                # Clip-6 style: min/max passed as node attributes.
                if min_value:
                    if max_value:
                        node3 = make_node(
                            "Clip", ["Y_clip"], ["Y"], min=min_value, max=max_value
                        )
                    else:
                        node3 = make_node("Clip", ["Y_clip"], ["Y"], min=min_value)
                elif max_value:
                    node3 = make_node("Clip", ["Y_clip"], ["Y"], max=max_value)
                else:
                    node3 = make_node("Clip", ["Y_clip"], ["Y"])
                graph = make_graph([node1, node2, node3], "lr", [X, A, B], [Y])
            else:
                # Clip-11 style: min/max are optional inputs backed by
                # initializers; "" marks an absent optional input.
                mi = (
                    from_array(np.array([min_value], dtype=np.float32), name="mi")
                    if min_value
                    else None
                )
                ma = (
                    from_array(np.array([max_value], dtype=np.float32), name="ma")
                    if max_value
                    else None
                )
                inputs = ["Y_clip", "mi" if mi else "", "ma" if ma else ""]
                node3 = make_node("Clip", inputs, ["Y"])
                initializer = [_ for _ in [mi, ma] if _]
                graph = make_graph(
                    [node1, node2, node3], "lr", [X, A, B], [Y], initializer=initializer
                )
            # NOTE(review): `a @ a` looks like it should be `x @ a`; the tests
            # feed `a` for input "X" as well, so the two currently agree —
            # confirm intent before changing either side.
            f = lambda x, a, b: np.clip(a @ a + b, min_value, max_value)  # noqa: E731
        else:
            node2 = make_node("Add", ["XA", "B"], ["Y"])
            graph = make_graph([node1, node2], "lr", [X, A, B], [Y])
            # NOTE(review): same `a @ a` vs `x @ a` question as above.
            f = lambda x, a, b: a @ a + b  # noqa: E731
        if opset is None:
            onnx_model = make_model(graph)
        else:
            onnx_model = make_model(graph, opset_imports=[make_opsetid("", opset)])
        try:
            check_model(onnx_model)
        except Exception as e:
            raise AssertionError(f"checker fails for\n{onnx_model}") from e
        return onnx_model, f
def test_reference_evaluator_exceptions(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
with self.assertRaises(TypeError):
ReferenceEvaluator(X)
def test_reference_evaluator_no_attribute(self):
m = TestReferenceEvaluator._load_model(TestReferenceEvaluator.m2_def)
checker.check_model(m)
sess = ReferenceEvaluator(m)
self.assertEqual(sess.input_names, ["B01", "B11", "B21"])
self.assertEqual(sess.output_names, ["D0"])
self.assertEqual(sess.opsets, {"": 10, "com.microsoft": 1})
x = np.array([[0, 1], [2, 3]], dtype=np.float32)
y = np.array([[4, 5], [6, 7]], dtype=np.float32)
z = np.array([[-4, -5], [-6, -7]], dtype=np.float32)
res = sess.run(None, {"B01": x, "B11": y, "B21": z})[0]
expected = (x + y) * (y - z)
assert_allclose(expected, res)
def test_reference_evaluator_no_attribute_bytes(self):
m = TestReferenceEvaluator._load_model(TestReferenceEvaluator.m2_def)
checker.check_model(m)
sess = ReferenceEvaluator(m.SerializeToString())
self.assertEqual(sess.input_names, ["B01", "B11", "B21"])
self.assertEqual(sess.output_names, ["D0"])
self.assertEqual(sess.opsets, {"": 10, "com.microsoft": 1})
x = np.array([[0, 1], [2, 3]], dtype=np.float32)
y = np.array([[4, 5], [6, 7]], dtype=np.float32)
z = np.array([[-4, -5], [-6, -7]], dtype=np.float32)
res = sess.run(None, {"B01": x, "B11": y, "B21": z})[0]
expected = (x + y) * (y - z)
assert_allclose(expected, res)
    def test_reference_evaluator_no_attribute_verbose(self):
        """Check the exact stdout trace produced at verbosity levels 2, 3, 4
        and 15 (node names, value summaries, full values, begin/done run
        markers)."""
        m = TestReferenceEvaluator._load_model(TestReferenceEvaluator.m2_def)
        x = np.array([[0, 1], [2, 3]], dtype=np.float32)
        y = np.array([[4, 5], [6, 7]], dtype=np.float32)
        z = np.array([[-4, -5], [-6, -7]], dtype=np.float32)

        with self.subTest(level=2):
            # Level 2: one line per executed node.
            sess = ReferenceEvaluator(m, verbose=2)
            stdout = StringIO()
            with redirect_stdout(stdout):
                sess.run(None, {"B01": x, "B11": y, "B21": z})
            out = stdout.getvalue()
            log = "Add(B01, B11) -> C0\nSub(B11, B21) -> C1\nMul(C0, C1) -> D0\n"
            self.assertEqual(log, out)

        with self.subTest(level=3):
            # Level 3: adds dtype/shape/range summaries for every value.
            sess = ReferenceEvaluator(m, verbose=3)
            stdout = StringIO()
            with redirect_stdout(stdout):
                sess.run(None, {"B01": x, "B11": y, "B21": z})
            out = stdout.getvalue()
            log = dedent(
                """
                +I B01: float32:(2, 2) in [0.0, 3.0]
                +I B11: float32:(2, 2) in [4.0, 7.0]
                +I B21: float32:(2, 2) in [-7.0, -4.0]
                Add(B01, B11) -> C0
                 + C0: float32:(2, 2) in [4.0, 10.0]
                Sub(B11, B21) -> C1
                 + C1: float32:(2, 2) in [8.0, 14.0]
                Mul(C0, C1) -> D0
                 + D0: float32:(2, 2) in [32.0, 140.0]
                """
            ).lstrip("\n")
            self.assertEqual(log, out)

        with self.subTest(level=4):
            # Level 4: full value listings instead of ranges.
            sess = ReferenceEvaluator(m, verbose=4)
            stdout = StringIO()
            with redirect_stdout(stdout):
                sess.run(None, {"B01": x, "B11": y, "B21": z})
            out = stdout.getvalue()
            log = dedent(
                """
                +I B01: float32:(2, 2):[0.0, 1.0, 2.0, 3.0]
                +I B11: float32:(2, 2):[4.0, 5.0, 6.0, 7.0]
                +I B21: float32:(2, 2):[-4.0, -5.0, -6.0, -7.0]
                Add(B01, B11) -> C0
                 + C0: float32:(2, 2):[4.0, 6.0, 8.0, 10.0]
                Sub(B11, B21) -> C1
                 + C1: float32:(2, 2):[8.0, 10.0, 12.0, 14.0]
                Mul(C0, C1) -> D0
                 + D0: float32:(2, 2):[32.0, 60.0, 96.0, 140.0]
                """
            ).lstrip("\n")
            self.assertEqual(log, out)

        with self.subTest(level=15):
            # Level 15: additionally traces op begin/done markers.
            sess = ReferenceEvaluator(m, verbose=15)
            stdout = StringIO()
            with redirect_stdout(stdout):
                sess.run(None, {"B01": x, "B11": y, "B21": z})
            out = stdout.getvalue()
            log = dedent(
                """
                +I B01: float32:(2, 2):[0.0, 1.0, 2.0, 3.0]
                +I B11: float32:(2, 2):[4.0, 5.0, 6.0, 7.0]
                +I B21: float32:(2, 2):[-4.0, -5.0, -6.0, -7.0]
                Add(B01, B11) -> C0
                -- begin Add.run(2 inputs)
                -- done Add.run -> 1 outputs
                 + C0: float32:(2, 2):[4.0, 6.0, 8.0, 10.0]
                Sub(B11, B21) -> C1
                -- begin Sub.run(2 inputs)
                -- done Sub.run -> 1 outputs
                 + C1: float32:(2, 2):[8.0, 10.0, 12.0, 14.0]
                Mul(C0, C1) -> D0
                -- begin Mul.run(2 inputs)
                -- done Mul.run -> 1 outputs
                 + D0: float32:(2, 2):[32.0, 60.0, 96.0, 140.0]
                """
            ).lstrip("\n")
            self.assertEqual(log, out)
    def test_reference_evaluator_lr(self):
        """Run the plain (no-clip) linear-regression model and compare with
        the reference lambda.

        NOTE(review): `a` is fed for both "X" and "A" (the local `x` is not
        used in the feed dict), which matches the `a @ a + b` reference
        lambda in _linear_regression — confirm whether feeding `x` for "X"
        was intended.
        """
        lr, f = TestReferenceEvaluator._linear_regression()
        x = np.array([[0, 1], [2, 3]], dtype=np.float32)
        a = np.array([1, 1], dtype=np.float32)
        b = np.array([11], dtype=np.float32)
        expected = f(x, a, b)
        sess = ReferenceEvaluator(lr)
        got = sess.run(None, {"X": a, "A": a, "B": b})[0]
        assert_allclose(expected, got)
def test_reference_evaluator_lr_clip(self):
with self.subTest(opt="min+max"):
lr, f = TestReferenceEvaluator._linear_regression(clip=True)
x = np.array([[0, 1], [2, 3]], dtype=np.float32)
a = np.array([1, 1], dtype=np.float32)
b = np.array([11], dtype=np.float32)
expected = f(x, a, b)
sess = ReferenceEvaluator(lr)
last_node = sess.rt_nodes_[-1]
self.assertEqual(last_node.__class__.__name__, "Clip_11")
got = sess.run(None, {"X": a, "A": a, "B": b})[0]
assert_allclose(expected, got)
with self.subTest(opt="max"):
lr, f = TestReferenceEvaluator._linear_regression(clip=True, min_value=None)
x = np.array([[0, 1], [2, 3]], dtype=np.float32)
a = np.array([1, 1], dtype=np.float32)
b = np.array([11], dtype=np.float32)
expected = f(x, a, b)
sess = ReferenceEvaluator(lr)
last_node = sess.rt_nodes_[-1]
self.assertEqual(last_node.__class__.__name__, "Clip_11")
got = sess.run(None, {"X": a, "A": a, "B": b})[0]
assert_allclose(expected, got)
with self.subTest(opt="min"):
lr, f = TestReferenceEvaluator._linear_regression(clip=True, max_value=None)
x = np.array([[0, 1], [2, 3]], dtype=np.float32)
a = np.array([1, 1], dtype=np.float32)
b = np.array([11], dtype=np.float32)
expected = f(x, a, b)
sess = ReferenceEvaluator(lr)
last_node = sess.rt_nodes_[-1]
self.assertEqual(last_node.__class__.__name__, "Clip_11")
got = sess.run(None, {"X": a, "A": a, "B": b})[0]
assert_allclose(expected, got)
def test_reference_evaluator_lr_clip_6(self):
with self.subTest(opt="min+max"):
lr, f = TestReferenceEvaluator._linear_regression(clip=True, opset=10)
x = np.array([[0, 1], [2, 3]], dtype=np.float32)
a = np.array([1, 1], dtype=np.float32)
b = np.array([11], dtype=np.float32)
expected = f(x, a, b)
sess = ReferenceEvaluator(lr)
last_node = sess.rt_nodes_[-1]
self.assertEqual(last_node.__class__.__name__, "Clip_6")
self.assertEqual(last_node.min, -1)
self.assertEqual(last_node.max, 1)
got = sess.run(None, {"X": a, "A": a, "B": b})[0]
assert_allclose(expected, got)
with self.subTest(opt="max"):
lr, f = TestReferenceEvaluator._linear_regression(
clip=True, opset=10, min_value=None
)
x = np.array([[0, 1], [2, 3]], dtype=np.float32)
a = np.array([1, 1], dtype=np.float32)
b = np.array([11], dtype=np.float32)
expected = f(x, a, b)
sess = ReferenceEvaluator(lr)
last_node = sess.rt_nodes_[-1]
self.assertEqual(last_node.__class__.__name__, "Clip_6")
self.assertEqual(last_node.max, 1)
self.assertEqual(last_node.min, -3.4028234663852886e38)
got = sess.run(None, {"X": a, "A": a, "B": b})[0]
assert_allclose(expected, got)
with self.subTest(opt="min"):
lr, f = TestReferenceEvaluator._linear_regression(
clip=True, opset=10, max_value=None
)
x = np.array([[0, 1], [2, 3]], dtype=np.float32)
a = np.array([1, 1], dtype=np.float32)
b = np.array([11], dtype=np.float32)
expected = f(x, a, b)
sess = ReferenceEvaluator(lr)
last_node = sess.rt_nodes_[-1]
self.assertEqual(last_node.__class__.__name__, "Clip_6")
self.assertEqual(last_node.min, -1)
self.assertEqual(last_node.max, 3.4028234663852886e38)
got = sess.run(None, {"X": a, "A": a, "B": b})[0]
assert_allclose(expected, got)
    def test_nested_local_functions(self):
        """A local function (`local.func`) that calls two other local
        functions (`local.f1`, `local.f2`), each an Identity, must act as an
        end-to-end identity."""
        m = parser.parse_model(
            """
            <
              ir_version: 8,
              opset_import: [ "" : 14, "local" : 1],
              producer_name: "test",
              producer_version: "1.0",
              model_version: 1,
              doc_string: "Test preprocessing model"
            >
            agraph (uint8[H, W, C] x) => (uint8[H, W, C] x_processed)
            {
                x_processed = local.func(x)
            }
            <
              opset_import: [ "" : 14 ],
              domain: "local",
              doc_string: "function 1"
            >
            f1 (x) => (y) {
                y = Identity(x)
            }
            <
              opset_import: [ "" : 14 ],
              domain: "local",
              doc_string: "function 2"
            >
            f2 (x) => (y) {
                y = Identity(x)
            }
            <
              opset_import: [ "" : 14, "local" : 1 ],
              domain: "local",
              doc_string: "Preprocessing function."
            >
            func (x) => (y) {
                x1 = local.f1(x)
                y = local.f2(x1)
            }
            """
        )
        sess = ReferenceEvaluator(m)
        x = np.array([0, 1, 3], dtype=np.uint8).reshape((1, 1, 3))
        result = sess.run(None, {"x": x})[0]
        expected = x
        assert_allclose(expected, result)
def test_reduce_sum_11(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("ReduceSum", ["X"], ["Y"], axes=[1], keepdims=1)
graph = make_graph([node1], "rs", [X], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 11)])
check_model(onnx_model)
x = np.arange(60).reshape((3, 4, 5)).astype(np.float32)
expected = x.sum(axis=1, keepdims=1)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": x})[0]
assert_allclose(expected, got)
def test_reduce_sum_square_11(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("ReduceSumSquare", ["X"], ["Y"], axes=[1], keepdims=1)
graph = make_graph([node1], "rs", [X], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 11)])
check_model(onnx_model)
x = np.arange(60).reshape((3, 4, 5)).astype(np.float32)
expected = (x * x).sum(axis=1, keepdims=1)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": x})[0]
assert_allclose(expected, got)
def test_reduce_sum_13(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
A = make_tensor_value_info("A", TensorProto.INT64, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("ReduceSum", ["X", "A"], ["Y"], keepdims=1)
graph = make_graph([node1], "rs", [X, A], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 13)])
check_model(onnx_model)
x = np.arange(60).reshape((3, 4, 5)).astype(np.float32)
a = np.array([1], dtype=np.int64)
expected = x.sum(axis=1, keepdims=1)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": x, "A": a})[0]
assert_allclose(expected, got)
    def test_reduce_sum_attribute(self):
        """A local function wrapping ReduceSum whose `keepdims` attribute is
        forwarded by reference (ref_attr_name) from the calling node."""
        opset = onnx_opset_version()
        new_domain = "custom"
        opset_imports = [make_opsetid("", opset), make_opsetid(new_domain, 1)]
        node = make_node("ReduceSum", ["X", "axis"], ["Y"])
        # Attribute forwarded from the function call site rather than fixed.
        att = AttributeProto()
        att.name = "keepdims"
        att.ref_attr_name = "keepdims"
        att.type = AttributeProto.INT
        node.attribute.append(att)
        my_reduce_sum = make_function(
            new_domain,
            "MyReduceSum",
            ["X", "axis"],
            ["Y"],
            [node],
            opset_imports,
            ["keepdims"],
        )
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        axis = make_tensor_value_info("axis", TensorProto.INT64, [None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        graph = make_graph(
            [
                make_node(
                    "MyReduceSum",
                    ["X", "axis"],
                    ["Y"],
                    domain=new_domain,
                    keepdims=1,
                ),
            ],
            "example",
            [X, axis],
            [Y],
        )
        onnx_model = make_model(
            graph, opset_imports=opset_imports, functions=[my_reduce_sum]
        )
        sess = ReferenceEvaluator(onnx_model)
        x = np.arange(6).reshape((3, 2)).astype(np.float32)
        a = np.array([-1], dtype=np.int64)
        result = sess.run(None, {"X": x, "axis": a})[0]
        expected = x.sum(axis=-1, keepdims=1)
        assert_allclose(expected, result)
def test_reduce_sum_square_18(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
A = make_tensor_value_info("A", TensorProto.INT64, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("ReduceSumSquare", ["X", "A"], ["Y"], keepdims=1)
graph = make_graph([node1], "rs", [X, A], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 18)])
check_model(onnx_model)
x = np.arange(60).reshape((3, 4, 5)).astype(np.float32)
a = np.array([1], dtype=np.int64)
expected = (x * x).sum(axis=1, keepdims=1)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": x, "A": a})[0]
assert_allclose(expected, got)
def test_reduce_sum_13_empty_axes(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
A = make_tensor_value_info("A", TensorProto.INT64, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("ReduceSum", ["X", "A"], ["Y"], keepdims=1)
graph = make_graph([node1], "rs", [X, A], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 13)])
check_model(onnx_model)
x = np.arange(60).reshape((3, 4, 5)).astype(np.float32)
a = np.array([], dtype=np.int64)
expected = x.sum(keepdims=1)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": x, "A": a})[0]
assert_allclose(expected, got)
def test_reduce_sum_square_18_empty_axes(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
A = make_tensor_value_info("A", TensorProto.INT64, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("ReduceSumSquare", ["X", "A"], ["Y"], keepdims=1)
graph = make_graph([node1], "rs", [X, A], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 18)])
check_model(onnx_model)
x = np.arange(60).reshape((3, 4, 5)).astype(np.float32)
a = np.array([], dtype=np.int64)
expected = (x * x).sum(keepdims=1)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": x, "A": a})[0]
assert_allclose(expected, got)
def test_reduce_sum_13_empty_axes_noop(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("ReduceSum", ["X"], ["Y"], keepdims=1, noop_with_empty_axes=1)
graph = make_graph([node1], "rs", [X], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 13)])
check_model(onnx_model)
x = np.arange(60).reshape((3, 4, 5)).astype(np.float32)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": x})[0]
assert_allclose(x, got)
def test_reduce_sum_square_18_empty_axes_noop(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node(
"ReduceSumSquare", ["X"], ["Y"], keepdims=1, noop_with_empty_axes=1
)
graph = make_graph([node1], "rs", [X], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 18)])
check_model(onnx_model)
x = np.arange(60).reshape((3, 4, 5)).astype(np.float32)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": x})[0]
assert_allclose(x * x, got)
def test_greater(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
Z = make_tensor_value_info("Z", TensorProto.FLOAT, [None])
node1 = make_node("Greater", ["X", "Y"], ["Z"])
graph = make_graph([node1], "g", [X, Y], [Z])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 13)])
check_model(onnx_model)
x = np.arange(4).reshape((2, 2)).astype(np.float32)
y = np.array([2], dtype=np.float32)
expected = x > y
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": x, "Y": y})[0]
assert_allclose(expected, got)
def test_node_proto(self):
node1 = make_node("Greater", ["X", "Y"], ["Z"])
x = np.arange(4).reshape((2, 2)).astype(np.float32)
y = np.array([2], dtype=np.float32)
expected = x > y
sess = ReferenceEvaluator(node1)
got = sess.run(None, {"X": x, "Y": y})[0]
assert_allclose(expected, got)
def test_greater_or_equal(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
Z = make_tensor_value_info("Z", TensorProto.FLOAT, [None])
node1 = make_node("GreaterOrEqual", ["X", "Y"], ["Z"])
graph = make_graph([node1], "g", [X, Y], [Z])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 13)])
check_model(onnx_model)
x = np.arange(4).reshape((2, 2)).astype(np.float32)
y = np.array([2], dtype=np.float32)
expected = x >= y
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": x, "Y": y})[0]
assert_allclose(expected, got)
    def test_if(self):
        """If node: returns [1] when any input element is > 0, else [0]."""
        # then-branch: constant [1]
        C = make_tensor_value_info("C", TensorProto.FLOAT, [None])
        bthen = make_node(
            "Constant",
            [],
            ["C"],
            value_floats=from_array(np.array([1], dtype=np.float32)),
        )
        bthen_body = make_graph([bthen], "gthen", [], [C])
        # else-branch: constant [0]
        C = make_tensor_value_info("C", TensorProto.FLOAT, [None])
        belse = make_node(
            "Constant",
            [],
            ["C"],
            value_floats=from_array(np.array([0], dtype=np.float32)),
        )
        belse_body = make_graph([belse], "gelse", [], [C])
        # condition: X > 0 (zero supplied as an initializer)
        zero = from_array(np.array([0], dtype=np.float32), name="zero")
        greater = make_node("Greater", ["X", "zero"], ["G"])
        node_if = make_node(
            "If",
            ["G"],
            ["Z"],
            then_branch=bthen_body,
            else_branch=belse_body,
        )
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        Z = make_tensor_value_info("Z", TensorProto.FLOAT, [None])
        graph = make_graph([greater, node_if], "g", [X], [Z], initializer=[zero])
        model_def = make_model(graph)
        sess = ReferenceEvaluator(model_def)
        self.assertEqual(str(sess), "ReferenceEvaluator(X) -> Z")
        x = np.array([1, 2], dtype=np.float32)
        got = sess.run(None, {"X": x})[0]
        assert_allclose(np.array([1], dtype=np.float32), got)
        x = np.array([-1, -2], dtype=np.float32)
        got = sess.run(None, {"X": x})[0]
        assert_allclose(np.array([0], dtype=np.float32), got)
    def test_if_function(self):
        """An If node wrapped inside a custom FunctionProto evaluates correctly."""
        then_out = make_tensor_value_info("then_out", TensorProto.FLOAT, [5])
        else_out = make_tensor_value_info("else_out", TensorProto.FLOAT, [5])
        x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
        y = np.array([5, 4, 3, 2, 1]).astype(np.float32)
        then_const_node = make_node(
            "Constant", inputs=[], outputs=["then_out"], value=from_array(x)
        )
        else_const_node = make_node(
            "Constant", inputs=[], outputs=["else_out"], value=from_array(y)
        )
        then_body = make_graph([then_const_node], "then_body", [], [then_out])
        else_body = make_graph([else_const_node], "else_body", [], [else_out])
        if_node = make_node(
            "If",
            inputs=["f_cond"],
            outputs=["f_res"],
            then_branch=then_body,
            else_branch=else_body,
        )
        # Build the function "custom.fn" containing only the If node.
        f = FunctionProto()
        f.domain = "custom"
        f.name = "fn"
        f.input.extend(["f_cond"])
        f.output.extend(["f_res"])
        f.node.extend([if_node])
        opset = onnx_opset_version()
        f.opset_import.extend([make_opsetid("", opset)])
        graph = make_graph(
            nodes=[make_node("fn", domain="custom", inputs=["cond"], outputs=["res"])],
            name="graph",
            inputs=[make_tensor_value_info("cond", TensorProto.BOOL, [])],
            outputs=[make_tensor_value_info("res", TensorProto.FLOAT, [5])],
        )
        m = make_model(
            graph,
            producer_name="test",
            opset_imports=[make_opsetid("", opset), make_opsetid("custom", 1)],
        )
        m.functions.extend([f])
        sess = ReferenceEvaluator(m)
        # cond=True selects the then-branch constant x.
        result = sess.run(None, {"cond": np.array(True)})
        expected = np.array([1, 2, 3, 4, 5], dtype=np.float32)
        assert_allclose(expected, result[0])
    def test_function_attribute(self):
        """A function-level TENSOR attribute ("bias") feeds a Constant node
        via ``ref_attr_name`` and is supplied by the calling node."""
        opset = onnx_opset_version()
        new_domain = "custom"
        opset_imports = [make_opsetid("", opset), make_opsetid(new_domain, 1)]
        # Constant whose "value" attribute is linked to function attribute "bias".
        cst = make_node("Constant", [], ["B"])
        att = AttributeProto()
        att.name = "value"
        att.ref_attr_name = "bias"
        att.type = AttributeProto.TENSOR
        cst.attribute.append(att)
        node1 = make_node("MatMul", ["X", "A"], ["XA"])
        node2 = make_node("Add", ["XA", "B"], ["Y"])
        linear_regression = make_function(
            new_domain,
            "LinearRegression",
            ["X", "A"],
            ["Y"],
            [cst, node1, node2],
            opset_imports,
            ["bias"],  # declared function attribute
        )
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        A = make_tensor_value_info("A", TensorProto.FLOAT, [None, None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        graph = make_graph(
            [
                make_node(
                    "LinearRegression",
                    ["X", "A"],
                    ["Y1"],
                    domain=new_domain,
                    bias=make_tensor("former_B", TensorProto.FLOAT, [1], [0.67]),
                ),
                make_node("Abs", ["Y1"], ["Y"]),
            ],
            "example",
            [X, A],
            [Y],
        )
        onnx_model = make_model(
            graph, opset_imports=opset_imports, functions=[linear_regression]
        )
        sess = ReferenceEvaluator(onnx_model)
        x = np.arange(6).reshape((3, 2)).astype(np.float32)
        a = np.array([1, -1], dtype=np.float32)
        result = sess.run(None, {"X": x, "A": a})[0]
        # |X @ A + bias| with bias = 0.67 as passed on the calling node.
        expected = np.abs(x @ a + 0.67)
        assert_allclose(expected, result)
    def test_function_attribute_nested_graph(self):
        """Function attributes referenced from inside an If subgraph:
        bias1 is used when min(X) > 0, bias2 otherwise."""
        opset = onnx_opset_version()
        new_domain = "custom"
        opset_imports = [make_opsetid("", opset), make_opsetid(new_domain, 1)]
        # then-branch constant linked to function attribute "bias1"
        cst1 = make_node("Constant", [], ["B1"])
        att = AttributeProto()
        att.name = "value"
        att.ref_attr_name = "bias1"
        att.type = AttributeProto.TENSOR
        cst1.attribute.append(att)
        # else-branch constant linked to function attribute "bias2"
        cst2 = make_node("Constant", [], ["B2"])
        att = AttributeProto()
        att.name = "value"
        att.ref_attr_name = "bias2"
        att.type = AttributeProto.TENSOR
        cst2.attribute.append(att)
        then_out = make_tensor_value_info("B1", TensorProto.FLOAT, [None])
        else_out = make_tensor_value_info("B2", TensorProto.FLOAT, [None])
        then_body = make_graph([cst1], "then_body", [], [then_out])
        else_body = make_graph([cst2], "else_body", [], [else_out])
        zero = make_node(
            "Constant",
            inputs=[],
            outputs=["zero"],
            value=from_array(np.array([0], dtype=np.float32)),
        )
        # condition: min(X) > 0 selects bias1, otherwise bias2
        mini = make_node("ReduceMin", ["X"], ["Xmin"])
        f_cond = make_node("Greater", ["Xmin", "zero"], ["f_cond"])
        if_node = make_node(
            "If",
            inputs=["f_cond"],
            outputs=["B"],
            then_branch=then_body,
            else_branch=else_body,
        )
        node1 = make_node("MatMul", ["X", "A"], ["XA"])
        node2 = make_node("Add", ["XA", "B"], ["Y"])
        linear_regression = make_function(
            new_domain,
            "LinearRegression",
            ["X", "A"],
            ["Y"],
            [zero, mini, f_cond, if_node, node1, node2],
            opset_imports,
            ["bias1", "bias2"],
        )
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        A = make_tensor_value_info("A", TensorProto.FLOAT, [None, None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        graph = make_graph(
            [
                make_node(
                    "LinearRegression",
                    ["X", "A"],
                    ["Y1"],
                    domain=new_domain,
                    bias1=make_tensor("former_B1", TensorProto.FLOAT, [1], [0.67]),
                    bias2=make_tensor("former_B2", TensorProto.FLOAT, [1], [777]),
                ),
                make_node("Abs", ["Y1"], ["Y"]),
            ],
            "example",
            [X, A],
            [Y],
        )
        onnx_model = make_model(
            graph, opset_imports=opset_imports, functions=[linear_regression]
        )
        check_model(onnx_model)
        sess = ReferenceEvaluator(onnx_model)
        self.assertEqual(sess.rt_nodes_[0].__class__.__name__, "OpFunction")
        self.assertEqual(
            sess.rt_nodes_[0].impl_.__class__.__name__, "ReferenceEvaluator"
        )
        # Verify the If runtime node reports that its subgraphs use linked attributes.
        fct = sess.rt_nodes_[0].impl_
        checked = False
        for node in fct.rt_nodes_:
            if node.__class__.__name__.startswith("If"):
                if not node.has_linked_attribute:
                    raise AssertionError(
                        f"Nested node {type(node)} declares no linked attribute "
                        f"but a subgraph does."
                    )
                checked = True
        if not checked:
            raise AssertionError(
                "No node 'If' was found, has_linked_attribute could not be checked."
            )
        x = np.arange(6).reshape((3, 2)).astype(np.float32)
        a = np.array([1, -1], dtype=np.float32)
        # x + 1 > 0 everywhere -> then-branch -> bias1 = 0.67
        result = sess.run(None, {"X": x + 1, "A": a})[0]
        expected = np.abs(x @ a + 0.67)
        assert_allclose(expected, result)
        # x - 10 < 0 somewhere -> else-branch -> bias2 = 777
        result = sess.run(None, {"X": x - 10, "A": a})[0]
        expected = np.abs(x @ a + 777)
        assert_allclose(expected, result)
    def test_function_attribute_nested_nested_graph(self):
        """Function attributes referenced from a doubly nested If:
        min(X) < 0 -> bias3; else min(X) > 100 -> bias1; else bias2."""
        opset = onnx_opset_version()
        new_domain = "custom"
        opset_imports = [make_opsetid("", opset), make_opsetid(new_domain, 1)]
        # first If
        cst1 = make_node("Constant", [], ["B1"])
        att = AttributeProto()
        att.name = "value"
        att.ref_attr_name = "bias1"
        att.type = AttributeProto.TENSOR
        cst1.attribute.append(att)
        cst2 = make_node("Constant", [], ["B2"])
        att = AttributeProto()
        att.name = "value"
        att.ref_attr_name = "bias2"
        att.type = AttributeProto.TENSOR
        cst2.attribute.append(att)
        then_out = make_tensor_value_info("B1", TensorProto.FLOAT, [None])
        else_out = make_tensor_value_info("B2", TensorProto.FLOAT, [None])
        then_body1 = make_graph([cst1], "then_body", [], [then_out])
        else_body1 = make_graph([cst2], "else_body", [], [else_out])
        # sub graph 2: tests Xmin > 100 to choose between bias1 and bias2
        c100 = make_node(
            "Constant",
            inputs=[],
            outputs=["c100"],
            value=from_array(np.array([100], dtype=np.float32)),
        )
        f_cond = make_node("Greater", ["Xmin", "c100"], ["f_cond_100"])
        if_node = make_node(
            "If",
            inputs=["f_cond_100"],
            outputs=["B4"],
            then_branch=then_body1,
            else_branch=else_body1,
        )
        # second If: then-branch yields bias3, else-branch holds the inner If
        cst3 = make_node("Constant", [], ["B3"])
        att = AttributeProto()
        att.name = "value"
        att.ref_attr_name = "bias3"
        att.type = AttributeProto.TENSOR
        cst3.attribute.append(att)
        then_out = make_tensor_value_info("B3", TensorProto.FLOAT, [None])
        then_body2 = make_graph([cst3], "then_body", [], [then_out])
        else_out = make_tensor_value_info("B4", TensorProto.FLOAT, [None])
        else_body2 = make_graph([c100, f_cond, if_node], "else_body", [], [else_out])
        # function: B = select(bias...) depending on min(X); Y = X @ A + B
        zero = make_node(
            "Constant",
            inputs=[],
            outputs=["zero"],
            value=from_array(np.array([0], dtype=np.float32)),
        )
        mini = make_node("ReduceMin", ["X"], ["Xmin"])
        f_cond = make_node("Less", ["Xmin", "zero"], ["f_cond_zero"])
        if_node = make_node(
            "If",
            inputs=["f_cond_zero"],
            outputs=["B"],
            then_branch=then_body2,
            else_branch=else_body2,
        )
        node1 = make_node("MatMul", ["X", "A"], ["XA"])
        node2 = make_node("Add", ["XA", "B"], ["Y"])
        linear_regression = make_function(
            new_domain,
            "LinearRegression",
            ["X", "A"],
            ["Y"],
            [zero, mini, f_cond, if_node, node1, node2],
            opset_imports,
            ["bias1", "bias2", "bias3"],
        )
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        A = make_tensor_value_info("A", TensorProto.FLOAT, [None, None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        graph = make_graph(
            [
                make_node(
                    "LinearRegression",
                    ["X", "A"],
                    ["Y1"],
                    domain=new_domain,
                    bias1=make_tensor("former_B1", TensorProto.FLOAT, [1], [0.67]),
                    bias2=make_tensor("former_B2", TensorProto.FLOAT, [1], [777]),
                    bias3=make_tensor("former_B3", TensorProto.FLOAT, [1], [-888]),
                ),
                make_node("Abs", ["Y1"], ["Y"]),
            ],
            "example",
            [X, A],
            [Y],
        )
        onnx_model = make_model(
            graph, opset_imports=opset_imports, functions=[linear_regression]
        )
        check_model(onnx_model)
        sess = ReferenceEvaluator(onnx_model)
        x = np.arange(6).reshape((3, 2)).astype(np.float32)
        a = np.array([1, -1], dtype=np.float32)
        # min(x + 1) = 1: not < 0, not > 100 -> bias2 = 777
        result = sess.run(None, {"X": x + 1, "A": a})[0]
        expected = np.abs(x @ a + 777)
        assert_allclose(expected, result)
        # min(x - 10) < 0 -> bias3 = -888
        result = sess.run(None, {"X": x - 10, "A": a})[0]
        expected = np.abs(x @ a - 888)
        assert_allclose(expected, result)
        # min(x + 1000) > 100 -> bias1 = 0.67
        result = sess.run(None, {"X": x + 1000, "A": a})[0]
        expected = np.abs(x @ a + 0.67)
        assert_allclose(expected, result)
    def test_custom_node(self):
        """Registration rules for custom operators passed via ``new_ops``.

        Exercises the three failure modes (unknown op, class not derived from
        OpRun, missing op_domain) before running a correctly declared one.
        """
        class _InvAlpha:
            # Not an OpRun subclass: registration must raise TypeError.
            op_domain = "custom"
            def __init__(self, onnx_node, run_params):  # type: ignore
                self.onnx_node = onnx_node
                self.run_params = run_params
            def _run(self, x):  # type: ignore
                return (1 / (x + self.alpha),)
        class InvAlpha2(OpRun):
            # Missing op_domain: the op cannot be resolved in domain "custom".
            def _run(self, x):  # type: ignore
                return (1 / (x + self.alpha),)
        class InvAlpha(OpRun):
            # Valid custom op: computes 1 / (x + alpha).
            op_domain = "custom"
            def _run(self, x, alpha=None):  # type: ignore
                alpha = alpha or self.alpha  # type: ignore
                return (1 / (x + alpha),)
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        node1 = make_node("InvAlpha", ["X"], ["Y"], alpha=0.5, domain="custom")
        graph = make_graph([node1], "rs", [X], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("custom", 1)])
        x = np.arange(60).reshape((3, 4, 5)).astype(np.float32) + 1
        # No implementation registered at all.
        with self.assertRaises(NotImplementedError):
            ReferenceEvaluator(onnx_model)
        node1 = make_node("_InvAlpha", ["X"], ["Y"], alpha=0.5, domain="custom")
        graph = make_graph([node1], "rs", [X], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("custom", 1)])
        with self.assertRaises(TypeError):
            ReferenceEvaluator(onnx_model, new_ops=[_InvAlpha])
        node1 = make_node("InvAlpha2", ["X"], ["Y"], alpha=0.5, domain="custom")
        graph = make_graph([node1], "rs", [X], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("custom", 1)])
        with self.assertRaises(NotImplementedError):
            ReferenceEvaluator(onnx_model, new_ops=[InvAlpha2])
        node1 = make_node("InvAlpha", ["X"], ["Y"], alpha=0.5, domain="custom")
        graph = make_graph([node1], "rs", [X], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("custom", 1)])
        # Duplicate entries in new_ops are tolerated.
        sess = ReferenceEvaluator(onnx_model, new_ops=[InvAlpha, InvAlpha])
        got = sess.run(None, {"X": x})[0]
        expected = 1 / (x + 0.5)
        assert_allclose(expected, got)
    def test_loop(self):
        """Loop + ConcatFromSequence producing all prefixes of a vector."""
        # Given a tensor x of values [x1, ..., xN],
        # Return a sequence of tensors of
        # [[x1], [x1, x2], ..., [x1, ..., xN]]
        cond_in = make_tensor_value_info("cond_in", TensorProto.BOOL, [])
        cond_out = make_tensor_value_info("cond_out", TensorProto.BOOL, [])
        iter_count = make_tensor_value_info("iter_count", TensorProto.INT64, [])
        seq_in = make_tensor_sequence_value_info("seq_in", TensorProto.FLOAT, None)
        seq_out = make_tensor_sequence_value_info("seq_out", TensorProto.FLOAT, None)
        x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
        # Loop body: slice_out = x[0 : iter_count + 1]; append it to the sequence.
        x_const_node = make_node(
            "Constant",
            inputs=[],
            outputs=["x"],
            value=make_tensor(
                name="const_tensor_x",
                data_type=TensorProto.FLOAT,
                dims=x.shape,
                vals=x.flatten().astype(float),
            ),
        )
        one_const_node = make_node(
            "Constant",
            inputs=[],
            outputs=["one"],
            value=make_tensor(
                name="const_tensor_one",
                data_type=TensorProto.INT64,
                dims=(),
                vals=[1],
            ),
        )
        zero_const_node = make_node(
            "Constant",
            inputs=[],
            outputs=["slice_start"],
            value=make_tensor(
                name="const_tensor_zero",
                data_type=TensorProto.INT64,
                dims=(1,),
                vals=[0],
            ),
        )
        axes_node = make_node(
            "Constant",
            inputs=[],
            outputs=["axes"],
            value=make_tensor(
                name="const_tensor_axes",
                data_type=TensorProto.INT64,
                dims=(),
                vals=[0],
            ),
        )
        add_node = make_node("Add", inputs=["iter_count", "one"], outputs=["end"])
        end_unsqueeze_node = make_node(
            "Unsqueeze", inputs=["end", "axes"], outputs=["slice_end"]
        )
        slice_node = make_node(
            "Slice", inputs=["x", "slice_start", "slice_end"], outputs=["slice_out"]
        )
        insert_node = make_node(
            "SequenceInsert", inputs=["seq_in", "slice_out"], outputs=["seq_out"]
        )
        # cond passes through unchanged so the loop runs for trip_count iterations.
        identity_node = make_node("Identity", inputs=["cond_in"], outputs=["cond_out"])
        loop_body = make_graph(
            [
                identity_node,
                x_const_node,
                one_const_node,
                zero_const_node,
                add_node,
                axes_node,
                end_unsqueeze_node,
                slice_node,
                insert_node,
            ],
            "loop_body",
            [iter_count, cond_in, seq_in],
            [cond_out, seq_out],
        )
        node = make_node(
            "Loop",
            inputs=["trip_count", "cond", "seq_empty"],
            outputs=["seq_res"],
            body=loop_body,
        )
        # Flatten the resulting sequence of prefixes into one tensor.
        node_concat = make_node(
            "ConcatFromSequence",
            inputs=["seq_res"],
            outputs=["res"],
            axis=0,
            new_axis=0,
        )
        trip_count = np.array(5).astype(np.int64)
        seq_empty = []  # type: List[Any]
        # seq_res = [x[:int(i)] for i in x]
        cond = np.array(1).astype(np.bool_)
        model_def = make_model(
            graph=make_graph(
                name="loop_test",
                inputs=[
                    make_tensor_value_info(
                        "trip_count", TensorProto.INT64, trip_count.shape
                    ),
                    make_tensor_value_info("cond", TensorProto.BOOL, cond.shape),
                    make_sequence_value_info("seq_empty", TensorProto.FLOAT, []),
                ],
                outputs=[make_tensor_value_info("res", TensorProto.FLOAT, None)],
                nodes=[node, node_concat],
            )
        )
        expected = np.array(
            [1.0, 1.0, 2.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 5.0],
            dtype=np.float32,
        )
        oinf = ReferenceEvaluator(model_def)
        inputs = {"trip_count": trip_count, "cond": cond, "seq_empty": seq_empty}
        got = oinf.run(None, inputs)
        assert_allclose(expected, got[0])
def test_onnxt_runtime_bernoulli(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("Bernoulli", ["X"], ["Y"], seed=0.0)
graph = make_graph([node1], "g", [X], [Y])
onnx_model = make_model(graph)
check_model(onnx_model)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": np.zeros((2, 4), dtype=np.float32) + 0.5})[0]
self.assertEqual(got.shape, (2, 4))
self.assertEqual(got.dtype, np.float32)
self.assertGreater(got.min(), -1e-5)
self.assertLess(got.max(), 1 + 1e-5)
def test_onnxt_runtime_random_uniform(self):
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("RandomUniform", [], ["Y"], seed=0.0, shape=[2, 4])
graph = make_graph([node1], "g", [], [Y])
onnx_model = make_model(graph)
check_model(onnx_model)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {})[0]
self.assertEqual(got.shape, (2, 4))
self.assertEqual(got.dtype, np.float32)
self.assertGreater(got.min(), 0)
self.assertLess(got.max(), 1)
def test_onnxt_runtime_random_uniform_like(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("RandomUniformLike", ["X"], ["Y"], seed=0.0)
graph = make_graph([node1], "g", [X], [Y])
onnx_model = make_model(graph)
check_model(onnx_model)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": np.zeros((2, 4), dtype=np.float32)})[0]
self.assertEqual(got.shape, (2, 4))
self.assertEqual(got.dtype, np.float32)
self.assertGreater(got.min(), 0)
self.assertLess(got.max(), 1)
def test_onnxt_runtime_random_normal(self):
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("RandomNormal", [], ["Y"], seed=0.0, shape=[2, 4])
graph = make_graph([node1], "g", [], [Y])
onnx_model = make_model(graph)
check_model(onnx_model)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {})[0]
self.assertEqual(got.shape, (2, 4))
self.assertEqual(got.dtype, np.float32)
def test_onnxt_runtime_random_normal_like(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
node1 = make_node("RandomNormalLike", ["X"], ["Y"], seed=0.0)
graph = make_graph([node1], "g", [X], [Y])
onnx_model = make_model(graph)
check_model(onnx_model)
sess = ReferenceEvaluator(onnx_model)
got = sess.run(None, {"X": np.zeros((2, 4), dtype=np.float32)})[0]
self.assertEqual(got.shape, (2, 4))
self.assertEqual(got.dtype, np.float32)
def test_eval_celu(self):
inst = Celu.create(alpha=0.5)
self.assertEqual(inst.alpha, 0.5)
x = np.array([[0, 1], [-1, 2]], dtype=np.float32)
y = Celu.eval(x, alpha=0.5)
expected = _vcelu1(x, alpha=0.5)
assert_allclose(expected, y)
def test_eval_celu_load_op(self):
celu = load_op("", "Celu")
self.assertEqual(celu.op_domain, "")
inst = celu.create(alpha=0.5)
self.assertEqual(inst.alpha, 0.5)
x = np.array([[0, 1], [-1, 2]], dtype=np.float32)
y = celu.eval(x, alpha=0.5)
expected = _vcelu1(x, alpha=0.5)
assert_allclose(expected, y)
def test_create_adam(self):
inst = Adam.create(alpha=0.5)
self.assertEqual(inst.alpha, 0.5)
    @skip_if_no_onnxruntime
    def test_conv(self):
        """Conv: reference implementations (plain, ConvOptimized, auto-optimized)
        all match onnxruntime on one-hot inputs."""
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None, None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
        B = make_tensor_value_info("B", TensorProto.FLOAT, [None, None, None, None])
        W = make_tensor_value_info("W", TensorProto.FLOAT, [None, None, None, None])
        node = make_node(
            "Conv",
            ["X", "W", "B"],
            ["Y"],
            pads=[1, 1, 1, 1],
            dilations=[1, 1],
            strides=[2, 2],
        )
        graph = make_graph([node], "g", [X, W, B], [Y])
        onnx_model = make_model_gen_version(graph, opset_imports=[make_opsetid("", 16)])
        sess1 = run_ort_inference(onnx_model)
        if sess1 is None:
            # onnxruntime could not build a session; nothing to compare against.
            return
        # optimized=False keeps the plain Conv implementation.
        sess2 = ReferenceEvaluator(onnx_model, optimized=False)
        self.assertIsInstance(sess2.rt_nodes_[0], Conv)
        sess3 = ReferenceEvaluator(onnx_model, new_ops=[ConvOptimized], optimized=False)
        self.assertIsInstance(sess3.rt_nodes_[0], ConvOptimized)
        # optimized=True must pick ConvOptimized automatically.
        sess4 = ReferenceEvaluator(onnx_model, optimized=True)
        self.assertIsInstance(sess4.rt_nodes_[0], ConvOptimized)
        sH, sW = 5, 6
        # One-hot impulse at every spatial position exercises all padding cases.
        for i in range(sH):
            for j in range(sW):
                X = np.zeros((1, 1, sH, sW), dtype=np.float32)
                X[0, 0, i, j] = 1.0
                W = np.zeros((1, 1, 3, 3), dtype=np.float32)
                W[0, 0, :, :] = np.minimum(2 ** np.arange(9).reshape((3, -1)), 256)
                B = np.array([[[[0]]]], dtype=np.float32)
                expected = sess1.run(None, {"X": X, "W": W, "B": B})[0]
                got = sess2.run(None, {"X": X, "W": W, "B": B})[0]
                assert_allclose(expected, got)
                got3 = sess3.run(None, {"X": X, "W": W, "B": B})[0]
                assert_allclose(expected, got3)
                got4 = sess4.run(None, {"X": X, "W": W, "B": B})[0]
                assert_allclose(expected, got4)
    @skip_if_no_onnxruntime
    def test_qlinearconv(self):
        """QLinearConv: reference implementation matches onnxruntime on
        one-hot uint8 inputs and on a fixed quantized example."""
        x = make_tensor_value_info("x", TensorProto.UINT8, [None, None, None, None])
        w = make_tensor_value_info("w", TensorProto.UINT8, [None, None, None, None])
        y = make_tensor_value_info("y", TensorProto.UINT8, [None, None, None, None])
        x_scale = make_tensor_value_info("x_scale", TensorProto.FLOAT, [None])
        w_scale = make_tensor_value_info("w_scale", TensorProto.FLOAT, [None])
        y_scale = make_tensor_value_info("y_scale", TensorProto.FLOAT, [None])
        x_zero_point = make_tensor_value_info("x_zero_point", TensorProto.UINT8, [None])
        w_zero_point = make_tensor_value_info("w_zero_point", TensorProto.UINT8, [None])
        y_zero_point = make_tensor_value_info("y_zero_point", TensorProto.UINT8, [None])
        node = make_node(
            "QLinearConv",
            [
                "x",
                "x_scale",
                "x_zero_point",
                "w",
                "w_scale",
                "w_zero_point",
                "y_scale",
                "y_zero_point",
            ],
            ["y"],
        )
        graph = make_graph(
            [node],
            "g",
            [x, x_scale, x_zero_point, w, w_scale, w_zero_point, y_scale, y_zero_point],
            [y],
        )
        onnx_model = make_model_gen_version(graph, opset_imports=[make_opsetid("", 16)])
        sess1 = run_ort_inference(onnx_model)
        if sess1 is None:
            # onnxruntime unavailable for this model; nothing to compare against.
            return
        sess2 = ReferenceEvaluator(onnx_model)
        sH, sW = 3, 3
        # One-hot impulse at each position, for 1x1 and 3x3 kernels.
        for i in range(sH):
            for j in range(sW):
                x = np.zeros((1, 1, sH, sW), dtype=np.uint8)
                x[0, 0, i, j] = 1.0
                with self.subTest(w="1x1", i=i, j=j):
                    w = np.zeros((1, 1, 1, 1), dtype=np.uint8)
                    w[0, 0, :, :] = 1
                    feeds = {
                        "x": x,
                        "x_scale": np.array([1], dtype=np.float32),
                        "x_zero_point": np.array([0], dtype=np.uint8),
                        "w": w,
                        "w_scale": np.array([1], dtype=np.float32),
                        "w_zero_point": np.array([0], dtype=np.uint8),
                        "y_scale": np.array([1], dtype=np.float32),
                        "y_zero_point": np.array([0], np.uint8),
                    }
                    expected = sess1.run(None, feeds)[0]
                    got = sess2.run(None, feeds)[0]
                    try:
                        assert_allclose(expected, got)
                    except AssertionError as e:
                        raise e
                with self.subTest(w="3x3", i=i, j=j):
                    w = np.zeros((1, 1, 3, 3), dtype=np.uint8)
                    w[0, 0, :, :] = np.minimum(2 ** np.arange(9).reshape((3, -1)), 128)
                    feeds = {
                        "x": x,
                        "x_scale": np.array([1], dtype=np.float32),
                        "x_zero_point": np.array([0], dtype=np.uint8),
                        "w": w,
                        "w_scale": np.array([1], dtype=np.float32),
                        "w_zero_point": np.array([0], dtype=np.uint8),
                        "y_scale": np.array([1], dtype=np.float32),
                        "y_zero_point": np.array([0], np.uint8),
                    }
                    expected = sess1.run(None, feeds)[0]
                    got = sess2.run(None, feeds)[0]
                    assert_allclose(expected, got)
                # Non-trivial scales and zero points.
                with self.subTest(w="1x1", i=i, j=j):
                    w = np.zeros((1, 1, 1, 1), dtype=np.uint8)
                    w[0, 0, :, :] = 0
                    feeds = {
                        "x": x,
                        "x_scale": np.array([0.00369204697], dtype=np.float32),
                        "x_zero_point": np.array([132], dtype=np.uint8),
                        "w": w,
                        "w_scale": np.array([100.001727945750], dtype=np.float32),
                        "w_zero_point": np.array([255], dtype=np.uint8),
                        "y_scale": np.array([0.00162681262], dtype=np.float32),
                        "y_zero_point": np.array([132], np.uint8),
                    }
                    expected = sess1.run(None, feeds)[0]
                    got = sess2.run(None, feeds)[0]
                    assert_allclose(expected, got)
        # Fixed dense 7x7 example with realistic quantization parameters.
        x = np.array(
            [
                [255, 174, 162, 25, 203, 168, 58],
                [15, 59, 237, 95, 129, 0, 64],
                [56, 242, 153, 221, 168, 12, 166],
                [232, 178, 186, 195, 237, 162, 237],
                [188, 39, 124, 77, 80, 102, 43],
                [127, 230, 21, 83, 41, 40, 134],
                [255, 154, 92, 141, 42, 148, 247],
            ],
            dtype=np.uint8,
        ).reshape((1, 1, 7, 7))
        x_scale = np.array([0.00369204697], dtype=np.float32)
        x_zero_point = np.array([132], dtype=np.uint8)
        w = np.array([0], dtype=np.uint8).reshape((1, 1, 1, 1))
        w_scale = np.array([0.00172794575], dtype=np.float32)
        w_zero_point = np.array([255], dtype=np.uint8)
        y_scale = np.array([0.00162681262], dtype=np.float32)
        y_zero_point = np.array([123], dtype=np.uint8)
        feeds = {
            "x": x,
            "x_scale": x_scale,
            "x_zero_point": x_zero_point,
            "w": w,
            "w_scale": w_scale,
            "w_zero_point": w_zero_point,
            "y_scale": y_scale,
            "y_zero_point": y_zero_point,
        }
        expected = sess1.run(None, feeds)[0]
        got = sess2.run(None, feeds)[0]
        assert_allclose(expected, got)
    def common_test_im2col(self, kernel_shape, pads, strides, dilations):
        """Check Conv == MatMul(Flatten(W), Im2Col(X)) for one parameter set.

        Builds a two-output model: Y1 from a regular Conv, Y2 from the
        experimental Im2Col followed by a matrix product, then compares both
        (and onnxruntime's Conv when available) on one-hot inputs.
        """
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None, None])
        Y1 = make_tensor_value_info("Y1", TensorProto.FLOAT, [None, None, None, None])
        Y2 = make_tensor_value_info("Y2", TensorProto.FLOAT, [None, None, None, None])
        W = make_tensor_value_info("W", TensorProto.FLOAT, [None, None, None, None])
        node = make_node(
            "Conv", ["X", "W"], ["Y1"], pads=pads, strides=strides, dilations=dilations
        )
        node_shape = make_node("Shape", ["W"], ["shape"])
        node_im = make_node(
            "Im2Col",
            ["X", "shape"],
            ["xim"],
            pads=pads,
            strides=strides,
            dilations=dilations,
            domain="experimental",
        )
        node_flat = make_node("Flatten", ["W"], ["wflat"])
        node_gem = make_node("MatMul", ["wflat", "xim"], ["Y2"])
        graph = make_graph(
            [node, node_shape, node_im, node_flat, node_gem],
            "g",
            [X, W],
            [Y1, Y2],
        )
        onnx_model = make_model(
            graph, opset_imports=[make_opsetid("", 16), make_opsetid("experimental", 1)]
        )
        # Conv-only model for the onnxruntime cross-check.
        graph_conv = make_graph([node], "g", [X, W], [Y1])
        onnx_model_conv = make_model_gen_version(
            graph_conv, opset_imports=[make_opsetid("", 16)]
        )
        sess = ReferenceEvaluator(onnx_model)
        try:
            sess_conv = run_ort_inference(onnx_model_conv)
            if sess_conv is None:
                return
        except ImportError:
            # onnxruntime missing: still compare the two reference outputs.
            sess_conv = None
        sH, sW = 7, 7
        nker = np.prod(kernel_shape)
        # One-hot impulse at each spatial position.
        for i in range(sH):
            for j in range(sW):
                X = np.zeros((1, 1, sH, sW), dtype=np.float32)
                X[0, 0, i, j] = 1.0
                W = np.zeros(
                    (1, 1, *kernel_shape),
                    dtype=np.float32,
                )
                W[0, 0, :, :] = np.minimum(
                    2 ** np.arange(nker).reshape((kernel_shape[0], -1)), 256
                )
                got = sess.run(None, {"X": X, "W": W})
                if sess_conv is not None:
                    ort_res = sess_conv.run(None, {"X": X, "W": W})[0]
                    assert_allclose(got[1].ravel(), ort_res.ravel())
                try:
                    assert_allclose(got[0].ravel(), got[1].ravel())
                except AssertionError as e:
                    raise AssertionError(
                        f"Discrepancies: pads={pads}, dilations={dilations}, strides={strides}, "
                        f"kernel_shape={kernel_shape}"
                        f"\n{got[0]}\n!=\n{got[1]}"
                    ) from e
def test_im2col_1x1(self):
self.common_test_im2col(
(1, 1), pads=[1, 1, 1, 2], strides=[1, 1], dilations=[1, 1]
)
def test_im2col_2x2(self):
self.common_test_im2col(
(2, 2), pads=[1, 1, 1, 2], strides=[1, 1], dilations=[1, 1]
)
def test_im2col_3x3(self):
self.common_test_im2col(
(3, 3), pads=[1, 1, 1, 2], strides=[1, 1], dilations=[1, 1]
)
def test_im2col_3x3_pads(self):
self.common_test_im2col(
(3, 3), pads=[0, 1, 2, 3], strides=[1, 1], dilations=[1, 1]
)
def test_im2col_3x3_strides(self):
self.common_test_im2col(
(3, 3), pads=[0, 1, 1, 1], strides=[1, 2], dilations=[1, 1]
)
def test_im2col_5x5(self):
self.common_test_im2col(
(5, 5), pads=[1, 1, 1, 2], strides=[1, 1], dilations=[1, 1]
)
    @skip_if_no_torch
    def test_col2im(self):
        """Col2Im matches torch.nn.Fold on a 5x5 image with a 1x5 block."""
        import torch
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None])
        IS = make_tensor_value_info("I", TensorProto.INT64, [None])
        BS = make_tensor_value_info("B", TensorProto.INT64, [None])
        node = make_node(
            "Col2Im",
            ["X", "I", "B"],
            ["Y"],
            pads=[0, 0, 0, 0],
            strides=[1, 1],
            dilations=[1, 1],
        )
        graph = make_graph([node], "g", [X, IS, BS], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
        sess = ReferenceEvaluator(onnx_model)
        X = np.array(
            [
                [
                    [1.0, 6.0, 11.0, 16.0, 21.0],
                    [2.0, 7.0, 12.0, 17.0, 22.0],
                    [3.0, 8.0, 13.0, 18.0, 23.0],
                    [4.0, 9.0, 14.0, 19.0, 24.0],
                    [5.0, 0.0, 15.0, 20.0, 25.0],
                ]
            ]
        ).astype(np.float32)
        image_shape = np.array([5, 5]).astype(np.int64)
        block_shape = np.array([1, 5]).astype(np.int64)
        # torch's Fold is the reference implementation for Col2Im.
        fold = torch.nn.Fold(output_size=tuple(image_shape), kernel_size=block_shape)
        got = sess.run(None, {"X": X, "B": block_shape, "I": image_shape})
        output = fold(torch.from_numpy(X)).numpy()
        assert_allclose(output, got[0])
    def common_test_col2im(
        self, size, image_shape, block_shape, pads, strides, dilations
    ):
        """Compare Col2Im against torch.nn.Fold for one parameter combination.

        Feeds one-hot column tensors (every (channel, position) pair) through
        both implementations and asserts element-wise equality.
        """
        import torch
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None])
        IS = make_tensor_value_info("I", TensorProto.INT64, [None])
        BS = make_tensor_value_info("B", TensorProto.INT64, [None])
        node = make_node(
            "Col2Im",
            ["X", "I", "B"],
            ["Y"],
            pads=pads,
            strides=strides,
            dilations=dilations,
        )
        graph = make_graph([node], "g", [X, IS, BS], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
        sess = ReferenceEvaluator(onnx_model)
        # torch expects a single symmetric padding value; callers pass uniform
        # pads, so min(pads) equals that value.
        fold = torch.nn.Fold(
            output_size=tuple(image_shape),
            kernel_size=tuple(block_shape),
            dilation=tuple(dilations),
            padding=min(pads),
            stride=tuple(strides),
        )
        nker = np.prod(block_shape)
        for i in range(nker):
            for j in range(size):
                X = np.zeros((1, nker, size), dtype=np.float32)
                X[0, i, j] = 1.0
                i_shape = np.array(image_shape, dtype=np.int64)
                b_shape = np.array(block_shape, dtype=np.int64)
                output = fold(torch.from_numpy(X)).numpy()
                got = sess.run(None, {"X": X, "B": b_shape, "I": i_shape})
                assert_allclose(output, got[0])
@skip_if_no_torch
def test_col2im_2x3(self):
self.common_test_col2im(
10, (6, 4), (2, 3), pads=[0, 0, 0, 0], strides=[1, 1], dilations=[1, 1]
)
@skip_if_no_torch
def test_col2im_2x3_pads(self):
self.common_test_col2im(
28, (6, 4), (2, 3), pads=[1, 1, 1, 1], strides=[1, 1], dilations=[1, 1]
)
def test_col2im_2d(self):
data = np.zeros([6, 28], dtype=np.float32)
data[0][0] = 1.0
image_shape, kernel_shape, dilations, pads, stride = (
np.array([6, 4]),
(2, 3),
np.array([1, 1]),
np.array([1, 1, 1, 1]),
np.array([1, 1]),
)
r1 = _col2im_naive_implementation_2d(
data, image_shape, kernel_shape, dilations, pads, stride
)
r2 = col2im_naive_implementation(
data, image_shape, kernel_shape, dilations, pads, stride
)
assert_allclose(r1, r2)
    def test_conv_im2col_group4(self):
        """Grouped Conv (group=4) vs a manual per-group im2col matmul.

        Builds a 4-group Conv model over a (2, 4, 6, 6) input, zeroes the
        bias, and checks the ReferenceEvaluator output against a
        hand-rolled im2col + matmul done one group at a time.
        """
        # model 1
        X = make_tensor_value_info("X", TensorProto.FLOAT, [2, 4, 6, 6])
        W = make_tensor_value_info("W", TensorProto.FLOAT, [4, 1, 3, 3])
        B = make_tensor_value_info("B", TensorProto.FLOAT, [4])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [2, 4, 6, 6])
        node = make_node(
            "Conv",
            ["X", "W", "B"],
            ["Y"],
            group=4,
            dilations=[1, 1],
            kernel_shape=[3, 3],
            pads=[1, 1, 1, 1],
            strides=[1, 1],
        )
        graph = make_graph([node], "g", [X, W, B], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
        feeds = {
            "X": np.arange(2 * 4 * 6 * 6).reshape((2, 4, 6, 6)).astype(np.float32),
            "W": np.array(
                [
                    [
                        [
                            [
                                -0.026239916682243347,
                                0.07565222680568695,
                                -0.03209298849105835,
                            ],
                            [
                                -0.08708783239126205,
                                0.0961190015077591,
                                0.13418219983577728,
                            ],
                            [
                                0.1598859578371048,
                                0.03840477764606476,
                                -0.13170936703681946,
                            ],
                        ]
                    ],
                    [
                        [
                            [
                                -0.0689004510641098,
                                0.1408083587884903,
                                -0.03717087209224701,
                            ],
                            [
                                0.030967697501182556,
                                0.0263785719871521,
                                -0.0899493545293808,
                            ],
                            [
                                0.07828782498836517,
                                -0.06266771256923676,
                                0.10750330984592438,
                            ],
                        ]
                    ],
                    [
                        [
                            [
                                0.020227551460266113,
                                -0.04353883117437363,
                                -0.10938453674316406,
                            ],
                            [
                                -0.14101561903953552,
                                -0.03393106162548065,
                                0.12139306962490082,
                            ],
                            [
                                0.02838282287120819,
                                0.13864465057849884,
                                -0.06065710633993149,
                            ],
                        ]
                    ],
                    [
                        [
                            [
                                -0.06511610746383667,
                                -0.05987360328435898,
                                -0.008047685027122498,
                            ],
                            [
                                0.07340313494205475,
                                0.0326494425535202,
                                0.012516498565673828,
                            ],
                            [
                                0.13260947167873383,
                                -0.022225692868232727,
                                -0.11167611926794052,
                            ],
                        ]
                    ],
                ],
                dtype=np.float32,
            ),
            "B": np.array(
                [
                    -0.1457933485507965,
                    -0.07481209933757782,
                    -0.05890338122844696,
                    -0.11964251846075058,
                ],
                dtype=np.float32,
            ),
        }
        # The bias is deliberately zeroed so the comparison below is purely
        # about the convolution itself (adding B[g] then becomes a no-op).
        feeds["B"][:] = 0
        # model 2
        # Manual reference: per batch and per group, im2col the single input
        # channel and multiply by the flattened 3x3 kernel of that group.
        X = feeds["X"]
        W = feeds["W"]
        B = feeds["B"]
        Y = np.empty((2, 4, 6, 6), dtype=X.dtype)
        for b in range(X.shape[0]):
            for g in range(4):
                x = X[b : b + 1, g : g + 1]
                w = W[g]
                c2 = im2col(x, (3, 3), [1, 1], [1, 1, 1, 1], [1, 1])
                mul = np.matmul(c2, w.flatten())
                mul = mul + B[g]
                Y[b, g, :, :] = mul
        ref1 = ReferenceEvaluator(onnx_model)
        got1 = ref1.run(None, feeds)
        assert_allclose(Y, got1[0], atol=1e-5)
    def test_conv_strides(self):
        """Conv with stride 2 and a single non-zero kernel tap.

        W is all zeros except W[0, 0, 0, 1] = 1, so the convolution reduces
        to sampling channel 0 of the input at a fixed offset; the expected
        tensor below is that subsampling (second output channel all zero).
        """
        X = make_tensor_value_info("X", TensorProto.FLOAT, [1, 3, 6, 6])
        W = make_tensor_value_info("W", TensorProto.FLOAT, [2, 3, 3, 3])
        B = make_tensor_value_info("B", TensorProto.FLOAT, [2])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
        node = make_node(
            "Conv",
            ["X", "W", "B"],
            ["Y"],
            group=1,
            dilations=[1, 1],
            kernel_shape=[3, 3],
            pads=[1, 1, 1, 1],
            strides=[2, 2],
        )
        graph = make_graph([node], "g", [X, W, B], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
        feeds = {
            "X": np.arange(1 * 3 * 6 * 6).reshape((1, 3, 6, 6)).astype(np.float32) + 1,
            "W": np.zeros((2, 3, 3, 3), dtype=np.float32),
            "B": np.zeros((2,), dtype=np.float32),
        }
        # Single tap: output channel 0 picks input values at offset (0, 1)
        # of each 3x3 window; output channel 1 stays zero.
        feeds["W"][0, 0, 0, 1] = 1
        ref1 = ReferenceEvaluator(onnx_model)
        got1 = ref1.run(None, feeds)
        expected = np.array(
            [
                [
                    [[0.0, 0.0, 0.0], [7.0, 9.0, 11.0], [19.0, 21.0, 23.0]],
                    [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                ]
            ],
            dtype=np.float32,
        )
        assert_allclose(expected, got1[0])
def test_max_pool_2d_1(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
node = make_node(
"MaxPool",
["X"],
["Y"],
kernel_shape=[3, 3],
pads=[1, 1, 1, 1],
strides=[2, 2],
)
graph = make_graph([node], "g", [X], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
feeds = {"X": np.arange(49)[::-1].reshape((1, 1, 7, 7)).astype(np.float32)}
expected = np.array(
[
[
[
[48.0, 47.0, 45.0, 43.0],
[41.0, 40.0, 38.0, 36.0],
[27.0, 26.0, 24.0, 22.0],
[13.0, 12.0, 10.0, 8.0],
]
]
],
dtype=np.float32,
)
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
assert_allclose(expected, got1[0])
    def test_max_pool_2d_2(self):
        """MaxPool 3x3 / stride 2 / pad 1 over a fixed 7x7 input.

        The expected tensor holds the hand-computed maximum of every
        padded 3x3 window.
        """
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None, None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
        node = make_node(
            "MaxPool",
            ["X"],
            ["Y"],
            kernel_shape=[3, 3],
            pads=[1, 1, 1, 1],
            strides=[2, 2],
        )
        graph = make_graph([node], "g", [X], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
        feeds = {
            "X": np.array(
                [
                    [
                        [
                            [683, 358, 726, 578, 650, 946, 200],
                            [679, 260, 264, 5, 240, 255, 582],
                            [322, 66, 687, 632, 852, 698, 428],
                            [111, 452, 627, 332, 751, 842, 685],
                            [472, 52, 956, 81, 807, 827, 360],
                            [972, 574, 81, 799, 646, 499, 486],
                            [892, 758, 75, 833, 972, 415, 736],
                        ]
                    ]
                ],
                dtype=np.float32,
            )
        }
        expected = np.array(
            [
                [
                    [
                        [683.0, 726.0, 946.0, 946.0],
                        [679.0, 687.0, 852.0, 842.0],
                        [972.0, 956.0, 842.0, 842.0],
                        [972.0, 833.0, 972.0, 736.0],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        ref1 = ReferenceEvaluator(onnx_model)
        got1 = ref1.run(None, feeds)
        assert_allclose(expected, got1[0])
def test_scatter_elements(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Ind = make_tensor_value_info("I", TensorProto.INT64, [None, None])
U = make_tensor_value_info("U", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None])
node = make_node(
"ScatterElements",
["X", "I", "U"],
["Y"],
axis=1,
reduction="min",
)
graph = make_graph([node], "g", [X, Ind, U], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
feeds = {
"X": np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32),
"I": np.array([[1, 1]]),
"U": np.array([[1.1, 2.1]], dtype=np.float32),
}
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
expected = np.array([[1.0, 1.1, 3.0, 4.0, 5.0]], dtype=np.float32)
assert_allclose(expected, got1[0])
def test_scatternd(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Ind = make_tensor_value_info("I", TensorProto.INT64, [None, None])
U = make_tensor_value_info("U", TensorProto.FLOAT, [None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None])
node = make_node(
"ScatterND",
["X", "I", "U"],
["Y"],
)
graph = make_graph([node], "g", [X, Ind, U], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
feeds = {
"X": np.array([[1.0, 2.0]], dtype=np.float32),
"I": np.array([[0, 0]]),
"U": np.array([3.0], dtype=np.float32),
}
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
expected = np.array([[3.0, 2.0]], dtype=np.float32)
assert_allclose(expected, got1[0])
def test_col2im_impl(self):
def get_im2col_indices(
x_shape, field_height, field_width, padding=None, stride=1
):
# source: https://stackoverflow.com/questions/51703367/col2im-implementation-in-convnet
N, C, H, W = x_shape
del N # Unused
assert (H + padding[0] + padding[2] - field_height) % stride == 0
assert (W + padding[1] + padding[3] - field_height) % stride == 0
out_height = (H + padding[0] + padding[2] - field_height) // stride + 1
out_width = (W + padding[1] + padding[3] - field_width) // stride + 1
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k, i, j)
def col2im_indices(
cols, x_shape, field_height=3, field_width=3, padding=None, stride=1
):
# source: https://stackoverflow.com/questions/51703367/col2im-implementation-in-convnet
N, C, H, W = x_shape
H_padded, W_padded = (
H + padding[0] + padding[2],
W + padding[1] + padding[3],
)
x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
k, i, j = get_im2col_indices(
x_shape, field_height, field_width, padding, stride
)
cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
cols_reshaped = cols_reshaped.transpose(2, 0, 1)
np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
padding = padding.copy()
if padding[2] == 0:
padding[2] += x_padded.shape[2]
elif padding[2] > 0:
padding[2] *= -1
if padding[3] == 0:
padding[3] += x_padded.shape[3]
elif padding[3] > 0:
padding[3] *= -1
res = x_padded[:, :, padding[0] : padding[2], padding[1] : padding[3]]
return res
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None])
IS = make_tensor_value_info("IS", TensorProto.INT64, [None])
BS = make_tensor_value_info("BS", TensorProto.INT64, [None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
node = make_node("Col2Im", ["X", "IS", "BS"], ["Y"], pads=[0, 1, 0, 1])
graph = make_graph([node], "g", [X, IS, BS], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
feeds = {
"X": np.arange(5 * 15).astype(np.float32).reshape((1, 5, 15)),
"IS": np.array([5, 5]),
"BS": np.array([1, 5]),
}
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
expected = col2im_indices(
feeds["X"],
(1, 1, 5, 5),
field_height=1,
field_width=5,
padding=[0, 1, 0, 1],
)
assert_allclose(expected, got1[0])
    def test_conv_transpose_2d(self):
        """ConvTranspose 3x3 / pad 1 against two hand-computed outputs.

        First runs a dense input, then a delta input (single 1.0) whose
        output directly exposes the kernel placement.
        """
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None, None])
        W = make_tensor_value_info("W", TensorProto.FLOAT, [None, None, None, None])
        B = make_tensor_value_info("B", TensorProto.FLOAT, [None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
        node = make_node(
            "ConvTranspose",
            ["X", "W", "B"],
            ["Y"],
            dilations=[1, 1],
            kernel_shape=[3, 3],
            output_padding=[0, 0],
            pads=[1, 1, 1, 1],
            strides=[1, 1],
        )
        graph = make_graph([node], "g", [X, W, B], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
        # NOTE(review): B has 4 entries while the ConvTranspose output has a
        # single channel; it is all zeros so the mismatch is inert — confirm
        # this is intended.
        feeds = {
            "X": np.arange(1 * 3 * 5 * 4).reshape((1, 3, 5, 4)).astype(np.float32),
            "W": np.arange(3 * 1 * 3 * 3).reshape((3, 1, 3, 3)).astype(np.float32),
            "B": np.array([0, 0, 0, 0], dtype=np.float32),
        }
        # import torch
        # ex = torch.nn.functional.conv_transpose2d(
        #     torch.Tensor(feeds["X"]), torch.Tensor(feeds["W"]),
        #     bias=None, stride=1, padding=1, output_padding=0, groups=1, dilation=1)
        # print(ex)
        ref1 = ReferenceEvaluator(onnx_model)
        got1 = ref1.run(None, feeds)
        expected = np.array(
            [
                [
                    [
                        [4371, 6855, 7062, 4929],
                        [7524, 11781, 12132, 8451],
                        [8424, 13185, 13536, 9423],
                        [9324, 14589, 14940, 10395],
                        [7197, 11229, 11490, 7971],
                    ],
                ]
            ],
            dtype=np.float32,
        )
        assert_allclose(expected, got1[0])
        # Delta check: a single 1.0 at (0, 0) stamps the lower-right 2x2 of
        # the first kernel into the top-left corner (pads crop one ring).
        feeds["X"] *= 0
        feeds["X"][0, 0, 0, 0] = 1
        ref1 = ReferenceEvaluator(onnx_model)
        got1 = ref1.run(None, feeds)
        expected = np.array(
            [
                [
                    [
                        [4, 5, 0, 0],
                        [7, 8, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        assert_allclose(expected, got1[0])
    def test_conv_transpose_2d_upper(self):
        """ConvTranspose with auto_pad=SAME_UPPER and stride 2.

        The expected tensor was validated against onnxruntime (see the
        commented-out snippet below).
        """
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None, None])
        W = make_tensor_value_info("W", TensorProto.FLOAT, [None, None, None, None])
        B = make_tensor_value_info("B", TensorProto.FLOAT, [None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
        node = make_node(
            "ConvTranspose",
            ["X", "W", "B"],
            ["Y"],
            auto_pad="SAME_UPPER",
            strides=[2, 2],
            # output_shape=[6, 6],
        )
        graph = make_graph([node], "g", [X, W, B], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])
        # NOTE(review): B has 4 entries while the output has 2 channels; all
        # zeros, so the mismatch is inert — confirm this is intended.
        feeds = {
            "X": np.arange(1 * 1 * 3 * 3).reshape((1, 1, 3, 3)).astype(np.float32),
            "W": np.arange(1 * 2 * 3 * 3).reshape((1, 2, 3, 3)).astype(np.float32),
            "B": np.array([0, 0, 0, 0], dtype=np.float32),
        }
        expected = np.array(
            [
                [
                    [
                        [0, 0, 0, 1, 2, 2],
                        [0, 0, 3, 4, 11, 8],
                        [0, 3, 12, 11, 28, 19],
                        [9, 12, 27, 16, 35, 20],
                        [18, 27, 60, 35, 76, 43],
                        [18, 24, 51, 28, 59, 32],
                    ],
                    [
                        [0, 0, 9, 10, 29, 20],
                        [0, 0, 12, 13, 38, 26],
                        [27, 30, 84, 56, 136, 82],
                        [36, 39, 90, 52, 116, 65],
                        [99, 108, 240, 134, 292, 160],
                        [72, 78, 168, 91, 194, 104],
                    ],
                ]
            ],
            dtype=np.float32,
        )
        # import onnxruntime
        # ref0 = onnxruntime.InferenceSession(onnx_model.SerializeToString(), providers=["CPUExecutionProvider"])
        # got0 = ref0.run(None, feeds)
        ref1 = ReferenceEvaluator(onnx_model)
        got1 = ref1.run(None, feeds)
        assert_allclose(expected, got1[0])
    def test_stft(self):
        """STFT without a window vs a per-frame np.fft.fft reference.

        The window input is left empty (""), so every frame is rectangular;
        only the one-sided half of each spectrum is compared.
        """
        signal = make_tensor_value_info("signal", TensorProto.FLOAT, [None, None, None])
        frame_step = make_tensor_value_info("frame_step", TensorProto.INT64, [None])
        frame_length = make_tensor_value_info("frame_length", TensorProto.INT64, [None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
        node = make_node(
            "STFT",
            ["signal", "frame_step", "", "frame_length"],
            ["Y"],
        )
        graph = make_graph([node], "g", [signal, frame_step, frame_length], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 17)])
        feeds = {
            "signal": np.arange(128).reshape((1, 128, 1)).astype(np.float32),
            "frame_step": np.array(8, dtype=np.int64),
            "frame_length": np.array(16, dtype=np.int64),
        }
        signal = feeds["signal"]
        frame_length = int(feeds["frame_length"])
        frame_step = int(feeds["frame_step"])
        onesided_length = (frame_length // 2) + 1
        nstfts = ((feeds["signal"].shape[1] - frame_length) // frame_step) + 1
        # [batch_size][frames][frame_length][2]
        expected = np.empty([1, nstfts, onesided_length, 2], dtype=np.float32)
        for i in range(nstfts):
            start = i * frame_step
            stop = i * frame_step + frame_length
            complex_out = np.fft.fft(signal[0, start:stop, 0])
            c_out = complex_out[0:onesided_length]
            # Last axis packs (real, imag).
            expected[0, i] = np.stack((c_out.real, c_out.imag), axis=1)
        # import torch
        # correspondance with torch
        # hop_length = frame_step
        # window = np.ones((frame_length,), dtype=np.float32)
        # ex = torch.stft(
        #     torch.Tensor(feeds["signal"][:, :, 0]),
        #     n_fft=frame_length, window=torch.Tensor(window),
        #     hop_length=hop_length, win_length=frame_length,
        #     onesided=True, return_complex=True, center=False,
        #     normalized=False)
        # ex = np.transpose(ex.numpy(), [0, 2, 1])
        ref1 = ReferenceEvaluator(onnx_model)
        got1 = ref1.run(None, feeds)
        assert_allclose(expected, got1[0])
def test_stft_with_window(self):
signal = make_tensor_value_info("signal", TensorProto.FLOAT, [None, None, None])
frame_step = make_tensor_value_info("frame_step", TensorProto.INT64, [None])
window = make_tensor_value_info("window", TensorProto.FLOAT, [None])
frame_length = make_tensor_value_info("frame_length", TensorProto.INT64, [None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
node = make_node(
"STFT",
["signal", "frame_step", "window", "frame_length"],
["Y"],
)
graph = make_graph([node], "g", [signal, frame_step, window, frame_length], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 17)])
feeds = {
"signal": np.arange(128).reshape((1, 128, 1)).astype(np.float32),
"frame_step": np.array(8, dtype=np.int64),
"window": 0.5
+ 0.5 * np.cos(2 * np.pi * np.arange(0, 16, 1, dtype=np.float32) / 16),
"frame_length": np.array(16, dtype=np.int64),
}
signal = feeds["signal"]
frame_length = int(feeds["frame_length"])
window = feeds["window"]
frame_step = int(feeds["frame_step"])
onesided_length = (frame_length // 2) + 1
nstfts = 1 + (signal.shape[1] - window.shape[0]) // 8
# [batch_size][frames][frame_length][2]
expected = np.empty([1, nstfts, onesided_length, 2], dtype=np.float32)
for i in range(nstfts):
start = i * frame_step
stop = i * frame_step + frame_length
complex_out = np.fft.fft(signal[0, start:stop, 0] * window)[
0:onesided_length
]
c_out = complex_out[0:onesided_length]
expected[0, i] = np.stack((c_out.real, c_out.imag), axis=1)
# import torch
# hop_length = frame_step
# ex = torch.stft(
# torch.Tensor(feeds["signal"][:, :, 0]),
# n_fft=frame_length, window=torch.Tensor(window),
# hop_length=hop_length, win_length=frame_length,
# onesided=True, return_complex=True, center=False,
# normalized=False)
# ex = np.transpose(ex.numpy(), [0, 2, 1])
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
assert_allclose(expected, got1[0])
def get_roi_align_model(self, mode):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None, None])
rois = make_tensor_value_info("rois", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
IS = make_tensor_value_info("I", TensorProto.INT64, [None])
node = make_node(
"RoiAlign",
["X", "rois", "I"],
["Y"],
output_height=5,
output_width=5,
sampling_ratio=2,
spatial_scale=1.0,
coordinate_transformation_mode="output_half_pixel",
mode=mode,
)
graph = make_graph([node], "g", [X, rois, IS], [Y])
return make_model_gen_version(graph, opset_imports=[make_opsetid("", 17)])
def common_test_roi_align(self, mode):
onnx_model = self.get_roi_align_model(mode)
X, batch_indices, rois = get_roi_align_input_values()
feeds = {"X": X, "rois": rois, "I": batch_indices}
sess = run_ort_inference(onnx_model)
if sess is None:
return
expected = sess.run(None, feeds)
ref = ReferenceEvaluator(onnx_model)
got = ref.run(None, feeds)
assert_allclose(expected[0], got[0], atol=1e-5)
@skip_if_no_onnxruntime
def test_roi_align(self):
with self.subTest(mode="avg"):
self.common_test_roi_align("avg")
# max does not have example in the backend
with self.subTest(mode="max"):
self.common_test_roi_align("max")
def common_test_roi_align_torch(self, mode):
import torch
from torchvision.ops import RoIAlign
onnx_model = self.get_roi_align_model(mode)
sess = ReferenceEvaluator(onnx_model)
X, batch_indices, rois = get_roi_align_input_values()
got = sess.run(None, {"X": X, "rois": rois, "I": batch_indices})
a = RoIAlign((5, 5), spatial_scale=1.0, sampling_ratio=2)
expected = a(torch.from_numpy(X), [torch.from_numpy(rois)])
assert_allclose(expected, got[0], atol=1e-5)
@skip_if_no_torch
@skip_if_no_torchvision
def test_roi_align_torch(self):
with self.subTest(mode="avg"):
self.common_test_roi_align_torch("avg")
# not implemented in torch
# with self.subTest(mode="max"):
# self.common_test_roi_align_torch("max")
def test_split(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y1 = make_tensor_value_info("Y1", TensorProto.FLOAT, [None])
Y2 = make_tensor_value_info("Y2", TensorProto.FLOAT, [None])
Y3 = make_tensor_value_info("Y3", TensorProto.FLOAT, [None])
Y4 = make_tensor_value_info("Y4", TensorProto.FLOAT, [None])
node = make_node("Split", ["X"], ["Y1", "Y2", "Y3", "Y4"], num_outputs=4)
graph = make_graph([node], "g", [X], [Y1, Y2, Y3, Y4])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 18)])
feeds = {"X": np.arange(10).astype(np.float32)}
expected = [
np.array([0, 1, 2], dtype=np.float32),
np.array([3, 4, 5], dtype=np.float32),
np.array([6, 7, 8], dtype=np.float32),
np.array([9], dtype=np.float32),
]
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
for i in range(4):
assert_allclose(expected[i], got1[i])
def test_split_2(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y1 = make_tensor_value_info("Y1", TensorProto.FLOAT, [None])
Y2 = make_tensor_value_info("Y2", TensorProto.FLOAT, [None])
Y3 = make_tensor_value_info("Y3", TensorProto.FLOAT, [None])
Y4 = make_tensor_value_info("Y4", TensorProto.FLOAT, [None])
node = make_node("Split", ["X", "split"], ["Y1", "Y2", "Y3", "Y4"])
graph = make_graph([node], "g", [X], [Y1, Y2, Y3, Y4])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 18)])
feeds = {
"X": np.arange(10).astype(np.float32),
"split": np.array([3, 3, 2, 2], dtype=np.int64),
}
expected = [
np.array([0, 1, 2], dtype=np.float32),
np.array([3, 4, 5], dtype=np.float32),
np.array([6, 7], dtype=np.float32),
np.array([8, 9], dtype=np.float32),
]
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
for i in range(4):
assert_allclose(expected[i], got1[i])
    def test_split_num_outputs_4(self):
        """Split with num_outputs=4 when the length is and is not divisible.

        Case 1: 10 elements split as 3, 3, 3, 1. Case 2: 9 elements split as
        3, 3, 3 plus an empty last chunk.
        """
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
        Y1 = make_tensor_value_info("Y1", TensorProto.FLOAT, [None])
        Y2 = make_tensor_value_info("Y2", TensorProto.FLOAT, [None])
        Y3 = make_tensor_value_info("Y3", TensorProto.FLOAT, [None])
        Y4 = make_tensor_value_info("Y4", TensorProto.FLOAT, [None])
        node = make_node("Split", ["X"], ["Y1", "Y2", "Y3", "Y4"], num_outputs=4)
        graph = make_graph([node], "g", [X], [Y1, Y2, Y3, Y4])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 18)])
        # case 1
        feeds = {"X": np.arange(10).astype(np.float32)}
        expected = [
            np.array([0, 1, 2], dtype=np.float32),
            np.array([3, 4, 5], dtype=np.float32),
            np.array([6, 7, 8], dtype=np.float32),
            np.array([9], dtype=np.float32),
        ]
        ref1 = ReferenceEvaluator(onnx_model)
        got1 = ref1.run(None, feeds)
        for i in range(4):
            assert_allclose(expected[i], got1[i])
        # case 2
        feeds = {"X": np.arange(9).astype(np.float32)}
        expected = [
            np.array([0, 1, 2], dtype=np.float32),
            np.array([3, 4, 5], dtype=np.float32),
            np.array([6, 7, 8], dtype=np.float32),
            np.array([], dtype=np.float32),
        ]
        ref1 = ReferenceEvaluator(onnx_model)
        got1 = ref1.run(None, feeds)
        for i in range(4):
            assert_allclose(expected[i], got1[i])
def test_argmin(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.INT64, [None])
node = make_node("ArgMin", ["X"], ["Y"], axis=1)
graph = make_graph([node], "g", [X], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 18)])
feeds = {"X": np.arange(12).reshape((3, 4)).astype(np.float32)}
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
expected = np.array([0, 0, 0], dtype=np.int64).reshape((-1, 1))
self.assertEqual(expected.tolist(), got1[0].tolist())
def test_argmax(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.INT64, [None])
node = make_node("ArgMax", ["X"], ["Y"], axis=1)
graph = make_graph([node], "g", [X], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 18)])
feeds = {"X": np.arange(12).reshape((3, 4)).astype(np.float32)}
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
expected = np.array([3, 3, 3], dtype=np.int64).reshape((-1, 1))
self.assertEqual(expected.tolist(), got1[0].tolist())
def test_slice_squeeze(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
starts = make_tensor_value_info("starts", TensorProto.INT64, [None])
ends = make_tensor_value_info("ends", TensorProto.INT64, [None])
axes = make_tensor_value_info("axes", TensorProto.INT64, [None])
Y = make_tensor_value_info("Y", TensorProto.INT64, [None])
nodes = [
make_node("Slice", ["X", "starts", "ends", "axes"], ["T"]),
make_node("Squeeze", ["T", "axes"], ["Y"]),
]
graph = make_graph(nodes, "g", [X, starts, ends, axes], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 18)])
feeds = {
"X": np.array([[0]], dtype=np.int64),
"starts": np.array([0], dtype=np.int64),
"ends": np.array([1], dtype=np.int64),
"axes": np.array([0], dtype=np.int64),
}
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
expected = np.array([0], dtype=np.int64)
self.assertEqual(expected.tolist(), got1[0].tolist())
def test_slice_squeeze_6(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
Y = make_tensor_value_info("Y", TensorProto.INT64, [None])
nodes = [
make_node("Slice", ["X"], ["T"], axes=[0], starts=[0], ends=[1]),
make_node("Squeeze", ["T"], ["Y"], axes=[0]),
]
graph = make_graph(nodes, "g", [X], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 6)])
feeds = {"X": np.array([[0]], dtype=np.int64)}
ref1 = ReferenceEvaluator(onnx_model)
got1 = ref1.run(None, feeds)
expected = np.array([0], dtype=np.int64)
self.assertEqual(expected.tolist(), got1[0].tolist())
    def test_onnxrt_reduce_mean(self):
        """ReduceMean dispatches to the right versioned kernel per opset.

        The same graph is built at opsets 17 and 18; the evaluator must pick
        ReduceMean_1 and ReduceMean_18 respectively, and both must reduce a
        2x4 ones tensor to a (1, 1) tensor holding 1.
        """
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None])
        node1 = make_node("ReduceMean", ["X"], ["Y"])
        graph = make_graph([node1], "g", [X], [Y])
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 17)])
        check_model(onnx_model)
        sess = ReferenceEvaluator(onnx_model)
        # Inspect the evaluator's internal node list to confirm dispatch.
        cls = sess.rt_nodes_[0]
        self.assertEqual(cls.__class__.__name__, "ReduceMean_1")
        got = sess.run(None, {"X": np.ones((2, 4), dtype=np.float32)})[0]
        self.assertEqual(got.shape, (1, 1))
        self.assertEqual(got[0, 0], 1)
        # Same graph at opset 18 must use the axes-as-input kernel.
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", 18)])
        check_model(onnx_model)
        sess = ReferenceEvaluator(onnx_model)
        cls = sess.rt_nodes_[0]
        self.assertEqual(cls.__class__.__name__, "ReduceMean_18")
        got = sess.run(None, {"X": np.ones((2, 4), dtype=np.float32)})[0]
        self.assertEqual(got.shape, (1, 1))
        self.assertEqual(got[0, 0], 1)
    @staticmethod
    def _cdist_model(opset, reduce_op="ReduceSumSquare"):
        """Build a Scan-based pairwise-distance + TopK model.

        The Scan body subtracts each fixed row (initializer ``Sc_Scancst``,
        a 20x4 constant) from the scanned input row and reduces the
        difference with ``reduce_op``. At opset 18 the reduction axes move
        from an attribute to an input, hence the two branches. The main
        graph transposes the scan output, takes Sqrt, and returns the two
        smallest values and their indices per input row.
        """
        # subgraph
        initializers = []
        inputs = [
            make_tensor_value_info("next_in", TensorProto.FLOAT, [None, 4]),
            make_tensor_value_info("next", TensorProto.FLOAT, [None]),
        ]
        outputs = [
            make_tensor_value_info("next_out", TensorProto.FLOAT, [None, None]),
            make_tensor_value_info("scan_out", TensorProto.FLOAT, [None]),
        ]
        if opset >= 18:
            # Opset >= 18: reduction axes are passed as a second input.
            initializers.append(
                from_array(np.array([1], dtype=np.int64), name="axis_red")
            )
            node_reduce = make_node(
                reduce_op,
                ["cdistdf_17_C0", "axis_red"],
                ["cdistdf_17_reduced0"],
                name="cdistdf_17_ReduceSumSquare",
                keepdims=0,
            )
        else:
            # Opset < 18: reduction axes are an attribute.
            node_reduce = make_node(
                reduce_op,
                ["cdistdf_17_C0"],
                ["cdistdf_17_reduced0"],
                name="cdistdf_17_ReduceSumSquare",
                axes=[1],
                keepdims=0,
            )
        nodes = [
            make_node("Identity", ["next_in"], ["next_out"], name="cdistd_17_Identity"),
            make_node(
                "Sub", ["next_in", "next"], ["cdistdf_17_C0"], name="cdistdf_17_Sub"
            ),
            node_reduce,
            make_node(
                "Identity",
                ["cdistdf_17_reduced0"],
                ["scan_out"],
                name="cdistdf_17_Identity",
            ),
        ]
        graph = make_graph(nodes, "OnnxIdentity", inputs, outputs, initializers)
        # main graph
        initializers = []
        list_value = [
            1.1394007205963135,
            -0.6848101019859314,
            -1.234825849533081,
            0.4023416340351105,
            0.17742614448070526,
            0.46278226375579834,
            -0.4017809331417084,
            -1.630198359489441,
            -0.5096521973609924,
            0.7774903774261475,
            -0.4380742907524109,
            -1.2527953386306763,
            -1.0485529899597168,
            1.950775384902954,
            -1.420017957687378,
            -1.7062702178955078,
            1.8675580024719238,
            -0.15135720372200012,
            -0.9772778749465942,
            0.9500884413719177,
            -2.5529897212982178,
            -0.7421650290489197,
            0.653618574142456,
            0.8644362092018127,
            1.5327792167663574,
            0.37816253304481506,
            1.4693588018417358,
            0.154947429895401,
            -0.6724604368209839,
            -1.7262825965881348,
            -0.35955315828323364,
            -0.8131462931632996,
            -0.8707971572875977,
            0.056165341287851334,
            -0.5788496732711792,
            -0.3115525245666504,
            1.2302906513214111,
            -0.302302747964859,
            1.202379822731018,
            -0.38732680678367615,
            2.269754648208618,
            -0.18718385696411133,
            -1.4543657302856445,
            0.04575851559638977,
            -0.9072983860969543,
            0.12898291647434235,
            0.05194539576768875,
            0.7290905714035034,
            1.4940791130065918,
            -0.8540957570075989,
            -0.2051582634449005,
            0.3130677044391632,
            1.764052391052246,
            2.2408931255340576,
            0.40015721321105957,
            0.978738009929657,
            0.06651721894741058,
            -0.3627411723136902,
            0.30247190594673157,
            -0.6343221068382263,
            -0.5108051300048828,
            0.4283318817615509,
            -1.18063223361969,
            -0.02818222902715206,
            -1.6138978004455566,
            0.38690251111984253,
            -0.21274028718471527,
            -0.8954665660858154,
            0.7610377073287964,
            0.3336743414402008,
            0.12167501449584961,
            0.44386324286460876,
            -0.10321885347366333,
            1.4542734622955322,
            0.4105985164642334,
            0.14404356479644775,
            -0.8877857327461243,
            0.15634897351264954,
            -1.980796456336975,
            -0.34791216254234314,
        ]
        initializers.append(
            from_array(
                np.array(list_value, dtype=np.float32).reshape((20, 4)),
                name="Sc_Scancst",
            )
        )
        initializers.append(
            from_array(np.array([2], dtype=np.int64), name="To_TopKcst")
        )
        inputs = [make_tensor_value_info("input", TensorProto.FLOAT, [None, 4])]
        outputs = [
            make_tensor_value_info("values", TensorProto.FLOAT, [None, 2]),
            make_tensor_value_info("indices", TensorProto.INT64, [None, 2]),
        ]
        # nodes
        nodes = [
            make_node(
                "Scan",
                ["input", "Sc_Scancst"],
                ["UU032UU", "UU033UU"],
                name="Sc_Scan",
                body=graph,
                num_scan_inputs=1,
            ),
            make_node(
                "Transpose",
                ["UU033UU"],
                ["Tr_transposed0"],
                name="Tr_Transpose",
                perm=[1, 0],
            ),
            make_node("Sqrt", ["Tr_transposed0"], ["Sq_Y0"], name="Sq_Sqrt"),
            make_node(
                "TopK",
                ["Sq_Y0", "To_TopKcst"],
                ["values", "indices"],
                name="To_TopK",
                largest=0,
                sorted=1,
            ),
        ]
        graph = make_graph(nodes, "dummy", inputs, outputs, initializers)
        # model
        onnx_model = make_model(graph, opset_imports=[make_opsetid("", opset)])
        return onnx_model
    @parameterized.parameterized.expand(
        itertools.product(
            [
                (
                    "ReduceMin",
                    [
                        np.array(
                            [[np.nan, np.nan], [14.422706, 18.80527]], dtype=np.float32
                        ),
                        np.array([[2, 15], [10, 4]], dtype=np.int64),
                    ],
                ),
                (
                    "ReduceL1",
                    [
                        np.array(
                            [[2.2367053, 2.3516612], [4.076292, 4.2970634]],
                            dtype=np.float32,
                        ),
                        np.array([[18, 6], [13, 6]], dtype=np.int64),
                    ],
                ),
                (
                    "ReduceL2",
                    [
                        np.array(
                            [[1.80155, 1.8169948], [2.9928076, 3.1205883]],
                            dtype=np.float32,
                        ),
                        np.array([[11, 18], [13, 6]], dtype=np.int64),
                    ],
                ),
                (
                    "ReduceLogSum",
                    [
                        np.array(
                            [[0.9497848, 1.1872643], [1.6764175, 1.70759]],
                            dtype=np.float32,
                        ),
                        np.array([[6, 18], [13, 6]], dtype=np.int64),
                    ],
                ),
                (
                    "ReduceLogSumExp",
                    [
                        np.array(
                            [[1.6005973, 1.7445935], [2.5616229, 2.6539795]],
                            dtype=np.float32,
                        ),
                        np.array([[13, 6], [13, 6]], dtype=np.int64),
                    ],
                ),
                (
                    "ReduceMax",
                    [
                        np.array(
                            [[1.4217108, 1.5069536], [2.453826, 2.5041783]],
                            dtype=np.float32,
                        ),
                        np.array([[13, 11], [13, 11]], dtype=np.int64),
                    ],
                ),
                (
                    "ReduceMean",
                    [
                        np.array(
                            [[0.39247903, 0.78497636], [2.038146, 2.1485317]],
                            dtype=np.float32,
                        ),
                        np.array([[13, 6], [13, 6]], dtype=np.int64),
                    ],
                ),
                (
                    "ReduceSumSquare",
                    [
                        np.array(
                            [[3.2455828, 3.3014696], [8.956896, 9.7380705]],
                            dtype=np.float32,
                        ),
                        np.array([[11, 18], [13, 6]], dtype=np.int64),
                    ],
                ),
                (
                    "ReduceProd",
                    [
                        np.array(
                            [[np.nan, np.nan], [14.422706, 18.80527]], dtype=np.float32
                        ),
                        np.array([[2, 15], [13, 6]], dtype=np.int64),
                    ],
                ),
            ],
            [17, 18],
        )
    )
    def test_op_reduce(self, reduce_op_expected, opset: int):
        """Run the _cdist_model for each reduce op at opsets 17 and 18.

        Each parameterized case carries the expected TopK values/indices.
        The reduce operator is also re-registered as a custom class via
        ``new_ops`` to check that dispatch path yields the same results.
        """
        reduce_op, expected = reduce_op_expected
        X = np.arange(8).reshape((-1, 4)).astype(np.float32)
        results = {}
        model = self._cdist_model(opset, reduce_op)
        sess = ReferenceEvaluator(model)
        got = sess.run(None, {"input": X})
        results["ref", opset] = got
        # Locate the reduce node inside the Scan body to clone its schema.
        cl = [
            n
            for n in sess.rt_nodes_[0].body.rt_nodes_
            if n.__class__.__name__.startswith(reduce_op)
        ]
        schema = cl[0]._schema  # pylint: disable=protected-access
        new_cl = type(reduce_op, (cl[0].__class__,), {"op_schema": schema})
        sess = ReferenceEvaluator(model, new_ops=[new_cl])
        got = sess.run(None, {"input": X})
        results["ref_cl", opset] = got
        # Label used only in the failure messages below.
        baseline = "constant"
        for k, v in results.items():
            # Compare indices first, then values (hence reversed); expected
            # values may contain NaN, which np.abs(...).max() propagates.
            for a, b in zip(reversed(expected), reversed(v)):
                if a.shape != b.shape:
                    raise AssertionError(
                        f"Shape mismatch for {reduce_op!r}, {baseline}:{a.shape} != {k}:{b.shape}."
                    )
                diff = np.abs(a - b).max()
                if diff > 1e-6:
                    raise AssertionError(
                        f"Discrepancies (max={diff}) for {reduce_op!r}, {baseline} != {k}\n{a}\n!=\n{b}"
                    )
@parameterized.parameterized.expand(
[
(13,),
(17,),
(18,),
]
)
def test_mvn(self, opset: int, ref_opset: int = 13):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None, None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None, None])
nodes = [
make_node("MeanVarianceNormalization", ["X"], ["Y"]),
]
graph = make_graph(nodes, "g", [X], [Y])
x = np.random.rand(3, 3, 3, 1).astype(np.float32)
onnx_model = make_model(graph, opset_imports=[make_opsetid("", opset)])
ref = ReferenceEvaluator(onnx_model)
got = ref.run(None, {"X": x})[0]
ref_onnx_model = make_model(graph, opset_imports=[make_opsetid("", ref_opset)])
ref_expected = ReferenceEvaluator(ref_onnx_model)
expected = ref_expected.run(None, {"X": x})[0]
self.assertEqual(expected.shape, got.shape)
assert_allclose(expected, got)
    def test_concat_in_a_function(self):
        """Run a model that calls a custom-domain function wrapping Concat."""
        def create_model():
            # Builds a model declaring one function 'concat_2' in
            # 'custom_domain' (a single Concat node) and calling it twice
            # in the main graph: r__4 = concat_2(I__2, concat_2(I__0, I__1)).
            nodes = []
            inputs = []
            outputs = []
            functions = []
            opsets = {"": onnx_opset_version(), "custom_domain": 1}
            nodes_fct = []
            node = make_node("Concat", ["x:0", "x:1"], ["r__0"], axis=0, domain="")
            nodes_fct.append(node)
            opset_imports_fct = [
                make_opsetid(domain, 1 if version is None else version)
                for domain, version in opsets.items()
            ]
            fct = make_function(
                "custom_domain",
                "concat_2",
                ["x:0", "x:1"],
                ["r__0"],
                nodes_fct,
                opset_imports_fct,
            )
            functions.append(fct)
            inputs.append(make_tensor_value_info("I__0", TensorProto.DOUBLE, []))
            inputs.append(make_tensor_value_info("I__1", TensorProto.DOUBLE, []))
            inputs.append(make_tensor_value_info("I__2", TensorProto.DOUBLE, []))
            outputs.append(make_tensor_value_info("r__4", TensorProto.DOUBLE, []))
            node = make_node(
                "concat_2", ["I__0", "I__1"], ["r__3"], axis=0, domain="custom_domain"
            )
            nodes.append(node)
            node = make_node(
                "concat_2", ["I__2", "r__3"], ["r__4"], axis=0, domain="custom_domain"
            )
            nodes.append(node)
            opset_imports = [
                make_opsetid(domain, 1 if version is None else version)
                for domain, version in opsets.items()
            ]
            graph = make_graph(nodes, "numpyx", inputs, outputs)
            onnx_model = make_model(
                graph, opset_imports=opset_imports, functions=functions
            )
            return onnx_model
        onnx_model = create_model()
        x1 = np.array([[-5, 6], [15, 3]], dtype=np.float64)
        x2 = np.array([[1, 2]], dtype=np.float64)
        x3 = np.array([[-1, -2]], dtype=np.float64)
        # Feeds are deliberately mapped out of order: I__2 = x1 is the first
        # operand of the outer concat, so the result is vstack([x1, x2, x3]).
        z = np.vstack([x1, x2, x3])
        ref = ReferenceEvaluator(onnx_model)
        feeds = {"I__2": x1, "I__0": x2, "I__1": x3}
        got = ref.run(None, feeds)
        assert_allclose(z, got[0])
def test_cast_float_to_string(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y = make_tensor_value_info("Y", TensorProto.STRING, [None])
model = make_model(
make_graph(
[
make_node("Cast", ["X"], ["Y"], to=TensorProto.STRING),
],
"g",
[X],
[Y],
)
)
ref = ReferenceEvaluator(model)
data = np.array([1.152512, -0.152612, 0.0, np.nan])
got = ref.run(None, {"X": data})[0]
self.assertTrue(
(got == np.array([1.152512, -0.152612, 0.0, np.nan]).astype(np.str_)).all()
)
def test_cast_float_to_string_and_back(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
model = make_model(
make_graph(
[
make_node("Cast", ["X"], ["Z"], to=TensorProto.STRING),
make_node("Cast", ["Z"], ["Y"], to=TensorProto.FLOAT),
],
"g",
[X],
[Y],
)
)
ref = ReferenceEvaluator(model)
data = np.array([1.152512, -0.152612, 0.0, np.nan])
got = ref.run(None, {"X": data})[0]
assert_allclose(got, np.array([1.152512, -0.152612, 0.0, np.nan]))
def test_split_to_sequence(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, None)
Y = make_tensor_value_info("Y", TensorProto.INT64, None)
Z = make_tensor_value_info("Z", TensorProto.UNDEFINED, None)
nodes = [make_node("SplitToSequence", ["X", "Y"], ["Z"], axis=2)]
model = make_model(make_graph(nodes, "g", [X, Y], [Z]))
ref = ReferenceEvaluator(model)
data = np.arange(18).reshape((1, 3, 6)).astype(np.float32)
indices = np.array(2, dtype=np.int64)
got = ref.run(None, {"X": data, "Y": indices})
expected = [
[
np.array([[[0.0, 1.0], [6.0, 7.0], [12.0, 13.0]]], dtype=np.float32),
np.array([[[2.0, 3.0], [8.0, 9.0], [14.0, 15.0]]], dtype=np.float32),
np.array([[[4.0, 5.0], [10.0, 11.0], [16.0, 17.0]]], dtype=np.float32),
]
]
self.assertEqual(len(expected[0]), len(got[0]))
for a, b in zip(expected[0], got[0]):
assert_allclose(a, b)
def test_split_to_sequence_1d(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, None)
Y = make_tensor_value_info("Y", TensorProto.INT64, None)
Z = make_tensor_value_info("Z", TensorProto.UNDEFINED, None)
nodes = [make_node("SplitToSequence", ["X", "Y"], ["Z"], axis=2)]
model = make_model(make_graph(nodes, "g", [X, Y], [Z]))
ref = ReferenceEvaluator(model)
data = np.arange(18).reshape((1, 3, 6)).astype(np.float32)
indices = np.array([2, 2, 2], dtype=np.int64)
got = ref.run(None, {"X": data, "Y": indices})
expected = [
[
np.array([[[0.0, 1.0], [6.0, 7.0], [12.0, 13.0]]], dtype=np.float32),
np.array([[[2.0, 3.0], [8.0, 9.0], [14.0, 15.0]]], dtype=np.float32),
np.array([[[4.0, 5.0], [10.0, 11.0], [16.0, 17.0]]], dtype=np.float32),
]
]
self.assertEqual(len(expected[0]), len(got[0]))
for a, b in zip(expected[0], got[0]):
assert_allclose(a, b)
def test_split_to_sequence_nokeepdims_noinput(self):
# keepdims is ignored in that case
X = make_tensor_value_info("X", TensorProto.FLOAT, None)
Z = make_tensor_value_info("Z", TensorProto.UNDEFINED, None)
nodes = [make_node("SplitToSequence", ["X"], ["Z"], axis=2, keepdims=0)]
model = make_model(make_graph(nodes, "g", [X], [Z]))
ref = ReferenceEvaluator(model)
data = np.arange(18).reshape((1, 3, 6)).astype(np.float32)
got = ref.run(None, {"X": data})
expected = [[data[:, :, i] for i in range(data.shape[2])]]
self.assertEqual(len(expected[0]), len(got[0]))
for a, b in zip(expected[0], got[0]):
assert_allclose(a, b)
    def test_cast_float8(self):
        """Cast float32 to FLOAT8E4M3FN / FLOAT8E5M2 and back to float32.

        Checks both Cast nodes and float8 Constant nodes against the float8
        conversion helpers from onnx.numpy_helper.
        """
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
        F1 = make_tensor_value_info("F1", TensorProto.FLOAT, [None])
        F2 = make_tensor_value_info("F2", TensorProto.FLOAT, [None])
        F3 = make_tensor_value_info("F3", TensorProto.FLOAT, [None])
        F4 = make_tensor_value_info("F4", TensorProto.FLOAT, [None])
        model = make_model(
            make_graph(
                [
                    make_node("Cast", ["X"], ["f81"], to=TensorProto.FLOAT8E4M3FN),
                    make_node("Cast", ["X"], ["f82"], to=TensorProto.FLOAT8E5M2),
                    make_node(
                        "Constant",
                        [],
                        ["C1"],
                        value=make_tensor(
                            "C1", TensorProto.FLOAT8E4M3FN, [5], [0, 1, 2, 5e-2, 200]
                        ),
                    ),
                    make_node(
                        "Constant",
                        [],
                        ["C2"],
                        value=make_tensor(
                            "C2", TensorProto.FLOAT8E5M2, [5], [0, 1, 2, 5e-2, 200]
                        ),
                    ),
                    make_node("Cast", ["f81"], ["F1"], to=TensorProto.FLOAT),
                    make_node("Cast", ["f82"], ["F2"], to=TensorProto.FLOAT),
                    make_node("Cast", ["C1"], ["F3"], to=TensorProto.FLOAT),
                    make_node("Cast", ["C2"], ["F4"], to=TensorProto.FLOAT),
                ],
                "g",
                [X],
                [F1, F2, F3, F4],
            )
        )
        ref = ReferenceEvaluator(model)
        data = np.array([0, 1, 2, 5e-2, 200], dtype=np.float32)
        # Expected values: round trip through the numpy float8 helpers.
        expected1 = np.array(
            [float8e4m3_to_float32(float32_to_float8e4m3(x)) for x in data]
        )
        expected2 = np.array(
            [float8e5m2_to_float32(float32_to_float8e5m2(x)) for x in data]
        )
        got = ref.run(None, {"X": data})
        assert_allclose(got[0], expected1)
        assert_allclose(got[1], expected2)
        # The Constant nodes must round trip exactly like the Cast nodes.
        assert_allclose(got[2], expected1)
        assert_allclose(got[3], expected2)
    def test_cast_like_float8(self):
        """CastLike with saturate=0 towards a FLOAT8E4M3FNUZ target.

        Runs once with the builtin CastLike kernel and once with its
        function-based (Expand) implementation; both must match the numpy
        float8 helpers.
        """
        X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
        Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        model = make_model(
            make_graph(
                [
                    make_node("Cast", ["X"], ["f8"], to=TensorProto.FLOAT8E4M3FNUZ),
                    make_node("CastLike", ["X", "f8"], ["f32"], saturate=0),
                    make_node("Cast", ["f32"], ["Y"], to=TensorProto.FLOAT),
                ],
                "g",
                [X],
                [Y],
            )
        )
        data = np.array([0, 1e7], dtype=np.float32)
        # 1e7 overflows the e4m3 range; with saturate=False it maps to NaN.
        expected = np.array(
            [
                float8e4m3_to_float32(
                    float32_to_float8e4m3(x, uz=True, saturate=False), uz=True
                )
                for x in data
            ]
        )
        ref = ReferenceEvaluator(model)
        got = ref.run(None, {"X": data})
        assert_allclose(got[0], expected)
        # Forces ReferenceEvaluator to not use the associated implementation for CastLike
        # but its implementation as a function instead.
        class CastLike(OpRunExpand):
            op_domain = ""
        ref = ReferenceEvaluator(model, new_ops=[CastLike])
        got = ref.run(None, {"X": data})
        assert_allclose(got[0], expected)
def test_cast_float8_output(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
F1 = make_tensor_value_info("F1", TensorProto.FLOAT8E4M3FN, [None])
F2 = make_tensor_value_info("F2", TensorProto.FLOAT8E5M2, [None])
model = make_model(
make_graph(
[
make_node("Cast", ["X"], ["F1"], to=TensorProto.FLOAT8E4M3FN),
make_node("Cast", ["X"], ["F2"], to=TensorProto.FLOAT8E5M2),
],
"g",
[X],
[F1, F2],
)
)
ref = ReferenceEvaluator(model)
data = np.array([0, 1, 2, 5e-2, 200], dtype=np.float32)
expected1 = np.array([float32_to_float8e4m3(x) for x in data])
expected2 = np.array([float32_to_float8e5m2(x) for x in data])
got = ref.run(None, {"X": data})
self.assertEqual(expected1.tolist(), got[0].tolist())
self.assertEqual(expected2.tolist(), got[1].tolist())
    def test_float8_4_types(self):
        """Cast float32 to each of the four float8 types and back.

        The expected tables below encode the saturation/rounding behaviour of
        each float8 flavour (different max values, NaN/inf handling).
        """
        x = np.array(
            [
                0.4068359375,
                352,
                416,
                336,
                304,
                272,
                -248,
                -100,
                1e-4,
                1e-2,
                416,
                432,
                1e5,
                np.inf,
                -np.inf,
                np.nan,
            ],
            dtype=np.float32,
        )
        # Expected round-trip value per float8 type, one entry per input above.
        expected = {
            TensorProto.FLOAT8E4M3FN: np.array(
                [
                    0.40625,
                    352.0,
                    416.0,
                    320.0,
                    320.0,
                    256.0,
                    -256.0,
                    -96.0,
                    0.0,
                    0.009765625,
                    416.0,
                    448.0,
                    448.0,
                    448.0,
                    -448.0,
                    np.nan,
                ],
                dtype=np.float32,
            ),
            TensorProto.FLOAT8E4M3FNUZ: np.array(
                [
                    0.40625,
                    240.0,
                    240.0,
                    240.0,
                    240.0,
                    240.0,
                    -240.0,
                    -104.0,
                    0.0,
                    0.009765625,
                    240.0,
                    240.0,
                    240.0,
                    240.0,
                    -240.0,
                    np.nan,
                ],
                dtype=np.float32,
            ),
            TensorProto.FLOAT8E5M2: np.array(
                [
                    0.4375,
                    384.0,
                    384.0,
                    320.0,
                    320.0,
                    256.0,
                    -256.0,
                    -96.0,
                    0.0001068115234375,
                    0.009765625,
                    384.0,
                    448.0,
                    57344.0,
                    57344.0,
                    -57344.0,
                    np.nan,
                ],
                dtype=np.float32,
            ),
            TensorProto.FLOAT8E5M2FNUZ: np.array(
                [
                    4.3750000e-01,
                    3.8400000e02,
                    4.4800000e02,
                    3.2000000e02,
                    3.2000000e02,
                    2.5600000e02,
                    -2.5600000e02,
                    -9.6000000e01,
                    1.0681152e-04,
                    9.7656250e-03,
                    4.4800000e02,
                    4.4800000e02,
                    5.7344000e04,
                    5.7344000e04,
                    -5.7344000e04,
                    np.nan,
                ],
                dtype=np.float32,
            ),
        }
        def model_cast_cast(to):
            # float32 -> float8(to) -> float32 round-trip model.
            X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
            Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
            node1 = make_node("Cast", ["X"], ["T"], to=to)
            node2 = make_node("Cast", ["T"], ["Y"], to=TensorProto.FLOAT)
            graph = make_graph([node1, node2], "lr", [X], [Y])
            onnx_model = make_model(graph)
            check_model(onnx_model)
            return onnx_model
        for to, expect in expected.items():
            with self.subTest(to=to):
                onnx_model = model_cast_cast(to)
                ref = ReferenceEvaluator(onnx_model)
                y = ref.run(None, {"X": x})[0]
                assert_allclose(expect, y)
                self.assertEqual(expect.shape, y.shape)
                self.assertEqual(expect.dtype, y.dtype)
def test_cast_bfloat16_output(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y = make_tensor_value_info("Y", TensorProto.BFLOAT16, [None])
model = make_model(
make_graph(
[
make_node("Cast", ["X"], ["Y"], to=TensorProto.BFLOAT16),
],
"g",
[X],
[Y],
)
)
ref = ReferenceEvaluator(model)
data = np.array([0, 1, 2, 1e5, 200], dtype=np.float32)
expected1 = np.array([float32_to_bfloat16(x) for x in data])
got = ref.run(None, {"X": data})
self.assertEqual(expected1.tolist(), got[0].tolist())
def test_quantize_linear_e4m3(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
model = make_model(
make_graph(
[
make_node(
"Constant",
[],
["scale"],
value=make_tensor("scale", TensorProto.FLOAT, [1], [2.0]),
),
make_node(
"Constant",
[],
["zero"],
value=make_tensor("zero", TensorProto.FLOAT8E4M3FN, [1], [0.0]),
),
make_node("QuantizeLinear", ["X", "scale", "zero"], ["T"]),
make_node("DequantizeLinear", ["T", "scale"], ["Y"], axis=0),
],
"g",
[X],
[Y],
)
)
ref = ReferenceEvaluator(model)
data = np.array([0, 1, 2, 1e5, 200], dtype=np.float32)
expected = np.array([0, 1, 2, 896, 192], dtype=np.float32)
got = ref.run(None, {"X": data})
assert_allclose(expected, got[0])
def test_quantize_linear_e4m3_initializer(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
model = make_model(
make_graph(
[
make_node("QuantizeLinear", ["X", "scale", "zero"], ["T"]),
make_node("DequantizeLinear", ["T", "scale"], ["Y"], axis=0),
],
"g",
[X],
[Y],
[
make_tensor("scale", TensorProto.FLOAT, [1], [2.0]),
make_tensor("zero", TensorProto.FLOAT8E4M3FN, [1], [0.0]),
],
)
)
ref = ReferenceEvaluator(model)
data = np.array([0, 1, 2, 1e5, 200], dtype=np.float32)
expected = np.array([0, 1, 2, 896, 192], dtype=np.float32)
got = ref.run(None, {"X": data})
assert_allclose(expected, got[0])
def test_quantize_linear_e5m2(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, [None])
Y = make_tensor_value_info("Y", TensorProto.FLOAT, [None])
model = make_model(
make_graph(
[
make_node(
"Constant",
[],
["scale"],
value=make_tensor("scale", TensorProto.FLOAT, [1], [2.0]),
),
make_node(
"Constant",
[],
["zero"],
value=make_tensor("zero", TensorProto.FLOAT8E5M2, [1], [0.0]),
),
make_node("QuantizeLinear", ["X", "scale", "zero"], ["T"]),
make_node("DequantizeLinear", ["T", "scale"], ["Y"], axis=0),
],
"g",
[X],
[Y],
)
)
ref = ReferenceEvaluator(model)
data = np.array([0, 1, 2, 1e5, 200], dtype=np.float32)
expected = np.array([0, 1, 2, 98304, 192], dtype=np.float32)
got = ref.run(None, {"X": data})
assert_allclose(expected, got[0])
def test_lrn(self):
def _expected(x, alpha, beta, bias, size):
square_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)
for n, c, h, w in np.ndindex(x.shape):
square_sum[n, c, h, w] = sum(
x[
n,
max(0, c - int(math.floor((size - 1) / 2))) : min(
5, c + int(math.ceil((size - 1) / 2)) + 1
),
h,
w,
]
** 2
)
y = x / ((bias + (alpha / size) * square_sum) ** beta)
return y
# keepdims is ignored in that case
alpha = 0.0002
beta = 0.5
bias = 2.0
size = 3
X = make_tensor_value_info("X", TensorProto.FLOAT, [5, 5, 50, 50])
Z = make_tensor_value_info("Z", TensorProto.UNDEFINED, None)
nodes = [
make_node("LRN", ["X"], ["Z"], alpha=alpha, beta=beta, bias=bias, size=size)
]
model = make_model(make_graph(nodes, "g", [X], [Z]))
ref = ReferenceEvaluator(model)
data = np.random.rand(5, 5, 5, 5).astype(np.float32)
got = ref.run(None, {"X": data})
expected = _expected(data, alpha, beta, bias, size)
self.assertEqual(len(expected), len(got[0]))
def test_conv_im2col_1d(self):
feeds = {
"X": np.arange(1 * 1 * 11).reshape((1, 1, 11)).astype(np.float32) + 1,
"W": np.arange(3).reshape((1, 1, 3)).astype(np.float32),
"B": np.zeros((1,), dtype=np.float32),
}
kwargs = dict(
group=1,
dilations=[1],
kernel_shape=[3],
pads=[1, 1],
strides=[1],
auto_pad="NOTSET",
)
expected = _conv_implementation(**feeds, **kwargs)
got = _conv_implementation_im2col(**feeds, **kwargs)
assert_allclose(expected, got)
def test_conv_im2col_1d_pad0(self):
feeds = {
"X": np.arange(2 * 4 * 3).reshape((2, 4, -1)).astype(np.float32) + 1,
"W": np.arange(2 * 4 * 3).reshape((-1, 4, 3)).astype(np.float32),
"B": np.zeros((1,), dtype=np.float32),
}
kwargs = dict(
group=1,
dilations=[1],
kernel_shape=[3],
pads=[0, 0],
strides=[1],
auto_pad="NOTSET",
)
expected = _conv_implementation(**feeds, **kwargs)
got = _conv_implementation_im2col(**feeds, **kwargs)
assert_allclose(expected, got)
def test_conv_im2col_2d(self):
feeds = {
"X": np.arange(1 * 1 * 11 * 23).reshape((1, 1, 11, 23)).astype(np.float32)
+ 1,
"W": np.arange(9).reshape((1, 1, 3, 3)).astype(np.float32),
"B": np.zeros((1,), dtype=np.float32),
}
kwargs = dict(
group=1,
dilations=[1, 1],
kernel_shape=[3, 3],
pads=[1, 1, 1, 1],
strides=[1, 1],
auto_pad="NOTSET",
)
expected = _conv_implementation(**feeds, **kwargs)
got = _conv_implementation_im2col(**feeds, **kwargs)
assert_allclose(expected, got)
def test_conv_im2col_2d_pad0(self):
feeds = {
"X": np.arange(2 * 3 * 5 * 2).reshape((2, 3, 5, -1)).astype(np.float32) + 1,
"W": 2
** np.arange(3 * 3 * 1 * 2).reshape((-1, 3, 1, 2)).astype(np.float32),
"B": np.zeros((1,), dtype=np.float32),
}
kwargs = dict(
group=1,
dilations=[1, 1],
kernel_shape=[1, 2],
pads=[0, 0, 0, 0],
strides=[1, 1],
auto_pad="NOTSET",
)
expected = _conv_implementation(**feeds, **kwargs)
got = _conv_implementation_im2col(**feeds, **kwargs)
assert_allclose(expected, got)
def test_conv_im2col_2d_autopad(self):
feeds = {
"X": np.arange(5 * 5).reshape((1, 1, 5, -1)).astype(np.float32) + 1,
"W": 2 ** np.arange(3 * 3).reshape((1, 1, 3, 3)).astype(np.float32),
"B": np.zeros((1,), dtype=np.float32),
}
kwargs = dict(
group=1,
dilations=[1, 1],
kernel_shape=[3, 3],
strides=[2, 2],
pads=None,
auto_pad="SAME_LOWER",
)
expected = _conv_implementation(**feeds, **kwargs)
got = _conv_implementation_im2col(**feeds, **kwargs)
assert_allclose(expected, got)
def test_conv_im2col_3d(self):
feeds = {
"X": np.arange(1 * 1 * 11 * 5 * 13)
.reshape((1, 1, 11, 5, 13))
.astype(np.float32)
+ 1,
"W": np.arange(27).reshape((1, 1, 3, 3, 3)).astype(np.float32),
"B": np.zeros((1,), dtype=np.float32),
}
kwargs = dict(
group=1,
dilations=[1, 1, 1],
kernel_shape=[3, 3, 3],
pads=[1, 1, 1, 1, 1, 1],
strides=[1, 1, 1],
auto_pad="NOTSET",
)
expected = _conv_implementation(**feeds, **kwargs)
got = _conv_implementation_im2col(**feeds, **kwargs)
assert_allclose(expected, got)
def test_conv_im2col_2d_strides(self):
feeds = {
"X": np.arange(1 * 3 * 6 * 6).reshape((1, 3, 6, 6)).astype(np.float32) + 1,
"W": np.arange(2 * 3 * 3 * 3).reshape((2, 3, 3, 3)).astype(np.float32),
"B": np.zeros((2,), dtype=np.float32),
}
kwargs = dict(
group=1,
dilations=[1, 1],
kernel_shape=[3, 3],
pads=[1, 1, 1, 1],
strides=[2, 2],
auto_pad="NOTSET",
)
expected = _conv_implementation(**feeds, **kwargs)
got = _conv_implementation_im2col(**feeds, **kwargs)
assert_allclose(expected, got)
def test_conv_im2col_2d_dilations(self):
feeds = {
"X": np.arange(1 * 3 * 6 * 6).reshape((1, 3, 6, 6)).astype(np.float32) + 1,
"W": np.arange(2 * 3 * 3 * 3).reshape((2, 3, 3, 3)).astype(np.float32),
"B": np.zeros((2,), dtype=np.float32),
}
kwargs = dict(
group=1,
dilations=[2, 1],
kernel_shape=[3, 3],
pads=[1, 1, 1, 1],
strides=[2, 2],
auto_pad="NOTSET",
)
expected = _conv_implementation(**feeds, **kwargs)
got = _conv_implementation_im2col(**feeds, **kwargs)
assert_allclose(expected, got)
@parameterized.parameterized.expand(
[
("ReduceSum",),
("ReduceL1",),
("ReduceL2",),
("ReduceMin",),
("ReduceMax",),
("ReduceProd",),
("ReduceSumSquare",),
]
)
def test_reduce_op_no_axis(self, op):
X = make_tensor_value_info("X", TensorProto.FLOAT, None)
Y = make_tensor_value_info("Y", TensorProto.FLOAT, None)
data = np.arange(6).reshape((1, 3, 2)).astype(np.float32)
nodes = [make_node(op, ["X"], ["Y"], keepdims=0)]
model = make_model(make_graph(nodes, "g", [X], [Y]))
ref = ReferenceEvaluator(model)
got = ref.run(None, {"X": data})
r = got[0]
self.assertIsInstance(r, np.ndarray)
self.assertEqual(r.shape, ())
@parameterized.parameterized.expand([(1,), (2,), (3,), (4,), (5,), (6,)])
def test_pad(self, dim):
X = make_tensor_value_info("X", TensorProto.FLOAT, None)
P = make_tensor_value_info("P", TensorProto.INT64, None)
V = make_tensor_value_info("V", TensorProto.FLOAT, None)
Y = make_tensor_value_info("Y", TensorProto.FLOAT, None)
value = np.array([-5], dtype=np.float32)
node = make_node("Pad", inputs=["X", "P", "V"], outputs=["Y"], mode="constant")
model = make_model(make_graph([node], "g", [X, P, V], [Y]))
ref = ReferenceEvaluator(model)
x = np.array([1], dtype=np.float32).reshape((1,) * dim)
p = np.array([1, 1] * dim, dtype=np.int64)
got = ref.run(None, {"X": x, "P": p, "V": value})[0]
self.assertEqual(got.shape, (3,) * dim)
self.assertEqual(got.dtype, np.float32)
p = np.repeat([7, 3], dim).astype(np.int64)
got = ref.run(None, {"X": x, "P": p, "V": value})[0]
self.assertEqual(got.shape, (11,) * dim)
self.assertEqual(got.dtype, np.float32)
def test_constant_of_shape(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, None)
Y = make_tensor_value_info("Y", TensorProto.FLOAT, None)
nodes = [
make_node("Shape", inputs=["X"], outputs=["shape"]),
make_node(
"ConstantOfShape",
inputs=["shape"],
outputs=["Y"],
value=make_tensor("value", TensorProto.UINT16, [1], [1]),
),
]
model = make_model(make_graph(nodes, "g", [X], [Y]))
ref = ReferenceEvaluator(model)
x = np.array(1, dtype=np.float32)
got = ref.run(None, {"X": x})[0]
self.assertEqual(got.shape, tuple())
self.assertEqual(got.dtype, np.uint16)
assert_allclose(np.array(1, dtype=np.uint16), got)
def test_constant_of_shape_castlike(self):
X = make_tensor_value_info("X", TensorProto.FLOAT, None)
Y = make_tensor_value_info("Y", TensorProto.FLOAT, None)
nodes = [
make_node(
"Constant",
[],
["like"],
value=make_tensor("c", TensorProto.UINT16, [1], [2]),
),
make_node("Shape", inputs=["X"], outputs=["shape"]),
make_node(
"ConstantOfShape",
inputs=["shape"],
outputs=["cst"],
value=make_tensor("value", TensorProto.INT64, [1], [1]),
),
make_node("CastLike", ["cst", "like"], ["Y"]),
]
model = make_model(make_graph(nodes, "g", [X], [Y]))
ref = ReferenceEvaluator(model)
x = np.array(1, dtype=np.float32)
got = ref.run(None, {"X": x})[0]
self.assertEqual(got.shape, tuple())
self.assertEqual(got.dtype, np.uint16)
assert_allclose(np.array(1, dtype=np.uint16), got)
@parameterized.parameterized.expand(
[
(["abc", "def"], [".com", ".net"], ["abc.com", "def.net"], (2,)),
(["cat", "dog", "snake"], ["s"], ["cats", "dogs", "snakes"], (3,)),
("cat", "s", "cats", ()),
(["a", "ß", "y"], ["a", "ß", "y"], ["aa", "ßß", "yy"], (3,)),
]
)
def test_string_concat(self, a, b, expected, expected_shape):
A = make_tensor_value_info("A", TensorProto.STRING, None)
B = make_tensor_value_info("B", TensorProto.STRING, None)
Y = make_tensor_value_info("Y", TensorProto.STRING, None)
node = make_node("StringConcat", inputs=["A", "B"], outputs=["Y"])
model = make_model(make_graph([node], "g", [A, B], [Y]))
ref = ReferenceEvaluator(model)
result, *_ = ref.run(None, {"A": np.array(a), "B": np.array(b)})
np.testing.assert_array_equal(result, expected)
self.assertEqual(result.dtype.kind, "O")
self.assertEqual(result.shape, expected_shape)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main(verbosity=2)
| 145,760 | 37.469517 | 115 | py |
onnx | onnx-main/onnx/test/reference_evaluator_backend_test.py | # Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# type: ignore
# pylint: disable=C0415,R0912,R0913,R0914,R0915,W0613,W0640,W0703
"""
These tests evaluate the Python runtime (class ReferenceEvaluator) against
all the backend tests (in onnx/backend/test/case/node) and checks
the runtime produces the expected outputs.
You may run one specific test with following command line:
::
python onnx/test/reference_evaluator_backend_test.py TestOnnxBackEndWithReferenceEvaluator.test_group_normalization_example
You may bypass a test newly added by adding to the global variable `SKIP_TESTS`.
You may refine the absolute or relative tolerance for a test by
adding an item in method `setUpClass` and attributes
`atol` or `rtol`.
"""
import os
import pprint
import unittest
try:
from packaging.version import parse as version
except ImportError:
from distutils.version import ( # noqa: N813 # pylint: disable=deprecated-module
StrictVersion as version,
)
from os import getenv
import numpy as np
from numpy import __version__ as npver
from numpy import object_ as dtype_object
from numpy.testing import assert_allclose # type: ignore
from onnx import ONNX_ML, OptionalProto, SequenceProto, TensorProto, load
from onnx.backend.test import __file__ as backend_folder
from onnx.helper import __file__ as onnx_file
from onnx.numpy_helper import bfloat16_to_float32, to_list, to_optional
from onnx.reference import ReferenceEvaluator
from onnx.reference.op_run import to_array_extended
from onnx.reference.ops.op_cast import cast_to
# TODO (https://github.com/microsoft/onnxruntime/issues/14932): Get max supported version from onnxruntime directly
# For now, bump the version in CIs whenever there is a new onnxruntime release
ORT_MAX_IR_SUPPORTED_VERSION = int(getenv("ORT_MAX_IR_SUPPORTED_VERSION", "8"))
ORT_MAX_ONNX_OPSET_SUPPORTED_VERSION = int(
    getenv("ORT_MAX_ONNX_OPSET_SUPPORTED_VERSION", "18")
)
# Number of tests expected to pass without raising an exception.
MIN_PASSING_TESTS = 1235
# Update this list if one new operator does not have any implementation.
SKIP_TESTS = {
    # mismatches
    # shapes (10, 9, 3), (10, 8, 3) shape mismatch unexpected as the operator is inlined
    "test_center_crop_pad_crop_axes_hwc_expanded",
    # deprecated
    "test_scan_sum",  # deprecated, opset 8 -> not implemented
    "test_scatter_with_axis",  # deprecated, scatter is removed
    "test_scatter_without_axis",  # deprecated, scatter is removed
    # not implemented
    "test__simple_gradient_of_add",  # gradient not implemented
    "test__simple_gradient_of_add_and_mul",  # gradient not implemented
    "test_lppool_2d_dilations",  # CommonPool._run returns incorrect output shape when dilations is set
    "test_averagepool_2d_dilations",  # CommonPool._run returns incorrect output shape when dilations is set
}
# BFLOAT16 casting requires a recent numpy; skip those tests otherwise.
# (Bug fix: this version-check block was previously duplicated verbatim.)
if version(npver) < version("1.21.5"):
    SKIP_TESTS |= {
        "test_cast_FLOAT_to_BFLOAT16",
        "test_castlike_FLOAT_to_BFLOAT16",
        "test_castlike_FLOAT_to_BFLOAT16_expanded",
    }
def assert_allclose_string(expected, value):
    """Compare two arrays known to contain strings.

    If every expected element parses as a float, the comparison is numeric
    (via ``assert_allclose`` after a float32 conversion); otherwise the two
    arrays must match exactly as lists. Raises an exception on mismatch.

    :param expected: expected array
    :param value: actual array
    """

    def _parses_as_float(token):
        try:
            float(token)
        except ValueError:
            return False
        return True

    if all(_parses_as_float(t) for t in expected.ravel()):
        assert_allclose(expected.astype(np.float32), value.astype(np.float32))
    elif expected.tolist() != value.tolist():
        raise AssertionError(f"Mismatches {expected} != {value}.")
class OnnxBackendTest:
"""
Definition of a backend test. It starts with a folder,
in this folder, one onnx file must be there, then a subfolder
for each test to run with this model.
:param folder: test folder
:param onnx_path: onnx file
:param onnx_model: loaded onnx file
:param tests: list of test
"""
@staticmethod
def _sort(filenames):
temp = []
for f in filenames:
name = os.path.splitext(f)[0]
i = name.split("_")[-1]
temp.append((int(i), f))
temp.sort()
return [_[1] for _ in temp]
    @staticmethod
    def _read_proto_from_file(full):
        # Reads one serialized protobuf file from disk and decodes it into a
        # python object (tensor, sequence or optional).
        if not os.path.exists(full):
            raise FileNotFoundError(f"File not found: {full!r}.")
        with open(full, "rb") as f:
            serialized = f.read()
        return OnnxBackendTest._read_proto_from_serialized(serialized, full)
@staticmethod
def _read_proto_from_serialized(serialized, full):
if not os.path.exists(full):
raise FileNotFoundError(f"File not found: {full!r}.")
with open(full, "rb") as f:
serialized = f.read()
proto_types = [
(TensorProto, to_array_extended),
(SequenceProto, to_list),
(OptionalProto, to_optional),
]
exc = None
for pt, cvt in proto_types:
obj = pt()
try:
obj.ParseFromString(serialized)
try:
return cvt(obj)
except ValueError as e:
exc = e
continue
except Exception as e:
exc = e
raise RuntimeError(
f"Unable to read {full!r}, error is {exc}, "
f"content is {serialized[:100]!r}."
) from exc
@staticmethod
def _load(folder, names):
res = []
for name in names:
full = os.path.join(folder, name)
obj = OnnxBackendTest._read_proto_from_file(full)
res.append(obj)
return res
def __repr__(self):
"usual"
return f"{self.__class__.__name__}({self.folder!r})"
def __init__(self, folder):
if not os.path.exists(folder):
raise FileNotFoundError(f"Unable to find folder {folder!r}.")
content = os.listdir(folder)
onx = [c for c in content if os.path.splitext(c)[-1] in {".onnx"}]
if len(onx) != 1:
raise ValueError(
f"There is more than one onnx file in {folder!r} ({onx!r})."
)
self.folder = folder
self.onnx_path = os.path.join(folder, onx[0])
self.onnx_model = load(self.onnx_path)
self.tests = []
for sub in content:
full = os.path.join(folder, sub)
if os.path.isdir(full):
pb = [c for c in os.listdir(full) if os.path.splitext(c)[-1] in {".pb"}]
inputs = OnnxBackendTest._sort(c for c in pb if c.startswith("input_"))
outputs = OnnxBackendTest._sort(
c for c in pb if c.startswith("output_")
)
self.tests.append(
{
"inputs": OnnxBackendTest._load(full, inputs),
"outputs": OnnxBackendTest._load(full, outputs),
}
)
@property
def name(self):
"Returns the test name."
return os.path.split(self.folder)[-1]
    @property
    def fname(self):
        """Full test name including the parent folder when it is not ``*node``.

        Folders ending in ``node`` keep the plain test name; any other folder
        is folded into the name as ``test__<folder>_<name-without-'test_'>``.
        """
        folder = self.folder.replace("\\", "/").split("/")[-2]
        if folder.endswith("node"):
            fname = self.name
        else:
            # self.name starts with "test_"; drop that 5-character prefix
            # before appending it to the folder-derived prefix.
            fname = f"test__{folder.replace('-', '_')}_{self.name[5:]}"
        # Sanity check against malformed names observed in the past.
        if "/" in fname or fname == "test__test_AvgPool1d_AvgPool1d":
            raise AssertionError(
                f"name={self.name!r}, folder={folder!r}, self.folder={self.folder}."
            )
        return fname
def __len__(self):
"Returns the number of tests."
return len(self.tests)
    def _compare_results(
        self, index, i_output, desired, output, rtol=0, atol=0, comment="", inputs=None
    ):
        """
        Compares the expected output and the output produced
        by the runtime. Raises an exception if not equal.

        :param index: test index
        :param i_output: output index
        :param desired: expected output
        :param output: output
        :param rtol: relative tolerance
        :param atol: absolute tolerance
        :param comment: addition text to give more insights to the user
        :param inputs: inputs to the model
        """
        if comment == "":
            raise RuntimeError("Argument comment should be filled.")
        # None tolerances are normalized to 0 (exact comparison unless a
        # dtype-specific default kicks in below).
        if atol is None:
            atol = 0
        if rtol is None:
            rtol = 0
        if isinstance(desired, np.ndarray):
            if isinstance(output, np.ndarray):
                # Pick a default relative tolerance from the expected dtype
                # when the caller did not give one.
                if rtol == 0:
                    if desired.dtype == np.float32:
                        rtl = 1e-5
                    elif desired.dtype == np.float64:
                        rtl = 1e-12
                    else:
                        rtl = rtol
                else:
                    rtl = rtol
                if desired.dtype == dtype_object:
                    # Object arrays hold strings: compare via the string helper.
                    try:
                        assert_allclose_string(desired, output)
                    except AssertionError as ex:
                        raise AssertionError(
                            f"Output {i_output} of test {index} in folder {self.folder!r} failed, comment={comment}."
                        ) from ex
                else:
                    # NaNs compare equal only for native float dtypes.
                    equal_nan = desired.dtype in (np.float16, np.float32, np.float64)
                    if equal_nan:
                        try:
                            assert_allclose(
                                desired,
                                output,
                                atol=atol,
                                rtol=rtl,
                                equal_nan=equal_nan,
                            )
                        except AssertionError as ex:
                            # Include the raw difference in the message when shapes allow it.
                            try:
                                diff = output - desired
                            except ValueError:
                                diff = None
                            raise AssertionError(
                                f"Output {i_output} of test {index} in folder {self.folder!r} failed "
                                f"(rtol={rtl}, atol={atol}), comment={comment}\n---\n{desired}\n----"
                                f"\n{output}\n-----\n{diff}\n------INPUTS----\n{pprint.pformat(inputs)}."
                            ) from ex
                    else:
                        # float 8 types
                        # Compared bitwise: dtype, values and shape must match exactly.
                        if desired.dtype != output.dtype:
                            raise AssertionError(
                                f"Output {i_output} of test {index} in folder {self.folder!r} "
                                f"has unexpected type {output.dtype} (expecting {desired.dtype}.)"
                            )
                        if desired.tolist() != output.tolist():
                            raise AssertionError(
                                f"Output {i_output} of test {index} in folder {self.folder!r} "
                                f"has unexpected values {output} (expecting {desired}.)"
                            )
                if desired.shape != output.shape:
                    raise AssertionError(
                        f"Output {i_output} of test {index} in folder {self.folder!r} failed "
                        f"(expected shape={desired.shape} but shape={output.shape}), "
                        f"comment={comment}\n---\n{desired}\n----"
                        f"\n{output}\n------INPUTS----\n{pprint.pformat(inputs)}."
                    )
            elif hasattr(output, "is_compatible"):
                # A shape
                # The runtime returned a symbolic shape object instead of an
                # array; check dtype and shape compatibility only.
                if desired.dtype != output.dtype:
                    raise AssertionError(
                        f"Output {i_output} of test {index} in folder {self.folder!r} failed "
                        f"(desired.dtype={desired.dtype!r}, output={output!r}), comment={comment}."
                    )
                if not output.is_compatible(desired.shape):
                    raise AssertionError(
                        f"Output {i_output} of test {index} in folder {self.folder!r} failed "
                        f"(desired.shape={desired.shape}, output={output!r}), comment={comment}."
                    )
        elif isinstance(desired, list):
            # Sequences: compare lengths, then recurse element by element.
            if not isinstance(output, list):
                raise AssertionError(
                    f"Expected result is 'list' but output type is {type(output)} for output {i_output}"
                    f", comment={comment}\n--EXPECTED--\n{desired}\n--GOT--\n{output}."
                )
            if len(desired) != len(output):
                raise AssertionError(
                    f"Expected has {len(desired)} but output has {len(output)} for output {i_output}"
                    f", comment={comment}\n--EXPECTED--\n{desired}\n--GOT--\n{output}."
                )
            for a, b in zip(desired, output):
                self._compare_results(
                    index, i_output, a, b, rtol=rtol, atol=atol, comment=comment
                )
        else:
            raise NotImplementedError(
                f"Comparison not implemented for type {type(desired)} and output {i_output}, comment={comment}."
            )
def is_random(self):
"Tells if a test is random or not."
if "bernoulli" in self.folder:
return True
return False
    def run(
        self,
        load_fct,
        run_fct,
        index=None,
        rtol=1e-07,
        atol=0,
        comment="",
        print_io=False,
    ):
        """
        Executes a test or all tests if index is None.
        The function crashes if the test fails.

        :param load_fct: loading function, takes a loaded onnx graph,
            and returns an object
        :param run_fct: running function, takes the result of previous
            function, the inputs, and returns the outputs
        :param index: index of the test to run or all.
        :param rtol: relative tolerance
        :param atol: absolute tolerance
        :param comment: additional information for the user
        :param print_io: prints out the input and output
        """
        if index is None:
            # Run every stored test case and collect the individual results.
            res = []
            for i in range(len(self)):
                res.append(
                    self.run(
                        load_fct,
                        run_fct,
                        index=i,
                        atol=atol,
                        rtol=rtol,
                        comment=comment,
                        print_io=print_io,
                    )
                )
            return res
        if print_io:
            # Diagnostic dump of input/output shapes before execution.
            print("------ INPUTS")
            for k, v in enumerate(self.tests[index]["inputs"]):
                print(f"input {k!r}, shape={v.shape}, dtype={v.dtype}")
            print("------ EXPECTED OUTPUTS")
            for k, v in enumerate(self.tests[index]["outputs"]):
                print(f"output {k!r}, shape={v.shape}, dtype={v.dtype}")
        obj = load_fct(self.onnx_model)
        got = run_fct(obj, *self.tests[index]["inputs"])
        expected = self.tests[index]["outputs"]
        if len(got) != len(expected):
            raise AssertionError(
                f"Unexpected number of output (test {index}, folder {self.folder!r}), "
                f"got {len(got)}, expected {len(expected)}."
            )
        res = {
            "inputs": self.tests[index]["inputs"],
            "expected": self.tests[index]["outputs"],
            "results": got,
        }
        for i, (e, o) in enumerate(zip(expected, got)):
            if self.is_random():
                # Random operators cannot be compared by value; only check
                # dtype and shape of each produced output.
                if e.dtype != o.dtype:
                    raise AssertionError(
                        f"Output {i} of test {index} in folder {self.folder!r} failed "
                        f"(type mismatch {e.dtype} != {o.dtype!r})."
                    )
                if e.shape != o.shape:
                    raise AssertionError(
                        f"Output {i} of test {index} in folder {self.folder!r} failed "
                        f"(shape mismatch {e.shape} != {o.shape})."
                    )
            else:
                # Deterministic operator: full value comparison within tolerance.
                self._compare_results(
                    index,
                    i,
                    e,
                    o,
                    atol=atol,
                    rtol=rtol,
                    comment=comment + "\n" + str(self.onnx_model),
                    inputs=self.tests[index]["inputs"],
                )
        return res
def enumerate_onnx_tests(series, fct_filter=None):
    """
    Collects tests from a sub folder of `onnx/backend/test`.
    Works as an enumerator to start processing them
    without waiting or storing too much of them.

    :param series: which subfolder to load, possible values:
        (`'node'`, ...)
    :param fct_filter: function `lambda testname: boolean`
        to load or skip the test, None for all
    :return: iterator of @see cl OnnxBackendTest
    """
    root = os.path.dirname(backend_folder)
    sub = os.path.join(root, "data", series)
    if not os.path.exists(sub):
        content = "\n".join(os.listdir(root))
        raise FileNotFoundError(
            f"Unable to find series of tests in {root!r}, subfolders:\n{content}"
        )
    for name in os.listdir(sub):
        # Honour the caller-provided filter, if any.
        if fct_filter is not None and not fct_filter(name):
            continue
        folder = os.path.join(sub, name)
        # ai.onnx.ml tests require an ONNX build with ML operators enabled.
        if not ONNX_ML and "ai_onnx_ml" in folder:
            continue
        entries = os.listdir(folder)
        models = [e for e in entries if os.path.splitext(e)[-1] == ".onnx"]
        # Yield only folders that hold exactly one model file.
        if len(models) == 1:
            yield OnnxBackendTest(folder)
class TestOnnxBackEndWithReferenceEvaluator(unittest.TestCase):
    """Runs every ONNX backend test against ``ReferenceEvaluator``.

    Test methods are generated dynamically by :meth:`add_test_methods`,
    one per backend test folder. Results are accumulated in class-level
    lists (``successes``, ``missed``, ``skipped``, ``load_failed``,
    ``exec_failed``, ``mismatch``) and summarized in :meth:`tearDownClass`.
    """

    # Folder where helper tooling may dump python code for failing tests.
    folder = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "onnx_backend_test_code"
    )

    @classmethod
    def add_test_methods(cls):
        """Attaches one unittest method per discovered backend test folder."""
        for folder in ["node", "pytorch-converted", "pytorch-operator", "simple"]:
            for te in enumerate_onnx_tests(folder):

                def _test_(
                    self, te=te, check_other_runtime=None, verbose=0, print_io=False
                ):
                    # Record and skip tests explicitly disabled for this run.
                    if te.fname in getattr(cls, "skip_test", set()):
                        cls.skipped.append((te, None))
                        return
                    rtol = getattr(cls, "rtol", {})
                    atol = getattr(cls, "atol", {})
                    if len(rtol) == 0 or len(atol) == 0:
                        raise AssertionError("rtol or atol is empty.")
                    self.common_test_onnx_test_run(
                        te,
                        getattr(cls, "successes", []),
                        getattr(cls, "missed", []),
                        getattr(cls, "skipped", []),
                        getattr(cls, "load_failed", []),
                        getattr(cls, "exec_failed", []),
                        getattr(cls, "mismatch", []),
                        verbose=verbose,
                        rtol=rtol,
                        atol=atol,
                        check_other_runtime=check_other_runtime,
                        print_io=print_io,
                    )

                setattr(TestOnnxBackEndWithReferenceEvaluator, te.fname, _test_)

    def test_onnx_backend_test_abs(self):
        """Sanity check: the 'node' series exposes exactly one test_abs."""
        name = "test_abs"
        code = []
        for te in enumerate_onnx_tests("node", lambda folder: folder == name):
            code.append(te)
        self.assertEqual(len(code), 1)

    def test_onnx_backend_test_expand_shape_model1(self):
        """Sanity check: the 'simple' series exposes test_expand_shape_model1."""
        name = "test_expand_shape_model1"
        code = []
        for te in enumerate_onnx_tests("simple", lambda folder: folder == name):
            code.append(te)
        self.assertEqual(len(code), 1)

    @staticmethod
    def load_fct(obj, verbose=0):
        """Builds the evaluator used to run one backend test model."""
        return ReferenceEvaluator(obj, verbose=verbose)

    @staticmethod
    def run_fct(obj, *inputs, verbose=0):  # pylint: disable=W0613
        """Runs `obj` (an evaluator or an InferenceSession) on `inputs`.

        Inputs stored as uint16 but typed bfloat16 by the model are
        converted before execution, since numpy has no bfloat16 dtype.
        """
        if hasattr(obj, "input_names"):
            input_names = obj.input_names
        elif hasattr(obj, "get_inputs"):
            # onnxruntime InferenceSession exposes get_inputs() instead.
            input_names = [_.name for _ in obj.get_inputs()]
        else:
            raise AttributeError(
                f"Unable to extract the number to guess the number of inputs for type {type(obj)}."
            )
        if len(input_names) < len(inputs):
            # FIX: formerly formatted len(obj.input_names), which raised
            # AttributeError (not the intended AssertionError) for runtimes
            # that only expose get_inputs().
            raise AssertionError(
                f"Got {len(inputs)} inputs but expecting {len(input_names)}."
            )
        rewrite = False
        for i in range(len(inputs)):  # pylint: disable=C0200
            if (
                isinstance(inputs[i], np.ndarray)
                and inputs[i].dtype == np.uint16
                and obj.input_types[i].tensor_type.elem_type != TensorProto.UINT16
            ):
                rewrite = True
        if rewrite:
            # bfloat16 does not exist for numpy.
            inputs = list(inputs)
            for i in range(len(inputs)):  # pylint: disable=C0200
                if (
                    isinstance(inputs[i], np.ndarray)
                    and inputs[i].dtype == np.uint16
                    and obj.input_types[i].tensor_type.elem_type != TensorProto.UINT16
                ):
                    # Convert element-wise through float32, then cast the
                    # whole array back to the BFLOAT16 representation.
                    xr = inputs[i].ravel()
                    xf = np.empty(xr.shape[0], dtype=np.float32)
                    for ie in range(xr.shape[0]):
                        el = bfloat16_to_float32(xr[ie])
                        xf[ie] = el
                    inputs[i] = cast_to(
                        xf.astype(np.float32).reshape(inputs[i].shape),
                        TensorProto.BFLOAT16,
                        True,
                    )
        feeds = {input_names[i]: inputs[i] for i in range(len(inputs))}
        got = obj.run(None, feeds)
        return got

    # def test_onnx_test_run_test_abs(self):
    #     done = 0
    #     for te in enumerate_onnx_tests("node", lambda folder: folder == "test_abs"):
    #         self.assertIn(te.name, repr(te))
    #         self.assertGreater(len(te), 0)
    #         te.run(
    #             TestOnnxBackEndWithReferenceEvaluator.load_fct,
    #             TestOnnxBackEndWithReferenceEvaluator.run_fct,
    #             comment="[runtime=ReferenceEvaluator]",
    #         )
    #         done += 1
    #     self.assertEqual(done, 1)

    def common_test_onnx_test_run(
        self,
        te,
        successes,
        missed,
        skipped,
        load_failed,
        exec_failed,
        mismatch,
        verbose=0,
        rtol=None,
        atol=None,
        check_other_runtime=None,
        print_io=False,
    ):
        """Runs one backend test, classifies its outcome into the provided
        accumulator lists, and dumps the failing model to disk on error.

        :param te: an OnnxBackendTest instance
        :param successes/missed/skipped/load_failed/exec_failed/mismatch:
            accumulator lists updated in place
        :param verbose: verbosity level (higher prints more diagnostics)
        :param rtol: mapping test name -> relative tolerance
        :param atol: mapping test name -> absolute tolerance
        :param check_other_runtime: optionally re-run a mismatching test on
            another runtime (e.g. 'onnxruntime') for comparison
        :param print_io: forwarded to ``te.run``
        """
        if verbose > 6:
            print("TEST:", te.name)
        if verbose > 7:
            print("  check runtime")
        self.assertIn(te.name, repr(te))
        self.assertGreater(len(te), 0)
        try:
            if verbose > 7:
                print("  run")
            if verbose > 5:
                # NOTE(review): this branch looks tolerances up by te.name only,
                # whereas the quiet branch falls back from te.fname to te.name —
                # kept as-is to preserve behavior.
                te.run(
                    lambda *args, verbose=verbose: TestOnnxBackEndWithReferenceEvaluator.load_fct(
                        *args, verbose
                    ),
                    TestOnnxBackEndWithReferenceEvaluator.run_fct,
                    atol=atol.get(te.name, None),
                    rtol=rtol.get(te.name, None),
                    comment=f"[runtime=ReferenceEvaluator, verbose={verbose}]",
                    print_io=print_io,
                )
            else:
                te.run(
                    TestOnnxBackEndWithReferenceEvaluator.load_fct,
                    TestOnnxBackEndWithReferenceEvaluator.run_fct,
                    atol=atol.get(te.fname, atol.get(te.name, None)),
                    rtol=rtol.get(te.fname, rtol.get(te.name, None)),
                    comment="[runtime=ReferenceEvaluator]",
                    print_io=print_io,
                )
            if verbose > 7:
                print("  end run")
            if verbose > 8:
                print(te.onnx_model)
        except NotImplementedError as e:
            # Operator not implemented by the reference evaluator.
            if verbose > 7:
                print("  ", e, type(e))
            missed.append((te, e))
            with open(f"missed_{te.name}.onnx", "wb") as f:
                f.write(te.onnx_model.SerializeToString())
            raise e
        except (AssertionError, ValueError) as e:
            # Output mismatch: optionally cross-check with another runtime.
            if verbose > 7:
                print("  ", e, type(e))
            mismatch.append((te, e))
            with open(f"mismatch_{te.name}.onnx", "wb") as f:
                f.write(te.onnx_model.SerializeToString())
            if check_other_runtime is None:
                raise e
            if "onnxruntime" in check_other_runtime:
                print("CHECK RUNTIME onnxruntime")
                from onnxruntime import InferenceSession

                onnx_domain_opset = ORT_MAX_ONNX_OPSET_SUPPORTED_VERSION
                for opset in te.onnx_model.opset_import:
                    if opset.domain in ("", "ai.onnx"):
                        onnx_domain_opset = opset.version
                        break
                # The new IR or opset version is not supported by onnxruntime yet
                if (
                    te.onnx_model.ir_version > ORT_MAX_IR_SUPPORTED_VERSION
                    or onnx_domain_opset > ORT_MAX_ONNX_OPSET_SUPPORTED_VERSION
                ):
                    print(
                        "Skip test because of IR or opset version is not supported by onnxruntime yet"
                    )
                    return
                te.run(
                    lambda obj: InferenceSession(
                        obj.SerializeToString(), providers=["CPUExecutionProvider"]
                    ),
                    lambda *a, **b: TestOnnxBackEndWithReferenceEvaluator.run_fct(
                        *a, verbose=1, **b
                    ),
                    atol=1e-5,
                    rtol=1e-3,
                    comment="[runtime=onnxruntime]",
                )
                print("done")
            raise e
        except Exception as e:
            # Any other failure: dump the model for offline inspection.
            if verbose > 7:
                print("  ", e, type(e))
            with open(f"issue_{te.name}.onnx", "wb") as f:
                f.write(te.onnx_model.SerializeToString())
            raise AssertionError(
                f"Unable to run test {te.name!r} due to {e}\n{te.onnx_model}"
            ) from e
        successes.append((te, atol.get(te.fname, None), rtol.get(te.fname, None)))
        if verbose > 7:
            print("  end example.")

    @staticmethod
    def _postprocess(
        successes, missed, skipped, load_failed, exec_failed, mismatch, verbose
    ):
        """Prints a coverage summary and raises if too many tests failed."""
        success = len(successes)
        failed = [
            len(missed),
            len(skipped),
            len(load_failed),
            len(exec_failed),
            len(mismatch),
        ]
        coverage = success / (success + sum(failed))
        if verbose:
            # NOTE(review): `onnx_file` is presumably a module-level global;
            # it is not defined in this class — confirm it exists at runtime.
            path = os.path.dirname(onnx_file)
            print("-----------")
            print(
                f"success={success}, skipped={len(skipped)}, missed={len(missed)}, load_failed={len(load_failed)}, "
                f"exec_failed={len(exec_failed)}, mismatch={len(mismatch)}"
            )
            print(
                f"coverage {coverage * 100:.1f}% out of {success + sum(failed)} tests"
            )
            if verbose > 3:

                def _print(s, path):
                    # Normalize paths for readable, portable output.
                    return (
                        str(s)
                        .replace("\\\\", "\\")
                        .replace(path, "onnx")
                        .replace("\\", "/")
                    )

                print("-----------")
                for t in sorted(load_failed, key=lambda m: m[0].fname):
                    print("loading failed", t[0].fname, "---", _print(t[0], path))
                for t in sorted(exec_failed, key=lambda m: m[0].fname):
                    print("execution failed", t[0].fname, "---", _print(t[0], path))
                for t in sorted(mismatch, key=lambda m: m[0].fname):
                    print("mismatch", t[0].fname, "---", _print(t[0], path))
                for t in sorted(missed, key=lambda m: m[0].fname):
                    print("missed ", t[0].fname, "---", _print(t[0], path))
                for t in sorted(skipped, key=lambda m: m[0].fname):
                    print("skipped", t[0].fname, "---", _print(t[0], path))
                if success > 30:
                    print("-----------")
                    print(
                        f"success={success}, skipped={len(skipped)}, missed={len(missed)}, load_failed={len(load_failed)}, "
                        f"exec_failed={len(exec_failed)}, mismatch={len(mismatch)}"
                    )
                    print(
                        f"coverage {coverage * 100:.1f}% out of {success + sum(failed)} tests"
                    )
                    print("-----------")
        if len(mismatch) > 0:
            te, e = mismatch[0]
            raise AssertionError(
                f"Mismatch in test {te.name!r}\n{te.onnx_model}."
            ) from e
        if sum(failed) > len(SKIP_TESTS):
            raise AssertionError(
                f"Unexpected failures. {sum(failed)}/{success + sum(failed)} tests have failed."
                f"The coverage is {coverage * 100:.1f}%. "
                f"New operators were added with no corresponding runtime."
            )

    @classmethod
    def setUpClass(cls, all_tests=False):
        """Initializes per-test tolerances and the result accumulators.

        :param all_tests: when True, ignores SKIP_TESTS and runs everything.
        """
        # test not supported yet
        # not supported yet
        # see https://onnx.ai/backend-scoreboard/onnxruntime_details_stable.html
        # to compare with onnxruntime
        cls.rtol = {
            "test_adam_multiple": 1e-2,
            "test_blackmanwindow_expanded": 0,
            "test_blackmanwindow_symmetric_expanded": 0,
            "test_simple_rnn_batchwise": 0,
            "test__pytorch_converted_Conv1d_pad1": 1e-4,
            "test__pytorch_converted_Conv2d": 1e-5,
            "test__pytorch_converted_Conv2d_no_bias": 1e-3,
            "test__pytorch_converted_Conv2d_strided": 1e-4,
            "test_layer_normalization_4d_axis1_expanded_ver18": 1e-4,
            "test_layer_normalization_4d_axis_negative_1_expanded_ver18": 1e-4,
            "test_layer_normalization_4d_axis_negative_3_expanded_ver18": 1e-4,
        }
        cls.atol = {
            "test_blackmanwindow": 1e-7,
            "test_blackmanwindow_expanded": 1e-4,
            "test_blackmanwindow_symmetric": 1e-7,
            "test_blackmanwindow_symmetric_expanded": 1e-4,
            "test_Conv1d": 1e-6,
            "test_Conv2d_depthwise_padded": 1e-7,
            "test_Conv3d_dilated": 1e-6,
            "test_gridsample_bicubic": 1e-4,
            "test_gru_seq_length": 1e-7,
            "test_hammingwindow_expanded": 1e-4,
            "test_hammingwindow_symmetric_expanded": 1e-4,
            "test_hannwindow_expanded": 1e-4,
            "test_hannwindow_symmetric": 1e-7,
            "test_hannwindow_symmetric_expanded": 1e-4,
            "test_layer_normalization_4d_axis_negative_1_expanded": 1e-6,
            "test_layer_normalization_4d_axis1_expanded": 1e-6,
            "test_layer_normalization_4d_axis_negative_3_expanded": 1e-6,
            "test_mish": 1e-6,
            "test_mish_expanded": 1e-6,
            "test_roialign_aligned_false": 1e-4,
            "test_roialign_aligned_true": 1e-4,
            # extended list
            "test__pytorch_converted_ConvTranspose2d_no_bias": 1e-4,
            "test__pytorch_converted_Linear_no_bias": 1e-5,
            "test_Linear_no_bias": 1e-5,
            "test__pytorch_converted_Conv1d_pad1": 1e-6,
            "test__pytorch_converted_Conv2d": 1e-5,
            "test__pytorch_converted_Conv2d_depthwise": 1e-4,
            "test__pytorch_converted_Conv2d_depthwise_strided": 1e-4,
            "test__pytorch_converted_Conv2d_depthwise_with_multiplier": 1e-4,
            "test__pytorch_converted_Conv2d_depthwise_padded": 1e-4,
            "test__pytorch_converted_Conv2d_groups": 1e-4,
            "test__pytorch_converted_Conv2d_groups_thnn": 1e-4,
            "test__pytorch_converted_Conv2d_no_bias": 1e-5,
            "test__pytorch_converted_Conv2d_strided": 1e-4,
            "test__pytorch_operator_operator_symbolic_override": 1e-5,
            "test_operator_symbolic_override": 1e-4,
            "test__pytorch_converted_Conv3d_dilated_strided": 1e-4,
            "test__pytorch_converted_Conv3d_groups": 1e-4,
        }
        if version(npver) < version("1.21.5"):
            # Older numpy produces slightly different DFT results.
            cls.atol.update(
                {
                    "test_dft": 1e-11,
                    "test_dft_axis": 1e-11,
                    "test_dft_inverse": 1e-11,
                }
            )
        cls.skip_test = SKIP_TESTS
        if all_tests:
            cls.skip_test = set()
        cls.successes = []
        cls.missed = []
        cls.skipped = []
        cls.load_failed = []
        cls.exec_failed = []
        cls.mismatch = []

    @classmethod
    def tearDownClass(cls):
        """Summarizes the run; fails if no test at all succeeded."""
        if len(cls.successes) == 0:
            failed = cls.mismatch + cls.missed + cls.load_failed + cls.exec_failed
            if len(failed) > 0:
                raise RuntimeError(
                    f"No test was successful, {len(failed)} failed."
                ) from failed[0][1]
            raise RuntimeError("No test was successful.")
        cls._postprocess(
            cls.successes,
            cls.missed,
            cls.skipped,
            cls.load_failed,
            cls.exec_failed,
            cls.mismatch,
            10,
        )
# Generate one unittest method per backend test folder before collection.
TestOnnxBackEndWithReferenceEvaluator.add_test_methods()
if __name__ == "__main__":
    unittest.main(verbosity=2)
| 34,178 | 38.106407 | 127 | py |
adagrid | adagrid-main/uniform-negative-sampling/adagrid_uniform_negative_sampling.py | from pathlib import Path
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
import argparse
import copy
import math
import random
import numpy as np
import pandas as pd
from sklearn.metrics import *
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
import torch_geometric.nn as pyg_nn
from deepsnap.dataset import GraphDataset
from deepsnap.batch import Batch
class Arguments:
    """Configuration container for one link prediction experiment.

    Holds the dataset/model/training hyper-parameters plus the AdaGrid
    controls (``adapt``, ``try_epochs``, ``adapt_epochs``, ``criterion``)
    and the random-ratio baseline flag (``random``).
    """

    def __init__(self, dataset='cora', device='cpu', epochs=500, mode='all', model='GCN',
                 edge_message_ratio=0.6, layers=2, hidden_dim=64, batch_size=1, data_split=None,
                 verbose=False, adapt=False, try_epochs=3, adapt_epochs=50, criterion='val', random=False):
        self.dataset = dataset
        self.device = device
        self.epochs = epochs
        self.mode = mode
        self.model = model
        self.edge_message_ratio = edge_message_ratio
        self.layers = layers
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        # FIX: the previous default `data_split=[0.85, 0.05, 0.1]` was a
        # mutable default argument shared across all instances; build a
        # fresh list per instance instead.
        self.data_split = data_split if data_split is not None else [0.85, 0.05, 0.1]
        self.verbose = verbose
        self.adapt = adapt
        self.try_epochs = try_epochs
        self.adapt_epochs = adapt_epochs
        self.criterion = criterion
        self.random = random
def arg_parse():
    """Parse the command-line options of the link prediction experiments."""
    parser = argparse.ArgumentParser(description='Link prediction arguments.')
    # Declarative option table keeps flag order identical to the help output.
    typed_options = [
        ('--gpu', int, 'GPU device.'),
        ('--dataset', str, 'Dataset.'),
        ('--num_layers', int, 'Number of layers of GNN.'),
        ('--hidden_dim', int, 'Hidden dimension of GNN.'),
    ]
    for flag, flag_type, help_text in typed_options:
        parser.add_argument(flag, type=flag_type, help=help_text)
    parser.add_argument('--verbose', action='store_true',
                        help='Whether output is verbose.')
    parser.set_defaults(gpu=0, dataset='cora', num_layers=2,
                        hidden_dim=64, verbose=False)
    return parser.parse_args()
class Net(torch.nn.Module):
    """GCN encoder scoring candidate links by a dot product of node embeddings."""

    def __init__(self, input_dim, args):
        super(Net, self).__init__()
        self.model = args.model
        if self.model != 'GCN':
            raise ValueError('unknown conv')
        self.conv_first = pyg_nn.GCNConv(input_dim, args.hidden_dim)
        hidden = [
            pyg_nn.GCNConv(args.hidden_dim, args.hidden_dim)
            for _ in range(args.layers - 2)
        ]
        self.convs = torch.nn.ModuleList(hidden)
        self.conv_last = pyg_nn.GCNConv(args.hidden_dim, args.hidden_dim)
        self.loss_fn = torch.nn.BCEWithLogitsLoss()

    def forward(self, graph):
        """Returns one logit per edge in ``graph.edge_label_index``."""
        h = F.dropout(graph.node_feature, p=0.2, training=self.training)
        h = F.relu(self._conv_op(self.conv_first, h, graph))
        for conv in self.convs:
            h = F.dropout(h, p=0.2, training=self.training)
            h = F.relu(self._conv_op(conv, h, graph))
        h = F.dropout(h, p=0.2, training=self.training)
        h = self._conv_op(self.conv_last, h, graph)
        # Dot-product decoder over the two endpoints of each labeled edge.
        src = torch.index_select(h, 0, graph.edge_label_index[0, :].long())
        dst = torch.index_select(h, 0, graph.edge_label_index[1, :].long())
        return torch.sum(src * dst, dim=-1)

    def _conv_op(self, conv, x, graph):
        """Applies one convolution, passing edge features for spline convs."""
        if self.model == 'GCN':
            return conv(x, graph.edge_index)
        if self.model == 'spline':
            return conv(x, graph.edge_index, graph.edge_feature)

    def loss(self, pred, link_label):
        """Binary cross entropy with logits between predictions and labels."""
        return self.loss_fn(pred, link_label)
def try_edge_message_ratio(edge_message_ratio, model, datasets, dataloaders, optimizer, args, scheduler=None):
    """Probes one candidate edge message ratio for ``args.try_epochs`` epochs.

    Sets the ratio on the training split, trains in place, and evaluates
    after every epoch.

    :return: tuple ``(mean_accs, val_max, best_model)`` — per-split accuracy
        averaged over the try epochs, best validation accuracy seen, and a
        deep copy of the model at that best point.
    """
    datasets['train'].edge_message_ratio = edge_message_ratio
    val_max = -math.inf
    best_model = model
    mean_accs = {mode: 0 for mode, dataloader in dataloaders.items()}
    for epoch in range(args.try_epochs):
        for iter_i, batch in enumerate(dataloaders['train']):
            batch.to(args.device)
            model.train()
            optimizer.zero_grad()
            pred = model(batch)
            loss = model.loss(pred, batch.edge_label.type(pred.dtype))
            loss.backward()
            optimizer.step()
            if scheduler is not None:
                scheduler.step()
        # Evaluate once per epoch and accumulate for the average.
        accs, _ = test(model, dataloaders, args)
        for mode in accs:
            mean_accs[mode] += accs[mode]
        # Keep a snapshot of the best-validation model seen so far.
        if val_max < accs['val']:
            val_max = accs['val']
            best_model = copy.deepcopy(model)
    for mode in mean_accs:
        mean_accs[mode] /= args.try_epochs
    log = 'Edge message ratio: {:.4f}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'
    if(args.verbose):
        print(log.format(edge_message_ratio, mean_accs['train'], mean_accs['val'], mean_accs['test']))
    return mean_accs, val_max, best_model
def train(model, datasets, dataloaders, optimizer, args, scheduler=None):
    """Trains the link prediction model, optionally adapting the edge
    message ratio with AdaGrid (``args.adapt``) or resampling it uniformly
    at random each epoch (``args.random``).

    :return: tuple of a numpy array ``[train, val, test]`` accuracies of the
        best-validation model, and the list of ratios chosen by AdaGrid.
    """
    # training loop
    val_max = -math.inf
    best_model = model
    input_dim = datasets['train'].num_node_features
    num_classes = datasets['train'].num_edge_labels
    # Number of epochs already consumed by the last AdaGrid probe phase.
    waiting = 0
    edge_message_ratio_changes = []
    for epoch in range(0, args.epochs):
        if(waiting > 0):
            # The probe already trained for these epochs; skip them here.
            waiting -= 1
            continue
        # search to determine the next edge_message_ratio
        if(args.adapt and epoch % args.adapt_epochs == 0):
            best_ratio = 0.0
            try_val_max = None
            # 'val' maximizes validation accuracy; 'gap' minimizes |train-val|.
            if(args.criterion == 'val'):
                try_val_max = -math.inf
            if(args.criterion == 'gap'):
                try_val_max = math.inf
            best_try_model = model
            best_try_optimizer = optimizer
            best_try_scheduler = scheduler
            # best version of the best_try_model during training for try epochs (highest validation accuracy)
            best_val_model = model
            best_val = -math.inf
            edge_message_ratios = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
            for edge_message_ratio in edge_message_ratios:
                # Clone model/optimizer/scheduler state so every candidate
                # ratio starts from the identical training state.
                new_model = Net(input_dim, args).to(args.device)
                new_model.load_state_dict(model.state_dict())
                new_optimizer = torch.optim.SGD(new_model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
                new_optimizer.load_state_dict(optimizer.state_dict())
                new_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(new_optimizer, T_max=args.epochs)
                new_scheduler.load_state_dict(scheduler.state_dict())
                try_accs, current_val, current_val_model = try_edge_message_ratio(edge_message_ratio,
                                                                                 new_model, datasets, dataloaders, new_optimizer,
                                                                                 args, new_scheduler)
                if (args.criterion == 'val' and try_val_max < try_accs['val']):
                    try_val_max = try_accs['val']
                    best_ratio = edge_message_ratio
                    best_try_model = new_model
                    best_try_optimizer = new_optimizer
                    best_try_scheduler = new_scheduler
                    best_val = current_val
                    best_val_model = current_val_model
                if (args.criterion == 'gap' and try_val_max > abs(try_accs['train'] - try_accs['val'])):
                    try_val_max = abs(try_accs['train'] - try_accs['val'])
                    best_ratio = edge_message_ratio
                    best_try_model = new_model
                    best_try_optimizer = new_optimizer
                    best_try_scheduler = new_scheduler
                    best_val = current_val
                    best_val_model = current_val_model
            # Adopt the winning candidate's state and ratio.
            model = best_try_model
            optimizer = best_try_optimizer
            scheduler = best_try_scheduler
            datasets['train'].edge_message_ratio = best_ratio
            edge_message_ratio_changes.append(datasets['train'].edge_message_ratio)
            if val_max < best_val:
                val_max = best_val
                best_model = best_val_model
            # The probe already trained try_epochs epochs (this one counts).
            waiting = args.try_epochs - 1
            continue
        if(args.random):
            datasets['train'].edge_message_ratio = random.uniform(0.1, 0.9)
        for iter_i, batch in enumerate(dataloaders['train']):
            batch.to(args.device)
            model.train()
            optimizer.zero_grad()
            pred = model(batch)
            loss = model.loss(pred, batch.edge_label.type(pred.dtype))
            loss.backward()
            optimizer.step()
            if scheduler is not None:
                scheduler.step()
        log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'
        accs, _ = test(model, dataloaders, args)
        if(args.verbose):
            print(log.format(epoch, accs['train'], accs['val'], accs['test']))
        if val_max < accs['val']:
            val_max = accs['val']
            best_model = copy.deepcopy(model)
    log = 'Best, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'
    accs, _ = test(best_model, dataloaders, args)
    print(log.format(accs['train'], accs['val'], accs['test']))
    return np.array([accs['train'], accs['val'], accs['test']]), edge_message_ratio_changes
def test(model, dataloaders, args, max_train_batches=1):
    """Evaluates ROC AUC and loss on every split.

    :param max_train_batches: cap on train batches evaluated, to keep the
        per-epoch evaluation cheap.
    :return: tuple of dicts ``(accs, losses)`` keyed by split name.
        Assumes each dataloader yields at least one batch.
    """
    model.eval()
    accs, losses = {}, {}
    for mode, loader in dataloaders.items():
        auc_total = 0
        loss_total = 0
        batches = 0
        for batch in loader:
            batch.to(args.device)
            pred = model(batch)
            # only 1 graph in dataset. In general needs aggregation
            labels = batch.edge_label.type(pred.dtype)
            loss_total += model.loss(pred, labels).cpu().data.numpy()
            auc_total += roc_auc_score(batch.edge_label.flatten().cpu().numpy(),
                                       pred.flatten().data.cpu().numpy())
            batches += 1
            if mode == 'train' and batches >= max_train_batches:
                # do not eval on the entire training set for efficiency
                break
        accs[mode] = auc_total / batches
        losses[mode] = loss_total / batches
    return accs, losses
def run(args):
    """Builds the dataset, splits it, constructs the model and trains it.

    :param args: an ``Arguments`` instance describing the experiment.
    :return: whatever :func:`train` returns — accuracies of the best model
        and the list of AdaGrid ratio changes.
    """
    pyg_dataset = None
    if(args.dataset == 'cora'):
        pyg_dataset = Planetoid('./datasets', 'Cora', transform=T.TargetIndegree())
    if(args.dataset == 'citeseer'):
        pyg_dataset = Planetoid('./datasets', 'CiteSeer', transform=T.TargetIndegree())
    if(args.dataset == 'pubmed'):
        pyg_dataset = Planetoid('./datasets', 'PubMed', transform=T.TargetIndegree())
    # the input that we assume users have
    edge_train_mode = args.mode
    if(args.verbose):
        print('edge train mode: {}'.format(edge_train_mode))
    graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
    # resample_disjoint_period=1 resamples the disjoint split every epoch.
    dataset = GraphDataset(graphs,
                           task='link_pred',
                           edge_message_ratio=args.edge_message_ratio,
                           edge_train_mode=edge_train_mode,
                           resample_disjoint=True,
                           resample_disjoint_period=1)
    if(args.verbose):
        print('Initial dataset: {}'.format(dataset))
    # split dataset
    datasets = {}
    datasets['train'], datasets['val'], datasets['test'] = dataset.split(
        transductive=True, split_ratio=args.data_split)
    if(args.verbose):
        print('after split')
        print('Train message-passing graph: {} nodes; {} edges.'.format(
            datasets['train'][0].G.number_of_nodes(),
            datasets['train'][0].G.number_of_edges()))
        print('Val message-passing graph: {} nodes; {} edges.'.format(
            datasets['val'][0].G.number_of_nodes(),
            datasets['val'][0].G.number_of_edges()))
        print('Test message-passing graph: {} nodes; {} edges.'.format(
            datasets['test'][0].G.number_of_nodes(),
            datasets['test'][0].G.number_of_edges()))
    # node feature dimension
    input_dim = datasets['train'].num_node_features
    # link prediction needs 2 classes (0, 1)
    num_classes = datasets['train'].num_edge_labels
    model = Net(input_dim, args).to(args.device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
    follow_batch = []
    dataloaders = {split: DataLoader(
        ds, collate_fn=Batch.collate(follow_batch),
        batch_size=args.batch_size, shuffle=(split=='train'))
        for split, ds in datasets.items()}
    if(args.verbose):
        print('Graphs after split: ')
        for key, dataloader in dataloaders.items():
            for batch in dataloader:
                print(key, ': ', batch)
    return train(model, datasets, dataloaders, optimizer, args, scheduler=scheduler)
def write_file(file_name, total_acc):
    """Persists averaged train/val/test accuracies as a one-row CSV file.

    :param file_name: destination CSV path.
    :param total_acc: numpy array of three accuracies (train, val, test).
    """
    print(total_acc)
    row = total_acc.reshape(1, -1)
    frame = pd.DataFrame(data=row, columns=['Train', 'Validation', 'Test'])
    frame.to_csv(file_name, index=False)
    print(frame)
def experiment(iterations, args, file_name_results=None, file_name_edge_message_ratio_changes=None):
    """Runs the experiment ``iterations`` times (seeds 1..iterations) and
    averages the resulting accuracies.

    :param iterations: number of seeded repetitions.
    :param args: ``Arguments`` instance forwarded to :func:`run`.
    :param file_name_results: optional CSV path for the averaged accuracies.
    :param file_name_edge_message_ratio_changes: optional CSV path for the
        per-iteration AdaGrid ratio trajectories.
    """
    edge_message_ratio_changes_iterations = []
    total_acc = np.zeros(3)
    for seed in range(1, iterations + 1):
        # Fix every RNG so each repetition is reproducible.
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        acc, edge_message_ratio_changes = run(args)
        total_acc = np.add(total_acc, acc)
        edge_message_ratio_changes_iterations.append(edge_message_ratio_changes)
    total_acc /= iterations
    # FIX: identity comparisons `!= None` replaced with `is not None`.
    if file_name_results is not None:
        write_file(file_name_results, total_acc)
    if file_name_edge_message_ratio_changes is not None:
        edge_message_ratio_changes_iterations = np.array(edge_message_ratio_changes_iterations)
        data = {"Iteration {}".format(i): edge_message_ratio_changes_iterations[i - 1, :] for i in range(1, iterations + 1)}
        frame = pd.DataFrame(data)
        frame.to_csv(file_name_edge_message_ratio_changes, index=False)
def main():
    """Sweeps data splits and runs the constant-ratio, random-ratio and
    AdaGrid experiment grids for the dataset selected on the command line."""
    cli = arg_parse()
    iterations = 3
    device = torch.device('cuda:{}'.format(cli.gpu) if torch.cuda.is_available() else 'cpu')
    dataset_name = cli.dataset
    verbose = cli.verbose
    layer = cli.num_layers
    hidden_dim = cli.hidden_dim
    data_splits = [[0.8, 0.1, 0.1], [0.2, 0.4, 0.4], [0.5, 0.25, 0.25]]
    adapt_epochs = [100, 50, 10]
    try_epochs = [1, 5, -1]
    criterions = ['val', 'gap']
    edge_message_ratios = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    for data_split in data_splits:
        folder_dataset = "{}-{}-{}".format(dataset_name, layer, hidden_dim)
        folder_split = "/{}-{}-{}".format(int(100 * data_split[0]), int(100 * data_split[1]), int(100 * data_split[2]))
        folder_name = folder_dataset + "/results/" + folder_split
        Path(folder_name).mkdir(parents=True, exist_ok=True)
        # Settings shared by every experiment of this split.
        common = dict(device=device, mode="disjoint", verbose=verbose, dataset=dataset_name,
                      hidden_dim=hidden_dim, layers=layer, data_split=data_split)
        # Baseline: a fixed edge message ratio for the whole training run.
        for edge_message_ratio in edge_message_ratios:
            args = Arguments(adapt=False, edge_message_ratio=edge_message_ratio, **common)
            file_name = folder_name + "/normal_{}.csv".format(int(100 * edge_message_ratio))
            experiment(iterations, args, file_name)
        # Baseline: resample the ratio uniformly at random every epoch.
        args = Arguments(adapt=False, random=True, **common)
        file_name = folder_name + "/random.csv"
        experiment(iterations, args, file_name)
        # AdaGrid: periodically search for the best ratio during training.
        for criterion in criterions:
            for adapt_epoch in adapt_epochs:
                for try_epoch in try_epochs:
                    # -1 means "probe for a full adaptation period".
                    if try_epoch == -1:
                        try_epoch = adapt_epoch
                    args = Arguments(adapt=True, adapt_epochs=adapt_epoch, try_epochs=try_epoch,
                                     criterion=criterion, **common)
                    file_name_results = folder_name + "/adapt_{}_{}_{}.csv".format(criterion, adapt_epoch, try_epoch)
                    file_name_edge_message_ratio = folder_name + "/edge_message_ratio_adapt_{}_{}_{}.csv".format(criterion, adapt_epoch, try_epoch)
                    experiment(iterations, args, file_name_results, file_name_edge_message_ratio)
# Script entry point.
if __name__ == '__main__':
    main()
| 14,995 | 30.241667 | 132 | py |
adagrid | adagrid-main/community-ratio-based-negative-sampling/adagrid_community_ratio_based_negative_sampling.py | from pathlib import Path
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
import argparse
import copy
import math
import random
import numpy as np
import pandas as pd
from sklearn.metrics import *
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from networkx.algorithms.community import greedy_modularity_communities
import networkx as nx
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
import torch_geometric.nn as pyg_nn
from deepsnap.dataset import GraphDataset
from deepsnap.batch import Batch
class Arguments:
    """Configuration container for one link prediction experiment.

    Holds the dataset/model/training hyper-parameters plus the AdaGrid
    controls (``adapt``, ``try_epochs``, ``adapt_epochs``, ``criterion``)
    and the random-ratio baseline flag (``random``).
    """

    def __init__(self, dataset='cora', device='cpu', epochs=500, mode='all', model='GCN',
                 edge_message_ratio=0.6, layers=2, hidden_dim=64, batch_size=1, data_split=None,
                 verbose=False, adapt=False, try_epochs=3, adapt_epochs=50, criterion='val', random=False):
        self.dataset = dataset
        self.device = device
        self.epochs = epochs
        self.mode = mode
        self.model = model
        self.edge_message_ratio = edge_message_ratio
        self.layers = layers
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        # FIX: the previous default `data_split=[0.85, 0.05, 0.1]` was a
        # mutable default argument shared across all instances; build a
        # fresh list per instance instead.
        self.data_split = data_split if data_split is not None else [0.85, 0.05, 0.1]
        self.verbose = verbose
        self.adapt = adapt
        self.try_epochs = try_epochs
        self.adapt_epochs = adapt_epochs
        self.criterion = criterion
        self.random = random
def arg_parse():
    """Parse the command-line options of the link prediction experiments."""
    parser = argparse.ArgumentParser(description='Link prediction arguments.')
    # Declarative option table keeps flag order identical to the help output.
    typed_options = [
        ('--gpu', int, 'GPU device.'),
        ('--dataset', str, 'Dataset.'),
        ('--num_layers', int, 'Number of layers of GNN.'),
        ('--hidden_dim', int, 'Hidden dimension of GNN.'),
    ]
    for flag, flag_type, help_text in typed_options:
        parser.add_argument(flag, type=flag_type, help=help_text)
    parser.add_argument('--verbose', action='store_true',
                        help='Whether output is verbose.')
    parser.set_defaults(gpu=0, dataset='cora', num_layers=2,
                        hidden_dim=64, verbose=False)
    return parser.parse_args()
class Net(torch.nn.Module):
    """GCN encoder scoring candidate links by a dot product of node embeddings."""

    def __init__(self, input_dim, args):
        super(Net, self).__init__()
        self.model = args.model
        if self.model != 'GCN':
            raise ValueError('unknown conv')
        self.conv_first = pyg_nn.GCNConv(input_dim, args.hidden_dim)
        hidden = [
            pyg_nn.GCNConv(args.hidden_dim, args.hidden_dim)
            for _ in range(args.layers - 2)
        ]
        self.convs = torch.nn.ModuleList(hidden)
        self.conv_last = pyg_nn.GCNConv(args.hidden_dim, args.hidden_dim)
        self.loss_fn = torch.nn.BCEWithLogitsLoss()

    def forward(self, graph):
        """Returns one logit per edge in ``graph.edge_label_index``."""
        h = F.dropout(graph.node_feature, p=0.2, training=self.training)
        h = F.relu(self._conv_op(self.conv_first, h, graph))
        for conv in self.convs:
            h = F.dropout(h, p=0.2, training=self.training)
            h = F.relu(self._conv_op(conv, h, graph))
        h = F.dropout(h, p=0.2, training=self.training)
        h = self._conv_op(self.conv_last, h, graph)
        # Dot-product decoder over the two endpoints of each labeled edge.
        src = torch.index_select(h, 0, graph.edge_label_index[0, :].long())
        dst = torch.index_select(h, 0, graph.edge_label_index[1, :].long())
        return torch.sum(src * dst, dim=-1)

    def _conv_op(self, conv, x, graph):
        """Applies one convolution, passing edge features for spline convs."""
        if self.model == 'GCN':
            return conv(x, graph.edge_index)
        if self.model == 'spline':
            return conv(x, graph.edge_index, graph.edge_feature)

    def loss(self, pred, link_label):
        """Binary cross entropy with logits between predictions and labels."""
        return self.loss_fn(pred, link_label)
def sample_negative_community(edge_index, num_nodes, num_neg_edges, community_ratio, communities, from_community):
    """Samples negative (non-existent) edges, split between intra- and
    inter-community pairs.

    :param edge_index: (2, E) LongTensor of existing edges to avoid.
    :param num_nodes: number of nodes in the graph.
    :param num_neg_edges: total number of negative edges to draw.
    :param community_ratio: fraction of negatives drawn within one community.
    :param communities: list of node-id lists, one list per community.
    :param from_community: maps node id -> community index.
    :return: (2, num_neg_edges) LongTensor on ``edge_index``'s device; the
        first ``int(num_neg_edges * community_ratio)`` columns are
        within-community pairs, the remaining columns cross communities.
    """
    num_within_communities = int(num_neg_edges * community_ratio)
    num_between_communities = num_neg_edges - num_within_communities
    # Encode each existing edge (i, j) as i * num_nodes + j; keep the codes
    # in a set so each rejection test below is O(1) instead of scanning a
    # tensor of all E edges on every candidate (previous behavior).
    existing = set((edge_index[0] * num_nodes + edge_index[1]).to("cpu").tolist())
    edges_within = []
    edges_between = []
    # Rejection sampling. NOTE: assumes enough candidate pairs exist for the
    # requested counts; otherwise these loops do not terminate (unchanged
    # contract from the original implementation).
    while len(edges_within) < num_within_communities:
        x = random.randint(0, num_nodes - 1)
        comm_x = from_community[x]
        y_ind = random.randint(0, len(communities[comm_x]) - 1)
        y = communities[comm_x][y_ind]
        if from_community[x] == from_community[y] and (x * num_nodes + y) not in existing:
            edges_within.append([x, y])
    while len(edges_between) < num_between_communities:
        x = random.randint(0, num_nodes - 1)
        y = random.randint(0, num_nodes - 1)
        if from_community[x] != from_community[y] and (x * num_nodes + y) not in existing:
            edges_between.append([x, y])
    # FIX: reshape(-1, 2) keeps column indexing valid when either bucket is
    # empty (community_ratio of 0 or 1 used to raise IndexError on
    # np.array([])[:, 0]).
    edges_within = np.asarray(edges_within, dtype=np.int64).reshape(-1, 2)
    edges_between = np.asarray(edges_between, dtype=np.int64).reshape(-1, 2)
    row = torch.tensor(np.append(edges_within[:, 0], edges_between[:, 0]))
    col = torch.tensor(np.append(edges_within[:, 1], edges_between[:, 1]))
    neg_edge_index = torch.stack([row, col], dim=0).long()
    return neg_edge_index.to(edge_index.device)
def sample_community_ratio(graph, community_ratio, communities, from_community):
    """Resample community-aware negative edges for ``graph``, mutating it in place.

    Replaces any previously appended negatives, samples fresh ones via
    ``sample_negative_community``, and appends them (with 0-labels) to
    ``graph.edge_label_index`` / ``graph.edge_label``. Returns ``None`` on the
    normal path, or an empty ``LongTensor`` when the graph has no edges at all.

    :param graph: a deepsnap-style Graph with ``edge_index``, ``edge_label_index``,
        ``edge_label``, ``num_nodes`` and a private ``_num_positive_examples`` marker.
    :param community_ratio: fraction of negatives drawn within a community.
    :param communities: list of communities, each a list of node ids.
    :param from_community: mapping node id -> community index.
    """
    if graph._num_positive_examples is not None:
        # remove previous negative samples first
        # if self._num_positive_examples is None then no previous sampling was done
        graph.edge_label_index = graph.edge_label_index[:, : graph._num_positive_examples]
    # Sample as many negatives as there are positives (1:1 class balance).
    num_pos_edges = graph.edge_label_index.shape[-1]
    num_neg_edges = num_pos_edges
    # Avoid duplicating edges when edge_label_index is identical to edge_index.
    if graph.edge_index.size() == graph.edge_label_index.size() and (
        torch.sum(graph.edge_index - graph.edge_label_index) == 0
    ):
        edge_index_all = graph.edge_index
    else:
        edge_index_all = (
            torch.cat((graph.edge_index, graph.edge_label_index), -1)
        )
    if len(edge_index_all) > 0:
        negative_edges = sample_negative_community(edge_index_all, graph.num_nodes, num_neg_edges,
                            community_ratio, communities, from_community)
    else:
        return torch.LongTensor([])
    # label for negative edges is 0
    negative_label = torch.zeros(num_neg_edges, dtype=torch.long)
    # positive edges
    if graph.edge_label is not None:
        # when resampling, get the positive portion of labels
        positive_label = graph.edge_label[:num_pos_edges]
    elif graph.edge_label is None:
        # if label is not yet specified, use all ones for positives
        positive_label = torch.ones(num_pos_edges, dtype=torch.long)
    else:
        # NOTE(review): this branch is unreachable — the `if`/`elif` above already
        # cover both `is not None` and `is None`. The +1 label-shift below never runs.
        # reserve class 0 for negatives; increment other edge labels
        positive_label = graph.edge_label + 1
    graph._num_positive_examples = num_pos_edges
    # append to edge_label_index
    graph.edge_label_index = (
        torch.cat((graph.edge_label_index, negative_edges), -1)
    )
    graph.edge_label = (
        torch.cat((positive_label, negative_label), -1).type(torch.long)
    )
def try_edge_message_ratio(edge_message_ratio, model, datasets, dataloaders, optimizer, args,
                           communities, from_community, scheduler=None):
    """Train for ``args.try_epochs`` epochs at one candidate edge_message_ratio.

    Used by the AdaGrid search in ``train`` to score a candidate ratio. Trains
    ``model`` in place (and steps ``optimizer``/``scheduler``), evaluating after
    every epoch.

    :param edge_message_ratio: candidate ratio written onto the train dataset.
    :param model: model to train (mutated in place).
    :param datasets: dict of 'train'/'val'/'test' GraphDatasets.
    :param dataloaders: matching dict of DataLoaders.
    :param optimizer: optimizer for ``model`` (stepped in place).
    :param args: namespace providing ``try_epochs``, ``device``, ``verbose``.
    :param communities: list of node communities (for negative resampling).
    :param from_community: node -> community index mapping.
    :param scheduler: optional LR scheduler, stepped after each batch.
    :return: (mean_accs, val_max, best_model): per-split accuracies averaged over
        the try epochs, the best validation accuracy seen, and a deep copy of the
        model at that best point.
    """
    datasets['train'].edge_message_ratio = edge_message_ratio
    val_max = -math.inf
    best_model = model
    mean_accs = {mode: 0 for mode, dataloader in dataloaders.items()}
    for epoch in range(args.try_epochs):
        # Resample community-aware negatives for every training graph each epoch.
        for i in range(len(datasets['train'].graphs)):
            sample_community_ratio(datasets['train'].graphs[i], datasets['train'].community_ratio, communities, from_community)
        for iter_i, batch in enumerate(dataloaders['train']):
            batch.to(args.device)
            model.train()
            optimizer.zero_grad()
            pred = model(batch)
            loss = model.loss(pred, batch.edge_label.type(pred.dtype))
            loss.backward()
            optimizer.step()
            if scheduler is not None:
                scheduler.step()
        accs, _ = test(model, dataloaders, args)
        for mode in accs:
            mean_accs[mode] += accs[mode]
        # Keep a snapshot of the model at its best validation accuracy.
        if val_max < accs['val']:
            val_max = accs['val']
            best_model = copy.deepcopy(model)
    for mode in mean_accs:
        mean_accs[mode] /= args.try_epochs
    log = 'Edge message ratio: {:.4f}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'
    if(args.verbose):
        print(log.format(edge_message_ratio, mean_accs['train'], mean_accs['val'], mean_accs['test']))
    return mean_accs, val_max, best_model
def train(model, datasets, dataloaders, optimizer, args, scheduler=None):
    """Full AdaGrid training loop for link prediction with community-aware negatives.

    First detects communities on the validation graph, measures the
    within/between-community ratio of positive edges, and resamples negatives in
    all splits to match it. Then trains for ``args.epochs`` epochs; when
    ``args.adapt`` is set, every ``args.adapt_epochs`` epochs it grid-searches
    the next edge_message_ratio by cloning model/optimizer/scheduler and running
    ``try_edge_message_ratio`` for each candidate.

    :param model: model to train (replaced by search winners during adaptation).
    :param datasets: dict of 'train'/'val'/'test' GraphDatasets (mutated in place).
    :param dataloaders: matching dict of DataLoaders.
    :param optimizer: optimizer (replaced alongside the model during adaptation).
    :param args: experiment configuration (epochs, adapt, criterion, device, ...).
    :param scheduler: optional LR scheduler; must not be None when args.adapt is
        set (its state_dict is cloned during the search).
    :return: (np.array([train, val, test]) accuracies of the best model,
        list of edge_message_ratio values chosen over training).
    """
    # Detect communities on the validation message-passing graph.
    graph = datasets['val'][0]
    G = nx.Graph()
    G.add_nodes_from(range(graph.num_nodes))
    G.add_edges_from([(int(graph.edge_index[0][i]), int(graph.edge_index[1][i]))
                      for i in range(graph.num_edges)])
    communities = [list(community) for community in greedy_modularity_communities(G)]
    from_community = {node: ind for ind in range(len(communities)) for node in communities[ind]}
    # get community ratio for positive edges in training dataset
    within = 0
    between = 0
    for iter_i, batch in enumerate(dataloaders['val']):
        for i in range(len(batch.edge_label_index[0])):
            if(batch.edge_label[i] > 0.5):
                x = batch.edge_label_index[0][i].item()
                y = batch.edge_label_index[1][i].item()
                if(from_community[x] == from_community[y]):
                    within += 1
                else:
                    between += 1
    community_ratio = within / (within + between)
    datasets['train'].community_ratio = community_ratio
    datasets['val'].community_ratio = community_ratio
    datasets['test'].community_ratio = community_ratio
    # Resample negatives in all splits to match the measured community ratio.
    for i in range(len(datasets['train'].graphs)):
        sample_community_ratio(datasets['train'].graphs[i], community_ratio, communities, from_community)
    for i in range(len(datasets['val'].graphs)):
        sample_community_ratio(datasets['val'].graphs[i], community_ratio, communities, from_community)
    for i in range(len(datasets['test'].graphs)):
        sample_community_ratio(datasets['test'].graphs[i], community_ratio, communities, from_community)
    # training loop
    val_max = -math.inf
    best_model = model
    input_dim = datasets['train'].num_node_features
    # NOTE(review): num_classes is computed but never used in this function.
    num_classes = datasets['train'].num_edge_labels
    # `waiting` skips the epochs already consumed by the search's try-epochs.
    waiting = 0
    edge_message_ratio_changes = []
    for epoch in range(0, args.epochs):
        if(waiting > 0):
            waiting -= 1
            continue
        # search to determine the next edge_message_ratio
        if(args.adapt and epoch % args.adapt_epochs == 0):
            best_ratio = 0.0
            try_val_max = None
            # 'val' maximizes validation accuracy; 'gap' minimizes |train - val|.
            if(args.criterion == 'val'):
                try_val_max = -math.inf
            if(args.criterion == 'gap'):
                try_val_max = math.inf
            best_try_model = model
            best_try_optimizer = optimizer
            best_try_scheduler = scheduler
            # best version of the best_try_model during training for try epochs (highest validation accuracy)
            best_val_model = model
            best_val = -math.inf
            edge_message_ratios = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
            for edge_message_ratio in edge_message_ratios:
                # Clone model/optimizer/scheduler so each candidate starts from
                # the same state and the search does not disturb the originals.
                new_model = Net(input_dim, args).to(args.device)
                new_model.load_state_dict(model.state_dict())
                new_optimizer = torch.optim.SGD(new_model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
                new_optimizer.load_state_dict(optimizer.state_dict())
                new_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(new_optimizer, T_max=args.epochs)
                new_scheduler.load_state_dict(scheduler.state_dict())
                try_accs, current_val, current_val_model = try_edge_message_ratio(edge_message_ratio,
                                                            new_model, datasets, dataloaders, new_optimizer,
                                                            args, communities, from_community, new_scheduler)
                if (args.criterion == 'val' and try_val_max < try_accs['val']):
                    try_val_max = try_accs['val']
                    best_ratio = edge_message_ratio
                    best_try_model = new_model
                    best_try_optimizer = new_optimizer
                    best_try_scheduler = new_scheduler
                    best_val = current_val
                    best_val_model = current_val_model
                if (args.criterion == 'gap' and try_val_max > abs(try_accs['train'] - try_accs['val'])):
                    try_val_max = abs(try_accs['train'] - try_accs['val'])
                    best_ratio = edge_message_ratio
                    best_try_model = new_model
                    best_try_optimizer = new_optimizer
                    best_try_scheduler = new_scheduler
                    best_val = current_val
                    best_val_model = current_val_model
            # Adopt the winning candidate's state going forward.
            model = best_try_model
            optimizer = best_try_optimizer
            scheduler = best_try_scheduler
            datasets['train'].edge_message_ratio = best_ratio
            edge_message_ratio_changes.append(datasets['train'].edge_message_ratio)
            if val_max < best_val:
                val_max = best_val
                best_model = best_val_model
            waiting = args.try_epochs - 1
            continue
        if(args.random):
            # Random-search baseline: pick a fresh ratio every epoch.
            datasets['train'].edge_message_ratio = random.uniform(0.1, 0.9)
        for i in range(len(datasets['train'].graphs)):
            sample_community_ratio(datasets['train'].graphs[i], datasets['train'].community_ratio, communities, from_community)
        for iter_i, batch in enumerate(dataloaders['train']):
            batch.to(args.device)
            model.train()
            optimizer.zero_grad()
            pred = model(batch)
            loss = model.loss(pred, batch.edge_label.type(pred.dtype))
            loss.backward()
            optimizer.step()
            if scheduler is not None:
                scheduler.step()
        log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'
        accs, _ = test(model, dataloaders, args)
        if(args.verbose):
            print(log.format(epoch, accs['train'], accs['val'], accs['test']))
        if val_max < accs['val']:
            val_max = accs['val']
            best_model = copy.deepcopy(model)
    log = 'Best, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'
    accs, _ = test(best_model, dataloaders, args)
    print(log.format(accs['train'], accs['val'], accs['test']))
    return np.array([accs['train'], accs['val'], accs['test']]), edge_message_ratio_changes
def test(model, dataloaders, args, max_train_batches=1):
    """Evaluate per-batch mean ROC-AUC and loss for every split.

    :param model: trained link-prediction model.
    :param dataloaders: mapping of split name ('train'/'val'/'test') to loader.
    :param args: namespace providing ``device``.
    :param max_train_batches: cap on batches evaluated for the 'train' split.
    :return: (accs, losses) dicts keyed by split name.
    """
    model.eval()
    accs, losses = {}, {}
    for mode, loader in dataloaders.items():
        total_auc = 0
        total_loss = 0
        seen = 0
        for batch in loader:
            batch.to(args.device)
            pred = model(batch)
            total_loss += model.loss(pred, batch.edge_label.type(pred.dtype)).cpu().data.numpy()
            total_auc += roc_auc_score(batch.edge_label.flatten().cpu().numpy(),
                                       pred.flatten().data.cpu().numpy())
            seen += 1
            # do not eval on the entire training set for efficiency
            if mode == 'train' and seen >= max_train_batches:
                break
        accs[mode] = total_auc / seen
        losses[mode] = total_loss / seen
    return accs, losses
def run(args):
    """Build the dataset, model and dataloaders for one configuration and train.

    :param args: experiment configuration (dataset, mode, edge_message_ratio,
        data_split, batch_size, epochs, verbose, device, ...).
    :return: the (accuracies, edge_message_ratio_changes) tuple from ``train``.
    :raises ValueError: if ``args.dataset`` is not a known Planetoid dataset.
    """
    # Previously an unknown dataset silently left pyg_dataset as None and
    # crashed later inside GraphDataset; fail fast with a clear message instead.
    dataset_names = {'cora': 'Cora', 'citeseer': 'CiteSeer', 'pubmed': 'PubMed'}
    if args.dataset not in dataset_names:
        raise ValueError('Unsupported dataset: {}'.format(args.dataset))
    pyg_dataset = Planetoid('./datasets', dataset_names[args.dataset], transform=T.TargetIndegree())
    edge_train_mode = args.mode
    if args.verbose:
        print('edge train mode: {}'.format(edge_train_mode))
    graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
    # Negatives are resampled manually (community-aware), hence resample_negatives=False.
    dataset = GraphDataset(graphs,
                           task='link_pred',
                           edge_message_ratio=args.edge_message_ratio,
                           edge_train_mode=edge_train_mode,
                           resample_disjoint=True,
                           resample_disjoint_period=1,
                           resample_negatives=False)
    if args.verbose:
        print('Initial dataset: {}'.format(dataset))
    # split dataset
    datasets = {}
    datasets['train'], datasets['val'], datasets['test'] = dataset.split(
        transductive=True, split_ratio=args.data_split)
    if args.verbose:
        print('after split')
        print('Train message-passing graph: {} nodes; {} edges.'.format(
            datasets['train'][0].G.number_of_nodes(),
            datasets['train'][0].G.number_of_edges()))
        print('Val message-passing graph: {} nodes; {} edges.'.format(
            datasets['val'][0].G.number_of_nodes(),
            datasets['val'][0].G.number_of_edges()))
        print('Test message-passing graph: {} nodes; {} edges.'.format(
            datasets['test'][0].G.number_of_nodes(),
            datasets['test'][0].G.number_of_edges()))
    # node feature dimension
    input_dim = datasets['train'].num_node_features
    model = Net(input_dim, args).to(args.device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
    follow_batch = []
    # Only the training split is shuffled.
    dataloaders = {split: DataLoader(
        ds, collate_fn=Batch.collate(follow_batch),
        batch_size=args.batch_size, shuffle=(split == 'train'))
        for split, ds in datasets.items()}
    if args.verbose:
        print('Graphs after split: ')
        for key, dataloader in dataloaders.items():
            for batch in dataloader:
                print(key, ': ', batch)
    return train(model, datasets, dataloaders, optimizer, args, scheduler=scheduler)
def write_file(file_name, total_acc):
    """Persist train/val/test accuracies as a one-row CSV and echo them."""
    print(total_acc)
    columns = ['Train', 'Validation', 'Test']
    frame = pd.DataFrame(data=total_acc.reshape(1, -1), columns=columns)
    frame.to_csv(file_name, index=False)
    print(frame)
def experiment(iterations, args, file_name_results=None, file_name_edge_message_ratio_changes=None):
    """Run ``iterations`` seeded repetitions of one configuration and average results.

    :param iterations: number of repeats; seeds 1..iterations are used.
    :param args: experiment configuration forwarded to ``run``.
    :param file_name_results: optional CSV path for the averaged accuracies.
    :param file_name_edge_message_ratio_changes: optional CSV path for the
        per-iteration edge-message-ratio trajectories.
    """
    edge_message_ratio_changes_iterations = []
    total_acc = np.zeros(3)
    for seed in range(1, iterations + 1):
        # Seed every RNG so each iteration is reproducible.
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        acc, edge_message_ratio_changes = run(args)
        total_acc = np.add(total_acc, acc)
        edge_message_ratio_changes_iterations.append(edge_message_ratio_changes)
    total_acc /= iterations
    # PEP 8: compare to None with `is not None`, not `!= None`.
    if file_name_results is not None:
        write_file(file_name_results, total_acc)
    if file_name_edge_message_ratio_changes is not None:
        edge_message_ratio_changes_iterations = np.array(edge_message_ratio_changes_iterations)
        data = {"Iteration {}".format(i): edge_message_ratio_changes_iterations[i - 1, :] for i in range(1, iterations + 1)}
        frame = pd.DataFrame(data)
        frame.to_csv(file_name_edge_message_ratio_changes, index=False)
def main():
    """Run the full experiment grid: fixed-ratio baselines, random search, and AdaGrid.

    For every data split, runs (a) a complete search over constant
    edge_message_ratios, (b) a random-ratio baseline, and (c) AdaGrid for every
    criterion / adapt-epoch / try-epoch combination, writing each result set to
    its own CSV under ``<dataset>-<layers>-<hidden>/results/<split>/``.
    """
    global_args = arg_parse()
    iterations = 3
    device = torch.device('cuda:{}'.format(global_args.gpu) if torch.cuda.is_available() else 'cpu')
    dataset_name = global_args.dataset
    verbose = global_args.verbose
    layer = global_args.num_layers
    hidden_dim = global_args.hidden_dim
    # Grid axes: train/val/test ratios, adaptation period, search budget, criterion.
    data_splits = [[0.8, 0.1, 0.1], [0.5, 0.25, 0.25], [0.2, 0.4, 0.4]]
    adapt_epochs = [100, 50, 10]
    try_epochs = [1, 5, -1]
    criterions = ['val', 'gap']
    edge_message_ratios = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    for data_split in data_splits:
        folder_dataset = "{}-{}-{}".format(dataset_name, layer, hidden_dim)
        folder_split = "/{}-{}-{}".format(int(100 * data_split[0]), int(100 * data_split[1]), int(100 * data_split[2]))
        folder_name = folder_dataset + "/results/" + folder_split
        Path(folder_name).mkdir(parents=True, exist_ok=True)
        # complete search - constant edge message ratio
        for edge_message_ratio in edge_message_ratios:
            args = Arguments(device=device, mode="disjoint", verbose=verbose, dataset=dataset_name, adapt=False,
                    edge_message_ratio=edge_message_ratio, hidden_dim=hidden_dim, layers=layer, data_split=data_split)
            file_name = folder_name + "/normal_{}.csv".format(int(100 * edge_message_ratio))
            experiment(iterations, args, file_name)
        # random search - changes edge message randomly after every epoch
        args = Arguments(device=device, mode="disjoint", verbose=verbose, dataset=dataset_name,
                    adapt=False, random=True, hidden_dim=hidden_dim, layers=layer, data_split=data_split)
        file_name = folder_name + "/random.csv"
        experiment(iterations, args, file_name)
        # AdaGrid
        for criterion in criterions:
            for adapt_epoch in adapt_epochs:
                for try_epoch in try_epochs:
                    # try_epoch == -1 means "use the full adaptation period".
                    if(try_epoch == -1):
                        try_epoch = adapt_epoch
                    args = Arguments(device=device, mode="disjoint", verbose=verbose, dataset=dataset_name,
                            adapt=True, adapt_epochs=adapt_epoch, try_epochs=try_epoch, criterion=criterion,
                            hidden_dim=hidden_dim, layers=layer, data_split=data_split)
                    file_name_results = folder_name + "/adapt_{}_{}_{}.csv".format(criterion, adapt_epoch, try_epoch)
                    file_name_edge_message_ratio = folder_name + "/edge_message_ratio_adapt_{}_{}_{}.csv".format(criterion, adapt_epoch, try_epoch)
                    experiment(iterations, args, file_name_results, file_name_edge_message_ratio)
# Script entry point: run the full experiment grid when executed directly.
if __name__ == '__main__':
    main()
| 19,954 | 30.979167 | 132 | py |
SMERF | SMERF-main/scripts/adv_train.py | """
This is an example of how to use ART and Keras to perform adversarial training using data generators for CIFAR10
"""
import keras
import numpy as np
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D, Input, BatchNormalization
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras.utils.np_utils import to_categorical
import os
import keras.backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, ZeroPadding2D
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
import sys
sys.path.append('../')
import smerf
import argparse
from art.attacks.evasion import ProjectedGradientDescent, FastGradientMethod
from art.classifiers import KerasClassifier
from art.data_generators import KerasDataGenerator
from art.defences.trainer import AdversarialTrainer
import time
# Directory to save the data
DATA_DIR = '../data'
# Directory to save intermediary/final results.
# makedirs(exist_ok=True) creates missing parent directories and avoids the
# exists()/mkdir() race of the previous version.
CACHE_DIR = '../outputs/cache'
os.makedirs(CACHE_DIR, exist_ok=True)
# Directory to save plots
PLOT_DIR = '../outputs/plots'
os.makedirs(PLOT_DIR, exist_ok=True)
def build_simple_model(input_shape=(64,64,3), interm_dim=200, lr=0.0001):
    """Build and compile the small 3-conv CNN used for the simple experiments.

    :param input_shape: (H, W, C) shape of the input images.
    :param interm_dim: width of the penultimate dense layer.
    :param lr: Adam learning rate.
    :return: a compiled keras Sequential model with a 2-unit logit head.
    """
    model = keras.Sequential()
    model.add(keras.layers.Conv2D(filters=32, kernel_size=3, strides=(2, 2), activation='relu', input_shape=input_shape))
    model.add(keras.layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu'))
    model.add(keras.layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu'))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(interm_dim, activation='relu'))
    model.add(keras.layers.Dense(2))
    model.compile(optimizer=keras.optimizers.Adam(lr=lr), loss='binary_crossentropy', metrics=['accuracy'])
    return model
def build_complex_model(input_shape=(64,64,3), interm_dim=200, lr=0.0001):
    """Build and compile the deeper 4-conv CNN used for the complex experiments.

    :param input_shape: (H, W, C) shape of the input images.
    :param interm_dim: width of the two penultimate dense layers.
    :param lr: Adam learning rate.
    :return: a compiled keras Sequential model with a 2-unit logit head.
    """
    model = keras.Sequential()
    model.add(keras.layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu', input_shape=input_shape))
    model.add(keras.layers.Conv2D(filters=128, kernel_size=3, strides=(2, 2), activation='relu'))
    model.add(keras.layers.Conv2D(filters=256, kernel_size=3, strides=(2, 2), activation='relu'))
    model.add(keras.layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu'))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(interm_dim, activation='relu'))
    model.add(keras.layers.Dense(interm_dim, activation='relu'))
    model.add(keras.layers.Dense(2))
    model.compile(optimizer=keras.optimizers.Adam(lr=lr), loss='binary_crossentropy', metrics=['accuracy'])
    return model
def main(args):
    """Adversarially train (or augmentation-train) a model for one SMERF experiment.

    Loads the textbox dataset for ``args.exp``, builds the matching CNN and loads
    its pretrained weights, crafts adversarial examples with the chosen attack,
    trains according to ``args.type`` ('aug' or 'scratch'), and reports clean and
    adversarial accuracy.

    :param args: parsed CLI namespace (exp, attack, ep, batch, lr, type).
    :raises ValueError: for an unsupported experiment number or attack type.
    """
    exp_no = args.exp # experiment number
    attack_type = args.attack # attack type
    epochs = args.ep
    batch_size = args.batch
    print('EXP_NO = %f'%exp_no)
    lr = args.lr # learning rate used to train the model
    model_name = 'w-adv2-%0.2f.pt'%exp_no # model file to save/load
    train_type = args.type
    # Map the experiment number to its data-generator module and dataset size.
    if exp_no == 1.11:
        import smerf.simple_fr as textbox_exp
        no_data = 2000
    elif exp_no == 2.11:
        import smerf.simple_nr as textbox_exp
        no_data = 5000
    elif exp_no == 1.2:
        import smerf.complex_fr as textbox_exp
        no_data = 2000
    elif exp_no == 3.71: # Complex-CR1
        import smerf.complex_cr1 as textbox_exp
        no_data = 15000
    elif exp_no == 3.72: # Complex-CR2
        import smerf.complex_cr2 as textbox_exp
        no_data = 15000
    elif exp_no == 3.73: # Complex-CR3
        import smerf.complex_cr3 as textbox_exp
        no_data = 15000
    elif exp_no == 3.74: # Complex-CR4
        import smerf.complex_cr4 as textbox_exp
        no_data = 15000
    else:
        # Previously an unknown exp_no fell through and raised a confusing
        # NameError on textbox_exp below; fail fast instead.
        raise ValueError('Unsupported experiment number: %s' % exp_no)
    ### Generate (or load) datasets
    train_data, test_data, train_primary, test_primary, train_secondary, test_secondary = \
            textbox_exp.generate_textbox_data(n=no_data,
                                              save=True,
                                              save_dir='../data',
                                              exp_no=exp_no,
                                              random_bg=0)
    x_train = train_data.X
    x_test = test_data.X
    y_train = train_data.y
    y_test = test_data.y
    original_name = 'w%0.2f.pt'%exp_no
    y_train_oh = to_categorical(y_train, 2)
    y_test_oh = to_categorical(y_test, 2)
    print('data loaded')
    datagen = ImageDataGenerator()
    datagen.fit(x_train)
    art_datagen = KerasDataGenerator(datagen.flow(x=x_train,
                                                  y=y_train_oh,
                                                  batch_size=batch_size,
                                                  shuffle=True),
                                     size=x_train.shape[0], batch_size=batch_size)
    print('generator fit')
    # Simple experiments (1.11, 2.11) use the small CNN; all others the deeper one.
    if exp_no == 1.11 or exp_no == 2.11:
        model = build_simple_model(lr=lr)
        model.load_weights(os.path.join(CACHE_DIR, original_name))
    else:
        model = build_complex_model(lr=lr)
        model.load_weights(os.path.join(CACHE_DIR, original_name))
    classifier = KerasClassifier(model, clip_values=(0, 1), use_logits=False)
    # Create attack for adversarial trainer, crafting adv examples on the target model
    print('Creating Attack')
    if attack_type == 'pgd':
        attacker = ProjectedGradientDescent(classifier, eps=0.3, eps_step=0.1, max_iter=10, num_random_init=1)
    elif attack_type == 'fgsm':
        attacker = FastGradientMethod(classifier, eps=0.2)
    else:
        # Bare ValueError() gave no hint about the cause; include the bad value.
        raise ValueError('Unsupported attack type: %s' % attack_type)
    # Create adversarial samples (cached on disk per attack/experiment).
    if os.path.exists('x_train2_%s_%0.2f.npy'%(attack_type, exp_no)):
        x_train_pgd = np.load('x_train2_%s_%0.2f.npy'%(attack_type, exp_no))
        x_test_pgd = np.load('x_test2_%s_%0.2f.npy'%(attack_type, exp_no))
    else:
        x_test_pgd = attacker.generate(x_test)
        x_train_pgd = attacker.generate(x_train)
        np.save(open('x_train2_%s_%0.2f.npy'%(attack_type, exp_no), 'wb'), x_train_pgd)
        np.save(open('x_test2_%s_%0.2f.npy'%(attack_type, exp_no), 'wb'), x_test_pgd)
    print(x_test_pgd.shape)
    preds = np.argmax(classifier.predict(x_test_pgd), axis=1)
    acc = np.sum(preds == np.argmax(y_test_oh, axis=1)) / y_test.shape[0]
    print("Classifier before adversarial training")
    # The original passed the value as a second print() argument instead of
    # %-formatting it, printing the raw template string.
    print("Accuracy on adversarial samples: %.2f%%" % (acc * 100))
    # Create adversarial trainer and perform adversarial training
    print('Training')
    if train_type == 'aug':
        # augment the data with the precomputed adversarial examples
        x_train = np.append(x_train, x_train_pgd, axis=0)
        y_train = np.append(y_train, y_train, axis=0)
        y_train_oh = np.append(y_train_oh, y_train_oh, axis=0)
        # retrain the model with augmented data
        model.compile(optimizer=keras.optimizers.Adam(lr=lr), loss='binary_crossentropy', metrics=['accuracy'])
        classifier.fit(x_train, y_train_oh, nb_epochs=epochs, batch_size=batch_size, verbose=True)
        classifier._model.save_weights(os.path.join(CACHE_DIR, model_name))
    elif train_type == 'scratch':
        # ART's trainer crafts fresh adversarial examples on the fly (ratio=1.).
        adv_trainer = AdversarialTrainer(classifier, attacks=attacker, ratio=1.)
        adv_trainer.fit_generator(art_datagen, nb_epochs=epochs)
        model = classifier._model
        model.save_weights(os.path.join(CACHE_DIR, model_name))
    # Evaluate the adversarially trained model on clean test set
    labels_true = np.argmax(y_test_oh, axis=1)
    labels_test = np.argmax(classifier.predict(x_test), axis=1)
    print('Accuracy test set: %.2f%%' % (np.sum(labels_test == labels_true) / x_test.shape[0] * 100))
    # Evaluate the adversarially trained model on original adversarial samples
    labels_pgd = np.argmax(classifier.predict(x_test_pgd), axis=1)
    print('Accuracy on original PGD adversarial samples: %.2f%%' % (np.sum(labels_pgd == labels_true) / x_test.shape[0] * 100))
    # Evaluate the adversarially trained model on fresh adversarial samples produced on the adversarially trained model
    x_test_pgd = attacker.generate(x_test)
    labels_pgd = np.argmax(classifier.predict(x_test_pgd), axis=1)
    print('Accuracy on new PGD adversarial samples: %.2f%%' % (np.sum(labels_pgd == labels_true) / x_test.shape[0] * 100))
# Command-line interface for one adversarial-training run.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp', type=float, help='experiment number. 1-FR, 2-NR, 3-CR')
    parser.add_argument('--ep', type=int, default=10, help='max epoch')
    parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
    parser.add_argument('--attack', type=str, default='fgsm', help='attack method')
    parser.add_argument('--batch', type=int, default=256, help='batch size')
    parser.add_argument('--type', type=str, default='scratch', help='train type')
    args = parser.parse_args()
    print(args)
    main(args)
| 9,861 | 43.224215 | 128 | py |
SMERF | SMERF-main/smerf/explanations.py | import numpy as np
import imp
textcolorutils = imp.load_source('textcolor_utils', '../smerf/textcolor_utils.py')
import innvestigate
import keras
import keras.backend as K
import tensorflow as tf
import cv2
import pickle
import os
from tqdm import tqdm
# NOTE Helper functions for saliency methods that are not supported by iNNvestigate libary
# We recommend specifying these methods on a separate file to avoid clutter.
from .grad_cam_utils import *
from .shap_utils import *
from .lime_utils import *
def softmax(x):
    """Row-wise softmax of a 2-D array of logits.

    Subtracts the per-row maximum before exponentiating so that large logits do
    not overflow to inf/NaN; the shift cancels out mathematically.

    :param x: (N, C) array of pre-softmax activations.
    :return: (N, C) array of probabilities; each row sums to 1.
    """
    shifted = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)
def run_methods(model,
                x_data,
                y_data,
                x_train,
                no_images=10,
                exp_no=0,
                load=True,
                f_name=None,
                directory='../outputs/cache',
                split=None,
                model_type=0):
    """
    Given a trained model, run saliency methods and return the results.
    :param model: keras sequential model to be explained
    :param x_data: (N, H, W, C) image dataset
    :param y_data: (N,) image labels
    :param x_train: training images, used as background data by SHAP
    :param no_images: number of sample images to run methods on (n)
    :param exp_no: experiment number for reference
    :param load: if True, load from a cached results
    :param f_name: optional basename override for the cache files written
    :param directory: directory to save or load the results
    :param split: if not None, specifies the bucket index the methods are run on
    :param model_type: 0 (default) for simple CNNs. 1 for VGG16. 2 for ResNet50.
    :return (result, method, text, idx) tuple where result is (no_images, M, H, C, W) saliency output
    for M methods and no_images images; method is the list of configs for the saliency methods;
    text is the prediction information on the images; idx is the original index of the sampled images from the dataset
    """
    # Strip any trailing softmax by rebuilding the model on its raw outputs.
    model_in = keras.models.Model(inputs=model.inputs,
                                  outputs=model.outputs)
    # Cache file names encode the experiment number (and split bucket, if any).
    if split is None:
        result_name = os.path.join(directory, 'result_%0.2f.pkl'%exp_no)
        idx_name = os.path.join(directory, 'idx_%0.2f.pkl'%exp_no)
        methods_name = os.path.join(directory, 'methods_%0.2f.pkl'%exp_no)
        text_name = os.path.join(directory, 'text_%0.2f.pkl'%exp_no)
    else:
        result_name = os.path.join(directory, 'result_%0.2f_%d.pkl'%(exp_no, split))
        idx_name = os.path.join(directory, 'idx_%0.2f_%d.pkl'%(exp_no, split))
        methods_name = os.path.join(directory, 'methods_%0.2f_%d.pkl'%(exp_no, split))
        text_name = os.path.join(directory, 'text_%0.2f_%d.pkl'%(exp_no, split))
    if os.path.exists(result_name) and os.path.exists(idx_name) and load==True:
        loaded = True
        print('loading results from cache in {}'.format(directory))
        result = pickle.load(open(result_name, 'rb'))
        idx = pickle.load(open(idx_name, 'rb'))
        methods = pickle.load(open(methods_name, 'rb'))
        text = pickle.load(open(text_name, 'rb'))
    else:
        loaded = False
        if f_name is not None:
            result_name = os.path.join(directory, f_name+'.pkl')
            idx_name = os.path.join(directory, f_name+'_idx.pkl')
            methods_name = os.path.join(directory, f_name+'_methods.pkl')
            text_name = os.path.join(directory, f_name+'_text.pkl')
        print('cache not found')
        noise_scale = 0.1
        input_range = (0,1)
        ## NOTE Methods defined here is using the iNNvestigate library.
        ## Other methods should be added manually via separately defined helper functions at the end.
        methods = [
            # NAME                    OPT.PARAMS                POSTPROC FXN            TITLE
            # Show input.
            ("input",                 {},                       textcolorutils.identity, "Input"),
            # Function
            ("gradient",              {"postprocess": "abs"},   textcolorutils.graymap,  "Gradient"),
            ("smoothgrad",            {"augment_by_n": 64,
                                       "noise_scale": noise_scale,
                                       "postprocess": "square"},textcolorutils.graymap,  "SmoothGrad"),
            # Signal
            ("deconvnet",             {},                       textcolorutils.bk_proj,  "Deconvnet"),
            ("guided_backprop",       {},                       textcolorutils.bk_proj,  "Guided Backprop",),
            # Interaction
            ("deep_taylor.bounded",   {"low": input_range[0],
                                       "high": input_range[1]}, textcolorutils.heatmap,  "DeepTaylor"),
            ("input_t_gradient",      {},                       textcolorutils.heatmap,  "Input * Gradient"),
            ("integrated_gradients",  {"reference_inputs": input_range[0], "steps": 64}, textcolorutils.heatmap, "Integrated Gradients"),
            ("lrp.z",                 {},                       textcolorutils.heatmap,  "LRP-Z"),
            ("lrp.epsilon",           {"epsilon": 1},           textcolorutils.heatmap,  "LRP-Epsilon"),
            ("lrp.sequential_preset_a_flat",{"epsilon": 1},     textcolorutils.heatmap,  "LRP-PresetAFlat"),
            ("lrp.sequential_preset_b_flat",{"epsilon": 1},     textcolorutils.heatmap,  "LRP-PresetBFlat"),
            ("deep_lift.wrapper",     {"nonlinear_mode":"reveal_cancel", "reference_inputs": 0, "verbose": 0}, textcolorutils.heatmap, "DeepLIFT-RevealCancel"),
            ("deep_lift.wrapper",     {"nonlinear_mode":"rescale", "reference_inputs": 0, "verbose":0}, textcolorutils.heatmap, "DeepLIFT-Rescale"),
        ]
        # Create analyzers.
        analyzers = []
        for method in methods:
            try:
                analyzer = innvestigate.create_analyzer(method[0],        # analysis method identifier
                                                        model_in,         # model without softmax output
                                                        **method[1])      # optional analysis parameters
            except innvestigate.NotAnalyzeableModelException:
                # Not all methods work with all models.
                analyzer = None
            analyzers.append(analyzer)
        # Run the saliency methods
        text = []
        label_to_class_name = {0: 'Neg', 1: 'Pos'}
        color_conversion = None
        channels_first = keras.backend.image_data_format() == "channels_first"
        # random set of images to test (fixed seed so the sample is reproducible)
        np.random.seed(1)
        idx = np.random.choice(len(x_data), no_images, replace=False) # index of the test data selected
        images = x_data[idx]
        labels = y_data[idx]
        h, w, c = images[0].shape
        result = np.zeros((len(images), len(analyzers), h, w, c))
        # Run methods on batch
        for aidx, analyzer in enumerate(tqdm(analyzers)):
            if methods[aidx][0] == "input":
                # Do not analyze, but keep not preprocessed input.
                a = images
            elif analyzer:
                if model_type == 1 or model_type ==2: # NOTE for fine-tuned model, deep lift does not support maxpooling2d
                    if 'deep_lift' not in methods[aidx][0]:
                        # Analyze.
                        a = analyzer.analyze(images)
                        # Apply common postprocessing, e.g., re-ordering the channels for plotting.
                        a = textcolorutils.postprocess(a, color_conversion, channels_first)
                        # Apply analysis postprocessing, e.g., creating a heatmap.
                        a = methods[aidx][2](a)
                    else:
                        print('deepLift does not support specific layers')
                        a = np.zeros_like(images)
                else:
                    # Analyze.
                    a = analyzer.analyze(images)
                    # Apply common postprocessing, e.g., re-ordering the channels for plotting.
                    a = textcolorutils.postprocess(a, color_conversion, channels_first)
                    # Apply analysis postprocessing, e.g., creating a heatmap.
                    a = methods[aidx][2](a)
            else:
                a = np.zeros_like(images)
            # Store the analysis.
            result[:, aidx] = a
        # Predict final activations, probabilites, and label.
        presm = model.predict(images)
        prob = softmax(presm)
        y_hat = prob.argmax(axis=1)
        for i, y in enumerate(labels):
            # Save prediction info:
            text.append(("%s" % label_to_class_name[y],    # ground truth label
                         "%.2f" % presm.max(axis=1)[i],    # pre-softmax logits
                         "%.2f" % prob.max(axis=1)[i],     # probabilistic softmax output
                         "%s" % label_to_class_name[y_hat[i]]  # predicted label
                        ))
        ####### NOTE Add additional methods to run below (those that are not supported
        ####### in the iNNvestigate library)
        # Add Grad-CAM
        print('  Running Grad-CAM')
        result, methods = add_grad_cam(result, methods, model, images, exp_no, directory, model_type)
        # Add SHAP
        print('  Running SHAP')
        result, methods = add_shap(result, methods, model, images, labels, x_train, exp_no, model_type)
        # # Add LIME
        # print('  Running LIME')
        # result, methods = add_lime(result, methods, model, images, labels, x_train, exp_no)
        # random baseline
        print('  Running random')
        random_results = np.random.random((len(images), 1, h, w, c))
        methods.append(('random',{}, textcolorutils.heatmap, "Random"))
        # edge detector
        print('  Running edge detection')
        edge_results = np.zeros((len(images), 1, h, w, c))
        for i, x in enumerate(images):
            ed = cv2.Sobel(x, cv2.CV_64F,1,0,ksize=5)
            # Min-max normalize the Sobel response into [0, 1].
            ed = (ed - np.min(ed)) / (np.max(ed) - np.min(ed))
            edge_results[i,0] = ed
        methods.append(('edge', {}, textcolorutils.heatmap, "Edge-detection"))
        result = np.concatenate((result, random_results, edge_results), axis=1)
        ###### Adding new methods should be completed above ######
        if not loaded:
            # Save results
            pickle.dump(result, open(result_name, 'wb'))
            pickle.dump(idx, open(idx_name, 'wb'))
            pickle.dump(methods, open(methods_name, 'wb'))
            pickle.dump(text, open(text_name, 'wb'))
    ## saliency results: size (no_samples, methods, img_size)
    print('  No-images: %d \t No-methods: %d finished.'%(result.shape[0], result.shape[1]))
    return result, methods, text, idx
#### Helper functions for additional explanation methods to include ####
#### NOTE Add additional explanation methods below
#### Define methods that will take in the result matrix containing outputs from other methods
#### and add the new method's result to return.
#### The result matrix has a shape (no_samples, no_methods, h, w, c), so the output of the new methods
#### defined below should respect this format and be concatenated to the existing result matrix
#### along axis=1 (no_methods).
#### Below shows examples of adding two new methods (GradCAM and DeepSHAP) and basic steps that
#### should be followed within these functions, what should be returned in the end.
# Function for adding GradCAM results to the existing results.
# Function for adding GradCAM results to the existing results.
def add_grad_cam(result, methods, model, images, exp_no, directory, model_type):
    """Append Grad-CAM saliency maps to the results matrix.

    :param result: (n_images, n_methods, H, W, C) accumulated saliency outputs.
    :param methods: list of (name, params, postproc, title) method configs.
    :param model: model to explain.
    :param images: (n_images, H, W, C) input batch.
    :param exp_no: experiment number used to locate the cached weight file.
    :param directory: directory holding the weight files.
    :param model_type: -1 adversarial, 0 simple CNN, 1 VGG16, 2 AlexNet.
    :return: (result, methods) with the Grad-CAM output/config appended.
    :raises ValueError: if model_type is not one of the supported codes.
    """
    # Weight-file name templates keyed by model type (replaces an if/elif chain).
    name_templates = {
        -1: 'w-adv-%0.2f.pt',  # adversarially trained
        0: 'w%0.2f.pt',
        1: 'w_vgg%0.2f.pt',
        2: 'w_alex%0.2f.pt',
    }
    if model_type not in name_templates:
        raise ValueError('model_type not supported')
    model_name = name_templates[model_type] % exp_no
    # compute attributions
    c, h, g = grad_cam_run(model, images, os.path.join(directory, model_name), exp_no, model_type)
    # add the new results to the existing results
    added_result = np.expand_dims(h, 1)
    result = np.concatenate((result, added_result), axis=1)
    # add the new method information to the existing method information
    methods.append(('grad-cam', {}, textcolorutils.graymap, "Grad-CAM"))
    # return both result and method information
    return result, methods
# Function for adding DeepSHAP results to the existing results.
def add_shap(result, methods, model, images, labels, x_train, exp_no, model_type):
    """Append DeepSHAP attributions for ``images`` to the result matrix.

    The attributions occupy one new method slot along axis 1 of ``result``
    (shape (no_samples, no_methods, h, w, c)); a matching entry is appended
    to ``methods``.  Returns the updated (result, methods).
    """
    attributions = shap_run(model, images, labels, x_train, exp_no, model_type)
    # Sanity-check the expected (n, 1, h, w, c) layout before concatenating.
    n_samples = images.shape[0]
    height, width, channels = images[0].shape
    assert attributions.shape == (n_samples, 1, height, width, channels)
    result = np.concatenate((result, attributions), axis=1)
    # Register the method's display metadata alongside its results.
    methods.append(('deep-shap', {}, textcolorutils.graymap, "DeepSHAP"))
    return result, methods
def add_lime(result, methods, model, images, labels, x_train, exp_no):
    """Append LIME attributions for ``images`` to the result matrix.

    The attributions occupy one new method slot along axis 1 of ``result``
    (shape (no_samples, no_methods, h, w, c)); a matching entry is appended
    to ``methods``.  Returns the updated (result, methods).
    """
    attributions = lime_run(model, images, labels, x_train, exp_no)
    # Sanity-check the expected (n, 1, h, w, c) layout before concatenating.
    n_samples = images.shape[0]
    height, width, channels = images[0].shape
    assert attributions.shape == (n_samples, 1, height, width, channels)
    result = np.concatenate((result, attributions), axis=1)
    # Register the method's display metadata alongside its results.
    methods.append(('lime', {}, textcolorutils.graymap, "LIME"))
    return result, methods
# New method to be added to the pipeline
def add_your_new_method(result, methods, model, images, **kwargs):
    """Template for adding a new explanation method to the pipeline.

    Implementations should compute attributions of shape (n, 1, h, w, c),
    concatenate them to ``result`` along axis=1, append a
    ``(key, options, colormap, display_name)`` tuple to ``methods``, and
    return the updated pair — mirroring add_grad_cam/add_shap/add_lime above.
    """
    # TODO fill in the code for your new method to be added.
    return result, methods
| 13,998 | 45.97651 | 156 | py |
SMERF | SMERF-main/smerf/grad_cam_utils.py | """
Source code adapted from https://github.com/wawaku/grad-cam-keras
"""
from keras.preprocessing import image
from tensorflow.python.framework import ops
import keras.backend as K
import tensorflow as tf
import numpy as np
import keras
import cv2
import os, gc
from .models import TextBoxCNN as TextBoxCNN
from .models import TextBoxCNN_adv as TextBoxCNN_adv
from .models import VGG16_model as VGG16_model
from .models import AlexNet_model as AlexNet_model
def register_gradient():
    """Register the 'GuidedBackProp' gradient op with TensorFlow (idempotent).

    Guided backprop passes a gradient only where both the incoming gradient
    and the op's forward input are positive.  Checks TF's private gradient
    registry first so repeated calls do not raise a double-registration error.
    """
    if "GuidedBackProp" not in ops._gradient_registry._registry:
        @ops.RegisterGradient("GuidedBackProp")
        def _GuidedBackProp(op, grad):
            dtype = op.inputs[0].dtype
            # Mask the gradient: keep it only where grad > 0 AND input > 0.
            return grad * tf.cast(grad > 0., dtype) * \
                tf.cast(op.inputs[0] > 0., dtype)
def compile_saliency_function(model, activation_layer='block5_pool'):
    """Build a backend function computing input saliency for a given layer.

    The saliency is the gradient, w.r.t. the model input, of the sum of the
    channel-wise max of ``activation_layer``'s output.  Returns a Keras
    backend function mapping ``[input_image, learning_phase]`` to a
    one-element list containing the saliency tensor.

    Raises KeyError if ``activation_layer`` is not a layer name in ``model``.
    """
    input_img = model.input
    # Index layers by name, skipping the input layer (dict comprehension
    # instead of dict-of-list-of-pairs).
    layer_dict = {layer.name: layer for layer in model.layers[1:]}
    layer_output = layer_dict[activation_layer].output
    # Channel-wise max (axis=3 assumes channels-last), summed into a scalar.
    max_output = K.max(layer_output, axis=3)
    saliency = K.gradients(K.sum(max_output), input_img)[0]
    return K.function([input_img, K.learning_phase()], [saliency])
def modify_backprop(model, name, model_file, exp_no, model_type):
    """Rebuild the experiment's model with its ReLU gradient overridden.

    Within a ``gradient_override_map({'Relu': name})`` scope, the existing
    model's Keras ReLU activations are swapped for ``tf.nn.relu`` and a
    fresh model of the appropriate architecture is instantiated so its graph
    ops pick up the overridden gradient (e.g. 'GuidedBackProp').  Weights
    are loaded from ``model_file``.

    Returns the re-instantiated model.  Raises ValueError for an unknown
    ``model_type``.
    """
    g = tf.get_default_graph()
    with g.gradient_override_map({'Relu': name}):
        # get layers that have an activation
        layer_dict = [layer for layer in model.layers[1:]
                      if hasattr(layer, 'activation')]
        # Replace keras.activations.relu with tf.nn.relu so the Relu op in
        # the new graph is subject to the gradient override above.
        for layer in layer_dict:
            if layer.activation == keras.activations.relu:
                layer.activation = tf.nn.relu
        # Re-instantiate a new model under the override scope.
        if model_type == 0 or model_type == -1: # adversarial model
            # These experiment numbers use the deeper adversarial architecture.
            if exp_no >= 3.5 or exp_no == 1.2:
                new_model = TextBoxCNN_adv().model
            else:
                new_model = TextBoxCNN().model
        elif model_type == 1:
            new_model = VGG16_model().model
        elif model_type == 2:
            new_model = AlexNet_model().model
        else:
            raise ValueError('model_type must be 0, 1, or 2')
        new_model.load_weights(model_file)
    return new_model
def deprocess_image(x):
    '''
    Same normalization as in:
    https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py

    Squeezes to at most 3 dims, re-centers to mean 0 / std ~0.1, shifts to
    0.5, clips to [0, 1], and scales to a uint8 RGB array.

    NOTE(review): the in-place ops (-=, /=, *=) mutate the caller's array
    when a float ndarray is passed in — confirm callers do not reuse x.
    '''
    if np.ndim(x) > 3:
        x = np.squeeze(x)
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)  # epsilon guards against a zero-std (constant) input
    x *= 0.1
    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)
    # convert to RGB array
    x *= 255
    # Theano ordering stores channels first; move them last for display.
    if K.image_dim_ordering() == 'th':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
def grad_cam(model, x, category_index, layer_name):
    """
    Compute a Grad-CAM visualization for one input image.

    Args:
        model: model
        x: image input, shape (1, h, w, c)
        category_index: category index whose logit is explained
        layer_name: last convolution layer name

    Returns:
        (cam, heatmap): ``cam`` is a uint8 JET-colored overlay on the input,
        ``heatmap`` is the raw [0, 1] class-activation map at input size.

    NOTE(review): ``image_rgb = x[0, :]`` is a view, so the in-place ops
    below mutate the caller's ``x`` — confirm callers do not reuse it.
    """
    # get category loss
    class_output = model.output[:, category_index]
    # layer output
    convolution_output = model.get_layer(layer_name).output
    # get gradients of the class logit w.r.t. the conv feature maps
    grads = K.gradients(class_output, convolution_output)[0]
    # get convolution output and gradients for input
    gradient_function = K.function([model.input], [convolution_output, grads])
    output, grads_val = gradient_function([x])
    output, grads_val = output[0], grads_val[0]
    # Grad-CAM channel weights: spatially averaged gradients.
    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.dot(output, weights)
    # create heat map: upsample to input size, rectify, normalize to [0, 1]
    cam = cv2.resize(cam, (x.shape[1], x.shape[2]), cv2.INTER_LINEAR)
    cam = np.maximum(cam, 0)
    heatmap = cam / np.max(cam)
    # Return to BGR [0..255] from the preprocessed image
    image_rgb = x[0, :]
    image_rgb -= np.min(image_rgb)
    image_rgb = np.minimum(image_rgb, 255)
    # Blend the JET-colored heatmap with the (de-preprocessed) image.
    cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image_rgb)
    cam = 255 * cam / np.max(cam)
    return np.uint8(cam), heatmap
def grad_cam_run(model, x_sample, model_file, exp_no, model_type):
    """Run Grad-CAM and guided Grad-CAM over a batch of images.

    For each image: predict the top class, compute its Grad-CAM overlay and
    heatmap, then compute guided backprop saliency on a re-instantiated
    model (see modify_backprop) and multiply it by the heatmap to obtain
    guided Grad-CAM.

    Returns (cam_imgs, heat_maps, grad_cam_imgs), each shaped like x_sample;
    heat_maps are the single-channel maps repeated across 3 channels.

    NOTE(review): the guided model is rebuilt for every image even though it
    only depends on (model_file, exp_no, model_type) — looks loop-invariant;
    confirm before hoisting (TF graph state may be the reason it is inside).
    """
    # Grad-CAM attaches to the last Conv2D layer of the model.
    last_conv_layer_name = [x for x in model.layers if type(x) == keras.layers.convolutional.Conv2D][-1].name
    cam_imgs = np.zeros(x_sample.shape)
    heat_maps = np.zeros(x_sample.shape)
    grad_cam_imgs = np.zeros(x_sample.shape)
    for i in range(x_sample.shape[0]):
        img = x_sample[i][None,:,:,:]
        predictions = model.predict(img)
        pred_class = predictions.argmax(axis=1)[0]
        cam_image, heat_map = grad_cam(model, img, pred_class, last_conv_layer_name)
        # guided grad_cam img
        register_gradient()
        guided_model = modify_backprop(model, 'GuidedBackProp', model_file, exp_no, model_type)
        guided_model_name = [x for x in guided_model.layers if type(x) == keras.layers.convolutional.Conv2D][-1].name
        saliency_fn = compile_saliency_function(guided_model, activation_layer=guided_model_name)
        saliency = saliency_fn([img, 0])  # learning_phase=0 (inference)
        # Guided Grad-CAM: saliency gated by the class-activation heatmap.
        grad_cam_img = saliency[0] * heat_map[..., np.newaxis]
        # Min-max normalize unless the map is constant.
        if np.max(grad_cam_img) - np.min(grad_cam_img) != 0:
            grad_cam_img = (grad_cam_img - np.min(grad_cam_img)) / (np.max(grad_cam_img) - np.min(grad_cam_img))
        cam_imgs[i] = cam_image
        heat_maps[i] = np.repeat(heat_map[:, :, np.newaxis], 3, axis=2)
        grad_cam_imgs[i] = grad_cam_img[0]
        # Free the per-image guided model to bound memory growth.
        del guided_model
        gc.collect()
    return cam_imgs, heat_maps, grad_cam_imgs
| 5,486 | 33.727848 | 117 | py |
SMERF | SMERF-main/smerf/shap_utils.py | import shap
import tensorflow as tf
import keras.backend as K
import numpy as np
from smerf.models import *
import gc
# Uses SHAP library to obtain feature attributions
def shap_run(model, x_sample, y_sample, x_train, exp_no, model_type):
    """Compute DeepSHAP attributions for a batch of images.

    Saves the current model's weights, rebuilds the same architecture inside
    a fresh TF session (required by the TF1/keras combination, per the NOTE
    below), and runs shap.DeepExplainer with 1000 random training images as
    the background distribution.

    Returns an array of shape (n, 1, h, w, c): per-sample attributions for
    the true label, min-max scaled, max-reduced over channels, and repeated
    across the 3 channels.
    """
    ## NOTE due to complications in keras and TF versions (this code works only in TF1),
    ## a separate model should be redefined on a separate session where SHAP is run.
    model.save_weights('/tmp/model.pt')
    # Background/reference distribution for DeepExplainer.
    background = x_train[np.random.choice(x_train.shape[0], 1000, replace=False)]
    n, h, w, c = x_sample.shape
    output = np.zeros((n, 1, h, w, c))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # redefine the model (same architecture selection as elsewhere in the pipeline)
        if model_type == 0 or model_type ==-1: # adversarial model
            if exp_no >= 3.5 or exp_no==1.2:
                model_obj = TextBoxCNN_adv(lr=0.0001, max_epoch=10)
            else:
                model_obj = TextBoxCNN(lr=0.0001, max_epoch=10)
        elif model_type == 1:
            model_obj = VGG16_model()
        elif model_type == 2:
            model_obj = AlexNet_model()
        else:
            raise ValueError('model_type must be 0, 1, or 2')
        model_obj.model.load_weights('/tmp/model.pt')
        model_sess = model_obj.model
        # DeepSHAP
        e = shap.DeepExplainer(model_sess, background)
        shap_vals_deep = e.shap_values(x_sample)
        #shap_vals_deep = np.array([shap_vals_deep[y_sample[i]][i] for i in range(n)])
        # Pick each sample's attribution for its true label, min-max scaled.
        shap_vals_deep = np.array([scale(shap_vals_deep[y_sample[i]][i]) for i in range(n)])
        # Collapse channels by max, then broadcast back to 3 channels.
        shap_vals_deep = np.max(shap_vals_deep, axis=3)
        shap_vals_deep = np.expand_dims(shap_vals_deep, 3)
        shap_vals_deep = np.concatenate((shap_vals_deep, shap_vals_deep, shap_vals_deep), axis=3)
        output[:, 0, :, :, :] = shap_vals_deep
    return output
def scale(x):
    """Min-max normalize ``x`` to [0, 1]; return ``x`` unchanged if constant."""
    lo = x.min()
    span = x.max() - lo
    if span == 0:
        # Constant array: nothing to normalize, avoid dividing by zero.
        return x
    return (x - lo) / span
| 1,980 | 40.270833 | 97 | py |
SMERF | SMERF-main/smerf/textbox_data.py | import numpy as np
import PIL
from PIL import Image, ImageDraw, ImageFont, ImageEnhance
from torch.utils.data import Dataset, DataLoader
import os
from smerf.eval import setup_bboxes
import pickle
DATA_DIR = '../data/'
class TextBoxDataset(Dataset):
    """In-memory (image, label) dataset over pre-generated textbox data.

    X and y are index-parallel array-likes; len() is taken from y.
    """
    def __init__(self, X, y):
        self.X = X
        self.y = y
    def __len__(self):
        return len(self.y)
    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]
# Interpolate between blue (v = 0) and red (v = 1)
def shade(im, v):
    """Fill ``im`` in place with a background selected by ``v`` and return it.

    v == -1: plain white; v == -2: plain black; v == -3: random dark gray
    noise; v == -4: a dimmed natural image sampled from a fixed pool of
    Places validation images; otherwise v in [0, 1] interpolates between
    blue (v = 0) and red (v = 1) on the R/B channels.
    """
    if v == -1:
        # plain white background
        im[:, :, :] = 255
    elif v == -2:
        # plain black background
        im[:, :, :] = 0
    elif v == -3:
        # random gray background
        im[:, :, :] = np.asarray(np.random.random((64,64,3)) * 100, dtype=int)
    elif v == -4:
        # natural image background: pick one of a hand-chosen id pool,
        # resize to 64x64 and dim it so the foreground stays visible
        places_img_file = pickle.load(open(os.path.join(DATA_DIR, 'places_img_file.pkl'), 'rb'))
        choices = places_img_file['stadium/baseball']
        img_ids = [0, 9, 10, 12, 15, 16, 17, 19, 20, 21, 24, 25, 26, 27, 28, 33, 34, 35, 37, 39, 41, 42, 43, 45, 46, 47, 49, 50, 51, 52, 55, 56, 58, 64, 65, 58, 68, 71, 74, 78, 86, 88, 90, 91, 92, 93]
        img_dir = os.path.join(DATA_DIR, 'val_256')
        img = Image.open(os.path.join(img_dir, choices[np.random.choice(img_ids)]))
        img = img.resize((64,64))
        enhancer = ImageEnhance.Brightness(img)
        img = enhancer.enhance(0.7)
        im[:,:,:] = np.array(img)
    else:
        # blue (v == 0) -> red (v == 1) interpolation
        im[:, :, 0] = 255 * v
        im[:, :, 2] = 255 * (1 - v)
    return im
# Add a square
def sticker(im, x_start = 0, y_start = 0, delta = 10, color = (0, 0, 0)):
    """Draw a filled ``delta`` x ``delta`` square onto ``im`` in place.

    Args:
        im: image array of shape (H, W, C); modified in place.
        x_start: left edge (column) of the square.
        y_start: top edge (row) of the square.
        delta: side length of the square in pixels.
        color: per-channel fill value (default black).

    Returns:
        The same ``im`` array, for chaining.
    """
    # Default changed from a mutable list to an equivalent tuple; the value
    # broadcast into the array is identical.
    im[y_start:y_start + delta, x_start:x_start + delta, :] = color
    return im
# Add text
def text(im, text, x, y, color = (0,0,0), size = 20):
    """Render ``text`` onto image array ``im`` at (x, y); return the new
    array and the rendered text's (width, height).

    NOTE(review): the font path '../data/arial.ttf' is relative to the
    working directory — confirm callers run from the expected location.
    NOTE(review): ``font.getsize`` was removed in Pillow 10; newer Pillow
    would need ``getbbox``/``getlength`` here.
    """
    im = Image.fromarray(im)
    draw = ImageDraw.Draw(im)
    font = ImageFont.truetype("../data/arial.ttf", size)
    # Measured size is returned so callers can record the text bounding box.
    w, h = font.getsize(text)
    draw.text((x, y), text, color, font = font)
    im = np.array(im)
    return im, (w,h)
def vec2im(features, **kwargs):
    """
    Convert a feature vector into a 64x64 RGB image.

    features layout: [0] character id (0 -> 'A', 1 -> 'B', -1 -> none),
    [1]/[2] relative x/y of the character, [3] text-color code,
    [4] background shade code (see shade), [5] large-box flag.

    kwargs: 'switch' is REQUIRED (0 = no small box, 1 = fixed-location small
    box, 2 = random-location small box; 1/2 also require 's_color').
    Optional: 'x_start', 'y_start', 'p_color', 'p_size' for the large box;
    'color' when features[3] == 2.

    :return image, text location (y0, y1, x0, x1), small box location
        (y0, y1, x0, x1); tuples of Nones when the element is absent
    """
    im = np.zeros((64, 64, 3), dtype = np.uint8)
    im = shade(im, features[4])
    if features[5] == 1: # place large box in the image
        if 'x_start' in kwargs.keys():
            patch_x = kwargs['x_start']
        else:
            patch_x = 0
        if 'y_start' in kwargs.keys():
            patch_y = kwargs['y_start']
        else:
            patch_y = 0
        if 'p_color' in kwargs.keys():
            p_color = kwargs['p_color']
        else:
            p_color = [0,0,0]
        if 'p_size' in kwargs.keys():
            p_delta = kwargs['p_size']
        else:
            p_delta = 10
        # Add a large box in the image at the location set in the argument.
        im = sticker(im, x_start=patch_x, y_start=patch_y, color=p_color, delta=p_delta)
    else: # no large box in the image so the locations are set to None
        patch_x = None
        patch_y = None
    # Determine the character to be included in the image
    if features[0] == 0:
        char = "A"
    elif features[0] == 1:
        char = "B"
    elif features[0] == -1:
        char = None # no character
    # Determine the color of the character to be included in the image
    if features[3] == 0: # set text color as black
        color = (0, 0, 0)
    elif features[3] == 1: # set text color as green
        color = (0, 255, 0)
    elif features[3] == 3: # set text color as white
        color = (255, 255, 255)
    elif features[3] == 2: # set manual text color from (R, G, B) input
        color = kwargs['color']
    textloc = (None, None, None, None)
    # Add text if character is not None
    if char != None:
        # Map the relative [0, 1] position into pixel coordinates with a
        # 6-pixel margin.
        xstart = int(36 * features[1] + 6)
        ystart = int(36 * features[2] + 6)
        im, dim = text(im, char, xstart, ystart, color = color)
        # keep the location of the character to return
        textloc = (ystart, ystart+dim[1], xstart, xstart+dim[0])
    # Add a small box if switch argument is turned on
    dist_ = 300  # minimum squared pixel distance to other objects
    if kwargs['switch'] == 1: # small box at a fixed location
        switch_x = 58
        switch_y = 58
        im = sticker(im, x_start=58, y_start=58, delta=4, color=kwargs['s_color'])
    elif kwargs['switch'] == 2: # small box at a random location
        # Rejection-sample a position far enough from the text and large box.
        while True:
            switch_x = np.random.random_integers(0, 53)
            switch_y = np.random.random_integers(0, 53)
            # prevent overlap of switch with text and larger box
            if patch_x is not None and features[0] != -1: # yes patch, yes character
                if (int(36*features[1]) - switch_x)**2 + (int(36*features[2]) - switch_y)**2 > dist_+100 and \
                (patch_x - switch_x)**2 + (patch_y - switch_y)**2 > dist_:
                    break
            elif patch_x is None and features[0] != -1: # no patch, yes character
                if (int(36*features[1]) - switch_x)**2 + (int(36*features[2]) - switch_y)**2 > dist_+100:
                    break
            elif patch_x is not None and features[0] == -1: # yes patch, no character
                if (patch_x - switch_x)**2 + (patch_y - switch_y)**2 > dist_:
                    break
            elif patch_x is None and features[0] == -1: # neither
                break
        im = sticker(im, x_start=switch_x, y_start=switch_y, delta=4, color=kwargs['s_color'])
    else:
        switch_x = None
        switch_y = None
    # keep the small box location to return
    if switch_x is not None:
        switch_loc = (switch_y, switch_y+4, switch_x, switch_x+4)
    else:
        switch_loc = (None, None, None, None)
    # return the image generated, character location, and small box location
    return im, textloc, switch_loc
def save_data(exp_no, save_dir, train_data, test_data, train_coord, train_avoid, train_avoid2, test_coord, test_avoid, test_avoid2, save=True):
    """Compute ground-truth bounding boxes for a dataset and optionally save
    everything to ``save_dir/textbox_<exp_no>.npz``.

    ``gt_flag`` selects which of the (coord, avoid, avoid2) object groups
    count as ground truth for the experiment.  Returns
    (train_data, test_data, train_primary, train_secondary, test_primary,
    test_secondary).

    Raises:
        ValueError: if ``exp_no`` has no gt_flag mapping defined below.
    """
    # setup bbox info to save to the file
    fname = os.path.join(save_dir, 'textbox_%0.2f.npz'%exp_no)
    # NOTE need to specify below based on different type of experiments
    if exp_no in [1.11, 2.11]: # for simple FR and NR, only one object to include
        gt_flag = [1,0,0]
    elif exp_no == 1.2: # for complex-FR, there are two ground-truth objects to include
        gt_flag = [1,0,1]
    elif exp_no >= 3.7: # for complex-CR, there are two ground-truth objects to include
        gt_flag = [1,0,1]
    else:
        # Previously this fell through with gt_flag unbound, crashing later
        # with UnboundLocalError; fail fast with a clear message instead.
        raise ValueError('unsupported exp_no %r: no gt_flag mapping defined' % (exp_no,))
    train_primary, train_secondary = setup_bboxes(train_coord, train_avoid, train_avoid2, np.array(range(train_data.X.shape[0])), gt_flag=gt_flag)
    test_primary, test_secondary = setup_bboxes(test_coord, test_avoid, test_avoid2, np.array(range(test_data.X.shape[0])), gt_flag=gt_flag)
    if save:
        np.savez(open(fname, 'wb'),
                 x_train=train_data.X,
                 y_train=train_data.y,
                 x_test=test_data.X,
                 y_test=test_data.y,
                 train_primary=train_primary,
                 test_primary=test_primary,
                 train_secondary=train_secondary,
                 test_secondary=test_secondary)
    return train_data, test_data, train_primary, train_secondary, test_primary, test_secondary
def load_data(exp_no, load_dir):
    """Load a dataset archive written by save_data.

    Returns (train_data, test_data, train_primary, train_secondary,
    test_primary, test_secondary), with the first two wrapped as
    TextBoxDataset objects.
    """
    archive_path = os.path.join(load_dir, 'textbox_%0.2f.npz'%exp_no)
    # allow_pickle is needed because the bbox arrays hold Python objects.
    archive = np.load(open(archive_path, 'rb'), allow_pickle=True)
    train_data = TextBoxDataset(archive['x_train'], archive['y_train'])
    test_data = TextBoxDataset(archive['x_test'], archive['y_test'])
    return (train_data, test_data,
            archive['train_primary'], archive['train_secondary'],
            archive['test_primary'], archive['test_secondary'])
# Generate text data with spurious features
# make the labels to be correlated with color, not the digit itself
# or the other way
def sample_uniform():
    """Draw one random feature vector for the plain textbox dataset.

    Layout: [0] character id in {0, 1}, [1]/[2] uniform x/y position,
    [3] text color fixed to 0 (black), [4] uniform background shade,
    [5] large-box flag fixed to 0 (absent).
    """
    feature = np.zeros(6)
    # Draw order matters for RNG reproducibility: character, x, y, shade.
    feature[0] = np.random.randint(2)   # character
    feature[1] = np.random.uniform()    # x position
    feature[2] = np.random.uniform()    # y position
    feature[3] = 0                      # text color: black
    feature[4] = np.random.uniform()    # background shade
    feature[5] = 0                      # no large box
    return feature
def generate_data(n=10000):
    """Generate ``n`` plain textbox images with labels.

    Returns (im, labels, rep): float32 images in [0, 1] of shape
    (n, 64, 64, 3), labels equal to the character feature, and the raw
    feature vectors of shape (n, 6).
    """
    #plain data
    rep = np.zeros((n, 6))
    labels = np.zeros(n)
    im = np.zeros((n, 64, 64, 3))
    for i in range(n):
        rep[i] = sample_uniform()
        # vec2im returns (image, text location, small-box location) and
        # requires the 'switch' kwarg; the original code assigned the whole
        # tuple into im[i] and omitted 'switch' (KeyError).  switch=0 means
        # no small box, matching the plain dataset.
        im[i], _, _ = vec2im(rep[i], switch=0)
        labels[i] = int(rep[i][0])
    # Scale uint8 pixel values into [0, 1] float32.
    im = np.float32(im / 255)
    return im, labels, rep
def original_textbox_data(n=10000, save=True, save_dir='data'):
    """Load the plain textbox dataset from disk, or generate (and save) it.

    Returns (train_data, test_data) as TextBoxDataset objects.  When the
    archive already exists it is loaded; otherwise ``n`` training and
    ``0.3 * n`` test samples are generated and, if ``save``, written to
    ``save_dir`` together with their feature metadata.
    """
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    fname = os.path.join(save_dir, 'textbox_original.npz')
    if os.path.exists(fname):
        # Load path: feature metadata (rep/rep_test) is not reconstructed, so
        # nothing is (re)saved here.  The original code fell through to the
        # save block and crashed with NameError on the undefined rep arrays.
        tmp = np.load(open(fname, 'rb'))
        train_data = TextBoxDataset(tmp['x_train'], tmp['y_train'])
        test_data = TextBoxDataset(tmp['x_test'], tmp['y_test'])
        return train_data, test_data
    # train data
    im, labels, rep = generate_data(n=n)
    # val data
    test_n = int(n * 0.3)
    im_test, labels_test, rep_test = generate_data(n=test_n)
    train_data = TextBoxDataset(im, labels)
    test_data = TextBoxDataset(im_test, labels_test)
    if save:
        np.savez(open(fname, 'wb'), x_train=train_data.X, y_train=train_data.y, x_test=test_data.X, y_test=test_data.y)
        np.savez(open(os.path.join(save_dir, 'textbox_original_meta.npz'), 'wb'), rep=rep, rep_test=rep_test)
    return train_data, test_data
| 9,823 | 38.773279 | 200 | py |
SMERF | SMERF-main/smerf/models.py | import keras
from keras.layers.pooling import GlobalAveragePooling1D, GlobalAveragePooling2D
import numpy as np
from keras.utils.np_utils import to_categorical
import os
import keras.backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.metrics import categorical_accuracy
from keras.applications.resnet50 import ResNet50
from keras.applications.vgg16 import VGG16
from keras.applications.densenet import DenseNet121
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, ZeroPadding2D, MaxPooling2D, BatchNormalization, Activation
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
class EarlyStoppingByLossVal(keras.callbacks.Callback):
    """Stop training once the monitored quantity drops below ``value``.

    Checked after every batch and every epoch.  Additionally aborts at epoch
    end if the monitored value explodes above 7.0 (diverged run).
    """
    def __init__(self, monitor='loss', value=0.1, verbose=0):
        # The original called super(keras.callbacks.Callback, self).__init__(),
        # which skips Callback's own initializer; call it properly.
        super(EarlyStoppingByLossVal, self).__init__()
        self.monitor = monitor
        self.value = value
        self.verbose = verbose
    def on_batch_end(self, batch, logs=None):
        current = (logs or {}).get(self.monitor)
        if current is None:
            # Monitored key not in this batch's logs; nothing to compare
            # (the original compared None < value, a TypeError on Python 3).
            return
        if current < self.value:
            print('stopping with %f'%current)
            self.model.stop_training = True
    def on_epoch_end(self, epoch, logs=None):
        current = (logs or {}).get(self.monitor)
        if current is None:
            print("Early stopping requires %s available!" % self.monitor)
            return
        if current < self.value:
            print('stopping with %f'%current)
            self.model.stop_training = True
        if current > 7.00:
            # Loss diverged; abort the run.
            self.model.stop_training = True
class TextBoxCNN:
    """Small 3-conv CNN classifier (2 output logits) for textbox images.

    Weights are cached at ``output_dir/model_name``; train() loads them when
    present instead of retraining.
    """
    def __init__(self, lr=0.0001, batch=128, max_epoch=10, interm_dim=200, input_shape=(64, 64, 3), model_name='w.pt', output_dir='../outputs'):
        self.input_shape = input_shape
        self.model = keras.Sequential([
            #keras.layers.InputLayer(input_shape=(64, 64, 3)),
            keras.layers.Conv2D(filters=32, kernel_size=3, strides=(2, 2), activation='relu', input_shape=input_shape),
            #keras.layers.Conv2D(filters=32, kernel_size=3, strides=(2, 2), activation='relu'),
            keras.layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu'),
            keras.layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu'),
            keras.layers.Flatten(),
            keras.layers.Dense(interm_dim, activation='relu'),
            keras.layers.Dense(2)
        ])
        self.lr = lr
        self.opt = keras.optimizers.Adam(lr=self.lr)
        #self.opt = keras.optimizers.SGD(lr=self.lr)
        self.batch = batch
        self.max_epoch = max_epoch
        self.model.compile(optimizer=self.opt, loss='binary_crossentropy', metrics=['accuracy'])
        # Path where trained weights are cached/loaded.
        self.modelfile = os.path.join(output_dir, model_name)
    def train(self, x_train, y_train, retrain=False, validate=False, earlystop=False, verbose=True, adversarial=False):
        """Train the model, or load cached weights from self.modelfile.

        retrain=True forces loading existing weights (raises if absent);
        adversarial=True only loads weights produced by script/adv_train.py.
        """
        if not adversarial:
            if earlystop:
                #cb = [EarlyStopping(monitor='accuracy', mode='min', verbose=1)]
                #cb = [EarlyStopping(monitor='loss', patience=1, mode='min'), ModelCheckpoint(self.modelfile, monitor='loss', mode='min', save_best_only=True)]
                cb = [EarlyStoppingByLossVal(monitor='loss', value=0.005)]
            else:
                cb = []
            if os.path.exists(self.modelfile) and not retrain:
                # Cached weights exist; reuse them instead of training.
                self.model.load_weights(self.modelfile)
            elif not os.path.exists(self.modelfile) and retrain:
                raise ValueError('modelfile not found')
            else:
                y_train_oh = to_categorical(y_train, 2)
                if validate:
                    self.model.fit(x_train, y_train_oh, batch_size=self.batch, epochs=self.max_epoch, validation_split=0.1, shuffle=True, callbacks=cb)
                else:
                    self.model.fit(x_train, y_train_oh, batch_size=self.batch, epochs=self.max_epoch, validation_split=0, shuffle=True, callbacks=cb)
                self.model.save_weights(self.modelfile)
        else:
            # add adversarial training: load from pretrained
            if not os.path.exists(self.modelfile):
                raise ValueError('modelfile %s not found. Make sure to run script/adv_train.py to train the model first.'%self.modelfile)
            self.model.load_weights(self.modelfile)
        if verbose:
            print(self.model.summary())
    def test(self, x_test, y_test):
        """Return (and print) classification accuracy on (x_test, y_test)."""
        pred = self.model.predict_classes(x_test)
        score = (pred == y_test).sum() / y_test.shape[0]
        print('Accuracy=%f'%score)
        return score
class TextBoxCNN_adv(TextBoxCNN):
    """Deeper 4-conv variant of TextBoxCNN used for adversarial experiments.

    Inherits train()/test() from TextBoxCNN; only the architecture differs.
    """
    def __init__(self, lr=0.0001, batch=128, max_epoch=10, interm_dim=200, input_shape=(64, 64, 3), model_name='w.pt', output_dir='../outputs'):
        self.input_shape = input_shape
        self.model = keras.Sequential([
            #keras.layers.InputLayer(input_shape=(64, 64, 3)),
            keras.layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu', input_shape=input_shape),
            keras.layers.Conv2D(filters=128, kernel_size=3, strides=(2, 2), activation='relu'),
            keras.layers.Conv2D(filters=256, kernel_size=3, strides=(2, 2), activation='relu'),
            keras.layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu'),
            keras.layers.Flatten(),
            keras.layers.Dense(interm_dim, activation='relu'),
            keras.layers.Dense(interm_dim, activation='relu'),
            keras.layers.Dense(2)
        ])
        self.lr = lr
        self.opt = keras.optimizers.Adam(lr=self.lr)
        self.batch = batch
        self.max_epoch = max_epoch
        self.model.compile(optimizer=self.opt, loss='binary_crossentropy', metrics=['accuracy'])
        # Path where trained weights are cached/loaded.
        self.modelfile = os.path.join(output_dir, model_name)
## CNNs
class AlexNet_model(TextBoxCNN):
    """AlexNet-style CNN (5 conv + 3 dense, batch-normalized) with 2 logits.

    Inherits train()/test() from TextBoxCNN; only the architecture and
    default hyperparameters differ.
    """
    def __init__(self, lr=0.005, batch=1024, max_epoch=10, interm_dim=200, input_shape=(64, 64, 3), model_name='w_alex.pt', output_dir='../outputs'):
        self.input_shape = input_shape
        #Instantiation
        self.model = keras.Sequential()
        #1st Convolutional Layer
        self.model.add(Conv2D(filters=96, input_shape=self.input_shape, kernel_size=(11,11), strides=(4,4), padding='same'))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))
        #2nd Convolutional Layer
        self.model.add(Conv2D(filters=256, kernel_size=(5, 5), strides=(1,1), padding='same'))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))
        #3rd Convolutional Layer
        self.model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        #4th Convolutional Layer
        self.model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        #5th Convolutional Layer
        self.model.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='same'))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))
        #Passing it to a Fully Connected layer
        self.model.add(Flatten())
        # 1st Fully Connected Layer
        # NOTE(review): input_shape here is ignored by Keras on a non-first
        # layer; looks like a leftover — confirm before removing.
        self.model.add(Dense(4096, input_shape=(32,32,3,)))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        # Add Dropout to prevent overfitting
        self.model.add(Dropout(0.4))
        #2nd Fully Connected Layer
        self.model.add(Dense(4096))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        #Add Dropout
        self.model.add(Dropout(0.4))
        #3rd Fully Connected Layer
        self.model.add(Dense(1000))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        #Add Dropout
        self.model.add(Dropout(0.4))
        #Output Layer
        self.model.add(Dense(2))
        self.model.add(BatchNormalization())
        # set up other hyperparameters
        self.lr = lr
        self.opt = keras.optimizers.Adam(lr=self.lr)
        #self.opt = keras.optimizers.SGD(lr=self.lr)
        self.batch = batch
        self.max_epoch = max_epoch
        self.model.compile(optimizer=self.opt, loss='binary_crossentropy', metrics=['accuracy'])
        # Path where trained weights are cached/loaded.
        self.modelfile = os.path.join(output_dir, model_name)
class VGG16_model(TextBoxCNN):
    """Frozen ImageNet-pretrained VGG16 backbone with a dense head (2 logits).

    Inherits train() from TextBoxCNN; test() is overridden because this head
    outputs raw logits, so predictions go through softmax + argmax.
    """
    def __init__(self, lr=0.005, batch=1024, max_epoch=10, interm_dim=200, input_shape=(64, 64, 3), model_name='w_res.pt', output_dir='../outputs'):
        # NOTE(review): default model_name 'w_res.pt' reads like a ResNet
        # leftover although this is VGG16 — confirm before renaming.
        self.input_shape = input_shape
        # define the base model to fine-tune from
        base_model = VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        x = Dense(512, activation='relu')(x)
        x = Dense(256, activation='relu')(x)
        predictions = Dense(2)(x)
        self.model = Model(inputs=base_model.input, outputs=predictions)
        # Freeze the pretrained backbone; only the dense head is trainable.
        for layer in base_model.layers:
            layer.trainable = False
        # set up other hyperparameters
        self.lr = lr
        self.opt = keras.optimizers.Adam(lr=self.lr)
        self.batch = batch
        self.max_epoch = max_epoch
        self.model.compile(optimizer=self.opt, loss='binary_crossentropy', metrics=['accuracy'])
        # Path where trained weights are cached/loaded.
        self.modelfile = os.path.join(output_dir, model_name)
    def test(self, x_test, y_test):
        """Return (and print) accuracy of argmax(softmax(logits)) vs y_test."""
        pred = self.softmax(self.model.predict(x_test))
        pred = np.argmax(pred, axis=1)
        score = (pred == y_test).sum() / y_test.shape[0]
        print('Accuracy=%f'%score)
        return score
    @staticmethod
    def softmax(x):
        """Row-wise softmax, numerically stabilized by max-subtraction."""
        # Subtracting the per-row max is mathematically a no-op but prevents
        # overflow in np.exp for large logits (the original exponentiated the
        # raw values directly).
        z = x - np.max(x, axis=1, keepdims=True)
        e = np.exp(z)
        return e / np.sum(e, axis=1, keepdims=True)
SMERF | SMERF-main/smerf/lime_utils.py | import lime
from lime import lime_image
from lime.wrappers.scikit_image import SegmentationAlgorithm
import tensorflow as tf
import keras.backend as K
import numpy as np
from smerf.models import *
# Uses LIME library to obtain feature attributions
def lime_run(model, x_sample, y_sample, x_train, exp_no):
    """Compute LIME attributions for a batch of images.

    For each sample, fits a LIME image explanation (quickshift
    segmentation, 1000 perturbations), takes the heatmap for the top
    predicted label, min-max scales it, and broadcasts it across 3
    channels.  Returns an array of shape (n, 1, h, w, c).

    NOTE(review): y_sample, x_train and exp_no are unused here — presumably
    kept for API symmetry with shap_run; confirm before removing.
    """
    n, h, w, c = x_sample.shape
    output = np.zeros((n, 1, h, w, c))
    explainer = lime_image.LimeImageExplainer(verbose=False)
    for n_i in range(n):
        # LIME expects float64 images; explain both classes, keep the top one.
        explanation = explainer.explain_instance(x_sample[n_i].astype('double'),
                                        model.predict,
                                        top_labels=2,
                                        hide_color=(0,0,0),
                                        num_samples=1000,
                                        segmentation_fn=SegmentationAlgorithm('quickshift',
                                                                        kernel_size=4,
                                                                        max_dist=10,
                                                                        ratio=0.2)
                                        )
        ind = explanation.top_labels[0]
        # Map per-segment weights back onto the pixel grid.
        dict_heatmap = dict(explanation.local_exp[ind])
        heatmap = np.vectorize(dict_heatmap.get)(explanation.segments)
        heatmap = scale(heatmap)
        # Broadcast the single-channel heatmap across all 3 channels.
        output[n_i, 0, :, :, 0] = heatmap
        output[n_i, 0, :, :, 1] = heatmap
        output[n_i, 0, :, :, 2] = heatmap
    return output
def scale(x):
    """Min-max normalize ``x`` to [0, 1]; return ``x`` unchanged if constant."""
    spread = x.max() - x.min()
    if spread != 0:
        return (x - x.min()) / spread
    # Constant array: avoid division by zero.
    return x
cpnest | cpnest-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CPNest documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 7 13:57:03 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
#import os
#import sys
#sys.path.insert(0, os.path.abspath('../cpnest'))
import cpnest
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CPNest'
copyright = '2017-2021, W. Del Pozzo, J. Veitch'
author = 'W. Del Pozzo, J. Veitch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = cpnest.__version__
# The full version, including alpha/beta/rc tags.
release = cpnest.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# Sphinx 5+ warns on ``language = None`` and falls back to English anyway,
# so declare English explicitly.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CPNestdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CPNest.tex', 'CPNest Documentation',
'W. Del Pozzo, J. Veitch', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cpnest', 'CPNest Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CPNest', 'CPNest Documentation',
author, 'CPNest', 'One line description of project.',
'Miscellaneous'),
]
# Configuration for intersphinx: cross-reference the Python standard library
# and the scientific-Python stack.  Each entry maps a project name to a
# (base URL, inventory) pair; ``None`` fetches ``objects.inv`` from the URL.
intersphinx_mapping = {
    # Named-key form; the legacy "URL as key" style used for the Python
    # entry is deprecated by Sphinx.
    'python': ('https://docs.python.org/3', None),
    'numpy': ('https://docs.scipy.org/doc/numpy', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
    'matplotlib': ('https://matplotlib.sourceforge.net', None),
}
| 5,836 | 30.896175 | 85 | py |
CR-VAE | CR-VAE-main/CRVAE_demo.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 6 20:00:04 2022
@author: 61995
"""
import torch
import numpy as np
import matplotlib.pyplot as plt
from models.cgru_error import CRVAE, VRAE4E, train_phase1, train_phase2
import scipy.io
device = torch.device('cuda')
X_np = np.load('henon.npy').T
dim = X_np.shape[-1]
GC = np.zeros([dim,dim])
for i in range(dim):
GC[i,i] = 1
if i!=0:
GC[i,i-1] = 1
X = torch.tensor(X_np[np.newaxis], dtype=torch.float32, device=device)
full_connect = np.ones(GC.shape)
cgru = CRVAE(X.shape[-1], full_connect, hidden=64).cuda(device=device)
vrae = VRAE4E(X.shape[-1], hidden=64).cuda(device=device)
#%%
train_loss_list = train_phase1(
cgru, X, context=20, lam=0.1, lam_ridge=0, lr=5e-2, max_iter=1000,
check_every=50)#0.1
#%%
GC_est = cgru.GC().cpu().data.numpy()
print('True variable usage = %.2f%%' % (100 * np.mean(GC)))
print('Estimated variable usage = %.2f%%' % (100 * np.mean(GC_est)))
print('Accuracy = %.2f%%' % (100 * np.mean(GC == GC_est)))
# Make figures
fig, axarr = plt.subplots(1, 2, figsize=(10, 5))
axarr[0].imshow(GC, cmap='Blues')
axarr[0].set_title('Causal-effect matrix')
axarr[0].set_ylabel('Effect series')
axarr[0].set_xlabel('Causal series')
axarr[0].set_xticks([])
axarr[0].set_yticks([])
axarr[1].imshow(GC_est, cmap='Blues', vmin=0, vmax=1, extent=(0, len(GC_est), len(GC_est), 0))
axarr[1].set_ylabel('Effect series')
axarr[1].set_xlabel('Causal series')
axarr[1].set_xticks([])
axarr[1].set_yticks([])
# Mark disagreements
for i in range(len(GC_est)):
for j in range(len(GC_est)):
if GC[i, j] != GC_est[i, j]:
rect = plt.Rectangle((j, i-0.05), 1, 1, facecolor='none', edgecolor='red', linewidth=1)
axarr[1].add_patch(rect)
plt.show()
#np.save('GC_henon.npy', GC_est)
full_connect = np.load('GC_henon.npy')
#%%
cgru = CRVAE(X.shape[-1], full_connect, hidden=64).cuda(device=device)
vrae = VRAE4E(X.shape[-1], hidden=64).cuda(device=device)
train_loss_list = train_phase2(
cgru, vrae, X, context=20, lam=0., lam_ridge=0, lr=5e-2, max_iter=10000,
check_every=50)
| 2,116 | 23.333333 | 99 | py |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 6 20:01:33 2022
@author: 61995
"""
import torch
import torch.nn as nn
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
from metrics.visualization_metrics import visualization
import torch.optim as optim
class GRU(nn.Module):
    """Per-series decoder: a GRU over a pruned subset of the input series,
    with a linear read-out producing a single output channel.
    """

    def __init__(self, num_series, hidden):
        # num_series: number of input series this decoder receives (the
        # count of nonzero entries in its connection column).
        # hidden: GRU hidden-state width.
        super(GRU, self).__init__()
        self.p = num_series
        self.hidden = hidden
        # Set up network.
        self.gru = nn.GRU(num_series, hidden, batch_first=True)
        self.gru.flatten_parameters()
        self.linear = nn.Linear(hidden, 1)
        self.sigmoid = nn.Sigmoid()  # defined but not used in forward()

    def init_hidden(self, batch):
        # Initialize hidden states: zeros on the same device as the weights.
        device = self.gru.weight_ih_l0.device
        return torch.zeros(1, batch, self.hidden, device=device)

    def forward(self, X, z, connection, mode = 'train'):
        # Keep only the input series selected by the nonzero connection mask.
        X=X[:,:,np.where(connection!=0)[0]]
        device = self.gru.weight_ih_l0.device  # unused; kept as in original
        tau = 0  # unused; kept as in original
        if mode == 'train':
            # Decode from step 0 plus steps 11..T-1, using latent z as the
            # initial hidden state. NOTE(review): steps 1..10 appear to be
            # reserved for the encoder in CRVAE.forward — confirm.
            X_right, hidden_out = self.gru(torch.cat((X[:,0:1,:],X[:,11:-1,:]),1), z)
            X_right = self.linear(X_right)
            return X_right, hidden_out
        # Any other mode falls through and implicitly returns None; callers
        # in this file invoke the module with the default mode only.
class VRAE4E(nn.Module):
    """Variational RAE over the residual (error) sequence.

    Encodes an error sequence into a Gaussian latent (mu, log_var) and
    decodes it back with a second GRU; in 'test' mode it free-runs the
    decoder from random noise to sample an error trajectory.
    """

    def __init__(self, num_series, hidden):
        '''
        Error VAE
        '''
        super(VRAE4E, self).__init__()
        self.device = torch.device('cuda')
        self.p = num_series
        self.hidden = hidden
        # Encoder GRU.
        self.gru_left = nn.GRU(num_series, hidden, batch_first=True)
        self.gru_left.flatten_parameters()
        self.fc_mu = nn.Linear(hidden, hidden)#nn.Linear(hidden, 1)
        self.fc_std = nn.Linear(hidden, hidden)
        self.linear_hidden = nn.Linear(hidden, hidden)
        self.tanh = nn.Tanh()
        # Decoder GRU and per-step output projection.
        self.gru = nn.GRU(num_series, hidden, batch_first=True)
        self.gru.flatten_parameters()
        self.linear = nn.Linear(hidden, num_series)

    def init_hidden(self, batch):
        '''Initialize hidden states for GRU cell.'''
        device = self.gru.weight_ih_l0.device
        return torch.zeros(1, batch, self.hidden, device=device)

    def forward(self, X, mode = 'train'):
        # Prepend a zero frame so the decoder input is the target shifted by one.
        X = torch.cat((torch.zeros(X.shape,device = self.device)[:,0:1,:],X),1)
        if mode == 'train':
            hidden_0 = torch.zeros(1, X.shape[0], self.hidden, device=self.device)
            out, h_t = self.gru_left(X[:,1:,:], hidden_0.detach())
            mu = self.fc_mu(h_t)
            log_var = self.fc_std(h_t)
            # Reparameterization trick: z = mu + sigma * eps.
            sigma = torch.exp(0.5*log_var)
            z = torch.randn(size = mu.size())
            z = z.type_as(mu)
            z = mu + sigma*z
            z = self.tanh(self.linear_hidden(z))
            # Teacher-forced decoding conditioned on z.
            X_right, hidden_out = self.gru(X[:,:-1,:], z)
            pred = self.linear(X_right)
            return pred, log_var, mu
        if mode == 'test':
            # Free-running generation from random hidden state: feed each
            # output back as the next input for 21 steps.
            X_seq = torch.zeros(X[:,:1,:].shape).to(self.device)
            h_t = torch.randn(size = (1, X_seq[:,-2:-1,:].size(0),self.hidden)).to(self.device)
            for i in range(int(20/1)+1):
                out, h_t = self.gru(X_seq[:,-1:,:], h_t)
                out = self.linear(out)
                #out = self.sigmoid(out)
                X_seq = torch.cat([X_seq,out],dim = 1)
            return X_seq
class CRVAE(nn.Module):
    """Causal Recurrent VAE.

    A shared encoder GRU reads the first 10 steps of each window and
    produces a Gaussian latent z; one per-series decoder (``GRU``) predicts
    its series from the inputs selected by ``connection``. The column norms
    of each decoder's input weights encode the Granger-causal graph (see
    ``GC``).
    """

    def __init__(self, num_series, connection, hidden):
        '''
        connection: pruned networks
        '''
        super(CRVAE, self).__init__()
        self.device = torch.device('cuda')
        self.p = num_series
        self.hidden = hidden
        # Shared encoder.
        self.gru_left = nn.GRU(num_series, hidden, batch_first=True)
        self.gru_left.flatten_parameters()
        self.fc_mu = nn.Linear(hidden, hidden)
        self.fc_std = nn.Linear(hidden, hidden)
        self.connection = connection
        # Set up networks: decoder i only sees the series with nonzero
        # entries in connection[:, i].
        self.networks = nn.ModuleList([
            GRU(int(connection[:,i].sum()), hidden) for i in range(num_series)])

    def forward(self, X, noise = None, mode = 'train', phase = 0):
        # phase 0: plain VAE training / sampling.
        # phase 1: identical, except 'test' sampling adds the error-VAE noise.
        if phase == 0:
            # Prepend a zero frame (shift targets by one step).
            X = torch.cat((torch.zeros(X.shape,device = self.device)[:,0:1,:],X),1)
            if mode == 'train':
                hidden_0 = torch.zeros(1, X.shape[0], self.hidden, device=self.device)
                # Encode the first 10 real steps into (mu, log_var).
                out, h_t = self.gru_left(X[:,1:11,:], hidden_0.detach())
                mu = self.fc_mu(h_t)
                log_var = self.fc_std(h_t)
                # Reparameterization trick.
                sigma = torch.exp(0.5*log_var)
                z = torch.randn(size = mu.size())
                z = z.type_as(mu)
                z = mu + sigma*z
                # One prediction per series, each from its own pruned decoder.
                pred = [self.networks[i](X, z, self.connection[:,i])[0]
                        for i in range(self.p)]
                return pred, log_var, mu
            if mode == 'test':
                # Autoregressive rollout: all decoders share the previous
                # full frame; each keeps its own hidden state.
                X_seq = torch.zeros(X[:,:1,:].shape).to(self.device)
                h_0 = torch.randn(size = (1, X_seq[:,-2:-1,:].size(0),self.hidden)).to(self.device)
                ht_last =[]
                for i in range(self.p):
                    ht_last.append(h_0)
                for i in range(int(20/1)+1):#int(20/2)+1
                    ht_new = []
                    for j in range(self.p):
                        # out, h_t = self.gru_out[j](X_seq[:,-1:,:], ht_last[j])
                        # out = self.fc[j](out)
                        out, h_t = self.networks[j](X_seq[:,-1:,:], ht_last[j], self.connection[:,j])
                        if j == 0:
                            X_t = out
                        else:
                            # Concatenate per-series outputs into one frame.
                            X_t = torch.cat((X_t,out),-1)
                        ht_new.append(h_t)
                    ht_last = ht_new
                    if i ==0:
                        X_seq = X_t
                    else:
                        X_seq = torch.cat([X_seq,X_t],dim = 1)
                    #out = self.sigmoid(out)
                return X_seq
        if phase == 1:
            X = torch.cat((torch.zeros(X.shape,device = self.device)[:,0:1,:],X),1)
            if mode == 'train':
                hidden_0 = torch.zeros(1, X.shape[0], self.hidden, device=self.device)
                out, h_t = self.gru_left(X[:,1:11,:], hidden_0.detach())
                mu = self.fc_mu(h_t)
                log_var = self.fc_std(h_t)
                sigma = torch.exp(0.5*log_var)
                z = torch.randn(size = mu.size())
                z = z.type_as(mu) # Setting z to be .cuda when using GPU training
                z = mu + sigma*z
                pred = [self.networks[i](X, z, self.connection[:,i])[0]
                        for i in range(self.p)]
                return pred, log_var, mu
            if mode == 'test':
                # Same rollout as phase 0, but each generated frame is
                # perturbed by 0.1 * noise (sampled from the error VAE).
                X_seq = torch.zeros(X[:,:1,:].shape).to(self.device)
                h_0 = torch.randn(size = (1, X_seq[:,-2:-1,:].size(0),self.hidden)).to(self.device)
                ht_last =[]
                for i in range(self.p):
                    ht_last.append(h_0)
                for i in range(int(20/1)+1):#int(20/2)+1
                    ht_new = []
                    for j in range(self.p):
                        # out, h_t = self.gru_out[j](X_seq[:,-1:,:], ht_last[j])
                        # out = self.fc[j](out)
                        out, h_t = self.networks[j](X_seq[:,-1:,:], ht_last[j], self.connection[:,j])
                        if j == 0:
                            X_t = out
                        else:
                            X_t = torch.cat((X_t,out),-1)
                        ht_new.append(h_t)
                    ht_last = ht_new
                    if i ==0:
                        X_seq = X_t + 0.1*noise[:,i:i+1,:]
                    else:
                        X_seq = torch.cat([X_seq,X_t+0.1*noise[:,i:i+1,:]],dim = 1)
                    #out = self.sigmoid(out)
                return X_seq

    def GC(self, threshold=True):
        '''
        Extract learned Granger causality.

        Args:
            threshold: return norm of weights, or whether norm is nonzero.

        Returns:
            GC: (p x p) matrix. Entry (i, j) indicates whether variable j is
            Granger causal of variable i.
        '''
        # Column norms of each decoder's input weights; a zero column means
        # the corresponding input series was pruned by the proximal step.
        GC = [torch.norm(net.gru.weight_ih_l0, dim=0)
              for net in self.networks]
        GC = torch.stack(GC)
        #print(GC)
        if threshold:
            return (torch.abs(GC) > 0).int()
        else:
            return GC
def prox_update(network, lam, lr):
    '''In-place proximal (group soft-threshold) step on the GRU input weights.

    Each column of ``weight_ih_l0`` is shrunk toward zero by ``lam * lr``;
    columns whose norm falls below the threshold are zeroed out entirely,
    pruning the corresponding input series.
    '''
    weight = network.gru.weight_ih_l0
    col_norm = torch.norm(weight, dim=0, keepdim=True)
    shrunk = torch.clamp(col_norm - lam * lr, min=0.0)
    weight.data = weight * (shrunk / torch.clamp(col_norm, min=lam * lr))
    network.gru.flatten_parameters()
def regularize(network, lam):
    '''Group-lasso penalty: ``lam`` times the sum of column norms of the
    GRU input-to-hidden weight matrix.'''
    column_norms = torch.norm(network.gru.weight_ih_l0, dim=0)
    return lam * column_norms.sum()
def ridge_regularize(network, lam):
    '''Ridge (L2) penalty on the output projection and the hidden-to-hidden
    GRU weights.'''
    penalty = network.linear.weight.pow(2).sum() + network.gru.weight_hh_l0.pow(2).sum()
    return lam * penalty
def restore_parameters(model, best_model):
    '''Copy parameter values from ``best_model`` into ``model`` in place.

    Fix: the original assigned the ``best_model`` Parameter objects
    themselves as ``model``'s ``.data``, so both models shared storage and
    any later in-place update of ``model`` would silently corrupt
    ``best_model``. Cloning the data decouples the two models while
    producing identical values.
    '''
    for params, best_params in zip(model.parameters(), best_model.parameters()):
        params.data = best_params.data.clone()
def arrange_input(data, context):
    '''
    Slice one time series into overlapping (input, target) windows.

    Args:
        data: time series of shape (T, dim).
        context: window length (positive int).

    Returns:
        (input, target): detached tensors of shape (T - context, context, dim);
        target is the input window shifted forward by one time step.
    '''
    assert context >= 1 and isinstance(context, int)
    n_windows = len(data) - context
    dim = data.shape[1]
    windows_in = torch.zeros(n_windows, context, dim,
                             dtype=torch.float32, device=data.device)
    windows_out = torch.zeros(n_windows, context, dim,
                              dtype=torch.float32, device=data.device)
    for offset in range(context):
        windows_in[:, offset, :] = data[offset:offset + n_windows]
        windows_out[:, offset, :] = data[offset + 1:offset + n_windows + 1]
    return windows_in.detach(), windows_out.detach()
def MinMaxScaler(data):
    """Min-Max normalizer over the first two axes, per feature.

    Args:
        data: raw array, typically of shape (batch, time, dim).

    Returns:
        norm_data: data shifted and rescaled so each feature spans
        approximately [0, 1].
    """
    feature_min = np.min(np.min(data, axis=0), axis=0)
    shifted = data - feature_min
    feature_range = np.max(np.max(shifted, axis=0), axis=0)
    return shifted / (feature_range + 1e-7)
def train_phase2(crvae, vrae, X, context, lr, max_iter, lam=0, lam_ridge=0,
                 lookback=5, check_every=50, verbose=1,sparsity = 100, batch_size = 1024):
    '''Phase-2 training: jointly refine the CRVAE (manual gradient steps)
    and fit the error VAE (Adam) on the CRVAE's residuals.

    Args:
        crvae: CRVAE model (updated in place; best checkpoint restored).
        vrae: VRAE4E error model, trained on CRVAE prediction residuals.
        X: batch of time series, shape (n, T, p).
        context: window length passed to arrange_input.
        lr: learning rate for the manual CRVAE updates.
        max_iter: number of iterations.
        lam: group-lasso strength (demo calls this phase with lam=0).
        lam_ridge: ridge strength on decoder weights.
        lookback, sparsity: unused; kept for interface compatibility.
        check_every: logging / model-selection interval.
        verbose: print progress when > 0.
        batch_size: number of windows sampled once before the loop.

    Returns:
        train_loss_list: list of losses (never appended to in this version).
    '''
    # Adam only optimizes the error VAE; CRVAE uses manual SGD + prox below.
    optimizer = optim.Adam(vrae.parameters(), lr=1e-3)
    p = X.shape[-1]
    device = crvae.networks[0].gru.weight_ih_l0.device
    loss_fn = nn.MSELoss()
    train_loss_list = []
    batch_size = batch_size
    # Set up data: one fixed random batch of windows is used throughout.
    X, Y = zip(*[arrange_input(x, context) for x in X])
    X_all = torch.cat(X, dim=0)
    Y_all = torch.cat(Y, dim=0)
    idx = np.random.randint(len(X_all), size=(batch_size,))
    X = X_all[idx]
    Y = Y_all[idx]
    X_v = X_all[batch_size:]  # held-out windows (unused below)
    start_point = 0#context-10-1
    beta = 1#0.001      # KL weight for the CRVAE
    beta_e = 1          # KL weight for the error VAE
    # For early stopping / best-model selection.
    best_it = None
    best_loss = np.inf
    best_model = None
    # Initial forward pass to seed the losses used in the first iteration.
    pred,mu,log_var = crvae(X)#
    # Reconstruction loss: each decoder predicts steps 10.. of its series.
    loss = sum([loss_fn(pred[i][:, :, 0], X[:, 10:, i]) for i in range(p)])
    # KL divergence of q(z|x) from the standard normal prior.
    mmd = (-0.5*(1+log_var - mu**2- torch.exp(log_var)).sum(dim = -1).sum(dim = 0)).mean(dim =0)
    #mmd = sum([MMD(torch.randn(200, Y[:, :, 0].shape[-1], requires_grad = False).to(device), latent[i][:,:,0]) for i in range(p)])
    ridge = sum([ridge_regularize(net, lam_ridge) for net in crvae.networks])
    smooth = loss + ridge + beta*mmd
    # Residual sequence (detached) is the training target of the error VAE.
    error = (-torch.stack(pred)[:, :, :, 0].permute(1,2,0) + X[:, 10:, :]).detach()
    pred_e,mu_e,log_var_e = vrae(error)
    loss_e = loss_fn(pred_e, error)
    mmd_e = (-0.5*(1+log_var_e - mu_e**2- torch.exp(log_var_e)).sum(dim = -1).sum(dim = 0)).mean(dim =0)
    smooth_e = loss_e + beta_e*mmd_e
    best_mmd = np.inf
    ########################################################################
    #lr = 1e-3
    for it in range(max_iter):
        # Take gradient step.
        smooth_e.backward()
        if lam == 0:
            optimizer.step()
            optimizer.zero_grad()
        smooth.backward()
        # Manual SGD on the CRVAE parameters.
        for param in crvae.parameters():
            param.data -= lr * param.grad
        # Take prox step.
        if lam > 0:
            for net in crvae.networks:
                prox_update(net, lam, lr)
        crvae.zero_grad()
        # Calculate loss for next iteration.
        idx = np.random.randint(len(X_all), size=(batch_size,))
        #X = X_all[idx]
        #Y = Y_all[idx]
        pred,mu,log_var = crvae(X)#
        loss = sum([loss_fn(pred[i][:, :, 0], X[:, 10:, i]) for i in range(p)])
        mmd = (-0.5*(1+log_var - mu**2- torch.exp(log_var)).sum(dim = -1).sum(dim = 0)).mean(dim =0)
        ridge = sum([ridge_regularize(net, lam_ridge)
                     for net in crvae.networks])
        smooth = loss + ridge + beta*mmd
        error = (-torch.stack(pred)[:, :, :, 0].permute(1,2,0) + X[:, 10:, :]).detach()
        pred_e,mu_e,log_var_e = vrae(error)
        loss_e = loss_fn(pred_e, error)
        mmd_e = (-0.5*(1+log_var_e - mu_e**2- torch.exp(log_var_e)).sum(dim = -1).sum(dim = 0)).mean(dim =0)
        smooth_e = loss_e + beta_e*mmd_e
        # Check progress.
        if (it) % check_every == 0:
            X_t = X
            pred_t,mu_t ,log_var_t= crvae(X_t)
            loss_t = sum([loss_fn(pred_t[i][:, :, 0], X_t[:, 10:, i]) for i in range(p)])
            mmd_t = (-0.5*(1+log_var_t - mu_t**2- torch.exp(log_var_t)).sum(dim = -1).sum(dim = 0)).mean(dim =0)
            ridge_t = sum([ridge_regularize(net, lam_ridge)
                           for net in crvae.networks])
            smooth_t = loss_t + ridge_t# + beta*mmd_t
            nonsmooth = sum([regularize(net, lam) for net in crvae.networks])
            mean_loss = (smooth_t) / p
            if verbose > 0:
                print(('-' * 10 + 'Iter = %d' + '-' * 10) % (it ))
                print('Loss = %f' % mean_loss)
                print('KL = %f' % mmd)
                print('Loss_e = %f' % smooth_e)
                print('KL_e = %f' % mmd_e)
                if lam>0:
                    print('Variable usage = %.2f%%'
                          % (100 * torch.mean(crvae.GC().float())))
            # Keep a deep copy of the best model seen so far.
            if mean_loss < best_loss:
                best_loss = mean_loss
                best_it = it
                best_model = deepcopy(crvae)
            start_point = 0
            # Sample an error trajectory, then a full data trajectory driven
            # by it (phase-1 test mode adds the noise at every step).
            predicted_error = vrae(error, mode = 'test').detach()
            predicted_data = crvae(X_t, predicted_error, mode = 'test', phase = 1)
            syn = predicted_data[:,:-1,:].cpu().detach().numpy()
            ori= X_t[:,start_point:,:].cpu().detach().numpy()
            # Periodic qualitative check: plots, embeddings, and snapshots.
            if it % 1000 ==0:
                plt.plot(ori[0,:,1])
                plt.plot(syn[0,:,1])
                plt.show()
                visualization(ori, syn, 'pca')
                visualization(ori, syn, 'tsne')
                np.save('ori_henon.npy',ori)
                np.save('syn_henon.npy',syn)
    # Restore best model.
    restore_parameters(crvae, best_model)
    return train_loss_list
def train_phase1(crvae, X, context, lr, max_iter, lam=0, lam_ridge=0,
                 lookback=5, check_every=50, verbose=1,sparsity = 100, batch_size = 2048):
    '''Phase-1 training: fit the CRVAE with proximal gradient descent so the
    group-lasso penalty prunes the Granger-causal graph.

    Args:
        crvae: CRVAE model to train (updated in place; best checkpoint restored).
        X: batch of time series, shape (n, T, p).
        context: window length passed to arrange_input.
        lr: learning rate for the manual gradient / proximal updates.
        max_iter: number of iterations.
        lam: group-lasso strength (0 disables the proximal step).
        lam_ridge: ridge strength on decoder weights.
        lookback, sparsity: unused; kept for interface compatibility.
        check_every: logging / model-selection interval.
        verbose: print progress when > 0.
        batch_size: number of windows sampled once before the loop.

    Returns:
        train_loss_list: list of losses (never appended to in this version).

    Fix: the final ``return`` statement was corrupted by stray text fused
    onto the line (a syntax error); it is restored here.
    '''
    p = X.shape[-1]
    device = crvae.networks[0].gru.weight_ih_l0.device
    loss_fn = nn.MSELoss()
    train_loss_list = []
    batch_size = batch_size
    # Set up data: one fixed random batch of windows is used throughout.
    X, Y = zip(*[arrange_input(x, context) for x in X])
    X_all = torch.cat(X, dim=0)
    Y_all = torch.cat(Y, dim=0)
    idx = np.random.randint(len(X_all), size=(batch_size,))
    X = X_all[idx]
    Y = Y_all[idx]
    start_point = 0
    beta = 0.1  # KL weight
    # For early stopping / best-model selection.
    best_it = None
    best_loss = np.inf
    best_model = None
    # Initial forward pass to seed the loss used in the first iteration.
    pred, mu, log_var = crvae(X)
    # Reconstruction loss: each decoder predicts steps 10.. of its series.
    loss = sum([loss_fn(pred[i][:, :, 0], X[:, 10:, i]) for i in range(p)])
    # KL divergence of q(z|x) from the standard normal prior.
    mmd = (-0.5*(1+log_var - mu**2- torch.exp(log_var)).sum(dim = -1).sum(dim = 0)).mean(dim =0)
    ridge = sum([ridge_regularize(net, lam_ridge) for net in crvae.networks])
    smooth = loss + ridge + beta*mmd
    best_mmd = np.inf
    ########################################################################
    for it in range(max_iter):
        # Manual SGD step on all CRVAE parameters.
        smooth.backward()
        for param in crvae.parameters():
            param.data -= lr * param.grad
        # Proximal (group soft-threshold) step enforces sparsity.
        if lam > 0:
            for net in crvae.networks:
                prox_update(net, lam, lr)
        crvae.zero_grad()
        # Recompute the loss for the next iteration.
        pred, mu, log_var = crvae(X)
        loss = sum([loss_fn(pred[i][:, :, 0], X[:, 10:, i]) for i in range(p)])
        mmd = (-0.5*(1+log_var - mu**2- torch.exp(log_var)).sum(dim = -1).sum(dim = 0)).mean(dim =0)
        ridge = sum([ridge_regularize(net, lam_ridge)
                     for net in crvae.networks])
        smooth = loss + ridge + beta*mmd
        # Check progress.
        if (it) % check_every == 0:
            X_t = X
            Y_t = Y
            pred_t, mu_t, log_var_t = crvae(X_t)
            loss_t = sum([loss_fn(pred_t[i][:, :, 0], X_t[:, 10:, i]) for i in range(p)])
            mmd_t = (-0.5*(1+log_var_t - mu_t**2- torch.exp(log_var_t)).sum(dim = -1).sum(dim = 0)).mean(dim =0)
            ridge_t = sum([ridge_regularize(net, lam_ridge)
                           for net in crvae.networks])
            smooth_t = loss_t + ridge_t  # + beta*mmd_t
            nonsmooth = sum([regularize(net, lam) for net in crvae.networks])
            mean_loss = (smooth_t) / p
            if verbose > 0:
                print(('-' * 10 + 'Iter = %d' + '-' * 10) % (it ))
                print('Loss = %f' % mean_loss)
                print('KL = %f' % mmd)
                if lam>0:
                    print('Variable usage = %.2f%%'
                          % (100 * torch.mean(crvae.GC().float())))
            # Keep a deep copy of the best model seen so far.
            if mean_loss < best_loss:
                best_loss = mean_loss
                best_it = it
                best_model = deepcopy(crvae)
            start_point = 0
            # Qualitative check: free-run the model and normalize both series.
            predicted_data = crvae(X_t, mode = 'test')
            syn = predicted_data[:,:-1,:].cpu().detach().numpy()
            ori = X_t[:,start_point:,:].cpu().detach().numpy()
            syn = MinMaxScaler(syn)
            ori = MinMaxScaler(ori)
    # Restore best model.
    restore_parameters(crvae, best_model)
    return train_loss_list
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'ulysses'
copyright = '2019, Kris Moffat, Holger Schulz, Jessica Turner'
author = 'Kris Moffat, Holger Schulz, Jessica Turner'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.2'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ulyssesdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ulysses.tex', 'ulysses Documentation',
'Kris Moffat, Holger Schulz, Jessica Turner', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ulysses', 'ulysses Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ulysses', 'ulysses Documentation',
author, 'ulysses', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
B
| 5,441 | 28.737705 | 79 | py |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pathlib import Path
from typing import List, Tuple
from transformers.models.marian.convert_marian_to_pytorch import (
FRONT_MATTER_TEMPLATE,
_parse_readme,
convert_all_sentencepiece_models,
get_system_metadata,
remove_prefix,
remove_suffix,
)
# pandas is only needed inside TatoebaConverter; import is best-effort so
# the module can still be imported without it.
try:
    import pandas as pd
except ImportError:
    pass

# Local checkout of the Tatoeba-Challenge repo and its models directory.
DEFAULT_REPO = "Tatoeba-Challenge"
DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models")
# Remote sources and local cache paths for the language-code tables.
LANG_CODE_URL = "https://datahub.io/core/language-codes/r/language-codes-3b2.csv"
ISO_URL = "https://cdn-datasets.huggingface.co/language_codes/iso-639-3.csv"
ISO_PATH = "lang_code_data/iso-639-3.csv"
LANG_CODE_PATH = "lang_code_data/language-codes-3b2.csv"
class TatoebaConverter:
    """
    Convert Tatoeba-Challenge models to huggingface format.

    Steps:
    1. convert numpy state dict to hf format (same code as OPUS-MT-Train conversion).
    2. rename opus model to huggingface format. This means replace each alpha3 code with an alpha2 code if a unique
       one exists. e.g. aav-eng -> aav-en, heb-eng -> he-en
    3. write a model card containing the original Tatoeba-Challenge/README.md and extra info about alpha3 group
       members.
    """

    def __init__(self, save_dir="marian_converted"):
        # Build the per-model metadata table by joining the model registry
        # (parsed READMEs) with the released-models listing.
        assert Path(DEFAULT_REPO).exists(), "need git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git"
        reg = self.make_tatoeba_registry()
        self.download_metadata()
        self.registry = reg
        reg_df = pd.DataFrame(reg, columns=["id", "prepro", "url_model", "url_test_set"])
        assert reg_df.id.value_counts().max() == 1
        reg_df = reg_df.set_index("id")
        # id is "src-tgt" in alpha3 codes.
        reg_df["src"] = reg_df.reset_index().id.apply(lambda x: x.split("-")[0]).values
        reg_df["tgt"] = reg_df.reset_index().id.apply(lambda x: x.split("-")[1]).values
        released_cols = [
            "url_base",
            "pair",  # (ISO639-3/ISO639-5 codes),
            "short_pair",  # (reduced codes),
            "chrF2_score",
            "bleu",
            #"brevity_penalty",
            #"ref_len",
            "src_name",
            "tgt_name",
        ]
        # Last row of the listing is dropped (iloc[:-1]).
        released = pd.read_csv("Tatoeba-Challenge/models/released-models.txt", sep="\t", header=None).iloc[:-1]
        released.columns = released_cols
        released["fname"] = released["url_base"].apply(
            lambda x: remove_prefix( remove_suffix(x.split("/")[5], ".zip"), "opus")
        )
        # lambda x: remove_suffix(remove_prefix(x, "https://object.pouta.csc.fi/Tatoeba-Challenge/opus"), ".zip")
        released["2m"] = released.fname.str.startswith("2m")
        # released["date"] = pd.to_datetime(
        #     released["fname"].apply(lambda x: remove_prefix(remove_prefix(x, "2m-"), "-"))
        # )
        # attempted fix:
        released["date"] = pd.to_datetime(
            released["url_base"].apply(
                lambda x: (remove_suffix(x, ".zip")[-10:]))  # extract date: last 10 chars before ".zip"
        )
        # released["fname"].apply(lambda x: x.split("-")[1] )
        # Join registry and release listing on (archive name, pair id).
        released["base_ext"] = released.url_base.apply(lambda x: Path(x).name)
        reg_df["base_ext"] = reg_df.url_model.apply(lambda x: Path(x).name)
        metadata_new = reg_df.reset_index().merge(released.rename(columns={"pair": "id"}), on=["base_ext", "id"])
        metadata_renamer = {"src": "src_alpha3", "tgt": "tgt_alpha3", "id": "long_pair", "date": "train_date"}
        metadata_new = metadata_new.rename(columns=metadata_renamer)
        metadata_new["src_alpha2"] = metadata_new.short_pair.apply(lambda x: x.split("-")[0])
        metadata_new["tgt_alpha2"] = metadata_new.short_pair.apply(lambda x: x.split("-")[1])
        DROP_COLS_BOTH = ["url_base", "base_ext", "fname"]
        metadata_new = metadata_new.drop(DROP_COLS_BOTH, 1)
        metadata_new["prefer_old"] = metadata_new.long_pair.isin([])
        metadata_new = metadata_new.drop_duplicates(subset=['short_pair'])  # added row
        self.metadata = metadata_new
        assert self.metadata.short_pair.value_counts().max() == 1, "Multiple metadata entries for a short pair"
        self.metadata = self.metadata.set_index("short_pair")
        # Build the alpha3 -> alpha2 mapping from the two downloaded tables;
        # the language-codes CSV takes precedence over the ISO 639-3 table.
        # wget.download(LANG_CODE_URL)
        mapper = pd.read_csv(LANG_CODE_PATH)
        mapper.columns = ["a3", "a2", "ref"]
        self.iso_table = pd.read_csv(ISO_PATH, sep="\t").rename(columns=lambda x: x.lower())
        more_3_to_2 = self.iso_table.set_index("id").part1.dropna().to_dict()
        more_3_to_2.update(mapper.set_index("a3").a2.to_dict())
        self.alpha3_to_alpha2 = more_3_to_2
        self.model_card_dir = Path(save_dir)
        self.constituents = GROUP_MEMBERS

    def convert_models(self, tatoeba_ids, dry_run=False):
        """Convert the selected models, rename to alpha2-style huggingface
        ids, and write a model card for each."""
        entries_to_convert = [x for x in self.registry if x[0] in tatoeba_ids]
        converted_paths = convert_all_sentencepiece_models(entries_to_convert, dest_dir=self.model_card_dir)
        for path in converted_paths:
            long_pair = remove_prefix(path.name, "opus-mt-").split("-")  # eg. heb-eng
            assert len(long_pair) == 2
            new_p_src = self.get_two_letter_code(long_pair[0])
            new_p_tgt = self.get_two_letter_code(long_pair[1])
            hf_model_id = f"opus-mt-{new_p_src}-{new_p_tgt}"
            new_path = path.parent.joinpath(hf_model_id)  # opus-mt-he-en
            os.rename(str(path), str(new_path))
            self.write_model_card(hf_model_id, dry_run=dry_run)

    def get_two_letter_code(self, three_letter_code):
        """Map an alpha3 code to its alpha2 code, falling back to the input."""
        return self.alpha3_to_alpha2.get(three_letter_code, three_letter_code)

    def expand_group_to_two_letter_codes(self, grp_name):
        """Expand a language-group code into its members' alpha2 codes."""
        return [self.get_two_letter_code(x) for x in self.constituents[grp_name]]

    def get_tags(self, code, ref_name):
        """Return (language tags, is_multilingual) for one side of a pair."""
        if len(code) == 2:
            assert "languages" not in ref_name, f"{code}: {ref_name}"
            return [code], False
        elif "languages" in ref_name or len(self.constituents.get(code, [])) > 1:
            group = self.expand_group_to_two_letter_codes(code)
            group.append(code)
            return group, True
        else:  # zho-> zh
            print(f"Three letter monolingual code: {code}")
            return [code], False

    def resolve_lang_code(self, r) -> Tuple[List[str], str, str]:
        """R is a row in ported"""
        short_pair = r.short_pair
        src, tgt = short_pair.split("-")
        src_tags, src_multilingual = self.get_tags(src, r.src_name)
        assert isinstance(src_tags, list)
        tgt_tags, tgt_multilingual = self.get_tags(tgt, r.tgt_name)
        assert isinstance(tgt_tags, list)
        return dedup(src_tags + tgt_tags), src_multilingual, tgt_multilingual

    def write_model_card(
        self,
        hf_model_id: str,
        repo_root=DEFAULT_REPO,
        dry_run=False,
    ) -> str:
        """
        Copy the most recent model's readme section from opus, and add metadata. upload command: aws s3 sync
        model_card_dir s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun
        """
        short_pair = remove_prefix(hf_model_id, "opus-mt-")
        extra_metadata = self.metadata.loc[short_pair].drop("2m")
        extra_metadata["short_pair"] = short_pair
        lang_tags, src_multilingual, tgt_multilingual = self.resolve_lang_code(extra_metadata)
        opus_name = f"{extra_metadata.src_alpha3}-{extra_metadata.tgt_alpha3}"
        # opus_name: str = self.convert_hf_name_to_opus_name(hf_model_name)
        assert repo_root in ("OPUS-MT-train", "Tatoeba-Challenge")
        opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md")
        assert opus_readme_path.exists(), f"Readme file {opus_readme_path} not found"
        # Multi-language sides are "+"-joined in the opus name.
        opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")]
        readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md"
        s, t = ",".join(opus_src), ",".join(opus_tgt)
        metadata = {
            "hf_name": short_pair,
            "source_languages": s,
            "target_languages": t,
            "opus_readme_url": readme_url,
            "original_repo": repo_root,
            "tags": ["translation"],
            "languages": lang_tags,
        }
        lang_tags = l2front_matter(lang_tags)
        metadata["src_constituents"] = self.constituents[s]
        metadata["tgt_constituents"] = self.constituents[t]
        metadata["src_multilingual"] = src_multilingual
        metadata["tgt_multilingual"] = tgt_multilingual
        metadata.update(extra_metadata)
        metadata.update(get_system_metadata(repo_root))
        # combine with Tatoeba markdown
        extra_markdown = f"### {short_pair}\n\n* source group: {metadata['src_name']} \n* target group: {metadata['tgt_name']} \n*  OPUS readme: [{opus_name}]({readme_url})\n"
        content = opus_readme_path.open().read()
        content = content.split("\n# ")[-1]  # Get the lowest level 1 header in the README -- the most recent model.
        splat = content.split("*")[2:]
        content = "*".join(splat)
        # BETTER FRONT MATTER LOGIC
        content = (
            FRONT_MATTER_TEMPLATE.format(lang_tags)
            + extra_markdown
            + "\n* "
            + content.replace("download", "download original " "weights")
        )
        items = "\n".join([f"- {k}: {v}" for k, v in metadata.items()])
        sec3 = "\n### System Info: \n" + items
        content += sec3
        if dry_run:
            return content, metadata
        sub_dir = self.model_card_dir / hf_model_id
        sub_dir.mkdir(exist_ok=True)
        dest = sub_dir / "README.md"
        dest.open("w").write(content)
        pd.Series(metadata).to_json(sub_dir / "metadata.json")
        return content, metadata

    def download_metadata(self):
        """Fetch the language-code tables into lang_code_data/ if missing."""
        Path(LANG_CODE_PATH).parent.mkdir(exist_ok=True)
        import wget
        if not os.path.exists(ISO_PATH):
            wget.download(ISO_URL, ISO_PATH)
        if not os.path.exists(LANG_CODE_PATH):
            wget.download(LANG_CODE_URL, LANG_CODE_PATH)

    @staticmethod
    def make_tatoeba_registry(repo_path=DEFAULT_MODEL_DIR):
        """Parse each 7-character pair directory's README into
        (id, pre-processing, download url, test-set url) tuples."""
        if not (Path(repo_path) / "zho-eng" / "README.md").exists():
            raise ValueError(
                f"repo_path:{repo_path} does not exist: "
                "You must run: git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git before calling."
            )
        results = {}
        for p in Path(repo_path).iterdir():
            # Only "xxx-yyy" pair directories have 7-character names.
            if len(p.name) != 7:
                continue
            lns = list(open(p / "README.md").readlines())
            results[p.name] = _parse_readme(lns)
        new_list = [(k, v) for k, v in results.items()]
        new_list_2 = []
        for element in new_list:
            try:
                new_list_2.append((element[0], element[1]["pre-processing"], element[1]["download"], element[1]["download"][:-4] + ".test.txt"))
            except:
                print("Skipped element " + element[0])
        return new_list_2
GROUP_MEMBERS = {
# three letter code -> (group/language name, {constituents...}
# if this language is on the target side the constituents can be used as target language codes.
# if the language is on the source side they are supported natively without special codes.
"aav": ("Austro-Asiatic languages", {"hoc", "hoc_Latn", "kha", "khm", "khm_Latn", "mnw", "vie", "vie_Hani"}),
"afa": (
"Afro-Asiatic languages",
{
"acm",
"afb",
"amh",
"apc",
"ara",
"arq",
"ary",
"arz",
"hau_Latn",
"heb",
"kab",
"mlt",
"rif_Latn",
"shy_Latn",
"som",
"thv",
"tir",
},
),
"afr": ("Afrikaans", {"afr"}),
"alv": (
"Atlantic-Congo languages",
{
"ewe",
"fuc",
"fuv",
"ibo",
"kin",
"lin",
"lug",
"nya",
"run",
"sag",
"sna",
"swh",
"toi_Latn",
"tso",
"umb",
"wol",
"xho",
"yor",
"zul",
},
),
"ara": ("Arabic", {"afb", "apc", "apc_Latn", "ara", "ara_Latn", "arq", "arq_Latn", "arz"}),
"art": (
"Artificial languages",
{
"afh_Latn",
"avk_Latn",
"dws_Latn",
"epo",
"ido",
"ido_Latn",
"ile_Latn",
"ina_Latn",
"jbo",
"jbo_Cyrl",
"jbo_Latn",
"ldn_Latn",
"lfn_Cyrl",
"lfn_Latn",
"nov_Latn",
"qya",
"qya_Latn",
"sjn_Latn",
"tlh_Latn",
"tzl",
"tzl_Latn",
"vol_Latn",
},
),
"aze": ("Azerbaijani", {"aze_Latn"}),
"bat": ("Baltic languages", {"lit", "lav", "prg_Latn", "ltg", "sgs"}),
"bel": ("Belarusian", {"bel", "bel_Latn"}),
"ben": ("Bengali", {"ben"}),
"bnt": (
"Bantu languages",
{"kin", "lin", "lug", "nya", "run", "sna", "swh", "toi_Latn", "tso", "umb", "xho", "zul"},
),
"bul": ("Bulgarian", {"bul", "bul_Latn"}),
"cat": ("Catalan", {"cat"}),
"cau": ("Caucasian languages", {"abk", "kat", "che", "ady"}),
"ccs": ("South Caucasian languages", {"kat"}),
"ceb": ("Cebuano", {"ceb"}),
"cel": ("Celtic languages", {"gla", "gle", "bre", "cor", "glv", "cym"}),
"ces": ("Czech", {"ces"}),
"cpf": ("Creoles and pidgins, French‑based", {"gcf_Latn", "hat", "mfe"}),
"cpp": (
"Creoles and pidgins, Portuguese-based",
{"zsm_Latn", "ind", "pap", "min", "tmw_Latn", "max_Latn", "zlm_Latn"},
),
"cus": ("Cushitic languages", {"som"}),
"dan": ("Danish", {"dan"}),
"deu": ("German", {"deu"}),
"dra": ("Dravidian languages", {"tam", "kan", "mal", "tel"}),
"ell": ("Modern Greek (1453-)", {"ell"}),
"eng": ("English", {"eng"}),
"epo": ("Esperanto", {"epo"}),
"est": ("Estonian", {"est"}),
"euq": ("Basque (family)", {"eus"}),
"eus": ("Basque", {"eus"}),
"fin": ("Finnish", {"fin"}),
"fiu": (
"Finno-Ugrian languages",
{
"est",
"fin",
"fkv_Latn",
"hun",
"izh",
"kpv",
"krl",
"liv_Latn",
"mdf",
"mhr",
"myv",
"sma",
"sme",
"udm",
"vep",
"vro",
},
),
"fra": ("French", {"fra"}),
"gem": (
"Germanic languages",
{
"afr",
"ang_Latn",
"dan",
"deu",
"eng",
"enm_Latn",
"fao",
"frr",
"fry",
"gos",
"got_Goth",
"gsw",
"isl",
"ksh",
"ltz",
"nds",
"nld",
"nno",
"nob",
"nob_Hebr",
"non_Latn",
"pdc",
"sco",
"stq",
"swe",
"swg",
"yid",
},
),
"gle": ("Irish", {"gle"}),
"glg": ("Galician", {"glg"}),
"gmq": ("North Germanic languages", {"dan", "nob", "nob_Hebr", "swe", "isl", "nno", "non_Latn", "fao"}),
"gmw": (
"West Germanic languages",
{
"afr",
"ang_Latn",
"deu",
"eng",
"enm_Latn",
"frr",
"fry",
"gos",
"gsw",
"ksh",
"ltz",
"nds",
"nld",
"pdc",
"sco",
"stq",
"swg",
"yid",
},
),
"grk": ("Greek languages", {"grc_Grek", "ell"}),
"hbs": ("Serbo-Croatian", {"hrv", "srp_Cyrl", "bos_Latn", "srp_Latn"}),
"heb": ("Hebrew", {"heb"}),
"hin": ("Hindi", {"hin"}),
"hun": ("Hungarian", {"hun"}),
"hye": ("Armenian", {"hye", "hye_Latn"}),
"iir": (
"Indo-Iranian languages",
{
"asm",
"awa",
"ben",
"bho",
"gom",
"guj",
"hif_Latn",
"hin",
"jdt_Cyrl",
"kur_Arab",
"kur_Latn",
"mai",
"mar",
"npi",
"ori",
"oss",
"pan_Guru",
"pes",
"pes_Latn",
"pes_Thaa",
"pnb",
"pus",
"rom",
"san_Deva",
"sin",
"snd_Arab",
"tgk_Cyrl",
"tly_Latn",
"urd",
"zza",
},
),
"ilo": ("Iloko", {"ilo"}),
"inc": (
"Indic languages",
{
"asm",
"awa",
"ben",
"bho",
"gom",
"guj",
"hif_Latn",
"hin",
"mai",
"mar",
"npi",
"ori",
"pan_Guru",
"pnb",
"rom",
"san_Deva",
"sin",
"snd_Arab",
"urd",
},
),
"ine": (
"Indo-European languages",
{
"afr",
"afr_Arab",
"aln",
"ang_Latn",
"arg",
"asm",
"ast",
"awa",
"bel",
"bel_Latn",
"ben",
"bho",
"bjn",
"bos_Latn",
"bre",
"bul",
"bul_Latn",
"cat",
"ces",
"cor",
"cos",
"csb_Latn",
"cym",
"dan",
"deu",
"dsb",
"egl",
"ell",
"eng",
"enm_Latn",
"ext",
"fao",
"fra",
"frm_Latn",
"frr",
"fry",
"gcf_Latn",
"gla",
"gle",
"glg",
"glv",
"gom",
"gos",
"got_Goth",
"grc_Grek",
"gsw",
"guj",
"hat",
"hif_Latn",
"hin",
"hrv",
"hsb",
"hye",
"hye_Latn",
"ind",
"isl",
"ita",
"jdt_Cyrl",
"ksh",
"kur_Arab",
"kur_Latn",
"lad",
"lad_Latn",
"lat_Grek",
"lat_Latn",
"lav",
"lij",
"lit",
"lld_Latn",
"lmo",
"ltg",
"ltz",
"mai",
"mar",
"max_Latn",
"mfe",
"min",
"mkd",
"mwl",
"nds",
"nld",
"nno",
"nob",
"nob_Hebr",
"non_Latn",
"npi",
"oci",
"ori",
"orv_Cyrl",
"oss",
"pan_Guru",
"pap",
"pcd",
"pdc",
"pes",
"pes_Latn",
"pes_Thaa",
"pms",
"pnb",
"pol",
"por",
"prg_Latn",
"pus",
"roh",
"rom",
"ron",
"rue",
"rus",
"rus_Latn",
"san_Deva",
"scn",
"sco",
"sgs",
"sin",
"slv",
"snd_Arab",
"spa",
"sqi",
"srd",
"srp_Cyrl",
"srp_Latn",
"stq",
"swe",
"swg",
"tgk_Cyrl",
"tly_Latn",
"tmw_Latn",
"ukr",
"urd",
"vec",
"wln",
"yid",
"zlm_Latn",
"zsm_Latn",
"zza",
},
),
"isl": ("Icelandic", {"isl"}),
"ita": ("Italian", {"ita"}),
"itc": (
"Italic languages",
{
"arg",
"ast",
"bjn",
"cat",
"cos",
"egl",
"ext",
"fra",
"frm_Latn",
"gcf_Latn",
"glg",
"hat",
"ind",
"ita",
"lad",
"lad_Latn",
"lat_Grek",
"lat_Latn",
"lij",
"lld_Latn",
"lmo",
"max_Latn",
"mfe",
"min",
"mwl",
"oci",
"pap",
"pcd",
"pms",
"por",
"roh",
"ron",
"scn",
"spa",
"srd",
"tmw_Latn",
"vec",
"wln",
"zlm_Latn",
"zsm_Latn",
},
),
"jpn": ("Japanese", {"jpn", "jpn_Bopo", "jpn_Hang", "jpn_Hani", "jpn_Hira", "jpn_Kana", "jpn_Latn", "jpn_Yiii"}),
"jpx": ("Japanese (family)", {"jpn"}),
"kat": ("Georgian", {"kat"}),
"kor": ("Korean", {"kor_Hani", "kor_Hang", "kor_Latn", "kor"}),
"lav": ("Latvian", {"lav"}),
"lit": ("Lithuanian", {"lit"}),
"mkd": ("Macedonian", {"mkd"}),
"mkh": ("Mon-Khmer languages", {"vie_Hani", "mnw", "vie", "kha", "khm_Latn", "khm"}),
"msa": ("Malay (macrolanguage)", {"zsm_Latn", "ind", "max_Latn", "zlm_Latn", "min"}),
"mul": (
"Multiple languages",
{
"abk",
"acm",
"ady",
"afb",
"afh_Latn",
"afr",
"akl_Latn",
"aln",
"amh",
"ang_Latn",
"apc",
"ara",
"arg",
"arq",
"ary",
"arz",
"asm",
"ast",
"avk_Latn",
"awa",
"aze_Latn",
"bak",
"bam_Latn",
"bel",
"bel_Latn",
"ben",
"bho",
"bod",
"bos_Latn",
"bre",
"brx",
"brx_Latn",
"bul",
"bul_Latn",
"cat",
"ceb",
"ces",
"cha",
"che",
"chr",
"chv",
"cjy_Hans",
"cjy_Hant",
"cmn",
"cmn_Hans",
"cmn_Hant",
"cor",
"cos",
"crh",
"crh_Latn",
"csb_Latn",
"cym",
"dan",
"deu",
"dsb",
"dtp",
"dws_Latn",
"egl",
"ell",
"enm_Latn",
"epo",
"est",
"eus",
"ewe",
"ext",
"fao",
"fij",
"fin",
"fkv_Latn",
"fra",
"frm_Latn",
"frr",
"fry",
"fuc",
"fuv",
"gan",
"gcf_Latn",
"gil",
"gla",
"gle",
"glg",
"glv",
"gom",
"gos",
"got_Goth",
"grc_Grek",
"grn",
"gsw",
"guj",
"hat",
"hau_Latn",
"haw",
"heb",
"hif_Latn",
"hil",
"hin",
"hnj_Latn",
"hoc",
"hoc_Latn",
"hrv",
"hsb",
"hun",
"hye",
"iba",
"ibo",
"ido",
"ido_Latn",
"ike_Latn",
"ile_Latn",
"ilo",
"ina_Latn",
"ind",
"isl",
"ita",
"izh",
"jav",
"jav_Java",
"jbo",
"jbo_Cyrl",
"jbo_Latn",
"jdt_Cyrl",
"jpn",
"kab",
"kal",
"kan",
"kat",
"kaz_Cyrl",
"kaz_Latn",
"kek_Latn",
"kha",
"khm",
"khm_Latn",
"kin",
"kir_Cyrl",
"kjh",
"kpv",
"krl",
"ksh",
"kum",
"kur_Arab",
"kur_Latn",
"lad",
"lad_Latn",
"lao",
"lat_Latn",
"lav",
"ldn_Latn",
"lfn_Cyrl",
"lfn_Latn",
"lij",
"lin",
"lit",
"liv_Latn",
"lkt",
"lld_Latn",
"lmo",
"ltg",
"ltz",
"lug",
"lzh",
"lzh_Hans",
"mad",
"mah",
"mai",
"mal",
"mar",
"max_Latn",
"mdf",
"mfe",
"mhr",
"mic",
"min",
"mkd",
"mlg",
"mlt",
"mnw",
"moh",
"mon",
"mri",
"mwl",
"mww",
"mya",
"myv",
"nan",
"nau",
"nav",
"nds",
"niu",
"nld",
"nno",
"nob",
"nob_Hebr",
"nog",
"non_Latn",
"nov_Latn",
"npi",
"nya",
"oci",
"ori",
"orv_Cyrl",
"oss",
"ota_Arab",
"ota_Latn",
"pag",
"pan_Guru",
"pap",
"pau",
"pdc",
"pes",
"pes_Latn",
"pes_Thaa",
"pms",
"pnb",
"pol",
"por",
"ppl_Latn",
"prg_Latn",
"pus",
"quc",
"qya",
"qya_Latn",
"rap",
"rif_Latn",
"roh",
"rom",
"ron",
"rue",
"run",
"rus",
"sag",
"sah",
"san_Deva",
"scn",
"sco",
"sgs",
"shs_Latn",
"shy_Latn",
"sin",
"sjn_Latn",
"slv",
"sma",
"sme",
"smo",
"sna",
"snd_Arab",
"som",
"spa",
"sqi",
"srp_Cyrl",
"srp_Latn",
"stq",
"sun",
"swe",
"swg",
"swh",
"tah",
"tam",
"tat",
"tat_Arab",
"tat_Latn",
"tel",
"tet",
"tgk_Cyrl",
"tha",
"tir",
"tlh_Latn",
"tly_Latn",
"tmw_Latn",
"toi_Latn",
"ton",
"tpw_Latn",
"tso",
"tuk",
"tuk_Latn",
"tur",
"tvl",
"tyv",
"tzl",
"tzl_Latn",
"udm",
"uig_Arab",
"uig_Cyrl",
"ukr",
"umb",
"urd",
"uzb_Cyrl",
"uzb_Latn",
"vec",
"vie",
"vie_Hani",
"vol_Latn",
"vro",
"war",
"wln",
"wol",
"wuu",
"xal",
"xho",
"yid",
"yor",
"yue",
"yue_Hans",
"yue_Hant",
"zho",
"zho_Hans",
"zho_Hant",
"zlm_Latn",
"zsm_Latn",
"zul",
"zza",
},
),
"nic": (
"Niger-Kordofanian languages",
{
"bam_Latn",
"ewe",
"fuc",
"fuv",
"ibo",
"kin",
"lin",
"lug",
"nya",
"run",
"sag",
"sna",
"swh",
"toi_Latn",
"tso",
"umb",
"wol",
"xho",
"yor",
"zul",
},
),
"nld": ("Dutch", {"nld"}),
"nor": ("Norwegian", {"nob", "nno"}),
"phi": ("Philippine languages", {"ilo", "akl_Latn", "war", "hil", "pag", "ceb"}),
"pol": ("Polish", {"pol"}),
"por": ("Portuguese", {"por"}),
"pqe": (
"Eastern Malayo-Polynesian languages",
{"fij", "gil", "haw", "mah", "mri", "nau", "niu", "rap", "smo", "tah", "ton", "tvl"},
),
"roa": (
"Romance languages",
{
"arg",
"ast",
"cat",
"cos",
"egl",
"ext",
"fra",
"frm_Latn",
"gcf_Latn",
"glg",
"hat",
"ind",
"ita",
"lad",
"lad_Latn",
"lij",
"lld_Latn",
"lmo",
"max_Latn",
"mfe",
"min",
"mwl",
"oci",
"pap",
"pms",
"por",
"roh",
"ron",
"scn",
"spa",
"tmw_Latn",
"vec",
"wln",
"zlm_Latn",
"zsm_Latn",
},
),
"ron": ("Romanian", {"ron"}),
"run": ("Rundi", {"run"}),
"rus": ("Russian", {"rus"}),
"sal": ("Salishan languages", {"shs_Latn"}),
"sem": ("Semitic languages", {"acm", "afb", "amh", "apc", "ara", "arq", "ary", "arz", "heb", "mlt", "tir"}),
"sla": (
"Slavic languages",
{
"bel",
"bel_Latn",
"bos_Latn",
"bul",
"bul_Latn",
"ces",
"csb_Latn",
"dsb",
"hrv",
"hsb",
"mkd",
"orv_Cyrl",
"pol",
"rue",
"rus",
"slv",
"srp_Cyrl",
"srp_Latn",
"ukr",
},
),
"slv": ("Slovenian", {"slv"}),
"spa": ("Spanish", {"spa"}),
"swe": ("Swedish", {"swe"}),
"taw": ("Tai", {"lao", "tha"}),
"tgl": ("Tagalog", {"tgl_Latn"}),
"tha": ("Thai", {"tha"}),
"trk": (
"Turkic languages",
{
"aze_Latn",
"bak",
"chv",
"crh",
"crh_Latn",
"kaz_Cyrl",
"kaz_Latn",
"kir_Cyrl",
"kjh",
"kum",
"ota_Arab",
"ota_Latn",
"sah",
"tat",
"tat_Arab",
"tat_Latn",
"tuk",
"tuk_Latn",
"tur",
"tyv",
"uig_Arab",
"uig_Cyrl",
"uzb_Cyrl",
"uzb_Latn",
},
),
"tur": ("Turkish", {"tur"}),
"ukr": ("Ukrainian", {"ukr"}),
"urd": ("Urdu", {"urd"}),
"urj": (
"Uralic languages",
{
"est",
"fin",
"fkv_Latn",
"hun",
"izh",
"kpv",
"krl",
"liv_Latn",
"mdf",
"mhr",
"myv",
"sma",
"sme",
"udm",
"vep",
"vro",
},
),
"vie": ("Vietnamese", {"vie", "vie_Hani"}),
"war": ("Waray (Philippines)", {"war"}),
"zho": (
"Chinese",
{
"cjy_Hans",
"cjy_Hant",
"cmn",
"cmn_Bopo",
"cmn_Hang",
"cmn_Hani",
"cmn_Hans",
"cmn_Hant",
"cmn_Hira",
"cmn_Kana",
"cmn_Latn",
"cmn_Yiii",
"gan",
"hak_Hani",
"lzh",
"lzh_Bopo",
"lzh_Hang",
"lzh_Hani",
"lzh_Hans",
"lzh_Hira",
"lzh_Kana",
"lzh_Yiii",
"nan",
"nan_Hani",
"wuu",
"wuu_Bopo",
"wuu_Hani",
"wuu_Latn",
"yue",
"yue_Bopo",
"yue_Hang",
"yue_Hani",
"yue_Hans",
"yue_Hant",
"yue_Hira",
"yue_Kana",
"zho",
"zho_Hans",
"zho_Hant",
},
),
"zle": ("East Slavic languages", {"bel", "orv_Cyrl", "bel_Latn", "rus", "ukr", "rue"}),
"zls": ("South Slavic languages", {"bos_Latn", "bul", "bul_Latn", "hrv", "mkd", "slv", "srp_Cyrl", "srp_Latn"}),
"zlw": ("West Slavic languages", {"csb_Latn", "dsb", "hsb", "pol", "ces"}),
}
def l2front_matter(langs):
    """Render *langs* as YAML front-matter list items, one ``- <lang>`` line each."""
    items = [f"- {lang}\n" for lang in langs]
    return "".join(items)
def dedup(lst):
    """Drop falsy entries and duplicates from *lst*, preserving order.

    Membership is checked against the output list (not a set) so that
    unhashable items (e.g. lists) still work; inputs here are short,
    so the O(n^2) scan is acceptable.
    """
    new_lst = []
    for item in lst:
        # Keep the first occurrence of every truthy item.
        if item and item not in new_lst:
            new_lst.append(item)
    return new_lst
if __name__ == "__main__":
    # CLI entry point: convert the requested Tatoeba models into HF format.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        # action="append" combined with nargs="+" makes args.models a list of
        # lists: one inner list per occurrence of the -m/--models flag.
        "-m", "--models", action="append", help="<Required> Set flag", required=True, nargs="+", dest="models"
    )
    parser.add_argument("-save_dir", "--save_dir", default="marian_converted", help="where to save converted models")
    args = parser.parse_args()
    resolver = TatoebaConverter(save_dir=args.save_dir)
    # Only the first -m group is converted; extra -m occurrences are ignored.
    resolver.convert_models(args.models[0])
| 34,591 | 25.94081 | 175 | py |
defend_framework | defend_framework-main/DPA_certified_train.py | import torch
import os
import json
import time
import numpy as np
from utils.data_processing import MNIST17DataPreprocessor, MNISTDataPreprocessor, IMDBDataPreprocessor, \
EmberDataPreProcessor, EMBER_DATASET, EmberPoisonDataPreProcessor, MNIST01DataPreprocessor, \
MNIST17LimitedDataPreprocessor, FMNISTDataPreprocessor, CIFARDataPreprocessor
from models import MNISTLiRPAModel, EmberLiRPAModel
from utils.train_utils import train_many
from utils.cert_train_argments import get_arguments
if __name__ == "__main__":
    # CLI entry point: set up the experiment directory layout, build the
    # dataset loader and certified-training model for the chosen dataset,
    # record the arguments, then run the ensemble training loop.
    parser = get_arguments()
    # Set random seeds
    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # random.seed(args.seed)
    # np.random.seed(args.seed)
    # tf.random.set_seed(args.seed)
    # make dirs
    if args.exp_name is None:
        # Default experiment name: current timestamp, e.g. "20240101-120000".
        args.exp_name = time.strftime("%Y%m%d-%H%M%S")
    if args.model_save_dir is not None:
        if not os.path.exists(args.model_save_dir):
            os.mkdir(args.model_save_dir)
        os.mkdir(os.path.join(args.model_save_dir, args.exp_name))
    # res / res_noise hold previously aggregated results when resuming.
    res = None
    res_noise = None
    if args.res_save_dir is not None:
        if not os.path.exists(args.res_save_dir):
            os.mkdir(args.res_save_dir)
        if os.path.exists(os.path.join(args.res_save_dir, args.exp_name)):
            # Interactive guard against clobbering a finished experiment.
            respond = input("Experiment already exists, type [O] to overwrite, type [R] to resume")
            if respond == "O":
                pass
            elif respond == "R":
                # NOTE(review): assumes "aggre_res.npy" stores a 2-element
                # array that unpacks into (res, res_noise) — confirm against
                # the save format used by train_many.
                res, res_noise = np.load(os.path.join(args.res_save_dir, args.exp_name, "aggre_res.npy"))
            else:
                exit(0)
        else:
            os.mkdir(os.path.join(args.res_save_dir, args.exp_name))
    assert args.res_save_dir is not None and args.exp_name is not None
    if args.dataset == "mnist":
        if args.load_poison_dir is not None:
            data_loader = MNISTDataPreprocessor.load(os.path.join(args.load_poison_dir, "data"), args)
        else:
            data_loader = MNISTDataPreprocessor(args)
        model = MNISTLiRPAModel.MNISTModel(data_loader.n_features, data_loader.n_classes, args, device, lr=args.lr)
    elif args.dataset == "ember":
        if args.load_poison_dir is not None:
            data_loader = EmberPoisonDataPreProcessor(args)
        else:
            data_loader = EmberDataPreProcessor(args)
        model = EmberLiRPAModel.EmberModel(data_loader.n_features, data_loader.n_classes, args, device, lr=args.lr)
    else:
        raise NotImplementedError
    # Persist the full argument namespace alongside the results for reproducibility.
    with open(os.path.join(args.res_save_dir, args.exp_name, "commandline_args.txt"), 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    aggregate_results = train_many(data_loader, model, args, res, res_noise)
| 2,833 | 39.485714 | 115 | py |
defend_framework | defend_framework-main/gen_attack_dataset.py | import random
import os
import json
import time
import numpy as np
import tensorflow as tf
from tensorflow import keras
from utils.data_processing import MNISTDataPreprocessor, MNIST17DataPreprocessor, MNIST01DataPreprocessor, \
CIFAR02DataPreprocessor
from models.MNISTModel import MNISTModel, MNIST17Model, MNIST01Model
from models.CIFAR10Model import CIFAR10Model
from utils.train_utils import train_single
from attack.BadNetAttack import BadNetAttackLabel, BadNetAttackNoLabel
from utils.cert_train_argments import get_arguments
if __name__ == "__main__":
    # CLI entry point: generate a BadNet-poisoned dataset for the chosen
    # benchmark, train a single model on it, and report clean vs. poisoned
    # test accuracy per class.
    parser = get_arguments()
    # attack parameters
    parser.add_argument("--attack", choices=["badnet"], help="attack algorithms", required=True)
    parser.add_argument("--consecutive", action="store_true",
                        help="Whether the poisoned features need to be inside a block")
    parser.add_argument("--attack_label", action="store_true",
                        help="Whether to attack the label of the training image")
    parser.add_argument("--poisoned_feat_num", type=int, required=True, help="poisoned feature number")
    parser.add_argument("--poisoned_ins_rate", default=0.1, type=float, help="the rate of instances to be poisoned")
    parser.add_argument("--attack_targets", type=str,
                        help="A list of ints of length n_classes, attacking label i to its target attack_targets[i], "
                             "attack_targets[i] can be None.")
    # dirs and files
    parser.add_argument("--save_poison_dir", type=str,
                        help="dir for save poisoned dataset"
                        )
    parser.add_argument("--load", action="store_true", help="whether to load the saved file")
    # Set random seeds
    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)
    # random.seed(args.seed)
    # np.random.seed(args.seed)
    # tf.random.set_seed(args.seed)
    # make dirs
    if not os.path.exists(args.save_poison_dir):
        os.mkdir(args.save_poison_dir)
    if args.exp_name is None:
        args.exp_name = time.strftime("%Y%m%d-%H%M%S")
    filepath = os.path.join(args.save_poison_dir, args.exp_name)
    if args.load:
        # Resuming requires a previously saved poisoned dataset on disk.
        assert os.path.exists(filepath) and os.path.exists(os.path.join(filepath, "data"))
    if os.path.exists(filepath) and not args.load:
        respond = input("Experiment already exists, type [Y] to overwrite")
        if respond != "Y":
            exit(0)
    elif not os.path.exists(filepath):
        os.mkdir(filepath)
    # Select the matching (preprocessor, model) pair for the benchmark.
    if args.dataset == "mnist":
        DataPreprocessor_type = MNISTDataPreprocessor
        Model_type = MNISTModel
    elif args.dataset == "mnist17":
        DataPreprocessor_type = MNIST17DataPreprocessor
        Model_type = MNIST17Model
    elif args.dataset == "mnist01":
        DataPreprocessor_type = MNIST01DataPreprocessor
        Model_type = MNIST01Model
    elif args.dataset == "cifar10-02":
        DataPreprocessor_type = CIFAR02DataPreprocessor
        Model_type = CIFAR10Model
    else:
        raise NotImplementedError
    with open(os.path.join(filepath, "commandline_args.txt"), 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    # NOTE(review): eval() runs an arbitrary CLI-provided string; it expects a
    # Python list literal like "[1, None, 0, ...]" — trusted input only.
    attack_targets = eval(args.attack_targets)
    if not args.load:
        data_loader = DataPreprocessor_type(args)
        if args.attack_label:
            attack = BadNetAttackLabel(data_loader, attack_targets, args.poisoned_feat_num,
                                       consecutive=args.consecutive, poisoned_ins_rate=args.poisoned_ins_rate)
        else:
            attack = BadNetAttackNoLabel(data_loader, attack_targets, args.poisoned_feat_num,
                                         consecutive=args.consecutive, poisoned_ins_rate=args.poisoned_ins_rate)
        attack.attack()
        attack.save(os.path.join(filepath, "data"))
    else:
        if args.attack_label:
            attack = BadNetAttackLabel.load(os.path.join(filepath, "data"))
        else:
            attack = BadNetAttackNoLabel.load(os.path.join(filepath, "data"))
        data_loader = attack.data_processor
    model = Model_type(data_loader.n_features, data_loader.n_classes)
    train_single(data_loader, model, args)
    print("Clean Test Set:")
    model.evaluate(data_loader.x_test, keras.utils.to_categorical(data_loader.y_test, data_loader.n_classes))
    print("Poisoned Test Set:")
    # Per-class evaluation on the poisoned test set. Both branches run the
    # same evaluate() call — only the printed label differs.
    for i in range(data_loader.n_classes):
        idx = np.where(data_loader.y_test == i)[0]
        if attack_targets[i] is None:
            print(f"class {i} is not poisoned:")
            model.evaluate(data_loader.x_test_poisoned[idx],
                           keras.utils.to_categorical(data_loader.y_test_poisoned[idx], data_loader.n_classes))
        else:
            print(f"class {i} is poisoned:")
            model.evaluate(data_loader.x_test_poisoned[idx],
                           keras.utils.to_categorical(data_loader.y_test_poisoned[idx], data_loader.n_classes))
| 5,498 | 42.642857 | 118 | py |
defend_framework | defend_framework-main/models/LiRPAModel.py | from abc import ABC, abstractmethod
from torch.utils.data import TensorDataset, DataLoader
import torch.optim as optim
from torch.nn import CrossEntropyLoss
from auto_LiRPA import BoundedModule, BoundedTensor
from auto_LiRPA.perturbations import *
from auto_LiRPA.eps_scheduler import LinearScheduler, AdaptiveScheduler, SmoothedScheduler, FixedScheduler
from auto_LiRPA.utils import MultiAverageMeter
class LiRPAModel(ABC):
    """Abstract wrapper that trains and evaluates a PyTorch model with
    certified robustness bounds computed by auto_LiRPA (IBP / CROWN variants)
    under an L0-norm perturbation budget.

    Subclasses pass their concrete architecture factory as ``model_ori`` and
    may reshape inputs before delegating to :meth:`fit` / :meth:`evaluate`.
    """

    def __init__(self, n_features, n_classes, args, device, model_ori, lr=1e-3):
        # NOTE: despite the name, n_features is used as the full input shape
        # (e.g. [H, W, C]), not a feature count.
        self.input_shape = n_features
        self.n_classes = n_classes
        self.lr = lr
        self.args = args
        self.device = device
        self.init_model = model_ori
        self.init()

    def init(self):
        """Instantiate the concrete model and wrap it in a BoundedModule."""
        ## Step 1: Initial original model as usual
        self.model_ori = self.init_model(in_ch=self.input_shape[-1], in_dim=self.input_shape[0])
        ## Step 3: wrap model with auto_LiRPA
        # The second parameter dummy_input is for constructing the trace of the computational graph.
        dummy_input = torch.randn(*([2] + list(self.input_shape)))
        self.model = BoundedModule(self.model_ori, dummy_input,
                                   bound_opts={'relu': self.args.bound_opts, 'conv_mode': self.args.conv_mode},
                                   device=self.device)
        # print("Model structure: \n", str(self.model_ori))

    def fit(self, X, y, batch_size, epochs, data_aug=None):
        """Robustly train on (X, y) for the given number of epochs.

        ``y`` is expected one-hot; it is converted to class indices here.
        ``data_aug``, if given, is applied to each batch on-device.
        """
        data = TensorDataset(torch.Tensor(X), torch.Tensor(y).long().max(dim=-1)[1])
        loader = DataLoader(data, batch_size=batch_size, shuffle=True, pin_memory=True)
        ## Step 4 prepare optimizer, epsilon scheduler and learning rate scheduler
        # NOTE: eval() dispatches on the CLI-provided scheduler class name
        # (LinearScheduler / AdaptiveScheduler / SmoothedScheduler /
        # FixedScheduler); only trusted values must reach it.
        eps_scheduler = eval(self.args.scheduler_name)(self.args.eps, self.args.scheduler_opts)
        opt = optim.Adam(self.model.parameters(), lr=self.lr)
        lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)
        for t in range(1, epochs + 1):
            if eps_scheduler.reached_max_eps():
                # Only decay learning rate after reaching the maximum eps
                lr_scheduler.step()
            self.train(eps_scheduler, opt, loader, data_aug)

    def evaluate(self, x_test, y_test):
        """Report clean and certified accuracy on the test set.

        Returns (predictions, predictions_cert) where a non-certified sample's
        certified prediction is set to the out-of-range class ``n_classes`` so
        it can never match the true label.
        """
        data = TensorDataset(torch.Tensor(x_test),
                             torch.Tensor(y_test).long().max(dim=-1)[1])
        loader = DataLoader(data, batch_size=256, shuffle=False, pin_memory=True)
        eps_scheduler = FixedScheduler(self.args.eps)
        with torch.no_grad():
            predictions, verified = self.test(eps_scheduler, loader)
        print("Test accuracy: ", np.mean(predictions == np.argmax(y_test, axis=-1)))
        predictions_cert = predictions * verified + (1 - verified) * self.n_classes
        print("Certified accuracy: ", np.mean(predictions_cert == np.argmax(y_test, axis=-1)))
        return predictions, predictions_cert

    def test(self, eps_scheduler, loader):
        """Predict each batch and certify the predictions via LiRPA bounds.

        Returns (predictions, verified): the argmax class per sample, and a
        boolean mask that is True where the lower bound of every margin
        (predicted class minus each other class) is positive.
        """
        norm = float(self.args.norm)
        self.model.eval()
        eps_scheduler.eval()
        # np.int / np.bool were removed in NumPy 1.24; the builtin aliases
        # are what they always resolved to.
        predictions = np.array([], dtype=int)
        verified = np.array([], dtype=bool)
        for i, (data, _) in enumerate(loader):
            eps_scheduler.step_batch()
            eps = eps_scheduler.get_eps()
            # bound input for Linf norm used only
            data_ub = data_lb = data
            if list(self.model.parameters())[0].is_cuda:
                data = data.cuda()
                data_lb, data_ub = data_lb.cuda(), data_ub.cuda()
            # Specify Lp norm perturbation.
            # When using Linf perturbation, we manually set element-wise bound x_L and x_U. eps is not used for Linf norm.
            if norm > 0:
                ptb = PerturbationLpNorm(norm=norm, eps=eps, x_L=data_lb, x_U=data_ub)
            elif norm == 0:
                ptb = PerturbationL0Norm(eps=eps_scheduler.get_max_eps(),
                                         ratio=eps_scheduler.get_eps() / eps_scheduler.get_max_eps())
            x = BoundedTensor(data, ptb)
            output = self.model(x)
            batch_predictions = torch.argmax(output, dim=1)
            # generate specifications: rows are (predicted class - other class)
            # margins; certifying them certifies the prediction itself.
            c = torch.eye(self.n_classes).type_as(data)[batch_predictions].unsqueeze(1) - torch.eye(
                self.n_classes).type_as(
                data).unsqueeze(0)
            # remove specifications to self
            I = (~(batch_predictions.data.unsqueeze(1) == torch.arange(self.n_classes).type_as(
                batch_predictions.data).unsqueeze(0)))
            c = (c[I].view(data.size(0), self.n_classes - 1, self.n_classes))
            if list(self.model.parameters())[0].is_cuda:
                c = c.cuda()
            if self.args.bound_type == "IBP":
                lb, ub = self.model.compute_bounds(IBP=True, C=c, method=None)
            elif self.args.bound_type == "CROWN":
                lb, ub = self.model.compute_bounds(IBP=False, C=c, method="backward", bound_upper=False)
            elif self.args.bound_type == "CROWN-IBP":
                # we use a mixed IBP and CROWN-IBP bounds, leading to better performance (Zhang et al., ICLR 2020)
                factor = (eps_scheduler.get_max_eps() - eps) / eps_scheduler.get_max_eps()
                ilb, iub = self.model.compute_bounds(IBP=True, C=c, method=None)
                if factor < 1e-5:
                    lb = ilb
                else:
                    clb, cub = self.model.compute_bounds(IBP=False, C=c, method="backward", bound_upper=False)
                    lb = clb * factor + ilb * (1 - factor)
            elif self.args.bound_type == "CROWN-FAST":
                # Similar to CROWN-IBP but no mix between IBP and CROWN bounds.
                lb, ub = self.model.compute_bounds(IBP=True, C=c, method=None)
                lb, ub = self.model.compute_bounds(IBP=False, C=c, method="backward", bound_upper=False)
            batch_predictions = batch_predictions.cpu().numpy()
            # Verified iff every margin's lower bound is strictly positive.
            batch_verified = (lb > 0).all(dim=1).cpu().numpy()
            predictions = np.append(predictions, batch_predictions)
            verified = np.append(verified, batch_verified)
        return predictions, verified

    def train(self, eps_scheduler, opt, loader, data_aug):
        """Run one robust-training epoch over ``loader``.

        Uses the plain cross-entropy loss while eps is effectively zero
        (warm-up) and the robust loss on the margin lower bounds afterwards.
        """
        # Retained from the (commented-out) Lp-perturbation branch below;
        # training currently always uses the L0 perturbation.
        norm = float(self.args.norm)
        meter = MultiAverageMeter()
        self.model.train()
        eps_scheduler.train()
        eps_scheduler.step_epoch()
        eps_scheduler.set_epoch_length(int((len(loader.dataset) + loader.batch_size - 1) / loader.batch_size))
        for i, (data, labels) in enumerate(loader):
            eps_scheduler.step_batch()
            eps = eps_scheduler.get_eps()
            # For small eps just use natural training, no need to compute LiRPA bounds
            batch_method = "robust"
            if eps < 1e-20:
                batch_method = "natural"
            opt.zero_grad()
            # generate specifications (true class minus each other class)
            c = torch.eye(self.n_classes).type_as(data)[labels].unsqueeze(1) - torch.eye(self.n_classes).type_as(
                data).unsqueeze(0)
            # remove specifications to self
            I = (~(labels.data.unsqueeze(1) == torch.arange(self.n_classes).type_as(labels.data).unsqueeze(0)))
            c = (c[I].view(data.size(0), self.n_classes - 1, self.n_classes))
            if list(self.model.parameters())[0].is_cuda:
                data, labels, c = data.cuda(), labels.cuda(), c.cuda()
            if data_aug is not None:
                data = data_aug(data)
            # L0 perturbation whose strength ramps with the eps schedule.
            ptb = PerturbationL0Norm(eps=eps_scheduler.get_max_eps(),
                                     ratio=eps_scheduler.get_eps() / eps_scheduler.get_max_eps())
            x = BoundedTensor(data, ptb)
            output = self.model(x)
            regular_ce = CrossEntropyLoss()(output, labels)  # regular CrossEntropyLoss used for warming up
            meter.update('CE', regular_ce.item(), x.size(0))
            meter.update('Err', torch.sum(torch.argmax(output, dim=1) != labels).cpu().detach().numpy() / x.size(0),
                         x.size(0))
            if batch_method == "robust":
                if self.args.bound_type == "IBP":
                    lb, ub = self.model.compute_bounds(IBP=True, C=c, method=None)
                elif self.args.bound_type == "CROWN":
                    lb, ub = self.model.compute_bounds(IBP=False, C=c, method="backward", bound_upper=False)
                elif self.args.bound_type == "CROWN-IBP":
                    # we use a mixed IBP and CROWN-IBP bounds, leading to better performance (Zhang et al., ICLR 2020)
                    factor = (eps_scheduler.get_max_eps() - eps) / eps_scheduler.get_max_eps()
                    ilb, iub = self.model.compute_bounds(IBP=True, C=c, method=None)
                    if factor < 1e-5:
                        lb = ilb
                    else:
                        clb, cub = self.model.compute_bounds(IBP=False, C=c, method="backward", bound_upper=False)
                        lb = clb * factor + ilb * (1 - factor)
                elif self.args.bound_type == "CROWN-FAST":
                    # Similar to CROWN-IBP but no mix between IBP and CROWN bounds.
                    lb, ub = self.model.compute_bounds(IBP=True, C=c, method=None)
                    lb, ub = self.model.compute_bounds(IBP=False, C=c, method="backward", bound_upper=False)
                # Pad zero at the beginning for each example, and use fake label "0" for all examples
                lb_padded = torch.cat((torch.zeros(size=(lb.size(0), 1), dtype=lb.dtype, device=lb.device), lb), dim=1)
                fake_labels = torch.zeros(size=(lb.size(0),), dtype=torch.int64, device=lb.device)
                robust_ce = CrossEntropyLoss()(-lb_padded, fake_labels)
            if batch_method == "robust":
                loss = robust_ce
            elif batch_method == "natural":
                loss = regular_ce
            loss.backward()
            eps_scheduler.update_loss(loss.item() - regular_ce.item())
            opt.step()
            meter.update('Loss', loss.item(), data.size(0))
            if batch_method != "natural":
                meter.update('Robust_CE', robust_ce.item(), data.size(0))
                # For an example, if lower bounds of margins is >0 for all classes, the output is verifiably correct.
                # If any margin is < 0 this example is counted as an error
                meter.update('Verified_Err', torch.sum((lb < 0).any(dim=1)).item() / data.size(0), data.size(0))
| 12,734 | 53.191489 | 123 | py |
defend_framework | defend_framework-main/models/EmberLiRPAModel.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from models.LiRPAModel import LiRPAModel
class EmberModel(LiRPAModel):
    """LiRPA-wrapped MLP for the EMBER malware dataset.

    Flat feature vectors are reshaped to (N, 1, 1, n_features) so they fit
    the image-style input shape the base class expects.
    """
    def __init__(self, n_features, n_classes, args, device, lr=1e-3):
        # Input shape [1, 1, n_features]: base class indexes shape[-1]/shape[0].
        super(EmberModel, self).__init__([1, 1, n_features], n_classes, args, device, mlp_4layer, lr)
    def fit(self, X, y, batch_size, epochs, dummy=None):
        # `dummy` keeps the base-class signature (data_aug slot); the
        # augmentation used is the local noise function below.
        X = np.expand_dims(X, axis=[1, 2])
        def data_aug(data):
            # Adds Gaussian noise (sigma=0.1) in-place, with the NOISE
            # clamped to [0, 1] — i.e. only non-negative perturbations.
            # NOTE(review): clamping the noise rather than the sum looks
            # intentional (monotonic feature increase) — confirm.
            data += torch.clamp(torch.normal(torch.zeros_like(data), torch.ones_like(data) * 0.1), min=0, max=1)
            return data
        super(EmberModel, self).fit(X, y, batch_size, epochs, data_aug)
    def evaluate(self, x_test, y_test):
        # Mirror the reshape applied in fit() before delegating.
        x_test = np.expand_dims(x_test, axis=[1, 2])
        return super(EmberModel, self).evaluate(x_test, y_test)
class mlp_4layer(nn.Module):
    """Four-layer fully-connected network: flattened input -> 2000 -> 1000
    -> 100 hidden units (ReLU) -> 2 output logits.
    """

    def __init__(self, in_ch, in_dim):
        super(mlp_4layer, self).__init__()
        # Input is flattened, so only the total element count matters.
        self.fc1 = nn.Linear(in_ch * in_dim, 2000)
        self.fc2 = nn.Linear(2000, 1000)
        self.fc3 = nn.Linear(1000, 100)
        self.fc4 = nn.Linear(100, 2)

    def forward(self, x):
        out = x.flatten(1)
        for hidden in (self.fc1, self.fc2, self.fc3):
            out = F.relu(hidden(out))
        return self.fc4(out)
| 1,310 | 30.214286 | 112 | py |
defend_framework | defend_framework-main/models/MNISTModel.py | from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.layers import Conv2D, AveragePooling2D, MaxPooling2D
from tensorflow.keras.regularizers import l2
from models.Model import Model
class MNISTModel(Model):
    """LeNet-style CNN for 10-class MNIST with an L2-regularized dense head."""

    def __init__(self, n_features, n_classes, lr=1e-3):
        super(MNISTModel, self).__init__(n_features, n_classes, lr)

    def build_model(self):
        """Build and compile the Keras model; returns the compiled model."""
        def scheduler(epoch, lr):
            # Halve the learning rate every 100 epochs (used only by the
            # commented-out LearningRateScheduler callback below).
            if epoch > 0 and epoch % 100 == 0:
                lr *= 0.5
            # print(lr)
            return lr
        reg = l2(1e-3)
        model = Sequential()
        model.add(Conv2D(16, kernel_size=(5, 5),
                         activation='relu',
                         input_shape=self.input_shape))
        model.add(AveragePooling2D(pool_size=(2, 2)))
        model.add(Conv2D(32, (5, 5), activation='relu'))
        model.add(AveragePooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dropout(0.25))
        model.add(Dense(32, activation='relu', kernel_regularizer=reg, bias_regularizer=reg))
        model.add(Dropout(0.25))
        model.add(Dense(512, activation='relu', kernel_regularizer=reg, bias_regularizer=reg))
        model.add(Dense(self.n_classes, activation='softmax'))
        model.compile(loss=keras.losses.categorical_crossentropy,
                      # `lr=` is deprecated and removed in Keras 3 (TF >= 2.16);
                      # `learning_rate=` works across all TF2 releases.
                      optimizer=keras.optimizers.Adam(learning_rate=self.lr),
                      metrics=['accuracy'])
        # self.callback = [keras.callbacks.LearningRateScheduler(scheduler)]
        return model
class MNIST01Model(Model):
    """Small CNN for the binary MNIST 0-vs-1 task."""

    def __init__(self, n_features, n_classes, lr=1e-3):
        super(MNIST01Model, self).__init__(n_features, n_classes, lr)

    def build_model(self):
        model = Sequential([
            Conv2D(16, kernel_size=(5, 5), activation='relu',
                   input_shape=self.input_shape),
            MaxPooling2D(pool_size=(2, 2), strides=2),
            Conv2D(32, (5, 5), activation='relu'),
            MaxPooling2D(pool_size=(2, 2), strides=2),
            Flatten(),
            Dense(128, activation='relu'),
            Dense(self.n_classes, activation='softmax'),
        ])
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(lr=self.lr),
                      metrics=['accuracy'])
        return model
class MNIST17Model(Model):
    """Fully connected network with dropout for the MNIST 1-vs-7 task."""

    def __init__(self, n_features, n_classes, lr=1e-3):
        super(MNIST17Model, self).__init__(n_features, n_classes, lr)

    def build_model(self):
        model = Sequential([
            Flatten(input_shape=self.input_shape),
            Dense(256, activation='relu'),
            Dropout(0.5),
            Dense(128, activation='relu'),
            Dropout(0.5),
            Dense(self.n_classes, activation='softmax'),
        ])
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(lr=self.lr),
                      metrics=['accuracy'])
        return model
class FMNISTModel(Model):
    """Deeper CNN for 10-class Fashion-MNIST with heavier dropout."""

    def __init__(self, n_features, n_classes, lr=1e-3):
        super(FMNISTModel, self).__init__(n_features, n_classes, lr)

    def build_model(self):
        reg = l2(1e-3)
        model = Sequential([
            Conv2D(16, kernel_size=(3, 3), activation='relu',
                   input_shape=self.input_shape),
            Conv2D(32, (3, 3), activation='relu'),
            MaxPooling2D(pool_size=(2, 2), strides=2),
            Conv2D(64, (3, 3), activation='relu'),
            MaxPooling2D(pool_size=(2, 2), strides=2),
            Flatten(),
            Dropout(0.6),
            Dense(128, activation='relu', kernel_regularizer=reg,
                  bias_regularizer=reg),
            Dropout(0.5),
            Dense(self.n_classes, activation='softmax'),
        ])
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(lr=self.lr),
                      metrics=['accuracy'])
        return model
| 4,251 | 38.37037 | 94 | py |
defend_framework | defend_framework-main/models/MNISTLiRPAModel.py | from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from models.LiRPAModel import LiRPAModel
class MNISTModel(LiRPAModel):
    """LiRPA-backed MLP classifier for MNIST."""

    def __init__(self, n_features, n_classes, args, device, lr=1e-3):
        super(MNISTModel, self).__init__(n_features, n_classes, args, device, mlp_3layer, lr)

    @staticmethod
    def _to_nchw(images):
        # Convert channels-last (N, H, W, C) arrays to channels-first.
        return np.transpose(images, (0, 3, 1, 2))

    def fit(self, X, y, batch_size, epochs, dummy=None):
        def data_aug(batch):
            # Random 28x28 crop with 3px padding, then a random rotation
            # of up to +/-10 degrees.
            batch = transforms.RandomCrop(28, 3)(batch)
            return transforms.RandomRotation(10)(batch)

        super(MNISTModel, self).fit(self._to_nchw(X), y, batch_size, epochs, data_aug)

    def evaluate(self, x_test, y_test):
        return super(MNISTModel, self).evaluate(self._to_nchw(x_test), y_test)
class mlp_3layer(nn.Module):
    """Three-layer fully connected ReLU network with a 10-way logit head."""

    def __init__(self, in_ch, in_dim, width=1):
        super(mlp_3layer, self).__init__()
        # `width` scales the two hidden layers (256 and 128 units at width=1).
        self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
        self.fc2 = nn.Linear(256 * width, 128 * width)
        self.fc3 = nn.Linear(128 * width, 10)

    def forward(self, x):
        # Flatten all non-batch axes; ReLU between hidden layers.
        out = x.view(x.size(0), -1)
        for hidden in (self.fc1, self.fc2):
            out = F.relu(hidden(out))
        return self.fc3(out)
| 1,294 | 30.585366 | 93 | py |
defend_framework | defend_framework-main/models/Model.py | from abc import ABC, abstractmethod
import os
import numpy as np
from tensorflow.keras.models import load_model
class Model(ABC):
    """Abstract wrapper around a compiled Keras classifier.

    Subclasses implement ``build_model``; this base class provides saving,
    loading, re-initialisation, training, and evaluation.
    """

    def __init__(self, input_shape, n_classes, lr):
        self.input_shape = input_shape
        self.n_classes = n_classes
        self.lr = lr
        # Optional list of Keras callbacks, set by build_model() in subclasses.
        self.callback = None
        self.model = self.build_model()

    @abstractmethod
    def build_model(self):
        """Construct and compile the underlying Keras model."""
        pass

    def init(self):
        # Rebuild from scratch, discarding any trained weights.
        self.model = self.build_model()

    def save(self, save_path, file_name='mnist_nn'):
        self.model.save(os.path.join(save_path, file_name + '.h5'))

    def load(self, save_path, file_name):
        self.model = load_model(os.path.join(save_path, file_name + '.h5'))

    def fit_generator(self, datagen, epochs):
        self.model.fit(datagen, epochs=epochs, verbose=0, workers=4,
                       callbacks=self.callback)

    def fit(self, X, y, batch_size, epochs):
        self.model.fit(X, y, batch_size=batch_size, epochs=epochs, verbose=0,
                       workers=4, callbacks=self.callback)

    def evaluate(self, x_test, y_test):
        """Print test loss/accuracy and return the predicted class labels."""
        score = self.model.evaluate(x_test, y_test, verbose=0, batch_size=512)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])
        return np.argmax(self.model.predict(x_test), axis=1)
| 1,351 | 30.44186 | 113 | py |
defend_framework | defend_framework-main/models/IMDBTransformerModel.py | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from models.Model import Model
class TransformerBlock(layers.Layer):
    """Post-norm Transformer encoder block: self-attention + feed-forward."""

    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
        super(TransformerBlock, self).__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.ffn = keras.Sequential([
            layers.Dense(ff_dim, activation="relu"),
            layers.Dense(embed_dim),
        ])
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training):
        # Self-attention sub-layer with residual connection + layer norm.
        attended = self.dropout1(self.att(inputs, inputs), training=training)
        normed = self.layernorm1(inputs + attended)
        # Position-wise feed-forward sub-layer, also residual + norm.
        transformed = self.dropout2(self.ffn(normed), training=training)
        return self.layernorm2(normed + transformed)
class TokenAndPositionEmbedding(layers.Layer):
    """Sum of a learned token embedding and a learned position embedding.

    NOTE(review): the vocabulary size is hard-coded to 10000 — confirm this
    matches the tokenizer used upstream.
    """

    def __init__(self, maxlen, embed_dim):
        super(TokenAndPositionEmbedding, self).__init__()
        self.token_emb = layers.Embedding(input_dim=10000, output_dim=embed_dim)
        self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)

    def call(self, x):
        # Position ids 0..seq_len-1, embedded and broadcast over the batch.
        seq_len = tf.shape(x)[-1]
        position_ids = tf.range(start=0, limit=seq_len, delta=1)
        return self.token_emb(x) + self.pos_emb(position_ids)
class IMDBTransformerModel(Model):
    """Small Transformer classifier for binary IMDB sentiment."""

    def __init__(self, maxlen, n_classes, lr=1e-3):
        super(IMDBTransformerModel, self).__init__(maxlen, n_classes, lr)

    def build_model(self):
        embed_dim, num_heads, ff_dim = 50, 2, 100

        inputs = layers.Input(shape=(self.input_shape,))
        x = TokenAndPositionEmbedding(self.input_shape, embed_dim)(inputs)
        x = TransformerBlock(embed_dim, num_heads, ff_dim)(x)
        # Pool over the sequence axis, then a small dropout-regularised head.
        x = layers.GlobalAveragePooling1D()(x)
        x = layers.Dropout(0.1)(x)
        x = layers.Dense(20, activation="relu")(x)
        x = layers.Dropout(0.1)(x)
        outputs = layers.Dense(2, activation="softmax")(x)

        model = keras.Model(inputs=inputs, outputs=outputs)
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(lr=self.lr),
                      metrics=['accuracy'])
        return model
| 2,535 | 38.015385 | 84 | py |
defend_framework | defend_framework-main/models/EmberModel.py | from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Input, Dropout, BatchNormalization, Activation
from tensorflow.keras.layers import Conv2D, AveragePooling2D
from tensorflow.keras.regularizers import l2
from models.Model import Model
class EmberModel(Model):
    """Fully connected classifier for the EMBER malware dataset.

    Three L2-regularised ReLU hidden layers (2000/1000/100) with dropout,
    trained with Adam and a step learning-rate decay (x0.9 every 10 epochs).
    """

    def __init__(self, n_features, n_classes, lr=1e-3):
        super(EmberModel, self).__init__(n_features, n_classes, lr)

    def build_model(self):
        def scheduler(epoch, lr):
            # Multiply the learning rate by 0.9 every 10 epochs.
            if epoch > 0 and epoch % 10 == 0:
                lr *= 0.9
            return lr

        reg = l2(1e-5)
        model = Sequential()
        model.add(Input(shape=(self.input_shape,)))
        model.add(Dense(2000, activation='relu', kernel_regularizer=reg, bias_regularizer=reg))
        model.add(Dropout(0.25))
        model.add(Dense(1000, activation='relu', kernel_regularizer=reg, bias_regularizer=reg))
        model.add(Dropout(0.25))
        model.add(Dense(100, activation='relu', kernel_regularizer=reg, bias_regularizer=reg))
        model.add(Dropout(0.25))
        model.add(Dense(2, activation='softmax'))

        # NOTE: an SGD-with-momentum optimizer used to be constructed here but
        # was never passed to compile(); it has been removed as dead code —
        # Adam below is what actually trains the model.
        self.callback = [keras.callbacks.LearningRateScheduler(scheduler)]
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(lr=self.lr),
                      metrics=['accuracy'],
                      )
        return model
| 2,009 | 36.924528 | 95 | py |
defend_framework | defend_framework-main/models/CIFAR10Model.py | from tensorflow import keras
from tensorflow.keras.layers import BatchNormalization, Activation, Conv2D, Input, AveragePooling2D, Flatten, Dense
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam
import numpy as np
from tensorflow.keras.callbacks import LearningRateScheduler, ReduceLROnPlateau
from models.Model import Model
def lr_schedule(epoch):
    """Piecewise-constant learning-rate schedule for CIFAR-10 training.

    Starts at 1e-3 and decays after epochs 80, 120, 160, and 180; intended
    to be called once per epoch via a Keras LearningRateScheduler callback.

    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    base_lr = 1e-3
    # (threshold, multiplier) pairs checked from the largest epoch down;
    # only the first matching threshold applies.
    decay_steps = ((180, 0.5e-3), (160, 1e-3), (120, 1e-2), (80, 1e-1))
    for threshold, factor in decay_steps:
        if epoch > threshold:
            return base_lr * factor
    return base_lr
def resnet_layer(inputs,
                 num_filters=16,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True):
    """2D Convolution-Batch Normalization-Activation stack builder

    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)

    # Returns
        x (tensor): tensor as input to the next layer
    """
    conv = Conv2D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(1e-4))

    def bn_act(tensor):
        # Optional batch normalization followed by optional activation.
        if batch_normalization:
            tensor = BatchNormalization()(tensor)
        if activation is not None:
            tensor = Activation(activation)(tensor)
        return tensor

    if conv_first:
        return bn_act(conv(inputs))
    return conv(bn_act(inputs))
def resnet_v1(input_shape, depth, num_classes=10):
    """ResNet Version 1 Model builder [a]
    Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
    Last ReLU is after the shortcut connection.
    At the beginning of each stage, the feature map size is halved (downsampled)
    by a convolutional layer with strides=2, while the number of filters is
    doubled. Within each stage, the layers have the same number of filters and
    the same feature-map sizes.
    Features maps sizes:
    stage 0: 32x32, 16
    stage 1: 16x16, 32
    stage 2: 8x8, 64
    The Number of parameters is approx the same as Table 6 of [a]:
    ResNet20 0.27M
    ResNet32 0.46M
    ResNet44 0.66M
    ResNet56 0.85M
    ResNet110 1.7M
    # Arguments
        input_shape (tensor): shape of input image tensor
        depth (int): number of core convolutional layers
        num_classes (int): number of classes (CIFAR10 has 10)
    # Returns
        model (Model): Keras model instance
    """
    if (depth - 2) % 6 != 0:
        raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
    # Start model definition.
    num_filters = 16
    # Each stage holds num_res_blocks residual units of 2 conv layers each.
    num_res_blocks = int((depth - 2) / 6)
    inputs = Input(shape=input_shape)
    x = resnet_layer(inputs=inputs)
    # Instantiate the stack of residual units (3 stages).
    for stack in range(3):
        for res_block in range(num_res_blocks):
            strides = 1
            if stack > 0 and res_block == 0:  # first layer but not first stack
                strides = 2  # downsample
            # Residual branch: conv-BN-ReLU followed by conv-BN (no activation;
            # the ReLU is applied after the shortcut addition below).
            y = resnet_layer(inputs=x,
                             num_filters=num_filters,
                             strides=strides)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters,
                             activation=None)
            if stack > 0 and res_block == 0:  # first layer but not first stack
                # linear projection residual shortcut connection to match
                # changed dims (1x1 conv, no BN/activation)
                x = resnet_layer(inputs=x,
                                 num_filters=num_filters,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=None,
                                 batch_normalization=False)
            x = keras.layers.add([x, y])
            x = Activation('relu')(x)
        num_filters *= 2  # double the filter count between stages
    # Add classifier on top.
    # v1 does not use BN after last shortcut connection-ReLU
    x = AveragePooling2D(pool_size=8)(x)
    y = Flatten()(x)
    outputs = Dense(num_classes,
                    activation='softmax',
                    kernel_initializer='he_normal')(y)
    # Instantiate model.
    model = keras.Model(inputs=inputs, outputs=outputs)
    return model
class CIFAR10Model(Model):
    """ResNet20-v1 classifier for CIFAR-10."""

    def __init__(self, n_features, n_classes, lr=1e-3):
        super(CIFAR10Model, self).__init__(n_features, n_classes, lr)

    def build_model(self):
        model = resnet_v1(self.input_shape, 20, num_classes=self.n_classes)
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=lr_schedule(0)),
                      metrics=['accuracy'])
        # Decay on the fixed schedule, plus an extra reduction whenever the
        # training loss plateaus.
        self.callback = [
            ReduceLROnPlateau(factor=np.sqrt(0.1),
                              monitor='loss',
                              cooldown=0,
                              patience=5,
                              min_lr=0.5e-6),
            LearningRateScheduler(lr_schedule),
        ]
        return model
| 6,009 | 33.342857 | 115 | py |
defend_framework | defend_framework-main/utils/dataaug.py | from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
import numpy as np
import torch
from torchvision import transforms
def DataGeneratorForMNIST():
    """Return a Keras ImageDataGenerator with mild MNIST augmentation.

    Only small random rotations (10 degrees) and 10% width/height shifts are
    enabled; every normalisation, whitening and flipping option is explicitly
    switched off.
    """
    config = dict(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        zca_epsilon=1e-06,
        rotation_range=10,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.0,
        zoom_range=0.0,
        channel_shift_range=0.,
        fill_mode='nearest',
        cval=0.,
        horizontal_flip=False,
        vertical_flip=False,
        rescale=None,
        preprocessing_function=None,
        data_format="channels_last",
        validation_split=0.0,
    )
    return ImageDataGenerator(**config)
class DataGenerator(keras.utils.Sequence):
    """Minimal shuffling batch generator over in-memory arrays."""

    def __init__(self, X, y, batch_size, shuffle=True):
        self.batch_size = batch_size
        self.X = X
        self.y = y
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        # Number of batches per epoch; the last batch may be partial.
        return int(np.ceil(len(self.X) / self.batch_size))

    def __getitem__(self, index):
        # Slice the (possibly shuffled) index array for this batch.
        start = index * self.batch_size
        batch_ids = self.indexes[start:start + self.batch_size]
        return self.X[batch_ids], self.y[batch_ids]

    def on_epoch_end(self):
        # Reshuffle the sample order at the end of every epoch.
        self.indexes = np.arange(len(self.X))
        if self.shuffle:
            np.random.shuffle(self.indexes)
class MNISTDataGenerator(DataGenerator):
    """MNIST batch generator that stacks augmented copies onto each batch."""

    def __init__(self, X, y, batch_size, data_processor, no_eval_noise, shuffle=True):
        super(MNISTDataGenerator, self).__init__(X, y, batch_size, shuffle=shuffle)
        self.data_processor = data_processor
        self.no_eval_noise = no_eval_noise

    def __getitem__(self, index):
        X, y = super(MNISTDataGenerator, self).__getitem__(index)
        # Torch transforms operate on channels-first tensors.
        batch = torch.Tensor(np.transpose(X, (0, 3, 1, 2)))
        batch = transforms.RandomCrop(28, 2)(batch)
        batch = transforms.RandomRotation(10)(batch)
        augmented = np.transpose(batch.numpy(), (0, 2, 3, 1))
        if self.no_eval_noise:
            # Augmented copy + clean copy.
            return np.vstack([augmented, X]), np.vstack([y, y])
        # Smoothing-noise copy + augmented copy + clean copy.
        return (np.vstack([self.data_processor.noise_data(X), augmented, X]),
                np.vstack([y, y, y]))
class CIFARDataGenerator(DataGenerator):
    """CIFAR batch generator that stacks augmented copies onto each batch."""

    def __init__(self, X, y, batch_size, data_processor, no_eval_noise, shuffle=True):
        super(CIFARDataGenerator, self).__init__(X, y, batch_size, shuffle=shuffle)
        self.data_processor = data_processor
        self.no_eval_noise = no_eval_noise

    def __getitem__(self, index):
        X, y = super(CIFARDataGenerator, self).__getitem__(index)
        # Torch transforms operate on channels-first tensors.
        batch = torch.Tensor(np.transpose(X, (0, 3, 1, 2)))
        batch = transforms.RandomCrop(32, 3)(batch)
        batch = transforms.RandomRotation(10)(batch)
        augmented = np.transpose(batch.numpy(), (0, 2, 3, 1))
        if self.no_eval_noise:
            # Augmented copy + clean copy.
            return np.vstack([augmented, X]), np.vstack([y, y])
        # Smoothing-noise copy + augmented copy + clean copy.
        return (np.vstack([self.data_processor.noise_data(X), augmented, X]),
                np.vstack([y, y, y]))
class EmberDataGenerator(DataGenerator):
    """EMBER batch generator that stacks Gaussian-jittered copies onto each batch."""

    def __init__(self, X, y, batch_size, data_processor, no_eval_noise, shuffle=True):
        super(EmberDataGenerator, self).__init__(X, y, batch_size, shuffle=shuffle)
        self.data_processor = data_processor
        self.no_eval_noise = no_eval_noise

    def __getitem__(self, index):
        X, y = super(EmberDataGenerator, self).__getitem__(index)
        # Additive Gaussian jitter (sigma = 0.1) as a lightweight augmentation.
        jittered = X + np.random.normal(0, 0.1, X.shape)
        if self.no_eval_noise:
            # Jittered copy + clean copy.
            return np.vstack([jittered, X]), np.vstack([y, y])
        # Smoothing-noise copy + jittered copy + clean copy.
        return (np.vstack([self.data_processor.noise_data(X), jittered, X]),
                np.vstack([y, y, y]))
| 5,425 | 38.318841 | 119 | py |
defend_framework | defend_framework-main/utils/data_processing.py | import os
import pickle
import numpy as np
from tensorflow.keras.datasets import mnist, imdb, cifar10, fashion_mnist
from tensorflow import keras
import ember
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, KBinsDiscretizer, MinMaxScaler
from utils.ember_feature_utils import load_features
from utils import EMBER_DATASET, FEATURE_DATASET, LANGUAGE_DATASET, CONTAGIO_DATASET
class DataProcessor:
    """Applies bagging-style subset selection and/or randomised noise to
    training and test data.

    Used to build the per-model training sets and smoothed test inputs for
    the certified-defence pipeline; each call to ``process_train`` draws a
    fresh random bag/noise realisation from the stored (X, y).
    """

    def __init__(self, X, y, select_strategy=None, k=None, noise_strategy=None, dataset=None, **kwargs):
        """
        The initializer of data processor
        :param X: the training data (features)
        :param y: the training labels
        :param select_strategy: ["bagging_replace", "bagging_wo_replace", "binomial", "DPA"]
            bagging_replace: bagging with replacement (the original bagging paper)
            bagging_wo_replace: bagging without replacement
            binomial: select each instance with probability p = k / |X|
            DPA: disjoint partitions of size k over a fixed random shuffle
        :param k: the size of the (expected) bag
        :param noise_strategy: ["feature_flipping", "label_flipping", "all_flipping",
            "RAB_gaussian", "RAB_uniform", "sentence_select"]
            feature_flipping / label_flipping: each feature / label remains with alpha, flipped with 1 - alpha
            RAB_gaussian: add gaussian noise of mu=0, sigma
            RAB_uniform: add uniform noise of U[a, b]
            sentence_select: keep l random token positions per sentence
        :param dataset: the name of the dataset
        :param kwargs: the parameters for each noise strategy
        """
        self.select_strategy = select_strategy
        self.noise_strategy = noise_strategy
        self.dataset = dataset
        self.X = X
        self.y = y
        # Index of the next DPA partition handed out by process_train.
        self.DPA_partition_cnt = 0
        if select_strategy is not None:
            assert select_strategy in ["bagging_replace", "bagging_wo_replace", "binomial", "DPA"]
            assert 0 < k <= len(X)
            self.k = k
            if select_strategy == "DPA":
                # One fixed shuffle defines the disjoint partitions.
                self.ids = np.arange(self.X.shape[0])
                np.random.shuffle(self.ids)
                # DPA needs enough data for N disjoint bags and no added noise.
                assert self.k * kwargs["N"] <= self.X.shape[0] and noise_strategy is None
                if dataset in EMBER_DATASET:
                    self.minmax = MinMaxScaler()
                    self.minmax.fit(self.X)
        if noise_strategy is not None:
            assert noise_strategy in ["feature_flipping", "label_flipping", "all_flipping", "RAB_gaussian",
                                      "RAB_uniform", "sentence_select"]
            if dataset in FEATURE_DATASET:
                if noise_strategy in ["feature_flipping", "label_flipping", "all_flipping"]:
                    self.K = kwargs["K"]
                    self.alpha = kwargs["alpha"]
                    self.test_alpha = kwargs["test_alpha"]
                    if self.test_alpha is None:
                        # Default: same keep-probability at test time as training.
                        self.test_alpha = self.alpha
                    if noise_strategy in ["feature_flipping", "all_flipping"]:
                        if dataset in EMBER_DATASET or dataset in CONTAGIO_DATASET:
                            # Discretise continuous features into K+1 uniform bins
                            # so they can be flipped like categorical levels.
                            self.kbin = KBinsDiscretizer(n_bins=self.K + 1, strategy='uniform', encode='ordinal')
                            self.kbin.fit(self.X)
                            if dataset == "ember_limited":
                                # Only the 'feasible' feature subset may be flipped.
                                self.limit_id, _, _, _ = load_features(False)
                                self.limit_id = self.limit_id['feasible']
                                # NOTE(review): np.bool is removed in NumPy >= 1.24 —
                                # use the builtin bool if this ever breaks.
                                self.limit_mask = np.zeros_like(self.X[0]).astype(np.bool)
                                self.limit_mask[self.limit_id] = True
                        else:
                            # Image-style datasets must already be in [0, 1].
                            assert (self.X >= 0).all() and (self.X <= 1).all()
                    if noise_strategy in ["label_flipping", "all_flipping"]:
                        assert (self.y >= 0).all() and (self.y <= self.K).all()
                elif noise_strategy == "RAB_gaussian":
                    self.sigma = kwargs["sigma"]
                    self.minmax = MinMaxScaler()
                    self.minmax.fit(self.X)
                elif noise_strategy == "RAB_uniform":
                    self.a = kwargs["a"]
                    self.b = kwargs["b"]
                    self.minmax = MinMaxScaler()
                    self.minmax.fit(self.X)
                else:
                    raise NotImplementedError
            elif dataset in LANGUAGE_DATASET:
                assert noise_strategy in ["sentence_select", "label_flipping", "all_flipping"]
                if noise_strategy in ["sentence_select", "all_flipping"]:
                    self.l = kwargs["l"]
                if noise_strategy in ["label_flipping", "all_flipping"]:
                    self.K = kwargs["K"]
                    self.alpha = kwargs["alpha"]
                    assert (self.y >= 0).all() and (self.y <= self.K).all()
            else:
                raise NotImplementedError

    def noise_data(self, ret_X, alpha=None, shape=None):
        """Flip discretised features: each entry is kept with probability
        `alpha`, otherwise shifted by a random multiple of 1/K and wrapped
        back into [0, 1]. `shape` controls the noise granularity (e.g. one
        draw per pixel shared across channels)."""
        if alpha is None:
            alpha = self.alpha
        if shape is None:
            shape = ret_X.shape
        mask = np.random.random(shape) < alpha
        delta = np.random.randint(1, self.K + 1, shape) / self.K
        ret_X = ret_X + (1 - mask) * delta
        # Wrap values that overflowed past 1 back into range (modular flip).
        ret_X[ret_X > 1 + 1e-4] -= (1 + self.K) / self.K
        return ret_X

    def process_train(self, key_dict):
        """Draw one randomised training set: subset selection, feature/label
        noise, then dataset-specific post-processing. `key_dict` is a shared
        token-remapping table mutated in place for the imdb dataset."""
        ret_X = self.X.copy()
        ret_y = self.y.copy()  # make sure the original data is not modified
        if self.select_strategy is not None:
            if self.select_strategy in ["bagging_replace", "bagging_wo_replace"]:
                indices = np.random.choice(np.arange(len(self.X)), self.k,
                                           replace=self.select_strategy == "bagging_replace")
                ret_X = ret_X[indices]
                ret_y = ret_y[indices]
            elif self.select_strategy == "binomial":
                # Each instance kept independently with probability k / |X|.
                pred = np.random.random(len(self.X)) * len(self.X) < self.k
                ret_X = ret_X[pred]
                ret_y = ret_y[pred]
            elif self.select_strategy == "DPA":
                # Hand out the next disjoint partition of the fixed shuffle.
                ids = self.ids[self.DPA_partition_cnt * self.k:(self.DPA_partition_cnt + 1) * self.k]
                ret_X = ret_X[ids]
                ret_y = ret_y[ids]
        if self.dataset in EMBER_DATASET:
            # NOTE(review): self.minmax is only fitted in the DPA / RAB paths —
            # confirm other ember configurations always take one of those.
            ret_X = self.minmax.transform(ret_X)
        if self.noise_strategy is not None:
            if self.dataset in FEATURE_DATASET:
                if self.noise_strategy in ["feature_flipping", "all_flipping"]:
                    if self.dataset in EMBER_DATASET or self.dataset in CONTAGIO_DATASET:
                        categorized = self.kbin.transform(ret_X) / self.K
                        if self.dataset == "ember_limited":
                            # Only discretise the flippable feature subset.
                            ret_X[:, self.limit_id] = categorized[:, self.limit_id]
                        else:
                            ret_X = categorized
                    pre_ret_X = ret_X
                    if self.dataset == "cifar10-02":
                        # One flip decision per pixel, shared across channels.
                        ret_X = self.noise_data(ret_X, shape=list(ret_X.shape[:-1]) + [1])
                    else:
                        ret_X = self.noise_data(ret_X)
                    if self.dataset == "ember_limited":  # protect other features
                        ret_X = ret_X * self.limit_mask + pre_ret_X * (1 - self.limit_mask)
                if self.noise_strategy in ["label_flipping", "all_flipping"]:
                    # Keep each label with prob alpha, else shift mod K+1.
                    mask = np.random.random(ret_y.shape) < self.alpha
                    delta = np.random.randint(1, self.K + 1, ret_y.shape)
                    ret_y = ret_y * mask + (1 - mask) * (ret_y + delta)
                    ret_y[ret_y > self.K] -= self.K + 1
                if self.noise_strategy == "RAB_gaussian":
                    ret_X = self.minmax.transform(ret_X)
                    ret_X += np.random.normal(0, self.sigma, ret_X.shape)
                if self.noise_strategy == "RAB_uniform":
                    ret_X = self.minmax.transform(ret_X)
                    ret_X += np.random.uniform(self.a, self.b, ret_X.shape)
            elif self.dataset in LANGUAGE_DATASET:
                if self.noise_strategy in ["sentence_select", "all_flipping"]:
                    # Keep l random token positions (in order), zero-pad the rest.
                    maxlen = ret_X.shape[1]
                    ret_X_new = []
                    for x in ret_X:
                        indices = sorted(np.random.choice(np.arange(maxlen), self.l, replace=False))
                        ret_X_new.append(
                            np.pad(x[indices], (0, maxlen - self.l), 'constant', constant_values=(0, 0)))
                    ret_X = np.array(ret_X_new)
                if self.noise_strategy in ["label_flipping", "all_flipping"]:
                    mask = np.random.random(ret_y.shape) < self.alpha
                    delta = np.random.randint(1, self.K + 1, ret_y.shape)
                    ret_y = ret_y * mask + (1 - mask) * (ret_y + delta)
                    ret_y[ret_y > self.K] -= self.K + 1
        if (self.noise_strategy is not None or self.select_strategy is not None) and self.dataset == "imdb":
            # Remap token ids to a dense per-run vocabulary, mutating both
            # ret_X and the shared key_dict in place.
            for x in ret_X:
                for i in range(len(x)):
                    if x[i] not in key_dict:
                        key_dict[x[i]] = len(key_dict)
                    x[i] = key_dict[x[i]]
        if self.dataset in EMBER_DATASET and self.noise_strategy is None:
            # Fit the standardiser on this bag; process_test reuses it.
            self.normal = StandardScaler()
            ret_X = self.normal.fit_transform(ret_X)
        return ret_X, ret_y

    def process_test(self, X, fix_noise):
        """Apply test-time noise to X. With fix_noise=True a single noise
        realisation is drawn and shared across all examples; otherwise each
        example gets independent noise."""
        ret_X = X.copy()
        if fix_noise:
            if self.noise_strategy is not None:
                if self.dataset in FEATURE_DATASET:
                    if self.noise_strategy in ["feature_flipping", "all_flipping"]:
                        pre_ret_X = ret_X
                        if self.dataset == "cifar10-02":
                            ret_X = self.noise_data(ret_X, alpha=self.test_alpha, shape=list(ret_X.shape[1:-1]) + [1])
                        else:
                            # shape drops the batch axis: one pattern for all examples.
                            ret_X = self.noise_data(ret_X, alpha=self.test_alpha, shape=ret_X.shape[1:])
                        if self.dataset == "ember_limited":  # protect other features
                            ret_X = ret_X * self.limit_mask + pre_ret_X * (1 - self.limit_mask)
                    if self.noise_strategy == "RAB_gaussian":
                        ret_X += np.random.normal(0, self.sigma, ret_X.shape[1:])  # fix the noise for each example
                    if self.noise_strategy == "RAB_uniform":
                        ret_X += np.random.uniform(self.a, self.b, ret_X.shape[1:])  # fix the noise for each example
                elif self.dataset in LANGUAGE_DATASET:
                    if self.noise_strategy in ["sentence_select", "all_flipping"]:
                        maxlen = ret_X.shape[1]
                        ret_X_new = np.zeros_like(ret_X)
                        indices = sorted(
                            np.random.choice(np.arange(maxlen), self.l,
                                             replace=False))  # fix the noise for each example
                        ret_X_new[:, :self.l] = ret_X[:, indices]
                        ret_X = ret_X_new
        else:
            if self.noise_strategy is not None:
                if self.dataset in FEATURE_DATASET:
                    if self.noise_strategy in ["feature_flipping", "all_flipping"]:
                        pre_ret_X = ret_X
                        if self.dataset == "cifar10-02":
                            ret_X = self.noise_data(ret_X, alpha=self.test_alpha, shape=list(ret_X.shape[:-1]) + [1])
                        else:
                            ret_X = self.noise_data(ret_X, alpha=self.test_alpha)
                        if self.dataset == "ember_limited":  # protect other features
                            ret_X = ret_X * self.limit_mask + pre_ret_X * (1 - self.limit_mask)
                    if self.noise_strategy == "RAB_gaussian":
                        ret_X += np.random.normal(0, self.sigma, ret_X.shape)
                    if self.noise_strategy == "RAB_uniform":
                        ret_X += np.random.uniform(self.a, self.b, ret_X.shape)
                elif self.dataset in LANGUAGE_DATASET:
                    if self.noise_strategy in ["sentence_select", "all_flipping"]:
                        maxlen = ret_X.shape[1]
                        ret_X_new = []
                        for x in ret_X:
                            indices = sorted(np.random.choice(np.arange(maxlen), self.l, replace=False))
                            ret_X_new.append(
                                np.pad(x[indices], (0, maxlen - self.l), 'constant', constant_values=(0, 0)))
                        ret_X = np.array(ret_X_new)
        if self.dataset in EMBER_DATASET and self.noise_strategy is None:
            # Reuse the StandardScaler fitted during process_train.
            ret_X = self.normal.transform(ret_X)
        return ret_X
class DataPreprocessor:
    """Base class for dataset-specific loaders that attach a DataProcessor."""

    def __init__(self):
        pass

    @classmethod
    def load(cls, filename, args):
        """Restore a pickled attack object and rebuild its data processor."""
        with open(filename, "rb") as f:
            # NOTE(review): pickle.load on experiment files only — never on
            # untrusted input.
            attack = pickle.load(f)
        this = attack.data_processor
        this.attack = attack
        this.data_processor = DataPreprocessor.build_processor(this.x_train, this.y_train, args)
        return this

    @staticmethod
    def build_processor(x_train, y_train, args):
        """Construct a DataProcessor from the parsed command-line namespace."""
        options = dict(select_strategy=args.select_strategy, k=args.k,
                       noise_strategy=args.noise_strategy, dataset=args.dataset,
                       K=args.K, alpha=args.alpha, test_alpha=args.test_alpha,
                       sigma=args.sigma, a=args.a, b=args.b,
                       l=args.l, N=args.N)
        return DataProcessor(x_train, y_train, **options)
class MNIST17DataPreprocessor(DataPreprocessor):
    """MNIST restricted to digits 1 and 7 as a binary task (1 -> 0, 7 -> 1)."""

    def __init__(self, args):
        super(MNIST17DataPreprocessor, self).__init__()
        # input image dimensions
        img_rows, img_cols = 28, 28
        self.n_classes = 2
        self.n_features = (img_rows, img_cols, 1)

        (x_train, y_train), (x_test, y_test) = mnist.load_data()

        def keep_1_and_7(x, y):
            # Restrict to digits {1, 7}; binary label is "digit > 1".
            mask = (y == 1) | (y == 7)
            x = x[mask].reshape(-1, img_rows, img_cols, 1).astype('float32') / 255
            return x, y[mask] > 1

        self.x_train, self.y_train = keep_1_and_7(x_train, y_train)
        self.x_test, self.y_test = keep_1_and_7(x_test, y_test)

        if args.K != 1 and args.noise_strategy in ["label_flipping", "all_flipping"]:
            raise NotImplementedError("K != 1 not implemented for MNIST17DataPreprocessor.")
        if args.noise_strategy in ["feature_flipping", "all_flipping"]:
            # Binarise pixels so single-bit feature flipping is well defined.
            self.x_train = self.x_train >= 0.5
            self.x_test = self.x_test >= 0.5

        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', self.x_train.shape, self.y_train.shape)
        print(self.x_train.shape[0], 'train samples')
        print(self.x_test.shape[0], 'test samples')
class MNIST17LimitedDataPreprocessor(DataPreprocessor):
    """MNIST 1-vs-7 task restricted to a random 100-example training set.

    Labels are binarised (1 -> 0, 7 -> 1) and pixels scaled to [0, 1];
    when feature flipping is requested, pixels are further binarised at 0.5.
    """

    def __init__(self, args):
        super(MNIST17LimitedDataPreprocessor, self).__init__()
        # input image dimensions
        img_rows, img_cols = 28, 28
        self.n_classes = 2
        self.n_features = (img_rows, img_cols, 1)

        (x_train, self.y_train), (x_test, self.y_test) = mnist.load_data()
        x_train = x_train[(self.y_train == 1) | (self.y_train == 7)]
        self.y_train = self.y_train[(self.y_train == 1) | (self.y_train == 7)]
        self.y_train = self.y_train > 1
        x_test = x_test[(self.y_test == 1) | (self.y_test == 7)]
        self.y_test = self.y_test[(self.y_test == 1) | (self.y_test == 7)]
        self.y_test = self.y_test > 1
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1).astype('float32')
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1).astype('float32')
        self.x_train = x_train / 255
        self.x_test = x_test / 255

        if args.K != 1 and args.noise_strategy in ["label_flipping", "all_flipping"]:
            # Fixed: the message previously named MNIST17DataPreprocessor.
            raise NotImplementedError("K != 1 not implemented for MNIST17LimitedDataPreprocessor.")
        if args.noise_strategy in ["feature_flipping", "all_flipping"]:
            # Binarise pixels so single-bit feature flipping is well defined.
            self.x_train = self.x_train >= 0.5
            self.x_test = self.x_test >= 0.5

        # Subsample the limited training set: 100 examples, no replacement.
        train_ids = np.random.choice(np.arange(self.x_train.shape[0]), 100, replace=False)
        self.x_train = self.x_train[train_ids]
        self.y_train = self.y_train[train_ids]

        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', self.x_train.shape, self.y_train.shape)
        print(self.x_train.shape[0], 'train samples')
        print(self.x_test.shape[0], 'test samples')
class MNIST01DataPreprocessor(DataPreprocessor):
    """MNIST restricted to digits 0 and 1 as a binary task (0 -> 0, 1 -> 1)."""

    def __init__(self, args):
        super(MNIST01DataPreprocessor, self).__init__()
        # input image dimensions
        img_rows, img_cols = 28, 28
        self.n_classes = 2
        self.n_features = (img_rows, img_cols, 1)

        (x_train, self.y_train), (x_test, self.y_test) = mnist.load_data()
        x_train = x_train[(self.y_train == 0) | (self.y_train == 1)]
        self.y_train = self.y_train[(self.y_train == 0) | (self.y_train == 1)]
        self.y_train = self.y_train > 0
        x_test = x_test[(self.y_test == 0) | (self.y_test == 1)]
        self.y_test = self.y_test[(self.y_test == 0) | (self.y_test == 1)]
        self.y_test = self.y_test > 0
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1).astype('float32')
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1).astype('float32')
        self.x_train = x_train / 255
        self.x_test = x_test / 255

        if args.K != 1 and args.noise_strategy in ["label_flipping", "all_flipping"]:
            # Fixed copy-paste bug: the message previously named
            # MNIST17DataPreprocessor.
            raise NotImplementedError("K != 1 not implemented for MNIST01DataPreprocessor.")
        if args.noise_strategy in ["feature_flipping", "all_flipping"]:
            # Binarise pixels so single-bit feature flipping is well defined.
            self.x_train = self.x_train >= 0.5
            self.x_test = self.x_test >= 0.5

        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', x_train.shape, self.y_train.shape)
        print(x_train.shape[0], 'train samples')
        print(x_test.shape[0], 'test samples')
class MNISTDataPreprocessor(DataPreprocessor):
    """Preprocessor for the full 10-class MNIST dataset.

    Loads MNIST, reshapes images to (28, 28, 1), scales pixels to [0, 1]
    and, when a flipping noise strategy is selected, quantizes features
    into K + 1 discrete levels. Builds the noise processor on the
    training split.
    """

    def __init__(self, args):
        super().__init__()
        # input image dimensions
        height, width = 28, 28
        self.n_classes = 10
        self.n_features = (height, width, 1)
        (raw_train, self.y_train), (raw_test, self.y_test) = mnist.load_data()
        raw_train = raw_train.reshape(raw_train.shape[0], height, width, 1).astype('float32')
        raw_test = raw_test.reshape(raw_test.shape[0], height, width, 1).astype('float32')
        self.x_train = raw_train / 255
        self.x_test = raw_test / 255
        if args.noise_strategy in ["label_flipping", "all_flipping"]:
            assert args.K == 9
        if args.noise_strategy in ["feature_flipping", "all_flipping"]:
            def quantize(a):
                # Map [0, 1] intensities onto K + 1 discrete levels.
                return np.minimum(np.floor(a * (args.K + 1)) / args.K, 1)
            self.x_train = quantize(self.x_train)
            self.x_test = quantize(self.x_test)
        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', raw_train.shape, self.y_train.shape)
        print(raw_train.shape[0], 'train samples')
        print(raw_test.shape[0], 'test samples')
class FMNISTDataPreprocessor(DataPreprocessor):
    """Preprocessor for the 10-class Fashion-MNIST dataset.

    Loads Fashion-MNIST, reshapes images to (28, 28, 1), scales pixels to
    [0, 1] and, when a flipping noise strategy is selected, quantizes
    features into K + 1 discrete levels. Builds the noise processor on
    the training split.
    """

    def __init__(self, args):
        super().__init__()
        # input image dimensions
        height, width = 28, 28
        self.n_classes = 10
        self.n_features = (height, width, 1)
        (raw_train, self.y_train), (raw_test, self.y_test) = fashion_mnist.load_data()
        raw_train = raw_train.reshape(raw_train.shape[0], height, width, 1).astype('float32')
        raw_test = raw_test.reshape(raw_test.shape[0], height, width, 1).astype('float32')
        self.x_train = raw_train / 255
        self.x_test = raw_test / 255
        if args.noise_strategy in ["label_flipping", "all_flipping"]:
            assert args.K == 9
        if args.noise_strategy in ["feature_flipping", "all_flipping"]:
            def quantize(a):
                # Map [0, 1] intensities onto K + 1 discrete levels.
                return np.minimum(np.floor(a * (args.K + 1)) / args.K, 1)
            self.x_train = quantize(self.x_train)
            self.x_test = quantize(self.x_test)
        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', raw_train.shape, self.y_train.shape)
        print(raw_train.shape[0], 'train samples')
        print(raw_test.shape[0], 'test samples')
class CIFARDataPreprocessor(DataPreprocessor):
    """Preprocessor for the full 10-class CIFAR-10 dataset.

    Keeps the native (32, 32, 3) image layout, scales pixels to [0, 1]
    and, when a flipping noise strategy is selected, quantizes features
    into K + 1 discrete levels.
    """

    def __init__(self, args):
        super(CIFARDataPreprocessor, self).__init__()
        # input image dimensions
        img_rows, img_cols = 32, 32
        self.n_classes = 10
        self.n_features = (img_rows, img_cols, 3)
        (x_train, self.y_train), (x_test, self.y_test) = cifar10.load_data()
        # cifar10.load_data returns column-vector labels; flatten to 1-D.
        self.y_test = np.reshape(self.y_test, -1)
        self.y_train = np.reshape(self.y_train, -1)
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        self.x_train = x_train / 255
        self.x_test = x_test / 255
        if args.noise_strategy in ["label_flipping", "all_flipping"]:
            assert args.K == 9
        if args.noise_strategy in ["feature_flipping", "all_flipping"]:
            # Quantize pixel intensities into K + 1 discrete levels in [0, 1].
            self.x_train = np.minimum(np.floor(self.x_train * (args.K + 1)) / args.K, 1)
            self.x_test = np.minimum(np.floor(self.x_test * (args.K + 1)) / args.K, 1)
        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', x_train.shape, self.y_train.shape)
        print(x_train.shape[0], 'train samples')
        print(x_test.shape[0], 'test samples')
class CIFAR02DataPreprocessor(DataPreprocessor):
    """Binary CIFAR-10 preprocessor restricted to classes 0 and 2.

    Keeps only samples of class 0 and class 2 and remaps their labels to
    booleans (False for class 0, True for class 2). Images stay in the
    (32, 32, 3) layout, scaled to [0, 1]; with a flipping noise strategy
    the features are additionally quantized into K + 1 levels.
    """

    def __init__(self, args):
        super(CIFAR02DataPreprocessor, self).__init__()
        # input image dimensions
        img_rows, img_cols = 32, 32
        self.n_classes = 2
        self.n_features = (img_rows, img_cols, 3)
        (x_train, self.y_train), (x_test, self.y_test) = cifar10.load_data()
        # cifar10.load_data returns column-vector labels; flatten to 1-D.
        self.y_test = np.reshape(self.y_test, -1)
        self.y_train = np.reshape(self.y_train, -1)
        # Keep only classes 0 and 2; remap labels to booleans (2 -> True).
        x_train = x_train[(self.y_train == 0) | (self.y_train == 2)]
        self.y_train = self.y_train[(self.y_train == 0) | (self.y_train == 2)]
        self.y_train = self.y_train > 0
        x_test = x_test[(self.y_test == 0) | (self.y_test == 2)]
        self.y_test = self.y_test[(self.y_test == 0) | (self.y_test == 2)]
        self.y_test = self.y_test > 0
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        self.x_train = x_train / 255
        self.x_test = x_test / 255
        if args.noise_strategy in ["label_flipping", "all_flipping"]:
            assert args.K == 9
        if args.noise_strategy in ["feature_flipping", "all_flipping"]:
            # Quantize pixel intensities into K + 1 discrete levels in [0, 1].
            self.x_train = np.minimum(np.floor(self.x_train * (args.K + 1)) / args.K, 1)
            self.x_test = np.minimum(np.floor(self.x_test * (args.K + 1)) / args.K, 1)
        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', x_train.shape, self.y_train.shape)
        print(x_train.shape[0], 'train samples')
        print(x_test.shape[0], 'test samples')
class IMDBDataPreprocessor(DataPreprocessor):
    """Preprocessor for the binary IMDB sentiment dataset.

    Reviews are loaded as integer word-index sequences restricted to the
    most frequent words, then padded/truncated to a fixed length of
    ``args.L`` tokens.
    """

    def __init__(self, args):
        super(IMDBDataPreprocessor, self).__init__()
        vocab_size = 10000 # Only consider the top 10k most frequent words
        self.n_features = args.L # Only consider the first args.L words of each movie review
        self.n_classes = 2
        (x_train, self.y_train), (x_test, self.y_test) = imdb.load_data(num_words=vocab_size)
        # Pad (or truncate) every review to exactly n_features tokens.
        self.x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=self.n_features)
        self.x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=self.n_features)
        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', self.x_train.shape, self.y_train.shape)
        print(self.x_train.shape[0], 'train samples')
        print(self.x_test.shape[0], 'test samples')
class EmberDataPreProcessor(DataPreprocessor):
    """Preprocessor for the EMBER malware dataset (feature version 1).

    Reads the pre-vectorized features from ``args.ember_data_dir``,
    creating them first when the read fails, casts them to float64 and
    drops samples with unknown (-1) labels.
    """

    def __init__(self, args):
        super(EmberDataPreProcessor, self).__init__()
        try:
            x_train, y_train, x_test, y_test = ember.read_vectorized_features(
                args.ember_data_dir,
                feature_version=1
            )
        # Bug fix: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception to keep the best-effort
        # "create then retry" behavior without masking interpreter exits.
        except Exception:
            # Vectorized feature files are missing or unreadable:
            # build them once, then retry the read.
            ember.create_vectorized_features(
                args.ember_data_dir,
                feature_version=1
            )
            x_train, y_train, x_test, y_test = ember.read_vectorized_features(
                args.ember_data_dir,
                feature_version=1
            )
        x_train = x_train.astype(dtype='float64')
        x_test = x_test.astype(dtype='float64')
        if args.K != 1 and args.noise_strategy in ["all_flipping"]:
            raise NotImplementedError("K != 1 not implemented for EmberDataPreProcessor with all_flipping.")
        # Get rid of unknown labels (-1 marks unlabeled samples)
        self.x_train = x_train[y_train != -1]
        self.y_train = y_train[y_train != -1]
        self.x_test = x_test[y_test != -1]
        self.y_test = y_test[y_test != -1]
        self.n_features = x_train.shape[1]
        self.n_classes = 2
        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', self.x_train.shape, self.y_train.shape)
        print(self.x_train.shape[0], 'train samples')
        print(self.x_test.shape[0], 'test samples')
class EmberPoisonDataPreProcessor(DataPreprocessor):
    """Preprocessor for a backdoored (watermarked) EMBER dataset.

    Loads watermarked training features/labels and watermarked test
    features from ``args.load_poison_dir``. All watermarked test samples
    are labeled malicious (1) by construction.
    """

    def __init__(self, args):
        super(EmberPoisonDataPreProcessor, self).__init__()
        self.x_train = np.load(os.path.join(args.load_poison_dir, "watermarked_X.npy"))
        self.y_train = np.load(os.path.join(args.load_poison_dir, "watermarked_y.npy"))
        self.x_test = np.load(os.path.join(args.load_poison_dir, "watermarked_X_test.npy"))
        # Watermarked test samples are all malicious.
        self.y_test = np.ones(self.x_test.shape[0])
        if args.K != 1 and args.noise_strategy in ["all_flipping", "label_flipping"]:
            # Bug fix: the message previously named EmberDataPreProcessor and
            # only mentioned all_flipping, although this guard also covers
            # label_flipping for this class.
            raise NotImplementedError(
                "K != 1 not implemented for EmberPoisonDataPreProcessor "
                "with label_flipping/all_flipping.")
        self.n_features = self.x_train.shape[1]
        self.n_classes = 2
        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', self.x_train.shape, self.y_train.shape)
        print(self.x_train.shape[0], 'train samples')
        print(self.x_test.shape[0], 'test samples')
class ContagioDataPreProcessor(DataPreprocessor):
    """Preprocessor for the Contagio PDF malware dataset.

    Loads pickled feature dictionaries for malicious and benign PDFs from
    ``args.contagio_data_dir``, labels them True/False respectively, and
    performs a 60/40 train/test split (fixed seed 42) per class before
    merging the splits.
    """

    def __init__(self, args):
        super(ContagioDataPreProcessor, self).__init__()
        mw_file = 'ogcontagio_mw.npy'
        gw_file = 'ogcontagio_gw.npy'
        # Load malicious
        # SECURITY NOTE(review): allow_pickle=True deserializes arbitrary
        # objects -- only load these files from a trusted source.
        mw = np.load(
            # os.path.join(constants.SAVE_FILES_DIR, mw_file),
            os.path.join(args.contagio_data_dir, mw_file),
            allow_pickle=True
        ).item()
        # The pickled dict maps filename -> feature vector; transpose so
        # rows are samples.
        mwdf = pd.DataFrame(mw)
        mwdf = mwdf.transpose()
        mwdf['class'] = [True] * mwdf.shape[0]
        mwdf.index.name = 'filename'
        mwdf = mwdf.reset_index()
        train_mw, test_mw = train_test_split(mwdf, test_size=0.4, random_state=42)
        # Load benign
        gw = np.load(
            # os.path.join(constants.SAVE_FILES_DIR, gw_file),
            os.path.join(args.contagio_data_dir, gw_file),
            allow_pickle=True
        ).item()
        gwdf = pd.DataFrame(gw)
        gwdf = gwdf.transpose()
        gwdf['class'] = [False] * gwdf.shape[0]
        gwdf.index.name = 'filename'
        gwdf = gwdf.reset_index()
        train_gw, test_gw = train_test_split(gwdf, test_size=0.4, random_state=42)
        # Merge dataframes
        train_df = pd.concat([train_mw, train_gw])
        test_df = pd.concat([test_mw, test_gw])
        # Transform to numpy
        self.y_train = train_df['class'].to_numpy()
        self.y_test = test_df['class'].to_numpy()
        # x_train_filename = train_df['filename'].to_numpy()
        # x_test_filename = test_df['filename'].to_numpy()
        self.x_train = train_df.drop(columns=['class', 'filename']).to_numpy()
        self.x_test = test_df.drop(columns=['class', 'filename']).to_numpy()
        self.x_train = self.x_train.astype(dtype='float64')
        self.x_test = self.x_test.astype(dtype='float64')
        self.n_features = self.x_train.shape[1]
        self.n_classes = 2
        self.data_processor = self.build_processor(self.x_train, self.y_train, args)
        print('x_train shape:', self.x_train.shape, self.y_train.shape)
        print(self.x_train.shape[0], 'train samples')
        print(self.x_test.shape[0], 'test samples')
| 29,834 | 46.132701 | 118 | py |
defend_framework | defend_framework-main/utils/train_utils.py | import numpy as np
from tensorflow import keras
from tqdm import trange
import os
from utils.dataaug import DataGeneratorForMNIST, MNISTDataGenerator, EmberDataGenerator, CIFARDataGenerator
from utils import EMBER_DATASET, IMAGE_DATASET
def train_many(data_loader, model, args, aggregate_result, aggregate_noise_result):
    """Train an ensemble of base classifiers and aggregate their test votes.

    Each iteration draws a fresh (possibly noised/subsampled) training set
    from the data processor, trains the model from scratch, records its
    predictions on the clean and the noised test set as votes, then resets
    the model. Partial results are checkpointed to disk after every
    classifier so an interrupted run can be resumed (``remaining`` is
    derived from the votes already present in ``aggregate_result``).

    :param data_loader: dataset object exposing x/y splits, n_classes and
        the noise ``data_processor``
    :param model: classifier wrapper supporting fit/fit_generator/evaluate/init
    :param args: parsed command-line arguments
    :param aggregate_result: (np.ndarray or None) running per-sample vote
        counts on clean test data; created here when None
    :param aggregate_noise_result: (np.ndarray or None) running per-sample
        vote counts on noised test data; created here when None
    """
    test_size = data_loader.x_test.shape[0]
    if aggregate_result is None:
        # One column per class plus the ground-truth label in the last
        # column; DPA reserves one extra column for certified predictions.
        # Bug fix: np.int was removed in NumPy 1.24 -- use the builtin int
        # (np.int was a plain alias for it).
        aggregate_result = np.zeros([test_size, data_loader.n_classes + 1 + int(args.select_strategy == "DPA")],
                                    dtype=int)
        aggregate_noise_result = np.zeros([test_size, data_loader.n_classes + 1 + int(args.select_strategy == "DPA")],
                                          dtype=int)
        aggregate_result[np.arange(0, test_size), -1] = data_loader.y_test
        aggregate_noise_result[np.arange(0, test_size), -1] = data_loader.y_test
    remaining = args.N - np.sum(aggregate_result[0, :-1])
    datagen = None
    for _ in trange(remaining):
        key_dict = {0: 0, 1: 1, 2: 2}  # used for imdb dataset to get word idx
        X, y = data_loader.data_processor.process_train(key_dict)
        # using the last index for the ground truth label
        y = keras.utils.to_categorical(y, data_loader.n_classes)
        if args.data_aug:
            if args.dataset in IMAGE_DATASET:
                if args.dataset == "cifar10":
                    datagen = CIFARDataGenerator(X, y, args.batch_size, data_loader.data_processor, args.no_eval_noise)
                else:
                    datagen = MNISTDataGenerator(X, y, args.batch_size, data_loader.data_processor, args.no_eval_noise)
            elif args.dataset in EMBER_DATASET:
                datagen = EmberDataGenerator(X, y, args.batch_size, data_loader.data_processor, args.no_eval_noise)
        y_test = keras.utils.to_categorical(data_loader.y_test, data_loader.n_classes)
        x_test = data_loader.x_test.copy()
        if args.dataset == "imdb":
            # Remap test-set word indices through key_dict; unknown words
            # map to index 2.
            for x in x_test:
                # Bug fix: the index variable previously shadowed the outer
                # loop variable ``i``.
                for pos in range(len(x)):
                    if x[pos] in key_dict:
                        x[pos] = key_dict[x[pos]]
                    else:
                        x[pos] = 2
        elif args.dataset in EMBER_DATASET and args.noise_strategy in ["feature_flipping", "all_flipping"]:
            categorized = data_loader.data_processor.kbin.transform(x_test) / args.K
            if args.dataset == "ember_limited":
                x_test[:, data_loader.data_processor.limit_id] = categorized[:, data_loader.data_processor.limit_id]
            else:
                x_test = categorized
        elif args.noise_strategy in ["RAB_gaussian", "RAB_uniform"] or (
                args.dataset in EMBER_DATASET and args.select_strategy == "DPA"):
            x_test = data_loader.data_processor.minmax.transform(x_test)
        if datagen is not None:
            model.fit_generator(datagen, args.epochs)
        else:
            model.fit(X, y, args.batch_size, args.epochs)
        if args.select_strategy != "DPA":
            if args.dataset in EMBER_DATASET and args.noise_strategy is None:
                prediction_label = model.evaluate(data_loader.data_processor.normal.transform(x_test), y_test)
            else:
                prediction_label = model.evaluate(x_test, y_test)
            aggregate_result[np.arange(0, test_size), prediction_label] += 1
            if args.noise_strategy is None or args.no_eval_noise:
                aggregate_noise_result[np.arange(0, test_size), prediction_label] += 1
            else:
                X_test = data_loader.data_processor.process_test(x_test, args.fix_noise)
                prediction_label = model.evaluate(X_test, y_test)
                aggregate_noise_result[np.arange(0, test_size), prediction_label] += 1
        else:
            # DPA evaluate returns both the plain and the certified label.
            prediction_label, prediction_label_cert = model.evaluate(x_test, y_test)
            aggregate_result[np.arange(0, test_size), prediction_label] += 1
            aggregate_noise_result[np.arange(0, test_size), prediction_label_cert] += 1
        # Reset model weights and checkpoint the aggregated votes.
        model.init()
        np.save(os.path.join(args.res_save_dir, args.exp_name, "aggre_res"), (aggregate_result, aggregate_noise_result))
    print(aggregate_result, aggregate_noise_result)
def train_single(data_loader, model, args):
    """Train one classifier on the full training split (used for attacks).

    :param data_loader: dataset object exposing x_train/y_train/n_classes
    :param model: classifier wrapper with a fit(X, y, batch_size, epochs) API
    :param args: parsed command-line arguments
    """
    # train single classifier for attacking
    model.fit(data_loader.x_train, keras.utils.to_categorical(data_loader.y_train, data_loader.n_classes),
              args.batch_size, args.epochs)
    # model.save(args.model_save_dir)
| 4,535 | 50.545455 | 120 | py |
secml-zoo | secml-zoo-master/models/mnist/mnist_cnn.py | """
.. module:: MNISTCNN
:synopsis: A CNN model for MNIST
.. moduleauthor:: Ambra Demontis <ambra.demontis@diee.unica.it>
"""
from collections import OrderedDict
import torch
from torch import nn, optim
from secml.ml.classifiers import CClassifierPyTorch
class Flatten(nn.Module):
    """Layer that reshapes a batched tensor to (batch, features)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Keep the batch dimension, collapse everything else.
        return x.view(x.size(0), -1)
class MNISTCNN(nn.Module):
    """
    The CNN learned on the MNIST dataset in the paper:
    N. Carlini and D. A. Wagner, "Adversarial examples are not easily
    detected: Bypassing ten detection methods"
    Original code:
    - https://github.com/carlini/nn_breaking_detection/blob/master/setup_mnist.py
    Parameters
    ----------
    num_classes : int
        Number of classes (outputs). Default 10.
    init_strategy : str
        If 'default', use the default initialization strategy
        for all the layers,
        If `fan_out`, use the default init strategy for the
        linear layer and the `kaiming_normal` init strategy with
        the option fan_out for the convolutional layers.
        If `uniform_scaling` use the uniform scaling strategy
        for all the layers
    """

    def __init__(self, num_classes=10, init_strategy='default'):
        nb_filters = 64
        super(MNISTCNN, self).__init__()
        # Three strided/plain conv blocks followed by a 32-unit bottleneck.
        self.features = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv2d(1, nb_filters, kernel_size=5, stride=2)),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv2', nn.Conv2d(in_channels=nb_filters,
                                out_channels=nb_filters,
                                kernel_size=3, stride=2)),
            ('relu2', nn.ReLU(inplace=True)),
            ('conv3', nn.Conv2d(in_channels=nb_filters,
                                out_channels=nb_filters,
                                kernel_size=3, stride=1)),
            ('relu3', nn.ReLU(inplace=True)),
            ('flatten', Flatten()),
            ('fc1', nn.Linear(576, out_features=32)),
            ('relu4', nn.ReLU(inplace=True)),
            ('dropout', nn.Dropout(p=.5, inplace=False)),
            ('flatten2', Flatten()),
        ]))
        # Final classification head on the 32-dim feature vector.
        self.classifier = nn.Linear(32, num_classes)
        if init_strategy == "fan_out":
            # https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                            nonlinearity='relu')
        elif init_strategy == 'uniform_scaling':
            # NOTE(review): uniform_scaling_ (defined below in this module)
            # returns a new tensor without modifying its argument in-place,
            # and the return values are discarded here -- verify the weights
            # are actually re-initialized.
            for m in self.modules():
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                    uniform_scaling_(m.weight)
                    uniform_scaling_(m.bias)
        elif init_strategy == "default":
            # Delving deep into rectifiers: Surpassing human - level
            # performance on ImageNet classification - He, K. et al. (2015)
            pass
        else:
            raise ValueError("Unknown initialization strategy!")

    def forward(self, x):
        # Feature extractor already flattens; the extra view is a no-op safeguard.
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
def mnist_cnn(lr=0.1, momentum=0.9, weight_decay=0, preprocess=None,
              softmax_outputs=False, random_state=None, epochs=30,
              batch_size=128, **kwargs):
    """Build a CClassifierPyTorch wrapping MNISTCNN, trained with SGD.

    Parameters
    ----------
    lr, momentum, weight_decay : float
        SGD optimizer hyperparameters.
    preprocess : optional
        Preprocessing chain attached to the wrapped classifier.
    softmax_outputs : bool
        If True, apply softmax to the network outputs.
    random_state : int or None
        Seed for torch's RNG, also forwarded to the secml wrapper.
    epochs, batch_size : int
        Training schedule.
    **kwargs
        Forwarded to the MNISTCNN constructor
        (e.g. num_classes, init_strategy).
    """
    if random_state is not None:
        torch.manual_seed(random_state)
    model = MNISTCNN(**kwargs)
    loss = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum,
                          weight_decay=weight_decay)
    cnn_mnist = CClassifierPyTorch(model=model,
                                   loss=loss,
                                   optimizer=optimizer,
                                   input_shape=(1, 28, 28),
                                   random_state=random_state,
                                   preprocess=preprocess,
                                   epochs=epochs,
                                   batch_size=batch_size,
                                   softmax_outputs=softmax_outputs)
    return cnn_mnist
def uniform_scaling_(tensor, factor=1.0):
    """Fill ``tensor`` in-place with uniform-scaling-initialized values.

    Samples from U(-max_val, max_val) with
    ``max_val = sqrt(3 / input_size) * factor``, where ``input_size`` is
    the product of all but the last dimension of ``tensor``'s shape, so the
    scale of the input variance is kept roughly constant across layers.

    See Sussillo et al., 2014 (https://arxiv.org/abs/1412.6558,
    http://arxiv.org/pdf/1412.6558.pdf) for motivation; section 2.3
    numerically computes the nonlinearity-specific constants
    (linear: 1.0, relu: ~1.43, tanh: ~1.15).

    Args:
        tensor: an n-dimensional `torch.Tensor` to initialize in-place.
        factor: `float`. A multiplicative factor by which the values will
            be scaled.

    Returns:
        The initialized `tensor` (same object, modified in-place).
    """
    import math

    import torch

    with torch.no_grad():
        shape = tensor.shape
        input_size = 1.0
        # NOTE(review): for PyTorch conv weights (out, in, kH, kW) this uses
        # out*in*kH as the fan; the TF original used the first 3 dims of an
        # (H, W, in, out) kernel. Kept as-is to preserve the original bound.
        for dim in shape[:-1]:
            input_size *= dim
        max_val = math.sqrt(3 / input_size) * factor
        # Bug fix: the previous version built and returned a fresh random
        # tensor, leaving ``tensor`` untouched -- callers that discard the
        # return value (as MNISTCNN does) never actually initialized their
        # weights. Follow the torch.nn.init convention: trailing-underscore
        # functions modify their argument in-place and return it.
        return tensor.uniform_(-max_val, max_val)
| 6,209 | 36.409639 | 88 | py |
secml-zoo | secml-zoo-master/models/mnist/mnist3c_cnn.py | """
.. module:: MNIST3cCNN
:synopsis: CNN to be trained on MNIST 3-classes dataset
.. moduleauthor:: Maura Pintor <maura.pintor@unica.it>
.. moduleauthor:: Marco Melis <marco.melis@unica.it>
"""
import torch
from torch import nn, optim
from secml.ml.classifiers import CClassifierPyTorch
class MNIST3cCNN(nn.Module):
    """Small CNN for the 3-class MNIST task; expects (N, 1, 28, 28) inputs."""

    def __init__(self):
        super().__init__()
        # Two conv blocks followed by a small fully-connected head.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 3)

    def forward(self, x):
        h = torch.max_pool2d(self.conv1(x), 2)
        h = torch.relu(h)
        h = self.conv2_drop(self.conv2(h))
        h = torch.relu(torch.max_pool2d(h, 2))
        flat = h.view(-1, 320)
        hidden = torch.relu(self.fc1(flat))
        return self.fc2(hidden)
def mnist3c_cnn():
    """Build a CClassifierPyTorch wrapping MNIST3cCNN for 3-class MNIST.

    Uses SGD (lr=0.01, momentum=0.9) with cross-entropy loss, 20 epochs
    and batch size 20. torch's RNG is seeded for reproducible weights.
    """
    # Random seed
    torch.manual_seed(0)
    net = MNIST3cCNN()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=0.01, momentum=0.9)
    return CClassifierPyTorch(model=net,
                              loss=criterion,
                              optimizer=optimizer,
                              epochs=20,
                              batch_size=20,
                              input_shape=(1, 28, 28),
                              random_state=0)
| 1,486 | 29.346939 | 75 | py |
secml-zoo | secml-zoo-master/models/iCubWorld/utils.py | import numpy as np
from secml.array import CArray
from secml.ml import CNormalizerDNN
from .alexnet import alexnet
def attach_alexnet(clf):
    """Attach AlexNet (as a preprocessor) to input CClassifier.

    The output of the `classifier:4` layer is used as input for the
    classifier, i.e. AlexNet acts as a fixed deep feature extractor.
    """
    clf.preprocess = CNormalizerDNN(net=alexnet(), out_layer='classifier:4')
def ds_numpy_to_pytorch(ds):
    """Convert flattened dataset samples from the numpy (H, W, C) layout
    to the pytorch (C, H, W) layout, updating ``ds.X`` in place.
    """
    h, w = ds.header.img_h, ds.header.img_w
    flat = ds.X
    # Un-flatten rows back to (n_samples, H, W, C)...
    arr = flat.tondarray().reshape((flat.shape[0], h, w, 3))
    # ...move the channel axis first, then flatten the rows again.
    arr = np.moveaxis(arr, 3, 1).reshape(arr.shape[0], 3 * h * w)
    ds.X = CArray(arr)
| 1,021 | 30.9375 | 76 | py |
secml-zoo | secml-zoo-master/models/iCubWorld/alexnet.py | """
.. module:: AlexNet
:synopsis: AlexNet Convolutional Neural Network
.. moduleauthor:: Marco Melis <marco.melis@unica.it>
"""
from torchvision import models
from secml.ml.classifiers import CClassifierPyTorch
from secml.ml.features.normalization import CNormalizerMeanStd
def alexnet():
    """CClassifierPyTorch wrapping a pretrained torchvision AlexNet."""
    model = models.alexnet(pretrained=True)
    # Standard ImageNet per-channel mean/std normalization.
    norm_rgb = CNormalizerMeanStd((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    net = CClassifierPyTorch(model=model,
                             input_shape=(3, 224, 224),
                             pretrained=True,
                             softmax_outputs=False,
                             preprocess=norm_rgb,
                             random_state=0)
    return net
| 803 | 27.714286 | 79 | py |
secml-zoo | secml-zoo-master/models/iCubWorld/iCubWorld28/tests/test_models_icubworld28.py | from models.iCubWorld.tests import CICubWorldTestCases
from secml.ml.classifiers.multiclass import CClassifierMulticlassOVA
from secml.utils import fm, pickle_utils
from models.iCubWorld.utils import ds_numpy_to_pytorch
class TestModelsICubWorld28(CICubWorldTestCases):
    """Unittests for iCubWorld28 models."""

    @classmethod
    def setUpClass(cls):
        # Load the reduced iCubWorld28 set shipped next to this file and
        # convert it to the flattened (C, H, W) layout pytorch nets expect.
        CICubWorldTestCases.setUpClass()
        cls.ds = pickle_utils.load(
            fm.join(fm.abspath(__file__), 'iCubWorld28_red.gz'))
        ds_numpy_to_pytorch(cls.ds)

    def test_icubworld28_day4_svm(self):
        # Linear SVM (OVA) trained on day4 AlexNet features.
        model_id = 'icubworld28-day4-svm'
        expected_class = CClassifierMulticlassOVA
        expected_acc = 0.7
        self._test_model(self.ds, model_id, expected_class, expected_acc)

    def test_icubworld28_day4_svm_rbf(self):
        # RBF-kernel SVM (OVA) trained on day4 AlexNet features.
        model_id = 'icubworld28-day4-svm-rbf'
        expected_class = CClassifierMulticlassOVA
        expected_acc = 0.69
        self._test_model(self.ds, model_id, expected_class, expected_acc)
if __name__ == '__main__':
CICubWorldTestCases.main()
| 1,080 | 29.027778 | 73 | py |
secml-zoo | secml-zoo-master/models/iCubWorld/iCubWorld28/_exporters/icubworld28-day4-svm.py | import sys
# Exporter script: trains a linear SVM (OVA) on iCubWorld28 day4 features
# extracted from AlexNet, evaluates it, then saves the model state and
# prints its md5 checksum for the model-zoo index.
sys.path.insert(0, '../../../')
from svm_ova import svm_ova
from iCubWorld.utils import attach_alexnet, ds_numpy_to_pytorch
from secml.data.loader import CDataLoaderICubWorld28
from secml.data.splitter import CDataSplitter
from secml.ml.peval.metrics import CMetricAccuracy
dl = CDataLoaderICubWorld28()
dl.verbose = 2
tr = dl.load(
    ds_type='train', day='day4',
    resize_shape=(256, 256), crop_shape=(224, 224),
    normalize=True
)
ds_numpy_to_pytorch(tr)
clf = svm_ova()
clf.verbose = 1
# Hyperparameters selected via the (now commented-out) cross-validation below.
clf.set_params({
    'C': 0.01,
    'class_weight': 'balanced'
})
attach_alexnet(clf)
# xval_parameters = {'C': [1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2]}
#
# xval_splitter = CDataSplitter.create('kfold', num_folds=3, random_state=0)
#
# clf.estimate_parameters(
#     tr, xval_parameters, xval_splitter, 'accuracy')
clf.fit(tr.X, tr.Y)
ts = dl.load(
    ds_type='test', day='day4',
    resize_shape=(256, 256), crop_shape=(224, 224),
    normalize=True
)
ds_numpy_to_pytorch(ts)
print("Accuracy: {:}".format(
    CMetricAccuracy().performance_score(ts.Y, clf.predict(ts.X))))
# Drop the (large) AlexNet preprocessor before serializing the state.
clf.preprocess = None
state_path = '../icubworld28-day4-svm.gz'
clf.save_state(state_path)
import os
print("Model stored into: " + os.path.abspath(state_path))
from hashlib import md5
# Bug fix: the state file was previously opened without ever being closed;
# a context manager releases the handle deterministically.
with open(state_path, "rb") as state_file:
    md5_hash = md5(state_file.read())
print('md5: ' + md5_hash.hexdigest())
| 1,418 | 20.5 | 76 | py |
secml-zoo | secml-zoo-master/models/iCubWorld/iCubWorld28/_exporters/icubworld28-day4-svm-rbf.py | import sys
# Exporter script: trains an RBF-kernel SVM (OVA) on iCubWorld28 day4
# features extracted from AlexNet, evaluates it, then saves the model state
# and prints its md5 checksum for the model-zoo index.
sys.path.insert(0, '../../../')
from svm_rbf_ova import svm_rbf_ova
from iCubWorld.utils import attach_alexnet, ds_numpy_to_pytorch
from secml.data.loader import CDataLoaderICubWorld28
from secml.data.splitter import CDataSplitter
from secml.ml.peval.metrics import CMetricAccuracy
dl = CDataLoaderICubWorld28()
dl.verbose = 2
tr = dl.load(
    ds_type='train', day='day4',
    resize_shape=(256, 256), crop_shape=(224, 224),
    normalize=True
)
ds_numpy_to_pytorch(tr)
clf = svm_rbf_ova()
clf.verbose = 1
# Hyperparameters selected via the (now commented-out) cross-validation below.
clf.set_params({
    'C': 1e3,
    'kernel.gamma': 2e-4,
    'class_weight': 'balanced'
})
attach_alexnet(clf)
# xval_parameters = {'C': [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4],
#                    'kernel.gamma': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1]}
#
# xval_splitter = CDataSplitter.create('kfold', num_folds=3, random_state=0)
#
# clf.estimate_parameters(
#     tr, xval_parameters, xval_splitter, 'accuracy')
clf.fit(tr.X, tr.Y)
ts = dl.load(
    ds_type='test', day='day4',
    resize_shape=(256, 256), crop_shape=(224, 224),
    normalize=True
)
ds_numpy_to_pytorch(ts)
print("Accuracy: {:}".format(
    CMetricAccuracy().performance_score(ts.Y, clf.predict(ts.X))))
# Drop the (large) AlexNet preprocessor before serializing the state.
clf.preprocess = None
state_path = '../icubworld28-day4-svm-rbf.gz'
clf.save_state(state_path)
import os
print("Model stored into: " + os.path.abspath(state_path))
from hashlib import md5
# Bug fix: the state file was previously opened without ever being closed;
# a context manager releases the handle deterministically.
with open(state_path, "rb") as state_file:
    md5_hash = md5(state_file.read())
print('md5: ' + md5_hash.hexdigest())
| 1,536 | 21.602941 | 78 | py |
secml-zoo | secml-zoo-master/models/iCubWorld/iCubWorld7/tests/test_models_icubworld7.py | from models.iCubWorld.tests import CICubWorldTestCases
from secml.ml.classifiers.multiclass import CClassifierMulticlassOVA
from secml.utils import fm, pickle_utils
from models.iCubWorld.utils import ds_numpy_to_pytorch
class TestModelsICubWorld7(CICubWorldTestCases):
    """Unittests for iCubWorld7 models."""

    @classmethod
    def setUpClass(cls):
        # Load the reduced iCubWorld7 set shipped next to this file and
        # convert it to the flattened (C, H, W) layout pytorch nets expect.
        CICubWorldTestCases.setUpClass()
        cls.ds = pickle_utils.load(
            fm.join(fm.abspath(__file__), 'iCubWorld7_red.gz'))
        ds_numpy_to_pytorch(cls.ds)

    def test_icubworld7_day4_svm(self):
        # Linear SVM (OVA) trained on day4 AlexNet features.
        model_id = 'icubworld7-day4-svm'
        expected_class = CClassifierMulticlassOVA
        expected_acc = 0.93
        self._test_model(self.ds, model_id, expected_class, expected_acc)

    def test_icubworld7_day4_svm_rbf(self):
        # RBF-kernel SVM (OVA) trained on day4 AlexNet features.
        model_id = 'icubworld7-day4-svm-rbf'
        expected_class = CClassifierMulticlassOVA
        expected_acc = 0.94
        self._test_model(self.ds, model_id, expected_class, expected_acc)
if __name__ == '__main__':
CICubWorldTestCases.main()
| 1,074 | 28.861111 | 73 | py |
secml-zoo | secml-zoo-master/models/iCubWorld/iCubWorld7/_exporters/icubworld7-day4-svm.py | import sys
# Exporter script: trains a linear SVM (OVA) on iCubWorld7 day4 features
# extracted from AlexNet, evaluates it, then saves the model state and
# prints its md5 checksum for the model-zoo index.
sys.path.insert(0, '../../../')
from svm_ova import svm_ova
from iCubWorld.utils import attach_alexnet, ds_numpy_to_pytorch
from secml.data.loader import CDataLoaderICubWorld28
from secml.data.splitter import CDataSplitter
from secml.ml.peval.metrics import CMetricAccuracy
dl = CDataLoaderICubWorld28()
dl.verbose = 2
tr = dl.load(
    ds_type='train', day='day4', icub7=True,
    resize_shape=(256, 256), crop_shape=(224, 224),
    normalize=True
)
ds_numpy_to_pytorch(tr)
clf = svm_ova()
clf.verbose = 1
# Hyperparameters selected via the (now commented-out) cross-validation below.
clf.set_params({
    'C': 0.1,
    'class_weight': 'balanced'
})
attach_alexnet(clf)
# xval_parameters = {'C': [1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3]}
#
# xval_splitter = CDataSplitter.create('kfold', num_folds=3, random_state=0)
#
# clf.estimate_parameters(
#     tr, xval_parameters, xval_splitter, 'accuracy')
clf.fit(tr.X, tr.Y)
ts = dl.load(
    ds_type='test', day='day4', icub7=True,
    resize_shape=(256, 256), crop_shape=(224, 224),
    normalize=True
)
ds_numpy_to_pytorch(ts)
print("Accuracy: {:}".format(
    CMetricAccuracy().performance_score(ts.Y, clf.predict(ts.X))))
# Drop the (large) AlexNet preprocessor before serializing the state.
clf.preprocess = None
state_path = '../icubworld7-day4-svm.gz'
clf.save_state(state_path)
import os
print("Model stored into: " + os.path.abspath(state_path))
from hashlib import md5
# Bug fix: the state file was previously opened without ever being closed;
# a context manager releases the handle deterministically.
with open(state_path, "rb") as state_file:
    md5_hash = md5(state_file.read())
print('md5: ' + md5_hash.hexdigest())
| 1,439 | 20.818182 | 76 | py |
secml-zoo | secml-zoo-master/models/iCubWorld/iCubWorld7/_exporters/icubworld7-day4-svm-rbf.py | import sys
# Exporter script: trains an RBF-kernel SVM (OVA) on iCubWorld7 day4
# features extracted from AlexNet, evaluates it, then saves the model state
# and prints its md5 checksum for the model-zoo index.
sys.path.insert(0, '../../../')
from svm_rbf_ova import svm_rbf_ova
from iCubWorld.utils import attach_alexnet, ds_numpy_to_pytorch
from secml.data.loader import CDataLoaderICubWorld28
from secml.ml.peval.metrics import CMetricAccuracy
dl = CDataLoaderICubWorld28()
dl.verbose = 2
tr = dl.load(
    ds_type='train', day='day4', icub7=True,
    resize_shape=(256, 256), crop_shape=(224, 224),
    normalize=True
)
ds_numpy_to_pytorch(tr)
clf = svm_rbf_ova()
clf.verbose = 1
# Hyperparameters selected via the (now commented-out) cross-validation below.
clf.set_params({
    'C': 1e2,
    'kernel.gamma': 2e-4,
    'class_weight': 'balanced'
})
attach_alexnet(clf)
# xval_parameters = {'C': [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4],
#                    'kernel.gamma': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1]}
#
# xval_splitter = CDataSplitter.create('kfold', num_folds=3, random_state=0)
#
# clf.estimate_parameters(
#     tr, xval_parameters, xval_splitter, 'accuracy')
clf.fit(tr.X, tr.Y)
ts = dl.load(
    ds_type='test', day='day4', icub7=True,
    resize_shape=(256, 256), crop_shape=(224, 224),
    normalize=True
)
ds_numpy_to_pytorch(ts)
print("Accuracy: {:}".format(
    CMetricAccuracy().performance_score(ts.Y, clf.predict(ts.X))))
# Drop the (large) AlexNet preprocessor before serializing the state.
clf.preprocess = None
state_path = '../icubworld7-day4-svm-rbf.gz'
clf.save_state(state_path)
import os
print("Model stored into: " + os.path.abspath(state_path))
from hashlib import md5
# Bug fix: the state file was previously opened without ever being closed;
# a context manager releases the handle deterministically.
with open(state_path, "rb") as state_file:
    md5_hash = md5(state_file.read())
print('md5: ' + md5_hash.hexdigest())
| 1,513 | 21.597015 | 78 | py |
ActiveLearningForHumanPose | ActiveLearningForHumanPose-main/code/main.py | import os
import copy
import logging
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
from matplotlib import pyplot as plt
import torch
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import ReduceLROnPlateau as ReduceLROnPlateau
from config import ParseConfig
from utils import fast_argmax
from utils import visualize_image
from utils import heatmap_loss
from utils import count_parameters
from utils import get_pairwise_joint_distances
from activelearning import ActiveLearning
from activelearning_viz import ActiveLearning_Visualization
from dataloader import load_hp_dataset
from dataloader import HumanPoseDataLoader
from evaluation import PercentageCorrectKeypoint
from models.auxiliary.AuxiliaryNet import AuxNet
from models.hrnet.pose_hrnet import PoseHighResolutionNet as HRNet
from models.stacked_hourglass.StackedHourglass import PoseNet as Hourglass
# Global declarations
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
logging.getLogger().setLevel(logging.INFO)
os.chdir(os.path.dirname(os.path.realpath(__file__)))
class Train(object):
    """Trainer for the human pose network and (optionally) the auxiliary network."""

    def __init__(self, pose_model, aux_net, hyperparameters, dataset_obj, conf, tb_writer):
        """
        Class for training the human pose and aux_net model
        :param pose_model: (torch.nn) Human pose model (Hourglass or HRNet)
        :param aux_net: (torch.nn) Auxiliary network (only used when conf.model['aux_net']['train'] is True)
        :param hyperparameters: (dict) Various hyperparameters used in training (see define_hyperparams)
        :param dataset_obj: (torch.utils.data.Dataset)
        :param conf: (Object of ParseConfig) Contains the configurations for the model
        :param tb_writer: (Object of SummaryWriter) Tensorboard writer to log values (None when disabled)
        """
        self.conf = conf
        self.aux_net = aux_net
        self.network = pose_model
        self.tb_writer = tb_writer
        self.dataset_obj = dataset_obj
        self.hyperparameters = hyperparameters

        # Experiment Settings
        self.batch_size = conf.experiment_settings['batch_size']
        self.epoch = hyperparameters['num_epochs']
        self.optimizer = hyperparameters['optimizer']  # Adam / SGD
        self.loss_fn = hyperparameters['loss_fn']  # MSE
        self.learning_rate = hyperparameters['optimizer_config']['lr']
        self.start_epoch = hyperparameters['start_epoch']  # Used in case of resume training
        self.num_hm = conf.experiment_settings['num_hm']  # Number of heatmaps
        self.joint_names = self.dataset_obj.ind_to_jnt
        self.model_save_path = conf.model['save_path']
        self.train_aux_net = conf.model['aux_net']['train']

        # Stacked Hourglass scheduling
        # When the aux_net trains jointly the optimizer has two param groups, so
        # ReduceLROnPlateau needs a per-group min_lr list (pose group, aux group).
        if self.train_aux_net:
            min_lr = [0.000003, conf.experiment_settings['lr']]
        else:
            min_lr = 0.000003
        self.scheduler = ReduceLROnPlateau(self.optimizer, factor=0.5, patience=5, cooldown=2, min_lr=min_lr, verbose=True)

        # drop_last=True keeps every batch at exactly batch_size (the pairwise
        # learning-loss objective below relies on even splits of the batch).
        self.torch_dataloader = torch.utils.data.DataLoader(self.dataset_obj, self.batch_size,
                                                            shuffle=True, num_workers=2, drop_last=True)
    def train_model(self):
        """
        Run the full training loop: per-epoch training and validation, LR
        scheduling on the pose validation loss, checkpointing of the best pose
        and aux_net models, and optional TensorBoard logging.
        :return: None
        """
        print("Initializing training: Epochs - {}\tBatch Size - {}".format(
            self.hyperparameters['num_epochs'], self.batch_size))

        # 'mean_loss_validation': {'Pose': validation_loss_pose, 'AuxNet': validation_aux_net}
        if self.conf.resume_training:
            # Resume from the best losses recorded by the interrupted run.
            best_val_pose = self.hyperparameters['mean_loss_validation']['Pose']
            best_val_auxnet = self.hyperparameters['mean_loss_validation']['AuxNet']
            best_epoch_pose = self.hyperparameters['start_epoch']
            best_epoch_auxnet = -1
            global_step = 0
        else:
            best_val_pose = np.inf
            best_val_auxnet = np.inf
            best_epoch_pose = -1
            best_epoch_auxnet = -1
            global_step = 0

        # Variable to store all the loss values for logging
        loss_across_epochs = []
        validation_across_epochs = []

        for e in range(self.start_epoch, self.epoch):
            epoch_loss = []
            epoch_loss_aux_net = []

            # Network alternates between train() and validate()
            self.network.train()
            if self.train_aux_net:
                self.aux_net.train()
            self.dataset_obj.input_dataset(train=True)

            # Training loop
            logging.info('Training for epoch: {}'.format(e+1))
            for (images, heatmaps, _, _, _, gt_per_image, split, _, _, _, joint_exist) in tqdm(self.torch_dataloader):

                assert split[0] == 0, "Training split should be 0."
                self.optimizer.zero_grad()

                outputs, pose_features = self.network(images)  # images.cuda() done internally within the model
                loss = heatmap_loss(outputs, heatmaps)  # heatmaps transferred to GPU within the function

                # Exactly one of the three aux objectives below runs, selected by config.
                if self.conf.model['aux_net']['train'] and self.conf.model['aux_net']['method'] == 'learning_loss':
                    # Detach so the aux objective does not backprop into the pose network;
                    # move to the last GPU, where the aux_net lives (see load_models).
                    learning_loss_ = loss.clone().detach().to('cuda:{}'.format(torch.cuda.device_count() - 1))
                    learning_loss_ = torch.mean(learning_loss_, dim=[1])

                    loss_learnloss = self.learning_loss(pose_encodings=pose_features, true_loss=learning_loss_,
                                                        gt_per_img=gt_per_image, epoch=e)
                    loss_learnloss.backward()
                    epoch_loss_aux_net.append(loss_learnloss.item())

                if self.conf.model['aux_net']['train'] and self.conf.model['aux_net']['method'] == 'aleatoric':
                    loss_aleatoric = self.aleatoric_uncertainty(pose_encodings=pose_features, outputs=outputs,
                                                                heatmaps=heatmaps, joint_exist=joint_exist, epoch=e)
                    loss_aleatoric.backward()
                    epoch_loss_aux_net.append(loss_aleatoric.item())

                if self.conf.model['aux_net']['train'] and self.conf.model['aux_net']['method'] == 'vl4pose':
                    loss_vl4pose = self.vl4pose(pose_encodings=pose_features, heatmaps=heatmaps,
                                                joint_exist=joint_exist, epoch=e)
                    loss_vl4pose.backward()
                    epoch_loss_aux_net.append(loss_vl4pose.item())

                if self.conf.model['aux_net']['train_auxnet_only']:
                    # Zero out the pose loss so only aux_net gradients update weights
                    # (multiplying by 0 keeps the graph so backward() stays valid).
                    loss = torch.mean(loss) * 0
                else:
                    loss = torch.mean(loss)
                loss.backward()

                if self.conf.tensorboard:
                    self.tb_writer.add_scalar('Train/Loss_batch', torch.mean(loss), global_step)

                epoch_loss.append(loss.item())

                # Weight update
                self.optimizer.step()
                global_step += 1

            # Epoch training ends -------------------------------------------------------------------------------------
            epoch_loss = np.mean(epoch_loss)

            if self.conf.model['aux_net']['train']:
                epoch_loss_aux_net = np.mean(epoch_loss_aux_net)
                validation_loss_pose, validation_aux_net = self.validation(e)
            else:
                validation_loss_pose = self.validation(e)
                validation_aux_net = 0.0

            # Learning rate scheduler on the Human Pose validation loss
            self.scheduler.step(validation_loss_pose)

            # TensorBoard Summaries
            if self.conf.tensorboard:
                self.tb_writer.add_scalar('Train', torch.tensor([epoch_loss]), global_step)
                self.tb_writer.add_scalar('Validation/HG_Loss', torch.tensor([validation_loss_pose]), global_step)
                if self.conf.model['aux_net']['train']:
                    self.tb_writer.add_scalar('Validation/Learning_Loss', torch.tensor([validation_aux_net]), global_step)

            # Save if best model
            if best_val_pose > validation_loss_pose:
                torch.save(self.network.state_dict(),
                           os.path.join(self.model_save_path, 'model_checkpoints/pose_net.pth'))

                if self.conf.model['aux_net']['train']:
                    # Snapshot of the aux_net that corresponds to the best pose model.
                    torch.save(
                        self.aux_net.state_dict(),
                        os.path.join(self.model_save_path,
                                     'model_checkpoints/aux_net_{}_BestPose.pth'.format(self.conf.model['aux_net']['method'])))

                best_val_pose = validation_loss_pose
                best_epoch_pose = e + 1

                # Optimizer state + bookkeeping needed to resume training later.
                torch.save({'epoch': e + 1,
                            'optimizer_load_state_dict': self.optimizer.state_dict(),
                            'mean_loss_train': epoch_loss,
                            'mean_loss_validation': {'Pose': validation_loss_pose, 'AuxNet': validation_aux_net},
                            'aux_net': self.conf.model['aux_net']['train']},
                           os.path.join(self.model_save_path, 'model_checkpoints/optim_best_model.tar'))

            if self.conf.model['aux_net']['train']:
                # Separately track the epoch where the aux_net itself validated best.
                if (best_val_auxnet > validation_aux_net) and (validation_aux_net != 0.0):
                    torch.save(self.aux_net.state_dict(),
                               os.path.join(self.model_save_path, 'model_checkpoints/aux_net_{}.pth'.format(self.conf.model['aux_net']['method'])))
                    best_val_auxnet = validation_aux_net
                    best_epoch_auxnet = e + 1

            print("Loss at epoch {}/{}: (train:Pose) {}\t"
                  "(train:AuxNet) {}\t"
                  "(validation:Pose) {}\t"
                  "(Validation:AuxNet) {}\t"
                  "(Best Model) {}".format(
                      e+1,
                      self.epoch,
                      epoch_loss,
                      epoch_loss_aux_net,
                      validation_loss_pose,
                      validation_aux_net,
                      best_epoch_pose))

            loss_across_epochs.append(epoch_loss)
            validation_across_epochs.append(validation_loss_pose)

            # Save the loss values
            # NOTE(review): rewritten every epoch ("w" mode) so the files always
            # reflect the run so far.
            f = open(os.path.join(self.model_save_path, 'model_checkpoints/loss_data.txt'), "w")
            f_ = open(os.path.join(self.model_save_path, 'model_checkpoints/validation_data.txt'), "w")
            f.write("\n".join([str(lsx) for lsx in loss_across_epochs]))
            f_.write("\n".join([str(lsx) for lsx in validation_across_epochs]))
            f.close()
            f_.close()

        if self.conf.tensorboard:
            self.tb_writer.close()

        logging.info("Model training completed\nBest validation loss (Pose): {}\tBest Epoch: {}"
                     "\nBest validation loss (AuxNet): {}\tBest Epoch: {}".format(
                         best_val_pose, best_epoch_pose, best_val_auxnet, best_epoch_auxnet))
    def validation(self, e):
        """
        Compute mean validation loss for the pose network and, when enabled, the
        auxiliary network, over the validation split.
        :param e: (int) Zero-indexed epoch; forwarded to the aux objectives for warm-up and used in logging
        :return: mean pose loss, or (mean pose loss, mean aux loss) when the aux_net is trained
        """
        with torch.no_grad():
            # Stores the loss for all batches
            epoch_val_pose = []

            self.network.eval()
            if self.conf.model['aux_net']['train']:
                epoch_val_auxnet = []
                self.aux_net.eval()

            # Augmentation only needed in Training
            self.dataset_obj.input_dataset(validate=True)

            # Compute and store batch-wise validation loss in a list
            logging.info('Validation for epoch: {}'.format(e+1))
            for (images, heatmaps, _, _, _, gt_per_img, split, _, _, _, joint_exist) in tqdm(self.torch_dataloader):

                assert split[0] == 1, "Validation split should be 1."

                outputs, pose_features = self.network(images)
                loss_val_pose = heatmap_loss(outputs, heatmaps)

                # Same method dispatch as in train_model; here only loss values are collected.
                if self.conf.model['aux_net']['train'] and self.conf.model['aux_net']['method'] == 'learning_loss':
                    learning_loss_val = loss_val_pose.clone().detach().to('cuda:{}'.format(torch.cuda.device_count() - 1))
                    learning_loss_val = torch.mean(learning_loss_val, dim=[1])
                    loss_val_auxnet = self.learning_loss(pose_features, learning_loss_val, gt_per_img, e)
                    epoch_val_auxnet.append(loss_val_auxnet.item())

                if self.conf.model['aux_net']['train'] and self.conf.model['aux_net']['method'] == 'aleatoric':
                    loss_val_aleatoric = self.aleatoric_uncertainty(pose_encodings=pose_features, outputs=outputs,
                                                                    heatmaps=heatmaps, joint_exist=joint_exist, epoch=e)
                    epoch_val_auxnet.append(loss_val_aleatoric.item())

                if self.conf.model['aux_net']['train'] and self.conf.model['aux_net']['method'] == 'vl4pose':
                    vl4pose_loss = self.vl4pose(pose_encodings=pose_features, heatmaps=heatmaps, joint_exist=joint_exist, epoch=e)
                    epoch_val_auxnet.append(vl4pose_loss.item())

                loss_val_pose = torch.mean(loss_val_pose)
                epoch_val_pose.append(loss_val_pose.item())

        if self.conf.model['aux_net']['train']:
            return np.mean(epoch_val_pose), np.mean(epoch_val_auxnet)
        else:
            return np.mean(epoch_val_pose)
    def learning_loss(self, pose_encodings, true_loss, gt_per_img, epoch):
        '''
        Learning Loss module
        Based on the paper: "Learning Loss For Active Learning, CVPR 2019" and "A Mathematical Analysis of Learning Loss for Active Learning in Regression, CVPR-W 2021"
        :param pose_encodings: (Dict of tensors) Intermediate (Hourglass) and penultimate layer output of the M10 network
        :param true_loss: (Tensor of shape [Batch Size]) Loss computed from M10 prediction and ground truth
        :param gt_per_img: (Tensor, shape [Batch Size]) Number of ground truth per image
        :param epoch: (scalar) Epoch, used in learning loss warm start-up
        :return: (Torch scalar tensor) Learning Loss
        '''
        learnloss_margin = self.conf.active_learning['learningLoss']['margin']
        learnloss_objective = self.conf.active_learning['learningLoss']['objective']
        learnloss_warmup = self.conf.model['aux_net']['warmup']

        # Loss value predicted ("emperical") by the auxiliary network.
        emperical_loss = self._aux_net_inference(pose_encodings)
        emperical_loss = emperical_loss.squeeze()

        assert emperical_loss.shape == true_loss.shape, "Mismatch in Batch size for true and emperical loss"

        with torch.no_grad():
            # Scale the images as per the number of joints
            # To prevent DivideByZero. PyTorch does not throw an exception to DivideByZero
            gt_per_img = torch.sum(gt_per_img, dim=1)
            gt_per_img += 0.1
            true_loss = true_loss / gt_per_img.to(true_loss.device)

        # Splitting into pairs: (i, i+half)
        half_split = true_loss.shape[0] // 2
        true_loss_i = true_loss[: half_split]
        true_loss_j = true_loss[half_split: 2 * half_split]

        emp_loss_i = emperical_loss[: (emperical_loss.shape[0] // 2)]
        emp_loss_j = emperical_loss[(emperical_loss.shape[0] // 2): 2 * (emperical_loss.shape[0] // 2)]

        # Pair wise loss as mentioned in the original paper
        if learnloss_objective == 'YooAndKweon':
            loss_sign = torch.sign(true_loss_i - true_loss_j)
            loss_emp = (emp_loss_i - emp_loss_j)

            # Learning Loss objective: margin ranking hinge on the pairwise ordering.
            llal_loss = torch.max(torch.zeros(half_split, device=loss_sign.device), (-1 * (loss_sign * loss_emp)) + learnloss_margin)

        # Computing loss over the entire batch using softmax.
        elif learnloss_objective == 'KLdivergence':
            # Removed the standardization-KL Divergence parts
            with torch.no_grad():
                # Target distribution: true losses of the pair normalized to sum to 1.
                true_loss_ = torch.cat([true_loss_i.reshape(-1, 1), true_loss_j.reshape(-1, 1)], dim=1)
                true_loss_scaled = true_loss_ / torch.sum(true_loss_, dim=1, keepdim=True)

            emp_loss_ = torch.cat([emp_loss_i.reshape(-1, 1), emp_loss_j.reshape(-1, 1)], dim=1)
            # KLDivLoss expects log-probabilities as input, hence LogSoftmax.
            emp_loss_logsftmx = torch.nn.LogSoftmax(dim=1)(emp_loss_)

            # Scaling the cross entropy loss with respect to true loss values
            llal_loss = torch.nn.KLDivLoss(reduction='batchmean')(input=emp_loss_logsftmx, target=true_loss_scaled)
            #llal_loss = torch.sum((-true_loss_scaled * torch.log(emp_loss_logsftmx)), dim=1, keepdim=True)

        else:
            raise NotImplementedError('Currently only "YooAndKweon" or "KLdivergence" supported. ')

        # Warm-up: before 'warmup' epochs the objective contributes zero while
        # keeping the graph, so backward() in the caller remains valid.
        if learnloss_warmup <= epoch:
            return torch.mean(llal_loss)
        else:
            return 0.0 * torch.mean(llal_loss)
    def aleatoric_uncertainty(self, pose_encodings, outputs, heatmaps, joint_exist, epoch):
        """
        Extension of Kendall and Gal's method for calculating uncertainty to human pose estimation
        Auxiliary Network module to train sigmas for the HG/HRN network's heatmaps directly
        :param pose_encodings: (Dict of tensors) Intermediate and penultimate layer outputs of the pose model
        :param outputs: Tensor of size (batch_size, num_stacks, num_joints, hm_size, hm_size) Outputs of the main HG/HRN network
        :param heatmaps: Tensor of size (batch_size, num_joints, hm_size, hm_size) Ground truth heatmaps
        :param joint_exist: Tensor of size (batch_size, num_joints) Joint status (1=Present, 0=Absent)
                            Present=1 may include occluded joints depending on configuration.yml setting
        :param epoch: (scalar) Epoch, compared against the warm-up threshold
        :return: auxnet_loss: (Torch scalar tensor) auxiliary network Loss
        """
        # Per-joint log-variance predicted by the auxiliary network.
        parameters = self._aux_net_inference(pose_encodings)

        # Final stack of hourglass / output of HRNet; detached so this loss
        # trains only the aux_net, and moved to the aux_net's (last) GPU.
        outputs = outputs[:, -1].clone().detach().to('cuda:{}'.format(torch.cuda.device_count() - 1))

        assert heatmaps.shape == outputs.shape  # Batch Size x num_joints x 64 x 64

        joint_exist = joint_exist.float().to(device=parameters.device)

        # Squared distance between predicted and ground-truth argmax locations.
        residual = torch.sum((fast_argmax(outputs) - fast_argmax(heatmaps).to(outputs.device))**2, dim=-1)  # along axis representing u,v

        # Gaussian negative log-likelihood with 'parameters' as log-variance.
        residual = 0.5 * residual * torch.exp(-parameters)
        neg_log_likelihood = residual + (0.5 * parameters)

        # Mask out joints that are absent from the annotation.
        neg_log_likelihood = neg_log_likelihood * joint_exist

        # Warm-up: contribute zero (but keep the graph) before 'warmup' epochs.
        if self.conf.model['aux_net']['warmup'] <= epoch:
            return torch.mean(neg_log_likelihood)
        else:
            return 0.0 * torch.mean(neg_log_likelihood)
    def vl4pose(self, pose_encodings, heatmaps, joint_exist, epoch):
        '''
        Train the auxiliary network for VL4Pose.
        - param pose_encodings: (Dict of tensors) output from the pose network
        - param heatmaps: Tensor of size (batch_size, num_joints, hm_size, hm_size)
        - param joint_exist: Tensor of size (batch_size, num_joints)
            - Joint status (1=Present, 0=Absent)
            - Present=1 may include occluded joints depending on configuration.yml setting
        - param epoch: (scalar) Epoch, used in auxiliary network loss warm start-up
        - return auxnet_loss: (Torch scalar tensor) auxiliary network Loss
        '''
        assert joint_exist.dim() == 2, "joint_exist should be BS x num_hm, received: {}".format(joint_exist.shape)

        j2i = self.dataset_obj.jnt_to_ind

        # Skeleton definition: each entry is a pair of joint indices forming a link.
        if self.conf.dataset['load'] == 'mpii':
            links = [[j2i['head'], j2i['neck']], [j2i['neck'], j2i['thorax']], [j2i['thorax'], j2i['pelvis']],
                     [j2i['thorax'], j2i['lsho']], [j2i['lsho'], j2i['lelb']], [j2i['lelb'], j2i['lwri']],
                     [j2i['thorax'], j2i['rsho']], [j2i['rsho'], j2i['relb']], [j2i['relb'], j2i['rwri']],
                     [j2i['pelvis'], j2i['lhip']], [j2i['lhip'], j2i['lknee']], [j2i['lknee'], j2i['lankl']],
                     [j2i['pelvis'], j2i['rhip']], [j2i['rhip'], j2i['rknee']], [j2i['rknee'], j2i['rankl']]]
        else:
            # LSP-style skeleton (no thorax/pelvis joints).
            links = [[j2i['head'], j2i['neck']],
                     [j2i['neck'], j2i['lsho']], [j2i['lsho'], j2i['lelb']], [j2i['lelb'], j2i['lwri']],
                     [j2i['neck'], j2i['rsho']], [j2i['rsho'], j2i['relb']], [j2i['relb'], j2i['rwri']],
                     [j2i['lsho'], j2i['lhip']], [j2i['lhip'], j2i['lknee']], [j2i['lknee'], j2i['lankl']],
                     [j2i['rsho'], j2i['rhip']], [j2i['rhip'], j2i['rknee']], [j2i['rknee'], j2i['rankl']]]

        # Aux net predicts two values per link: [:, :, 0] mean length, [:, :, 1] log-variance.
        parameters = self._aux_net_inference(pose_encodings)
        parameters = parameters.reshape(self.batch_size, len(links), 2)

        joint_exist = joint_exist.to(parameters.device)
        heatmaps = heatmaps.to(parameters.device)

        with torch.no_grad():
            joint_distances = get_pairwise_joint_distances(heatmaps)
            # Outer product: a link's target exists only when BOTH endpoint joints exist.
            joint_exist = torch.matmul(joint_exist.unsqueeze(2).type(torch.float16), joint_exist.unsqueeze(1).type(torch.float16))

            # Batch Size x num_links
            skeleton_exist = torch.stack([joint_exist[:, u, v] for u, v in links], dim=1)
            skeleton_distances = torch.stack([joint_distances[:, u, v] for u, v in links], dim=1)

        ####
        # Gaussian negative log-likelihood on the link lengths.
        residual = (parameters[:, :, 0].squeeze() - skeleton_distances)**2
        residual = 0.5 * residual * torch.exp(-parameters[:, :, 1]).squeeze()
        neg_log_likelihood = residual + (0.5 * parameters[:, :, 1].squeeze())

        # Mask links whose endpoints are not both annotated.
        neg_log_likelihood = neg_log_likelihood * skeleton_exist

        # Warm-up: contribute zero (but keep the graph) before 'warmup' epochs.
        if self.conf.model['aux_net']['warmup'] <= epoch:
            return torch.mean(neg_log_likelihood)
        else:
            return 0.0 * torch.mean(neg_log_likelihood)
def _aux_net_inference(self, pose_features):
"""
Common to VL4Pose, LearningLoss++ and Aleatoric which all use an auxiliary network
"""
extractor = self.conf.architecture['aux_net']['conv_or_avg_pooling']
with torch.no_grad():
if extractor == 'avg':
# Transfer to GPU where auxiliary network is stored
encodings = pose_features['penultimate']
else:
depth = len(self.conf.architecture['aux_net']['spatial_dim'])
encodings = torch.cat(
[pose_features['feature_{}'.format(i)].reshape(
self.batch_size, pose_features['feature_{}'.format(i)].shape[1], -1)
for i in range(depth, 0, -1)],
dim=2)
aux_out = self.aux_net(encodings)
return aux_out
class Metric(object):

    def __init__(self, network, dataset_obj, conf):
        '''
        Class for Testing the model:
        1. Compute ground truth and predictions
        2. Computing metrics: PCK@0.x
        :param network: (torch.nn) Hourglass/HRNet network to compute predictions
        :param dataset_obj: (Dataset object) Handles data to be fed to PyTorch DataLoader
        :param conf: (Object of ParseConfig) Configuration for the experiment
        '''
        self.dataset_obj = dataset_obj
        # Metrics are always computed on the validation split.
        self.dataset_obj.input_dataset(validate=True)

        self.network = network
        self.viz = conf.viz  # Controls visualization
        self.conf = conf
        self.batch_size = conf.experiment_settings['batch_size']
        self.ind_to_jnt = self.dataset_obj.ind_to_jnt

        # No shuffling (and no drop_last) so predictions stay aligned with the
        # dataset order and every image is evaluated.
        self.torch_dataloader = torch.utils.data.DataLoader(self.dataset_obj, batch_size=self.batch_size,
                                                            shuffle=False, num_workers=2)
    def inference(self):
        '''
        Obtains model inference: runs the pose network over the validation
        split and accumulates heatmaps, scaling parameters, ground truth and
        metadata across all batches.
        :return: (dict) keys: 'heatmap', 'scale', 'dataset', 'name', 'gt', 'normalizer'
        '''
        self.network.eval()
        logging.info("Starting model inference")

        # Accumulators start as None; the first batch initializes them via the
        # except-TypeError branch below (torch.cat with None raises TypeError).
        outputs_ = None
        scale_ = None
        num_gt_ = None
        dataset_ = None
        name_ = None
        gt_ = None
        normalizer_ = None

        with torch.no_grad():
            for (images, _, gt, name, dataset, num_gt, split, _, scale_params, normalizer, joint_exist) in tqdm(
                    self.torch_dataloader):

                assert split[0] == 1, "Validation split should be 1."

                outputs, pose_features = self.network(images)
                outputs = outputs[:, -1]  # keep only the final stack / stage prediction

                try:
                    outputs_ = torch.cat((outputs_, outputs.cpu().clone()), dim=0)
                    scale_['scale_factor'] = torch.cat((scale_['scale_factor'], scale_params['scale_factor']), dim=0)
                    scale_['padding_u'] = torch.cat((scale_['padding_u'], scale_params['padding_u']), dim=0)
                    scale_['padding_v'] = torch.cat((scale_['padding_v'], scale_params['padding_v']), dim=0)
                    num_gt_ = torch.cat((num_gt_, num_gt), dim=0)
                    dataset_ = dataset_ + dataset
                    name_ = name_ + name
                    gt_ = torch.cat((gt_, gt), dim=0)
                    normalizer_ = torch.cat((normalizer_, normalizer), dim=0)

                except TypeError:
                    # First batch: initialize the accumulators.
                    outputs_ = outputs.cpu().clone()
                    scale_ = copy.deepcopy(scale_params)
                    num_gt_ = num_gt
                    dataset_ = dataset
                    name_ = name
                    gt_ = gt
                    normalizer_ = normalizer

                # Generate visualizations (256x256) for that batch of images
                if self.conf.viz:
                    hm_uv_stack = []
                    # Compute u,v values from heatmap
                    for i in range(images.shape[0]):
                        hm_uv = self.dataset_obj.estimate_uv(hm_array=outputs.cpu().numpy()[i],
                                                             pred_placeholder=-np.ones_like(gt[i].numpy()))
                        hm_uv_stack.append(hm_uv)
                    hm_uv = np.stack(hm_uv_stack, axis=0)
                    self.visualize_predictions(image=images.numpy(), name=name, dataset=dataset, gt=gt.numpy(), pred=hm_uv)

        # Convert accumulated scale tensors to numpy for downstream indexing.
        scale_['scale_factor'] = scale_['scale_factor'].numpy()
        scale_['padding_u'] = scale_['padding_u'].numpy()
        scale_['padding_v'] = scale_['padding_v'].numpy()

        model_inference = {'heatmap': outputs_.numpy(), 'scale': scale_, 'dataset': dataset_,
                           'name': name_, 'gt': gt_.numpy(), 'normalizer': normalizer_.numpy()}

        return model_inference
    def keypoint(self, infer):
        '''
        Convert accumulated heatmaps into keypoint (u, v) predictions, undo the
        resize/padding applied during preprocessing, and persist ground truth
        and predictions as CSVs.
        :param infer: (dict) output of self.inference()
        :return: (gt_csv, pred_csv) pandas DataFrames with columns ['name', 'dataset', 'normalizer', 'joint', 'uv']
        '''
        heatmap = infer['heatmap']
        scale = infer['scale']
        dataset = infer['dataset']
        name = infer['name']
        gt = infer['gt']
        normalizer = infer['normalizer']

        hm_uv_stack = []

        csv_columns = ['name', 'dataset', 'normalizer', 'joint', 'uv']
        gt_csv = []
        pred_csv = []

        # Iterate over all heatmaps to obtain predictions
        for i in range(gt.shape[0]):
            heatmap_ = heatmap[i]
            gt_uv = gt[i]

            hm_uv = self.dataset_obj.estimate_uv(hm_array=heatmap_, pred_placeholder=-np.ones_like(gt_uv))
            hm_uv_stack.append(hm_uv)

            # Scaling the point ensures that the distance between gt and pred is same as the scale of normalization
            scale_factor = scale['scale_factor'][i]
            padding_u = scale['padding_u'][i]
            padding_v = scale['padding_v'][i]

            # Scaling ground truth: undo padding, then undo resize. The third
            # channel (visibility/placeholder) is divided by 1, i.e. untouched.
            gt_uv_correct = np.copy(gt_uv)
            hm_uv_correct = np.copy(hm_uv)

            gt_uv_correct[:, :, 1] -= padding_v
            gt_uv_correct[:, :, 0] -= padding_u
            gt_uv_correct /= np.array([scale_factor, scale_factor, 1]).reshape(1, 1, 3)

            # Scaling predictions
            hm_uv_correct[:, :, 1] -= padding_v
            hm_uv_correct[:, :, 0] -= padding_u
            hm_uv_correct /= np.array([scale_factor, scale_factor, 1]).reshape(1, 1, 3)

            assert gt_uv_correct.shape == hm_uv_correct.shape, "Mismatch in gt ({}) and prediction ({}) shape".format(
                gt_uv_correct.shape, hm_uv_correct.shape)

            # Iterate over joints: one CSV row per (image, joint) pair.
            for jnt in range(gt_uv_correct.shape[1]):
                gt_entry = {
                    'name': name[i],
                    'dataset': dataset[i],
                    'normalizer': normalizer[i],
                    'joint': self.ind_to_jnt[jnt],
                    'uv': gt_uv_correct[:, jnt, :].astype(np.float32)
                }

                pred_entry = {
                    'name': name[i],
                    'dataset': dataset[i],
                    'normalizer': normalizer[i],
                    'joint': self.ind_to_jnt[jnt],
                    'uv': hm_uv_correct[:, jnt, :].astype(np.float32)
                }

                gt_csv.append(gt_entry)
                pred_csv.append(pred_entry)

        pred_csv = pd.DataFrame(pred_csv, columns=csv_columns)
        gt_csv = pd.DataFrame(gt_csv, columns=csv_columns)

        # Stable, aligned ordering for the per-dataset metric computation.
        pred_csv.sort_values(by='dataset', ascending=True, inplace=True)
        gt_csv.sort_values(by='dataset', ascending=True, inplace=True)

        assert len(pred_csv.index) == len(gt_csv.index), "Mismatch in number of entries in pred and gt dataframes."

        pred_csv.to_csv(os.path.join(self.conf.model['save_path'], 'model_checkpoints/pred.csv'), index=False)
        gt_csv.to_csv(os.path.join(self.conf.model['save_path'], 'model_checkpoints/gt.csv'), index=False)

        logging.info('Pandas dataframe saved successfully.')

        return gt_csv, pred_csv
def visualize_predictions(self, image=None, name=None, dataset=None, gt=None, pred=None):
dataset_viz = {}
dataset_viz['img'] = image
dataset_viz['name'] = name
dataset_viz['display_string'] = name
dataset_viz['split'] = np.ones(image.shape[0])
dataset_viz['dataset'] = dataset
dataset_viz['bbox_coords'] = np.zeros([image.shape[0], 4, 4])
dataset_viz['num_persons'] = np.ones([image.shape[0], 1])
dataset_viz['gt'] = gt
dataset_viz['pred'] = pred
dataset_viz = self.dataset_obj.recreate_images(gt=True, pred=True, external=True, ext_data=dataset_viz)
visualize_image(dataset_viz, save_dir=self.conf.model['save_path'], bbox=False)
    def compute_metrics(self, gt_df=None, pred_df=None):
        '''
        Loads the ground truth and prediction CSVs into memory.
        Evaluates the PCK / PCKh metric per dataset and stores the tables to disk.
        :param gt_df: (pandas.DataFrame) ground-truth keypoints from self.keypoint()
        :param pred_df: (pandas.DataFrame) predicted keypoints from self.keypoint()
        :return: None
        '''
        # Ensure that same datasets have been loaded
        assert all(pred_df['dataset'].unique() == gt_df['dataset'].unique()), \
            "Mismatch in dataset column for gt and pred"

        logging.info('Generating evaluation metrics for dataset:')

        # Iterate over unique datasets
        for dataset_ in gt_df['dataset'].unique():
            logging.info(str(dataset_))

            # Separate out images based on dataset
            pred_ = pred_df.loc[pred_df['dataset'] == dataset_]
            gt_ = gt_df.loc[gt_df['dataset'] == dataset_]

            # Compute scores
            pck_df = PercentageCorrectKeypoint(
                pred_df=pred_, gt_df=gt_, config=self.conf, jnts=list(self.ind_to_jnt.values()))

            # Save the tables; MPII conventionally uses head-normalized PCKh.
            if dataset_ == 'mpii':
                metric_ = 'PCKh'
            else:
                metric_ = 'PCK'

            pck_df.to_csv(os.path.join(self.conf.model['save_path'],
                                       'model_checkpoints/{}_{}.csv'.format(metric_, dataset_)),
                          index=False)

        print("Metrics computation completed.")
def eval(self):
'''
:return:
'''
model_inference = self.inference()
gt_csv, pred_csv = self.keypoint(model_inference)
self.compute_metrics(gt_df=gt_csv, pred_df=pred_csv)
def load_models(conf, load_pose, load_aux, model_dir):
    """
    Instantiate the pose network (Hourglass or HRNet) and optionally the
    auxiliary network, loading saved weights when requested, and move the
    aux_net to the last available GPU.

    :param conf: (ParseConfig) experiment configuration
    :param load_pose: (bool) load pose-network weights from model_dir
    :param load_aux: (bool) load auxiliary-network weights from model_dir
    :param model_dir: (str) directory containing 'model_checkpoints/*'
    :return: (pose_net, aux_net) -- aux_net is None when conf.use_auxnet is False
    """
    # Initialize AuxNet, Hourglass/HRNet
    # Elsewhere, resume training ensures the code creates a copy of the best models from the interrupted run.
    if conf.use_auxnet:
        logging.info('Initializing Auxiliary Network')
        aux_net = AuxNet(arch=conf.architecture['aux_net'])
    else:
        aux_net = None

    if conf.model['type'] == 'hourglass':
        logging.info('Initializing Hourglass Network')
        pose_net = Hourglass(arch=conf.architecture['hourglass'],
                             auxnet=conf.use_auxnet,
                             intermediate_features=conf.architecture['aux_net']['conv_or_avg_pooling'])
        print('Number of parameters (Hourglass): {}\n'.format(count_parameters(pose_net)))
    else:
        logging.info('Initializing HRNet')
        assert conf.model['type'] == 'hrnet', "Currently support 'hourglass' and 'hrnet'."
        pose_net = HRNet(arch=conf.architecture['hrnet'],
                         auxnet=conf.use_auxnet,
                         intermediate_features=conf.architecture['aux_net']['conv_or_avg_pooling'])
        print('Number of parameters (HRNet): {}\n'.format(count_parameters(pose_net)))

    # Load AuxNet modules (Best Model / resume training)
    if load_aux:
        if conf.resume_training:
            logging.info('\n-------------- Resuming training (Loading AuxNet) --------------\n')

            # Load and save the previous best model (copied into the new save path)
            aux_net.load_state_dict(torch.load(
                os.path.join(model_dir, 'model_checkpoints/aux_net_{}.pth'.format(conf.active_learning['algorithm'])),
                map_location='cpu'))
            torch.save(
                aux_net.state_dict(),
                os.path.join(
                    conf.model['save_path'],
                    'model_checkpoints/aux_net_{}.pth'.format(conf.active_learning['algorithm'])))

            # Load the model corresponding to best pose model
            aux_net.load_state_dict(
                torch.load(
                    os.path.join(
                        model_dir, 'model_checkpoints/aux_net_{}_BestPose.pth'.format(conf.active_learning['algorithm'])),
                    map_location='cpu'))

        else:
            logging.info('Loading AuxNet Best Model')
            aux_net.load_state_dict(
                torch.load(os.path.join(
                    model_dir, 'model_checkpoints/aux_net_{}.pth'.format(conf.active_learning['algorithm'])), map_location='cpu'))

    # Load Pose model (code is independent of architecture)
    if load_pose:
        # Load model
        logging.info('Loading Pose model from: ' + model_dir)
        pose_net.load_state_dict(torch.load(os.path.join(model_dir, 'model_checkpoints/pose_net.pth'), map_location='cpu'))
        logging.info("Successfully loaded Pose model.")

        if conf.resume_training:
            logging.info('\n-------------- Resuming training (Loading PoseNet) --------------\n')
            # Seed the new save path with the previous run's best pose weights.
            torch.save(pose_net.state_dict(), os.path.join(conf.model['save_path'], 'model_checkpoints/pose_net.pth'))

    # CUDA support: Single/Multi-GPU
    # Hourglass net and HRNet have CUDA definitions inside __init__(), specify only for aux_net
    # NOTE(review): assumes aux_net is not None whenever this condition holds; if
    # conf.model['aux_net']['train'] is True while conf.use_auxnet is False this
    # raises AttributeError -- verify the config enforces that invariant.
    if conf.model['aux_net']['train'] or load_aux:
        aux_net.cuda(torch.device('cuda:{}'.format(torch.cuda.device_count()-1)))

    logging.info('Successful: Model transferred to GPUs.\n')

    return pose_net, aux_net
def define_hyperparams(conf, pose_model, aux_net):
    """
    Assemble the training hyperparameter dictionary and the Adam optimizer.

    :param conf: (ParseConfig) experiment configuration
    :param pose_model: (torch.nn.Module) pose network whose parameters are optimized
    :param aux_net: (torch.nn.Module or None) auxiliary network; its parameters are
        added as a second optimizer param group only when conf.model['aux_net']['train']
    :return: (dict) with keys: optimizer_config, loss_params, num_epochs, start_epoch,
        optimizer, loss_fn (+ mean_loss_validation when resuming)
    """
    logging.info('Initializing the hyperparameters for the experiment.')

    hyperparameters = {
        'optimizer_config': {
            'lr': conf.experiment_settings['lr'],
            'weight_decay': conf.experiment_settings['weight_decay'],
        },
        'loss_params': {'size_average': True},
        'num_epochs': conf.experiment_settings['epochs'],
        'start_epoch': 0,  # overwritten below when resuming training
    }

    # Decide which parameter groups the optimizer sees.
    if conf.model['aux_net']['train']:
        logging.info('Parameters of AuxNet and PoseNet passed to Optimizer.')
        params_list = [{'params': pose_model.parameters()},
                       {'params': aux_net.parameters()}]
    else:
        logging.info('Parameters of PoseNet passed to Optimizer')
        params_list = [{'params': pose_model.parameters()}]

    hyperparameters['optimizer'] = torch.optim.Adam(params_list, **hyperparameters['optimizer_config'])

    if conf.resume_training:
        logging.info('Loading optimizer state dictionary')
        optim_dict = torch.load(os.path.join(conf.model['load_path'], 'model_checkpoints/optim_best_model.tar'))

        # If the previous experiment trained aux_net, ensure the flag is true for the current experiment
        assert optim_dict['aux_net'] == conf.model['aux_net']['train'], "AuxNet model needed to resume training"

        hyperparameters['optimizer'].load_state_dict(optim_dict['optimizer_load_state_dict'])
        logging.info('Optimizer state loaded successfully.\n')

        logging.info('Optimizer and Training parameters:\n')
        for key in optim_dict:
            if key == 'optimizer_load_state_dict':
                logging.info('Param group length: {}'.format(len(optim_dict[key]['param_groups'])))
            else:
                logging.info('Key: {}\tValue: {}'.format(key, optim_dict[key]))
        logging.info('\n')

        hyperparameters['start_epoch'] = optim_dict['epoch']
        hyperparameters['mean_loss_validation'] = optim_dict['mean_loss_validation']

    # Per-element MSE: callers reduce it themselves (see Train.train_model).
    hyperparameters['loss_fn'] = torch.nn.MSELoss(reduction='none')

    return hyperparameters
def main():
    """
    Control flow for the code: load configuration, datasets and models, run
    active-learning sample selection, (re)build the models, train, and finally
    compute evaluation metrics.
    """
    # 1. Load configuration file --------------------------------------------------------------------------------------
    logging.info('Loading configurations.\n')
    conf = ParseConfig()

    # 2. Loading datasets ---------------------------------------------------------------------------------------------
    logging.info('Loading pose dataset(s)\n')
    dataset_dict = load_hp_dataset(dataset_conf=conf.dataset, model_conf=conf.model)

    # 3. Defining the network -----------------------------------------------------------------------------------------
    logging.info('Initializing (and loading) human pose network and auxiliary network for Active Learning.\n')
    pose_model, aux_net = load_models(conf=conf, load_pose=conf.model['load'], load_aux=conf.model['aux_net']['load'],
                                      model_dir=conf.model['load_path'])

    # 4. Defining the Active Learning library --------------------------------------------------------------------------
    logging.info('Importing active learning object.\n')
    if conf.activelearning_viz:
        activelearning = ActiveLearning_Visualization(conf=conf, pose_net=pose_model, aux_net=aux_net)
    else:
        activelearning = ActiveLearning(conf=conf, pose_net=pose_model, aux_net=aux_net)

    # 5. Defining DataLoader -------------------------------------------------------------------------------------------
    # Sample selection happens inside HumanPoseDataLoader via 'activelearning'.
    logging.info('Defining DataLoader.\n')
    datasets = HumanPoseDataLoader(dataset_dict=dataset_dict, activelearning=activelearning, conf=conf)

    # 5.a: Delete models, activelearning object to remove stray computational graphs (esp. for EGL)
    if conf.activelearning_viz:
        exit()  # visualization-only runs stop after sample selection
    del activelearning
    del pose_model, aux_net
    torch.cuda.empty_cache()

    # Fresh copies of the networks for training, after AL selection is done.
    logging.info('Re-Initializing (and loading) human pose network and auxiliary network.\n')
    pose_model, aux_net = load_models(conf=conf, load_pose=conf.model['load'], load_aux=conf.model['aux_net']['load'],
                                      model_dir=conf.model['load_path'])

    # 6. Defining Hyperparameters, TensorBoard directory ---------------------------------------------------------------
    logging.info('Initializing experiment settings.')
    hyperparameters = define_hyperparams(conf=conf, pose_model=pose_model, aux_net=aux_net)

    if conf.tensorboard:
        writer = SummaryWriter(log_dir=os.path.join(conf.model['save_path'], 'tensorboard'))
    else:
        writer = None

    # 7. Train the model
    if conf.train:
        train_obj = Train(pose_model=pose_model, aux_net=aux_net, hyperparameters=hyperparameters,
                          dataset_obj=datasets, conf=conf, tb_writer=writer)
        train_obj.train_model()
        del train_obj

        # Reload the best model for metric evaluation
        conf.resume_training = False
        pose_model, _ = load_models(conf=conf, load_pose=True, load_aux=False, model_dir=conf.model['save_path'])

    if conf.metric:
        metric_obj = Metric(network=pose_model, dataset_obj=datasets, conf=conf)
        metric_obj.eval()
# Script entry point.
if __name__ == "__main__":
    main()
| 41,564 | 42.342023 | 168 | py |
ActiveLearningForHumanPose | ActiveLearningForHumanPose-main/code/activelearning.py | import os
import cv2
import torch
import torch.utils.data
import numpy as np
import logging
from tqdm import tqdm
from pathlib import Path
from sklearn.metrics import pairwise_distances
from skimage.feature import peak_local_max
from scipy.special import softmax as softmax_fn
from scipy.stats import entropy as entropy_fn
from matplotlib import pyplot as plt
# EGL sampling
import autograd_hacks
from utils import heatmap_loss
from utils import shannon_entropy
from utils import heatmap_generator
class ActiveLearning(object):
    """
    Collection of active learning sampling strategies for human joint localization.

    Every strategy shares the same contract: given the training dictionary it
    loads the previously annotated indices from
    ``<load_path>/model_checkpoints/annotation.npy`` (except ``base``), scores
    the unlabelled pool, appends ``num_images`` newly selected indices, logs the
    per-dataset proportions, saves the combined selection to
    ``<save_path>/model_checkpoints/annotation.npy`` and returns it as an
    int32 numpy array. When ``conf.resume_training`` is set, the previously
    saved selection is returned unchanged.
    """

    def __init__(self, conf, pose_net, aux_net):
        """
        :param conf: experiment configuration object (attribute/dict access)
        :param pose_net: trained pose-estimation network (hourglass-style; returns heatmaps and a features dict)
        :param aux_net: auxiliary network used by learning-loss / aleatoric / VL4Pose scoring
        """
        self.conf = conf
        self.pose_model = pose_net
        self.aux_net = aux_net
        # Number of NEW images to annotate in this active-learning round.
        self.num_images = conf.active_learning['num_images']

        # Networks are only used for inference during sampling.
        self.pose_model.eval()
        # Only these strategies actually query the auxiliary network.
        if conf.active_learning['algorithm'] in ['learning_loss', 'aleatoric', 'vl4pose']:
            self.aux_net.eval()

    def base(self, train, dataset_size):
        """
        Select the initial annotation pool uniformly at random (no model needed).

        :param train: (dict) training-set arrays; uses 'index' and 'dataset'
        :param dataset_size: (dict) per-dataset train sizes, used for the LSP/LSPET split
        :return: (np.ndarray, int32) selected indices
        """
        logging.info('Initializing base dataset.')

        # Set of indices not annotated
        unlabelled_idx = train['index']

        # Determine if per dataset sampling or overall
        if self.conf.dataset['load'] == 'mpii':
            selection = np.random.choice(unlabelled_idx, size=self.num_images, replace=False).astype(np.int32)

        elif self.conf.dataset['load'] == 'lsp':
            # First sample from lsp dataset only; assumes LSP occupies the first
            # block of indices and LSPET the block immediately after it.
            lsp_images = min(dataset_size['lsp']['train'], self.num_images)
            lspet_images = self.num_images - lsp_images

            selection_lsp = np.random.choice(np.arange(dataset_size['lsp']['train']), size=lsp_images, replace=False)
            selection_lspet = np.random.choice(np.arange(dataset_size['lsp']['train'],
                                                         dataset_size['lsp']['train'] + dataset_size['lspet']['train']),
                                               size=lspet_images, replace=False)
            selection = np.concatenate([selection_lsp, selection_lspet], axis=0).astype(np.int32)

        else:
            # Merged dataset
            selection = np.random.choice(unlabelled_idx, size=self.num_images, replace=False).astype(np.int32)

        self._uniquecounts(dataset=train, selection=selection, method='base')
        np.save(file=os.path.join(self.conf.model['save_path'], 'model_checkpoints/annotation.npy'), arr=selection)

        return selection

    def random(self, train, dataset_size):
        """
        Extend the annotated pool with uniformly random unlabelled images.

        :param train: (dict) training-set arrays
        :param dataset_size: unused; kept to maintain the shared method signature
        :return: (np.ndarray, int32) previously annotated plus new indices
        """
        logging.info('Performing random sampling.')

        if self.conf.resume_training:
            return np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        # Load previously annotated images indices
        assert self.conf.model['load'], "Use 'base' to train model from scratch."
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        # Set of indices not annotated
        unlabelled_idx = np.array(list(set(train['index'])-set(annotated_idx)))

        selection = np.random.choice(unlabelled_idx, size=self.num_images, replace=False)
        selection = np.concatenate([annotated_idx, selection], axis=0).astype(np.int32)

        self._uniquecounts(dataset=train, selection=selection, method='random')
        np.save(file=os.path.join(self.conf.model['save_path'], 'model_checkpoints/annotation.npy'), arr=selection)

        return selection

    def coreset_sampling(self, train, dataset_size):
        '''
        Core-Set sampling (k-centre greedy) over the pose network's penultimate
        embeddings; previously annotated images act as the initial centres.

        :return: (np.ndarray, int32) previously annotated plus new indices
        '''
        logging.info('Performing Core-Set sampling.')

        def update_distances(cluster_centers, encoding, min_distances=None):
            '''
            Based on: https://github.com/google/active-learning/blob/master/sampling_methods/kcenter_greedy.py
            Update min distances given cluster centers.

            :param cluster_centers: indices of cluster centers (rows of ``encoding``)
            :param encoding: 2-D array of per-image embeddings
            :param min_distances: running per-image distance to nearest centre, or None on first call
            :return: updated per-image minimum distances (column vector / matrix)
            '''
            if len(cluster_centers) != 0:
                # Update min_distances for all examples given new cluster center.
                x = encoding[cluster_centers]
                dist = pairwise_distances(encoding, x, metric='euclidean')

                if min_distances is None:
                    min_distances = np.min(dist, axis=1).reshape(-1, 1)
                else:
                    min_distances = np.minimum(min_distances, dist)

            return min_distances

        if self.conf.resume_training:
            return np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        assert self.conf.model['load'], "Core-set requires a pretrained model."
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        # Core-set assumes row i of the embedding matrix corresponds to image index i.
        assert np.all(train['index'] == np.arange(train['name'].shape[0]))
        dataset_ = ActiveLearningDataset(train, indices=np.arange(train['name'].shape[0]), conf=self.conf)
        coreset_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)

        pose_encoding = None

        # Part 1: Obtain embeddings for all (labelled, unlabelled images)
        # Disable autograd to speed up inference
        with torch.no_grad():
            for images in tqdm(coreset_dataloader):
                _, pose_features = self.pose_model(images)
                # First batch raises TypeError (cat with None) and seeds the buffer.
                try:
                    pose_encoding = torch.cat((pose_encoding, pose_features['penultimate'].cpu()), dim=0)
                except TypeError:
                    pose_encoding = pose_features['penultimate'].cpu()

        pose_encoding = pose_encoding.squeeze().numpy()
        logging.info('Core-Set encodings computed.')

        # Part 2: k-Centre Greedy
        core_set_budget = self.num_images
        min_distances = None

        assert len(annotated_idx) != 0, "No annotations for previous model found, core-set cannot proceeed."
        min_distances = update_distances(cluster_centers=annotated_idx, encoding=pose_encoding, min_distances=None)

        for _ in tqdm(range(core_set_budget)):
            if len(annotated_idx) == 0:  # Initial choice of point
                # Initialize center with a randomly selected datapoint
                ind = np.random.choice(np.arange(pose_encoding.shape[0]))
            else:
                # Greedy step: farthest point from all current centres.
                ind = np.argmax(min_distances)

            # New examples should not be in already selected since those points
            # should have min_distance of zero to a cluster center.
            min_distances = update_distances(cluster_centers=[ind], encoding=pose_encoding, min_distances=min_distances)
            annotated_idx = np.concatenate([annotated_idx, [ind]], axis=0).astype(np.int32)

        selection = annotated_idx
        self._uniquecounts(dataset=train, selection=selection, method='coreset')
        np.save(file=os.path.join(self.conf.model['save_path'], 'model_checkpoints/annotation.npy'), arr=selection)

        return selection

    def learning_loss_sampling(self, train, dataset_size):
        """
        Select the unlabelled images with the highest predicted loss
        (auxiliary network output), per the Learning Loss strategy.

        :param train: (dict) training-set arrays
        :param dataset_size: unused; kept for the shared signature
        :return: (np.ndarray, int32) previously annotated plus new indices
        """
        logging.info('Performing learning loss sampling.')

        if self.conf.resume_training:
            return np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        assert self.conf.model['load'], "Learning loss requires a previously trained model"
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        # Set of indices not annotated
        unlabelled_idx = np.array(list(set(train['index'])-set(annotated_idx)))

        dataset_ = ActiveLearningDataset(train, indices=unlabelled_idx, conf=self.conf)
        learnloss_dataloader = torch.utils.data.DataLoader(
            dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)

        learnloss_pred = None

        # Prediction and concatenation of the learning loss network outputs
        with torch.no_grad():
            for images in tqdm(learnloss_dataloader):
                _, pose_features = self.pose_model(images)
                learnloss_pred_ = self._aux_net_inference(pose_features)
                learnloss_pred_ = learnloss_pred_.squeeze()

                try:
                    learnloss_pred = torch.cat([learnloss_pred, learnloss_pred_.cpu()], dim=0)
                except TypeError:
                    learnloss_pred = learnloss_pred_.cpu()

        # argsort defaults to ascending
        pred_with_index = np.concatenate([learnloss_pred.numpy().reshape(-1, 1),
                                          unlabelled_idx.reshape(-1, 1)], axis=-1)
        pred_with_index = pred_with_index[pred_with_index[:, 0].argsort()]
        # Highest predicted loss == last rows after ascending sort.
        indices = pred_with_index[-self.num_images:, 1]

        selection = np.concatenate([annotated_idx, indices], axis=0).astype(np.int32)

        self._uniquecounts(dataset=train, selection=selection, method='learning_loss')
        np.save(file=os.path.join(self.conf.model['save_path'], 'model_checkpoints/annotation.npy'), arr=selection)

        return selection

    def expected_gradient_length_sampling(self, train, dataset_size):
        '''
        EGL++ sampling: for each unlabelled image, estimate the expected
        gradient magnitude using the heatmaps of its k nearest labelled
        neighbours (in embedding space) as pseudo-targets, weighted by a
        perplexity-calibrated neighbourhood probability.

        :return: (np.ndarray, int32) previously annotated plus new indices
        '''

        def probability(pair_dist):
            '''
            Computes P(j|i) using Binary Search
            :param pair_dist: (2D Tensor) pairwise distances between samples --> actual dist, not squared
            :return: 2D Tensor containing conditional probabilities
            '''

            def calc_probs_perp(lower_bound, upper_bound, pair_dist):
                # Per-row Gaussian kernel with sigma = midpoint of the search
                # interval; returns the resulting perplexity and row-softmax.
                sigmas = (lower_bound + upper_bound) / 2
                variance = (sigmas ** 2).reshape(-1, 1)
                scaled_pair_dist_neg = -pair_dist / (2 * variance)
                probs_unnormalized = torch.exp(scaled_pair_dist_neg)
                probs_unnormalized = torch.clamp(probs_unnormalized, min=1e-20, max=1.)
                softmax = probs_unnormalized / torch.sum(probs_unnormalized, dim=1, keepdim=True)
                softmax = torch.clamp(softmax, min=1e-30, max=1.)
                entropy = shannon_entropy(softmax)
                # Perplexity = 2 ** entropy, elementwise per row.
                perplexity_hat = torch.pow(2 * torch.ones(n_samples), entropy)
                return perplexity_hat, softmax

            def condition(perplexity_hat, perplexity):
                # True while any row's perplexity is outside the tolerance band.
                mask = torch.lt(torch.abs(perplexity_hat - perplexity), TOLERANCE)
                return False in mask

            # NOTE(review): module-level globals used as scratch state for the
            # nested helpers; created here and deleted before returning.
            global PERPLEXITY, TOLERANCE, n_samples
            tries = 100
            n_samples = pair_dist.shape[0]
            PERPLEXITY = self.conf.active_learning['egl']['perplexity']
            TOLERANCE = self.conf.active_learning['egl']['tolerance'] * torch.ones(n_samples)
            pair_dist = pair_dist ** 2

            lower = torch.zeros(n_samples)
            upper = (torch.max(torch.max(pair_dist), torch.max(pair_dist**0.5))) * torch.ones(n_samples) * 5
            perplexity = PERPLEXITY * torch.ones(n_samples)

            perplexity_hat, probs = calc_probs_perp(lower, upper, pair_dist)

            # Vectorized per-row bisection on sigma, capped at ~100 iterations.
            while condition(perplexity_hat, perplexity):
                if tries < 0:
                    break
                tries -= 1

                mask_gt = torch.gt(perplexity_hat - perplexity, TOLERANCE).type(torch.float32)
                upper_update = upper - torch.mul(mask_gt, (upper - lower) / 2)
                mask_lt = torch.lt(perplexity_hat - perplexity, -TOLERANCE).type(torch.float32)
                lower_update = lower + torch.mul(mask_lt, (upper - lower) / 2)

                upper = upper_update
                lower = lower_update

                perplexity_hat, probs = calc_probs_perp(lower, upper, pair_dist)

            del PERPLEXITY, TOLERANCE, n_samples
            return probs

        logging.info('Performing expected gradient length sampling.')

        # Setup --------------------------------------------------------------------------------------------------------
        if self.conf.resume_training:
            return np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        # Load indices of previously annotated data
        assert self.conf.model['load'], "Expected Gradient Length requires a previously trained model"
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        # Set of indices not annotated
        unlabelled_idx = np.array(list(set(train['index']) - set(annotated_idx)))

        # Part 1: Obtain embeddings and heatmaps for LABELLED data ----------------------------------------------------
        dataset_ = EGLpp_Dataset(dataset_dict=train, conf=self.conf, indices=annotated_idx)
        egl_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
                                                     shuffle=False, num_workers=2)

        logging.info('Computing heatmaps, embedding for labelled images.')
        # Disable autograd to speed up inference
        with torch.no_grad():
            pose_encoding_L = None
            pose_heatmap_L = None
            for images, og_heatmaps in tqdm(egl_dataloader):
                heatmaps, pose_features = self.pose_model(images)
                if self.conf.active_learning['egl']['og_heatmap']:
                    # Replace predictions with the ground-truth heatmaps,
                    # duplicated to mimic the network's two-stack output.
                    heatmaps = torch.stack([og_heatmaps, og_heatmaps], dim=1).to(heatmaps.device)
                try:
                    pose_encoding_L = torch.cat((pose_encoding_L, pose_features['penultimate'].cpu()), dim=0)  # GAP over the 4x4 lyr
                    pose_heatmap_L = torch.cat((pose_heatmap_L, heatmaps.cpu()), dim=0)
                except TypeError:
                    pose_encoding_L = pose_features['penultimate'].cpu()
                    pose_heatmap_L = heatmaps.cpu()

        # Part 2: Obtain embeddings ONLY for UNLABELLED data -----------------------------------------------------------
        logging.info('Computing embeddings (not heatmaps) for unlabelled data')
        dataset_ = ActiveLearningDataset(dataset_dict=train, indices=unlabelled_idx, conf=self.conf)
        egl_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
                                                     shuffle=False, num_workers=2)

        # Disable autograd to speed up inference
        with torch.no_grad():
            pose_encoding_U = None
            for images in tqdm(egl_dataloader):
                _, pose_features = self.pose_model(images)
                try:
                    pose_encoding_U = torch.cat((pose_encoding_U, pose_features['penultimate'].cpu()), dim=0)  # GAP over the 4x4 lyr
                except TypeError:
                    pose_encoding_U = pose_features['penultimate'].cpu()

        # Part 3: Compute the heatmap error between the unlabelled images and its neighbors ----------------------------
        with torch.no_grad():
            pair_dist = torch.cdist(pose_encoding_U, pose_encoding_L, p=2)  # Unlabelled[i] to Labelled[j]
            p_i_given_j = probability(pair_dist)

        k = self.conf.active_learning['egl']['k']
        assert len(p_i_given_j.shape) == 2, "Not a 2-dimensional tensor"
        # Top-k labelled neighbours (and their probabilities) per unlabelled image.
        vals, idx = torch.topk(p_i_given_j, k=k, dim=1, sorted=True, largest=True)

        logging.info('Computing the gradient between the unlabelled and labelled images.')
        pose_gradients_nbrs = torch.zeros(size=(unlabelled_idx.shape[0], k), dtype=torch.float32).to(vals.device)
        assert vals.shape == pose_gradients_nbrs.shape

        # Hooks record per-sample gradients (param.grad1) during backward.
        autograd_hacks.add_hooks(self.pose_model)

        dataset_ = ActiveLearningDataset(dataset_dict=train, indices=unlabelled_idx, conf=self.conf)
        egl_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
                                                     shuffle=False, num_workers=2)

        i_unlabelled = 0
        # Obtain images in batches:
        for unlabelled_images in tqdm(egl_dataloader):
            # Iterate over each unlabelled image
            for i_ in range(unlabelled_images.shape[0]):
                self.pose_model.zero_grad()

                # k copies of the same image, one per neighbour pseudo-target.
                i_unlabelled_copies = torch.cat(k * [unlabelled_images[i_].unsqueeze(0)], dim=0)  # .cuda()
                i_heatmaps, _ = self.pose_model(i_unlabelled_copies)

                loss = heatmap_loss(i_heatmaps, pose_heatmap_L[idx[i_unlabelled]], egl=True).mean()
                loss.backward()
                autograd_hacks.compute_grad1(model=self.pose_model, loss_type='mean')

                with torch.no_grad():
                    grads = torch.zeros((i_heatmaps.shape[0],), dtype=torch.float32)
                    for param in self.pose_model.parameters():
                        try:
                            # Sum of squared gradients for each batch element
                            grads = grads.to(param.grad1.device)
                            grads += (param.grad1 ** 2).sum(dim=list(range(len(param.grad1.shape)))[1:])
                        except AttributeError:
                            # Parameter did not receive a per-sample gradient.
                            continue
                    pose_gradients_nbrs[i_unlabelled] = grads.to(pose_gradients_nbrs.device)

                # Removing gradients due to previous image
                self.pose_model.zero_grad()
                autograd_hacks.clear_backprops(self.pose_model)
                i_unlabelled += 1

        autograd_hacks.remove_hooks(self.pose_model)

        # Expected gradient length = probability-weighted sum over neighbours.
        egl = (vals * pose_gradients_nbrs).sum(dim=1).squeeze()
        vals, idx = torch.topk(egl, k=self.num_images, sorted=False, largest=True)
        assert idx.dim() == 1, "'idx' should be a single dimensional array"

        selection = np.concatenate([annotated_idx, unlabelled_idx[idx.cpu().numpy()]], axis=0).astype(np.int32)
        self._uniquecounts(dataset=train, selection=selection, method='egl')
        np.save(file=os.path.join(self.conf.model['save_path'], 'model_checkpoints/annotation.npy'), arr=selection)

        return selection

    def multipeak_entropy(self, train, dataset_size):
        """
        Select the unlabelled images with the highest multi-peak entropy:
        entropy of the softmax over local maxima of each joint's heatmap,
        summed over joints.

        :param train: (dict) training-set arrays
        :param dataset_size: unused; kept for the shared signature
        :return: (np.ndarray, int32) previously annotated plus new indices
        """
        logging.info('Performing multi-peak entropy sampling.')

        if self.conf.resume_training:
            return np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        assert self.conf.model['load'], "Multipeak entropy was called without a pretrained model"
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        unlabelled_idx = np.array(list(set(train['index']) - set(annotated_idx)))

        # Multi-peak entropy only over the unlabelled set of images
        dataset_ = ActiveLearningDataset(dataset_dict=train, indices=unlabelled_idx, conf=self.conf)
        mpe_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
                                                     shuffle=False, num_workers=2)

        pose_heatmaps = None

        # Part 1: Obtain set of heatmaps
        # Disable autograd to speed up inference
        with torch.no_grad():
            for images in tqdm(mpe_dataloader):
                pose_heatmaps_, _ = self.pose_model(images)
                # Keep only the final stack's heatmaps.
                try:
                    pose_heatmaps = torch.cat((pose_heatmaps, pose_heatmaps_[:, -1, :, :, :].cpu()), dim=0)
                except TypeError:
                    pose_heatmaps = pose_heatmaps_[:, -1, :, :, :].cpu()

        pose_heatmaps = pose_heatmaps.squeeze().numpy()
        logging.info('Heatmaps computed. Calculating multi-peak entropy')

        # Part 2: Multi-peak entropy
        mpe_budget = self.num_images
        mpe_value_per_img = np.zeros(pose_heatmaps.shape[0], dtype=np.float32)

        # e.g. shape of heatmap final is BS x 14 x 64 x 64
        for i in tqdm(range(pose_heatmaps.shape[0])):
            # NOTE(review): ``normalizer`` counts joints with at least one peak
            # but is never used to divide ``entropy`` — confirm intended.
            normalizer = 0
            entropy = 0
            for hm in range(pose_heatmaps.shape[1]):
                loc = peak_local_max(pose_heatmaps[i, hm], min_distance=5, num_peaks=5)
                peaks = pose_heatmaps[i, hm][loc[:, 0], loc[:, 1]]
                if peaks.shape[0] > 0:
                    normalizer += 1
                    peaks = softmax_fn(peaks)
                    entropy += entropy_fn(peaks)
            mpe_value_per_img[i] = entropy

        mpe_value_per_img = torch.from_numpy(mpe_value_per_img)
        vals, idx = torch.topk(mpe_value_per_img, k=mpe_budget, sorted=False, largest=True)
        assert idx.dim() == 1, "'idx' should be a single dimensional array"

        annotated_idx = np.concatenate([annotated_idx, unlabelled_idx[idx.numpy()]], axis=0).astype(np.int32)
        selection = annotated_idx

        self._uniquecounts(dataset=train, selection=selection, method='multipeak_entropy')
        np.save(file=os.path.join(self.conf.model['save_path'], 'model_checkpoints/annotation.npy'), arr=selection)

        return selection

    def aleatoric_uncertainty(self, train, dataset_size):
        """
        Select the unlabelled images with the highest aleatoric uncertainty
        (mean of the auxiliary network's per-joint predictions), following
        Kendall and Gal.

        :param train: (dict) training-set arrays
        :param dataset_size: unused; kept for the shared signature
        :return: (np.ndarray, int32) previously annotated plus new indices
        """
        logging.info('Performing Uncertainty: Kendall and Gal sampling.')

        if self.conf.resume_training:
            return np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        assert self.conf.model['load'], "Aleatoric uncertainty requires a previously trained model"
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        # Set of indices not annotated
        unlabelled_idx = np.array(list(set(train['index'])-set(annotated_idx)))

        dataset_ = ActiveLearningDataset(train, indices=unlabelled_idx, conf=self.conf)
        aleatoric_dataloader = torch.utils.data.DataLoader(
            dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)

        aleatoric_pred = None

        # Prediction and concatenation of the aleatoric predictions
        with torch.no_grad():
            for images in tqdm(aleatoric_dataloader):
                _, pose_features = self.pose_model(images)
                aleatoric_pred_ = self._aux_net_inference(pose_features)
                aleatoric_pred_ = aleatoric_pred_.squeeze()

                try:
                    aleatoric_pred = torch.cat([aleatoric_pred, aleatoric_pred_.cpu()], dim=0)
                except TypeError:
                    aleatoric_pred = aleatoric_pred_.cpu()

        # Average the per-joint uncertainty into one scalar per image.
        aleatoric_pred = aleatoric_pred.mean(dim=-1)

        # argsort defaults to ascending
        pred_with_index = np.concatenate([aleatoric_pred.numpy().reshape(-1, 1),
                                          unlabelled_idx.reshape(-1, 1)], axis=-1)
        pred_with_index = pred_with_index[pred_with_index[:, 0].argsort()]
        # Highest uncertainty == last rows after ascending sort.
        indices = pred_with_index[-self.num_images:, 1]

        selection = np.concatenate([annotated_idx, indices], axis=0).astype(np.int32)

        self._uniquecounts(dataset=train, selection=selection, method='aleatoric')
        np.save(file=os.path.join(self.conf.model['save_path'], 'model_checkpoints/annotation.npy'), arr=selection)

        return selection

    def vl4pose(self, train, dataset_size):
        """
        VL4Pose sampling: build a skeleton tree per image from heatmap peaks
        and auxiliary-network likelihood parameters, then select the images
        with the LOWEST maximum pose log-likelihood.

        :param train: (dict) training-set arrays
        :param dataset_size: unused; kept for the shared signature
        :return: (np.ndarray, int32) previously annotated plus new indices
        """
        logging.info('Performing VL4Pose sampling.')

        if self.conf.resume_training:
            return np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        assert self.conf.model['load'], "VL4Pose requires a previously trained model"
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))

        # Set of indices not annotated
        unlabelled_idx = np.array(list(set(train['index'])-set(annotated_idx)))

        # Joint-name <-> heatmap-channel lookups.
        j2i = {'head': 0, 'neck': 1, 'lsho': 2, 'lelb': 3, 'lwri': 4, 'rsho': 5, 'relb': 6, 'rwri': 7, 'lhip': 8,
               'lknee': 9, 'lankl': 10, 'rhip': 11, 'rknee': 12, 'rankl': 13}

        i2j = {0: 'head', 1: 'neck', 2: 'lsho', 3: 'lelb', 4: 'lwri', 5: 'rsho', 6: 'relb', 7: 'rwri',
               8: 'lhip', 9: 'lknee', 10: 'lankl', 11: 'rhip', 12: 'rknee', 13: 'rankl'}

        # MPII-style skeletons carry two extra joints.
        if self.conf.dataset['load'] == 'mpii' or self.conf.dataset['load'] == 'merged':
            j2i['pelvis'] = 14
            j2i['thorax'] = 15
            i2j[14] = 'pelvis'
            i2j[15] = 'thorax'

        # Parent-child links of the skeleton tree, rooted at the head.
        if self.conf.dataset['load'] == 'mpii':
            links = [[j2i['head'], j2i['neck']], [j2i['neck'], j2i['thorax']], [j2i['thorax'], j2i['pelvis']],
                     [j2i['thorax'], j2i['lsho']], [j2i['lsho'], j2i['lelb']], [j2i['lelb'], j2i['lwri']],
                     [j2i['thorax'], j2i['rsho']], [j2i['rsho'], j2i['relb']], [j2i['relb'], j2i['rwri']],
                     [j2i['pelvis'], j2i['lhip']], [j2i['lhip'], j2i['lknee']], [j2i['lknee'], j2i['lankl']],
                     [j2i['pelvis'], j2i['rhip']], [j2i['rhip'], j2i['rknee']], [j2i['rknee'], j2i['rankl']]]
        else:
            links = [[j2i['head'], j2i['neck']],
                     [j2i['neck'], j2i['lsho']], [j2i['lsho'], j2i['lelb']], [j2i['lelb'], j2i['lwri']],
                     [j2i['neck'], j2i['rsho']], [j2i['rsho'], j2i['relb']], [j2i['relb'], j2i['rwri']],
                     [j2i['lsho'], j2i['lhip']], [j2i['lhip'], j2i['lknee']], [j2i['lknee'], j2i['lankl']],
                     [j2i['rsho'], j2i['rhip']], [j2i['rhip'], j2i['rknee']], [j2i['rknee'], j2i['rankl']]]

        dataset_ = ActiveLearningDataset(train, indices=unlabelled_idx, conf=self.conf)
        vl4pose_dataloader = torch.utils.data.DataLoader(
            dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)

        pose_heatmaps = None
        likelihood_params = None

        # Part 1: Obtain set of heatmaps
        # Disable autograd to speed up inference
        with torch.no_grad():
            for images in tqdm(vl4pose_dataloader):
                pose_heatmaps_, pose_features_ = self.pose_model(images)
                likelihood_pred_ = self._aux_net_inference(pose_features_)

                try:
                    pose_heatmaps = torch.cat((pose_heatmaps, pose_heatmaps_[:, -1, :, :, :].cpu()), dim=0)
                    likelihood_params = torch.cat([likelihood_params, likelihood_pred_.cpu().reshape(images.shape[0], len(links), 2)], dim=0)
                except TypeError:
                    pose_heatmaps = pose_heatmaps_[:, -1, :, :, :].cpu()
                    likelihood_params = likelihood_pred_.cpu().reshape(images.shape[0], len(links), 2)

        pose_heatmaps = pose_heatmaps.squeeze().numpy()
        likelihood_params = likelihood_params.numpy()
        del vl4pose_dataloader

        logging.info('Heatmaps computed. Calculating likelihood of pose.')

        # Wrap the per-image likelihood computation in a Dataset so a
        # DataLoader's workers parallelize it.
        keypoint_compute = Keypoint_ParallelWrapper(hm=pose_heatmaps, param=likelihood_params, j2i=j2i, i2j=i2j,
                                                    links=links, vl4pose_config=self.conf.active_learning['vl4pose'])
        vl4pose_dataloader = torch.utils.data.DataLoader(keypoint_compute, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)

        max_likelihood = np.zeros(shape=pose_heatmaps.shape[0])
        ptr = 0

        for likelihoods, trace in tqdm(vl4pose_dataloader):
            max_likelihood[ptr: ptr + likelihoods.shape[0]] = likelihoods.squeeze().numpy()
            ptr += likelihoods.shape[0]

        loglikelihood_with_index = np.concatenate([max_likelihood.reshape(-1, 1),
                                                   unlabelled_idx.reshape(-1, 1)], axis=-1)
        loglikelihood_with_index = loglikelihood_with_index[loglikelihood_with_index[:, 0].argsort()]

        # Select the images with the lowest likelihood
        indices = loglikelihood_with_index[:self.num_images, 1]

        selection = np.concatenate([annotated_idx, indices], axis=0).astype(np.int32)

        self._uniquecounts(dataset=train, selection=selection, method='vl4pose')
        np.save(file=os.path.join(self.conf.model['save_path'], 'model_checkpoints/annotation.npy'), arr=selection)

        return selection

    def _uniquecounts(self, dataset, selection, method):
        """
        Log how many selected images come from each source dataset.

        :param dataset: (dict) training-set arrays; reads the 'dataset' column
        :param selection: (np.ndarray) selected image indices
        :param method: (str) sampling-method name written to the report
        """
        # ['dataset'] is the dataset name such as mpii, lsp, lspet for an image
        unique, counts = np.unique(dataset['dataset'][selection], return_counts=True)
        proportion = {key: value for (key, value) in zip(unique, counts)}

        # NOTE(review): mode "x" raises FileExistsError if the report already
        # exists — presumably each run uses a fresh save_path; confirm.
        with open(os.path.join(self.conf.model['save_path'], 'model_checkpoints/sampling_proportion.txt'), "x") as file:
            file.write('{} Sampling\n'.format(method))
            [file.write("{}: {}\n".format(key, proportion[key])) for key in proportion.keys()]

    def _aux_net_inference(self, pose_features):
        """
        Run the auxiliary network on features extracted by the pose network.

        :param pose_features: (dict) feature maps returned by the pose model;
            uses 'penultimate' or the 'feature_i' pyramid depending on config
        :return: auxiliary network output tensor
        """
        extractor = self.conf.architecture['aux_net']['conv_or_avg_pooling']
        with torch.no_grad():
            if extractor == 'avg':
                # Transfer to GPU where auxiliary network is stored
                encodings = pose_features['penultimate']
            else:
                # Flatten each pyramid level spatially and concatenate them
                # (deepest level first) along the spatial axis.
                depth = len(self.conf.architecture['aux_net']['spatial_dim'])
                encodings = torch.cat(
                    [pose_features['feature_{}'.format(i)].reshape(
                        pose_features['feature_{}'.format(i)].shape[0], pose_features['feature_{}'.format(i)].shape[1], -1)
                        for i in range(depth, 0, -1)],
                    dim=2)
            aux_out = self.aux_net(encodings)
        return aux_out
class ActiveLearningDataset(torch.utils.data.Dataset):
    """
    Image-only dataset used while scoring the unlabelled pool.

    Each item is a person-centred crop (bounding box enlarged 1.75x around its
    centre, clamped to the image), resized with preserved aspect ratio into a
    zero-padded 256x256x3 canvas, and returned as a float32 CPU tensor scaled
    by 1/256. No annotations are returned.
    """

    def __init__(self, dataset_dict, indices, conf):
        """Keep only the rows of ``dataset_dict`` selected by ``indices``."""
        self.names = dataset_dict['name'][indices]
        self.bounding_box = dataset_dict['bbox_coords'][indices]
        self.dataset = dataset_dict['dataset'][indices]
        # (x, y) -> (u, v), i.e. (row, col) order used for array indexing.
        self.xy_to_uv = lambda xy: (xy[1], xy[0])

    def __len__(self):
        """Number of images served."""
        return self.dataset.shape[0]

    def __getitem__(self, item):
        """Load one image, crop around the first person's box, resize, normalize."""
        data_root = Path(os.getcwd()).parent
        image_dirs = {
            'mpii': os.path.join(data_root, 'data', 'mpii'),
            'lsp': os.path.join(data_root, 'data', 'lsp'),
            'lspet': os.path.join(data_root, 'data', 'lspet'),
        }

        source = self.dataset[item]
        name = self.names[item]

        if source == 'mpii':
            # MPII names carry a per-person suffix; the file is the prefix.
            image = plt.imread(os.path.join(image_dirs['mpii'], 'images', '{}.jpg'.format(name.split('_')[0])))
        elif source == 'lsp':
            image = plt.imread(os.path.join(image_dirs['lsp'], 'images', name))
        else:
            image = plt.imread(os.path.join(image_dirs['lspet'], 'images', name))

        # Bounding box of the first person, given as (x, y) corner pairs.
        [min_x, min_y, max_x, max_y] = self.bounding_box[item][0]
        corner_tl = self.xy_to_uv(np.array([min_x, min_y]))
        corner_br = self.xy_to_uv(np.array([max_x, max_y]))

        u_lo, v_lo = corner_tl[0], corner_tl[1]
        u_hi, v_hi = corner_br[0], corner_br[1]

        centre = np.array([(u_lo + u_hi) / 2, (v_lo + v_hi) / 2])
        box_h = u_hi - u_lo
        box_w = v_hi - v_lo

        # Enlarge the box around its centre, then clamp to image bounds.
        scale = 1.75
        crop_tl = np.array([centre[0] - (scale * box_h / 2), centre[1] - (scale * box_w / 2)])
        crop_br = np.array([centre[0] + (scale * box_h / 2), centre[1] + (scale * box_w / 2)])

        img_shape = np.array(image.shape)
        crop_tl = np.maximum(np.array([0, 0], dtype=np.int16), crop_tl.astype(np.int16))
        crop_br = np.minimum(img_shape.astype(np.int16)[:-1], crop_br.astype(np.int16))

        image = image[crop_tl[0]: crop_br[0], crop_tl[1]: crop_br[1], :]
        image = self.resize_image(image, target_size=[256, 256, 3])

        return torch.tensor(data=image / 256.0, dtype=torch.float32, device='cpu')

    def resize_image(self, image_=None, target_size=None):
        """
        Aspect-preserving resize of ``image_`` into ``target_size``, centred
        on a zero-filled canvas; returns the padded array.
        """
        src_aspect = image_.shape[0] / image_.shape[1]
        dst_aspect = target_size[0] / target_size[1]

        # Scale by whichever dimension is the binding constraint.
        if src_aspect > dst_aspect:
            scale_factor = target_size[0] / image_.shape[0]
        else:
            scale_factor = target_size[1] / image_.shape[1]

        new_h = int(image_.shape[0] * scale_factor)
        new_w = int(image_.shape[1] * scale_factor)

        pad_top = int((target_size[0] - new_h) / 2)
        pad_left = int((target_size[1] - new_w) / 2)

        canvas = np.zeros(target_size, dtype=image_.dtype)
        # cv2.resize takes (width, height), i.e. reversed array order.
        resized = cv2.resize(image_, (new_w, new_h))
        canvas[pad_top: resized.shape[0] + pad_top,
               pad_left: resized.shape[1] + pad_left, :] = resized
        return canvas
class EGLpp_Dataset(torch.utils.data.Dataset):
    """
    Dataset used by EGL++ over the LABELLED pool: returns the cropped, resized
    image together with its ground-truth heatmaps (64x64, scaled to ``hm_peak``).
    Unlike ActiveLearningDataset, the crop is a square window (2.0x the larger
    bounding-box side) and the ground-truth joints are shifted/scaled to match.
    """
    def __init__(self, dataset_dict, conf, indices=None):
        """
        :param dataset_dict: (dict) training-set arrays ('name', 'bbox_coords', 'dataset', 'gt')
        :param conf: experiment configuration (occlusion flag, heatmap peak value)
        :param indices: (np.ndarray) subset of rows to serve
        """
        self.names = dataset_dict['name'][indices]
        self.bounding_box = dataset_dict['bbox_coords'][indices]
        self.dataset = dataset_dict['dataset'][indices]
        self.gt = dataset_dict['gt'][indices]

        self.occlusion = conf.experiment_settings['occlusion']
        # Output heatmap resolution (rows, cols)
        self.hm_shape = [64, 64]
        # Peak value the unit-height heatmaps are scaled to
        self.hm_peak = conf.experiment_settings['hm_peak']

        # (x, y) -> (u, v) == (row, col) conversion
        self.xy_to_uv = lambda xy: (xy[1], xy[0])

    def __len__(self):
        return self.dataset.shape[0]

    def __getitem__(self, item):
        '''
        Load one labelled image, crop a square window around the first person,
        adjust the ground truth to crop coordinates, resize, and generate
        target heatmaps.

        :param item: (int) dataset index
        :return: (image tensor [256x256x3, float32], heatmap tensor [float32])
        '''
        root = Path(os.getcwd()).parent
        mpii_path = os.path.join(root, 'data', 'mpii')
        lsp_path = os.path.join(root, 'data', 'lsp')
        lspet_path = os.path.join(root, 'data', 'lspet')

        dataset = self.dataset[item]
        name = self.names[item]

        if dataset == 'mpii':
            # MPII names carry a per-person suffix; the file is the prefix.
            image = plt.imread(os.path.join(mpii_path, 'images', '{}.jpg'.format(name.split('_')[0])))
        elif dataset == 'lsp':
            image = plt.imread(os.path.join(lsp_path, 'images', name))
        else:
            image = plt.imread(os.path.join(lspet_path, 'images', name))

        bounding_box = self.bounding_box[item]
        # NOTE(review): self.gt[item] is a view — the loop below mutates the
        # stored ground truth in place; presumably intended, confirm.
        gt = self.gt[item]

        # Determine crop
        img_shape = np.array(image.shape)

        # Bounding box for the first person
        [min_x, min_y, max_x, max_y] = bounding_box[0]

        tl_uv = self.xy_to_uv(np.array([min_x, min_y]))
        br_uv = self.xy_to_uv(np.array([max_x, max_y]))
        min_u = tl_uv[0]
        min_v = tl_uv[1]
        max_u = br_uv[0]
        max_v = br_uv[1]

        centre = np.array([(min_u + max_u) / 2, (min_v + max_v) / 2])
        height = max_u - min_u
        width = max_v - min_v

        # Square window: 2x the larger bounding-box side
        scale = 2.0
        window = max(scale * height, scale * width)

        top_left = np.array([centre[0] - (window / 2), centre[1] - (window / 2)])
        bottom_right = np.array([centre[0] + (window / 2), centre[1] + (window / 2)])

        # Clamp the window to the image bounds
        top_left = np.maximum(np.array([0, 0], dtype=np.int16), top_left.astype(np.int16))
        bottom_right = np.minimum(img_shape.astype(np.int16)[:-1], bottom_right.astype(np.int16))

        # Cropping the image and adjusting the ground truth
        image = image[top_left[0]: bottom_right[0], top_left[1]: bottom_right[1], :]

        for person in range(gt.shape[0]):
            for joint in range(gt.shape[1]):
                # Convert (x, y) to (u, v), shift to crop coordinates, keep the
                # original visibility flag as the third component.
                gt_uv = self.xy_to_uv(gt[person][joint])
                gt_uv = gt_uv - top_left
                gt[person][joint] = np.concatenate([gt_uv, np.array([gt[person][joint][2]])], axis=0)

        # Resize the image
        image, gt = self.resize_image(image, gt, target_size=[256, 256, 3])

        heatmaps, joint_exist = heatmap_generator(
            joints=np.copy(gt), occlusion=self.occlusion, hm_shape=self.hm_shape, img_shape=image.shape)

        heatmaps = self.hm_peak * heatmaps

        return torch.tensor(data=image / 256.0, dtype=torch.float32, device='cpu'),\
               torch.tensor(data=heatmaps, dtype=torch.float32, device='cpu')

    def resize_image(self, image_=None, gt=None, target_size=None):
        '''
        Aspect-preserving resize of ``image_`` into ``target_size`` with
        symmetric zero padding; scales and shifts ``gt`` (mutated in place)
        to match.

        :return: (padded image, adjusted ground truth)
        '''
        # Compute the aspect ratios
        image_aspect_ratio = image_.shape[0] / image_.shape[1]
        tgt_aspect_ratio = target_size[0] / target_size[1]

        # Compare the original and target aspect ratio
        if image_aspect_ratio > tgt_aspect_ratio:
            # If target aspect ratio is smaller, scale the first dim
            scale_factor = target_size[0] / image_.shape[0]
        else:
            # If target aspect ratio is bigger or equal, scale the second dim
            scale_factor = target_size[1] / image_.shape[1]

        # Compute the padding to fit the target size
        pad_u = (target_size[0] - int(image_.shape[0] * scale_factor))
        pad_v = (target_size[1] - int(image_.shape[1] * scale_factor))

        output_img = np.zeros(target_size, dtype=image_.dtype)

        # Write scaled size in reverse order because opencv resize
        scaled_size = (int(image_.shape[1] * scale_factor), int(image_.shape[0] * scale_factor))

        padding_u = int(pad_u / 2)
        padding_v = int(pad_v / 2)

        im_scaled = cv2.resize(image_, scaled_size)
        # logging.debug('Scaled, pre-padding size: {}'.format(im_scaled.shape))
        output_img[padding_u : im_scaled.shape[0] + padding_u,
                   padding_v : im_scaled.shape[1] + padding_v, :] = im_scaled

        # In-place update of gt: scale (u, v), keep visibility, shift by padding
        gt *= np.array([scale_factor, scale_factor, 1]).reshape(1, 1, 3)
        gt[:, :, 0] += padding_u
        gt[:, :, 1] += padding_v

        return output_img, gt
class Keypoint_ParallelWrapper(torch.utils.data.Dataset):
    """
    Dataset wrapper that lets a DataLoader's worker processes parallelize the
    VL4Pose likelihood computation across images.

    ``__getitem__`` builds one Keypoint node per joint from the image's
    heatmap peaks, wires the nodes into the skeleton tree described by
    ``links``, and returns the maximum log-likelihood (from the 'head' root)
    together with the trace of the selected peak per joint.
    """

    def __init__(self, hm, param, j2i, i2j, links, vl4pose_config):
        """Store per-image heatmaps, likelihood params and skeleton lookups."""
        self.hm = hm
        self.param = param
        self.j2i = j2i
        self.i2j = i2j
        self.links = links
        self.config = vl4pose_config

    def __len__(self):
        """One item per image."""
        return self.hm.shape[0]

    def __getitem__(self, i):
        """Compute (max log-likelihood, trace) for image ``i``."""
        image_heatmaps = self.hm[i]
        image_params = self.param[i]

        # One Keypoint per joint: local-maxima candidate locations plus their
        # softmax-normalized confidences.
        nodes = {}
        for joint_name in self.j2i.keys():
            joint_hm = image_heatmaps[self.j2i[joint_name]]
            candidate_locs = peak_local_max(joint_hm,
                                            min_distance=self.config['min_distance'],
                                            num_peaks=self.config['num_peaks'])
            confidences = softmax_fn(joint_hm[candidate_locs[:, 0], candidate_locs[:, 1]])
            nodes[joint_name] = Keypoint(name=joint_name, loc=candidate_locs, peaks=confidences)

        # Wire the skeleton: each parent receives the link's likelihood
        # parameters and a reference to the child node.
        for link_idx, (parent, child) in enumerate(self.links):
            nodes[self.i2j[parent]].parameters.append(image_params[link_idx])
            nodes[self.i2j[parent]].children.append(nodes[self.i2j[child]])

        best_ll, trace = nodes['head'].run_likelihood()
        return best_ll, trace
class Keypoint(object):
    """A node in the skeleton tree used for VL4Pose likelihood inference.

    Each node stores the candidate 2D locations for one joint (``loc``),
    their confidences (``peaks``), the Gaussian link parameters to each
    child ([mean, log-variance]) and the child nodes themselves.  Inference
    is run recursively starting from the 'head' root node.

    NOTE: the final ``return`` of ``compute_likelihood_given_parent`` was
    corrupted by pasted metadata in the previous revision; restored here.
    """

    def __init__(self, name, loc, peaks):
        self.name = name        # joint name, e.g. 'head'
        self.loc = loc          # (K, 2) candidate (u, v) locations
        self.peaks = peaks      # (K,) confidence per candidate
        self.children = []      # child Keypoint nodes
        self.parameters = []    # per-child Gaussian link params [mu, log_var]

    def run_likelihood(self):
        """Evaluate the tree likelihood from the root ('head') node.

        :return: (summed log-likelihood over root candidates, trace dict
            mapping each joint name to its best candidate index and
            '<name>_uv' to the corresponding location)
        """
        assert self.name == 'head'
        likelihood_per_location = []
        per_location_trace = []
        for location in range(self.loc.shape[0]):
            log_ll = np.log(self.peaks[location])
            per_child_trace = []
            for child in range(len(self.children)):
                child_ll, joint_trace = self.children[child].compute_likelihood_given_parent(
                    self.loc[location], self.parameters[child])
                log_ll += child_ll
                per_child_trace.append(joint_trace)
            likelihood_per_location.append(log_ll)
            per_location_trace.append(per_child_trace)
        likelihood_per_location = np.array(likelihood_per_location)
        # Keep the trace of the best root candidate; the value returned is the
        # SUM over candidates, not the max.
        # NOTE(review): sum-vs-max looks deliberate upstream -- confirm against
        # the VL4Pose reference implementation before changing.
        return_trace = {}
        for child_trace in per_location_trace[np.argmax(likelihood_per_location)]:
            return_trace.update(child_trace)
        return_trace[self.name] = np.argmax(likelihood_per_location)
        return_trace['{}_uv'.format(self.name)] = self.loc[np.argmax(likelihood_per_location)]
        return np.sum(likelihood_per_location), return_trace

    def compute_likelihood_given_parent(self, parent_location, gaussian_params):
        """Recursively score this joint's candidates given a parent location.

        For each candidate, evaluates a Gaussian log-density on the distance
        to the parent (``gaussian_params`` = [mean, log-variance]) plus the
        candidate's own log-confidence, then recurses into children.

        :param parent_location: (2,) chosen parent candidate location
        :param gaussian_params: [mu, log_var] for the parent->self link
        :return: (summed log-likelihood over candidates, best-candidate trace)
        """
        likelihood_per_location = []
        per_location_trace = []
        for location in range(self.loc.shape[0]):
            # Gaussian log-density of the bone length, up to the -0.5 factor.
            log_ll = np.log(2 * np.pi) + gaussian_params[1]
            log_ll += (gaussian_params[0] - np.linalg.norm(parent_location - self.loc[location]))**2 \
                * np.exp(-gaussian_params[1])
            log_ll *= -0.5
            log_ll += np.log(self.peaks[location])
            if len(self.children) == 0:
                likelihood_per_location.append(log_ll)
            else:
                per_child_trace = []
                for child in range(len(self.children)):
                    child_ll, joint_trace = self.children[child].compute_likelihood_given_parent(
                        self.loc[location], self.parameters[child])
                    log_ll += child_ll
                    per_child_trace.append(joint_trace)
                likelihood_per_location.append(log_ll)
                per_location_trace.append(per_child_trace)
        likelihood_per_location = np.array(likelihood_per_location)
        if len(self.children) == 0:
            # Leaf: trace is just this joint's best candidate.
            return np.sum(likelihood_per_location), {self.name: np.argmax(likelihood_per_location),
                                                     '{}_uv'.format(self.name): self.loc[np.argmax(likelihood_per_location)]}
        return_trace = {}
        for child_trace in per_location_trace[np.argmax(likelihood_per_location)]:
            return_trace.update(child_trace)
        return_trace[self.name] = np.argmax(likelihood_per_location)
        return_trace['{}_uv'.format(self.name)] = self.loc[np.argmax(likelihood_per_location)]
        return np.sum(likelihood_per_location), return_trace
import os
import copy
import logging
from pathlib import Path
import cv2
import scipy.io
import numpy as np
from tqdm import tqdm
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
import torch
import torch.utils.data
import albumentations as albu
from utils import heatmap_generator
from utils import uv_from_heatmap
# Canonical 14-joint ordering shared by all datasets in this module; the
# reverse map is derived from the forward map so the two can never disagree.
jnt_to_ind = {name: idx for idx, name in enumerate(
    ['head', 'neck', 'lsho', 'lelb', 'lwri', 'rsho', 'relb', 'rwri',
     'lhip', 'lknee', 'lankl', 'rhip', 'rknee', 'rankl'])}
ind_to_jnt = {idx: name for name, idx in jnt_to_ind.items()}
def load_mpii(mpii_conf, del_extra_jnts):
    """Load MPII annotations (Matlab ``joints.mat``) into a dictionary.

    Converts the Matlab structure into ``{'mpii': {...}}`` with per-image
    ground truth, PCKh head-box normalizers, joint counts, person counts and
    scale/objpos metadata.  Results are cached to ``cached/mpii_cache_*.npy``
    and reloaded when the cached configuration matches.

    :param mpii_conf: MPII section of the dataset configuration
        (uses ``lambda_head`` and ``precached``).
    :param del_extra_jnts: if True, drop the MPII-only 'pelvis' and 'thorax'
        joints so the format matches LSP/LSPET (14 joints).
    :return: ``{'mpii': {...}}`` ready for ``HumanPoseDataLoader``.
    """
    # Lambda_head brings the head keypoint annotation closer to the neck
    lambda_head = mpii_conf['lambda_head']
    precached_mpii = mpii_conf['precached']
    mpii_only = False if del_extra_jnts else True
    root = Path(os.getcwd()).parent
    dataset_path = os.path.join(root, 'data', 'mpii')
    # Adds unique joints corresponding to MPII
    if not del_extra_jnts:
        global jnt_to_ind, ind_to_jnt
        jnt_to_ind['pelvis'] = 14
        jnt_to_ind['thorax'] = 15
        ind_to_jnt[14] = 'pelvis'
        ind_to_jnt[15] = 'thorax'
    # Load MPII images directly into memory
    if precached_mpii:
        logging.info('Loading precached MPII.')
        if mpii_only: string = '16jnts'
        else: string = '14jnts'
        img_dict = np.load(os.path.join(root, 'cached', 'mpii_cache_{}.npy'.format(string)), allow_pickle=True)
        img_dict = img_dict[()]
        try:
            # Only reuse the cache when it was built with the same settings.
            assert img_dict['mpii']['del_extra_jnts'] == del_extra_jnts
            assert img_dict['mpii']['lambda_head'] == lambda_head
            # Why am I deleting this? :'( ANSWER - TO MAINTAIN COMPATIBILITY IF DATASETS ARE MERGED
            del img_dict['mpii']['del_extra_jnts']
            del img_dict['mpii']['lambda_head']
            return img_dict
        except AssertionError:
            logging.warning('Cannot load MPII due to different configurations.')
            logging.warning('Loading MPII from scratch.\n')
    # MPII annotation channel index -> joint name
    mpii_idx_to_jnt = {0: 'rankl', 1: 'rknee', 2: 'rhip', 5: 'lankl', 4: 'lknee', 3: 'lhip',
                       6: 'pelvis', 7: 'thorax', 8: 'neck', 11: 'relb', 10: 'rwri', 9: 'head',
                       12: 'rsho', 13: 'lsho', 14: 'lelb', 15: 'lwri'}
    max_person_in_img = 0
    # Create a template for GT and Pred to follow
    mpii_template = dict([(mpii_idx_to_jnt[i], []) for i in range(16)])
    img_dict = {'mpii': {'img': [], 'img_name': [], 'img_pred': [], 'img_gt': [], 'normalizer': [],
                         'dataset': [], 'num_gt': [], 'split': [], 'scale': [], 'objpos': [], 'num_ppl': []}}
    # Load MPII
    matlab_mpii = scipy.io.loadmat(os.path.join(dataset_path, 'joints.mat'), struct_as_record=False)['RELEASE'][0, 0]
    # Iterate over all images
    for img_idx in tqdm(range(matlab_mpii.__dict__['annolist'][0].shape[0])):
        # Load annotation data per image
        annotation_mpii = matlab_mpii.__dict__['annolist'][0, img_idx]
        train_test_mpii = matlab_mpii.__dict__['img_train'][0, img_idx].flatten()[0]
        person_id = matlab_mpii.__dict__['single_person'][img_idx][0].flatten()
        num_people = len(person_id)
        max_person_in_img = max(max_person_in_img, len(person_id))
        # Read image
        img_name = annotation_mpii.__dict__['image'][0, 0].__dict__['name'][0]
        try:
            image = plt.imread(os.path.join(dataset_path, 'images', img_name))
        except FileNotFoundError:
            logging.warning('Could not load filename: {}'.format(img_name))
            continue
        # Create a deepcopy of the template to avoid overwriting the original
        gt_per_image = copy.deepcopy(mpii_template)
        num_joints_persons = []
        normalizer_persons = []
        scale = []
        objpos = []
        # Default is that there are no annotated people in the image
        annotated_person_flag = False
        # Iterate over each person
        for person in (person_id - 1):
            try:
                per_person_jnts = []
                # If annopoints not present, then annotations for that person absent. Throw exception and skip to next
                annopoints_img_mpii = annotation_mpii.__dict__['annorect'][0, person].__dict__['annopoints'][0, 0]
                scale_img_mpii = annotation_mpii.__dict__['annorect'][0, person].__dict__['scale'][0][0]
                objpose_img_mpii = annotation_mpii.__dict__['annorect'][0, person].__dict__['objpos'][0][0]
                objpose_img_mpii = [objpose_img_mpii.__dict__['x'][0][0], objpose_img_mpii.__dict__['y'][0][0]]
                num_joints = annopoints_img_mpii.__dict__['point'][0].shape[0]
                remove_pelvis_thorax_from_num_joints = 0
                # PCKh@0.x: Head bounding box normalizer
                head_x1 = annotation_mpii.__dict__['annorect'][0, person].__dict__['x1'][0][0]
                head_y1 = annotation_mpii.__dict__['annorect'][0, person].__dict__['y1'][0][0]
                head_x2 = annotation_mpii.__dict__['annorect'][0, person].__dict__['x2'][0][0]
                head_y2 = annotation_mpii.__dict__['annorect'][0, person].__dict__['y2'][0][0]
                xy_1 = np.array([head_x1, head_y1], dtype=np.float32)
                xy_2 = np.array([head_x2, head_y2], dtype=np.float32)
                normalizer_persons.append(np.linalg.norm(xy_1 - xy_2, ord=2))
                # If both are true, pulls the head joint closer to the neck, and body
                head_jt, neck_jt = False, False
                # MPII does not have a [-1, -1] or absent GT, hence the number of gt differ for each image
                for i in range(num_joints):
                    x = annopoints_img_mpii.__dict__['point'][0, i].__dict__['x'].flatten()[0]
                    y = annopoints_img_mpii.__dict__['point'][0, i].__dict__['y'].flatten()[0]
                    id_ = annopoints_img_mpii.__dict__['point'][0, i].__dict__['id'][0][0]
                    vis = annopoints_img_mpii.__dict__['point'][0, i].__dict__['is_visible'].flatten()
                    # No entry corresponding to visible, mostly head vis is missing.
                    if vis.size == 0:
                        vis = 1
                    else:
                        vis = vis.item()
                    if id_ == 9: head_jt = True
                    if id_ == 8: neck_jt = True
                    if ((id_ == 6) or (id_ == 7)) and del_extra_jnts:
                        remove_pelvis_thorax_from_num_joints += 1
                    # Arrange ground truth in form {jnt: [[person1], [person2]]}
                    gt_per_joint = np.array([x, y, vis]).astype(np.float16)
                    gt_per_image[mpii_idx_to_jnt[id_]].append(gt_per_joint)
                    per_person_jnts.append(mpii_idx_to_jnt[id_])
                # If person 1 does not have rankl and person 2 has rankl, then prevent rankl being associated with p1
                # If jnt absent in person, then we append np.array([-1, -1, -1])
                all_jnts = set(list(mpii_idx_to_jnt.values()))
                per_person_jnts = set(per_person_jnts)
                jnt_absent_person = all_jnts - per_person_jnts
                for abs_joint in jnt_absent_person:
                    gt_per_image[abs_joint].append(np.array([-1, -1, -1]))
                num_joints_persons.append(num_joints - remove_pelvis_thorax_from_num_joints)
                scale.append(scale_img_mpii)
                objpos.append(objpose_img_mpii)
                # If both head and neck joint present, then move the head joint linearly towards the neck joint.
                if head_jt and neck_jt:
                    gt_per_image['head'][-1] = (lambda_head * gt_per_image['head'][-1])\
                                               + ((1 - lambda_head) * gt_per_image['neck'][-1])
                # Since annotation for atleast on person in image present, this flag will add GT to the dataset
                annotated_person_flag = True
            except KeyError:
                # Person 'x' could not have annotated joints, hence move to person 'y'
                continue
        if not annotated_person_flag:
            continue
        # Maintain compatibility with MPII and LSPET
        if del_extra_jnts:
            del gt_per_image['pelvis']
            del gt_per_image['thorax']
        # Add image, name, pred placeholder and gt
        img_dict['mpii']['img_name'].append(img_name)
        # BUGFIX: deep copy, not a shallow .copy() -- the template holds lists,
        # and a shallow copy would share those lists across every image
        # (load_lsp / load_lspet already use copy.deepcopy here).
        img_dict['mpii']['img_pred'].append(copy.deepcopy(mpii_template))
        img_dict['mpii']['img_gt'].append(gt_per_image)
        img_dict['mpii']['normalizer'].append(normalizer_persons)
        img_dict['mpii']['dataset'].append('mpii')
        img_dict['mpii']['num_gt'].append(num_joints_persons)
        img_dict['mpii']['split'].append(train_test_mpii)
        img_dict['mpii']['scale'].append(scale)
        img_dict['mpii']['objpos'].append(objpos)
        img_dict['mpii']['num_ppl'].append(num_people)
    # Stash the build configuration in the cache so a later load can verify it.
    img_dict['mpii']['del_extra_jnts'] = del_extra_jnts
    img_dict['mpii']['lambda_head'] = lambda_head
    img_dict['mpii']['max_person_in_img'] = max_person_in_img
    if mpii_only: string = '16jnts'
    else: string = '14jnts'
    np.save(file=os.path.join(root, 'cached', 'mpii_cache_{}.npy'.format(string)), arr=img_dict, allow_pickle=True)
    del img_dict['mpii']['del_extra_jnts']
    del img_dict['mpii']['lambda_head']
    return img_dict
def load_lsp(lsp_conf, model_conf):
    """Load the LSP (Leeds Sports Pose) dataset annotations into a dict.

    :param lsp_conf: LSP section of the dataset configuration
        (uses ``train_ratio`` and ``shuffle``).
    :param model_conf: model configuration; if ``model_conf['load']`` is True
        the previously saved train/validation split is reused, otherwise a new
        split is created and saved under ``model_conf['save_path']``.
    :return: ``{'lsp': {...}}`` with per-image ground truth, PCK normalizers,
        joint counts and the train/validation split (0: train, 1: validate).
    """
    root = Path(os.getcwd()).parent
    dataset_path = os.path.join(root, 'data', 'lsp')
    with open(os.path.join(dataset_path, 'lsp_filenames.txt'), 'r') as f:
        filenames = f.read().split()
    # LSP annotation channel index -> joint name
    lsp_idx_to_jnt = {0: 'rankl', 1: 'rknee', 2: 'rhip', 5: 'lankl', 4: 'lknee', 3: 'lhip', 6: 'rwri', 7: 'relb',
                      8: 'rsho', 11: 'lwri', 10: 'lelb', 9: 'lsho', 12: 'neck', 13: 'head'}
    lsp_template = dict([(lsp_idx_to_jnt[i], []) for i in range(14)])
    img_dict = {'lsp': {'img': [], 'img_name': [], 'img_pred': [], 'img_gt': [], 'normalizer': [],
                        'dataset': [], 'num_gt': [], 'split': []}}
    annotation_lsp = scipy.io.loadmat(os.path.join(dataset_path, 'joints.mat'))['joints']  # Shape: 3,14,2000
    # 0: Train; 1: Validate
    # Load Train/Test split if conf.model_load_hg == True
    if model_conf['load']:
        train_test_split = np.load(os.path.join(model_conf['load_path'], 'model_checkpoints/lsp_split.npy'))
    else:
        train_test_split = np.concatenate(
            [np.zeros((int(annotation_lsp.shape[2]*lsp_conf['train_ratio']),), dtype=np.int8),
             np.ones((annotation_lsp.shape[2] - int(annotation_lsp.shape[2]*lsp_conf['train_ratio']),), dtype=np.int8)],
            axis=0)
        if lsp_conf['shuffle']:
            logging.info('Shuffling LSP')
            np.random.shuffle(train_test_split)
        np.save(file=os.path.join(model_conf['save_path'], 'model_checkpoints', 'lsp_split.npy'), arr=train_test_split)
    for index in tqdm(range(annotation_lsp.shape[2])):
        # NOTE(review): the image is read but never used below -- this only
        # acts as a file-existence check and is expensive; confirm intent
        # before removing it.
        image = plt.imread(os.path.join(dataset_path, 'images', filenames[index]))
        # Broadcasting rules apply: Toggle visibility of ground truth
        gt = abs(np.array([[0], [0], [1]]) - annotation_lsp[:, :, index])
        gt_dict = dict([(lsp_idx_to_jnt[i], [gt[:, i]]) for i in range(gt.shape[1])])
        num_gt = sum([1 for i in range(gt.shape[1]) if gt[:, i][2]])
        # PCK@0.x : Normalizer (torso size = larger of the two shoulder-hip diagonals)
        lsho = gt[:2, 9]
        rsho = gt[:2, 8]
        lhip = gt[:2, 3]
        rhip = gt[:2, 2]
        torso_1 = np.linalg.norm(lsho - rhip)
        torso_2 = np.linalg.norm(rsho - lhip)
        torso = max(torso_1, torso_2)
        img_dict['lsp']['img_name'].append(filenames[index])
        img_dict['lsp']['img_pred'].append(copy.deepcopy(lsp_template))
        img_dict['lsp']['img_gt'].append(gt_dict)
        img_dict['lsp']['normalizer'].append([torso])
        img_dict['lsp']['dataset'].append('lsp')
        img_dict['lsp']['num_gt'].append([num_gt])
        img_dict['lsp']['split'].append(train_test_split[index])
    return img_dict
def load_lspet(lspet_conf, model_conf):
    """Load the LSPET (LSP-extended) dataset annotations into a dict.

    :param lspet_conf: LSPET section of the dataset configuration
        (uses ``train_ratio`` and ``shuffle``).
    :param model_conf: model configuration; if ``model_conf['load']`` is True
        the previously saved train/validation split is reused, otherwise a new
        split is created and saved under ``model_conf['save_path']``.
    :return: ``{'lspet': {...}}`` with per-image ground truth, PCK
        normalizers, joint counts and the split (0: train, 1: validate).
    """
    root = Path(os.getcwd()).parent
    dataset_path = os.path.join(root, 'data', 'lspet')
    with open(os.path.join(dataset_path, 'lspet_filenames.txt'), 'r') as f:
        filenames = f.read().split()
    # LSPET annotation channel index -> joint name
    lspet_idx_to_jnt = {0: 'rankl', 1: 'rknee', 2: 'rhip', 5: 'lankl', 4: 'lknee', 3: 'lhip',
                        6: 'rwri', 7: 'relb', 8: 'rsho', 11: 'lwri', 10: 'lelb', 9: 'lsho',
                        12: 'neck', 13: 'head'}
    lspet_template = dict([(lspet_idx_to_jnt[i], []) for i in range(14)])
    img_dict = {'lspet': {'img': [], 'img_name': [], 'img_pred': [], 'img_gt': [], 'normalizer': [],
                          'dataset': [], 'num_gt': [], 'split': []}}
    annotation_lspet = scipy.io.loadmat(os.path.join(dataset_path, 'joints.mat'))['joints']  # Shape: 14,3,10000
    # 0: Train; 1: Validate
    # Load Train/Test split if conf.model_load_hg == True
    if model_conf['load']:
        train_test_split = np.load(os.path.join(model_conf['load_path'], 'model_checkpoints/lspet_split.npy'))
    else:
        train_test_split = np.concatenate(
            [np.zeros((int(annotation_lspet.shape[2] * lspet_conf['train_ratio']),), dtype=np.int8),
             np.ones((annotation_lspet.shape[2] - int(annotation_lspet.shape[2] * lspet_conf['train_ratio']),), dtype=np.int8)],
            axis=0)
        if lspet_conf['shuffle']:
            logging.info('Shuffling LSPET')
            np.random.shuffle(train_test_split)
        np.save(file=os.path.join(model_conf['save_path'], 'model_checkpoints', 'lspet_split.npy'), arr=train_test_split)
    for index in tqdm(range(annotation_lspet.shape[2])):
        # NOTE(review): image is read but never used below -- acts only as an
        # existence check; confirm intent before removing this expensive read.
        image = plt.imread(os.path.join(dataset_path, 'images', filenames[index]))
        gt = annotation_lspet[:, :, index]
        gt_dict = dict([(lspet_idx_to_jnt[i], [gt[i]]) for i in range(gt.shape[0])])
        num_gt = sum([1 for i in range(gt.shape[0]) if gt[i][2]])
        # PCK@0.x : Normalizer (torso size = larger of the two shoulder-hip diagonals)
        lsho = gt[9, :2]
        rsho = gt[8, :2]
        lhip = gt[3, :2]
        rhip = gt[2, :2]
        torso_1 = np.linalg.norm(lsho - rhip)
        torso_2 = np.linalg.norm(rsho - lhip)
        torso = max(torso_1, torso_2)
        img_dict['lspet']['img_name'].append(filenames[index])
        img_dict['lspet']['img_pred'].append(copy.deepcopy(lspet_template))
        img_dict['lspet']['img_gt'].append(gt_dict)
        img_dict['lspet']['normalizer'].append([torso])
        img_dict['lspet']['dataset'].append('lspet')
        img_dict['lspet']['num_gt'].append([num_gt])
        img_dict['lspet']['split'].append(train_test_split[index])
    return img_dict
def load_hp_dataset(dataset_conf, model_conf):
    """Load the human-pose dataset(s) selected by ``dataset_conf['load']``.

    Accepted values are 'mpii', 'lsp' and 'merged'; 'lsp' also pulls in
    LSPET, and 'merged' loads everything (dropping the MPII-only joints so
    the formats line up).

    :param dataset_conf: dataset configuration section.
    :param model_conf: model configuration (forwarded to the LSP loaders).
    :return: dict keyed by dataset name ('mpii', 'lsp', 'lspet').
    """
    dataset_dict = {}
    choice = dataset_conf['load']
    if choice in ('mpii', 'merged'):
        logging.info('Loading MPII dataset')
        # When merging, drop pelvis/thorax so MPII matches LSP's 14 joints.
        dataset_dict.update(load_mpii(mpii_conf=dataset_conf['mpii_params'],
                                      del_extra_jnts=(choice != 'mpii')))
    if choice in ('lsp', 'merged'):
        logging.info('Loading LSP dataset')
        dataset_dict.update(load_lsp(lsp_conf=dataset_conf['lsp_params'],
                                     model_conf=model_conf))
        logging.info('Loading LSPET dataset')
        dataset_dict.update(load_lspet(lspet_conf=dataset_conf['lspet_params'],
                                       model_conf=model_conf))
    return dataset_dict
class HumanPoseDataLoader(torch.utils.data.Dataset):
    def __init__(self, dataset_dict, activelearning, conf):
        """Assemble train/validation pools and apply active-learning sampling.

        :param dataset_dict: output of ``load_hp_dataset`` keyed by dataset
            name ('mpii', 'lsp', 'lspet').
        :param activelearning: object exposing the sampling strategies listed
            in ``activelearning_samplers`` below.
        :param conf: experiment configuration object.
        """
        self.conf = conf
        self.viz = conf.viz
        self.occlusion = conf.experiment_settings['occlusion']
        self.hm_shape = [64, 64]  # Hardcoded
        self.hm_peak = conf.experiment_settings['hm_peak']
        self.threshold = conf.experiment_settings['threshold'] * self.hm_peak
        self.model_save_path = conf.model['save_path']
        self.jnt_to_ind = jnt_to_ind
        self.ind_to_jnt = ind_to_jnt
        # Training specific attributes:
        self.train_flag = False
        self.validate_flag = False
        self.model_input_dataset = None
        # Define active learning functions
        activelearning_samplers = {
            'base': activelearning.base,
            'random': activelearning.random,
            'coreset': activelearning.coreset_sampling,
            'learning_loss': activelearning.learning_loss_sampling,
            'egl': activelearning.expected_gradient_length_sampling,
            'multipeak_entropy': activelearning.multipeak_entropy,
            'aleatoric': activelearning.aleatoric_uncertainty,
            'vl4pose': activelearning.vl4pose
        }
        self.dataset_size = dict()
        # Create dataset by converting into numpy compatible types
        if conf.dataset['load'] in ['mpii', 'merged']:
            logging.info('Creating MPII dataset\n')
            self.mpii = dataset_dict['mpii']
            self.mpii_dataset = self.create_mpii()
            self.mpii_train, self.mpii_validate = self.create_train_validate(dataset=self.mpii_dataset)
            if self.conf.experiment_settings['all_joints']:
                logging.info('Creating single person patches\n')
                self.mpii_train = self.mpii_single_person_extractor(mpii_dataset=self.mpii_train)
                self.mpii_validate = self.mpii_single_person_extractor(mpii_dataset=self.mpii_validate)
                logging.info('Selecting train and validation images where all joints are present.\n')
                self.mpii_train = self.mpii_all_joints(mpii_dataset=self.mpii_train)
                self.mpii_validate = self.mpii_all_joints(mpii_dataset=self.mpii_validate)
            else:
                logging.info('Creating single person patches\n')
                self.mpii_train = self.mpii_single_person_extractor(mpii_dataset=self.mpii_train)
                self.mpii_validate = self.mpii_single_person_extractor(mpii_dataset=self.mpii_validate)
            logging.info('Size of MPII processed dataset: ')
            logging.info('Train: {}'.format(self.mpii_train['name'].shape[0]))
            logging.info('Validate: {}\n'.format(self.mpii_validate['name'].shape[0]))
            self.dataset_size.update(
                {'mpii': {'train': self.mpii_train['name'].shape[0], 'validation': self.mpii_validate['name'].shape[0]}})
        if conf.dataset['load'] in ['lsp', 'merged']:
            self.lspet = dataset_dict['lspet']
            self.lsp = dataset_dict['lsp']
            logging.info('Creating LSPET dataset\n')
            self.lspet_dataset = self.create_lspet()
            logging.info('Creating LSP dataset\n')
            self.lsp_dataset = self.create_lsp()
            self.lspet_train, self.lspet_validate = self.create_train_validate(dataset=self.lspet_dataset)
            self.lsp_train, self.lsp_validate = self.create_train_validate(dataset=self.lsp_dataset)
            self.dataset_size.update({
                'lspet': {'train': self.lspet_train['name'].shape[0], 'validation': self.lspet_validate['name'].shape[0]},
                'lsp': {'train': self.lsp_train['name'].shape[0], 'validation': self.lsp_validate['name'].shape[0]}})
        # Create train / validation data by combining individual components
        logging.info('Creating train and validation splits\n')
        if self.conf.dataset['load'] == 'mpii':
            self.train = self.merge_dataset(datasets=[self.mpii_train],
                                            indices=[np.arange(self.mpii_train['name'].shape[0])])
            self.validate = self.merge_dataset(datasets=[self.mpii_validate],
                                               indices=[np.arange(self.mpii_validate['name'].shape[0])])
        elif self.conf.dataset['load'] == 'lsp':
            self.train = self.merge_dataset(datasets=[self.lsp_train, self.lspet_train],
                                            indices=[np.arange(self.lsp_train['name'].shape[0]),
                                                     np.arange(self.lspet_train['name'].shape[0])])
            self.validate = self.merge_dataset(datasets=[self.lsp_validate, self.lspet_validate],
                                               indices=[np.arange(self.lsp_validate['name'].shape[0]),
                                                        np.arange(self.lspet_validate['name'].shape[0])])
        else:
            assert self.conf.dataset['load'] == 'merged', "dataset['load'] should be mpii, lsp or merged"
            self.train = self.merge_dataset(datasets=[self.mpii_train, self.lsp_train, self.lspet_train],
                                            indices=[np.arange(self.mpii_train['name'].shape[0]),
                                                     np.arange(self.lsp_train['name'].shape[0]),
                                                     np.arange(self.lspet_train['name'].shape[0])])
            self.validate = self.merge_dataset(datasets=[self.mpii_validate, self.lsp_validate, self.lspet_validate],
                                               indices=[np.arange(self.mpii_validate['name'].shape[0]),
                                                        np.arange(self.lsp_validate['name'].shape[0]),
                                                        np.arange(self.lspet_validate['name'].shape[0])])
        # Clearing RAM
        if conf.dataset['load'] in ['mpii', 'merged']:
            del self.mpii_train, self.mpii_validate, self.mpii_dataset, self.mpii
        if conf.dataset['load'] in ['lsp', 'merged']:
            del self.lspet_train, self.lspet_validate, self.lspet_dataset, self.lspet,\
                self.lsp_train, self.lsp_validate, self.lsp_dataset, self.lsp,
        # Active learning selects which training samples are kept/annotated.
        indices = activelearning_samplers[conf.active_learning['algorithm']](
            train=self.train, dataset_size=self.dataset_size)
        self.train = self.merge_dataset(datasets=[self.train], indices=[indices])
        logging.info('\nFinal size of Training Data: {}'.format(self.train['name'].shape[0]))
        logging.info('Final size of Validation Data: {}\n'.format(self.validate['name'].shape[0]))
        # Decide which dataset is input to the model
        self.input_dataset(train=True)
        # Deciding augmentation techniques
        self.shift_scale_rotate = self.augmentation([albu.ShiftScaleRotate(p=1, shift_limit=0.2, scale_limit=0.25,
                                                                           rotate_limit=45, interpolation=cv2.INTER_LINEAR,
                                                                           border_mode=cv2.BORDER_CONSTANT, value=0)])
        self.flip_prob = 0.5
        self.horizontal_flip = self.augmentation([albu.HorizontalFlip(p=1)])
        logging.info('\nDataloader Initialized.\n')
def __len__(self):
"""
Returns the length of the dataset. Duh!
:return:
"""
return self.model_input_dataset['gt'].shape[0]
def __getitem__(self, i):
"""
:param i:
:return:
"""
root = Path(os.getcwd()).parent
mpii_path = os.path.join(root, 'data', 'mpii')
lsp_path = os.path.join(root, 'data', 'lsp')
lspet_path = os.path.join(root, 'data', 'lspet')
name = self.model_input_dataset['name'][i]
gt = self.model_input_dataset['gt'][i]
dataset = self.model_input_dataset['dataset'][i]
num_gt = self.model_input_dataset['num_gt'][i]
split = self.model_input_dataset['split'][i]
num_persons = self.model_input_dataset['num_persons'][i]
bbox_coords = self.model_input_dataset['bbox_coords'][i]
normalizer = self.model_input_dataset['normalizer'][i]
if dataset == 'mpii':
image = plt.imread(os.path.join(mpii_path, 'images', '{}.jpg'.format(name.split('_')[0])))
elif dataset == 'lsp':
image = plt.imread(os.path.join(lsp_path, 'images', name))
else:
image = plt.imread(os.path.join(lspet_path, 'images', name))
# Convert from XY cartesian to UV image coordinates
xy_to_uv = lambda xy: (xy[1], xy[0])
uv_to_xy = lambda uv: (uv[1], uv[0])
# Determine crop
img_shape = np.array(image.shape)
[min_x, min_y, max_x, max_y] = bbox_coords[0]
tl_uv = xy_to_uv(np.array([min_x, min_y]))
br_uv = xy_to_uv(np.array([max_x, max_y]))
min_u = tl_uv[0]
min_v = tl_uv[1]
max_u = br_uv[0]
max_v = br_uv[1]
centre = np.array([(min_u + max_u) / 2, (min_v + max_v) / 2])
height = max_u - min_u
width = max_v - min_v
if self.train_flag:
scale = np.random.uniform(low=1.5,high=2)
else:
scale = 1.75
window = max(scale * height, scale * width)
top_left = np.array([centre[0] - (window / 2), centre[1] - (window / 2)])
bottom_right = np.array([centre[0] + (window / 2), centre[1] + (window / 2)])
top_left = np.maximum(np.array([0, 0], dtype=np.int16), top_left.astype(np.int16))
bottom_right = np.minimum(img_shape.astype(np.int16)[:-1], bottom_right.astype(np.int16))
# Cropping the image and adjusting the ground truth
image = image[top_left[0]: bottom_right[0], top_left[1]: bottom_right[1], :]
for person in range(gt.shape[0]):
for joint in range(gt.shape[1]):
gt_uv = xy_to_uv(gt[person][joint])
gt_uv = gt_uv - top_left
gt[person][joint] = np.concatenate([gt_uv, np.array([gt[person][joint][2]])], axis=0)
# Resize the image
image, gt, scale_params = self.resize_image(image, gt, target_size=[256, 256, 3])
# Augmentation
if self.train_flag:
# Horizontal flip can't be done using albu's probability
if torch.rand(1) < self.flip_prob:
# Augment image and keypoints
augmented = self.horizontal_flip(image=image, keypoints=gt.reshape(-1, 3)[:, :2])
image = augmented['image']
gt[:, :, :2] = np.stack(augmented['keypoints'], axis=0).reshape(-1, self.conf.experiment_settings['num_hm'], 2)
# Flip ground truth to match horizontal flip
gt[:, [jnt_to_ind['lsho'], jnt_to_ind['rsho']], :] = gt[:, [jnt_to_ind['rsho'], jnt_to_ind['lsho']], :]
gt[:, [jnt_to_ind['lelb'], jnt_to_ind['relb']], :] = gt[:, [jnt_to_ind['relb'], jnt_to_ind['lelb']], :]
gt[:, [jnt_to_ind['lwri'], jnt_to_ind['rwri']], :] = gt[:, [jnt_to_ind['rwri'], jnt_to_ind['lwri']], :]
gt[:, [jnt_to_ind['lhip'], jnt_to_ind['rhip']], :] = gt[:, [jnt_to_ind['rhip'], jnt_to_ind['lhip']], :]
gt[:, [jnt_to_ind['lknee'], jnt_to_ind['rknee']], :] = gt[:, [jnt_to_ind['rknee'], jnt_to_ind['lknee']], :]
gt[:, [jnt_to_ind['lankl'], jnt_to_ind['rankl']], :] = gt[:, [jnt_to_ind['rankl'], jnt_to_ind['lankl']], :]
# Ensure shift scale rotate augmentation retains all joints
if self.conf.experiment_settings['all_joints']:
tries = 5
augment_ok = False
image_, gt_ = None, None
while tries > 0:
tries -= 1
augmented = self.shift_scale_rotate(image=image, keypoints=gt.reshape(-1, 3)[:, :2])
image_ = augmented['image']
gt_ = np.stack(augmented['keypoints'], axis=0).reshape(
-1, self.conf.experiment_settings['num_hm'], 2)
if (np.all(gt_[0]) > -5) and (np.all(gt_[0]) < 261): # 0 index single person
augment_ok = True
break
if augment_ok:
image = image_
gt[:, :, :2] = gt_
# Else augmentation does not need to keep all joints
else:
augmented = self.shift_scale_rotate(image=image, keypoints=gt.reshape(-1, 3)[:, :2])
image = augmented['image']
gt[:, :, :2] = np.stack(augmented['keypoints'], axis=0).reshape(-1, self.conf.num_hm, 2)
heatmaps, joint_exist = heatmap_generator(
joints=np.copy(gt),occlusion=self.occlusion, hm_shape=self.hm_shape, img_shape=image.shape)
heatmaps = self.hm_peak * heatmaps
# Reproduce GT overlayed on Image
if False:# self.viz:
fig, ax = plt.subplots(1, 1)
ax.imshow(image)
for person in range(gt.shape[0]):
for joint in range(gt.shape[1]):
gt_xy = uv_to_xy(gt[person][joint])
if gt[person][joint][2] >= 0 and gt[person][joint][0] > 0 and gt[person][joint][1] > 0:
ax.add_patch(Circle(gt_xy, radius=2.5, color='green', fill=True))
for k in range(joint_exist.shape[0]):
print(ind_to_jnt[k], joint_exist[k])
print('\n')
plt.show()
#os.makedirs(os.path.join(self.model_save_path[:-20], 'viz_getitem'), exist_ok=True)
#plt.savefig(os.path.join(self.model_save_path[:-20], 'viz_getitem', '{}.jpg'.format(i)), dpi=350)
return torch.tensor(data=image/256.0, dtype=torch.float32, device='cpu'), \
torch.tensor(data=heatmaps, dtype=torch.float32, device='cpu'),\
gt, name, dataset, num_gt.astype(np.float32), split, num_persons,\
scale_params, normalizer, joint_exist
    def create_mpii(self):
        """Convert the raw MPII dict into fixed-shape numpy arrays.

        Pads per-image ground truth up to ``max_person_in_img`` people, derives
        per-person bounding boxes from the annotated joints, and builds (or
        reloads) the train/validation split.

        :return: dict with keys 'img', 'name', 'gt', 'dataset', 'num_gt',
            'split', 'num_persons', 'normalizer', 'bbox_coords'.
        """
        mpii = self.mpii
        max_persons = mpii['max_person_in_img']
        len_dataset = len(mpii['img_name'])
        assert len(mpii['img_name']) == len(mpii['img_gt']), "MPII dataset image and labels mismatched."
        # What the create mpii dataset will look like. Missing entries are -1.
        dataset = {'img': [],
                   'name': [],
                   'gt': -np.ones(shape=(len_dataset, max_persons, self.conf.experiment_settings['num_hm'], 3)),
                   'dataset': [],
                   'num_gt': np.zeros(shape=(len_dataset, max_persons)),
                   'split': [],
                   'num_persons': np.zeros(shape=(len_dataset, 1)),
                   'normalizer': np.zeros(shape=(len_dataset, max_persons)),
                   'bbox_coords': -np.ones(shape=(len_dataset, max_persons, 4))}
        # Converting the dataset to numpy arrays
        for i in range(len_dataset):
            image_name = mpii['img_name'][i]
            ground_truth = mpii['img_gt'][i]
            dataset_ = mpii['dataset'][i]
            num_gt = mpii['num_gt'][i]
            split = mpii['split'][i]
            normalizer = mpii['normalizer'][i]
            # Calculating the number of people
            num_ppl = 0
            for key in ground_truth.keys():
                num_ppl = max(num_ppl, len(ground_truth[key]))
                break  # All keys have same length, as if jnt absent in person, then we append np.array([-1, -1, -1])
            dataset['num_persons'][i] = num_ppl
            # Split 0 indicates testing dataset
            assert split == 1, "All annotated images should have split == 1"
            # Assigning to a Numpy Ground truth array
            for jnt in ground_truth.keys():
                for person_id in range(len(ground_truth[jnt])):
                    dataset['gt'][i, person_id, jnt_to_ind[jnt]] = ground_truth[jnt][person_id]
            # Assigning Bounding Box coordinates per person (tight box over annotated joints)
            for person_id in range(num_ppl):
                x_coord = dataset['gt'][i, person_id, :, 0][np.where(dataset['gt'][i, person_id, :, 0] > -1)]
                y_coord = dataset['gt'][i, person_id, :, 1][np.where(dataset['gt'][i, person_id, :, 1] > -1)]
                min_x = np.min(x_coord)
                max_x = np.max(x_coord)
                min_y = np.min(y_coord)
                max_y = np.max(y_coord)
                dataset['bbox_coords'][i, person_id] = np.array([min_x, min_y, max_x, max_y])
            # Number of joints scaling factor
            for person_id in range(len(num_gt)):
                dataset['num_gt'][i, person_id] = num_gt[person_id]
            # PCK normalizer
            for person_id in range(len(normalizer)):
                dataset['normalizer'][i, person_id] = normalizer[person_id]
            dataset['name'].append(image_name)
            dataset['dataset'].append(dataset_)
            dataset['split'].append(split)
        # Load Train/Test split if conf.model_load_hg = True
        if self.conf.model['load']:
            dataset['split'] = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/mpii_split.npy'))
        else:
            # Create our own train/validation split for multi person dataset
            # 0: Train; 1: Validate
            if self.conf.dataset['mpii_params']['newell_validation']:
                root = Path(os.getcwd()).parent
                logging.info('\nCreating the Newell validation split.\n')
                with open(os.path.join(root, 'cached', 'Stacked_HG_ValidationImageNames.txt')) as valNames:
                    valNames_ = [x.strip('\n') for x in valNames.readlines()]
                dataset['split'] = np.array([1 if x in valNames_ else 0 for x in dataset['name']])
                # assert np.sum(dataset['split']) == len(valNames_)
                # "THIS ASSERTION WILL NOT HOLD TRUE. Newell list has duplicates."
            else:
                train_ratio = self.conf.dataset['mpii_params']['train_ratio']
                dataset['split'] = np.concatenate([np.zeros(int(len_dataset*train_ratio),),
                                                   np.ones((len_dataset - int(len_dataset*train_ratio)),)],
                                                  axis=0)
                if self.conf.dataset['mpii_params']['shuffle']: np.random.shuffle(dataset['split'])
            np.save(file=os.path.join(self.conf.model['save_path'], 'model_checkpoints/mpii_split.npy'), arr=dataset['split'])
        dataset['img'] = np.array(dataset['img'])  # array of shape == 0, dataset['img'] exists for legacy reasons only
        dataset['name'] = np.array(dataset['name'])
        dataset['dataset'] = np.array(dataset['dataset'])
        logging.info('MPII dataset description:')
        logging.info('Length (#images): {}'.format(dataset['gt'].shape[0]))
        return dataset
    def create_lspet(self):
        """Convert the raw LSPET annotations into the canonical dataset dict.

        Builds fixed-shape numpy arrays padded to ``max_persons`` slots (even
        though each LSPET image contains a single person, in slot 0) so that
        LSPET can later be merged with the MPII arrays. Absent joints keep the
        ``-1`` sentinel from initialization.

        :return: (dict) with keys ``img``, ``name``, ``gt``, ``dataset``,
                 ``num_gt``, ``split``, ``num_persons``, ``normalizer``,
                 ``bbox_coords``.
        """
        # Pad person axis to MPII's count so datasets can be concatenated;
        # fall back to 1 when no MPII dataset was loaded on this object.
        try:
            max_persons = self.mpii['max_person_in_img']
        except AttributeError:
            max_persons = 1
        # Deep copy so the raw annotation dict on self is never mutated.
        lspet = copy.deepcopy(self.lspet)
        assert len(lspet['img_name']) == len(lspet['img_gt']), "LSPET dataset image and labels mismatched."
        # gt/bbox start at the -1 "absent" sentinel and are filled below.
        dataset = {'img': [],
                   'name': [],
                   'gt': -np.ones(shape=(len(lspet['img_name']), max_persons, self.conf.experiment_settings['num_hm'], 3)),
                   'dataset': [],
                   'num_gt': np.zeros(shape=(len(lspet['img_name']), max_persons)),
                   'split': [],
                   'num_persons': np.ones(shape=(len(lspet['img_name']), 1)),
                   'normalizer': np.zeros(shape=(len(lspet['img_name']), max_persons)),
                   'bbox_coords': -np.ones(shape=(len(lspet['img_name']), max_persons, 4))}  # max_persons is always 1 for lsp*
        len_dataset = len(lspet['img_name'])
        for i in range(len_dataset):
            image_name = lspet['img_name'][i]
            ground_truth = lspet['img_gt'][i]
            dataset_ = lspet['dataset'][i]
            num_gt = lspet['num_gt'][i]
            split = lspet['split'][i]
            normalizer = lspet['normalizer'][i]
            # Copy each joint's [u, v, vis] into person slot 0.
            for jnt in ground_truth.keys():
                dataset['gt'][i, 0, jnt_to_ind[jnt]] = ground_truth[jnt][0]
            # Assigning Bounding Box coordinates per person:
            # extremes over joints marked visible (vis == 1) ...
            x_coord = dataset['gt'][i, 0, :, 0][np.where(dataset['gt'][i, 0, :, 2] == 1)]
            y_coord = dataset['gt'][i, 0, :, 1][np.where(dataset['gt'][i, 0, :, 2] == 1)]
            # ... whose coordinates are valid (not the -1 sentinel).
            x_coord = x_coord[np.where(x_coord > -1)]
            y_coord = y_coord[np.where(y_coord > -1)]
            # NOTE(review): np.min/np.max raise on an empty selection — assumes
            # every LSPET image has at least one visible joint; confirm upstream.
            min_x = np.min(x_coord)
            max_x = np.max(x_coord)
            min_y = np.min(y_coord)
            max_y = np.max(y_coord)
            dataset['bbox_coords'][i, 0] = np.array([min_x, min_y, max_x, max_y])
            # Assigning number of GT to person 0
            dataset['num_gt'][i, 0] = num_gt[0]
            dataset['normalizer'][i, 0] = normalizer[0]
            dataset['name'].append(image_name)
            dataset['dataset'].append(dataset_)
            dataset['split'].append(split)
        dataset['img'] = np.array(dataset['img'])  # Empty array if load_all_images_in_memory = False
        dataset['name'] = np.array(dataset['name'])
        dataset['dataset'] = np.array(dataset['dataset'])
        dataset['split'] = np.array(dataset['split'])
        logging.info('LSPET dataset description:')
        logging.info('Length (#images): {}'.format(dataset['gt'].shape[0]))
        return dataset
    def create_lsp(self):
        """Convert the raw LSP annotations into the canonical dataset dict.

        Mirrors :meth:`create_lspet`: single-person images stored in person
        slot 0 of arrays padded to ``max_persons``, absent joints left at the
        ``-1`` sentinel.

        :return: (dict) with keys ``img``, ``name``, ``gt``, ``dataset``,
                 ``num_gt``, ``split``, ``num_persons``, ``normalizer``,
                 ``bbox_coords``.
        """
        # Pad person axis to MPII's count so datasets can be concatenated;
        # fall back to 1 when no MPII dataset was loaded on this object.
        try:
            max_persons = self.mpii['max_person_in_img']
        except AttributeError:
            max_persons = 1
        # Deep copy so the raw annotation dict on self is never mutated.
        lsp = copy.deepcopy(self.lsp)
        assert len(lsp['img_name']) == len(lsp['img_gt']), "LSP dataset image and labels mismatched."
        dataset = {'img': [],
                   'name': [],
                   'gt': -np.ones(shape=(len(lsp['img_name']), max_persons, self.conf.experiment_settings['num_hm'], 3)),
                   'dataset': [],
                   'num_gt': np.zeros(shape=(len(lsp['img_name']), max_persons)),
                   'split': [],
                   'num_persons': np.ones(shape=(len(lsp['img_name']), 1)),
                   'normalizer': np.zeros(shape=(len(lsp['img_name']), max_persons)),
                   'bbox_coords': -np.ones(shape=(len(lsp['img_name']), max_persons, 4))}  # max_persons is always 1 for lsp*
        len_dataset = len(lsp['img_name'])
        for i in range(len_dataset):
            image_name = lsp['img_name'][i]
            ground_truth = lsp['img_gt'][i]
            dataset_ = lsp['dataset'][i]
            num_gt = lsp['num_gt'][i]
            split = lsp['split'][i]
            normalizer = lsp['normalizer'][i]
            # Copy each joint's [u, v, vis] into person slot 0.
            for jnt in ground_truth.keys():
                dataset['gt'][i, 0, jnt_to_ind[jnt]] = ground_truth[jnt][0]
            # Assigning Bounding Box coordinates per person.
            # NOTE(review): unlike create_lspet, the bbox here is taken over all
            # joints with valid coordinates (> -1) regardless of the visibility
            # flag — presumably deliberate (LSP visibility semantics differ from
            # LSPET); confirm before unifying the two methods.
            x_coord = dataset['gt'][i, 0, :, 0][np.where(dataset['gt'][i, 0, :, 0] > -1)]
            y_coord = dataset['gt'][i, 0, :, 1][np.where(dataset['gt'][i, 0, :, 1] > -1)]
            min_x = np.min(x_coord)
            max_x = np.max(x_coord)
            min_y = np.min(y_coord)
            max_y = np.max(y_coord)
            dataset['bbox_coords'][i, 0] = np.array([min_x, min_y, max_x, max_y])
            dataset['num_gt'][i, 0] = num_gt[0]
            dataset['normalizer'][i, 0] = normalizer[0]
            dataset['name'].append(image_name)
            dataset['dataset'].append(dataset_)
            dataset['split'].append(split)
        dataset['img'] = np.array(dataset['img'])
        dataset['name'] = np.array(dataset['name'])
        dataset['dataset'] = np.array(dataset['dataset'])
        dataset['split'] = np.array(dataset['split'])
        logging.info('LSP dataset description:')
        logging.info('Length (#images): {}'.format(dataset['gt'].shape[0]))
        return dataset
def create_train_validate(self, dataset=None):
"""
:param dataset:
:return:
"""
# Separate train and validate
train_idx = []
val_idx = []
for i in range(len(dataset['name'])):
if dataset['split'][i] == 0:
train_idx.append(i)
else:
assert dataset['split'][i] == 1, \
"Split has value: {}, should be either 0 or 1".format(dataset['split'][i])
val_idx.append(i)
train_dataset = {}
val_dataset = {}
for key in dataset.keys():
if key == 'img':
train_dataset[key] = dataset[key] # Empty numpy array
val_dataset[key] = dataset[key]
continue
train_dataset[key] = dataset[key][train_idx]
val_dataset[key] = dataset[key][val_idx]
return train_dataset, val_dataset
def merge_dataset(self, datasets=None, indices=None):
"""
:param datasets:
:param indices:
:return:
"""
assert type(datasets) == list and len(datasets) != 0
assert len(datasets) == len(indices)
for i in range(len(datasets) - 1):
assert datasets[i].keys() == datasets[i+1].keys(), "Dataset keys do not match"
# Merge datasets
merged_dataset = {}
for key in datasets[0].keys():
if key == 'img':
merged_dataset['img'] = np.array([])
continue
merged_dataset[key] = np.concatenate([data[key][index_] for index_, data in zip(indices, datasets)], axis=0)
# Sampling based on indices
merged_dataset['index'] = np.arange(merged_dataset['name'].shape[0])
return merged_dataset
    def recreate_images(self, gt=False, pred=False, train=False, validate=False, external=False, ext_data=None):
        '''Regroup a flat dataset split into per-dataset, per-image dicts for
        visualize_image.

        Exactly one of train/validate/external selects the data source; at
        least one of gt/pred selects which joint dicts are populated.

        :param gt: (bool) fill per-image ground-truth joint dicts
        :param pred: (bool) fill per-image prediction joint dicts (requires
                     a 'pred' array in the selected split)
        :param train: (bool) use self.train
        :param validate: (bool) use self.validate
        :param external: (bool) use ext_data
        :param ext_data: (dict) external dataset, required when external=True
        :return: (dict) {'mpii'/'lspet'/'lsp': visualize_image-compatible dict}
        '''
        assert gt + pred != 0, "Specify atleast one of GT or Pred"
        assert train + validate + external == 1,\
            "Can create visualize_image compatible arrays only for train/val in one function call."
        if external:
            assert ext_data, "ext_dataset can't be none to recreate external datasets"
            data_split = ext_data
        elif train:
            data_split = self.train
        else:
            data_split = self.validate
        # Along with the below entries, we also pass bbox coordinates for each dataset
        img_dict = {'mpii': {'img': [], 'img_name': [], 'img_pred': [], 'img_gt': [], 'split': [], 'dataset': [], 'display_string': []},
                    'lspet': {'img': [], 'img_name': [], 'img_pred': [], 'img_gt': [], 'split': [], 'dataset': [], 'display_string': []},
                    'lsp': {'img': [], 'img_name': [], 'img_pred': [], 'img_gt': [], 'split': [], 'dataset': [], 'display_string': []}}
        for i in range(len(data_split['img'])):
            # Route each sample to the bucket of the dataset it came from.
            dataset = data_split['dataset'][i]
            img_dict[dataset]['img'].append(data_split['img'][i])
            img_dict[dataset]['img_name'].append(data_split['name'][i])
            img_dict[dataset]['split'].append(data_split['split'][i])
            img_dict[dataset]['dataset'].append(data_split['dataset'][i])
            img_dict[dataset]['display_string'].append(data_split['name'][i])
            # First sample for a dataset creates the bbox array (KeyError path);
            # subsequent ones append along axis 0.
            try:
                img_dict[dataset]['bbox_coords'] = np.concatenate([img_dict[dataset]['bbox_coords'],
                                                                   data_split['bbox_coords'][i].reshape(1, -1, 4)], axis=0)
            except KeyError:
                img_dict[dataset]['bbox_coords'] = data_split['bbox_coords'][i].reshape(1, -1, 4)
            # Fresh {joint_name: [per-person entries]} template for this image.
            joint_dict = dict([(ind_to_jnt[i], []) for i in range(self.conf.experiment_settings['num_hm'])])
            gt_dict = copy.deepcopy(joint_dict)
            pred_dict = copy.deepcopy(joint_dict)
            if gt:
                for person in range(int(data_split['num_persons'][i, 0])):
                    for joint in range(self.conf.experiment_settings['num_hm']):
                        gt_dict[ind_to_jnt[joint]].append(data_split['gt'][i, person, joint])
            if pred:
                for person in range(int(data_split['num_persons'][i, 0])):
                    for joint in range(self.conf.experiment_settings['num_hm']):
                        pred_dict[ind_to_jnt[joint]].append(data_split['pred'][i, person, joint])
            img_dict[dataset]['img_gt'].append(gt_dict)
            img_dict[dataset]['img_pred'].append(pred_dict)
        return img_dict
def input_dataset(self, train=False, validate=False):
'''
:param train:
:param validate_entire:
:return:
'''
assert train + validate == 1, "Either one of train or validate_entire needs to be chosen"
if train:
self.model_input_dataset = self.train
self.train_flag = True
self.validate_flag = False
else:
self.model_input_dataset = self.validate
self.validate_flag = True
self.train_flag = False
return None
def mpii_single_person_extractor(self, mpii_dataset):
"""
:param train:
:param validate:
:param max_persons:
:return:
"""
max_persons = self.mpii['max_person_in_img']
dataset = {'img': [],
'name': [],
'gt': np.empty(shape=(0, max_persons, self.conf.experiment_settings['num_hm'], 3)),
'dataset': [],
'num_gt': np.empty(shape=(0, max_persons)),
'split': [],
'num_persons': np.empty(shape=(0, 1)),
'normalizer': np.empty(shape=(0, max_persons)),
'bbox_coords': np.empty(shape=(0, max_persons, 4))}
for i in range(len(mpii_dataset['name'])):
for p in range(int(mpii_dataset['num_persons'][i][0])):
dataset['name'].append(mpii_dataset['name'][i][:-4] + '_{}.jpg'.format(p))
dataset['dataset'].append(mpii_dataset['dataset'][i])
dataset['split'].append(mpii_dataset['split'][i])
gt_ = -1 * np.ones_like(mpii_dataset['gt'][i])
gt_[0] = mpii_dataset['gt'][i, p]
dataset['gt'] = np.concatenate([dataset['gt'],
gt_.reshape(1, max_persons, self.conf.experiment_settings['num_hm'], 3)],
axis=0)
num_gt_ = np.zeros_like(mpii_dataset['num_gt'][i])
num_gt_[0] = mpii_dataset['num_gt'][i, p]
dataset['num_gt'] = np.concatenate([dataset['num_gt'], num_gt_.reshape(1, max_persons)],
axis=0)
normalizer_ = np.zeros_like(mpii_dataset['normalizer'][i])
normalizer_[0] = mpii_dataset['normalizer'][i, p]
dataset['normalizer'] = np.concatenate([dataset['normalizer'], normalizer_.reshape(1, max_persons)],
axis=0)
dataset['num_persons'] = np.concatenate([dataset['num_persons'], np.array([1]).reshape(1, 1)],
axis=0)
bbox_ = np.zeros_like(mpii_dataset['bbox_coords'][i])
bbox_[0] = mpii_dataset['bbox_coords'][i, p]
dataset['bbox_coords'] = np.concatenate([dataset['bbox_coords'],bbox_.reshape(1, max_persons, 4)],
axis=0)
dataset['img'] = np.array(dataset['img'])
dataset['name'] = np.array(dataset['name'])
dataset['dataset'] = np.array(dataset['dataset'])
dataset['split'] = np.array(dataset['split'])
return dataset
def mpii_all_joints(self, mpii_dataset):
"""
:param mpii_dataset:
:return:
"""
# [:, 0, :, 2] --> all images, first person, all joints, visibility
all_joint_indices = mpii_dataset['gt'][:, 0, :, 2] > -0.5 # Identify indices: occluded and visible joints
all_joint_indices = np.all(all_joint_indices, axis=1)
for key in mpii_dataset.keys():
if key == 'img':
mpii_dataset['img'] = mpii_dataset['img']
continue
mpii_dataset[key] = mpii_dataset[key][all_joint_indices]
return mpii_dataset
def resize_image(self, image_=None, gt=None, target_size=None):
'''
:return:
'''
# Compute the aspect ratios
image_aspect_ratio = image_.shape[0] / image_.shape[1]
tgt_aspect_ratio = target_size[0] / target_size[1]
# Compare the original and target aspect ratio
if image_aspect_ratio > tgt_aspect_ratio:
# If target aspect ratio is smaller, scale the first dim
scale_factor = target_size[0] / image_.shape[0]
else:
# If target aspect ratio is bigger or equal, scale the second dim
scale_factor = target_size[1] / image_.shape[1]
# Compute the padding to fit the target size
pad_u = (target_size[0] - int(image_.shape[0] * scale_factor))
pad_v = (target_size[1] - int(image_.shape[1] * scale_factor))
output_img = np.zeros(target_size, dtype=image_.dtype)
# Write scaled size in reverse order because opencv resize
scaled_size = (int(image_.shape[1] * scale_factor), int(image_.shape[0] * scale_factor))
padding_u = int(pad_u / 2)
padding_v = int(pad_v / 2)
im_scaled = cv2.resize(image_, scaled_size)
# logging.debug('Scaled, pre-padding size: {}'.format(im_scaled.shape))
output_img[padding_u : im_scaled.shape[0] + padding_u,
padding_v : im_scaled.shape[1] + padding_v, :] = im_scaled
gt *= np.array([scale_factor, scale_factor, 1]).reshape(1, 1, 3)
gt[:, :, 0] += padding_u
gt[:, :, 1] += padding_v
scale_params = {'scale_factor': scale_factor, 'padding_u': padding_u, 'padding_v': padding_v}
return output_img, gt, scale_params
def augmentation(self, transform):
'''
:param transformation:
:return:
'''
return albu.Compose(transform, p=1, keypoint_params=albu.KeypointParams(format='yx', remove_invisible=False))
def estimate_uv(self, hm_array, pred_placeholder):
'''
:param hm_array:
:param pred_placeholder:
:return:
'''
if self.conf.experiment_settings['all_joints']:
threshold = 0
else:
threshold = self.threshold
# Iterate over each heatmap
for jnt_id in range(hm_array.shape[0]):
pred_placeholder[0, jnt_id, :] = uv_from_heatmap(hm=hm_array[jnt_id], threshold=threshold)
return pred_placeholder
def upscale(self, joints, scale_params):
'''
:return:
'''
joints[:, :, 0] -= scale_params['padding_u']
joints[:, :, 1] -= scale_params['padding_v']
joints /= np.array([scale_params['scale_factor'], scale_params['scale_factor'], 1]).reshape(1, 1, 3)
return joints | 52,432 | 41.182623 | 137 | py |
ActiveLearningForHumanPose | ActiveLearningForHumanPose-main/code/utils.py | import os
import sys
import copy
import math
from pathlib import Path
import torch
import scipy.io
import numpy as np
from tqdm import tqdm
from adjustText import adjust_text
from matplotlib import pyplot as plt
from matplotlib.patches import Circle, Rectangle
import umap
from sklearn.decomposition import PCA
plt.style.use('ggplot')
plt.switch_backend('agg')
def visualize_image(image_info, save_dir, bbox=False, adjusttext=False):
    '''Render skeleton overlays for every image and save them as JPEGs.

    For each dataset bucket ('mpii'/'lspet'/'lsp') in image_info, draws the
    predicted skeleton as coloured line segments, the ground-truth joints as
    circles with text labels, and (optionally) per-person bounding boxes.
    Images are written to <save_dir>/skeleton_visualizations/.

    :param image_info: (dict) output of recreate_images: per-dataset dicts with
                       'img', 'img_name', 'img_pred', 'img_gt', 'dataset',
                       'display_string' and (if bbox) 'bbox_coords'
    :param save_dir: (str) directory under which visualizations are saved
    :param bbox: (bool) also draw bounding-box rectangles
    :param adjusttext: (bool) run adjust_text to de-overlap joint labels (slow)
    '''
    # Joint coordinates are stored (u, v) = (row, col); matplotlib wants (x, y).
    uv_to_xy = lambda uv: (uv[1], uv[0])
    # Skeleton edges as [joint_a, joint_b, RGB colour]; MPII has pelvis/thorax,
    # the LSP-style skeleton does not.
    mpii_skelton = [['head', 'neck', (0, 0, 1)], ['neck', 'thorax', (0, 0, 1)],
                    ['thorax', 'lsho', (0, 1, 0)], ['lsho', 'lelb', (0, 1, 1)], ['lelb', 'lwri', (0, 1, 1)],
                    ['thorax', 'rsho', (0, 1, 0)], ['rsho', 'relb', (1, 0, 0)], ['relb', 'rwri', (1, 0, 0)],
                    ['lsho', 'lhip', (0, 1, 0)], ['rsho', 'rhip', (0, 1, 0)],
                    ['pelvis', 'lhip', (0, 1, 0)], ['lhip', 'lknee', (1, 0, 1)], ['lknee', 'lankl', (1, 0, 1)],
                    ['pelvis', 'rhip', (0, 1, 0)], ['rhip', 'rknee', (1, 1, 0)], ['rknee', 'rankl', (1, 1, 0)]]
    lsp_skeleton = [['head', 'neck', (0, 0, 1)], ['lhip', 'rhip', (0, 1, 0)],
                    ['neck', 'lsho', (0, 1, 0)], ['lsho', 'lelb', (0, 1, 1)], ['lelb', 'lwri', (0, 1, 1)],
                    ['neck', 'rsho', (0, 1, 0)], ['rsho', 'relb', (1, 0, 0)], ['relb', 'rwri', (1, 0, 0)],
                    ['lsho', 'lhip', (0, 1, 0)], ['lhip', 'lknee', (1, 0, 1)], ['lknee', 'lankl', (1, 0, 1)],
                    ['rsho', 'rhip', (0, 1, 0)], ['rhip', 'rknee', (1, 1, 0)], ['rknee', 'rankl', (1, 1, 0)]]
    # Per-joint marker/label colour for ground-truth rendering.
    colour = {'rankl': (1, 1, 0), 'rknee': (1, 1, 0), 'rhip': (1, 1, 0),
              'lankl': (1, 0, 1), 'lknee': (1, 0, 1), 'lhip': (1, 0, 1), 'pelvis': (0, 1, 0),
              'rwri': (1, 0, 0), 'relb': (1, 0, 0), 'rsho': (1, 0, 0),
              'lwri': (0, 1, 1), 'lelb': (0, 1, 1), 'lsho': (0, 1, 1),
              'head': (0, 1, 1), 'neck': (0, 1, 1), 'thorax': (0, 1, 0)}
    os.makedirs(os.path.join(save_dir, 'skeleton_visualizations'), exist_ok=True)
    img_dump = os.path.join(save_dir, 'skeleton_visualizations')
    # Currently will iterate over MPII and LSPET and LSP
    for dataset_name_ in image_info.keys():
        # Iterate over all images
        for i in tqdm(range(len(image_info[dataset_name_]['img']))):
            fig, ax = plt.subplots(nrows=1, ncols=1, frameon=False)
            ax.set_axis_off()
            img = image_info[dataset_name_]['img'][i]
            img_name = image_info[dataset_name_]['img_name'][i]
            img_pred = image_info[dataset_name_]['img_pred'][i]
            img_gt = image_info[dataset_name_]['img_gt'][i]
            img_dataset = image_info[dataset_name_]['dataset'][i]
            img_string = image_info[dataset_name_]['display_string'][i]
            # One list for each, ground truth and predictions
            text_overlay = []
            ax.set_title('Name: {}, Shape: {}, Dataset: {}'.format(img_string, str(img.shape), img_dataset),
                         color='orange')
            ax.imshow(img)
            if dataset_name_ == 'mpii':
                skeleton = mpii_skelton
            else:
                assert dataset_name_ == 'lsp' or dataset_name_ == 'lspet'
                skeleton = lsp_skeleton
            # Draw predicted limbs: only when both endpoint joints are visible.
            for link in skeleton:
                joint_1_name = link[0]
                joint_2_name = link[1]
                color = link[2]
                joint_1 = img_pred[joint_1_name][0]
                joint_2 = img_pred[joint_2_name][0]
                if joint_1[2] == 1 and joint_2[2] == 1:
                    joint_1 = uv_to_xy(joint_1)
                    joint_2 = uv_to_xy(joint_2)
                    ax.plot([joint_1[0], joint_2[0]], [joint_1[1], joint_2[1]], color=color)
            # Draw ground-truth joints (circle + label) for valid entries.
            joint_names = list(colour.keys())
            for jnt in joint_names:
                if (dataset_name_ in ['lsp', 'lspet']) and (jnt in ['pelvis', 'thorax']):
                    continue
                for jnt_gt in img_gt[jnt]:
                    if jnt_gt[2] >= 0 and jnt_gt[1] >= 0 and jnt_gt[0] >= 0:
                        jnt_gt = uv_to_xy(jnt_gt)
                        text_overlay.append(ax.text(x=jnt_gt[0], y=jnt_gt[1], s=jnt, color=colour[jnt], fontsize=6))
                        ax.add_patch(Circle(jnt_gt[:2], radius=1.5, color=colour[jnt], fill=False))
            if bbox:
                for person_patch in range(image_info[dataset_name_]['bbox_coords'].shape[1]):
                    coords = image_info[dataset_name_]['bbox_coords'][i, person_patch]
                    ax.add_patch(Rectangle(xy=(coords[0], coords[1]), height=(coords[3] - coords[1]), width=(coords[2]-coords[0]),
                                           linewidth=1, edgecolor='r', fill=False))
            if adjusttext:
                adjust_text(text_overlay)
            plt.savefig(fname=os.path.join(img_dump, '{}'.format(img_string)),
                        facecolor='black', edgecolor='black', bbox_inches='tight', dpi=500)
            del fig, ax
            plt.close()
def heatmap_loss(combined_hm_preds, heatmaps, egl=False):
    '''Per-sample MSE between predicted and target heatmaps, per hourglass stack.

    :param combined_hm_preds: Tensor (batch, nstack, joints, H, W) of predictions
    :param heatmaps: target heatmaps; (batch, joints, H, W), or
                     (batch, nstack, joints, H, W) when egl=True
    :param egl: (bool) targets are per-stack (indexed alongside predictions)
    :return: Tensor (batch, nstack) of mean squared errors
    '''
    def mse_per_sample(pred, gt):
        # Mean over joint, height and width dims -> one scalar per sample.
        return ((pred - gt) ** 2).mean(dim=[1, 2, 3])
    nstack = combined_hm_preds.shape[1]
    per_stack_losses = []
    for stack_idx in range(nstack):
        pred = combined_hm_preds[:, stack_idx]
        target = heatmaps[:, stack_idx] if egl else heatmaps
        per_stack_losses.append(mse_per_sample(pred, target.to(pred.device)))
    return torch.stack(per_stack_losses, dim=1)
def heatmap_generator(joints, occlusion, hm_shape=(0, 0), img_shape=(0, 0)):
    '''Render per-joint Gaussian target heatmaps from joint coordinates.

    Joints are first downscaled from image resolution to heatmap resolution,
    then one heatmap per joint index is built by taking the elementwise max of
    the Gaussians of all persons for that joint.

    NOTE(review): `joints` is scaled IN PLACE by the downscale step below —
    callers that reuse the array should pass a copy.

    :param joints: (ndarray) shape (num_person, num_joints, [u, v, vis])
    :param occlusion: (bool) if True, occluded joints (vis == 0) also get a
                      Gaussian; if False they are skipped
    :param hm_shape: (tuple) heatmap (H, W)
    :param img_shape: (tuple) source image (H, W)
    :return: (heatmaps, joints_exist) — heatmaps of shape
             (num_joints, hm_H, hm_W) and a per-joint uint8 presence flag
    '''
    def draw_heatmap(pt_uv, use_occlusion, hm_shape, sigma=1.75):
        '''
        2D gaussian (exponential term only) centred at given point.
        No constraints on point to be integer only.
        :param pt_uv: (Numpy array of size=3) Float (u, v, visibility) point
        :param use_occlusion: (bool) whether vis == 0 joints are rendered
        :param hm_shape: (tuple) heatmap shape
        :param sigma: (Float) standard deviation of the gaussian
        :return: (heatmap, joint_exist) — heatmap of hm_shape and a 0/1 flag
        '''
        im = np.zeros(hm_shape, dtype=np.float32)
        # If joint is absent
        if pt_uv[2] == -1:
            return im, 0
        elif pt_uv[2] == 0:
            # Occluded joint: rendered only when occlusion training is enabled.
            if not use_occlusion:
                return im, 0
        else:
            assert pt_uv[2] == 1, "joint[2] should be (-1, 0, 1), but got {}".format(pt_uv[2])
        # Point around which Gaussian will be centred.
        pt_uv = pt_uv[:2]
        pt_uv_rint = np.rint(pt_uv).astype(int)
        # Size of 2D Gaussian window.
        size = int(math.ceil(6 * sigma))
        # Ensuring that size remains an odd number
        if not size % 2:
            size += 1
        # Check whether gaussian intersects with im:
        if (pt_uv_rint[0] - (size//2) >= hm_shape[0]) or (pt_uv_rint[0] + (size//2) <= 0) \
                or (pt_uv_rint[1] - (size//2) > hm_shape[1]) or (pt_uv_rint[1] + (size//2) < 0):
            return im, 0
        else:
            # Generate gaussian, with window=size and variance=sigma
            u = np.arange(pt_uv_rint[0] - (size // 2), pt_uv_rint[0] + (size // 2) + 1)
            v = np.arange(pt_uv_rint[1] - (size // 2), pt_uv_rint[1] + (size // 2) + 1)
            uu, vv = np.meshgrid(u, v, sparse=True)
            z = np.exp(-((uu - pt_uv[0]) ** 2 + (vv - pt_uv[1]) ** 2) / (2 * (sigma ** 2)))
            z = z.T
            # Identify indices in im that will define the crop area
            top = max(0, pt_uv_rint[0] - (size//2))
            bottom = min(hm_shape[0], pt_uv_rint[0] + (size//2) + 1)
            left = max(0, pt_uv_rint[1] - (size//2))
            right = min(hm_shape[1], pt_uv_rint[1] + (size//2) + 1)
            # Paste the clipped window of the Gaussian into the heatmap.
            im[top:bottom, left:right] = \
                z[top - (pt_uv_rint[0] - (size//2)): top - (pt_uv_rint[0] - (size//2)) + (bottom - top),
                  left - (pt_uv_rint[1] - (size//2)): left - (pt_uv_rint[1] - (size//2)) + (right - left)]
        return im, 1  # heatmap, joint_exist
    assert len(joints.shape) == 3, 'Joints should be rank 3:' \
                                   '(num_person, num_joints, [u,v,vis]), but is instead {}'.format(joints.shape)
    heatmaps = np.zeros([joints.shape[1], hm_shape[0], hm_shape[1]], dtype=np.float32)
    joints_exist = np.zeros([joints.shape[1]], dtype=np.uint8)
    # Downscale
    downscale = [(img_shape[0] - 1)/(hm_shape[0] - 1), ((img_shape[1] - 1)/(hm_shape[1] - 1))]
    joints /= np.array([downscale[0], downscale[1], 1]).reshape(1, 1, 3)
    # Iterate over number of heatmaps
    for i in range(joints.shape[1]):
        # Create new heatmap for joint
        hm_i = np.zeros(hm_shape, dtype=np.float32)
        # Iterate over persons
        for p in range(joints.shape[0]):
            hm_, joint_present = draw_heatmap(pt_uv=joints[p, i, :], use_occlusion=occlusion, hm_shape=hm_shape)
            joints_exist[i] = max(joints_exist[i], joint_present)
            # Multiple persons on one joint map: keep the pointwise maximum.
            hm_i = np.maximum(hm_i, hm_)
        heatmaps[i] = hm_i
    return heatmaps, joints_exist
def uv_from_heatmap(hm=None, threshold=None, img_shape=(256, 256)):
    '''Decode a single heatmap into an image-space (u, v, visibility) joint.

    Finds the heatmap maximum, refines it by a weighted-average around the
    peak, rejects it when the heatmap value at the refined point is below
    `threshold`, and otherwise upscales to image coordinates.

    :param hm: (ndarray) single 2D heatmap
    :param threshold: (float) minimum peak value to accept the joint
    :param img_shape: (tuple) image (H, W) the coordinates map back to
    :return: (ndarray) [u, v, 1] in image space, or [-1, -1, -1] if rejected
    '''
    peak_uv = arg_max(hm)
    refined_uv = weight_avg_centre(hm=hm, max_uv=peak_uv)
    # Reject weak responses as "joint absent".
    if hm[int(refined_uv[0]), int(refined_uv[1])] < threshold:
        return np.array([-1, -1, -1])
    joint = np.array([refined_uv[0], refined_uv[1], 1])
    hm_shape = hm.shape
    # Map heatmap coordinates back to image resolution (endpoints aligned).
    scale_u = (img_shape[0] - 1) / (hm_shape[0] - 1)
    scale_v = (img_shape[1] - 1) / (hm_shape[1] - 1)
    return joint * np.array([scale_u, scale_v, 1])
def arg_max(img):
    '''
    Find the indices corresponding to the maximum value in a 2D heatmap.
    (Removed the unused `h` local and redundant int() conversions —
    torch.Size entries are already Python ints.)
    :param img: (Numpy array of size=64x64) Heatmap
    :return: (Torch FloatTensor of size=2) [u, v] location of the maximum
    '''
    img = torch.tensor(img)
    assert img.dim() == 2, 'Expected img.dim() == 2, got {}'.format(img.dim())
    w = img.shape[1]
    # Row/column of the flattened argmax.
    rawmaxidx = int(img.flatten().argmax())
    return torch.FloatTensor([rawmaxidx // w, rawmaxidx % w])
def fast_argmax(_heatmaps):
    """Vectorized per-heatmap argmax without any peak smoothing.

    :param _heatmaps: Tensor (batch, joints, S, S) of square heatmaps
    :return: Tensor (batch, joints, 2) float32 of (row, col) maxima
    """
    batch_size = _heatmaps.shape[0]
    num_jnts = _heatmaps.shape[1]
    spatial_dim = _heatmaps.shape[3]
    assert _heatmaps.shape[2] == _heatmaps.shape[3]
    assert len(_heatmaps.shape) == 4, "Heatmaps should be of shape: BatchSize x num_joints x 64 x64"
    # Flatten the spatial dims, argmax, then unravel into (row, col).
    flat = _heatmaps.reshape(batch_size, num_jnts, -1)
    flat_idx = torch.argmax(flat, dim=2)
    rows = (flat_idx // spatial_dim).view(batch_size, num_jnts, 1)
    cols = (flat_idx % spatial_dim).view(batch_size, num_jnts, 1)
    return torch.cat((rows, cols), dim=2).type(torch.float32)
def weight_avg_centre(hm, max_uv=None, jnt_size=1.75):
    '''
    Weighted average of points around the maxima. Weighted average avoids
    solitary spikes being identified as the joint.
    :param hm: (Numpy array of size 64x64) heatmap, negatives clamped to 0
    :param max_uv: (Torch tensor of size=2) location of the heatmap maximum
    :param jnt_size: (Float) window half-size (ceil'd) around the maximum
    :return: (Numpy array of size=2) refined (u, v)
    '''
    heat = torch.clamp(torch.from_numpy(hm), min=0.0)
    window = np.ceil(jnt_size)
    # Heatmap dimensions, used to clip the crop to valid indices.
    dims = torch.Tensor([heat.shape[0], heat.shape[1]]).float()
    start = torch.max(torch.zeros(2), max_uv - window)
    stop = torch.min(dims - 1, max_uv + window)
    # Crop around the maximum (inclusive bounds).
    crop = heat[int(start[0]):int(stop[0] + 1), int(start[1]):int(stop[1] + 1)].clone()
    crop = crop.type(torch.FloatTensor)
    total = crop.sum()
    # Guard against an all-zero crop (division by zero).
    if total == 0:
        total = total + 0.000001
    # Intensity-weighted centroid along each axis.
    u = crop.sum(1).mul(torch.arange(start[0], stop[0] + 1)).div(total).sum()
    v = crop.sum(0).mul(torch.arange(start[1], stop[1] + 1)).div(total).sum()
    return np.array([u, v])
def principal_component_analysis(encodings, n_components=2):
    '''
    Project encodings onto their top principal components.
    :param encodings: Encodings generated by LLAL network
    :param n_components: Number of components to retain
    :return: Principal Component Transform of encodings
    '''
    reducer = PCA(n_components=n_components)
    reducer.fit(encodings)
    return reducer.transform(encodings)
def umap_fn(encodings, n_components=2):
    '''
    NUMPY
    https://umap-learn.readthedocs.io/en/latest/how_umap_works.html
    Compute the UMAP transform of the encodings.
    :param encodings: Encodings generated by LLAL network
    :param n_components: Number of components to retain
    :return: UMAP transform of the encodings
    '''
    # n_neighbors balances local versus global structure of the data;
    # min_dist=0.0 allows points to pack tightly in the embedding.
    reducer = umap.UMAP(n_neighbors=30, min_dist=0.0, n_components=n_components)
    fitted = reducer.fit(encodings)
    return fitted.transform(encodings)
def shannon_entropy(probs):
    '''
    Computes the Shannon Entropy H(p) = -sum(p * ln p) along axis=1.
    NOTE: zero probabilities yield NaN (0 * log 0); callers must avoid them.
    :param probs: 2D-Tensor; probability distribution along axis=1
    :return: 1D-Tensor; one entropy value per row
    '''
    return -(probs * torch.log(probs)).sum(dim=1)
def count_parameters(model):
    '''Number of trainable (requires_grad) parameters in a torch module.'''
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def get_pairwise_joint_distances(heatmaps):
    '''
    Computes pairwise distances between joints, given a batch of heatmaps.
    :param heatmaps: Tensor of size (batch_size, num_joints, hm_size, hm_size)
    :return: Tensor of size (batch_size, num_joints, num_joints) of Euclidean
             distances between the per-joint argmax locations
    '''
    assert heatmaps.dim() == 4, 'Dimension of input heatmaps must be 4 but was {}'.format(heatmaps.shape)
    # (batch, joints, 2) float coordinates of each joint's heatmap maximum.
    joint_coords = fast_argmax(heatmaps)
    pairwise = torch.cdist(joint_coords, joint_coords)
    return pairwise
ActiveLearningForHumanPose | ActiveLearningForHumanPose-main/code/activelearning_viz.py | import os
import cv2
import math
import torch
import torch.utils.data
import numpy as np
import logging
from tqdm import tqdm
from pathlib import Path
from sklearn.metrics import pairwise_distances
from skimage.feature import peak_local_max
from scipy.special import softmax as softmax_fn
from scipy.stats import entropy as entropy_fn
from matplotlib import pyplot as plt
import copy
# EGL sampling
import autograd_hacks
from utils import heatmap_loss
from utils import shannon_entropy
from utils import heatmap_generator
from utils import uv_from_heatmap
from utils import visualize_image
plt.style.use('ggplot')
class ActiveLearning_Visualization(object):
"""
Contains collection of active learning algorithms for human joint localization
"""
    def __init__(self, conf, pose_net, aux_net):
        """Store models/config and build joint-name <-> index mappings.

        Puts the pose model (and, for auxiliary-network-based algorithms, the
        aux model) in eval mode; extends the joint maps with pelvis/thorax for
        MPII-style datasets.

        :param conf: experiment configuration object
        :param pose_net: pose estimation network
        :param aux_net: auxiliary network (used by learning_loss / aleatoric /
                        vl4pose sampling)
        """
        self.conf = conf
        self.pose_model = pose_net
        self.aux_net = aux_net
        # Visualization is inference-only: disable dropout/batchnorm updates.
        self.pose_model.eval()
        if conf.active_learning['algorithm'] in ['learning_loss', 'aleatoric', 'vl4pose']:
            self.aux_net.eval()
        # joint-name -> heatmap-index and its inverse (LSP 14-joint base set).
        self.j2i = {'head': 0, 'neck': 1, 'lsho': 2, 'lelb': 3, 'lwri': 4, 'rsho': 5, 'relb': 6, 'rwri': 7, 'lhip': 8,
                    'lknee': 9, 'lankl': 10, 'rhip': 11, 'rknee': 12, 'rankl': 13}
        self.i2j = {0: 'head', 1: 'neck', 2: 'lsho', 3: 'lelb', 4: 'lwri', 5: 'rsho', 6: 'relb', 7: 'rwri',
                    8: 'lhip', 9: 'lknee', 10: 'lankl', 11: 'rhip', 12: 'rknee', 13: 'rankl'}
        # update j2i, i2j with new joints
        if conf.dataset['load'] == 'mpii' or conf.dataset['load'] == 'merged':
            self.j2i['pelvis'] = 14
            self.j2i['thorax'] = 15
            self.i2j[14] = 'pelvis'
            self.i2j[15] = 'thorax'
        # NOTE(review): 'merged' is accepted by the branch above but rejected
        # here — visualization apparently supports single datasets only;
        # confirm intent before removing this assertion.
        assert self.conf.dataset['load'] != 'merged'
def base(self, train, dataset_size):
"""
Do no visualization
"""
raise Exception('Base method cannot be visualized')
def random(self, train, dataset_size):
"""
Do no visualization
"""
raise Exception('Random method cannot be visualized')
    def coreset_sampling(self, train, dataset_size):
        """Visualize the Core-Set (k-Centre Greedy) selection.

        Sener and Savarese, "Active Learning for Convolutional Neural Networks:
        A Core-Set Approach", ICLR 2018, https://arxiv.org/abs/1708.00489

        Computes pose-network penultimate-layer embeddings for the whole pool,
        runs k-Centre Greedy seeded with the previously annotated indices,
        plots the max-min distance curve, and renders GT/prediction overlays
        for the first 15 selected (most diverse) samples.

        :param train: (dict) full training pool (canonical dataset dict)
        :param dataset_size: unused here; kept for a uniform sampler signature
        :return: previously saved annotation indices when resuming, else None
        """
        logging.info('\nVisualizing Core-Set.')
        def update_distances(cluster_centers, encoding, min_distances=None):
            '''
            Based on: https://github.com/google/active-learning/blob/master/sampling_methods/kcenter_greedy.py
            Update each pool point's distance to its nearest cluster center.
            :param cluster_centers: indices (into encoding) of new centers
            :param encoding: (ndarray) pool embeddings, one row per sample
            :param min_distances: (ndarray or None) current per-sample minima;
                                  None computes them from scratch
            :return: updated (n, 1) array of min distances
            '''
            if len(cluster_centers) != 0:
                # Update min_distances for all examples given new cluster center.
                x = encoding[cluster_centers]
                dist = pairwise_distances(encoding, x, metric='euclidean')
                if min_distances is None:
                    min_distances = np.min(dist, axis=1).reshape(-1, 1)
                else:
                    min_distances = np.minimum(min_distances, dist)
            return min_distances
        # When resuming, reuse the annotation set saved by the previous run.
        if self.conf.resume_training:
            return np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))
        assert self.conf.model['load'], "Core-set requires a pretrained model."
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))
        unlabelled_idx = np.array(list(set(train['index'])-set(annotated_idx)))
        assert np.all(train['index'] == np.arange(train['name'].shape[0]))
        dataset_ = ActiveLearningVizDataLoader(train, indices=np.arange(train['name'].shape[0]), conf=self.conf)
        coreset_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)
        ################################################################################################################
        # Part 1: Obtain Pose Embeddings
        ################################################################################################################
        pose_encoding = None
        logging.info('\nComputing Core-Set embeddings.')
        with torch.no_grad():
            for images, _, names, gts, datasets in tqdm(coreset_dataloader):
                pose_heatmaps_, pose_features = self.pose_model(images)
                # First batch: TypeError from cat(None, ...) initializes the stack.
                try:
                    pose_encoding = torch.cat((pose_encoding, pose_features['penultimate'].cpu()), dim=0)
                except TypeError:
                    pose_encoding = pose_features['penultimate'].cpu()
        pose_encoding = pose_encoding.squeeze().numpy()
        logging.info('Core-Set embeddings computed.\n')
        ################################################################################################################
        # Part 2: K-Centre Greedy
        ################################################################################################################
        logging.info('\nComputing k-Centre Greedy')
        core_set_budget = 15  # Hardcoded, we want to see the first 15 diverse samples
        min_distances = None
        assert len(annotated_idx) != 0, "No annotations for previous model found, core-set cannot proceeed."
        # Seed distances with the already-annotated samples as centers.
        min_distances = update_distances(cluster_centers=annotated_idx, encoding=pose_encoding, min_distances=None)
        display_idx = []
        distances_over_time = []
        for _ in tqdm(range(unlabelled_idx.shape[0])):
            # Greedy step: pick the sample farthest from all current centers.
            ind = np.argmax(min_distances)
            distances_over_time.append(np.max(min_distances))
            # New examples should not be in already selected since those points
            # should have min_distance of zero to a cluster center.
            min_distances = update_distances(cluster_centers=[ind], encoding=pose_encoding, min_distances=min_distances)
            annotated_idx = np.concatenate([annotated_idx, [ind]], axis=0).astype(np.int32)
            display_idx.append(ind)
        logging.info('Computed k-Centre Greedy.\n')
        del pose_encoding
        ################################################################################################################
        # Part 3: Plot max distances over time
        ################################################################################################################
        plt.plot(np.arange(unlabelled_idx.shape[0]), distances_over_time, label='Maximum distance')
        plt.title('Core-Set: Maximum Distances over n-selections')
        os.makedirs(os.path.join(self.conf.model['save_path'], 'coreset_visualizations'), exist_ok=True)
        plt.savefig(fname=os.path.join(self.conf.model['save_path'], 'coreset_visualizations/Distances_CoreSet.jpg'),
                    facecolor='black', edgecolor='black', bbox_inches='tight', dpi=300)
        plt.close()
        ################################################################################################################
        # Part 4: Visualize Core-Set images
        ################################################################################################################
        # Only the first `core_set_budget` (most diverse) selections are drawn.
        dataset_ = ActiveLearningVizDataLoader(train, indices=display_idx[:core_set_budget], conf=self.conf)
        coreset_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)
        images_all = None
        gts_all = None
        names_all = []
        hm_uv_stack = []
        datasets_all = []
        logging.info('Obtaining images and data for selected samples.')
        # Disable autograd to speed up inference
        with torch.no_grad():
            for images, _, names, gts, datasets in tqdm(coreset_dataloader):
                pose_heatmaps_, _ = self.pose_model(images)
                for i in range(images.shape[0]):
                    # Decode joints from the last hourglass stack's heatmaps.
                    hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
                    hm_uv_stack.append(hm_uv)
                    names_all.append(names[i])
                    datasets_all.append(datasets[i])
                try:
                    images_all = torch.cat([images_all, images], dim=0)
                    gts_all = torch.cat([gts_all, gts], dim=0)
                except TypeError:
                    images_all = images
                    gts_all = gts
        images_all = images_all.numpy()
        hm_uv_stack = np.stack(hm_uv_stack, axis=0)
        names_all = np.array(names_all)
        datasets_all = np.array(datasets_all)
        gts_all = gts_all.numpy()
        logging.info('Images loaded.\n')
        logging.info('Visualizing images (GT and Prediction)')
        self._visualize_predictions(image=images_all, name=names_all, dataset=datasets_all,
                                    gt=gts_all, pred=hm_uv_stack, string=names_all)
def learning_loss_sampling(self, train, dataset_size):
"""
Yoo and Kweon, "Learning Loss for Active Learning"
CVPR 2019
https://openaccess.thecvf.com/content_CVPR_2019/papers/Yoo_Learning_Loss_for_Active_Learning_CVPR_2019_paper.pdf
Shukla and Ahmed, "A Mathematical Analysis of Learning Loss for Active Learning in Regression"
CVPR-W 2021
https://openaccess.thecvf.com/content/CVPR2021W/TCV/papers/Shukla_A_Mathematical_Analysis_of_Learning_Loss_for_Active_Learning_in_CVPRW_2021_paper.pdf
"""
logging.info('Visualizing learning loss.')
assert self.conf.model['load'], "Learning loss requires a previously trained model"
annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))
# Set of indices not annotated
unlabelled_idx = np.array(list(set(train['index'])-set(annotated_idx)))
dataset_ = ActiveLearningVizDataLoader(train, indices=unlabelled_idx, conf=self.conf)
learnloss_dataloader = torch.utils.data.DataLoader(
dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)
################################################################################################################
# Part 1: Obtain Learning Loss predictions
################################################################################################################
learnloss_pred = None
with torch.no_grad():
for images, _, names, gts, datasets in tqdm(learnloss_dataloader):
pose_heatmaps_, pose_features_ = self.pose_model(images)
learnloss_pred_ = self._aux_net_inference(pose_features_).squeeze()
try:
learnloss_pred = torch.cat([learnloss_pred, learnloss_pred_.cpu()], dim=0)
except TypeError:
learnloss_pred = learnloss_pred_.cpu()
learnloss_pred = learnloss_pred.squeeze().numpy()
# argsort defaults to ascending
new_index = np.arange(learnloss_pred.shape[0]).reshape(-1, 1)
learnloss_with_index = np.concatenate([learnloss_pred.reshape(-1, 1),
new_index], axis=-1)
learnloss_with_index = learnloss_with_index[learnloss_with_index[:, 0].argsort()]
# Slice aleatoric for top-5 and bottom-5 images
min_learnloss_idx = learnloss_with_index[:15, 1].astype(np.int32)
max_learnloss_idx = learnloss_with_index[-15:, 1].astype(np.int32)
################################################################################################################
# Part 2: Visualize images with low loss
################################################################################################################
idx = min_learnloss_idx
# Part 2.a: Loading images
dataset_ = ActiveLearningVizDataLoader(train, indices=unlabelled_idx[idx], conf=self.conf)
learnloss_dataloader = torch.utils.data.DataLoader(
dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)
images_all = None
gts_all = None
names_all = []
hm_uv_stack = []
datasets_all = []
with torch.no_grad():
for images, _, names, gts, datasets in tqdm(learnloss_dataloader):
pose_heatmaps_, pose_features_ = self.pose_model(images)
for i in range(images.shape[0]):
hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
hm_uv_stack.append(hm_uv)
names_all.append(names[i])
datasets_all.append(datasets[i])
try:
images_all = torch.cat([images_all, images], dim=0)
gts_all = torch.cat([gts_all, gts], dim=0)
except TypeError:
images_all = images
gts_all = gts
images_all = images_all.numpy()
hm_uv_stack = np.stack(hm_uv_stack, axis=0)
names_all = np.array(names_all)
datasets_all = np.array(datasets_all)
gts_all = gts_all.numpy()
names_modified = []
for i in range(15):
temp = 'LearningLoss_{}_andName_{}'.format(learnloss_pred[min_learnloss_idx[i]], names_all[i])
names_modified.append(temp)
names_modified = np.array(names_modified)
self._visualize_predictions(image=images_all, name=names_all, dataset=datasets_all, gt=gts_all,
pred=hm_uv_stack, string=names_modified)
################################################################################################################
# Part 3: Visualize images with high loss
################################################################################################################
idx = max_learnloss_idx
# Part 3.a: Loading images
dataset_ = ActiveLearningVizDataLoader(train, indices=unlabelled_idx[idx], conf=self.conf)
learnloss_dataloader = torch.utils.data.DataLoader(
dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)
images_all = None
gts_all = None
names_all = []
hm_uv_stack = []
datasets_all = []
with torch.no_grad():
for images, _, names, gts, datasets in tqdm(learnloss_dataloader):
pose_heatmaps_, pose_features_ = self.pose_model(images)
for i in range(images.shape[0]):
hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
hm_uv_stack.append(hm_uv)
names_all.append(names[i])
datasets_all.append(datasets[i])
try:
images_all = torch.cat([images_all, images], dim=0)
gts_all = torch.cat([gts_all, gts], dim=0)
except TypeError:
images_all = images
gts_all = gts
images_all = images_all.numpy()
hm_uv_stack = np.stack(hm_uv_stack, axis=0)
names_all = np.array(names_all)
datasets_all = np.array(datasets_all)
gts_all = gts_all.numpy()
names_modified = []
for i in range(15):
temp = 'LearningLoss_{}_andName_{}'.format(learnloss_pred[max_learnloss_idx[i]], names_all[i])
names_modified.append(temp)
names_modified = np.array(names_modified)
self._visualize_predictions(image=images_all, name=names_all, dataset=datasets_all, gt=gts_all,
pred=hm_uv_stack, string=names_modified)
def expected_gradient_length_sampling(self, train, dataset_size):
"""
Megh Shukla, "Bayesian Uncertainty and Expected Gradient Length - Regression: Two Sides Of The Same Coin?"
WACV 2022
https://openaccess.thecvf.com/content/WACV2022/papers/Shukla_Bayesian_Uncertainty_and_Expected_Gradient_Length_-_Regression_Two_Sides_WACV_2022_paper.pdf
"""
def _probability(pair_dist):
'''
Computes P(j|i) using Binary Search
:param pairwise_dist: (2D Tensor) pairwise distances between samples --> actual dist, not squared
:return: 2D Tensor containing conditional probabilities
'''
def calc_probs_perp(lower_bound, upper_bound, pair_dist):
sigmas = (lower_bound + upper_bound) / 2
variance = (sigmas ** 2).reshape(-1, 1)
scaled_pair_dist_neg = -pair_dist / (2 * variance)
probs_unnormalized = torch.exp(scaled_pair_dist_neg)
probs_unnormalized = torch.clamp(probs_unnormalized, min=1e-20, max=1.)
softmax = probs_unnormalized / torch.sum(probs_unnormalized, dim=1, keepdim=True)
softmax = torch.clamp(softmax, min=1e-30, max=1.)
entropy = shannon_entropy(softmax)
perplexity_hat = torch.pow(2 * torch.ones(n_samples), entropy)
return perplexity_hat, softmax
def condition(perplexity_hat, perplexity):
mask = torch.lt(torch.abs(perplexity_hat - perplexity), TOLERANCE)
return False in mask
global PERPLEXITY, TOLERANCE, n_samples
tries = 100
n_samples = pair_dist.shape[0]
PERPLEXITY = self.conf.active_learning['egl']['perplexity']
TOLERANCE = self.conf.active_learning['egl']['tolerance'] * torch.ones(n_samples)
pair_dist = pair_dist ** 2
lower = torch.zeros(n_samples)
upper = (torch.max(torch.max(pair_dist), torch.max(pair_dist**0.5))) * torch.ones(n_samples) * 5
perplexity = PERPLEXITY * torch.ones(n_samples)
perplexity_hat, probs = calc_probs_perp(lower, upper, pair_dist)
while condition(perplexity_hat, perplexity):
if tries < 0:
break
tries -= 1
mask_gt = torch.gt(perplexity_hat - perplexity, TOLERANCE).type(torch.float32)
upper_update = upper - torch.mul(mask_gt, (upper - lower) / 2)
mask_lt = torch.lt(perplexity_hat - perplexity, -TOLERANCE).type(torch.float32)
lower_update = lower + torch.mul(mask_lt, (upper - lower) / 2)
upper = upper_update
lower = lower_update
perplexity_hat, probs = calc_probs_perp(lower, upper, pair_dist)
del PERPLEXITY, TOLERANCE, n_samples
return probs
logging.info('VISUALIZING expected gradient length sampling.')
# Setup --------------------------------------------------------------------------------------------------------
# Load indices of previously annotated data
assert self.conf.model['load'], "Expected Gradient Length requires a previously trained model"
annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))
# Set of indices not annotated
unlabelled_idx = np.array(list(set(train['index']) - set(annotated_idx)))
################################################################################################################
# Part 1: Obtain embeddings for labelled data
################################################################################################################
dataset_ = ActiveLearningVizDataLoader(dataset_dict=train, conf=self.conf, indices=annotated_idx)
egl_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
shuffle=False, num_workers=2)
logging.info('Computing embeddings for labelled images.')
# Disable autograd to speed up inference
with torch.no_grad():
pose_encoding_L = None
for images, _, names, gts, datasets in tqdm(egl_dataloader):
_, pose_features_ = self.pose_model(images)
try:
pose_encoding_L = torch.cat((pose_encoding_L, pose_features_['penultimate'].cpu()), dim=0)
except TypeError:
pose_encoding_L = pose_features_['penultimate'].cpu()
################################################################################################################
# Part 2: Obtain embeddings for unlabelled data
################################################################################################################
logging.info('Computing embeddings for unlabelled data')
dataset_ = ActiveLearningVizDataLoader(dataset_dict=train, conf=self.conf, indices=unlabelled_idx)
egl_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
shuffle=False, num_workers=2)
# Disable autograd to speed up inference
with torch.no_grad():
pose_encoding_U = None
names_U = []
gts_U = None
hm_uv_stack_U = []
datasets_U = []
for images, _, names, gts, datasets in tqdm(egl_dataloader):
pose_heatmaps_, pose_features_ = self.pose_model(images)
for i in range(images.shape[0]):
hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
hm_uv_stack_U.append(hm_uv)
names_U.append(names[i])
datasets_U.append(datasets[i])
try:
pose_encoding_U = torch.cat((pose_encoding_U, pose_features_['penultimate'].cpu()), dim=0) # GAP over the 4x4 lyr
gts_U = torch.cat([gts_U, gts], dim=0)
except TypeError:
pose_encoding_U = pose_features_['penultimate'].cpu()
gts_U = gts
hm_uv_stack_U = np.stack(hm_uv_stack_U, axis=0)
names_U = np.array(names_U)
datasets_U = np.array(datasets_U)
gts_U = gts_U.numpy()
################################################################################################################
# Part 3: Compute pairwise distances
################################################################################################################
logging.info('Computing pairwise probabilities data')
with torch.no_grad():
pair_dist = torch.cdist(pose_encoding_U, pose_encoding_L, p=2) # Unlabelled[i] to Labelled[j]
p_i_given_j = _probability(pair_dist)
k = self.conf.active_learning['egl']['k']
assert len(p_i_given_j.shape) == 2, "Not a 2-dimensional tensor"
vals_L, idx_L = torch.topk(p_i_given_j, k=k, dim=1, sorted=True, largest=True)
del pose_encoding_L, pose_encoding_U, pair_dist, p_i_given_j
################################################################################################################
# Part 4: Compute expected gradient length
################################################################################################################
logging.info('Computing the gradient between the unlabelled and labelled images.')
pose_gradients_nbrs = torch.zeros(size=(unlabelled_idx.shape[0], k), dtype=torch.float32).to(vals_L.device)
assert vals_L.shape == pose_gradients_nbrs.shape
autograd_hacks.add_hooks(self.pose_model)
unlabelled_dataset = ActiveLearningVizDataLoader(dataset_dict=train, indices=unlabelled_idx, conf=self.conf)
for i in tqdm(range(len(unlabelled_dataset))):
neighbors_ = ActiveLearningVizDataLoader(dataset_dict=train, indices=annotated_idx[idx_L[i]], conf=self.conf)
neighbors_dataloader = torch.utils.data.DataLoader(neighbors_,
batch_size=self.conf.experiment_settings['batch_size'],
shuffle=False,
num_workers=2)
# Keep on GPU, collect heatmaps for the neighbours
with torch.no_grad():
hm_L = None
for images_l, _, _, _, _ in neighbors_dataloader:
hm_l, _ = self.pose_model(images_l)
try:
hm_L = torch.cat((hm_L, hm_l), dim=0)
except TypeError:
hm_L = hm_l
# Compute gradient wrt these neighbors
image, _, _, _, _ = unlabelled_dataset.__getitem__(i)
images = torch.cat(k * [image.unsqueeze(0)], dim=0)
hm_U, _ = self.pose_model(images)
loss = heatmap_loss(hm_U, hm_L, egl=True).mean()
loss.backward()
autograd_hacks.compute_grad1(model=self.pose_model, loss_type='mean')
with torch.no_grad():
grads = torch.zeros((k,), dtype=torch.float32)
for param in self.pose_model.parameters():
try:
# Sum of squared gradients for each batch element
grads = grads.to(param.grad1.device)
grads += (param.grad1 ** 2).sum(dim=list(range(len(param.grad1.shape)))[1:])
except AttributeError:
continue
pose_gradients_nbrs[i] = grads.to(pose_gradients_nbrs.device)
# Removing gradients due to previous image
self.pose_model.zero_grad()
autograd_hacks.clear_backprops(self.pose_model)
autograd_hacks.remove_hooks(self.pose_model)
egl = (vals_L * pose_gradients_nbrs).sum(dim=1).squeeze()
################################################################################################################
# Part 5: Visualize top-K images and their nearest neighbors
################################################################################################################
vals_topK, idx_topK = torch.topk(egl, k=15, sorted=True, largest=True)
idx_topK = idx_topK.numpy()
vals_topK = vals_topK.numpy()
# Part 5.a: topk actual imgs visualization
dataset_ = ActiveLearningVizDataLoader(dataset_dict=train, conf=self.conf, indices=unlabelled_idx[idx_topK])
egl_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
shuffle=False, num_workers=2)
logging.info('Generating top-K images')
images_U = None
# Disable autograd to speed up inference
with torch.no_grad():
for images, _, _, _, datasets in tqdm(egl_dataloader):
try:
images_U = torch.cat([images_U, images], dim=0)
except TypeError:
images_U = images
images_U = images_U.numpy()
name_modified = []
for i in range(15):
#name_ = train['name'][unlabelled_idx_topK[i]]
name_ = names_U[idx_topK[i]]
name_modified.append('top_{}_egl_{}_name_{}.png'.format(i + 1, vals_topK[i], name_))
self._visualize_predictions(image=images_U, name=names_U[idx_topK],
dataset=datasets_U[idx_topK], gt=gts_U[idx_topK],
pred=hm_uv_stack_U[idx_topK], string=np.array(name_modified))
# Part 5.b: topk nbrs
logging.info('Generating neighbors for top-K images')
images_L = None
gts_L = None
names_L = []
hm_uv_stack_L = []
datasets_L = []
name_modified = []
for i in tqdm(range(15)): # Top fifteen samples with highest EGL
# Top 5 neighbors for each joint
dataset_ = ActiveLearningVizDataLoader(dataset_dict=train, conf=self.conf, indices=annotated_idx[idx_L[idx_topK[i], :5]])
egl_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
shuffle=False, num_workers=2, drop_last=False)
nbr = 0
with torch.no_grad():
for images, _, names, gts, datasets in egl_dataloader:
pose_heatmaps_, pose_features_ = self.pose_model(images)
for j in range(images.shape[0]):
hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[j])
hm_uv_stack_L.append(hm_uv)
names_L.append(names[j])
datasets_L.append(datasets[j])
name_modified.append(
'top_{}_nbr_{}_pji_{}_name_{}.png'.format(i + 1, nbr + j, vals_L[idx_topK[i], nbr+j], names[j]))
nbr += images.shape[0]
try:
images_L = torch.cat([images_L, images], dim=0)
gts_L = torch.cat([gts_L, gts], dim=0)
except TypeError:
images_L = images
gts_L = gts
images_L = images_L.numpy()
hm_uv_stack_L = np.stack(hm_uv_stack_L, axis=0)
names_L = np.array(names_L)
datasets_L = np.array(datasets_L)
gts_L = gts_L.numpy()
name_modified = np.array(name_modified)
self._visualize_predictions(image=images_L, name=names_L, dataset=datasets_L, gt=gts_L,
pred=hm_uv_stack_L, string=name_modified)
################################################################################################################
# Part 6: Visualize bottom-K images and their nearest neighbors
################################################################################################################
vals_topK, idx_topK = torch.topk(-egl, k=15, sorted=True, largest=True)
idx_topK = idx_topK.numpy()
vals_topK = vals_topK.numpy()
# Generate images
dataset_ = ActiveLearningVizDataLoader(dataset_dict=train, conf=self.conf, indices=unlabelled_idx[idx_topK])
egl_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
shuffle=False, num_workers=2)
logging.info('Generating Bottom-K images')
images_U = None
# Disable autograd to speed up inference
with torch.no_grad():
for images, _, _, _, datasets in tqdm(egl_dataloader):
try:
images_U = torch.cat([images_U, images], dim=0)
except TypeError:
images_U = images
images_U = images_U.numpy()
name_modified = []
for i in range(15):
#name_ = train['name'][unlabelled_idx_topK[i]]
name_ = names_U[idx_topK[i]]
name_modified.append('bottom_{}_egl_{}_name_{}.png'.format(i + 1, vals_topK[i], name_))
self._visualize_predictions(image=images_U, name=names_U[idx_topK],
dataset=datasets_U[idx_topK], gt=gts_U[idx_topK],
pred=hm_uv_stack_U[idx_topK], string=np.array(name_modified))
# Part 5.d: bottom-K nbrs
logging.info('Generating neighbors for bottom-K images')
images_L = None
gts_L = None
names_L = []
hm_uv_stack_L = []
datasets_L = []
name_modified = []
for i in tqdm(range(15)): # Top fifteen samples with highest EGL
# Top 5 neighbors for each joint
dataset_ = ActiveLearningVizDataLoader(dataset_dict=train, conf=self.conf, indices=annotated_idx[idx_L[idx_topK[i], :5]])
egl_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
shuffle=False, num_workers=2, drop_last=False)
nbr = 0
with torch.no_grad():
for images, _, names, gts, datasets in egl_dataloader:
pose_heatmaps_, pose_features_ = self.pose_model(images)
for j in range(images.shape[0]):
hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[j])
hm_uv_stack_L.append(hm_uv)
names_L.append(names[j])
datasets_L.append(datasets[j])
name_modified.append(
'bottom_{}_nbr_{}_pji_{}_name_{}.png'.format(i + 1, nbr + j, vals_L[idx_topK[i], nbr+j], names[j]))
nbr += images.shape[0]
try:
images_L = torch.cat([images_L, images], dim=0)
gts_L = torch.cat([gts_L, gts], dim=0)
except TypeError:
images_L = images
gts_L = gts
images_L = images_L.numpy()
hm_uv_stack_L = np.stack(hm_uv_stack_L, axis=0)
names_L = np.array(names_L)
datasets_L = np.array(datasets_L)
gts_L = gts_L.numpy()
name_modified = np.array(name_modified)
self._visualize_predictions(image=images_L, name=names_L, dataset=datasets_L, gt=gts_L,
pred=hm_uv_stack_L, string=name_modified)
    def multipeak_entropy(self, train, dataset_size):
        """
        Visualize the unlabelled samples with the lowest and highest multi-peak entropy.

        Liu and Ferrari, "Active Learning for Human Pose Estimation"
        ICCV 2017
        https://openaccess.thecvf.com/content_ICCV_2017/papers/Liu_Active_Learning_for_ICCV_2017_paper.pdf

        :param train: (dict) training-set metadata; must contain an 'index' key
        :param dataset_size: unused here; present for interface parity with the
            other sampling-visualization methods
        """
        logging.info('VISUALIZING multi-peak entropy sampling.')
        # NOTE(review): when resuming, this returns the saved annotation array
        # instead of visualizing — presumably mirrors the sampling (non-viz)
        # counterpart of this method; confirm this early-return is intended here.
        if self.conf.resume_training:
            return np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))
        assert self.conf.model['load'], "Multipeak entropy was called without a base model"
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))
        # Set of indices not yet annotated
        unlabelled_idx = np.array(list(set(train['index']) - set(annotated_idx)))
        # Multi-peak entropy only over the unlabelled set of images
        dataset_ = ActiveLearningVizDataLoader(dataset_dict=train, indices=unlabelled_idx, conf=self.conf)
        mpe_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
                                                     shuffle=False, num_workers=2)
        ################################################################################################################
        # Part 1: Computing entropy for unlabelled data
        ################################################################################################################
        mpe_value_per_img = []
        logging.info('Computing entropy.')
        with torch.no_grad():
            for images, _, _, _, _ in tqdm(mpe_dataloader):
                pose_heatmaps_, _ = self.pose_model(images)
                # Keep only the final hourglass stack's heatmaps
                pose_heatmaps_ = pose_heatmaps_.detach().cpu().numpy()[:, -1, :, :, :]
                for i in range(pose_heatmaps_.shape[0]):
                    entropy = 0
                    normalize = 0
                    for hm in range(pose_heatmaps_.shape[1]):
                        # Up to 5 local maxima per joint heatmap
                        loc = peak_local_max(pose_heatmaps_[i, hm], min_distance=5, num_peaks=5, exclude_border=False)
                        peaks = pose_heatmaps_[i, hm][loc[:, 0], loc[:, 1]]
                        if peaks.shape[0] > 0:
                            # Softmax over peak values, then Shannon entropy
                            peaks = softmax_fn(peaks)
                            entropy += entropy_fn(peaks)
                            normalize += 1
                    # Average entropy across joints that produced peaks.
                    # NOTE(review): if no joint yields any peak, normalize == 0
                    # and this divides by zero — confirm that case cannot occur.
                    mpe_value_per_img.append(entropy / normalize)
        ################################################################################################################
        # Part 2: Finding images with lowest and highest entropy
        ################################################################################################################
        mpe_value_per_img = np.array(mpe_value_per_img)
        # Pair each entropy value with its dataset index so the ascending sort
        # below can be traced back to concrete samples
        mpe_with_index = np.concatenate([mpe_value_per_img.reshape(-1, 1), unlabelled_idx.reshape(-1, 1)], axis=-1)
        mpe_with_index = mpe_with_index[mpe_with_index[:, 0].argsort()]
        # Slice multipeak entropy for top-15 and bottom-15 images
        min_mpe_idx = mpe_with_index[:15, 1].astype(np.int32)
        max_mpe_idx = mpe_with_index[-15:, 1].astype(np.int32)
        ################################################################################################################
        # Part 3: Visualizing images with lowest entropy
        ################################################################################################################
        logging.info('Visualizing samples with low entropy')
        idx = min_mpe_idx
        dataset_ = ActiveLearningVizDataLoader(dataset_dict=train, indices=idx, conf=self.conf)
        mpe_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
                                                     shuffle=False, num_workers=2)
        pose_heatmaps = None
        images_all = None
        gts_all = None
        names_all = []
        hm_uv_stack = []
        datasets_all = []
        # Disable autograd to speed up inference
        with torch.no_grad():
            for images, _, names, gts, datasets in mpe_dataloader:
                pose_heatmaps_, _ = self.pose_model(images)
                for i in range(images.shape[0]):
                    # Keypoint (u, v) estimates from the final-stack heatmaps
                    hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
                    hm_uv_stack.append(hm_uv)
                    names_all.append(names[i])
                    datasets_all.append(datasets[i])
                try:
                    pose_heatmaps = torch.cat((pose_heatmaps, pose_heatmaps_[:, -1, :, :, :].cpu()), dim=0)
                    images_all = torch.cat([images_all, images], dim=0)
                    gts_all = torch.cat([gts_all, gts], dim=0)
                except TypeError:
                    # First batch: accumulators are still None
                    pose_heatmaps = pose_heatmaps_[:, -1, :, :, :].cpu()
                    images_all = images
                    gts_all = gts
        images_all = images_all.numpy()
        hm_uv_stack = np.stack(hm_uv_stack, axis=0)
        names_all = np.array(names_all)
        datasets_all = np.array(datasets_all)
        gts_all = gts_all.numpy()
        pose_heatmaps = pose_heatmaps.squeeze().numpy()
        # Save a per-joint heatmap overlay for each of the 15 lowest-entropy images
        for i in range(15):
            img = images_all[i]
            hm = pose_heatmaps[i]
            name = names_all[i]
            for j in range(hm.shape[0]):
                plt.imshow(img)
                plt.imshow(cv2.resize(hm[j], dsize=(256, 256), interpolation=cv2.INTER_CUBIC), alpha=.5)
                plt.title('{}'.format(self.i2j[j]))
                os.makedirs(os.path.join(self.conf.model['save_path'], 'entropy_visualizations/images_entropy/minimum'), exist_ok=True)
                plt.savefig(fname=os.path.join(self.conf.model['save_path'], 'entropy_visualizations/images_entropy/minimum/{}_{}.jpg'.format(name, self.i2j[j])),
                            facecolor='black', edgecolor='black', bbox_inches='tight', dpi=300)
                plt.close()
        # Embed the entropy value in the saved-file identifier
        names_modified = []
        for i in range(15):
            names_modified.append('MPE_{}_andName_{}'.format(mpe_with_index[i, 0], names_all[i]))
        names_modified = np.array(names_modified)
        self._visualize_predictions(image=images_all, name=names_all, dataset=datasets_all, gt=gts_all,
                                    pred=hm_uv_stack, string=names_modified)
        ################################################################################################################
        # Part 4: Visualizing images with highest entropy
        ################################################################################################################
        logging.info('Visualizing samples with high entropy')
        idx = max_mpe_idx
        dataset_ = ActiveLearningVizDataLoader(dataset_dict=train, indices=idx, conf=self.conf)
        mpe_dataloader = torch.utils.data.DataLoader(dataset_, batch_size=self.conf.experiment_settings['batch_size'],
                                                     shuffle=False, num_workers=2)
        pose_heatmaps = None
        images_all = None
        gts_all = None
        names_all = []
        hm_uv_stack = []
        datasets_all = []
        # Disable autograd to speed up inference
        with torch.no_grad():
            for images, _, names, gts, datasets in mpe_dataloader:
                pose_heatmaps_, _ = self.pose_model(images)
                for i in range(images.shape[0]):
                    hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
                    hm_uv_stack.append(hm_uv)
                    names_all.append(names[i])
                    datasets_all.append(datasets[i])
                try:
                    pose_heatmaps = torch.cat((pose_heatmaps, pose_heatmaps_[:, -1, :, :, :].cpu()), dim=0)
                    images_all = torch.cat([images_all, images], dim=0)
                    gts_all = torch.cat([gts_all, gts], dim=0)
                except TypeError:
                    pose_heatmaps = pose_heatmaps_[:, -1, :, :, :].cpu()
                    images_all = images
                    gts_all = gts
        images_all = images_all.numpy()
        hm_uv_stack = np.stack(hm_uv_stack, axis=0)
        names_all = np.array(names_all)
        datasets_all = np.array(datasets_all)
        gts_all = gts_all.numpy()
        pose_heatmaps = pose_heatmaps.squeeze().numpy()
        # Save a per-joint heatmap overlay for each of the 15 highest-entropy images
        for i in range(15):
            img = images_all[i]
            hm = pose_heatmaps[i]
            name = names_all[i]
            for j in range(hm.shape[0]):
                plt.imshow(img)
                plt.imshow(cv2.resize(hm[j], dsize=(256, 256), interpolation=cv2.INTER_CUBIC), alpha=.5)
                plt.title('{}'.format(self.i2j[j]))
                os.makedirs(os.path.join(self.conf.model['save_path'], 'entropy_visualizations/images_entropy/maximum'), exist_ok=True)
                plt.savefig(fname=os.path.join(self.conf.model['save_path'], 'entropy_visualizations/images_entropy/maximum/{}_{}.jpg'.format(name, self.i2j[j])),
                            facecolor='black', edgecolor='black', bbox_inches='tight', dpi=300)
                plt.close()
        names_modified = []
        for i in range(15):
            # mpe_with_index[-15 + i] walks the 15 highest-entropy entries in order
            names_modified.append('MPE_{}_andName_{}'.format(mpe_with_index[-15 + i, 0], names_all[i]))
        names_modified = np.array(names_modified)
        self._visualize_predictions(image=images_all, name=names_all, dataset=datasets_all, gt=gts_all,
                                    pred=hm_uv_stack, string=names_modified)
    def aleatoric_uncertainty(self, train, dataset_size):
        """
        Visualize the unlabelled samples with the lowest and highest aleatoric uncertainty.

        Kendall and Gal, "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?"
        NeurIPS 2017
        https://proceedings.neurips.cc/paper/2017/hash/2650d6089a6d640c5e85b2b88265dc2b-Abstract.html

        :param train: (dict) training-set metadata; must contain an 'index' key
        :param dataset_size: unused here; present for interface parity with the
            other sampling-visualization methods
        """
        logging.info('VISUALIZING Uncertainty: Kendall and Gal sampling.')
        assert self.conf.model['load'], "Aleatoric uncertainty requires a previously trained model"
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))
        # Set of indices not annotated
        unlabelled_idx = np.array(list(set(train['index'])-set(annotated_idx)))
        dataset_ = ActiveLearningVizDataLoader(train, indices=unlabelled_idx, conf=self.conf)
        aleatoric_dataloader = torch.utils.data.DataLoader(
            dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)
        ################################################################################################################
        # Part 1: Computing aleatoric uncertainty for unlabelled data
        ################################################################################################################
        aleatoric_pred = None
        # Part 1: Active Learning
        logging.info('Computing aleatoric uncertainty.')
        with torch.no_grad():
            for images, _, names, gts, datasets in tqdm(aleatoric_dataloader):
                _, pose_features_ = self.pose_model(images)
                # Auxiliary network predicts per-joint log-variance
                aleatoric_pred_ = self._aux_net_inference(pose_features_)
                aleatoric_pred_ = aleatoric_pred_.squeeze()
                try:
                    aleatoric_pred = torch.cat([aleatoric_pred, aleatoric_pred_.cpu()], dim=0)
                except TypeError:
                    # First batch: accumulator is still None
                    aleatoric_pred = aleatoric_pred_.cpu()
        # Scalar score per image: mean over the per-joint predictions
        aleatoric_pred_copy = aleatoric_pred.mean(dim=-1)
        ################################################################################################################
        # Part 2: Sort images based on aleatoric uncertainty
        ################################################################################################################
        # argsort defaults to ascending
        aleatoric_with_index = np.concatenate([aleatoric_pred_copy.numpy().reshape(-1, 1),
                                               unlabelled_idx.reshape(-1, 1)], axis=-1)
        aleatoric_with_index = aleatoric_with_index[aleatoric_with_index[:, 0].argsort()]
        # Slice aleatoric for top-5 and bottom-5 images
        min_aleatoric_idx = aleatoric_with_index[:15, 1].astype(np.int32)
        max_aleatoric_idx = aleatoric_with_index[-15:, 1].astype(np.int32)
        ################################################################################################################
        # Part 3: Visualize skeletons for samples with low aleatoric uncertainty
        ################################################################################################################
        logging.info('Visualizing samples with low aleatoric uncertainty')
        idx = min_aleatoric_idx
        dataset_ = ActiveLearningVizDataLoader(train, indices=idx, conf=self.conf)
        aleatoric_dataloader = torch.utils.data.DataLoader(
            dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)
        # Compile all together
        aleatoric_pred = None
        images_all = None
        gts_all = None
        names_all = []
        hm_uv_stack = []
        datasets_all = []
        # Disable autograd to speed up inference
        with torch.no_grad():
            for images, _, names, gts, datasets in aleatoric_dataloader:
                pose_heatmaps_, pose_features_ = self.pose_model(images)
                aleatoric_pred_ = self._aux_net_inference(pose_features_)
                aleatoric_pred_ = aleatoric_pred_.squeeze()
                for i in range(images.shape[0]):
                    # Keypoint (u, v) estimates from the final-stack heatmaps
                    hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
                    hm_uv_stack.append(hm_uv)
                    names_all.append(names[i])
                    datasets_all.append(datasets[i])
                try:
                    aleatoric_pred = torch.cat([aleatoric_pred, aleatoric_pred_.cpu()], dim=0)
                    images_all = torch.cat([images_all, images], dim=0)
                    gts_all = torch.cat([gts_all, gts], dim=0)
                except TypeError:
                    aleatoric_pred = aleatoric_pred_.cpu()
                    images_all = images
                    gts_all = gts
        images_all = images_all.numpy()
        hm_uv_stack = np.stack(hm_uv_stack, axis=0)
        names_all = np.array(names_all)
        datasets_all = np.array(datasets_all)
        gts_all = gts_all.numpy()
        aleatoric_pred_copy = aleatoric_pred.mean(dim=-1)
        # Embed the uncertainty value in the saved-file identifier
        names_modified = []
        for i in range(15):
            temp = 'Aleatoric_{}andName_{}'.format(aleatoric_pred_copy[i], names_all[i])
            names_modified.append(temp)
        names_modified = np.array(names_modified)
        self._visualize_predictions(image=images_all,
                                    name=names_all,
                                    dataset=datasets_all, gt=gts_all,
                                    pred=hm_uv_stack, string=names_modified)
        ################################################################################################################
        # Part 4: Visualize heatmaps for samples with low aleatoric uncertainty
        ################################################################################################################
        aleatoric_hm_all = []
        for i in range(15):
            # Compose one 64x64 uncertainty heatmap per image by taking, at each
            # pixel, the max over the per-joint Gaussians
            aleatoric_hm = np.zeros([64, 64])
            for j in range(self.conf.experiment_settings['num_hm']):
                # after idx slicing: hm_uv_stack = 5 x 1 x 14 x 3
                pred = hm_uv_stack[i, 0, j]
                log_var = aleatoric_pred[i, j]
                # print('Mean: {}'.format(mean))
                # print('Var: {}'.format(np.exp(log_var)))
                # Map 256x256 image coordinates down to the 64x64 heatmap grid
                target = pred[:2] / np.array([(256 - 1) / (64 - 1), ((256 - 1) / (64 - 1))])
                # print('Target: {}'.format(target))
                aleatoric_hm_jnt = self._draw_gaussian(target, log_var)
                aleatoric_hm = np.maximum(aleatoric_hm, aleatoric_hm_jnt)
                # print()
            # RESIZE HM
            aleatoric_hm_all.append(cv2.resize(aleatoric_hm, dsize=(256, 256), interpolation=cv2.INTER_CUBIC))
        aleatoric_hm_all = np.stack(aleatoric_hm_all, axis=0)
        names_modified = []
        for i in range(15):
            names_modified.append('SkeletonViz_Name_{}'.format(names_all[i]))
        names_modified = np.array(names_modified)
        self._visualize_predictions(image=aleatoric_hm_all,
                                    name=names_all,
                                    dataset=datasets_all, gt=gts_all,
                                    pred=hm_uv_stack, string=names_modified)
        ################################################################################################################
        # Part 5: Visualize skeletons for samples with high aleatoric uncertainty
        ################################################################################################################
        logging.info('Visualizing samples with high aleatoric uncertainty')
        idx = max_aleatoric_idx
        dataset_ = ActiveLearningVizDataLoader(train, indices=idx, conf=self.conf)
        aleatoric_dataloader = torch.utils.data.DataLoader(
            dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)
        # Compile all together
        aleatoric_pred = None
        images_all = None
        gts_all = None
        names_all = []
        hm_uv_stack = []
        datasets_all = []
        # Disable autograd to speed up inference
        with torch.no_grad():
            for images, _, names, gts, datasets in aleatoric_dataloader:
                pose_heatmaps_, pose_features_ = self.pose_model(images)
                aleatoric_pred_ = self._aux_net_inference(pose_features_)
                aleatoric_pred_ = aleatoric_pred_.squeeze()
                for i in range(images.shape[0]):
                    hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
                    hm_uv_stack.append(hm_uv)
                    names_all.append(names[i])
                    datasets_all.append(datasets[i])
                try:
                    aleatoric_pred = torch.cat([aleatoric_pred, aleatoric_pred_.cpu()], dim=0)
                    images_all = torch.cat([images_all, images], dim=0)
                    gts_all = torch.cat([gts_all, gts], dim=0)
                except TypeError:
                    aleatoric_pred = aleatoric_pred_.cpu()
                    images_all = images
                    gts_all = gts
        images_all = images_all.numpy()
        hm_uv_stack = np.stack(hm_uv_stack, axis=0)
        names_all = np.array(names_all)
        datasets_all = np.array(datasets_all)
        gts_all = gts_all.numpy()
        aleatoric_pred_copy = aleatoric_pred.mean(dim=-1)
        names_modified = []
        for i in range(15):
            temp = 'Aleatoric_{}andName_{}'.format(aleatoric_pred_copy[i], names_all[i])
            names_modified.append(temp)
        names_modified = np.array(names_modified)
        self._visualize_predictions(image=images_all,
                                    name=names_all,
                                    dataset=datasets_all, gt=gts_all,
                                    pred=hm_uv_stack, string=names_modified)
        ################################################################################################################
        # Part 4: Visualize heatmaps for samples with high aleatoric uncertainty
        ################################################################################################################
        aleatoric_hm_all = []
        for i in range(15):
            aleatoric_hm = np.zeros([64, 64])
            for j in range(self.conf.experiment_settings['num_hm']):
                # after idx slicing: hm_uv_stack = 5 x 1 x 14 x 3
                pred = hm_uv_stack[i, 0, j]
                log_var = aleatoric_pred[i, j]
                # print('Mean: {}'.format(mean))
                # print('Var: {}'.format(np.exp(log_var)))
                target = pred[:2] / np.array([(256 - 1) / (64 - 1), ((256 - 1) / (64 - 1))])
                # print('Target: {}'.format(target))
                aleatoric_hm_jnt = self._draw_gaussian(target, log_var)
                aleatoric_hm = np.maximum(aleatoric_hm, aleatoric_hm_jnt)
                # print()
            # RESIZE HM
            aleatoric_hm_all.append(cv2.resize(aleatoric_hm, dsize=(256, 256), interpolation=cv2.INTER_CUBIC))
        aleatoric_hm_all = np.stack(aleatoric_hm_all, axis=0)
        names_modified = []
        for i in range(15):
            names_modified.append('SkeletonViz_Name_{}'.format(names_all[i]))
        names_modified = np.array(names_modified)
        self._visualize_predictions(image=aleatoric_hm_all,
                                    name=names_all,
                                    dataset=datasets_all, gt=gts_all,
                                    pred=hm_uv_stack, string=names_modified)
    def vl4pose(self, train, dataset_size):
        """
        Shukla et al., "VL4Pose: Active Learning Through Out-Of-Distribution Detection For Pose Estimation"
        BMVC 2022
        https://bmvc2022.mpi-inf.mpg.de/610/

        Visualization routine in four parts:
          1. Find (and visualize) unlabelled images where VL4Pose's skeleton
             optimization changed the pose estimator's argmax prediction.
          2. Compute the summed log-likelihood per unlabelled image and pick the
             15 lowest / 15 highest scoring images.
          3. Visualize poses and conditional (bone-length) heatmaps for the
             minimum-likelihood images.
          4. Same as part 3 for the maximum-likelihood images.

        :param train: dict of training-split arrays ('index', 'name', 'gt', ...)
        :param dataset_size: unused in this visualizer; kept for signature parity
        """
        logging.info('Visualizing: VL4Pose Sampling.')
        assert self.conf.model['load'], "VL4Pose requires a previously trained model"
        annotated_idx = np.load(os.path.join(self.conf.model['load_path'], 'model_checkpoints/annotation.npy'))
        # Set of indices not annotated
        unlabelled_idx = np.array(list(set(train['index'])-set(annotated_idx)))
        # links definition
        # Parent->child joint index pairs forming the skeleton tree rooted at 'head'.
        # MPII provides thorax/pelvis joints; the LSP-style skeleton routes through the shoulders.
        if self.conf.dataset['load'] == 'mpii':
            links = [[self.j2i['head'], self.j2i['neck']], [self.j2i['neck'], self.j2i['thorax']], [self.j2i['thorax'], self.j2i['pelvis']],
                     [self.j2i['thorax'], self.j2i['lsho']], [self.j2i['lsho'], self.j2i['lelb']], [self.j2i['lelb'], self.j2i['lwri']],
                     [self.j2i['thorax'], self.j2i['rsho']], [self.j2i['rsho'], self.j2i['relb']], [self.j2i['relb'], self.j2i['rwri']],
                     [self.j2i['pelvis'], self.j2i['lhip']], [self.j2i['lhip'], self.j2i['lknee']], [self.j2i['lknee'], self.j2i['lankl']],
                     [self.j2i['pelvis'], self.j2i['rhip']], [self.j2i['rhip'], self.j2i['rknee']], [self.j2i['rknee'], self.j2i['rankl']]]
        else:
            links = [[self.j2i['head'], self.j2i['neck']],
                     [self.j2i['neck'], self.j2i['lsho']], [self.j2i['lsho'], self.j2i['lelb']], [self.j2i['lelb'], self.j2i['lwri']],
                     [self.j2i['neck'], self.j2i['rsho']], [self.j2i['rsho'], self.j2i['relb']], [self.j2i['relb'], self.j2i['rwri']],
                     [self.j2i['lsho'], self.j2i['lhip']], [self.j2i['lhip'], self.j2i['lknee']], [self.j2i['lknee'], self.j2i['lankl']],
                     [self.j2i['rsho'], self.j2i['rhip']], [self.j2i['rhip'], self.j2i['rknee']], [self.j2i['rknee'], self.j2i['rankl']]]
        dataset_ = ActiveLearningVizDataLoader(train, indices=unlabelled_idx, conf=self.conf)
        vl4pose_dataloader = torch.utils.data.DataLoader(
            dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=1)
        ################################################################################################################
        # Part 1: Computing images where VL4Pose has changed the pose estimator's prediction
        ################################################################################################################
        logging.info('Computing images where poses have changed.')
        with torch.no_grad():
            has_pose_changed = None
            vl4pose_refinement = None
            for images, _, names, gts, datasets in tqdm(vl4pose_dataloader):
                pose_heatmaps_, pose_features_ = self.pose_model(images)
                likelihood_pred_ = self._aux_net_inference(pose_features_).reshape(images.shape[0], len(links), 2)
                # The skeleton optimization per image is itself wrapped in a Dataset so it
                # can run in parallel worker processes.
                keypoint_compute = Keypoint_ParallelWrapper(
                    hm=pose_heatmaps_[:, -1, :, :, :].cpu().numpy(), param=likelihood_pred_.cpu().numpy(), j2i=self.j2i, i2j=self.i2j,
                    links=links, vl4pose_config=self.conf.active_learning['vl4pose'], function=np.max)
                keypoint_dataloader = torch.utils.data.DataLoader(
                    keypoint_compute, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)
                # Poses evaluated according to VL4Pose
                for likelihoods, vl4pose_refinement_, has_pose_changed_ in keypoint_dataloader:
                    try:
                        vl4pose_refinement = torch.cat((vl4pose_refinement, vl4pose_refinement_), dim=0)
                        has_pose_changed.extend(has_pose_changed_.tolist())
                    except TypeError:
                        # First batch: accumulators are still None
                        vl4pose_refinement = vl4pose_refinement_
                        has_pose_changed = has_pose_changed_.tolist()
            vl4pose_refinement = vl4pose_refinement.numpy()
            # Scale heatmap-resolution coordinates up to image resolution;
            # 4.0476 ~= (256-1)/(64-1) -- presumably the same uv downscale used elsewhere; TODO confirm
            vl4pose_refinement *= np.array([4.0476, 4.0476, 1])
            # Part 1.b: Collect the corresponding images
            logging.info('Collecting data where {} poses have changed.'.format(sum(has_pose_changed)))
            dataset_ = ActiveLearningVizDataLoader(train, indices=unlabelled_idx[has_pose_changed], conf=self.conf)
            vl4pose_dataloader = torch.utils.data.DataLoader(
                dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=1)
            images_all = None
            gts_all = None
            names_all = []
            hm_uv_stack = []
            datasets_all = []
            for images, _, names, gts, datasets in tqdm(vl4pose_dataloader):
                pose_heatmaps_, _ = self.pose_model(images)
                for i in range(images.shape[0]):
                    hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
                    hm_uv_stack.append(hm_uv)
                    names_all.append(names[i])
                    datasets_all.append(datasets[i])
                try:
                    images_all = torch.cat([images_all, images], dim=0)
                    gts_all = torch.cat([gts_all, gts], dim=0)
                except TypeError:
                    # First batch: accumulators are still None
                    images_all = images
                    gts_all = gts
            images_all = images_all.numpy()
            hm_uv_stack = np.stack(hm_uv_stack, axis=0)
            names_all = np.array(names_all)
            datasets_all = np.array(datasets_all)
            gts_all = gts_all.numpy()
            # Part 1.c: For these images visualize the poses
            # NOTE: here 'gt' carries the original heatmap argmax pose and 'pred' the
            # VL4Pose-refined pose, so both appear side by side in the visualization.
            logging.info('Visualizing images where pose has changed.')
            self._visualize_predictions(image=images_all,
                                        name=names_all,
                                        dataset=datasets_all,
                                        gt=hm_uv_stack,
                                        pred=vl4pose_refinement[has_pose_changed],
                                        string=names_all)
            del vl4pose_refinement, has_pose_changed, images_all, names_all, datasets_all, hm_uv_stack
        ################################################################################################################
        # Part 2: Computing minimum and maximum expected likelihood
        ################################################################################################################
        logging.info('Computing images with maximum and minimum likelihood.')
        dataset_ = ActiveLearningVizDataLoader(train, indices=unlabelled_idx, conf=self.conf)
        vl4pose_dataloader = torch.utils.data.DataLoader(
            dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=1)
        with torch.no_grad():
            max_likelihood = None
            for images, _, names, gts, datasets in tqdm(vl4pose_dataloader):
                pose_heatmaps_, pose_features_ = self.pose_model(images)
                likelihood_pred_ = self._aux_net_inference(pose_features_).reshape(images.shape[0], len(links), 2)
                # function=np.sum: total likelihood over candidate locations rather than the max
                keypoint_compute = Keypoint_ParallelWrapper(
                    hm=pose_heatmaps_[:, -1, :, :, :].cpu().numpy(), param=likelihood_pred_.cpu().numpy(), j2i=self.j2i, i2j=self.i2j,
                    links=links, vl4pose_config=self.conf.active_learning['vl4pose'], function=np.sum)
                keypoint_dataloader = torch.utils.data.DataLoader(
                    keypoint_compute, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=2)
                # Poses evaluated according to VL4Pose
                for likelihoods, _, _ in keypoint_dataloader:
                    try:
                        max_likelihood = torch.cat((max_likelihood, likelihoods.squeeze()), dim=0)
                    except TypeError:
                        # First batch: accumulator is still None
                        max_likelihood = likelihoods.squeeze()
        max_likelihood = max_likelihood.numpy()
        # Pair each likelihood with its position so the sort keeps track of image indices.
        new_index = np.arange(max_likelihood.shape[0])
        loglikelihood_with_index = np.concatenate([max_likelihood.reshape(-1, 1), new_index.reshape(-1, 1)], axis=-1)
        loglikelihood_with_index = loglikelihood_with_index[loglikelihood_with_index[:, 0].argsort()]
        # Slice images, heatmaps, likelihood and parameters for top-5 and bottom-5 images
        min_likelihood_idx = loglikelihood_with_index[:15, 1].astype(np.int32)
        max_likelihood_idx = loglikelihood_with_index[-15:, 1].astype(np.int32)
        ################################################################################################################
        # Part 3: Visualizing images with minimum expected likelihood
        ################################################################################################################
        logging.info('Visualizing images with minimum likelihood.')
        idx = min_likelihood_idx
        # Collect images for these indices
        dataset_ = ActiveLearningVizDataLoader(train, indices=unlabelled_idx[idx], conf=self.conf)
        vl4pose_dataloader = torch.utils.data.DataLoader(
            dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=1)
        with torch.no_grad():
            images_all = None
            gts_all = None
            names_all = []
            hm_uv_stack = []
            datasets_all = []
            likelihood_params = None
            for images, _, names, gts, datasets in tqdm(vl4pose_dataloader):
                pose_heatmaps_, pose_features_ = self.pose_model(images)
                likelihood_pred_ = self._aux_net_inference(pose_features_)
                for i in range(images.shape[0]):
                    hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
                    hm_uv_stack.append(hm_uv)
                    names_all.append(names[i])
                    datasets_all.append(datasets[i])
                try:
                    images_all = torch.cat([images_all, images], dim=0)
                    gts_all = torch.cat([gts_all, gts], dim=0)
                    likelihood_params = torch.cat([likelihood_params, likelihood_pred_.cpu().reshape(images.shape[0], len(links), 2)], dim=0)
                except TypeError:
                    # First batch: accumulators are still None
                    images_all = images
                    gts_all = gts
                    likelihood_params = likelihood_pred_.cpu().reshape(images.shape[0], len(links), 2)
            images_all = images_all.numpy()
            hm_uv_stack = np.stack(hm_uv_stack, axis=0)
            names_all = np.array(names_all)
            datasets_all = np.array(datasets_all)
            gts_all = gts_all.numpy()
            likelihood_params = likelihood_params.numpy()
            # Embed the per-image log-likelihood into the display string.
            names_modified = []
            for i in range(15):
                temp = 'LogLikelihood_{}_andName_{}'.format(max_likelihood[min_likelihood_idx[i]], names_all[i])
                names_modified.append(temp)
            names_modified = np.array(names_modified)
            self._visualize_predictions(image=images_all, name=names_all, dataset=datasets_all, gt=gts_all,
                                        pred=hm_uv_stack, string=names_modified)
            del images_all
            # Part 3.b: Next, prepare conditional heatmaps
            # For every skeleton link, stamp a Gaussian at the predicted bone-length
            # distance from the parent joint along the parent->child direction.
            logging.info('Visualizing heatmaps for minimum likelihood.')
            conditional_hm_all = []
            for i in range(15):
                conditional_hm = np.zeros([64, 64])
                for j, link in enumerate(links):
                    pred1 = hm_uv_stack[i, 0, link[0]]
                    pred2 = hm_uv_stack[i, 0, link[1]]
                    mean = likelihood_params[i, j, 0]
                    log_var = likelihood_params[i, j, 1]
                    target = self._find_point_along_line(source_pt=pred1, dest_pt=pred2, magnitude=mean)
                    conditional_hm_link = self._draw_gaussian(target, log_var)
                    conditional_hm = np.maximum(conditional_hm, conditional_hm_link)
                #RESIZE HM
                conditional_hm_all.append(cv2.resize(conditional_hm, dsize=(256, 256), interpolation=cv2.INTER_CUBIC))
            conditional_hm_all = np.stack(conditional_hm_all, axis=0)
            names_modified = []
            for i in range(15):
                names_modified.append('SkeletonVizandName{}'.format(names_all[i]))
            names_modified = np.array(names_modified)
            self._visualize_predictions(image=conditional_hm_all, name=names_all, dataset=datasets_all,
                                        gt=gts_all, pred=hm_uv_stack, string=names_modified)
        ################################################################################################################
        # Part 4: Visualizing images with maximum expected likelihood
        ################################################################################################################
        logging.info('Visualizing images with maximum likelihood.')
        idx = max_likelihood_idx
        # Collect images for these indices
        dataset_ = ActiveLearningVizDataLoader(train, indices=unlabelled_idx[idx], conf=self.conf)
        vl4pose_dataloader = torch.utils.data.DataLoader(
            dataset_, batch_size=self.conf.experiment_settings['batch_size'], shuffle=False, num_workers=1)
        with torch.no_grad():
            images_all = None
            gts_all = None
            names_all = []
            hm_uv_stack = []
            datasets_all = []
            likelihood_params = None
            for images, _, names, gts, datasets in tqdm(vl4pose_dataloader):
                pose_heatmaps_, pose_features_ = self.pose_model(images)
                likelihood_pred_ = self._aux_net_inference(pose_features_)
                for i in range(images.shape[0]):
                    hm_uv = self._estimate_uv(hm_array=pose_heatmaps_[:, -1].cpu().numpy()[i])
                    hm_uv_stack.append(hm_uv)
                    names_all.append(names[i])
                    datasets_all.append(datasets[i])
                try:
                    images_all = torch.cat([images_all, images], dim=0)
                    gts_all = torch.cat([gts_all, gts], dim=0)
                    likelihood_params = torch.cat([likelihood_params, likelihood_pred_.cpu().reshape(images.shape[0], len(links), 2)], dim=0)
                except TypeError:
                    # First batch: accumulators are still None
                    images_all = images
                    gts_all = gts
                    likelihood_params = likelihood_pred_.cpu().reshape(images.shape[0], len(links), 2)
            images_all = images_all.numpy()
            hm_uv_stack = np.stack(hm_uv_stack, axis=0)
            names_all = np.array(names_all)
            datasets_all = np.array(datasets_all)
            gts_all = gts_all.numpy()
            likelihood_params = likelihood_params.numpy()
            names_modified = []
            for i in range(15):
                temp = 'LogLikelihood_{}_andName_{}'.format(max_likelihood[max_likelihood_idx[i]], names_all[i])
                names_modified.append(temp)
            names_modified = np.array(names_modified)
            self._visualize_predictions(image=images_all, name=names_all, dataset=datasets_all, gt=gts_all,
                                        pred=hm_uv_stack, string=names_modified)
            del images_all
            # Part 4.b: Next, prepare conditional heatmaps
            logging.info('Visualizing heatmaps for maximum likelihood.')
            conditional_hm_all = []
            for i in range(15):
                conditional_hm = np.zeros([64, 64])
                for j, link in enumerate(links):
                    pred1 = hm_uv_stack[i, 0, link[0]]
                    pred2 = hm_uv_stack[i, 0, link[1]]
                    mean = likelihood_params[i, j, 0]
                    log_var = likelihood_params[i, j, 1]
                    target = self._find_point_along_line(source_pt=pred1, dest_pt=pred2, magnitude=mean)
                    conditional_hm_link = self._draw_gaussian(target, log_var)
                    conditional_hm = np.maximum(conditional_hm, conditional_hm_link)
                #RESIZE HM
                conditional_hm_all.append(cv2.resize(conditional_hm, dsize=(256, 256), interpolation=cv2.INTER_CUBIC))
            conditional_hm_all = np.stack(conditional_hm_all, axis=0)
            names_modified = []
            for i in range(15):
                names_modified.append('SkeletonVizandName{}'.format(names_all[i]))
            names_modified = np.array(names_modified)
            self._visualize_predictions(image=conditional_hm_all, name=names_all, dataset=datasets_all,
                                        gt=gts_all, pred=hm_uv_stack, string=names_modified)
def _draw_gaussian(self, pt, log_var, hm_shape=(64, 64)):
"""
:param pt:
:param log_var:
:param hm_shape:
:return:
"""
im = np.zeros(hm_shape, dtype=np.float32)
pt_rint = np.rint(pt).astype(int)
sigma = (np.exp(log_var))**0.5
# Size of 2D Gaussian window.
size = int(math.ceil(6 * sigma))
# Ensuring that size remains an odd number
if not size % 2:
size += 1
# Generate gaussian, with window=size and variance=sigma
u = np.arange(pt_rint[0] - (size // 2), pt_rint[0] + (size // 2) + 1)
v = np.arange(pt_rint[1] - (size // 2), pt_rint[1] + (size // 2) + 1)
uu, vv = np.meshgrid(u, v, sparse=True)
z = (np.exp(-((uu - pt[0]) ** 2 + (vv - pt[1]) ** 2) / (2 * (sigma ** 2)))) * (1/((2*np.pi*sigma*sigma)**0.5))
z = z.T
# Identify indices in im that will define the crop area
top = max(0, pt_rint[0] - (size//2))
bottom = min(hm_shape[0], pt_rint[0] + (size//2) + 1)
left = max(0, pt_rint[1] - (size//2))
right = min(hm_shape[1], pt_rint[1] + (size//2) + 1)
im[top:bottom, left:right] = \
z[top - (pt_rint[0] - (size//2)): top - (pt_rint[0] - (size//2)) + (bottom - top),
left - (pt_rint[1] - (size//2)): left - (pt_rint[1] - (size//2)) + (right - left)]
return im
def _visualize_predictions(self, image=None, name=None, dataset=None, gt=None, pred=None, string=None):
dataset_viz = {}
dataset_viz['img'] = image
dataset_viz['name'] = name
dataset_viz['dataset'] = dataset
dataset_viz['gt'] = gt
dataset_viz['pred'] = pred
dataset_viz['string'] = string
dataset_viz = self._recreate_images(gt=True, pred=True, external=True, ext_data=dataset_viz)
visualize_image(dataset_viz, save_dir=self.conf.model['save_path'], bbox=False)
    def _recreate_images(self, gt=False, pred=False, external=False, ext_data=None):
        '''
        Regroup a flat batch of visualization data into per-dataset buckets.

        :param gt: include ground-truth joints in the output.
        :param pred: include predicted joints in the output.
        :param external: must be True; this method only supports externally supplied data.
        :param ext_data: dict with 'img', 'name', 'dataset', 'gt', 'pred', 'string' arrays,
            all indexed in parallel; 'gt'/'pred' indexed as [image, person, joint].
        :return: dict keyed by dataset name ('mpii'/'lspet'/'lsp'), each holding parallel
            lists of images, names, per-joint GT/pred dicts and display strings.
        '''
        assert gt + pred != 0, "Specify atleast one of GT or Pred"
        assert external
        assert ext_data, "ext_dataset can't be none to recreate external datasets"
        data_split = ext_data
        # Along with the below entries, we also pass bbox coordinates for each dataset
        img_dict = {'mpii': {'img': [], 'img_name': [], 'img_pred': [], 'img_gt': [], 'dataset': [], 'display_string': []},
                    'lspet': {'img': [], 'img_name': [], 'img_pred': [], 'img_gt': [], 'dataset': [], 'display_string': []},
                    'lsp': {'img': [], 'img_name': [], 'img_pred': [], 'img_gt': [], 'dataset': [], 'display_string': []}}
        for i in range(len(data_split['img'])):
            dataset = data_split['dataset'][i]
            img_dict[dataset]['img'].append(data_split['img'][i])
            img_dict[dataset]['img_name'].append(data_split['name'][i])
            img_dict[dataset]['dataset'].append(data_split['dataset'][i])
            img_dict[dataset]['display_string'].append(data_split['string'][i])
            # Template dict: joint name -> list of (u, v, vis) entries. The comprehension's
            # `i` is scoped to the comprehension (Py3) and does not clobber the outer loop index.
            joint_dict = dict([(self.i2j[i], []) for i in range(self.conf.experiment_settings['num_hm'])])
            gt_dict = copy.deepcopy(joint_dict)
            pred_dict = copy.deepcopy(joint_dict)
            if gt:
                # Only the first person is visualized
                for person in range(1):
                    for joint in range(self.conf.experiment_settings['num_hm']):
                        gt_dict[self.i2j[joint]].append(data_split['gt'][i, person, joint])
            if pred:
                for person in range(1):
                    for joint in range(self.conf.experiment_settings['num_hm']):
                        pred_dict[self.i2j[joint]].append(data_split['pred'][i, person, joint])
            img_dict[dataset]['img_gt'].append(gt_dict)
            img_dict[dataset]['img_pred'].append(pred_dict)
        return img_dict
def _aux_net_inference(self, pose_features):
extractor = self.conf.architecture['aux_net']['conv_or_avg_pooling']
with torch.no_grad():
if extractor == 'avg':
# Transfer to GPU where auxiliary network is stored
encodings = pose_features['penultimate']
else:
depth = len(self.conf.architecture['aux_net']['spatial_dim'])
encodings = torch.cat(
[pose_features['feature_{}'.format(i)].reshape(
pose_features['feature_{}'.format(i)].shape[0], pose_features['feature_{}'.format(i)].shape[1], -1)
for i in range(depth, 0, -1)],
dim=2)
aux_out = self.aux_net(encodings)
return aux_out
def _estimate_uv(self, hm_array):
'''
Assumes single person
:param hm_array:
:param pred_placeholder:
:return:
'''
threshold = 0
joint = np.empty(shape=[1, hm_array.shape[0], 3], dtype=np.float32)
# Iterate over each heatmap
for jnt_id in range(hm_array.shape[0]):
joint[0, jnt_id, :] = uv_from_heatmap(hm=hm_array[jnt_id], threshold=threshold)
return joint
def _find_point_along_line(self, source_pt, dest_pt, magnitude):
downscale = [(256 - 1) / (64 - 1), ((256 - 1) / (64 - 1))]
# Ignore visibility flag
source_pt = source_pt[:2] / np.array(downscale)
dest_pt = dest_pt[:2] / np.array(downscale)
direction = dest_pt - source_pt
t = magnitude / np.linalg.norm(source_pt - dest_pt)
return source_pt + (t * direction)
class ActiveLearningVizDataLoader(torch.utils.data.Dataset):
    """
    Disk-backed dataset used by the active-learning visualizers.

    Each item is cropped around the first person's bounding box (with a 1.75x
    margin), resized to 256x256 with aspect-preserving padding, and returned
    together with its adjusted ground truth, target heatmaps, name and source
    dataset identifier.
    """

    def __init__(self, dataset_dict, conf, indices=None):
        """
        :param dataset_dict: dict of parallel per-image arrays
            ('name', 'bbox_coords', 'dataset', 'gt', ...).
        :param conf: experiment configuration object.
        :param indices: subset of rows of dataset_dict exposed by this loader.
        """
        self.names = dataset_dict['name'][indices]
        self.bounding_box = dataset_dict['bbox_coords'][indices]
        self.dataset = dataset_dict['dataset'][indices]
        self.gt = dataset_dict['gt'][indices]
        self.occlusion = conf.experiment_settings['occlusion']
        self.hm_shape = [64, 64]
        self.hm_peak = conf.experiment_settings['hm_peak']
        # BUGFIX: __getitem__ referenced self.load_all_imgs / self.images but the
        # attributes were never initialized, so every lookup raised AttributeError.
        # This loader always reads images from disk.
        self.load_all_imgs = False
        self.images = None
        # (x, y) -> (u, v) == (row, col)
        self.xy_to_uv = lambda xy: (xy[1], xy[0])

    def __len__(self):
        return self.dataset.shape[0]

    def __getitem__(self, item):
        '''
        Load, crop and resize one sample.

        :param item: integer index into this loader's subset.
        :return: (image tensor [256,256,3] in [0,1), heatmap tensor, name, gt, dataset)
        '''
        root = Path(os.getcwd()).parent
        mpii_path = os.path.join(root, 'data', 'mpii')
        lsp_path = os.path.join(root, 'data', 'lsp')
        lspet_path = os.path.join(root, 'data', 'lspet')
        dataset = self.dataset[item]
        name = self.names[item]
        bounding_box = self.bounding_box[item]
        # BUGFIX: copy the ground truth -- the crop/resize below edits it in place
        # and previously corrupted the stored annotations on every pass over the data.
        gt = np.copy(self.gt[item])
        if self.load_all_imgs:
            image = self.images[item]
        else:
            if dataset == 'mpii':
                image = plt.imread(os.path.join(mpii_path, 'images', '{}.jpg'.format(name.split('_')[0])))
            elif dataset == 'lsp':
                image = plt.imread(os.path.join(lsp_path, 'images', name))
            else:
                image = plt.imread(os.path.join(lspet_path, 'images', name))
        # Determine crop
        img_shape = np.array(image.shape)
        # Bounding box for the first person
        [min_x, min_y, max_x, max_y] = bounding_box[0]
        tl_uv = self.xy_to_uv(np.array([min_x, min_y]))
        br_uv = self.xy_to_uv(np.array([max_x, max_y]))
        min_u = tl_uv[0]
        min_v = tl_uv[1]
        max_u = br_uv[0]
        max_v = br_uv[1]
        centre = np.array([(min_u + max_u) / 2, (min_v + max_v) / 2])
        height = max_u - min_u
        width = max_v - min_v
        # Square crop window with a 1.75x margin around the larger box side.
        scale = 1.75
        window = max(scale * height, scale * width)
        top_left = np.array([centre[0] - (window / 2), centre[1] - (window / 2)])
        bottom_right = np.array([centre[0] + (window / 2), centre[1] + (window / 2)])
        top_left = np.maximum(np.array([0, 0], dtype=np.int16), top_left.astype(np.int16))
        bottom_right = np.minimum(img_shape.astype(np.int16)[:-1], bottom_right.astype(np.int16))
        # Cropping the image and adjusting the ground truth
        image = image[top_left[0]: bottom_right[0], top_left[1]: bottom_right[1], :]
        for person in range(gt.shape[0]):
            for joint in range(gt.shape[1]):
                gt_uv = self.xy_to_uv(gt[person][joint])
                gt_uv = gt_uv - top_left
                gt[person][joint] = np.concatenate([gt_uv, np.array([gt[person][joint][2]])], axis=0)
        # Resize the image
        image, gt = self.resize_image(image, gt, target_size=[256, 256, 3])
        heatmaps, joint_exist = heatmap_generator(
            joints=np.copy(gt), occlusion=self.occlusion, hm_shape=self.hm_shape, img_shape=image.shape)
        heatmaps = self.hm_peak * heatmaps
        return torch.tensor(data=image / 256.0, dtype=torch.float32, device='cpu'),\
               torch.tensor(data=heatmaps, dtype=torch.float32, device='cpu'), name, gt, dataset

    def resize_image(self, image_=None, gt=None, target_size=None):
        '''
        Aspect-preserving resize of `image_` into `target_size` with centred
        zero padding; scales and shifts `gt` (in place) to match.

        :return: (resized image, adjusted gt)
        '''
        # Compute the aspect ratios
        image_aspect_ratio = image_.shape[0] / image_.shape[1]
        tgt_aspect_ratio = target_size[0] / target_size[1]
        # Compare the original and target aspect ratio
        if image_aspect_ratio > tgt_aspect_ratio:
            # If target aspect ratio is smaller, scale the first dim
            scale_factor = target_size[0] / image_.shape[0]
        else:
            # If target aspect ratio is bigger or equal, scale the second dim
            scale_factor = target_size[1] / image_.shape[1]
        # Compute the padding to fit the target size
        pad_u = (target_size[0] - int(image_.shape[0] * scale_factor))
        pad_v = (target_size[1] - int(image_.shape[1] * scale_factor))
        output_img = np.zeros(target_size, dtype=image_.dtype)
        # Write scaled size in reverse order because opencv resize
        scaled_size = (int(image_.shape[1] * scale_factor), int(image_.shape[0] * scale_factor))
        padding_u = int(pad_u / 2)
        padding_v = int(pad_v / 2)
        im_scaled = cv2.resize(image_, scaled_size)
        output_img[padding_u : im_scaled.shape[0] + padding_u,
                   padding_v : im_scaled.shape[1] + padding_v, :] = im_scaled
        gt *= np.array([scale_factor, scale_factor, 1]).reshape(1, 1, 3)
        gt[:, :, 0] += padding_u
        gt[:, :, 1] += padding_v
        return output_img, gt
class Keypoint_ParallelWrapper(torch.utils.data.Dataset):
    """
    Dataset wrapper that runs the VL4Pose skeleton likelihood maximization for
    one image per item, so a DataLoader can parallelize it across workers.
    """

    def __init__(self, hm, param, j2i, i2j, links, vl4pose_config, function):
        # Batch of per-joint heatmaps and per-link [mean, log_var] parameters.
        self.hm = hm
        self.param = param
        # Joint-name <-> index maps and the parent->child skeleton links.
        self.j2i = j2i
        self.i2j = i2j
        self.links = links
        self.config = vl4pose_config
        # Reduction over candidate locations (np.max or np.sum).
        self.function = function

    def __len__(self):
        return self.hm.shape[0]

    def __getitem__(self, i):
        heatmaps = self.hm[i]
        parameters = self.param[i]

        # One Keypoint node per joint, holding candidate peaks + softmax scores.
        joints = {}
        for key in self.j2i.keys():
            hm_joint = heatmaps[self.j2i[key]]
            loc = peak_local_max(hm_joint, min_distance=self.config['min_distance'],
                                 num_peaks=self.config['num_peaks'], exclude_border=False)
            peaks = softmax_fn(hm_joint[loc[:, 0], loc[:, 1]])
            joints[key] = Keypoint(name=key, loc=loc, peaks=peaks, function=self.function)

        # Wire up the tree: each parent gets its link parameters and child node.
        for k, l in enumerate(self.links):
            parent = joints[self.i2j[l[0]]]
            parent.parameters.append(parameters[k])
            parent.children.append(joints[self.i2j[l[1]]])

        max_ll, trace = joints['head'].run_likelihood()

        # Assemble the optimized pose as an (1, num_joints, 3) tensor with a
        # visibility column of ones.
        skeleton = [torch.from_numpy(trace['{}_uv'.format(self.i2j[j])]) for j in range(heatmaps.shape[0])]
        skeleton = torch.stack(skeleton, dim=0)
        skeleton = torch.cat([skeleton, torch.ones(heatmaps.shape[0]).view(-1, 1)], dim=1)
        skeleton = skeleton.unsqueeze(0)

        # The pose changed iff any joint picked a candidate other than index 0
        # (index 0 is the heatmap argmax).
        flags = ''.join(str(trace[jnt].item()) for jnt in self.j2i.keys())
        has_pose_changed = flags != ('0' * len(self.j2i.keys()))

        return max_ll, skeleton, has_pose_changed
class Keypoint(object):
    """
    One node of the VL4Pose skeleton tree.

    Each node stores the candidate peak locations (`loc`, shape [k, 2]) and
    their softmax scores (`peaks`, shape [k]) for a single joint, plus its
    child nodes and the per-link Gaussian parameters `[mean, log_var]`
    predicted by the auxiliary network. `function` reduces the per-location
    log-likelihoods to a scalar (np.max for the best pose, np.sum for the
    total likelihood).
    """

    def __init__(self, name, loc, peaks, function):
        self.name = name
        self.loc = loc
        self.peaks = peaks
        self.children = []
        self.parameters = []
        self.function = function

    def _best_trace(self, likelihood_per_location, per_location_trace=None):
        """
        Assemble the arg-max trace (chosen candidate index and its uv, per
        joint) for this subtree. Factored out of run_likelihood /
        compute_likelihood_given_parent, which previously duplicated it and
        recomputed np.argmax several times.
        """
        # Keep np.argmax's numpy integer: downstream code calls .item() on it.
        best = np.argmax(likelihood_per_location)
        trace = {}
        if per_location_trace is not None:
            for child_trace in per_location_trace[best]:
                trace.update(child_trace)
        trace[self.name] = best
        trace['{}_uv'.format(self.name)] = self.loc[best]
        return trace

    def run_likelihood(self):
        """
        Evaluate the tree likelihood from the root; only callable on 'head'.

        :return: (reduced log-likelihood, best trace over the whole tree)
        """
        assert self.name == 'head'
        likelihood_per_location = []
        per_location_trace = []
        for location in range(self.loc.shape[0]):
            log_ll = np.log(self.peaks[location])
            per_child_trace = []
            for child in range(len(self.children)):
                child_ll, joint_trace = self.children[child].compute_likelihood_given_parent(
                    self.loc[location], self.parameters[child])
                log_ll += child_ll
                per_child_trace.append(joint_trace)
            likelihood_per_location.append(log_ll)
            per_location_trace.append(per_child_trace)
        likelihood_per_location = np.array(likelihood_per_location)
        return self.function(likelihood_per_location), self._best_trace(likelihood_per_location, per_location_trace)

    def compute_likelihood_given_parent(self, parent_location, gaussian_params):
        """
        Score this joint's candidate locations given one parent candidate.

        :param parent_location: (u, v) of the parent candidate being evaluated.
        :param gaussian_params: [mean, log_var] of the bone-length Gaussian for this link.
        :return: (reduced log-likelihood, best trace for this subtree)
        """
        likelihood_per_location = []
        per_location_trace = []
        is_leaf = len(self.children) == 0
        for location in range(self.loc.shape[0]):
            # Gaussian log-density of the observed bone length under [mean, log_var],
            # plus the candidate's own (log) heatmap score.
            log_ll = np.log(2 * np.pi) + gaussian_params[1]
            log_ll += (gaussian_params[0] - np.linalg.norm(parent_location - self.loc[location]))**2 * np.exp(-gaussian_params[1])
            log_ll *= -0.5
            log_ll += np.log(self.peaks[location])
            if is_leaf:
                likelihood_per_location.append(log_ll)
            else:
                per_child_trace = []
                for child in range(len(self.children)):
                    child_ll, joint_trace = self.children[child].compute_likelihood_given_parent(
                        self.loc[location], self.parameters[child])
                    log_ll += child_ll
                    per_child_trace.append(joint_trace)
                likelihood_per_location.append(log_ll)
                per_location_trace.append(per_child_trace)
        likelihood_per_location = np.array(likelihood_per_location)
        if is_leaf:
            return self.function(likelihood_per_location), self._best_trace(likelihood_per_location)
        return self.function(likelihood_per_location), self._best_trace(likelihood_per_location, per_location_trace)
| 89,213 | 43.056296 | 162 | py |
ActiveLearningForHumanPose | ActiveLearningForHumanPose-main/code/autograd_hacks.py | """
Credits: https://github.com/cybertronai/autograd-hacks
Library for extracting interesting quantites from autograd, see README.md
Not thread-safe because of module-level variables
Notation:
o: number of output classes (exact Hessian), number of Hessian samples (sampled Hessian)
n: batch-size
do: output dimension (output channels for convolution)
di: input dimension (input channels for convolution)
Hi: per-example Hessian of matmul, shaped as matrix of [dim, dim], indices have been row-vectorized
Hi_bias: per-example Hessian of bias
Oh, Ow: output height, output width (convolution)
Kh, Kw: kernel height, kernel width (convolution)
Jb: batch output Jacobian of matmul, output sensitivity for example,class pair, [o, n, ....]
Jb_bias: as above, but for bias
A, activations: inputs into current layer
B, backprops: backprop values (aka Lop aka Jacobian-vector product) observed at current layer
"""
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
# Layer class names for which per-example gradients / Hessians can be extracted.
_supported_layers = ['Linear', 'Conv2d']  # Supported layer class types
# work-around for https://github.com/pytorch/pytorch/issues/25723 — hooks check
# this flag and early-return instead of being unregistered.
_hooks_disabled: bool = False
# global switch to catch double backprop errors on Hessian computation
_enforce_fresh_backprop: bool = False
def add_hooks(model: nn.Module) -> None:
    """
    Register hooks on every supported layer of `model`.

    During the forward pass the hooks save each layer's input under
    `layer.activations`; during the backward pass they append output gradients
    to `layer.backprops_list`. Call `remove_hooks(model)` to undo.

    Args:
        model: module whose Linear/Conv2d sub-layers should be instrumented.
    """
    global _hooks_disabled
    _hooks_disabled = False

    hook_handles = []
    for module in model.modules():
        if _layer_type(module) not in _supported_layers:
            continue
        hook_handles.append(module.register_forward_hook(_capture_activations))
        hook_handles.append(module.register_backward_hook(_capture_backprops))

    # Store handles on the instance dict so remove_hooks can find them later.
    model.__dict__.setdefault('autograd_hacks_hooks', []).extend(hook_handles)
def remove_hooks(model: nn.Module) -> None:
    """
    Detach every hook previously installed by add_hooks(model); warns (without
    raising) when the model has no recorded hooks.
    """
    if not hasattr(model, 'autograd_hacks_hooks'):
        print("Warning, asked to remove hooks, but no hooks found")
        return
    for handle in model.autograd_hacks_hooks:
        handle.remove()
    del model.autograd_hacks_hooks
def disable_hooks() -> None:
    """Turn off every hook installed by this library (hooks early-return while disabled)."""
    global _hooks_disabled
    _hooks_disabled = True
def enable_hooks() -> None:
    """Re-enable hooks previously turned off via disable_hooks()."""
    global _hooks_disabled
    _hooks_disabled = False
def is_supported(layer: nn.Module) -> bool:
    """Return True when per-example quantities can be extracted for `layer`."""
    layer_name = _layer_type(layer)
    return layer_name in _supported_layers
def _layer_type(layer: nn.Module) -> str:
    """Class name of `layer`, e.g. 'Linear' or 'Conv2d'."""
    return layer.__class__.__name__
def _capture_activations(layer: nn.Module, input: List[torch.Tensor], output: torch.Tensor):
    """Forward hook: stash the layer's (detached) input under `layer.activations`."""
    if _hooks_disabled:
        return
    assert _layer_type(layer) in _supported_layers, "Hook installed on unsupported layer, this shouldn't happen"
    # input is the positional-args tuple; these layers take a single tensor.
    layer.activations = input[0].detach()
def _capture_backprops(layer: nn.Module, _input, output):
    """Backward hook: append the (detached) output gradient to `layer.backprops_list`."""
    global _enforce_fresh_backprop

    if _hooks_disabled:
        return
    if _enforce_fresh_backprop:
        # Hessian mode: the first backward of a batch must not see stale state.
        assert not hasattr(layer, 'backprops_list'), "Seeing result of previous backprop, use clear_backprops(model) to clear"
        _enforce_fresh_backprop = False
    if not hasattr(layer, 'backprops_list'):
        layer.backprops_list = []
    layer.backprops_list.append(output[0].detach())
def clear_backprops(model: nn.Module) -> None:
    """Remove the `backprops_list` attribute from every layer of `model`."""
    for module in model.modules():
        if hasattr(module, 'backprops_list'):
            del module.backprops_list
def compute_grad1(model: nn.Module, loss_type: str = 'mean') -> None:
    """
    Compute per-example gradients and save them under `param.grad1`.
    Must be called after `loss.backward()` with hooks installed.

    Args:
        model: instrumented model (see add_hooks).
        loss_type: 'mean' if the backpropped loss was averaged over the batch,
            'sum' if it was summed.
    """
    assert loss_type in ('sum', 'mean')
    for module in model.modules():
        kind = _layer_type(module)
        if kind not in _supported_layers:
            continue
        if not hasattr(module, 'activations'):
            # no forward activations recorded for this layer; skip it
            continue
        assert hasattr(module, 'backprops_list'), "No backprops detected, run backward after add_hooks(model)"
        assert len(module.backprops_list) == 1, "Multiple backprops detected, make sure to call clear_backprops(model)"

        A = module.activations
        n = A.shape[0]
        B = module.backprops_list[0]
        if loss_type == 'mean':
            # undo the 1/n averaging so grad1 sums back to param.grad
            B = B * n

        if kind == 'Linear':
            # per-example outer product of output grads and inputs
            module.weight.grad1 = torch.einsum('ni,nj->nij', B, A)
            if module.bias is not None:
                module.bias.grad1 = B
        elif kind == 'Conv2d':
            # im2col the activations, then contract over spatial positions
            A = torch.nn.functional.unfold(A, module.kernel_size, dilation=module.dilation, padding=module.padding, stride=module.stride)
            B = B.reshape(n, -1, A.shape[-1])
            grad1 = torch.einsum('ijk,ilk->ijl', B, A)
            shape = [n] + list(module.weight.shape)
            module.weight.grad1 = grad1.reshape(shape)
            if module.bias is not None:
                module.bias.grad1 = torch.sum(B, dim=2)
def compute_hess(model: nn.Module,) -> None:
    """Save Hessian under param.hess for each param in the model.

    Requires backprop_hess() to have been called first: `backprops_list` then
    holds one entry per output class / Hessian sample (o entries), and the
    Hessian is averaged over the batch of n examples.
    """
    for layer in model.modules():
        layer_type = _layer_type(layer)
        if layer_type not in _supported_layers:
            continue
        assert hasattr(layer, 'activations'), "No activations detected, run forward after add_hooks(model)"
        assert hasattr(layer, 'backprops_list'), "No backprops detected, run backward after add_hooks(model)"
        if layer_type == 'Linear':
            A = layer.activations
            # B: [o, n, do] — one backprop per output class
            B = torch.stack(layer.backprops_list)
            n = A.shape[0]
            o = B.shape[0]
            # replicate activations once per class so einsum can pair them with B
            A = torch.stack([A] * o)
            # Jb: batch output Jacobian, one row per (class, example) pair
            Jb = torch.einsum("oni,onj->onij", B, A).reshape(n*o, -1)
            # Gauss-Newton approximation: H = Jᵀ J averaged over the batch
            H = torch.einsum('ni,nj->ij', Jb, Jb) / n
            setattr(layer.weight, 'hess', H)
            if layer.bias is not None:
                setattr(layer.bias, 'hess', torch.einsum('oni,onj->ij', B, B)/n)
        elif layer_type == 'Conv2d':
            Kh, Kw = layer.kernel_size
            di, do = layer.in_channels, layer.out_channels
            A = layer.activations.detach()
            # im2col: unfold patches so the conv behaves like a matmul
            A = torch.nn.functional.unfold(A, (Kh, Kw))  # n, di * Kh * Kw, Oh * Ow
            n = A.shape[0]
            B = torch.stack([Bt.reshape(n, do, -1) for Bt in layer.backprops_list])  # o, n, do, Oh*Ow
            o = B.shape[0]
            A = torch.stack([A] * o)  # o, n, di * Kh * Kw, Oh*Ow
            # per-(class, example) Jacobian w.r.t. the flattened kernel
            Jb = torch.einsum('onij,onkj->onik', B, A)  # o, n, do, di * Kh * Kw
            # per-example Hessian (summed over classes), then averaged over the batch below
            Hi = torch.einsum('onij,onkl->nijkl', Jb, Jb)  # n, do, di*Kh*Kw, do, di*Kh*Kw
            Jb_bias = torch.einsum('onij->oni', B)
            Hi_bias = torch.einsum('oni,onj->nij', Jb_bias, Jb_bias)
            setattr(layer.weight, 'hess', Hi.mean(dim=0))
            if layer.bias is not None:
                setattr(layer.bias, 'hess', Hi_bias.mean(dim=0))
def backprop_hess(output: torch.Tensor, hess_type: str) -> None:
    """Call backprop 1 or more times to get values needed for Hessian computation.

    Runs ``output.backward`` once per output dimension (``retain_graph=True``
    so the graph survives all passes); the per-layer backward hooks collect
    the resulting gradients into ``backprops_list`` for ``compute_hess``.

    Args:
        output: prediction of neural network (ie, input of nn.CrossEntropyLoss()),
            shape (batch, num_outputs).
        hess_type: type of Hessian propagation, "CrossEntropy" results in exact
            Hessian for CrossEntropy; "LeastSquares" uses identity grad vectors.
    """
    assert hess_type in ('LeastSquares', 'CrossEntropy')
    global _enforce_fresh_backprop
    n, o = output.shape
    # Signal the backward hooks that a new multi-pass accumulation begins.
    _enforce_fresh_backprop = True

    if hess_type == 'CrossEntropy':
        batch = F.softmax(output, dim=1)
        # Hessian of cross-entropy wrt logits is diag(p) - p p^T per example.
        # Build on output.device so this also works for GPU tensors
        # (original built torch.eye on CPU, causing a device mismatch).
        mask = torch.eye(o, device=output.device).expand(n, o, o)
        diag_part = batch.unsqueeze(2).expand(n, o, o) * mask
        outer_prod_part = torch.einsum('ij,ik->ijk', batch, batch)
        hess = diag_part - outer_prod_part
        assert hess.shape == (n, o, o)
        # Symmetric square root: backpropagating its rows reconstructs H
        # as a sum of outer products of the collected gradients.
        for i in range(n):
            hess[i, :, :] = symsqrt(hess[i, :, :])
        hess = hess.transpose(0, 1)  # (o, n, o): one grad_output batch per pass
    else:  # 'LeastSquares'
        id_mat = torch.eye(o, device=output.device)
        # One backward vector per output dim: basis row repeated over the batch.
        hess = [id_mat[out_idx].repeat(n, 1) for out_idx in range(o)]

    # BUG FIX: the original wrote `for o in range(o)`, shadowing the
    # output-dimension variable with the loop index.
    for idx in range(o):
        output.backward(hess[idx], retain_graph=True)
def symsqrt(a, cond=None, return_rank=False, dtype=torch.float32):
    """Symmetric square root of a positive semi-definite matrix.

    Eigenvalues below ``cond * max(|eigenvalue|)`` are treated as numerical
    noise and dropped, so the result is the square root of the projection of
    ``a`` onto its significant eigenspace.
    See https://github.com/pytorch/pytorch/issues/25481

    Args:
        a: square symmetric (PSD) matrix.
        cond: relative eigenvalue cutoff; None/-1 selects a dtype-based default.
        return_rank: if True also return the number of retained eigenvalues.
        dtype: dtype used only to pick the default cutoff (not to cast ``a``).
    """
    # BUG FIX: torch.symeig was deprecated in 1.8 and removed in 1.13;
    # torch.linalg.eigh is the drop-in replacement (ascending eigenvalues,
    # eigenvectors in columns — same layout symeig produced).
    s, u = torch.linalg.eigh(a)
    cond_dict = {torch.float32: 1e3 * 1.1920929e-07,
                 torch.float64: 1E6 * 2.220446049250313e-16}
    if cond in [None, -1]:
        cond = cond_dict[dtype]
    # Keep only eigenvalues significantly above the noise floor.
    above_cutoff = (abs(s) > cond * torch.max(abs(s)))
    psigma_diag = torch.sqrt(s[above_cutoff])
    u = u[:, above_cutoff]
    B = u @ torch.diag(psigma_diag) @ u.t()
    if return_rank:
        return B, len(psigma_diag)
    else:
        return B
ActiveLearningForHumanPose | ActiveLearningForHumanPose-main/code/models/stacked_hourglass/StackedHourglass.py | '''
Baseline Architecture: Stacked Hourglass
https://github.com/princeton-vl/pytorch_stacked_hourglass
'''
import torch
from torch import nn
from .layers import Conv, Hourglass, Pool, Residual
class Merge(nn.Module):
    """1x1 projection used to merge feature maps between hourglass stacks.

    Plain convolution only — no batch norm and no ReLU — so predictions and
    features can be summed back into the trunk without a nonlinearity.
    """

    def __init__(self, x_dim, y_dim):
        super(Merge, self).__init__()
        # 1x1 conv: changes channel count from x_dim to y_dim, keeps H x W.
        self.conv = Conv(x_dim, y_dim, 1, relu=False, bn=False)

    def forward(self, x):
        projected = self.conv(x)
        return projected
class PoseNet(nn.Module):
    """Stacked Hourglass pose network with manual multi-GPU stack placement."""

    def __init__(self, arch, auxnet, intermediate_features):
        '''
        :param arch: (dict) architecture config. Keys read here:
            'nstack' (number of hourglass stacks),
            'channels' (feature channels inside each stack),
            'num_hm' (number of output heatmaps).
        :param auxnet: whether to capture detached intermediate features
            for an auxiliary network.
        :param intermediate_features: 'conv' to capture hourglass feature
            maps; the pooled 'penultimate' vector is captured regardless.
        '''
        super(PoseNet, self).__init__()
        self.auxnet = auxnet # Whether to compute features for auxnet
        self.intermediate_features = intermediate_features # Whether extractor is conv or avg
        self.nstack = arch['nstack']
        inp_dim = arch['channels']
        oup_dim = arch['num_hm']
        if torch.cuda.device_count() > 1:
            # Spread stacks across GPUs; never use more GPUs than stacks.
            n_gpus = min(torch.cuda.device_count(), self.nstack)
            stacks_per_gpu = torch.zeros(size=(n_gpus,), dtype=torch.int16)
            # 1. Equal allocation to all
            stacks_per_gpu += (self.nstack // n_gpus)
            # 2. Distribute the remaining (max 1) among all the GPUs
            temp_tensor = torch.zeros(size=(n_gpus,), dtype=torch.int16)
            for i in range(self.nstack % n_gpus):
                temp_tensor[i] = 1
            stacks_per_gpu += temp_tensor
            # Flatten the allocation into one device per stack, in order.
            cuda_devices = []
            for i in range(stacks_per_gpu.shape[0]):
                for _ in range(stacks_per_gpu[i]):
                    cuda_devices.append(torch.device('cuda:{}'.format(i)))
        else:
            cuda_devices = [torch.device('cuda:0')] * self.nstack
        self.cuda_devices = cuda_devices
        # Stem: 7x7 stride-2 conv + residuals + pool -> inp_dim channels at 1/4 res.
        self.pre = nn.Sequential(
            Conv(inp_dim=3, out_dim=64, kernel_size=7, stride=2, bn=True, relu=True),
            Residual(inp_dim=64, out_dim=128),
            Pool(2, 2),
            Residual(inp_dim=128, out_dim=128),
            Residual(inp_dim=128, out_dim=inp_dim)).cuda(cuda_devices[0])
        # One 4-level hourglass per stack, each pinned to its assigned device.
        self.hgs = nn.ModuleList(
            [nn.Sequential(Hourglass(n=4, f=inp_dim, bn=False, increase=0,
                                     intermediate_features=intermediate_features, auxnet=auxnet)
                           ).cuda(cuda_devices[i]) for i in range(self.nstack)])
        # Post-hourglass feature refinement before the heatmap head.
        self.features = nn.ModuleList([nn.Sequential(Residual(inp_dim, inp_dim),
                                                     Conv(inp_dim, inp_dim, 1, bn=True, relu=True)
                                                     ).cuda(cuda_devices[i]) for i in range(self.nstack)])
        # 1x1 heatmap heads, one per stack (intermediate supervision).
        self.outs = nn.ModuleList(
            [Conv(inp_dim=inp_dim, out_dim=oup_dim, kernel_size=1, relu=False, bn=False).cuda(cuda_devices[i])
             for i in range(self.nstack)])
        # Merge layers feed each stack's features/predictions into the next
        # stack, hence nstack-1 of each.
        self.merge_features = nn.ModuleList([Merge(inp_dim, inp_dim).cuda(cuda_devices[i]) for i in range(self.nstack-1)])
        self.merge_preds = nn.ModuleList([Merge(oup_dim, inp_dim).cuda(cuda_devices[i]) for i in range(self.nstack-1)])
        # 64x64 global average pool -> per-stack 'penultimate' feature vector.
        # NOTE(review): assumes hourglass output resolution is 64x64 — confirm
        # against the input image size used by callers.
        self.global_avg_pool = nn.ModuleList([nn.AvgPool2d(kernel_size=(64, 64), stride=1).cuda(cuda_devices[i])
                                              for i in range(self.nstack)])

    def forward(self, imgs):
        '''
        Run the stacked hourglass over a batch of images.

        :param imgs: NHWC image batch; permuted to NCHW below.
        :return: (Tensor, dict) — heatmaps stacked over hourglasses with
            shape (batch, nstack, num_hm, H, W), plus a dict of detached
            intermediate features. NOTE: the dict is overwritten each
            iteration, so only the LAST stack's features are returned.
        '''
        # x is of shape: (BatchSize, #channels == 3, input_dim1, input_dim2)
        x = imgs.permute(0, 3, 1, 2).cuda(self.cuda_devices[0])
        x = self.pre(x)
        combined_hm_preds = []
        hourglass_dict= {}
        for i in range(self.nstack):
            # Move the running features to this stack's assigned GPU.
            x = x.cuda(self.cuda_devices[i])
            hourglass_dict = self.hgs[i](x)
            x = hourglass_dict['out']
            # Hourglass parameters
            if self.intermediate_features == 'conv' and self.auxnet:
                # Detached copy of the full-resolution hourglass output,
                # staged on the last GPU for the auxiliary network.
                hourglass_dict['feature_5'] = x.clone().detach().to(
                    'cuda:{}'.format(torch.cuda.device_count() - 1))
            x = self.features[i](x)
            # Pooled feature vector (batch, channels), also detached.
            hourglass_dict['penultimate'] = self.global_avg_pool[i](x).clone().detach().to(
                'cuda:{}'.format(torch.cuda.device_count() - 1)).reshape(x.shape[0], -1)
            preds = self.outs[i](x)
            combined_hm_preds.append(preds.cuda(self.cuda_devices[-1]))
            if i < self.nstack - 1:
                # Feed predictions and features back into the trunk for the
                # next stack (classic stacked-hourglass skip).
                x = x + self.merge_preds[i](preds) + self.merge_features[i](x)
        # 'out' is the raw hourglass tensor — redundant with preds, drop it.
        del hourglass_dict['out']
        # ip_learn_loss_dict is a dictionary containing intermediate outputs of hourglass
        return torch.stack(combined_hm_preds, 1), hourglass_dict
| 5,127 | 40.024 | 122 | py |
ActiveLearningForHumanPose | ActiveLearningForHumanPose-main/code/models/stacked_hourglass/layers.py | import torch
from torch import nn
Pool = nn.MaxPool2d
def batchnorm(x):
    """Batch-normalize ``x`` over its channel dimension.

    NOTE: constructs a brand-new (untrained, training-mode) BatchNorm2d on
    every call, so this only normalizes with the current batch statistics —
    it carries no learned affine parameters or running stats across calls.
    """
    layer = nn.BatchNorm2d(x.size()[1])
    return layer(x)
class Conv(nn.Module):
    """'Same'-padded 2D convolution, optionally followed by BatchNorm and ReLU.

    The forward order is conv -> bn -> relu (each stage only if enabled).
    With stride 1 the spatial size is preserved via (kernel_size-1)//2 padding.
    """

    def __init__(self, inp_dim, out_dim, kernel_size, stride=1, bn=False, relu=True):
        '''
        :param inp_dim: (int) Number of input channels
        :param out_dim: (int) Number of output channels
        :param kernel_size: (int) Kernel size
        :param stride: (int) Convolution stride
        :param bn: (bool) Whether to append Batch Normalization
        :param relu: (bool) Whether to append ReLU
        '''
        # Strict typing checks (type(), not isinstance, so bool is rejected
        # for the int parameters).
        for value in (inp_dim, out_dim, kernel_size, stride):
            assert type(value) == int, "[Conv]: Wrong typing"
        for flag in (bn, relu):
            assert type(flag) == bool, "[Conv]: Wrong typing"
        super(Conv, self).__init__()
        self.inp_dim = inp_dim
        # 'Same' padding for odd kernels when stride == 1.
        self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size,
                              stride=stride,
                              padding=(kernel_size - 1) // 2,
                              bias=True)
        self.relu = nn.ReLU() if relu else None
        self.bn = nn.BatchNorm2d(num_features=out_dim) if bn else None

    def forward(self, x):
        channels = x.size()[1]
        assert channels == self.inp_dim, "Passed: {}\tExpected: {}".format(channels, self.inp_dim)
        out = self.conv(x)
        if self.bn:
            out = self.bn(out)
        if self.relu:
            out = self.relu(out)
        return out
class Residual(nn.Module):
    """Pre-activation bottleneck residual block.

    Main path: (BN -> ReLU -> 1x1 conv to out_dim/2) -> (BN -> ReLU -> 3x3 conv)
    -> (BN -> ReLU -> 1x1 conv back to out_dim), added to a skip connection.
    The skip is the identity when channel counts match, otherwise a 1x1 conv.
    """

    def __init__(self, inp_dim, out_dim):
        super(Residual, self).__init__()
        half = int(out_dim / 2)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm2d(num_features=inp_dim)
        self.conv1 = Conv(inp_dim=inp_dim, out_dim=half, kernel_size=1, relu=False)
        self.bn2 = nn.BatchNorm2d(num_features=half)
        self.conv2 = Conv(inp_dim=half, out_dim=half, kernel_size=3, relu=False)
        self.bn3 = nn.BatchNorm2d(num_features=half)
        self.conv3 = Conv(inp_dim=half, out_dim=out_dim, kernel_size=1, relu=False)
        # Always constructed (matching the original), but only applied when
        # the channel counts differ.
        self.skip_layer = Conv(inp_dim=inp_dim, out_dim=out_dim, kernel_size=1, relu=False)
        self.need_skip = inp_dim != out_dim

    def forward(self, x):
        shortcut = self.skip_layer(x) if self.need_skip else x
        y = self.conv1(self.relu(self.bn1(x)))
        y = self.conv2(self.relu(self.bn2(y)))
        y = self.conv3(self.relu(self.bn3(y)))
        return y + shortcut
class Hourglass(nn.Module):
    """Recursive hourglass module of depth ``n``.

    Each level keeps a full-resolution residual branch (up1) and a pooled
    branch that recurses one level deeper, then upsamples and sums the two.
    forward() returns a dict: key 'out' holds the output tensor; when
    intermediate_features == 'conv' and auxnet is set, detached feature maps
    are captured per level under 'feature_1' .. 'feature_n'.
    """

    def __init__(self, n, f, bn=None, increase=0, intermediate_features=None, auxnet=None):
        super(Hourglass, self).__init__()
        self.n = n
        self.auxnet = auxnet
        self.intermediate_features = intermediate_features
        nf = f + increase
        # Full-resolution branch.
        self.up1 = Residual(f, f)
        # Pooled branch.
        self.pool1 = Pool(2, 2)
        self.low1 = Residual(f, nf)
        # Recurse until depth 1, where the innermost level is a plain residual.
        if n > 1:
            self.low2 = Hourglass(n=n - 1, f=nf, bn=bn, increase=0,
                                  intermediate_features=intermediate_features,
                                  auxnet=auxnet)
        else:
            self.low2 = Residual(nf, nf)
        self.low3 = Residual(nf, f)
        self.up2 = nn.Upsample(scale_factor=2, mode='nearest')
        # Not used inside forward(); kept for external feature extraction.
        self.avg_pool = nn.AvgPool2d(kernel_size=2 ** (self.n + 1), stride=1)

    def forward(self, x):
        skip = self.up1(x)
        low = self.low1(self.pool1(x))
        capture = self.intermediate_features == 'conv' and self.auxnet
        if self.n > 1:
            result = self.low2(low)
            low = result['out']
        else:
            result = {}
            low = self.low2(low)
        low = self.low3(low)
        if capture:
            # Detached snapshot of this level's low-res features, staged on
            # the last GPU for the auxiliary network.
            feature_map = low.clone().detach().to('cuda:{}'.format(torch.cuda.device_count() - 1))
        result['out'] = skip + self.up2(low)
        if capture:
            result['feature_{}'.format(self.n)] = feature_map
        return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.