# File: MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/coco.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torchvision
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
class COCODataset(torchvision.datasets.coco.CocoDetection):
def __init__(
self, ann_file, root, remove_images_without_annotations, transforms=None
):
super(COCODataset, self).__init__(root, ann_file)
# sort indices for reproducible results
self.ids = sorted(self.ids)
# filter images without detection annotations
if remove_images_without_annotations:
self.ids = [
img_id
for img_id in self.ids
if len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0
]
self.json_category_id_to_contiguous_id = {
v: i + 1 for i, v in enumerate(self.coco.getCatIds())
}
self.contiguous_category_id_to_json_id = {
v: k for k, v in self.json_category_id_to_contiguous_id.items()
}
self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
self.transforms = transforms
def __getitem__(self, idx):
img, anno = super(COCODataset, self).__getitem__(idx)
# filter crowd annotations
# TODO might be better to add an extra field
anno = [obj for obj in anno if obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh", use_char_ann=False).convert("xyxy")
classes = [obj["category_id"] for obj in anno]
classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
classes = torch.tensor(classes)
target.add_field("labels", classes)
masks = [obj["segmentation"] for obj in anno]
masks = SegmentationMask(masks, img.size)
target.add_field("masks", masks)
target = target.clip_to_image(remove_empty=True)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target, idx
def get_img_info(self, index):
img_id = self.id_to_img_map[index]
img_data = self.coco.imgs[img_id]
return img_data
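# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). The paths below are
# hypothetical placeholders; running this assumes a COCO-style annotation
# file and image directory, plus pycocotools installed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dataset = COCODataset(
        ann_file="datasets/coco/annotations/instances_train2017.json",  # placeholder
        root="datasets/coco/train2017",  # placeholder
        remove_images_without_annotations=True,
    )
    img, target, idx = dataset[0]
    print(img.size, target.bbox.shape, target.get_field("labels"))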
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import itertools
import torch
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
    It enforces that elements from the same group appear in groups of batch_size.
    It also tries to provide mini-batches that follow an ordering as close as
    possible to the ordering from the original sampler.
Arguments:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_uneven (bool): If ``True``, the sampler will drop the batches whose
size is less than ``batch_size``
"""
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler
self.group_ids = torch.as_tensor(group_ids)
assert self.group_ids.dim() == 1
self.batch_size = batch_size
self.drop_uneven = drop_uneven
self.groups = torch.unique(self.group_ids).sort(0)[0]
self._can_reuse_batches = False
def _prepare_batches(self):
dataset_size = len(self.group_ids)
# get the sampled indices from the sampler
sampled_ids = torch.as_tensor(list(self.sampler))
# potentially not all elements of the dataset were sampled
# by the sampler (e.g., DistributedSampler).
# construct a tensor which contains -1 if the element was
# not sampled, and a non-negative number indicating the
# order where the element was sampled.
        # for example, if sampled_ids = [3, 1] and dataset_size = 5,
# the order is [-1, 1, -1, 0, -1]
order = torch.full((dataset_size,), -1, dtype=torch.int64)
order[sampled_ids] = torch.arange(len(sampled_ids))
# get a mask with the elements that were sampled
mask = order >= 0
# find the elements that belong to each individual cluster
clusters = [(self.group_ids == i) & mask for i in self.groups]
# get relative order of the elements inside each cluster
# that follows the order from the sampler
relative_order = [order[cluster] for cluster in clusters]
# with the relative order, find the absolute order in the
# sampled space
permutation_ids = [s[s.sort()[1]] for s in relative_order]
# permute each cluster so that they follow the order from
# the sampler
permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
# splits each cluster in batch_size, and merge as a list of tensors
splits = [c.split(self.batch_size) for c in permuted_clusters]
merged = tuple(itertools.chain.from_iterable(splits))
# now each batch internally has the right order, but
# they are grouped by clusters. Find the permutation between
# different batches that brings them as close as possible to
# the order that we have in the sampler. For that, we will consider the
# ordering as coming from the first element of each batch, and sort
# correspondingly
first_element_of_batch = [t[0].item() for t in merged]
        # get an inverse mapping from sampled indices to the positions where
        # they occur (as returned by the sampler)
inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
# from the first element in each batch, get a relative ordering
first_index_of_batch = torch.as_tensor(
[inv_sampled_ids_map[s] for s in first_element_of_batch]
)
# permute the batches so that they approximately follow the order
# from the sampler
permutation_order = first_index_of_batch.sort(0)[1].tolist()
# finally, permute the batches
batches = [merged[i].tolist() for i in permutation_order]
if self.drop_uneven:
kept = []
for batch in batches:
if len(batch) == self.batch_size:
kept.append(batch)
batches = kept
return batches
def __iter__(self):
if self._can_reuse_batches:
batches = self._batches
self._can_reuse_batches = False
else:
batches = self._prepare_batches()
self._batches = batches
return iter(batches)
def __len__(self):
if not hasattr(self, "_batches"):
self._batches = self._prepare_batches()
self._can_reuse_batches = True
return len(self._batches)
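# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a toy run with a
# SequentialSampler and made-up group ids. With batch_size=2, each yielded
# batch only contains indices from a single group.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.utils.data.sampler import SequentialSampler

    sampler = SequentialSampler(range(6))
    group_ids = [0, 1, 0, 1, 0, 1]  # e.g. landscape vs. portrait aspect groups
    batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=2)
    print(list(batch_sampler))  # [[0, 2], [1, 3], [4], [5]]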
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch.utils.data.sampler import BatchSampler
class IterationBasedBatchSampler(BatchSampler):
"""
Wraps a BatchSampler, resampling from it until
a specified number of iterations have been sampled
"""
def __init__(self, batch_sampler, num_iterations, start_iter=0):
self.batch_sampler = batch_sampler
self.num_iterations = num_iterations
self.start_iter = start_iter
def __iter__(self):
iteration = self.start_iter
while iteration <= self.num_iterations:
# if the underlying sampler has a set_epoch method, like
# DistributedSampler, used for making each process see
# a different split of the dataset, then set it
if hasattr(self.batch_sampler.sampler, "set_epoch"):
self.batch_sampler.sampler.set_epoch(iteration)
for batch in self.batch_sampler:
iteration += 1
if iteration > self.num_iterations:
break
yield batch
def __len__(self):
return self.num_iterations
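# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): wrapping a plain
# BatchSampler so that iteration re-cycles the dataset until exactly
# num_iterations batches have been produced.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.utils.data.sampler import BatchSampler, SequentialSampler

    base = BatchSampler(SequentialSampler(range(4)), batch_size=2, drop_last=False)
    wrapped = IterationBasedBatchSampler(base, num_iterations=5)
    print(len(wrapped), list(wrapped))  # 5 [[0, 1], [2, 3], [0, 1], [2, 3], [0, 1]]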
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/data/samplers/distributed.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed,
# with a modification in the import to use the deprecated backend
# FIXME remove this once c10d fixes the bug it has
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
from maskrcnn_benchmark.utils.comm import get_rank, get_world_size
class DistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
# num_replicas = dist.get_world_size()
num_replicas = get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
# rank = dist.get_rank()
rank = get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset : offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
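# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): passing num_replicas/rank
# explicitly avoids needing an initialized process group, so the sharding and
# per-epoch reshuffling can be inspected directly. The 10-element list is a
# stand-in for a real Dataset.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    data = list(range(10))
    sampler = DistributedSampler(data, num_replicas=2, rank=0)
    for epoch in range(2):
        sampler.set_epoch(epoch)  # reseeds the deterministic shuffle
        print(list(sampler))  # this rank's 5-index shard for the epoch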
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/data/transforms/transforms.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import random
import cv2
import numpy as np
from PIL import Image
from shapely import affinity
from shapely.geometry import Polygon
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size, strict_resize):
self.min_size = min_size
self.max_size = max_size
self.strict_resize = strict_resize
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
if isinstance(self.min_size, tuple):
if len(self.min_size) == 1:
size = self.min_size[0]
else:
random_size_index = random.randint(0, len(self.min_size) - 1)
size = self.min_size[random_size_index]
else:
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
if self.strict_resize:
h = h if h % 32 == 0 else (h // 32) * 32
w = w if w % 32 == 0 else (w // 32) * 32
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
if self.strict_resize:
oh = oh if oh % 32 == 0 else (oh // 32) * 32
ow = ow if ow % 32 == 0 else (ow // 32) * 32
return (oh, ow)
def __call__(self, image, target):
size = self.get_size(image.size)
image = F.resize(image, size)
if target is not None:
target = target.resize(image.size)
return image, target
class RandomCrop(object):
def __init__(self, prob, crop_min_size=500, crop_max_size=1000, max_trys=50):
self.min_size = crop_min_size
self.max_size = crop_max_size
self.max_trys = max_trys
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
im = np.array(image)
w, h = image.size
h_array = np.zeros((h), dtype=np.int32)
w_array = np.zeros((w), dtype=np.int32)
boxes = target.bbox.numpy()
if len(boxes) == 0:
return image, target
for box in boxes:
box = np.round(box, decimals=0).astype(np.int32)
minx = box[0]
maxx = box[2]
w_array[minx:maxx] = 1
miny = box[1]
maxy = box[3]
h_array[miny:maxy] = 1
h_axis = np.where(h_array == 0)[0]
w_axis = np.where(w_array == 0)[0]
if len(h_axis) == 0 or len(w_axis) == 0:
return image, target
for _ in range(self.max_trys):
xx = np.random.choice(w_axis, size=2)
xmin = min(xx)
xmax = max(xx)
x_size = xmax - xmin
if x_size > self.max_size or x_size < self.min_size:
continue
yy = np.random.choice(h_axis, size=2)
ymin = min(yy)
ymax = max(yy)
y_size = ymax - ymin
if y_size > self.max_size or y_size < self.min_size:
continue
box_in_area = (
(boxes[:, 0] >= xmin)
& (boxes[:, 1] >= ymin)
& (boxes[:, 2] <= xmax)
& (boxes[:, 3] <= ymax)
)
if len(np.where(box_in_area)[0]) == 0:
continue
im = im[ymin:ymax, xmin:xmax]
target = target.crop([xmin, ymin, xmax, ymax])
return Image.fromarray(im), target
return image, target
else:
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
image = F.hflip(image)
target = target.transpose(0)
return image, target
class ToTensor(object):
def __call__(self, image, target):
return F.to_tensor(image), target
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, target):
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
class RandomBrightness(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
brightness_factor = random.uniform(0.5, 2)
image = F.adjust_brightness(image, brightness_factor)
return image, target
class RandomContrast(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
contrast_factor = random.uniform(0.5, 2)
image = F.adjust_contrast(image, contrast_factor)
return image, target
class RandomHue(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
hue_factor = random.uniform(-0.25, 0.25)
image = F.adjust_hue(image, hue_factor)
return image, target
class RandomSaturation(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
saturation_factor = random.uniform(0.5, 2)
image = F.adjust_saturation(image, saturation_factor)
return image, target
class RandomGamma(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
gamma_factor = random.uniform(0.5, 2)
image = F.adjust_gamma(image, gamma_factor)
return image, target
class RandomRotate(object):
def __init__(self, prob, max_theta=30, fix_rotate=False):
self.prob = prob
self.max_theta = max_theta
self.fix_rotate = fix_rotate
def __call__(self, image, target):
if random.random() < self.prob and target is not None:
if self.fix_rotate:
delta = 30
else:
delta = random.uniform(-1 * self.max_theta, self.max_theta)
width, height = image.size
## get the minimal rect to cover the rotated image
img_box = [[[0, 0], [width, 0], [width, height], [0, height]]]
rotated_img_box = _quad2minrect(
_rotate_polygons(img_box, delta, (width / 2, height / 2))
)
r_height = int(
max(rotated_img_box[0][3], rotated_img_box[0][1])
- min(rotated_img_box[0][3], rotated_img_box[0][1])
)
r_width = int(
max(rotated_img_box[0][2], rotated_img_box[0][0])
- min(rotated_img_box[0][2], rotated_img_box[0][0])
)
r_height = max(r_height, height + 1)
r_width = max(r_width, width + 1)
## padding im
im_padding = np.zeros((r_height, r_width, 3))
start_h, start_w = (
int((r_height - height) / 2.0),
int((r_width - width) / 2.0),
)
end_h, end_w = start_h + height, start_w + width
im_padding[start_h:end_h, start_w:end_w, :] = image
M = cv2.getRotationMatrix2D((r_width / 2, r_height / 2), delta, 1)
im = cv2.warpAffine(im_padding, M, (r_width, r_height))
im = Image.fromarray(im.astype(np.uint8))
target = target.rotate(
-delta, (r_width / 2, r_height / 2), start_h, start_w
)
return im, target
else:
return image, target
def _quad2minrect(boxes):
    ## convert quads (N*8) to axis-aligned rectangles (N*4) with minimal covering area
return np.hstack(
(
boxes[:, ::2].min(axis=1).reshape((-1, 1)),
boxes[:, 1::2].min(axis=1).reshape((-1, 1)),
boxes[:, ::2].max(axis=1).reshape((-1, 1)),
boxes[:, 1::2].max(axis=1).reshape((-1, 1)),
)
)
def _boxlist2quads(boxlist):
res = np.zeros((len(boxlist), 8))
for i, box in enumerate(boxlist):
# print(box)
res[i] = np.array(
[
box[0][0],
box[0][1],
box[1][0],
box[1][1],
box[2][0],
box[2][1],
box[3][0],
box[3][1],
]
)
return res
def _rotate_polygons(polygons, angle, r_c):
    ## polygons: list of quads, each given as four (x, y) points
    ## angle: rotation angle in degrees
    ## r_c: rotation center (x, y)
rotate_boxes_list = []
for poly in polygons:
box = Polygon(poly)
rbox = affinity.rotate(box, angle, r_c)
if len(list(rbox.exterior.coords)) < 5:
print("img_box_ori:", poly)
print("img_box_rotated:", rbox)
# assert(len(list(rbox.exterior.coords))>=5)
rotate_boxes_list.append(rbox.boundary.coords[:-1])
res = _boxlist2quads(rotate_boxes_list)
return res
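# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): assembling a training-style
# augmentation pipeline from the classes above. The sizes, probabilities and
# normalization constants are illustrative, not the repo's configured values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    augment = Compose(
        [
            RandomHorizontalFlip(prob=0.5),
            RandomBrightness(prob=0.5),
            Resize(min_size=(800,), max_size=1333, strict_resize=False),
            ToTensor(),
            Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0]),
        ]
    )
    print(augment)  # Compose.__repr__ lists each transform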
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/matcher.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class Matcher(object):
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
ground-truth element may be assigned to zero or more predicted elements.
    Matching is based on the MxN match_quality_matrix, which characterizes how well
    each (ground-truth, predicted) pair matches. For example, if the elements are
    boxes, the matrix may contain box IoU overlap values.
The matcher returns a tensor of size N containing the index of the ground-truth
element m that matches to prediction n. If there is no match, a negative value
is returned.
"""
BELOW_LOW_THRESHOLD = -1
BETWEEN_THRESHOLDS = -2
def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
"""
Args:
high_threshold (float): quality values greater than or equal to
this value are candidate matches.
low_threshold (float): a lower quality threshold used to stratify
matches into three levels:
1) matches >= high_threshold
2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold)
3) BELOW_LOW_THRESHOLD matches in [0, low_threshold)
allow_low_quality_matches (bool): if True, produce additional matches
for predictions that have only low-quality match candidates. See
set_low_quality_matches_ for more details.
"""
assert low_threshold <= high_threshold
self.high_threshold = high_threshold
self.low_threshold = low_threshold
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, match_quality_matrix):
"""
Args:
match_quality_matrix (Tensor[float]): an MxN tensor, containing the
pairwise quality between M ground-truth elements and N predicted elements.
Returns:
            matches (Tensor[int64]): an N tensor where matches[i] is the index of
                the matched ground-truth element in [0, M - 1], or a negative value
                indicating that prediction i could not be matched.
"""
if match_quality_matrix.numel() == 0:
# handle empty case
device = match_quality_matrix.device
return torch.empty((0,), dtype=torch.int64, device=device)
# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
matched_vals, matches = match_quality_matrix.max(dim=0)
if self.allow_low_quality_matches:
all_matches = matches.clone()
# Assign candidate matches with low quality to negative (unassigned) values
below_low_threshold = matched_vals < self.low_threshold
between_thresholds = (matched_vals >= self.low_threshold) & (
matched_vals < self.high_threshold
)
matches[below_low_threshold] = Matcher.BELOW_LOW_THRESHOLD
matches[between_thresholds] = Matcher.BETWEEN_THRESHOLDS
if self.allow_low_quality_matches:
self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
return matches
def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
"""
Produce additional matches for predictions that have only low-quality matches.
Specifically, for each ground-truth find the set of predictions that have
maximum overlap with it (including ties); for each prediction in that set, if
it is unmatched, then match it to the ground-truth with which it has the highest
quality value.
"""
# For each gt, find the prediction with which it has highest quality
highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
# Find highest quality match available, even if it is low, including ties
gt_pred_pairs_of_highest_quality = torch.nonzero(
match_quality_matrix == highest_quality_foreach_gt[:, None]
)
# Example gt_pred_pairs_of_highest_quality:
# tensor([[ 0, 39796],
# [ 1, 32055],
# [ 1, 32070],
# [ 2, 39190],
# [ 2, 40255],
# [ 3, 40390],
# [ 3, 41455],
# [ 4, 45470],
# [ 5, 45325],
# [ 5, 46390]])
# Each row is a (gt index, prediction index)
# Note how gt items 1, 2, 3, and 5 each have two ties
pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
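# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a toy 2 (gt) x 4
# (predictions) IoU matrix. With thresholds 0.7/0.3, prediction 0 matches
# gt 0, prediction 1 falls BETWEEN_THRESHOLDS (-2) and the rest are
# BELOW_LOW_THRESHOLD (-1); allow_low_quality_matches recovers prediction 2
# as the best available match for gt 1.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    iou = torch.tensor([[0.9, 0.4, 0.1, 0.0],
                        [0.0, 0.2, 0.25, 0.1]])
    print(Matcher(0.7, 0.3)(iou))  # tensor([ 0, -2, -1, -1])
    print(Matcher(0.7, 0.3, allow_low_quality_matches=True)(iou))  # tensor([ 0, -2,  1, -1])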
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/make_layers.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Miscellaneous utility functions
"""
import torch
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.layers import Conv2d
from maskrcnn_benchmark.modeling.poolers import Pooler
def get_group_gn(dim, dim_per_gp, num_groups):
"""get number of groups used by GroupNorm, based on number of channels."""
assert dim_per_gp == -1 or num_groups == -1, \
"GroupNorm: can only specify G or C/G."
if dim_per_gp > 0:
assert dim % dim_per_gp == 0, \
"dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
group_gn = dim // dim_per_gp
else:
assert dim % num_groups == 0, \
"dim: {}, num_groups: {}".format(dim, num_groups)
group_gn = num_groups
return group_gn
def group_norm(out_channels, affine=True, divisor=1):
out_channels = out_channels // divisor
dim_per_gp = cfg.MODEL.GROUP_NORM.DIM_PER_GP // divisor
num_groups = cfg.MODEL.GROUP_NORM.NUM_GROUPS // divisor
eps = cfg.MODEL.GROUP_NORM.EPSILON # default: 1e-5
return torch.nn.GroupNorm(
get_group_gn(out_channels, dim_per_gp, num_groups),
out_channels,
eps,
affine
)
def make_conv3x3(
in_channels,
out_channels,
dilation=1,
stride=1,
use_gn=False,
use_relu=False,
kaiming_init=True
):
conv = Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False if use_gn else True
)
if kaiming_init:
nn.init.kaiming_normal_(
conv.weight, mode="fan_out", nonlinearity="relu"
)
else:
torch.nn.init.normal_(conv.weight, std=0.01)
if not use_gn:
nn.init.constant_(conv.bias, 0)
module = [conv,]
if use_gn:
module.append(group_norm(out_channels))
if use_relu:
module.append(nn.ReLU(inplace=True))
if len(module) > 1:
return nn.Sequential(*module)
return conv
def make_fc(dim_in, hidden_dim, use_gn=False):
'''
Caffe2 implementation uses XavierFill, which in fact
corresponds to kaiming_uniform_ in PyTorch
'''
if use_gn:
fc = nn.Linear(dim_in, hidden_dim, bias=False)
nn.init.kaiming_uniform_(fc.weight, a=1)
return nn.Sequential(fc, group_norm(hidden_dim))
fc = nn.Linear(dim_in, hidden_dim)
nn.init.kaiming_uniform_(fc.weight, a=1)
nn.init.constant_(fc.bias, 0)
return fc
def conv_with_kaiming_uniform(use_gn=False, use_relu=False):
def make_conv(
in_channels, out_channels, kernel_size, stride=1, dilation=1
):
conv = Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=dilation * (kernel_size - 1) // 2,
dilation=dilation,
bias=False if use_gn else True
)
# Caffe2 implementation uses XavierFill, which in fact
# corresponds to kaiming_uniform_ in PyTorch
nn.init.kaiming_uniform_(conv.weight, a=1)
if not use_gn:
nn.init.constant_(conv.bias, 0)
module = [conv,]
if use_gn:
module.append(group_norm(out_channels))
if use_relu:
module.append(nn.ReLU(inplace=True))
if len(module) > 1:
return nn.Sequential(*module)
return conv
return make_conv
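# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): building small blocks with
# the helpers above. Note that use_gn=True would read cfg.MODEL.GROUP_NORM at
# call time, so this example keeps GroupNorm off to stay config-free.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    block = make_conv3x3(256, 256, use_relu=True)  # Conv2d + ReLU
    fc = make_fc(256 * 7 * 7, 1024)
    conv = conv_with_kaiming_uniform()(256, 256, kernel_size=3)
    x = torch.randn(2, 256, 7, 7)
    print(block(x).shape, conv(x).shape, fc(x.flatten(1)).shape)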
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Miscellaneous utility functions
"""
import torch
def cat(tensors, dim=0):
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
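# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): for a single-element list,
# cat() returns the tensor itself instead of copying it.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    a = torch.ones(2, 3)
    assert cat([a]) is a  # no copy for a single element
    print(cat([a, a], dim=0).shape)  # torch.Size([4, 3])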
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/poolers.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.layers import ROIAlign
from .utils import cat
class LevelMapper(object):
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
"""
def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):
"""
Arguments:
k_min (int)
k_max (int)
canonical_scale (int)
canonical_level (int)
eps (float)
"""
self.k_min = k_min
self.k_max = k_max
self.s0 = canonical_scale
self.lvl0 = canonical_level
self.eps = eps
def __call__(self, boxlists):
"""
Arguments:
boxlists (list[BoxList])
"""
# Compute level ids
s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))
# Eqn.(1) in FPN paper
target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps))
target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
return target_lvls.to(torch.int64) - self.k_min
class Pooler(nn.Module):
"""
Pooler for Detection with or without FPN.
    It currently hard-codes ROIAlign in the implementation,
but that can be made more generic later on.
Also, the requirement of passing the scales is not strictly necessary, as they
can be inferred from the size of the feature map / size of original image,
which is available thanks to the BoxList.
"""
def __init__(self, output_size, scales, sampling_ratio):
"""
Arguments:
output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
sampling_ratio (int): sampling ratio for ROIAlign
"""
super(Pooler, self).__init__()
poolers = []
for scale in scales:
poolers.append(
ROIAlign(
output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
)
)
self.poolers = nn.ModuleList(poolers)
self.output_size = output_size
# get the levels in the feature map by leveraging the fact that the network always
# downsamples by a factor of 2 at each level.
lvl_min = -math.log2(scales[0])
lvl_max = -math.log2(scales[-1])
self.map_levels = LevelMapper(lvl_min, lvl_max)
def convert_to_roi_format(self, boxes):
concat_boxes = cat([b.bbox for b in boxes], dim=0)
device, dtype = concat_boxes.device, concat_boxes.dtype
ids = cat(
[
torch.full((len(b), 1), i, dtype=dtype, device=device)
for i, b in enumerate(boxes)
],
dim=0,
)
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
def forward(self, x, boxes):
"""
Arguments:
x (list[Tensor]): feature maps for each level
boxes (list[BoxList]): boxes to be used to perform the pooling operation.
Returns:
result (Tensor)
"""
num_levels = len(self.poolers)
rois = self.convert_to_roi_format(boxes)
if num_levels == 1:
return self.poolers[0](x[0], rois)
levels = self.map_levels(boxes)
num_rois = len(rois)
num_channels = x[0].shape[1]
output_size_h = self.output_size[0]
output_size_w = self.output_size[1]
dtype, device = x[0].dtype, x[0].device
result = torch.zeros(
(num_rois, num_channels, output_size_h, output_size_w),
dtype=dtype,
device=device,
)
for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
idx_in_level = torch.nonzero(levels == level).squeeze(1)
rois_per_level = rois[idx_in_level]
result[idx_in_level] = pooler(per_level_feature, rois_per_level)
return result
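# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): pooling two made-up RoIs
# from a 4-level FPN (strides 4/8/16/32) on an 800x800 image. The small box
# maps to the finest level and the large one to a coarse level; running this
# needs the compiled ROIAlign extension and the repo's BoxList.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from maskrcnn_benchmark.structures.bounding_box import BoxList

    pooler = Pooler(output_size=(7, 7), scales=(0.25, 0.125, 0.0625, 0.03125),
                    sampling_ratio=2)
    feats = [torch.randn(1, 256, 800 // s, 800 // s) for s in (4, 8, 16, 32)]
    rois = BoxList(torch.tensor([[0.0, 0.0, 64.0, 64.0],
                                 [100.0, 100.0, 612.0, 612.0]]),
                   (800, 800), mode="xyxy")
    print(pooler(feats, [rois]).shape)  # torch.Size([2, 256, 7, 7])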
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# TODO
class BalancedPositiveNegativeSampler(object):
"""
This class samples batches,
ensuring that they contain a fixed proportion of positives
"""
def __init__(self, batch_size_per_image, positive_fraction):
"""
Arguments:
batch_size_per_image (int): number of elements to be selected per image
            positive_fraction (float): percentage of positive elements per batch
"""
self.batch_size_per_image = batch_size_per_image
self.positive_fraction = positive_fraction
def __call__(self, matched_idxs):
"""
Arguments:
            matched_idxs: list of tensors containing -1, 0 or positive values.
Each tensor corresponds to a specific image.
-1 values are ignored, 0 are considered as negatives and > 0 as
positives.
Returns:
pos_idx (list[tensor])
neg_idx (list[tensor])
Returns two lists of binary masks for each image.
The first list contains the positive elements that were selected,
        and the second list the negative examples.
"""
pos_idx = []
neg_idx = []
for matched_idxs_per_image in matched_idxs:
positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1)
negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1)
num_pos = int(self.batch_size_per_image * self.positive_fraction)
# protect against not enough positive examples
num_pos = min(positive.numel(), num_pos)
num_neg = self.batch_size_per_image - num_pos
# protect against not enough negative examples
num_neg = min(negative.numel(), num_neg)
# randomly select positive and negative examples
perm1 = torch.randperm(positive.numel())[:num_pos]
perm2 = torch.randperm(negative.numel())[:num_neg]
pos_idx_per_image = positive[perm1]
neg_idx_per_image = negative[perm2]
# create binary mask from indices
pos_idx_per_image_mask = torch.zeros_like(
matched_idxs_per_image, dtype=torch.bool
)
neg_idx_per_image_mask = torch.zeros_like(
matched_idxs_per_image, dtype=torch.bool
)
pos_idx_per_image_mask[pos_idx_per_image] = 1
neg_idx_per_image_mask[neg_idx_per_image] = 1
pos_idx.append(pos_idx_per_image_mask)
neg_idx.append(neg_idx_per_image_mask)
return pos_idx, neg_idx
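# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): sampling 4 elements per
# image at a 25% positive fraction from toy matched indices (-1 ignored,
# 0 negative, >= 1 positive). Only 1 positive is requested, so 3 negatives
# fill the rest of the batch.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sampler = BalancedPositiveNegativeSampler(batch_size_per_image=4,
                                              positive_fraction=0.25)
    pos_masks, neg_masks = sampler([torch.tensor([1, 0, 0, -1, 2, 0, 0, 0])])
    print(pos_masks[0].sum().item(), neg_masks[0].sum().item())  # 1 3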
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/box_coder.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import torch
class BoxCoder(object):
"""
This class encodes and decodes a set of bounding boxes into
the representation used for training the regressors.
"""
def __init__(self, weights, bbox_xform_clip=None):
"""
Arguments:
weights (4-element tuple)
bbox_xform_clip (float)
"""
self.weights = weights
if bbox_xform_clip is None:
bbox_xform_clip = math.log(1000.0 / 16)
self.bbox_xform_clip = bbox_xform_clip
def encode(self, reference_boxes, proposals):
"""
Encode a set of proposals with respect to some
reference boxes
Arguments:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
TO_REMOVE = 1 # TODO remove
ex_widths = proposals[:, 2] - proposals[:, 0] + TO_REMOVE
ex_heights = proposals[:, 3] - proposals[:, 1] + TO_REMOVE
ex_ctr_x = proposals[:, 0] + 0.5 * ex_widths
ex_ctr_y = proposals[:, 1] + 0.5 * ex_heights
gt_widths = reference_boxes[:, 2] - reference_boxes[:, 0] + TO_REMOVE
gt_heights = reference_boxes[:, 3] - reference_boxes[:, 1] + TO_REMOVE
gt_ctr_x = reference_boxes[:, 0] + 0.5 * gt_widths
gt_ctr_y = reference_boxes[:, 1] + 0.5 * gt_heights
wx, wy, ww, wh = self.weights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
return targets
def decode(self, rel_codes, boxes):
"""
From a set of original boxes and encoded relative box offsets,
get the decoded boxes.
Arguments:
rel_codes (Tensor): encoded boxes
boxes (Tensor): reference boxes.
"""
boxes = boxes.to(rel_codes.dtype)
TO_REMOVE = 1 # TODO remove
widths = boxes[:, 2] - boxes[:, 0] + TO_REMOVE
heights = boxes[:, 3] - boxes[:, 1] + TO_REMOVE
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = self.weights
dx = rel_codes[:, 0::4] / wx
dy = rel_codes[:, 1::4] / wy
dw = rel_codes[:, 2::4] / ww
dh = rel_codes[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=self.bbox_xform_clip)
dh = torch.clamp(dh, max=self.bbox_xform_clip)
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = torch.zeros_like(rel_codes)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1
# y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1
return pred_boxes
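# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): an encode/decode round
# trip with the standard (10., 10., 5., 5.) weights; decode() recovers the
# ground-truth box exactly, including the TO_REMOVE offset convention.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    coder = BoxCoder(weights=(10.0, 10.0, 5.0, 5.0))
    proposals = torch.tensor([[10.0, 10.0, 50.0, 60.0]])
    gt = torch.tensor([[12.0, 8.0, 48.0, 62.0]])
    deltas = coder.encode(gt, proposals)
    print(coder.decode(deltas, proposals))  # tensor([[12.,  8., 48., 62.]])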
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/backbone/resnet.py
# """
# Variant of the resnet module that takes cfg as an argument.
# Example usage. Strings may be specified in the config file.
# model = ResNet(
# "StemWithFixedBatchNorm",
# "BottleneckWithFixedBatchNorm",
# "ResNet50StagesTo4",
# )
# Custom implementations may be written in user code and hooked in via the
# `register_*` functions.
# """
# from collections import namedtuple
# import torch
# import torch.nn.functional as F
# from torch import nn
# from maskrcnn_benchmark.layers import FrozenBatchNorm2d
# from maskrcnn_benchmark.layers import Conv2d
# # ResNet stage specification
# StageSpec = namedtuple(
# "StageSpec",
# [
# "index", # Index of the stage, eg 1, 2, ..,. 5
# "block_count", # Numer of residual blocks in the stage
# "return_features", # True => return the last feature map from this stage
# ],
# )
# # -----------------------------------------------------------------------------
# # Standard ResNet models
# # -----------------------------------------------------------------------------
# # ResNet-50 (including all stages)
# ResNet50StagesTo5 = (
# StageSpec(index=i, block_count=c, return_features=r)
# for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, False), (4, 3, True))
# )
# # ResNet-50 up to stage 4 (excludes stage 5)
# ResNet50StagesTo4 = (
# StageSpec(index=i, block_count=c, return_features=r)
# for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, True))
# )
# # ResNet-50-FPN (including all stages)
# ResNet50FPNStagesTo5 = (
# StageSpec(index=i, block_count=c, return_features=r)
# for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 6, True), (4, 3, True))
# )
# # ResNet-101-FPN (including all stages)
# ResNet101FPNStagesTo5 = (
# StageSpec(index=i, block_count=c, return_features=r)
# for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 23, True), (4, 3, True))
# )
# class ResNet(nn.Module):
# def __init__(self, cfg):
# super(ResNet, self).__init__()
# # If we want to use the cfg in forward(), then we should make a copy
# # of it and store it for later use:
# # self.cfg = cfg.clone()
# # Translate string names to implementations
# stem_module = _STEM_MODULES[cfg.MODEL.RESNETS.STEM_FUNC]
# stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.CONV_BODY]
# transformation_module = _TRANSFORMATION_MODULES[cfg.MODEL.RESNETS.TRANS_FUNC]
# # Construct the stem module
# self.stem = stem_module(cfg)
# # Constuct the specified ResNet stages
# num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
# width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
# in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
# stage2_bottleneck_channels = num_groups * width_per_group
# stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
# self.stages = []
# self.return_features = {}
# for stage_spec in stage_specs:
# name = "layer" + str(stage_spec.index)
# stage2_relative_factor = 2 ** (stage_spec.index - 1)
# bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor
# out_channels = stage2_out_channels * stage2_relative_factor
# module = _make_stage(
# transformation_module,
# in_channels,
# bottleneck_channels,
# out_channels,
# stage_spec.block_count,
# num_groups,
# cfg.MODEL.RESNETS.STRIDE_IN_1X1,
# first_stride=int(stage_spec.index > 1) + 1,
# )
# in_channels = out_channels
# self.add_module(name, module)
# self.stages.append(name)
# self.return_features[name] = stage_spec.return_features
# # Optionally freeze (requires_grad=False) parts of the backbone
# self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT)
# def _freeze_backbone(self, freeze_at):
# for stage_index in range(freeze_at):
# if stage_index == 0:
# m = self.stem # stage 0 is the stem
# else:
# m = getattr(self, "layer" + str(stage_index))
# for p in m.parameters():
# p.requires_grad = False
# def forward(self, x):
# outputs = []
# x = self.stem(x)
# for stage_name in self.stages:
# x = getattr(self, stage_name)(x)
# if self.return_features[stage_name]:
# outputs.append(x)
# return outputs
# class ResNetHead(nn.Module):
# def __init__(
# self,
# block_module,
# stages,
# num_groups=1,
# width_per_group=64,
# stride_in_1x1=True,
# stride_init=None,
# res2_out_channels=256,
# ):
# super(ResNetHead, self).__init__()
# stage2_relative_factor = 2 ** (stages[0].index - 1)
# stage2_bottleneck_channels = num_groups * width_per_group
# out_channels = res2_out_channels * stage2_relative_factor
# in_channels = out_channels // 2
# bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor
# block_module = _TRANSFORMATION_MODULES[block_module]
# self.stages = []
# stride = stride_init
# for stage in stages:
# name = "layer" + str(stage.index)
# if not stride:
# stride = int(stage.index > 1) + 1
# module = _make_stage(
# block_module,
# in_channels,
# bottleneck_channels,
# out_channels,
# stage.block_count,
# num_groups,
# stride_in_1x1,
# first_stride=stride,
# )
# stride = None
# self.add_module(name, module)
# self.stages.append(name)
# def forward(self, x):
# for stage in self.stages:
# x = getattr(self, stage)(x)
# return x
# def _make_stage(
# transformation_module,
# in_channels,
# bottleneck_channels,
# out_channels,
# block_count,
# num_groups,
# stride_in_1x1,
# first_stride,
# ):
# blocks = []
# stride = first_stride
# for _ in range(block_count):
# blocks.append(
# transformation_module(
# in_channels,
# bottleneck_channels,
# out_channels,
# num_groups,
# stride_in_1x1,
# stride,
# )
# )
# stride = 1
# in_channels = out_channels
# return nn.Sequential(*blocks)
# class BottleneckWithFixedBatchNorm(nn.Module):
# def __init__(
# self,
# in_channels,
# bottleneck_channels,
# out_channels,
# num_groups=1,
# stride_in_1x1=True,
# stride=1,
# ):
# super(BottleneckWithFixedBatchNorm, self).__init__()
# self.downsample = None
# if in_channels != out_channels:
# self.downsample = nn.Sequential(
# Conv2d(
# in_channels, out_channels, kernel_size=1, stride=stride, bias=False
# ),
# FrozenBatchNorm2d(out_channels),
# )
# # The original MSRA ResNet models have stride in the first 1x1 conv
# # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
# # stride in the 3x3 conv
# stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
# self.conv1 = Conv2d(
# in_channels,
# bottleneck_channels,
# kernel_size=1,
# stride=stride_1x1,
# bias=False,
# )
# self.bn1 = FrozenBatchNorm2d(bottleneck_channels)
# # TODO: specify init for the above
# self.conv2 = Conv2d(
# bottleneck_channels,
# bottleneck_channels,
# kernel_size=3,
# stride=stride_3x3,
# padding=1,
# bias=False,
# groups=num_groups,
# )
# self.bn2 = FrozenBatchNorm2d(bottleneck_channels)
# self.conv3 = Conv2d(
# bottleneck_channels, out_channels, kernel_size=1, bias=False
# )
# self.bn3 = FrozenBatchNorm2d(out_channels)
# def forward(self, x):
# residual = x
# out = self.conv1(x)
# out = self.bn1(out)
# out = F.relu_(out)
# out = self.conv2(out)
# out = self.bn2(out)
# out = F.relu_(out)
# out0 = self.conv3(out)
# out = self.bn3(out0)
# if self.downsample is not None:
# residual = self.downsample(x)
# out += residual
# out = F.relu_(out)
# return out
# class StemWithFixedBatchNorm(nn.Module):
# def __init__(self, cfg):
# super(StemWithFixedBatchNorm, self).__init__()
# out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
# self.conv1 = Conv2d(
# 3, out_channels, kernel_size=7, stride=2, padding=3, bias=False
# )
# self.bn1 = FrozenBatchNorm2d(out_channels)
# def forward(self, x):
# x = self.conv1(x)
# x = self.bn1(x)
# x = F.relu_(x)
# x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
# return x
# _TRANSFORMATION_MODULES = {"BottleneckWithFixedBatchNorm": BottleneckWithFixedBatchNorm}
# _STEM_MODULES = {"StemWithFixedBatchNorm": StemWithFixedBatchNorm}
# _STAGE_SPECS = {
# "R-50-C4": ResNet50StagesTo4,
# "R-50-C5": ResNet50StagesTo5,
# "R-50-FPN": ResNet50FPNStagesTo5,
# "R-101-FPN": ResNet101FPNStagesTo5,
# }
# def register_transformation_module(module_name, module):
# _register_generic(_TRANSFORMATION_MODULES, module_name, module)
# def register_stem_module(module_name, module):
# _register_generic(_STEM_MODULES, module_name, module)
# def register_stage_spec(stage_spec_name, stage_spec):
# _register_generic(_STAGE_SPECS, stage_spec_name, stage_spec)
# def _register_generic(module_dict, module_name, module):
# assert module_name not in module_dict
# module_dict[module_name] = module
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Variant of the resnet module that takes cfg as an argument.
Example usage. Strings may be specified in the config file.
model = ResNet(
"StemWithFixedBatchNorm",
"BottleneckWithFixedBatchNorm",
"ResNet50StagesTo4",
)
OR:
model = ResNet(
"StemWithGN",
"BottleneckWithGN",
"ResNet50StagesTo4",
)
Custom implementations may be written in user code and hooked in via the
`register_*` functions.
"""
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.layers import FrozenBatchNorm2d
from maskrcnn_benchmark.layers import Conv2d
from maskrcnn_benchmark.layers import DFConv2d
from maskrcnn_benchmark.modeling.make_layers import group_norm
from maskrcnn_benchmark.utils.registry import Registry
# ResNet stage specification
StageSpec = namedtuple(
"StageSpec",
[
"index", # Index of the stage, eg 1, 2, ..,. 5
"block_count", # Number of residual blocks in the stage
"return_features", # True => return the last feature map from this stage
],
)
# -----------------------------------------------------------------------------
# Standard ResNet models
# -----------------------------------------------------------------------------
# ResNet-50 (including all stages)
ResNet50StagesTo5 = tuple(
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, False), (4, 3, True))
)
# ResNet-50 up to stage 4 (excludes stage 5)
ResNet50StagesTo4 = tuple(
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, True))
)
# ResNet-101 (including all stages)
ResNet101StagesTo5 = tuple(
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 23, False), (4, 3, True))
)
# ResNet-101 up to stage 4 (excludes stage 5)
ResNet101StagesTo4 = tuple(
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 23, True))
)
# ResNet-50-FPN (including all stages)
ResNet50FPNStagesTo5 = tuple(
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 6, True), (4, 3, True))
)
# ResNet-101-FPN (including all stages)
ResNet101FPNStagesTo5 = tuple(
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 23, True), (4, 3, True))
)
# ResNet-152-FPN (including all stages)
ResNet152FPNStagesTo5 = tuple(
StageSpec(index=i, block_count=c, return_features=r)
for (i, c, r) in ((1, 3, True), (2, 8, True), (3, 36, True), (4, 3, True))
)
class ResNet(nn.Module):
def __init__(self, cfg):
super(ResNet, self).__init__()
# If we want to use the cfg in forward(), then we should make a copy
# of it and store it for later use:
# self.cfg = cfg.clone()
# Translate string names to implementations
stem_module = _STEM_MODULES[cfg.MODEL.RESNETS.STEM_FUNC]
stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.CONV_BODY]
transformation_module = _TRANSFORMATION_MODULES[cfg.MODEL.RESNETS.TRANS_FUNC]
# Construct the stem module
self.stem = stem_module(cfg)
        # Construct the specified ResNet stages
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
stage2_bottleneck_channels = num_groups * width_per_group
stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
self.stages = []
self.return_features = {}
for stage_spec in stage_specs:
name = "layer" + str(stage_spec.index)
stage2_relative_factor = 2 ** (stage_spec.index - 1)
bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor
out_channels = stage2_out_channels * stage2_relative_factor
            stage_with_dcn = cfg.MODEL.RESNETS.STAGE_WITH_DCN[stage_spec.index - 1]
module = _make_stage(
transformation_module,
in_channels,
bottleneck_channels,
out_channels,
stage_spec.block_count,
num_groups,
cfg.MODEL.RESNETS.STRIDE_IN_1X1,
first_stride=int(stage_spec.index > 1) + 1,
dcn_config={
"stage_with_dcn": stage_with_dcn,
"with_modulated_dcn": cfg.MODEL.RESNETS.WITH_MODULATED_DCN,
"deformable_groups": cfg.MODEL.RESNETS.DEFORMABLE_GROUPS,
}
)
in_channels = out_channels
self.add_module(name, module)
self.stages.append(name)
self.return_features[name] = stage_spec.return_features
# Optionally freeze (requires_grad=False) parts of the backbone
self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT)
def _freeze_backbone(self, freeze_at):
if freeze_at < 0:
return
for stage_index in range(freeze_at):
if stage_index == 0:
m = self.stem # stage 0 is the stem
else:
m = getattr(self, "layer" + str(stage_index))
for p in m.parameters():
p.requires_grad = False
def forward(self, x):
outputs = []
x = self.stem(x)
for stage_name in self.stages:
x = getattr(self, stage_name)(x)
if self.return_features[stage_name]:
outputs.append(x)
return outputs
class ResNetHead(nn.Module):
def __init__(
self,
block_module,
stages,
num_groups=1,
width_per_group=64,
stride_in_1x1=True,
stride_init=None,
res2_out_channels=256,
dilation=1,
dcn_config={}
):
super(ResNetHead, self).__init__()
stage2_relative_factor = 2 ** (stages[0].index - 1)
stage2_bottleneck_channels = num_groups * width_per_group
out_channels = res2_out_channels * stage2_relative_factor
in_channels = out_channels // 2
bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor
block_module = _TRANSFORMATION_MODULES[block_module]
self.stages = []
stride = stride_init
for stage in stages:
name = "layer" + str(stage.index)
if not stride:
stride = int(stage.index > 1) + 1
module = _make_stage(
block_module,
in_channels,
bottleneck_channels,
out_channels,
stage.block_count,
num_groups,
stride_in_1x1,
first_stride=stride,
dilation=dilation,
dcn_config=dcn_config
)
stride = None
self.add_module(name, module)
self.stages.append(name)
self.out_channels = out_channels
def forward(self, x):
for stage in self.stages:
x = getattr(self, stage)(x)
return x
def _make_stage(
transformation_module,
in_channels,
bottleneck_channels,
out_channels,
block_count,
num_groups,
stride_in_1x1,
first_stride,
dilation=1,
dcn_config={}
):
blocks = []
stride = first_stride
for _ in range(block_count):
blocks.append(
transformation_module(
in_channels,
bottleneck_channels,
out_channels,
num_groups,
stride_in_1x1,
stride,
dilation=dilation,
dcn_config=dcn_config
)
)
stride = 1
in_channels = out_channels
return nn.Sequential(*blocks)
class Bottleneck(nn.Module):
def __init__(
self,
in_channels,
bottleneck_channels,
out_channels,
num_groups,
stride_in_1x1,
stride,
dilation,
norm_func,
dcn_config
):
super(Bottleneck, self).__init__()
self.downsample = None
if in_channels != out_channels:
down_stride = stride if dilation == 1 else 1
self.downsample = nn.Sequential(
Conv2d(
in_channels, out_channels,
kernel_size=1, stride=down_stride, bias=False
),
norm_func(out_channels),
)
for modules in [self.downsample,]:
for l in modules.modules():
if isinstance(l, Conv2d):
nn.init.kaiming_uniform_(l.weight, a=1)
if dilation > 1:
stride = 1 # reset to be 1
# The original MSRA ResNet models have stride in the first 1x1 conv
# The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
# stride in the 3x3 conv
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
)
self.bn1 = norm_func(bottleneck_channels)
# TODO: specify init for the above
with_dcn = dcn_config.get("stage_with_dcn", False)
if with_dcn:
deformable_groups = dcn_config.get("deformable_groups", 1)
with_modulated_dcn = dcn_config.get("with_modulated_dcn", False)
self.conv2 = DFConv2d(
bottleneck_channels,
bottleneck_channels,
with_modulated_dcn=with_modulated_dcn,
kernel_size=3,
stride=stride_3x3,
groups=num_groups,
dilation=dilation,
deformable_groups=deformable_groups,
bias=False
)
else:
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=dilation,
bias=False,
groups=num_groups,
dilation=dilation
)
nn.init.kaiming_uniform_(self.conv2.weight, a=1)
self.bn2 = norm_func(bottleneck_channels)
self.conv3 = Conv2d(
bottleneck_channels, out_channels, kernel_size=1, bias=False
)
self.bn3 = norm_func(out_channels)
for l in [self.conv1, self.conv3,]:
nn.init.kaiming_uniform_(l.weight, a=1)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = F.relu_(out)
out = self.conv2(out)
out = self.bn2(out)
out = F.relu_(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = F.relu_(out)
return out
class BaseStem(nn.Module):
def __init__(self, cfg, norm_func):
super(BaseStem, self).__init__()
out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
self.conv1 = Conv2d(
3, out_channels, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = norm_func(out_channels)
for l in [self.conv1,]:
nn.init.kaiming_uniform_(l.weight, a=1)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu_(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
class BottleneckWithFixedBatchNorm(Bottleneck):
def __init__(
self,
in_channels,
bottleneck_channels,
out_channels,
num_groups=1,
stride_in_1x1=True,
stride=1,
dilation=1,
dcn_config={}
):
super(BottleneckWithFixedBatchNorm, self).__init__(
in_channels=in_channels,
bottleneck_channels=bottleneck_channels,
out_channels=out_channels,
num_groups=num_groups,
stride_in_1x1=stride_in_1x1,
stride=stride,
dilation=dilation,
norm_func=FrozenBatchNorm2d,
dcn_config=dcn_config
)
class StemWithFixedBatchNorm(BaseStem):
def __init__(self, cfg):
super(StemWithFixedBatchNorm, self).__init__(
cfg, norm_func=FrozenBatchNorm2d
)
class BottleneckWithGN(Bottleneck):
def __init__(
self,
in_channels,
bottleneck_channels,
out_channels,
num_groups=1,
stride_in_1x1=True,
stride=1,
dilation=1,
dcn_config={}
):
super(BottleneckWithGN, self).__init__(
in_channels=in_channels,
bottleneck_channels=bottleneck_channels,
out_channels=out_channels,
num_groups=num_groups,
stride_in_1x1=stride_in_1x1,
stride=stride,
dilation=dilation,
norm_func=group_norm,
dcn_config=dcn_config
)
class StemWithGN(BaseStem):
def __init__(self, cfg):
super(StemWithGN, self).__init__(cfg, norm_func=group_norm)
_TRANSFORMATION_MODULES = Registry({
"BottleneckWithFixedBatchNorm": BottleneckWithFixedBatchNorm,
"BottleneckWithGN": BottleneckWithGN,
})
_STEM_MODULES = Registry({
"StemWithFixedBatchNorm": StemWithFixedBatchNorm,
"StemWithGN": StemWithGN,
})
_STAGE_SPECS = Registry({
"R-50-C4": ResNet50StagesTo4,
"R-50-C5": ResNet50StagesTo5,
"R-101-C4": ResNet101StagesTo4,
"R-101-C5": ResNet101StagesTo5,
"R-50-FPN": ResNet50FPNStagesTo5,
"R-50-FPN-RETINANET": ResNet50FPNStagesTo5,
"R-101-FPN": ResNet101FPNStagesTo5,
"R-101-FPN-RETINANET": ResNet101FPNStagesTo5,
"R-152-FPN": ResNet152FPNStagesTo5,
})
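# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): building the body from the
# repo's default config and overriding CONV_BODY; assumes those defaults
# define STEM_FUNC/TRANS_FUNC and the DCN options referenced above. R-50-FPN
# returns one feature map per stage, at strides 4, 8, 16 and 32.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from maskrcnn_benchmark.config import cfg

    cfg.merge_from_list(["MODEL.BACKBONE.CONV_BODY", "R-50-FPN"])
    body = ResNet(cfg)
    feats = body(torch.randn(1, 3, 224, 224))
    print([tuple(f.shape) for f in feats])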
# File: MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/backbone/backbone.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
from torch import nn
from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform
from . import fpn as fpn_module
from . import resnet  # needed by build_resnet_backbone / build_resnet_fpn_p3p7_backbone below
@registry.BACKBONES.register("R-50-C4")
@registry.BACKBONES.register("R-50-C5")
@registry.BACKBONES.register("R-101-C4")
@registry.BACKBONES.register("R-101-C5")
def build_resnet_backbone(cfg):
body = resnet.ResNet(cfg)
model = nn.Sequential(OrderedDict([("body", body)]))
model.out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
return model
@registry.BACKBONES.register("R-18-FPN")
@registry.BACKBONES.register("R-34-FPN")
@registry.BACKBONES.register("R-50-FPN")
@registry.BACKBONES.register("R-101-FPN")
@registry.BACKBONES.register("R-152-FPN")
def build_resnet_fpn_backbone(cfg):
if cfg.MODEL.RESNET34:
from . import resnet34 as resnet
body = resnet.ResNet(layers=cfg.MODEL.RESNETS.LAYERS)
else:
from . import resnet
body = resnet.ResNet(cfg)
in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
fpn = fpn_module.FPN(
in_channels_list=[
in_channels_stage2,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
],
out_channels=out_channels,
conv_block=conv_with_kaiming_uniform(
cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU
),
top_blocks=fpn_module.LastLevelMaxPool(),
)
model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
model.out_channels = out_channels
return model
@registry.BACKBONES.register("R-50-FPN-RETINANET")
@registry.BACKBONES.register("R-101-FPN-RETINANET")
def build_resnet_fpn_p3p7_backbone(cfg):
    # local import for the same reason as in build_resnet_backbone above
    from . import resnet
    body = resnet.ResNet(cfg)
in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
in_channels_p6p7 = in_channels_stage2 * 8 if cfg.MODEL.RETINANET.USE_C5 \
else out_channels
fpn = fpn_module.FPN(
in_channels_list=[
0,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
],
out_channels=out_channels,
conv_block=conv_with_kaiming_uniform(
cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU
),
top_blocks=fpn_module.LastLevelP6P7(in_channels_p6p7, out_channels),
)
model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
model.out_channels = out_channels
return model
def build_backbone(cfg):
assert cfg.MODEL.BACKBONE.CONV_BODY in registry.BACKBONES, \
"cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry".format(
cfg.MODEL.BACKBONE.CONV_BODY
)
return registry.BACKBONES[cfg.MODEL.BACKBONE.CONV_BODY](cfg)
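# Illustrative usage sketch (assumes a fully populated cfg; not part of the
# original file):
#   backbone = build_backbone(cfg)
#   features = backbone(images.tensors)  # e.g. 5 FPN maps for "R-50-FPN"
#   backbone.out_channels                # channel count shared by all levels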
| 4,395 | 32.30303 | 89 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/backbone/resnet34.py | import torch
import torch.nn.functional as F
from torch import nn
import math
from maskrcnn_benchmark.layers import FrozenBatchNorm2d
from maskrcnn_benchmark.layers import Conv2d
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = FrozenBatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = FrozenBatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block=BasicBlock, layers=[3, 4, 6, 3]):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = FrozenBatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
for m in self.modules():
if isinstance(m, Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, FrozenBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
FrozenBatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x2 = self.layer1(x)
x3 = self.layer2(x2)
x4 = self.layer3(x3)
x5 = self.layer4(x4)
return [x2, x3, x4, x5]
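    # Note (added for clarity): the four returned maps have strides 4, 8, 16
    # and 32 with respect to the input, matching the four levels the FPN in
    # this repo consumes.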
| 2,729 | 26.857143 | 67 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/backbone/fpn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
class FPN(nn.Module):
"""
Module that adds FPN on top of a list of feature maps.
The feature maps are currently supposed to be in increasing depth
order, and must be consecutive
"""
def __init__(
self, in_channels_list, out_channels, conv_block, top_blocks=None
):
"""
        Arguments:
            in_channels_list (list[int]): number of channels for each feature map that
                will be fed; an entry of 0 means that level has no lateral connection
            out_channels (int): number of channels of the FPN representation
            conv_block (callable): factory used to build the lateral (1x1) and
                output (3x3) convolution blocks
            top_blocks (nn.Module or None): if provided, an extra operation will
                be performed on the output of the last (smallest resolution)
                FPN output, and the result will extend the result list
"""
super(FPN, self).__init__()
self.inner_blocks = []
self.layer_blocks = []
for idx, in_channels in enumerate(in_channels_list, 1):
inner_block = "fpn_inner{}".format(idx)
layer_block = "fpn_layer{}".format(idx)
if in_channels == 0:
continue
inner_block_module = conv_block(in_channels, out_channels, 1)
layer_block_module = conv_block(out_channels, out_channels, 3, 1)
self.add_module(inner_block, inner_block_module)
self.add_module(layer_block, layer_block_module)
self.inner_blocks.append(inner_block)
self.layer_blocks.append(layer_block)
self.top_blocks = top_blocks
def forward(self, x):
"""
Arguments:
x (list[Tensor]): feature maps for each feature level.
Returns:
results (tuple[Tensor]): feature maps after FPN layers.
They are ordered from highest resolution first.
"""
last_inner = getattr(self, self.inner_blocks[-1])(x[-1])
results = []
results.append(getattr(self, self.layer_blocks[-1])(last_inner))
for feature, inner_block, layer_block in zip(
x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1]
):
if not inner_block:
continue
inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest")
inner_lateral = getattr(self, inner_block)(feature)
# TODO use size instead of scale to make it robust to different sizes
# inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:],
# mode='bilinear', align_corners=False)
last_inner = inner_lateral + inner_top_down
results.insert(0, getattr(self, layer_block)(last_inner))
if isinstance(self.top_blocks, LastLevelP6P7):
last_results = self.top_blocks(x[-1], results[-1])
results.extend(last_results)
elif isinstance(self.top_blocks, LastLevelMaxPool):
last_results = self.top_blocks(results[-1])
results.extend(last_results)
return tuple(results)
class LastLevelMaxPool(nn.Module):
def forward(self, x):
return [F.max_pool2d(x, 1, 2, 0)]
class LastLevelP6P7(nn.Module):
"""
This module is used in RetinaNet to generate extra layers, P6 and P7.
"""
def __init__(self, in_channels, out_channels):
super(LastLevelP6P7, self).__init__()
self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
for module in [self.p6, self.p7]:
nn.init.kaiming_uniform_(module.weight, a=1)
nn.init.constant_(module.bias, 0)
self.use_P5 = in_channels == out_channels
def forward(self, c5, p5):
x = p5 if self.use_P5 else c5
p6 = self.p6(x)
p7 = self.p7(F.relu(p6))
return [p6, p7]
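# Illustrative usage sketch (channel counts assume a ResNet-50 body; not part
# of the original file; conv_with_kaiming_uniform comes from
# maskrcnn_benchmark.modeling.make_layers):
#   fpn = FPN(in_channels_list=[256, 512, 1024, 2048], out_channels=256,
#             conv_block=conv_with_kaiming_uniform(False, False),
#             top_blocks=LastLevelMaxPool())
#   p2, p3, p4, p5, p6 = fpn([c2, c3, c4, c5])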
| 7,331 | 40.659091 | 88 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/detector/generalized_rcnn.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Implements the Generalized R-CNN framework
"""
import torch
from torch import nn
from maskrcnn_benchmark.structures.image_list import to_image_list
from ..backbone import build_backbone
from ..rpn.rpn import build_rpn
from ..segmentation.segmentation import build_segmentation
from ..roi_heads.roi_heads import build_roi_heads
class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN. Currently supports boxes and masks.
It consists of three main parts:
- backbone
    - rpn (or a segmentation-based proposal module when MODEL.SEG_ON is set)
- heads: takes the features + the proposals from the RPN and computes
detections / masks from it.
"""
def __init__(self, cfg):
super(GeneralizedRCNN, self).__init__()
self.cfg = cfg
self.backbone = build_backbone(cfg)
if cfg.MODEL.SEG_ON:
self.proposal = build_segmentation(cfg)
else:
self.proposal = build_rpn(cfg)
if cfg.MODEL.TRAIN_DETECTION_ONLY:
self.roi_heads = None
else:
self.roi_heads = build_roi_heads(cfg)
def forward(self, images, targets=None):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
During testing, it returns list[BoxList] contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training and targets is None:
raise ValueError("In training mode, targets should be passed")
images = to_image_list(images)
features = self.backbone(images.tensors)
if self.cfg.MODEL.SEG_ON and not self.training:
(proposals, seg_results), fuse_feature = self.proposal(images, features, targets)
else:
if self.cfg.MODEL.SEG_ON:
(proposals, proposal_losses), fuse_feature = self.proposal(images, features, targets)
else:
proposals, proposal_losses = self.proposal(images, features, targets)
if self.roi_heads is not None:
if self.cfg.MODEL.SEG_ON and self.cfg.MODEL.SEG.USE_FUSE_FEATURE:
x, result, detector_losses = self.roi_heads(fuse_feature, proposals, targets)
else:
x, result, detector_losses = self.roi_heads(features, proposals, targets)
else:
# RPN-only models don't have roi_heads
result = proposals
detector_losses = {}
if self.training:
losses = {}
if self.roi_heads is not None:
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
else:
if self.cfg.MODEL.SEG_ON:
return result, proposals, seg_results
else:
return result
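# Illustrative usage sketch (assumes a valid cfg and preprocessed inputs; not
# part of the original file):
#   model = GeneralizedRCNN(cfg)
#   model.train(); losses = model(images, targets)   # dict[str, Tensor] of losses
#   model.eval();  result = model(images)            # list[BoxList], or a tuple
#                                                    # including seg outputs if SEG_ON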
| 3,837 | 35.903846 | 101 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/segmentation/inference.py | #!/usr/bin/env python3
import numpy as np
import torch
import cv2
import pyclipper
from shapely.geometry import Polygon
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist, cat_boxlist_gt
from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
import random
class SEGPostProcessor(torch.nn.Module):
"""
    Performs post-processing on the outputs of the segmentation head, turning
    the predicted probability maps into box/polygon proposals for the heads
"""
def __init__(
self,
top_n,
binary_thresh,
box_thresh,
min_size,
cfg,
):
"""
Arguments:
top_n (int)
binary_thresh (float)
box_thresh (float)
min_size (int)
"""
super(SEGPostProcessor, self).__init__()
self.top_n = top_n
self.binary_thresh = binary_thresh
self.box_thresh = box_thresh
self.min_size = min_size
self.cfg = cfg
def add_gt_proposals(self, proposals, targets):
"""
Arguments:
proposals: list[BoxList]
targets: list[BoxList]
"""
if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE:
gt_boxes = [target.copy_with_fields(['masks']) for target in targets]
else:
gt_boxes = [target.copy_with_fields([]) for target in targets]
        # unlike the RPN version, no dummy "objectness" field is added to the
        # ground-truth boxes here (cat_boxlist_gt is used instead of cat_boxlist)
proposals = [
cat_boxlist_gt([proposal, gt_box])
for proposal, gt_box in zip(proposals, gt_boxes)
]
return proposals
def aug_tensor_proposals(self, boxes):
# boxes: N * 4
boxes = boxes.float()
N = boxes.shape[0]
device = boxes.device
aug_boxes = torch.zeros((4, N, 4), device=device)
aug_boxes[0, :, :] = boxes.clone()
xmin, ymin, xmax, ymax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
x_center = (xmin + xmax) / 2.
y_center = (ymin + ymax) / 2.
width = xmax - xmin
height = ymax - ymin
        for i in range(3):
            choice = random.random()
            if choice < 0.5:
                # shrink or expand by a uniform ratio in [0.5, 2); torch.rand
                # (not randn) keeps this in line with the scalar version in
                # aug_proposals below and avoids negative box sizes
                ratio = (torch.rand((N,), device=device) * 3 + 1) / 2.
                height = height * ratio
                ratio = (torch.rand((N,), device=device) * 3 + 1) / 2.
                width = width * ratio
            else:
                # translate by up to +/- 2x the box size, again uniformly
                move_x = width * (torch.rand((N,), device=device) * 4 - 2)
                move_y = height * (torch.rand((N,), device=device) * 4 - 2)
x_center += move_x
y_center += move_y
boxes[:, 0] = x_center - width / 2
boxes[:, 2] = x_center + width / 2
boxes[:, 1] = y_center - height / 2
boxes[:, 3] = y_center + height / 2
aug_boxes[i+1, :, :] = boxes.clone()
return aug_boxes.reshape((-1, 4))
def forward_for_single_feature_map(self, pred, image_shapes):
"""
Arguments:
pred: tensor of size N, 1, H, W
"""
device = pred.device
bitmap = self.binarize(pred)
N, height, width = pred.shape[0], pred.shape[2], pred.shape[3]
        bitmap_numpy = bitmap.cpu().numpy()  # binarized maps, shape (N, C, H, W)
        pred_map_numpy = pred.cpu().numpy()
boxes_batch = []
rotated_boxes_batch = []
polygons_batch = []
scores_batch = []
for batch_index in range(N):
image_shape = image_shapes[batch_index]
boxes, scores, rotated_boxes, polygons = self.boxes_from_bitmap(
pred_map_numpy[batch_index],
bitmap_numpy[batch_index], width, height)
boxes = boxes.to(device)
if self.training and self.cfg.MODEL.SEG.AUG_PROPOSALS:
boxes = self.aug_tensor_proposals(boxes)
if boxes.shape[0] > self.top_n:
boxes = boxes[:self.top_n, :]
# _, top_index = scores.topk(self.top_n, 0, sorted=False)
# boxes = boxes[top_index, :]
# scores = scores[top_index]
# boxlist = BoxList(boxes, (width, height), mode="xyxy")
boxlist = BoxList(boxes, (image_shape[1], image_shape[0]), mode="xyxy")
if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE:
masks = SegmentationMask(polygons, (image_shape[1], image_shape[0]))
boxlist.add_field('masks', masks)
boxlist = boxlist.clip_to_image(remove_empty=False)
# boxlist = remove_small_boxes(boxlist, self.min_size)
boxes_batch.append(boxlist)
rotated_boxes_batch.append(rotated_boxes)
polygons_batch.append(polygons)
scores_batch.append(scores)
return boxes_batch, rotated_boxes_batch, polygons_batch, scores_batch
def forward(self, seg_output, image_shapes, targets=None):
"""
Arguments:
            seg_output (Tensor): predicted probability maps of shape (N, 1, H, W)
Returns:
boxlists (list[BoxList]): bounding boxes
"""
sampled_boxes = []
boxes_batch, rotated_boxes_batch, polygons_batch, scores_batch = self.forward_for_single_feature_map(seg_output, image_shapes)
if not self.training:
return boxes_batch, rotated_boxes_batch, polygons_batch, scores_batch
sampled_boxes.append(boxes_batch)
boxlists = list(zip(*sampled_boxes))
boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]
# append ground-truth bboxes to proposals
if self.training and targets is not None:
boxlists = self.add_gt_proposals(boxlists, targets)
return boxlists
def binarize(self, pred):
if self.cfg.MODEL.SEG.USE_MULTIPLE_THRESH:
binary_maps = []
for thre in self.cfg.MODEL.SEG.MULTIPLE_THRESH:
binary_map = pred > thre
binary_maps.append(binary_map)
return torch.cat(binary_maps, dim=1)
else:
return pred > self.binary_thresh
def boxes_from_bitmap(self, pred, bitmap, dest_width, dest_height):
"""
        pred: score map of shape (1, H, W)
        bitmap: binarized map(s) of shape (C, H, W) with values in {0, 1};
            C > 1 when MODEL.SEG.USE_MULTIPLE_THRESH is enabled
        """
pred = pred[0]
height, width = bitmap.shape[1], bitmap.shape[2]
boxes = []
scores = []
rotated_boxes = []
polygons = []
contours_all = []
        for i in range(bitmap.shape[0]):
            # cv2.findContours returns 3 values in OpenCV 3.x but 2 values in
            # OpenCV 4.x; try the 3-value form first and fall back on failure
            try:
                _, contours, _ = cv2.findContours(
                    (bitmap[i] * 255).astype(np.uint8),
                    cv2.RETR_LIST,
                    cv2.CHAIN_APPROX_NONE,
                )
            except ValueError:
                contours, _ = cv2.findContours(
                    (bitmap[i] * 255).astype(np.uint8),
                    cv2.RETR_LIST,
                    cv2.CHAIN_APPROX_NONE,
                )
contours_all.extend(contours)
for contour in contours_all:
epsilon = 0.01 * cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, epsilon, True)
polygon = approx.reshape((-1, 2))
points, sside = self.get_mini_boxes(contour)
if sside < self.min_size:
continue
points = np.array(points)
score = self.box_score_fast(pred, points)
if not self.training and self.box_thresh > score:
continue
if polygon.shape[0] > 2:
polygon = self.unclip(polygon, expand_ratio=self.cfg.MODEL.SEG.EXPAND_RATIO)
if len(polygon) > 1:
continue
else:
continue
# polygon = polygon.reshape(-1, 2)
polygon = polygon.reshape(-1)
box = self.unclip(points, expand_ratio=self.cfg.MODEL.SEG.BOX_EXPAND_RATIO).reshape(-1, 2)
box = np.array(box)
box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
box[:, 1] = np.clip(
np.round(box[:, 1] / height * dest_height), 0, dest_height
)
min_x, min_y = min(box[:, 0]), min(box[:, 1])
max_x, max_y = max(box[:, 0]), max(box[:, 1])
horizontal_box = torch.from_numpy(np.array([min_x, min_y, max_x, max_y]))
boxes.append(horizontal_box)
scores.append(score)
rotated_box, _ = self.get_mini_boxes(box.reshape(-1, 1, 2))
rotated_box = np.array(rotated_box)
rotated_boxes.append(rotated_box)
polygons.append([polygon])
if len(boxes) == 0:
boxes = [torch.from_numpy(np.array([0, 0, 0, 0]))]
scores = [0.]
boxes = torch.stack(boxes)
scores = torch.from_numpy(np.array(scores))
return boxes, scores, rotated_boxes, polygons
def aug_proposals(self, box):
xmin, ymin, xmax, ymax = box[0], box[1], box[2], box[3]
x_center = int((xmin + xmax) / 2.)
y_center = int((ymin + ymax) / 2.)
width = xmax - xmin
height = ymax - ymin
choice = random.random()
if choice < 0.5:
# shrink or expand
ratio = (random.random() * 3 + 1) / 2.
height = height * ratio
ratio = (random.random() * 3 + 1) / 2.
width = width * ratio
else:
move_x = width * (random.random() * 4 - 2)
move_y = height * (random.random() * 4 - 2)
x_center += move_x
y_center += move_y
xmin = int(x_center - width / 2)
xmax = int(x_center + width / 2)
ymin = int(y_center - height / 2)
ymax = int(y_center + height / 2)
return [xmin, ymin, xmax, ymax]
def unclip(self, box, expand_ratio=1.5):
poly = Polygon(box)
distance = poly.area * expand_ratio / poly.length
offset = pyclipper.PyclipperOffset()
offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
expanded = np.array(offset.Execute(distance))
return expanded
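    # Note (added for clarity): the offset distance above follows the DB-style
    # box expansion, D = A * r / L, with A the polygon area, L its perimeter
    # and r the expand ratio.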
def get_mini_boxes(self, contour):
bounding_box = cv2.minAreaRect(contour)
points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])
index_1, index_2, index_3, index_4 = 0, 1, 2, 3
if points[1][1] > points[0][1]:
index_1 = 0
index_4 = 1
else:
index_1 = 1
index_4 = 0
if points[3][1] > points[2][1]:
index_2 = 2
index_3 = 3
else:
index_2 = 3
index_3 = 2
box = [points[index_1], points[index_2], points[index_3], points[index_4]]
return box, min(bounding_box[1])
def box_score(self, bitmap, box):
"""
        naive version of box score computation,
        kept only to illustrate the principle behind box_score_fast below.
"""
mask = np.zeros_like(bitmap, dtype=np.uint8)
cv2.fillPoly(mask, box.reshape(1, 4, 2).astype(np.int32), 1)
return cv2.mean(bitmap, mask)[0]
def box_score_fast(self, bitmap, _box):
h, w = bitmap.shape[:2]
box = _box.copy()
        xmin = np.clip(np.floor(box[:, 0].min()).astype(int), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype(int), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype(int), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype(int), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
box[:, 0] = box[:, 0] - xmin
box[:, 1] = box[:, 1] - ymin
cv2.fillPoly(mask, box.reshape(1, 4, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin : ymax + 1, xmin : xmax + 1], mask)[0]
def make_seg_postprocessor(config, is_train):
top_n = config.MODEL.SEG.TOP_N_TRAIN
if not is_train:
top_n = config.MODEL.SEG.TOP_N_TEST
binary_thresh = config.MODEL.SEG.BINARY_THRESH
box_thresh = config.MODEL.SEG.BOX_THRESH
min_size = config.MODEL.SEG.MIN_SIZE
box_selector = SEGPostProcessor(
top_n=top_n,
binary_thresh=binary_thresh,
box_thresh=box_thresh,
min_size=min_size,
cfg = config
)
return box_selector
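# Illustrative usage sketch (not part of the original file):
#   post = make_seg_postprocessor(cfg, is_train=False)
#   post.eval()  # eval-mode forward returns (boxes, rotated_boxes, polygons, scores)
#   boxes, rotated_boxes, polygons, scores = post(seg_preds, image_shapes)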
| 15,089 | 38.815303 | 148 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/segmentation/loss.py | #!/usr/bin/env python3
"""
This file contains specific functions for computing losses on the SEG
file
"""
import torch
class SEGLossComputation(object):
"""
This class computes the SEG loss.
"""
def __init__(self, cfg):
self.eps = 1e-6
self.cfg = cfg
def __call__(self, preds, targets):
"""
Arguments:
            preds (Tensor): predicted probability maps of shape (N, 1, H, W)
            targets (list[BoxList]): ground-truth instances carrying a "masks" field
Returns:
seg_loss (Tensor)
"""
image_size = (preds.shape[2], preds.shape[3])
segm_targets, masks = self.prepare_targets(targets, image_size)
device = preds.device
segm_targets = segm_targets.float().to(device)
masks = masks.float().to(device)
seg_loss = self.dice_loss(preds, segm_targets, masks)
return seg_loss
def dice_loss(self, pred, gt, m):
intersection = torch.sum(pred * gt * m)
union = torch.sum(pred * m) + torch.sum(gt * m) + self.eps
loss = 1 - 2.0 * intersection / union
return loss
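    # For reference: with prediction P, ground truth G and training mask M, the
    # value above is 1 - 2 * sum(P*G*M) / (sum(P*M) + sum(G*M) + eps), i.e. a
    # soft Dice loss restricted to the valid (non-ignored) pixels.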
def project_masks_on_image(self, mask_polygons, labels, shrink_ratio, image_size):
seg_map, training_mask = mask_polygons.convert_seg_map(
labels, shrink_ratio, image_size, self.cfg.MODEL.SEG.IGNORE_DIFFICULT
)
return torch.from_numpy(seg_map), torch.from_numpy(training_mask)
def prepare_targets(self, targets, image_size):
segms = []
training_masks = []
for target_per_image in targets:
segmentation_masks = target_per_image.get_field("masks")
labels = target_per_image.get_field("labels")
seg_maps_per_image, training_masks_per_image = self.project_masks_on_image(
segmentation_masks, labels, self.cfg.MODEL.SEG.SHRINK_RATIO, image_size
)
segms.append(seg_maps_per_image)
training_masks.append(training_masks_per_image)
return torch.stack(segms), torch.stack(training_masks)
def make_seg_loss_evaluator(cfg):
loss_evaluator = SEGLossComputation(cfg)
return loss_evaluator
| 2,122 | 31.661538 | 87 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/segmentation/segmentation.py | #!/usr/bin/env python3
import torch
from torch import nn
from .inference import make_seg_postprocessor
from .loss import make_seg_loss_evaluator
def conv3x3(in_planes, out_planes, stride=1, has_bias=False):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=has_bias
)
def conv3x3_bn_relu(in_planes, out_planes, stride=1, has_bias=False):
return nn.Sequential(
conv3x3(in_planes, out_planes, stride),
nn.BatchNorm2d(out_planes),
nn.ReLU(inplace=True),
)
class SEGHead(nn.Module):
"""
Adds a simple SEG Head with pixel-level prediction
"""
def __init__(self, in_channels, cfg):
"""
Arguments:
in_channels (int): number of channels of the input feature
"""
super(SEGHead, self).__init__()
self.cfg = cfg
ndim = 256
self.fpn_out5 = nn.Sequential(
conv3x3(ndim, 64), nn.Upsample(scale_factor=8, mode="nearest")
)
self.fpn_out4 = nn.Sequential(
conv3x3(ndim, 64), nn.Upsample(scale_factor=4, mode="nearest")
)
self.fpn_out3 = nn.Sequential(
conv3x3(ndim, 64), nn.Upsample(scale_factor=2, mode="nearest")
)
self.fpn_out2 = conv3x3(ndim, 64)
self.seg_out = nn.Sequential(
conv3x3_bn_relu(in_channels, 64, 1),
nn.ConvTranspose2d(64, 64, 2, 2),
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, 1, 2, 2),
nn.Sigmoid(),
)
if self.cfg.MODEL.SEG.USE_PPM:
# PPM Module
pool_scales=(2, 4, 8)
fc_dim = 256
self.ppm_pooling = []
self.ppm_conv = []
for scale in pool_scales:
self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
self.ppm_conv.append(nn.Sequential(
nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True)
))
self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
self.ppm_conv = nn.ModuleList(self.ppm_conv)
self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, ndim, 1)
self.ppm_conv.apply(self.weights_init)
self.ppm_last_conv.apply(self.weights_init)
self.fpn_out5.apply(self.weights_init)
self.fpn_out4.apply(self.weights_init)
self.fpn_out3.apply(self.weights_init)
self.fpn_out2.apply(self.weights_init)
self.seg_out.apply(self.weights_init)
def forward(self, x):
if self.cfg.MODEL.SEG.USE_PPM:
conv5 = x[-2]
input_size = conv5.size()
ppm_out = [conv5]
for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
ppm_out.append(pool_conv(nn.functional.interpolate(
pool_scale(conv5),
(input_size[2], input_size[3]),
mode='bilinear', align_corners=False)))
ppm_out = torch.cat(ppm_out, 1)
f = self.ppm_last_conv(ppm_out)
else:
f = x[-2]
# p5 = self.fpn_out5(x[-2])
p5 = self.fpn_out5(f)
p4 = self.fpn_out4(x[-3])
p3 = self.fpn_out3(x[-4])
p2 = self.fpn_out2(x[-5])
fuse = torch.cat((p5, p4, p3, p2), 1)
out = self.seg_out(fuse)
return out, fuse
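    # Shape note (added for clarity): the four p* maps are all at 1/4 of the
    # input resolution, "fuse" is their 256-channel concatenation, and "out" is
    # a single-channel probability map restored to full input resolution by the
    # two stride-2 deconvolutions in seg_out.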
def weights_init(self, m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
nn.init.kaiming_normal_(m.weight.data)
elif classname.find("BatchNorm") != -1:
m.weight.data.fill_(1.0)
m.bias.data.fill_(1e-4)
class SEGModule(torch.nn.Module):
"""
    Module for segmentation-based proposal computation. Takes feature maps from
    the backbone and computes text-region proposals and the segmentation loss.
"""
def __init__(self, cfg):
super(SEGModule, self).__init__()
self.cfg = cfg.clone()
in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
head = SEGHead(in_channels, cfg)
box_selector_train = make_seg_postprocessor(cfg, is_train=True)
box_selector_test = make_seg_postprocessor(cfg, is_train=False)
loss_evaluator = make_seg_loss_evaluator(cfg)
self.head = head
self.box_selector_train = box_selector_train
self.box_selector_test = box_selector_test
self.loss_evaluator = loss_evaluator
def forward(self, images, features, targets=None):
"""
Arguments:
images (ImageList): images for which we want to compute the predictions
            features (list[Tensor]): feature maps from the FPN
            targets (list[BoxList]): ground-truth instances used to build the
                segmentation target maps
        Returns:
            boxes (list[BoxList]): proposals predicted from the segmentation map, one BoxList per
image.
losses (dict[Tensor]): the losses for the model during training. During
testing, it is an empty dict.
"""
preds, fuse_feature = self.head(features)
image_shapes = images.get_sizes()
if self.training:
return self._forward_train(preds, targets, image_shapes), [fuse_feature]
else:
return self._forward_test(preds, image_shapes), [fuse_feature]
def _forward_train(self, preds, targets, image_shapes):
        # The segmentation map must be transformed into boxes for detection,
        # which are then sampled into a training batch.
with torch.no_grad():
boxes = self.box_selector_train(preds, image_shapes, targets)
loss_seg = self.loss_evaluator(preds, targets)
losses = {"loss_seg": loss_seg}
return boxes, losses
def _forward_test(self, preds, image_shapes):
boxes, rotated_boxes, polygons, scores = self.box_selector_test(preds, image_shapes)
seg_results = {'rotated_boxes': rotated_boxes, 'polygons': polygons, 'preds': preds, 'scores': scores}
return boxes, seg_results
def build_segmentation(cfg):
"""
This gives the gist of it. Not super important because it doesn't change as much
"""
return SEGModule(cfg)
| 6,573 | 34.923497 | 110 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/rpn/inference.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes
class RPNPostProcessor(torch.nn.Module):
"""
Performs post-processing on the outputs of the RPN boxes, before feeding the
proposals to the heads
"""
def __init__(
self,
pre_nms_top_n,
post_nms_top_n,
nms_thresh,
min_size,
box_coder=None,
fpn_post_nms_top_n=None,
):
"""
Arguments:
pre_nms_top_n (int)
post_nms_top_n (int)
nms_thresh (float)
min_size (int)
box_coder (BoxCoder)
fpn_post_nms_top_n (int)
"""
super(RPNPostProcessor, self).__init__()
self.pre_nms_top_n = pre_nms_top_n
self.post_nms_top_n = post_nms_top_n
self.nms_thresh = nms_thresh
self.min_size = min_size
if box_coder is None:
box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
self.box_coder = box_coder
if fpn_post_nms_top_n is None:
fpn_post_nms_top_n = post_nms_top_n
self.fpn_post_nms_top_n = fpn_post_nms_top_n
def add_gt_proposals(self, proposals, targets):
"""
Arguments:
proposals: list[BoxList]
targets: list[BoxList]
"""
# Get the device we're operating on
device = proposals[0].bbox.device
gt_boxes = [target.copy_with_fields([]) for target in targets]
# later cat of bbox requires all fields to be present for all bbox
# so we need to add a dummy for objectness that's missing
for gt_box in gt_boxes:
gt_box.add_field("objectness", torch.ones(len(gt_box), device=device))
proposals = [
cat_boxlist((proposal, gt_box))
for proposal, gt_box in zip(proposals, gt_boxes)
]
return proposals
def forward_for_single_feature_map(self, anchors, objectness, box_regression):
"""
Arguments:
anchors: list[BoxList]
objectness: tensor of size N, A, H, W
box_regression: tensor of size N, A * 4, H, W
"""
device = objectness.device
N, A, H, W = objectness.shape
# put in the same format as anchors
objectness = objectness.permute(0, 2, 3, 1).reshape(N, -1)
objectness = objectness.sigmoid()
box_regression = box_regression.view(N, -1, 4, H, W).permute(0, 3, 4, 1, 2)
box_regression = box_regression.reshape(N, -1, 4)
num_anchors = A * H * W
pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)
batch_idx = torch.arange(N, device=device)[:, None]
box_regression = box_regression[batch_idx, topk_idx]
image_shapes = [box.size for box in anchors]
concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]
proposals = self.box_coder.decode(
box_regression.view(-1, 4), concat_anchors.view(-1, 4)
)
proposals = proposals.view(N, -1, 4)
result = []
for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
boxlist = BoxList(proposal, im_shape, mode="xyxy")
boxlist.add_field("objectness", score)
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = remove_small_boxes(boxlist, self.min_size)
boxlist = boxlist_nms(
boxlist,
self.nms_thresh,
max_proposals=self.post_nms_top_n,
score_field="objectness",
)
result.append(boxlist)
return result
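    # Shape walk-through (added for clarity): objectness is flattened to
    # (N, H*W*A) and box_regression to (N, H*W*A, 4) so both index anchors in
    # the same order; the top pre_nms_top_n anchors per image are then decoded
    # into proposals and NMS-ed down to at most post_nms_top_n boxes.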
def forward(self, anchors, objectness, box_regression, targets=None):
"""
Arguments:
anchors: list[list[BoxList]]
objectness: list[tensor]
box_regression: list[tensor]
Returns:
boxlists (list[BoxList]): the post-processed anchors, after
applying box decoding and NMS
"""
sampled_boxes = []
num_levels = len(objectness)
anchors = list(zip(*anchors))
for a, o, b in zip(anchors, objectness, box_regression):
sampled_boxes.append(self.forward_for_single_feature_map(a, o, b))
boxlists = list(zip(*sampled_boxes))
boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]
if num_levels > 1:
boxlists = self.select_over_all_levels(boxlists)
# append ground-truth bboxes to proposals
if self.training and targets is not None:
boxlists = self.add_gt_proposals(boxlists, targets)
return boxlists
def select_over_all_levels(self, boxlists):
num_images = len(boxlists)
# different behavior during training and during testing:
# during training, post_nms_top_n is over *all* the proposals combined, while
# during testing, it is over the proposals for each image
# TODO resolve this difference and make it consistent. It should be per image,
# and not per batch
if self.training:
objectness = torch.cat(
[boxlist.get_field("objectness") for boxlist in boxlists], dim=0
)
box_sizes = [len(boxlist) for boxlist in boxlists]
post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))
_, inds_sorted = torch.topk(objectness, post_nms_top_n, dim=0, sorted=True)
inds_mask = torch.zeros_like(objectness, dtype=torch.bool)
inds_mask[inds_sorted] = 1
inds_mask = inds_mask.split(box_sizes)
for i in range(num_images):
boxlists[i] = boxlists[i][inds_mask[i]]
else:
for i in range(num_images):
objectness = boxlists[i].get_field("objectness")
post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))
_, inds_sorted = torch.topk(
objectness, post_nms_top_n, dim=0, sorted=True
)
boxlists[i] = boxlists[i][inds_sorted]
return boxlists
def make_rpn_postprocessor(config, rpn_box_coder, is_train):
fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN
if not is_train:
fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST
pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TRAIN
post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TRAIN
if not is_train:
pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TEST
post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TEST
nms_thresh = config.MODEL.RPN.NMS_THRESH
min_size = config.MODEL.RPN.MIN_SIZE
box_selector = RPNPostProcessor(
pre_nms_top_n=pre_nms_top_n,
post_nms_top_n=post_nms_top_n,
nms_thresh=nms_thresh,
min_size=min_size,
box_coder=rpn_box_coder,
fpn_post_nms_top_n=fpn_post_nms_top_n,
)
return box_selector
| 7,490 | 35.720588 | 87 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/rpn/anchor_generator.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import numpy as np
import torch
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
class BufferList(nn.Module):
"""
Similar to nn.ParameterList, but for buffers
"""
def __init__(self, buffers=None):
super(BufferList, self).__init__()
if buffers is not None:
self.extend(buffers)
def extend(self, buffers):
offset = len(self)
for i, buffer in enumerate(buffers):
self.register_buffer(str(offset + i), buffer)
return self
def __len__(self):
return len(self._buffers)
def __iter__(self):
return iter(self._buffers.values())
class AnchorGenerator(nn.Module):
"""
For a set of image sizes and feature maps, computes a set
of anchors
"""
def __init__(
self,
sizes=(128, 256, 512),
aspect_ratios=(0.5, 1.0, 2.0),
anchor_strides=(8, 16, 32),
straddle_thresh=0,
):
super(AnchorGenerator, self).__init__()
if len(anchor_strides) == 1:
anchor_stride = anchor_strides[0]
cell_anchors = [
generate_anchors(anchor_stride, sizes, aspect_ratios).float()
]
else:
if len(anchor_strides) != len(sizes):
raise RuntimeError("FPN should have #anchor_strides == #sizes")
cell_anchors = [
generate_anchors(anchor_stride, (size,), aspect_ratios).float()
for anchor_stride, size in zip(anchor_strides, sizes)
]
self.strides = anchor_strides
self.cell_anchors = BufferList(cell_anchors)
self.straddle_thresh = straddle_thresh
def num_anchors_per_location(self):
return [len(cell_anchors) for cell_anchors in self.cell_anchors]
def grid_anchors(self, grid_sizes):
anchors = []
for size, stride, base_anchors in zip(
grid_sizes, self.strides, self.cell_anchors
):
grid_height, grid_width = size
device = base_anchors.device
shifts_x = torch.arange(
0, grid_width * stride, step=stride, dtype=torch.float32, device=device
)
shifts_y = torch.arange(
0, grid_height * stride, step=stride, dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
anchors.append(
(shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)
)
return anchors
def add_visibility_to(self, boxlist):
image_width, image_height = boxlist.size
anchors = boxlist.bbox
if self.straddle_thresh >= 0:
inds_inside = (
(anchors[..., 0] >= -self.straddle_thresh)
& (anchors[..., 1] >= -self.straddle_thresh)
& (anchors[..., 2] < image_width + self.straddle_thresh)
& (anchors[..., 3] < image_height + self.straddle_thresh)
)
else:
device = anchors.device
inds_inside = torch.ones(anchors.shape[0], dtype=torch.bool, device=device)
boxlist.add_field("visibility", inds_inside)
def forward(self, image_list, feature_maps):
grid_height, grid_width = feature_maps[0].shape[-2:]
grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
anchors = []
for i, (image_height, image_width) in enumerate(image_list.image_sizes):
anchors_in_image = []
for anchors_per_feature_map in anchors_over_all_feature_maps:
boxlist = BoxList(
anchors_per_feature_map, (image_width, image_height), mode="xyxy"
)
self.add_visibility_to(boxlist)
anchors_in_image.append(boxlist)
anchors.append(anchors_in_image)
return anchors
def make_anchor_generator(config):
anchor_sizes = config.MODEL.RPN.ANCHOR_SIZES
aspect_ratios = config.MODEL.RPN.ASPECT_RATIOS
anchor_stride = config.MODEL.RPN.ANCHOR_STRIDE
straddle_thresh = config.MODEL.RPN.STRADDLE_THRESH
if config.MODEL.RPN.USE_FPN:
assert len(anchor_stride) == len(
anchor_sizes
), "FPN should have len(ANCHOR_STRIDE) == len(ANCHOR_SIZES)"
else:
assert len(anchor_stride) == 1, "Non-FPN should have a single ANCHOR_STRIDE"
anchor_generator = AnchorGenerator(
anchor_sizes, aspect_ratios, anchor_stride, straddle_thresh
)
return anchor_generator
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
# Verify that we compute the same anchors as Shaoqing's matlab implementation:
#
# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
# >> anchors
#
# anchors =
#
# -83 -39 100 56
# -175 -87 192 104
# -359 -183 376 200
# -55 -55 72 72
# -119 -119 136 136
# -247 -247 264 264
# -35 -79 52 96
# -79 -167 96 184
# -167 -343 184 360
# array([[ -83., -39., 100., 56.],
# [-175., -87., 192., 104.],
# [-359., -183., 376., 200.],
# [ -55., -55., 72., 72.],
# [-119., -119., 136., 136.],
# [-247., -247., 264., 264.],
# [ -35., -79., 52., 96.],
# [ -79., -167., 96., 184.],
# [-167., -343., 184., 360.]])
def generate_anchors(
stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)
):
"""Generates a matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
are centered on stride / 2, have (approximate) sqrt areas of the specified
sizes, and aspect ratios as given.
"""
return _generate_anchors(
stride,
        np.array(sizes, dtype=float) / stride,
        np.array(aspect_ratios, dtype=float),
)
def _generate_anchors(base_size, scales, aspect_ratios):
"""Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, base_size - 1, base_size - 1) window.
"""
    anchor = np.array([1, 1, base_size, base_size], dtype=float) - 1
anchors = _ratio_enum(anchor, aspect_ratios)
anchors = np.vstack(
[_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]
)
return torch.from_numpy(anchors)
def _whctrs(anchor):
"""Return width, height, x center, and y center for an anchor (window)."""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack(
(
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1),
)
)
return anchors
def _ratio_enum(anchor, ratios):
"""Enumerate a set of anchors for each aspect ratio wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _scale_enum(anchor, scales):
"""Enumerate a set of anchors for each scale wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
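# Quick illustrative check (not part of the original file):
#   a = generate_anchors(stride=16, sizes=(32, 64), aspect_ratios=(0.5, 1, 2))
#   a.shape  # torch.Size([6, 4]): one (x1, y1, x2, y2) row per ratio/scale pair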
| 8,907 | 32.742424 | 88 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/rpn/loss.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This file contains specific functions for computing losses on the RPN
file
"""
import torch
from torch.nn import functional as F
from ..balanced_positive_negative_sampler import BalancedPositiveNegativeSampler
from ..utils import cat
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
class RPNLossComputation(object):
"""
This class computes the RPN loss.
"""
def __init__(self, proposal_matcher, fg_bg_sampler, box_coder):
"""
Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder)
"""
# self.target_preparator = target_preparator
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder
def match_targets_to_anchors(self, anchor, target):
match_quality_matrix = boxlist_iou(target, anchor)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# RPN doesn't need any fields from target
# for creating the labels, so clear them all
target = target.copy_with_fields([])
# get the targets corresponding GT for each anchor
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, anchors, targets):
labels = []
regression_targets = []
for anchors_per_image, targets_per_image in zip(anchors, targets):
matched_targets = self.match_targets_to_anchors(
anchors_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_idxs >= 0
labels_per_image = labels_per_image.to(dtype=torch.float32)
# discard anchors that go out of the boundaries of the image
labels_per_image[~anchors_per_image.get_field("visibility")] = -1
# discard indices that are between thresholds
inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[inds_to_discard] = -1
# compute regression targets
regression_targets_per_image = self.box_coder.encode(
matched_targets.bbox, anchors_per_image.bbox
)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
return labels, regression_targets
def __call__(self, anchors, objectness, box_regression, targets):
"""
Arguments:
anchors (list[BoxList])
objectness (list[Tensor])
box_regression (list[Tensor])
targets (list[BoxList])
Returns:
objectness_loss (Tensor)
            box_loss (Tensor)
"""
anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
labels, regression_targets = self.prepare_targets(anchors, targets)
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)
sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
objectness_flattened = []
box_regression_flattened = []
# for each feature level, permute the outputs to make them be in the
# same format as the labels. Note that the labels are computed for
# all feature levels concatenated, so we keep the same representation
# for the objectness and the box_regression
for objectness_per_level, box_regression_per_level in zip(
objectness, box_regression
):
N, A, H, W = objectness_per_level.shape
objectness_per_level = objectness_per_level.permute(0, 2, 3, 1).reshape(
N, -1
)
box_regression_per_level = box_regression_per_level.view(N, -1, 4, H, W)
box_regression_per_level = box_regression_per_level.permute(0, 3, 4, 1, 2)
box_regression_per_level = box_regression_per_level.reshape(N, -1, 4)
objectness_flattened.append(objectness_per_level)
box_regression_flattened.append(box_regression_per_level)
# concatenate on the first dimension (representing the feature levels), to
# take into account the way the labels were generated (with all feature maps
# being concatenated as well)
objectness = cat(objectness_flattened, dim=1).reshape(-1)
box_regression = cat(box_regression_flattened, dim=1).reshape(-1, 4)
labels = torch.cat(labels, dim=0)
regression_targets = torch.cat(regression_targets, dim=0)
box_loss = smooth_l1_loss(
box_regression[sampled_pos_inds],
regression_targets[sampled_pos_inds],
beta=1.0 / 9,
size_average=False,
) / (sampled_inds.numel())
objectness_loss = F.binary_cross_entropy_with_logits(
objectness[sampled_inds], labels[sampled_inds]
)
return objectness_loss, box_loss
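    # For reference (added comment): smooth_l1_loss computes, for each element
    # d of (input - target):
    #   0.5 * d**2 / beta   if |d| < beta
    #   |d| - 0.5 * beta    otherwise
    # with beta = 1/9 here, following the original Detectron configuration.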
def make_rpn_loss_evaluator(cfg, box_coder):
matcher = Matcher(
cfg.MODEL.RPN.FG_IOU_THRESHOLD,
cfg.MODEL.RPN.BG_IOU_THRESHOLD,
allow_low_quality_matches=True,
)
fg_bg_sampler = BalancedPositiveNegativeSampler(
cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, cfg.MODEL.RPN.POSITIVE_FRACTION
)
loss_evaluator = RPNLossComputation(matcher, fg_bg_sampler, box_coder)
return loss_evaluator
| 6,123 | 39.026144 | 87 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/rpn/rpn.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from .loss import make_rpn_loss_evaluator
from .anchor_generator import make_anchor_generator
from .inference import make_rpn_postprocessor
class RPNHead(nn.Module):
"""
Adds a simple RPN Head with classification and regression heads
"""
def __init__(self, in_channels, num_anchors):
"""
Arguments:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
"""
super(RPNHead, self).__init__()
self.conv = nn.Conv2d(
in_channels, in_channels, kernel_size=3, stride=1, padding=1
)
self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
self.bbox_pred = nn.Conv2d(
in_channels, num_anchors * 4, kernel_size=1, stride=1
)
for l in [self.conv, self.cls_logits, self.bbox_pred]:
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
def forward(self, x):
logits = []
bbox_reg = []
for feature in x:
t = F.relu(self.conv(feature))
logits.append(self.cls_logits(t))
bbox_reg.append(self.bbox_pred(t))
return logits, bbox_reg
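    # Shape note (added for clarity): for each input feature map of shape
    # (N, C, H, W), the matching logits entry has shape (N, A, H, W) and the
    # bbox_reg entry (N, 4 * A, H, W), with A anchors per spatial location.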
class RPNModule(torch.nn.Module):
"""
Module for RPN computation. Takes feature maps from the backbone and RPN
proposals and losses. Works for both FPN and non-FPN.
"""
def __init__(self, cfg):
super(RPNModule, self).__init__()
self.cfg = cfg.clone()
anchor_generator = make_anchor_generator(cfg)
in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
head = RPNHead(in_channels, anchor_generator.num_anchors_per_location()[0])
rpn_box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
box_selector_train = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=True)
box_selector_test = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=False)
loss_evaluator = make_rpn_loss_evaluator(cfg, rpn_box_coder)
self.anchor_generator = anchor_generator
self.head = head
self.box_selector_train = box_selector_train
self.box_selector_test = box_selector_test
self.loss_evaluator = loss_evaluator
def forward(self, images, features, targets=None):
"""
Arguments:
images (ImageList): images for which we want to compute the predictions
features (list[Tensor]): features computed from the images that are
used for computing the predictions. Each tensor in the list
correspond to different feature levels
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per
image.
losses (dict[Tensor]): the losses for the model during training. During
testing, it is an empty dict.
"""
objectness, rpn_box_regression = self.head(features)
anchors = self.anchor_generator(images, features)
if self.training:
return self._forward_train(anchors, objectness, rpn_box_regression, targets)
else:
return self._forward_test(anchors, objectness, rpn_box_regression)
def _forward_train(self, anchors, objectness, rpn_box_regression, targets):
if self.cfg.MODEL.RPN_ONLY:
# When training an RPN-only model, the loss is determined by the
# predicted objectness and rpn_box_regression values and there is
# no need to transform the anchors into predicted boxes; this is an
# optimization that avoids the unnecessary transformation.
boxes = anchors
else:
# For end-to-end models, anchors must be transformed into boxes and
# sampled into a training batch.
with torch.no_grad():
boxes = self.box_selector_train(
anchors, objectness, rpn_box_regression, targets
)
loss_objectness, loss_rpn_box_reg = self.loss_evaluator(
anchors, objectness, rpn_box_regression, targets
)
losses = {
"loss_objectness": loss_objectness,
"loss_rpn_box_reg": loss_rpn_box_reg,
}
return boxes, losses
def _forward_test(self, anchors, objectness, rpn_box_regression):
boxes = self.box_selector_test(anchors, objectness, rpn_box_regression)
if self.cfg.MODEL.RPN_ONLY:
            # For end-to-end models, the RPN proposals are an intermediate state
            # and we don't bother to sort them in decreasing score order. For
            # RPN-only models, the proposals are the final output and we return
            # them in high-to-low confidence order.
inds = [
box.get_field("objectness").sort(descending=True)[1] for box in boxes
]
boxes = [box[ind] for box, ind in zip(boxes, inds)]
return boxes, {}
def build_rpn(cfg):
"""
This gives the gist of it. Not super important because it doesn't change as much
"""
return RPNModule(cfg)
| 5,453 | 37.680851 | 88 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/roi_heads.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .box_head.box_head import build_roi_box_head
from .mask_head.mask_head import build_roi_mask_head
class CombinedROIHeads(torch.nn.ModuleDict):
"""
Combines a set of individual heads (for box prediction or masks) into a single
head.
"""
def __init__(self, cfg, heads):
super(CombinedROIHeads, self).__init__(heads)
self.cfg = cfg.clone()
if cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
self.mask.feature_extractor = self.box.feature_extractor
def forward(self, features, proposals, targets=None):
losses = {}
# TODO rename x to roi_box_features, if it doesn't increase memory consumption
x, detections, loss_box = self.box(features, proposals, targets)
losses.update(loss_box)
if self.cfg.MODEL.MASK_ON or self.cfg.SEQUENCE.SEQ_ON:
mask_features = features
# optimization: during training, if we share the feature extractor between
# the box and the mask heads,
# then we can reuse the features already computed
if (
self.training
and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR
):
mask_features = x
            # During training, self.box() will return the unaltered proposals
            # as "detections"; this makes the API consistent during training
            # and testing
x, detections, loss_mask = self.mask(mask_features, detections, targets)
if loss_mask is not None:
losses.update(loss_mask)
return x, detections, losses
def build_roi_heads(cfg):
# individually create the heads, that will be combined together
# afterwards
roi_heads = []
if not cfg.MODEL.RPN_ONLY:
roi_heads.append(("box", build_roi_box_head(cfg)))
if cfg.MODEL.MASK_ON or cfg.SEQUENCE.SEQ_ON:
roi_heads.append(("mask", build_roi_mask_head(cfg)))
# combine individual heads in a single module
if roi_heads:
roi_heads = CombinedROIHeads(cfg, roi_heads)
return roi_heads
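# A minimal sketch of how the combined head is driven, assuming a loaded cfg
# and backbone features (names and shapes are hypothetical):
#
#   heads = build_roi_heads(cfg)
#   x, detections, losses = heads(features, proposals, targets)
#
# During training `detections` are the (sampled) proposals and `losses` holds
# the box/mask/sequence terms; at test time `losses` is an empty dict.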
| 2,237 | 36.932203 | 86 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import torch
from PIL import Image
from torch import nn
import cv2
from torch.nn import functional as F
from maskrcnn_benchmark.structures.bounding_box import BoxList
# TODO check if we want to return a single BoxList or a composite
# object
class MaskPostProcessor(nn.Module):
"""
From the results of the CNN, post process the masks
by taking the mask corresponding to the class with max
probability (which are of fixed size and directly output
by the CNN) and return the masks in the mask field of the BoxList.
If a masker object is passed, it will additionally
    project the masks in the image according to the locations in boxes.
"""
def __init__(self, masker=None):
super(MaskPostProcessor, self).__init__()
self.masker = masker
def forward(self, x, boxes):
"""
Arguments:
x (Tensor): the mask logits
boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra field mask
"""
mask_prob = x.sigmoid()
        # select masks corresponding to the predicted classes
num_masks = x.shape[0]
labels = [bbox.get_field("labels") for bbox in boxes]
labels = torch.cat(labels)
index = torch.arange(num_masks, device=labels.device)
mask_prob = mask_prob[index, labels][:, None]
if self.masker:
mask_prob = self.masker(mask_prob, boxes)
boxes_per_image = [len(box) for box in boxes]
mask_prob = mask_prob.split(boxes_per_image, dim=0)
results = []
for prob, box in zip(mask_prob, boxes):
bbox = BoxList(box.bbox, box.size, mode="xyxy")
for field in box.fields():
bbox.add_field(field, box.get_field(field))
bbox.add_field("mask", prob)
results.append(bbox)
return results
# TODO
class CharMaskPostProcessor(nn.Module):
"""
From the results of the CNN, post process the masks
by taking the mask corresponding to the class with max
probability (which are of fixed size and directly output
by the CNN) and return the masks in the mask field of the BoxList.
If a masker object is passed, it will additionally
    project the masks in the image according to the locations in boxes.
"""
def __init__(self, cfg, masker=None):
super(CharMaskPostProcessor, self).__init__()
self.masker = masker
self.cfg = cfg
def forward(self, x, char_mask, boxes, seq_outputs=None, seq_scores=None, detailed_seq_scores=None):
"""
Arguments:
x (Tensor): the mask logits
char_mask (Tensor): the char mask logits
boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra field mask
"""
if x is not None:
mask_prob = x.sigmoid()
mask_prob = mask_prob.squeeze(dim=1)[:, None]
if self.masker:
mask_prob = self.masker(mask_prob, boxes)
boxes_per_image = [len(box) for box in boxes]
if x is not None:
mask_prob = mask_prob.split(boxes_per_image, dim=0)
if self.cfg.MODEL.CHAR_MASK_ON:
char_mask_softmax = F.softmax(char_mask, dim=1)
            char_results = {
                'char_mask': char_mask_softmax.cpu().numpy(),
                'boxes': boxes[0].bbox.cpu().numpy(),
                'seq_outputs': seq_outputs,
                'seq_scores': seq_scores,
                'detailed_seq_scores': detailed_seq_scores,
            }
else:
            char_results = {
                'char_mask': None,
                'boxes': boxes[0].bbox.cpu().numpy(),
                'seq_outputs': seq_outputs,
                'seq_scores': seq_scores,
                'detailed_seq_scores': detailed_seq_scores,
            }
results = []
if x is not None:
for prob, box in zip(mask_prob, boxes):
bbox = BoxList(box.bbox, box.size, mode="xyxy")
for field in box.fields():
bbox.add_field(field, box.get_field(field))
bbox.add_field("mask", prob)
results.append(bbox)
else:
for box in boxes:
bbox = BoxList(box.bbox, box.size, mode="xyxy")
for field in box.fields():
bbox.add_field(field, box.get_field(field))
results.append(bbox)
return [results, char_results]
class MaskPostProcessorCOCOFormat(MaskPostProcessor):
"""
From the results of the CNN, post process the results
so that the masks are pasted in the image, and
additionally convert the results to COCO format.
"""
def forward(self, x, boxes):
import pycocotools.mask as mask_util
import numpy as np
results = super(MaskPostProcessorCOCOFormat, self).forward(x, boxes)
for result in results:
masks = result.get_field("mask").cpu()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
result.add_field("mask", rles)
return results
# the next two functions should be merged inside Masker
# but are kept here for the moment while we need them
# temporarily for paste_mask_in_image
def expand_boxes(boxes, scale):
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale[1]
h_half *= scale[0]
boxes_exp = torch.zeros_like(boxes)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
def expand_masks(mask, padding):
N = mask.shape[0]
M_H = mask.shape[-2]
M_W = mask.shape[-1]
pad2 = 2 * padding
scale = (float(M_H + pad2) / M_H, float(M_W + pad2) / M_W)
padded_mask = mask.new_zeros((N, 1, M_H + pad2, M_W + pad2))
padded_mask[:, :, padding:-padding, padding:-padding] = mask
return padded_mask, scale
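# Worked example (exact values under these inputs): padding a 28x28 mask by
# one pixel yields scale == (30/28, 30/28); with scale = (1.5, 1.5), the box
# [0, 0, 10, 20] expands symmetrically about its center (5, 10):
#
#   expand_boxes(torch.tensor([[0., 0., 10., 20.]]), (1.5, 1.5))
#   # tensor([[-2.5000, -5.0000, 12.5000, 25.0000]])
#
#   m, scale = expand_masks(torch.ones(1, 28, 28), padding=1)
#   # m.shape == (1, 1, 30, 30); scale == (30/28, 30/28)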
def paste_mask_in_image(mask, box, im_h, im_w, thresh=0.5, padding=1):
    # These ops need to run on the CPU, where fp16 isn't supported; cast to float to avoid this
mask = mask.float()
box = box.float()
padded_mask, scale = expand_masks(mask[None], padding=padding)
mask = padded_mask[0, 0]
box = expand_boxes(box[None], scale)[0]
box = box.numpy().astype(np.int32)
TO_REMOVE = 1
w = box[2] - box[0] + TO_REMOVE
h = box[3] - box[1] + TO_REMOVE
w = max(w, 1)
h = max(h, 1)
mask = Image.fromarray(mask.cpu().numpy())
mask = mask.resize((w, h), resample=Image.BILINEAR)
mask = np.array(mask, copy=False)
if thresh >= 0:
mask = np.array(mask > thresh, dtype=np.uint8)
mask = torch.from_numpy(mask)
else:
# for visualization and debugging, we also
# allow it to return an unmodified mask
mask = torch.from_numpy(mask * 255).to(torch.bool)
im_mask = torch.zeros((im_h, im_w), dtype=torch.bool)
x_0 = max(box[0], 0)
x_1 = min(box[2] + 1, im_w)
y_0 = max(box[1], 0)
y_1 = min(box[3] + 1, im_h)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])
]
return im_mask
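# A quick shape sketch, assuming a single 28x28 probability mask pasted into
# a 100x120 image (values hypothetical):
#
#   m = paste_mask_in_image(torch.rand(28, 28),
#                           torch.tensor([10., 20., 40., 60.]),
#                           im_h=100, im_w=120)
#   # m is a (100, 120) bool tensor; pixels outside the expanded box are False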
class Masker(object):
"""
Projects a set of masks in an image on the locations
specified by the bounding boxes
"""
def __init__(self, threshold=0.5, padding=1):
self.threshold = threshold
self.padding = padding
def forward_single_image(self, masks, boxes):
boxes = boxes.convert("xyxy")
im_w, im_h = boxes.size
res = [
paste_mask_in_image(mask[0], box, im_h, im_w, self.threshold, self.padding)
for mask, box in zip(masks, boxes.bbox)
]
if len(res) > 0:
res = torch.stack(res, dim=0)[:, None]
else:
res = masks.new_empty((0, 1, masks.shape[-2], masks.shape[-1]))
return res
def __call__(self, masks, boxes):
# TODO do this properly
if isinstance(boxes, BoxList):
boxes = [boxes]
assert len(boxes) == 1, "Only single image batch supported"
result = self.forward_single_image(masks, boxes[0])
return result
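    # A minimal usage sketch, assuming `boxes` is the BoxList for one image
    # and `masks` are the (N, 1, M, M) probability maps from the predictor:
    #
    #   masker = Masker(threshold=0.5, padding=1)
    #   full_masks = masker(masks, boxes)  # (N, 1, im_h, im_w) binary masks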
def make_roi_mask_post_processor(cfg):
masker = None
if cfg.MODEL.CHAR_MASK_ON or cfg.SEQUENCE.SEQ_ON:
mask_post_processor = CharMaskPostProcessor(cfg, masker)
else:
mask_post_processor = MaskPostProcessor(masker)
return mask_post_processor
| 8,971 | 34.184314 | 209 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
from torch.nn import functional as F
from ..box_head.roi_box_feature_extractors import ResNet50Conv5ROIFeatureExtractor
from maskrcnn_benchmark.modeling.poolers import Pooler
from maskrcnn_benchmark.layers import Conv2d
class MaskRCNNFPNFeatureExtractor(nn.Module):
"""
    Mask-head feature extractor operating on FPN feature maps
"""
def __init__(self, cfg):
"""
        Arguments:
            cfg: the global configuration; the pooler resolution, scales,
                sampling ratio and conv layer widths are read from it
"""
super(MaskRCNNFPNFeatureExtractor, self).__init__()
# resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
if cfg.MODEL.CHAR_MASK_ON or cfg.SEQUENCE.SEQ_ON:
resolution_h = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_H
resolution_w = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_W
else:
resolution_h = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
resolution_w = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
scales = cfg.MODEL.ROI_MASK_HEAD.POOLER_SCALES
sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
pooler = Pooler(
output_size=(resolution_h, resolution_w),
scales=scales,
sampling_ratio=sampling_ratio,
)
input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS
self.pooler = pooler
layers = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS
next_feature = input_size
self.blocks = []
for layer_idx, layer_features in enumerate(layers, 1):
layer_name = "mask_fcn{}".format(layer_idx)
module = Conv2d(next_feature, layer_features, 3, stride=1, padding=1)
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
nn.init.constant_(module.bias, 0)
self.add_module(layer_name, module)
next_feature = layer_features
self.blocks.append(layer_name)
def forward(self, x, proposals):
x = self.pooler(x, proposals)
for layer_name in self.blocks:
x = F.relu(getattr(self, layer_name)(x))
return x
_ROI_MASK_FEATURE_EXTRACTORS = {
"ResNet50Conv5ROIFeatureExtractor": ResNet50Conv5ROIFeatureExtractor,
"MaskRCNNFPNFeatureExtractor": MaskRCNNFPNFeatureExtractor,
}
def make_roi_mask_feature_extractor(cfg):
func = _ROI_MASK_FEATURE_EXTRACTORS[cfg.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR]
return func(cfg)
| 2,762 | 36.849315 | 87 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.modeling.utils import cat
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from torch.nn import functional as F
def project_masks_on_boxes(segmentation_masks, proposals, discretization_size):
"""
Given segmentation masks and the bounding boxes corresponding
to the location of the masks in the image, this function
crops and resizes the masks in the position defined by the
boxes. This prepares the masks for them to be fed to the
loss computation as the targets.
Arguments:
segmentation_masks: an instance of SegmentationMask
proposals: an instance of BoxList
"""
masks = []
M = discretization_size
device = proposals.bbox.device
proposals = proposals.convert("xyxy")
assert segmentation_masks.size == proposals.size, "{}, {}".format(
segmentation_masks, proposals
)
# TODO put the proposals on the CPU, as the representation for the
# masks is not efficient GPU-wise (possibly several small tensors for
# representing a single instance mask)
proposals = proposals.bbox.to(torch.device("cpu"))
for segmentation_mask, proposal in zip(segmentation_masks, proposals):
# crop the masks, resize them to the desired resolution and
# then convert them to the tensor representation,
# instead of the list representation that was used
cropped_mask = segmentation_mask.crop(proposal)
scaled_mask = cropped_mask.resize((M, M))
mask = scaled_mask.convert(mode="mask")
masks.append(mask)
if len(masks) == 0:
return torch.empty(0, dtype=torch.float32, device=device)
return torch.stack(masks, dim=0).to(device, dtype=torch.float32)
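# Shape sketch: with 3 positive proposals and discretization_size M = 28, the
# returned target tensor is (3, 28, 28) float32 on the proposals' device --
# one binary mask per proposal, cropped and resized to its box.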
class MaskRCNNLossComputation(object):
def __init__(self, proposal_matcher, discretization_size):
"""
Arguments:
proposal_matcher (Matcher)
discretization_size (int)
"""
self.proposal_matcher = proposal_matcher
self.discretization_size = discretization_size
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Mask RCNN needs "labels" and "masks "fields for creating the targets
target = target.copy_with_fields(["labels", "masks"])
# get the targets corresponding GT for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, proposals, targets):
labels = []
masks = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_targets.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# this can probably be removed, but is left here for clarity
# and completeness
neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[neg_inds] = 0
# mask scores are only computed on positive samples
positive_inds = torch.nonzero(labels_per_image > 0).squeeze(1)
segmentation_masks = matched_targets.get_field("masks")
segmentation_masks = segmentation_masks[positive_inds]
positive_proposals = proposals_per_image[positive_inds]
masks_per_image = project_masks_on_boxes(
segmentation_masks, positive_proposals, self.discretization_size
)
labels.append(labels_per_image)
masks.append(masks_per_image)
return labels, masks
def __call__(self, proposals, mask_logits, targets):
"""
Arguments:
proposals (list[BoxList])
mask_logits (Tensor)
targets (list[BoxList])
Return:
mask_loss (Tensor): scalar tensor containing the loss
"""
labels, mask_targets = self.prepare_targets(proposals, targets)
labels = cat(labels, dim=0)
mask_targets = cat(mask_targets, dim=0)
positive_inds = torch.nonzero(labels > 0).squeeze(1)
labels_pos = labels[positive_inds]
# torch.mean (in binary_cross_entropy_with_logits) doesn't
# accept empty tensors, so handle it separately
if mask_targets.numel() == 0:
return mask_logits.sum() * 0
mask_loss = F.binary_cross_entropy_with_logits(
mask_logits[positive_inds, labels_pos], mask_targets
)
return mask_loss
class CharMaskRCNNLossComputation(object):
def __init__(self, use_weighted_loss=False):
"""
        Arguments:
            use_weighted_loss (bool): if True, weight the character-mask
                cross-entropy by the per-class character weights
"""
self.use_weighted_loss = use_weighted_loss
def __call__(
self,
proposals,
mask_logits,
char_mask_logits,
mask_targets,
char_mask_targets,
char_mask_weights,
):
"""
Arguments:
proposals (list[BoxList])
mask_logits (Tensor)
targets (list[BoxList])
Return:
mask_loss (Tensor): scalar tensor containing the loss
"""
mask_targets = cat(mask_targets, dim=0)
char_mask_targets = cat(char_mask_targets, dim=0)
char_mask_weights = cat(char_mask_weights, dim=0)
char_mask_weights = char_mask_weights.mean(dim=0)
# torch.mean (in binary_cross_entropy_with_logits) doesn't
# accept empty tensors, so handle it separately
if mask_targets.numel() == 0 or char_mask_targets.numel() == 0:
            # use the logits (not the integer targets) so both zero losses
            # keep a gradient path
            return mask_logits.sum() * 0, char_mask_logits.sum() * 0
mask_loss = F.binary_cross_entropy_with_logits(
mask_logits.squeeze(dim=1), mask_targets
)
if self.use_weighted_loss:
char_mask_loss = F.cross_entropy(
char_mask_logits, char_mask_targets, char_mask_weights, ignore_index=-1
)
else:
char_mask_loss = F.cross_entropy(
char_mask_logits, char_mask_targets, ignore_index=-1
)
return mask_loss, char_mask_loss
class SeqMaskRCNNLossComputation(object):
def __init__(self):
"""
Arguments:
proposal_matcher (Matcher)
discretization_size (int)
"""
def __call__(
self,
proposals,
mask_logits,
mask_targets,
):
"""
Arguments:
proposals (list[BoxList])
mask_logits (Tensor)
targets (list[BoxList])
Return:
mask_loss (Tensor): scalar tensor containing the loss
"""
mask_targets = cat(mask_targets, dim=0)
# torch.mean (in binary_cross_entropy_with_logits) doesn't
# accept empty tensors, so handle it separately
if mask_targets.numel() == 0:
return mask_logits.sum() * 0
mask_loss = F.binary_cross_entropy_with_logits(
mask_logits.squeeze(dim=1), mask_targets
)
return mask_loss
def make_roi_mask_loss_evaluator(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
if cfg.MODEL.CHAR_MASK_ON:
loss_evaluator = CharMaskRCNNLossComputation(
use_weighted_loss=cfg.MODEL.ROI_MASK_HEAD.USE_WEIGHTED_CHAR_MASK
)
else:
if cfg.SEQUENCE.SEQ_ON:
loss_evaluator = SeqMaskRCNNLossComputation()
else:
loss_evaluator = MaskRCNNLossComputation(
matcher, cfg.MODEL.ROI_MASK_HEAD.RESOLUTION
)
return loss_evaluator
| 8,431 | 34.428571 | 87 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_seq_predictors.py | # Written by Minghui Liao
import math
import random
import numpy as np
import torch
from maskrcnn_benchmark.utils.chars import char2num, num2char
from torch import nn
from torch.nn import functional as F
gpu_device = torch.device("cuda")
cpu_device = torch.device("cpu")
def reduce_mul(l):
out = 1.0
for x in l:
out *= x
return out
def check_all_done(seqs):
for seq in seqs:
if not seq[-1]:
return False
return True
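# Doctest-style sanity checks (float results up to rounding):
#
#   reduce_mul([0.9, 0.5, 0.8])                       # -> 0.36
#   check_all_done([(['<seq>'], 0.36, None, True)])   # -> True
#
# check_all_done expects tuples whose last element is the "done" flag, as
# produced by SequencePredictor.beam_search_step below.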
# TODO
class SequencePredictor(nn.Module):
def __init__(self, cfg, dim_in):
super(SequencePredictor, self).__init__()
self.cfg = cfg
if cfg.SEQUENCE.TWO_CONV:
self.seq_encoder = nn.Sequential(
nn.Conv2d(dim_in, dim_in, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
nn.Conv2d(dim_in, 256, 3, padding=1),
nn.ReLU(inplace=True),
)
else:
self.seq_encoder = nn.Sequential(
nn.Conv2d(dim_in, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
x_onehot_size = int(cfg.SEQUENCE.RESIZE_WIDTH / 2)
y_onehot_size = int(cfg.SEQUENCE.RESIZE_HEIGHT / 2)
self.seq_decoder = BahdanauAttnDecoderRNN(
256, cfg.SEQUENCE.NUM_CHAR, cfg.SEQUENCE.NUM_CHAR, n_layers=1, dropout_p=0.1, onehot_size = (y_onehot_size, x_onehot_size)
)
# self.criterion_seq_decoder = nn.NLLLoss(ignore_index = -1, reduce=False)
self.criterion_seq_decoder = nn.NLLLoss(ignore_index=-1, reduction="none")
# self.rescale = nn.Upsample(size=(16, 64), mode="bilinear", align_corners=False)
self.rescale = nn.Upsample(size=(cfg.SEQUENCE.RESIZE_HEIGHT, cfg.SEQUENCE.RESIZE_WIDTH), mode="bilinear", align_corners=False)
self.x_onehot = nn.Embedding(x_onehot_size, x_onehot_size)
self.x_onehot.weight.data = torch.eye(x_onehot_size)
self.y_onehot = nn.Embedding(y_onehot_size, y_onehot_size)
self.y_onehot.weight.data = torch.eye(y_onehot_size)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(
self, x, decoder_targets=None, word_targets=None, use_beam_search=False
):
rescale_out = self.rescale(x)
seq_decoder_input = self.seq_encoder(rescale_out)
x_onehot_size = int(self.cfg.SEQUENCE.RESIZE_WIDTH / 2)
y_onehot_size = int(self.cfg.SEQUENCE.RESIZE_HEIGHT / 2)
x_t, y_t = np.meshgrid(np.linspace(0, x_onehot_size - 1, x_onehot_size), np.linspace(0, y_onehot_size - 1, y_onehot_size))
        x_t = torch.as_tensor(x_t, dtype=torch.long, device=gpu_device)
        y_t = torch.as_tensor(y_t, dtype=torch.long, device=gpu_device)
x_onehot_embedding = (
self.x_onehot(x_t)
.transpose(0, 2)
.transpose(1, 2)
.repeat(seq_decoder_input.size(0), 1, 1, 1)
)
y_onehot_embedding = (
self.y_onehot(y_t)
.transpose(0, 2)
.transpose(1, 2)
.repeat(seq_decoder_input.size(0), 1, 1, 1)
)
seq_decoder_input_loc = torch.cat(
[seq_decoder_input, x_onehot_embedding, y_onehot_embedding], 1
)
seq_decoder_input_reshape = (
seq_decoder_input_loc.view(
seq_decoder_input_loc.size(0), seq_decoder_input_loc.size(1), -1
)
.transpose(0, 2)
.transpose(1, 2)
)
if self.training:
bos_onehot = np.zeros(
(seq_decoder_input_reshape.size(1), 1), dtype=np.int32
)
bos_onehot[:, 0] = self.cfg.SEQUENCE.BOS_TOKEN
decoder_input = torch.tensor(bos_onehot.tolist(), device=gpu_device)
decoder_hidden = torch.zeros(
(seq_decoder_input_reshape.size(1), 256), device=gpu_device
)
use_teacher_forcing = (
True
if random.random() < self.cfg.SEQUENCE.TEACHER_FORCE_RATIO
else False
)
target_length = decoder_targets.size(1)
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = self.seq_decoder(
decoder_input, decoder_hidden, seq_decoder_input_reshape
)
if di == 0:
loss_seq_decoder = self.criterion_seq_decoder(
decoder_output, word_targets[:, di]
)
else:
loss_seq_decoder += self.criterion_seq_decoder(
decoder_output, word_targets[:, di]
)
decoder_input = decoder_targets[:, di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = self.seq_decoder(
decoder_input, decoder_hidden, seq_decoder_input_reshape
)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze(
1
).detach() # detach from history as input
if di == 0:
loss_seq_decoder = self.criterion_seq_decoder(
decoder_output, word_targets[:, di]
)
else:
loss_seq_decoder += self.criterion_seq_decoder(
decoder_output, word_targets[:, di]
)
loss_seq_decoder = loss_seq_decoder.sum() / loss_seq_decoder.size(0)
loss_seq_decoder = 0.2 * loss_seq_decoder
return loss_seq_decoder
else:
words = []
decoded_scores = []
detailed_decoded_scores = []
# real_length = 0
if use_beam_search:
for batch_index in range(seq_decoder_input_reshape.size(1)):
decoder_hidden = torch.zeros((1, 256), device=gpu_device)
word = []
char_scores = []
detailed_char_scores = []
top_seqs = self.beam_search(
seq_decoder_input_reshape[:, batch_index : batch_index + 1, :],
decoder_hidden,
beam_size=6,
max_len=self.cfg.SEQUENCE.MAX_LENGTH,
)
top_seq = top_seqs[0]
for character in top_seq[1:]:
character_index = character[0]
if character_index == self.cfg.SEQUENCE.NUM_CHAR - 1:
char_scores.append(character[1])
detailed_char_scores.append(character[2])
break
else:
if character_index == 0:
word.append("~")
char_scores.append(0.0)
else:
word.append(num2char(character_index))
char_scores.append(character[1])
detailed_char_scores.append(character[2])
words.append("".join(word))
decoded_scores.append(char_scores)
detailed_decoded_scores.append(detailed_char_scores)
else:
for batch_index in range(seq_decoder_input_reshape.size(1)):
bos_onehot = np.zeros((1, 1), dtype=np.int32)
bos_onehot[:, 0] = self.cfg.SEQUENCE.BOS_TOKEN
decoder_input = torch.tensor(bos_onehot.tolist(), device=gpu_device)
decoder_hidden = torch.zeros((1, 256), device=gpu_device)
word = []
char_scores = []
for di in range(self.cfg.SEQUENCE.MAX_LENGTH):
decoder_output, decoder_hidden, decoder_attention = self.seq_decoder(
decoder_input,
decoder_hidden,
seq_decoder_input_reshape[
:, batch_index : batch_index + 1, :
],
)
# decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
char_scores.append(topv.item())
if topi.item() == self.cfg.SEQUENCE.NUM_CHAR - 1:
break
else:
if topi.item() == 0:
word.append("~")
else:
word.append(num2char(topi.item()))
# real_length = di
decoder_input = topi.squeeze(1).detach()
words.append("".join(word))
decoded_scores.append(char_scores)
return words, decoded_scores, detailed_decoded_scores
def beam_search_step(self, encoder_context, top_seqs, k):
all_seqs = []
for seq in top_seqs:
seq_score = reduce_mul([_score for _, _score, _, _ in seq])
if seq[-1][0] == self.cfg.SEQUENCE.NUM_CHAR - 1:
all_seqs.append((seq, seq_score, seq[-1][2], True))
continue
decoder_hidden = seq[-1][-1][0]
onehot = np.zeros((1, 1), dtype=np.int32)
onehot[:, 0] = seq[-1][0]
decoder_input = torch.tensor(onehot.tolist(), device=gpu_device)
decoder_output, decoder_hidden, decoder_attention = self.seq_decoder(
decoder_input, decoder_hidden, encoder_context
)
detailed_char_scores = decoder_output.cpu().numpy()
# print(decoder_output.shape)
scores, candidates = decoder_output.data[:, 1:].topk(k)
for i in range(k):
character_score = scores[:, i]
character_index = candidates[:, i]
score = seq_score * character_score.item()
char_score = seq_score * detailed_char_scores
rs_seq = seq + [
(
character_index.item() + 1,
character_score.item(),
char_score,
[decoder_hidden],
)
]
done = character_index.item() + 1 == self.cfg.SEQUENCE.NUM_CHAR - 1
all_seqs.append((rs_seq, score, char_score, done))
all_seqs = sorted(all_seqs, key=lambda seq: seq[1], reverse=True)
topk_seqs = [seq for seq, _, _, _ in all_seqs[:k]]
all_done = check_all_done(all_seqs[:k])
return topk_seqs, all_done
def beam_search(self, encoder_context, decoder_hidden, beam_size=6, max_len=32):
char_score = np.zeros(self.cfg.SEQUENCE.NUM_CHAR)
top_seqs = [[(self.cfg.SEQUENCE.BOS_TOKEN, 1.0, char_score, [decoder_hidden])]]
# loop
for _ in range(max_len):
top_seqs, all_done = self.beam_search_step(
encoder_context, top_seqs, beam_size
)
if all_done:
break
return top_seqs
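    # Data-structure note: each hypothesis in `top_seqs` is a list of tuples
    # (char_index, char_score, detailed_char_scores, [decoder_hidden]), seeded
    # with the BOS token at score 1.0. beam_search_step ranks candidate
    # extensions by the product of their per-character scores (reduce_mul)
    # and decoding stops once every top-k hypothesis ends with the EOS index
    # (NUM_CHAR - 1).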
class Attn(nn.Module):
def __init__(self, method, hidden_size, embed_size, onehot_size):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
self.embed_size = embed_size
self.attn = nn.Linear(2 * self.hidden_size + onehot_size, hidden_size)
# self.attn = nn.Linear(hidden_size, hidden_size)
self.v = nn.Parameter(torch.rand(hidden_size))
stdv = 1.0 / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
def forward(self, hidden, encoder_outputs):
"""
:param hidden:
previous hidden state of the decoder, in shape (B, hidden_size)
:param encoder_outputs:
encoder outputs from Encoder, in shape (H*W, B, hidden_size)
:return
attention energies in shape (B, H*W)
"""
max_len = encoder_outputs.size(0)
# this_batch_size = encoder_outputs.size(1)
H = hidden.repeat(max_len, 1, 1).transpose(0, 1) # (B, H*W, hidden_size)
encoder_outputs = encoder_outputs.transpose(0, 1) # (B, H*W, hidden_size)
attn_energies = self.score(
H, encoder_outputs
) # compute attention score (B, H*W)
return F.softmax(attn_energies, dim=1).unsqueeze(
1
) # normalize with softmax (B, 1, H*W)
def score(self, hidden, encoder_outputs):
energy = torch.tanh(
self.attn(torch.cat([hidden, encoder_outputs], 2))
) # (B, H*W, 2*hidden_size+H+W)->(B, H*W, hidden_size)
energy = energy.transpose(2, 1) # (B, hidden_size, H*W)
v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(
1
) # (B, 1, hidden_size)
energy = torch.bmm(v, energy) # (B, 1, H*W)
return energy.squeeze(1) # (B, H*W)
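    # A shape walk-through, assuming hidden_size=256 and onehot_size=40, so
    # the encoder context carries 256 + 40 = 296 channels over H*W = 128
    # positions:
    #
    #   attn = Attn("concat", hidden_size=256, embed_size=38, onehot_size=40)
    #   w = attn(torch.rand(2, 256), torch.rand(128, 2, 296))
    #   # w.shape == (2, 1, 128); each row sums to 1 over the 128 positions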
class BahdanauAttnDecoderRNN(nn.Module):
def __init__(
self,
hidden_size,
embed_size,
output_size,
n_layers=1,
dropout_p=0,
bidirectional=False,
onehot_size = (8, 32)
):
super(BahdanauAttnDecoderRNN, self).__init__()
# Define parameters
self.hidden_size = hidden_size
self.embed_size = embed_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout_p = dropout_p
# Define layers
self.embedding = nn.Embedding(output_size, embed_size)
self.embedding.weight.data = torch.eye(embed_size)
# self.dropout = nn.Dropout(dropout_p)
self.word_linear = nn.Linear(embed_size, hidden_size)
self.attn = Attn("concat", hidden_size, embed_size, onehot_size[0] + onehot_size[1])
self.rnn = nn.GRUCell(2 * hidden_size + onehot_size[0] + onehot_size[1], hidden_size)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, word_input, last_hidden, encoder_outputs):
"""
:param word_input:
word input for current time step, in shape (B)
        :param last_hidden:
            last hidden state of the decoder, in shape (layers*direction*B, hidden_size)
:param encoder_outputs:
encoder outputs in shape (H*W, B, C)
:return
decoder output
"""
# Get the embedding of the current input word (last output word)
word_embedded_onehot = self.embedding(word_input).view(
1, word_input.size(0), -1
) # (1,B,embed_size)
word_embedded = self.word_linear(word_embedded_onehot) # (1, B, hidden_size)
attn_weights = self.attn(last_hidden, encoder_outputs) # (B, 1, H*W)
context = attn_weights.bmm(
encoder_outputs.transpose(0, 1)
) # (B, 1, H*W) * (B, H*W, C) = (B,1,C)
context = context.transpose(0, 1) # (1,B,C)
# Combine embedded input word and attended context, run through RNN
# 2 * hidden_size + W + H: 256 + 256 + 32 + 8 = 552
rnn_input = torch.cat((word_embedded, context), 2)
last_hidden = last_hidden.view(last_hidden.size(0), -1)
rnn_input = rnn_input.view(word_input.size(0), -1)
hidden = self.rnn(rnn_input, last_hidden)
if not self.training:
output = F.softmax(self.out(hidden), dim=1)
else:
output = F.log_softmax(self.out(hidden), dim=1)
# Return final output, hidden state
# print(output.shape)
return output, hidden, attn_weights
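    # A single decoding step, assuming hidden_size=256, a 38-symbol alphabet
    # and onehot_size=(8, 16), i.e. encoder channels 256 + 8 + 16 = 280:
    #
    #   dec = BahdanauAttnDecoderRNN(256, 38, 38, onehot_size=(8, 16))
    #   out, h, attn_w = dec(
    #       torch.zeros(2, 1, dtype=torch.long),  # previous output symbols
    #       torch.zeros(2, 256),                  # previous hidden state
    #       torch.rand(128, 2, 280),              # encoder outputs (H*W, B, C)
    #   )
    #   # out: (2, 38) -- log-probs in training mode, probs in eval mode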
def make_roi_seq_predictor(cfg, dim_in):
return SequencePredictor(cfg, dim_in)
| 16,629 | 42.534031 | 134 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from maskrcnn_benchmark.layers import Conv2d, ConvTranspose2d
from torch import nn
from torch.nn import functional as F
from .roi_seq_predictors import make_roi_seq_predictor
class MaskRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(MaskRCNNC4Predictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
if cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
num_inputs = dim_reduced + 1
elif cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
num_inputs = dim_reduced * 2
else:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x):
x = F.relu(self.conv5_mask(x))
return self.mask_fcn_logits(x)
class CharMaskRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(CharMaskRCNNC4Predictor, self).__init__()
# num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
num_classes = 1
char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
if cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
num_inputs = dim_reduced + 1
elif cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
num_inputs = dim_reduced * 2
else:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
if cfg.MODEL.CHAR_MASK_ON:
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
self.char_mask_fcn_logits = Conv2d(dim_reduced, char_num_classes, 1, 1, 0)
else:
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x):
x = F.relu(self.conv5_mask(x))
return self.mask_fcn_logits(x), self.char_mask_fcn_logits(x)
class SeqCharMaskRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(SeqCharMaskRCNNC4Predictor, self).__init__()
# num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
num_classes = 1
char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
if cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
num_inputs = dim_reduced + 1
elif cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or 'ATTENTION_CHANNEL' in cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION:
num_inputs = dim_reduced * 2
else:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
if cfg.MODEL.CHAR_MASK_ON:
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
self.char_mask_fcn_logits = Conv2d(dim_reduced, char_num_classes, 1, 1, 0)
self.seq = make_roi_seq_predictor(cfg, dim_reduced)
else:
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x, decoder_targets=None, word_targets=None):
x = F.relu(self.conv5_mask(x))
if self.training:
loss_seq_decoder = self.seq(
x, decoder_targets=decoder_targets, word_targets=word_targets
)
return (
self.mask_fcn_logits(x),
self.char_mask_fcn_logits(x),
loss_seq_decoder,
)
else:
decoded_chars, decoded_scores, detailed_decoded_scores = self.seq(
x, use_beam_search=True
)
return (
self.mask_fcn_logits(x),
self.char_mask_fcn_logits(x),
decoded_chars,
decoded_scores,
detailed_decoded_scores,
)
class SeqMaskRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(SeqMaskRCNNC4Predictor, self).__init__()
num_classes = 1
# char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
if cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
num_inputs = dim_reduced + 1
elif cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
num_inputs = dim_reduced * 2
else:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
if cfg.SEQUENCE.SEQ_ON:
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
self.seq = make_roi_seq_predictor(cfg, dim_reduced)
else:
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x, decoder_targets=None, word_targets=None):
x = F.relu(self.conv5_mask(x))
if self.training:
loss_seq_decoder = self.seq(
x, decoder_targets=decoder_targets, word_targets=word_targets
)
return (
self.mask_fcn_logits(x),
loss_seq_decoder,
)
else:
decoded_chars, decoded_scores, detailed_decoded_scores = self.seq(
x, use_beam_search=True
)
return (
self.mask_fcn_logits(x),
decoded_chars,
decoded_scores,
detailed_decoded_scores,
)
class SeqRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(SeqRCNNC4Predictor, self).__init__()
num_classes = 1
# char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
if cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
num_inputs = dim_reduced + 1
elif cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
num_inputs = dim_reduced * 2
else:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
if cfg.SEQUENCE.SEQ_ON:
# self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
self.seq = make_roi_seq_predictor(cfg, dim_reduced)
# else:
# self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x, decoder_targets=None, word_targets=None):
x = F.relu(self.conv5_mask(x))
if self.training:
loss_seq_decoder = self.seq(
x, decoder_targets=decoder_targets, word_targets=word_targets
)
return (
None,
loss_seq_decoder,
)
else:
decoded_chars, decoded_scores, detailed_decoded_scores = self.seq(
x, use_beam_search=True
)
return (
None,
decoded_chars,
decoded_scores,
detailed_decoded_scores,
)
_ROI_MASK_PREDICTOR = {
"MaskRCNNC4Predictor": MaskRCNNC4Predictor,
"CharMaskRCNNC4Predictor": CharMaskRCNNC4Predictor,
"SeqCharMaskRCNNC4Predictor": SeqCharMaskRCNNC4Predictor,
"SeqMaskRCNNC4Predictor": SeqMaskRCNNC4Predictor,
"SeqRCNNC4Predictor": SeqRCNNC4Predictor,
}
def make_roi_mask_predictor(cfg):
func = _ROI_MASK_PREDICTOR[cfg.MODEL.ROI_MASK_HEAD.PREDICTOR]
return func(cfg)
| 11,124 | 40.356877 | 122 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.modeling.utils import cat
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from .inference import make_roi_mask_post_processor
from .loss import make_roi_mask_loss_evaluator
from .roi_mask_feature_extractors import make_roi_mask_feature_extractor
from .roi_mask_predictors import make_roi_mask_predictor
from maskrcnn_benchmark.layers import Conv2d
import math
def conv3x3(in_planes, out_planes, stride=1, has_bias=False):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=has_bias
)
def conv3x3_bn_relu(in_planes, out_planes, stride=1, has_bias=False):
return nn.Sequential(
conv3x3(in_planes, out_planes, stride),
nn.BatchNorm2d(out_planes),
nn.ReLU(inplace=True),
)
def keep_only_positive_boxes(boxes, batch_size_per_im):
"""
Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
    Arguments:
        boxes (list of BoxList)
        batch_size_per_im (int): keep at most this many positive boxes per image
"""
assert isinstance(boxes, (list, tuple))
assert isinstance(boxes[0], BoxList)
assert boxes[0].has_field("labels")
positive_boxes = []
positive_inds = []
for boxes_per_image in boxes:
labels = boxes_per_image.get_field("labels")
inds_mask = labels > 0
inds = inds_mask.nonzero().squeeze(1)
if len(inds) > batch_size_per_im:
new_inds = inds[:batch_size_per_im]
inds_mask[inds[batch_size_per_im:]] = 0
else:
new_inds = inds
positive_boxes.append(boxes_per_image[new_inds])
positive_inds.append(inds_mask)
return positive_boxes, positive_inds
# TODO
def project_char_masks_on_boxes(
segmentation_masks, segmentation_char_masks, proposals, discretization_size
):
"""
Given segmentation masks and the bounding boxes corresponding
to the location of the masks in the image, this function
crops and resizes the masks in the position defined by the
boxes. This prepares the masks for them to be fed to the
loss computation as the targets.
Arguments:
segmentation_masks: an instance of SegmentationMask
proposals: an instance of BoxList
"""
masks = []
char_masks = []
char_mask_weights = []
decoder_targets = []
word_targets = []
M_H, M_W = discretization_size[0], discretization_size[1]
device = proposals.bbox.device
proposals = proposals.convert("xyxy")
assert segmentation_masks.size == proposals.size, "{}, {}".format(
segmentation_masks, proposals
)
assert segmentation_char_masks.size == proposals.size, "{}, {}".format(
segmentation_char_masks, proposals
)
# TODO put the proposals on the CPU, as the representation for the
# masks is not efficient GPU-wise (possibly several small tensors for
# representing a single instance mask)
proposals = proposals.bbox.to(torch.device("cpu"))
for segmentation_mask, segmentation_char_mask, proposal in zip(
segmentation_masks, segmentation_char_masks, proposals
):
# crop the masks, resize them to the desired resolution and
# then convert them to the tensor representation,
# instead of the list representation that was used
cropped_mask = segmentation_mask.crop(proposal)
scaled_mask = cropped_mask.resize((M_W, M_H))
mask = scaled_mask.convert(mode="mask")
masks.append(mask)
cropped_char_mask = segmentation_char_mask.crop(proposal)
scaled_char_mask = cropped_char_mask.resize((M_W, M_H))
char_mask, char_mask_weight, decoder_target, word_target = scaled_char_mask.convert(
mode="seq_char_mask"
)
char_masks.append(char_mask)
char_mask_weights.append(char_mask_weight)
decoder_targets.append(decoder_target)
word_targets.append(word_target)
    if len(masks) == 0:
        # must mirror the five-tuple returned below
        return (
            torch.empty(0, dtype=torch.float32, device=device),
            torch.empty(0, dtype=torch.long, device=device),
            torch.empty(0, dtype=torch.float32, device=device),
            torch.empty(0, dtype=torch.long, device=device),
            torch.empty(0, dtype=torch.long, device=device),
        )
return (
torch.stack(masks, dim=0).to(device, dtype=torch.float32),
torch.stack(char_masks, dim=0).to(device, dtype=torch.long),
torch.stack(char_mask_weights, dim=0).to(device, dtype=torch.float32),
torch.stack(decoder_targets, dim=0).to(device, dtype=torch.long),
torch.stack(word_targets, dim=0).to(device, dtype=torch.long),
)
class ROIMaskHead(torch.nn.Module):
def __init__(self, cfg, proposal_matcher, discretization_size):
super(ROIMaskHead, self).__init__()
self.proposal_matcher = proposal_matcher
self.discretization_size = discretization_size
self.cfg = cfg.clone()
self.feature_extractor = make_roi_mask_feature_extractor(cfg)
self.predictor = make_roi_mask_predictor(cfg)
self.post_processor = make_roi_mask_post_processor(cfg)
self.loss_evaluator = make_roi_mask_loss_evaluator(cfg)
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION':
self.mask_attention = nn.Sequential(
conv3x3_bn_relu(cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] + 1, 32),
conv3x3(32, 1),
# Conv2d(cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] + 1, 1, 1, 1, 0),
nn.Sigmoid()
)
self.mask_attention.apply(self.weights_init)
# for name, param in self.named_parameters():
# if "bias" in name:
# nn.init.constant_(param, 0)
# elif "weight" in name:
# # Caffe2 implementation uses MSRAFill, which in fact
# # corresponds to kaiming_normal_ in PyTorch
# nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_DOWN':
self.mask_attention = nn.Sequential(
conv3x3_bn_relu(cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] + 1, 32, stride=2),
conv3x3(32, 1, stride=2),
nn.Upsample(scale_factor=4, mode='nearest'),
nn.Sigmoid()
)
self.mask_attention.apply(self.weights_init)
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] * 2
self.channel_attention = nn.Sequential(
nn.MaxPool2d(2),
conv3x3_bn_relu(num_channel, num_channel, stride=2),
conv3x3(num_channel, num_channel, stride=2),
nn.AdaptiveAvgPool2d((1,1)),
nn.Sigmoid()
)
self.channel_attention.apply(self.weights_init)
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_SPLIT' or self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_SPLIT_BINARY':
num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] * 2
self.channel_attention = nn.Sequential(
nn.MaxPool2d(2),
conv3x3_bn_relu(num_channel, int(num_channel / 4), stride=2),
conv3x3(int(num_channel / 4), 2, stride=2),
nn.AdaptiveAvgPool2d((1,1)),
# nn.Sigmoid()
nn.Softmax(dim=1)
)
self.channel_attention.apply(self.weights_init)
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_2':
num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] * 2
self.channel_attention_2 = nn.Sequential(
nn.AdaptiveAvgPool2d((1,1)),
nn.Conv2d(
num_channel, num_channel, kernel_size=1, stride=1, padding=0
),
nn.Conv2d(
num_channel, num_channel, kernel_size=1, stride=1, padding=0
),
nn.Softmax(dim=1)
)
self.channel_attention_2.apply(self.weights_init)
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_TANH':
feature_dim = 128
num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1] * 2
self.mask_pooler = nn.Sequential(
nn.MaxPool2d(2),
conv3x3_bn_relu(num_channel, num_channel, stride=2),
)
self.attn = nn.Linear(feature_dim, feature_dim)
self.v = nn.Parameter(torch.rand(feature_dim))
stdv = 1.0 / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
self.mask_pooler.apply(self.weights_init)
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'NEW_CAT':
num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
            self.enlarge_receptive_field = nn.Sequential(
nn.Conv2d(
2 * num_channel, num_channel, kernel_size=3, stride=1, padding=2, dilation=2
),
nn.Conv2d(
num_channel, num_channel, kernel_size=3, stride=1, padding=2, dilation=2
),
)
            self.enlarge_receptive_field.apply(self.weights_init)
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'NEW_MASK':
num_channel = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
self.new_mask = nn.Sequential(
nn.Conv2d(
2 * num_channel, num_channel, kernel_size=3, stride=1, padding=2, dilation=2
),
nn.Conv2d(
num_channel, 32, kernel_size=3, stride=1, padding=2, dilation=2
),
nn.Conv2d(
32, 1, kernel_size=3, stride=1, padding=2, dilation=2
),
nn.Sigmoid()
)
self.new_mask.apply(self.weights_init)
def weights_init(self, m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
nn.init.kaiming_normal_(m.weight.data)
elif classname.find("BatchNorm") != -1:
m.weight.data.fill_(1.0)
m.bias.data.fill_(1e-4)
def step_function(self, x):
return torch.reciprocal(1 + torch.exp(-50 * (x - 0.5)))
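    # step_function is a steep sigmoid centered at 0.5 (slope 50), used as a
    # differentiable stand-in for hard binarization: x = 0.4 maps to ~0.0067,
    # x = 0.6 to ~0.9933, and x = 0.5 to exactly 0.5.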
def channel_attention_tanh(self, feature, mask):
"""
        :param feature:
            pooled RoI features, in shape (B, C, H, W)
        :param mask:
            downsampled soft mask, in shape (B, 1, H, W)
        :return
            channel attention weights in shape (B, C, 1, 1)
"""
feature = feature.reshape((feature.shape[0], feature.shape[1], -1)) # (B, C, H*W)
masks = mask.reshape((mask.shape[0], mask.shape[1], -1)).repeat(1, feature.shape[1], 1) # (B, C, H*W)
fuse_feature = torch.cat([feature, masks], 2)
energy = torch.tanh(self.attn(fuse_feature)) # (B, C, 2*H*W)->(B, C, 2*H*W)
energy = energy.transpose(2, 1) # (B, 2*H*W, C)
v = self.v.repeat(feature.shape[0], 1).unsqueeze(
1
) # (B, 1, 2*H*W)
energy = torch.bmm(v, energy) # (B, 1, C)
energy = energy.squeeze(1) # (B, C)
        return nn.functional.softmax(energy, dim=1).unsqueeze(2).unsqueeze(3)  # normalize with softmax -> (B, C, 1, 1)
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
# match_quality_matrix = boxlist_polygon_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Mask RCNN needs "labels" and "masks "fields for creating the targets
target = target.copy_with_fields(["labels", "masks", "char_masks"])
# get the targets corresponding GT for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, proposals, targets):
masks = []
char_masks = []
char_mask_weights = []
decoder_targets = []
word_targets = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_targets.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# this can probably be removed, but is left here for clarity
# and completeness
neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[neg_inds] = 0
# mask scores are only computed on positive samples
positive_inds = torch.nonzero(labels_per_image > 0).squeeze(1)
segmentation_masks = matched_targets.get_field("masks")
segmentation_masks = segmentation_masks[positive_inds]
char_segmentation_masks = matched_targets.get_field("char_masks")
char_segmentation_masks = char_segmentation_masks[positive_inds]
positive_proposals = proposals_per_image[positive_inds]
masks_per_image, char_masks_per_image, char_masks_weight_per_image, decoder_targets_per_image, word_targets_per_image = project_char_masks_on_boxes(
segmentation_masks,
char_segmentation_masks,
positive_proposals,
self.discretization_size,
)
masks.append(masks_per_image)
char_masks.append(char_masks_per_image)
char_mask_weights.append(char_masks_weight_per_image)
decoder_targets.append(decoder_targets_per_image)
word_targets.append(word_targets_per_image)
return masks, char_masks, char_mask_weights, decoder_targets, word_targets
def feature_mask(self, x, proposals):
masks = []
for proposal in proposals:
segmentation_masks = proposal.get_field("masks")
boxes = proposal.bbox.to(torch.device("cpu"))
for segmentation_mask, box in zip(segmentation_masks, boxes):
cropped_mask = segmentation_mask.crop(box)
scaled_mask = cropped_mask.resize((self.cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_W, self.cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_H))
mask = scaled_mask.convert(mode="mask")
masks.append(mask)
if len(masks) == 0:
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
x = cat([x, torch.ones((x.shape[0], 1, x.shape[2], x.shape[3]), device=x.device)], dim=1)
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or 'ATTENTION_CHANNEL' in self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION:
x = cat([x, x], dim=1)
return x
masks = torch.stack(masks, dim=0).to(x.device, dtype=torch.float32)
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
x = cat([x, masks.unsqueeze(1)], dim=1)
return x
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'NEW_CAT':
cat_x = cat([x, x * masks.unsqueeze(1)], dim=1)
            out_x = self.enlarge_receptive_field(cat_x)
return out_x
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'NEW_MASK':
cat_x = cat([x, x * masks.unsqueeze(1)], dim=1)
new_mask = self.new_mask(cat_x)
out_x = x * new_mask
return out_x
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION' or self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_DOWN':
x_cat = cat([x, masks.unsqueeze(1)], dim=1)
attention = self.mask_attention(x_cat)
x = x * attention
return x
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX':
mask_x = x * masks.unsqueeze(1)
cat_x = cat([x, mask_x], dim=1)
return cat_x
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
mask_x = x * masks.unsqueeze(1)
cat_x = cat([x, mask_x], dim=1)
channel_attention = self.channel_attention(cat_x)
attentioned_x = cat_x * channel_attention
return attentioned_x
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_2':
mask_x = x * masks.unsqueeze(1)
cat_x = cat([x, mask_x], dim=1)
channel_attention = self.channel_attention_2(cat_x)
# print(channel_attention[0, :, 0, 0])
attentioned_x = cat_x * channel_attention
return attentioned_x
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_SPLIT':
mask_x = x * masks.unsqueeze(1)
cat_x = cat([x, mask_x], dim=1)
channel_attention = self.channel_attention(cat_x)
            # print(channel_attention[0, :, 0, 0])  # debug print, disabled like the other branches
attentioned_x = cat([x * channel_attention[:, 0:1, :, :], mask_x * channel_attention[:, 1:, :, :]], dim=1)
return attentioned_x
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_SPLIT_BINARY':
mask_x = x * masks.unsqueeze(1)
cat_x = cat([x, mask_x], dim=1)
channel_attention = self.step_function(self.channel_attention(cat_x))
# print(channel_attention[:, :, 0, 0])
attentioned_x = cat([x * channel_attention[:, 0:1, :, :], mask_x * channel_attention[:, 1:, :, :]], dim=1)
# attentioned_x = cat([x * channel_attention[:, 1:, :, :], mask_x * channel_attention[:, 0:1, :, :]], dim=1)
return attentioned_x
if self.cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL_TANH':
mask_x = x * masks.unsqueeze(1)
cat_x = cat([x, mask_x], dim=1)
pooler_x = self.mask_pooler(cat_x)
pooler_mask = nn.functional.interpolate(masks.unsqueeze(1), scale_factor=0.25, mode='bilinear')
channel_attention = self.channel_attention_tanh(pooler_x, pooler_mask)
attentioned_x = cat_x * channel_attention
return attentioned_x
soft_ratio = self.cfg.MODEL.ROI_MASK_HEAD.SOFT_MASKED_FEATURE_RATIO
if soft_ratio > 0:
if soft_ratio < 1.0:
x = x * (soft_ratio + (1 - soft_ratio) * masks.unsqueeze(1))
else:
x = x * (1.0 + soft_ratio * masks.unsqueeze(1))
else:
x = x * masks.unsqueeze(1)
return x
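    # Soft-masking example: with SOFT_MASKED_FEATURE_RATIO = 0.5 and a binary
    # mask, features are scaled by 0.5 + 0.5 * mask, so background features
    # are halved rather than zeroed; with ratio >= 1, the masked region is
    # amplified by (1 + ratio) while the background keeps weight 1.0.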
def forward(self, features, proposals, targets=None):
"""
Arguments:
features (list[Tensor]): feature-maps from possibly several levels
proposals (list[BoxList]): proposal boxes
targets (list[BoxList], optional): the ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the original proposals
are returned. During testing, the predicted boxlists are returned
with the `mask` field set
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
if self.training:
# during training, only focus on positive boxes
all_proposals = proposals
proposals, positive_inds = keep_only_positive_boxes(
proposals, self.cfg.MODEL.ROI_MASK_HEAD.MASK_BATCH_SIZE_PER_IM
)
if all(len(proposal) == 0 for proposal in proposals):
return None, None, None
if self.training and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
x = features
x = x[torch.cat(positive_inds, dim=0)]
else:
x = self.feature_extractor(features, proposals)
if self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE:
x = self.feature_mask(x, proposals)
if self.training:
mask_targets, char_mask_targets, char_mask_weights, \
decoder_targets, word_targets = self.prepare_targets(
proposals, targets
)
decoder_targets = cat(decoder_targets, dim=0)
word_targets = cat(word_targets, dim=0)
# proposals_not_empty, targets_not = [], []
# for proposal, target, mask_target, char_mask_target, char_mask_weight in zip(proposals, targets, mask_targets, char_mask_targets, char_mask_weights):
# if len(proposal_target[0]) > 0:
# proposals_not_empty.append(proposal)
# targets_not.append(proposal_target[1])
# proposals = proposals_not_empty
# targets = targets_not
if self.cfg.MODEL.CHAR_MASK_ON:
if self.cfg.SEQUENCE.SEQ_ON:
if not self.training:
if x.numel() > 0:
mask_logits, char_mask_logits, seq_outputs, seq_scores, \
detailed_seq_scores = self.predictor(x)
result = self.post_processor(
mask_logits,
char_mask_logits,
proposals,
seq_outputs=seq_outputs,
seq_scores=seq_scores,
detailed_seq_scores=detailed_seq_scores,
)
return x, result, {}
else:
return None, None, {}
mask_logits, char_mask_logits, seq_outputs = self.predictor(
x, decoder_targets=decoder_targets, word_targets=word_targets
)
loss_mask, loss_char_mask = self.loss_evaluator(
proposals,
mask_logits,
char_mask_logits,
mask_targets,
char_mask_targets,
char_mask_weights,
)
return (
x,
all_proposals,
dict(
loss_mask=loss_mask,
loss_char_mask=loss_char_mask,
loss_seq=seq_outputs,
),
)
else:
mask_logits, char_mask_logits = self.predictor(x)
if not self.training:
result = self.post_processor(
mask_logits, char_mask_logits, proposals
)
return x, result, {}
loss_mask, loss_char_mask = self.loss_evaluator(
proposals,
mask_logits,
char_mask_logits,
mask_targets,
char_mask_targets,
char_mask_weights,
)
return (
x,
all_proposals,
dict(loss_mask=loss_mask, loss_char_mask=loss_char_mask),
)
else:
if self.cfg.SEQUENCE.SEQ_ON:
if self.cfg.MODEL.MASK_ON:
if not self.training:
if x.numel() > 0:
mask_logits, seq_outputs, seq_scores, \
detailed_seq_scores = self.predictor(x)
result = self.post_processor(
mask_logits,
None,
proposals,
seq_outputs=seq_outputs,
seq_scores=seq_scores,
detailed_seq_scores=detailed_seq_scores,
)
return x, result, {}
else:
return None, None, {}
mask_logits, seq_outputs = self.predictor(
x, decoder_targets=decoder_targets, word_targets=word_targets
)
loss_mask = self.loss_evaluator(
proposals,
mask_logits,
mask_targets,
)
return (
x,
all_proposals,
dict(
loss_mask=loss_mask,
loss_seq=seq_outputs,
),
)
else:
if not self.training:
if x.numel() > 0:
_, seq_outputs, seq_scores, \
detailed_seq_scores = self.predictor(x)
result = self.post_processor(
None,
None,
proposals,
seq_outputs=seq_outputs,
seq_scores=seq_scores,
detailed_seq_scores=detailed_seq_scores,
)
return x, result, {}
else:
return None, None, {}
_, seq_outputs = self.predictor(
x, decoder_targets=decoder_targets, word_targets=word_targets
)
return (
x,
all_proposals,
dict(
loss_seq=seq_outputs,
),
)
else:
mask_logits = self.predictor(x)
if not self.training:
result = self.post_processor(mask_logits, proposals)
return x, result, {}
                # NOTE: pass the mask targets computed by prepare_targets, matching
                # the equivalent call in the sequence branch above
                loss_mask = self.loss_evaluator(proposals, mask_logits, mask_targets)
return x, all_proposals, dict(loss_mask=loss_mask)
def build_roi_mask_head(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
return ROIMaskHead(
cfg,
matcher,
(cfg.MODEL.ROI_MASK_HEAD.RESOLUTION_H, cfg.MODEL.ROI_MASK_HEAD.RESOLUTION_W),
)
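# Hedged usage sketch (illustrative, not part of the original file). Assuming a
# yacs `cfg` carrying the fields referenced above, with `features` from the
# backbone and `proposals` from the proposal stage:
#
#   mask_head = build_roi_mask_head(cfg)
#   x, props, losses = mask_head(features, proposals, targets)  # training
#   x, results, _ = mask_head(features, proposals)              # inference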
| 27,041 | 44.679054 | 160 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
class PostProcessor(nn.Module):
"""
From a set of classification scores, box regression and proposals,
computes the post-processed boxes, and applies NMS to obtain the
final results
"""
def __init__(
self, score_thresh=0.05, nms=0.5, detections_per_img=100, box_coder=None, cfg=None
):
"""
Arguments:
score_thresh (float)
nms (float)
detections_per_img (int)
box_coder (BoxCoder)
"""
super(PostProcessor, self).__init__()
self.cfg = cfg
self.score_thresh = score_thresh
self.nms = nms
self.detections_per_img = detections_per_img
if cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
if box_coder is None:
box_coder = BoxCoder(weights=(10., 10., 5., 5.))
self.box_coder = box_coder
def forward(self, x, boxes):
"""
Arguments:
x (tuple[tensor, tensor]): x contains the class logits
and the box_regression from the model.
boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra fields labels and scores
"""
class_logits, box_regression = x
class_prob = F.softmax(class_logits, -1)
# TODO think about a representation of batch of boxes
image_shapes = [box.size for box in boxes]
boxes_per_image = [len(box) for box in boxes]
if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE:
masks = [box.get_field('masks') for box in boxes]
if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)
proposals = self.box_coder.decode(
box_regression.view(sum(boxes_per_image), -1), concat_boxes
)
proposals = proposals.split(boxes_per_image, dim=0)
else:
proposals = boxes
num_classes = class_prob.shape[1]
class_prob = class_prob.split(boxes_per_image, dim=0)
results = []
if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE:
for prob, boxes_per_img, image_shape, mask in zip(
class_prob, proposals, image_shapes, masks
):
boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape, mask)
if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = self.filter_results(boxlist, num_classes)
results.append(boxlist)
else:
for prob, boxes_per_img, image_shape in zip(
class_prob, proposals, image_shapes
):
boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = self.filter_results(boxlist, num_classes)
results.append(boxlist)
return results
def prepare_boxlist(self, boxes, scores, image_shape, mask=None):
"""
Returns BoxList from `boxes` and adds probability scores information
as an extra field
`boxes` has shape (#detections, 4 * #classes), where each row represents
a list of predicted bounding boxes for each of the object classes in the
dataset (including the background class). The detections in each row
originate from the same object proposal.
        `scores` has shape (#detections, #classes), where each row represents a list
        of object detection confidence scores for each of the object classes in the
        dataset (including the background class). `scores[i, j]` corresponds to the
        box at `boxes[i, j * 4:(j + 1) * 4]`.
"""
if not self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
scores = scores.reshape(-1)
boxes.add_field("scores", scores)
return boxes
boxes = boxes.reshape(-1, 4)
scores = scores.reshape(-1)
boxlist = BoxList(boxes, image_shape, mode="xyxy")
boxlist.add_field("scores", scores)
if mask is not None:
boxlist.add_field('masks', mask)
return boxlist
def filter_results(self, boxlist, num_classes):
"""Returns bounding-box detection results by thresholding on scores and
applying non-maximum suppression (NMS).
"""
# unwrap the boxlist to avoid additional overhead.
# if we had multi-class NMS, we could perform this directly on the boxlist
boxes = boxlist.bbox.reshape(-1, num_classes * 4)
scores = boxlist.get_field("scores").reshape(-1, num_classes)
device = scores.device
result = []
# Apply threshold on detection probabilities and apply NMS
# Skip j = 0, because it's the background class
inds_all = scores > self.score_thresh
for j in range(1, num_classes):
inds = inds_all[:, j].nonzero().squeeze(1)
scores_j = scores[inds, j]
boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
boxlist_for_class.add_field("scores", scores_j)
boxlist_for_class = boxlist_nms(
boxlist_for_class, self.nms, score_field="scores"
)
num_labels = len(boxlist_for_class)
boxlist_for_class.add_field(
"labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
)
if self.cfg.MODEL.SEG.USE_SEG_POLY or self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE or self.cfg.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE:
boxlist_for_class.add_field('masks', boxlist.get_field('masks'))
result.append(boxlist_for_class)
result = cat_boxlist(result)
number_of_detections = len(result)
# Limit to max_per_image detections **over all classes**
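        # Illustrative note (added): torch.kthvalue returns the k-th smallest
        # value. With e.g. 300 detections and detections_per_img=100,
        # k = 300 - 100 + 1 = 201, so the threshold is the 201st-smallest
        # (i.e. 100th-largest) score and `>=` keeps the top 100 plus any ties.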
if number_of_detections > self.detections_per_img > 0:
cls_scores = result.get_field("scores")
image_thresh, _ = torch.kthvalue(
cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
)
keep = cls_scores >= image_thresh.item()
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
return result
def make_roi_box_post_processor(cfg):
# use_fpn = cfg.MODEL.ROI_HEADS.USE_FPN
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH
nms_thresh = cfg.MODEL.ROI_HEADS.NMS
detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG
postprocessor = PostProcessor(
score_thresh, nms_thresh, detections_per_img, box_coder, cfg
)
return postprocessor
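# Hedged usage sketch (illustrative, not part of the original file). `x` is the
# (class_logits, box_regression) pair from the box predictor, with shapes
# roughly (R, num_classes) and (R, num_classes * 4) for R proposals in total:
#
#   post_processor = make_roi_box_post_processor(cfg)
#   detections = post_processor((class_logits, box_regression), proposals)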
| 7,693 | 42.468927 | 148 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.modeling.backbone import resnet
from maskrcnn_benchmark.modeling.poolers import Pooler
from maskrcnn_benchmark.modeling.utils import cat
from maskrcnn_benchmark.layers import Conv2d
def conv3x3(in_planes, out_planes, stride=1, has_bias=False):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=has_bias
)
def conv3x3_bn_relu(in_planes, out_planes, stride=1, has_bias=False):
return nn.Sequential(
conv3x3(in_planes, out_planes, stride),
nn.BatchNorm2d(out_planes),
nn.ReLU(inplace=True),
)
class ResNet50Conv5ROIFeatureExtractor(nn.Module):
def __init__(self, config):
super(ResNet50Conv5ROIFeatureExtractor, self).__init__()
resolution = config.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
scales = config.MODEL.ROI_BOX_HEAD.POOLER_SCALES
sampling_ratio = config.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler = Pooler(
output_size=(resolution, resolution),
scales=scales,
sampling_ratio=sampling_ratio,
)
stage = resnet.StageSpec(index=4, block_count=3, return_features=False)
head = resnet.ResNetHead(
block_module=config.MODEL.RESNETS.TRANS_FUNC,
stages=(stage,),
num_groups=config.MODEL.RESNETS.NUM_GROUPS,
width_per_group=config.MODEL.RESNETS.WIDTH_PER_GROUP,
stride_in_1x1=config.MODEL.RESNETS.STRIDE_IN_1X1,
stride_init=None,
res2_out_channels=config.MODEL.RESNETS.RES2_OUT_CHANNELS,
)
self.pooler = pooler
self.head = head
def forward(self, x, proposals):
x = self.pooler(x, proposals)
x = self.head(x)
return x
class FPN2MLPFeatureExtractor(nn.Module):
"""
Heads for FPN for classification
"""
def __init__(self, cfg):
super(FPN2MLPFeatureExtractor, self).__init__()
self.cfg = cfg
resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
scales = cfg.MODEL.ROI_BOX_HEAD.POOLER_SCALES
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler = Pooler(
output_size=(resolution, resolution),
scales=scales,
sampling_ratio=sampling_ratio,
)
if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'CAT':
input_size = (cfg.MODEL.BACKBONE.OUT_CHANNELS + 1) * resolution ** 2
else:
input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS * resolution ** 2
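        # Note (added): the '+ 1' above accounts for the binary instance mask
        # that feature_mask() concatenates as an extra channel under the 'CAT'
        # mix option, so fc6 sees (C + 1) * resolution**2 input features.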
representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
self.pooler = pooler
self.fc6 = nn.Linear(input_size, representation_size)
self.fc7 = nn.Linear(representation_size, representation_size)
# if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'ATTENTION':
# self.attention = nn.Sequential(
# conv3x3_bn_relu(cfg.MODEL.BACKBONE.OUT_CHANNELS + 1, 32),
# conv3x3(32, 1),
# nn.Sigmoid()
# )
# self.attention.apply(self.weights_init)
# if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'ATTENTION':
# self.attention = nn.Sequential(
# Conv2d(cfg.MODEL.BACKBONE.OUT_CHANNELS + 1, 1, 1, 1, 0),
# nn.Sigmoid()
# )
# for name, param in self.named_parameters():
# if "bias" in name:
# nn.init.constant_(param, 0)
# elif "weight" in name:
# # Caffe2 implementation uses MSRAFill, which in fact
# # corresponds to kaiming_normal_ in PyTorch
# nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
for l in [self.fc6, self.fc7]:
# Caffe2 implementation uses XavierFill, which in fact
# corresponds to kaiming_uniform_ in PyTorch
nn.init.kaiming_uniform_(l.weight, a=1)
nn.init.constant_(l.bias, 0)
def weights_init(self, m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
nn.init.kaiming_normal_(m.weight.data)
elif classname.find("BatchNorm") != -1:
m.weight.data.fill_(1.0)
m.bias.data.fill_(1e-4)
def feature_mask(self, x, proposals):
masks = []
for proposal in proposals:
segmentation_masks = proposal.get_field("masks")
boxes = proposal.bbox.to(torch.device("cpu"))
for segmentation_mask, box in zip(segmentation_masks, boxes):
cropped_mask = segmentation_mask.crop(box)
scaled_mask = cropped_mask.resize((self.cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION, self.cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION))
mask = scaled_mask.convert(mode="mask")
masks.append(mask)
if len(masks) == 0:
if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'CAT':
x = cat([x, torch.ones((x.shape[0], 1, x.shape[2], x.shape[3]), device=x.device)], dim=1)
return x
masks = torch.stack(masks, dim=0).to(x.device, dtype=torch.float32)
if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'CAT':
x = cat([x, masks.unsqueeze(1)], dim=1)
return x
if self.cfg.MODEL.ROI_BOX_HEAD.MIX_OPTION == 'ATTENTION':
# x_cat = cat([x, masks.unsqueeze(1)], dim=1)
# attention = self.attention(x_cat)
# x = x * attention
return x
soft_ratio = self.cfg.MODEL.ROI_BOX_HEAD.SOFT_MASKED_FEATURE_RATIO
if soft_ratio > 0:
if soft_ratio < 1.0:
x = x * (soft_ratio + (1 - soft_ratio) * masks.unsqueeze(1))
else:
x = x * (1.0 + soft_ratio * masks.unsqueeze(1))
else:
x = x * masks.unsqueeze(1)
return x
def forward(self, x, proposals):
x = self.pooler(x, proposals)
if self.cfg.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE:
x = self.feature_mask(x, proposals)
x = x.view(x.size(0), -1)
x = F.relu(self.fc6(x))
x = F.relu(self.fc7(x))
return x
_ROI_BOX_FEATURE_EXTRACTORS = {
"ResNet50Conv5ROIFeatureExtractor": ResNet50Conv5ROIFeatureExtractor,
"FPN2MLPFeatureExtractor": FPN2MLPFeatureExtractor,
}
def make_roi_box_feature_extractor(cfg):
func = _ROI_BOX_FEATURE_EXTRACTORS[cfg.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR]
return func(cfg)
| 6,713 | 38.263158 | 145 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .inference import make_roi_box_post_processor
from .loss import make_roi_box_loss_evaluator
from .roi_box_feature_extractors import make_roi_box_feature_extractor
from .roi_box_predictors import make_roi_box_predictor
class ROIBoxHead(torch.nn.Module):
"""
Generic Box Head class.
"""
def __init__(self, cfg):
super(ROIBoxHead, self).__init__()
self.cfg = cfg
self.feature_extractor = make_roi_box_feature_extractor(cfg)
self.predictor = make_roi_box_predictor(cfg)
self.post_processor = make_roi_box_post_processor(cfg)
self.loss_evaluator = make_roi_box_loss_evaluator(cfg)
def forward(self, features, proposals, targets=None):
"""
Arguments:
features (list[Tensor]): feature-maps from possibly several levels
proposals (list[BoxList]): proposal boxes
targets (list[BoxList], optional): the ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the subsampled proposals
are returned. During testing, the predicted boxlists are returned
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
if self.training:
# Faster R-CNN subsamples during training the proposals with a fixed
# positive / negative ratio
with torch.no_grad():
proposals = self.loss_evaluator.subsample(proposals, targets)
# extract features that will be fed to the final classifier. The
# feature_extractor generally corresponds to the pooler + heads
x = self.feature_extractor(features, proposals)
# final classifier that converts the features into predictions
class_logits, box_regression = self.predictor(x)
if not self.training:
if self.cfg.MODEL.ROI_BOX_HEAD.INFERENCE_USE_BOX:
result = self.post_processor((class_logits, box_regression), proposals)
# print(result[0].get_field('masks'))
return x, result, {}
else:
return x, proposals, {}
loss_classifier, loss_box_reg = self.loss_evaluator(
[class_logits], [box_regression]
)
if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
return (
x,
proposals,
dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg),
)
else:
return (
x,
proposals,
dict(loss_classifier=loss_classifier),
)
def build_roi_box_head(cfg):
"""
Constructs a new box head.
By default, uses ROIBoxHead,
but if it turns out not to be enough, just register a new class
and make it a parameter in the config
"""
return ROIBoxHead(cfg)
| 3,104 | 35.529412 | 87 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler,
)
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.modeling.utils import cat
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from torch.nn import functional as F
class FastRCNNLossComputation(object):
"""
Computes the loss for Faster R-CNN.
Also supports FPN
"""
def __init__(self, proposal_matcher, fg_bg_sampler, box_coder, cfg=None):
"""
Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder)
"""
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder
self.cfg = cfg
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Fast RCNN only need "labels" field for selecting the targets
target = target.copy_with_fields("labels")
# get the targets corresponding GT for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, proposals, targets):
labels = []
regression_targets = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
labels_per_image = matched_targets.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_inds] = 0
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[ignore_inds] = -1 # -1 is ignored by sampler
# compute regression targets
regression_targets_per_image = self.box_coder.encode(
matched_targets.bbox, proposals_per_image.bbox
)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
return labels, regression_targets
def subsample(self, proposals, targets):
"""
This method performs the positive/negative sampling, and return
the sampled proposals.
Note: this function keeps a state.
Arguments:
proposals (list[BoxList])
targets (list[BoxList])
"""
labels, regression_targets = self.prepare_targets(proposals, targets)
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
# print('sampled_pos_inds:', sampled_pos_inds[0].sum())
# print('sampled_neg_inds:', sampled_neg_inds[0].sum())
proposals = list(proposals)
# add corresponding label and
# regression_targets information to the bounding boxes
for labels_per_image, regression_targets_per_image, proposals_per_image in zip(
labels, regression_targets, proposals
):
proposals_per_image.add_field("labels", labels_per_image)
proposals_per_image.add_field(
"regression_targets", regression_targets_per_image
)
        # keep, for each image, only the proposals that were selected by the
        # fg_bg_sampler (both positive and negative samples)
for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
zip(sampled_pos_inds, sampled_neg_inds)
):
img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
proposals_per_image = proposals[img_idx][img_sampled_inds]
proposals[img_idx] = proposals_per_image
self._proposals = proposals
return proposals
def __call__(self, class_logits, box_regression):
"""
Computes the loss for Faster R-CNN.
This requires that the subsample method has been called beforehand.
Arguments:
class_logits (list[Tensor])
box_regression (list[Tensor])
Returns:
classification_loss (Tensor)
box_loss (Tensor)
"""
class_logits = cat(class_logits, dim=0)
if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
box_regression = cat(box_regression, dim=0)
device = class_logits.device
if not hasattr(self, "_proposals"):
raise RuntimeError("subsample needs to be called before")
proposals = self._proposals
labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
regression_targets = cat(
[proposal.get_field("regression_targets") for proposal in proposals], dim=0
)
classification_loss = F.cross_entropy(class_logits, labels)
if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
# get indices that correspond to the regression targets for
# the corresponding ground truth labels, to be used with
# advanced indexing
sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
labels_pos = labels[sampled_pos_inds_subset]
map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3], device=device)
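            # Illustrative note (added): box_regression holds one 4-d delta per
            # class, so a positive sample with label j uses columns
            # [4j, 4j+1, 4j+2, 4j+3]; e.g. label 1 selects columns 4..7.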
box_loss = smooth_l1_loss(
box_regression[sampled_pos_inds_subset[:, None], map_inds],
regression_targets[sampled_pos_inds_subset],
size_average=False,
beta=1,
)
box_loss = box_loss / labels.numel()
else:
box_loss = 0
return classification_loss, box_loss
def make_roi_box_loss_evaluator(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
fg_bg_sampler = BalancedPositiveNegativeSampler(
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
)
loss_evaluator = FastRCNNLossComputation(matcher, fg_bg_sampler, box_coder, cfg)
return loss_evaluator
| 7,135 | 37.572973 | 91 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
class FastRCNNPredictor(nn.Module):
def __init__(self, config, pretrained=None):
super(FastRCNNPredictor, self).__init__()
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = config.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
self.avgpool = nn.AvgPool2d(kernel_size=7, stride=7)
self.cls_score = nn.Linear(num_inputs, num_classes)
self.bbox_pred = nn.Linear(num_inputs, num_classes * 4)
nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
nn.init.constant_(self.cls_score.bias, 0)
nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
nn.init.constant_(self.bbox_pred.bias, 0)
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
cls_logit = self.cls_score(x)
bbox_pred = self.bbox_pred(x)
return cls_logit, bbox_pred
class FPNPredictor(nn.Module):
def __init__(self, cfg):
super(FPNPredictor, self).__init__()
self.cfg = cfg
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
self.cls_score = nn.Linear(representation_size, num_classes)
if cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
self.bbox_pred = nn.Linear(representation_size, num_classes * 4)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
nn.init.constant_(self.bbox_pred.bias, 0)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.constant_(self.cls_score.bias, 0)
# nn.init.normal_(self.cls_score.weight, std=0.01)
# nn.init.normal_(self.bbox_pred.weight, std=0.001)
# for l in [self.cls_score, self.bbox_pred]:
# nn.init.constant_(l.bias, 0)
def forward(self, x):
scores = self.cls_score(x)
if self.cfg.MODEL.ROI_BOX_HEAD.USE_REGRESSION:
bbox_deltas = self.bbox_pred(x)
else:
bbox_deltas = None
return scores, bbox_deltas
_ROI_BOX_PREDICTOR = {
"FastRCNNPredictor": FastRCNNPredictor,
"FPNPredictor": FPNPredictor,
}
def make_roi_box_predictor(cfg):
func = _ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
return func(cfg)
| 2,501 | 33.273973 | 76 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/structures/image_list.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
and storing in a field the original sizes of each image
"""
def __init__(self, tensors, image_sizes):
"""
Arguments:
tensors (tensor)
image_sizes (list[tuple[int, int]])
"""
self.tensors = tensors
self.image_sizes = image_sizes
def to(self, *args, **kwargs):
cast_tensor = self.tensors.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
def get_sizes(self):
return self.image_sizes
def to_image_list(tensors, size_divisible=0):
"""
tensors can be an ImageList, a torch.Tensor or
an iterable of Tensors. It can't be a numpy array.
When tensors is an iterable of Tensors, it pads
the Tensors with zeros so that they have the same
shape
"""
if isinstance(tensors, torch.Tensor) and size_divisible > 0:
tensors = [tensors]
if isinstance(tensors, ImageList):
return tensors
elif isinstance(tensors, torch.Tensor):
# single tensor shape can be inferred
assert tensors.dim() == 4
image_sizes = [tensor.shape[-2:] for tensor in tensors]
return ImageList(tensors, image_sizes)
elif isinstance(tensors, (tuple, list)):
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
        # TODO Ideally, just remove this and let the model handle arbitrary
        # input sizes
if size_divisible > 0:
import math
stride = size_divisible
max_size = list(max_size)
max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
max_size = tuple(max_size)
batch_shape = (len(tensors),) + max_size
batched_imgs = tensors[0].new(*batch_shape).zero_()
for img, pad_img in zip(tensors, batched_imgs):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
image_sizes = [im.shape[-2:] for im in tensors]
return ImageList(batched_imgs, image_sizes)
else:
raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
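# Worked example (added for illustration): two CHW tensors of shapes
# (3, 480, 640) and (3, 500, 600) with size_divisible=32 are zero-padded into a
# single batch of shape (2, 3, 512, 640), while image_sizes keeps the original
# (H, W) pairs [(480, 640), (500, 600)] so the padding can be ignored later.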
def to_image_target_list(tensors, size_divisible=0, targets=None):
"""
tensors can be an ImageList, a torch.Tensor or
an iterable of Tensors. It can't be a numpy array.
When tensors is an iterable of Tensors, it pads
the Tensors with zeros so that they have the same
shape
"""
if isinstance(tensors, torch.Tensor) and size_divisible > 0:
tensors = [tensors]
if isinstance(tensors, ImageList):
return tensors
elif isinstance(tensors, torch.Tensor):
# single tensor shape can be inferred
assert tensors.dim() == 4
image_sizes = [tensor.shape[-2:] for tensor in tensors]
return ImageList(tensors, image_sizes)
elif isinstance(tensors, (tuple, list)):
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
        # TODO Ideally, just remove this and let the model handle arbitrary
        # input sizes
if size_divisible > 0:
import math
stride = size_divisible
max_size = list(max_size)
max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
max_size = tuple(max_size)
batch_shape = (len(tensors),) + max_size
batched_imgs = tensors[0].new(*batch_shape).zero_()
if targets is None:
for img, pad_img in zip(tensors, batched_imgs):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
else:
for img, pad_img, target in zip(tensors, batched_imgs, targets):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
if target is not None:
target.set_size((pad_img.shape[2], pad_img.shape[1]))
image_sizes = [im.shape[-2:] for im in tensors]
return ImageList(batched_imgs, image_sizes), targets
else:
raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors))) | 4,459 | 35.859504 | 87 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/structures/segmentation_mask.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import cv2
import numpy as np
import pycocotools.mask as mask_utils
import torch
from maskrcnn_benchmark.utils.chars import char2num
import pyclipper
# from PIL import Image
from shapely import affinity
from shapely.geometry import Polygon as ShapePolygon
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
def convert_2d_tuple(t):
a = []
for i in t:
a.extend(list(i))
return a
class Mask(object):
"""
This class is unfinished and not meant for use yet
It is supposed to contain the mask for an object as
a 2d tensor
"""
def __init__(self, masks, size, mode):
self.masks = masks
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
# idx = 2
elif method == FLIP_TOP_BOTTOM:
dim = height
# idx = 1
flip_idx = list(range(dim)[::-1])
flipped_masks = self.masks.index_select(dim, flip_idx)
return Mask(flipped_masks, self.size, self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
cropped_masks = self.masks[:, box[1] : box[3], box[0] : box[2]]
return Mask(cropped_masks, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
pass
class SegmentationMask(object):
"""
This class stores the segmentations for all objects in the image
"""
def __init__(self, polygons, size, mode=None):
"""
Arguments:
            polygons: a list of lists of lists of numbers. The first
                level of the list corresponds to individual instances,
                the second level to all the polygons that compose the
                object, and the third level to the polygon coordinates.
"""
assert isinstance(polygons, list)
self.polygons = [Polygons(p, size, mode) for p in polygons]
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped = []
for polygon in self.polygons:
flipped.append(polygon.transpose(method))
return SegmentationMask(flipped, size=self.size, mode=self.mode)
def crop(self, box, keep_ind=None):
w, h = box[2] - box[0], box[3] - box[1]
if keep_ind is not None:
self.polygons = np.array(self.polygons)
self.polygons = self.polygons[keep_ind]
cropped = []
for polygon in self.polygons:
cropped.append(polygon.crop(box))
return SegmentationMask(cropped, size=(w, h), mode=self.mode)
def rotate(self, angle, r_c, start_h, start_w):
rotated = []
for polygon in self.polygons:
rotated.append(polygon.rotate(angle, r_c, start_h, start_w))
return SegmentationMask(rotated, size=(r_c[0] * 2, r_c[1] * 2), mode=self.mode)
def resize(self, size, *args, **kwargs):
scaled = []
for polygon in self.polygons:
scaled.append(polygon.resize(size, *args, **kwargs))
return SegmentationMask(scaled, size=size, mode=self.mode)
def set_size(self, size):
self.size = size
for polygon in self.polygons:
polygon.set_size(size)
def to(self, *args, **kwargs):
return self
def __getitem__(self, item):
if isinstance(item, (int, slice)):
selected_polygons = [self.polygons[item]]
else:
# advanced indexing on a single dimension
selected_polygons = []
if isinstance(item, torch.Tensor) and item.dtype == torch.bool:
item = item.nonzero()
item = item.squeeze(1) if item.numel() > 0 else item
item = item.tolist()
for i in item:
selected_polygons.append(self.polygons[i])
return SegmentationMask(selected_polygons, size=self.size, mode=self.mode)
def __iter__(self):
return iter(self.polygons)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={})".format(self.size[1])
return s
def size(self):
return self.size
def get_polygons(self):
return self.polygons
def to_np_polygon(self):
np_polygons = []
for polygon in self.polygons:
polys = polygon.get_polygons()
for poly in polys:
np_poly = poly.numpy()
np_polygons.append(np_poly)
return np_polygons
def convert_seg_map(self, labels, shrink_ratio, seg_size, ignore_difficult=True):
# width, height = self.size
# assert self.size[0] == seg_size[1]
# assert self.size[1] == seg_size[0]
height, width = seg_size[0], seg_size[1]
seg_map = np.zeros((1, height, width), dtype=np.uint8)
training_mask = np.ones((height, width), dtype=np.uint8)
for poly, label in zip(self.polygons, labels):
poly = poly.get_polygons()[0]
poly = poly.reshape((-1, 2)).numpy()
if ignore_difficult and label.item() == -1:
cv2.fillPoly(training_mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
continue
if poly.shape[0] < 4:
continue
p = ShapePolygon(poly)
if p.length == 0:
continue
try:
d = p.area * (1 - np.power(shrink_ratio, 2)) / p.length
            except Exception:
continue
subj = [tuple(s) for s in poly]
pco = pyclipper.PyclipperOffset()
pco.AddPath(subj, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
s = pco.Execute(-d)
if s == []:
cv2.fillPoly(training_mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
continue
out = convert_2d_tuple(s[0])
out = np.array(out).reshape(-1, 2)
cv2.fillPoly(seg_map[0, :, :], [out.astype(np.int32)], 1)
return seg_map, training_mask
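    # Explanatory note (added): the shrink distance above follows the
    # clipping-based label generation used by DB-style text detectors:
    # d = Area * (1 - shrink_ratio**2) / Perimeter. For a 100x100 square with
    # shrink_ratio=0.4, d = 10000 * 0.84 / 400 = 21 px; pyclipper then offsets
    # the polygon inward by d before it is rasterized with cv2.fillPoly.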
class Polygons(object):
"""
This class holds a set of polygons that represents a single instance
of an object mask. The object can be represented as a set of
polygons
"""
def __init__(self, polygons, size, mode):
# assert isinstance(polygons, list), '{}'.format(polygons)
if isinstance(polygons, list):
polygons = [torch.as_tensor(p, dtype=torch.float32) for p in polygons]
elif isinstance(polygons, Polygons):
polygons = polygons.polygons
self.polygons = polygons
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped_polygons = []
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 0
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
for poly in self.polygons:
p = poly.clone()
TO_REMOVE = 1
p[idx::2] = dim - poly[idx::2] - TO_REMOVE
flipped_polygons.append(p)
return Polygons(flipped_polygons, size=self.size, mode=self.mode)
def rotate(self, angle, r_c, start_h, start_w):
poly = self.polygons[0].numpy().reshape(-1, 2)
poly[:, 0] += start_w
poly[:, 1] += start_h
polys = ShapePolygon(poly)
r_polys = list(affinity.rotate(polys, angle, r_c).boundary.coords[:-1])
p = []
for r in r_polys:
p += list(r)
return Polygons([p], size=self.size, mode=self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
        # TODO check if necessary
w = max(w, 1)
h = max(h, 1)
cropped_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] = p[0::2] - box[0] # .clamp(min=0, max=w)
p[1::2] = p[1::2] - box[1] # .clamp(min=0, max=h)
cropped_polygons.append(p)
return Polygons(cropped_polygons, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_polys = [p * ratio for p in self.polygons]
return Polygons(scaled_polys, size, mode=self.mode)
ratio_w, ratio_h = ratios
scaled_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] *= ratio_w
p[1::2] *= ratio_h
scaled_polygons.append(p)
return Polygons(scaled_polygons, size=size, mode=self.mode)
def convert(self, mode):
width, height = self.size
if mode == "mask":
# print([p.numpy() for p in self.polygons])
try:
rles = mask_utils.frPyObjects(
[p.numpy() for p in self.polygons], height, width
)
            except Exception:
print([p.numpy() for p in self.polygons])
mask = torch.ones((height, width), dtype=torch.uint8)
return mask
rle = mask_utils.merge(rles)
mask = mask_utils.decode(rle)
mask = torch.from_numpy(mask)
# TODO add squeeze?
return mask
def set_size(self, size):
self.size = size
def get_polygons(self):
return self.polygons
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_polygons={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
class CharPolygons(object):
"""
This class holds a set of polygons that represents a single instance
of an object mask. The object can be represented as a set of
polygons
"""
def __init__(
self,
char_boxes,
word=None,
use_char_ann=False,
char_classes=None,
size=None,
mode=None,
char_num_classes=37,
):
if isinstance(char_boxes, CharPolygons):
if char_classes is None:
char_classes = char_boxes.char_classes
self.word = char_boxes.word
char_boxes = char_boxes.char_boxes
else:
if char_classes is None:
char_classes = [
torch.as_tensor(p[8], dtype=torch.float32) for p in char_boxes
]
char_boxes = [
torch.as_tensor(p[:8], dtype=torch.float32) for p in char_boxes
]
self.word = word
self.char_boxes = char_boxes
self.char_classes = char_classes
self.size = size
self.mode = mode
self.use_char_ann = use_char_ann
self.char_num_classes = char_num_classes
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped_polygons = []
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 0
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
for char_box in self.char_boxes:
p = char_box.clone()
TO_REMOVE = 1
p[idx::2] = dim - char_box[idx::2] - TO_REMOVE
flipped_polygons.append(p)
return CharPolygons(
flipped_polygons,
word=self.word,
use_char_ann=self.use_char_ann,
char_classes=self.char_classes,
size=self.size,
mode=self.mode,
char_num_classes=self.char_num_classes,
)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
        # TODO check if necessary
w = max(w, 1)
h = max(h, 1)
cropped_polygons = []
for char_box in self.char_boxes:
p = char_box.clone()
p[0::2] = p[0::2] - box[0] # .clamp(min=0, max=w)
p[1::2] = p[1::2] - box[1] # .clamp(min=0, max=h)
cropped_polygons.append(p)
return CharPolygons(
cropped_polygons,
word=self.word,
use_char_ann=self.use_char_ann,
char_classes=self.char_classes,
size=(w, h),
mode=self.mode,
char_num_classes=self.char_num_classes,
)
def rotate(self, angle, r_c, start_h, start_w):
r_polys = []
for poly in self.char_boxes:
poly = poly.numpy()
poly[0::2] += start_w
poly[1::2] += start_h
poly = ShapePolygon(np.array(poly).reshape(4, 2))
r_poly = np.array(
list(affinity.rotate(poly, angle, r_c).boundary.coords[:-1])
).reshape(-1, 8)
r_polys.append(r_poly[0])
return CharPolygons(
r_polys,
word=self.word,
use_char_ann=self.use_char_ann,
char_classes=self.char_classes,
size=(r_c[0] * 2, r_c[1] * 2),
mode=self.mode,
char_num_classes=self.char_num_classes,
)
def resize(self, size, *args, **kwargs):
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_polys = [p * ratio for p in self.char_boxes]
return CharPolygons(
scaled_polys,
word=self.word,
use_char_ann=self.use_char_ann,
char_classes=self.char_classes,
size=size,
mode=self.mode,
char_num_classes=self.char_num_classes,
)
ratio_w, ratio_h = ratios
scaled_polygons = []
for poly in self.char_boxes:
p = poly.clone()
p[0::2] *= ratio_w
p[1::2] *= ratio_h
scaled_polygons.append(p)
return CharPolygons(
scaled_polygons,
word=self.word,
use_char_ann=self.use_char_ann,
char_classes=self.char_classes,
size=size,
mode=self.mode,
char_num_classes=self.char_num_classes,
)
def set_size(self, size):
self.size = size
def convert(self, mode):
width, height = self.size
if mode == "char_mask":
if not self.use_char_ann:
char_map = -np.ones((height, width))
char_map_weight = np.zeros((self.char_num_classes,))
else:
char_map = np.zeros((height, width))
char_map_weight = np.ones((self.char_num_classes,))
for i, p in enumerate(self.char_boxes):
poly = p.numpy().reshape(4, 2)
poly = shrink_poly(poly, 0.25)
cv2.fillPoly(
char_map, [poly.astype(np.int32)], int(self.char_classes[i])
)
pos_index = np.where(char_map > 0)
pos_num = pos_index[0].size
if pos_num > 0:
pos_weight = 1.0 * (height * width - pos_num) / pos_num
char_map_weight[1:] = pos_weight
return torch.from_numpy(char_map), torch.from_numpy(char_map_weight)
elif mode == "seq_char_mask":
decoder_target = self.char_num_classes * np.ones((32,))
word_target = -np.ones((32,))
if not self.use_char_ann:
char_map = -np.ones((height, width))
char_map_weight = np.zeros((self.char_num_classes,))
for i, char in enumerate(self.word):
if i > 31:
break
decoder_target[i] = char2num(char)
word_target[i] = char2num(char)
end_point = min(max(1, len(self.word)), 31)
word_target[end_point] = self.char_num_classes
else:
char_map = np.zeros((height, width))
char_map_weight = np.ones((self.char_num_classes,))
word_length = 0
for i, p in enumerate(self.char_boxes):
poly = p.numpy().reshape(4, 2)
if i < 32:
decoder_target[i] = int(self.char_classes[i])
word_target[i] = int(self.char_classes[i])
word_length += 1
poly = shrink_poly(poly, 0.25)
cv2.fillPoly(
char_map, [poly.astype(np.int32)], int(self.char_classes[i])
)
end_point = min(max(1, word_length), 31)
word_target[end_point] = self.char_num_classes
pos_index = np.where(char_map > 0)
pos_num = pos_index[0].size
if pos_num > 0:
pos_weight = 1.0 * (height * width - pos_num) / pos_num
char_map_weight[1:] = pos_weight
return (
torch.from_numpy(char_map),
torch.from_numpy(char_map_weight),
torch.from_numpy(decoder_target),
torch.from_numpy(word_target),
)
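    # Explanatory note (added): char_map_weight rebalances the sparse character
    # pixels against the dominant background: pos_weight = (H*W - P) / P for P
    # labelled pixels, e.g. a 32x32 map with 64 character pixels gives the
    # character classes a weight of (1024 - 64) / 64 = 15.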
def creat_color_map(self, n_class, width):
splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
maps = []
for i in range(splits):
r = int(i * width * 1.0 / (splits - 1))
for j in range(splits):
g = int(j * width * 1.0 / (splits - 1))
for k in range(splits - 1):
b = int(k * width * 1.0 / (splits - 1))
maps.append([r, g, b])
return np.array(maps)
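    # Illustrative note (added): splits = ceil(n_class ** (1/3)), so for
    # n_class=37 this gives splits=4 and 4 * 4 * 3 = 48 distinct RGB triples,
    # enough to assign each character class its own colour for visualization.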
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_char_boxes={}, ".format(len(self.char_boxes))
s += "num_char_classes={}, ".format(len(self.char_classes))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
class SegmentationCharMask(object):
def __init__(
self, chars_boxes, words=None, use_char_ann=True, size=None, mode=None, char_num_classes=37
):
# self.chars_boxes=[CharPolygons(char_boxes, word=word, use_char_ann=use_char_ann, size=size, mode=mode) for char_boxes, word in zip(chars_boxes, words)]
if words is None:
self.chars_boxes = [
CharPolygons(
char_boxes,
word=None,
use_char_ann=use_char_ann,
size=size,
mode=mode,
char_num_classes=char_num_classes,
)
for char_boxes in chars_boxes
]
else:
self.chars_boxes = [
CharPolygons(
char_boxes,
word=words[i],
use_char_ann=use_char_ann,
size=size,
mode=mode,
char_num_classes=char_num_classes,
)
for i, char_boxes in enumerate(chars_boxes)
]
self.size = size
self.mode = mode
self.use_char_ann = use_char_ann
self.char_num_classes = char_num_classes
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped = []
for char_boxes in self.chars_boxes:
flipped.append(char_boxes.transpose(method))
return SegmentationCharMask(
flipped, use_char_ann=self.use_char_ann, size=self.size, mode=self.mode, char_num_classes=self.char_num_classes
)
def crop(self, box, keep_ind):
cropped = []
w, h = box[2] - box[0], box[3] - box[1]
if keep_ind is not None:
self.chars_boxes = np.array(self.chars_boxes)
self.chars_boxes = self.chars_boxes[keep_ind]
for char_boxes in self.chars_boxes:
cropped.append(char_boxes.crop(box))
return SegmentationCharMask(
cropped, use_char_ann=self.use_char_ann, size=(w, h), mode=self.mode
)
def resize(self, size, *args, **kwargs):
scaled = []
for char_boxes in self.chars_boxes:
scaled.append(char_boxes.resize(size, *args, **kwargs))
return SegmentationCharMask(
scaled, use_char_ann=self.use_char_ann, size=size, mode=self.mode, char_num_classes=self.char_num_classes
)
def set_size(self, size):
self.size = size
for char_box in self.chars_boxes:
char_box.set_size(size)
def rotate(self, angle, r_c, start_h, start_w):
rotated = []
for char_boxes in self.chars_boxes:
rotated.append(char_boxes.rotate(angle, r_c, start_h, start_w))
return SegmentationCharMask(
rotated,
use_char_ann=self.use_char_ann,
size=(r_c[0] * 2, r_c[1] * 2),
mode=self.mode,
char_num_classes=self.char_num_classes,
)
def __iter__(self):
return iter(self.chars_boxes)
def __getitem__(self, item):
if isinstance(item, (int, slice)):
selected_chars_boxes = [self.chars_boxes[item]]
else:
# advanced indexing on a single dimension
selected_chars_boxes = []
if isinstance(item, torch.Tensor) and item.dtype == torch.bool:
item = item.nonzero()
item = item.squeeze(1) if item.numel() > 0 else item
item = item.tolist()
for i in item:
if i >= len(self.chars_boxes):
                    raise IndexError(
                        "chars_boxes index {} out of range (len={})".format(
                            i, len(self.chars_boxes)
                        )
                    )
selected_chars_boxes.append(self.chars_boxes[i])
return SegmentationCharMask(
selected_chars_boxes,
use_char_ann=self.use_char_ann,
size=self.size,
mode=self.mode,
char_num_classes=self.char_num_classes,
)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_char_boxes={}, ".format(len(self.chars_boxes))
s += "image_width={}, ".format(self.size[0])
s += "image_height={})".format(self.size[1])
return s
def shrink_poly(poly, shrink):
# shrink ratio
R = shrink
r = [None, None, None, None]
for i in range(4):
r[i] = min(
np.linalg.norm(poly[i] - poly[(i + 1) % 4]),
np.linalg.norm(poly[i] - poly[(i - 1) % 4]),
)
# find the longer pair
if np.linalg.norm(poly[0] - poly[1]) + np.linalg.norm(
poly[2] - poly[3]
) > np.linalg.norm(poly[0] - poly[3]) + np.linalg.norm(poly[1] - poly[2]):
# first move (p0, p1), (p2, p3), then (p0, p3), (p1, p2)
## p0, p1
theta = np.arctan2((poly[1][1] - poly[0][1]), (poly[1][0] - poly[0][0]))
poly[0][0] += R * r[0] * np.cos(theta)
poly[0][1] += R * r[0] * np.sin(theta)
poly[1][0] -= R * r[1] * np.cos(theta)
poly[1][1] -= R * r[1] * np.sin(theta)
## p2, p3
theta = np.arctan2((poly[2][1] - poly[3][1]), (poly[2][0] - poly[3][0]))
poly[3][0] += R * r[3] * np.cos(theta)
poly[3][1] += R * r[3] * np.sin(theta)
poly[2][0] -= R * r[2] * np.cos(theta)
poly[2][1] -= R * r[2] * np.sin(theta)
## p0, p3
theta = np.arctan2((poly[3][0] - poly[0][0]), (poly[3][1] - poly[0][1]))
poly[0][0] += R * r[0] * np.sin(theta)
poly[0][1] += R * r[0] * np.cos(theta)
poly[3][0] -= R * r[3] * np.sin(theta)
poly[3][1] -= R * r[3] * np.cos(theta)
## p1, p2
theta = np.arctan2((poly[2][0] - poly[1][0]), (poly[2][1] - poly[1][1]))
poly[1][0] += R * r[1] * np.sin(theta)
poly[1][1] += R * r[1] * np.cos(theta)
poly[2][0] -= R * r[2] * np.sin(theta)
poly[2][1] -= R * r[2] * np.cos(theta)
else:
## p0, p3
# print poly
theta = np.arctan2((poly[3][0] - poly[0][0]), (poly[3][1] - poly[0][1]))
poly[0][0] += R * r[0] * np.sin(theta)
poly[0][1] += R * r[0] * np.cos(theta)
poly[3][0] -= R * r[3] * np.sin(theta)
poly[3][1] -= R * r[3] * np.cos(theta)
## p1, p2
theta = np.arctan2((poly[2][0] - poly[1][0]), (poly[2][1] - poly[1][1]))
poly[1][0] += R * r[1] * np.sin(theta)
poly[1][1] += R * r[1] * np.cos(theta)
poly[2][0] -= R * r[2] * np.sin(theta)
poly[2][1] -= R * r[2] * np.cos(theta)
## p0, p1
theta = np.arctan2((poly[1][1] - poly[0][1]), (poly[1][0] - poly[0][0]))
poly[0][0] += R * r[0] * np.cos(theta)
poly[0][1] += R * r[0] * np.sin(theta)
poly[1][0] -= R * r[1] * np.cos(theta)
poly[1][1] -= R * r[1] * np.sin(theta)
## p2, p3
theta = np.arctan2((poly[2][1] - poly[3][1]), (poly[2][0] - poly[3][0]))
poly[3][0] += R * r[3] * np.cos(theta)
poly[3][1] += R * r[3] * np.sin(theta)
poly[2][0] -= R * r[2] * np.cos(theta)
poly[2][1] -= R * r[2] * np.sin(theta)
return poly
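# Explanatory note (added): shrink_poly moves each quadrilateral corner inward
# by R * r[i] along both adjacent edges, where r[i] is the length of the
# shorter edge meeting at corner i and the longer edge pair is shrunk first.
# For an axis-aligned 100x20 box with shrink=0.25, every corner moves
# 0.25 * 20 = 5 px inward along each axis.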
def shrink_rect(poly, shrink):
xmin = min(poly[:, 0])
xmax = max(poly[:, 0])
ymin = min(poly[:, 1])
ymax = max(poly[:, 1])
# assert xmax > xmin and ymax > ymin
xc = (xmax + xmin) / 2
yc = (ymax + ymin) / 2
w = xmax - xmin
h = ymax - ymin
sxmin = xc - w / 2 * shrink
sxmax = xc + w / 2 * shrink
symin = yc - h / 2 * shrink
symax = yc + h / 2 * shrink
return np.array([sxmin, symin, sxmax, symin, sxmax, symax, sxmin, symax]).reshape(
(4, 2)
)
def is_poly_inbox(poly, height, width):
min_x = min(poly[:, 0])
min_y = min(poly[:, 1])
max_x = max(poly[:, 0])
max_y = max(poly[:, 1])
if (max_x < 0 and max_y < 0) or (min_x > width and min_y > height):
return False
else:
return True
| 27,175 | 34.431551 | 161 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/structures/bounding_box.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import torch
# from shapely import affinity
# from shapely.geometry import box
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class BoxList(object):
"""
This class represents a set of bounding boxes.
The bounding boxes are represented as a Nx4 Tensor.
    In order to uniquely determine the bounding boxes with respect
to an image, we also store the corresponding image dimensions.
They can contain extra information that is specific to each bounding box, such as
labels.
"""
def __init__(self, bbox, image_size, mode="xyxy", use_char_ann=True, is_fake=False):
device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
if bbox.ndimension() != 2:
raise ValueError(
"bbox should have 2 dimensions, got {}".format(bbox.ndimension())
)
if bbox.size(-1) != 4:
raise ValueError(
"last dimenion of bbox should have a "
"size of 4, got {}".format(bbox.size(-1))
)
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
self.bbox = bbox
self.size = image_size # (image_width, image_height)
self.mode = mode
self.extra_fields = {}
self.use_char_ann = use_char_ann
def set_size(self, size):
self.size = size
bbox = BoxList(
self.bbox, size, mode=self.mode, use_char_ann=self.use_char_ann
)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.set_size(size)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def add_field(self, field, field_data):
self.extra_fields[field] = field_data
def get_field(self, field):
return self.extra_fields[field]
def has_field(self, field):
return field in self.extra_fields
def fields(self):
return list(self.extra_fields.keys())
def _copy_extra_fields(self, bbox):
for k, v in bbox.extra_fields.items():
self.extra_fields[k] = v
def convert(self, mode):
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
# we only have two modes, so don't need to check
# self.mode
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if mode == "xyxy":
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode, use_char_ann=self.use_char_ann)
else:
TO_REMOVE = 1
bbox = torch.cat(
(xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1
)
bbox = BoxList(bbox, self.size, mode=mode, use_char_ann=self.use_char_ann)
bbox._copy_extra_fields(self)
return bbox
def _split_into_xyxy(self):
if self.mode == "xyxy":
xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
return xmin, ymin, xmax, ymax
elif self.mode == "xywh":
TO_REMOVE = 1
xmin, ymin, w, h = self.bbox.split(1, dim=-1)
return (
xmin,
ymin,
xmin + (w - TO_REMOVE).clamp(min=0),
ymin + (h - TO_REMOVE).clamp(min=0),
)
else:
raise RuntimeError("Should not be here")
def resize(self, size, *args, **kwargs):
"""
Returns a resized copy of this bounding box
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
"""
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_box = self.bbox * ratio
bbox = BoxList(
scaled_box, size, mode=self.mode, use_char_ann=self.use_char_ann
)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox
ratio_width, ratio_height = ratios
xmin, ymin, xmax, ymax = self._split_into_xyxy()
scaled_xmin = xmin * ratio_width
scaled_xmax = xmax * ratio_width
scaled_ymin = ymin * ratio_height
scaled_ymax = ymax * ratio_height
scaled_box = torch.cat(
(scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1
)
bbox = BoxList(scaled_box, size, mode="xyxy", use_char_ann=self.use_char_ann)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def poly2box(self, poly):
xmin = min(poly[0::2])
xmax = max(poly[0::2])
ymin = min(poly[1::2])
ymax = max(poly[1::2])
return [xmin, ymin, xmax, ymax]
def rotate(self, angle, r_c, start_h, start_w):
masks = self.extra_fields["masks"]
masks = masks.rotate(angle, r_c, start_h, start_w)
polys = masks.polygons
boxes = []
for poly in polys:
box = self.poly2box(poly.polygons[0].numpy())
boxes.append(box)
self.size = (r_c[0] * 2, r_c[1] * 2)
bbox = BoxList(boxes, self.size, mode="xyxy", use_char_ann=self.use_char_ann)
for k, v in self.extra_fields.items():
if k == "masks":
v = masks
else:
if self.use_char_ann:
if not isinstance(v, torch.Tensor):
v = v.rotate(angle, r_c, start_h, start_w)
else:
if not isinstance(v, torch.Tensor) and k != "char_masks":
v = v.rotate(angle, r_c, start_h, start_w)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def transpose(self, method):
"""
Transpose bounding box (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
"""
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
image_width, image_height = self.size
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if method == FLIP_LEFT_RIGHT:
TO_REMOVE = 1
transposed_xmin = image_width - xmax - TO_REMOVE
transposed_xmax = image_width - xmin - TO_REMOVE
transposed_ymin = ymin
transposed_ymax = ymax
elif method == FLIP_TOP_BOTTOM:
transposed_xmin = xmin
transposed_xmax = xmax
transposed_ymin = image_height - ymax
transposed_ymax = image_height - ymin
transposed_boxes = torch.cat(
(transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
)
bbox = BoxList(
transposed_boxes, self.size, mode="xyxy", use_char_ann=self.use_char_ann
)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.transpose(method)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def crop(self, box):
"""
        Crops a rectangular region from this bounding box. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
"""
xmin, ymin, xmax, ymax = self._split_into_xyxy()
w, h = box[2] - box[0], box[3] - box[1]
cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
keep_ind = None
not_empty = np.where(
(cropped_xmin != cropped_xmax) & (cropped_ymin != cropped_ymax)
)[0]
if len(not_empty) > 0:
keep_ind = not_empty
cropped_box = torch.cat(
(cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1
)
cropped_box = cropped_box[not_empty]
bbox = BoxList(cropped_box, (w, h), mode="xyxy", use_char_ann=self.use_char_ann)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if self.use_char_ann:
if not isinstance(v, torch.Tensor):
v = v.crop(box, keep_ind)
else:
if not isinstance(v, torch.Tensor) and k != "char_masks":
v = v.crop(box, keep_ind)
bbox.add_field(k, v)
return bbox.convert(self.mode)
# Tensor-like methods
def to(self, device):
bbox = BoxList(self.bbox.to(device), self.size, self.mode, self.use_char_ann)
for k, v in self.extra_fields.items():
if hasattr(v, "to"):
v = v.to(device)
bbox.add_field(k, v)
return bbox
def __getitem__(self, item):
bbox = BoxList(self.bbox[item], self.size, self.mode, self.use_char_ann)
for k, v in self.extra_fields.items():
bbox.add_field(k, v[item])
return bbox
def __len__(self):
return self.bbox.shape[0]
def clip_to_image(self, remove_empty=True):
TO_REMOVE = 1
self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
if remove_empty:
box = self.bbox
keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
return self[keep]
return self
def area(self):
TO_REMOVE = 1
box = self.bbox
area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
return area
def copy_with_fields(self, fields):
bbox = BoxList(self.bbox, self.size, self.mode, self.use_char_ann)
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
bbox.add_field(field, self.get_field(field))
return bbox
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_boxes={}, ".format(len(self))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
if __name__ == "__main__":
bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))
s_bbox = bbox.resize((5, 5))
print(s_bbox)
print(s_bbox.bbox)
t_bbox = bbox.transpose(0)
print(t_bbox)
print(t_bbox.bbox)
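    # Additional illustrative checks (added here, not in the original file):
    # clip_to_image clamps coordinates to [0, size - 1] and area() uses the
    # +1 pixel convention of this codebase.
    c_bbox = bbox.clip_to_image(remove_empty=True)
    print(c_bbox.area())  # expected: tensor([100, 36])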
| 11,570 | 35.617089 | 88 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/structures/boxlist_ops.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from maskrcnn_benchmark.layers import nms as _box_nms
from .bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
import numpy as np
import shapely
from shapely.geometry import Polygon,MultiPoint
def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="score"):
"""
Performs non-maximum suppression on a boxlist, with scores specified
in a boxlist field via score_field.
Arguments:
boxlist(BoxList)
nms_thresh (float)
max_proposals (int): if > 0, then only the top max_proposals are kept
            after non-maximum suppression
score_field (str)
"""
if nms_thresh <= 0:
return boxlist
mode = boxlist.mode
boxlist = boxlist.convert("xyxy")
boxes = boxlist.bbox
score = boxlist.get_field(score_field)
keep = _box_nms(boxes, score, nms_thresh)
if max_proposals > 0:
keep = keep[:max_proposals]
boxlist = boxlist[keep]
return boxlist.convert(mode)
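# Illustrative sketch (added for clarity, not in the original file). Wrapped in
# a function so nothing runs at import time; it assumes the compiled `nms`
# extension from maskrcnn_benchmark.layers is available.
def _boxlist_nms_example():
    boxes = BoxList(torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.]]), (20, 20), mode="xyxy")
    boxes.add_field("score", torch.tensor([0.9, 0.8]))
    # the two boxes overlap heavily, so only the higher-scoring one survives
    return boxlist_nms(boxes, nms_thresh=0.5)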
def remove_small_boxes(boxlist, min_size):
"""
Only keep boxes with both sides >= min_size
Arguments:
boxlist (Boxlist)
min_size (int)
"""
# TODO maybe add an API for querying the ws / hs
xywh_boxes = boxlist.convert("xywh").bbox
_, _, ws, hs = xywh_boxes.unbind(dim=1)
keep = ((ws >= min_size) & (hs >= min_size)).nonzero().squeeze(1)
return boxlist[keep]
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def boxlist_iou(boxlist1, boxlist2):
"""Compute the intersection over union of two set of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Arguments:
box1: (BoxList) bounding boxes, sized [N,4].
box2: (BoxList) bounding boxes, sized [M,4].
Returns:
(tensor) iou, sized [N,M].
Reference:
https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
"""
if boxlist1.size != boxlist2.size:
raise RuntimeError(
"boxlists should have same image size, got {}, {}".format(
boxlist1, boxlist2
)
)
# N = len(boxlist1)
# M = len(boxlist2)
area1 = boxlist1.area()
area2 = boxlist2.area()
box1, box2 = boxlist1.bbox, boxlist2.bbox
lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2]
rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2]
TO_REMOVE = 1
wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
iou = inter / (area1[:, None] + area2 - inter)
return iou
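# Illustrative sketch (added for clarity, not in the original file): pairwise
# IoU between one box and two boxes on the same 20x20 image.
def _boxlist_iou_example():
    boxes1 = BoxList(torch.tensor([[0., 0., 9., 9.]]), (20, 20), mode="xyxy")
    boxes2 = BoxList(torch.tensor([[0., 0., 9., 9.], [10., 10., 19., 19.]]), (20, 20), mode="xyxy")
    return boxlist_iou(boxes1, boxes2)  # expected: tensor([[1., 0.]]), shape [1, 2]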
# def boxlist_polygon_iou(target, proposal):
# """Compute the intersection over union of two set of boxes.
# The box order must be (xmin, ymin, xmax, ymax).
# Arguments:
# box1: (BoxList) bounding boxes, sized [N,4].
# box2: (BoxList) bounding boxes, sized [M,4].
# Returns:
# (tensor) iou, sized [N,M].
# Reference:
# https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
# """
# if target.size != proposal.size:
# raise RuntimeError(
# "boxlists should have same image size, got {}, {}".format(
# target, proposal
# )
# )
# target_polygon = target.get_field("masks").to_np_polygon()
# proposal_polygon = proposal.get_field("masks").to_np_polygon()
# print(target_polygon)
# print(proposal_polygon)
# polygon_points1 = target_polygon[0].reshape(-1, 2)
# poly1 = Polygon(polygon_points1).convex_hull
# polygon_points2 = proposal_polygon[0].reshape(-1, 2)
# poly2 = Polygon(polygon_points2).convex_hull
# union_poly = np.concatenate((polygon_points1, polygon_points2))
# if not poly1.intersects(poly2): # this test is fast and can accelerate calculation
# iou = 0
# else:
# try:
# inter_area = poly1.intersection(poly2).area
# #union_area = poly1.area + poly2.area - inter_area
# union_area = MultiPoint(union_poly).convex_hull.area
# if union_area == 0:
# return 0
# iou = float(inter_area) / union_area
# except shapely.geos.TopologicalError:
# print('shapely.geos.TopologicalError occured, iou set to 0')
# iou = 0
# return iou
# TODO redundant, remove
def _cat(tensors, dim=0):
"""
Efficient version of torch.cat
avoids a copy if there is only a single element in a list
"""
assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def _cat_mask(masks):
polygons_cat = []
size = masks[0].size
for mask in masks:
polygons = mask.get_polygons()
polygons_cat.extend(polygons)
masks_cat = SegmentationMask(polygons_cat, size)
return masks_cat
def cat_boxlist(bboxes):
"""
Concatenates a list of BoxList (having the same image size) into a
single BoxList
Arguments:
bboxes (list[BoxList])
"""
# if bboxes is None:
# return None
# if bboxes[0] is None:
# bboxes = [bboxes[1]
assert isinstance(bboxes, (list, tuple))
assert all(isinstance(bbox, BoxList) for bbox in bboxes)
size = bboxes[0].size
assert all(bbox.size == size for bbox in bboxes)
mode = bboxes[0].mode
assert all(bbox.mode == mode for bbox in bboxes)
fields = set(bboxes[0].fields())
assert all(set(bbox.fields()) == fields for bbox in bboxes)
cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode)
for field in fields:
if field == 'masks':
data = _cat_mask([bbox.get_field(field) for bbox in bboxes])
else:
data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0)
cat_boxes.add_field(field, data)
return cat_boxes
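# Illustrative sketch (added for clarity, not in the original file): two
# field-free BoxLists on the same image concatenate into a single list.
def _cat_boxlist_example():
    a = BoxList(torch.tensor([[0., 0., 5., 5.]]), (10, 10), mode="xyxy")
    b = BoxList(torch.tensor([[2., 2., 8., 8.]]), (10, 10), mode="xyxy")
    return cat_boxlist([a, b])  # BoxList with num_boxes=2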
def cat_boxlist_gt(bboxes):
"""
Concatenates a list of BoxList (having the same image size) into a
single BoxList
Arguments:
bboxes (list[BoxList])
"""
assert isinstance(bboxes, (list, tuple))
assert all(isinstance(bbox, BoxList) for bbox in bboxes)
size = bboxes[0].size
# bboxes[1].set_size(size)
assert all(bbox.size == size for bbox in bboxes)
mode = bboxes[0].mode
assert all(bbox.mode == mode for bbox in bboxes)
fields = set(bboxes[0].fields())
assert all(set(bbox.fields()) == fields for bbox in bboxes)
if bboxes[0].bbox.sum().item() == 0:
cat_boxes = BoxList(bboxes[1].bbox, size, mode)
else:
cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode)
for field in fields:
if bboxes[0].bbox.sum().item() == 0:
if field == 'masks':
data = _cat_mask([bbox.get_field(field) for bbox in bboxes[1:]])
else:
data = _cat([bbox.get_field(field) for bbox in bboxes[1:]], dim=0)
else:
if field == 'masks':
data = _cat_mask([bbox.get_field(field) for bbox in bboxes])
else:
data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0)
cat_boxes.add_field(field, data)
return cat_boxes
| 7,393 | 30.46383 | 90 | py |
HASOC-2021---Hate-Speech-Detection | HASOC-2021---Hate-Speech-Detection-main/main.py | import getopt
import sys
import tensorflow as tf
import os
import json
import numpy as np
import file_utils
from datetime import datetime
import matplotlib.pyplot as plt
import h5py
from bert.tokenization.bert_tokenization import FullTokenizer
from bert import BertModelLayer
from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights
import bert
from sklearn.metrics import recall_score, precision_score, f1_score, average_precision_score, accuracy_score
import data_loader
import models
def prepare_predictions(ids, predictions, labels):
prediction_output = []
binary_predictions = list()
total_expected = {0: 0, 1: 0}
true_positives = {0: 0, 1: 0}
for i in range(0, len(labels)):
predicted_probs = predictions[i]
predicted_class = 1 if predicted_probs[1] >= 0.51 else 0
expected = int(labels[i])
binary_predictions.append(predicted_class)
if expected == predicted_class:
            true_positives[expected] += 1
total_expected[expected] += 1
l = {"id": str(ids[i]), "prediction": str(predicted_class), "label": str(labels[i]), "probs": predicted_probs.tolist()}
prediction_output.append(json.dumps(l))
recall_hate = (true_positives[1] / total_expected[1]) if total_expected[1] > 0 else 0
recall_not_hate = (true_positives[0] / total_expected[0]) if total_expected[0] > 0 else 0
binary_predictions = np.array(binary_predictions)
    # sklearn metrics expect (y_true, y_pred), so pass the gold labels first
    average_precision = average_precision_score(labels, binary_predictions)
    f1 = f1_score(labels, binary_predictions, average='binary')
    f1_weighted = f1_score(labels, binary_predictions, average='weighted')
    macro_f1 = f1_score(labels, binary_predictions, average='macro')
    recall = recall_score(labels, binary_predictions, average='binary')
    precision = precision_score(labels, binary_predictions, average='binary')
    accuracy = accuracy_score(labels, binary_predictions)
score_output = {"accuracy": accuracy, "average_precision":average_precision, "f1":f1, "weighted_f1":f1_weighted, "macro_f1":macro_f1, "recall":recall, "precision":precision,
"HatefulOffensive": {"recall": recall_hate, "support": total_expected[1]},
"NOT": {"recall": recall_not_hate, "support": total_expected[0]}
}
return prediction_output, score_output
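# Illustrative sketch (added for clarity, not in the original file): with two
# perfectly classified examples, accuracy, f1 and both per-class recalls are 1.0.
def _prepare_predictions_example():
    probs = np.array([[0.2, 0.8], [0.9, 0.1]])  # row i holds [P(NOT), P(HOF)]
    return prepare_predictions(["id_1", "id_2"], probs, [1, 0])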
def train(config):
physical_devices = tf.config.experimental.list_physical_devices('GPU')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
# hate word list
hate_words = file_utils.read_file_to_list(config['base_dir'] +'resources/hate_words.txt')
# BERT related configurations
print('Using BERT: {}'.format(config['bert_model_dir']))
bert_ckpt_dir = config['base_dir'] + config['bert_model_dir'] + "/"
bert_check_point_file = bert_ckpt_dir + "bert_model.ckpt"
bert_config_file = bert_ckpt_dir + "bert_config.json"
bert_tokenizer = bert.tokenization.bert_tokenization.FullTokenizer(vocab_file=os.path.join(bert_ckpt_dir, "vocab.txt"))
X_train, y_train, y_train_ids, X_valid, y_valid, y_valid_ids, X_test, y_test, y_test_ids = data_loader.load_dataset(config, bert_tokenizer, hate_words)
print("Training input file shapes")
for k in X_train:
print('\t' + k + " shape: " + str(X_train[k].shape))
print("Validation input file shapes")
for k in X_valid:
print('\t' + k + " shape: " + str(X_valid[k].shape))
print("Test data size", len(y_test_ids))
# folders to save the trained models and results
results_dir_path = config['base_dir'] +'results'
now = datetime.now()
model_dir_path = config['base_dir'] +'results/'+now.strftime("%d-%m-%Y %H:%M:%S").replace(" ", "_")
file_utils.create_folder(results_dir_path)
file_utils.create_folder(model_dir_path)
model_check_point_callback = tf.keras.callbacks.ModelCheckpoint(
model_dir_path + '/best_model-epoch-{epoch:03d}-acc-{acc:03f}-val_acc-{val_acc:03f}.h5',
save_best_only=True,
monitor=config['monitor'])
early_stopping_callback = tf.keras.callbacks.EarlyStopping(patience=config["epoch_patience"],
restore_best_weights=True,
monitor=config['monitor'])
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=model_dir_path + "/logs")
callbacks = [early_stopping_callback]
print('Using GPUs: ' + str(tf.test.is_gpu_available()))
# create the model
model = models.get_model(config, bert_config_file, bert_check_point_file, adapter_size=None)
history = model.fit(X_train, y_train, validation_data=(X_valid, y_valid),
batch_size=config['batch_size'],
shuffle=True,
epochs=config['epochs'],
callbacks=callbacks)
predictions = model.predict(X_test, batch_size=config['batch_size'])
test_predictions, test_score_output = prepare_predictions(y_test_ids, predictions, y_test['output_label'])
print('Test macro-f1: ', test_score_output['macro_f1'])
# save the model
# model.save(model_dir_path + "/model.h5")
# save prediction score, predictions
file_utils.save_string_to_file(json.dumps(test_score_output),
model_dir_path + '/test_prediction_score.json')
file_utils.save_list_to_file(test_predictions, model_dir_path + '/test_predictions.jsonl')
# save the training config
file_utils.save_string_to_file(json.dumps(config),
model_dir_path + '/training_config.json')
N = len(history.epoch)
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(1, N + 1), history.history['loss'], label='loss')
plt.plot(np.arange(1, N + 1), history.history['val_loss'], label='val_loss')
plt.plot(np.arange(1, N + 1), history.history['acc'], label='acc')
plt.plot(np.arange(1, N + 1), history.history['val_acc'], label='val_acc')
plt.title("Validation, Test Loss and Accuracy on HASOC "+config["dataset_year"]+" Dataset, " + config['optimizer'])
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(model_dir_path + "/history.png")
plt.close()
if __name__ == "__main__":
    argv = sys.argv[1:]
config_path = 'config.json'
try:
opts, args = getopt.getopt(argv, "hc:o:")
except getopt.GetoptError:
print('main.py -c <config_path>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('main.py -c <config_path>')
sys.exit()
elif opt == "-c":
config_path = arg
if config_path != '':
with open(config_path) as json_file:
config = json.load(json_file)
train(config)
else:
print('main.py -c <config_path>') | 7,085 | 37.934066 | 179 | py |
HASOC-2021---Hate-Speech-Detection | HASOC-2021---Hate-Speech-Detection-main/data_loader.py | import pandas as pd
import numpy as np
from bert.tokenization.bert_tokenization import FullTokenizer
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import emoji
import re
import random
def replace_digits_emojis(s):
s = s.lower().strip()
s = emoji.demojize(s)
s = re.sub(r'\d+', '', s)
s = re.sub(r'[^\w\s]', '', s)
s = s.strip()
return s
def remove_urls_mentions(text):
text = re.sub(r"(?:\@|https?\://)\S+", "", text)
text = text.replace("RT", "").strip()
return text
def replace_space(text):
text = text.replace("\n", " ").strip()
text = re.sub(r"\s+", ' ', text)
text = text.strip()
return text
def merge_outputs(processed_text):
text = ""
for l in processed_text:
if "</" in l:
l = l.replace("</", "<")
if l in ['<percent>', '<url>', '<', '<number>', '</allcaps>',
'<money>', '<phone>', '<allcaps>', '<repeated>', '<hashtag>',
'<date>', '<time>', '<censored>', '</hashtag>', '<email>']:
continue
elif l in ['<emphasis>', '<user>', '<surprise>', '<laugh>', '<sad>', '<annoyed>', '<happy>']:
if l == '<user>':
continue
else:
text += " " + l
else:
text += " " + replace_digits_emojis(l)
normalized = replace_space(text)
return normalized
def normalize_text(input_text:str, text_preprocessor):
processed_text = text_preprocessor.pre_process_doc(input_text)
normalized_text = merge_outputs(processed_text)
return normalized_text
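# Illustrative sketch (added for clarity, not in the original file): running a
# raw tweet through the pipeline above. Constructing a TextPreProcessor
# downloads Ekphrasis word statistics on first use, so this stays a function.
def _normalize_text_example(text_preprocessor):
    raw = "@user I LOVED it!!! http://t.co/xyz"
    # the mention and URL become tags that merge_outputs drops; digits and
    # emojis are stripped, leaving a clean lowercased string
    return normalize_text(raw, text_preprocessor)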
def sample_validation_set(X, y, ids):
validation_sample_size = int((float(len(ids)) * 0.1)/2)
X_train = {}
y_train = {}
y_train_ids = []
X_valid = {}
y_valid = {}
y_valid_ids = []
sampled_indexes = {0:[], 1:[]}
index_counter = 0
for label in y['output_label']:
if len(sampled_indexes[label]) < validation_sample_size:
sampled_indexes[label].append(index_counter)
        index_counter += 1
for k in X:
data = X[k]
training_data = []
validation_data = []
index_counter = 0
for d in data:
label = y['output_label'][index_counter]
# add to validation split
if index_counter in sampled_indexes[label]:
validation_data.append(d)
else:
training_data.append(d)
            index_counter += 1
X_train[k] = np.array(training_data)
X_valid[k] = np.array(validation_data)
for k in y:
data = y[k]
training_data = []
validation_data = []
index_counter = 0
for d in data:
label = y['output_label'][index_counter]
# add to validation split
if index_counter in sampled_indexes[label]:
validation_data.append(d)
else:
training_data.append(d)
            index_counter += 1
y_train[k] = np.array(training_data)
y_valid[k] = np.array(validation_data)
index_counter = 0
for id in ids:
label = y['output_label'][index_counter]
# add to validation split
if index_counter in sampled_indexes[label]:
y_valid_ids.append(id)
else:
y_train_ids.append(id)
index_counter += 1
return X_train, y_train, y_train_ids, X_valid, y_valid, y_valid_ids
def apply_oversampling(ids, labels, text_docs):
count = {'HOF':0, 'NOT':0}
label_to_ids = {'HOF':[], 'NOT':[]}
c = 0
for l in labels:
        count[l] += 1
        id = ids[c]
        label_to_ids[l].append(id)
        c += 1
oversampled_ids, oversampled_labels, oversampled_text_docs = [], [], []
if count['HOF'] > count['NOT']:
max_label = 'HOF'
min_label = 'NOT'
else:
max_label = 'NOT'
min_label = 'HOF'
label_diff = count[max_label] - count[min_label]
random_ids = random.sample(label_to_ids[min_label], label_diff)
for r in random_ids:
id_index = ids.index(r)
oversampled_ids.append(ids[id_index])
oversampled_labels.append(labels[id_index])
oversampled_text_docs.append(text_docs[id_index])
# add the existing data
oversampled_ids.extend(ids)
oversampled_text_docs.extend(text_docs)
oversampled_labels.extend(labels)
return oversampled_ids, oversampled_labels, oversampled_text_docs
def tokenize(text):
tags = ['<emphasis>', '<user>', '<surprise>', '<percent>', '<url>', '<', '<number>', '</allcaps>', '<money>',
'<phone>', '<allcaps>', '<repeated>', '<laugh>', '<hashtag>', '<elongated>', '<sad>', '<annoyed>',
'<date>', '<time>', '<censored>', '<happy>', '</hashtag>', '<email>']
tokens = text.split(' ')
filtered_tokens = []
for t in tokens:
if t not in tags:
filtered_tokens.append(t)
return filtered_tokens
def pad_text(max_seq_len, token_ids):
token_ids = token_ids[:min(len(token_ids), max_seq_len - 2)]
token_ids = token_ids + [0] * (max_seq_len - len(token_ids))
return np.array(token_ids)
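# Illustrative check (added for clarity, not in the original file): short
# inputs are zero-padded to max_seq_len, long ones truncated to max_seq_len - 2.
def _pad_text_example():
    return pad_text(8, [101, 7, 8, 102])  # expected: array([101, 7, 8, 102, 0, 0, 0, 0])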
def embed_text_with_hate_words(config, data: list, hate_words: list):
x = list()
for text in data:
# tokenize
tokens = text.split(' ')
multihot_encoding_array = np.zeros(len(hate_words), dtype=int)
for t in tokens:
if t in hate_words:
index = hate_words.index(t)
multihot_encoding_array[index] = 1
x.append(multihot_encoding_array)
return np.array(x)
def embed_text_with_bert(config: dict, data: list, bert_tokenizer: FullTokenizer):
x = list()
for text in data:
# tokenize
tokens = bert_tokenizer.tokenize(text)
tokens = ["[CLS]"] + tokens + ["[SEP]"]
# convert tokens into IDs by embedding the text with BERT
token_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
# pad zeros to the token ids, if necessary
max_seq_len = config['tweet_text_seq_len']
token_ids = pad_text(max_seq_len, token_ids)
x.append(token_ids)
return np.array(x)
def embed_text_with_characters(config: dict, data: list):
char_tokenizer = Tokenizer(lower=True, char_level=True, oov_token="UNKNOWN")
alphabet = " abcdefghijklmnopqrstuvwxyz"
char_dict = {"PADDING": 0, "UNKNOWN": 1}
for i, char in enumerate(alphabet):
char_dict[char] = len(char_dict)
char_tokenizer.word_index = char_dict
x = char_tokenizer.texts_to_sequences(data)
x_padded = pad_sequences(x, padding='post', maxlen=config['tweet_text_char_len'])
return x_padded
def normalize_text_docs(text_docs:list, text_preprocessor):
normalized_text_docs = []
for text in text_docs:
normalized_text = normalize_text(text, text_preprocessor)
normalized_text_docs.append(normalized_text)
return normalized_text_docs
def encode_labels(data: list):
y = list()
label_to_index = {"HOF": 1, "NOT": 0}
for label in data:
y.append(label_to_index[label])
return np.array(y)
def load_split(config, df, bert_tokenizer, hate_words, text_preprocessor, oversample:bool):
X, y = {}, {}
ids = df["id"].tolist()
labels = df["label"].tolist()
text_docs = df["text"].tolist()
if oversample:
ids, labels, text_docs = apply_oversampling(ids, labels, text_docs)
if config['normalize_text']:
text_docs = normalize_text_docs(text_docs, text_preprocessor)
if "bert" in config["text_models"]:
X["text_bert"] = embed_text_with_bert(config, text_docs, bert_tokenizer)
if "hate_words" in config["text_models"]:
X["text_hate_words"] = embed_text_with_hate_words(config, text_docs, hate_words)
if "char_emb" in config["text_models"]:
X["text_char_emb"] = embed_text_with_characters(config, text_docs)
y['output_label'] = encode_labels(labels)
return X, y, ids
def load_dataset(config, bert_tokenizer, hate_words):
train_df = pd.read_csv(config['base_dir'] + 'resources/hasoc_data/'+config['dataset_year']+'/train.tsv', sep='\t', header=0)
test_df = pd.read_csv(config['base_dir'] + 'resources/hasoc_data/'+config['dataset_year']+'/test.tsv', sep='\t', header=0)
# load the Ekphrasis preprocessor
text_preprocessor = TextPreProcessor(
# terms that will be normalized
normalize=['url', 'email', 'percent', 'money', 'phone', 'user',
'time', 'url', 'date', 'number'],
# terms that will be annotated
annotate={"hashtag", "allcaps", "elongated", "repeated",
'emphasis', 'censored'},
fix_html=True, # fix HTML tokens
# corpus from which the word statistics are going to be used
# for word segmentation
segmenter="twitter",
# corpus from which the word statistics are going to be used
# for spell correction
corrector="twitter",
unpack_hashtags=True, # perform word segmentation on hashtags
unpack_contractions=True, # Unpack contractions (can't -> can not)
spell_correct_elong=False, # spell correction for elongated words
# select a tokenizer. You can use SocialTokenizer, or pass your own
# the tokenizer, should take as input a string and return a list of tokens
tokenizer=SocialTokenizer(lowercase=True).tokenize,
# list of dictionaries, for replacing tokens extracted from the text,
# with other expressions. You can pass more than one dictionaries.
dicts=[emoticons]
)
X_train, y_train, y_train_ids = load_split(config, train_df, bert_tokenizer, hate_words, text_preprocessor, oversample=config['oversample'])
X_test, y_test, y_test_ids = load_split(config, test_df, bert_tokenizer, hate_words, text_preprocessor, oversample=False)
X_train, y_train, y_train_ids, X_valid, y_valid, y_valid_ids = sample_validation_set(X_train, y_train, y_train_ids)
return X_train, y_train, y_train_ids, X_valid, y_valid, y_valid_ids, X_test, y_test, y_test_ids
| 10,369 | 31.507837 | 144 | py |
HASOC-2021---Hate-Speech-Detection | HASOC-2021---Hate-Speech-Detection-main/models.py | import tensorflow as tf
import numpy as np
from bert import BertModelLayer
from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights
from tensorflow import keras
from tensorflow.keras import layers
class MultiHeadSelfAttention(layers.Layer):
def __init__(self, embed_dim, num_heads=8):
super(MultiHeadSelfAttention, self).__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
if embed_dim % num_heads != 0:
raise ValueError(
f"embedding dimension = {embed_dim} should be divisible by number of heads = {num_heads}"
)
self.projection_dim = embed_dim // num_heads
self.query_dense = layers.Dense(embed_dim)
self.key_dense = layers.Dense(embed_dim)
self.value_dense = layers.Dense(embed_dim)
self.combine_heads = layers.Dense(embed_dim)
def attention(self, query, key, value):
score = tf.matmul(query, key, transpose_b=True)
dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_score = score / tf.math.sqrt(dim_key)
weights = tf.nn.softmax(scaled_score, axis=-1)
output = tf.matmul(weights, value)
return output, weights
def separate_heads(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, inputs):
# x.shape = [batch_size, seq_len, embedding_dim]
batch_size = tf.shape(inputs)[0]
query = self.query_dense(inputs) # (batch_size, seq_len, embed_dim)
key = self.key_dense(inputs) # (batch_size, seq_len, embed_dim)
value = self.value_dense(inputs) # (batch_size, seq_len, embed_dim)
query = self.separate_heads(
query, batch_size
) # (batch_size, num_heads, seq_len, projection_dim)
key = self.separate_heads(
key, batch_size
) # (batch_size, num_heads, seq_len, projection_dim)
value = self.separate_heads(
value, batch_size
) # (batch_size, num_heads, seq_len, projection_dim)
attention, weights = self.attention(query, key, value)
attention = tf.transpose(
attention, perm=[0, 2, 1, 3]
) # (batch_size, seq_len, num_heads, projection_dim)
concat_attention = tf.reshape(
attention, (batch_size, -1, self.embed_dim)
) # (batch_size, seq_len, embed_dim)
output = self.combine_heads(
concat_attention
) # (batch_size, seq_len, embed_dim)
return output
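# Illustrative shape check (added for clarity, not in the original file): the
# layer maps (batch_size, seq_len, embed_dim) back to the same shape.
def _mhsa_shape_example():
    mhsa = MultiHeadSelfAttention(embed_dim=16, num_heads=4)
    x = tf.random.normal((2, 10, 16))
    return mhsa(x).shape  # expected: TensorShape([2, 10, 16])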
class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super(TransformerBlock, self).__init__()
self.att = MultiHeadSelfAttention(embed_dim, num_heads)
self.ffn = keras.Sequential(
[layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),]
)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, inputs, training):
attn_output = self.att(inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def get_config(self):
config = super().get_config().copy()
config.update({
'W1': self.W1,
'W2': self.W2,
'V': self.V
})
return config
def call(self, values, query):
# query hidden state shape == (batch_size, hidden size)
# query_with_time_axis shape == (batch_size, 1, hidden size)
# values shape == (batch_size, max_len, hidden size)
# we are doing this to broadcast addition along the time axis to calculate the score
query_with_time_axis = tf.expand_dims(query, 1)
# score shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying score to self.V
# the shape of the tensor before applying self.V is (batch_size, max_length, units)
score = self.V(tf.nn.tanh(
self.W1(query_with_time_axis) + self.W2(values)))
# attention_weights shape == (batch_size, max_length, 1)
attention_weights = tf.nn.softmax(score, axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * values
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
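# Illustrative shape check (added for clarity, not in the original file):
# attending over 2 sequences of length 5 with hidden size 8.
def _bahdanau_shape_example():
    attn = BahdanauAttention(units=4)
    values = tf.random.normal((2, 5, 8))  # (batch, max_len, hidden)
    query = tf.random.normal((2, 8))      # (batch, hidden)
    context, weights = attn(values, query)
    return context.shape, weights.shape   # expected: (2, 8) and (2, 5, 1)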
def flatten_layers(root_layer):
if isinstance(root_layer, tf.keras.layers.Layer):
yield root_layer
for layer in root_layer._layers:
for sub_layer in flatten_layers(layer):
yield sub_layer
def freeze_all_bert_layers(l_bert):
"""
Freezes all but LayerNorm and adapter layers - see arXiv:1902.00751.
"""
l_bert.trainable = False
l_bert.encoders_layer.trainable = False
for layer in l_bert.submodules:
if layer.name in ["LayerNorm", "adapter-down", "adapter-up"]:
layer.trainable = True
else:
layer.trainable = False
def freeze_bert_layers(l_bert):
"""
Freezes all but LayerNorm and adapter layers - see arXiv:1902.00751.
"""
for layer in flatten_layers(l_bert):
if layer.name in ["LayerNorm", "adapter-down", "adapter-up"]:
layer.trainable = True
# elif len(layer._layers) == 0:
# layer.trainable = False
else:
layer.trainable = False
l_bert.embeddings_layer.trainable = False
def encode_gru_with_attention(config, input):
if config["text_use_attention"]:
gru_forward = tf.keras.layers.GRU(config["rnn_layer_size"], return_sequences=True, return_state=True,
activation='relu')
attention_layer = BahdanauAttention(config["text_attention_size"])
# apply forward GRU, attention
forward_seq, forward_hidden_state = gru_forward(input)
forward_attention_result, forward_attention_weights = attention_layer(forward_seq, forward_hidden_state)
# concatenate attention results
text_encoding = forward_attention_result
else:
gru_forward = tf.keras.layers.GRU(config["rnn_layer_size"], activation='relu')
# apply forward GRU, attention
text_encoding = gru_forward(input)
return text_encoding
def encode_lstm_with_attention(config, input):
if config["text_use_attention"]:
lstm_forward = tf.keras.layers.LSTM(config["rnn_layer_size"], return_sequences=True, return_state=True,
activation='tanh')
attention_layer = BahdanauAttention(config["text_attention_size"])
# apply forward GRU, attention
forward_seq, forward_hidden_state, forward_cell_state = lstm_forward(input)
forward_attention_result, forward_attention_weights = attention_layer(forward_seq, forward_hidden_state)
# concatenate attention results
text_encoding = forward_attention_result
else:
lstm_forward = tf.keras.layers.LSTM(config["rnn_layer_size"])
# apply forward GRU, attention
text_encoding = lstm_forward(input)
return text_encoding
def encode_bigru_with_attention(config, input):
if config["text_use_attention"]:
gru_forward = tf.keras.layers.GRU(config["rnn_layer_size"], return_sequences=True, return_state=True,
activation='tanh')
gru_backward = tf.keras.layers.GRU(config["rnn_layer_size"], go_backwards=True, return_sequences=True,
return_state=True, activation='tanh')
attention_layer = BahdanauAttention(config["text_attention_size"])
# apply forward GRU, attention
forward_seq, forward_hidden_state = gru_forward(input)
forward_attention_result, forward_attention_weights = attention_layer(forward_seq, forward_hidden_state)
# apply backward GRU, attention
backward_seq, backward_hidden_state = gru_backward(input)
backward_attention_result, backward_attention_weights = attention_layer(backward_seq, backward_hidden_state)
# concatenate attention results
text_encoding = tf.keras.layers.concatenate([backward_attention_result, forward_attention_result])
else:
gru_forward = tf.keras.layers.GRU(config["rnn_layer_size"], activation='tanh')
gru_backward = tf.keras.layers.GRU(config["rnn_layer_size"], go_backwards=True, activation='tanh')
# apply forward GRU, attention
forward_hidden_state = gru_forward(input)
backward_hidden_state = gru_backward(input)
# concatenate attention results
text_encoding = tf.keras.layers.concatenate([backward_hidden_state, forward_hidden_state])
return text_encoding
def encode_text_with_bert(config, input_layer, bert):
bert_output = bert(input_layer)
if config["rnn_type"] == 'gru':
text_encoding = encode_gru_with_attention(config, bert_output)
elif config["rnn_type"] == 'lstm':
text_encoding = encode_lstm_with_attention(config, bert_output)
elif config["rnn_type"] == 'bi-gru':
text_encoding = encode_bigru_with_attention(config, bert_output)
else:
text_encoding = tf.keras.layers.Convolution1D(filters=100, kernel_size=5, padding='same', activation='tanh')(
bert_output)
text_encoding = tf.keras.layers.Convolution1D(filters=80, kernel_size=5, padding='same', activation='tanh')(
text_encoding)
text_encoding = tf.keras.layers.Convolution1D(filters=50, kernel_size=5, padding='same', activation='tanh')(
text_encoding)
text_encoding = tf.keras.layers.AvgPool1D()(text_encoding)
text_encoding = tf.keras.layers.Flatten()(text_encoding)
return text_encoding
def encode_text_with_hateword_list(config, input_layer):
hate_words_encoding = tf.keras.layers.Dense(1493, name="hatewords_norm_layer_1")(input_layer)
hate_words_encoding = tf.keras.layers.BatchNormalization()(hate_words_encoding)
hate_words_encoding = tf.keras.layers.Activation("relu")(hate_words_encoding)
hate_words_encoding2 = tf.keras.layers.Dense(512, name="hatewords_norm_layer_2")(hate_words_encoding)
hate_words_encoding2 = tf.keras.layers.BatchNormalization()(hate_words_encoding2)
hate_words_encoding2 = tf.keras.layers.Activation("relu")(hate_words_encoding2)
return hate_words_encoding2
def encode_text_with_char_embeddings(config, input_layer):
char_embedding_layer = tf.keras.layers.Embedding(input_dim=config["char_size"], trainable=True, output_dim=50, embeddings_initializer='uniform', name="char_embs")
char_emb_out = char_embedding_layer(input_layer)
if config["text_encoder"] == 'gru':
text_encoding = encode_gru_with_attention(config, char_emb_out)
elif config["text_encoder"] == 'lstm':
text_encoding = encode_lstm_with_attention(config, char_emb_out)
elif config["text_encoder"] == 'bi-gru':
text_encoding = encode_bigru_with_attention(config, char_emb_out)
else:
text_encoding = tf.keras.layers.Convolution1D(filters=40, kernel_size=5, padding='same', activation='relu')(
char_emb_out)
text_encoding = tf.keras.layers.Convolution1D(filters=20, kernel_size=5, padding='same', activation='relu')(
text_encoding)
text_encoding = tf.keras.layers.Convolution1D(filters=10, kernel_size=5, padding='same', activation='relu')(
text_encoding)
text_encoding = tf.keras.layers.AvgPool1D()(text_encoding)
text_encoding = tf.keras.layers.Flatten()(text_encoding)
return text_encoding
def get_fusion_layer_sizes(individual_layer_size, num_modalitiies):
layer_sizes = []
first_layer_size = individual_layer_size * num_modalitiies
layer_sizes.append(first_layer_size)
return layer_sizes
def encode_inputs(config, bert_config_file, bert_check_point_file, adapter_size=None):
"""Creates a classification model."""
inputs = []
modality_outputs = []
image_models = []
has_bert_modality = False
if "bert" in config["text_models"]:
has_bert_modality = True
if has_bert_modality:
with tf.io.gfile.GFile(bert_config_file, "r") as reader:
bc = StockBertConfig.from_json_string(reader.read())
bert_params = map_stock_config_to_params(bc)
bert_params.adapter_size = adapter_size
bert = BertModelLayer.from_params(bert_params, name="bert")
else:
bert = None
if "bert" in config["text_models"]:
tweet_text_bert_input = tf.keras.layers.Input(shape=(config['tweet_text_seq_len'],), dtype='int32',
name="text_bert")
inputs.append(tweet_text_bert_input)
# encode with BERT
tweet_text_encoding = encode_text_with_bert(config, tweet_text_bert_input, bert)
modality_outputs.append(tweet_text_encoding)
if "hate_words" in config["text_models"]:
tweet_text_hate_words_input = tf.keras.layers.Input(shape=(1493,), dtype='int32',
name="text_hate_words")
inputs.append(tweet_text_hate_words_input)
# encode hatewords
tweet_text_hate_words_encoding = encode_text_with_hateword_list(config, tweet_text_hate_words_input)
modality_outputs.append(tweet_text_hate_words_encoding)
if "char_emb" in config["text_models"]:
tweet_text_char_input = tf.keras.layers.Input(shape=(config['tweet_text_char_len'],), dtype='int32',
name="text_char_emb")
inputs.append(tweet_text_char_input)
# encode with char embeddings
tweet_text_char_encoding = encode_text_with_char_embeddings(config, tweet_text_char_input)
modality_outputs.append(tweet_text_char_encoding)
return inputs, modality_outputs, has_bert_modality, bert
def get_model(config, bert_config_file, bert_check_point_file, adapter_size=None):
"""Creates a classification model."""
inputs, modality_outputs, has_bert_modality, bert = encode_inputs(config, bert_config_file, bert_check_point_file, adapter_size)
outputs = []
if len(modality_outputs) > 1:
concat_embedding = tf.keras.layers.concatenate(modality_outputs)
else:
concat_embedding = modality_outputs[0]
fusion_layer_output = concat_embedding
# fusion_layer_size = len(modality_outputs) * config['feature_normalization_layer_size']
fusion_layer_size = fusion_layer_output.shape[1]
counter = 1
while fusion_layer_size > config['min_feature_normalization_layer_size']:
fusion_layer_output = tf.keras.layers.Dense(fusion_layer_size, name="fusion_layer_"+str(counter))(fusion_layer_output)
batch_norm_layer_output = tf.keras.layers.BatchNormalization(name="batch_norm_layer_"+str(counter))(fusion_layer_output)
activation_layer_output = tf.keras.layers.Activation("relu", name="relu_layer_"+str(counter))(batch_norm_layer_output)
        if counter == 1:
            adapted_layer_size = np.power(2, int(np.log2(fusion_layer_size)))
            if adapted_layer_size == fusion_layer_size:
                # integer division keeps the Dense unit count an int
                fusion_layer_size //= 2
            else:
                fusion_layer_size = adapted_layer_size
        else:
            # decrease by half (integer division keeps the unit count an int)
            fusion_layer_size //= 2
        counter += 1
fusion_layer_output = activation_layer_output
last_layer_output = tf.keras.layers.Dense(units=2, activation="softmax", name='output_label')(fusion_layer_output)
outputs.append(last_layer_output)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
# load the pre-trained model weights, if BERT is used as a modality
if has_bert_modality:
load_stock_weights(bert, bert_check_point_file)
# freeze weights if adapter-BERT is used
if adapter_size is not None:
freeze_bert_layers(bert)
else:
freeze_all_bert_layers(bert)
if config["optimizer"] == "sgd":
optimizer = tf.keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9)
elif config["optimizer"] == "rmsprop":
        optimizer = tf.keras.optimizers.RMSprop()
elif config["optimizer"] == "adagrad":
optimizer = tf.keras.optimizers.Adagrad()
else:
optimizer = tf.keras.optimizers.Adam(0.0001)
# Enable Mixed Precision for faster computation, less memory
# optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)
model.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")])
model.summary()
return model
| 17,504 | 41.799511 | 166 | py |
steer | steer-master/ffjord/train_vae_flow.py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import time
import torch
import torch.utils.data
import torch.optim as optim
import numpy as np
import math
import random
import os
import datetime
import lib.utils as utils
import lib.layers.odefunc as odefunc
import vae_lib.models.VAE as VAE
import vae_lib.models.CNFVAE as CNFVAE
from vae_lib.optimization.training import train, evaluate
from vae_lib.utils.load_data import load_dataset
from vae_lib.utils.plotting import plot_training_curve
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser(description='PyTorch Sylvester Normalizing flows')
parser.add_argument(
'-d', '--dataset', type=str, default='mnist', choices=['mnist', 'freyfaces', 'omniglot', 'caltech'],
metavar='DATASET', help='Dataset choice.'
)
parser.add_argument(
'-freys', '--freyseed', type=int, default=123, metavar='FREYSEED',
help="""Seed for shuffling frey face dataset for test split. Ignored for other datasets.
Results in paper are produced with seeds 123, 321, 231"""
)
parser.add_argument('-nc', '--no_cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--manual_seed', type=int, help='manual seed, if not given resorts to random seed.')
parser.add_argument(
'-li', '--log_interval', type=int, default=10, metavar='LOG_INTERVAL',
help='how many batches to wait before logging training status'
)
parser.add_argument(
'-od', '--out_dir', type=str, default='snapshots', metavar='OUT_DIR',
help='output directory for model snapshots etc.'
)
# optimization settings
parser.add_argument(
'-e', '--epochs', type=int, default=2000, metavar='EPOCHS', help='number of epochs to train (default: 2000)'
)
parser.add_argument(
'-es', '--early_stopping_epochs', type=int, default=35, metavar='EARLY_STOPPING',
help='number of early stopping epochs'
)
parser.add_argument(
'-bs', '--batch_size', type=int, default=100, metavar='BATCH_SIZE', help='input batch size for training'
)
parser.add_argument('-lr', '--learning_rate', type=float, default=0.0005, metavar='LEARNING_RATE', help='learning rate')
parser.add_argument(
'-w', '--warmup', type=int, default=100, metavar='N',
help='number of epochs for warm-up. Set to 0 to turn warmup off.'
)
parser.add_argument('--max_beta', type=float, default=1., metavar='MB', help='max beta for warm-up')
parser.add_argument('--min_beta', type=float, default=0.0, metavar='MB', help='min beta for warm-up')
parser.add_argument(
'-f', '--flow', type=str, default='no_flow', choices=[
'planar', 'iaf', 'householder', 'orthogonal', 'triangular', 'cnf', 'cnf_bias', 'cnf_hyper', 'cnf_rank',
'cnf_lyper', 'no_flow'
], help="""Type of flows to use, no flows can also be selected"""
)
parser.add_argument('-r', '--rank', type=int, default=1)
parser.add_argument(
'-nf', '--num_flows', type=int, default=4, metavar='NUM_FLOWS',
help='Number of flow layers, ignored in absence of flows'
)
parser.add_argument(
'-nv', '--num_ortho_vecs', type=int, default=8, metavar='NUM_ORTHO_VECS',
help=""" For orthogonal flow: How orthogonal vectors per flow do you need.
Ignored for other flow types."""
)
parser.add_argument(
'-nh', '--num_householder', type=int, default=8, metavar='NUM_HOUSEHOLDERS',
help=""" For Householder Sylvester flow: Number of Householder matrices per flow.
Ignored for other flow types."""
)
parser.add_argument(
'-mhs', '--made_h_size', type=int, default=320, metavar='MADEHSIZE',
help='Width of mades for iaf. Ignored for all other flows.'
)
parser.add_argument('--z_size', type=int, default=64, metavar='ZSIZE', help='how many stochastic hidden units')
# gpu/cpu
parser.add_argument('--gpu_num', type=int, default=0, metavar='GPU', help='choose GPU to run on.')
# CNF settings
parser.add_argument(
"--layer_type", type=str, default="concat",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='512-512')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--train_T', type=eval, default=False)
parser.add_argument("--divergence_fn", type=str, default="approximate", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="softplus", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
# evaluation
parser.add_argument('--evaluate', type=eval, default=False, choices=[True, False])
parser.add_argument('--model_path', type=str, default='')
parser.add_argument('--retrain_encoder', type=eval, default=False, choices=[True, False])
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.manual_seed is None:
args.manual_seed = random.randint(1, 100000)
random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
np.random.seed(args.manual_seed)
if args.cuda:
# gpu device number
torch.cuda.set_device(args.gpu_num)
kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}
def run(args, kwargs):
# ==================================================================================================================
# SNAPSHOTS
# ==================================================================================================================
args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_')
args.model_signature = args.model_signature.replace(':', '_')
snapshots_path = os.path.join(args.out_dir, 'vae_' + args.dataset + '_')
snap_dir = snapshots_path + args.flow
if args.flow != 'no_flow':
snap_dir += '_' + 'num_flows_' + str(args.num_flows)
if args.flow == 'orthogonal':
snap_dir = snap_dir + '_num_vectors_' + str(args.num_ortho_vecs)
elif args.flow == 'orthogonalH':
snap_dir = snap_dir + '_num_householder_' + str(args.num_householder)
elif args.flow == 'iaf':
snap_dir = snap_dir + '_madehsize_' + str(args.made_h_size)
elif args.flow == 'permutation':
snap_dir = snap_dir + '_' + 'kernelsize_' + str(args.kernel_size)
elif args.flow == 'mixed':
snap_dir = snap_dir + '_' + 'num_householder_' + str(args.num_householder)
elif args.flow == 'cnf_rank':
snap_dir = snap_dir + '_rank_' + str(args.rank) + '_' + args.dims + '_num_blocks_' + str(args.num_blocks)
elif 'cnf' in args.flow:
snap_dir = snap_dir + '_' + args.dims + '_num_blocks_' + str(args.num_blocks)
if args.retrain_encoder:
snap_dir = snap_dir + '_retrain-encoder_'
elif args.evaluate:
snap_dir = snap_dir + '_evaluate_'
snap_dir = snap_dir + '__' + args.model_signature + '/'
args.snap_dir = snap_dir
if not os.path.exists(snap_dir):
os.makedirs(snap_dir)
# logger
utils.makedirs(args.snap_dir)
logger = utils.get_logger(logpath=os.path.join(args.snap_dir, 'logs'), filepath=os.path.abspath(__file__))
logger.info(args)
# SAVING
torch.save(args, snap_dir + args.flow + '.config')
# ==================================================================================================================
# LOAD DATA
# ==================================================================================================================
train_loader, val_loader, test_loader, args = load_dataset(args, **kwargs)
if not args.evaluate:
# ==============================================================================================================
# SELECT MODEL
# ==============================================================================================================
# flow parameters and architecture choice are passed on to model through args
if args.flow == 'no_flow':
model = VAE.VAE(args)
elif args.flow == 'planar':
model = VAE.PlanarVAE(args)
elif args.flow == 'iaf':
model = VAE.IAFVAE(args)
elif args.flow == 'orthogonal':
model = VAE.OrthogonalSylvesterVAE(args)
elif args.flow == 'householder':
model = VAE.HouseholderSylvesterVAE(args)
elif args.flow == 'triangular':
model = VAE.TriangularSylvesterVAE(args)
elif args.flow == 'cnf':
model = CNFVAE.CNFVAE(args)
elif args.flow == 'cnf_bias':
model = CNFVAE.AmortizedBiasCNFVAE(args)
elif args.flow == 'cnf_hyper':
model = CNFVAE.HypernetCNFVAE(args)
elif args.flow == 'cnf_lyper':
model = CNFVAE.LypernetCNFVAE(args)
elif args.flow == 'cnf_rank':
model = CNFVAE.AmortizedLowRankCNFVAE(args)
else:
raise ValueError('Invalid flow choice')
if args.retrain_encoder:
logger.info(f"Initializing decoder from {args.model_path}")
dec_model = torch.load(args.model_path)
dec_sd = {}
for k, v in dec_model.state_dict().items():
if 'p_x' in k:
dec_sd[k] = v
model.load_state_dict(dec_sd, strict=False)
if args.cuda:
logger.info("Model on GPU")
model.cuda()
logger.info(model)
if args.retrain_encoder:
parameters = []
logger.info('Optimizing over:')
for name, param in model.named_parameters():
if 'p_x' not in name:
logger.info(name)
parameters.append(param)
else:
parameters = model.parameters()
optimizer = optim.Adamax(parameters, lr=args.learning_rate, eps=1.e-7)
# ==================================================================================================================
# TRAINING
# ==================================================================================================================
train_loss = []
val_loss = []
# for early stopping
best_loss = np.inf
best_bpd = np.inf
e = 0
epoch = 0
train_times = []
for epoch in range(1, args.epochs + 1):
t_start = time.time()
tr_loss = train(epoch, train_loader, model, optimizer, args, logger)
train_loss.append(tr_loss)
train_times.append(time.time() - t_start)
logger.info('One training epoch took %.2f seconds' % (time.time() - t_start))
v_loss, v_bpd = evaluate(val_loader, model, args, logger, epoch=epoch)
val_loss.append(v_loss)
# early-stopping
if v_loss < best_loss:
e = 0
best_loss = v_loss
if args.input_type != 'binary':
best_bpd = v_bpd
logger.info('->model saved<-')
torch.save(model, snap_dir + args.flow + '.model')
# torch.save(model, snap_dir + args.flow + '_' + args.architecture + '.model')
elif (args.early_stopping_epochs > 0) and (epoch >= args.warmup):
e += 1
if e > args.early_stopping_epochs:
break
if args.input_type == 'binary':
logger.info(
'--> Early stopping: {}/{} (BEST: loss {:.4f})\n'.format(e, args.early_stopping_epochs, best_loss)
)
else:
logger.info(
'--> Early stopping: {}/{} (BEST: loss {:.4f}, bpd {:.4f})\n'.
format(e, args.early_stopping_epochs, best_loss, best_bpd)
)
if math.isnan(v_loss):
raise ValueError('NaN encountered!')
train_loss = np.hstack(train_loss)
val_loss = np.array(val_loss)
plot_training_curve(train_loss, val_loss, fname=snap_dir + '/training_curve_%s.pdf' % args.flow)
# training time per epoch
train_times = np.array(train_times)
mean_train_time = np.mean(train_times)
std_train_time = np.std(train_times, ddof=1)
logger.info('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time))
# ==================================================================================================================
# EVALUATION
# ==================================================================================================================
logger.info(args)
logger.info('Stopped after %d epochs' % epoch)
logger.info('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time))
final_model = torch.load(snap_dir + args.flow + '.model')
validation_loss, validation_bpd = evaluate(val_loader, final_model, args, logger)
else:
validation_loss = "N/A"
validation_bpd = "N/A"
logger.info(f"Loading model from {args.model_path}")
final_model = torch.load(args.model_path)
test_loss, test_bpd = evaluate(test_loader, final_model, args, logger, testing=True)
logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL): {:.4f}'.format(validation_loss))
logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST): {:.4f}'.format(test_loss))
if args.input_type != 'binary':
logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL) BPD : {:.4f}'.format(validation_bpd))
logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST) BPD: {:.4f}'.format(test_bpd))
if __name__ == "__main__":
run(args, kwargs)
| 14,613 | 39.707521 | 124 | py |
steer | steer-master/ffjord/train_toy.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
import os
import time
import torch
import torch.optim as optim
import lib.toy_data as toy_data
import lib.utils as utils
from lib.visualize_flow import visualize_transform
import lib.layers.odefunc as odefunc
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import add_spectral_norm, spectral_norm_power_iteration
from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log
from train_misc import build_model_tabular
from diagnostics.viz_toy import save_trajectory, trajectory_to_video
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
'--data', choices=['swissroll', '8gaussians', 'pinwheel', 'circles', 'moons', '2spirals', 'checkerboard', 'rings'],
type=str, default='pinwheel'
)
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--niters', type=int, default=10000)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument('--save', type=str, default='experiments/cnf')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
logger.info(args)
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
def get_transforms(model):
def sample_fn(z, logpz=None):
if logpz is not None:
return model(z, logpz, reverse=True)
else:
return model(z, reverse=True)
def density_fn(x, logpx=None):
if logpx is not None:
return model(x, logpx, reverse=False)
else:
return model(x, reverse=False)
return sample_fn, density_fn
def compute_loss(args, model, batch_size=None):
if batch_size is None: batch_size = args.batch_size
# load data
x = toy_data.inf_train_gen(args.data, batch_size=batch_size)
x = torch.from_numpy(x).type(torch.float32).to(device)
zero = torch.zeros(x.shape[0], 1).to(x)
# transform to z
z, delta_logp = model(x, zero)
# compute log q(z)
logpz = standard_normal_logprob(z).sum(1, keepdim=True)
logpx = logpz - delta_logp
loss = -torch.mean(logpx)
return loss
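# Note added for clarity (not in the original file): the loss above is the
# negative log-likelihood under the instantaneous change of variables,
#     log p(x) = log p(z) - delta_logp,
# where `delta_logp` is the log-density change accumulated by the CNF while
# transporting x to the base distribution z ~ N(0, I).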
if __name__ == '__main__':
regularization_fns, regularization_coeffs = create_regularization_fns(args)
model = build_model_tabular(args, 2, regularization_fns).to(device)
if args.spectral_norm: add_spectral_norm(model)
set_cnf_options(args, model)
logger.info(model)
logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
time_meter = utils.RunningAverageMeter(0.93)
loss_meter = utils.RunningAverageMeter(0.93)
nfef_meter = utils.RunningAverageMeter(0.93)
nfeb_meter = utils.RunningAverageMeter(0.93)
tt_meter = utils.RunningAverageMeter(0.93)
end = time.time()
best_loss = float('inf')
model.train()
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
if args.spectral_norm: spectral_norm_power_iteration(model, 1)
loss = compute_loss(args, model)
loss_meter.update(loss.item())
if len(regularization_coeffs) > 0:
reg_states = get_regularization(model, regularization_coeffs)
reg_loss = sum(
reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0
)
loss = loss + reg_loss
total_time = count_total_time(model)
nfe_forward = count_nfe(model)
loss.backward()
optimizer.step()
nfe_total = count_nfe(model)
nfe_backward = nfe_total - nfe_forward
nfef_meter.update(nfe_forward)
nfeb_meter.update(nfe_backward)
time_meter.update(time.time() - end)
tt_meter.update(total_time)
log_message = (
'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})'
' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format(
itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg,
nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg
)
)
if len(regularization_coeffs) > 0:
log_message = append_regularization_to_log(log_message, regularization_fns, reg_states)
logger.info(log_message)
if itr % args.val_freq == 0 or itr == args.niters:
with torch.no_grad():
model.eval()
test_loss = compute_loss(args, model, batch_size=args.test_batch_size)
test_nfe = count_nfe(model)
log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe)
logger.info(log_message)
if test_loss.item() < best_loss:
best_loss = test_loss.item()
utils.makedirs(args.save)
torch.save({
'args': args,
'state_dict': model.state_dict(),
}, os.path.join(args.save, 'checkpt.pth'))
model.train()
if itr % args.viz_freq == 0:
with torch.no_grad():
model.eval()
p_samples = toy_data.inf_train_gen(args.data, batch_size=2000)
sample_fn, density_fn = get_transforms(model)
plt.figure(figsize=(9, 3))
visualize_transform(
p_samples, torch.randn, standard_normal_logprob, transform=sample_fn, inverse_transform=density_fn,
samples=True, npts=800, device=device
)
fig_filename = os.path.join(args.save, 'figs', '{:04d}.jpg'.format(itr))
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename)
plt.close()
model.train()
end = time.time()
logger.info('Training has finished.')
save_traj_dir = os.path.join(args.save, 'trajectory')
logger.info('Plotting trajectory to {}'.format(save_traj_dir))
data_samples = toy_data.inf_train_gen(args.data, batch_size=2000)
save_trajectory(model, data_samples, save_traj_dir, device=device)
trajectory_to_video(save_traj_dir)
| 9,288 | 39.038793 | 119 | py |
steer | steer-master/ffjord/train_img2d.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
import os
import time
import torch
import torch.optim as optim
import lib.utils as utils
from lib.visualize_flow import visualize_transform
import lib.layers.odefunc as odefunc
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import add_spectral_norm, spectral_norm_power_iteration
from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log
from train_misc import build_model_tabular
from diagnostics.viz_toy import save_trajectory, trajectory_to_video
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument('--img', type=str, required=True)
parser.add_argument('--data', type=str, default='dummy')
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--niters', type=int, default=10000)
parser.add_argument('--batch_size', type=int, default=1000)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument('--save', type=str, default='experiments/cnf')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
logger.info(args)
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
from PIL import Image
import numpy as np
img = np.array(Image.open(args.img).convert('L'))
h, w = img.shape
xx = np.linspace(-4, 4, w)
yy = np.linspace(-4, 4, h)
xx, yy = np.meshgrid(xx, yy)
xx = xx.reshape(-1, 1)
yy = yy.reshape(-1, 1)
means = np.concatenate([xx, yy], 1)
img = img.max() - img
probs = img.reshape(-1) / img.sum()
std = np.array([8 / w / 2, 8 / h / 2])
def sample_data(data=None, rng=None, batch_size=200):
"""data and rng are ignored."""
inds = np.random.choice(int(probs.shape[0]), int(batch_size), p=probs)
m = means[inds]
samples = np.random.randn(*m.shape) * std + m
return samples
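# sample_data above turns the grayscale image into an unnormalized 2D density: each pixel
# becomes a Gaussian component centered on its (x, y) grid location, weighted by its
# inverted intensity, with per-axis std equal to half a pixel width. E.g. a hypothetical
# 100x100 input yields a 10000-component mixture over the square [-4, 4] x [-4, 4].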
def get_transforms(model):
def sample_fn(z, logpz=None):
if logpz is not None:
return model(z, logpz, reverse=True)
else:
return model(z, reverse=True)
def density_fn(x, logpx=None):
if logpx is not None:
return model(x, logpx, reverse=False)
else:
return model(x, reverse=False)
return sample_fn, density_fn
def compute_loss(args, model, batch_size=None):
if batch_size is None: batch_size = args.batch_size
# load data
x = sample_data(args.data, batch_size=batch_size)
x = torch.from_numpy(x).type(torch.float32).to(device)
zero = torch.zeros(x.shape[0], 1).to(x)
# transform to z
z, delta_logp = model(x, zero)
# compute log q(z)
logpz = standard_normal_logprob(z).sum(1, keepdim=True)
logpx = logpz - delta_logp
loss = -torch.mean(logpx)
return loss
if __name__ == '__main__':
regularization_fns, regularization_coeffs = create_regularization_fns(args)
model = build_model_tabular(args, 2, regularization_fns).to(device)
if args.spectral_norm: add_spectral_norm(model)
set_cnf_options(args, model)
logger.info(model)
logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
time_meter = utils.RunningAverageMeter(0.93)
loss_meter = utils.RunningAverageMeter(0.93)
nfef_meter = utils.RunningAverageMeter(0.93)
nfeb_meter = utils.RunningAverageMeter(0.93)
tt_meter = utils.RunningAverageMeter(0.93)
end = time.time()
best_loss = float('inf')
model.train()
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
if args.spectral_norm: spectral_norm_power_iteration(model, 1)
loss = compute_loss(args, model)
loss_meter.update(loss.item())
if len(regularization_coeffs) > 0:
reg_states = get_regularization(model, regularization_coeffs)
reg_loss = sum(
reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0
)
loss = loss + reg_loss
total_time = count_total_time(model)
nfe_forward = count_nfe(model)
loss.backward()
optimizer.step()
nfe_total = count_nfe(model)
nfe_backward = nfe_total - nfe_forward
nfef_meter.update(nfe_forward)
nfeb_meter.update(nfe_backward)
time_meter.update(time.time() - end)
tt_meter.update(total_time)
log_message = (
'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})'
' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format(
itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg,
nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg
)
)
if len(regularization_coeffs) > 0:
log_message = append_regularization_to_log(log_message, regularization_fns, reg_states)
logger.info(log_message)
if itr % args.val_freq == 0 or itr == args.niters:
with torch.no_grad():
model.eval()
test_loss = compute_loss(args, model, batch_size=args.test_batch_size)
test_nfe = count_nfe(model)
log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe)
logger.info(log_message)
if test_loss.item() < best_loss:
best_loss = test_loss.item()
utils.makedirs(args.save)
torch.save({
'args': args,
'state_dict': model.state_dict(),
}, os.path.join(args.save, 'checkpt.pth'))
model.train()
if itr % args.viz_freq == 0:
with torch.no_grad():
model.eval()
p_samples = sample_data(args.data, batch_size=2000)
sample_fn, density_fn = get_transforms(model)
plt.figure(figsize=(9, 3))
visualize_transform(
p_samples, torch.randn, standard_normal_logprob, transform=sample_fn, inverse_transform=density_fn,
samples=True, npts=800, device=device
)
fig_filename = os.path.join(args.save, 'figs', '{:04d}.jpg'.format(itr))
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename)
plt.close()
model.train()
end = time.time()
logger.info('Training has finished.')
save_traj_dir = os.path.join(args.save, 'trajectory')
logger.info('Plotting trajectory to {}'.format(save_traj_dir))
data_samples = sample_data(args.data, batch_size=2000)
save_trajectory(model, data_samples, save_traj_dir, device=device)
trajectory_to_video(save_traj_dir)
| 9,789 | 37.543307 | 119 | py |
steer | steer-master/ffjord/train_cnf.py | import argparse
import os
import time
import numpy as np
import torch
import torch.optim as optim
import torchvision.datasets as dset
import torchvision.transforms as tforms
from torchvision.utils import save_image
import lib.layers as layers
import lib.utils as utils
import lib.odenvp as odenvp
import lib.multiscale_parallel as multiscale_parallel
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import add_spectral_norm, spectral_norm_power_iteration
from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log
# go fast boi!!
torch.backends.cudnn.benchmark = True
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams']
parser = argparse.ArgumentParser("Continuous Normalizing Flow")
parser.add_argument("--data", choices=["mnist", "svhn", "cifar10", 'lsun_church'], type=str, default="mnist")
parser.add_argument("--dims", type=str, default="8,32,32,8")
parser.add_argument("--strides", type=str, default="2,2,1,-2,-2")
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument("--conv", type=eval, default=True, choices=[True, False])
parser.add_argument(
"--layer_type", type=str, default="ignore",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument("--divergence_fn", type=str, default="approximate", choices=["brute_force", "approximate"])
parser.add_argument(
"--nonlinearity", type=str, default="softplus", choices=["tanh", "relu", "softplus", "elu", "swish"]
)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument("--imagesize", type=int, default=None)
parser.add_argument("--alpha", type=float, default=1e-6)
parser.add_argument('--time_length', type=float, default=1.0)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--num_epochs", type=int, default=1000)
parser.add_argument("--batch_size", type=int, default=200)
parser.add_argument(
"--batch_size_schedule", type=str, default="", help="Increases the batchsize at every given epoch, dash separated."
)
parser.add_argument("--test_batch_size", type=int, default=200)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--warmup_iters", type=float, default=1000)
parser.add_argument("--weight_decay", type=float, default=0.0)
parser.add_argument("--spectral_norm_niter", type=int, default=10)
parser.add_argument("--add_noise", type=eval, default=True, choices=[True, False])
parser.add_argument("--batch_norm", type=eval, default=False, choices=[True, False])
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--autoencode', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=True, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--multiscale', type=eval, default=False, choices=[True, False])
parser.add_argument('--parallel', type=eval, default=False, choices=[True, False])
# Regularizations
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument("--time_penalty", type=float, default=0, help="Regularization on the end_time.")
parser.add_argument(
"--max_grad_norm", type=float, default=1e10,
help="Max norm of graidents (default is just stupidly high to avoid any clipping)"
)
parser.add_argument("--begin_epoch", type=int, default=1)
parser.add_argument("--resume", type=str, default=None)
parser.add_argument("--save", type=str, default="experiments/cnf")
parser.add_argument("--val_freq", type=int, default=1)
parser.add_argument("--log_freq", type=int, default=10)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
logger.info(args)
def add_noise(x):
"""
[0, 1] -> [0, 255] -> add noise -> [0, 1]
"""
if args.add_noise:
noise = torch.rand_like(x)
x = x * 255 + noise
x = x / 256
return x
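# Worked example of the dequantization above: a pixel value x in [0, 1] corresponding to
# the integer level 37 (i.e. x = 37/255) is mapped to 37 + u with u ~ Uniform[0, 1) and
# then divided by 256, so the discrete level is smeared over the interval [37/256, 38/256).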
def update_lr(optimizer, itr):
iter_frac = min(float(itr + 1) / max(args.warmup_iters, 1), 1.0)
lr = args.lr * iter_frac
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def get_train_loader(train_set, epoch):
if args.batch_size_schedule != "":
epochs = [0] + list(map(int, args.batch_size_schedule.split("-")))
n_passed = sum(np.array(epochs) <= epoch)
current_batch_size = int(args.batch_size * n_passed)
else:
current_batch_size = args.batch_size
train_loader = torch.utils.data.DataLoader(
dataset=train_set, batch_size=current_batch_size, shuffle=True, drop_last=True, pin_memory=True
)
logger.info("===> Using batch size {}. Total {} iterations/epoch.".format(current_batch_size, len(train_loader)))
return train_loader
def get_dataset(args):
trans = lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise])
if args.data == "mnist":
im_dim = 1
im_size = 28 if args.imagesize is None else args.imagesize
train_set = dset.MNIST(root="./data", train=True, transform=trans(im_size), download=True)
test_set = dset.MNIST(root="./data", train=False, transform=trans(im_size), download=True)
elif args.data == "svhn":
im_dim = 3
im_size = 32 if args.imagesize is None else args.imagesize
train_set = dset.SVHN(root="./data", split="train", transform=trans(im_size), download=True)
test_set = dset.SVHN(root="./data", split="test", transform=trans(im_size), download=True)
elif args.data == "cifar10":
im_dim = 3
im_size = 32 if args.imagesize is None else args.imagesize
train_set = dset.CIFAR10(
root="./data", train=True, transform=tforms.Compose([
tforms.Resize(im_size),
tforms.RandomHorizontalFlip(),
tforms.ToTensor(),
add_noise,
]), download=True
)
test_set = dset.CIFAR10(root="./data", train=False, transform=trans(im_size), download=True)
elif args.data == 'celeba':
im_dim = 3
im_size = 64 if args.imagesize is None else args.imagesize
train_set = dset.CelebA(
train=True, transform=tforms.Compose([
tforms.ToPILImage(),
tforms.Resize(im_size),
tforms.RandomHorizontalFlip(),
tforms.ToTensor(),
add_noise,
])
)
test_set = dset.CelebA(
train=False, transform=tforms.Compose([
tforms.ToPILImage(),
tforms.Resize(im_size),
tforms.ToTensor(),
add_noise,
])
)
elif args.data == 'lsun_church':
im_dim = 3
im_size = 64 if args.imagesize is None else args.imagesize
train_set = dset.LSUN(
'data', ['church_outdoor_train'], transform=tforms.Compose([
tforms.Resize(96),
tforms.RandomCrop(64),
tforms.Resize(im_size),
tforms.ToTensor(),
add_noise,
])
)
test_set = dset.LSUN(
'data', ['church_outdoor_val'], transform=tforms.Compose([
tforms.Resize(96),
tforms.RandomCrop(64),
tforms.Resize(im_size),
tforms.ToTensor(),
add_noise,
])
)
data_shape = (im_dim, im_size, im_size)
if not args.conv:
data_shape = (im_dim * im_size * im_size,)
test_loader = torch.utils.data.DataLoader(
dataset=test_set, batch_size=args.test_batch_size, shuffle=False, drop_last=True
)
return train_set, test_loader, data_shape
def compute_bits_per_dim(x, model):
zero = torch.zeros(x.shape[0], 1).to(x)
# Don't use data parallelism if batch size is small.
# if x.shape[0] < 200:
# model = model.module
z, delta_logp = model(x, zero) # run model forward
logpz = standard_normal_logprob(z).view(z.shape[0], -1).sum(1, keepdim=True) # logp(z)
logpx = logpz - delta_logp
logpx_per_dim = torch.sum(logpx) / x.nelement() # averaged over batches
bits_per_dim = -(logpx_per_dim - np.log(256)) / np.log(2)
return bits_per_dim
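# compute_bits_per_dim above converts the mean per-dimension log-likelihood (in nats) to
# bits per dimension:
#     bits/dim = -(logpx_per_dim - log 256) / log 2,
# where the log 256 term accounts for the data having been rescaled from {0, ..., 255}
# to [0, 1] during dequantization.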
def create_model(args, data_shape, regularization_fns):
hidden_dims = tuple(map(int, args.dims.split(",")))
strides = tuple(map(int, args.strides.split(",")))
if args.multiscale:
model = odenvp.ODENVP(
(args.batch_size, *data_shape),
n_blocks=args.num_blocks,
intermediate_dims=hidden_dims,
nonlinearity=args.nonlinearity,
alpha=args.alpha,
cnf_kwargs={"T": args.time_length, "train_T": args.train_T, "regularization_fns": regularization_fns},
)
elif args.parallel:
model = multiscale_parallel.MultiscaleParallelCNF(
(args.batch_size, *data_shape),
n_blocks=args.num_blocks,
intermediate_dims=hidden_dims,
alpha=args.alpha,
time_length=args.time_length,
)
else:
if args.autoencode:
def build_cnf():
autoencoder_diffeq = layers.AutoencoderDiffEqNet(
hidden_dims=hidden_dims,
input_shape=data_shape,
strides=strides,
conv=args.conv,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
odefunc = layers.AutoencoderODEfunc(
autoencoder_diffeq=autoencoder_diffeq,
divergence_fn=args.divergence_fn,
residual=args.residual,
rademacher=args.rademacher,
)
cnf = layers.CNF(
odefunc=odefunc,
T=args.time_length,
regularization_fns=regularization_fns,
solver=args.solver,
)
return cnf
else:
def build_cnf():
diffeq = layers.ODEnet(
hidden_dims=hidden_dims,
input_shape=data_shape,
strides=strides,
conv=args.conv,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
odefunc = layers.ODEfunc(
diffeq=diffeq,
divergence_fn=args.divergence_fn,
residual=args.residual,
rademacher=args.rademacher,
)
cnf = layers.CNF(
odefunc=odefunc,
T=args.time_length,
train_T=args.train_T,
regularization_fns=regularization_fns,
solver=args.solver,
)
return cnf
chain = [layers.LogitTransform(alpha=args.alpha)] if args.alpha > 0 else [layers.ZeroMeanTransform()]
chain = chain + [build_cnf() for _ in range(args.num_blocks)]
if args.batch_norm:
chain.append(layers.MovingBatchNorm2d(data_shape[0]))
model = layers.SequentialFlow(chain)
return model
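# Illustrative call (a sketch under this file's defaults, not an extra entry point):
#     model = create_model(args, data_shape=(1, 28, 28), regularization_fns=[])
# With multiscale/parallel disabled, the returned flow is a LogitTransform followed by
# args.num_blocks CNF blocks (plus an optional MovingBatchNorm2d).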
if __name__ == "__main__":
# get device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True)
# load dataset
train_set, test_loader, data_shape = get_dataset(args)
# build model
regularization_fns, regularization_coeffs = create_regularization_fns(args)
model = create_model(args, data_shape, regularization_fns)
if args.spectral_norm: add_spectral_norm(model, logger)
set_cnf_options(args, model)
logger.info(model)
logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# restore parameters
if args.resume is not None:
checkpt = torch.load(args.resume, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpt["state_dict"])
if "optim_state_dict" in checkpt.keys():
optimizer.load_state_dict(checkpt["optim_state_dict"])
# Manually move optimizer state to device.
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = cvt(v)
if torch.cuda.is_available():
model = torch.nn.DataParallel(model).cuda()
# For visualization.
fixed_z = cvt(torch.randn(100, *data_shape))
time_meter = utils.RunningAverageMeter(0.97)
loss_meter = utils.RunningAverageMeter(0.97)
steps_meter = utils.RunningAverageMeter(0.97)
grad_meter = utils.RunningAverageMeter(0.97)
tt_meter = utils.RunningAverageMeter(0.97)
if args.spectral_norm and not args.resume: spectral_norm_power_iteration(model, 500)
best_loss = float("inf")
itr = 0
for epoch in range(args.begin_epoch, args.num_epochs + 1):
model.train()
train_loader = get_train_loader(train_set, epoch)
for _, (x, y) in enumerate(train_loader):
start = time.time()
update_lr(optimizer, itr)
optimizer.zero_grad()
if not args.conv:
x = x.view(x.shape[0], -1)
# cast data and move to device
x = cvt(x)
# compute loss
loss = compute_bits_per_dim(x, model)
if regularization_coeffs:
reg_states = get_regularization(model, regularization_coeffs)
reg_loss = sum(
reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0
)
loss = loss + reg_loss
total_time = count_total_time(model)
loss = loss + total_time * args.time_penalty
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
if args.spectral_norm: spectral_norm_power_iteration(model, args.spectral_norm_niter)
time_meter.update(time.time() - start)
loss_meter.update(loss.item())
steps_meter.update(count_nfe(model))
grad_meter.update(grad_norm)
tt_meter.update(total_time)
if itr % args.log_freq == 0:
log_message = (
"Iter {:04d} | Time {:.4f}({:.4f}) | Bit/dim {:.4f}({:.4f}) | "
"Steps {:.0f}({:.2f}) | Grad Norm {:.4f}({:.4f}) | Total Time {:.2f}({:.2f})".format(
itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, steps_meter.val,
steps_meter.avg, grad_meter.val, grad_meter.avg, tt_meter.val, tt_meter.avg
)
)
if regularization_coeffs:
log_message = append_regularization_to_log(log_message, regularization_fns, reg_states)
logger.info(log_message)
itr += 1
# compute test loss
model.eval()
if epoch % args.val_freq == 0:
with torch.no_grad():
start = time.time()
logger.info("validating...")
losses = []
for (x, y) in test_loader:
if not args.conv:
x = x.view(x.shape[0], -1)
x = cvt(x)
loss = compute_bits_per_dim(x, model)
losses.append(loss)
loss = sum(losses) / len(losses)
logger.info("Epoch {:04d} | Time {:.4f}, Bit/dim {:.4f}".format(epoch, time.time() - start, loss))
if loss < best_loss:
best_loss = loss
utils.makedirs(args.save)
torch.save({
"args": args,
"state_dict": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),
"optim_state_dict": optimizer.state_dict(),
}, os.path.join(args.save, "checkpt.pth"))
# visualize samples and density
with torch.no_grad():
fig_filename = os.path.join(args.save, "figs", "{:04d}.jpg".format(epoch))
utils.makedirs(os.path.dirname(fig_filename))
generated_samples = model(fixed_z, reverse=True).view(-1, *data_shape)
save_image(generated_samples, fig_filename, nrow=10)
| 18,212 | 39.654018 | 119 | py |
steer | steer-master/ffjord/train_discrete_tabular.py | import argparse
import os
import time
import torch
import lib.utils as utils
from lib.custom_optimizers import Adam
import lib.layers as layers
import datasets
from train_misc import standard_normal_logprob, count_parameters
parser = argparse.ArgumentParser()
parser.add_argument(
'--data', choices=['power', 'gas', 'hepmass', 'miniboone', 'bsds300'], type=str, default='miniboone'
)
parser.add_argument('--depth', type=int, default=10)
parser.add_argument('--dims', type=str, default="100-100")
parser.add_argument('--nonlinearity', type=str, default="tanh")
parser.add_argument('--glow', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--early_stopping', type=int, default=30)
parser.add_argument('--batch_size', type=int, default=1000)
parser.add_argument('--test_batch_size', type=int, default=None)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--weight_decay', type=float, default=1e-6)
parser.add_argument('--resume', type=str, default=None)
parser.add_argument('--save', type=str, default='experiments/cnf')
parser.add_argument('--evaluate', action='store_true')
parser.add_argument('--val_freq', type=int, default=200)
parser.add_argument('--log_freq', type=int, default=10)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
logger.info(args)
test_batch_size = args.test_batch_size if args.test_batch_size else args.batch_size
def batch_iter(X, batch_size=args.batch_size, shuffle=False):
"""
X: feature tensor (shape: num_instances x num_features)
"""
if shuffle:
idxs = torch.randperm(X.shape[0])
else:
idxs = torch.arange(X.shape[0])
if X.is_cuda:
idxs = idxs.cuda()
for batch_idxs in idxs.split(batch_size):
yield X[batch_idxs]
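# Illustrative use of batch_iter (a sketch; `data` here stands for any loaded dataset):
#     for x in batch_iter(data.trn.x, batch_size=256, shuffle=True):
#         ...  # x is a (<=256, num_features) slice of the training tensor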
ndecs = 0
def update_lr(optimizer, n_vals_without_improvement):
global ndecs
if ndecs == 0 and n_vals_without_improvement > args.early_stopping // 3:
for param_group in optimizer.param_groups:
param_group["lr"] = args.lr / 10
ndecs = 1
elif ndecs == 1 and n_vals_without_improvement > args.early_stopping // 3 * 2:
for param_group in optimizer.param_groups:
param_group["lr"] = args.lr / 100
ndecs = 2
else:
for param_group in optimizer.param_groups:
param_group["lr"] = args.lr / 10**ndecs
def load_data(name):
if name == 'bsds300':
return datasets.BSDS300()
elif name == 'power':
return datasets.POWER()
elif name == 'gas':
return datasets.GAS()
elif name == 'hepmass':
return datasets.HEPMASS()
elif name == 'miniboone':
return datasets.MINIBOONE()
else:
raise ValueError('Unknown dataset')
def build_model(input_dim):
hidden_dims = tuple(map(int, args.dims.split("-")))
chain = []
for i in range(args.depth):
if args.glow: chain.append(layers.BruteForceLayer(input_dim))
chain.append(layers.MaskedCouplingLayer(input_dim, hidden_dims, 'alternate', swap=i % 2 == 0))
if args.batch_norm: chain.append(layers.MovingBatchNorm1d(input_dim, bn_lag=args.bn_lag))
return layers.SequentialFlow(chain)
def compute_loss(x, model):
zero = torch.zeros(x.shape[0], 1).to(x)
z, delta_logp = model(x, zero) # run model forward
logpz = standard_normal_logprob(z).view(z.shape[0], -1).sum(1, keepdim=True) # logp(z)
logpx = logpz - delta_logp
loss = -torch.mean(logpx)
return loss
def restore_model(model, filename):
checkpt = torch.load(filename, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpt["state_dict"])
return model
if __name__ == '__main__':
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True)
logger.info('Using {} GPUs.'.format(torch.cuda.device_count()))
data = load_data(args.data)
data.trn.x = torch.from_numpy(data.trn.x)
data.val.x = torch.from_numpy(data.val.x)
data.tst.x = torch.from_numpy(data.tst.x)
model = build_model(data.n_dims).to(device)
if args.resume is not None:
checkpt = torch.load(args.resume)
model.load_state_dict(checkpt['state_dict'])
logger.info(model)
logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
if not args.evaluate:
optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
time_meter = utils.RunningAverageMeter(0.98)
loss_meter = utils.RunningAverageMeter(0.98)
best_loss = float('inf')
itr = 0
n_vals_without_improvement = 0
end = time.time()
model.train()
while True:
if args.early_stopping > 0 and n_vals_without_improvement > args.early_stopping:
break
for x in batch_iter(data.trn.x, shuffle=True):
if args.early_stopping > 0 and n_vals_without_improvement > args.early_stopping:
break
optimizer.zero_grad()
x = cvt(x)
loss = compute_loss(x, model)
loss_meter.update(loss.item())
loss.backward()
optimizer.step()
time_meter.update(time.time() - end)
if itr % args.log_freq == 0:
log_message = (
'Iter {:06d} | Epoch {:.2f} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | '.format(
itr,
float(itr) / (data.trn.x.shape[0] / float(args.batch_size)), time_meter.val, time_meter.avg,
loss_meter.val, loss_meter.avg
)
)
logger.info(log_message)
itr += 1
end = time.time()
# Validation loop.
if itr % args.val_freq == 0:
model.eval()
start_time = time.time()
with torch.no_grad():
val_loss = utils.AverageMeter()
for x in batch_iter(data.val.x, batch_size=test_batch_size):
x = cvt(x)
val_loss.update(compute_loss(x, model).item(), x.shape[0])
if val_loss.avg < best_loss:
best_loss = val_loss.avg
utils.makedirs(args.save)
torch.save({
'args': args,
'state_dict': model.state_dict(),
}, os.path.join(args.save, 'checkpt.pth'))
n_vals_without_improvement = 0
else:
n_vals_without_improvement += 1
update_lr(optimizer, n_vals_without_improvement)
log_message = (
'[VAL] Iter {:06d} | Val Loss {:.6f} | '
'NoImproveEpochs {:02d}/{:02d}'.format(
itr, val_loss.avg, n_vals_without_improvement, args.early_stopping
)
)
logger.info(log_message)
model.train()
logger.info('Training has finished.')
model = restore_model(model, os.path.join(args.save, 'checkpt.pth')).to(device)
logger.info('Evaluating model on test set.')
model.eval()
with torch.no_grad():
test_loss = utils.AverageMeter()
for itr, x in enumerate(batch_iter(data.tst.x, batch_size=test_batch_size)):
x = cvt(x)
test_loss.update(compute_loss(x, model).item(), x.shape[0])
logger.info('Progress: {:.2f}%'.format(100. * itr / (data.tst.x.shape[0] / test_batch_size)))
log_message = '[TEST] Iter {:06d} | Test Loss {:.6f} '.format(itr, test_loss.avg)
logger.info(log_message)
| 8,301 | 34.177966 | 120 | py |
steer | steer-master/ffjord/train_tabular.py | import argparse
import os
import time
import torch
import lib.utils as utils
import lib.layers.odefunc as odefunc
from lib.custom_optimizers import Adam
import datasets
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log
from train_misc import build_model_tabular, override_divergence_fn
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
'--data', choices=['power', 'gas', 'hepmass', 'miniboone', 'bsds300'], type=str, default='miniboone'
)
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--hdim_factor', type=int, default=10)
parser.add_argument('--nhidden', type=int, default=1)
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=1.0)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="approximate", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="softplus", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-8)
parser.add_argument('--rtol', type=float, default=1e-6)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--early_stopping', type=int, default=30)
parser.add_argument('--batch_size', type=int, default=1000)
parser.add_argument('--test_batch_size', type=int, default=None)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-6)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument('--resume', type=str, default=None)
parser.add_argument('--save', type=str, default='experiments/cnf')
parser.add_argument('--evaluate', action='store_true')
parser.add_argument('--val_freq', type=int, default=200)
parser.add_argument('--log_freq', type=int, default=10)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
args.train_T = False
logger.info(args)
test_batch_size = args.test_batch_size if args.test_batch_size else args.batch_size
def batch_iter(X, batch_size=args.batch_size, shuffle=False):
"""
X: feature tensor (shape: num_instances x num_features)
"""
if shuffle:
idxs = torch.randperm(X.shape[0])
else:
idxs = torch.arange(X.shape[0])
if X.is_cuda:
idxs = idxs.cuda()
for batch_idxs in idxs.split(batch_size):
yield X[batch_idxs]
ndecs = 0
def update_lr(optimizer, n_vals_without_improvement):
global ndecs
if ndecs == 0 and n_vals_without_improvement > args.early_stopping // 3:
for param_group in optimizer.param_groups:
param_group["lr"] = args.lr / 10
ndecs = 1
elif ndecs == 1 and n_vals_without_improvement > args.early_stopping // 3 * 2:
for param_group in optimizer.param_groups:
param_group["lr"] = args.lr / 100
ndecs = 2
else:
for param_group in optimizer.param_groups:
param_group["lr"] = args.lr / 10**ndecs
def load_data(name):
if name == 'bsds300':
return datasets.BSDS300()
elif name == 'power':
return datasets.POWER()
elif name == 'gas':
return datasets.GAS()
elif name == 'hepmass':
return datasets.HEPMASS()
elif name == 'miniboone':
return datasets.MINIBOONE()
else:
raise ValueError('Unknown dataset')
def compute_loss(x, model):
zero = torch.zeros(x.shape[0], 1).to(x)
z, delta_logp = model(x, zero) # run model forward
logpz = standard_normal_logprob(z).view(z.shape[0], -1).sum(1, keepdim=True) # logp(z)
logpx = logpz - delta_logp
loss = -torch.mean(logpx)
return loss
def restore_model(model, filename):
checkpt = torch.load(filename, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpt["state_dict"])
return model
if __name__ == '__main__':
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True)
logger.info('Using {} GPUs.'.format(torch.cuda.device_count()))
data = load_data(args.data)
data.trn.x = torch.from_numpy(data.trn.x)
data.val.x = torch.from_numpy(data.val.x)
data.tst.x = torch.from_numpy(data.tst.x)
args.dims = '-'.join([str(args.hdim_factor * data.n_dims)] * args.nhidden)
regularization_fns, regularization_coeffs = create_regularization_fns(args)
model = build_model_tabular(args, data.n_dims, regularization_fns).to(device)
set_cnf_options(args, model)
for k in model.state_dict().keys():
logger.info(k)
if args.resume is not None:
checkpt = torch.load(args.resume)
# Backwards compatibility with an older version of the code.
# TODO: remove upon release.
filtered_state_dict = {}
for k, v in checkpt['state_dict'].items():
if 'diffeq.diffeq' not in k:
filtered_state_dict[k.replace('module.', '')] = v
model.load_state_dict(filtered_state_dict)
logger.info(model)
logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
if not args.evaluate:
optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
time_meter = utils.RunningAverageMeter(0.98)
loss_meter = utils.RunningAverageMeter(0.98)
nfef_meter = utils.RunningAverageMeter(0.98)
nfeb_meter = utils.RunningAverageMeter(0.98)
tt_meter = utils.RunningAverageMeter(0.98)
best_loss = float('inf')
itr = 0
n_vals_without_improvement = 0
end = time.time()
model.train()
while True:
if args.early_stopping > 0 and n_vals_without_improvement > args.early_stopping:
break
for x in batch_iter(data.trn.x, shuffle=True):
if args.early_stopping > 0 and n_vals_without_improvement > args.early_stopping:
break
optimizer.zero_grad()
x = cvt(x)
loss = compute_loss(x, model)
loss_meter.update(loss.item())
if len(regularization_coeffs) > 0:
reg_states = get_regularization(model, regularization_coeffs)
reg_loss = sum(
reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0
)
loss = loss + reg_loss
total_time = count_total_time(model)
nfe_forward = count_nfe(model)
loss.backward()
optimizer.step()
nfe_total = count_nfe(model)
nfe_backward = nfe_total - nfe_forward
nfef_meter.update(nfe_forward)
nfeb_meter.update(nfe_backward)
time_meter.update(time.time() - end)
tt_meter.update(total_time)
if itr % args.log_freq == 0:
log_message = (
'Iter {:06d} | Epoch {:.2f} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | '
'NFE Forward {:.0f}({:.1f}) | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format(
itr,
float(itr) / (data.trn.x.shape[0] / float(args.batch_size)), time_meter.val, time_meter.avg,
loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg, nfeb_meter.val,
nfeb_meter.avg, tt_meter.val, tt_meter.avg
)
)
if len(regularization_coeffs) > 0:
log_message = append_regularization_to_log(log_message, regularization_fns, reg_states)
logger.info(log_message)
itr += 1
end = time.time()
# Validation loop.
if itr % args.val_freq == 0:
model.eval()
start_time = time.time()
with torch.no_grad():
val_loss = utils.AverageMeter()
val_nfe = utils.AverageMeter()
for x in batch_iter(data.val.x, batch_size=test_batch_size):
x = cvt(x)
val_loss.update(compute_loss(x, model).item(), x.shape[0])
val_nfe.update(count_nfe(model))
if val_loss.avg < best_loss:
best_loss = val_loss.avg
utils.makedirs(args.save)
torch.save({
'args': args,
'state_dict': model.state_dict(),
}, os.path.join(args.save, 'checkpt.pth'))
n_vals_without_improvement = 0
else:
n_vals_without_improvement += 1
update_lr(optimizer, n_vals_without_improvement)
log_message = (
'[VAL] Iter {:06d} | Val Loss {:.6f} | NFE {:.0f} | '
'NoImproveEpochs {:02d}/{:02d}'.format(
itr, val_loss.avg, val_nfe.avg, n_vals_without_improvement, args.early_stopping
)
)
logger.info(log_message)
model.train()
logger.info('Training has finished.')
model = restore_model(model, os.path.join(args.save, 'checkpt.pth')).to(device)
set_cnf_options(args, model)
logger.info('Evaluating model on test set.')
model.eval()
override_divergence_fn(model, "brute_force")
with torch.no_grad():
test_loss = utils.AverageMeter()
test_nfe = utils.AverageMeter()
for itr, x in enumerate(batch_iter(data.tst.x, batch_size=test_batch_size)):
x = cvt(x)
test_loss.update(compute_loss(x, model).item(), x.shape[0])
test_nfe.update(count_nfe(model))
logger.info('Progress: {:.2f}%'.format(100. * itr / (data.tst.x.shape[0] / test_batch_size)))
log_message = '[TEST] Iter {:06d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss.avg, test_nfe.avg)
logger.info(log_message)
| 12,249 | 38.90228 | 120 | py |
steer | steer-master/ffjord/train_discrete_toy.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
import os
import time
import torch
import torch.optim as optim
import lib.layers as layers
import lib.toy_data as toy_data
import lib.utils as utils
from lib.visualize_flow import visualize_transform
from train_misc import standard_normal_logprob
from train_misc import count_parameters
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
'--data', choices=['swissroll', '8gaussians', 'pinwheel', 'circles', 'moons', '2spirals', 'checkerboard', 'rings'],
type=str, default='pinwheel'
)
parser.add_argument('--depth', help='number of coupling layers', type=int, default=10)
parser.add_argument('--glow', type=eval, choices=[True, False], default=False)
parser.add_argument('--nf', type=eval, choices=[True, False], default=False)
parser.add_argument('--niters', type=int, default=100001)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--weight_decay', type=float, default=0)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument('--save', type=str, default='experiments/cnf')
parser.add_argument('--viz_freq', type=int, default=1000)
parser.add_argument('--val_freq', type=int, default=1000)
parser.add_argument('--log_freq', type=int, default=100)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
logger.info(args)
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
def construct_model():
if args.nf:
chain = []
for i in range(args.depth):
chain.append(layers.PlanarFlow(2))
return layers.SequentialFlow(chain)
else:
chain = []
for i in range(args.depth):
if args.glow: chain.append(layers.BruteForceLayer(2))
chain.append(layers.CouplingLayer(2, swap=i % 2 == 0))
return layers.SequentialFlow(chain)
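# The coupling layers above alternate swap=True/False so that the coordinate left
# unchanged by one layer is transformed by the next; without alternating, one of the two
# dimensions would pass through the whole flow untouched.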
def get_transforms(model):
if args.nf:
sample_fn = None
else:
def sample_fn(z, logpz=None):
if logpz is not None:
return model(z, logpz, reverse=True)
else:
return model(z, reverse=True)
def density_fn(x, logpx=None):
if logpx is not None:
return model(x, logpx, reverse=False)
else:
return model(x, reverse=False)
return sample_fn, density_fn
def compute_loss(args, model, batch_size=None):
if batch_size is None: batch_size = args.batch_size
# load data
x = toy_data.inf_train_gen(args.data, batch_size=batch_size)
x = torch.from_numpy(x).type(torch.float32).to(device)
zero = torch.zeros(x.shape[0], 1).to(x)
# transform to z
z, delta_logp = model(x, zero)
# compute log q(z)
logpz = standard_normal_logprob(z).sum(1, keepdim=True)
logpx = logpz - delta_logp
loss = -torch.mean(logpx)
return loss
if __name__ == '__main__':
model = construct_model().to(device)
logger.info(model)
logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
optimizer = optim.Adamax(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
time_meter = utils.RunningAverageMeter(0.98)
loss_meter = utils.RunningAverageMeter(0.98)
end = time.time()
best_loss = float('inf')
model.train()
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
loss = compute_loss(args, model)
loss_meter.update(loss.item())
loss.backward()
optimizer.step()
time_meter.update(time.time() - end)
if itr % args.log_freq == 0:
log_message = (
'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f})'.format(
itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg
)
)
logger.info(log_message)
if itr % args.val_freq == 0 or itr == args.niters:
with torch.no_grad():
model.eval()
test_loss = compute_loss(args, model, batch_size=args.test_batch_size)
log_message = '[TEST] Iter {:04d} | Test Loss {:.6f}'.format(itr, test_loss)
logger.info(log_message)
if test_loss.item() < best_loss:
best_loss = test_loss.item()
utils.makedirs(args.save)
torch.save({
'args': args,
'state_dict': model.state_dict(),
}, os.path.join(args.save, 'checkpt.pth'))
model.train()
if itr % args.viz_freq == 0:
with torch.no_grad():
model.eval()
p_samples = toy_data.inf_train_gen(args.data, batch_size=2000)
sample_fn, density_fn = get_transforms(model)
plt.figure(figsize=(9, 3))
visualize_transform(
p_samples, torch.randn, standard_normal_logprob, transform=sample_fn, inverse_transform=density_fn,
samples=True, npts=800, device=device
)
fig_filename = os.path.join(args.save, 'figs', '{:04d}.jpg'.format(itr))
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename)
plt.close()
model.train()
end = time.time()
logger.info('Training has finished.')
| 6,334 | 32.877005 | 119 | py |
steer | steer-master/ffjord/vae_lib/models/VAE.py | from __future__ import print_function
import torch
import torch.nn as nn
import vae_lib.models.flows as flows
from vae_lib.models.layers import GatedConv2d, GatedConvTranspose2d
class VAE(nn.Module):
"""
The base VAE class containing gated convolutional encoder and decoder architecture.
Can be used as a base class for VAEs with normalizing flows.
"""
def __init__(self, args):
super(VAE, self).__init__()
# extract model settings from args
self.z_size = args.z_size
self.input_size = args.input_size
self.input_type = args.input_type
if self.input_size == [1, 28, 28] or self.input_size == [3, 28, 28]:
self.last_kernel_size = 7
elif self.input_size == [1, 28, 20]:
self.last_kernel_size = (7, 5)
else:
raise ValueError('invalid input size!!')
self.q_z_nn, self.q_z_mean, self.q_z_var = self.create_encoder()
self.p_x_nn, self.p_x_mean = self.create_decoder()
self.q_z_nn_output_dim = 256
# auxiliary
if args.cuda:
self.FloatTensor = torch.cuda.FloatTensor
else:
self.FloatTensor = torch.FloatTensor
# log-det-jacobian = 0 without flows
self.log_det_j = self.FloatTensor(1).zero_()
def create_encoder(self):
"""
Helper function to create the elemental blocks for the encoder. Creates a gated convnet encoder.
The encoder expects data as input of shape (batch_size, num_channels, width, height).
"""
if self.input_type == 'binary':
q_z_nn = nn.Sequential(
GatedConv2d(self.input_size[0], 32, 5, 1, 2),
GatedConv2d(32, 32, 5, 2, 2),
GatedConv2d(32, 64, 5, 1, 2),
GatedConv2d(64, 64, 5, 2, 2),
GatedConv2d(64, 64, 5, 1, 2),
GatedConv2d(64, 256, self.last_kernel_size, 1, 0),
)
q_z_mean = nn.Linear(256, self.z_size)
q_z_var = nn.Sequential(
nn.Linear(256, self.z_size),
nn.Softplus(),
)
return q_z_nn, q_z_mean, q_z_var
elif self.input_type == 'multinomial':
act = None
q_z_nn = nn.Sequential(
GatedConv2d(self.input_size[0], 32, 5, 1, 2, activation=act),
GatedConv2d(32, 32, 5, 2, 2, activation=act),
GatedConv2d(32, 64, 5, 1, 2, activation=act),
GatedConv2d(64, 64, 5, 2, 2, activation=act),
GatedConv2d(64, 64, 5, 1, 2, activation=act),
GatedConv2d(64, 256, self.last_kernel_size, 1, 0, activation=act)
)
q_z_mean = nn.Linear(256, self.z_size)
q_z_var = nn.Sequential(nn.Linear(256, self.z_size), nn.Softplus(), nn.Hardtanh(min_val=0.01, max_val=7.))
return q_z_nn, q_z_mean, q_z_var
def create_decoder(self):
"""
Helper function to create the elemental blocks for the decoder. Creates a gated convnet decoder.
"""
num_classes = 256
if self.input_type == 'binary':
p_x_nn = nn.Sequential(
GatedConvTranspose2d(self.z_size, 64, self.last_kernel_size, 1, 0),
GatedConvTranspose2d(64, 64, 5, 1, 2),
GatedConvTranspose2d(64, 32, 5, 2, 2, 1),
GatedConvTranspose2d(32, 32, 5, 1, 2),
GatedConvTranspose2d(32, 32, 5, 2, 2, 1), GatedConvTranspose2d(32, 32, 5, 1, 2)
)
p_x_mean = nn.Sequential(nn.Conv2d(32, self.input_size[0], 1, 1, 0), nn.Sigmoid())
return p_x_nn, p_x_mean
elif self.input_type == 'multinomial':
act = None
p_x_nn = nn.Sequential(
GatedConvTranspose2d(self.z_size, 64, self.last_kernel_size, 1, 0, activation=act),
GatedConvTranspose2d(64, 64, 5, 1, 2, activation=act),
GatedConvTranspose2d(64, 32, 5, 2, 2, 1, activation=act),
GatedConvTranspose2d(32, 32, 5, 1, 2, activation=act),
GatedConvTranspose2d(32, 32, 5, 2, 2, 1, activation=act),
GatedConvTranspose2d(32, 32, 5, 1, 2, activation=act)
)
p_x_mean = nn.Sequential(
nn.Conv2d(32, 256, 5, 1, 2),
nn.Conv2d(256, self.input_size[0] * num_classes, 1, 1, 0),
# output shape: batch_size, num_channels * num_classes, pixel_width, pixel_height
)
return p_x_nn, p_x_mean
else:
raise ValueError('invalid input type!!')
def reparameterize(self, mu, var):
"""
Samples z from a multivariate Gaussian with diagonal covariance matrix using the
reparameterization trick.
"""
std = var.sqrt()
eps = self.FloatTensor(std.size()).normal_()
z = eps.mul(std).add_(mu)
return z
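# The reparameterization above rewrites z ~ N(mu, diag(var)) as
#     z = mu + sqrt(var) * eps,  eps ~ N(0, I),
# so gradients can flow to mu and var through a deterministic function of the noise.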
def encode(self, x):
"""
Encoder expects the following data shape as input: shape = (batch_size, num_channels, width, height)
"""
h = self.q_z_nn(x)
h = h.view(h.size(0), -1)
mean = self.q_z_mean(h)
var = self.q_z_var(h)
return mean, var
def decode(self, z):
"""
Decoder outputs the reconstructed image in the following shape:
x_mean.shape = (batch_size, num_channels, width, height)
"""
z = z.view(z.size(0), self.z_size, 1, 1)
h = self.p_x_nn(z)
x_mean = self.p_x_mean(h)
return x_mean
def forward(self, x):
"""
Evaluates the model as a whole, encodes and decodes. Note that the log det jacobian is zero
for a plain VAE (without flows), and z_0 = z_k.
"""
# mean and variance of z
z_mu, z_var = self.encode(x)
# sample z
z = self.reparameterize(z_mu, z_var)
x_mean = self.decode(z)
return x_mean, z_mu, z_var, self.log_det_j, z, z
class PlanarVAE(VAE):
"""
Variational auto-encoder with planar flows in the encoder.
"""
def __init__(self, args):
super(PlanarVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.Planar
self.num_flows = args.num_flows
# Amortized flow parameters
self.amor_u = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
self.amor_w = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow()
self.add_module('flow_' + str(k), flow_k)
def encode(self, x):
"""
Encoder that outputs parameters for the base distribution of z and the flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# return amortized u and w for all flows
u = self.amor_u(h).view(batch_size, self.num_flows, self.z_size, 1)
w = self.amor_w(h).view(batch_size, self.num_flows, 1, self.z_size)
b = self.amor_b(h).view(batch_size, self.num_flows, 1, 1)
return mean_z, var_z, u, w, b
def forward(self, x):
"""
Forward pass with planar flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[ \sum_k log |det dz_k/dz_{k-1}| ].
"""
self.log_det_j = 0.
z_mu, z_var, u, w, b = self.encode(x)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
z_k, log_det_jacobian = flow_k(z[k], u[:, k, :, :], w[:, k, :, :], b[:, k, :, :])
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
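# Under the flow above, the final log-density follows the usual telescoping identity:
#     log q_K(z_K) = log q_0(z_0) - sum_k log |det dz_k / dz_{k-1}|,
# with the sum accumulated in self.log_det_j one planar layer at a time.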
class OrthogonalSylvesterVAE(VAE):
"""
Variational auto-encoder with orthogonal flows in the encoder.
"""
def __init__(self, args):
super(OrthogonalSylvesterVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.Sylvester
self.num_flows = args.num_flows
self.num_ortho_vecs = args.num_ortho_vecs
assert (self.num_ortho_vecs <= self.z_size) and (self.num_ortho_vecs > 0)
# Orthogonalization parameters
if self.num_ortho_vecs == self.z_size:
self.cond = 1.e-5
else:
self.cond = 1.e-6
self.steps = 100
identity = torch.eye(self.num_ortho_vecs, self.num_ortho_vecs)
# Add batch dimension
identity = identity.unsqueeze(0)
# Put identity in buffer so that it will be moved to GPU if needed by any call of .cuda
self.register_buffer('_eye', identity)
self._eye.requires_grad = False
# Masks needed for triangular R1 and R2.
triu_mask = torch.triu(torch.ones(self.num_ortho_vecs, self.num_ortho_vecs), diagonal=1)
triu_mask = triu_mask.unsqueeze(0).unsqueeze(3)
diag_idx = torch.arange(0, self.num_ortho_vecs).long()
self.register_buffer('triu_mask', triu_mask)
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
# Amortized flow parameters
# Diagonal elements of R1 * R2 have to satisfy -1 < R1 * R2 for flow to be invertible
self.diag_activation = nn.Tanh()
self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs * self.num_ortho_vecs)
self.amor_diag1 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs), self.diag_activation
)
self.amor_diag2 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs), self.diag_activation
)
self.amor_q = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.num_ortho_vecs)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow(self.num_ortho_vecs)
self.add_module('flow_' + str(k), flow_k)
def batch_construct_orthogonal(self, q):
"""
Batch orthogonal matrix construction.
:param q: q contains batches of matrices, shape : (batch_size * num_flows, z_size * num_ortho_vecs)
:return: batches of orthogonalized matrices, shape: (batch_size * num_flows, z_size, num_ortho_vecs)
"""
# Reshape to shape (num_flows * batch_size, z_size * num_ortho_vecs)
q = q.view(-1, self.z_size * self.num_ortho_vecs)
norm = torch.norm(q, p=2, dim=1, keepdim=True)
amat = torch.div(q, norm)
dim0 = amat.size(0)
        amat = amat.view(dim0, self.z_size, self.num_ortho_vecs)
max_norm = 0.
# Iterative orthogonalization
for s in range(self.steps):
tmp = torch.bmm(amat.transpose(2, 1), amat)
tmp = self._eye - tmp
tmp = self._eye + 0.5 * tmp
amat = torch.bmm(amat, tmp)
# Testing for convergence
test = torch.bmm(amat.transpose(2, 1), amat) - self._eye
norms2 = torch.sum(torch.norm(test, p=2, dim=2)**2, dim=1)
norms = torch.sqrt(norms2)
max_norm = torch.max(norms).item()
if max_norm <= self.cond:
break
if max_norm > self.cond:
print('\nWARNING WARNING WARNING: orthogonalization not complete')
print('\t Final max norm =', max_norm)
print()
# Reshaping: first dimension is batch_size
amat = amat.view(-1, self.num_flows, self.z_size, self.num_ortho_vecs)
amat = amat.transpose(0, 1)
return amat
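    # Note (added for clarity): the loop above is a Björck-style iterative
    # orthogonalization, A <- A (I + 0.5 * (I - A^T A)), which -- assuming the
    # columns of A start sufficiently close to orthonormal -- converges to a
    # matrix with orthonormal columns (A^T A = I). The convergence test
    # measures ||A^T A - I|| per batch element and warns above self.cond.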
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# Amortized r1, r2, q, b for all flows
full_d = self.amor_d(h)
diag1 = self.amor_diag1(h)
diag2 = self.amor_diag2(h)
        full_d = full_d.view(batch_size, self.num_ortho_vecs, self.num_ortho_vecs, self.num_flows)
        diag1 = diag1.view(batch_size, self.num_ortho_vecs, self.num_flows)
        diag2 = diag2.view(batch_size, self.num_ortho_vecs, self.num_flows)
r1 = full_d * self.triu_mask
r2 = full_d.transpose(2, 1) * self.triu_mask
r1[:, self.diag_idx, self.diag_idx, :] = diag1
r2[:, self.diag_idx, self.diag_idx, :] = diag2
q = self.amor_q(h)
b = self.amor_b(h)
# Resize flow parameters to divide over K flows
        b = b.view(batch_size, 1, self.num_ortho_vecs, self.num_flows)
return mean_z, var_z, r1, r2, q, b
def forward(self, x):
"""
        Forward pass with orthogonal Sylvester flows for the transformation z_0 -> z_1 -> ... -> z_k.
        Log determinant is computed as log_det_j = N E_q_z0[\\sum_k log |det dz_k/dz_{k-1}| ].
"""
self.log_det_j = 0.
z_mu, z_var, r1, r2, q, b = self.encode(x)
# Orthogonalize all q matrices
q_ortho = self.batch_construct_orthogonal(q)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], q_ortho[k, :, :, :], b[:, :, :, k])
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
class HouseholderSylvesterVAE(VAE):
"""
Variational auto-encoder with householder sylvester flows in the encoder.
"""
def __init__(self, args):
super(HouseholderSylvesterVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.Sylvester
self.num_flows = args.num_flows
self.num_householder = args.num_householder
assert self.num_householder > 0
identity = torch.eye(self.z_size, self.z_size)
# Add batch dimension
identity = identity.unsqueeze(0)
        # Put identity in a buffer so that it is moved to the GPU by any call to .cuda()
self.register_buffer('_eye', identity)
self._eye.requires_grad = False
# Masks needed for triangular r1 and r2.
triu_mask = torch.triu(torch.ones(self.z_size, self.z_size), diagonal=1)
triu_mask = triu_mask.unsqueeze(0).unsqueeze(3)
diag_idx = torch.arange(0, self.z_size).long()
self.register_buffer('triu_mask', triu_mask)
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
# Amortized flow parameters
# Diagonal elements of r1 * r2 have to satisfy -1 < r1 * r2 for flow to be invertible
self.diag_activation = nn.Tanh()
self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.z_size)
self.amor_diag1 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_diag2 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_q = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.num_householder)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow(self.z_size)
self.add_module('flow_' + str(k), flow_k)
def batch_construct_orthogonal(self, q):
"""
Batch orthogonal matrix construction.
:param q: q contains batches of matrices, shape : (batch_size, num_flows * z_size * num_householder)
:return: batches of orthogonalized matrices, shape: (batch_size * num_flows, z_size, z_size)
"""
# Reshape to shape (num_flows * batch_size * num_householder, z_size)
q = q.view(-1, self.z_size)
norm = torch.norm(q, p=2, dim=1, keepdim=True) # ||v||_2
v = torch.div(q, norm) # v / ||v||_2
# Calculate Householder Matrices
vvT = torch.bmm(v.unsqueeze(2), v.unsqueeze(1)) # v * v_T : batch_dot( B x L x 1 * B x 1 x L ) = B x L x L
        amat = self._eye - 2 * vvT  # NOTE: v is already normalized, so there is no need to divide by v^T v
# Reshaping: first dimension is batch_size * num_flows
amat = amat.view(-1, self.num_householder, self.z_size, self.z_size)
tmp = amat[:, 0]
for k in range(1, self.num_householder):
tmp = torch.bmm(amat[:, k], tmp)
amat = tmp.view(-1, self.num_flows, self.z_size, self.z_size)
amat = amat.transpose(0, 1)
return amat
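    # Note (added for clarity): for a unit vector v, H = I - 2 v v^T is a
    # Householder reflection, which is orthogonal (H^T H = I). The loop above
    # composes num_householder reflections per flow; a product of orthogonal
    # matrices is orthogonal, so no iterative correction is needed here, unlike
    # in OrthogonalSylvesterVAE.batch_construct_orthogonal.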
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# Amortized r1, r2, q, b for all flows
full_d = self.amor_d(h)
diag1 = self.amor_diag1(h)
diag2 = self.amor_diag2(h)
        full_d = full_d.view(batch_size, self.z_size, self.z_size, self.num_flows)
        diag1 = diag1.view(batch_size, self.z_size, self.num_flows)
        diag2 = diag2.view(batch_size, self.z_size, self.num_flows)
r1 = full_d * self.triu_mask
r2 = full_d.transpose(2, 1) * self.triu_mask
r1[:, self.diag_idx, self.diag_idx, :] = diag1
r2[:, self.diag_idx, self.diag_idx, :] = diag2
q = self.amor_q(h)
b = self.amor_b(h)
# Resize flow parameters to divide over K flows
        b = b.view(batch_size, 1, self.z_size, self.num_flows)
return mean_z, var_z, r1, r2, q, b
def forward(self, x):
"""
        Forward pass with Householder Sylvester flows for the transformation z_0 -> z_1 -> ... -> z_k.
        Log determinant is computed as log_det_j = N E_q_z0[\\sum_k log |det dz_k/dz_{k-1}| ].
"""
self.log_det_j = 0.
z_mu, z_var, r1, r2, q, b = self.encode(x)
# Orthogonalize all q matrices
q_ortho = self.batch_construct_orthogonal(q)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
q_k = q_ortho[k]
z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], q_k, b[:, :, :, k], sum_ldj=True)
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
class TriangularSylvesterVAE(VAE):
"""
Variational auto-encoder with triangular Sylvester flows in the encoder. Alternates between setting
the orthogonal matrix equal to permutation and identity matrix for each flow.
"""
def __init__(self, args):
super(TriangularSylvesterVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.TriangularSylvester
self.num_flows = args.num_flows
# permuting indices corresponding to Q=P (permutation matrix) for every other flow
flip_idx = torch.arange(self.z_size - 1, -1, -1).long()
self.register_buffer('flip_idx', flip_idx)
# Masks needed for triangular r1 and r2.
triu_mask = torch.triu(torch.ones(self.z_size, self.z_size), diagonal=1)
triu_mask = triu_mask.unsqueeze(0).unsqueeze(3)
diag_idx = torch.arange(0, self.z_size).long()
self.register_buffer('triu_mask', triu_mask)
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
# Amortized flow parameters
# Diagonal elements of r1 * r2 have to satisfy -1 < r1 * r2 for flow to be invertible
self.diag_activation = nn.Tanh()
self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.z_size)
self.amor_diag1 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_diag2 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow(self.z_size)
self.add_module('flow_' + str(k), flow_k)
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# Amortized r1, r2, b for all flows
full_d = self.amor_d(h)
diag1 = self.amor_diag1(h)
diag2 = self.amor_diag2(h)
        full_d = full_d.view(batch_size, self.z_size, self.z_size, self.num_flows)
        diag1 = diag1.view(batch_size, self.z_size, self.num_flows)
        diag2 = diag2.view(batch_size, self.z_size, self.num_flows)
r1 = full_d * self.triu_mask
r2 = full_d.transpose(2, 1) * self.triu_mask
r1[:, self.diag_idx, self.diag_idx, :] = diag1
r2[:, self.diag_idx, self.diag_idx, :] = diag2
b = self.amor_b(h)
# Resize flow parameters to divide over K flows
        b = b.view(batch_size, 1, self.z_size, self.num_flows)
return mean_z, var_z, r1, r2, b
def forward(self, x):
"""
        Forward pass with triangular Sylvester flows for the transformation z_0 -> z_1 -> ... -> z_k.
        Log determinant is computed as log_det_j = N E_q_z0[\\sum_k log |det dz_k/dz_{k-1}| ].
"""
self.log_det_j = 0.
z_mu, z_var, r1, r2, b = self.encode(x)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
if k % 2 == 1:
                # Alternate with reordering z for triangular flow
permute_z = self.flip_idx
else:
permute_z = None
z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], b[:, :, :, k], permute_z, sum_ldj=True)
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
class IAFVAE(VAE):
"""
Variational auto-encoder with inverse autoregressive flows in the encoder.
"""
def __init__(self, args):
super(IAFVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
self.h_size = args.made_h_size
self.h_context = nn.Linear(self.q_z_nn_output_dim, self.h_size)
# Flow parameters
self.num_flows = args.num_flows
self.flow = flows.IAF(
z_size=self.z_size, num_flows=self.num_flows, num_hidden=1, h_size=self.h_size, conv2d=False
)
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and context h for flows.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
h_context = self.h_context(h)
return mean_z, var_z, h_context
def forward(self, x):
"""
Forward pass with inverse autoregressive flows for the transformation z_0 -> z_1 -> ... -> z_k.
        Log determinant is computed as log_det_j = N E_q_z0[\\sum_k log |det dz_k/dz_{k-1}| ].
"""
# mean and variance of z
z_mu, z_var, h_context = self.encode(x)
# sample z
z_0 = self.reparameterize(z_mu, z_var)
# iaf flows
z_k, self.log_det_j = self.flow(z_0, h_context)
# decode
x_mean = self.decode(z_k)
return x_mean, z_mu, z_var, self.log_det_j, z_0, z_k
| 25,211 | 33.255435 | 120 | py |
steer | steer-master/ffjord/vae_lib/models/CNFVAE.py | import torch
import torch.nn as nn
from train_misc import build_model_tabular
import lib.layers as layers
from .VAE import VAE
import lib.layers.diffeq_layers as diffeq_layers
from lib.layers.odefunc import NONLINEARITIES
from torchdiffeq import odeint_adjoint as odeint
def get_hidden_dims(args):
return tuple(map(int, args.dims.split("-"))) + (args.z_size,)
def concat_layer_num_params(in_dim, out_dim):
return (in_dim + 1) * out_dim + out_dim
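# Worked example (added for clarity): a "concat" layer concatenates t to its
# input, so it maps (in_dim + 1) -> out_dim. Its parameter count is therefore
# (in_dim + 1) * out_dim weights plus out_dim biases, e.g.
# concat_layer_num_params(2, 3) == (2 + 1) * 3 + 3 == 12.
# Likewise, with the hypothetical setting args.dims = "512-512" and
# args.z_size = 64, get_hidden_dims(args) == (512, 512, 64).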
class CNFVAE(VAE):
def __init__(self, args):
super(CNFVAE, self).__init__(args)
# CNF model
self.cnf = build_model_tabular(args, args.z_size)
if args.cuda:
self.cuda()
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
return mean_z, var_z
def forward(self, x):
"""
        Forward pass with the continuous normalizing flow (CNF) for the transformation z_0 -> z_K.
        The log determinant of the transformation is obtained from the CNF as -delta_logp.
"""
z_mu, z_var = self.encode(x)
# Sample z_0
z0 = self.reparameterize(z_mu, z_var)
zero = torch.zeros(x.shape[0], 1).to(x)
zk, delta_logp = self.cnf(z0, zero) # run model forward
x_mean = self.decode(zk)
return x_mean, z_mu, z_var, -delta_logp.view(-1), z0, zk
class AmortizedBiasODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"):
super(AmortizedBiasODEnet, self).__init__()
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"concat_v2": diffeq_layers.ConcatLinear_v2,
"concatsquash": diffeq_layers.ConcatSquashLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
self.input_dim = input_dim
# build layers and add them
layers = []
activation_fns = []
hidden_shape = input_dim
for dim_out in hidden_dims:
layer = base_layer(hidden_shape, dim_out)
layers.append(layer)
activation_fns.append(NONLINEARITIES[nonlinearity])
hidden_shape = dim_out
self.layers = nn.ModuleList(layers)
self.activation_fns = nn.ModuleList(activation_fns[:-1])
def _unpack_params(self, params):
return [params]
def forward(self, t, y, am_biases):
dx = y
for l, layer in enumerate(self.layers):
dx = layer(t, dx)
this_bias, am_biases = am_biases[:, :dx.size(1)], am_biases[:, dx.size(1):]
dx = dx + this_bias
# if not last layer, use nonlinearity
if l < len(self.layers) - 1:
dx = self.activation_fns[l](dx)
return dx
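# Shape note (added for clarity): am_biases is a single flat vector per sample
# holding the concatenated per-layer biases. For a hypothetical
# hidden_dims = (64, 64, z_size), am_biases has 64 + 64 + z_size entries, and
# the slicing am_biases[:, :dx.size(1)] peels off one layer's bias at a time.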
class AmortizedLowRankODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, rank=1, layer_type="concat", nonlinearity="softplus"):
super(AmortizedLowRankODEnet, self).__init__()
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"concat_v2": diffeq_layers.ConcatLinear_v2,
"concatsquash": diffeq_layers.ConcatSquashLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
self.input_dim = input_dim
# build layers and add them
layers = []
activation_fns = []
hidden_shape = input_dim
self.output_dims = hidden_dims
self.input_dims = (input_dim,) + hidden_dims[:-1]
for dim_out in hidden_dims:
layer = base_layer(hidden_shape, dim_out)
layers.append(layer)
activation_fns.append(NONLINEARITIES[nonlinearity])
hidden_shape = dim_out
self.layers = nn.ModuleList(layers)
self.activation_fns = nn.ModuleList(activation_fns[:-1])
self.rank = rank
def _unpack_params(self, params):
return [params]
def _rank_k_bmm(self, x, u, v):
xu = torch.bmm(x[:, None], u.view(x.shape[0], x.shape[-1], self.rank))
xuv = torch.bmm(xu, v.view(x.shape[0], self.rank, -1))
return xuv[:, 0]
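    # Note (added for clarity): _rank_k_bmm applies a per-sample rank-`rank`
    # weight update W_am = u v^T without materializing the (in_dim x out_dim)
    # matrix: x (u v^T) is computed as (x u) v^T, costing
    # O(rank * (in_dim + out_dim)) per sample instead of O(in_dim * out_dim).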
def forward(self, t, y, am_params):
dx = y
for l, (layer, in_dim, out_dim) in enumerate(zip(self.layers, self.input_dims, self.output_dims)):
this_u, am_params = am_params[:, :in_dim * self.rank], am_params[:, in_dim * self.rank:]
this_v, am_params = am_params[:, :out_dim * self.rank], am_params[:, out_dim * self.rank:]
this_bias, am_params = am_params[:, :out_dim], am_params[:, out_dim:]
xw = layer(t, dx)
xw_am = self._rank_k_bmm(dx, this_u, this_v)
dx = xw + xw_am + this_bias
# if not last layer, use nonlinearity
if l < len(self.layers) - 1:
dx = self.activation_fns[l](dx)
return dx
class HyperODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"):
super(HyperODEnet, self).__init__()
assert layer_type == "concat"
self.input_dim = input_dim
# build layers and add them
activation_fns = []
for dim_out in hidden_dims + (input_dim,):
activation_fns.append(NONLINEARITIES[nonlinearity])
self.activation_fns = nn.ModuleList(activation_fns[:-1])
self.output_dims = hidden_dims
self.input_dims = (input_dim,) + hidden_dims[:-1]
def _pack_inputs(self, t, x):
tt = torch.ones_like(x[:, :1]) * t
ttx = torch.cat([tt, x], 1)
return ttx
def _unpack_params(self, params):
layer_params = []
for in_dim, out_dim in zip(self.input_dims, self.output_dims):
this_num_params = concat_layer_num_params(in_dim, out_dim)
# get params for this layer
this_params, params = params[:, :this_num_params], params[:, this_num_params:]
# split into weight and bias
bias, weight_params = this_params[:, :out_dim], this_params[:, out_dim:]
weight = weight_params.view(weight_params.size(0), in_dim + 1, out_dim)
layer_params.append(weight)
layer_params.append(bias)
return layer_params
def _layer(self, t, x, weight, bias):
# weights is (batch, in_dim + 1, out_dim)
ttx = self._pack_inputs(t, x) # (batch, in_dim + 1)
ttx = ttx.view(ttx.size(0), 1, ttx.size(1)) # (batch, 1, in_dim + 1)
xw = torch.bmm(ttx, weight)[:, 0, :] # (batch, out_dim)
return xw + bias
def forward(self, t, y, *layer_params):
dx = y
for l, (weight, bias) in enumerate(zip(layer_params[::2], layer_params[1::2])):
dx = self._layer(t, dx, weight, bias)
# if not last layer, use nonlinearity
if l < len(layer_params) - 1:
dx = self.activation_fns[l](dx)
return dx
class LyperODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"):
super(LyperODEnet, self).__init__()
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"concat_v2": diffeq_layers.ConcatLinear_v2,
"concatsquash": diffeq_layers.ConcatSquashLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
self.input_dim = input_dim
# build layers and add them
layers = []
activation_fns = []
hidden_shape = input_dim
self.dims = (input_dim,) + hidden_dims
self.output_dims = hidden_dims
self.input_dims = (input_dim,) + hidden_dims[:-1]
for dim_out in hidden_dims[:-1]:
layer = base_layer(hidden_shape, dim_out)
layers.append(layer)
activation_fns.append(NONLINEARITIES[nonlinearity])
hidden_shape = dim_out
self.layers = nn.ModuleList(layers)
self.activation_fns = nn.ModuleList(activation_fns)
def _pack_inputs(self, t, x):
tt = torch.ones_like(x[:, :1]) * t
ttx = torch.cat([tt, x], 1)
return ttx
def _unpack_params(self, params):
return [params]
def _am_layer(self, t, x, weight, bias):
# weights is (batch, in_dim + 1, out_dim)
ttx = self._pack_inputs(t, x) # (batch, in_dim + 1)
ttx = ttx.view(ttx.size(0), 1, ttx.size(1)) # (batch, 1, in_dim + 1)
xw = torch.bmm(ttx, weight)[:, 0, :] # (batch, out_dim)
return xw + bias
def forward(self, t, x, am_params):
dx = x
for layer, act in zip(self.layers, self.activation_fns):
dx = act(layer(t, dx))
bias, weight_params = am_params[:, :self.dims[-1]], am_params[:, self.dims[-1]:]
weight = weight_params.view(weight_params.size(0), self.dims[-2] + 1, self.dims[-1])
dx = self._am_layer(t, dx, weight, bias)
return dx
def construct_amortized_odefunc(args, z_dim, amortization_type="bias"):
hidden_dims = get_hidden_dims(args)
if amortization_type == "bias":
diffeq = AmortizedBiasODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
elif amortization_type == "hyper":
diffeq = HyperODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
elif amortization_type == "lyper":
diffeq = LyperODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
elif amortization_type == "low_rank":
diffeq = AmortizedLowRankODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
rank=args.rank,
)
odefunc = layers.ODEfunc(
diffeq=diffeq,
divergence_fn=args.divergence_fn,
residual=args.residual,
rademacher=args.rademacher,
)
return odefunc
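# Summary (added for clarity): the four amortization strategies differ only in
# what the encoder predicts per data point:
#   "bias"     -> one extra bias vector per layer;
#   "low_rank" -> a rank-k factorized weight update plus biases per layer;
#   "hyper"    -> the full weights and biases of every layer;
#   "lyper"    -> the full weights and biases of the last layer only.
# The corresponding _amortized_layers methods below size the encoder heads.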
class AmortizedCNFVAE(VAE):
h_size = 256
def __init__(self, args):
super(AmortizedCNFVAE, self).__init__(args)
# CNF model
self.odefuncs = nn.ModuleList([
construct_amortized_odefunc(args, args.z_size, self.amortization_type) for _ in range(args.num_blocks)
])
self.q_am = self._amortized_layers(args)
assert len(self.q_am) == args.num_blocks or len(self.q_am) == 0
if args.cuda:
self.cuda()
self.register_buffer('integration_times', torch.tensor([0.0, args.time_length]))
self.atol = args.atol
self.rtol = args.rtol
self.solver = args.solver
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
am_params = [q_am(h) for q_am in self.q_am]
return mean_z, var_z, am_params
def forward(self, x):
self.log_det_j = 0.
z_mu, z_var, am_params = self.encode(x)
# Sample z_0
z0 = self.reparameterize(z_mu, z_var)
delta_logp = torch.zeros(x.shape[0], 1).to(x)
z = z0
for odefunc, am_param in zip(self.odefuncs, am_params):
am_param_unpacked = odefunc.diffeq._unpack_params(am_param)
odefunc.before_odeint()
states = odeint(
odefunc,
(z, delta_logp) + tuple(am_param_unpacked),
self.integration_times.to(z),
atol=self.atol,
rtol=self.rtol,
method=self.solver,
)
z, delta_logp = states[0][-1], states[1][-1]
x_mean = self.decode(z)
return x_mean, z_mu, z_var, -delta_logp.view(-1), z0, z
class AmortizedBiasCNFVAE(AmortizedCNFVAE):
amortization_type = "bias"
def _amortized_layers(self, args):
hidden_dims = get_hidden_dims(args)
bias_size = sum(hidden_dims)
return nn.ModuleList([nn.Linear(self.h_size, bias_size) for _ in range(args.num_blocks)])
class AmortizedLowRankCNFVAE(AmortizedCNFVAE):
amortization_type = "low_rank"
def _amortized_layers(self, args):
out_dims = get_hidden_dims(args)
in_dims = (out_dims[-1],) + out_dims[:-1]
params_size = (sum(in_dims) + sum(out_dims)) * args.rank + sum(out_dims)
return nn.ModuleList([nn.Linear(self.h_size, params_size) for _ in range(args.num_blocks)])
class HypernetCNFVAE(AmortizedCNFVAE):
amortization_type = "hyper"
def _amortized_layers(self, args):
hidden_dims = get_hidden_dims(args)
input_dims = (args.z_size,) + hidden_dims[:-1]
assert args.layer_type == "concat", "hypernets only support concat layers at the moment"
weight_dims = [concat_layer_num_params(in_dim, out_dim) for in_dim, out_dim in zip(input_dims, hidden_dims)]
weight_size = sum(weight_dims)
return nn.ModuleList([nn.Linear(self.h_size, weight_size) for _ in range(args.num_blocks)])
class LypernetCNFVAE(AmortizedCNFVAE):
amortization_type = "lyper"
def _amortized_layers(self, args):
dims = (args.z_size,) + get_hidden_dims(args)
weight_size = concat_layer_num_params(dims[-2], dims[-1])
return nn.ModuleList([nn.Linear(self.h_size, weight_size) for _ in range(args.num_blocks)])
| 14,405 | 33.881356 | 116 | py |
steer | steer-master/ffjord/vae_lib/models/layers.py | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import numpy as np
import torch.nn.functional as F
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class GatedConv2d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride, padding, dilation=1, activation=None):
super(GatedConv2d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation)
self.g = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
class GatedConvTranspose2d(nn.Module):
def __init__(
self, input_channels, output_channels, kernel_size, stride, padding, output_padding=0, dilation=1,
activation=None
):
super(GatedConvTranspose2d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.ConvTranspose2d(
input_channels, output_channels, kernel_size, stride, padding, output_padding, dilation=dilation
)
self.g = nn.ConvTranspose2d(
input_channels, output_channels, kernel_size, stride, padding, output_padding, dilation=dilation
)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
class MaskedLinear(nn.Module):
"""
Creates masked linear layer for MLP MADE.
For input (x) to hidden (h) or hidden to hidden layers choose diagonal_zeros = False.
For hidden to output (y) layers:
If output depends on input through y_i = f(x_{<i}) set diagonal_zeros = True.
Else if output depends on input through y_i = f(x_{<=i}) set diagonal_zeros = False.
"""
def __init__(self, in_features, out_features, diagonal_zeros=False, bias=True):
super(MaskedLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.diagonal_zeros = diagonal_zeros
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
mask = torch.from_numpy(self.build_mask())
if torch.cuda.is_available():
mask = mask.cuda()
        self.mask = mask
self.reset_parameters()
def reset_parameters(self):
        nn.init.kaiming_normal_(self.weight)
if self.bias is not None:
self.bias.data.zero_()
def build_mask(self):
n_in, n_out = self.in_features, self.out_features
assert n_in % n_out == 0 or n_out % n_in == 0
mask = np.ones((n_in, n_out), dtype=np.float32)
if n_out >= n_in:
k = n_out // n_in
for i in range(n_in):
mask[i + 1:, i * k:(i + 1) * k] = 0
if self.diagonal_zeros:
mask[i:i + 1, i * k:(i + 1) * k] = 0
else:
k = n_in // n_out
for i in range(n_out):
mask[(i + 1) * k:, i:i + 1] = 0
if self.diagonal_zeros:
mask[i * k:(i + 1) * k:, i:i + 1] = 0
return mask
def forward(self, x):
output = x.mm(self.mask * self.weight)
if self.bias is not None:
return output.add(self.bias.expand_as(output))
else:
return output
def __repr__(self):
if self.bias is not None:
bias = True
else:
bias = False
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ', diagonal_zeros=' \
+ str(self.diagonal_zeros) + ', bias=' \
+ str(bias) + ')'
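# Worked example (added for clarity): for in_features = out_features = 3 the
# mask built above (rows = inputs, columns = outputs) is
#   diagonal_zeros=False: [[1, 1, 1], [0, 1, 1], [0, 0, 1]]
#   diagonal_zeros=True:  [[0, 1, 1], [0, 0, 1], [0, 0, 0]]
# i.e. output y_i depends on x_{<=i} (False) or strictly on x_{<i} (True).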
class MaskedConv2d(nn.Module):
"""
Creates masked convolutional autoregressive layer for pixelCNN.
For input (x) to hidden (h) or hidden to hidden layers choose diagonal_zeros = False.
For hidden to output (y) layers:
If output depends on input through y_i = f(x_{<i}) set diagonal_zeros = True.
Else if output depends on input through y_i = f(x_{<=i}) set diagonal_zeros = False.
"""
def __init__(self, in_features, out_features, size_kernel=(3, 3), diagonal_zeros=False, bias=True):
super(MaskedConv2d, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.size_kernel = size_kernel
self.diagonal_zeros = diagonal_zeros
self.weight = Parameter(torch.FloatTensor(out_features, in_features, *self.size_kernel))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
mask = torch.from_numpy(self.build_mask())
if torch.cuda.is_available():
mask = mask.cuda()
        self.mask = mask
self.reset_parameters()
def reset_parameters(self):
        nn.init.kaiming_normal_(self.weight)
if self.bias is not None:
self.bias.data.zero_()
def build_mask(self):
n_in, n_out = self.in_features, self.out_features
assert n_out % n_in == 0 or n_in % n_out == 0, "%d - %d" % (n_in, n_out)
# Build autoregressive mask
l = (self.size_kernel[0] - 1) // 2
m = (self.size_kernel[1] - 1) // 2
mask = np.ones((n_out, n_in, self.size_kernel[0], self.size_kernel[1]), dtype=np.float32)
mask[:, :, :l, :] = 0
mask[:, :, l, :m] = 0
if n_out >= n_in:
k = n_out // n_in
for i in range(n_in):
mask[i * k:(i + 1) * k, i + 1:, l, m] = 0
if self.diagonal_zeros:
mask[i * k:(i + 1) * k, i:i + 1, l, m] = 0
else:
k = n_in // n_out
for i in range(n_out):
mask[i:i + 1, (i + 1) * k:, l, m] = 0
if self.diagonal_zeros:
mask[i:i + 1, i * k:(i + 1) * k:, l, m] = 0
return mask
def forward(self, x):
output = F.conv2d(x, self.mask * self.weight, bias=self.bias, padding=(1, 1))
return output
def __repr__(self):
if self.bias is not None:
bias = True
else:
bias = False
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ', diagonal_zeros=' \
+ str(self.diagonal_zeros) + ', bias=' \
+ str(bias) + ', size_kernel=' \
+ str(self.size_kernel) + ')'
| 7,128 | 32.947619 | 115 | py |
steer | steer-master/ffjord/vae_lib/models/flows.py | """
Collection of flow strategies
"""
from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from vae_lib.models.layers import MaskedConv2d, MaskedLinear
class Planar(nn.Module):
"""
PyTorch implementation of planar flows as presented in "Variational Inference with Normalizing Flows"
by Danilo Jimenez Rezende, Shakir Mohamed. Model assumes amortized flow parameters.
"""
def __init__(self):
super(Planar, self).__init__()
self.h = nn.Tanh()
self.softplus = nn.Softplus()
def der_h(self, x):
""" Derivative of tanh """
return 1 - self.h(x)**2
def forward(self, zk, u, w, b):
"""
Forward pass. Assumes amortized u, w and b. Conditions on diagonals of u and w for invertibility
        will be satisfied inside this function. Computes the following transformation:
z' = z + u h( w^T z + b)
or actually
z'^T = z^T + h(z^T w + b)u^T
Assumes the following input shapes:
shape u = (batch_size, z_size, 1)
shape w = (batch_size, 1, z_size)
shape b = (batch_size, 1, 1)
shape z = (batch_size, z_size).
"""
zk = zk.unsqueeze(2)
        # reparameterize u such that the flow becomes invertible (see appendix of the paper)
uw = torch.bmm(w, u)
m_uw = -1. + self.softplus(uw)
w_norm_sq = torch.sum(w**2, dim=2, keepdim=True)
u_hat = u + ((m_uw - uw) * w.transpose(2, 1) / w_norm_sq)
# compute flow with u_hat
wzb = torch.bmm(w, zk) + b
z = zk + u_hat * self.h(wzb)
z = z.squeeze(2)
# compute logdetJ
psi = w * self.der_h(wzb)
log_det_jacobian = torch.log(torch.abs(1 + torch.bmm(psi, u_hat)))
log_det_jacobian = log_det_jacobian.squeeze(2).squeeze(1)
return z, log_det_jacobian
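# Usage sketch (added for clarity; shapes follow the docstring above):
# >>> flow = Planar()
# >>> z = torch.randn(8, 4)            # (batch_size, z_size)
# >>> u = torch.randn(8, 4, 1)         # (batch_size, z_size, 1)
# >>> w = torch.randn(8, 1, 4)         # (batch_size, 1, z_size)
# >>> b = torch.randn(8, 1, 1)         # (batch_size, 1, 1)
# >>> z_new, ldj = flow(z, u, w, b)    # z_new: (8, 4), ldj: (8,)
# The log-det follows from the matrix determinant lemma:
# det(I + u_hat psi^T) = 1 + psi^T u_hat with psi = h'(w^T z + b) w.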
class Sylvester(nn.Module):
"""
Sylvester normalizing flow.
"""
def __init__(self, num_ortho_vecs):
super(Sylvester, self).__init__()
self.num_ortho_vecs = num_ortho_vecs
self.h = nn.Tanh()
triu_mask = torch.triu(torch.ones(num_ortho_vecs, num_ortho_vecs), diagonal=1).unsqueeze(0)
diag_idx = torch.arange(0, num_ortho_vecs).long()
self.register_buffer('triu_mask', Variable(triu_mask))
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
def der_h(self, x):
return self.der_tanh(x)
def der_tanh(self, x):
return 1 - self.h(x)**2
def _forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):
"""
All flow parameters are amortized. Conditions on diagonals of R1 and R2 for invertibility need to be satisfied
outside of this function. Computes the following transformation:
z' = z + QR1 h( R2Q^T z + b)
or actually
z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T
:param zk: shape: (batch_size, z_size)
:param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)
:param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)
:param q_ortho: shape (batch_size, z_size , num_ortho_vecs)
:param b: shape: (batch_size, 1, self.z_size)
:return: z, log_det_j
"""
# Amortized flow parameters
zk = zk.unsqueeze(1)
# Save diagonals for log_det_j
diag_r1 = r1[:, self.diag_idx, self.diag_idx]
diag_r2 = r2[:, self.diag_idx, self.diag_idx]
r1_hat = r1
r2_hat = r2
qr2 = torch.bmm(q_ortho, r2_hat.transpose(2, 1))
qr1 = torch.bmm(q_ortho, r1_hat)
r2qzb = torch.bmm(zk, qr2) + b
z = torch.bmm(self.h(r2qzb), qr1.transpose(2, 1)) + zk
z = z.squeeze(1)
# Compute log|det J|
# Output log_det_j in shape (batch_size) instead of (batch_size,1)
diag_j = diag_r1 * diag_r2
diag_j = self.der_h(r2qzb).squeeze(1) * diag_j
diag_j += 1.
log_diag_j = diag_j.abs().log()
if sum_ldj:
log_det_j = log_diag_j.sum(-1)
else:
log_det_j = log_diag_j
return z, log_det_j
def forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):
return self._forward(zk, r1, r2, q_ortho, b, sum_ldj)
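# Note (added for clarity): the diagonal-only log-det in _forward follows from
# Sylvester's determinant identity det(I + A B) = det(I + B A): the Jacobian
# I + Q R1 diag(h') R2 Q^T has the same determinant as I + diag(h') R2 R1
# (using Q^T Q = I), and since R1 and R2 are triangular this is the product
# over i of (1 + h'(.)_i * r1_ii * r2_ii), exactly the diag_j computed above.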
class TriangularSylvester(nn.Module):
"""
Sylvester normalizing flow with Q=P or Q=I.
"""
def __init__(self, z_size):
super(TriangularSylvester, self).__init__()
self.z_size = z_size
self.h = nn.Tanh()
diag_idx = torch.arange(0, z_size).long()
self.register_buffer('diag_idx', diag_idx)
def der_h(self, x):
return self.der_tanh(x)
def der_tanh(self, x):
return 1 - self.h(x)**2
def _forward(self, zk, r1, r2, b, permute_z=None, sum_ldj=True):
"""
        All flow parameters are amortized. Conditions on diagonals of R1 and R2 need to be satisfied
outside of this function.
Computes the following transformation:
z' = z + QR1 h( R2Q^T z + b)
or actually
z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T
with Q = P a permutation matrix (equal to identity matrix if permute_z=None)
:param zk: shape: (batch_size, z_size)
:param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs).
:param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs).
:param b: shape: (batch_size, 1, self.z_size)
:return: z, log_det_j
"""
# Amortized flow parameters
zk = zk.unsqueeze(1)
# Save diagonals for log_det_j
diag_r1 = r1[:, self.diag_idx, self.diag_idx]
diag_r2 = r2[:, self.diag_idx, self.diag_idx]
if permute_z is not None:
# permute order of z
z_per = zk[:, :, permute_z]
else:
z_per = zk
r2qzb = torch.bmm(z_per, r2.transpose(2, 1)) + b
z = torch.bmm(self.h(r2qzb), r1.transpose(2, 1))
if permute_z is not None:
            # permute z back to the original order
z = z[:, :, permute_z]
z += zk
z = z.squeeze(1)
# Compute log|det J|
# Output log_det_j in shape (batch_size) instead of (batch_size,1)
diag_j = diag_r1 * diag_r2
diag_j = self.der_h(r2qzb).squeeze(1) * diag_j
diag_j += 1.
log_diag_j = diag_j.abs().log()
if sum_ldj:
log_det_j = log_diag_j.sum(-1)
else:
log_det_j = log_diag_j
return z, log_det_j
    def forward(self, zk, r1, r2, b, permute_z=None, sum_ldj=True):
        return self._forward(zk, r1, r2, b, permute_z, sum_ldj)
class IAF(nn.Module):
"""
PyTorch implementation of inverse autoregressive flows as presented in
"Improving Variational Inference with Inverse Autoregressive Flow" by Diederik P. Kingma, Tim Salimans,
Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling.
Inverse Autoregressive Flow with either MADE MLPs or Pixel CNNs. Contains several flows. Each transformation
takes as an input the previous stochastic z, and a context h. The structure of each flow is then as follows:
z <- autoregressive_layer(z) + h, allow for diagonal connections
z <- autoregressive_layer(z), allow for diagonal connections
:
z <- autoregressive_layer(z), do not allow for diagonal connections.
Note that the size of h needs to be the same as h_size, which is the width of the MADE layers.
"""
def __init__(self, z_size, num_flows=2, num_hidden=0, h_size=50, forget_bias=1., conv2d=False):
super(IAF, self).__init__()
self.z_size = z_size
self.num_flows = num_flows
self.num_hidden = num_hidden
self.h_size = h_size
self.conv2d = conv2d
if not conv2d:
ar_layer = MaskedLinear
else:
ar_layer = MaskedConv2d
self.activation = torch.nn.ELU
# self.activation = torch.nn.ReLU
self.forget_bias = forget_bias
self.flows = []
self.param_list = []
# For reordering z after each flow
flip_idx = torch.arange(self.z_size - 1, -1, -1).long()
self.register_buffer('flip_idx', flip_idx)
for k in range(num_flows):
arch_z = [ar_layer(z_size, h_size), self.activation()]
self.param_list += list(arch_z[0].parameters())
z_feats = torch.nn.Sequential(*arch_z)
arch_zh = []
for j in range(num_hidden):
arch_zh += [ar_layer(h_size, h_size), self.activation()]
self.param_list += list(arch_zh[-2].parameters())
zh_feats = torch.nn.Sequential(*arch_zh)
linear_mean = ar_layer(h_size, z_size, diagonal_zeros=True)
linear_std = ar_layer(h_size, z_size, diagonal_zeros=True)
self.param_list += list(linear_mean.parameters())
self.param_list += list(linear_std.parameters())
if torch.cuda.is_available():
z_feats = z_feats.cuda()
zh_feats = zh_feats.cuda()
linear_mean = linear_mean.cuda()
linear_std = linear_std.cuda()
self.flows.append((z_feats, zh_feats, linear_mean, linear_std))
self.param_list = torch.nn.ParameterList(self.param_list)
def forward(self, z, h_context):
logdets = 0.
for i, flow in enumerate(self.flows):
if (i + 1) % 2 == 0 and not self.conv2d:
# reverse ordering to help mixing
z = z[:, self.flip_idx]
h = flow[0](z)
h = h + h_context
h = flow[1](h)
mean = flow[2](h)
            gate = torch.sigmoid(flow[3](h) + self.forget_bias)
z = gate * z + (1 - gate) * mean
logdets += torch.sum(gate.log().view(gate.size(0), -1), 1)
return z, logdets
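# Note (added for clarity): each IAF step above is the gated update
# z <- sigma(s) * z + (1 - sigma(s)) * m with autoregressive s, m, so the
# Jacobian is triangular with diagonal sigma(s); hence
# log |det dz'/dz| = sum(log(sigma(s))), which is what `logdets` accumulates.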
| 9,939 | 32.133333 | 118 | py |
steer | steer-master/ffjord/vae_lib/optimization/loss.py | from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
from vae_lib.utils.distributions import log_normal_diag, log_normal_standard, log_bernoulli
import torch.nn.functional as F
def binary_loss_function(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.):
"""
Computes the binary loss function while summing over batch dimension, not averaged!
:param recon_x: shape: (batch_size, num_channels, pixel_width, pixel_height), bernoulli parameters p(x=1)
:param x: shape (batchsize, num_channels, pixel_width, pixel_height), pixel values rescaled between [0, 1].
:param z_mu: mean of z_0
:param z_var: variance of z_0
:param z_0: first stochastic latent variable
:param z_k: last stochastic latent variable
:param ldj: log det jacobian
:param beta: beta for kl loss
:return: loss, ce, kl
"""
    reconstruction_function = nn.BCELoss(reduction='sum')
batch_size = x.size(0)
# - N E_q0 [ ln p(x|z_k) ]
bce = reconstruction_function(recon_x, x)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k, dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
# N E_q0[ ln q(z_0) - ln p(z_k) ]
summed_logs = torch.sum(log_q_z0 - log_p_zk)
# sum over batches
summed_ldj = torch.sum(ldj)
# ldj = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]
kl = (summed_logs - summed_ldj)
loss = bce + beta * kl
loss /= float(batch_size)
bce /= float(batch_size)
kl /= float(batch_size)
return loss, bce, kl
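# Usage sketch (added for clarity; `model` is a hypothetical flow VAE whose
# forward returns the tuple used throughout this library):
# >>> x_mean, z_mu, z_var, ldj, z0, zk = model(x)
# >>> loss, rec, kl = binary_loss_function(x_mean, x, z_mu, z_var, z0, zk, ldj, beta=beta)
# i.e. loss = (BCE + beta * KL) / batch_size with
# KL = E_q[log q(z_0) - log p(z_k)] - E_q[sum_k log |det dz_k/dz_{k-1}|].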
def multinomial_loss_function(x_logit, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.):
"""
Computes the cross entropy loss function while summing over batch dimension, not averaged!
:param x_logit: shape: (batch_size, num_classes * num_channels, pixel_width, pixel_height), real valued logits
:param x: shape (batchsize, num_channels, pixel_width, pixel_height), pixel values rescaled between [0, 1].
:param z_mu: mean of z_0
:param z_var: variance of z_0
:param z_0: first stochastic latent variable
:param z_k: last stochastic latent variable
:param ldj: log det jacobian
:param args: global parameter settings
:param beta: beta for kl loss
:return: loss, ce, kl
"""
num_classes = 256
batch_size = x.size(0)
x_logit = x_logit.view(batch_size, num_classes, args.input_size[0], args.input_size[1], args.input_size[2])
# make integer class labels
target = (x * (num_classes - 1)).long()
# - N E_q0 [ ln p(x|z_k) ]
# sums over batch dimension (and feature dimension)
ce = cross_entropy(x_logit, target, size_average=False)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k, dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
# N E_q0[ ln q(z_0) - ln p(z_k) ]
summed_logs = torch.sum(log_q_z0 - log_p_zk)
# sum over batches
summed_ldj = torch.sum(ldj)
# ldj = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]
kl = (summed_logs - summed_ldj)
loss = ce + beta * kl
loss /= float(batch_size)
ce /= float(batch_size)
kl /= float(batch_size)
return loss, ce, kl
def binary_loss_array(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.):
"""
Computes the binary loss without averaging or summing over the batch dimension.
"""
batch_size = x.size(0)
# if not summed over batch_dimension
if len(ldj.size()) > 1:
ldj = ldj.view(ldj.size(0), -1).sum(-1)
# TODO: upgrade to newest pytorch version on master branch, there the nn.BCELoss comes with the option
# reduce, which when set to False, does no sum over batch dimension.
bce = -log_bernoulli(x.view(batch_size, -1), recon_x.view(batch_size, -1), dim=1)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k, dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
# ln q(z_0) - ln p(z_k) ]
logs = log_q_z0 - log_p_zk
loss = bce + beta * (logs - ldj)
return loss
def multinomial_loss_array(x_logit, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.):
"""
    Computes the multinomial (cross-entropy) loss without averaging or summing over the batch dimension.
"""
num_classes = 256
batch_size = x.size(0)
x_logit = x_logit.view(batch_size, num_classes, args.input_size[0], args.input_size[1], args.input_size[2])
# make integer class labels
target = (x * (num_classes - 1)).long()
# - N E_q0 [ ln p(x|z_k) ]
# computes cross entropy over all dimensions separately:
ce = cross_entropy(x_logit, target, size_average=False, reduce=False)
# sum over feature dimension
ce = ce.view(batch_size, -1).sum(dim=1)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k.view(batch_size, -1), dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(
z_0.view(batch_size, -1), mean=z_mu.view(batch_size, -1), log_var=z_var.log().view(batch_size, -1), dim=1
)
# ln q(z_0) - ln p(z_k) ]
logs = log_q_z0 - log_p_zk
loss = ce + beta * (logs - ldj)
return loss
def cross_entropy(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True):
r"""
Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes
instead of only (N, C, d_1, d_2) or (N, C).
This criterion combines `log_softmax` and `nll_loss` in a single
function.
See :class:`~torch.nn.CrossEntropyLoss` for details.
Args:
input: Variable :math:`(N, C)` where `C = number of classes`
target: Variable :math:`(N)` where each value is
`0 <= targets[i] <= C-1`
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. However, if the field
sizeAverage is set to False, the losses are instead summed
for each minibatch. Ignored if reduce is False. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When size_average is
True, the loss is averaged over non-ignored targets. Default: -100
reduce (bool, optional): By default, the losses are averaged or summed over
observations for each minibatch depending on size_average. When reduce
is False, returns a loss per batch element instead and ignores
size_average. Default: ``True``
"""
return nll_loss(F.log_softmax(input, 1), target, weight, size_average, ignore_index, reduce)
def nll_loss(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True):
r"""
Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes
instead of only (N, C, d_1, d_2) or (N, C).
The negative log likelihood loss.
See :class:`~torch.nn.NLLLoss` for details.
Args:
input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K > 1`
in the case of K-dimensional loss.
target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`,
or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K >= 1` for
K-dimensional loss.
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. If size_average
is False, the losses are summed for each minibatch. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When size_average is
True, the loss is averaged over non-ignored targets. Default: -100
"""
dim = input.dim()
if dim == 2:
return F.nll_loss(
input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce
)
elif dim == 4:
return F.nll_loss(
input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce
)
elif dim == 3 or dim > 4:
n = input.size(0)
c = input.size(1)
out_size = (n,) + input.size()[2:]
if target.size()[1:] != input.size()[2:]:
            raise ValueError('Expected target size {}, got {}'.format(out_size, target.size()))
input = input.contiguous().view(n, c, 1, -1)
target = target.contiguous().view(n, 1, -1)
if reduce:
            _loss = nn.NLLLoss(weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce)
return _loss(input, target)
out = F.nll_loss(
input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce
)
return out.view(out_size)
else:
raise ValueError('Expected 2 or more dimensions (got {})'.format(dim))
def calculate_loss(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.):
"""
Picks the correct loss depending on the input type.
"""
if args.input_type == 'binary':
loss, rec, kl = binary_loss_function(x_mean, x, z_mu, z_var, z_0, z_k, ldj, beta=beta)
bpd = 0.
elif args.input_type == 'multinomial':
loss, rec, kl = multinomial_loss_function(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args, beta=beta)
        bpd = loss.item() / (np.prod(args.input_size) * np.log(2.))
else:
raise ValueError('Invalid input type for calculate loss: %s.' % args.input_type)
return loss, rec, kl, bpd
def calculate_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args):
"""
Picks the correct loss depending on the input type.
"""
if args.input_type == 'binary':
loss = binary_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj)
elif args.input_type == 'multinomial':
loss = multinomial_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args)
else:
raise ValueError('Invalid input type for calculate loss: %s.' % args.input_type)
return loss
| 10,566 | 37.849265 | 116 | py |
steer | steer-master/ffjord/vae_lib/optimization/training.py | from __future__ import print_function
import time
import torch
from vae_lib.optimization.loss import calculate_loss
from vae_lib.utils.visual_evaluation import plot_reconstructions
from vae_lib.utils.log_likelihood import calculate_likelihood
import numpy as np
from train_misc import count_nfe, override_divergence_fn
def train(epoch, train_loader, model, opt, args, logger):
model.train()
train_loss = np.zeros(len(train_loader))
train_bpd = np.zeros(len(train_loader))
num_data = 0
# set warmup coefficient
beta = min([(epoch * 1.) / max([args.warmup, 1.]), args.max_beta])
logger.info('beta = {:5.4f}'.format(beta))
end = time.time()
for batch_idx, (data, _) in enumerate(train_loader):
if args.cuda:
data = data.cuda()
if args.dynamic_binarization:
data = torch.bernoulli(data)
data = data.view(-1, *args.input_size)
opt.zero_grad()
x_mean, z_mu, z_var, ldj, z0, zk = model(data)
if 'cnf' in args.flow:
f_nfe = count_nfe(model)
loss, rec, kl, bpd = calculate_loss(x_mean, data, z_mu, z_var, z0, zk, ldj, args, beta=beta)
loss.backward()
if 'cnf' in args.flow:
t_nfe = count_nfe(model)
b_nfe = t_nfe - f_nfe
train_loss[batch_idx] = loss.item()
train_bpd[batch_idx] = bpd
opt.step()
rec = rec.item()
kl = kl.item()
num_data += len(data)
batch_time = time.time() - end
end = time.time()
if batch_idx % args.log_interval == 0:
if args.input_type == 'binary':
perc = 100. * batch_idx / len(train_loader)
log_msg = (
'Epoch {:3d} [{:5d}/{:5d} ({:2.0f}%)] | Time {:.3f} | Loss {:11.6f} | '
'Rec {:11.6f} | KL {:11.6f}'.format(
epoch, num_data, len(train_loader.sampler), perc, batch_time, loss.item(), rec, kl
)
)
else:
perc = 100. * batch_idx / len(train_loader)
                tmp = 'Epoch {:3d} [{:5d}/{:5d} ({:2.0f}%)] | Time {:.3f} | Loss {:11.6f} | Bits/dim {:8.6f}'
                log_msg = tmp.format(epoch, num_data, len(train_loader.sampler), perc, batch_time, loss.item(), bpd)
                log_msg += '\trec: {:11.3f}\tkl: {:11.6f}'.format(rec, kl)
if 'cnf' in args.flow:
log_msg += ' | NFE Forward {} | NFE Backward {}'.format(f_nfe, b_nfe)
logger.info(log_msg)
if args.input_type == 'binary':
logger.info('====> Epoch: {:3d} Average train loss: {:.4f}'.format(epoch, train_loss.sum() / len(train_loader)))
else:
logger.info(
'====> Epoch: {:3d} Average train loss: {:.4f}, average bpd: {:.4f}'.
format(epoch, train_loss.sum() / len(train_loader), train_bpd.sum() / len(train_loader))
)
return train_loss
def evaluate(data_loader, model, args, logger, testing=False, epoch=0):
model.eval()
loss = 0.
batch_idx = 0
bpd = 0.
if args.input_type == 'binary':
loss_type = 'elbo'
else:
loss_type = 'bpd'
if testing and 'cnf' in args.flow:
override_divergence_fn(model, "brute_force")
for data, _ in data_loader:
batch_idx += 1
if args.cuda:
data = data.cuda()
with torch.no_grad():
data = data.view(-1, *args.input_size)
x_mean, z_mu, z_var, ldj, z0, zk = model(data)
batch_loss, rec, kl, batch_bpd = calculate_loss(x_mean, data, z_mu, z_var, z0, zk, ldj, args)
bpd += batch_bpd
loss += batch_loss.item()
# PRINT RECONSTRUCTIONS
if batch_idx == 1 and testing is False:
plot_reconstructions(data, x_mean, batch_loss, loss_type, epoch, args)
loss /= len(data_loader)
bpd /= len(data_loader)
if testing:
logger.info('====> Test set loss: {:.4f}'.format(loss))
# Compute log-likelihood
if testing and not ("cnf" in args.flow): # don't compute log-likelihood for cnf models
with torch.no_grad():
test_data = data_loader.dataset.tensors[0]
if args.cuda:
test_data = test_data.cuda()
logger.info('Computing log-likelihood on test set')
model.eval()
if args.dataset == 'caltech':
log_likelihood, nll_bpd = calculate_likelihood(test_data, model, args, logger, S=2000, MB=500)
else:
log_likelihood, nll_bpd = calculate_likelihood(test_data, model, args, logger, S=5000, MB=500)
if 'cnf' in args.flow:
override_divergence_fn(model, args.divergence_fn)
else:
log_likelihood = None
nll_bpd = None
if args.input_type in ['multinomial']:
bpd = loss / (np.prod(args.input_size) * np.log(2.))
if testing and not ("cnf" in args.flow):
logger.info('====> Test set log-likelihood: {:.4f}'.format(log_likelihood))
if args.input_type != 'binary':
logger.info('====> Test set bpd (elbo): {:.4f}'.format(bpd))
logger.info(
'====> Test set bpd (log-likelihood): {:.4f}'.
format(log_likelihood / (np.prod(args.input_size) * np.log(2.)))
)
if not testing:
return loss, bpd
else:
return log_likelihood, nll_bpd
| 5,518 | 31.087209 | 120 | py |
steer | steer-master/ffjord/vae_lib/utils/distributions.py | from __future__ import print_function
import torch
import torch.utils.data
import math
MIN_EPSILON = 1e-5
MAX_EPSILON = 1. - 1e-5
PI = torch.FloatTensor([math.pi])
if torch.cuda.is_available():
PI = PI.cuda()
# N(x | mu, var) = 1/sqrt(2*pi*var) * exp[-(x - mean)^2 / (2*var)]
# log N(x | mu, var) = -0.5 * log(2*pi) - 0.5 * log var - 0.5 * (x - mean)^2 / var
def log_normal_diag(x, mean, log_var, average=False, reduce=True, dim=None):
log_norm = -0.5 * (log_var + (x - mean) * (x - mean) * log_var.exp().reciprocal())
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None):
log_norm = -(x - mean) * (x - mean)
log_norm *= torch.reciprocal(2. * log_var.exp())
log_norm += -0.5 * log_var
log_norm += -0.5 * torch.log(2. * PI)
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_normal_standard(x, average=False, reduce=True, dim=None):
log_norm = -0.5 * x * x
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_bernoulli(x, mean, average=False, reduce=True, dim=None):
probs = torch.clamp(mean, min=MIN_EPSILON, max=MAX_EPSILON)
log_bern = x * torch.log(probs) + (1. - x) * torch.log(1. - probs)
if reduce:
if average:
return torch.mean(log_bern, dim)
else:
return torch.sum(log_bern, dim)
else:
return log_bern
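# Note (added for clarity): log_normal_diag and log_normal_standard both omit
# the constant -0.5 * log(2 * pi) per dimension, so they are unnormalized.
# This is safe for the losses in loss.py because they only use the difference
# log q(z_0) - log p(z_k), in which the shared constant cancels;
# log_normal_normalized keeps the constant and returns the exact log-density.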
| 1,768 | 25.80303 | 86 | py |
steer | steer-master/ffjord/vae_lib/utils/load_data.py | from __future__ import print_function
import torch
import torch.utils.data as data_utils
import pickle
from scipy.io import loadmat
import numpy as np
import os
def load_static_mnist(args, **kwargs):
"""
Dataloading function for static mnist. Outputs image data in vectorized form: each image is a vector of size 784
"""
args.dynamic_binarization = False
args.input_type = 'binary'
args.input_size = [1, 28, 28]
# start processing
def lines_to_np_array(lines):
return np.array([[int(i) for i in line.split()] for line in lines])
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_train.amat')) as f:
lines = f.readlines()
x_train = lines_to_np_array(lines).astype('float32')
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_valid.amat')) as f:
lines = f.readlines()
x_val = lines_to_np_array(lines).astype('float32')
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_test.amat')) as f:
lines = f.readlines()
x_test = lines_to_np_array(lines).astype('float32')
# shuffle train data
np.random.shuffle(x_train)
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_freyfaces(args, **kwargs):
# set args
args.input_size = [1, 28, 20]
args.input_type = 'multinomial'
args.dynamic_binarization = False
TRAIN = 1565
VAL = 200
TEST = 200
# start processing
with open('data/Freyfaces/freyfaces.pkl', 'rb') as f:
data = pickle.load(f, encoding="latin1")[0]
data = data / 255.
# NOTE: shuffling is done before splitting into train and test set, so test set is different for every run!
# shuffle data:
np.random.seed(args.freyseed)
np.random.shuffle(data)
# train images
x_train = data[0:TRAIN].reshape(-1, 28 * 20)
# validation images
x_val = data[TRAIN:(TRAIN + VAL)].reshape(-1, 28 * 20)
# test images
x_test = data[(TRAIN + VAL):(TRAIN + VAL + TEST)].reshape(-1, 28 * 20)
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_omniglot(args, **kwargs):
n_validation = 1345
# set args
args.input_size = [1, 28, 28]
args.input_type = 'binary'
args.dynamic_binarization = True
# start processing
def reshape_data(data):
return data.reshape((-1, 28, 28)).reshape((-1, 28 * 28), order='F')
omni_raw = loadmat(os.path.join('data', 'OMNIGLOT', 'chardata.mat'))
# train and test data
train_data = reshape_data(omni_raw['data'].T.astype('float32'))
x_test = reshape_data(omni_raw['testdata'].T.astype('float32'))
# shuffle train data
np.random.shuffle(train_data)
# set train and validation data
x_train = train_data[:-n_validation]
x_val = train_data[-n_validation:]
# binarize
if args.dynamic_binarization:
args.input_type = 'binary'
np.random.seed(777)
x_val = np.random.binomial(1, x_val)
x_test = np.random.binomial(1, x_test)
else:
args.input_type = 'gray'
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
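# A minimal sketch (not part of the original loaders) of how the
# `dynamic_binarization` flag set above is typically consumed downstream:
# when it is True, each training minibatch is re-binarized by Bernoulli
# sampling every epoch, while the pre-binarized validation/test sets stay fixed.
def _binarize_batch_sketch(x, dynamic_binarization=True):
    # x: float tensor of pixel intensities in [0, 1]
    return torch.bernoulli(x) if dynamic_binarization else x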
def load_caltech101silhouettes(args, **kwargs):
# set args
args.input_size = [1, 28, 28]
args.input_type = 'binary'
args.dynamic_binarization = False
# start processing
def reshape_data(data):
return data.reshape((-1, 28, 28)).reshape((-1, 28 * 28), order='F')
caltech_raw = loadmat(os.path.join('data', 'Caltech101Silhouettes', 'caltech101_silhouettes_28_split1.mat'))
# train, validation and test data
    x_train = 1. - reshape_data(caltech_raw['train_data'].astype('float32'))
    np.random.shuffle(x_train)
    x_val = 1. - reshape_data(caltech_raw['val_data'].astype('float32'))
    np.random.shuffle(x_val)
    x_test = 1. - reshape_data(caltech_raw['test_data'].astype('float32'))
    # NOTE: x_train and x_val are shuffled independently of the label arrays
    # below, so the labels are misaligned from here on and should only be
    # treated as placeholders.
    y_train = caltech_raw['train_labels']
    y_val = caltech_raw['val_labels']
    y_test = caltech_raw['test_labels']
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_dataset(args, **kwargs):
if args.dataset == 'mnist':
train_loader, val_loader, test_loader, args = load_static_mnist(args, **kwargs)
elif args.dataset == 'caltech':
train_loader, val_loader, test_loader, args = load_caltech101silhouettes(args, **kwargs)
elif args.dataset == 'freyfaces':
train_loader, val_loader, test_loader, args = load_freyfaces(args, **kwargs)
elif args.dataset == 'omniglot':
train_loader, val_loader, test_loader, args = load_omniglot(args, **kwargs)
else:
        raise ValueError('Unknown dataset: {}'.format(args.dataset))
return train_loader, val_loader, test_loader, args
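# A minimal usage sketch (hypothetical, not in the original file): load_dataset
# expects an argparse-style namespace providing at least `dataset`,
# `batch_size`, and (for Freyfaces) `freyseed`; the loader fills in
# input_size/input_type on the namespace itself.
if __name__ == '__main__':
    import argparse
    _parser = argparse.ArgumentParser()
    _parser.add_argument('--dataset', default='mnist')
    _parser.add_argument('--batch_size', type=int, default=100)
    _parser.add_argument('--freyseed', type=int, default=123)
    _args = _parser.parse_args()
    _train, _val, _test, _args = load_dataset(_args)
    print('train batches:', len(_train), 'input size:', _args.input_size)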
| 7,592 | 35.859223 | 116 | py |
steer | steer-master/ffjord/diagnostics/viz_toy.py | import os
import math
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
def standard_normal_logprob(z):
logZ = -0.5 * math.log(2 * math.pi)
return logZ - z.pow(2) / 2
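def _check_standard_normal_logprob():
    # Illustrative sanity check (not in the original script): the closed form
    # above should match torch.distributions.Normal(0, 1).log_prob elementwise.
    z = torch.randn(5)
    ref = torch.distributions.Normal(0., 1.).log_prob(z)
    assert torch.allclose(standard_normal_logprob(z), ref)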
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
def save_trajectory(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu'):
model.eval()
# Sample from prior
z_samples = torch.randn(2000, 2).to(device)
# sample from a grid
npts = 800
side = np.linspace(-4, 4, npts)
xx, yy = np.meshgrid(side, side)
xx = torch.from_numpy(xx).type(torch.float32).to(device)
yy = torch.from_numpy(yy).type(torch.float32).to(device)
z_grid = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1)], 1)
with torch.no_grad():
# We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logp_samples = torch.sum(standard_normal_logprob(z_samples), 1, keepdim=True)
logp_grid = torch.sum(standard_normal_logprob(z_grid), 1, keepdim=True)
t = 0
for cnf in model.chain:
end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
integration_times = torch.linspace(0, end_time, ntimes)
z_traj, _ = cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
z_traj = z_traj.cpu().numpy()
grid_z_traj, grid_logpz_traj = [], []
inds = torch.arange(0, z_grid.shape[0]).to(torch.int64)
for ii in torch.split(inds, int(z_grid.shape[0] * memory)):
_grid_z_traj, _grid_logpz_traj = cnf(
z_grid[ii], logp_grid[ii], integration_times=integration_times, reverse=True
)
_grid_z_traj, _grid_logpz_traj = _grid_z_traj.cpu().numpy(), _grid_logpz_traj.cpu().numpy()
grid_z_traj.append(_grid_z_traj)
grid_logpz_traj.append(_grid_logpz_traj)
grid_z_traj = np.concatenate(grid_z_traj, axis=1)
grid_logpz_traj = np.concatenate(grid_logpz_traj, axis=1)
plt.figure(figsize=(8, 8))
for _ in range(z_traj.shape[0]):
plt.clf()
# plot target potential function
ax = plt.subplot(2, 2, 1, aspect="equal")
ax.hist2d(data_samples[:, 0], data_samples[:, 1], range=[[-4, 4], [-4, 4]], bins=200)
ax.invert_yaxis()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title("Target", fontsize=32)
# plot the density
ax = plt.subplot(2, 2, 2, aspect="equal")
z, logqz = grid_z_traj[t], grid_logpz_traj[t]
xx = z[:, 0].reshape(npts, npts)
yy = z[:, 1].reshape(npts, npts)
qz = np.exp(logqz).reshape(npts, npts)
plt.pcolormesh(xx, yy, qz)
ax.set_xlim(-4, 4)
ax.set_ylim(-4, 4)
                cmap = matplotlib.cm.get_cmap(None)
                ax.set_facecolor(cmap(0.))  # set_axis_bgcolor was removed in newer matplotlib
ax.invert_yaxis()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title("Density", fontsize=32)
# plot the samples
ax = plt.subplot(2, 2, 3, aspect="equal")
zk = z_traj[t]
ax.hist2d(zk[:, 0], zk[:, 1], range=[[-4, 4], [-4, 4]], bins=200)
ax.invert_yaxis()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title("Samples", fontsize=32)
# plot vector field
ax = plt.subplot(2, 2, 4, aspect="equal")
K = 13j
y, x = np.mgrid[-4:4:K, -4:4:K]
K = int(K.imag)
zs = torch.from_numpy(np.stack([x, y], -1).reshape(K * K, 2)).to(device, torch.float32)
logps = torch.zeros(zs.shape[0], 1).to(device, torch.float32)
dydt = cnf.odefunc(integration_times[t], (zs, logps))[0]
dydt = -dydt.cpu().detach().numpy()
dydt = dydt.reshape(K, K, 2)
logmag = 2 * np.log(np.hypot(dydt[:, :, 0], dydt[:, :, 1]))
ax.quiver(
x, y, dydt[:, :, 0], dydt[:, :, 1],
np.exp(logmag), cmap="coolwarm", scale=20., width=0.015, pivot="mid"
)
ax.set_xlim(-4, 4)
ax.set_ylim(-4, 4)
ax.axis("off")
ax.set_title("Vector Field", fontsize=32)
makedirs(savedir)
plt.savefig(os.path.join(savedir, f"viz-{t:05d}.jpg"))
t += 1
def trajectory_to_video(savedir):
import subprocess
bashCommand = 'ffmpeg -y -i {} {}'.format(os.path.join(savedir, 'viz-%05d.jpg'), os.path.join(savedir, 'traj.mp4'))
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
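# A slightly safer variant (a sketch, not used by the original script; assumes
# ffmpeg is on PATH): subprocess.run with check=True raises on encoding
# failures instead of silently discarding them.
def trajectory_to_video_checked(savedir):
    import subprocess
    cmd = ['ffmpeg', '-y', '-i', os.path.join(savedir, 'viz-%05d.jpg'),
           os.path.join(savedir, 'traj.mp4')]
    subprocess.run(cmd, check=True, stdout=subprocess.PIPE)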
if __name__ == '__main__':
import argparse
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')))
import lib.toy_data as toy_data
from train_misc import count_parameters
from train_misc import set_cnf_options, add_spectral_norm, create_regularization_fns
from train_misc import build_model_tabular
def get_ckpt_model_and_data(args):
# Load checkpoint.
checkpt = torch.load(args.checkpt, map_location=lambda storage, loc: storage)
ckpt_args = checkpt['args']
state_dict = checkpt['state_dict']
# Construct model and restore checkpoint.
regularization_fns, regularization_coeffs = create_regularization_fns(ckpt_args)
model = build_model_tabular(ckpt_args, 2, regularization_fns).to(device)
if ckpt_args.spectral_norm: add_spectral_norm(model)
set_cnf_options(ckpt_args, model)
model.load_state_dict(state_dict)
model.to(device)
print(model)
print("Number of trainable parameters: {}".format(count_parameters(model)))
# Load samples from dataset
data_samples = toy_data.inf_train_gen(ckpt_args.data, batch_size=2000)
return model, data_samples
parser = argparse.ArgumentParser()
parser.add_argument('--checkpt', type=str, required=True)
parser.add_argument('--ntimes', type=int, default=101)
    parser.add_argument('--memory', type=float, default=0.01, help='The higher this number, the more memory is consumed.')
parser.add_argument('--save', type=str, default='trajectory')
args = parser.parse_args()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model, data_samples = get_ckpt_model_and_data(args)
save_trajectory(model, data_samples, args.save, ntimes=args.ntimes, memory=args.memory, device=device)
trajectory_to_video(args.save)
| 7,028 | 38.05 | 119 | py |
steer | steer-master/ffjord/diagnostics/viz_cnf.py | from inspect import getsourcefile
import sys
import os
import subprocess
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import argparse
import torch
import torchvision.datasets as dset
import torchvision.transforms as tforms
from torchvision.utils import save_image
import lib.layers as layers
import lib.spectral_norm as spectral_norm
import lib.utils as utils
def add_noise(x):
    """
    Uniform dequantization: [0, 1] -> [0, 255] -> add U(0, 1) noise -> [0, 1)
    """
    noise = torch.rand_like(x)  # replaces the deprecated x.new().resize_as_(x).uniform_()
    x = x * 255 + noise
    x = x / 256
    return x
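# Illustrative check (not in the original script): add_noise implements
# uniform dequantization, mapping a quantized intensity k/255 into the
# half-open interval [k/256, (k+1)/256), so outputs lie strictly in [0, 1).
def _check_add_noise():
    x = torch.rand(4, 1, 8, 8).mul(255).round().div(255)  # quantized input
    y = add_noise(x)
    assert float(y.min()) >= 0.0 and float(y.max()) < 1.0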
def get_dataset(args):
trans = lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise])
if args.data == "mnist":
im_dim = 1
im_size = 28 if args.imagesize is None else args.imagesize
train_set = dset.MNIST(root="./data", train=True, transform=trans(im_size), download=True)
test_set = dset.MNIST(root="./data", train=False, transform=trans(im_size), download=True)
elif args.data == "svhn":
im_dim = 3
im_size = 32 if args.imagesize is None else args.imagesize
train_set = dset.SVHN(root="./data", split="train", transform=trans(im_size), download=True)
test_set = dset.SVHN(root="./data", split="test", transform=trans(im_size), download=True)
elif args.data == "cifar10":
im_dim = 3
im_size = 32 if args.imagesize is None else args.imagesize
train_set = dset.CIFAR10(root="./data", train=True, transform=trans(im_size), download=True)
test_set = dset.CIFAR10(root="./data", train=False, transform=trans(im_size), download=True)
    elif args.data == 'celeba':
        # NOTE: torchvision's dset.CelebA takes (root, split=..., download=...)
        # rather than train=; this branch likely targets a custom CelebA
        # wrapper and may need adjusting for stock torchvision.
im_dim = 3
im_size = 64 if args.imagesize is None else args.imagesize
train_set = dset.CelebA(
train=True, transform=tforms.Compose([
tforms.ToPILImage(),
tforms.Resize(im_size),
tforms.RandomHorizontalFlip(),
tforms.ToTensor(),
add_noise,
])
)
test_set = dset.CelebA(
train=False, transform=tforms.Compose([
tforms.ToPILImage(),
                tforms.Resize(im_size),
tforms.ToTensor(),
add_noise,
])
)
data_shape = (im_dim, im_size, im_size)
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=args.batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=args.batch_size, shuffle=False)
return train_loader, test_loader, data_shape
def add_spectral_norm(model):
def recursive_apply_sn(parent_module):
for child_name in list(parent_module._modules.keys()):
child_module = parent_module._modules[child_name]
classname = child_module.__class__.__name__
if classname.find('Conv') != -1 and 'weight' in child_module._parameters:
del parent_module._modules[child_name]
parent_module.add_module(child_name, spectral_norm.spectral_norm(child_module, 'weight'))
else:
recursive_apply_sn(child_module)
recursive_apply_sn(model)
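# Minimal demonstration (hypothetical; assumes the project-local
# lib.spectral_norm mirrors torch.nn.utils.spectral_norm in re-registering the
# weight as `weight_orig`): after wrapping, every Conv module that owns a
# `weight` parameter is spectrally normalized in place.
def _demo_add_spectral_norm():
    import torch.nn as nn
    m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 3, 3))
    add_spectral_norm(m)
    assert hasattr(m[0], 'weight_orig')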
def build_model(args, state_dict):
# load dataset
train_loader, test_loader, data_shape = get_dataset(args)
hidden_dims = tuple(map(int, args.dims.split(",")))
strides = tuple(map(int, args.strides.split(",")))
# neural net that parameterizes the velocity field
if args.autoencode:
def build_cnf():
autoencoder_diffeq = layers.AutoencoderDiffEqNet(
hidden_dims=hidden_dims,
input_shape=data_shape,
strides=strides,
conv=args.conv,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
odefunc = layers.AutoencoderODEfunc(
autoencoder_diffeq=autoencoder_diffeq,
divergence_fn=args.divergence_fn,
residual=args.residual,
rademacher=args.rademacher,
)
cnf = layers.CNF(
odefunc=odefunc,
T=args.time_length,
solver=args.solver,
)
return cnf
else:
def build_cnf():
diffeq = layers.ODEnet(
hidden_dims=hidden_dims,
input_shape=data_shape,
strides=strides,
conv=args.conv,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
odefunc = layers.ODEfunc(
diffeq=diffeq,
divergence_fn=args.divergence_fn,
residual=args.residual,
rademacher=args.rademacher,
)
cnf = layers.CNF(
odefunc=odefunc,
T=args.time_length,
solver=args.solver,
)
return cnf
chain = [layers.LogitTransform(alpha=args.alpha), build_cnf()]
if args.batch_norm:
chain.append(layers.MovingBatchNorm2d(data_shape[0]))
model = layers.SequentialFlow(chain)
if args.spectral_norm:
add_spectral_norm(model)
model.load_state_dict(state_dict)
return model, test_loader.dataset
if __name__ == '__main__':
parser = argparse.ArgumentParser("Visualizes experiments trained using train_cnf.py.")
parser.add_argument("--checkpt", type=str, required=True)
parser.add_argument("--nsamples", type=int, default=50)
parser.add_argument("--ntimes", type=int, default=100)
parser.add_argument("--save", type=str, default="imgs")
args = parser.parse_args()
checkpt = torch.load(args.checkpt, map_location=lambda storage, loc: storage)
ck_args = checkpt["args"]
state_dict = checkpt["state_dict"]
model, test_set = build_model(ck_args, state_dict)
real_samples = torch.stack([test_set[i][0] for i in range(args.nsamples)], dim=0)
data_shape = real_samples.shape[1:]
fake_latents = torch.randn(args.nsamples, *data_shape)
# Transfer to GPU if available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Running on {}".format(device))
model.to(device)
real_samples = real_samples.to(device)
fake_latents = fake_latents.to(device)
# Construct fake samples
fake_samples = model(fake_latents, reverse=True).view(-1, *data_shape)
samples = torch.cat([real_samples, fake_samples], dim=0)
still_diffeq = torch.zeros_like(samples)
im_indx = 0
# Image-saving helper function
def save_im(im, diffeq):
global im_indx
filename = os.path.join(current_dir, args.save, "flow_%05d.png" % im_indx)
utils.makedirs(os.path.dirname(filename))
        diffeq = diffeq.clone()
        # min-max normalize the dynamics into [0, 1] for visualization
        # (the original also clamped to its own min/max, which was a no-op)
        de_min, de_max = float(diffeq.min()), float(diffeq.max())
        diffeq.add_(-de_min).div_(de_max - de_min + 1e-5)
assert im.shape == diffeq.shape
shape = im.shape
interleaved = torch.stack([im, diffeq]).transpose(0, 1).contiguous().view(2 * shape[0], *shape[1:])
save_image(interleaved, filename, nrow=20, padding=0, range=(0, 1))
im_indx += 1
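    # Illustrative note (not part of the original script): the
    # stack/transpose/view in save_im interleaves the two batches row-wise,
    # i.e. [im0, diffeq0, im1, diffeq1, ...]. Tiny self-contained check:
    def _demo_interleave():
        a = torch.zeros(3, 2)
        b = torch.ones(3, 2)
        inter = torch.stack([a, b]).transpose(0, 1).contiguous().view(6, 2)
        assert inter[0].sum() == 0 and inter[1].sum() == 2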
# Still frames with image samples.
for _ in range(30):
save_im(samples, still_diffeq)
# Forward image to latent.
logits = model.chain[0](samples)
for i in range(1, len(model.chain)):
assert isinstance(model.chain[i], layers.CNF)
cnf = model.chain[i]
tt = torch.linspace(cnf.integration_times[0], cnf.integration_times[-1], args.ntimes)
z_t = cnf(logits, integration_times=tt)
logits = z_t[-1]
# transform back to image space
im_t = model.chain[0](z_t.view(args.ntimes * args.nsamples * 2, *data_shape),
reverse=True).view(args.ntimes, 2 * args.nsamples, *data_shape)
# save each step as an image
for t, im in zip(tt, im_t):
diffeq = cnf.odefunc(t, (im, None))[0]
diffeq = model.chain[0](diffeq, reverse=True)
save_im(im, diffeq)
# Still frames with latent samples.
latents = model.chain[0](logits, reverse=True)
for _ in range(30):
save_im(latents, still_diffeq)
    # Reverse: map the latents back through the chain to image space.
for i in range(len(model.chain) - 1, 0, -1):
assert isinstance(model.chain[i], layers.CNF)
cnf = model.chain[i]
tt = torch.linspace(cnf.integration_times[-1], cnf.integration_times[0], args.ntimes)
z_t = cnf(logits, integration_times=tt)
logits = z_t[-1]
# transform back to image space
im_t = model.chain[0](z_t.view(args.ntimes * args.nsamples * 2, *data_shape),
reverse=True).view(args.ntimes, 2 * args.nsamples, *data_shape)
# save each step as an image
for t, im in zip(tt, im_t):
diffeq = cnf.odefunc(t, (im, None))[0]
diffeq = model.chain[0](diffeq, reverse=True)
save_im(im, -diffeq)
# Combine the images into a movie
bashCommand = r"ffmpeg -y -i {}/flow_%05d.png {}".format(
os.path.join(current_dir, args.save), os.path.join(current_dir, args.save, "flow.mp4")
)
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
| 9,576 | 36.120155 | 107 | py |
steer | steer-master/ffjord/diagnostics/viz_fig1.py | import os
import math
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
from scipy import interpolate as interp
import lib.utils as utils
def standard_normal_logprob(z):
logZ = -0.5 * math.log(2 * math.pi)
return logZ - z.pow(2) / 2
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
def save_fig1(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu'):
model.eval()
# Sample from prior
# z_samples = torch.randn(20, 200).to(device)
z_samples = torch.randn(20, 50).to(device)
with torch.no_grad():
# # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logp_samples = torch.sum(standard_normal_logprob(z_samples), 1, keepdim=True)
t = 0
for cnf in model.chain:
end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
integration_times = torch.linspace(0, end_time, ntimes)
z_traj, _ = cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
z_traj = z_traj.cpu().numpy()
makedirs(savedir)
for sample in range(z_traj.shape[1]):
plt.clf()
plt.imshow(z_traj[:,sample,:],cmap='plasma')
plt.savefig(os.path.join(savedir, "fig1_"+str(sample)+".jpg"))
def save_fig1_rev(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu'):
model.eval()
data_samples=torch.tensor(data_samples).float().cuda()
# Sample from prior
# z_samples = torch.randn(20, 200).to(device)
z_samples = torch.randn(20, 50).to(device)
with torch.no_grad():
# # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logp_samples = torch.sum(standard_normal_logprob(z_samples), 1, keepdim=True)
t = 0
for cnf in model.chain:
end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
integration_times = torch.linspace(0, end_time, ntimes)
z_traj, _ = cnf(data_samples[0:1], logp_samples[0:1], integration_times=integration_times, reverse=False)
z_traj = z_traj.cpu().numpy()
print('zt',z_traj.shape)
makedirs(savedir)
plt.clf()
        plt.imshow(data_samples[0:1].cpu().numpy(), cmap='plasma')
plt.savefig(os.path.join(savedir, "fig1_data.jpg"))
for sample in range(z_traj.shape[1]):
plt.clf()
plt.imshow(z_traj[:,sample,:],cmap='plasma')
plt.savefig(os.path.join(savedir, "fig1_forward"+str(sample)+".jpg"))
def save_fig1_1d_ptd(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
model.eval()
# data_samples=torch.tensor(data_samples).float().cuda()
# Sample from prior
z_samples = torch.randn(30, 1).to(device)
# linspace for plotting
npts=500
z_samples = np.linspace(-4,4,100)
z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
znp = np.linspace(-4,4,npts)
z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
with torch.no_grad():
# # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
t = 0
for cnf in model.chain:
end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
integration_times = torch.linspace(0, end_time, ntimes)
def log_prob(t):
z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
z_traj = z_traj
logp_z_traj = standard_normal_logprob(z_traj)
dlogp_traj = dlogp_traj.cpu().numpy()
return logp_z_traj.cpu().numpy() - dlogp_traj
logp = []
for t in integration_times:
logp.append(log_prob(t))
# The differential equation evaluated at some t and x.
def _differential(t, x):
t = torch.tensor(t).to(device)
x = torch.tensor(x).to(device)
return cnf.odefunc.odefunc.diffeq(t, x)
ts = np.linspace(0,end_time,100)
xs = np.linspace(-4,4,100)
dxs = torch.zeros(ts.shape[0],xs.shape[0])
for ti , t in enumerate(ts):
for xi,x in enumerate(xs):
dxs[ti,xi]= -_differential(t,[x])
dxs = torch.tensor(dxs)
dts = torch.ones_like(dxs)
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
# z_traj = z_traj.cpu().numpy()
# logp_traj= logp_traj.cpu().numpy()
makedirs(savedir)
plt.clf()
probs = np.exp(np.array(logp)[:,:,0])
maxs = np.amax(probs,axis=1,keepdims=True)
probs = probs / maxs
plt.rcParams.update({'font.size': 13})
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
gridspec_kw={'height_ratios': [1,5, 1]},
figsize=(4, 7))
fig.set_tight_layout({'pad': 0.1, 'h_pad': -1.0})
axes[0].scatter(znp,np.exp(np.array(logp)[0,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[0,:,0]),cmap='viridis')
axes[0].set_xlim(-4,4)
axes[0].set_ylabel(r"$p(z(t_1))$",labelpad=20)
axes[0].set_yticks([])
axes[0].get_xaxis().set_visible(False)
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
axes[0].set_ylim(bottom=0.0)
axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
axes[1].streamplot(xs,ts,dxs,dts,color='white',linewidth=0.9,density=(0.7,0.5),arrowsize=0.8)
axes[1].set_xlim(-4,4)
axes[1].set_yticks([0,0.5])
axes[1].set_yticklabels([r"$0$",r"$1$"])
axes[1].set_ylabel(r"$t$")
axes[1].get_xaxis().set_visible(False)
# axes[2].plot(znp,-np.exp(np.array(logp)[-1,:,0]))
axes[2].scatter(znp,np.exp(np.array(logp)[-1,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[-1,:,0]),cmap='viridis')
axes[2].set_xlim(-4,4)
axes[2].set_ylabel(r"$p(z(t_0))$",labelpad=20)
axes[2].set_xlabel(r"$z$")
axes[2].set_yticks([])
axes[2].set_xticks([])
# axes[2].get_xaxis().set_visible(False)
axes[2].spines['top'].set_visible(False)
axes[2].spines['right'].set_visible(False)
axes[2].set_ylim(bottom=0.)
# fig.subplots_adjust(hspace=0.)
pos0 = axes[0].get_position(original=False)
pos1 = axes[1].get_position(original=False)
pos2 = axes[2].get_position(original=False)
print(pos0.y0)
print(pos1.y0+pos1.height)
axes[0].set_position([pos1.x0,pos0.y0+0.4,pos1.x1,pos0.height])
axes[2].set_position([pos1.x0,pos2.y0,pos1.x1,pos2.height])
plt.savefig(os.path.join(savedir, "fig1_1d_together"+str(itr)+".png"),pad_inches=0,bbox_inches='tight',dpi=350)
def save_fig1_1d_ptd_timescrub(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
model.eval()
# data_samples=torch.tensor(data_samples).float().cuda()
# Sample from prior
z_samples = torch.randn(30, 1).to(device)
# linspace for plotting
npts=500
z_samples = np.linspace(-4,4,100)
z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
znp = np.linspace(-4,4,npts)
z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
with torch.no_grad():
# # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
t = 0
for cnf in model.chain:
end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
integration_times = torch.linspace(0, end_time, ntimes)
def log_prob(t):
z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
z_traj = z_traj
logp_z_traj = standard_normal_logprob(z_traj)
dlogp_traj = dlogp_traj.cpu().numpy()
return logp_z_traj.cpu().numpy() - dlogp_traj
logp = []
for t in integration_times:
logp.append(log_prob(t))
# The differential equation evaluated at some t and x.
def _differential(t, x):
t = torch.tensor(t).to(device)
x = torch.tensor(x).to(device)
return cnf.odefunc.odefunc.diffeq(t, x)
ts = np.linspace(0,end_time,100)
xs = np.linspace(-4,4,100)
dxs = torch.zeros(ts.shape[0],xs.shape[0])
for ti , t in enumerate(ts):
for xi,x in enumerate(xs):
dxs[ti,xi]= -_differential(t,[x])
dxs = torch.tensor(dxs)
dts = torch.ones_like(dxs)
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
# z_traj = z_traj.cpu().numpy()
# logp_traj= logp_traj.cpu().numpy()
makedirs(savedir)
for timerow in range(integration_times.shape[0]):
plt.clf()
probs = np.exp(np.array(logp)[:,:,0])
maxs = np.amax(probs,axis=1,keepdims=True)
probs = probs / maxs
plt.rcParams.update({'font.size': 13})
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True,
gridspec_kw={'height_ratios': [1,5]},
figsize=(8,13))
fig.set_tight_layout({'pad': 0.1, 'h_pad': -1.0})
axes[0].scatter(znp,np.exp(np.array(logp)[::-1][timerow,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[::-1][timerow,:,0]),cmap='viridis')
axes[0].set_xlim(-4,4)
axes[0].set_ylim(0,.42)
axes[0].set_ylabel(r"$p(z(t))$",labelpad=20)
axes[0].set_yticks([])
axes[0].get_xaxis().set_visible(False)
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
axes[0].set_ylim(bottom=0.0)
axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
axes[1].streamplot(xs,ts,dxs,dts,color='white',linewidth=0.9,density=(0.7,0.5),arrowsize=0.8)
axes[1].plot([-4,4],[integration_times[timerow],integration_times[timerow]],c='red',zorder=100)
axes[1].set_xlim(-4,4)
axes[1].set_yticks([0,0.5])
axes[1].set_xticks([])
axes[1].set_yticklabels([r"$0$",r"$1$"])
axes[1].set_ylabel(r"$t$")
axes[1].set_xlabel(r"$z$")
axes[1].spines['top'].set_visible(False)
axes[1].spines['bottom'].set_visible(False)
axes[1].get_xaxis().set_visible(True)
pos0 = axes[0].get_position(original=False)
pos1 = axes[1].get_position(original=False)
print(pos0.y0)
print(pos1.y0+pos1.height)
axes[0].set_position([pos1.x0,pos0.y0+0.15,pos1.x1,pos0.height])
plt.savefig(os.path.join(savedir, "fig1_1d_scrub"+str(timerow)+".png"),pad_inches=0,bbox_inches='tight')
plt.close()
def save_fig1_1d_NF(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',dpi=350):
model.eval()
z_samples = torch.randn(30, 1).to(device)
# linspace for plotting
npts=500
z_samples = np.linspace(-4,4,100)
z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
znp = np.linspace(-4,4,npts)
z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
with torch.no_grad():
# # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
t = 0
        # NOTE: this function was left unfinished in the original source: the
        # planar-flow loop was commented out, leaving `cnf`, `end_time`, and
        # `integration_times` undefined. As a minimal repair we assume the
        # model is a CNF chain, mirroring save_fig1_1d_ptd above.
        cnf = model.chain[0]
        end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
        integration_times = torch.linspace(0, end_time, ntimes)
        def log_prob(t):
            z_traj, dlogp_traj = cnf(z, torch.zeros_like(logp_z), integration_times=torch.tensor([t, end_time]), reverse=False)
            logp_z_traj = standard_normal_logprob(z_traj)
            dlogp_traj = dlogp_traj.cpu().numpy()
            return logp_z_traj.cpu().numpy() - dlogp_traj
logp = []
for t in integration_times:
logp.append(log_prob(t))
# The differential equation evaluated at some t and x.
def _differential(t, x):
t = torch.tensor(t).to(device)
x = torch.tensor(x).to(device)
return cnf.odefunc.odefunc.diffeq(t, x)
ts = np.linspace(0,end_time,100)
xs = np.linspace(-4,4,100)
dxs = torch.zeros(ts.shape[0],xs.shape[0])
for ti , t in enumerate(ts):
for xi,x in enumerate(xs):
dxs[ti,xi]= -_differential(t,[x])
dxs = torch.tensor(dxs)
dts = torch.ones_like(dxs)
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
# z_traj = z_traj.cpu().numpy()
# logp_traj= logp_traj.cpu().numpy()
makedirs(savedir)
plt.clf()
# plt.imshow(logp_traj[:,:,0],cmap='plasma')
# plt.imshow(np.exp(np.array(logp)[:,:,0]),cmap='plasma',extent=[-4,4,0,1])
# plt.tight_layout()
# plt.savefig(os.path.join(savedir, "fig1_1d.jpg"))
# plt.clf()
# plt.plot(z_traj[:,:,0].cpu().numpy())
# plt.savefig(os.path.join(savedir, "fig1_1d_traj.jpg"))
# nm = matplotlib.colors.Normalize(0.05,0.45,True)
probs = np.exp(np.array(logp)[:,:,0])
maxs = np.amax(probs,axis=1,keepdims=True)
probs = probs / maxs
# plt.clf()
# plt.axis('off')
# plt.tight_layout()
# plt.imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=40.)
# plt.streamplot(xs,ts,dxs,dts,color='white',linewidth=0.7,density=(0.5,2.))
# plt.savefig(os.path.join(savedir, "fig1_1d_stream.pdf"),pad_inches=0,bbox_inches='tight')
# plt.clf()
# plt.axis('off')
# plt.tight_layout()
# plt.plot(np.exp(np.array(logp)[0,:,0]))
# plt.savefig(os.path.join(savedir, "fig1_1d_t1.pdf"),pad_inches=0,bbox_inches='tight')
# plt.clf()
# plt.axis('off')
# plt.tight_layout()
# plt.plot(np.exp(np.array(logp)[-1,:,0]))
# plt.savefig(os.path.join(savedir, "fig1_1d_t0.pdf"),pad_inches=0,bbox_inches='tight')
# plt.clf()
# plt.axis('off')
# plt.tight_layout()
# plt.subplot2grid((8,1),(0,0))
# plt.axis('off')
# plt.tight_layout()
# plt.plot(znp,np.exp(np.array(logp)[-1,:,0]))
# plt.subplot2grid((8,1),(1,0),rowspan=6)
# plt.axis('off')
# plt.tight_layout()
# plt.imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=30.)
# plt.streamplot(xs,ts,dxs,dts,color='white',linewidth=0.7,density=(0.5,2.))
# plt.subplot2grid((8,1),(7,0))
# plt.axis('off')
# plt.tight_layout()
# plt.plot(znp,np.exp(np.array(logp)[0,:,0]))
plt.rcParams.update({'font.size': 13})
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
gridspec_kw={'height_ratios': [1,5, 1]},
figsize=(4, 7))
fig.set_tight_layout({'pad': 0.1, 'h_pad': -1.0})
# axes[1].set_aspect(30, share=True)
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
# axes[0].plot(znp,np.exp(np.array(logp)[0,:,0]))
axes[0].scatter(znp,np.exp(np.array(logp)[0,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[0,:,0]),cmap='viridis')
axes[0].set_xlim(-4,4)
axes[0].set_ylabel(r"$p(z(t_1))$",labelpad=20)
axes[0].set_yticks([])
axes[0].get_xaxis().set_visible(False)
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
# axes[0].spines['bottom'].set_visible(False)
# axes[0].axis('off')
axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
axes[1].streamplot(xs,ts,dxs,dts,color='white',linewidth=0.9,density=(0.4,2.5))
# axes[1].set_axis_off()
axes[1].set_xlim(-4,4)
# axes[1].axis('off')
axes[1].set_yticks([0,0.5])
axes[1].set_yticklabels([r"$0$",r"$1$"])
axes[1].set_ylabel(r"$t$")
axes[1].get_xaxis().set_visible(False)
# axes[2].plot(znp,-np.exp(np.array(logp)[-1,:,0]))
axes[2].scatter(znp,np.exp(np.array(logp)[-1,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[-1,:,0]),cmap='viridis')
axes[2].set_xlim(-4,4)
axes[2].set_ylabel(r"$p(z(t_0))$",labelpad=20)
axes[2].set_xlabel(r"$z$")
axes[2].set_yticks([])
axes[2].set_xticks([])
# axes[2].get_xaxis().set_visible(False)
axes[2].spines['top'].set_visible(False)
axes[2].spines['right'].set_visible(False)
# fig.subplots_adjust(hspace=0.)
pos0 = axes[0].get_position(original=False)
pos1 = axes[1].get_position(original=False)
pos2 = axes[2].get_position(original=False)
print(pos0.y0)
print(pos1.y0+pos1.height)
axes[0].set_position([pos1.x0,pos0.y0+0.4,pos1.x1,pos0.height])
axes[2].set_position([pos1.x0,pos2.y0,pos1.x1,pos2.height])
plt.savefig(os.path.join(savedir, "fig1_1d_together.pdf"),pad_inches=0,bbox_inches='tight')
# if __name__ == '__main__':
# plt.figure(figsize=(8, 8))
# for _ in range(z_traj.shape[0]):
# plt.clf()
# # plot target potential function
# ax = plt.subplot(2, 2, 1, aspect="equal")
# ax.hist2d(data_samples[:, 0], data_samples[:, 1], range=[[-4, 4], [-4, 4]], bins=200)
# ax.invert_yaxis()
# ax.get_xaxis().set_ticks([])
# ax.get_yaxis().set_ticks([])
# ax.set_title("Target", fontsize=32)
# # plot the density
# ax = plt.subplot(2, 2, 2, aspect="equal")
# z, logqz = grid_z_traj[t], grid_logpz_traj[t]
# xx = z[:, 0].reshape(npts, npts)
# yy = z[:, 1].reshape(npts, npts)
# qz = np.exp(logqz).reshape(npts, npts)
# plt.pcolormesh(xx, yy, qz)
# ax.set_xlim(-4, 4)
# ax.set_ylim(-4, 4)
# cmap = matplotlib.cm.get_cmap(None)
# ax.set_axis_bgcolor(cmap(0.))
# ax.invert_yaxis()
# ax.get_xaxis().set_ticks([])
# ax.get_yaxis().set_ticks([])
# ax.set_title("Density", fontsize=32)
# # plot the samples
# ax = plt.subplot(2, 2, 3, aspect="equal")
# zk = z_traj[t]
# ax.hist2d(zk[:, 0], zk[:, 1], range=[[-4, 4], [-4, 4]], bins=200)
# ax.invert_yaxis()
# ax.get_xaxis().set_ticks([])
# ax.get_yaxis().set_ticks([])
# ax.set_title("Samples", fontsize=32)
# # plot vector field
# ax = plt.subplot(2, 2, 4, aspect="equal")
# K = 13j
# y, x = np.mgrid[-4:4:K, -4:4:K]
# K = int(K.imag)
# zs = torch.from_numpy(np.stack([x, y], -1).reshape(K * K, 2)).to(device, torch.float32)
# logps = torch.zeros(zs.shape[0], 1).to(device, torch.float32)
# dydt = cnf.odefunc(integration_times[t], (zs, logps))[0]
# dydt = -dydt.cpu().numpy()
# dydt = dydt.reshape(K, K, 2)
# logmag = 2 * np.log(np.hypot(dydt[:, :, 0], dydt[:, :, 1]))
# ax.quiver(
# x, y, dydt[:, :, 0], dydt[:, :, 1],
# np.exp(logmag), cmap="coolwarm", scale=20., width=0.015, pivot="mid"
# )
# ax.set_xlim(-4, 4)
# ax.set_ylim(-4, 4)
# ax.axis("off")
# ax.set_title("Vector Field", fontsize=32)
# makedirs(savedir)
# plt.savefig(os.path.join(savedir, f"viz-{t:05d}.jpg"))
# t += 1
# def trajectory_to_video(savedir):
# import subprocess
# bashCommand = 'ffmpeg -y -i {} {}'.format(os.path.join(savedir, 'viz-%05d.jpg'), os.path.join(savedir, 'traj.mp4'))
# process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
# output, error = process.communicate()
# if __name__ == '__main__':
# import argparse
# import sys
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')))
# import lib.toy_data as toy_data
# from train_misc import count_parameters
# from train_misc import set_cnf_options, add_spectral_norm, create_regularization_fns
# from train_misc import build_model_toy2d
# def get_ckpt_model_and_data(args):
# # Load checkpoint.
# checkpt = torch.load(args.checkpt, map_location=lambda storage, loc: storage)
# ckpt_args = checkpt['args']
# state_dict = checkpt['state_dict']
# # Construct model and restore checkpoint.
# regularization_fns, regularization_coeffs = create_regularization_fns(ckpt_args)
# model = build_model_toy2d(ckpt_args, regularization_fns).to(device)
# if ckpt_args.spectral_norm: add_spectral_norm(model)
# set_cnf_options(ckpt_args, model)
# model.load_state_dict(state_dict)
# model.to(device)
# print(model)
# print("Number of trainable parameters: {}".format(count_parameters(model)))
# # Load samples from dataset
# data_samples = toy_data.inf_train_gen(ckpt_args.data, batch_size=2000)
# return model, data_samples
# parser = argparse.ArgumentParser()
# parser.add_argument('--checkpt', type=str, required=True)
# parser.add_argument('--ntimes', type=int, default=101)
# parser.add_argument('--memory', type=float, default=0.01, help='Higher this number, the more memory is consumed.')
# parser.add_argument('--save', type=str, default='trajectory')
# args = parser.parse_args()
# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# model, data_samples = get_ckpt_model_and_data(args)
# save_trajectory(model, data_samples, args.save, ntimes=args.ntimes, memory=args.memory, device=device)
# trajectory_to_video(args.save)
def save_fig1_1d_icml(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
model.eval()
# data_samples=torch.tensor(data_samples).float().cuda()
# Sample from prior
z_samples = torch.randn(30, 1).to(device)
# linspace for plotting
npts=500
z_samples = np.linspace(-4,4,100)
z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
znp = np.linspace(-4,4,npts)
z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
with torch.no_grad():
# # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
t = 0
for cnf in model.chain:
end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
integration_times = torch.linspace(0.0001, end_time, ntimes)
def log_prob(t):
#z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([0,t]),reverse = False)
z_traj = z_traj
logp_z_traj = standard_normal_logprob(z_traj)
dlogp_traj = dlogp_traj.cpu().numpy()
return logp_z_traj.cpu().numpy() - dlogp_traj
logp = []
for t in integration_times:
logp.append(log_prob(t))
# The differential equation evaluated at some t and x.
def _differential(t, x):
t = torch.tensor(t).to(device)
x = torch.tensor(x).to(device)
return cnf.odefunc.diffeq(t, x)
ts = np.linspace(0.0001,end_time,101)
xs = np.linspace(-4,4,100)
dxs = torch.zeros(ts.shape[0],xs.shape[0])
for ti , t in enumerate(ts):
for xi,x in enumerate(xs):
dxs[ti,xi]= -_differential(t,[x])
dxs = torch.tensor(dxs)
dts = torch.ones_like(dxs)
z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
#z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
#z_traj = z_traj.cpu().numpy()
#logp_traj= logp_traj.cpu().numpy()
makedirs(savedir)
plt.clf()
plt.rcParams.update({'font.size': 13})
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
gridspec_kw={'height_ratios': [1,5, 1]},
figsize=(4, 8))
fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
probs = np.exp(np.array(logp)[:,:,0])
probs = probs[::-1]
            maxs = np.amax(probs, axis=1, keepdims=True)
            # probs = probs / maxs  # optional per-time normalization (disabled)
# for smple in range(len(z_samples)):
for smple in [38,50,55, 59]:
animate = True
if animate:
Trange = range(len(integration_times))
else:
Trange = [len(integration_times)-1]
for T in Trange:
plt.clf()
plt.close()
plt.rcParams.update({'font.size': 13})
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
gridspec_kw={'height_ratios': [1,5, 1]},
figsize=(4, 7))
fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
print("plotting ",T)
ztr0 = z_traj.numpy()[0,smple,0]
pztr0 = np.exp(np.array(logp_traj)[0,smple,0])
ztrT = z_traj.numpy()[T,smple,0]
pztrT = np.exp(np.array(logp_traj)[T,smple,0])
probts = np.exp(np.array(logp))[::-1]
# axes[2].plot(znp,-np.exp(np.array(logp)[-1,:,0]))
sc0 = axes[0].scatter(znp,np.exp(np.array(logp)[-1,:,0]),s=0.5,marker=None,linestyle='-',c=probs[-1,:],cmap='viridis')
#axes[0].scatter([ztr0],[0.],color="#F012BE",s=10.,zorder=5,clip_on=False)
#axes[0].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.4,linewidth=0.5,zorder=2)
#axes[0].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.4,linewidth=0.5,clip_on=False,zorder=2)
#axes[0].plot([ztrT, ztrT],[0.,pztrT], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
#axes[0].plot([ztrT, znp[0]],[pztrT,pztrT], color="#2ECC40",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
#axes[0].plot([znp[0], znp[0]],[pztr0,pztrT], color="#2ECC40",alpha=1.0,linewidth=1,zorder=10,clip_on=False)
axes[0].set_xlim(-4,4)
axes[0].set_ylim(0.,maxs[-1][0])
axes[0].set_ylabel(r"$p(z_{t})$",labelpad=20)
axes[0].set_yticks([min(0.9*pztr0,(pztrT+pztr0)/2)])
axes[0].set_yticklabels([r"$\Delta$"], color="#2ECC40")
axes[0].tick_params(width=0,labelsize=10)
axes[0].get_xaxis().set_visible(False)
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
# axes[0].set_clip_on(False)
axes[0].set_zorder(2)
axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
#axes[1].plot(z_traj[0:T,smple,0].numpy(),ts[0:T], color="#F012BE")
#axes[1].scatter([ztr0],[0.], color="#F012BE", s=10.,zorder=5)
#axes[1].scatter([ztrT],[ts[T]], color="#F012BE",s=10.,zorder=5,clip_on=True)
#axes[1].scatter([ztrT],[ts[-1]], color="#F012BE",s=10.,zorder=5,clip_on=True,visible=False)#hack
axes[1].streamplot(xs,ts,dxs,dts,color='white',linewidth=0.3,density=(0.7,0.5),arrowsize=0.5)
#axes[1].streamplot(xs,ts[::-1],dxs,dts,color='white',linewidth=0.3,density=(0.7,0.5),arrowsize=0.5)
axes[1].set_xlim(-4,4)
axes[1].set_yticks([0,0.5])
axes[1].set_yticklabels([r"$0$",r"$1$"])
axes[1].set_ylabel(r"$t$")
# axes[1].get_xaxis().set_visible(False)
axes[1].spines['bottom'].set_position('zero')
# axes[1].spines['bottom'].set_zorder(0.)
axes[1].spines['top'].set_position(('data',0.5))
# axes[1].spines['top'].set_zorder(0.)
axes[1].spines['left'].set_bounds(0.,0.5)
axes[1].spines['right'].set_bounds(0.,0.5)
axes[1].set_clip_on(True)
sc2 = axes[2].scatter(znp,probts[T,:,0],s=0.5,marker=None,linestyle='-',c=probts[T,:,0],cmap='viridis',zorder=3)
sc2.set_clim(0.,maxs[-1][0])
#axes[2].scatter(ztrT,[0.], color="#F012BE",s=10.,zorder=5,clip_on=False)
#axes[2].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5)
#axes[2].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.8,linewidth=0.5,clip_on=False)
axes[2].set_xlim(-4,4)
axes[2].set_ylabel(r"$p(z_{t_0})$",labelpad=20)
axes[2].set_xlabel(r"$z$")
axes[2].set_yticks([])
axes[2].set_xticks([])
# axes[2].get_xaxis().set_visible(False)
axes[2].spines['top'].set_visible(False)
axes[2].spines['right'].set_visible(False)
axes[2].set_ylim(bottom=0.)
# fig.subplots_adjust(hspace=0.)
pos0 = axes[0].get_position(original=False)
pos1 = axes[1].get_position(original=False)
pos2 = axes[2].get_position(original=False)
# print(pos0.y0)
# print(pos1.y0+pos1.height)
axes[0].set_position([pos1.x0,pos0.y0+0.4,pos1.x1,pos0.height])
axes[2].set_position([pos1.x0,pos2.y0,pos1.x1,pos2.height])
if animate:
makedirs(os.path.join(savedir, "anim",'{:0>4}'.format(str(smple))))
plt.savefig(os.path.join(savedir, "anim",'{:0>4}'.format(str(smple)),"img-"+'{:0>4}'.format(str(T))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=300)
plt.savefig(os.path.join(savedir, "fig1_1d_together"+'{:0>4}'.format(str(smple))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=350)
            # Example ffmpeg invocations for stitching frames into videos:
            # ffmpeg -r 24 -i experiments/fig1_1d_toy/fig1_ani/anim/0038/%04d.png -c:v libx264 -crf 20 -pix_fmt yuv420p experiments/fig1_1d_toy/fig1_ani/animate.mp4
            # ffmpeg -r 24 -i %04d.png -c:v libx264 -crf 20 -pix_fmt yuv420p animate.mp4
            # ffmpeg -f concat -safe 0 -i anim-list -r 24 -c:v libx264 -crf 20 -pix_fmt yuv420p ffjord-sample-rev.mp4
def save_fig1_1d_icml_no_top_or_bottom(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
model.eval()
# data_samples=torch.tensor(data_samples).float().cuda()
# Sample from prior
z_samples = torch.randn(30, 1).to(device)
# linspace for plotting
npts=500
z_samples = np.linspace(-4,4,100)
z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
znp = np.linspace(-4,4,npts)
z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
with torch.no_grad():
# # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
t = 0
for cnf in model.chain:
end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
integration_times = torch.linspace(0.0001, end_time, ntimes)
def log_prob(t):
#z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([0,t]),reverse = False)
z_traj = z_traj
logp_z_traj = standard_normal_logprob(z_traj)
dlogp_traj = dlogp_traj.cpu().numpy()
return logp_z_traj.cpu().numpy() - dlogp_traj
logp = []
for t in integration_times:
logp.append(log_prob(t))
# The differential equation evaluated at some t and x.
def _differential(t, x):
t = torch.tensor(t).to(device)
x = torch.tensor(x).to(device)
return cnf.odefunc.diffeq(t, x)
ts = np.linspace(0.0001,end_time,101)
xs = np.linspace(-4,4,100)
dxs = torch.zeros(ts.shape[0],xs.shape[0])
for ti , t in enumerate(ts):
for xi,x in enumerate(xs):
dxs[ti,xi]= -_differential(t,[x])
dxs = torch.tensor(dxs)
dts = torch.ones_like(dxs)
z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
#z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
#z_traj = z_traj.cpu().numpy()
#logp_traj= logp_traj.cpu().numpy()
makedirs(savedir)
plt.clf()
plt.rcParams.update({'font.size': 13})
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
gridspec_kw={'height_ratios': [1,5, 1]},
figsize=(4, 8))
fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
probs = np.exp(np.array(logp)[:,:,0])
probs = probs[::-1]
            maxs = np.amax(probs, axis=1, keepdims=True)
            # probs = probs / maxs  # optional per-time normalization (disabled)
# for smple in range(len(z_samples)):
for smple in [38,50,55, 59]:
animate = True
if animate:
Trange = range(len(integration_times))
else:
Trange = [len(integration_times)-1]
for T in Trange:
plt.clf()
plt.close()
plt.rcParams.update({'font.size': 13})
#fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True,
# gridspec_kw={'height_ratios': [5000, 1]},
# figsize=(4, 7))
#fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
print("plotting ",T)
ztr0 = z_traj.numpy()[0,smple,0]
pztr0 = np.exp(np.array(logp_traj)[0,smple,0])
ztrT = z_traj.numpy()[T,smple,0]
pztrT = np.exp(np.array(logp_traj)[T,smple,0])
probts = np.exp(np.array(logp))[::-1]
## axes[2].plot(znp,-np.exp(np.array(logp)[-1,:,0]))
#sc0 = axes[0].scatter(znp,np.exp(np.array(logp)[-1,:,0]),s=0.5,marker=None,linestyle='-',c=probs[-1,:],cmap='viridis')
##axes[0].scatter([ztr0],[0.],color="#F012BE",s=10.,zorder=5,clip_on=False)
##axes[0].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.4,linewidth=0.5,zorder=2)
##axes[0].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.4,linewidth=0.5,clip_on=False,zorder=2)
##axes[0].plot([ztrT, ztrT],[0.,pztrT], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
##axes[0].plot([ztrT, znp[0]],[pztrT,pztrT], color="#2ECC40",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
##axes[0].plot([znp[0], znp[0]],[pztr0,pztrT], color="#2ECC40",alpha=1.0,linewidth=1,zorder=10,clip_on=False)
#axes[0].set_xlim(-4,4)
#axes[0].set_ylim(0.,maxs[-1][0])
#axes[0].set_ylabel(r"$p(z_{t})$",labelpad=20)
#axes[0].set_yticks([min(0.9*pztr0,(pztrT+pztr0)/2)])
#axes[0].set_yticklabels([r"$\Delta$"], color="#2ECC40")
#axes[0].tick_params(width=0,labelsize=10)
#axes[0].get_xaxis().set_visible(False)
#axes[0].spines['top'].set_visible(False)
#axes[0].spines['right'].set_visible(False)
## axes[0].set_clip_on(False)
#axes[0].set_zorder(2)
plt.imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
#axes[1].plot(z_traj[0:T,smple,0].numpy(),ts[0:T], color="#F012BE")
#axes[1].scatter([ztr0],[0.], color="#F012BE", s=10.,zorder=5)
#axes[1].scatter([ztrT],[ts[T]], color="#F012BE",s=10.,zorder=5,clip_on=True)
#axes[1].scatter([ztrT],[ts[-1]], color="#F012BE",s=10.,zorder=5,clip_on=True,visible=False)#hack
plt.streamplot(xs,ts,dxs,dts,color='white',linewidth=0.3,density=(0.7,0.5),arrowsize=0.5)
#axes[1].streamplot(xs,ts[::-1],dxs,dts,color='white',linewidth=0.3,density=(0.7,0.5),arrowsize=0.5)
#plt.set_xlim(-4,4)
#plt.set_yticks([0,0.5])
#plt.set_yticklabels([r"$0$",r"$1$"])
#plt.set_ylabel(r"$t$")
# axes[1].get_xaxis().set_visible(False)
#plt.spines['bottom'].set_position('zero')
## axes[1].spines['bottom'].set_zorder(0.)
#plt.spines['top'].set_position(('data',0.5))
## axes[1].spines['top'].set_zorder(0.)
#plt.spines['left'].set_bounds(0.,0.5)
#plt.spines['right'].set_bounds(0.,0.5)
#plt.set_clip_on(True)
#sc2 = axes[2].scatter(znp,probts[T,:,0],s=0.5,marker=None,linestyle='-',c=probts[T,:,0],cmap='viridis',zorder=3)
#sc2.set_clim(0.,maxs[-1][0])
##axes[2].scatter(ztrT,[0.], color="#F012BE",s=10.,zorder=5,clip_on=False)
##axes[2].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5)
##axes[2].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.8,linewidth=0.5,clip_on=False)
#axes[2].set_xlim(-4,4)
#axes[2].set_ylabel(r"$p(z_{t_0})$",labelpad=20)
#axes[2].set_xlabel(r"$z$")
#axes[2].set_yticks([])
#axes[2].set_xticks([])
## axes[2].get_xaxis().set_visible(False)
#axes[2].spines['top'].set_visible(False)
#axes[2].spines['right'].set_visible(False)
#axes[2].set_ylim(bottom=0.)
## fig.subplots_adjust(hspace=0.)
#pos0 = axes[0].get_position(original=False)
#pos1 = axes[1].get_position(original=False)
#pos2 = axes[2].get_position(original=False)
## print(pos0.y0)
## print(pos1.y0+pos1.height)
#axes[0].set_position([pos1.x0,pos0.y0+0.4,pos1.x1,pos0.height])
#axes[2].set_position([pos1.x0,pos2.y0,pos1.x1,pos2.height])
if animate:
makedirs(os.path.join(savedir, "anim",'{:0>4}'.format(str(smple))))
plt.savefig(os.path.join(savedir, "anim",'{:0>4}'.format(str(smple)),"img-"+'{:0>4}'.format(str(T))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=300)
plt.savefig(os.path.join(savedir, "fig1_1d_together"+'{:0>4}'.format(str(smple))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=350)
def save_fig1_1d_icml_rev(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
model.eval()
# data_samples=torch.tensor(data_samples).float().cuda()
# Sample from prior
z_samples = torch.randn(30, 1).to(device)
# linspace for plotting
npts=500
z_samples = np.linspace(-4,4,100)
z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
znp = np.linspace(-4,4,npts)
z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
with torch.no_grad():
# # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
t = 0
for cnf in model.chain:
end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
integration_times = torch.linspace(0, end_time, ntimes)
def log_prob(t):
z_traj,dlogp_traj = cnf(z,torch.zeros_like(logp_z),integration_times = torch.tensor([t,end_time]),reverse = False)
z_traj = z_traj
logp_z_traj = standard_normal_logprob(z_traj)
dlogp_traj = dlogp_traj.cpu().numpy()
return logp_z_traj.cpu().numpy() - dlogp_traj
logp = []
for t in integration_times:
logp.append(log_prob(t))
# The differential equation evaluated at some t and x.
def _differential(t, x):
t = torch.tensor(t).to(device)
x = torch.tensor(x).to(device)
#return cnf.odefunc.odefunc.diffeq(t, x)
return cnf.odefunc.diffeq(t, x)
ts = np.linspace(0,end_time,101)
xs = np.linspace(-4,4,100)
dxs = torch.zeros(ts.shape[0],xs.shape[0])
for ti , t in enumerate(ts):
for xi,x in enumerate(xs):
dxs[ti,xi]= -_differential(t,[x])
dxs = torch.tensor(dxs)
dts = torch.ones_like(dxs)
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
            z_traj = z_traj.numpy()[::-1,:,:]
            logp_traj = logp_traj.numpy()[::-1,:,:]  # torch tensors do not support negative strides
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
# z_traj = z_traj.cpu().numpy()
# logp_traj= logp_traj.cpu().numpy()
makedirs(savedir)
plt.clf()
plt.rcParams.update({'font.size': 13})
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
gridspec_kw={'height_ratios': [1,5, 1]},
figsize=(4, 8))
fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
probs = np.exp(np.array(logp)[:,:,0])
maxs = np.amax(probs,axis=1,keepdims=True)
probs = probs[::-1]
# probs = probs / maxs
# for smple in range(len(z_samples)):
for smple in [38,50,55, 59]:
animate = True
if animate:
Trange = range(len(integration_times))
else:
Trange = [len(integration_times)-1]
for T in Trange:
plt.clf()
plt.close()
plt.rcParams.update({'font.size': 13})
fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True,
gridspec_kw={'height_ratios': [1,5, 1]},
figsize=(4, 7))
fig.set_tight_layout({'pad': -1.0, 'h_pad': 0.0})
print("plotting ",T)
ztr0 = z_traj[0,smple,0]
pztr0 = np.exp(np.array(logp_traj)[0,smple,0])
ztrT = z_traj[T,smple,0]
pztrT = np.exp(np.array(logp_traj)[T,smple,0])
probts = np.exp(np.array(logp))
sc0 = axes[0].scatter(znp,probts[T,:,0],s=0.5,marker=None,linestyle='-',c=probts[T,:,0],cmap='viridis',zorder=3)
sc0.set_clim(0.,maxs[-1][0])
axes[0].scatter(ztrT,[0.], color="#F012BE",s=10.,zorder=5,clip_on=False)
axes[0].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.4,linewidth=0.5,zorder=2)
axes[0].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.4,linewidth=0.5,clip_on=False,zorder=2)
axes[0].plot([ztrT, ztrT],[0.,pztrT], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
axes[0].plot([ztrT, znp[0]],[pztrT,pztrT], color="#2ECC40",linestyle='--',alpha=0.8,linewidth=0.5,zorder=4)
axes[0].plot([znp[0], znp[0]],[pztr0,pztrT], color="#2ECC40",alpha=1.0,linewidth=1,zorder=10,clip_on=False)
axes[0].set_xlim(-4,4)
axes[0].set_ylim(0.,maxs[-1][0])
axes[0].set_ylabel(r"$p(z_{t})$",labelpad=20)
axes[0].set_yticks([min(0.9*pztr0,(pztrT+pztr0)/2)])
axes[0].set_yticklabels([r"$\Delta$"], color="#2ECC40")
axes[0].tick_params(width=0,labelsize=10)
axes[0].get_xaxis().set_visible(False)
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
# axes[0].set_clip_on(False)
axes[0].set_zorder(2)
axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
axes[1].plot(z_traj[0:T,smple,0],ts[0:T], color="#F012BE")
axes[1].scatter([ztr0],[0.], color="#F012BE", s=10.,zorder=5)
axes[1].scatter([ztrT],[ts[T]], color="#F012BE",s=10.,zorder=5,clip_on=True)
axes[1].scatter([ztrT],[ts[-1]], color="#F012BE",s=10.,zorder=5,clip_on=True,visible=False)#hack
axes[1].streamplot(xs,ts,dxs.numpy()[:,::-1],dts,color='white',linewidth=0.3,density=(0.7,0.5),arrowsize=0.5)
axes[1].set_xlim(-4,4)
axes[1].set_yticks([0,0.5])
axes[1].set_yticklabels([r"$0$",r"$1$"])
axes[1].set_ylabel(r"$t$")
# axes[1].get_xaxis().set_visible(False)
axes[1].spines['bottom'].set_position('zero')
# axes[1].spines['bottom'].set_zorder(0.)
axes[1].spines['top'].set_position(('data',0.5))
# axes[1].spines['top'].set_zorder(0.)
axes[1].spines['left'].set_bounds(0.,0.5)
axes[1].spines['right'].set_bounds(0.,0.5)
axes[1].set_clip_on(True)
# axes[2].plot(znp,-np.exp(np.array(logp)[-1,:,0]))
sc2 =axes[2].scatter(znp,probs[-1,:],s=0.5,marker=None,linestyle='-',c=probs[-1,:],cmap='viridis')
sc2.set_clim(0.,maxs[-1][0])
axes[2].scatter([ztr0],[0.],color="#F012BE",s=10.,zorder=5,clip_on=False)
axes[2].plot([ztr0, ztr0],[0.,pztr0], color="#F012BE",linestyle='--',alpha=0.8,linewidth=0.5)
axes[2].plot([ztr0, znp[0]],[pztr0,pztr0], color="#39CCCC",linestyle='--',alpha=0.8,linewidth=0.5,clip_on=False)
axes[2].set_xlim(-4,4)
axes[2].set_ylabel(r"$p(z_{t_T})$",labelpad=20)
axes[2].set_xlabel(r"$z$")
axes[2].set_yticks([])
axes[2].set_xticks([])
# axes[2].get_xaxis().set_visible(False)
axes[2].spines['top'].set_visible(False)
axes[2].spines['right'].set_visible(False)
axes[2].set_ylim(0.,maxs[-1][0])
# fig.subplots_adjust(hspace=0.)
pos0 = axes[0].get_position(original=False)
pos1 = axes[1].get_position(original=False)
pos2 = axes[2].get_position(original=False)
# print(pos0.y0)
# print(pos1.y0+pos1.height)
axes[0].set_position([pos1.x0,pos0.y0+0.4,pos1.x1,pos0.height])
axes[2].set_position([pos1.x0,pos2.y0,pos1.x1,pos2.height])
if animate:
makedirs(os.path.join(savedir, "anim_rev",'{:0>4}'.format(str(smple))))
plt.savefig(os.path.join(savedir, "anim_rev",'{:0>4}'.format(str(smple)),"img-"+'{:0>4}'.format(str(T))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=300)
plt.savefig(os.path.join(savedir, "fig1_1d_together_rev"+'{:0>4}'.format(str(smple))+".png"),pad_inches=0.05,bbox_inches='tight',dpi=350)
def save_fig1_1d_ptd_timescrub(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu',itr=''):
model.eval()
# data_samples=torch.tensor(data_samples).float().cuda()
# Sample from prior
z_samples = torch.randn(30, 1).to(device)
# linspace for plotting
npts=500
z_samples = np.linspace(-4,4,100)
z_samples = torch.from_numpy(z_samples[:,np.newaxis]).type(torch.float32).to(device)
znp = np.linspace(-4,4,npts)
z = torch.from_numpy(znp[:,np.newaxis]).type(torch.float32).to(device)
with torch.no_grad():
        # We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logp_samples = torch.sum(standard_normal_logprob(z_samples), -1, keepdim=True)
logp_z = torch.sum(standard_normal_logprob(z), -1, keepdim=True)
t = 0
for cnf in model.chain:
end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
integration_times = torch.linspace(0, end_time, ntimes)
            def log_prob(t):
                # integrate from time t to the end time, then apply the change of
                # variables identity: log p_t(z) = log p_end(z_end) - accumulated dlogp
                z_traj, dlogp_traj = cnf(z, torch.zeros_like(logp_z), integration_times=torch.tensor([t, end_time]), reverse=False)
                logp_z_traj = standard_normal_logprob(z_traj)
                return logp_z_traj.cpu().numpy() - dlogp_traj.cpu().numpy()
logp = []
for t in integration_times:
logp.append(log_prob(t))
# The differential equation evaluated at some t and x.
def _differential(t, x):
t = torch.tensor(t).to(device)
x = torch.tensor(x).to(device)
return cnf.odefunc.odefunc.diffeq(t, x)
ts = np.linspace(0,end_time,100)
xs = np.linspace(-4,4,100)
dxs = torch.zeros(ts.shape[0],xs.shape[0])
for ti , t in enumerate(ts):
for xi,x in enumerate(xs):
dxs[ti,xi]= -_differential(t,[x])
dts = torch.ones_like(dxs)
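        # Note (added for illustration): (dxs, dts) is the flow field drawn by
        # streamplot below; each arrow moves at unit speed in t with velocity
        # -f(t, z) in z, the sign flip presumably matching the reversed-time
        # rendering of the density panel.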
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=False)
z_traj, logp_traj= cnf(z_samples, logp_samples, integration_times=integration_times, reverse=True)
# z_traj, logp_traj= cnf(z, logp_z, integration_times=integration_times, reverse=True)
# z_traj = z_traj.cpu().numpy()
# logp_traj= logp_traj.cpu().numpy()
makedirs(savedir)
for timerow in range(integration_times.shape[0]):
plt.clf()
probs = np.exp(np.array(logp)[:,:,0])
maxs = np.amax(probs,axis=1,keepdims=True)
probs = probs / maxs
plt.rcParams.update({'font.size': 13})
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True,
gridspec_kw={'height_ratios': [1,5]},
figsize=(8,13))
fig.set_tight_layout({'pad': 0.1, 'h_pad': -1.0})
axes[0].scatter(znp,np.exp(np.array(logp)[::-1][timerow,:,0]),s=0.5,marker=None,linestyle='-',c=np.exp(np.array(logp)[::-1][timerow,:,0]),cmap='viridis')
axes[0].set_xlim(-4,4)
axes[0].set_ylim(0,.42)
axes[0].set_ylabel(r"$p(z(t))$",labelpad=20)
axes[0].set_yticks([])
axes[0].get_xaxis().set_visible(False)
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
axes[0].set_ylim(bottom=0.0)
axes[1].imshow(probs,cmap='viridis',extent=[-4,4,0,0.5],aspect=10)
axes[1].streamplot(xs,ts,dxs,dts,color='white',linewidth=0.9,density=(0.7,0.5),arrowsize=0.8)
axes[1].plot([-4,4],[integration_times[timerow],integration_times[timerow]],c='red',zorder=100)
axes[1].set_xlim(-4,4)
axes[1].set_yticks([0,0.5])
axes[1].set_xticks([])
axes[1].set_yticklabels([r"$0$",r"$1$"])
axes[1].set_ylabel(r"$t$")
axes[1].set_xlabel(r"$z$")
axes[1].spines['top'].set_visible(False)
axes[1].spines['bottom'].set_visible(False)
axes[1].get_xaxis().set_visible(True)
pos0 = axes[0].get_position(original=False)
pos1 = axes[1].get_position(original=False)
print(pos0.y0)
print(pos1.y0+pos1.height)
axes[0].set_position([pos1.x0,pos0.y0+0.15,pos1.x1,pos0.height])
plt.savefig(os.path.join(savedir, "fig1_1d_scrub"+str(timerow)+".png"),pad_inches=0,bbox_inches='tight')
plt.close()
| 54,154 | 42.04849 | 172 | py |
steer | steer-master/ffjord/diagnostics/approx_error_1d_particle_traj.py | from inspect import getsourcefile
import sys
import os
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
import argparse
import time
import torch
import torch.optim as optim
import lib.utils as utils
import lib.layers.odefunc as odefunc
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import build_model_tabular
import seaborn as sns
sns.set_style("whitegrid")
colors = ["windows blue", "amber", "greyish", "faded green", "dusty purple"]
sns.palplot(sns.xkcd_palette(colors))
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--ntimes', type=int, default=101)
parser.add_argument('--num_particles', type=int, default=10)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--niters', type=int, default=10000)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument('--save', type=str, default='experiments/approx_error_1d')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
logger.info(args)
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
def normal_log_density(x, mean=0, stdev=1):
term = (x - mean) / stdev
return -0.5 * (np.log(2 * np.pi) + 2 * np.log(stdev) + term * term)
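# Hedged sketch (added for illustration, not part of the original script):
# a quick self-check of normal_log_density against a direct evaluation of the
# Gaussian pdf. `_check_normal_log_density` is a hypothetical helper and is
# never called by the code below.
def _check_normal_log_density(x=0.7, mean=0.0, stdev=1.0):
    direct = np.log(np.exp(-0.5 * ((x - mean) / stdev) ** 2) / (stdev * np.sqrt(2.0 * np.pi)))
    assert np.allclose(normal_log_density(x, mean, stdev), direct)
    return direct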
def data_sample(batch_size):
x1 = np.random.randn(batch_size) * np.sqrt(0.4) - 2.8
x2 = np.random.randn(batch_size) * np.sqrt(0.4) - 0.9
x3 = np.random.randn(batch_size) * np.sqrt(0.4) + 2.
xs = np.concatenate([x1[:, None], x2[:, None], x3[:, None]], 1)
k = np.random.randint(0, 3, batch_size)
x = xs[np.arange(batch_size), k]
return torch.tensor(x[:, None]).float().to(device)
def data_density(x):
p1 = normal_log_density(x, mean=-2.8, stdev=np.sqrt(0.4))
p2 = normal_log_density(x, mean=-0.9, stdev=np.sqrt(0.4))
p3 = normal_log_density(x, mean=2.0, stdev=np.sqrt(0.4))
return torch.log(p1.exp() / 3 + p2.exp() / 3 + p3.exp() / 3)
def model_density(x, model):
x = x.to(device)
z, delta_logp = model(x, torch.zeros_like(x))
logpx = standard_normal_logprob(z) - delta_logp
return logpx
def model_sample(model, batch_size):
z = torch.randn(batch_size, 1)
logqz = standard_normal_logprob(z)
x, logqx = model(z, logqz, reverse=True)
return x, logqx
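# Hedged sketch (added for illustration): `model` maps data to the base
# distribution and back, so a forward pass followed by a reverse pass should
# approximately recover the input. `_check_invertibility` is a hypothetical
# helper and is never called by the code below.
def _check_invertibility(model, batch_size=4, tol=1e-3):
    x = data_sample(batch_size)
    z, _ = model(x, torch.zeros_like(x))
    x_recon, _ = model(z, torch.zeros_like(z), reverse=True)
    return bool((x - x_recon).abs().max().item() < tol)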
def compute_loss(args, model, batch_size=None):
if batch_size is None: batch_size = args.batch_size
x = data_sample(batch_size)
logpx = model_density(x, model)
return -torch.mean(logpx)
def train():
model = build_model_tabular(args, 1).to(device)
set_cnf_options(args, model)
logger.info(model)
logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
time_meter = utils.RunningAverageMeter(0.93)
loss_meter = utils.RunningAverageMeter(0.93)
nfef_meter = utils.RunningAverageMeter(0.93)
nfeb_meter = utils.RunningAverageMeter(0.93)
tt_meter = utils.RunningAverageMeter(0.93)
end = time.time()
best_loss = float('inf')
model.train()
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
loss = compute_loss(args, model)
loss_meter.update(loss.item())
total_time = count_total_time(model)
nfe_forward = count_nfe(model)
loss.backward()
optimizer.step()
nfe_total = count_nfe(model)
nfe_backward = nfe_total - nfe_forward
nfef_meter.update(nfe_forward)
nfeb_meter.update(nfe_backward)
time_meter.update(time.time() - end)
tt_meter.update(total_time)
log_message = (
'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})'
' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format(
itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg,
nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg
)
)
logger.info(log_message)
if itr % args.val_freq == 0 or itr == args.niters:
with torch.no_grad():
model.eval()
test_loss = compute_loss(args, model, batch_size=args.test_batch_size)
test_nfe = count_nfe(model)
log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe)
logger.info(log_message)
if test_loss.item() < best_loss:
best_loss = test_loss.item()
utils.makedirs(args.save)
torch.save({
'args': args,
'state_dict': model.state_dict(),
}, os.path.join(args.save, 'checkpt.pth'))
model.train()
if itr % args.viz_freq == 0:
with torch.no_grad():
model.eval()
xx = torch.linspace(-10, 10, 10000).view(-1, 1)
true_p = data_density(xx)
plt.plot(xx.view(-1).cpu().numpy(), true_p.view(-1).exp().cpu().numpy(), label='True')
true_p = model_density(xx, model)
plt.plot(xx.view(-1).cpu().numpy(), true_p.view(-1).exp().cpu().numpy(), label='Model')
utils.makedirs(os.path.join(args.save, 'figs'))
plt.savefig(os.path.join(args.save, 'figs', '{:06d}.jpg'.format(itr)))
plt.close()
model.train()
end = time.time()
logger.info('Training has finished.')
def evaluate():
model = build_model_tabular(args, 1).to(device)
set_cnf_options(args, model)
checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
model.load_state_dict(checkpt['state_dict'])
model.to(device)
tols = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8]
errors = []
with torch.no_grad():
for tol in tols:
args.rtol = tol
args.atol = tol
set_cnf_options(args, model)
xx = torch.linspace(-15, 15, 500000).view(-1, 1).to(device)
prob_xx = model_density(xx, model).double().view(-1).cpu()
xx = xx.double().cpu().view(-1)
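            # The next two lines form a left Riemann sum computed in log space:
            # logsumexp_i(log p(x_i) + log dx_i) = log(sum_i p(x_i) * dx_i) ~ log int p(x) dx,
            # so `num_integral` should approach 1 as the solver tolerance tightens.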
dxx = torch.log(xx[1:] - xx[:-1])
num_integral = torch.logsumexp(prob_xx[:-1] + dxx, 0).exp()
errors.append(float(torch.abs(num_integral - 1.)))
print(errors[-1])
plt.figure(figsize=(5, 3))
plt.plot(tols, errors, linewidth=3, marker='o', markersize=7)
# plt.plot([-1, 0.2], [-1, 0.2], '--', color='grey', linewidth=1)
plt.xscale("log", nonposx='clip')
# plt.yscale("log", nonposy='clip')
plt.xlabel('Solver Tolerance', fontsize=17)
    plt.ylabel(r'$| 1 - \int p(x) |$', fontsize=17)
plt.tight_layout()
plt.savefig('ode_solver_error_vs_tol.pdf')
def visualize_times():
model = build_model_tabular(args, 1).to(device)
set_cnf_options(args, model)
checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
model.load_state_dict(checkpt['state_dict'])
model.to(device)
viz_times = torch.linspace(0., args.time_length , args.ntimes)
errors = []
with torch.no_grad():
for i,t in enumerate(tqdm(viz_times[1:])):
model.eval()
set_cnf_options(args, model)
xx = torch.linspace(-10, 10, 10000).view(-1, 1)
#generated_p = model_density(xx, model)
generated_p=0
for cnf in model.chain:
xx = xx.to(device)
z, delta_logp = cnf(xx, torch.zeros_like(xx),integration_times=torch.Tensor( [ 0, t ] ))
generated_p = standard_normal_logprob(z) - delta_logp
plt.plot(xx.view(-1).cpu().numpy(), generated_p.view(-1).exp().cpu().numpy(), label='Model')
utils.makedirs(os.path.join(args.save,'test_times', 'figs'))
plt.savefig(os.path.join(args.save,'test_times', 'figs', '{:04d}.jpg'.format(i)))
plt.close()
trajectory_to_video(os.path.join(args.save,'test_times', 'figs'))
def visualize_evolution():
model = build_model_tabular(args, 1).to(device)
set_cnf_options(args, model)
checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
model.load_state_dict(checkpt['state_dict'])
model.to(device)
viz_times = torch.linspace(0., args.time_length , args.ntimes)
errors = []
viz_times_np = viz_times[1:].detach().cpu().numpy()
xx = torch.linspace(-5, 5, args.num_particles).view(-1, 1)
xx_np = xx.detach().cpu().numpy()
xs,ys = np.meshgrid(xx,viz_times_np)
#xx,yy = np.meshgrid(args.num_particles, viz_times_np )
#all_evolutions = np.zeros((args.ntimes-1,args.num_particles))
all_evolutions = np.zeros((args.num_particles,args.ntimes-1))
with torch.no_grad():
for i,t in enumerate(tqdm(viz_times[1:])):
model.eval()
set_cnf_options(args, model)
#xx = torch.linspace(-5, 5, args.num_particles).view(-1, 1)
#generated_p = model_density(xx, model)
generated_p=0
for cnf in model.chain:
xx = xx.to(device)
z, delta_logp = cnf(xx, torch.zeros_like(xx),integration_times=torch.Tensor( [ 0, t ] ))
generated_p = standard_normal_logprob(z) - delta_logp
generated_p = generated_p.detach()
#plt.plot(xx.view(-1).cpu().numpy(), generated_p.view(-1).exp().cpu().numpy(), label='Model')
cur_evolution=generated_p.view(-1).exp().cpu().numpy()
#all_evolutions[i]= np.array(cur_evolution)
all_evolutions[:,i]= np.array(cur_evolution)
#xx = np.array(xx.detach().cpu().numpy())
#yy = np.array(yy)
plt.figure(dpi=1200)
plt.clf()
all_evolutions = all_evolutions.astype('float32')
print(xs.shape)
print(ys.shape)
print(all_evolutions.shape)
#plt.pcolormesh(ys, xs, all_evolutions)
plt.pcolormesh(xs, ys, all_evolutions.transpose())
utils.makedirs(os.path.join(args.save,'test_times', 'figs'))
    plt.savefig(os.path.join(args.save, 'test_times', 'figs', 'evolution.jpg'))
plt.close()
def visualize_particle_flow():
model = build_model_tabular(args, 1).to(device)
set_cnf_options(args, model)
checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
model.load_state_dict(checkpt['state_dict'])
model.to(device)
viz_times = torch.linspace(0., args.time_length , args.ntimes)
errors = []
xx = torch.linspace(-5, 5, args.num_particles).view(-1, 1)
zs=[]
#zs.append(xx.view(-1).cpu().numpy())
with torch.no_grad():
for i,t in enumerate(tqdm(viz_times[1:])):
model.eval()
set_cnf_options(args, model)
#generated_p = model_density(xx, model)
generated_p=0
for cnf in model.chain:
xx = xx.to(device)
z, delta_logp = cnf(xx, torch.zeros_like(xx),integration_times=torch.Tensor( [ 0, t ] ))
generated_p = standard_normal_logprob(z) - delta_logp
zs.append(z.cpu().numpy())
#plt.plot(xx.view(-1).cpu().numpy(), generated_p.view(-1).exp().cpu().numpy(), label='Model')
#plt.savefig(os.path.join(args.save,'test_times', 'figs', '{:04d}.jpg'.format(i)))
#plt.close()
zs=np.array(zs).reshape(args.ntimes-1,args.num_particles)
viz_t = viz_times[1:].numpy()
#print(zs)
plt.figure(dpi=1200)
plt.clf()
#plt.plot(viz_t , zs[:,0])
with sns.color_palette("Blues_d"):
plt.plot(viz_t , zs)
plt.xlabel("Test Time")
#plt.tight_layout()
utils.makedirs(os.path.join(args.save,'test_times', 'figs'))
    plt.savefig(os.path.join(args.save, 'test_times', 'figs', 'particle_trajectory.jpg'))
plt.close()
def trajectory_to_video(savedir):
import subprocess
bashCommand = 'ffmpeg -y -i {} {}'.format(os.path.join(savedir, '%04d.jpg'), os.path.join(savedir, 'traj.mp4'))
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if __name__ == '__main__':
#train()
#evaluate()
#visualize_times()
visualize_particle_flow()
#visualize_evolution()
| 15,540 | 36.720874 | 116 | py |
steer | steer-master/ffjord/diagnostics/plot_flows.py | from inspect import getsourcefile
import sys
import os
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
import torch
import lib.toy_data as toy_data
import lib.utils as utils
import lib.visualize_flow as viz_flow
import lib.layers.odefunc as odefunc
import lib.layers as layers
from train_misc import standard_normal_logprob
from train_misc import build_model_tabular, count_parameters
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
'--data', choices=['swissroll', '8gaussians', 'pinwheel', 'circles', 'moons', '2spirals', 'checkerboard', 'rings'],
type=str, default='pinwheel'
)
parser.add_argument('--discrete', action='store_true')
parser.add_argument('--depth', help='number of coupling layers', type=int, default=10)
parser.add_argument('--glow', type=eval, choices=[True, False], default=False)
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--niters', type=int, default=2500)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
parser.add_argument('--checkpt', type=str, required=True)
parser.add_argument('--save', type=str, default='experiments/cnf')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
def construct_discrete_model():
chain = []
for i in range(args.depth):
if args.glow: chain.append(layers.BruteForceLayer(2))
chain.append(layers.CouplingLayer(2, swap=i % 2 == 0))
return layers.SequentialFlow(chain)
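# Hedged note (added for illustration): alternating `swap` between successive
# coupling layers is what lets the stack transform both coordinates of the
# 2-D input; each coupling updates one half conditioned on the other, and the
# alternation swaps which half is updated.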
def get_transforms(model):
def sample_fn(z, logpz=None):
if logpz is not None:
return model(z, logpz, reverse=True)
else:
return model(z, reverse=True)
def density_fn(x, logpx=None):
if logpx is not None:
return model(x, logpx, reverse=False)
else:
return model(x, reverse=False)
return sample_fn, density_fn
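# Hedged usage sketch (added for illustration; shapes and names here are
# assumptions, not part of the original script):
# >>> sample_fn, density_fn = get_transforms(model)
# >>> z = torch.randn(64, 2)
# >>> x, logpx = sample_fn(z, standard_normal_logprob(z).sum(1, keepdim=True))
# >>> z_back, delta_logp = density_fn(x, torch.zeros(64, 1))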
if __name__ == '__main__':
if args.discrete:
model = construct_discrete_model().to(device)
model.load_state_dict(torch.load(args.checkpt)['state_dict'])
else:
model = build_model_tabular(args, 2).to(device)
sd = torch.load(args.checkpt)['state_dict']
fixed_sd = {}
for k, v in sd.items():
fixed_sd[k.replace('odefunc.odefunc', 'odefunc')] = v
model.load_state_dict(fixed_sd)
print(model)
print("Number of trainable parameters: {}".format(count_parameters(model)))
model.eval()
p_samples = toy_data.inf_train_gen(args.data, batch_size=800**2)
with torch.no_grad():
sample_fn, density_fn = get_transforms(model)
plt.figure(figsize=(10, 10))
        ax = plt.gca()
viz_flow.plt_samples(p_samples, ax, npts=800)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
fig_filename = os.path.join(args.save, 'figs', 'true_samples.jpg')
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename)
plt.close()
plt.figure(figsize=(10, 10))
        ax = plt.gca()
viz_flow.plt_flow_density(standard_normal_logprob, density_fn, ax, npts=800, memory=200, device=device)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
fig_filename = os.path.join(args.save, 'figs', 'model_density.jpg')
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename)
plt.close()
plt.figure(figsize=(10, 10))
        ax = plt.gca()
viz_flow.plt_flow_samples(torch.randn, sample_fn, ax, npts=800, memory=200, device=device)
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
fig_filename = os.path.join(args.save, 'figs', 'model_samples.jpg')
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename)
plt.close()
| 6,070 | 37.424051 | 119 | py |
steer | steer-master/ffjord/diagnostics/viz_high_fidelity_toy.py | import os
import math
from tqdm import tqdm
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
def standard_normal_logprob(z):
logZ = -0.5 * math.log(2 * math.pi)
return logZ - z.pow(2) / 2
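# For reference (comment added for illustration): this is the elementwise
# standard normal log-density, log N(z; 0, 1) = -0.5 * log(2 * pi) - z^2 / 2,
# so a joint log-probability over dimensions is its sum along dim 1.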
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
def save_density_traj(model, data_samples, savedir, ntimes=101, memory=0.01, device='cpu'):
model.eval()
# sample from a grid
npts = 800
side = np.linspace(-4, 4, npts)
xx, yy = np.meshgrid(side, side)
xx = torch.from_numpy(xx).type(torch.float32).to(device)
yy = torch.from_numpy(yy).type(torch.float32).to(device)
z_grid = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1)], 1)
with torch.no_grad():
# We expect the model is a chain of CNF layers wrapped in a SequentialFlow container.
logpz_grid = torch.sum(standard_normal_logprob(z_grid), 1, keepdim=True)
for cnf in model.chain:
end_time = cnf.sqrt_end_time * cnf.sqrt_end_time
viz_times = torch.linspace(0., end_time, ntimes)
logpz_grid = [standard_normal_logprob(z_grid).sum(1, keepdim=True)]
for t in tqdm(viz_times[1:]):
inds = torch.arange(0, z_grid.shape[0]).to(torch.int64)
logpz_t = []
for ii in torch.split(inds, int(z_grid.shape[0] * memory)):
z0, delta_logp = cnf(
z_grid[ii],
torch.zeros(z_grid[ii].shape[0], 1).to(z_grid), integration_times=torch.tensor([0.,
t.item()])
)
logpz_t.append(standard_normal_logprob(z0).sum(1, keepdim=True) - delta_logp)
logpz_grid.append(torch.cat(logpz_t, 0))
logpz_grid = torch.stack(logpz_grid, 0).cpu().detach().numpy()
z_grid = z_grid.cpu().detach().numpy()
plt.figure(figsize=(8, 8))
for t in range(logpz_grid.shape[0]):
plt.clf()
ax = plt.gca()
# plot the density
z, logqz = z_grid, logpz_grid[t]
xx = z[:, 0].reshape(npts, npts)
yy = z[:, 1].reshape(npts, npts)
qz = np.exp(logqz).reshape(npts, npts)
plt.pcolormesh(xx, yy, qz, cmap='binary')
ax.set_xlim(-4, 4)
ax.set_ylim(-4, 4)
cmap = matplotlib.cm.get_cmap('binary')
#ax.set_axis_bgcolor(cmap(0.))
ax.invert_yaxis()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
plt.tight_layout()
makedirs(savedir)
plt.savefig(os.path.join(savedir, f"viz-{t:05d}.jpg"))
def trajectory_to_video(savedir):
import subprocess
bashCommand = 'ffmpeg -y -i {} {}'.format(os.path.join(savedir, 'viz-%05d.jpg'), os.path.join(savedir, 'traj.mp4'))
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if __name__ == '__main__':
import argparse
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')))
import lib.toy_data as toy_data
from train_misc import count_parameters
from train_misc import set_cnf_options, add_spectral_norm, create_regularization_fns
from train_misc import build_model_tabular
def get_ckpt_model_and_data(args):
# Load checkpoint.
checkpt = torch.load(args.checkpt, map_location=lambda storage, loc: storage)
ckpt_args = checkpt['args']
state_dict = checkpt['state_dict']
# Construct model and restore checkpoint.
regularization_fns, regularization_coeffs = create_regularization_fns(ckpt_args)
model = build_model_tabular(ckpt_args, 2, regularization_fns).to(device)
if ckpt_args.spectral_norm: add_spectral_norm(model)
set_cnf_options(ckpt_args, model)
model.load_state_dict(state_dict)
model.to(device)
print(model)
print("Number of trainable parameters: {}".format(count_parameters(model)))
# Load samples from dataset
data_samples = toy_data.inf_train_gen(ckpt_args.data, batch_size=2000)
return model, data_samples
parser = argparse.ArgumentParser()
parser.add_argument('--checkpt', type=str, required=True)
parser.add_argument('--ntimes', type=int, default=101)
parser.add_argument('--memory', type=float, default=0.01, help='Higher this number, the more memory is consumed.')
parser.add_argument('--save', type=str, default='trajectory')
args = parser.parse_args()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model, data_samples = get_ckpt_model_and_data(args)
save_density_traj(model, data_samples, args.save, ntimes=args.ntimes, memory=args.memory, device=device)
trajectory_to_video(args.save)
| 5,114 | 37.458647 | 119 | py |
steer | steer-master/ffjord/diagnostics/approx_error_1d.py | from inspect import getsourcefile
import sys
import os
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
import argparse
import time
import torch
import torch.optim as optim
import lib.utils as utils
import lib.layers.odefunc as odefunc
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import build_model_tabular
import seaborn as sns
sns.set_style("whitegrid")
colors = ["windows blue", "amber", "greyish", "faded green", "dusty purple"]
sns.palplot(sns.xkcd_palette(colors))
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--ntimes', type=int, default=101)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--niters', type=int, default=10000)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument('--save', type=str, default='experiments/approx_error_1d')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
logger.info(args)
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
def normal_log_density(x, mean=0, stdev=1):
term = (x - mean) / stdev
return -0.5 * (np.log(2 * np.pi) + 2 * np.log(stdev) + term * term)
#def data_sample(batch_size):
# x1 = np.random.randn(batch_size) * np.sqrt(0.4) - 2.8
# x2 = np.random.randn(batch_size) * np.sqrt(0.4) - 0.9
# x3 = np.random.randn(batch_size) * np.sqrt(0.4) + 2.
# xs = np.concatenate([x1[:, None], x2[:, None], x3[:, None]], 1)
# k = np.random.randint(0, 3, batch_size)
# x = xs[np.arange(batch_size), k]
# return torch.tensor(x[:, None]).float().to(device)
#
#
#def data_density(x):
# p1 = normal_log_density(x, mean=-2.8, stdev=np.sqrt(0.4))
# p2 = normal_log_density(x, mean=-0.9, stdev=np.sqrt(0.4))
# p3 = normal_log_density(x, mean=2.0, stdev=np.sqrt(0.4))
# return torch.log(p1.exp() / 3 + p2.exp() / 3 + p3.exp() / 3)
def data_sample(batch_size):
x1 = np.random.randn(batch_size) * np.sqrt(0.4) -2.8
x2 = np.random.randn(batch_size) * np.sqrt(0.4) - 0.9
x3 = np.random.randn(batch_size) * np.sqrt(0.4) + 2
x4 = np.random.randn(batch_size) * np.sqrt(0.4) + 5.1
x5 = np.random.randn(batch_size) * np.sqrt(0.4) + 3.4
xs = np.concatenate([x1[:, None], x2[:, None], x3[:, None], x4[:, None], x5[:, None] ], 1)
k = np.random.randint(0, 5, batch_size)
x = xs[np.arange(batch_size), k]
return torch.tensor(x[:, None]).float().to(device)
def data_density(x):
p1 = normal_log_density(x, mean= -2.8 ,stdev=np.sqrt(0.4))
p2 = normal_log_density(x, mean= -0.9, stdev=np.sqrt(0.4))
p3 = normal_log_density(x, mean= 2 , stdev=np.sqrt(0.4))
p4 = normal_log_density(x, mean= 5.1, stdev=np.sqrt(0.4))
p5 = normal_log_density(x, mean= 3.4, stdev=np.sqrt(0.4))
return torch.log(p1.exp() / 5 + p2.exp() / 5 + p3.exp() / 5 + p4.exp() / 5 + p5.exp() / 5 )
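# Hedged sketch (added for illustration): a crude Riemann-sum check that the
# five-component mixture has total probability mass ~1. `_check_mixture_mass`
# is a hypothetical helper and is never called by the training code below.
def _check_mixture_mass(lo=-15.0, hi=15.0, npts=20000):
    xx = torch.linspace(lo, hi, npts)
    p = data_density(xx).exp()
    return float((p[:-1] * (xx[1:] - xx[:-1])).sum())  # should be close to 1.0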
def model_density(x, model):
x = x.to(device)
z, delta_logp = model(x, torch.zeros_like(x))
logpx = standard_normal_logprob(z) - delta_logp
return logpx
def model_sample(model, batch_size):
z = torch.randn(batch_size, 1)
logqz = standard_normal_logprob(z)
x, logqx = model(z, logqz, reverse=True)
return x, logqx
def compute_loss(args, model, batch_size=None):
if batch_size is None: batch_size = args.batch_size
x = data_sample(batch_size)
logpx = model_density(x, model)
return -torch.mean(logpx)
def train():
model = build_model_tabular(args, 1).to(device)
set_cnf_options(args, model)
logger.info(model)
logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
time_meter = utils.RunningAverageMeter(0.93)
loss_meter = utils.RunningAverageMeter(0.93)
nfef_meter = utils.RunningAverageMeter(0.93)
nfeb_meter = utils.RunningAverageMeter(0.93)
tt_meter = utils.RunningAverageMeter(0.93)
end = time.time()
best_loss = float('inf')
model.train()
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
loss = compute_loss(args, model)
loss_meter.update(loss.item())
total_time = count_total_time(model)
nfe_forward = count_nfe(model)
loss.backward()
optimizer.step()
nfe_total = count_nfe(model)
nfe_backward = nfe_total - nfe_forward
nfef_meter.update(nfe_forward)
nfeb_meter.update(nfe_backward)
time_meter.update(time.time() - end)
tt_meter.update(total_time)
log_message = (
'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})'
' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format(
itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg,
nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg
)
)
logger.info(log_message)
if itr % args.val_freq == 0 or itr == args.niters:
with torch.no_grad():
model.eval()
test_loss = compute_loss(args, model, batch_size=args.test_batch_size)
test_nfe = count_nfe(model)
log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe)
logger.info(log_message)
if test_loss.item() < best_loss:
best_loss = test_loss.item()
utils.makedirs(args.save)
torch.save({
'args': args,
'state_dict': model.state_dict(),
}, os.path.join(args.save, 'checkpt.pth'))
model.train()
if itr % args.viz_freq == 0:
with torch.no_grad():
model.eval()
xx = torch.linspace(-10, 10, 10000).view(-1, 1)
true_p = data_density(xx)
plt.plot(xx.view(-1).cpu().numpy(), true_p.view(-1).exp().cpu().numpy(), label='True')
true_p = model_density(xx, model)
plt.plot(xx.view(-1).cpu().numpy(), true_p.view(-1).exp().cpu().numpy(), label='Model')
utils.makedirs(os.path.join(args.save, 'figs'))
plt.savefig(os.path.join(args.save, 'figs', '{:06d}.jpg'.format(itr)))
plt.close()
model.train()
end = time.time()
logger.info('Training has finished.')
def evaluate():
model = build_model_tabular(args, 1).to(device)
set_cnf_options(args, model)
checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
model.load_state_dict(checkpt['state_dict'])
model.to(device)
tols = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8]
errors = []
with torch.no_grad():
for tol in tols:
args.rtol = tol
args.atol = tol
set_cnf_options(args, model)
xx = torch.linspace(-15, 15, 500000).view(-1, 1).to(device)
prob_xx = model_density(xx, model).double().view(-1).cpu()
xx = xx.double().cpu().view(-1)
dxx = torch.log(xx[1:] - xx[:-1])
num_integral = torch.logsumexp(prob_xx[:-1] + dxx, 0).exp()
errors.append(float(torch.abs(num_integral - 1.)))
print(errors[-1])
plt.figure(figsize=(5, 3))
plt.plot(tols, errors, linewidth=3, marker='o', markersize=7)
# plt.plot([-1, 0.2], [-1, 0.2], '--', color='grey', linewidth=1)
plt.xscale("log", nonposx='clip')
# plt.yscale("log", nonposy='clip')
plt.xlabel('Solver Tolerance', fontsize=17)
    plt.ylabel(r'$| 1 - \int p(x) |$', fontsize=17)
plt.tight_layout()
plt.savefig('ode_solver_error_vs_tol.pdf')
def visualize_times():
model = build_model_tabular(args, 1).to(device)
set_cnf_options(args, model)
checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
model.load_state_dict(checkpt['state_dict'])
model.to(device)
viz_times = torch.linspace(0., args.time_length , args.ntimes)
errors = []
with torch.no_grad():
for i,t in enumerate(tqdm(viz_times[1:])):
model.eval()
set_cnf_options(args, model)
xx = torch.linspace(-10, 10, 10000).view(-1, 1)
#generated_p = model_density(xx, model)
generated_p=0
for cnf in model.chain:
xx = xx.to(device)
z, delta_logp = cnf(xx, torch.zeros_like(xx),integration_times=torch.Tensor( [ 0, t ] ))
generated_p = standard_normal_logprob(z) - delta_logp
plt.plot(xx.view(-1).cpu().numpy(), generated_p.view(-1).exp().cpu().numpy(), label='Model')
utils.makedirs(os.path.join(args.save,'test_times', 'figs'))
plt.savefig(os.path.join(args.save,'test_times', 'figs', '{:04d}.jpg'.format(i)))
plt.close()
trajectory_to_video(os.path.join(args.save,'test_times', 'figs'))
def trajectory_to_video(savedir):
import subprocess
bashCommand = 'ffmpeg -y -i {} {}'.format(os.path.join(savedir, '%04d.jpg'), os.path.join(savedir, 'traj.mp4'))
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if __name__ == '__main__':
train()
evaluate()
visualize_times()
| 12,572 | 36.984894 | 116 | py |
steer | steer-master/ffjord/diagnostics/fig_1_1d_toy.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from inspect import getsourcefile
import sys
import argparse
import os
import time
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import torch
import torch.optim as optim
import lib.toy_data as toy_data
import lib.utils as utils
from lib.visualize_flow import visualize_transform
import lib.layers.odefunc as odefunc
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time
from train_misc import add_spectral_norm, spectral_norm_power_iteration
from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log
from train_misc import build_model_tabular
from viz_toy import save_trajectory, trajectory_to_video
from viz_fig1 import save_fig1,save_fig1_rev,save_fig1_1d_ptd,save_fig1_1d_ptd_timescrub,save_fig1_1d_icml, save_fig1_1d_icml_rev,save_fig1_1d_icml_no_top_or_bottom
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams', 'fixed_adams']
parser = argparse.ArgumentParser('Continuous Normalizing Flow')
parser.add_argument(
'--data', choices=['swissroll', '8gaussians', 'pinwheel', 'circles', 'moons', '2spirals','rowimg','rowimgsmol','willrow','1d_density','1d_density_mix'], type=str,
default='1d_density_mix'
)
parser.add_argument(
"--layer_type", type=str, default="concatsquash",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument('--dims', type=str, default='64-64-64')
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument('--time_length', type=float, default=0.5)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--divergence_fn", type=str, default="brute_force", choices=["brute_force", "approximate"])
parser.add_argument("--nonlinearity", type=str, default="tanh", choices=odefunc.NONLINEARITIES)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batch_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--bn_lag', type=float, default=0)
parser.add_argument('--niters', type=int, default=2500)
parser.add_argument('--batch_size', type=int, default=8000)
parser.add_argument('--test_batch_size', type=int, default=100)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=1e-5)
# Track quantities
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
#parser.add_argument("--resume", type=str, default=None)
#parser.add_argument('--save', type=str, default='experiments/fig1_1d_toy')
parser.add_argument('--save', type=str, default='experiments/approx_error_1d')
parser.add_argument('--resume', type=str, default='experiments/approx_error_1d')
parser.add_argument('--viz_freq', type=int, default=100)
parser.add_argument('--val_freq', type=int, default=100)
parser.add_argument('--log_freq', type=int, default=10)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--icml_plot', type=int,default=1)
args = parser.parse_args()
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
logger.info(args)
device = torch.device('cpu') #torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
print('GPU active:',torch.cuda.is_available())
def get_transforms(model):
def sample_fn(z, logpz=None):
if logpz is not None:
return model(z, logpz, reverse=True)
else:
return model(z, reverse=True)
def density_fn(x, logpx=None):
if logpx is not None:
return model(x, logpx, reverse=False)
else:
return model(x, reverse=False)
return sample_fn, density_fn
def compute_loss(args, model, batch_size=None):
if batch_size is None: batch_size = args.batch_size
# load data
x = toy_data.inf_train_gen(args.data, batch_size=batch_size)
x = torch.from_numpy(x).type(torch.float32).to(device)
zero = torch.zeros(x.shape[0], 1).to(x)
# transform to z
z, delta_logp = model(x, zero)
# compute log q(z)
logpz = standard_normal_logprob(z).sum(1, keepdim=True)
logpx = logpz - delta_logp
loss = -torch.mean(logpx)
return loss
if __name__ == '__main__':
x= toy_data.inf_train_gen(args.data, batch_size=args.batch_size)
print(x.shape)
plt.hist(x,bins=500)
plt.savefig('testwill.png')
regularization_fns, regularization_coeffs = create_regularization_fns(args)
# model = build_model_tabular(args, 200, regularization_fns).to(device)
model = build_model_tabular(args, 1, regularization_fns).to(device)
if args.spectral_norm: add_spectral_norm(model)
set_cnf_options(args, model)
# logger.info(model)
logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# restore parameters
if args.resume is not None:
#checkpt = torch.load(args.resume+'/checkpt.pth', map_location=lambda storage, loc: storage)
#model.load_state_dict(checkpt["state_dict"])
model = build_model_tabular(args, 1).to(device)
set_cnf_options(args, model)
checkpt = torch.load(os.path.join(args.save, 'checkpt.pth'))
model.load_state_dict(checkpt['state_dict'])
model.to(device)
if "optim_state_dict" in checkpt.keys():
optimizer.load_state_dict(checkpt["optim_state_dict"])
# Manually move optimizer state to device.
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
                        state[k] = v.to(device)
if args.icml_plot:
save_fig1_path = os.path.join(args.resume, 'fig1_ani')
logger.info('Plotting fig1 to {}'.format(save_fig1_path))
data_samples = toy_data.inf_train_gen(args.data, batch_size=1)
#save_fig1_1d_icml_no_top_or_bottom(model, data_samples, save_fig1_path, device=device,itr=0)
save_fig1_1d_icml(model, data_samples, save_fig1_path, device=device,itr=0)
#save_fig1_1d_icml_rev(model, data_samples, save_fig1_path, device=device,itr=0)
        sys.exit(0)  # stop after plotting; the training loop below is not needed
time_meter = utils.RunningAverageMeter(0.93)
loss_meter = utils.RunningAverageMeter(0.93)
nfef_meter = utils.RunningAverageMeter(0.93)
nfeb_meter = utils.RunningAverageMeter(0.93)
tt_meter = utils.RunningAverageMeter(0.93)
end = time.time()
best_loss = float('inf')
model.train()
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
if args.spectral_norm: spectral_norm_power_iteration(model, 1)
loss = compute_loss(args, model)
loss_meter.update(loss.item())
if len(regularization_coeffs) > 0:
reg_states = get_regularization(model, regularization_coeffs)
reg_loss = sum(
reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0
)
loss = loss + reg_loss
total_time = count_total_time(model)
nfe_forward = count_nfe(model)
loss.backward()
optimizer.step()
nfe_total = count_nfe(model)
nfe_backward = nfe_total - nfe_forward
nfef_meter.update(nfe_forward)
nfeb_meter.update(nfe_backward)
time_meter.update(time.time() - end)
tt_meter.update(total_time)
log_message = (
'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f}) | NFE Forward {:.0f}({:.1f})'
' | NFE Backward {:.0f}({:.1f}) | CNF Time {:.4f}({:.4f})'.format(
itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, nfef_meter.val, nfef_meter.avg,
nfeb_meter.val, nfeb_meter.avg, tt_meter.val, tt_meter.avg
)
)
if len(regularization_coeffs) > 0:
log_message = append_regularization_to_log(log_message, regularization_fns, reg_states)
logger.info(log_message)
if itr % args.val_freq == 0 or itr == args.niters:
with torch.no_grad():
model.eval()
test_loss = compute_loss(args, model, batch_size=args.test_batch_size)
test_nfe = count_nfe(model)
log_message = '[TEST] Iter {:04d} | Test Loss {:.6f} | NFE {:.0f}'.format(itr, test_loss, test_nfe)
logger.info(log_message)
if test_loss.item() < best_loss:
best_loss = test_loss.item()
utils.makedirs(args.save)
torch.save({
'args': args,
'state_dict': model.state_dict(),
}, os.path.join(args.save, 'checkpt.pth'))
model.train()
save_fig1_path = os.path.join(args.resume, 'fig1_ani')
logger.info('Plotting fig1 to {}'.format(save_fig1_path))
data_samples = toy_data.inf_train_gen(args.data, batch_size=1)
save_fig1_1d_ptd(model, data_samples, save_fig1_path, device=device,itr=itr)
# save_fig1_rev(model, data_samples, save_fig1_path, device=device)
# if itr % args.viz_freq == 0:
# with torch.no_grad():
# model.eval()
# p_samples = toy_data.inf_train_gen(args.data, batch_size=2000)
# sample_fn, density_fn = get_transforms(model)
# plt.figure(figsize=(9, 3))
# visualize_transform(
# p_samples, torch.randn, standard_normal_logprob, transform=sample_fn, inverse_transform=density_fn,
# samples=True, npts=100, device=device
# )
# fig_filename = os.path.join(args.save, 'figs', '{:04d}.jpg'.format(itr))
# utils.makedirs(os.path.dirname(fig_filename))
# plt.savefig(fig_filename)
    # plt.close()
# model.train()
end = time.time()
save_fig1_path = os.path.join(args.resume, 'fig1')
logger.info('Plotting fig1 to {}'.format(save_fig1_path))
data_samples = toy_data.inf_train_gen(args.data, batch_size=1)
save_fig1_1d_ptd(model, data_samples, save_fig1_path, device=device)
save_fig1_1d_ptd_timescrub(model, data_samples, save_fig1_path+'/scrub', device=device)
logger.info('Training has finished.')
# save_fig1_path = os.path.join(args.resume, 'fig1')
# logger.info('Plotting fig1 to {}'.format(save_fig1_path))
# data_samples = toy_data.inf_train_gen(args.data, batch_size=1)
# save_fig1(model, data_samples, save_fig1_path, device=device)
# save_fig1_rev(model, data_samples, save_fig1_path, device=device)
# save_trajectory(model, data_samples, save_traj_dir, device=device)
# trajectory_to_video(save_traj_dir)
| 12,538 | 41.795222 | 166 | py |
steer | steer-master/ffjord/diagnostics/viz_multiscale.py | from inspect import getsourcefile
import sys
import os
import math
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import argparse
import lib.layers as layers
import lib.odenvp as odenvp
import torch
import torchvision.transforms as tforms
import torchvision.datasets as dset
from torchvision.utils import save_image
import lib.utils as utils
from train_misc import add_spectral_norm, set_cnf_options, count_parameters
parser = argparse.ArgumentParser("Continuous Normalizing Flow")
parser.add_argument("--checkpt", type=str, required=True)
parser.add_argument("--data", choices=["mnist", "svhn", "cifar10", 'lsun_church'], type=str, default="cifar10")
parser.add_argument("--dims", type=str, default="64,64,64")
parser.add_argument("--num_blocks", type=int, default=2, help='Number of stacked CNFs.')
parser.add_argument("--divergence_fn", type=str, default="approximate", choices=["brute_force", "approximate"])
parser.add_argument(
"--nonlinearity", type=str, default="softplus", choices=["tanh", "relu", "softplus", "elu", "swish"]
)
parser.add_argument("--conv", type=eval, default=True, choices=[True, False])
parser.add_argument('--solver', type=str, default='dopri5')
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--test_solver', type=str, default=None)
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument("--imagesize", type=int, default=None)
parser.add_argument("--alpha", type=float, default=-1.0)
parser.add_argument('--time_length', type=float, default=1.0)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--add_noise", type=eval, default=True, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=True, choices=[True, False])
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--ntimes', type=int, default=50)
parser.add_argument('--save', type=str, default='img_trajectory')
args = parser.parse_args()
BATCH_SIZE = 8 * 8
def add_noise(x):
"""
[0, 1] -> [0, 255] -> add noise -> [0, 1]
"""
if args.add_noise:
noise = x.new().resize_as_(x).uniform_()
x = x * 255 + noise
x = x / 256
return x
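# Hedged worked example (added for illustration): with add_noise enabled, a
# pixel value v in [0, 1] is dequantized as (255 * v + u) / 256 with u ~ U[0, 1),
# so v = 0 lands in [0, 1/256) and v = 1 lands in [255/256, 1).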
def get_dataset(args):
trans = lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise])
if args.data == "mnist":
im_dim = 1
im_size = 28 if args.imagesize is None else args.imagesize
train_set = dset.MNIST(root="./data", train=True, transform=trans(im_size), download=True)
elif args.data == "cifar10":
im_dim = 3
im_size = 32 if args.imagesize is None else args.imagesize
train_set = dset.CIFAR10(
root="./data", train=True, transform=tforms.Compose([
tforms.Resize(im_size),
tforms.RandomHorizontalFlip(),
tforms.ToTensor(),
add_noise,
]), download=True
)
elif args.data == 'lsun_church':
im_dim = 3
im_size = 64 if args.imagesize is None else args.imagesize
train_set = dset.LSUN(
'data', ['church_outdoor_train'], transform=tforms.Compose([
tforms.Resize(96),
tforms.RandomCrop(64),
tforms.Resize(im_size),
tforms.ToTensor(),
add_noise,
])
)
data_shape = (im_dim, im_size, im_size)
if not args.conv:
data_shape = (im_dim * im_size * im_size,)
return train_set, data_shape
def create_model(args, data_shape):
hidden_dims = tuple(map(int, args.dims.split(",")))
model = odenvp.ODENVP(
(BATCH_SIZE, *data_shape),
n_blocks=args.num_blocks,
intermediate_dims=hidden_dims,
nonlinearity=args.nonlinearity,
alpha=args.alpha,
cnf_kwargs={"T": args.time_length, "train_T": args.train_T},
)
if args.spectral_norm: add_spectral_norm(model)
set_cnf_options(args, model)
return model
if __name__ == '__main__':
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True)
# load dataset
train_set, data_shape = get_dataset(args)
# build model
model = create_model(args, data_shape)
print(model)
print("Number of trainable parameters: {}".format(count_parameters(model)))
# restore parameters
checkpt = torch.load(args.checkpt, map_location=lambda storage, loc: storage)
pruned_sd = {}
for k, v in checkpt['state_dict'].items():
pruned_sd[k.replace('odefunc.odefunc', 'odefunc')] = v
model.load_state_dict(pruned_sd)
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=BATCH_SIZE, shuffle=True)
data_samples, _ = train_loader.__iter__().__next__()
# cosine interpolate between 4 real images.
z = data_samples[:4]
print('Inferring base values for 4 example images.')
z = model(z)
phi0 = torch.linspace(0, 0.5, int(math.sqrt(BATCH_SIZE))) * math.pi
phi1 = torch.linspace(0, 0.5, int(math.sqrt(BATCH_SIZE))) * math.pi
phi0, phi1 = torch.meshgrid([phi0, phi1])
phi0, phi1 = phi0.contiguous().view(-1, 1), phi1.contiguous().view(-1, 1)
z = torch.cos(phi0) * (torch.cos(phi1) * z[0] + torch.sin(phi1) * z[1]) + \
torch.sin(phi0) * (torch.cos(phi1) * z[2] + torch.sin(phi1) * z[3])
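    # Hedged note (added for illustration): this is a two-angle trigonometric
    # interpolation; at the grid corners (phi0, phi1) in {0, pi/2}^2 it recovers
    # z[0], z[1], z[2], z[3] exactly, and interior points blend all four latents.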
print('Reconstructing images from latent interpolation.')
z = model(z, reverse=True)
non_cnf_layers = []
utils.makedirs(args.save)
img_idx = 0
def save_imgs_figure(xs):
global img_idx
save_image(
list(xs),
os.path.join(args.save, "img_{:05d}.jpg".format(img_idx)), nrow=int(math.sqrt(BATCH_SIZE)), normalize=True,
range=(0, 1)
)
img_idx += 1
class FactorOut(torch.nn.Module):
def __init__(self, factor_out):
super(FactorOut, self).__init__()
self.factor_out = factor_out
def forward(self, x, reverse=True):
assert reverse
T = x.shape[0] // self.factor_out.shape[0]
return torch.cat([x, self.factor_out.repeat(T, *([1] * (self.factor_out.ndimension() - 1)))], 1)
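    # Hedged note (added for illustration): FactorOut is a reverse-only shim
    # that re-attaches the dimensions the multiscale model factored out,
    # repeating the cached tensor once per visualization timestep so its batch
    # dimension matches the time-expanded trajectory.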
time_ratio = 1.0
print('Visualizing transformations.')
with torch.no_grad():
for idx, stacked_layers in enumerate(model.transforms):
for layer in stacked_layers.chain:
print(z.shape)
print(non_cnf_layers)
if isinstance(layer, layers.CNF):
# linspace over time, and visualize by reversing through previous non_cnf_layers.
cnf = layer
end_time = (cnf.sqrt_end_time * cnf.sqrt_end_time)
ntimes = int(args.ntimes * time_ratio)
integration_times = torch.linspace(0, end_time.item(), ntimes)
z_traj = cnf(z, integration_times=integration_times)
# reverse z(t) for all times to the input space
z_flatten = z_traj.view(ntimes * BATCH_SIZE, *z_traj.shape[2:])
for prev_layer in non_cnf_layers[::-1]:
z_flatten = prev_layer(z_flatten, reverse=True)
z_inv = z_flatten.view(ntimes, BATCH_SIZE, *data_shape)
for t in range(1, z_inv.shape[0]):
z_t = z_inv[t]
save_imgs_figure(z_t)
z = z_traj[-1]
else:
# update z and place in non_cnf_layers.
z = layer(z)
non_cnf_layers.append(layer)
if idx < len(model.transforms) - 1:
d = z.shape[1] // 2
z, factor_out = z[:, :d], z[:, d:]
non_cnf_layers.append(FactorOut(factor_out))
# After every factor out, we half the time for visualization.
time_ratio = time_ratio / 2
| 8,489 | 37.071749 | 119 | py |
steer | steer-master/ffjord/lib/priors.py | import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
eps = 1e-8
class Uniform(nn.Module):
def __init__(self, a=0, b=1):
        super(Uniform, self).__init__()
self.a = Variable(torch.Tensor([a]))
self.b = Variable(torch.Tensor([b]))
def _check_inputs(self, size, params):
if size is None and params is None:
raise ValueError(
'Either one of size or params should be provided.')
elif size is not None and params is not None:
a = params.select(-1, 0).expand(size)
b = params.select(-1, 1).expand(size)
return a, b
elif size is not None:
a = self.a.expand(size)
b = self.b.expand(size)
return a, b
elif params is not None:
a = params.select(-1, 0)
b = params.select(-1, 1)
return a, b
else:
raise ValueError(
'Given invalid inputs: size={}, params={})'.format(
size, params))
    def sample(self, size=None, params=None):
        a, b = self._check_inputs(size, params)
        # inverse-CDF sampling: Unif(a, b) = a + (b - a) * Unif(0, 1)
        u = Variable(torch.rand(a.size()).type_as(a.data))
        return a + (b - a) * u
    def log_density(self, sample, params=None):
        if params is not None:
            a, b = self._check_inputs(None, params)
        else:
            a, b = self._check_inputs(sample.size(), None)
        a = a.type_as(sample)
        b = b.type_as(sample)
        # log p(x) = -log(b - a) on [a, b]; -inf outside the support
        lpdf = -torch.log(b - a + eps)
        inside = (sample >= a) & (sample <= b)
        return torch.where(inside, lpdf, torch.full_like(lpdf, float('-inf')))
def get_params(self):
        return torch.cat([self.a, self.b])
@property
def nparams(self):
return 2
@property
def ndim(self):
return 1
@property
def is_reparameterizable(self):
return True
def __repr__(self):
        tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(
            self.a.data[0], self.b.data[0])
return tmpstr
class Normal(nn.Module):
"""Samples from a Normal distribution using the reparameterization trick.
"""
def __init__(self, mu=0, sigma=1):
super(Normal, self).__init__()
self.normalization = Variable(torch.Tensor([np.log(2 * np.pi)]))
self.mu = Variable(torch.Tensor([mu]))
self.logsigma = Variable(torch.Tensor([math.log(sigma)]))
def _check_inputs(self, size, mu_logsigma):
if size is None and mu_logsigma is None:
raise ValueError(
'Either one of size or params should be provided.')
elif size is not None and mu_logsigma is not None:
mu = mu_logsigma.select(-1, 0).expand(size)
logsigma = mu_logsigma.select(-1, 1).expand(size)
return mu, logsigma
elif size is not None:
mu = self.mu.expand(size)
logsigma = self.logsigma.expand(size)
return mu, logsigma
elif mu_logsigma is not None:
mu = mu_logsigma.select(-1, 0)
logsigma = mu_logsigma.select(-1, 1)
return mu, logsigma
else:
raise ValueError(
'Given invalid inputs: size={}, mu_logsigma={})'.format(
size, mu_logsigma))
def sample(self, size=None, params=None):
mu, logsigma = self._check_inputs(size, params)
std_z = Variable(torch.randn(mu.size()).type_as(mu.data))
sample = std_z * torch.exp(logsigma) + mu
return sample
def log_density(self, sample, params=None):
if params is not None:
mu, logsigma = self._check_inputs(None, params)
else:
mu, logsigma = self._check_inputs(sample.size(), None)
mu = mu.type_as(sample)
logsigma = logsigma.type_as(sample)
c = self.normalization.type_as(sample.data)
inv_sigma = torch.exp(-logsigma)
tmp = (sample - mu) * inv_sigma
return -0.5 * (tmp * tmp + 2 * logsigma + c)
def NLL(self, params, sample_params=None):
"""Analytically computes
E_N(mu_2,sigma_2^2) [ - log N(mu_1, sigma_1^2) ]
If mu_2, and sigma_2^2 are not provided, defaults to entropy.
"""
mu, logsigma = self._check_inputs(None, params)
if sample_params is not None:
sample_mu, sample_logsigma = self._check_inputs(None, sample_params)
else:
sample_mu, sample_logsigma = mu, logsigma
c = self.normalization.type_as(sample_mu.data)
nll = logsigma.mul(-2).exp() * (sample_mu - mu).pow(2) \
+ torch.exp(sample_logsigma.mul(2) - logsigma.mul(2)) + 2 * logsigma + c
return nll.mul(0.5)
def kld(self, params):
"""Computes KL(q||p) where q is the given distribution and p
is the standard Normal distribution.
"""
mu, logsigma = self._check_inputs(None, params)
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mean^2 - sigma^2)
kld = logsigma.mul(2).add(1) - mu.pow(2) - logsigma.exp().pow(2)
kld.mul_(-0.5)
return kld
def get_params(self):
return torch.cat([self.mu, self.logsigma])
@property
def nparams(self):
return 2
@property
def ndim(self):
return 1
@property
def is_reparameterizable(self):
return True
def __repr__(self):
tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(
self.mu.data[0], self.logsigma.exp().data[0])
return tmpstr
class Laplace(nn.Module):
"""Samples from a Laplace distribution using the reparameterization trick.
"""
def __init__(self, mu=0, scale=1):
super(Laplace, self).__init__()
self.normalization = Variable(torch.Tensor([-math.log(2)]))
self.mu = Variable(torch.Tensor([mu]))
self.logscale = Variable(torch.Tensor([math.log(scale)]))
def _check_inputs(self, size, mu_logscale):
if size is None and mu_logscale is None:
raise ValueError(
'Either one of size or params should be provided.')
elif size is not None and mu_logscale is not None:
mu = mu_logscale.select(-1, 0).expand(size)
logscale = mu_logscale.select(-1, 1).expand(size)
return mu, logscale
elif size is not None:
mu = self.mu.expand(size)
logscale = self.logscale.expand(size)
return mu, logscale
elif mu_logscale is not None:
mu = mu_logscale.select(-1, 0)
logscale = mu_logscale.select(-1, 1)
return mu, logscale
else:
raise ValueError(
'Given invalid inputs: size={}, mu_logscale={})'.format(
size, mu_logscale))
def sample(self, size=None, params=None):
mu, logscale = self._check_inputs(size, params)
scale = torch.exp(logscale)
# Unif(-0.5, 0.5)
u = Variable(torch.rand(mu.size()).type_as(mu.data)) - 0.5
sample = mu - scale * torch.sign(u) * torch.log(1 - 2 * torch.abs(u) + eps)
return sample
def log_density(self, sample, params=None):
if params is not None:
mu, logscale = self._check_inputs(None, params)
else:
mu, logscale = self._check_inputs(sample.size(), None)
mu = mu.type_as(sample)
logscale = logscale.type_as(sample)
c = self.normalization.type_as(sample.data)
inv_scale = torch.exp(-logscale)
ins_exp = - torch.abs(sample - mu) * inv_scale
return ins_exp + c - logscale
def get_params(self):
return torch.cat([self.mu, self.logscale])
@property
def nparams(self):
return 2
@property
def ndim(self):
return 1
@property
def is_reparameterizable(self):
return True
def __repr__(self):
tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(
self.mu.data[0], self.logscale.exp().data[0])
return tmpstr
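if __name__ == "__main__":
    # Illustrative sanity check (a sketch, not part of the library API): under
    # the default parameters (mu=0, scale=1) the hand-rolled log-densities
    # should agree with torch.distributions to floating-point precision.
    import torch.distributions as td
    x = torch.randn(5, 2)
    print((Normal().log_density(x) - td.Normal(0., 1.).log_prob(x)).abs().max().item())
    print((Laplace().log_density(x) - td.Laplace(0., 1.).log_prob(x)).abs().max().item())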
| 8,414 | 32.392857 | 84 | py |
steer | steer-master/ffjord/lib/spectral_norm.py | """
Spectral Normalization from https://arxiv.org/abs/1802.05957
"""
import types
import torch
from torch.nn.functional import normalize
POWER_ITERATION_FN = "spectral_norm_power_iteration"
class SpectralNorm(object):
def __init__(self, name='weight', dim=0, eps=1e-12):
self.name = name
self.dim = dim
self.eps = eps
def compute_weight(self, module, n_power_iterations):
if n_power_iterations < 0:
raise ValueError(
'Expected n_power_iterations to be non-negative, but '
'got n_power_iterations={}'.format(n_power_iterations)
)
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
v = getattr(module, self.name + '_v')
weight_mat = weight
if self.dim != 0:
# permute dim to front
weight_mat = weight_mat.permute(self.dim, * [d for d in range(weight_mat.dim()) if d != self.dim])
height = weight_mat.size(0)
weight_mat = weight_mat.reshape(height, -1)
with torch.no_grad():
for _ in range(n_power_iterations):
# Spectral norm of weight equals to `u^T W v`, where `u` and `v`
# are the first left and right singular vectors.
# This power iteration produces approximations of `u` and `v`.
v = normalize(torch.matmul(weight_mat.t(), u), dim=0, eps=self.eps)
u = normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)
setattr(module, self.name + '_u', u)
setattr(module, self.name + '_v', v)
sigma = torch.dot(u, torch.matmul(weight_mat, v))
weight = weight / sigma
setattr(module, self.name, weight)
def remove(self, module):
weight = getattr(module, self.name)
delattr(module, self.name)
delattr(module, self.name + '_u')
delattr(module, self.name + '_orig')
module.register_parameter(self.name, torch.nn.Parameter(weight))
def get_update_method(self, module):
def update_fn(module, n_power_iterations):
self.compute_weight(module, n_power_iterations)
return update_fn
def __call__(self, module, unused_inputs):
del unused_inputs
self.compute_weight(module, n_power_iterations=0)
# requires_grad might be either True or False during inference.
if not module.training:
r_g = getattr(module, self.name + '_orig').requires_grad
setattr(module, self.name, getattr(module, self.name).detach().requires_grad_(r_g))
@staticmethod
def apply(module, name, dim, eps):
fn = SpectralNorm(name, dim, eps)
weight = module._parameters[name]
height = weight.size(dim)
u = normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
v = normalize(weight.new_empty(int(weight.numel() / height)).normal_(0, 1), dim=0, eps=fn.eps)
delattr(module, fn.name)
module.register_parameter(fn.name + "_orig", weight)
# We still need to assign weight back as fn.name because all sorts of
# things may assume that it exists, e.g., when initializing weights.
# However, we can't directly assign as it could be an nn.Parameter and
# gets added as a parameter. Instead, we register weight.data as a
# buffer, which will cause weight to be included in the state dict
# and also supports nn.init due to shared storage.
module.register_buffer(fn.name, weight.data)
module.register_buffer(fn.name + "_u", u)
module.register_buffer(fn.name + "_v", v)
setattr(module, POWER_ITERATION_FN, types.MethodType(fn.get_update_method(module), module))
module.register_forward_pre_hook(fn)
return fn
def inplace_spectral_norm(module, name='weight', dim=None, eps=1e-12):
r"""Applies spectral normalization to a parameter in the given module.
.. math::
\mathbf{W} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})} \\
\sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}
Spectral normalization stabilizes the training of discriminators (critics)
    in Generative Adversarial Networks (GANs) by rescaling the weight tensor
with spectral norm :math:`\sigma` of the weight matrix calculated using
power iteration method. If the dimension of the weight tensor is greater
than 2, it is reshaped to 2D in power iteration method to get spectral
norm. This is implemented via a hook that calculates spectral norm and
rescales weight before every :meth:`~Module.forward` call.
See `Spectral Normalization for Generative Adversarial Networks`_ .
.. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
dim (int, optional): dimension corresponding to number of outputs,
the default is 0, except for modules that are instances of
ConvTranspose1/2/3d, when it is 1
eps (float, optional): epsilon for numerical stability in
calculating norms
Returns:
        The original module with the spectral norm hook
Example::
        >>> m = inplace_spectral_norm(nn.Linear(20, 40))
Linear (20 -> 40)
>>> m.weight_u.size()
torch.Size([20])
"""
if dim is None:
if isinstance(module, (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d)):
dim = 1
else:
dim = 0
SpectralNorm.apply(module, name, dim=dim, eps=eps)
return module
def remove_spectral_norm(module, name='weight'):
r"""Removes the spectral normalization reparameterization from a module.
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
Example:
        >>> m = inplace_spectral_norm(nn.Linear(40, 10))
>>> remove_spectral_norm(m)
"""
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNorm) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
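if __name__ == "__main__":
    # Illustrative usage sketch: wrap a Linear layer, refresh the singular-vector
    # estimates with one power iteration via the registered method, then call the
    # module as usual (the forward pre-hook rescales the weight by the estimated
    # spectral norm).
    m = inplace_spectral_norm(torch.nn.Linear(20, 40))
    getattr(m, POWER_ITERATION_FN)(n_power_iterations=1)
    y = m(torch.randn(8, 20))
    print(y.shape, m.weight_u.shape)  # torch.Size([8, 40]) torch.Size([40])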
| 6,512 | 38.957055 | 119 | py |
steer | steer-master/ffjord/lib/utils.py | import os
import math
from numbers import Number
import logging
import torch
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
def get_logger(logpath, filepath, package_files=[], displaying=True, saving=True, debug=False):
logger = logging.getLogger()
if debug:
level = logging.DEBUG
else:
level = logging.INFO
logger.setLevel(level)
if saving:
info_file_handler = logging.FileHandler(logpath, mode="a")
info_file_handler.setLevel(level)
logger.addHandler(info_file_handler)
if displaying:
console_handler = logging.StreamHandler()
console_handler.setLevel(level)
logger.addHandler(console_handler)
logger.info(filepath)
with open(filepath, "r") as f:
logger.info(f.read())
for f in package_files:
logger.info(f)
with open(f, "r") as package_f:
logger.info(package_f.read())
return logger
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class RunningAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
def inf_generator(iterable):
"""Allows training with DataLoaders in a single infinite loop:
for i, (x, y) in enumerate(inf_generator(train_loader)):
"""
iterator = iterable.__iter__()
while True:
try:
yield iterator.__next__()
except StopIteration:
iterator = iterable.__iter__()
def save_checkpoint(state, save, epoch):
if not os.path.exists(save):
os.makedirs(save)
filename = os.path.join(save, 'checkpt-%04d.pth' % epoch)
torch.save(state, filename)
def isnan(tensor):
return (tensor != tensor)
def logsumexp(value, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(torch.exp(value - m))
if isinstance(sum_exp, Number):
return m + math.log(sum_exp)
else:
return m + torch.log(sum_exp)
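if __name__ == "__main__":
    # Illustrative sanity check: logsumexp should match torch.logsumexp and stay
    # finite where a naive value.exp().sum(dim).log() would overflow.
    x = torch.tensor([[1000.0, 1000.0], [0.0, 0.0]])
    print(logsumexp(x, dim=1))        # tensor([1000.6931,    0.6931])
    print(torch.logsumexp(x, dim=1))  # same values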
| 3,046 | 24.822034 | 95 | py |
steer | steer-master/ffjord/lib/odenvp.py | import torch
import torch.nn as nn
import lib.layers as layers
from lib.layers.odefunc import ODEnet
import numpy as np
class ODENVP(nn.Module):
"""
Real NVP for image data. Will downsample the input until one of the
dimensions is less than or equal to 4.
Args:
input_size (tuple): 4D tuple of the input size.
n_scale (int): Number of scales for the representation z.
        n_blocks (int): Number of CNF blocks at each scale.
"""
def __init__(
self,
input_size,
n_scale=float('inf'),
n_blocks=2,
intermediate_dims=(32,),
nonlinearity="softplus",
squash_input=True,
alpha=0.05,
cnf_kwargs=None,
):
super(ODENVP, self).__init__()
self.n_scale = min(n_scale, self._calc_n_scale(input_size))
self.n_blocks = n_blocks
self.intermediate_dims = intermediate_dims
self.nonlinearity = nonlinearity
self.squash_input = squash_input
self.alpha = alpha
self.cnf_kwargs = cnf_kwargs if cnf_kwargs else {}
if not self.n_scale > 0:
            raise ValueError('Could not compute number of scales for input of size (%d,%d,%d,%d)' % input_size)
self.transforms = self._build_net(input_size)
self.dims = [o[1:] for o in self.calc_output_size(input_size)]
def _build_net(self, input_size):
_, c, h, w = input_size
transforms = []
for i in range(self.n_scale):
transforms.append(
StackedCNFLayers(
initial_size=(c, h, w),
idims=self.intermediate_dims,
squeeze=(i < self.n_scale - 1), # don't squeeze last layer
init_layer=(layers.LogitTransform(self.alpha) if self.alpha > 0 else layers.ZeroMeanTransform())
if self.squash_input and i == 0 else None,
n_blocks=self.n_blocks,
cnf_kwargs=self.cnf_kwargs,
nonlinearity=self.nonlinearity,
)
)
c, h, w = c * 2, h // 2, w // 2
return nn.ModuleList(transforms)
def get_regularization(self):
if len(self.regularization_fns) == 0:
return None
acc_reg_states = tuple([0.] * len(self.regularization_fns))
for module in self.modules():
if isinstance(module, layers.CNF):
acc_reg_states = tuple(
acc + reg for acc, reg in zip(acc_reg_states, module.get_regularization_states())
)
return sum(state * coeff for state, coeff in zip(acc_reg_states, self.regularization_coeffs))
def _calc_n_scale(self, input_size):
_, _, h, w = input_size
n_scale = 0
while h >= 4 and w >= 4:
n_scale += 1
h = h // 2
w = w // 2
return n_scale
def calc_output_size(self, input_size):
n, c, h, w = input_size
output_sizes = []
for i in range(self.n_scale):
if i < self.n_scale - 1:
c *= 2
h //= 2
w //= 2
output_sizes.append((n, c, h, w))
else:
output_sizes.append((n, c, h, w))
return tuple(output_sizes)
def forward(self, x, logpx=None, reverse=False):
if reverse:
return self._generate(x, logpx)
else:
return self._logdensity(x, logpx)
def _logdensity(self, x, logpx=None):
_logpx = torch.zeros(x.shape[0], 1).to(x) if logpx is None else logpx
out = []
for idx in range(len(self.transforms)):
x, _logpx = self.transforms[idx].forward(x, _logpx)
if idx < len(self.transforms) - 1:
d = x.size(1) // 2
x, factor_out = x[:, :d], x[:, d:]
else:
# last layer, no factor out
factor_out = x
out.append(factor_out)
out = [o.view(o.size()[0], -1) for o in out]
out = torch.cat(out, 1)
return out if logpx is None else (out, _logpx)
def _generate(self, z, logpz=None):
z = z.view(z.shape[0], -1)
zs = []
i = 0
for dims in self.dims:
s = np.prod(dims)
zs.append(z[:, i:i + s])
i += s
zs = [_z.view(_z.size()[0], *zsize) for _z, zsize in zip(zs, self.dims)]
_logpz = torch.zeros(zs[0].shape[0], 1).to(zs[0]) if logpz is None else logpz
z_prev, _logpz = self.transforms[-1](zs[-1], _logpz, reverse=True)
for idx in range(len(self.transforms) - 2, -1, -1):
z_prev = torch.cat((z_prev, zs[idx]), dim=1)
z_prev, _logpz = self.transforms[idx](z_prev, _logpz, reverse=True)
return z_prev if logpz is None else (z_prev, _logpz)
class StackedCNFLayers(layers.SequentialFlow):
def __init__(
self,
initial_size,
idims=(32,),
nonlinearity="softplus",
squeeze=True,
init_layer=None,
n_blocks=1,
cnf_kwargs={},
):
strides = tuple([1] + [1 for _ in idims])
chain = []
if init_layer is not None:
chain.append(init_layer)
def _make_odefunc(size):
net = ODEnet(idims, size, strides, True, layer_type="concat", nonlinearity=nonlinearity)
f = layers.ODEfunc(net)
return f
if squeeze:
c, h, w = initial_size
after_squeeze_size = c * 4, h // 2, w // 2
pre = [layers.CNF(_make_odefunc(initial_size), **cnf_kwargs) for _ in range(n_blocks)]
post = [layers.CNF(_make_odefunc(after_squeeze_size), **cnf_kwargs) for _ in range(n_blocks)]
chain += pre + [layers.SqueezeLayer(2)] + post
else:
chain += [layers.CNF(_make_odefunc(initial_size), **cnf_kwargs) for _ in range(n_blocks)]
super(StackedCNFLayers, self).__init__(chain)
| 6,008 | 34.556213 | 116 | py |
steer | steer-master/ffjord/lib/datasets.py | import torch
class Dataset(object):
def __init__(self, loc, transform=None):
self.dataset = torch.load(loc).float().div(255)
self.transform = transform
def __len__(self):
return self.dataset.size(0)
@property
def ndim(self):
return self.dataset.size(1)
def __getitem__(self, index):
x = self.dataset[index]
x = self.transform(x) if self.transform is not None else x
return x, 0
class CelebA(Dataset):
TRAIN_LOC = 'data/celeba/celeba_train.pth'
VAL_LOC = 'data/celeba/celeba_val.pth'
def __init__(self, train=True, transform=None):
        super(CelebA, self).__init__(self.TRAIN_LOC if train else self.VAL_LOC, transform)
| 725 | 24.928571 | 97 | py |
steer | steer-master/ffjord/lib/multiscale_parallel.py | import torch
import torch.nn as nn
import lib.layers as layers
from lib.layers.odefunc import ODEnet
import numpy as np
class MultiscaleParallelCNF(nn.Module):
"""
CNF model for image data.
Squeezes the input into multiple scales, applies different conv-nets at each scale
and adds the resulting gradients
Will downsample the input until one of the
dimensions is less than or equal to 4.
Args:
input_size (tuple): 4D tuple of the input size.
n_scale (int): Number of scales for the representation z.
        n_blocks (int): Number of CNF blocks.
"""
def __init__(
self,
input_size,
n_scale=float('inf'),
n_blocks=1,
intermediate_dims=(32,),
alpha=-1,
time_length=1.,
):
super(MultiscaleParallelCNF, self).__init__()
print(input_size)
self.n_scale = min(n_scale, self._calc_n_scale(input_size))
self.n_blocks = n_blocks
self.intermediate_dims = intermediate_dims
self.alpha = alpha
self.time_length = time_length
if not self.n_scale > 0:
            raise ValueError('Could not compute number of scales for input of size (%d,%d,%d,%d)' % input_size)
self.transforms = self._build_net(input_size)
def _build_net(self, input_size):
_, c, h, w = input_size
transforms = []
transforms.append(
ParallelCNFLayers(
initial_size=(c, h, w),
idims=self.intermediate_dims,
init_layer=(layers.LogitTransform(self.alpha) if self.alpha > 0 else layers.ZeroMeanTransform()),
n_blocks=self.n_blocks,
time_length=self.time_length
)
)
return nn.ModuleList(transforms)
def get_regularization(self):
if len(self.regularization_fns) == 0:
return None
acc_reg_states = tuple([0.] * len(self.regularization_fns))
for module in self.modules():
if isinstance(module, layers.CNF):
acc_reg_states = tuple(
acc + reg for acc, reg in zip(acc_reg_states, module.get_regularization_states())
)
return sum(state * coeff for state, coeff in zip(acc_reg_states, self.regularization_coeffs))
def _calc_n_scale(self, input_size):
_, _, h, w = input_size
n_scale = 0
while h >= 4 and w >= 4:
n_scale += 1
h = h // 2
w = w // 2
return n_scale
def calc_output_size(self, input_size):
n, c, h, w = input_size
output_sizes = []
for i in range(self.n_scale):
if i < self.n_scale - 1:
c *= 2
h //= 2
w //= 2
output_sizes.append((n, c, h, w))
else:
output_sizes.append((n, c, h, w))
return tuple(output_sizes)
def forward(self, x, logpx=None, reverse=False):
if reverse:
return self._generate(x, logpx)
else:
return self._logdensity(x, logpx)
def _logdensity(self, x, logpx=None):
_logpx = torch.zeros(x.shape[0], 1).to(x) if logpx is None else logpx
for idx in range(len(self.transforms)):
x, _logpx = self.transforms[idx].forward(x, _logpx)
return x if logpx is None else (x, _logpx)
def _generate(self, z, logpz=None):
_logpz = torch.zeros(z.shape[0], 1).to(z) if logpz is None else logpz
for idx in reversed(range(len(self.transforms))):
z, _logpz = self.transforms[idx](z, _logpz, reverse=True)
return z if logpz is None else (z, _logpz)
class ParallelSumModules(nn.Module):
def __init__(self, models):
super(ParallelSumModules, self).__init__()
self.models = nn.ModuleList(models)
self.cpu = not torch.cuda.is_available()
def forward(self, t, y):
out = sum(model(t, y) for model in self.models)
return out
class ParallelCNFLayers(layers.SequentialFlow):
def __init__(
self,
initial_size,
idims=(32,),
scales=4,
init_layer=None,
n_blocks=1,
time_length=1.,
):
strides = tuple([1] + [1 for _ in idims])
chain = []
if init_layer is not None:
chain.append(init_layer)
get_size = lambda s: (initial_size[0] * (4**s), initial_size[1] // (2**s), initial_size[2] // (2**s))
def _make_odefunc():
nets = [ODEnet(idims, get_size(scale), strides, True, layer_type="concat", num_squeeze=scale)
for scale in range(scales)]
net = ParallelSumModules(nets)
f = layers.ODEfunc(net)
return f
chain += [layers.CNF(_make_odefunc(), T=time_length) for _ in range(n_blocks)]
super(ParallelCNFLayers, self).__init__(chain)
if __name__ == "__main__":
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cnfs = MultiscaleParallelCNF((13, 3, 32, 32)).to(device)
t = torch.randn(13, 3, 32, 32).to(device)
out = cnfs(t, logpx=None)
print("done") | 5,203 | 31.525 | 113 | py |
steer | steer-master/ffjord/lib/custom_optimizers.py | import math
import torch
from torch.optim.optimizer import Optimizer
class Adam(Optimizer):
"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1**state['step']
bias_correction2 = 1 - beta2**state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
                    p.data.add_(-step_size * group['weight_decay'], p.data)
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
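if __name__ == "__main__":
    # Illustrative sanity check (a sketch, not a benchmark): minimize ||x||^2
    # with this Adam variant; the loss should decay towards zero.
    x = torch.nn.Parameter(torch.ones(3))
    opt = Adam([x], lr=0.1)
    for _ in range(200):
        opt.zero_grad()
        loss = (x * x).sum()
        loss.backward()
        opt.step()
    print(loss.item())  # close to 0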
| 4,597 | 41.574074 | 116 | py |
steer | steer-master/ffjord/lib/visualize_flow.py | import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import torch
LOW = -4
HIGH = 4
def plt_potential_func(potential, ax, npts=100, title="$p(x)$"):
"""
Args:
potential: computes U(z_k) given z_k
"""
xside = np.linspace(LOW, HIGH, npts)
yside = np.linspace(LOW, HIGH, npts)
xx, yy = np.meshgrid(xside, yside)
z = np.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])
z = torch.Tensor(z)
u = potential(z).cpu().numpy()
p = np.exp(-u).reshape(npts, npts)
plt.pcolormesh(xx, yy, p)
ax.invert_yaxis()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title(title)
def plt_flow(prior_logdensity, transform, ax, npts=100, title="$q(x)$", device="cpu"):
"""
Args:
transform: computes z_k and log(q_k) given z_0
"""
side = np.linspace(LOW, HIGH, npts)
xx, yy = np.meshgrid(side, side)
z = np.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])
z = torch.tensor(z, requires_grad=True).type(torch.float32).to(device)
logqz = prior_logdensity(z)
logqz = torch.sum(logqz, dim=1)[:, None]
z, logqz = transform(z, logqz)
logqz = torch.sum(logqz, dim=1)[:, None]
xx = z[:, 0].cpu().numpy().reshape(npts, npts)
yy = z[:, 1].cpu().numpy().reshape(npts, npts)
qz = np.exp(logqz.cpu().numpy()).reshape(npts, npts)
plt.pcolormesh(xx, yy, qz)
ax.set_xlim(LOW, HIGH)
ax.set_ylim(LOW, HIGH)
cmap = matplotlib.cm.get_cmap(None)
ax.set_facecolor(cmap(0.))
ax.invert_yaxis()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title(title)
def plt_flow_density(prior_logdensity, inverse_transform, ax, npts=100, memory=100, title="$q(x)$", device="cpu"):
side = np.linspace(LOW, HIGH, npts)
xx, yy = np.meshgrid(side, side)
x = np.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])
x = torch.from_numpy(x).type(torch.float32).to(device)
zeros = torch.zeros(x.shape[0], 1).to(x)
z, delta_logp = [], []
inds = torch.arange(0, x.shape[0]).to(torch.int64)
for ii in torch.split(inds, int(memory**2)):
z_, delta_logp_ = inverse_transform(x[ii], zeros[ii])
z.append(z_)
delta_logp.append(delta_logp_)
z = torch.cat(z, 0)
delta_logp = torch.cat(delta_logp, 0)
logpz = prior_logdensity(z).view(z.shape[0], -1).sum(1, keepdim=True) # logp(z)
logpx = logpz - delta_logp
px = np.exp(logpx.cpu().numpy()).reshape(npts, npts)
ax.imshow(px)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title(title)
def plt_flow_samples(prior_sample, transform, ax, npts=100, memory=100, title="$x ~ q(x)$", device="cpu"):
z = prior_sample(npts * npts, 2).type(torch.float32).to(device)
zk = []
inds = torch.arange(0, z.shape[0]).to(torch.int64)
for ii in torch.split(inds, int(memory**2)):
zk.append(transform(z[ii]))
zk = torch.cat(zk, 0).cpu().numpy()
ax.hist2d(zk[:, 0], zk[:, 1], range=[[LOW, HIGH], [LOW, HIGH]], bins=npts)
ax.invert_yaxis()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title(title)
def plt_samples(samples, ax, npts=100, title="$x ~ p(x)$"):
ax.hist2d(samples[:, 0], samples[:, 1], range=[[LOW, HIGH], [LOW, HIGH]], bins=npts)
ax.invert_yaxis()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_title(title)
def visualize_transform(
potential_or_samples, prior_sample, prior_density, transform=None, inverse_transform=None, samples=True, npts=100,
memory=100, device="cpu"
):
"""Produces visualization for the model density and samples from the model."""
plt.clf()
ax = plt.subplot(1, 3, 1, aspect="equal")
if samples:
plt_samples(potential_or_samples, ax, npts=npts)
else:
plt_potential_func(potential_or_samples, ax, npts=npts)
ax = plt.subplot(1, 3, 2, aspect="equal")
if inverse_transform is None:
plt_flow(prior_density, transform, ax, npts=npts, device=device)
else:
plt_flow_density(prior_density, inverse_transform, ax, npts=npts, memory=memory, device=device)
ax = plt.subplot(1, 3, 3, aspect="equal")
if transform is not None:
plt_flow_samples(prior_sample, transform, ax, npts=npts, memory=memory, device=device)
| 4,341 | 31.646617 | 118 | py |
steer | steer-master/ffjord/lib/layers/squeeze.py | import torch.nn as nn
__all__ = ['SqueezeLayer']
class SqueezeLayer(nn.Module):
def __init__(self, downscale_factor):
super(SqueezeLayer, self).__init__()
self.downscale_factor = downscale_factor
def forward(self, x, logpx=None, reverse=False):
if reverse:
return self._upsample(x, logpx)
else:
return self._downsample(x, logpx)
def _downsample(self, x, logpx=None):
squeeze_x = squeeze(x, self.downscale_factor)
if logpx is None:
return squeeze_x
else:
return squeeze_x, logpx
def _upsample(self, y, logpy=None):
unsqueeze_y = unsqueeze(y, self.downscale_factor)
if logpy is None:
return unsqueeze_y
else:
return unsqueeze_y, logpy
def unsqueeze(input, upscale_factor=2):
'''
[:, C*r^2, H, W] -> [:, C, H*r, W*r]
'''
batch_size, in_channels, in_height, in_width = input.size()
out_channels = in_channels // (upscale_factor**2)
out_height = in_height * upscale_factor
out_width = in_width * upscale_factor
input_view = input.contiguous().view(batch_size, out_channels, upscale_factor, upscale_factor, in_height, in_width)
output = input_view.permute(0, 1, 4, 2, 5, 3).contiguous()
return output.view(batch_size, out_channels, out_height, out_width)
def squeeze(input, downscale_factor=2):
'''
[:, C, H*r, W*r] -> [:, C*r^2, H, W]
'''
batch_size, in_channels, in_height, in_width = input.size()
out_channels = in_channels * (downscale_factor**2)
out_height = in_height // downscale_factor
out_width = in_width // downscale_factor
input_view = input.contiguous().view(
batch_size, in_channels, out_height, downscale_factor, out_width, downscale_factor
)
output = input_view.permute(0, 1, 3, 5, 2, 4).contiguous()
return output.view(batch_size, out_channels, out_height, out_width)
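if __name__ == "__main__":
    # Illustrative sanity check: squeeze and unsqueeze are exact inverses and
    # trade spatial resolution for channels.
    import torch
    x = torch.randn(2, 3, 8, 8)
    y = squeeze(x, 2)
    print(y.shape)  # torch.Size([2, 12, 4, 4])
    assert torch.equal(unsqueeze(y, 2), x)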
| 1,955 | 29.5625 | 119 | py |
steer | steer-master/ffjord/lib/layers/container.py | import torch.nn as nn
class SequentialFlow(nn.Module):
"""A generalized nn.Sequential container for normalizing flows.
"""
def __init__(self, layersList):
super(SequentialFlow, self).__init__()
self.chain = nn.ModuleList(layersList)
def forward(self, x, logpx=None, reverse=False, inds=None):
if inds is None:
if reverse:
inds = range(len(self.chain) - 1, -1, -1)
else:
inds = range(len(self.chain))
if logpx is None:
for i in inds:
x = self.chain[i](x, reverse=reverse)
return x
else:
for i in inds:
x, logpx = self.chain[i](x, logpx, reverse=reverse)
return x, logpx
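if __name__ == "__main__":
    # Illustrative usage sketch with a toy volume-preserving layer; the _Shift
    # class below exists only for this demo and is not part of the library.
    import torch
    class _Shift(nn.Module):
        def forward(self, x, logpx=None, reverse=False):
            y = x - 1.0 if reverse else x + 1.0  # |det J| = 1, so logpx is unchanged
            return y if logpx is None else (y, logpx)
    flow = SequentialFlow([_Shift(), _Shift()])
    x = torch.zeros(4, 2)
    z = flow(x)  # all entries 2.0
    assert torch.equal(flow(z, reverse=True), x)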
| 766 | 27.407407 | 67 | py |
steer | steer-master/ffjord/lib/layers/norm_flows.py | import math
import torch
import torch.nn as nn
from torch.autograd import grad
class PlanarFlow(nn.Module):
def __init__(self, nd=1):
super(PlanarFlow, self).__init__()
self.nd = nd
self.activation = torch.tanh
self.register_parameter('u', nn.Parameter(torch.randn(self.nd)))
self.register_parameter('w', nn.Parameter(torch.randn(self.nd)))
self.register_parameter('b', nn.Parameter(torch.randn(1)))
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.nd)
self.u.data.uniform_(-stdv, stdv)
self.w.data.uniform_(-stdv, stdv)
self.b.data.fill_(0)
self.make_invertible()
def make_invertible(self):
u = self.u.data
w = self.w.data
dot = torch.dot(u, w)
m = -1 + math.log(1 + math.exp(dot))
du = (m - dot) / torch.norm(w) * w
u = u + du
self.u.data = u
    def forward(self, z, logp=None, reverse=False):
        """Computes f(z) and log q(f(z))"""
        assert not reverse, 'Planar normalizing flow cannot be reversed.'
        f = self.sample(z)
        if logp is not None:
            # the Jacobian correction is evaluated at the input z
            qf = self.log_density(z, logp)
            return f, qf
        else:
            return f
def sample(self, z):
"""Computes f(z)"""
h = self.activation(torch.mm(z, self.w.view(self.nd, 1)) + self.b)
output = z + self.u.expand_as(z) * h
return output
def _detgrad(self, z):
"""Computes |det df/dz|"""
with torch.enable_grad():
z = z.requires_grad_(True)
h = self.activation(torch.mm(z, self.w.view(self.nd, 1)) + self.b)
psi = grad(h, z, grad_outputs=torch.ones_like(h), create_graph=True, only_inputs=True)[0]
u_dot_psi = torch.mm(psi, self.u.view(self.nd, 1))
detgrad = 1 + u_dot_psi
return detgrad
def log_density(self, z, logqz):
"""Computes log density of the flow given the log density of z"""
return logqz - torch.log(self._detgrad(z) + 1e-8)
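if __name__ == "__main__":
    # Illustrative usage sketch: push samples through the flow and track the
    # log-density correction (the Jacobian term is evaluated at the input z).
    torch.manual_seed(0)
    flow = PlanarFlow(nd=2)
    z = torch.randn(5, 2)
    logq = torch.zeros(5, 1)
    f, logqf = flow(z, logq)
    print(f.shape, logqf.shape)  # torch.Size([5, 2]) torch.Size([5, 1])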
| 2,240 | 31.014286 | 101 | py |
steer | steer-master/ffjord/lib/layers/cnf.py | import torch
import torch.nn as nn
#from torchdiffeq import odeint_adjoint_stochastic_end_v2
from torchdiffeq import odeint_adjoint_stochastic_end_v3
from torchdiffeq import odeint_adjoint_stochastic_end_normal
from torchdiffeq import odeint_adjoint as odeint
#from torchdiffeq import odeint
from .wrappers.cnf_regularization import RegularizedODEfunc
__all__ = ["CNF"]
class CNF(nn.Module):
def __init__(self, odefunc, T=1.0, train_T=False, regularization_fns=None, solver='dopri5', atol=1e-5, rtol=1e-5):
super(CNF, self).__init__()
if train_T:
self.register_parameter("sqrt_end_time", nn.Parameter(torch.sqrt(torch.tensor(T))))
else:
self.register_buffer("sqrt_end_time", torch.sqrt(torch.tensor(T)))
nreg = 0
if regularization_fns is not None:
odefunc = RegularizedODEfunc(odefunc, regularization_fns)
nreg = len(regularization_fns)
self.odefunc = odefunc
self.nreg = nreg
self.regularization_states = None
self.solver = solver
self.atol = atol
self.rtol = rtol
self.test_solver = solver
self.test_atol = atol
self.test_rtol = rtol
self.solver_options = {}
def forward(self, z, logpz=None, integration_times=None, reverse=False):
#print("integration_times")
#print(integration_times)
if logpz is None:
_logpz = torch.zeros(z.shape[0], 1).to(z)
else:
_logpz = logpz
if integration_times is None:
integration_times = torch.tensor([0.0, self.sqrt_end_time * self.sqrt_end_time]).to(z)
if reverse:
integration_times = _flip(integration_times, 0)
# Refresh the odefunc statistics.
self.odefunc.before_odeint()
# Add regularization states.
reg_states = tuple(torch.tensor(0).to(z) for _ in range(self.nreg))
if self.training:
state_t = odeint_adjoint_stochastic_end_v3(
#state_t = odeint_adjoint_stochastic_end_normal(
#state_t = odeint(
self.odefunc,
(z, _logpz) + reg_states,
integration_times.to(z),
atol=[self.atol, self.atol] + [1e20] * len(reg_states) if self.solver == 'dopri5' else self.atol,
rtol=[self.rtol, self.rtol] + [1e20] * len(reg_states) if self.solver == 'dopri5' else self.rtol,
method=self.solver,
options=self.solver_options,
#std = 0.25
min_length = 0.25 #0.001
)
else:
state_t = odeint(
self.odefunc,
(z, _logpz),
integration_times.to(z),
atol=self.test_atol,
rtol=self.test_rtol,
method=self.test_solver,
)
if len(integration_times) == 2:
state_t = tuple(s[1] for s in state_t)
z_t, logpz_t = state_t[:2]
self.regularization_states = state_t[2:]
if logpz is not None:
return z_t, logpz_t
else:
return z_t
def get_regularization_states(self):
reg_states = self.regularization_states
self.regularization_states = None
return reg_states
def num_evals(self):
return self.odefunc._num_evals.item()
def _flip(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device)
return x[tuple(indices)]
| 3,561 | 32.92381 | 118 | py |
steer | steer-master/ffjord/lib/layers/odefunc.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import diffeq_layers
from .squeeze import squeeze, unsqueeze
__all__ = ["ODEnet", "AutoencoderDiffEqNet", "ODEfunc", "AutoencoderODEfunc"]
def divergence_bf(dx, y, **unused_kwargs):
sum_diag = 0.
for i in range(y.shape[1]):
sum_diag += torch.autograd.grad(dx[:, i].sum(), y, create_graph=True)[0].contiguous()[:, i].contiguous()
return sum_diag.contiguous()
# def divergence_bf(f, y, **unused_kwargs):
# jac = _get_minibatch_jacobian(f, y)
# diagonal = jac.view(jac.shape[0], -1)[:, ::jac.shape[1]]
# return torch.sum(diagonal, 1)
def _get_minibatch_jacobian(y, x):
"""Computes the Jacobian of y wrt x assuming minibatch-mode.
Args:
y: (N, ...) with a total of D_y elements in ...
x: (N, ...) with a total of D_x elements in ...
Returns:
The minibatch Jacobian matrix of shape (N, D_y, D_x)
"""
assert y.shape[0] == x.shape[0]
y = y.view(y.shape[0], -1)
# Compute Jacobian row by row.
jac = []
for j in range(y.shape[1]):
dy_j_dx = torch.autograd.grad(y[:, j], x, torch.ones_like(y[:, j]), retain_graph=True,
create_graph=True)[0].view(x.shape[0], -1)
jac.append(torch.unsqueeze(dy_j_dx, 1))
jac = torch.cat(jac, 1)
return jac
def divergence_approx(f, y, e=None):
e_dzdx = torch.autograd.grad(f, y, e, create_graph=True)[0]
e_dzdx_e = e_dzdx * e
approx_tr_dzdx = e_dzdx_e.view(y.shape[0], -1).sum(dim=1)
return approx_tr_dzdx
def sample_rademacher_like(y):
return torch.randint(low=0, high=2, size=y.shape).to(y) * 2 - 1
def sample_gaussian_like(y):
return torch.randn_like(y)
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
self.beta = nn.Parameter(torch.tensor(1.0))
def forward(self, x):
return x * torch.sigmoid(self.beta * x)
class Lambda(nn.Module):
def __init__(self, f):
super(Lambda, self).__init__()
self.f = f
def forward(self, x):
return self.f(x)
NONLINEARITIES = {
"tanh": nn.Tanh(),
"relu": nn.ReLU(),
"softplus": nn.Softplus(),
"elu": nn.ELU(),
"swish": Swish(),
"square": Lambda(lambda x: x**2),
"identity": Lambda(lambda x: x),
}
class ODEnet(nn.Module):
"""
Helper class to make neural nets for use in continuous normalizing flows
"""
def __init__(
self, hidden_dims, input_shape, strides, conv, layer_type="concat", nonlinearity="softplus", num_squeeze=0
):
super(ODEnet, self).__init__()
self.num_squeeze = num_squeeze
if conv:
assert len(strides) == len(hidden_dims) + 1
base_layer = {
"ignore": diffeq_layers.IgnoreConv2d,
"hyper": diffeq_layers.HyperConv2d,
"squash": diffeq_layers.SquashConv2d,
"concat": diffeq_layers.ConcatConv2d,
"concat_v2": diffeq_layers.ConcatConv2d_v2,
"concatsquash": diffeq_layers.ConcatSquashConv2d,
"blend": diffeq_layers.BlendConv2d,
"concatcoord": diffeq_layers.ConcatCoordConv2d,
}[layer_type]
else:
strides = [None] * (len(hidden_dims) + 1)
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"concat_v2": diffeq_layers.ConcatLinear_v2,
"concatsquash": diffeq_layers.ConcatSquashLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
# build layers and add them
layers = []
activation_fns = []
hidden_shape = input_shape
for dim_out, stride in zip(hidden_dims + (input_shape[0],), strides):
if stride is None:
layer_kwargs = {}
elif stride == 1:
layer_kwargs = {"ksize": 3, "stride": 1, "padding": 1, "transpose": False}
elif stride == 2:
layer_kwargs = {"ksize": 4, "stride": 2, "padding": 1, "transpose": False}
elif stride == -2:
layer_kwargs = {"ksize": 4, "stride": 2, "padding": 1, "transpose": True}
else:
raise ValueError('Unsupported stride: {}'.format(stride))
layer = base_layer(hidden_shape[0], dim_out, **layer_kwargs)
layers.append(layer)
activation_fns.append(NONLINEARITIES[nonlinearity])
hidden_shape = list(copy.copy(hidden_shape))
hidden_shape[0] = dim_out
if stride == 2:
hidden_shape[1], hidden_shape[2] = hidden_shape[1] // 2, hidden_shape[2] // 2
elif stride == -2:
hidden_shape[1], hidden_shape[2] = hidden_shape[1] * 2, hidden_shape[2] * 2
self.layers = nn.ModuleList(layers)
self.activation_fns = nn.ModuleList(activation_fns[:-1])
def forward(self, t, y):
dx = y
# squeeze
for _ in range(self.num_squeeze):
dx = squeeze(dx, 2)
for l, layer in enumerate(self.layers):
dx = layer(t, dx)
# if not last layer, use nonlinearity
if l < len(self.layers) - 1:
dx = self.activation_fns[l](dx)
# unsqueeze
for _ in range(self.num_squeeze):
dx = unsqueeze(dx, 2)
return dx
class AutoencoderDiffEqNet(nn.Module):
"""
Helper class to make neural nets for use in continuous normalizing flows
"""
def __init__(self, hidden_dims, input_shape, strides, conv, layer_type="concat", nonlinearity="softplus"):
super(AutoencoderDiffEqNet, self).__init__()
assert layer_type in ("ignore", "hyper", "concat", "concatcoord", "blend")
assert nonlinearity in ("tanh", "relu", "softplus", "elu")
self.nonlinearity = {"tanh": F.tanh, "relu": F.relu, "softplus": F.softplus, "elu": F.elu}[nonlinearity]
if conv:
assert len(strides) == len(hidden_dims) + 1
base_layer = {
"ignore": diffeq_layers.IgnoreConv2d,
"hyper": diffeq_layers.HyperConv2d,
"squash": diffeq_layers.SquashConv2d,
"concat": diffeq_layers.ConcatConv2d,
"blend": diffeq_layers.BlendConv2d,
"concatcoord": diffeq_layers.ConcatCoordConv2d,
}[layer_type]
else:
strides = [None] * (len(hidden_dims) + 1)
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
# build layers and add them
encoder_layers = []
decoder_layers = []
hidden_shape = input_shape
for i, (dim_out, stride) in enumerate(zip(hidden_dims + (input_shape[0],), strides)):
if i <= len(hidden_dims) // 2:
layers = encoder_layers
else:
layers = decoder_layers
if stride is None:
layer_kwargs = {}
elif stride == 1:
layer_kwargs = {"ksize": 3, "stride": 1, "padding": 1, "transpose": False}
elif stride == 2:
layer_kwargs = {"ksize": 4, "stride": 2, "padding": 1, "transpose": False}
elif stride == -2:
layer_kwargs = {"ksize": 4, "stride": 2, "padding": 1, "transpose": True}
else:
raise ValueError('Unsupported stride: {}'.format(stride))
layers.append(base_layer(hidden_shape[0], dim_out, **layer_kwargs))
hidden_shape = list(copy.copy(hidden_shape))
hidden_shape[0] = dim_out
if stride == 2:
hidden_shape[1], hidden_shape[2] = hidden_shape[1] // 2, hidden_shape[2] // 2
elif stride == -2:
hidden_shape[1], hidden_shape[2] = hidden_shape[1] * 2, hidden_shape[2] * 2
self.encoder_layers = nn.ModuleList(encoder_layers)
self.decoder_layers = nn.ModuleList(decoder_layers)
def forward(self, t, y):
h = y
for layer in self.encoder_layers:
h = self.nonlinearity(layer(t, h))
dx = h
for i, layer in enumerate(self.decoder_layers):
dx = layer(t, dx)
# if not last layer, use nonlinearity
if i < len(self.decoder_layers) - 1:
dx = self.nonlinearity(dx)
return h, dx
class ODEfunc(nn.Module):
def __init__(self, diffeq, divergence_fn="approximate", residual=False, rademacher=False):
super(ODEfunc, self).__init__()
assert divergence_fn in ("brute_force", "approximate")
# self.diffeq = diffeq_layers.wrappers.diffeq_wrapper(diffeq)
self.diffeq = diffeq
self.residual = residual
self.rademacher = rademacher
if divergence_fn == "brute_force":
self.divergence_fn = divergence_bf
elif divergence_fn == "approximate":
self.divergence_fn = divergence_approx
self.register_buffer("_num_evals", torch.tensor(0.))
def before_odeint(self, e=None):
self._e = e
self._num_evals.fill_(0)
def num_evals(self):
return self._num_evals.item()
def forward(self, t, states):
assert len(states) >= 2
y = states[0]
# increment num evals
self._num_evals += 1
# convert to tensor
t = torch.tensor(t).type_as(y)
batchsize = y.shape[0]
# Sample and fix the noise.
if self._e is None:
if self.rademacher:
self._e = sample_rademacher_like(y)
else:
self._e = sample_gaussian_like(y)
with torch.set_grad_enabled(True):
y.requires_grad_(True)
t.requires_grad_(True)
for s_ in states[2:]:
s_.requires_grad_(True)
dy = self.diffeq(t, y, *states[2:])
# Hack for 2D data to use brute force divergence computation.
if not self.training and dy.view(dy.shape[0], -1).shape[1] == 2:
divergence = divergence_bf(dy, y).view(batchsize, 1)
else:
divergence = self.divergence_fn(dy, y, e=self._e).view(batchsize, 1)
if self.residual:
dy = dy - y
divergence -= torch.ones_like(divergence) * torch.tensor(np.prod(y.shape[1:]), dtype=torch.float32
).to(divergence)
return tuple([dy, -divergence] + [torch.zeros_like(s_).requires_grad_(True) for s_ in states[2:]])
class AutoencoderODEfunc(nn.Module):
def __init__(self, autoencoder_diffeq, divergence_fn="approximate", residual=False, rademacher=False):
        assert divergence_fn in ("approximate",), "Only approximate divergence supported at the moment. (TODO)"
assert isinstance(autoencoder_diffeq, AutoencoderDiffEqNet)
super(AutoencoderODEfunc, self).__init__()
self.residual = residual
self.autoencoder_diffeq = autoencoder_diffeq
self.rademacher = rademacher
self.register_buffer("_num_evals", torch.tensor(0.))
def before_odeint(self, e=None):
self._e = e
self._num_evals.fill_(0)
def forward(self, t, y_and_logpy):
y, _ = y_and_logpy # remove logpy
# increment num evals
self._num_evals += 1
# convert to tensor
t = torch.tensor(t).type_as(y)
batchsize = y.shape[0]
with torch.set_grad_enabled(True):
y.requires_grad_(True)
t.requires_grad_(True)
h, dy = self.autoencoder_diffeq(t, y)
# Sample and fix the noise.
if self._e is None:
if self.rademacher:
self._e = sample_rademacher_like(h)
else:
self._e = sample_gaussian_like(h)
e_vjp_dhdy = torch.autograd.grad(h, y, self._e, create_graph=True)[0]
e_vjp_dfdy = torch.autograd.grad(dy, h, e_vjp_dhdy, create_graph=True)[0]
divergence = torch.sum((e_vjp_dfdy * self._e).view(batchsize, -1), 1, keepdim=True)
if self.residual:
dy = dy - y
divergence -= torch.ones_like(divergence) * torch.tensor(np.prod(y.shape[1:]), dtype=torch.float32
).to(divergence)
return dy, -divergence
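if __name__ == "__main__":
    # Illustrative sanity check (run as part of the package, e.g. with
    # `python -m ...`, since this module uses relative imports): for a linear
    # map f(y) = y A^T the exact divergence is trace(A), so the Hutchinson
    # estimator divergence_approx should match divergence_bf in expectation.
    torch.manual_seed(0)
    A = torch.randn(5, 5)
    y = torch.randn(1, 5, requires_grad=True)
    f = y @ A.t()
    exact = divergence_bf(f, y)
    est = torch.stack([divergence_approx(f, y, e=sample_rademacher_like(y))
                       for _ in range(1000)]).mean(0)
    print(exact.item(), est.item(), torch.trace(A).item())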
| 12,985 | 34.675824 | 114 | py |
steer | steer-master/ffjord/lib/layers/resnet.py | import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, dim):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.GroupNorm(2, dim, eps=1e-4)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.GroupNorm(2, dim, eps=1e-4)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class ResNeXtBottleneck(nn.Module):
"""
    ResNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, dim, cardinality=4, base_depth=32):
""" Constructor
Args:
            dim: input/output channel dimensionality
            cardinality: num of convolution groups.
            base_depth: base number of channels in each group.
"""
super(ResNeXtBottleneck, self).__init__()
D = cardinality * base_depth
self.conv_reduce = nn.Conv2d(dim, D, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_grp = nn.Conv2d(D, D, kernel_size=3, stride=1, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, dim, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(dim)
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_grp.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
return F.relu(x + bottleneck, inplace=True)
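if __name__ == "__main__":
    # Illustrative sanity check: both residual blocks preserve the input shape.
    import torch
    x = torch.randn(2, 8, 16, 16)
    print(BasicBlock(dim=8)(x).shape)         # torch.Size([2, 8, 16, 16])
    print(ResNeXtBottleneck(dim=8)(x).shape)  # torch.Size([2, 8, 16, 16])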
| 2,335 | 35.5 | 107 | py |
steer | steer-master/ffjord/lib/layers/glow.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class BruteForceLayer(nn.Module):
def __init__(self, dim):
super(BruteForceLayer, self).__init__()
self.weight = nn.Parameter(torch.eye(dim))
def forward(self, x, logpx=None, reverse=False):
if not reverse:
y = F.linear(x, self.weight)
if logpx is None:
return y
else:
return y, logpx - self._logdetgrad.expand_as(logpx)
else:
y = F.linear(x, self.weight.double().inverse().float())
if logpx is None:
return y
else:
return y, logpx + self._logdetgrad.expand_as(logpx)
@property
def _logdetgrad(self):
return torch.log(torch.abs(torch.det(self.weight.double()))).float()
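if __name__ == "__main__":
    # Illustrative sanity check: the layer is exactly invertible and the
    # log-density corrections cancel.
    torch.manual_seed(0)
    layer = BruteForceLayer(4)
    with torch.no_grad():
        layer.weight += 0.1 * torch.randn(4, 4)  # move off the identity
    x = torch.randn(3, 4)
    logpx = torch.zeros(3, 1)
    y, logpy = layer(x, logpx)
    x_rec, logpx_rec = layer(y, logpy, reverse=True)
    print((x - x_rec).abs().max().item(), (logpx - logpx_rec).abs().max().item())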
| 836 | 26 | 76 | py |
steer | steer-master/ffjord/lib/layers/elemwise.py | import math
import torch
import torch.nn as nn
_DEFAULT_ALPHA = 1e-6
class ZeroMeanTransform(nn.Module):
def __init__(self):
nn.Module.__init__(self)
def forward(self, x, logpx=None, reverse=False):
if reverse:
x = x + .5
if logpx is None:
return x
return x, logpx
else:
x = x - .5
if logpx is None:
return x
return x, logpx
class LogitTransform(nn.Module):
"""
    The preprocessing step used in Real NVP:
    y = logit(a + (1 - 2a) * x)
    x = (sigmoid(y) - a) / (1 - 2a)
"""
def __init__(self, alpha=_DEFAULT_ALPHA):
nn.Module.__init__(self)
self.alpha = alpha
def forward(self, x, logpx=None, reverse=False):
if reverse:
return _sigmoid(x, logpx, self.alpha)
else:
return _logit(x, logpx, self.alpha)
class SigmoidTransform(nn.Module):
"""Reverse of LogitTransform."""
def __init__(self, alpha=_DEFAULT_ALPHA):
nn.Module.__init__(self)
self.alpha = alpha
def forward(self, x, logpx=None, reverse=False):
if reverse:
return _logit(x, logpx, self.alpha)
else:
return _sigmoid(x, logpx, self.alpha)
def _logit(x, logpx=None, alpha=_DEFAULT_ALPHA):
s = alpha + (1 - 2 * alpha) * x
y = torch.log(s) - torch.log(1 - s)
if logpx is None:
return y
return y, logpx - _logdetgrad(x, alpha).view(x.size(0), -1).sum(1, keepdim=True)
def _sigmoid(y, logpy=None, alpha=_DEFAULT_ALPHA):
x = (torch.sigmoid(y) - alpha) / (1 - 2 * alpha)
if logpy is None:
return x
return x, logpy + _logdetgrad(x, alpha).view(x.size(0), -1).sum(1, keepdim=True)
def _logdetgrad(x, alpha):
s = alpha + (1 - 2 * alpha) * x
logdetgrad = -torch.log(s - s * s) + math.log(1 - 2 * alpha)
return logdetgrad
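if __name__ == "__main__":
    # Illustrative sanity check: LogitTransform and its reverse are inverses,
    # and the log-density corrections cancel.
    t = LogitTransform()
    x = torch.rand(4, 3)
    logpx = torch.zeros(4, 1)
    y, logpy = t(x, logpx)
    x_rec, logpx_rec = t(y, logpy, reverse=True)
    print((x - x_rec).abs().max().item(), (logpx - logpx_rec).abs().max().item())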
| 1,918 | 24.25 | 84 | py |
steer | steer-master/ffjord/lib/layers/normalization.py | import torch
import torch.nn as nn
from torch.nn import Parameter
__all__ = ['MovingBatchNorm1d', 'MovingBatchNorm2d']
class MovingBatchNormNd(nn.Module):
def __init__(self, num_features, eps=1e-4, decay=0.1, bn_lag=0., affine=True):
super(MovingBatchNormNd, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
self.decay = decay
self.bn_lag = bn_lag
self.register_buffer('step', torch.zeros(1))
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
@property
def shape(self):
raise NotImplementedError
def reset_parameters(self):
self.running_mean.zero_()
self.running_var.fill_(1)
if self.affine:
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, x, logpx=None, reverse=False):
if reverse:
return self._reverse(x, logpx)
else:
return self._forward(x, logpx)
def _forward(self, x, logpx=None):
c = x.size(1)
used_mean = self.running_mean.clone().detach()
used_var = self.running_var.clone().detach()
if self.training:
# compute batch statistics
x_t = x.transpose(0, 1).contiguous().view(c, -1)
batch_mean = torch.mean(x_t, dim=1)
batch_var = torch.var(x_t, dim=1)
# moving average
if self.bn_lag > 0:
used_mean = batch_mean - (1 - self.bn_lag) * (batch_mean - used_mean.detach())
used_mean /= (1. - self.bn_lag**(self.step[0] + 1))
used_var = batch_var - (1 - self.bn_lag) * (batch_var - used_var.detach())
used_var /= (1. - self.bn_lag**(self.step[0] + 1))
# update running estimates
self.running_mean -= self.decay * (self.running_mean - batch_mean.data)
self.running_var -= self.decay * (self.running_var - batch_var.data)
self.step += 1
# perform normalization
used_mean = used_mean.view(*self.shape).expand_as(x)
used_var = used_var.view(*self.shape).expand_as(x)
y = (x - used_mean) * torch.exp(-0.5 * torch.log(used_var + self.eps))
if self.affine:
weight = self.weight.view(*self.shape).expand_as(x)
bias = self.bias.view(*self.shape).expand_as(x)
y = y * torch.exp(weight) + bias
if logpx is None:
return y
else:
return y, logpx - self._logdetgrad(x, used_var).view(x.size(0), -1).sum(1, keepdim=True)
def _reverse(self, y, logpy=None):
used_mean = self.running_mean
used_var = self.running_var
if self.affine:
weight = self.weight.view(*self.shape).expand_as(y)
bias = self.bias.view(*self.shape).expand_as(y)
y = (y - bias) * torch.exp(-weight)
used_mean = used_mean.view(*self.shape).expand_as(y)
used_var = used_var.view(*self.shape).expand_as(y)
x = y * torch.exp(0.5 * torch.log(used_var + self.eps)) + used_mean
if logpy is None:
return x
else:
return x, logpy + self._logdetgrad(x, used_var).view(x.size(0), -1).sum(1, keepdim=True)
def _logdetgrad(self, x, used_var):
logdetgrad = -0.5 * torch.log(used_var + self.eps)
if self.affine:
weight = self.weight.view(*self.shape).expand(*x.size())
logdetgrad += weight
return logdetgrad
def __repr__(self):
return (
'{name}({num_features}, eps={eps}, decay={decay}, bn_lag={bn_lag},'
' affine={affine})'.format(name=self.__class__.__name__, **self.__dict__)
)
def stable_var(x, mean=None, dim=1):
if mean is None:
mean = x.mean(dim, keepdim=True)
mean = mean.view(-1, 1)
res = torch.pow(x - mean, 2)
max_sqr = torch.max(res, dim, keepdim=True)[0]
    var = torch.mean(res / max_sqr, dim, keepdim=True) * max_sqr
var = var.view(-1)
# change nan to zero
var[var != var] = 0
return var
class MovingBatchNorm1d(MovingBatchNormNd):
@property
def shape(self):
return [1, -1]
class MovingBatchNorm2d(MovingBatchNormNd):
@property
def shape(self):
return [1, -1, 1, 1]
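# --- Usage sketch (not part of the original file): a minimal invertibility
# --- check for MovingBatchNorm1d. Shapes and iteration counts below are
# --- illustrative assumptions, not values used elsewhere in the repo.
if __name__ == '__main__':
    torch.manual_seed(0)
    bn = MovingBatchNorm1d(5)
    x = torch.randn(8, 5)
    logpx = torch.zeros(8, 1)
    bn.train()
    for _ in range(10):  # populate the running statistics
        bn(x, logpx)
    bn.eval()  # eval mode uses the running statistics in both directions
    y, logpy = bn(x, logpx)
    x_rec, _ = bn(y, logpy, reverse=True)
    print('max reconstruction error:', (x - x_rec).abs().max().item())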
| 4,688 | 32.978261 | 100 | py |
steer | steer-master/ffjord/lib/layers/coupling.py | import torch
import torch.nn as nn
__all__ = ['CouplingLayer', 'MaskedCouplingLayer']
class CouplingLayer(nn.Module):
"""Used in 2D experiments."""
def __init__(self, d, intermediate_dim=64, swap=False):
nn.Module.__init__(self)
self.d = d - (d // 2)
self.swap = swap
self.net_s_t = nn.Sequential(
nn.Linear(self.d, intermediate_dim),
nn.ReLU(inplace=True),
nn.Linear(intermediate_dim, intermediate_dim),
nn.ReLU(inplace=True),
nn.Linear(intermediate_dim, (d - self.d) * 2),
)
def forward(self, x, logpx=None, reverse=False):
if self.swap:
x = torch.cat([x[:, self.d:], x[:, :self.d]], 1)
in_dim = self.d
out_dim = x.shape[1] - self.d
s_t = self.net_s_t(x[:, :in_dim])
scale = torch.sigmoid(s_t[:, :out_dim] + 2.)
shift = s_t[:, out_dim:]
logdetjac = torch.sum(torch.log(scale).view(scale.shape[0], -1), 1, keepdim=True)
if not reverse:
y1 = x[:, self.d:] * scale + shift
delta_logp = -logdetjac
else:
y1 = (x[:, self.d:] - shift) / scale
delta_logp = logdetjac
y = torch.cat([x[:, :self.d], y1], 1) if not self.swap else torch.cat([y1, x[:, :self.d]], 1)
if logpx is None:
return y
else:
return y, logpx + delta_logp
class MaskedCouplingLayer(nn.Module):
"""Used in the tabular experiments."""
def __init__(self, d, hidden_dims, mask_type='alternate', swap=False):
nn.Module.__init__(self)
self.d = d
self.register_buffer('mask', sample_mask(d, mask_type, swap).view(1, d))
self.net_scale = build_net(d, hidden_dims, activation="tanh")
self.net_shift = build_net(d, hidden_dims, activation="relu")
def forward(self, x, logpx=None, reverse=False):
scale = torch.exp(self.net_scale(x * self.mask))
shift = self.net_shift(x * self.mask)
masked_scale = scale * (1 - self.mask) + torch.ones_like(scale) * self.mask
masked_shift = shift * (1 - self.mask)
logdetjac = torch.sum(torch.log(masked_scale).view(scale.shape[0], -1), 1, keepdim=True)
if not reverse:
y = x * masked_scale + masked_shift
delta_logp = -logdetjac
else:
y = (x - masked_shift) / masked_scale
delta_logp = logdetjac
if logpx is None:
return y
else:
return y, logpx + delta_logp
def sample_mask(dim, mask_type, swap):
if mask_type == 'alternate':
# Index-based masking in MAF paper.
mask = torch.zeros(dim)
mask[::2] = 1
if swap:
mask = 1 - mask
return mask
elif mask_type == 'channel':
# Masking type used in Real NVP paper.
mask = torch.zeros(dim)
mask[:dim // 2] = 1
if swap:
mask = 1 - mask
return mask
else:
raise ValueError('Unknown mask_type {}'.format(mask_type))
def build_net(input_dim, hidden_dims, activation="relu"):
dims = (input_dim,) + tuple(hidden_dims) + (input_dim,)
activation_modules = {"relu": nn.ReLU(inplace=True), "tanh": nn.Tanh()}
chain = []
for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
chain.append(nn.Linear(in_dim, out_dim))
if i < len(hidden_dims):
chain.append(activation_modules[activation])
return nn.Sequential(*chain)
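# --- Usage sketch (not part of the original file): round-trip through a
# --- CouplingLayer to check that the reverse pass undoes the forward pass
# --- and that the log-density corrections cancel. All sizes are illustrative.
if __name__ == '__main__':
    torch.manual_seed(0)
    layer = CouplingLayer(d=4, intermediate_dim=32)
    x = torch.randn(16, 4)
    logpx = torch.zeros(16, 1)
    y, logpy = layer(x, logpx)
    x_rec, logpx_rec = layer(y, logpy, reverse=True)
    print('max |x - x_rec|:', (x - x_rec).abs().max().item())
    print('max |logpx - logpx_rec|:', (logpx - logpx_rec).abs().max().item())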
| 3,525 | 30.20354 | 101 | py |
steer | steer-master/ffjord/lib/layers/wrappers/cnf_regularization.py | import torch
import torch.nn as nn
class RegularizedODEfunc(nn.Module):
def __init__(self, odefunc, regularization_fns):
super(RegularizedODEfunc, self).__init__()
self.odefunc = odefunc
self.regularization_fns = regularization_fns
def before_odeint(self, *args, **kwargs):
self.odefunc.before_odeint(*args, **kwargs)
def forward(self, t, state):
class SharedContext(object):
pass
with torch.enable_grad():
x, logp = state[:2]
x.requires_grad_(True)
logp.requires_grad_(True)
dstate = self.odefunc(t, (x, logp))
if len(state) > 2:
dx, dlogp = dstate[:2]
reg_states = tuple(reg_fn(x, logp, dx, dlogp, SharedContext) for reg_fn in self.regularization_fns)
return dstate + reg_states
else:
return dstate
@property
def _num_evals(self):
return self.odefunc._num_evals
def _batch_root_mean_squared(tensor):
tensor = tensor.view(tensor.shape[0], -1)
return torch.mean(torch.norm(tensor, p=2, dim=1) / tensor.shape[1]**0.5)
def l1_regularzation_fn(x, logp, dx, dlogp, unused_context):
del x, logp, dlogp
return torch.mean(torch.abs(dx))
def l2_regularzation_fn(x, logp, dx, dlogp, unused_context):
del x, logp, dlogp
return _batch_root_mean_squared(dx)
def directional_l2_regularization_fn(x, logp, dx, dlogp, unused_context):
del logp, dlogp
directional_dx = torch.autograd.grad(dx, x, dx, create_graph=True)[0]
return _batch_root_mean_squared(directional_dx)
def jacobian_frobenius_regularization_fn(x, logp, dx, dlogp, context):
del logp, dlogp
if hasattr(context, "jac"):
jac = context.jac
else:
jac = _get_minibatch_jacobian(dx, x)
context.jac = jac
return _batch_root_mean_squared(jac)
def jacobian_diag_frobenius_regularization_fn(x, logp, dx, dlogp, context):
del logp, dlogp
if hasattr(context, "jac"):
jac = context.jac
else:
jac = _get_minibatch_jacobian(dx, x)
context.jac = jac
    diagonal = jac.view(jac.shape[0], -1)[:, ::jac.shape[1] + 1]  # stride M + 1 picks out the diagonal; assumes jac is minibatch square, i.e. (N, M, M).
return _batch_root_mean_squared(diagonal)
def jacobian_offdiag_frobenius_regularization_fn(x, logp, dx, dlogp, context):
del logp, dlogp
if hasattr(context, "jac"):
jac = context.jac
else:
jac = _get_minibatch_jacobian(dx, x)
context.jac = jac
    diagonal = jac.view(jac.shape[0], -1)[:, ::jac.shape[1] + 1]  # stride M + 1 picks out the diagonal; assumes jac is minibatch square, i.e. (N, M, M).
ss_offdiag = torch.sum(jac.view(jac.shape[0], -1)**2, dim=1) - torch.sum(diagonal**2, dim=1)
ms_offdiag = ss_offdiag / (diagonal.shape[1] * (diagonal.shape[1] - 1))
return torch.mean(ms_offdiag)
def _get_minibatch_jacobian(y, x, create_graph=True):
"""Computes the Jacobian of y wrt x assuming minibatch-mode.
Args:
y: (N, ...) with a total of D_y elements in ...
x: (N, ...) with a total of D_x elements in ...
Returns:
The minibatch Jacobian matrix of shape (N, D_y, D_x)
"""
assert y.shape[0] == x.shape[0]
y = y.view(y.shape[0], -1)
# Compute Jacobian row by row.
jac = []
for j in range(y.shape[1]):
dy_j_dx = torch.autograd.grad(y[:, j], x, torch.ones_like(y[:, j]), retain_graph=True,
                                      create_graph=create_graph)[0].view(x.shape[0], -1)
jac.append(torch.unsqueeze(dy_j_dx, 1))
jac = torch.cat(jac, 1)
return jac
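# --- Sanity-check sketch (not part of the original file): for a linear map
# --- y = x @ W^T the per-example Jacobian is W for every sample, so the
# --- helper above should return W stacked N times. Purely illustrative.
if __name__ == '__main__':
    torch.manual_seed(0)
    W = torch.randn(3, 5)
    x = torch.randn(4, 5, requires_grad=True)
    y = x.matmul(W.t())
    jac = _get_minibatch_jacobian(y, x)  # shape (4, 3, 5)
    print('matches W:', torch.allclose(jac, W.expand(4, 3, 5)))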
| 3,591 | 31.654545 | 115 | py |
steer | steer-master/ffjord/lib/layers/diffeq_layers/container.py | import torch
import torch.nn as nn
from .wrappers import diffeq_wrapper
class SequentialDiffEq(nn.Module):
"""A container for a sequential chain of layers. Supports both regular and diffeq layers.
"""
def __init__(self, *layers):
super(SequentialDiffEq, self).__init__()
self.layers = nn.ModuleList([diffeq_wrapper(layer) for layer in layers])
def forward(self, t, x):
for layer in self.layers:
x = layer(t, x)
return x
class MixtureODELayer(nn.Module):
"""Produces a mixture of experts where output = sigma(t) * f(t, x).
Time-dependent weights sigma(t) help learn to blend the experts without resorting to a highly stiff f.
Supports both regular and diffeq experts.
"""
def __init__(self, experts):
super(MixtureODELayer, self).__init__()
assert len(experts) > 1
wrapped_experts = [diffeq_wrapper(ex) for ex in experts]
self.experts = nn.ModuleList(wrapped_experts)
self.mixture_weights = nn.Linear(1, len(self.experts))
def forward(self, t, y):
dys = []
for f in self.experts:
dys.append(f(t, y))
dys = torch.stack(dys, 0)
weights = self.mixture_weights(t).view(-1, *([1] * (dys.ndimension() - 1)))
dy = torch.sum(dys * weights, dim=0, keepdim=False)
return dy
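# --- Usage sketch (not part of the original file): SequentialDiffEq lets
# --- plain nn layers (which ignore t) sit next to time-aware layers. The
# --- sizes are illustrative; call _demo_sequential_diffeq() by hand (this
# --- module uses relative imports, so it cannot run as a script).
def _demo_sequential_diffeq():
    f = SequentialDiffEq(
        nn.Linear(4, 8),  # plain layer: diffeq_wrapper makes it ignore t
        nn.Tanh(),
        nn.Linear(8, 4),
    )
    t = torch.tensor(0.5)
    x = torch.randn(2, 4)
    print(f(t, x).shape)  # torch.Size([2, 4])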
| 1,357 | 30.581395 | 106 | py |
steer | steer-master/ffjord/lib/layers/diffeq_layers/resnet.py | import torch.nn as nn
from . import basic
from . import container
NGROUPS = 16
class ResNet(container.SequentialDiffEq):
def __init__(self, dim, intermediate_dim, n_resblocks, conv_block=None):
super(ResNet, self).__init__()
if conv_block is None:
conv_block = basic.ConcatCoordConv2d
self.dim = dim
self.intermediate_dim = intermediate_dim
self.n_resblocks = n_resblocks
layers = []
layers.append(conv_block(dim, intermediate_dim, ksize=3, stride=1, padding=1, bias=False))
for _ in range(n_resblocks):
layers.append(BasicBlock(intermediate_dim, conv_block))
layers.append(nn.GroupNorm(NGROUPS, intermediate_dim, eps=1e-4))
layers.append(nn.ReLU(inplace=True))
layers.append(conv_block(intermediate_dim, dim, ksize=1, bias=False))
super(ResNet, self).__init__(*layers)
def __repr__(self):
return (
'{name}({dim}, intermediate_dim={intermediate_dim}, n_resblocks={n_resblocks})'.format(
name=self.__class__.__name__, **self.__dict__
)
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, dim, conv_block=None):
super(BasicBlock, self).__init__()
if conv_block is None:
conv_block = basic.ConcatCoordConv2d
self.norm1 = nn.GroupNorm(NGROUPS, dim, eps=1e-4)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = conv_block(dim, dim, ksize=3, stride=1, padding=1, bias=False)
self.norm2 = nn.GroupNorm(NGROUPS, dim, eps=1e-4)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = conv_block(dim, dim, ksize=3, stride=1, padding=1, bias=False)
def forward(self, t, x):
residual = x
out = self.norm1(x)
out = self.relu1(out)
out = self.conv1(t, out)
out = self.norm2(out)
out = self.relu2(out)
out = self.conv2(t, out)
out += residual
return out
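# --- Usage sketch (not part of the original file): a tiny time-conditioned
# --- ResNet at one time point. intermediate_dim must be divisible by NGROUPS
# --- for the GroupNorm layers; the sizes are illustrative. Call
# --- _demo_resnet() by hand (relative imports prevent running as a script).
def _demo_resnet():
    import torch
    net = ResNet(dim=3, intermediate_dim=32, n_resblocks=2)
    t = torch.tensor(0.5)
    x = torch.randn(2, 3, 8, 8)
    print(net(t, x).shape)  # torch.Size([2, 3, 8, 8])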
| 2,003 | 28.470588 | 99 | py |
steer | steer-master/ffjord/lib/layers/diffeq_layers/wrappers.py | from inspect import signature
import torch.nn as nn
__all__ = ["diffeq_wrapper", "reshape_wrapper"]
class DiffEqWrapper(nn.Module):
def __init__(self, module):
super(DiffEqWrapper, self).__init__()
self.module = module
if len(signature(self.module.forward).parameters) == 1:
self.diffeq = lambda t, y: self.module(y)
elif len(signature(self.module.forward).parameters) == 2:
self.diffeq = self.module
else:
raise ValueError("Differential equation needs to either take (t, y) or (y,) as input.")
def forward(self, t, y):
return self.diffeq(t, y)
def __repr__(self):
return self.diffeq.__repr__()
def diffeq_wrapper(layer):
return DiffEqWrapper(layer)
class ReshapeDiffEq(nn.Module):
def __init__(self, input_shape, net):
super(ReshapeDiffEq, self).__init__()
assert len(signature(net.forward).parameters) == 2, "use diffeq_wrapper before reshape_wrapper."
self.input_shape = input_shape
self.net = net
def forward(self, t, x):
batchsize = x.shape[0]
x = x.view(batchsize, *self.input_shape)
return self.net(t, x).view(batchsize, -1)
def __repr__(self):
        return self.net.__repr__()
def reshape_wrapper(input_shape, layer):
return ReshapeDiffEq(input_shape, layer)
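# --- Usage sketch (not part of the original file): reshape_wrapper lets an
# --- image network consume the flattened state vectors an ODE solver passes
# --- around. The conv layer and shapes are illustrative assumptions.
if __name__ == '__main__':
    import torch
    net = diffeq_wrapper(nn.Conv2d(1, 1, 3, padding=1))  # wrapped to ignore t
    f = reshape_wrapper((1, 8, 8), net)
    t = torch.tensor(0.)
    x = torch.randn(5, 64)  # flattened 1x8x8 states
    print(f(t, x).shape)  # torch.Size([5, 64])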
| 1,365 | 28.06383 | 104 | py |
steer | steer-master/ffjord/lib/layers/diffeq_layers/basic.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1 or classname.find('Conv') != -1:
nn.init.constant_(m.weight, 0)
nn.init.normal_(m.bias, 0, 0.01)
class HyperLinear(nn.Module):
def __init__(self, dim_in, dim_out, hypernet_dim=8, n_hidden=1, activation=nn.Tanh):
super(HyperLinear, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.params_dim = self.dim_in * self.dim_out + self.dim_out
layers = []
dims = [1] + [hypernet_dim] * n_hidden + [self.params_dim]
for i in range(1, len(dims)):
layers.append(nn.Linear(dims[i - 1], dims[i]))
if i < len(dims) - 1:
layers.append(activation())
self._hypernet = nn.Sequential(*layers)
self._hypernet.apply(weights_init)
def forward(self, t, x):
params = self._hypernet(t.view(1, 1)).view(-1)
b = params[:self.dim_out].view(self.dim_out)
w = params[self.dim_out:].view(self.dim_out, self.dim_in)
return F.linear(x, w, b)
class IgnoreLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super(IgnoreLinear, self).__init__()
self._layer = nn.Linear(dim_in, dim_out)
def forward(self, t, x):
return self._layer(x)
class ConcatLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super(ConcatLinear, self).__init__()
self._layer = nn.Linear(dim_in + 1, dim_out)
def forward(self, t, x):
tt = torch.ones_like(x[:, :1]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
class ConcatLinear_v2(nn.Module):
def __init__(self, dim_in, dim_out):
        super(ConcatLinear_v2, self).__init__()
self._layer = nn.Linear(dim_in, dim_out)
self._hyper_bias = nn.Linear(1, dim_out, bias=False)
def forward(self, t, x):
return self._layer(x) + self._hyper_bias(t.view(1, 1))
class SquashLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super(SquashLinear, self).__init__()
self._layer = nn.Linear(dim_in, dim_out)
self._hyper = nn.Linear(1, dim_out)
def forward(self, t, x):
return self._layer(x) * torch.sigmoid(self._hyper(t.view(1, 1)))
class ConcatSquashLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super(ConcatSquashLinear, self).__init__()
self._layer = nn.Linear(dim_in, dim_out)
self._hyper_bias = nn.Linear(1, dim_out, bias=False)
self._hyper_gate = nn.Linear(1, dim_out)
def forward(self, t, x):
return self._layer(x) * torch.sigmoid(self._hyper_gate(t.view(1, 1))) \
+ self._hyper_bias(t.view(1, 1))
class HyperConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
super(HyperConv2d, self).__init__()
assert dim_in % groups == 0 and dim_out % groups == 0, "dim_in and dim_out must both be divisible by groups."
self.dim_in = dim_in
self.dim_out = dim_out
self.ksize = ksize
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.transpose = transpose
self.params_dim = int(dim_in * dim_out * ksize * ksize / groups)
if self.bias:
self.params_dim += dim_out
self._hypernet = nn.Linear(1, self.params_dim)
self.conv_fn = F.conv_transpose2d if transpose else F.conv2d
self._hypernet.apply(weights_init)
def forward(self, t, x):
params = self._hypernet(t.view(1, 1)).view(-1)
weight_size = int(self.dim_in * self.dim_out * self.ksize * self.ksize / self.groups)
if self.transpose:
weight = params[:weight_size].view(self.dim_in, self.dim_out // self.groups, self.ksize, self.ksize)
else:
weight = params[:weight_size].view(self.dim_out, self.dim_in // self.groups, self.ksize, self.ksize)
        bias = params[weight_size:weight_size + self.dim_out].view(self.dim_out) if self.bias else None  # bias entries follow the weight entries in the hypernet output
return self.conv_fn(
x, weight=weight, bias=bias, stride=self.stride, padding=self.padding, groups=self.groups,
dilation=self.dilation
)
class IgnoreConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
super(IgnoreConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(
dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias
)
def forward(self, t, x):
return self._layer(x)
class SquashConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
super(SquashConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(
dim_in + 1, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias
)
self._hyper = nn.Linear(1, dim_out)
def forward(self, t, x):
return self._layer(x) * torch.sigmoid(self._hyper(t.view(1, 1))).view(1, -1, 1, 1)
class ConcatConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
super(ConcatConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(
dim_in + 1, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias
)
def forward(self, t, x):
tt = torch.ones_like(x[:, :1, :, :]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
class ConcatConv2d_v2(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
        super(ConcatConv2d_v2, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(
dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias
)
self._hyper_bias = nn.Linear(1, dim_out, bias=False)
def forward(self, t, x):
return self._layer(x) + self._hyper_bias(t.view(1, 1)).view(1, -1, 1, 1)
class ConcatSquashConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
super(ConcatSquashConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(
dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias
)
self._hyper_gate = nn.Linear(1, dim_out)
self._hyper_bias = nn.Linear(1, dim_out, bias=False)
def forward(self, t, x):
return self._layer(x) * torch.sigmoid(self._hyper_gate(t.view(1, 1))).view(1, -1, 1, 1) \
+ self._hyper_bias(t.view(1, 1)).view(1, -1, 1, 1)
class ConcatCoordConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
super(ConcatCoordConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(
dim_in + 3, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias
)
def forward(self, t, x):
b, c, h, w = x.shape
hh = torch.arange(h).to(x).view(1, 1, h, 1).expand(b, 1, h, w)
ww = torch.arange(w).to(x).view(1, 1, 1, w).expand(b, 1, h, w)
tt = t.to(x).view(1, 1, 1, 1).expand(b, 1, h, w)
x_aug = torch.cat([x, tt, hh, ww], 1)
return self._layer(x_aug)
class GatedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(GatedLinear, self).__init__()
self.layer_f = nn.Linear(in_features, out_features)
self.layer_g = nn.Linear(in_features, out_features)
def forward(self, x):
f = self.layer_f(x)
g = torch.sigmoid(self.layer_g(x))
return f * g
class GatedConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1):
super(GatedConv, self).__init__()
self.layer_f = nn.Conv2d(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=1, groups=groups
)
self.layer_g = nn.Conv2d(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=1, groups=groups
)
def forward(self, x):
f = self.layer_f(x)
g = torch.sigmoid(self.layer_g(x))
return f * g
class GatedConvTranspose(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1):
super(GatedConvTranspose, self).__init__()
self.layer_f = nn.ConvTranspose2d(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding=output_padding,
groups=groups
)
self.layer_g = nn.ConvTranspose2d(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding=output_padding,
groups=groups
)
def forward(self, x):
f = self.layer_f(x)
g = torch.sigmoid(self.layer_g(x))
return f * g
class BlendLinear(nn.Module):
def __init__(self, dim_in, dim_out, layer_type=nn.Linear, **unused_kwargs):
super(BlendLinear, self).__init__()
self._layer0 = layer_type(dim_in, dim_out)
self._layer1 = layer_type(dim_in, dim_out)
def forward(self, t, x):
y0 = self._layer0(x)
y1 = self._layer1(x)
return y0 + (y1 - y0) * t
class BlendConv2d(nn.Module):
def __init__(
self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False,
**unused_kwargs
):
super(BlendConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer0 = module(
dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias
)
self._layer1 = module(
dim_in, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias
)
def forward(self, t, x):
y0 = self._layer0(x)
y1 = self._layer1(x)
return y0 + (y1 - y0) * t
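# --- Usage sketch (not part of the original file): the layers above take a
# --- scalar time t alongside the state x. All shapes are illustrative.
if __name__ == '__main__':
    t = torch.tensor(0.5)
    x_vec = torch.randn(4, 6)
    x_img = torch.randn(4, 3, 8, 8)
    print(ConcatSquashLinear(6, 6)(t, x_vec).shape)       # torch.Size([4, 6])
    print(HyperLinear(6, 6)(t, x_vec).shape)              # torch.Size([4, 6])
    print(ConcatConv2d(3, 3, padding=1)(t, x_img).shape)  # torch.Size([4, 3, 8, 8])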
| 11,057 | 36.869863 | 120 | py |
steer | steer-master/latent_ode/mujoco_physics.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Authors: Yulia Rubanova and Ricky Chen
###########################
import os
import numpy as np
import torch
from lib.utils import get_dict_template
import lib.utils as utils
from torchvision.datasets.utils import download_url
class HopperPhysics(object):
T = 200
D = 14
n_training_samples = 10000
training_file = 'training.pt'
def __init__(self, root, download = True, generate=False, device = torch.device("cpu")):
self.root = root
if download:
self._download()
if generate:
self._generate_dataset()
if not self._check_exists():
raise RuntimeError('Dataset not found.' + ' You can use download=True to download it')
data_file = os.path.join(self.data_folder, self.training_file)
self.data = torch.Tensor(torch.load(data_file)).to(device)
self.data, self.data_min, self.data_max = utils.normalize_data(self.data)
        self.device = device
def visualize(self, traj, plot_name = 'traj', dirname='hopper_imgs', video_name = None):
r"""Generates images of the trajectory and stores them as <dirname>/traj<index>-<t>.jpg"""
T, D = traj.size()
traj = traj.cpu() * self.data_max.cpu() + self.data_min.cpu()
try:
from dm_control import suite # noqa: F401
except ImportError as e:
raise Exception('Deepmind Control Suite is required to visualize the dataset.') from e
try:
from PIL import Image # noqa: F401
except ImportError as e:
raise Exception('PIL is required to visualize the dataset.') from e
def save_image(data, filename):
im = Image.fromarray(data)
im.save(filename)
os.makedirs(dirname, exist_ok=True)
env = suite.load('hopper', 'stand')
physics = env.physics
for t in range(T):
with physics.reset_context():
physics.data.qpos[:] = traj[t, :D // 2]
physics.data.qvel[:] = traj[t, D // 2:]
save_image(
physics.render(height=480, width=640, camera_id=0),
os.path.join(dirname, plot_name + '-{:03d}.jpg'.format(t))
)
def _generate_dataset(self):
if self._check_exists():
return
os.makedirs(self.data_folder, exist_ok=True)
print('Generating dataset...')
train_data = self._generate_random_trajectories(self.n_training_samples)
torch.save(train_data, os.path.join(self.data_folder, self.training_file))
def _download(self):
if self._check_exists():
return
print("Downloading the dataset [325MB] ...")
os.makedirs(self.data_folder, exist_ok=True)
url = "http://www.cs.toronto.edu/~rtqichen/datasets/HopperPhysics/training.pt"
download_url(url, self.data_folder, "training.pt", None)
def _generate_random_trajectories(self, n_samples):
try:
from dm_control import suite # noqa: F401
except ImportError as e:
raise Exception('Deepmind Control Suite is required to generate the dataset.') from e
env = suite.load('hopper', 'stand')
physics = env.physics
# Store the state of the RNG to restore later.
st0 = np.random.get_state()
np.random.seed(123)
data = np.zeros((n_samples, self.T, self.D))
for i in range(n_samples):
with physics.reset_context():
# x and z positions of the hopper. We want z > 0 for the hopper to stay above ground.
physics.data.qpos[:2] = np.random.uniform(0, 0.5, size=2)
physics.data.qpos[2:] = np.random.uniform(-2, 2, size=physics.data.qpos[2:].shape)
physics.data.qvel[:] = np.random.uniform(-5, 5, size=physics.data.qvel.shape)
for t in range(self.T):
data[i, t, :self.D // 2] = physics.data.qpos
data[i, t, self.D // 2:] = physics.data.qvel
physics.step()
# Restore RNG.
np.random.set_state(st0)
return data
def _check_exists(self):
return os.path.exists(os.path.join(self.data_folder, self.training_file))
@property
def data_folder(self):
return os.path.join(self.root, self.__class__.__name__)
# def __getitem__(self, index):
# return self.data[index]
def get_dataset(self):
return self.data
def __len__(self):
return len(self.data)
def size(self, ind = None):
if ind is not None:
return self.data.shape[ind]
return self.data.shape
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
return fmt_str
| 4,315 | 27.966443 | 92 | py |
steer | steer-master/latent_ode/person_activity.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Authors: Yulia Rubanova and Ricky Chen
###########################
import os
import lib.utils as utils
import numpy as np
import tarfile
import torch
from torch.utils.data import DataLoader
from torchvision.datasets.utils import download_url
from lib.utils import get_device
# Adapted from: https://github.com/rtqichen/time-series-datasets
class PersonActivity(object):
urls = [
'https://archive.ics.uci.edu/ml/machine-learning-databases/00196/ConfLongDemo_JSI.txt',
]
tag_ids = [
"010-000-024-033", #"ANKLE_LEFT",
"010-000-030-096", #"ANKLE_RIGHT",
"020-000-033-111", #"CHEST",
"020-000-032-221" #"BELT"
]
tag_dict = {k: i for i, k in enumerate(tag_ids)}
label_names = [
"walking",
"falling",
"lying down",
"lying",
"sitting down",
"sitting",
"standing up from lying",
"on all fours",
"sitting on the ground",
"standing up from sitting",
"standing up from sit on grnd"
]
#label_dict = {k: i for i, k in enumerate(label_names)}
#Merge similar labels into one class
label_dict = {
"walking": 0,
"falling": 1,
"lying": 2,
"lying down": 2,
"sitting": 3,
"sitting down" : 3,
"standing up from lying": 4,
"standing up from sitting": 4,
"standing up from sit on grnd": 4,
"on all fours": 5,
"sitting on the ground": 6
}
def __init__(self, root, download=False,
reduce='average', max_seq_length = 50,
n_samples = None, device = torch.device("cpu")):
self.root = root
self.reduce = reduce
self.max_seq_length = max_seq_length
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found. You can use download=True to download it')
if device == torch.device("cpu"):
self.data = torch.load(os.path.join(self.processed_folder, self.data_file), map_location='cpu')
else:
self.data = torch.load(os.path.join(self.processed_folder, self.data_file))
if n_samples is not None:
self.data = self.data[:n_samples]
def download(self):
if self._check_exists():
return
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
def save_record(records, record_id, tt, vals, mask, labels):
tt = torch.tensor(tt).to(self.device)
vals = torch.stack(vals)
mask = torch.stack(mask)
labels = torch.stack(labels)
# flatten the measurements for different tags
vals = vals.reshape(vals.size(0), -1)
mask = mask.reshape(mask.size(0), -1)
assert(len(tt) == vals.size(0))
assert(mask.size(0) == vals.size(0))
assert(labels.size(0) == vals.size(0))
#records.append((record_id, tt, vals, mask, labels))
seq_length = len(tt)
# split the long time series into smaller ones
offset = 0
slide = self.max_seq_length // 2
while (offset + self.max_seq_length < seq_length):
idx = range(offset, offset + self.max_seq_length)
first_tp = tt[idx][0]
records.append((record_id, tt[idx] - first_tp, vals[idx], mask[idx], labels[idx]))
offset += slide
for url in self.urls:
filename = url.rpartition('/')[2]
download_url(url, self.raw_folder, filename, None)
print('Processing {}...'.format(filename))
dirname = os.path.join(self.raw_folder)
records = []
first_tp = None
for txtfile in os.listdir(dirname):
with open(os.path.join(dirname, txtfile)) as f:
lines = f.readlines()
prev_time = -1
tt = []
record_id = None
for l in lines:
cur_record_id, tag_id, time, date, val1, val2, val3, label = l.strip().split(',')
value_vec = torch.Tensor((float(val1), float(val2), float(val3))).to(self.device)
time = float(time)
if cur_record_id != record_id:
if record_id is not None:
save_record(records, record_id, tt, vals, mask, labels)
tt, vals, mask, nobs, labels = [], [], [], [], []
record_id = cur_record_id
tt = [torch.zeros(1).to(self.device)]
vals = [torch.zeros(len(self.tag_ids),3).to(self.device)]
mask = [torch.zeros(len(self.tag_ids),3).to(self.device)]
nobs = [torch.zeros(len(self.tag_ids)).to(self.device)]
labels = [torch.zeros(len(self.label_names)).to(self.device)]
first_tp = time
time = round((time - first_tp)/ 10**5)
prev_time = time
else:
# for speed -- we actually don't need to quantize it in Latent ODE
                        time = round((time - first_tp)/ 10**5) # quantizing by 100 ms. 10,000 is one millisecond, 10,000,000 is one second
if time != prev_time:
tt.append(time)
vals.append(torch.zeros(len(self.tag_ids),3).to(self.device))
mask.append(torch.zeros(len(self.tag_ids),3).to(self.device))
nobs.append(torch.zeros(len(self.tag_ids)).to(self.device))
labels.append(torch.zeros(len(self.label_names)).to(self.device))
prev_time = time
if tag_id in self.tag_ids:
n_observations = nobs[-1][self.tag_dict[tag_id]]
if (self.reduce == 'average') and (n_observations > 0):
prev_val = vals[-1][self.tag_dict[tag_id]]
new_val = (prev_val * n_observations + value_vec) / (n_observations + 1)
vals[-1][self.tag_dict[tag_id]] = new_val
else:
vals[-1][self.tag_dict[tag_id]] = value_vec
mask[-1][self.tag_dict[tag_id]] = 1
nobs[-1][self.tag_dict[tag_id]] += 1
if label in self.label_names:
if torch.sum(labels[-1][self.label_dict[label]]) == 0:
labels[-1][self.label_dict[label]] = 1
else:
assert tag_id == 'RecordID', 'Read unexpected tag id {}'.format(tag_id)
save_record(records, record_id, tt, vals, mask, labels)
torch.save(
records,
os.path.join(self.processed_folder, 'data.pt')
)
print('Done!')
def _check_exists(self):
for url in self.urls:
filename = url.rpartition('/')[2]
if not os.path.exists(
os.path.join(self.processed_folder, 'data.pt')
):
return False
return True
@property
def raw_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def data_file(self):
return 'data.pt'
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
fmt_str += ' Max length: {}\n'.format(self.max_seq_length)
fmt_str += ' Reduce: {}\n'.format(self.reduce)
return fmt_str
def get_person_id(record_id):
# The first letter is the person id
person_id = record_id[0]
person_id = ord(person_id) - ord("A")
return person_id
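# --- Illustrative sketch (not part of the original file): the record ids
# --- below are hypothetical; the leading letter indexes the person.
def _demo_person_ids():
    for rid in ("A01", "B07", "C03"):
        print(rid, '->', get_person_id(rid))  # -> 0, 1, 2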
def variable_time_collate_fn_activity(batch, args, device = torch.device("cpu"), data_type = "train"):
"""
Expects a batch of time series data in the form of (record_id, tt, vals, mask, labels) where
- record_id is a patient id
- tt is a 1-dimensional tensor containing T time values of observations.
- vals is a (T, D) tensor containing observed values for D variables.
- mask is a (T, D) tensor containing 1 where values were observed and 0 otherwise.
- labels is a list of labels for the current patient, if labels are available. Otherwise None.
Returns:
combined_tt: The union of all time observations.
combined_vals: (M, T, D) tensor containing the observed values.
combined_mask: (M, T, D) tensor containing 1 where values were observed and 0 otherwise.
"""
D = batch[0][2].shape[1]
N = batch[0][-1].shape[1] # number of labels
combined_tt, inverse_indices = torch.unique(torch.cat([ex[1] for ex in batch]), sorted=True, return_inverse=True)
combined_tt = combined_tt.to(device)
offset = 0
combined_vals = torch.zeros([len(batch), len(combined_tt), D]).to(device)
combined_mask = torch.zeros([len(batch), len(combined_tt), D]).to(device)
combined_labels = torch.zeros([len(batch), len(combined_tt), N]).to(device)
for b, (record_id, tt, vals, mask, labels) in enumerate(batch):
tt = tt.to(device)
vals = vals.to(device)
mask = mask.to(device)
labels = labels.to(device)
indices = inverse_indices[offset:offset + len(tt)]
offset += len(tt)
combined_vals[b, indices] = vals
combined_mask[b, indices] = mask
combined_labels[b, indices] = labels
combined_tt = combined_tt.float()
if torch.max(combined_tt) != 0.:
combined_tt = combined_tt / torch.max(combined_tt)
data_dict = {
"data": combined_vals,
"time_steps": combined_tt,
"mask": combined_mask,
"labels": combined_labels}
data_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)
return data_dict
if __name__ == '__main__':
torch.manual_seed(1991)
dataset = PersonActivity('data/PersonActivity', download=True)
dataloader = DataLoader(dataset, batch_size=30, shuffle=True, collate_fn= variable_time_collate_fn_activity)
    next(iter(dataloader))  # draw one batch as a smoke test (Python 3 iteration)
| 9,173 | 29.682274 | 120 | py |
steer | steer-master/latent_ode/generate_timeseries.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
# Create a synthetic dataset
from __future__ import print_function
from __future__ import absolute_import, division
import lib.utils as utils
import torch
import matplotlib.image
import matplotlib.pyplot as plt
import pickle
from scipy.special import expit as sigmoid
import numpy.random as npr
import numpy as np
import os
import matplotlib
if os.path.exists("/Users/yulia"):
matplotlib.use('TkAgg')
else:
matplotlib.use('Agg')
# ======================================================================================
def get_next_val(init, t, tmin, tmax, final=None):
if final is None:
return init
val = init + (final - init) / (tmax - tmin) * t
return val
def generate_periodic(time_steps, init_freq, init_amplitude, starting_point,
final_freq=None, final_amplitude=None, phi_offset=0.):
tmin = time_steps.min()
tmax = time_steps.max()
data = []
t_prev = time_steps[0]
phi = phi_offset
for t in time_steps:
dt = t - t_prev
amp = get_next_val(init_amplitude, t, tmin, tmax, final_amplitude)
freq = get_next_val(init_freq, t, tmin, tmax, final_freq)
phi = phi + 2 * np.pi * freq * dt # integrate to get phase
phi2 = phi + 2 * np.pi * freq * 0.125 * dt # integrate to get phase
#print("Initial amplitude")
#print(init_amplitude)
#print("Amplitude")
#print(amp)
y = amp * np.sin(phi) + starting_point
#y = amp*0.001*np.exp(t) +2.5*amp * np.sin(phi) + starting_point
#y = amp*0.001*np.exp(t) + 2.5*amp * np.sin(phi) + 2.5*amp * np.sin(phi2)+ starting_point
t_prev = t
data.append([t, y])
return np.array(data)
def assign_value_or_sample(value, sampling_interval=[0., 1.]):
if value is None:
int_length = sampling_interval[1] - sampling_interval[0]
return np.random.random() * int_length + sampling_interval[0]
else:
return value
class TimeSeries:
def __init__(self, device=torch.device("cpu")):
self.device = device
self.z0 = None
def init_visualization(self):
self.fig = plt.figure(figsize=(10, 4), facecolor='white')
self.ax = self.fig.add_subplot(111, frameon=False)
plt.show(block=False)
def visualize(self, truth):
self.ax.plot(truth[:, 0], truth[:, 1])
def add_noise(self, traj_list, time_steps, noise_weight):
n_samples = traj_list.size(0)
# Add noise to all the points except the first point
n_tp = len(time_steps) - 1
noise = np.random.sample((n_samples, n_tp))
noise = torch.Tensor(noise).to(self.device)
traj_list_w_noise = traj_list.clone()
        # Noise goes into the value channel (dim 0 -- the time column is removed in sample_traj); the first time point stays noiseless
traj_list_w_noise[:, 1:, 0] += noise_weight * noise
return traj_list_w_noise
class Periodic_1d(TimeSeries):
def __init__(self, device=torch.device("cpu"),
init_freq=0.3, init_amplitude=1.,
final_amplitude=10., final_freq=1.,
z0=0.):
"""
If some of the parameters (init_freq, init_amplitude, final_amplitude, final_freq) is not provided, it is randomly sampled.
For now, all the time series share the time points and the starting point.
"""
super(Periodic_1d, self).__init__(device)
self.init_freq = init_freq
self.init_amplitude = init_amplitude
self.final_amplitude = final_amplitude
self.final_freq = final_freq
self.z0 = z0
def sample_traj(self, time_steps, n_samples=1, noise_weight=1.,
cut_out_section=None):
"""
Sample periodic functions.
"""
traj_list = []
for i in range(n_samples):
init_freq = assign_value_or_sample(self.init_freq, [0.4, 0.8])
if self.final_freq is None:
final_freq = init_freq
else:
final_freq = assign_value_or_sample(
self.final_freq, [0.4, 0.8])
init_amplitude = assign_value_or_sample(
self.init_amplitude, [0., 1.])
final_amplitude = assign_value_or_sample(
self.final_amplitude, [0., 1.])
noisy_z0 = self.z0 + np.random.normal(loc=0., scale=0.1)
traj = generate_periodic(time_steps, init_freq=init_freq,
init_amplitude=init_amplitude, starting_point=noisy_z0,
final_amplitude=final_amplitude, final_freq=final_freq)
# Cut the time dimension
traj = np.expand_dims(traj[:, 1:], 0)
traj_list.append(traj)
        # shape after stacking and squeezing below: [n_samples, n_timesteps, 1]
        # traj_list[:,:,0] -- values at the time stamps (the time column was cut above)
traj_list = np.array(traj_list)
traj_list = torch.Tensor().new_tensor(traj_list, device=self.device)
traj_list = traj_list.squeeze(1)
traj_list = self.add_noise(traj_list, time_steps, noise_weight)
return traj_list
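# --- Usage sketch (not part of the original file): sample a few noisy
# --- periodic trajectories on a regular grid. All hyperparameters here are
# --- illustrative assumptions.
if __name__ == '__main__':
    np.random.seed(0)
    time_steps = torch.linspace(0., 5., 100)
    dataset = Periodic_1d(init_freq=0.5, init_amplitude=1.,
                          final_amplitude=1., final_freq=0.5, z0=1.)
    trajs = dataset.sample_traj(time_steps, n_samples=3, noise_weight=0.01)
    print(trajs.shape)  # torch.Size([3, 100, 1])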
| 5,248 | 33.761589 | 131 | py |
steer | steer-master/latent_ode/physionet.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Authors: Yulia Rubanova and Ricky Chen
###########################
import os
import matplotlib
if os.path.exists("/Users/yulia"):
matplotlib.use('TkAgg')
else:
matplotlib.use('Agg')
import matplotlib.pyplot
import matplotlib.pyplot as plt
import lib.utils as utils
import numpy as np
import tarfile
import torch
from torch.utils.data import DataLoader
from torchvision.datasets.utils import download_url
from lib.utils import get_device
# Adapted from: https://github.com/rtqichen/time-series-datasets
# get minimum and maximum for each feature across the whole dataset
def get_data_min_max(records):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data_min, data_max = None, None
inf = torch.Tensor([float("Inf")])[0].to(device)
for b, (record_id, tt, vals, mask, labels) in enumerate(records):
n_features = vals.size(-1)
batch_min = []
batch_max = []
for i in range(n_features):
non_missing_vals = vals[:,i][mask[:,i] == 1]
if len(non_missing_vals) == 0:
batch_min.append(inf)
batch_max.append(-inf)
else:
batch_min.append(torch.min(non_missing_vals))
batch_max.append(torch.max(non_missing_vals))
batch_min = torch.stack(batch_min)
batch_max = torch.stack(batch_max)
if (data_min is None) and (data_max is None):
data_min = batch_min
data_max = batch_max
else:
data_min = torch.min(data_min, batch_min)
data_max = torch.max(data_max, batch_max)
return data_min, data_max
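# --- Illustrative sketch (not part of the original file): get_data_min_max on
# --- one tiny synthetic record; only values observed under the mask count.
def _demo_data_min_max():
    tt = torch.tensor([0., 1.])
    vals = torch.tensor([[1., 5.], [3., 2.]])
    mask = torch.tensor([[1., 0.], [1., 1.]])
    # feature 0 observes {1, 3}; feature 1 observes only {2}
    data_min, data_max = get_data_min_max([("demo", tt, vals, mask, None)])
    print(data_min, data_max)  # tensor([1., 2.]) tensor([3., 2.])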
class PhysioNet(object):
urls = [
'https://physionet.org/files/challenge-2012/1.0.0/set-a.tar.gz?download',
'https://physionet.org/files/challenge-2012/1.0.0/set-b.tar.gz?download',
]
outcome_urls = ['https://physionet.org/files/challenge-2012/1.0.0/Outcomes-a.txt']
params = [
'Age', 'Gender', 'Height', 'ICUType', 'Weight', 'Albumin', 'ALP', 'ALT', 'AST', 'Bilirubin', 'BUN',
'Cholesterol', 'Creatinine', 'DiasABP', 'FiO2', 'GCS', 'Glucose', 'HCO3', 'HCT', 'HR', 'K', 'Lactate', 'Mg',
'MAP', 'MechVent', 'Na', 'NIDiasABP', 'NIMAP', 'NISysABP', 'PaCO2', 'PaO2', 'pH', 'Platelets', 'RespRate',
'SaO2', 'SysABP', 'Temp', 'TroponinI', 'TroponinT', 'Urine', 'WBC'
]
params_dict = {k: i for i, k in enumerate(params)}
labels = [ "SAPS-I", "SOFA", "Length_of_stay", "Survival", "In-hospital_death" ]
labels_dict = {k: i for i, k in enumerate(labels)}
def __init__(self, root, train=True, download=False,
quantization = 0.1, n_samples = None, device = torch.device("cpu")):
self.root = root
self.train = train
self.reduce = "average"
self.quantization = quantization
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found. You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
if device == torch.device("cpu"):
self.data = torch.load(os.path.join(self.processed_folder, data_file), map_location='cpu')
self.labels = torch.load(os.path.join(self.processed_folder, self.label_file), map_location='cpu')
else:
self.data = torch.load(os.path.join(self.processed_folder, data_file))
self.labels = torch.load(os.path.join(self.processed_folder, self.label_file))
if n_samples is not None:
self.data = self.data[:n_samples]
self.labels = self.labels[:n_samples]
def download(self):
if self._check_exists():
return
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# Download outcome data
for url in self.outcome_urls:
filename = url.rpartition('/')[2]
download_url(url, self.raw_folder, filename, None)
txtfile = os.path.join(self.raw_folder, filename)
with open(txtfile) as f:
lines = f.readlines()
outcomes = {}
for l in lines[1:]:
l = l.rstrip().split(',')
record_id, labels = l[0], np.array(l[1:]).astype(float)
outcomes[record_id] = torch.Tensor(labels).to(self.device)
            # save the full outcomes dict (keyed by record id); `labels` here holds only the last parsed row
            torch.save(
                outcomes,
                os.path.join(self.processed_folder, filename.split('.')[0] + '.pt')
            )
for url in self.urls:
filename = url.rpartition('/')[2]
download_url(url, self.raw_folder, filename, None)
tar = tarfile.open(os.path.join(self.raw_folder, filename), "r:gz")
tar.extractall(self.raw_folder)
tar.close()
print('Processing {}...'.format(filename))
dirname = os.path.join(self.raw_folder, filename.split('.')[0])
patients = []
total = 0
for txtfile in os.listdir(dirname):
record_id = txtfile.split('.')[0]
with open(os.path.join(dirname, txtfile)) as f:
lines = f.readlines()
prev_time = 0
tt = [0.]
vals = [torch.zeros(len(self.params)).to(self.device)]
mask = [torch.zeros(len(self.params)).to(self.device)]
nobs = [torch.zeros(len(self.params))]
for l in lines[1:]:
total += 1
time, param, val = l.split(',')
# Time in hours
time = float(time.split(':')[0]) + float(time.split(':')[1]) / 60.
# round up the time stamps (up to 6 min by default)
# used for speed -- we actually don't need to quantize it in Latent ODE
time = round(time / self.quantization) * self.quantization
if time != prev_time:
tt.append(time)
vals.append(torch.zeros(len(self.params)).to(self.device))
mask.append(torch.zeros(len(self.params)).to(self.device))
nobs.append(torch.zeros(len(self.params)).to(self.device))
prev_time = time
if param in self.params_dict:
#vals[-1][self.params_dict[param]] = float(val)
n_observations = nobs[-1][self.params_dict[param]]
if self.reduce == 'average' and n_observations > 0:
prev_val = vals[-1][self.params_dict[param]]
new_val = (prev_val * n_observations + float(val)) / (n_observations + 1)
vals[-1][self.params_dict[param]] = new_val
else:
vals[-1][self.params_dict[param]] = float(val)
mask[-1][self.params_dict[param]] = 1
nobs[-1][self.params_dict[param]] += 1
else:
assert param == 'RecordID', 'Read unexpected param {}'.format(param)
tt = torch.tensor(tt).to(self.device)
vals = torch.stack(vals)
mask = torch.stack(mask)
labels = None
if record_id in outcomes:
# Only training set has labels
labels = outcomes[record_id]
# Out of 5 label types provided for Physionet, take only the last one -- mortality
labels = labels[4]
patients.append((record_id, tt, vals, mask, labels))
torch.save(
patients,
os.path.join(self.processed_folder,
filename.split('.')[0] + "_" + str(self.quantization) + '.pt')
)
print('Done!')
def _check_exists(self):
for url in self.urls:
filename = url.rpartition('/')[2]
if not os.path.exists(
os.path.join(self.processed_folder,
filename.split('.')[0] + "_" + str(self.quantization) + '.pt')
):
return False
return True
@property
def raw_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def training_file(self):
return 'set-a_{}.pt'.format(self.quantization)
@property
def test_file(self):
return 'set-b_{}.pt'.format(self.quantization)
@property
def label_file(self):
return 'Outcomes-a.pt'
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def get_label(self, record_id):
return self.labels[record_id]
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Split: {}\n'.format('train' if self.train is True else 'test')
fmt_str += ' Root Location: {}\n'.format(self.root)
fmt_str += ' Quantization: {}\n'.format(self.quantization)
fmt_str += ' Reduce: {}\n'.format(self.reduce)
return fmt_str
def visualize(self, timesteps, data, mask, plot_name):
width = 15
height = 15
non_zero_attributes = (torch.sum(mask,0) > 2).numpy()
non_zero_idx = [i for i in range(len(non_zero_attributes)) if non_zero_attributes[i] == 1.]
n_non_zero = sum(non_zero_attributes)
mask = mask[:, non_zero_idx]
data = data[:, non_zero_idx]
params_non_zero = [self.params[i] for i in non_zero_idx]
params_dict = {k: i for i, k in enumerate(params_non_zero)}
n_col = 3
n_row = n_non_zero // n_col + (n_non_zero % n_col > 0)
fig, ax_list = plt.subplots(n_row, n_col, figsize=(width, height), facecolor='white')
#for i in range(len(self.params)):
for i in range(n_non_zero):
param = params_non_zero[i]
param_id = params_dict[param]
tp_mask = mask[:,param_id].long()
tp_cur_param = timesteps[tp_mask == 1.]
data_cur_param = data[tp_mask == 1., param_id]
ax_list[i // n_col, i % n_col].plot(tp_cur_param.numpy(), data_cur_param.numpy(), marker='o')
ax_list[i // n_col, i % n_col].set_title(param)
fig.tight_layout()
fig.savefig(plot_name)
plt.close(fig)
def variable_time_collate_fn(batch, args, device = torch.device("cpu"), data_type = "train",
data_min = None, data_max = None):
"""
Expects a batch of time series data in the form of (record_id, tt, vals, mask, labels) where
- record_id is a patient id
- tt is a 1-dimensional tensor containing T time values of observations.
- vals is a (T, D) tensor containing observed values for D variables.
- mask is a (T, D) tensor containing 1 where values were observed and 0 otherwise.
- labels is a list of labels for the current patient, if labels are available. Otherwise None.
Returns:
combined_tt: The union of all time observations.
combined_vals: (M, T, D) tensor containing the observed values.
combined_mask: (M, T, D) tensor containing 1 where values were observed and 0 otherwise.
"""
D = batch[0][2].shape[1]
combined_tt, inverse_indices = torch.unique(torch.cat([ex[1] for ex in batch]), sorted=True, return_inverse=True)
combined_tt = combined_tt.to(device)
offset = 0
combined_vals = torch.zeros([len(batch), len(combined_tt), D]).to(device)
combined_mask = torch.zeros([len(batch), len(combined_tt), D]).to(device)
combined_labels = None
N_labels = 1
combined_labels = torch.zeros(len(batch), N_labels) + torch.tensor(float('nan'))
combined_labels = combined_labels.to(device = device)
for b, (record_id, tt, vals, mask, labels) in enumerate(batch):
tt = tt.to(device)
vals = vals.to(device)
mask = mask.to(device)
if labels is not None:
labels = labels.to(device)
indices = inverse_indices[offset:offset + len(tt)]
offset += len(tt)
combined_vals[b, indices] = vals
combined_mask[b, indices] = mask
if labels is not None:
combined_labels[b] = labels
combined_vals, _, _ = utils.normalize_masked_data(combined_vals, combined_mask,
att_min = data_min, att_max = data_max)
if torch.max(combined_tt) != 0.:
combined_tt = combined_tt / torch.max(combined_tt)
data_dict = {
"data": combined_vals,
"time_steps": combined_tt,
"mask": combined_mask,
"labels": combined_labels}
data_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)
return data_dict
if __name__ == '__main__':
torch.manual_seed(1991)
dataset = PhysioNet('data/physionet', train=False, download=True)
dataloader = DataLoader(dataset, batch_size=10, shuffle=True, collate_fn=variable_time_collate_fn)
    print(next(iter(dataloader)))  # Python 3 iteration
| 11,603 | 31.233333 | 114 | py |
steer | steer-master/latent_ode/run_models.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
import matplotlib.pyplot as plt
import time
import datetime
import argparse
import numpy as np
import pandas as pd
from random import SystemRandom
from sklearn import model_selection
import torch
import torch.nn as nn
from torch.nn.functional import relu
import torch.optim as optim
import lib.utils as utils
from lib.plotting import *
from lib.rnn_baselines import *
from lib.ode_rnn import *
from lib.create_latent_ode_model import create_LatentODE_model
from lib.parse_datasets import parse_datasets
from lib.ode_func import ODEFunc, ODEFunc_w_Poisson
from lib.diffeq_solver import DiffeqSolver
from mujoco_physics import HopperPhysics
from lib.utils import compute_loss_all_batches
# Generative model for noisy data based on ODE
parser = argparse.ArgumentParser('Latent ODE')
parser.add_argument('-n', type=int, default=100, help="Size of the dataset")
parser.add_argument('--niters', type=int, default=300)
parser.add_argument('--lr', type=float, default=1e-2, help="Starting learning rate.")
parser.add_argument('-b', '--batch-size', type=int, default=50)
parser.add_argument('--viz', action='store_true', help="Show plots while training")
parser.add_argument('--save', type=str, default='experiments/', help="Path for save checkpoints")
parser.add_argument('--load', type=str, default=None, help="ID of the experiment to load for evaluation. If None, run a new experiment.")
parser.add_argument('-r', '--random-seed', type=int, default=1991, help="Random_seed")
parser.add_argument('--dataset', type=str, default='periodic', help="Dataset to load. Available: physionet, activity, hopper, periodic")
parser.add_argument('-s', '--sample-tp', type=float, default=None, help="Number of time points to sub-sample. "
"If > 1, subsample exact number of points. If the number is in [0,1], take a percentage of available points per time series. If None, do not subsample")
parser.add_argument('-c', '--cut-tp', type=int, default=None, help="Cut out the section of the timeline of the specified length (in number of points). "
"Used for periodic function demo.")
parser.add_argument('--quantization', type=float, default=0.1, help="Quantization on the physionet dataset. "
"Value 1 means quantization by 1 hour, value 0.1 means quantization by 0.1 hour = 6 min")
parser.add_argument('--latent-ode', action='store_true', help="Run Latent ODE seq2seq model")
parser.add_argument('--z0-encoder', type=str, default='odernn', help="Type of encoder for Latent ODE model: odernn or rnn")
parser.add_argument('--classic-rnn', action='store_true', help="Run RNN baseline: classic RNN that sees true points at every point. Used for interpolation only.")
parser.add_argument('--rnn-cell', default="gru", help="RNN Cell type. Available: gru (default), expdecay")
parser.add_argument('--input-decay', action='store_true', help="For RNN: use the input that is the weighted average of empirical mean and previous value (like in GRU-D)")
parser.add_argument('--ode-rnn', action='store_true', help="Run ODE-RNN baseline: RNN-style that sees true points at every point. Used for interpolation only.")
parser.add_argument('--rnn-vae', action='store_true', help="Run RNN baseline: seq2seq model with sampling of the h0 and ELBO loss.")
parser.add_argument('-l', '--latents', type=int, default=6, help="Size of the latent state")
parser.add_argument('--rec-dims', type=int, default=20, help="Dimensionality of the recognition model (ODE or RNN).")
parser.add_argument('--experimentID', type=int, default=7, help="Experiment ID used to name checkpoints and logs.")
parser.add_argument('--rec-layers', type=int, default=1, help="Number of layers in ODE func in recognition ODE")
parser.add_argument('--gen-layers', type=int, default=1, help="Number of layers in ODE func in generative ODE")
parser.add_argument('-u', '--units', type=int, default=100, help="Number of units per layer in ODE func")
parser.add_argument('-g', '--gru-units', type=int, default=100, help="Number of units per layer in each of GRU update networks")
parser.add_argument('--poisson', action='store_true', help="Model poisson-process likelihood for the density of events in addition to reconstruction.")
parser.add_argument('--classif', action='store_true', help="Include binary classification loss -- used for Physionet dataset for hospital mortality")
parser.add_argument('--linear-classif', action='store_true', help="If using a classifier, use a linear classifier instead of 1-layer NN")
parser.add_argument('--extrap', action='store_true', help="Set extrapolation mode. If this flag is not set, run interpolation mode.")
parser.add_argument('-t', '--timepoints', type=int, default=100, help="Total number of time-points")
parser.add_argument('--max-t', type=float, default=5., help="We subsample points in the interval [0, args.max_tp]")
parser.add_argument('--noise-weight', type=float, default=0.01, help="Noise amplitude for generated trajectories")
args = parser.parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
file_name = os.path.basename(__file__)[:-3]
utils.makedirs(args.save)
#####################################################################################################
if __name__ == '__main__':
torch.manual_seed(args.random_seed)
np.random.seed(args.random_seed)
experimentID = args.load
if experimentID is None:
# Make a new experiment ID
experimentID = args.experimentID #int(SystemRandom().random()*100000)
ckpt_path = os.path.join(args.save, "experiment_" + str(experimentID) + '.ckpt')
start = time.time()
print("Sampling dataset of {} training examples".format(args.n))
input_command = sys.argv
ind = [i for i in range(len(input_command)) if input_command[i] == "--load"]
if len(ind) == 1:
ind = ind[0]
input_command = input_command[:ind] + input_command[(ind+2):]
input_command = " ".join(input_command)
utils.makedirs("results/")
##################################################################
data_obj = parse_datasets(args, device)
input_dim = data_obj["input_dim"]
classif_per_tp = False
if ("classif_per_tp" in data_obj):
# do classification per time point rather than on a time series as a whole
classif_per_tp = data_obj["classif_per_tp"]
if args.classif and (args.dataset == "hopper" or args.dataset == "periodic"):
raise Exception("Classification task is not available for MuJoCo and 1d datasets")
n_labels = 1
if args.classif:
if ("n_labels" in data_obj):
n_labels = data_obj["n_labels"]
else:
raise Exception("Please provide number of labels for classification task")
##################################################################
# Create the model
obsrv_std = 0.01
if args.dataset == "hopper":
obsrv_std = 1e-3
obsrv_std = torch.Tensor([obsrv_std]).to(device)
z0_prior = Normal(torch.Tensor([0.0]).to(device), torch.Tensor([1.]).to(device))
if args.rnn_vae:
if args.poisson:
print("Poisson process likelihood not implemented for RNN-VAE: ignoring --poisson")
# Create RNN-VAE model
model = RNN_VAE(input_dim, args.latents,
device = device,
rec_dims = args.rec_dims,
concat_mask = True,
obsrv_std = obsrv_std,
z0_prior = z0_prior,
use_binary_classif = args.classif,
classif_per_tp = classif_per_tp,
linear_classifier = args.linear_classif,
n_units = args.units,
input_space_decay = args.input_decay,
cell = args.rnn_cell,
n_labels = n_labels,
train_classif_w_reconstr = (args.dataset == "physionet")
).to(device)
elif args.classic_rnn:
if args.poisson:
print("Poisson process likelihood not implemented for RNN: ignoring --poisson")
if args.extrap:
raise Exception("Extrapolation for standard RNN not implemented")
# Create RNN model
model = Classic_RNN(input_dim, args.latents, device,
concat_mask = True, obsrv_std = obsrv_std,
n_units = args.units,
use_binary_classif = args.classif,
classif_per_tp = classif_per_tp,
linear_classifier = args.linear_classif,
input_space_decay = args.input_decay,
cell = args.rnn_cell,
n_labels = n_labels,
train_classif_w_reconstr = (args.dataset == "physionet")
).to(device)
elif args.ode_rnn:
# Create ODE-GRU model
n_ode_gru_dims = args.latents
if args.poisson:
print("Poisson process likelihood not implemented for ODE-RNN: ignoring --poisson")
if args.extrap:
raise Exception("Extrapolation for ODE-RNN not implemented")
ode_func_net = utils.create_net(n_ode_gru_dims, n_ode_gru_dims,
n_layers = args.rec_layers, n_units = args.units, nonlinear = nn.Tanh)
rec_ode_func = ODEFunc(
input_dim = input_dim,
latent_dim = n_ode_gru_dims,
ode_func_net = ode_func_net,
device = device).to(device)
z0_diffeq_solver = DiffeqSolver(input_dim, rec_ode_func, "euler", args.latents,
odeint_rtol = 1e-3, odeint_atol = 1e-4, device = device)
model = ODE_RNN(input_dim, n_ode_gru_dims, device = device,
z0_diffeq_solver = z0_diffeq_solver, n_gru_units = args.gru_units,
concat_mask = True, obsrv_std = obsrv_std,
use_binary_classif = args.classif,
classif_per_tp = classif_per_tp,
n_labels = n_labels,
train_classif_w_reconstr = (args.dataset == "physionet")
).to(device)
elif args.latent_ode:
model = create_LatentODE_model(args, input_dim, z0_prior, obsrv_std, device,
classif_per_tp = classif_per_tp,
n_labels = n_labels)
else:
raise Exception("Model not specified")
##################################################################
if args.viz:
viz = Visualizations(device)
##################################################################
#Load checkpoint and evaluate the model
if args.load is not None:
utils.get_ckpt_model(ckpt_path, model, device)
exit()
##################################################################
# Training
log_path = "logs/" + file_name + "_" + str(experimentID) + ".log"
if not os.path.exists("logs/"):
utils.makedirs("logs/")
logger = utils.get_logger(logpath=log_path, filepath=os.path.abspath(__file__))
logger.info(input_command)
optimizer = optim.Adamax(model.parameters(), lr=args.lr)
num_batches = data_obj["n_train_batches"]
for itr in range(1, num_batches * (args.niters + 1)):
optimizer.zero_grad()
utils.update_learning_rate(optimizer, decay_rate = 0.999, lowest = args.lr / 10)
wait_until_kl_inc = 10
if itr // num_batches < wait_until_kl_inc:
kl_coef = 0.
else:
			kl_coef = 1 - 0.99 ** (itr // num_batches - wait_until_kl_inc)
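			# Illustrative values of the schedule above: kl_coef stays 0 during the
			# warm-up epochs (and at the first annealed step, 1 - 0.99**0 = 0), then
			# rises as 1 - 0.99**k with k epochs past warm-up -- roughly 0.63 after
			# 100 epochs -- and approaches 1, gradually turning the
			# reconstruction-only objective into the full ELBO.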
batch_dict = utils.get_next_batch(data_obj["train_dataloader"])
train_res = model.compute_all_losses(batch_dict, n_traj_samples = 3, kl_coef = kl_coef)
train_res["loss"].backward()
optimizer.step()
n_iters_to_viz = 1
if itr % (n_iters_to_viz * num_batches) == 0:
with torch.no_grad():
test_res = compute_loss_all_batches(model,
data_obj["test_dataloader"], args,
n_batches = data_obj["n_test_batches"],
experimentID = experimentID,
device = device,
n_traj_samples = 3, kl_coef = kl_coef)
message = 'Epoch {:04d} [Test seq (cond on sampled tp)] | Loss {:.6f} | Likelihood {:.6f} | KL fp {:.4f} | FP STD {:.4f}|'.format(
itr//num_batches,
test_res["loss"].detach(), test_res["likelihood"].detach(),
test_res["kl_first_p"], test_res["std_first_p"])
logger.info("Experiment " + str(experimentID))
logger.info(message)
logger.info("KL coef: {}".format(kl_coef))
logger.info("Train loss (one batch): {}".format(train_res["loss"].detach()))
logger.info("Train CE loss (one batch): {}".format(train_res["ce_loss"].detach()))
if "auc" in test_res:
logger.info("Classification AUC (TEST): {:.4f}".format(test_res["auc"]))
if "mse" in test_res:
logger.info("Test MSE: {:.4f}".format(test_res["mse"]))
if "accuracy" in train_res:
logger.info("Classification accuracy (TRAIN): {:.4f}".format(train_res["accuracy"]))
if "accuracy" in test_res:
logger.info("Classification accuracy (TEST): {:.4f}".format(test_res["accuracy"]))
if "pois_likelihood" in test_res:
logger.info("Poisson likelihood: {}".format(test_res["pois_likelihood"]))
if "ce_loss" in test_res:
logger.info("CE loss: {}".format(test_res["ce_loss"]))
torch.save({
'args': args,
'state_dict': model.state_dict(),
}, ckpt_path)
# Plotting
if args.viz:
with torch.no_grad():
test_dict = utils.get_next_batch(data_obj["test_dataloader"])
print("plotting....")
if isinstance(model, LatentODE) and (args.dataset == "periodic"): #and not args.classic_rnn and not args.ode_rnn:
plot_id = itr // num_batches // n_iters_to_viz
viz.draw_all_plots_one_dim(test_dict, model,
plot_name = file_name + "_" + str(experimentID) + "_{:03d}".format(plot_id) + ".png",
experimentID = experimentID, save=True)
plt.pause(0.01)
torch.save({
'args': args,
'state_dict': model.state_dict(),
}, ckpt_path)
| 13,141 | 38.584337 | 170 | py |
steer | steer-master/latent_ode/lib/rnn_baselines.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.utils import get_device
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.nn.modules.rnn import GRUCell, LSTMCell, RNNCellBase
from torch.distributions.normal import Normal
from torch.distributions import Independent
from torch.nn.parameter import Parameter
from lib.base_models import Baseline, VAE_Baseline
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Exponential decay of the hidden states for RNN
# adapted from GRU-D implementation: https://github.com/zhiyongc/GRU-D/
# Exp decay between hidden states
class GRUCellExpDecay(RNNCellBase):
def __init__(self, input_size, input_size_for_decay, hidden_size, device, bias=True):
super(GRUCellExpDecay, self).__init__(input_size, hidden_size, bias, num_chunks=3)
self.device = device
self.input_size_for_decay = input_size_for_decay
self.decay = nn.Sequential(nn.Linear(input_size_for_decay, 1),)
utils.init_network_weights(self.decay)
def gru_exp_decay_cell(self, input, hidden, w_ih, w_hh, b_ih, b_hh):
		# IMPORTANT: assumes that the cumulative delta t's occupy the last
		# `input_size_for_decay` columns of the input
batch_size, n_dims = input.size()
# "input" contains the data, mask and also cumulative deltas for all inputs
cum_delta_ts = input[:, -self.input_size_for_decay:]
data = input[:, :-self.input_size_for_decay]
decay = torch.exp( - torch.min(torch.max(
torch.zeros([1]).to(self.device), self.decay(cum_delta_ts)),
torch.ones([1]).to(self.device) * 1000 ))
hidden = hidden * decay
gi = torch.mm(data, w_ih.t()) + b_ih
gh = torch.mm(hidden, w_hh.t()) + b_hh
i_r, i_i, i_n = gi.chunk(3, 1)
h_r, h_i, h_n = gh.chunk(3, 1)
resetgate = torch.sigmoid(i_r + h_r)
inputgate = torch.sigmoid(i_i + h_i)
newgate = torch.tanh(i_n + resetgate * h_n)
hy = newgate + inputgate * (hidden - newgate)
return hy
def forward(self, input, hx=None):
# type: (Tensor, Optional[Tensor]) -> Tensor
#self.check_forward_input(input)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
#self.check_forward_hidden(input, hx, '')
return self.gru_exp_decay_cell(
input, hx,
self.weight_ih, self.weight_hh,
self.bias_ih, self.bias_hh
)
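# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the original module) of how
# the cell above is driven: the last `input_size_for_decay` columns of the
# cell input carry cumulative time deltas, and everything before them is data
# (here: values concatenated with an observation mask). All sizes below are
# illustrative assumptions.
if __name__ == "__main__":
	_n_dims, _batch = 4, 2
	_cell = GRUCellExpDecay(
		input_size = _n_dims * 2, # data + mask
		input_size_for_decay = _n_dims,
		hidden_size = 8,
		device = torch.device("cpu"))
	_data = torch.randn(_batch, _n_dims)
	_mask = torch.ones(_batch, _n_dims)
	_cum_dt = torch.rand(_batch, _n_dims) # time since each feature was last observed
	_hidden = _cell(torch.cat((_data, _mask, _cum_dt), -1))
	print(_hidden.shape) # torch.Size([2, 8])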
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Imputation with a weighted average of the previous value and the empirical mean
# adapted from GRU-D implementation: https://github.com/zhiyongc/GRU-D/
def get_cum_delta_ts(data, delta_ts, mask):
n_traj, n_tp, n_dims = data.size()
cum_delta_ts = delta_ts.repeat(1, 1, n_dims)
missing_index = np.where(mask.cpu().numpy() == 0)
for idx in range(missing_index[0].shape[0]):
i = missing_index[0][idx]
j = missing_index[1][idx]
k = missing_index[2][idx]
		if j != 0 and j != (n_tp-1):
			# propagate the gap forward so each missing entry carries the time
			# since the last actual observation
			cum_delta_ts[i,j+1,k] = cum_delta_ts[i,j+1,k] + cum_delta_ts[i,j,k]
cum_delta_ts = cum_delta_ts / cum_delta_ts.max() # normalize
return cum_delta_ts
# adapted from GRU-D implementation: https://github.com/zhiyongc/GRU-D/
# very slow -- loops in Python over every missing entry
def impute_using_input_decay(data, delta_ts, mask, w_input_decay, b_input_decay):
n_traj, n_tp, n_dims = data.size()
cum_delta_ts = delta_ts.repeat(1, 1, n_dims)
missing_index = np.where(mask.cpu().numpy() == 0)
data_last_obsv = np.copy(data.cpu().numpy())
for idx in range(missing_index[0].shape[0]):
i = missing_index[0][idx]
j = missing_index[1][idx]
k = missing_index[2][idx]
if j != 0 and j != (n_tp-1):
cum_delta_ts[i,j+1,k] = cum_delta_ts[i,j+1,k] + cum_delta_ts[i,j,k]
if j != 0:
data_last_obsv[i,j,k] = data_last_obsv[i,j-1,k] # last observation
cum_delta_ts = cum_delta_ts / cum_delta_ts.max() # normalize
data_last_obsv = torch.Tensor(data_last_obsv).to(get_device(data))
zeros = torch.zeros([n_traj, n_tp, n_dims]).to(get_device(data))
decay = torch.exp( - torch.min( torch.max(zeros,
w_input_decay * cum_delta_ts + b_input_decay), zeros + 1000 ))
data_means = torch.mean(data, 1).unsqueeze(1)
data_imputed = data * mask + (1-mask) * (decay * data_last_obsv + (1-decay) * data_means)
return data_imputed
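# Worked example of the weighting above (illustrative numbers): for a feature
# that has been missing only a short time the decay is close to 1, so the
# imputed value is essentially the last observation; after a long gap the
# decay tends to 0 and the imputation falls back to the per-feature
# empirical mean.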
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def run_rnn(inputs, delta_ts, cell, first_hidden=None,
mask = None, feed_previous=False, n_steps=0,
decoder = None, input_decay_params = None,
feed_previous_w_prob = 0.,
masked_update = True):
if (feed_previous or feed_previous_w_prob) and decoder is None:
raise Exception("feed_previous is set to True -- please specify RNN decoder")
if n_steps == 0:
n_steps = inputs.size(1)
if (feed_previous or feed_previous_w_prob) and mask is None:
mask = torch.ones((inputs.size(0), n_steps, inputs.size(-1))).to(get_device(inputs))
if isinstance(cell, GRUCellExpDecay):
cum_delta_ts = get_cum_delta_ts(inputs, delta_ts, mask)
if input_decay_params is not None:
w_input_decay, b_input_decay = input_decay_params
inputs = impute_using_input_decay(inputs, delta_ts, mask,
w_input_decay, b_input_decay)
all_hiddens = []
hidden = first_hidden
if hidden is not None:
all_hiddens.append(hidden)
n_steps -= 1
for i in range(n_steps):
delta_t = delta_ts[:,i]
if i == 0:
rnn_input = inputs[:,i]
elif feed_previous:
rnn_input = decoder(hidden)
		elif feed_previous_w_prob > 0:
			# NOTE: with this comparison the decoder output is fed back with
			# probability (1 - feed_previous_w_prob); otherwise ground truth is used
			feed_prev = np.random.uniform() > feed_previous_w_prob
			if feed_prev:
				rnn_input = decoder(hidden)
			else:
				rnn_input = inputs[:,i]
else:
rnn_input = inputs[:,i]
if mask is not None:
mask_i = mask[:,i,:]
rnn_input = torch.cat((rnn_input, mask_i), -1)
if isinstance(cell, GRUCellExpDecay):
cum_delta_t = cum_delta_ts[:,i]
input_w_t = torch.cat((rnn_input, cum_delta_t), -1).squeeze(1)
else:
input_w_t = torch.cat((rnn_input, delta_t), -1).squeeze(1)
prev_hidden = hidden
hidden = cell(input_w_t, hidden)
if masked_update and (mask is not None) and (prev_hidden is not None):
			# update the hidden state only if at least one feature is observed at the
			# current time point; otherwise carry the previous hidden state forward
summed_mask = (torch.sum(mask_i, -1, keepdim = True) > 0).float()
assert(not torch.isnan(summed_mask).any())
hidden = summed_mask * hidden + (1-summed_mask) * prev_hidden
all_hiddens.append(hidden)
all_hiddens = torch.stack(all_hiddens, 0)
all_hiddens = all_hiddens.permute(1,0,2).unsqueeze(0)
return hidden, all_hiddens
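# A runnable sketch (all sizes are illustrative assumptions) of driving
# run_rnn with a plain GRUCell: inputs are [batch, time, dims], delta_ts are
# per-step time gaps, and the cell input width is dims (data) + dims (mask)
# + 1 (delta t).
if __name__ == "__main__":
	_batch, _n_tp, _n_dims, _latent = 2, 5, 3, 6
	_inputs = torch.randn(_batch, _n_tp, _n_dims)
	_delta_ts = torch.rand(_batch, _n_tp, 1)
	_mask = torch.ones(_batch, _n_tp, _n_dims)
	_cell = GRUCell(_n_dims * 2 + 1, _latent)
	_hidden, _all_hiddens = run_rnn(_inputs, _delta_ts, _cell, mask = _mask)
	print(_hidden.shape, _all_hiddens.shape) # (2, 6) and (1, 2, 5, 6)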
class Classic_RNN(Baseline):
def __init__(self, input_dim, latent_dim, device,
concat_mask = False, obsrv_std = 0.1,
use_binary_classif = False,
linear_classifier = False,
classif_per_tp = False,
input_space_decay = False,
cell = "gru", n_units = 100,
n_labels = 1,
train_classif_w_reconstr = False):
super(Classic_RNN, self).__init__(input_dim, latent_dim, device,
obsrv_std = obsrv_std,
use_binary_classif = use_binary_classif,
classif_per_tp = classif_per_tp,
linear_classifier = linear_classifier,
n_labels = n_labels,
train_classif_w_reconstr = train_classif_w_reconstr)
self.concat_mask = concat_mask
encoder_dim = int(input_dim)
if concat_mask:
encoder_dim = encoder_dim * 2
self.decoder = nn.Sequential(
nn.Linear(latent_dim, n_units),
nn.Tanh(),
nn.Linear(n_units, input_dim),)
#utils.init_network_weights(self.encoder)
utils.init_network_weights(self.decoder)
if cell == "gru":
self.rnn_cell = GRUCell(encoder_dim + 1, latent_dim) # +1 for delta t
elif cell == "expdecay":
self.rnn_cell = GRUCellExpDecay(
input_size = encoder_dim,
input_size_for_decay = input_dim,
hidden_size = latent_dim,
device = device)
else:
raise Exception("Unknown RNN cell: {}".format(cell))
if input_space_decay:
self.w_input_decay = Parameter(torch.Tensor(1, int(input_dim))).to(self.device)
self.b_input_decay = Parameter(torch.Tensor(1, int(input_dim))).to(self.device)
self.input_space_decay = input_space_decay
self.z0_net = lambda hidden_state: hidden_state
def get_reconstruction(self, time_steps_to_predict, data, truth_time_steps,
mask = None, n_traj_samples = 1, mode = None):
assert(mask is not None)
n_traj, n_tp, n_dims = data.size()
if (len(truth_time_steps) != len(time_steps_to_predict)) or (torch.sum(time_steps_to_predict - truth_time_steps) != 0):
raise Exception("Extrapolation mode not implemented for RNN models")
# for classic RNN time_steps_to_predict should be the same as truth_time_steps
assert(len(truth_time_steps) == len(time_steps_to_predict))
batch_size = data.size(0)
zero_delta_t = torch.Tensor([0.]).to(self.device)
delta_ts = truth_time_steps[1:] - truth_time_steps[:-1]
delta_ts = torch.cat((delta_ts, zero_delta_t))
if len(delta_ts.size()) == 1:
# delta_ts are shared for all trajectories in a batch
assert(data.size(1) == delta_ts.size(0))
delta_ts = delta_ts.unsqueeze(-1).repeat((batch_size,1,1))
input_decay_params = None
if self.input_space_decay:
input_decay_params = (self.w_input_decay, self.b_input_decay)
if mask is not None:
utils.check_mask(data, mask)
hidden_state, all_hiddens = run_rnn(data, delta_ts,
cell = self.rnn_cell, mask = mask,
input_decay_params = input_decay_params,
feed_previous_w_prob = (0. if self.use_binary_classif else 0.5),
decoder = self.decoder)
outputs = self.decoder(all_hiddens)
# Shift outputs for computing the loss -- we should compare the first output to the second data point, etc.
first_point = data[:,0,:]
outputs = utils.shift_outputs(outputs, first_point)
extra_info = {"first_point": (hidden_state.unsqueeze(0), 0.0, hidden_state.unsqueeze(0))}
if self.use_binary_classif:
if self.classif_per_tp:
extra_info["label_predictions"] = self.classifier(all_hiddens)
else:
extra_info["label_predictions"] = self.classifier(hidden_state).reshape(1,-1)
# outputs shape: [n_traj_samples, n_traj, n_tp, n_dims]
return outputs, extra_info
class RNN_VAE(VAE_Baseline):
def __init__(self, input_dim, latent_dim, rec_dims,
z0_prior, device,
concat_mask = False, obsrv_std = 0.1,
input_space_decay = False,
use_binary_classif = False,
classif_per_tp =False,
linear_classifier = False,
cell = "gru", n_units = 100,
n_labels = 1,
train_classif_w_reconstr = False):
super(RNN_VAE, self).__init__(
input_dim = input_dim, latent_dim = latent_dim,
z0_prior = z0_prior,
device = device, obsrv_std = obsrv_std,
use_binary_classif = use_binary_classif,
classif_per_tp = classif_per_tp,
linear_classifier = linear_classifier,
n_labels = n_labels,
train_classif_w_reconstr = train_classif_w_reconstr)
self.concat_mask = concat_mask
encoder_dim = int(input_dim)
if concat_mask:
encoder_dim = encoder_dim * 2
if cell == "gru":
self.rnn_cell_enc = GRUCell(encoder_dim + 1, rec_dims) # +1 for delta t
self.rnn_cell_dec = GRUCell(encoder_dim + 1, latent_dim) # +1 for delta t
elif cell == "expdecay":
self.rnn_cell_enc = GRUCellExpDecay(
input_size = encoder_dim,
input_size_for_decay = input_dim,
hidden_size = rec_dims,
device = device)
self.rnn_cell_dec = GRUCellExpDecay(
input_size = encoder_dim,
input_size_for_decay = input_dim,
hidden_size = latent_dim,
device = device)
else:
raise Exception("Unknown RNN cell: {}".format(cell))
self.z0_net = nn.Sequential(
nn.Linear(rec_dims, n_units),
nn.Tanh(),
nn.Linear(n_units, latent_dim * 2),)
utils.init_network_weights(self.z0_net)
self.decoder = nn.Sequential(
nn.Linear(latent_dim, n_units),
nn.Tanh(),
nn.Linear(n_units, input_dim),)
#utils.init_network_weights(self.encoder)
utils.init_network_weights(self.decoder)
if input_space_decay:
self.w_input_decay = Parameter(torch.Tensor(1, int(input_dim))).to(self.device)
self.b_input_decay = Parameter(torch.Tensor(1, int(input_dim))).to(self.device)
self.input_space_decay = input_space_decay
def get_reconstruction(self, time_steps_to_predict, data, truth_time_steps,
mask = None, n_traj_samples = 1, mode = None):
assert(mask is not None)
batch_size = data.size(0)
zero_delta_t = torch.Tensor([0.]).to(self.device)
# run encoder backwards
run_backwards = bool(time_steps_to_predict[0] < truth_time_steps[-1])
if run_backwards:
# Look at data in the reverse order: from later points to the first
data = utils.reverse(data)
mask = utils.reverse(mask)
delta_ts = truth_time_steps[1:] - truth_time_steps[:-1]
if run_backwards:
# we are going backwards in time
delta_ts = utils.reverse(delta_ts)
delta_ts = torch.cat((delta_ts, zero_delta_t))
if len(delta_ts.size()) == 1:
# delta_ts are shared for all trajectories in a batch
assert(data.size(1) == delta_ts.size(0))
delta_ts = delta_ts.unsqueeze(-1).repeat((batch_size,1,1))
input_decay_params = None
if self.input_space_decay:
input_decay_params = (self.w_input_decay, self.b_input_decay)
hidden_state, _ = run_rnn(data, delta_ts,
cell = self.rnn_cell_enc, mask = mask,
input_decay_params = input_decay_params)
z0_mean, z0_std = utils.split_last_dim(self.z0_net(hidden_state))
z0_std = z0_std.abs()
z0_sample = utils.sample_standard_gaussian(z0_mean, z0_std)
# Decoder # # # # # # # # # # # # # # # # # # # #
delta_ts = torch.cat((zero_delta_t, time_steps_to_predict[1:] - time_steps_to_predict[:-1]))
if len(delta_ts.size()) == 1:
delta_ts = delta_ts.unsqueeze(-1).repeat((batch_size,1,1))
_, all_hiddens = run_rnn(data, delta_ts,
cell = self.rnn_cell_dec,
first_hidden = z0_sample, feed_previous = True,
n_steps = time_steps_to_predict.size(0),
decoder = self.decoder,
input_decay_params = input_decay_params)
outputs = self.decoder(all_hiddens)
# Shift outputs for computing the loss -- we should compare the first output to the second data point, etc.
first_point = data[:,0,:]
outputs = utils.shift_outputs(outputs, first_point)
extra_info = {"first_point": (z0_mean.unsqueeze(0), z0_std.unsqueeze(0), z0_sample.unsqueeze(0))}
if self.use_binary_classif:
if self.classif_per_tp:
extra_info["label_predictions"] = self.classifier(all_hiddens)
else:
extra_info["label_predictions"] = self.classifier(z0_mean).reshape(1,-1)
# outputs shape: [n_traj_samples, n_traj, n_tp, n_dims]
return outputs, extra_info
| 14,730 | 32.177928 | 121 | py |
steer | steer-master/latent_ode/lib/create_latent_ode_model.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import os
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.latent_ode import LatentODE
from lib.encoder_decoder import *
from lib.diffeq_solver import DiffeqSolver
from torch.distributions.normal import Normal
from lib.ode_func import ODEFunc, ODEFunc_w_Poisson
#####################################################################################################
def create_LatentODE_model(args, input_dim, z0_prior, obsrv_std, device,
classif_per_tp = False, n_labels = 1):
dim = args.latents
if args.poisson:
lambda_net = utils.create_net(dim, input_dim,
n_layers = 1, n_units = args.units, nonlinear = nn.Tanh)
# ODE function produces the gradient for latent state and for poisson rate
ode_func_net = utils.create_net(dim * 2, args.latents * 2,
n_layers = args.gen_layers, n_units = args.units, nonlinear = nn.Tanh)
gen_ode_func = ODEFunc_w_Poisson(
input_dim = input_dim,
latent_dim = args.latents * 2,
ode_func_net = ode_func_net,
lambda_net = lambda_net,
device = device).to(device)
else:
dim = args.latents
ode_func_net = utils.create_net(dim, args.latents,
n_layers = args.gen_layers, n_units = args.units, nonlinear = nn.Tanh)
gen_ode_func = ODEFunc(
input_dim = input_dim,
latent_dim = args.latents,
ode_func_net = ode_func_net,
device = device).to(device)
z0_diffeq_solver = None
n_rec_dims = args.rec_dims
enc_input_dim = int(input_dim) * 2 # we concatenate the mask
gen_data_dim = input_dim
z0_dim = args.latents
if args.poisson:
z0_dim += args.latents # predict the initial poisson rate
if args.z0_encoder == "odernn":
ode_func_net = utils.create_net(n_rec_dims, n_rec_dims,
n_layers = args.rec_layers, n_units = args.units, nonlinear = nn.Tanh)
rec_ode_func = ODEFunc(
input_dim = enc_input_dim,
latent_dim = n_rec_dims,
ode_func_net = ode_func_net,
device = device).to(device)
z0_diffeq_solver = DiffeqSolver(enc_input_dim, rec_ode_func, "euler", args.latents,
odeint_rtol = 1e-3, odeint_atol = 1e-4, device = device)
encoder_z0 = Encoder_z0_ODE_RNN(n_rec_dims, enc_input_dim, z0_diffeq_solver,
z0_dim = z0_dim, n_gru_units = args.gru_units, device = device).to(device)
elif args.z0_encoder == "rnn":
encoder_z0 = Encoder_z0_RNN(z0_dim, enc_input_dim,
lstm_output_size = n_rec_dims, device = device).to(device)
else:
raise Exception("Unknown encoder for Latent ODE model: " + args.z0_encoder)
decoder = Decoder(args.latents, gen_data_dim).to(device)
diffeq_solver = DiffeqSolver(gen_data_dim, gen_ode_func, 'dopri5', args.latents,
odeint_rtol = 1e-3, odeint_atol = 1e-4, device = device)
model = LatentODE(
input_dim = gen_data_dim,
latent_dim = args.latents,
encoder_z0 = encoder_z0,
decoder = decoder,
diffeq_solver = diffeq_solver,
z0_prior = z0_prior,
device = device,
obsrv_std = obsrv_std,
use_poisson_proc = args.poisson,
use_binary_classif = args.classif,
linear_classifier = args.linear_classif,
classif_per_tp = classif_per_tp,
n_labels = n_labels,
train_classif_w_reconstr = (args.dataset == "physionet")
).to(device)
return model
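# A runnable sketch (illustrative hyperparameters, not settings used in the
# paper) of assembling a Latent ODE through the factory above; Namespace
# stands in for the argparse arguments that the training script normally
# passes in.
if __name__ == "__main__":
	from argparse import Namespace
	_args = Namespace(latents = 6, rec_dims = 10, rec_layers = 1,
		gen_layers = 1, units = 20, gru_units = 20, poisson = False,
		z0_encoder = "odernn", classif = False, linear_classif = False,
		dataset = "toy")
	_device = torch.device("cpu")
	_z0_prior = Normal(torch.Tensor([0.0]), torch.Tensor([1.]))
	_model = create_LatentODE_model(_args, input_dim = 3, z0_prior = _z0_prior,
		obsrv_std = torch.Tensor([0.01]), device = _device)
	print(sum(p.numel() for p in _model.parameters()), "parameters")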
| 3,325 | 30.377358 | 101 | py |
steer | steer-master/latent_ode/lib/ode_rnn.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.nn.modules.rnn import GRUCell, LSTMCell, RNNCellBase
from torch.distributions.normal import Normal
from torch.distributions import Independent
from torch.nn.parameter import Parameter
from lib.base_models import Baseline
class ODE_RNN(Baseline):
def __init__(self, input_dim, latent_dim, device = torch.device("cpu"),
z0_diffeq_solver = None, n_gru_units = 100, n_units = 100,
concat_mask = False, obsrv_std = 0.1, use_binary_classif = False,
classif_per_tp = False, n_labels = 1, train_classif_w_reconstr = False):
Baseline.__init__(self, input_dim, latent_dim, device = device,
obsrv_std = obsrv_std, use_binary_classif = use_binary_classif,
classif_per_tp = classif_per_tp,
n_labels = n_labels,
train_classif_w_reconstr = train_classif_w_reconstr)
ode_rnn_encoder_dim = latent_dim
self.ode_gru = Encoder_z0_ODE_RNN(
latent_dim = ode_rnn_encoder_dim,
input_dim = (input_dim) * 2, # input and the mask
z0_diffeq_solver = z0_diffeq_solver,
n_gru_units = n_gru_units,
device = device).to(device)
self.z0_diffeq_solver = z0_diffeq_solver
self.decoder = nn.Sequential(
nn.Linear(latent_dim, n_units),
nn.Tanh(),
nn.Linear(n_units, input_dim),)
utils.init_network_weights(self.decoder)
def get_reconstruction(self, time_steps_to_predict, data, truth_time_steps,
mask = None, n_traj_samples = None, mode = None):
if (len(truth_time_steps) != len(time_steps_to_predict)) or (torch.sum(time_steps_to_predict - truth_time_steps) != 0):
raise Exception("Extrapolation mode not implemented for ODE-RNN")
# time_steps_to_predict and truth_time_steps should be the same
assert(len(truth_time_steps) == len(time_steps_to_predict))
assert(mask is not None)
data_and_mask = data
if mask is not None:
data_and_mask = torch.cat([data, mask],-1)
_, _, latent_ys, _ = self.ode_gru.run_odernn(
data_and_mask, truth_time_steps, run_backwards = False)
latent_ys = latent_ys.permute(0,2,1,3)
last_hidden = latent_ys[:,:,-1,:]
#assert(torch.sum(int_lambda[0,0,-1,:] <= 0) == 0.)
outputs = self.decoder(latent_ys)
# Shift outputs for computing the loss -- we should compare the first output to the second data point, etc.
first_point = data[:,0,:]
outputs = utils.shift_outputs(outputs, first_point)
extra_info = {"first_point": (latent_ys[:,:,-1,:], 0.0, latent_ys[:,:,-1,:])}
if self.use_binary_classif:
if self.classif_per_tp:
extra_info["label_predictions"] = self.classifier(latent_ys)
else:
extra_info["label_predictions"] = self.classifier(last_hidden).squeeze(-1)
# outputs shape: [n_traj_samples, n_traj, n_tp, n_dims]
return outputs, extra_info
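# A usage sketch mirroring how this model is assembled in the training script
# (sizes illustrative; ODEFunc and DiffeqSolver come from lib.ode_func and
# lib.diffeq_solver):
#   ode_func_net = utils.create_net(latents, latents, n_layers = 1,
#   	n_units = 100, nonlinear = nn.Tanh)
#   rec_ode_func = ODEFunc(input_dim = input_dim, latent_dim = latents,
#   	ode_func_net = ode_func_net, device = device)
#   z0_diffeq_solver = DiffeqSolver(input_dim, rec_ode_func, "euler", latents,
#   	odeint_rtol = 1e-3, odeint_atol = 1e-4, device = device)
#   model = ODE_RNN(input_dim, latents, device = device,
#   	z0_diffeq_solver = z0_diffeq_solver, n_gru_units = 100)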
| 3,133 | 31.309278 | 121 | py |
steer | steer-master/latent_ode/lib/plotting.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import matplotlib
# matplotlib.use('TkAgg')
matplotlib.use('Agg')
import matplotlib.pyplot
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import os
from scipy.stats import kde
import numpy as np
import subprocess
import torch
import lib.utils as utils
import matplotlib.gridspec as gridspec
from lib.utils import get_device
from lib.encoder_decoder import *
from lib.rnn_baselines import *
from lib.ode_rnn import *
import torch.nn.functional as functional
from torch.distributions.normal import Normal
from lib.latent_ode import LatentODE
from lib.likelihood_eval import masked_gaussian_log_density
try:
	import umap
except ImportError:
	print("Couldn't import umap")
from generate_timeseries import Periodic_1d
from person_activity import PersonActivity
from lib.utils import compute_loss_all_batches
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
LARGE_SIZE = 22
def init_fonts(main_font_size = LARGE_SIZE):
plt.rc('font', size=main_font_size) # controls default text sizes
plt.rc('axes', titlesize=main_font_size) # fontsize of the axes title
plt.rc('axes', labelsize=main_font_size - 2) # fontsize of the x and y labels
plt.rc('xtick', labelsize=main_font_size - 2) # fontsize of the tick labels
plt.rc('ytick', labelsize=main_font_size - 2) # fontsize of the tick labels
plt.rc('legend', fontsize=main_font_size - 2) # legend fontsize
plt.rc('figure', titlesize=main_font_size) # fontsize of the figure title
def plot_trajectories(ax, traj, time_steps, min_y = None, max_y = None, title = "",
add_to_plot = False, label = None, add_legend = False, dim_to_show = 0,
linestyle = '-', marker = 'o', mask = None, color = None, linewidth = 1):
# expected shape of traj: [n_traj, n_timesteps, n_dims]
# The function will produce one line per trajectory (n_traj lines in total)
if not add_to_plot:
ax.cla()
ax.set_title(title)
ax.set_xlabel('Time')
ax.set_ylabel('x')
if min_y is not None:
ax.set_ylim(bottom = min_y)
if max_y is not None:
ax.set_ylim(top = max_y)
for i in range(traj.size()[0]):
d = traj[i].cpu().numpy()[:, dim_to_show]
ts = time_steps.cpu().numpy()
if mask is not None:
m = mask[i].cpu().numpy()[:, dim_to_show]
d = d[m == 1]
ts = ts[m == 1]
ax.plot(ts, d, linestyle = linestyle, label = label, marker=marker, color = color, linewidth = linewidth)
if add_legend:
ax.legend()
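# A usage sketch (illustrative data) -- one line is drawn per trajectory:
#   fig, ax = plt.subplots()
#   traj = torch.randn(2, 10, 1) # [n_traj, n_timesteps, n_dims]
#   plot_trajectories(ax, traj, torch.linspace(0., 1., 10), title = "demo")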
def plot_std(ax, traj, traj_std, time_steps, min_y = None, max_y = None, title = "",
add_to_plot = False, label = None, alpha=0.2, color = None):
# take only the first (and only?) dimension
mean_minus_std = (traj - traj_std).cpu().numpy()[:, :, 0]
mean_plus_std = (traj + traj_std).cpu().numpy()[:, :, 0]
for i in range(traj.size()[0]):
ax.fill_between(time_steps.cpu().numpy(), mean_minus_std[i], mean_plus_std[i],
alpha=alpha, color = color)
def plot_vector_field(ax, odefunc, latent_dim, device):
# Code borrowed from https://github.com/rtqichen/ffjord/blob/29c016131b702b307ceb05c70c74c6e802bb8a44/diagnostics/viz_toy.py
	K = 13j # complex step for np.mgrid: 13 evenly spaced points per axis
y, x = np.mgrid[-6:6:K, -6:6:K]
K = int(K.imag)
zs = torch.from_numpy(np.stack([x, y], -1).reshape(K * K, 2)).to(device, torch.float32)
if latent_dim > 2:
# Plots dimensions 0 and 2
zs = torch.cat((zs, torch.zeros(K * K, latent_dim-2).to(device) ), 1)
dydt = odefunc(0, zs)
dydt = -dydt.cpu().detach().numpy()
if latent_dim > 2:
dydt = dydt[:,:2]
mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
dydt = (dydt / mag)
dydt = dydt.reshape(K, K, 2)
ax.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], #color = dydt[:, :, 0],
cmap="coolwarm", linewidth=2)
# ax.quiver(
# x, y, dydt[:, :, 0], dydt[:, :, 1],
# np.exp(logmag), cmap="coolwarm", pivot="mid", scale = 100,
# )
ax.set_xlim(-6, 6)
ax.set_ylim(-6, 6)
#ax.axis("off")
def get_meshgrid(npts, int_y1, int_y2):
min_y1, max_y1 = int_y1
min_y2, max_y2 = int_y2
y1_grid = np.linspace(min_y1, max_y1, npts)
y2_grid = np.linspace(min_y2, max_y2, npts)
xx, yy = np.meshgrid(y1_grid, y2_grid)
flat_inputs = np.concatenate((np.expand_dims(xx.flatten(),1), np.expand_dims(yy.flatten(),1)), 1)
flat_inputs = torch.from_numpy(flat_inputs).float()
return xx, yy, flat_inputs
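# A quick runnable check (illustrative bounds): a 3x3 grid over
# [0, 1] x [-1, 1] yields 9 flattened 2-D points that a density model can
# score in a single batch.
if __name__ == "__main__":
	_xx, _yy, _flat = get_meshgrid(npts = 3, int_y1 = (0., 1.), int_y2 = (-1., 1.))
	print(_xx.shape, _flat.shape) # (3, 3) torch.Size([9, 2])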
def add_white(cmap):
cmaplist = [cmap(i) for i in range(cmap.N)]
	# force the first color entry to be white
cmaplist[0] = (1.,1.,1.,1.0)
# create the new map
cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
return cmap
class Visualizations():
def __init__(self, device):
self.init_visualization()
init_fonts(SMALL_SIZE)
self.device = device
def init_visualization(self):
self.fig = plt.figure(figsize=(12, 7), facecolor='white')
self.ax_traj = []
for i in range(1,4):
self.ax_traj.append(self.fig.add_subplot(2,3,i, frameon=False))
# self.ax_density = []
# for i in range(4,7):
# self.ax_density.append(self.fig.add_subplot(3,3,i, frameon=False))
#self.ax_samples_same_traj = self.fig.add_subplot(3,3,7, frameon=False)
self.ax_latent_traj = self.fig.add_subplot(2,3,4, frameon=False)
self.ax_vector_field = self.fig.add_subplot(2,3,5, frameon=False)
self.ax_traj_from_prior = self.fig.add_subplot(2,3,6, frameon=False)
self.plot_limits = {}
plt.show(block=False)
def set_plot_lims(self, ax, name):
if name not in self.plot_limits:
self.plot_limits[name] = (ax.get_xlim(), ax.get_ylim())
return
xlim, ylim = self.plot_limits[name]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
def draw_one_density_plot(self, ax, model, data_dict, traj_id,
multiply_by_poisson = False):
scale = 5
cmap = add_white(plt.cm.get_cmap('Blues', 9)) # plt.cm.BuGn_r
cmap2 = add_white(plt.cm.get_cmap('Reds', 9)) # plt.cm.BuGn_r
#cmap = plt.cm.get_cmap('viridis')
data = data_dict["data_to_predict"]
time_steps = data_dict["tp_to_predict"]
mask = data_dict["mask_predicted_data"]
observed_data = data_dict["observed_data"]
observed_time_steps = data_dict["observed_tp"]
observed_mask = data_dict["observed_mask"]
npts = 50
xx, yy, z0_grid = get_meshgrid(npts = npts, int_y1 = (-scale,scale), int_y2 = (-scale,scale))
z0_grid = z0_grid.to(get_device(data))
if model.latent_dim > 2:
z0_grid = torch.cat((z0_grid, torch.zeros(z0_grid.size(0), model.latent_dim-2)), 1)
if model.use_poisson_proc:
n_traj, n_dims = z0_grid.size()
# append a vector of zeros to compute the integral of lambda and also zeros for the first point of lambda
zeros = torch.zeros([n_traj, model.input_dim + model.latent_dim]).to(get_device(data))
z0_grid_aug = torch.cat((z0_grid, zeros), -1)
else:
z0_grid_aug = z0_grid
# Shape of sol_y [n_traj_samples, n_samples, n_timepoints, n_latents]
sol_y = model.diffeq_solver(z0_grid_aug.unsqueeze(0), time_steps)
if model.use_poisson_proc:
sol_y, log_lambda_y, int_lambda, _ = model.diffeq_solver.ode_func.extract_poisson_rate(sol_y)
assert(torch.sum(int_lambda[:,:,0,:]) == 0.)
assert(torch.sum(int_lambda[0,0,-1,:] <= 0) == 0.)
pred_x = model.decoder(sol_y)
# Plot density for one trajectory
one_traj = data[traj_id]
mask_one_traj = None
if mask is not None:
mask_one_traj = mask[traj_id].unsqueeze(0)
mask_one_traj = mask_one_traj.repeat(npts**2,1,1).unsqueeze(0)
ax.cla()
# Plot: prior
prior_density_grid = model.z0_prior.log_prob(z0_grid.unsqueeze(0)).squeeze(0)
# Sum the density over two dimensions
prior_density_grid = torch.sum(prior_density_grid, -1)
# =================================================
# Plot: p(x | y(t0))
masked_gaussian_log_density_grid = masked_gaussian_log_density(pred_x,
one_traj.repeat(npts**2,1,1).unsqueeze(0),
mask = mask_one_traj,
obsrv_std = model.obsrv_std).squeeze(-1)
# Plot p(t | y(t0))
if model.use_poisson_proc:
poisson_info = {}
poisson_info["int_lambda"] = int_lambda[:,:,-1,:]
poisson_info["log_lambda_y"] = log_lambda_y
poisson_log_density_grid = compute_poisson_proc_likelihood(
one_traj.repeat(npts**2,1,1).unsqueeze(0),
pred_x, poisson_info, mask = mask_one_traj)
poisson_log_density_grid = poisson_log_density_grid.squeeze(0)
# =================================================
# Plot: p(x , y(t0))
log_joint_density = prior_density_grid + masked_gaussian_log_density_grid
if multiply_by_poisson:
log_joint_density = log_joint_density + poisson_log_density_grid
density_grid = torch.exp(log_joint_density)
density_grid = torch.reshape(density_grid, (xx.shape[0], xx.shape[1]))
density_grid = density_grid.cpu().numpy()
ax.contourf(xx, yy, density_grid, cmap=cmap, alpha=1)
# =================================================
# Plot: q(y(t0)| x)
#self.ax_density.set_title("Red: q(y(t0) | x) Blue: p(x, y(t0))")
ax.set_xlabel('z1(t0)')
ax.set_ylabel('z2(t0)')
data_w_mask = observed_data[traj_id].unsqueeze(0)
if observed_mask is not None:
data_w_mask = torch.cat((data_w_mask, observed_mask[traj_id].unsqueeze(0)), -1)
z0_mu, z0_std = model.encoder_z0(
data_w_mask, observed_time_steps)
if model.use_poisson_proc:
z0_mu = z0_mu[:, :, :model.latent_dim]
z0_std = z0_std[:, :, :model.latent_dim]
q_z0 = Normal(z0_mu, z0_std)
q_density_grid = q_z0.log_prob(z0_grid)
# Sum the density over two dimensions
q_density_grid = torch.sum(q_density_grid, -1)
density_grid = torch.exp(q_density_grid)
density_grid = torch.reshape(density_grid, (xx.shape[0], xx.shape[1]))
density_grid = density_grid.cpu().numpy()
ax.contourf(xx, yy, density_grid, cmap=cmap2, alpha=0.3)
def draw_all_plots_one_dim(self, data_dict, model,
plot_name = "", save = False, experimentID = 0.):
data = data_dict["data_to_predict"]
time_steps = data_dict["tp_to_predict"]
mask = data_dict["mask_predicted_data"]
observed_data = data_dict["observed_data"]
observed_time_steps = data_dict["observed_tp"]
observed_mask = data_dict["observed_mask"]
device = get_device(time_steps)
time_steps_to_predict = time_steps
if isinstance(model, LatentODE):
# sample at the original time points
time_steps_to_predict = utils.linspace_vector(time_steps[0], time_steps[-1], 100).to(device)
reconstructions, info = model.get_reconstruction(time_steps_to_predict,
observed_data, observed_time_steps, mask = observed_mask, n_traj_samples = 10)
n_traj_to_show = 3
		# plot only the first n_traj_to_show trajectories
data_for_plotting = observed_data[:n_traj_to_show]
mask_for_plotting = observed_mask[:n_traj_to_show]
reconstructions_for_plotting = reconstructions.mean(dim=0)[:n_traj_to_show]
reconstr_std = reconstructions.std(dim=0)[:n_traj_to_show]
dim_to_show = 0
		max_y = max(
			data_for_plotting[:,:,dim_to_show].cpu().numpy().max(),
			reconstructions[:,:,:,dim_to_show].cpu().numpy().max())
		min_y = min(
			data_for_plotting[:,:,dim_to_show].cpu().numpy().min(),
			reconstructions[:,:,:,dim_to_show].cpu().numpy().min())
############################################
		# Plot reconstructions, true posterior and approximate posterior
cmap = plt.cm.get_cmap('Set1')
for traj_id in range(3):
# Plot observations
plot_trajectories(self.ax_traj[traj_id],
data_for_plotting[traj_id].unsqueeze(0), observed_time_steps,
mask = mask_for_plotting[traj_id].unsqueeze(0),
min_y = min_y, max_y = max_y, #title="True trajectories",
marker = 'o', linestyle='', dim_to_show = dim_to_show,
color = cmap(2))
# Plot reconstructions
plot_trajectories(self.ax_traj[traj_id],
reconstructions_for_plotting[traj_id].unsqueeze(0), time_steps_to_predict,
min_y = min_y, max_y = max_y, title="Sample {} (data space)".format(traj_id), dim_to_show = dim_to_show,
add_to_plot = True, marker = '', color = cmap(3), linewidth = 3)
# Plot variance estimated over multiple samples from approx posterior
plot_std(self.ax_traj[traj_id],
reconstructions_for_plotting[traj_id].unsqueeze(0), reconstr_std[traj_id].unsqueeze(0),
time_steps_to_predict, alpha=0.5, color = cmap(3))
self.set_plot_lims(self.ax_traj[traj_id], "traj_" + str(traj_id))
# Plot true posterior and approximate posterior
# self.draw_one_density_plot(self.ax_density[traj_id],
# model, data_dict, traj_id = traj_id,
# multiply_by_poisson = False)
# self.set_plot_lims(self.ax_density[traj_id], "density_" + str(traj_id))
# self.ax_density[traj_id].set_title("Sample {}: p(z0) and q(z0 | x)".format(traj_id))
############################################
# Get several samples for the same trajectory
# one_traj = data_for_plotting[:1]
# first_point = one_traj[:,0]
# samples_same_traj, _ = model.get_reconstruction(time_steps_to_predict,
# observed_data[:1], observed_time_steps, mask = observed_mask[:1], n_traj_samples = 5)
# samples_same_traj = samples_same_traj.squeeze(1)
# plot_trajectories(self.ax_samples_same_traj, samples_same_traj, time_steps_to_predict, marker = '')
# plot_trajectories(self.ax_samples_same_traj, one_traj, time_steps, linestyle = "",
# label = "True traj", add_to_plot = True, title="Reconstructions for the same trajectory (data space)")
############################################
# Plot trajectories from prior
if isinstance(model, LatentODE):
torch.manual_seed(1991)
np.random.seed(1991)
traj_from_prior = model.sample_traj_from_prior(time_steps_to_predict, n_traj_samples = 3)
			# Since n_traj = 1 here and n_traj_samples is the requested number of
			# samples from the prior, squeeze the n_traj dimension
traj_from_prior = traj_from_prior.squeeze(1)
plot_trajectories(self.ax_traj_from_prior, traj_from_prior, time_steps_to_predict,
marker = '', linewidth = 3)
self.ax_traj_from_prior.set_title("Samples from prior (data space)", pad = 20)
#self.set_plot_lims(self.ax_traj_from_prior, "traj_from_prior")
################################################
# Plot z0
# first_point_mu, first_point_std, first_point_enc = info["first_point"]
# dim1 = 0
# dim2 = 1
# self.ax_z0.cla()
# # first_point_enc shape: [1, n_traj, n_dims]
# self.ax_z0.scatter(first_point_enc.cpu()[0,:,dim1], first_point_enc.cpu()[0,:,dim2])
# self.ax_z0.set_title("Encodings z0 of all test trajectories (latent space)")
# self.ax_z0.set_xlabel('dim {}'.format(dim1))
# self.ax_z0.set_ylabel('dim {}'.format(dim2))
################################################
# Show vector field
self.ax_vector_field.cla()
plot_vector_field(self.ax_vector_field, model.diffeq_solver.ode_func, model.latent_dim, device)
self.ax_vector_field.set_title("Slice of vector field (latent space)", pad = 20)
self.set_plot_lims(self.ax_vector_field, "vector_field")
#self.ax_vector_field.set_ylim((-0.5, 1.5))
################################################
# Plot trajectories in the latent space
# shape before [1, n_traj, n_tp, n_latent_dims]
# Take only the first sample from approx posterior
latent_traj = info["latent_traj"][0,:n_traj_to_show]
# shape before permute: [1, n_tp, n_latent_dims]
self.ax_latent_traj.cla()
cmap = plt.cm.get_cmap('Accent')
n_latent_dims = latent_traj.size(-1)
custom_labels = {}
for i in range(n_latent_dims):
col = cmap(i)
plot_trajectories(self.ax_latent_traj, latent_traj, time_steps_to_predict,
title="Latent trajectories z(t) (latent space)", dim_to_show = i, color = col,
marker = '', add_to_plot = True,
linewidth = 3)
custom_labels['dim ' + str(i)] = Line2D([0], [0], color=col)
self.ax_latent_traj.set_ylabel("z")
self.ax_latent_traj.set_title("Latent trajectories z(t) (latent space)", pad = 20)
self.ax_latent_traj.legend(custom_labels.values(), custom_labels.keys(), loc = 'lower left')
self.set_plot_lims(self.ax_latent_traj, "latent_traj")
################################################
self.fig.tight_layout()
plt.draw()
if save:
dirname = "plots/" + str(experimentID) + "/"
os.makedirs(dirname, exist_ok=True)
self.fig.savefig(dirname + plot_name)
| 16,053 | 33.673866 | 125 | py |
steer | steer-master/latent_ode/lib/diffeq_solver.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import time
import numpy as np
import torch
import torch.nn as nn
import lib.utils as utils
from torch.distributions.multivariate_normal import MultivariateNormal
# git clone https://github.com/rtqichen/torchdiffeq.git
#from torchdiffeq import odeint as odeint
from torchdiffeq import odeint_stochastic_end_v3 as odeint
#from torchdiffeq import odeint_stochastic_end_v2 as odeint
#from torchdiffeq import odeint_stochastic_end_v2_inference as odeint_inference
#####################################################################################################
class DiffeqSolver(nn.Module):
def __init__(self, input_dim, ode_func, method, latents,
odeint_rtol=1e-4, odeint_atol=1e-5, device=torch.device("cpu")):
super(DiffeqSolver, self).__init__()
self.ode_method = method
self.latents = latents
self.device = device
self.ode_func = ode_func
self.odeint_rtol = odeint_rtol
self.odeint_atol = odeint_atol
def forward(self, first_point, time_steps_to_predict, backwards=False):
"""
# Decode the trajectory through ODE Solver
"""
n_traj_samples, n_traj = first_point.size()[0], first_point.size()[1]
n_dims = first_point.size()[-1]
#print("time_steps_to_predict")
#print(time_steps_to_predict)
		if len(time_steps_to_predict.size()) == 2:
pred_y = odeint(self.ode_func, first_point, time_steps_to_predict,
rtol=self.odeint_rtol, atol=self.odeint_atol, method=self.ode_method,min_length=0.001) #,mode='train')
else:
pred_y = odeint(self.ode_func, first_point, time_steps_to_predict,
rtol=self.odeint_rtol, atol=self.odeint_atol, method=self.ode_method,min_length=0.001) # ,mode='test')
pred_y = pred_y.permute(1, 2, 0, 3)
assert(torch.mean(pred_y[:, :, 0, :] - first_point) < 0.001)
assert(pred_y.size()[0] == n_traj_samples)
assert(pred_y.size()[1] == n_traj)
return pred_y
def sample_traj_from_prior(self, starting_point_enc, time_steps_to_predict,
n_traj_samples=1):
"""
# Decode the trajectory through ODE Solver using samples from the prior
time_steps_to_predict: time steps at which we want to sample the new trajectory
"""
func = self.ode_func.sample_next_point_from_prior
pred_y = odeint(func, starting_point_enc, time_steps_to_predict,
rtol=self.odeint_rtol, atol=self.odeint_atol, method=self.ode_method)
# shape: [n_traj_samples, n_traj, n_tp, n_dim]
pred_y = pred_y.permute(1, 2, 0, 3)
return pred_y
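# A minimal sketch (illustrative sizes; assumes this repo's forked
# torchdiffeq and the lib package are importable). first_point is
# [n_traj_samples, n_traj, n_dims]; the solver returns
# [n_traj_samples, n_traj, n_timepoints, n_dims].
if __name__ == "__main__":
	from lib.ode_func import ODEFunc
	_latent = 4
	_net = utils.create_net(_latent, _latent, n_layers = 1, n_units = 16,
		nonlinear = nn.Tanh)
	_func = ODEFunc(input_dim = _latent, latent_dim = _latent, ode_func_net = _net)
	_solver = DiffeqSolver(_latent, _func, "euler", _latent)
	_z0 = torch.randn(1, 3, _latent)
	_tp = torch.linspace(0., 1., 10)
	print(_solver(_z0, _tp).shape) # torch.Size([1, 3, 10, 4])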
| 2,845 | 37.986301 | 126 | py |
steer | steer-master/latent_ode/lib/ode_func.py | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils.spectral_norm import spectral_norm
import lib.utils as utils
#####################################################################################################
class ODEFunc(nn.Module):
def __init__(self, input_dim, latent_dim, ode_func_net, device = torch.device("cpu")):
"""
input_dim: dimensionality of the input
		latent_dim: dimensionality used for ODE. Analog of a continuous latent state
"""
super(ODEFunc, self).__init__()
self.input_dim = input_dim
self.device = device
utils.init_network_weights(ode_func_net)
self.gradient_net = ode_func_net
def forward(self, t_local, y, backwards = False):
"""
Perform one step in solving ODE. Given current data point y and current time point t_local, returns gradient dy/dt at this time point
t_local: current time point
y: value at the current time point
"""
grad = self.get_ode_gradient_nn(t_local, y)
if backwards:
grad = -grad
return grad
def get_ode_gradient_nn(self, t_local, y):
return self.gradient_net(y)
def sample_next_point_from_prior(self, t_local, y):
"""
t_local: current time point
y: value at the current time point
"""
return self.get_ode_gradient_nn(t_local, y)
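# A minimal runnable sketch (illustrative sizes): the wrapped network maps the
# current latent state straight to dy/dt; the time argument is unused because
# the dynamics are autonomous.
if __name__ == "__main__":
	_net = utils.create_net(5, 5, n_layers = 1, n_units = 16, nonlinear = nn.Tanh)
	_func = ODEFunc(input_dim = 5, latent_dim = 5, ode_func_net = _net)
	print(_func(torch.tensor(0.), torch.randn(2, 5)).shape) # torch.Size([2, 5])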
#####################################################################################################
class ODEFunc_w_Poisson(ODEFunc):
def __init__(self, input_dim, latent_dim, ode_func_net,
lambda_net, device = torch.device("cpu")):
"""
input_dim: dimensionality of the input
		latent_dim: dimensionality used for ODE. Analog of a continuous latent state
"""
super(ODEFunc_w_Poisson, self).__init__(input_dim, latent_dim, ode_func_net, device)
self.latent_ode = ODEFunc(input_dim = input_dim,
latent_dim = latent_dim,
ode_func_net = ode_func_net,
device = device)
self.latent_dim = latent_dim
self.lambda_net = lambda_net
		# The computation of the Poisson likelihood can become numerically unstable.
		# The integral of lambda(t) dt can take large values -- in fact, it equals
		# the expected number of events on the interval [0, T] -- and the exponent
		# of lambda can also take large values.
		# So we divide lambda by a constant and then multiply the integral of
		# lambda by the same constant.
self.const_for_lambda = torch.Tensor([100.]).to(device)
def extract_poisson_rate(self, augmented, final_result = True):
y, log_lambdas, int_lambda = None, None, None
assert(augmented.size(-1) == self.latent_dim + self.input_dim)
latent_lam_dim = self.latent_dim // 2
if len(augmented.size()) == 3:
int_lambda = augmented[:,:,-self.input_dim:]
y_latent_lam = augmented[:,:,:-self.input_dim]
log_lambdas = self.lambda_net(y_latent_lam[:,:,-latent_lam_dim:])
y = y_latent_lam[:,:,:-latent_lam_dim]
elif len(augmented.size()) == 4:
int_lambda = augmented[:,:,:,-self.input_dim:]
y_latent_lam = augmented[:,:,:,:-self.input_dim]
log_lambdas = self.lambda_net(y_latent_lam[:,:,:,-latent_lam_dim:])
y = y_latent_lam[:,:,:,:-latent_lam_dim]
		# Multiply the integral over lambda by a constant
# only when we have finished the integral computation (i.e. this is not a call in get_ode_gradient_nn)
if final_result:
int_lambda = int_lambda * self.const_for_lambda
# Latents for performing reconstruction (y) have the same size as latent poisson rate (log_lambdas)
assert(y.size(-1) == latent_lam_dim)
return y, log_lambdas, int_lambda, y_latent_lam
def get_ode_gradient_nn(self, t_local, augmented):
y, log_lam, int_lambda, y_latent_lam = self.extract_poisson_rate(augmented, final_result = False)
dydt_dldt = self.latent_ode(t_local, y_latent_lam)
log_lam = log_lam - torch.log(self.const_for_lambda)
return torch.cat((dydt_dldt, torch.exp(log_lam)),-1)
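# Layout of the augmented state handled above (a sketch; sizes illustrative):
# with latent_dim = 2 * latent_lam_dim and input_dim = D,
#   augmented = [ y (latent_lam_dim) | y_lambda (latent_lam_dim) | int_lambda (D) ]
# extract_poisson_rate splits off the running integral of the Poisson rate and
# decodes log-lambda from the second half of the latent state.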
| 3,935 | 32.641026 | 135 | py |