repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
Alpha-IoU | Alpha-IoU-main/utils/metrics.py | # Model validation metrics
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
from . import general
def fitness(x):
    """Model fitness: weighted sum of [P, R, mAP@0.5, mAP@0.5:0.95] per row of x."""
    # mAP@0.5:0.95 dominates the score; P and R are ignored
    weights = np.array([0.0, 0.0, 0.1, 0.9])
    return (x[:, :4] * weights).sum(1)
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:  True positives (nparray, nx1 or nx10), one column per IoU threshold.
        conf:  Objectness value from 0-1 (nparray).
        pred_cls:  Predicted object classes (nparray).
        target_cls:  True object classes (nparray).
        plot:  Plot precision-recall curve at mAP@0.5
        save_dir:  Plot save directory
        names:  Class names used for plot legends
    # Returns
        (p, r, ap, f1, unique_classes): per-class precision/recall/F1 sampled at
        the confidence with maximum mean F1, AP per class per IoU column, and
        the sorted ground-truth class ids (int32).
    """
    # Sort by objectness (descending) so cumulative sums sweep the confidence threshold
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
    # Find unique classes present in the ground truth
    unique_classes = np.unique(target_cls)
    nc = unique_classes.shape[0]  # number of classes, number of detections
    # Create Precision-Recall curve and compute AP for each class
    px, py = np.linspace(0, 1, 1000), []  # for plotting
    ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = (target_cls == c).sum()  # number of labels
        n_p = i.sum()  # number of predictions
        if n_p == 0 or n_l == 0:
            # no predictions or no labels for this class -> row stays zero
            continue
        else:
            # Accumulate FPs and TPs along the sorted-confidence axis
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)
            # Recall
            recall = tpc / (n_l + 1e-16)  # recall curve
            r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases
            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score
            # AP from recall-precision curve (one AP per IoU threshold column)
            for j in range(tp.shape[1]):
                ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
                if plot and j == 0:
                    py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5
    # Compute F1 (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)
    if plot:
        plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
        plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
        plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
        plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')
    i = f1.mean(0).argmax()  # max F1 index
    return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')
def compute_ap(recall, precision):
    """Compute average precision from one recall/precision curve.

    Arguments:
        recall: the recall curve (array-like)
        precision: the precision curve (array-like)
    Returns:
        (ap, mpre, mrec): average precision plus the sentinel-padded
        precision envelope and padded recall curve.
    """
    # Pad with sentinel values so the curve spans the whole recall axis
    mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01]))
    mpre = np.concatenate(([1.], precision, [0.]))

    # Precision envelope: make precision monotonically decreasing
    # (running maximum taken from the right)
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under the curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        # COCO-style 101-point interpolation
        x = np.linspace(0, 1, 101)
        ap = np.trapz(np.interp(x, mrec, mpre), x)
    else:  # 'continuous': sum rectangles where the recall value changes
        i = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])

    return ap, mpre, mrec
class ConfusionMatrix:
    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
    def __init__(self, nc, conf=0.25, iou_thres=0.45):
        # nc: number of classes; conf: min detection confidence kept;
        # iou_thres: IoU needed to match a detection to a label.
        # The extra row/column holds background FP / background FN counts.
        self.matrix = np.zeros((nc + 1, nc + 1))
        self.nc = nc  # number of classes
        self.conf = conf
        self.iou_thres = iou_thres

    def process_batch(self, detections, labels):
        """
        Update the confusion matrix from one batch of detections vs labels.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
            labels (Array[M, 5]), class, x1, y1, x2, y2
        Returns:
            None, updates confusion matrix accordingly
        """
        detections = detections[detections[:, 4] > self.conf]  # confidence filter
        gt_classes = labels[:, 0].int()
        detection_classes = detections[:, 5].int()
        iou = general.box_iou(labels[:, 1:], detections[:, :4])
        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            # matches: (label_idx, detection_idx, iou); greedily deduplicated by
            # descending IoU so each detection and each label appears at most once
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))
        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(np.int16)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[gc, detection_classes[m1[j]]] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # background FP
        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[dc, self.nc] += 1  # background FN

    def matrix(self):
        # NOTE: shadowed by the `self.matrix` ndarray attribute assigned in
        # __init__, so this accessor is unreachable on instances; kept only for
        # source compatibility -- read the `matrix` attribute directly.
        return self.matrix

    def plot(self, save_dir='', names=()):
        # Save a normalized confusion-matrix heatmap to save_dir (best effort).
        try:
            import seaborn as sn

            array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6)  # column-normalize
            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)
            fig = plt.figure(figsize=(12, 9), tight_layout=True)
            sn.set(font_scale=1.0 if self.nc < 50 else 0.8)  # for label size
            labels = (0 < len(names) < 99) and len(names) == self.nc  # apply names to ticklabels
            # list(names) fixes a TypeError when names is a tuple (tuple + list),
            # which previously made every labelled plot silently fall back to "auto"
            sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
                       xticklabels=list(names) + ['background FP'] if labels else "auto",
                       yticklabels=list(names) + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
            fig.axes[0].set_xlabel('True')
            fig.axes[0].set_ylabel('Predicted')
            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
        except Exception as e:
            # plotting is best-effort (seaborn may be missing); report instead of
            # silently swallowing the error as before
            print(f'WARNING: ConfusionMatrix plot failure: {e}')

    def print(self):
        # Dump the raw count matrix, one row per predicted class (+ background).
        for i in range(self.nc + 1):
            print(' '.join(map(str, self.matrix[i])))
# Plots ----------------------------------------------------------------------------------------------------------------
def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()):
    """Draw the precision-recall curves (one per class) plus the class mean."""
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    show_legend = 0 < len(names) < 21  # per-class legend only for small class counts
    if show_legend:
        for ci, curve in enumerate(py.T):
            ax.plot(px, curve, linewidth=1, label=f'{names[ci]} {ap[ci, 0]:.3f}')  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir), dpi=250)
def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'):
    """Draw a metric-vs-confidence curve (one line per class) plus the class mean."""
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)

    if 0 < len(names) < 21:  # per-class legend only for small class counts
        for ci, curve in enumerate(py):
            ax.plot(px, curve, linewidth=1, label=f'{names[ci]}')  # plot(confidence, metric)
    else:
        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)

    y = py.mean(0)
    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir), dpi=250)
| 8,969 | 39.044643 | 120 | py |
Alpha-IoU | Alpha-IoU-main/utils/activations.py | # Activation functions
import torch
import torch.nn as nn
import torch.nn.functional as F
# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
    """SiLU/Swish activation, x * sigmoid(x) (https://arxiv.org/pdf/1606.08415.pdf)."""
    @staticmethod
    def forward(x):
        return torch.sigmoid(x) * x
class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
    """Hard-swish: x * hardtanh(x + 3, 0, 6) / 6 (torchscript/CoreML/ONNX safe)."""
    @staticmethod
    def forward(x):
        # return x * F.hardsigmoid(x)  # for torchscript and CoreML
        return F.hardtanh(x + 3, 0., 6.) * x / 6.  # for torchscript, CoreML and ONNX
class MemoryEfficientSwish(nn.Module):
    """Swish activation that recomputes sigmoid in backward instead of storing it."""
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x * torch.sigmoid(x)

        @staticmethod
        def backward(ctx, grad_output):
            (x,) = ctx.saved_tensors
            s = torch.sigmoid(x)
            # d/dx [x * s(x)] = s(x) * (1 + x * (1 - s(x)))
            return grad_output * (s * (1 + x * (1 - s)))

    def forward(self, x):
        return self.F.apply(x)
# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
    """Mish activation, x * tanh(softplus(x)) (https://github.com/digantamisra98/Mish)."""
    @staticmethod
    def forward(x):
        return torch.tanh(F.softplus(x)) * x
class MemoryEfficientMish(nn.Module):
    """Mish activation that recomputes intermediates in backward to save memory."""
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

        @staticmethod
        def backward(ctx, grad_output):
            (x,) = ctx.saved_tensors
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
            # d/dx [x * fx] = fx + x * sx * (1 - fx^2)
            return grad_output * (fx + x * sx * (1 - fx * fx))

    def forward(self, x):
        return self.F.apply(x)
# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
class FReLU(nn.Module):
    """Funnel ReLU: max(x, T(x)) where T is a depthwise conv + BN (https://arxiv.org/abs/2007.11824)."""
    def __init__(self, c1, k=3):  # ch_in, kernel
        super().__init__()
        # padding k // 2 keeps the spatial size for any odd kernel; the original
        # hard-coded padding 1, which broke torch.max() (shape mismatch) for k != 3.
        # Identical to the original for the default k=3.
        self.conv = nn.Conv2d(c1, c1, k, 1, k // 2, groups=c1, bias=False)
        self.bn = nn.BatchNorm2d(c1)

    def forward(self, x):
        # elementwise max of the identity and the learned spatial condition
        return torch.max(x, self.bn(self.conv(x)))
| 2,248 | 29.808219 | 120 | py |
Alpha-IoU | Alpha-IoU-main/utils/general.py | # General utils
import glob
import logging
import math
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path
import cv2
import numpy as np
import torch
import torchvision
import yaml
from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds
# Settings
# NOTE: these run at import time and mutate process-wide global state.
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads
def set_logging(rank=-1):
    """Configure root logging: INFO on the main process (rank -1/0), WARN elsewhere."""
    level = logging.INFO if rank in [-1, 0] else logging.WARN
    logging.basicConfig(format="%(message)s", level=level)
def init_seeds(seed=0):
    # Initialize random number generator (RNG) seeds for python's random,
    # numpy, and torch so runs are reproducible.
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)  # presumably seeds torch/cudnn -- see utils.torch_utils
def get_latest_run(search_dir='.'):
    """Return the most recently created 'last*.pt' checkpoint under search_dir
    (recursive), or '' if none exist (used by --resume)."""
    candidates = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    if not candidates:
        return ''
    return max(candidates, key=os.path.getctime)
def isdocker():
    """Return True when the environment looks like a Docker container
    (detected via the conventional /workspace mount)."""
    return Path('/workspace').exists()  # or Path('/.dockerenv').exists()
def check_online():
    """Best-effort internet connectivity check (TCP connect to 1.1.1.1:443, 5 s timeout)."""
    import socket
    try:
        socket.create_connection(("1.1.1.1", 443), 5)  # check host accesability
    except OSError:
        return False
    return True
def check_git_status():
    # Recommend 'git pull' if the local checkout is behind origin/master.
    # Best effort: any failure (not a repo, Docker, offline, git errors) is
    # caught and printed rather than raised.
    print(colorstr('github: '), end='')
    try:
        assert Path('.git').exists(), 'skipping check (not a git repository)'
        assert not isdocker(), 'skipping check (Docker image)'
        assert check_online(), 'skipping check (offline)'
        cmd = 'git fetch && git config --get remote.origin.url'
        url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git')  # github repo url
        branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out
        n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True))  # commits behind
        if n > 0:
            s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
                f"Use 'git pull' to update or 'git clone {url}' to download latest."
        else:
            s = f'up to date with {url} ✅'
        # strip non-ascii (emoji) on Windows consoles, which may not render them
        print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)
    except Exception as e:
        print(e)
def check_requirements(file='requirements.txt', exclude=()):
    # Check installed dependencies meet the pinned requirements in `file`,
    # skipping any package names listed in `exclude`. Raises
    # DistributionNotFound / VersionConflict when a requirement is not met.
    import pkg_resources
    requirements = [f'{x.name}{x.specifier}' for x in pkg_resources.parse_requirements(Path(file).open())
                    if x.name not in exclude]
    pkg_resources.require(requirements)  # DistributionNotFound or VersionConflict exception if requirements not met
def check_img_size(img_size, s=32):
    """Return img_size rounded up to a multiple of stride s, warning when it changes."""
    new_size = make_divisible(img_size, int(s))  # ceil to a gs-multiple
    if new_size == img_size:
        return img_size
    print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size
def check_imshow():
    # Check if the environment supports GUI image displays via cv2.imshow();
    # returns True/False and prints a warning instead of raising.
    try:
        assert not isdocker(), 'cv2.imshow() is disabled in Docker environments'
        cv2.imshow('test', np.zeros((1, 1, 3)))  # open a 1x1 test window
        cv2.waitKey(1)
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        return True
    except Exception as e:
        print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
        return False
def check_file(file):
    """Return `file` if it exists (or is ''); otherwise search the tree for a
    unique match and return that path. Raises AssertionError when the file is
    missing or the name is ambiguous."""
    if os.path.isfile(file) or file == '':
        return file
    found = glob.glob('./**/' + file, recursive=True)  # find file
    assert len(found), 'File Not Found: %s' % file  # assert file was found
    assert len(found) == 1, "Multiple files match '%s', specify exact path: %s" % (file, found)  # assert unique
    return found[0]  # return file
def check_dataset(dict):
    # Download the dataset if not found locally. `dict` is a data.yaml mapping
    # with optional keys: 'val' (path or list of paths to validation data) and
    # 'download' (zip URL or bash command used to auto-download).
    val, s = dict.get('val'), dict.get('download')
    if val and len(val):
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and len(s):  # download script
                print('Downloading %s ...' % s)
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    torch.hub.download_url_to_file(s, f)
                    r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
                else:  # bash script
                    r = os.system(s)
                print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
            else:
                raise Exception('Dataset not found.')
def make_divisible(x, divisor):
    """Return the smallest multiple of `divisor` that is >= x."""
    return divisor * math.ceil(x / divisor)
def clean_str(s):
    """Return `s` with shell/markup special characters replaced by underscores."""
    return re.sub("[|@#!¡·$€%&()=?¿^*;:,¨´><+]", "_", s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
    """Return f(x): half-cosine ramp from y1 (at x=0) to y2 (at x=steps)."""
    def ramp(x):
        return ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
    return ramp
def colorstr(*input):
    # Colors a string with ANSI escape codes, e.g. colorstr('blue', 'hello world').
    # With a single argument, defaults to bold blue.
    # https://en.wikipedia.org/wiki/ANSI_escape_code
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    prefix = ''.join(colors[x] for x in args)
    return prefix + f'{string}' + colors['end']
def labels_to_class_weights(labels, nc=80):
    """Inverse-frequency class weights from training labels.

    labels: list of per-image (n, 5) arrays [class, x, y, w, h]; nc: class count.
    Returns a normalized float64 torch tensor of shape (nc,) (empty tensor when
    no labels are loaded).
    """
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()
    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]; np.int was removed in NumPy >= 1.24
    weights = np.bincount(classes, minlength=nc)  # occurrences per class
    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start
    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    """Per-image sampling weights: for each image, the sum of class_weights over
    the classes in that image's labels (used to oversample rare-class images)."""
    # np.int was removed in NumPy >= 1.24; plain int is the supported spelling
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights
def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    """Return the mapping from COCO 80-class indices to the 91-class paper ids."""
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33,
            34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
            62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
def xyxy2xywh(x):
    """Convert nx4 boxes [x1, y1, x2, y2] -> [xc, yc, w, h]
    (top-left/bottom-right corners to center + size)."""
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    out[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    out[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    out[:, 2] = x[:, 2] - x[:, 0]  # width
    out[:, 3] = x[:, 3] - x[:, 1]  # height
    return out
def xywh2xyxy(x):
    """Convert nx4 boxes [xc, yc, w, h] -> [x1, y1, x2, y2]
    (center + size to top-left/bottom-right corners)."""
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    half_w, half_h = x[:, 2] / 2, x[:, 3] / 2
    out[:, 0] = x[:, 0] - half_w  # top left x
    out[:, 1] = x[:, 1] - half_h  # top left y
    out[:, 2] = x[:, 0] + half_w  # bottom right x
    out[:, 3] = x[:, 1] + half_h  # bottom right y
    return out
def xywh2xyx1y1(x):
    """Convert nx4 boxes [xc, yc, w, h] -> [xc, yc, x1, y1]: columns 0-1 keep the
    center, columns 2-3 become the top-left corner."""
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    out[:, 2] = x[:, 0] - x[:, 2] / 2  # top left x
    out[:, 3] = x[:, 1] - x[:, 3] / 2  # top left y
    return out
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    """Convert nx4 normalized [xc, yc, w, h] boxes to pixel [x1, y1, x2, y2],
    scaling by image size (w, h) and shifting by letterbox padding (padw, padh)."""
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    out[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top left x
    out[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top left y
    out[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom right x
    out[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom right y
    return out
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
    """Convert normalized (n, 2) segment points to pixel coordinates,
    scaling by (w, h) and shifting by letterbox padding."""
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    out[:, 0] = w * x[:, 0] + padw  # x
    out[:, 1] = h * x[:, 1] + padh  # y
    return out
def segment2box(segment, width=640, height=640):
    """Convert one (n, 2) segment to a single xyxy box over its in-image points.

    Points outside [0, width] x [0, height] are discarded first; returns
    np.zeros((1, 4)) when no points remain. The original used `any(x)` as the
    emptiness test, which also returned zeros for valid segments lying entirely
    on the x == 0 edge; `len(x)` tests emptiness correctly.
    """
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y = x[inside], y[inside]
    return np.array([x.min(), y.min(), x.max(), y.max()]) if len(x) else np.zeros((1, 4))  # xyxy
def segments2boxes(segments):
    """Convert segment labels to box labels: (cls, xy1, xy2, ...) -> (cls, xywh)."""
    boxes = [[s[:, 0].min(), s[:, 1].min(), s[:, 0].max(), s[:, 1].max()] for s in segments]  # xyxy per segment
    return xyxy2xywh(np.array(boxes))  # cls, xywh
def resample_segments(segments, n=1000):
    """Resample each (m, 2) segment in-place to exactly n evenly spaced points
    (linear interpolation over the point index); returns the same list."""
    for idx, s in enumerate(segments):
        t = np.linspace(0, len(s) - 1, n)  # fractional indices of the new samples
        tp = np.arange(len(s))
        cols = [np.interp(t, tp, s[:, j]) for j in range(2)]
        segments[idx] = np.stack(cols, axis=1)  # (n, 2) segment xy
    return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    """Rescale xyxy coords in-place from the letterboxed img1_shape back to
    the original img0_shape, then clip them to the image bounds."""
    if ratio_pad is None:  # derive gain and padding from the two shapes
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain, pad = ratio_pad[0][0], ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # undo x padding
    coords[:, [1, 3]] -= pad[1]  # undo y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
def clip_coords(boxes, img_shape):
    """Clamp xyxy boxes in-place to the image bounds (height, width)."""
    h, w = img_shape[0], img_shape[1]
    boxes[:, 0].clamp_(0, w)  # x1
    boxes[:, 1].clamp_(0, h)  # y1
    boxes[:, 2].clamp_(0, w)  # x2
    boxes[:, 3].clamp_(0, h)  # y2
def bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
    """IoU (optionally GIoU / DIoU / CIoU) of box1 (4,) against box2 (n, 4).

    Boxes are xywh by default; pass x1y1x2y2=True for corner format.
    If several variant flags are set, DIoU takes precedence over CIoU over GIoU.
    """
    box2 = box2.T
    # Corner coordinates of both boxes
    if x1y1x2y2:
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # xywh -> xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection and union
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps
    iou = inter / union

    if not (GIoU or DIoU or CIoU):
        return iou  # torch.log(iou+eps) or iou

    # Smallest enclosing (convex) box
    cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)
    ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)
    if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
        c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
        rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
                (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center distance squared
        if DIoU:
            return iou - rho2 / c2  # DIoU
        # CIoU adds an aspect-ratio consistency term
        # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
        v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
        with torch.no_grad():
            alpha_ciou = v / (v - iou + (1 + eps))
        return iou - (rho2 / c2 + v * alpha_ciou)  # CIoU
    # GIoU https://arxiv.org/pdf/1902.09630.pdf
    c_area = cw * ch + eps  # convex area
    return iou - (c_area - union) / c_area
def bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9):
    # Returns the alpha-IoU of box1 to box2 (box1 is 4, box2 is nx4): the
    # IoU/GIoU/DIoU/CIoU terms are raised to the power `alpha` (alpha=1
    # presumably recovers the plain variants -- this is the repo's Alpha-IoU loss).
    box2 = box2.T
    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps
    # change iou into pow(iou+eps, alpha) -- the alpha power transform
    # iou = inter / union
    iou = torch.pow(inter/union + eps, alpha)
    # beta = 2 * alpha
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = (cw ** 2 + ch ** 2) ** alpha + eps  # convex diagonal, alpha power
            rho_x = torch.abs(b2_x1 + b2_x2 - b1_x1 - b1_x2)
            rho_y = torch.abs(b2_y1 + b2_y2 - b1_y1 - b1_y2)
            rho2 = ((rho_x ** 2 + rho_y ** 2) / 4) ** alpha  # center distance, alpha power
            if DIoU:
                return iou - rho2 / c2  # alpha-DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                # v measures aspect-ratio inconsistency; alpha_ciou is its trade-off
                # weight, computed without gradients (uses the raw, un-powered IoU)
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha_ciou = v / ((1 + eps) - inter / union + v)
                # return iou - (rho2 / c2 + v * alpha_ciou)  # CIoU
                return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha))  # alpha-CIoU
        else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
            # c_area = cw * ch + eps  # convex area
            # return iou - (c_area - union) / c_area  # GIoU
            c_area = torch.max(cw * ch + eps, union)  # convex area, clamped to >= union
            return iou - torch.pow((c_area - union) / c_area + eps, alpha)  # alpha-GIoU
    else:
        return iou  # torch.log(iou+eps) or iou
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """
    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # Pairwise overlap rectangle: top-left is the max of the mins,
    # bottom-right is the min of the maxes
    lt = torch.max(box1[:, None, :2], box2[:, :2])  # (N, M, 2)
    rb = torch.min(box1[:, None, 2:], box2[:, 2:])  # (N, M, 2)
    inter = (rb - lt).clamp(0).prod(2)  # (N, M)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
def wh_iou(wh1, wh2):
    """IoU matrix (n, m) for width-height pairs, treating boxes as sharing a corner."""
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M] overlap area
    union = wh1.prod(2) + wh2.prod(2) - inter
    return inter / union
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=()):
    """Runs Non-Maximum Suppression (NMS) on inference results

    Arguments:
        prediction: model output of shape (bs, n_boxes, 5 + nc), boxes as xywh
        conf_thres: objectness threshold below which candidates are dropped
        iou_thres: IoU threshold passed to torchvision NMS
        classes: optional list of class ids to keep
        agnostic: if True, suppress across classes (class-agnostic NMS)
        multi_label: allow several class labels per box (only when nc > 1)
        labels: optional per-image apriori labels merged in for autolabelling
    Returns:
         list of detections, on (n,6) tensor per image [xyxy, conf, cls]
    """
    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates
    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS
    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence
        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)
        # If none remain process next image
        if not x.shape[0]:
            continue
        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])
        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]
        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
        # Batched NMS: offsetting boxes by class * max_wh keeps different
        # classes from suppressing each other (no offset when agnostic)
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy
        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded
    return output
def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
    """Strip training state (optimizer, EMA, wandb id, ...) from checkpoint `f`
    to finalize training, saving an FP16 inference-only model to `s` (or in
    place when `s` is empty)."""
    ckpt = torch.load(f, map_location=torch.device('cpu'))
    if ckpt.get('ema'):
        ckpt['model'] = ckpt['ema']  # prefer the EMA weights as the final model
    for k in ('optimizer', 'training_results', 'wandb_id', 'ema', 'updates'):  # keys
        ckpt[k] = None
    ckpt['epoch'] = -1
    ckpt['model'].half()  # to FP16
    for p in ckpt['model'].parameters():
        p.requires_grad = False

    target = s or f
    torch.save(ckpt, target)
    mb = os.path.getsize(target) / 1E6  # filesize
    print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve):
    # appends this run, keeps evolve.txt sorted by fitness, writes the best
    # hyperparameters to yaml_file, and optionally syncs via a GCS bucket.
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
    if bucket:
        url = 'gs://%s/evolve.txt' % bucket
        if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
            os.system('gsutil cp %s .' % url)  # download evolve.txt if larger than local
    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    x = x[np.argsort(-fitness(x))]  # sort
    np.savetxt('evolve.txt', x, '%10.3g')  # save sort by fitness
    # Save yaml: row layout is 7 result columns followed by the hyperparameters
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[0, i + 7])
    with open(yaml_file, 'w') as f:
        results = tuple(x[0, :7])
        c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
        f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
        yaml.dump(hyp, f, sort_keys=False)
    if bucket:
        os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket))  # upload
def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    """Filter YOLO detections with a second-stage classifier.

    Each detection box is squared, padded, cut out of the original image,
    resized to 224x224 and classified; detections whose YOLO class disagrees
    with the classifier's prediction are dropped.

    x: list of per-image detections, each (n, 6) [xyxy, conf, cls]
    model: classification model taking a (n, 3, 224, 224) RGB batch
    img: letterboxed network input batch (used only for coordinate rescaling)
    im0: original image (ndarray) or list of images
    """
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()
            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()
            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)
                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)
            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections
    return x
def increment_path(path, exist_ok=True, sep=''):
    # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
    """Return `path` itself when free (or `exist_ok`), else the next numbered variant.

    path: base path, e.g. 'runs/exp'
    exist_ok: when True an existing path is reused as-is
    sep: separator between the stem and the appended number
    Returns the chosen path as a str.
    """
    path = Path(path)  # os-agnostic
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        # re.escape guards against regex metacharacters ('.', '+', ...) in the stem,
        # which previously produced wrong matches or re.error
        matches = [re.search(rf"{re.escape(path.stem)}{sep}(\d+)", d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        return f"{path}{sep}{n}"  # update path
| 27,286 | 41.569423 | 120 | py |
Alpha-IoU | Alpha-IoU-main/utils/google_utils.py | # Google utils: https://cloud.google.com/storage/docs/reference/libraries
import os
import platform
import subprocess
import time
from pathlib import Path
import requests
import torch
def gsutil_getsize(url=''):
    # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
    """Return the size in bytes of a GCS object via `gsutil du` (0 when output is empty)."""
    s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
    # `gsutil du` prints "<bytes>  <url>"; parse the leading integer explicitly
    # instead of eval()-ing untrusted command output.
    return int(s.split(' ')[0]) if len(s) else 0  # bytes
def attempt_download(file, repo='ultralytics/yolov5'):
    # Attempt file download if does not exist
    """Download a missing weights file from the repo's GitHub release assets.

    Asks the GitHub API for the latest release; on any failure falls back to a
    hard-coded asset list plus the local `git tag`.  NOTE(review): the GCP
    mirror branch is effectively disabled — `redundant=False` makes its
    `assert redundant` always fail, so only the GitHub URL is ever tried.
    """
    file = Path(str(file).strip().replace("'", '').lower())
    if not file.exists():
        try:
            response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json()  # github api
            assets = [x['name'] for x in response['assets']]  # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
            tag = response['tag_name']  # i.e. 'v1.0'
        except:  # fallback plan
            assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']
            tag = subprocess.check_output('git tag', shell=True).decode().split()[-1]
        name = file.name
        if name in assets:
            msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/'
            redundant = False  # second download option
            try:  # GitHub
                url = f'https://github.com/{repo}/releases/download/{tag}/{name}'
                print(f'Downloading {url} to {file}...')
                torch.hub.download_url_to_file(url, file)
                assert file.exists() and file.stat().st_size > 1E6  # check
            except Exception as e:  # GCP
                print(f'Download error: {e}')
                assert redundant, 'No secondary mirror'
                url = f'https://storage.googleapis.com/{repo}/ckpt/{name}'
                print(f'Downloading {url} to {file}...')
                os.system(f'curl -L {url} -o {file}')  # torch.hub.download_url_to_file(url, weights)
            finally:
                # runs on success and failure: drop partial files < 1MB
                if not file.exists() or file.stat().st_size < 1E6:  # check
                    file.unlink(missing_ok=True)  # remove partial downloads
                    print(f'ERROR: Download failure: {msg}')
                print('')
                return
def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
    # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download()
    """Download a file from Google Drive via curl and return curl's exit status.

    Handles Drive's virus-scan confirmation cookie for large files and unzips
    `.zip` archives in place (removing the archive afterwards).
    """
    t = time.time()
    file = Path(file)
    cookie = Path('cookie')  # gdrive cookie
    print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
    file.unlink(missing_ok=True)  # remove existing file
    cookie.unlink(missing_ok=True)  # remove existing cookie
    # Attempt file download
    out = "NUL" if platform.system() == "Windows" else "/dev/null"
    os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
    if os.path.exists('cookie'):  # large file
        # large files need the confirm token Drive stored in the cookie
        s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
    else:  # small file
        s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
    r = os.system(s)  # execute, capture return
    cookie.unlink(missing_ok=True)  # remove existing cookie
    # Error check
    if r != 0:
        file.unlink(missing_ok=True)  # remove partial
        print('Download error ')  # raise Exception('Download error')
        return r
    # Unzip if archive
    if file.suffix == '.zip':
        print('unzipping... ', end='')
        os.system(f'unzip -q {file}')  # unzip
        file.unlink()  # remove zip to free space
    print(f'Done ({time.time() - t:.1f}s)')
    return r
def get_token(cookie="./cookie"):
    """Return the Drive download-confirmation token parsed from `cookie`, or ''.

    Scans the cookie file for the first line containing "download" and returns
    its last whitespace-separated field.
    """
    with open(cookie) as handle:
        candidates = (line.split()[-1] for line in handle if "download" in line)
        return next(candidates, "")
# def upload_blob(bucket_name, source_file_name, destination_blob_name):
# # Uploads a file to a bucket
# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
#
# storage_client = storage.Client()
# bucket = storage_client.get_bucket(bucket_name)
# blob = bucket.blob(destination_blob_name)
#
# blob.upload_from_filename(source_file_name)
#
# print('File {} uploaded to {}.'.format(
# source_file_name,
# destination_blob_name))
#
#
# def download_blob(bucket_name, source_blob_name, destination_file_name):
# # Uploads a blob from a bucket
# storage_client = storage.Client()
# bucket = storage_client.get_bucket(bucket_name)
# blob = bucket.blob(source_blob_name)
#
# blob.download_to_filename(destination_file_name)
#
# print('Blob {} downloaded to {}.'.format(
# source_blob_name,
# destination_file_name))
| 4,870 | 38.601626 | 118 | py |
Alpha-IoU | Alpha-IoU-main/utils/wandb_logging/wandb_utils.py | import json
import shutil
import sys
from datetime import datetime
from pathlib import Path
import torch
sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path
from utils.general import colorstr, xywh2xyxy
try:
import wandb
except ImportError:
wandb = None
print(f"{colorstr('wandb: ')}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
def remove_prefix(from_string, prefix):
    """Drop the first len(prefix) characters (assumes the string starts with `prefix`)."""
    cut = len(prefix)
    return from_string[cut:]
class WandbLogger():
    """Weights & Biases logging helper for YOLOv5 training runs.

    Wraps a single wandb run: downloads dataset/model artifacts referenced via
    'wandb-artifact://' paths, buffers per-epoch metrics, and uploads model and
    result artifacts.  When wandb is not installed, `self.wandb` is None and
    `self.wandb_run` stays None, disabling logging.
    """
    def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
        # opt: argparse namespace from train.py; run_id: wandb id to resume (or None)
        self.wandb = wandb
        self.wandb_run = wandb.init(config=opt, resume="allow",
                                    project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                                    name=name,
                                    job_type=job_type,
                                    id=run_id) if self.wandb else None
        if job_type == 'Training':
            self.setup_training(opt, data_dict)
            # default the bbox-logging / checkpoint-saving cadence to ~10 per run
            if opt.bbox_interval == -1:
                opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs
            if opt.save_period == -1:
                opt.save_period = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs
    def setup_training(self, opt, data_dict):
        """Resolve W&B dataset/model artifacts and rewrite data_dict paths to them.

        Mutates data_dict['train'/'val'] to point at downloaded artifact images
        and, when resuming from a model artifact, sets opt.weights.
        """
        self.log_dict = {}
        self.train_artifact_path, self.trainset_artifact = \
            self.download_dataset_artifact(data_dict['train'], opt.artifact_alias)
        self.test_artifact_path, self.testset_artifact = \
            self.download_dataset_artifact(data_dict['val'], opt.artifact_alias)
        self.result_artifact, self.result_table, self.weights = None, None, None
        if self.train_artifact_path is not None:
            train_path = Path(self.train_artifact_path) / 'data/images/'
            data_dict['train'] = str(train_path)
        if self.test_artifact_path is not None:
            test_path = Path(self.test_artifact_path) / 'data/images/'
            data_dict['val'] = str(test_path)
            # per-run evaluation table, joined with the val set at finish_run()
            self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
            self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
        if opt.resume_from_artifact:
            modeldir, _ = self.download_model_artifact(opt.resume_from_artifact)
            if modeldir:
                self.weights = Path(modeldir) / "best.pt"
                opt.weights = self.weights
    def download_dataset_artifact(self, path, alias):
        """Download a 'wandb-artifact://' dataset reference; returns (dir, artifact) or (None, None)."""
        if path.startswith(WANDB_ARTIFACT_PREFIX):
            dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
            assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'"
            datadir = dataset_artifact.download()
            labels_zip = Path(datadir) / "data/labels.zip"
            shutil.unpack_archive(labels_zip, Path(datadir) / 'data/labels', 'zip')
            print("Downloaded dataset to : ", datadir)
            return datadir, dataset_artifact
        return None, None
    def download_model_artifact(self, name):
        """Download the ':latest' model artifact called `name`; returns (dir, artifact)."""
        model_artifact = wandb.use_artifact(name + ":latest")
        assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
        modeldir = model_artifact.download()
        print("Downloaded model to : ", modeldir)
        return modeldir, model_artifact
    def log_model(self, path, opt, epoch):
        """Upload last.pt/best.pt from directory `path` as a versioned model artifact."""
        datetime_suffix = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')
        model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
            'original_url': str(path),
            'epoch': epoch + 1,
            'save period': opt.save_period,
            'project': opt.project,
            'datetime': datetime_suffix
        })
        model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
        model_artifact.add_file(str(path / 'best.pt'), name='best.pt')
        wandb.log_artifact(model_artifact)
        print("Saving model artifact on epoch ", epoch + 1)
    def log_dataset_artifact(self, dataset, class_to_id, name='dataset'):
        """Package a dataset (images dir + zipped labels + preview table) as a W&B artifact.

        dataset: iterable yielding (img, labels, paths, shapes) with a `.path`
        images directory; class_to_id: {class index: class name}.
        """
        artifact = wandb.Artifact(name=name, type="dataset")
        image_path = dataset.path
        artifact.add_dir(image_path, name='data/images')
        table = wandb.Table(columns=["id", "train_image", "Classes"])
        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
        for si, (img, labels, paths, shapes) in enumerate(dataset):
            height, width = shapes[0]
            # labels come normalized xywh; convert to absolute pixel xyxy for W&B boxes
            labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4)))
            labels[:, 2:] *= torch.Tensor([width, height, width, height])
            box_data = []
            img_classes = {}
            for cls, *xyxy in labels[:, 1:].tolist():
                cls = int(cls)
                box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                                 "class_id": cls,
                                 "box_caption": "%s" % (class_to_id[cls]),
                                 "scores": {"acc": 1},
                                 "domain": "pixel"})
                img_classes[cls] = class_to_id[cls]
            boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
            table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes))
        artifact.add(table, name)
        # '.../images/...' -> '.../labels/...' (mirror directory layout)
        labels_path = 'labels'.join(image_path.rsplit('images', 1))
        zip_path = Path(labels_path).parent / (name + '_labels.zip')
        if not zip_path.is_file():  # make_archive won't check if file exists
            shutil.make_archive(zip_path.with_suffix(''), 'zip', labels_path)
        artifact.add_file(str(zip_path), name='data/labels.zip')
        wandb.log_artifact(artifact)
        print("Saving data to W&B...")
    def log(self, log_dict):
        """Buffer metrics; they are flushed to wandb by end_epoch()/finish_run()."""
        if self.wandb_run:
            for key, value in log_dict.items():
                self.log_dict[key] = value
    def end_epoch(self):
        """Flush the buffered metrics for this epoch to wandb."""
        if self.wandb_run and self.log_dict:
            wandb.log(self.log_dict)
            self.log_dict = {}
    def finish_run(self):
        """Upload the evaluation table (if any), flush remaining metrics and close the run."""
        if self.wandb_run:
            if self.result_artifact:
                print("Add Training Progress Artifact")
                self.result_artifact.add(self.result_table, 'result')
                train_results = wandb.JoinedTable(self.testset_artifact.get("val"), self.result_table, "id")
                self.result_artifact.add(train_results, 'joined_result')
                wandb.log_artifact(self.result_artifact)
            if self.log_dict:
                wandb.log(self.log_dict)
            wandb.run.finish()
| 6,892 | 46.212329 | 117 | py |
Alpha-IoU | Alpha-IoU-main/utils/aws/resume.py | # Resume all interrupted trainings in yolov5/ dir including DPP trainings
# Usage: $ python utils/aws/resume.py
import os
import sys
from pathlib import Path
import torch
import yaml
sys.path.append('./') # to run '$ python *.py' files in subdirectories
port = 0  # --master_port
path = Path('').resolve()
# Find every unfinished checkpoint under the current tree and relaunch training.
# A checkpoint with optimizer state still attached means training was interrupted
# (strip_optimizer sets it to None on completion).
for last in path.rglob('*/**/last.pt'):
    ckpt = torch.load(last)
    if ckpt['optimizer'] is None:
        continue  # training finished cleanly; nothing to resume
    # Load opt.yaml
    with open(last.parent.parent / 'opt.yaml') as f:
        opt = yaml.load(f, Loader=yaml.SafeLoader)
    # Get device count
    d = opt['device'].split(',')  # devices
    nd = len(d)  # number of devices
    ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel
    if ddp:  # multi-GPU
        port += 1  # unique --master_port per concurrent DDP job
        cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
    else:  # single-GPU
        cmd = f'python train.py --resume {last}'
    cmd += ' > /dev/null 2>&1 &'  # redirect output to dev/null and run in daemon thread
    print(cmd)
    os.system(cmd)
| 1,114 | 28.342105 | 119 | py |
CorrI2P | CorrI2P-main/pointnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def square_distance(src, dst):
    """
    Pairwise squared Euclidean distance between two batched point sets.

    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, so the whole
    matrix comes from one batched matmul plus two broadcast additions.

    Input:
        src: source points, [B, N, C]
        dst: target points, [B, M, C]
    Output:
        dist: per-point square distance, [B, N, M]
    """
    batch, n_src, _ = src.shape
    n_dst = dst.shape[1]
    cross = torch.matmul(src, dst.permute(0, 2, 1))          # (B, N, M) dot products
    src_sq = torch.sum(src ** 2, -1).view(batch, n_src, 1)   # ||src||^2 column
    dst_sq = torch.sum(dst ** 2, -1).view(batch, 1, n_dst)   # ||dst||^2 row
    return src_sq + dst_sq - 2 * cross
def index_points(points, idx):
    """
    Gather per-batch point subsets by index.

    Input:
        points: input points data, [B, N, C]
        idx: sample index data, [B, S] (any trailing index shape works)
    Return:
        new_points: indexed points data, [B, S, C]
    """
    batch = points.shape[0]
    # batch index broadcast to the same shape as idx, so advanced indexing
    # picks row idx[b, ...] from batch b
    lead_shape = [batch] + [1] * (len(idx.shape) - 1)
    batch_idx = torch.arange(batch, dtype=torch.long, device=points.device).view(lead_shape)
    batch_idx = batch_idx.expand(idx.shape)
    return points[batch_idx, idx, :]
class PCUpSample(nn.Module):
    """Feature-propagation (upsampling) layer in the PointNet++ style.

    Interpolates features of a downsampled point set back onto a denser set by
    inverse-distance-weighted k-NN interpolation, optionally concatenates skip
    features, then refines with a shared-MLP (1x1 Conv1d + BN + ReLU) stack.
    """
    def __init__(self,in_channel,mlp,last_norm_activate=True,k=16):
        # in_channel: channels after concatenating skip + interpolated features
        # mlp: output channel sizes; BN+ReLU on the last layer is gated by last_norm_activate
        # k: number of nearest downsampled points used per interpolation
        super(PCUpSample,self).__init__()
        self.mlp_convs=nn.ModuleList()
        self.mlp_bns=nn.ModuleList()
        last_channel=in_channel
        self.last_norm_activate=last_norm_activate
        self.k=k
        for out_channel in mlp[0:-1]:
            self.mlp_convs.append(nn.Conv1d(last_channel,out_channel,1))
            self.mlp_bns.append(nn.BatchNorm1d(out_channel))
            last_channel=out_channel
        if self.last_norm_activate:
            self.mlp_convs.append(nn.Conv1d(mlp[-2],mlp[-1],1))
            self.mlp_bns.append(nn.BatchNorm1d(mlp[-1]))
        else:
            self.mlp_convs.append(nn.Conv1d(mlp[-2],mlp[-1],1))
    def forward(self,xyz1,xyz2,features1=None,features2=None):
        '''
        xyz1: destination points, (B, 3, N)
        xyz2: downsampled points, (B, 3, M)
        features1: destination (skip) features, (B, C1, N), optional
        features2: downsampled features, (B, C2, M)
        returns upsampled features, (B, mlp[-1], N)
        '''
        B,_,N=xyz1.shape
        xyz1=xyz1.permute(0,2,1)
        xyz2=xyz2.permute(0,2,1)
        features2=features2.permute(0,2,1)
        dists=square_distance(xyz1,xyz2)
        dists,idx=dists.sort(dim=-1)
        dists,idx=dists[:,:,:self.k], idx[:,:,:self.k]  # k nearest downsampled points per dest point
        dist_recip=1.0/(dists+1e-8)  # inverse-distance weights; eps avoids div-by-zero
        norm=torch.sum(dist_recip,dim=2)
        weight=dist_recip/norm.unsqueeze(-1)  # weights normalised to sum to 1
        #print(weight.size(),index_points(features2, idx).size())
        interpolated_features = torch.sum(index_points(features2, idx) * weight.view(B, N, self.k, 1), dim=2)
        if features1 is None:
            new_features=interpolated_features
        else:
            features1=features1.permute(0,2,1)
            new_features=torch.cat([features1,interpolated_features],dim=-1)  # skip connection
        new_features=new_features.permute(0,2,1)
        for i, conv in enumerate(self.mlp_convs[:-1]):
            bn=self.mlp_bns[i]
            new_features=F.relu(bn(conv(new_features)))
        if self.last_norm_activate:
            bn=self.mlp_bns[-1]
            conv=self.mlp_convs[-1]
            new_features=F.relu(bn(conv(new_features)))
        else:
            conv=self.mlp_convs[-1]
            new_features=conv(new_features)
        return new_features
def FPS(pc,k):
    '''
    Farthest point sampling: iteratively pick the point farthest from the
    already-selected set, starting from a RANDOM point per batch (output is
    nondeterministic unless the torch RNG is seeded).
    pc:(B,C,N)
    return (B,C,k)
    '''
    device=pc.device
    B,C,N=pc.size()
    centroids = torch.zeros(B,k,dtype=torch.long).to(device)  # indices of chosen points
    distance = torch.ones(B, N).to(device)*1e10  # running min sq. distance to the chosen set
    farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)  # random seed point per batch
    batch_indices = torch.arange(B, dtype=torch.long).to(device)
    for i in range(k):
        centroids[:, i] = farthest
        centroid = pc[batch_indices, :, farthest].view(B, 3, 1)  # NOTE(review): hard-codes C == 3
        dist = torch.sum((pc-centroid)**2, -2)
        mask = dist < distance
        distance[mask] = dist[mask]  # keep running minimum
        farthest = torch.max(distance, -1)[1]  # next pick = argmax of the minima
    return torch.gather(pc, dim=2, index=centroids.unsqueeze(1).repeat(1, C, 1))
'''def FPS(pc, k):
xyz=pc.transpose(2,1)
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, k, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device) #(B, 1)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(k):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
mask = dist < distance
distance[mask] = dist[mask]
farthest = torch.max(distance, -1)[1]
return torch.gather(pc, dim=2, index=centroids.unsqueeze(1).repeat(1, C, 1))
'''
def group(pc, kpts, k):
    """For each keypoint, gather its k nearest neighbours from pc, centred on the keypoint.

    pc:   (B, C, N) point cloud (C is 3 in this codebase)
    kpts: (B, C, N2) query points
    k:    neighbourhood size
    returns (B, C, N2, k) keypoint-centred neighbour coordinates
    """
    B, C, N = pc.size()
    N2 = kpts.size(-1)
    # (B, N2, N): squared distance from every keypoint to every point
    diff = torch.sum(torch.square(pc.unsqueeze(2) - kpts.unsqueeze(3)), dim=1)
    _, idx = torch.topk(diff, k, dim=2, largest=False)
    # use the computed channel count C (was hard-coded 3) to stay consistent
    # with group_with_feature and support any channel width
    grouped_pc = torch.gather(pc.unsqueeze(2).repeat(1, 1, N2, 1), dim=3,
                              index=idx.unsqueeze(1).repeat(1, C, 1, 1))
    grouped_pc = grouped_pc - kpts.unsqueeze(-1)
    return grouped_pc
def group_with_feature(pc, features, kpts, k):
    """Gather, for each keypoint, its k nearest points (centred) together with their features.

    pc: (B, C, N); features: (B, Cf, N); kpts: (B, C, N2)
    returns ((B, C, N2, k) centred coordinates, (B, Cf, N2, k) features)
    """
    B, C, N = pc.size()
    feat_dim = features.size(1)
    num_kpts = kpts.size(-1)
    # (B, N2, N) squared distances keypoints -> points
    sq_dist = torch.sum(torch.square(pc.unsqueeze(2) - kpts.unsqueeze(3)), dim=1)
    _, nn_idx = torch.topk(sq_dist, k, dim=2, largest=False)
    coord_idx = nn_idx.unsqueeze(1).repeat(1, C, 1, 1)
    grouped_pc = torch.gather(pc.unsqueeze(2).repeat(1, 1, num_kpts, 1), dim=3, index=coord_idx)
    grouped_pc = grouped_pc - kpts.unsqueeze(-1)  # centre neighbourhoods on their keypoint
    feat_idx = nn_idx.unsqueeze(1).repeat(1, feat_dim, 1, 1)
    grouped_features = torch.gather(features.unsqueeze(2).repeat(1, 1, num_kpts, 1), dim=3, index=feat_idx)
    return grouped_pc, grouped_features
def group_only_feature(pc, features, kpts, k):
    """Like group_with_feature, but returns only the grouped features (no centred coords).

    pc: (B, C, N); features: (B, Cf, N); kpts: (B, C, N2)
    returns (B, Cf, N2, k) features of each keypoint's k nearest points
    """
    B, C, N = pc.size()
    feat_dim = features.size(1)
    num_kpts = kpts.size(-1)
    sq_dist = torch.sum(torch.square(pc.unsqueeze(2) - kpts.unsqueeze(3)), dim=1)
    _, nn_idx = torch.topk(sq_dist, k, dim=2, largest=False)
    expanded = features.unsqueeze(2).repeat(1, 1, num_kpts, 1)
    grouped_features = torch.gather(expanded, dim=3,
                                    index=nn_idx.unsqueeze(1).repeat(1, feat_dim, 1, 1))
    return grouped_features
class attention_img2pc(nn.Module):
    """Attention-based fusion of image features onto point-cloud locations.

    From each point's local feature concatenated with the global image feature,
    an MLP predicts a softmax attention map over the H*W image positions; the
    image's local features are then pooled with those weights, giving one fused
    image feature per point.
    """
    def __init__(self,in_channel,mlp):
        # in_channel: pc-feature channels + global image-feature channels
        # mlp: channel sizes; mlp[-1] must equal H*W of img_local_feature
        super(attention_img2pc,self).__init__()
        self.mlp_convs=nn.ModuleList()
        self.mlp_bns=nn.ModuleList()
        last_channel=in_channel
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv1d(last_channel,out_channel,1))
            self.mlp_bns.append(nn.BatchNorm1d(out_channel))
            last_channel=out_channel
    def forward(self,img_global_feature,img_local_feature,pc_local_feature):
        # img_global_feature: (B, Cg); img_local_feature: (B, C, H, W);
        # pc_local_feature: (B, Cp, N) -- assumed shapes, TODO confirm with caller
        N=pc_local_feature.size(2)
        B=pc_local_feature.size(0)
        C_img=img_local_feature.size(1)
        # broadcast the global image feature to every point, then predict attention
        feature=torch.cat([pc_local_feature,img_global_feature.unsqueeze(-1).repeat(1,1,N)],dim=1)
        for i, conv in enumerate(self.mlp_convs):
            bn=self.mlp_bns[i]
            feature=F.relu(bn(conv(feature)))
        attention=F.softmax(feature,dim=1) #(B,H*W,N)
        img_local_feature=img_local_feature.view(B,C_img,-1) #(B,C,H*W)
        #print(img_local_feature.size(),attention.size())
        feature_fusion=torch.matmul(img_local_feature,attention)  # (B, C, N)
        return feature_fusion
def angle(v1, v2):
    """
    Angle between paired 3-D vectors, computed stably via atan2.

    v1:(B,3,N,K)
    v2:(B,3,N,K)
    returns (B,1,N,K) angles in [0, pi]
    """
    # atan2(|v1 x v2|, v1 . v2) avoids acos's instability near 0 and pi;
    # torch.cross replaces the previous hand-rolled component formula
    cross_prod = torch.cross(v1, v2, dim=1)
    cross_prod_norm = torch.norm(cross_prod, dim=1, keepdim=True)
    dot_prod = torch.sum(v1 * v2, dim=1, keepdim=True)
    return torch.atan2(cross_prod_norm, dot_prod)
class PointCloudEncoder(nn.Module):
    """Hierarchical point-cloud encoder with two FPS + grouping levels.

    Level 1: FPS-sample k1 points, group s1 neighbours each, and encode a
    handcrafted descriptor (angles between normals and offsets, offset norm,
    intensity) together with grouped coordinates.  Level 2: FPS-sample k2 of
    those points with s2 neighbours.  A final shared MLP + channel-wise max
    yields a global descriptor.
    forward returns ([xyz1, xyz2], [features1, features2], global_features).
    """
    def __init__(self,k1=512,k2=64,s1=256,s2=32):
        # k1/k2: points kept at level 1/2; s1/s2: neighbourhood sizes per level
        super(PointCloudEncoder,self).__init__()
        self.k1=k1
        self.k2=k2
        self.s1=s1
        self.s2=s2
        # level-1 MLP: 3 grouped coords + 5 handcrafted features = 8 input channels
        self.conv1=nn.Sequential(nn.Conv2d(8,64,1),nn.BatchNorm2d(64),nn.ReLU(),
                                 nn.Conv2d(64,64,1),nn.BatchNorm2d(64),nn.ReLU(),
                                 nn.Conv2d(64,256,1),nn.BatchNorm2d(256),nn.ReLU())
        # level-2 MLP: 3 grouped coords + 256 level-1 features
        self.conv2=nn.Sequential(nn.Conv2d(256+3,256,1),nn.BatchNorm2d(256),nn.ReLU(),
                                 nn.Conv2d(256,256,1),nn.BatchNorm2d(256),nn.ReLU(),
                                 nn.Conv2d(256,512,1),nn.BatchNorm2d(512),nn.ReLU())
        # global head before max-pooling over points
        self.conv3=nn.Sequential(nn.Conv1d(512,512,1),nn.BatchNorm1d(512),nn.ReLU(),
                                 nn.Conv1d(512,512,1),nn.BatchNorm1d(512),nn.ReLU())
    def forward(self,xyz,features):
        # xyz: (B, 3, N); features: channel 0 intensity, channels 1:4 surface normals
        points=[]
        out=[]
        #----------------------
        xyz1=FPS(xyz,self.k1)
        xyz1_grouped,features1_grouped=group_with_feature(xyz,features,xyz1,self.s1) #(B,3,N,K) (B,C,N,K)
        sn1=features1_grouped[:,1:,:,:] #(B,3,N,K) surface normals of neighbours
        intensity1=features1_grouped[:,0:1,:,:] #(B,1,N,K)
        sn1_center=sn1[:,:,:,0:1]  # nearest neighbour is the centre point itself
        xyz1_center=xyz1.unsqueeze(-1)
        d=xyz1_grouped-xyz1_center  # offsets from centre to neighbours
        # PPF-style rotation-invariant descriptor: three angles + distance + intensity
        nr_d=angle(sn1_center,d)
        ni_d=angle(sn1,d)
        nr_ni=angle(sn1_center,sn1)
        d_norm=torch.norm(d,dim=1,keepdim=True)
        features1_grouped=torch.cat((nr_d,ni_d,nr_ni,d_norm,intensity1),dim=1)
        #print(xyz.size(),xyz1.size(),xyz1_grouped.size())
        features1=self.conv1(torch.cat((xyz1_grouped,features1_grouped),dim=1))
        features1=torch.max(features1,dim=3)[0]  # pool over the neighbourhood
        points.append(xyz1)
        out.append(features1)
        #----------------------
        xyz2=FPS(xyz1,self.k2)
        xyz2_grouped,features2_grouped=group_with_feature(xyz1,features1,xyz2,self.s2)
        features2=self.conv2(torch.cat((xyz2_grouped,features2_grouped),dim=1))
        features2=torch.max(features2,dim=3)[0]
        points.append(xyz2)
        out.append(features2)
        #------------------------
        features3=self.conv3(features2)
        global_features=torch.max(features3,dim=2)[0]  # pool over points -> (B, 512)
        return points,out,global_features
if __name__=='__main__':
net=PCUpSample(3,[64,64]).cuda()
a=torch.rand((3,)) | 10,653 | 35.865052 | 126 | py |
CorrI2P | CorrI2P-main/train_nuscenes.py | import os
os.environ["CUDA_VISIBLE_DEVICES"]="3"
import torch
import torch.nn as nn
import argparse
from network3 import DenseI2P
from nuscenes_pc_img_dataloader import nuScenesLoader
import loss2
import numpy as np
import logging
import math
import nuScenes.options as options
import cv2
from scipy.spatial.transform import Rotation
def get_P_diff(P_pred_np, P_gt_np):
    """Pose error between two 4x4 transforms.

    Returns (translation error norm, sum of absolute 'xzy' Euler angles in
    degrees) of the relative transform inv(P_pred) @ P_gt.
    """
    relative = np.dot(np.linalg.inv(P_pred_np), P_gt_np)
    t_diff = np.linalg.norm(relative[0:3, 3])
    rot = Rotation.from_matrix(relative[0:3, 0:3])
    angles_diff = np.sum(np.abs(rot.as_euler('xzy', degrees=True)))
    return t_diff, angles_diff
def test_acc(model,testdataloader,args):
    """Evaluate image-to-point-cloud registration accuracy on every 10th batch.

    For each evaluated sample: run the model, threshold image/point scores,
    match point features to image features by maximum inner product, solve the
    pose with EPnP + RANSAC and compare against the ground-truth pose P.
    Returns (mean translation error, mean summed-Euler-angle error).
    Samples where PnP fails are silently skipped.
    """
    t_diff_set=[]
    angles_diff_set=[]
    torch.cuda.empty_cache()
    for step,data in enumerate(testdataloader):
        if step%10==0:  # evaluate a 1/10 subsample to keep validation fast
            model.eval()
            img=data['img'].cuda() #full size
            pc=data['pc'].cuda()
            intensity=data['intensity'].cuda()
            sn=data['sn'].cuda()
            K=data['K'].cuda()
            P=data['P'].cuda()
            pc_mask=data['pc_mask'].cuda()
            img_mask=data['img_mask'].cuda() #1/4 size
            pc_kpt_idx=data['pc_kpt_idx'].cuda() #(B,512)
            pc_outline_idx=data['pc_outline_idx'].cuda()
            img_kpt_idx=data['img_kpt_idx'].cuda()
            img_outline_idx=data['img_outline_index'].cuda()
            node_a=data['node_a'].cuda()
            node_b=data['node_b'].cuda()
            img_features,pc_features,img_score,pc_score=model(pc,intensity,sn,img,node_a,node_b) #64 channels feature
            # only the first sample of the batch is scored
            img_score=img_score[0].data.cpu().numpy()
            pc_score=pc_score[0].data.cpu().numpy()
            img_feature=img_features[0].data.cpu().numpy()
            pc_feature=pc_features[0].data.cpu().numpy()
            pc=pc[0].data.cpu().numpy()
            P=P[0].data.cpu().numpy()
            K=K[0].data.cpu().numpy()
            # build a (2, H, W) grid of pixel coordinates matching the feature map
            img_x=np.linspace(0,np.shape(img_feature)[-1]-1,np.shape(img_feature)[-1]).reshape(1,-1).repeat(np.shape(img_feature)[-2],0).reshape(1,np.shape(img_score)[-2],np.shape(img_score)[-1])
            img_y=np.linspace(0,np.shape(img_feature)[-2]-1,np.shape(img_feature)[-2]).reshape(-1,1).repeat(np.shape(img_feature)[-1],1).reshape(1,np.shape(img_score)[-2],np.shape(img_score)[-1])
            img_xy=np.concatenate((img_x,img_y),axis=0)
            img_xy_flatten=img_xy.reshape(2,-1)
            img_feature_flatten=img_feature.reshape(np.shape(img_feature)[0],-1)
            img_score_flatten=img_score.squeeze().reshape(-1)
            # keep only confidently in-frustum pixels / points
            img_index=(img_score_flatten>args.img_thres)
            #topk_img_index=np.argsort(-img_score_flatten)[:args.num_kpt]
            img_xy_flatten_sel=img_xy_flatten[:,img_index]
            img_feature_flatten_sel=img_feature_flatten[:,img_index]
            img_score_flatten_sel=img_score_flatten[img_index]
            pc_index=(pc_score.squeeze()>args.pc_thres)
            #topk_pc_index=np.argsort(-pc_score.squeeze())[:args.num_kpt]
            pc_sel=pc[:,pc_index]
            pc_feature_sel=pc_feature[:,pc_index]
            pc_score_sel=pc_score.squeeze()[pc_index]
            # cosine-style matching cost; features are assumed unit-normalised -- TODO confirm
            dist=1-np.sum(np.expand_dims(pc_feature_sel,axis=2)*np.expand_dims(img_feature_flatten_sel,axis=1),axis=0)
            sel_index=np.argmin(dist,axis=1)  # best image pixel for each selected point
            #sel_index=np.argsort(dist,axis=1)[:,0]
            img_xy_pc=img_xy_flatten_sel[:,sel_index]
            try:
                is_success,R,t,inliers=cv2.solvePnPRansac(pc_sel.T,img_xy_pc.T,K,useExtrinsicGuess=False,
                                                          iterationsCount=500,
                                                          reprojectionError=args.dist_thres,
                                                          flags=cv2.SOLVEPNP_EPNP,
                                                          distCoeffs=None)
                R,_=cv2.Rodrigues(R)  # rotation vector -> 3x3 matrix
                T_pred=np.eye(4)
                T_pred[0:3,0:3]=R
                T_pred[0:3,3:]=t
                t_diff,angles_diff=get_P_diff(T_pred,P)
                t_diff_set.append(t_diff)
                angles_diff_set.append(angles_diff)
            except:
                # PnP can fail with too few correspondences; skip the sample
                pass
            torch.cuda.empty_cache()
    return np.mean(np.array(t_diff_set)),np.mean(np.array(angles_diff_set))
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Point Cloud Registration')
parser.add_argument('--epoch', type=int, default=25, metavar='epoch',
help='number of epoch to train')
parser.add_argument('--train_batch_size', type=int, default=24, metavar='train_batch_size',
help='Size of train batch')
parser.add_argument('--val_batch_size', type=int, default=8, metavar='val_batch_size',
help='Size of val batch')
parser.add_argument('--data_path', type=str, default='/home/siyu_ren/data/nuscenes2/', metavar='data_path',
help='train and test data path')
parser.add_argument('--num_point', type=int, default=40960, metavar='num_point',
help='point cloud size to train')
parser.add_argument('--num_workers', type=int, default=8, metavar='num_workers',
help='num of CPUs')
parser.add_argument('--val_freq', type=int, default=1000, metavar='val_freq',
help='')
parser.add_argument('--lr', type=float, default=0.001, metavar='lr',
help='')
parser.add_argument('--min_lr', type=float, default=0.00001, metavar='lr',
help='')
parser.add_argument('--P_tx_amplitude', type=float, default=10, metavar='P_tx_amplitude',
help='')
parser.add_argument('--P_ty_amplitude', type=float, default=0, metavar='P_ty_amplitude',
help='')
parser.add_argument('--P_tz_amplitude', type=float, default=10, metavar='P_tz_amplitude',
help='')
parser.add_argument('--P_Rx_amplitude', type=float, default=2*math.pi*0, metavar='P_Rx_amplitude',
help='')
parser.add_argument('--P_Ry_amplitude', type=float, default=2*math.pi, metavar='P_Ry_amplitude',
help='')
parser.add_argument('--P_Rz_amplitude', type=float, default=2*math.pi*0, metavar='P_Rz_amplitude',
help='')
parser.add_argument('--save_path', type=str, default='./nuscenes_log_xy_40960_256', metavar='save_path',
help='path to save log and model')
parser.add_argument('--num_kpt', type=int, default=512, metavar='num_kpt',
help='')
parser.add_argument('--dist_thres', type=float, default=1, metavar='num_kpt',
help='')
parser.add_argument('--img_thres', type=float, default=0.9, metavar='img_thres',
help='')
parser.add_argument('--pc_thres', type=float, default=0.9, metavar='pc_thres',
help='')
parser.add_argument('--pos_margin', type=float, default=0.2, metavar='pos_margin',
help='')
parser.add_argument('--neg_margin', type=float, default=1.8, metavar='neg_margin',
help='')
args = parser.parse_args()
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
logdir=os.path.join(args.save_path, 'dist_thres_%0.2f_pos_margin_%0.2f_neg_margin_%0.2f'%(args.dist_thres,args.pos_margin,args.neg_margin,))
try:
os.makedirs(logdir)
except:
print('mkdir failue')
logger=logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter=logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('%s/log.txt' % (logdir))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
opt=options.Options()
train_dataset = nuScenesLoader(args.data_path, 'train', opt=opt)
test_dataset = nuScenesLoader(args.data_path, 'val', opt=opt)
assert len(train_dataset) > 10
assert len(test_dataset) > 10
trainloader=torch.utils.data.DataLoader(train_dataset,batch_size=args.train_batch_size,shuffle=True,drop_last=False,num_workers=args.num_workers)
testloader=torch.utils.data.DataLoader(test_dataset,batch_size=args.val_batch_size,shuffle=False,drop_last=True,num_workers=args.num_workers)
opt=options.Options()
model=DenseI2P(opt)
model = nn.DataParallel(model)
model=model.cuda()
current_lr=args.lr
learnable_params=filter(lambda p:p.requires_grad,model.parameters())
optimizer=torch.optim.Adam(learnable_params,lr=current_lr)
#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.9)
#scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epoch, eta_min=args.lr)
logger.info(args)
global_step=0
best_t_diff=1000
best_r_diff=1000
for epoch in range(args.epoch):
for step,data in enumerate(trainloader):
global_step+=1
model.train()
optimizer.zero_grad()
img=data['img'].cuda() #full size
pc=data['pc'].cuda()
intensity=data['intensity'].cuda()
sn=data['sn'].cuda()
K=data['K'].cuda()
P=data['P'].cuda()
pc_mask=data['pc_mask'].cuda()
img_mask=data['img_mask'].cuda() #1/4 size
B=img_mask.size(0)
pc_kpt_idx=data['pc_kpt_idx'].cuda() #(B,512)
pc_outline_idx=data['pc_outline_idx'].cuda()
img_kpt_idx=data['img_kpt_idx'].cuda()
img_outline_idx=data['img_outline_index'].cuda()
node_a=data['node_a'].cuda()
node_b=data['node_b'].cuda()
img_x=torch.linspace(0,img_mask.size(-1)-1,img_mask.size(-1)).view(1,-1).expand(img_mask.size(-2),img_mask.size(-1)).unsqueeze(0).expand(img_mask.size(0),img_mask.size(-2),img_mask.size(-1)).unsqueeze(1).cuda()
img_y=torch.linspace(0,img_mask.size(-2)-1,img_mask.size(-2)).view(-1,1).expand(img_mask.size(-2),img_mask.size(-1)).unsqueeze(0).expand(img_mask.size(0),img_mask.size(-2),img_mask.size(-1)).unsqueeze(1).cuda()
img_xy=torch.cat((img_x,img_y),dim=1)
img_features,pc_features,img_score,pc_score=model(pc,intensity,sn,img,node_a,node_b) #64 channels feature
pc_features_inline=torch.gather(pc_features,index=pc_kpt_idx.unsqueeze(1).expand(B,pc_features.size(1),args.num_kpt),dim=-1)
pc_features_outline=torch.gather(pc_features,index=pc_outline_idx.unsqueeze(1).expand(B,pc_features.size(1),args.num_kpt),dim=-1)
pc_xyz_inline=torch.gather(pc,index=pc_kpt_idx.unsqueeze(1).expand(B,3,args.num_kpt),dim=-1)
pc_score_inline=torch.gather(pc_score,index=pc_kpt_idx.unsqueeze(1),dim=-1)
pc_score_outline=torch.gather(pc_score,index=pc_outline_idx.unsqueeze(1),dim=-1)
img_features_flatten=img_features.contiguous().view(img_features.size(0),img_features.size(1),-1)
img_score_flatten=img_score.contiguous().view(img_score.size(0),img_score.size(1),-1)
img_xy_flatten=img_xy.contiguous().view(img_features.size(0),2,-1)
img_features_flatten_inline=torch.gather(img_features_flatten,index=img_kpt_idx.unsqueeze(1).expand(B,img_features_flatten.size(1),args.num_kpt),dim=-1)
img_xy_flatten_inline=torch.gather(img_xy_flatten,index=img_kpt_idx.unsqueeze(1).expand(B,2,args.num_kpt),dim=-1)
img_score_flatten_inline=torch.gather(img_score_flatten,index=img_kpt_idx.unsqueeze(1),dim=-1)
img_features_flatten_outline=torch.gather(img_features_flatten,index=img_outline_idx.unsqueeze(1).expand(B,img_features_flatten.size(1),args.num_kpt),dim=-1)
img_score_flatten_outline=torch.gather(img_score_flatten,index=img_outline_idx.unsqueeze(1),dim=-1)
'''print(img_xy_flatten[10,:,1000])
print(1000//128)
print(1000%128)
assert False'''
#print((img_kpt_idx.unsqueeze(1).expand(B,img_features_flatten.size(1),args.num_kpt)==img_kpt_idx.unsqueeze(1).repeat(1,img_features_flatten.size(1),1)).all())
#print(img_features)
#assert False
'''print(img_xy_flatten_inline[3,:,10])
print(img_features[3,:,img_xy_flatten_inline[3,1,10].long(),img_xy_flatten_inline[3,0,10].long()]==img_features_flatten_inline[3,:,10])
print(img_mask[3,img_xy_flatten_inline[3,1,10].long(),img_xy_flatten_inline[3,0,10].long()])
assert False'''
pc_xyz_projection=torch.bmm(K,(torch.bmm(P[:,0:3,0:3],pc_xyz_inline)+P[:,0:3,3:]))
#pc_xy_projection=torch.floor(pc_xyz_projection[:,0:2,:]/pc_xyz_projection[:,2:,:]).float()
pc_xy_projection=pc_xyz_projection[:,0:2,:]/pc_xyz_projection[:,2:,:]
#print(pc_xy_projection.size())
#print((pc_xy_projection[:,0,:]>0).all())
#assert False
#print(img_xy_flatten[0])
#print(img_xy_flatten_inline.size(),pc_xy_projection.size())
correspondence_mask=(torch.sqrt(torch.sum(torch.square(img_xy_flatten_inline.unsqueeze(-1)-pc_xy_projection.unsqueeze(-2)),dim=1))<=args.dist_thres).float()
#mask=torch.zeros(correspondence_mask.size(0),int(img_score_flatten_inline.size(2)+img_score_flatten_outline.size(2)),int(pc_score_inline.size(2)+pc_score_outline.size(2))).to(correspondence_mask)
#mask[:,0:int(correspondence_mask.size(1)),0:int(correspondence_mask.size(2))]=correspondence_mask
'''print(correspondence_mask.size())
print(torch.sum(mask))
print(torch.sum(correspondence_mask))'''
#print(correspondence_mask.size())
#print(torch.sum(correspondence_mask,dim=(1,2)))
#assert False
#img_features=torch.cat((img_features_flatten_inline,img_features_flatten_outline),dim=-1)
#pc_features=torch.cat((pc_features_inline,pc_features_outline),dim=-1)
#img_score=torch.cat((img_score_flatten_inline,img_score_flatten_outline),dim=-1)
#pc_score=torch.cat((pc_score_inline,pc_score_outline),dim=-1)
#print(img_score.size(),pc_score.size())
#loss_desc,dists=loss2.desc_loss(img_features,pc_features,mask,num_kpt=args.num_kpt)
loss_desc,dists=loss2.desc_loss(img_features_flatten_inline,pc_features_inline,correspondence_mask,pos_margin=args.pos_margin,neg_margin=args.neg_margin)
#loss_det=loss2.det_loss(img_score_flatten_inline.squeeze(),img_score_flatten_outline.squeeze(),pc_score_inline,pc_score_outline.squeeze())
loss_det=loss2.det_loss2(img_score_flatten_inline.squeeze(),img_score_flatten_outline.squeeze(),pc_score_inline.squeeze(),pc_score_outline.squeeze(),dists,correspondence_mask)
loss=loss_desc+loss_det*0.5
#loss=loss_desc
loss.backward()
optimizer.step()
#torch.cuda.empty_cache()
if global_step%6==0:
logger.info('%s-%d-%d, loss: %f, loss desc: %f, loss det: %f'%('train',epoch,global_step,loss.data.cpu().numpy(),loss_desc.data.cpu().numpy(),loss_det.data.cpu().numpy()))
if global_step%args.val_freq==0 and epoch>5:
t_diff,r_diff=test_acc(model,testloader,args)
if t_diff<=best_t_diff:
torch.save(model.state_dict(),os.path.join(logdir,'mode_best_t.t7'))
best_t_diff=t_diff
if r_diff<=best_r_diff:
torch.save(model.state_dict(),os.path.join(logdir,'mode_best_r.t7'))
best_r_diff=r_diff
logger.info('%s-%d-%d, t_error: %f, r_error: %f'%('test',epoch,global_step,t_diff,r_diff))
torch.save(model.state_dict(),os.path.join(logdir,'mode_last.t7'))
if epoch%5==0 and epoch>0:
current_lr=current_lr*0.25
if current_lr<args.min_lr:
current_lr=args.min_lr
for param_group in optimizer.param_groups:
param_group['lr']=current_lr
logger.info('%s-%d-%d, updata lr, current lr is %f'%('train',epoch,global_step,current_lr))
torch.save(model.state_dict(),os.path.join(logdir,'mode_epoch_%d.t7'%epoch)) | 16,760 | 50.41411 | 222 | py |
CorrI2P | CorrI2P-main/nuscenes_pc_img_dataloader.py | import open3d
import torch.utils.data as data
import random
import numbers
import os
import os.path
import numpy as np
import struct
import math
import torch
import torchvision
import cv2
from PIL import Image
from torchvision import transforms
import pickle
from pyquaternion import Quaternion
from nuScenes import options
from nuscenes.utils.data_classes import LidarPointCloud
from nuscenes.nuscenes import NuScenes
from scipy.sparse import coo_matrix
def angles2rotation_matrix(angles):
    """Build a 3x3 rotation matrix from x/y/z Euler angles (radians).

    :param angles: sequence of three angles, rotations about x, y, z
    :return: rotation matrix equal to Rz @ Ry @ Rx (extrinsic x->y->z order)
    """
    cx, sx = np.cos(angles[0]), np.sin(angles[0])
    cy, sy = np.cos(angles[1]), np.sin(angles[1])
    cz, sz = np.cos(angles[2]), np.sin(angles[2])
    rot_x = np.array([[1, 0, 0],
                      [0, cx, -sx],
                      [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy],
                      [0, 1, 0],
                      [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0],
                      [sz, cz, 0],
                      [0, 0, 1]])
    # Same composition order as before: first x, then y, then z.
    return rot_z @ (rot_y @ rot_x)
def camera_matrix_scaling(K: np.ndarray, s: float):
    """Scale a pinhole intrinsic matrix for an image resized by factor *s*.

    Returns a new matrix (input is not modified); the homogeneous entry
    K[2, 2] is reset to 1 after the uniform scaling.
    """
    scaled = K * s
    scaled[2, 2] = 1
    return scaled
def camera_matrix_cropping(K: np.ndarray, dx: float, dy: float):
    """Adjust intrinsics for an image cropped with top-left offset (dx, dy).

    Only the principal point moves; focal lengths are unchanged. Returns a
    copy, leaving the input matrix untouched.
    """
    K_crop = np.copy(K)
    K_crop[0, 2] = K_crop[0, 2] - dx
    K_crop[1, 2] = K_crop[1, 2] - dy
    return K_crop
class FarthestSampler:
    """Greedy farthest-point sampling (FPS) over a (dim, N) point array."""

    def __init__(self, dim=3):
        # Dimensionality of each point; points are stored column-wise.
        self.dim = dim

    def calc_distances(self, p0, points):
        """Squared Euclidean distance from column vector p0 to every column of points."""
        return ((p0 - points) ** 2).sum(axis=0)

    def sample(self, pts, k):
        """Select k mutually far-apart columns of pts.

        :param pts: (dim, N) array of points
        :param k: number of samples to draw
        :return: tuple of ((dim, k) sampled points, (k,) indices into pts)
        """
        farthest_pts = np.zeros((self.dim, k))
        # Fix: np.int was removed in NumPy 1.24; use an explicit integer dtype.
        farthest_pts_idx = np.zeros(k, dtype=np.int64)
        # Fix: the original used np.random.randint(len(pts)); len() of a
        # (dim, N) array is dim, so the seed point was always one of the first
        # `dim` columns. Drawing from pts.shape[1] seeds from all N points.
        init_idx = np.random.randint(pts.shape[1])
        farthest_pts[:, 0] = pts[:, init_idx]
        farthest_pts_idx[0] = init_idx
        distances = self.calc_distances(farthest_pts[:, 0:1], pts)
        for i in range(1, k):
            # Pick the point farthest from the current sample set, then
            # refresh each point's distance-to-nearest-sample.
            idx = np.argmax(distances)
            farthest_pts[:, i] = pts[:, idx]
            farthest_pts_idx[i] = idx
            distances = np.minimum(distances, self.calc_distances(farthest_pts[:, i:i + 1], pts))
        return farthest_pts, farthest_pts_idx
def downsample_with_reflectance(pointcloud, reflectance, voxel_grid_downsample_size):
    """Voxel-grid downsample a point cloud while carrying reflectance along.

    Reflectance is normalized into [0, 1], stashed in the red color channel of
    an Open3D cloud so that voxel averaging applies to it too, then rescaled
    back after downsampling.

    :param pointcloud: (3, N) (or more rows; only the first 3 are used)
    :param reflectance: (N,) per-point reflectance values
    :param voxel_grid_downsample_size: voxel edge length
    :return: ((3, M) downsampled points, (M,) downsampled reflectance)
    """
    reflectance_max = np.max(reflectance)
    # Encode reflectance in the red channel so Open3D averages it per voxel.
    fake_colors = np.zeros((pointcloud.shape[1], 3))
    fake_colors[:, 0] = reflectance / reflectance_max
    pcd = open3d.geometry.PointCloud()
    pcd.points = open3d.utility.Vector3dVector(np.transpose(pointcloud[0:3, :]))
    pcd.colors = open3d.utility.Vector3dVector(fake_colors)
    down_pcd = pcd.voxel_down_sample(voxel_size=voxel_grid_downsample_size)
    down_points = np.transpose(np.asarray(down_pcd.points))  # 3xM
    down_reflectance = np.asarray(down_pcd.colors)[:, 0] * reflectance_max
    return down_points, down_reflectance
def load_dataset_info(filepath):
    """Unpickle and return the dataset index stored at *filepath*."""
    with open(filepath, 'rb') as f:
        return pickle.load(f)
def make_nuscenes_dataset(root_path):
    """Load the pre-built nuScenes dataset index from *root_path*."""
    index_file = os.path.join(root_path, 'dataset_info_new.list')
    return load_dataset_info(index_file)
def get_sample_data_ego_pose_P(nusc, sample_data):
    """Return the 4x4 ego-pose matrix for a nuScenes sample_data record.

    Looks up the 'ego_pose' record referenced by the sample, converts its
    quaternion rotation and translation (both cast to float32) into a single
    homogeneous transform.
    """
    pose_record = nusc.get('ego_pose', sample_data['ego_pose_token'])
    rotation = np.asarray(Quaternion(pose_record['rotation']).rotation_matrix).astype(np.float32)
    translation = np.asarray(pose_record['translation']).astype(np.float32)
    return get_P_from_Rt(rotation, translation)
def get_calibration_P(nusc, sample_data):
    """Return the 4x4 sensor calibration (extrinsic) matrix for a sample_data record.

    Reads the 'calibrated_sensor' record, converting its quaternion rotation
    and translation (float32) into one homogeneous transform.
    """
    record = nusc.get('calibrated_sensor', sample_data['calibrated_sensor_token'])
    rotation = np.asarray(Quaternion(record['rotation']).rotation_matrix).astype(np.float32)
    translation = np.asarray(record['translation']).astype(np.float32)
    return get_P_from_Rt(rotation, translation)
def get_P_from_Rt(R, t):
    """Assemble a 4x4 homogeneous transform from rotation R (3x3) and translation t (3,)."""
    pose = np.eye(4)
    pose[:3, :3] = R
    pose[:3, 3] = t
    return pose
def get_camera_K(nusc, camera):
    """Return the 3x3 camera intrinsic matrix (float32) for a camera sample_data record."""
    sensor_token = camera['calibrated_sensor_token']
    intrinsic = nusc.get('calibrated_sensor', sensor_token)['camera_intrinsic']
    return np.asarray(intrinsic).astype(np.float32)
def transform_pc_np(P, pc_np):
    """Apply a 4x4 homogeneous transform to a 3xN point array.

    :param P: 4x4 transform matrix
    :param pc_np: 3xN points
    :return: 3xN transformed points
    """
    ones_row = np.ones((1, pc_np.shape[1]), dtype=pc_np.dtype)
    homogeneous = np.vstack((pc_np, ones_row))
    return np.dot(P, homogeneous)[0:3, :]
class nuScenesLoader(data.Dataset):
    """nuScenes cross-modal (point cloud + camera image) dataset.

    Reads pre-exported per-sample ``.npy`` files from ``<root>/<split>/{PC,img,K}``
    and returns, per item: a randomly perturbed point cloud, the (optionally
    color-augmented, randomly cropped) image, quarter-scale intrinsics,
    projection masks, and sampled in-frustum / out-of-frustum keypoint index
    sets for both modalities (see ``__getitem__`` for the returned keys).
    """
    def __init__(self, root, mode, opt: options.Options):
        """
        :param root: dataset root containing 'train'/'test' subfolders
        :param mode: 'train' enables random crop + color augmentation; anything
                     else uses a deterministic center crop
        :param opt: options object; fields read here and in __getitem__ include
                    input_pt_num, img_W, img_H, num_kpt, node_a_num, node_b_num
                    and the P_*_amplitude perturbation ranges
        """
        super(nuScenesLoader, self).__init__()
        self.root = root
        self.opt = opt
        self.mode = mode
        # farthest point sample
        self.farthest_sampler = FarthestSampler(dim=3)
        # Per-split directories of per-sample .npy files; one file per index.
        if mode == 'train':
            self.pc_path=os.path.join(root,'train','PC')
            self.img_path = os.path.join(root,'train', 'img')
            self.K_path = os.path.join(root,'train', 'K')
        else:
            self.pc_path = os.path.join(root, 'test', 'PC')
            self.img_path = os.path.join(root, 'test', 'img')
            self.K_path = os.path.join(root, 'test', 'K')
        # Dataset length = number of point-cloud files present.
        self.length=len(os.listdir(self.pc_path))
    def augment_img(self, img_np):
        """Apply random color jitter (brightness/contrast/saturation/hue).

        :param img_np: HxWx3 uint8-compatible np.ndarray
        :return: jittered image as np.ndarray, same shape
        """
        # color perturbation
        brightness = (0.8, 1.2)
        contrast = (0.8, 1.2)
        saturation = (0.8, 1.2)
        hue = (-0.1, 0.1)
        color_aug = transforms.ColorJitter(
            brightness, contrast, saturation, hue)
        img_color_aug_np = np.array(color_aug(Image.fromarray(np.uint8(img_np))))
        return img_color_aug_np
    def generate_random_transform(self,
                                  P_tx_amplitude, P_ty_amplitude, P_tz_amplitude,
                                  P_Rx_amplitude, P_Ry_amplitude, P_Rz_amplitude):
        """Draw a random SE(3) perturbation, uniform in each amplitude range.

        :return: 4x4 float32 homogeneous transform
        """
        t = [random.uniform(-P_tx_amplitude, P_tx_amplitude),
             random.uniform(-P_ty_amplitude, P_ty_amplitude),
             random.uniform(-P_tz_amplitude, P_tz_amplitude)]
        angles = [random.uniform(-P_Rx_amplitude, P_Rx_amplitude),
                  random.uniform(-P_Ry_amplitude, P_Ry_amplitude),
                  random.uniform(-P_Rz_amplitude, P_Rz_amplitude)]
        rotation_mat = angles2rotation_matrix(angles)
        P_random = np.identity(4, dtype=np.float32)
        P_random[0:3, 0:3] = rotation_mat
        P_random[0:3, 3] = t
        return P_random.astype(np.float32)
    def downsample_np(self, pc_np, intensity_np, k):
        """Randomly subsample (or pad by repetition) the cloud to exactly k points.

        When fewer than k points exist, the whole cloud is tiled and the
        remainder filled with a non-repeating random draw.
        """
        if pc_np.shape[1] >= k:
            choice_idx = np.random.choice(pc_np.shape[1], k, replace=False)
        else:
            fix_idx = np.asarray(range(pc_np.shape[1]))
            while pc_np.shape[1] + fix_idx.shape[0] < k:
                fix_idx = np.concatenate((fix_idx, np.asarray(range(pc_np.shape[1]))), axis=0)
            random_idx = np.random.choice(pc_np.shape[1], k - fix_idx.shape[0], replace=False)
            choice_idx = np.concatenate((fix_idx, random_idx), axis=0)
        pc_np = pc_np[:, choice_idx]
        intensity_np = intensity_np[:, choice_idx]
        return pc_np, intensity_np
    def __len__(self):
        return self.length
    def __getitem__(self, index):
        """Build one sample dict (see the return statement for the exact keys)."""
        # Point-cloud file layout: rows 0..2 are xyz, remaining rows intensity.
        pc_data=np.load(os.path.join(self.pc_path,'%06d.npy'%index))
        pc_np=pc_data[0:3,:]
        intensity_np=pc_data[3:,:]
        # load point cloud
        # random sampling
        pc_np, intensity_np = self.downsample_np(pc_np, intensity_np, self.opt.input_pt_num)
        img = np.load(os.path.join(self.img_path,'%06d.npy'%index))
        K = np.load(os.path.join(self.K_path,'%06d.npy'%index))
        # random crop into input size
        if 'train' == self.mode:
            img_crop_dx = random.randint(0, img.shape[1] - self.opt.img_W)
            img_crop_dy = random.randint(0, img.shape[0] - self.opt.img_H)
        else:
            img_crop_dx = int((img.shape[1] - self.opt.img_W) / 2)
            img_crop_dy = int((img.shape[0] - self.opt.img_H) / 2)
        # crop image
        img = img[img_crop_dy:img_crop_dy + self.opt.img_H,
              img_crop_dx:img_crop_dx + self.opt.img_W, :]
        K = camera_matrix_cropping(K, dx=img_crop_dx, dy=img_crop_dy)
        # ------------- apply random transform on points under the NWU coordinate ------------
        # if 'train' == self.mode:
        # -------------- augmentation ----------------------
        # pc_np, intensity_np = self.augment_pc(pc_np, intensity_np)
        #print(img.shape)
        if 'train' == self.mode:
            img = self.augment_img(img)
        # random rotate pc_np
        # pc_np = transform_pc_np(Pr, pc_np)
        # Quarter-size intrinsics: masks below live at 1/4 image resolution.
        K_4 = camera_matrix_scaling(K, 0.25)
        # Project the (still unperturbed) cloud into the quarter-res image to
        # decide which points land inside the frustum.
        pc_ = np.dot(K_4, pc_np)
        pc_mask = np.zeros((1, np.shape(pc_np)[1]), dtype=np.float32)
        pc_[0:2, :] = pc_[0:2, :] / pc_[2:, :]
        xy = np.floor(pc_[0:2, :])
        is_in_picture = (xy[0, :] >= 0) & (xy[0, :] <= (self.opt.img_W * 0.25 - 1)) & (xy[1, :] >= 0) & (
                xy[1, :] <= (self.opt.img_H * 0.25 - 1)) & (pc_[2, :] > 0)
        pc_mask[:, is_in_picture] = 1.
        # NOTE(review): 'index' is reused below as a permutation buffer; the
        # sample id is no longer needed after the loads above.
        pc_kpt_idx = np.where(pc_mask.squeeze() == 1)[0]
        index = np.random.permutation(len(pc_kpt_idx))[0:self.opt.num_kpt]
        pc_kpt_idx = pc_kpt_idx[index]
        pc_outline_idx = np.where(pc_mask.squeeze() == 0)[0]
        index = np.random.permutation(len(pc_outline_idx))[0:self.opt.num_kpt]
        pc_outline_idx = pc_outline_idx[index]
        # Rasterize the projected points into a per-pixel occupancy mask.
        xy2 = xy[:, is_in_picture]
        img_mask = coo_matrix((np.ones_like(xy2[0, :]), (xy2[1, :], xy2[0, :])),
                              shape=(int(self.opt.img_H * 0.25), int(self.opt.img_W * 0.25))).toarray()
        img_mask = np.array(img_mask)
        img_mask[img_mask > 0] = 1.
        # Flattened (row-major) pixel index of each sampled in-frustum point.
        img_kpt_index = xy[1, pc_kpt_idx] * self.opt.img_W * 0.25 + xy[0, pc_kpt_idx]
        img_outline_index = np.where(img_mask.squeeze().reshape(-1) == 0)[0]
        index = np.random.permutation(len(img_outline_index))[0:self.opt.num_kpt]
        img_outline_index = img_outline_index[index]
        # Random SE(3) perturbation applied AFTER the masks were computed; the
        # returned 'P' is its inverse, i.e. the transform the network must
        # recover to re-align the cloud with the camera.
        P_np = self.generate_random_transform(self.opt.P_tx_amplitude, self.opt.P_ty_amplitude,
                                              self.opt.P_tz_amplitude,
                                              self.opt.P_Rx_amplitude, self.opt.P_Ry_amplitude,
                                              self.opt.P_Rz_amplitude)
        '''r_max=np.max(np.sqrt(np.sum(pc_np**2,axis=0)))
        print('max range',r_max)'''
        pc_np = np.dot(P_np[0:3, 0:3], pc_np) + P_np[0:3, 3:]
        P_inv = np.linalg.inv(P_np)
        # Two node sets via FPS on oversampled random subsets (coarse pc hierarchy).
        node_a_np, _ = self.farthest_sampler.sample(pc_np[:, np.random.choice(pc_np.shape[1],
                                                                              int(self.opt.node_a_num * 8),
                                                                              replace=False)],
                                                    k=self.opt.node_a_num)
        node_b_np, _ = self.farthest_sampler.sample(pc_np[:, np.random.choice(pc_np.shape[1],
                                                                              int(self.opt.node_b_num * 8),
                                                                              replace=False)],
                                                    k=self.opt.node_b_num)
        # visualize nodes
        # ax = vis_tools.plot_pc(pc_np, size=1)
        # ax = vis_tools.plot_pc(node_a_np, size=10, ax=ax)
        # plt.show()
        # -------------- convert to torch tensor ---------------------
        pc = torch.from_numpy(pc_np.astype(np.float32))  # 3xN
        intensity = torch.from_numpy(intensity_np.astype(np.float32))  # 1xN
        # Surface normals are not exported for nuScenes; zero placeholder.
        sn = torch.zeros(pc.size(), dtype=pc.dtype, device=pc.device)
        P = torch.from_numpy(P_inv.astype(np.float32))  # 4x4 inverse perturbation
        img = torch.from_numpy(img.astype(np.float32)/255).permute(2, 0, 1).contiguous()  # 3xHxW
        K = torch.from_numpy(K_4.astype(np.float32))  # 3x3 quarter-scale intrinsics
        # print(P)
        # print(pc)
        # print(intensity)
        return {'pc': pc,
                'intensity': intensity,
                'sn': sn,
                'P': P,
                'img': img,
                'K': K,
                'pc_mask': torch.from_numpy(pc_mask).float(),
                'img_mask': torch.from_numpy(img_mask).float(),  # (img_H/4, img_W/4)
                'pc_kpt_idx': torch.from_numpy(pc_kpt_idx),  # num_kpt
                'pc_outline_idx': torch.from_numpy(pc_outline_idx),  # num_kpt
                'img_kpt_idx': torch.from_numpy(img_kpt_index).long(),  # num_kpt
                'img_outline_index': torch.from_numpy(img_outline_index).long(),
                'node_a': torch.from_numpy(node_a_np).float(),
                'node_b': torch.from_numpy(node_b_np).float()
                }
if __name__ == '__main__':
    # Smoke test: walk sparsely over the training split and print image stats.
    # (The trailing triple-quoted block is a disabled visualization/projection
    # check kept as-is.)
    root_path = 'F:\\nuscenes'
    opt = options.Options()
    dataset = nuScenesLoader(root_path, 'train', opt)
    #print(dataset[100]['img'].size())
    #assert False
    for i in range(0, len(dataset), 1000):
        print('--- %d ---' % i)
        data = dataset[i]
        P = data['P'].numpy()
        img = data['img'].numpy().transpose(1, 2, 0)
        print(np.max(img))
        '''print(img.shape)
        H = img.shape[0]
        W = img.shape[1]
        K = data[7].numpy()
        pc = data[0].numpy()
        pointcloud2 = open3d.geometry.PointCloud()
        pointcloud2.points = open3d.utility.Vector3dVector(pc.T)
        open3d.io.write_point_cloud('pc_rot.ply', pointcloud2)
        pc = np.dot(P[0:3, 0:3], pc) + P[0:3, 3:]
        pointcloud = open3d.geometry.PointCloud()
        pointcloud.points = open3d.utility.Vector3dVector(pc.T)
        open3d.io.write_point_cloud('pc.ply', pointcloud)
        uvz = np.dot(K, pc)
        depth = uvz[2, :]
        uv = uvz[0:2, :] / uvz[2:, :]
        in_img = (depth > 0) & (uv[0, :] >= 0) & (uv[0, :] <= W - 1) & (uv[1, :] >= 0) & (uv[1, :] <= H - 1)
        print(np.sum(in_img))
        print(P)
        uv = uv[:, in_img]
        depth = depth[in_img]
        # print(np.max(img))
        plt.figure(1)
        plt.imshow(img)
        plt.scatter(uv[0, :], uv[1, :], c=[depth], s=1)
        #plt.show()
        pointcloud = open3d.geometry.PointCloud()
        pointcloud.points = open3d.utility.Vector3dVector(data[0].numpy().T)
        #open3d.visualization.draw_geometries([pointcloud])'''
| 14,620 | 35.921717 | 112 | py |
CorrI2P | CorrI2P-main/network.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import layers_pc
import imagenet
from imagenet import ResidualConv,ImageUpSample
from pointnet import FPS
import pointnet2
from options import Options
class CorrI2P(nn.Module):
    """Image-to-point-cloud correspondence network.

    Encodes the point cloud with a two-level node hierarchy (node_a fine,
    node_b coarse) and the image with a CNN pyramid, exchanges information
    between the modalities via softmax-weighted (attention-style) feature
    mixing at the 1/32 and 1/16 image scales, and decodes per-pixel and
    per-point descriptors plus matchability scores.
    """
    def __init__(self,opt:Options):
        super(CorrI2P, self).__init__()
        self.opt=opt
        # Point-cloud encoder: per-point (Ca), per-node (Cb) and global (Cg) channels.
        self.pc_encoder=pointnet2.PCEncoder(opt,Ca=64,Cb=256,Cg=512)
        self.img_encoder=imagenet.ImageEncoder()
        # Spatial size of the "fine" image grid the node attention maps onto.
        self.H_fine_res = int(round(self.opt.img_H / self.opt.img_fine_resolution_scale))
        self.W_fine_res = int(round(self.opt.img_W / self.opt.img_fine_resolution_scale))
        # Predicts, per node_b, a softmax over the H_fine*W_fine image cells.
        self.node_b_attention_pn = layers_pc.PointNet(256+512,
                                                      [256, self.H_fine_res*self.W_fine_res],
                                                      activation=self.opt.activation,
                                                      normalization=self.opt.normalization,
                                                      norm_momentum=opt.norm_momentum,
                                                      norm_act_at_last=False)
        # Fuses node_b features with pc-global, attended image and image-global features.
        self.node_b_pn = layers_pc.PointNet(256+512+512+512,
                                            [1024, 512, 512],
                                            activation=self.opt.activation,
                                            normalization=self.opt.normalization,
                                            norm_momentum=opt.norm_momentum,
                                            norm_act_at_last=False)
        # node_a attends over a 2x finer grid (4x the cells of node_b's grid).
        self.node_a_attention_pn = layers_pc.PointNet(64 + 512,
                                                      [256, int(self.H_fine_res * self.W_fine_res * 4)],
                                                      activation=self.opt.activation,
                                                      normalization=self.opt.normalization,
                                                      norm_momentum=opt.norm_momentum,
                                                      norm_act_at_last=False)
        self.node_a_pn = layers_pc.PointNet(64+256+512,
                                            [512, 128, 128],
                                            activation=self.opt.activation,
                                            normalization=self.opt.normalization,
                                            norm_momentum=opt.norm_momentum,
                                            norm_act_at_last=False)
        # Per-point head input: node_a interp (128) + node_b interp (512) +
        # first/second PointNet stage outputs (32 + 64) — see forward().
        per_point_pn_in_channels = 32 + 64 + 128 + 512
        self.per_point_pn=layers_pc.PointNet(per_point_pn_in_channels,
                                             [256, 256, 128],
                                             activation=self.opt.activation,
                                             normalization=self.opt.normalization,
                                             norm_momentum=opt.norm_momentum,
                                             norm_act_at_last=True,
                                             )
        # Point-cloud heads: 64-d descriptor and sigmoid matchability score.
        self.pc_feature_layer=nn.Sequential(nn.Conv1d(128,128,1,bias=False),nn.BatchNorm1d(128),nn.ReLU(),nn.Conv1d(128,128,1,bias=False),nn.BatchNorm1d(128),nn.ReLU(),nn.Conv1d(128,64,1,bias=False))
        self.pc_score_layer=nn.Sequential(nn.Conv1d(128,128,1,bias=False),nn.BatchNorm1d(128),nn.ReLU(),nn.Conv1d(128,64,1,bias=False),nn.BatchNorm1d(64),nn.ReLU(),nn.Conv1d(64,1,1,bias=False),nn.Sigmoid())
        #self.img_32_attention_conv=nn.Sequential(ResidualConv(512+512,512,kernel_1=True),ResidualConv(512,512,kernel_1=True),ResidualConv(512,self.opt.node_b_num,kernel_1=True))
        #self.img_16_attention_conv=nn.Sequential(ResidualConv(512+256,256,kernel_1=True),ResidualConv(256,256,kernel_1=True),ResidualConv(256,self.opt.node_a_num,kernel_1=True))
        # Per-pixel attention over node_b (at 1/32 scale) and node_a (at 1/16 scale).
        self.img_32_attention_conv=nn.Sequential( nn.Conv2d(512+512,512,1,bias=False),nn.BatchNorm2d(512),nn.ReLU(),
                                                  nn.Conv2d(512,512,1,bias=False),nn.BatchNorm2d(512),nn.ReLU(),
                                                  nn.Conv2d(512,self.opt.node_b_num,1,bias=False))
        self.img_16_attention_conv=nn.Sequential( nn.Conv2d(512+256,256,1,bias=False),nn.BatchNorm2d(256),nn.ReLU(),
                                                  nn.Conv2d(256,256,1,bias=False),nn.BatchNorm2d(256),nn.ReLU(),
                                                  nn.Conv2d(256,self.opt.node_a_num,1,bias=False))
        # Image decoder (upsampling path) and the image descriptor/score heads.
        self.up_conv1=ImageUpSample(768+320,256)
        self.up_conv2=ImageUpSample(256+128,128)
        self.up_conv3=ImageUpSample(128+64+64,64)
        self.img_feature_layer=nn.Sequential(nn.Conv2d(64,64,1,bias=False),nn.BatchNorm2d(64),nn.ReLU(),nn.Conv2d(64,64,1,bias=False),nn.BatchNorm2d(64),nn.ReLU(),nn.Conv2d(64,64,1,bias=False))
        self.img_score_layer=nn.Sequential(nn.Conv2d(64,64,1,bias=False),nn.BatchNorm2d(64),nn.ReLU(),nn.Conv2d(64,64,1,bias=False),nn.BatchNorm2d(64),nn.ReLU(),nn.Conv2d(64,1,1,bias=False),nn.Sigmoid())
    def gather_topk_features(self, min_k_idx, features):
        """Gather, for each of N query items, the features of its k neighbors.

        :param min_k_idx: BxNxk neighbor indices into the M columns of features
        :param features: BxCxM
        :return: BxCxNxk gathered features
        """
        B, N, k = min_k_idx.size(0), min_k_idx.size(1), min_k_idx.size(2)
        C, M = features.size(1), features.size(2)
        return torch.gather(features.unsqueeze(3).expand(B, C, M, k),
                            index=min_k_idx.unsqueeze(1).expand(B, C, N, k),
                            dim=2)  # BxCxNxk
    def upsample_by_interpolation(self,
                                  interp_ab_topk_idx,
                                  node_a,
                                  node_b,
                                  up_node_b_features):
        """Interpolate node_b features onto node_a positions.

        Each node_a gets a weighted sum of its k nearest node_b features, with
        weight 1 - d_i / sum(d) over the k neighbor distances (closer neighbors
        weigh more).

        :param interp_ab_topk_idx: BxMaxk indices of nearest node_b per node_a
        :param node_a: Bx3xMa target positions
        :param node_b: Bx3xMb source positions
        :param up_node_b_features: BxCxMb source features
        :return: BxCxMa interpolated features
        """
        interp_ab_topk_node_b = self.gather_topk_features(interp_ab_topk_idx, node_b)  # Bx3xMaxk
        # Bx3xMa -> Bx3xMaxk -> BxMaxk
        interp_ab_node_diff = torch.norm(node_a.unsqueeze(3) - interp_ab_topk_node_b, dim=1, p=2, keepdim=False)
        interp_ab_weight = 1 - interp_ab_node_diff / torch.sum(interp_ab_node_diff, dim=2, keepdim=True)  # BxMaxk
        interp_ab_topk_node_b_features = self.gather_topk_features(interp_ab_topk_idx, up_node_b_features)  # BxCxMaxk
        # BxCxMaxk -> BxCxMa
        interp_ab_weighted_node_b_features = torch.sum(interp_ab_weight.unsqueeze(1) * interp_ab_topk_node_b_features,
                                                       dim=3)
        return interp_ab_weighted_node_b_features
    def forward(self,pc,intensity,sn,img,node_a,node_b):
        """Run both encoders, fuse the modalities, and decode dense outputs.

        :param pc: Bx3xN points; intensity Bx1xN; sn Bx3xN surface normals
        :param img: Bx3xHxW image
        :param node_a: Bx3xMa fine nodes; node_b: Bx3xMb coarse nodes
        :return: (img_feature_norm BxC_hxH'xW' L2-normalized per-pixel descriptors,
                  pc_feature_norm BxC_hxN L2-normalized per-point descriptors,
                  img_score Bx1xH'xW' sigmoid scores,
                  pc_score Bx1xN sigmoid scores)
        """
        #node_a=FPS(pc,self.opt.node_a_num)
        #node_b=FPS(pc,self.opt.node_b_num)
        B,N,Ma,Mb=pc.size(0),pc.size(2),node_a.size(2),node_b.size(2)
        pc_center,\
        cluster_mean, \
        node_a_min_k_idx, \
        first_pn_out, \
        second_pn_out, \
        node_a_features, \
        node_b_features, \
        global_feature = self.pc_encoder(pc,
                                         intensity,
                                         sn,
                                         node_a,
                                         node_b)
        '''print(node_a_features.size())
        print(node_b_features.size())'''
        #print(global_feature.size())
        C_global = global_feature.size(1)
        img_feature_set=self.img_encoder(img)
        '''for i in img_feature_set:
            print(i.size())'''
        # Image pyramid: channel counts per scale annotated on each line.
        img_global_feature=img_feature_set[-1] #512
        img_s32_feature_map=img_feature_set[-2] #512
        img_s16_feature_map=img_feature_set[-3] #256
        img_s8_feature_map=img_feature_set[-4] #128
        img_s4_feature_map=img_feature_set[-5] #64
        img_s2_feature_map=img_feature_set[-6] #64
        # --- pc -> image fusion at 1/32: each pixel softmax-attends over node_b ---
        img_s32_feature_map_pc_global_feature=torch.cat((img_s32_feature_map,global_feature.unsqueeze(-1).expand(B,global_feature.size(1),img_s32_feature_map.size(-2),img_s32_feature_map.size(-1))),dim=1)
        img_32_attention=self.img_32_attention_conv(img_s32_feature_map_pc_global_feature)
        img_32_attention=F.softmax(img_32_attention,dim=1)#(B,C,H,W)
        img_s32_feature_map_fusion=torch.cat((torch.sum(img_32_attention.unsqueeze(1)*node_b_features.unsqueeze(-1).unsqueeze(-1),dim=2),img_s32_feature_map),dim=1) #(B,512+256,H,W)
        # --- same fusion at 1/16 with node_a ---
        img_s16_feature_map_pc_global_feature=torch.cat((img_s16_feature_map,global_feature.unsqueeze(-1).expand(B,global_feature.size(1),img_s16_feature_map.size(-2),img_s16_feature_map.size(-1))),dim=1)
        img_16_attention=self.img_16_attention_conv(img_s16_feature_map_pc_global_feature)
        img_16_attention=F.softmax(img_16_attention,dim=1)
        img_s16_feature_map_fusion=torch.cat((torch.sum(img_16_attention.unsqueeze(1)*node_a_features.unsqueeze(-1).unsqueeze(-1),dim=2),img_s16_feature_map),dim=1) #(B,320,10,32)
        # --- image decoder: upsample fused maps and produce pixel outputs ---
        image_feature_16=self.up_conv1(img_s32_feature_map_fusion,img_s16_feature_map_fusion)
        image_feature_8=self.up_conv2(image_feature_16,img_s8_feature_map)
        img_s4_feature_map=torch.cat((img_s4_feature_map,F.interpolate(img_s2_feature_map,scale_factor=0.5)),dim=1)
        image_feature_mid=self.up_conv3(image_feature_8,img_s4_feature_map)
        img_feature=self.img_feature_layer(image_feature_mid)
        img_score=self.img_score_layer(image_feature_mid)
        img_feature_norm=F.normalize(img_feature, dim=1,p=2)
        # --- image -> pc fusion: nodes softmax-attend over image cells ---
        C_img=img_global_feature.size(1)
        img_s16_feature_map_BCHw=img_s16_feature_map.view(B,img_s16_feature_map.size(1),-1)
        img_s32_feature_map_BCHw=img_s32_feature_map.view(B,img_s32_feature_map.size(1),-1)
        img_global_feature_BCMa = img_global_feature.squeeze(3).expand(B, C_img, Ma) # BxC_img -> BxC_imgxMa
        img_global_feature_BCMb = img_global_feature.squeeze(3).expand(B, C_img, Mb) # BxC_img -> BxC_imgxMb
        node_b_attention_score = self.node_b_attention_pn(torch.cat((node_b_features,
                                                                     img_global_feature_BCMb), dim=1)) # Bx(H*W)xMb
        node_b_attention_score=F.softmax(node_b_attention_score,dim=1)
        node_b_weighted_img_s32_feature_map = torch.sum(img_s32_feature_map_BCHw.unsqueeze(3) * node_b_attention_score.unsqueeze(1),
                                                        dim=2) # BxC_imgx(H*W)xMb -> BxC_imgxMb
        up_node_b_features = self.node_b_pn(torch.cat((node_b_features,
                                                       global_feature.expand(B, C_global, Mb),
                                                       node_b_weighted_img_s32_feature_map,
                                                       img_global_feature_BCMb), dim=1)) # BxCxMb
        # interpolation of pc over node_b
        pc_node_b_diff = torch.norm(pc.unsqueeze(3) - node_b.unsqueeze(2), p=2, dim=1, keepdim=False) # BxNxMb
        # BxNxk
        _, interp_pc_node_b_topk_idx = torch.topk(pc_node_b_diff, k=self.opt.k_interp_point_b,
                                                  dim=2, largest=False, sorted=True)
        interp_pb_weighted_node_b_features = self.upsample_by_interpolation(interp_pc_node_b_topk_idx,
                                                                            pc,
                                                                            node_b,
                                                                            up_node_b_features)
        # interpolation of point over node_a ----------------------------------------------
        # use attention method to select resnet features for each node_a_feature
        node_a_attention_score = self.node_a_attention_pn(torch.cat((node_a_features,
                                                                     img_global_feature_BCMa), dim=1)) # Bx(H*W)xMa
        node_a_attention_score=F.softmax(node_a_attention_score,dim=1)
        node_a_weighted_img_s16_feature_map = torch.sum(
            img_s16_feature_map_BCHw.unsqueeze(3) * node_a_attention_score.unsqueeze(1),
            dim=2) # BxC_imgx(H*W)xMa -> BxC_imgxMa
        # interpolation of node_a over node_b
        node_a_node_b_diff = torch.norm(node_a.unsqueeze(3) - node_b.unsqueeze(2), p=2, dim=1, keepdim=False) # BxMaxMb
        _, interp_nodea_nodeb_topk_idx = torch.topk(node_a_node_b_diff, k=self.opt.k_interp_ab,
                                                    dim=2, largest=False, sorted=True)
        interp_ab_weighted_node_b_features = self.upsample_by_interpolation(interp_nodea_nodeb_topk_idx,
                                                                            node_a,
                                                                            node_b,
                                                                            up_node_b_features)
        up_node_a_features = self.node_a_pn(torch.cat((node_a_features,
                                                       interp_ab_weighted_node_b_features,
                                                       node_a_weighted_img_s16_feature_map),
                                                      dim=1)) # BxCxMa
        # Scatter node features back to every raw point, then per-point heads.
        interp_pa_weighted_node_a_features = self.upsample_by_interpolation(node_a_min_k_idx,
                                                                            pc,
                                                                            node_a,
                                                                            up_node_a_features)
        pc_label_scores = self.per_point_pn(torch.cat((interp_pa_weighted_node_a_features,
                                                       interp_pb_weighted_node_b_features,
                                                       first_pn_out,
                                                       second_pn_out), dim=1))
        pc_feature=self.pc_feature_layer(pc_label_scores)
        pc_score=self.pc_score_layer(pc_label_scores)
        pc_feature_norm=F.normalize(pc_feature, dim=1,p=2)
        #img_feature=torch.rand(12,64,40,128).cuda()
        return img_feature_norm,pc_feature_norm,img_score,pc_score
if __name__=='__main__':
    # Standalone smoke test: run the network once on random inputs (GPU required).
    opt=Options()
    pc=torch.rand(10,3,20480).cuda()
    intensity=torch.rand(10,1,20480).cuda()
    sn=torch.rand(10,3,20480).cuda()
    img=torch.rand(10,3,160,512).cuda()
    # Fix: CorrI2P.forward(pc, intensity, sn, img, node_a, node_b) requires the
    # two node sets; the original call omitted them and raised TypeError.
    node_a=torch.rand(10,3,opt.node_a_num).cuda()
    node_b=torch.rand(10,3,opt.node_b_num).cuda()
    net=CorrI2P(opt).cuda()
    a,b,c,d=net(pc,intensity,sn,img,node_a,node_b)
    print(a.size())
    print(b.size())
    print(c.size())
    print(d.size())
| 14,508 | 57.504032 | 206 | py |
CorrI2P | CorrI2P-main/eval_all.py | import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import torch
import argparse
from network import DenseI2P
from kitti_pc_img_dataloader import kitti_pc_img_dataset
#from loss2 import kpt_loss, kpt_loss2, eval_recall
import datetime
import logging
import math
import numpy as np
import options
if __name__=='__main__':
    # Evaluation driver: runs the trained model over the KITTI val split and
    # dumps every per-batch input/output tensor to .npy files for offline
    # registration / analysis.
    parser = argparse.ArgumentParser(description='Point Cloud Registration')
    parser.add_argument('--epoch', type=int, default=25, metavar='epoch',
                        help='number of epoch to train')
    parser.add_argument('--train_batch_size', type=int, default=12, metavar='train_batch_size',
                        help='Size of train batch')
    parser.add_argument('--val_batch_size', type=int, default=24, metavar='val_batch_size',
                        help='Size of val batch')
    parser.add_argument('--data_path', type=str, default='/home/siyu_ren/kitti_dataset/', metavar='data_path',
                        help='train and test data path')
    parser.add_argument('--num_point', type=int, default=40960, metavar='num_point',
                        help='point cloud size to train')
    parser.add_argument('--num_workers', type=int, default=6, metavar='num_workers',
                        help='num of CPUs')
    parser.add_argument('--val_freq', type=int, default=300, metavar='val_freq',
                        help='')
    parser.add_argument('--lr', type=float, default=0.01, metavar='lr',
                        help='')
    parser.add_argument('--P_tx_amplitude', type=float, default=10, metavar='P_tx_amplitude',
                        help='')
    parser.add_argument('--P_ty_amplitude', type=float, default=0, metavar='P_ty_amplitude',
                        help='')
    parser.add_argument('--P_tz_amplitude', type=float, default=10, metavar='P_tz_amplitude',
                        help='')
    parser.add_argument('--P_Rx_amplitude', type=float, default=0, metavar='P_Rx_amplitude',
                        help='')
    parser.add_argument('--P_Ry_amplitude', type=float, default=2*math.pi, metavar='P_Ry_amplitude',
                        help='')
    parser.add_argument('--P_Rz_amplitude', type=float, default=0, metavar='P_Rz_amplitude',
                        help='')
    parser.add_argument('--save_path', type=str, default='./log', metavar='save_path',
                        help='path to save log and model')
    parser.add_argument('--dist_thres', type=float, default=1, metavar='dist_thres',
                        help='')
    parser.add_argument('--pos_margin', type=float, default=0.2, metavar='pos_margin',
                        help='')
    parser.add_argument('--neg_margin', type=float, default=1.8, metavar='neg_margin',
                        help='')
    args = parser.parse_args()
    # Validation set built with the same pose-perturbation ranges as training.
    test_dataset = kitti_pc_img_dataset(args.data_path, 'val', args.num_point,
                                        P_tx_amplitude=args.P_tx_amplitude,
                                        P_ty_amplitude=args.P_ty_amplitude,
                                        P_tz_amplitude=args.P_tz_amplitude,
                                        P_Rx_amplitude=args.P_Rx_amplitude,
                                        P_Ry_amplitude=args.P_Ry_amplitude,
                                        P_Rz_amplitude=args.P_Rz_amplitude,is_front=False)
    assert len(test_dataset) > 10
    testloader=torch.utils.data.DataLoader(test_dataset,batch_size=args.val_batch_size,shuffle=False,drop_last=True,num_workers=args.num_workers)
    opt=options.Options()
    # NOTE(review): network.py in this repo defines CorrI2P; confirm DenseI2P
    # is the intended class name for this checkpoint.
    model=DenseI2P(opt)
    model.load_state_dict(torch.load('./log_xy_40960_128/dist_thres_%0.2f_pos_margin_%0.2f_neg_margin_%0.2f/mode_last.t7'%(args.dist_thres,args.pos_margin,args.neg_margin)))
    model=model.cuda()
    save_path='result_all_dist_thres_%0.2f_pos_margin_%0.2f_neg_margin_%0.2f'%(args.dist_thres,args.pos_margin,args.neg_margin)
    # Fix: the old `try: os.mkdir(save_path) except: pass` swallowed *every*
    # error (permissions, bad path, ...), not just "already exists".
    os.makedirs(save_path, exist_ok=True)
    with torch.no_grad():
        for step,data in enumerate(testloader):
            model.eval()
            img=data['img'].cuda()
            pc=data['pc'].cuda()
            intensity=data['intensity'].cuda()
            sn=data['sn'].cuda()
            K=data['K'].cuda()
            P=data['P'].cuda()
            pc_mask=data['pc_mask'].cuda()
            img_mask=data['img_mask'].cuda()
            node_a=data['node_a'].cuda()
            node_b=data['node_b'].cuda()
            # (Removed a dead `pc_feature=torch.cat((intensity,sn),dim=1)`:
            # the value was overwritten by the model call on the next line.)
            img_feature,pc_feature,img_score,pc_score=model(pc,intensity,sn,img,node_a,node_b)
            # Dump every tensor needed to reproduce/score this batch offline.
            np.save(os.path.join(save_path,'img_%d.npy'%(step)),img.cpu().numpy())
            np.save(os.path.join(save_path,'pc_%d.npy'%(step)),pc.cpu().numpy())
            np.save(os.path.join(save_path,'pc_score_%d.npy'%(step)),pc_score.data.cpu().numpy())
            np.save(os.path.join(save_path,'pc_mask_%d.npy'%(step)),pc_mask.data.cpu().numpy())
            np.save(os.path.join(save_path,'K_%d.npy'%(step)),K.data.cpu().numpy())
            np.save(os.path.join(save_path,'img_mask_%d.npy'%(step)),img_mask.data.cpu().numpy())
            np.save(os.path.join(save_path,'img_score_%d.npy'%(step)),img_score.data.cpu().numpy())
            np.save(os.path.join(save_path,'img_feature_%d.npy'%(step)),img_feature.data.cpu().numpy())
            np.save(os.path.join(save_path,'pc_feature_%d.npy'%(step)),pc_feature.data.cpu().numpy())
            np.save(os.path.join(save_path,'P_%d.npy'%(step)),P.data.cpu().numpy())
| 5,958 | 53.172727 | 173 | py |
CorrI2P | CorrI2P-main/loss.py | from numpy import positive
import torch
import torch.nn.functional as F
import numpy as np
def desc_loss(img_features,pc_features,mask,pos_margin=0.1,neg_margin=1.4,log_scale=10,num_kpt=512):
    """Circle-style descriptor matching loss over an image/point distance matrix.

    Distance is 1 - <img, pc> (cosine distance if features are unit-normalized
    — presumably they are; confirm against the caller). Positives (mask==1) are
    pushed below pos_margin, negatives above neg_margin, with self-weighted
    log-sum-exp terms over both rows and columns. num_kpt is unused but kept
    for interface compatibility.

    :param img_features: BxCxN descriptors
    :param pc_features: BxCxM descriptors
    :param mask: BxNxM correspondence mask (1 = match)
    :return: (scalar mean loss, BxNxM distance matrix)
    """
    match_mask = mask
    mismatch_mask = 1 - mask
    dists = 1 - torch.sum(img_features.unsqueeze(-1) * pc_features.unsqueeze(-2), dim=1)
    # Positive branch: mask out negatives with a huge offset, weight each pair
    # by how far it violates the positive margin (weights are detached).
    pos = dists - 1e5 * mismatch_mask
    pos_weight = torch.clamp((pos - pos_margin).detach(), min=0)
    lse_pos_row = torch.logsumexp(log_scale * (pos - pos_margin) * pos_weight, dim=-1)
    lse_pos_col = torch.logsumexp(log_scale * (pos - pos_margin) * pos_weight, dim=-2)
    # Negative branch: mirror image, masking out the positives.
    neg = dists + 1e5 * match_mask
    neg_weight = torch.clamp((neg_margin - neg).detach(), min=0)
    lse_neg_row = torch.logsumexp(log_scale * (neg_margin - neg) * neg_weight, dim=-1)
    lse_neg_col = torch.logsumexp(log_scale * (neg_margin - neg) * neg_weight, dim=-2)
    per_row = F.softplus(lse_pos_row + lse_neg_row) / log_scale
    per_col = F.softplus(lse_pos_col + lse_neg_col) / log_scale
    return torch.mean(per_row + per_col), dists
def desc_loss2(img_features,pc_features,mask,pos_margin=0.1,neg_margin=1.4,log_scale=10,num_kpt=512):
    """Row-only variant of desc_loss: image->point direction only.

    Identical to desc_loss except that only the per-row (last-dim) log-sum-exp
    terms contribute; the column direction is omitted. num_kpt is unused but
    kept for interface compatibility.

    :param img_features: BxCxN descriptors
    :param pc_features: BxCxM descriptors
    :param mask: BxNxM correspondence mask (1 = match)
    :return: (scalar mean loss, BxNxM distance matrix)
    """
    match_mask = mask
    mismatch_mask = 1 - mask
    dists = 1 - torch.sum(img_features.unsqueeze(-1) * pc_features.unsqueeze(-2), dim=1)
    # Positives masked against negatives, margin-violation weights detached.
    pos = dists - 1e5 * mismatch_mask
    pos_weight = torch.clamp((pos - pos_margin).detach(), min=0)
    lse_pos_row = torch.logsumexp(log_scale * (pos - pos_margin) * pos_weight, dim=-1)
    neg = dists + 1e5 * match_mask
    neg_weight = torch.clamp((neg_margin - neg).detach(), min=0)
    lse_neg_row = torch.logsumexp(log_scale * (neg_margin - neg) * neg_weight, dim=-1)
    per_row = F.softplus(lse_pos_row + lse_neg_row) / log_scale
    return torch.mean(per_row), dists
def det_loss(img_score_inline, img_score_outline, pc_score_inline, pc_score_outline, dists, mask):
    """Detector-score loss.

    The gap between the matched-pair distance and the closest-negative
    distance is weighted by the predicted inlier scores, while outlier
    scores are pushed towards zero via their plain mean.

    :param img_score_inline: (B, 1, N) image scores for inlier points
    :param img_score_outline: image scores for outlier points
    :param pc_score_inline: (B, 1, N) point-cloud scores for inlier points
    :param pc_score_outline: point-cloud scores for outlier points
    :param dists: (B, N, N) descriptor distance matrix
    :param mask: (B, N, N) positive-pair mask
    :return: scalar loss
    """
    num_points = mask.size(-1)
    point_ids = torch.FloatTensor(np.arange(num_points)).to(mask.device)
    # (B, N, N) identity mask selecting the diagonal (matched) entries.
    diag_mask = torch.eq(point_ids.unsqueeze(1), point_ids.unsqueeze(0)).unsqueeze(0).expand(mask.size()).float()
    furthest_positive = torch.max(dists * diag_mask, dim=1)[0]  # (B, N)
    # Positives are pushed out of the min by a large additive constant.
    closest_negative = torch.min(dists + 1e5 * mask, dim=1)[0]  # (B, N)
    gap = furthest_positive - closest_negative
    inline_scores = img_score_inline.squeeze() + pc_score_inline.squeeze()
    loss_inline = torch.mean(gap * inline_scores)
    loss_outline = torch.mean(img_score_outline) + torch.mean(pc_score_outline)
    return loss_inline + loss_outline
def det_loss2(img_score_inline, img_score_outline, pc_score_inline, pc_score_outline, dists, mask):
    """Simplified detector-score loss.

    Drives inlier scores towards 1 and outlier scores towards 0. The
    distance statistics are still computed (mirroring det_loss) but, per
    the original implementation, do not contribute to the returned value.

    :param img_score_inline: (B, 1, N) image scores for inlier points
    :param img_score_outline: image scores for outlier points
    :param pc_score_inline: (B, 1, N) point-cloud scores for inlier points
    :param pc_score_outline: point-cloud scores for outlier points
    :param dists: (B, N, N) descriptor distance matrix (unused in the result)
    :param mask: (B, N, N) positive-pair mask
    :return: scalar loss
    """
    point_ids = torch.FloatTensor(np.arange(mask.size(-1))).to(mask.device)
    diag_mask = torch.eq(point_ids.unsqueeze(1), point_ids.unsqueeze(0)).unsqueeze(0).expand(mask.size()).float()
    # Kept for parity with det_loss; results intentionally unused here.
    furthest_positive = torch.max(dists * diag_mask, dim=1)[0]  # (B, N)
    closest_negative = torch.min(dists + 1e5 * mask, dim=1)[0]  # (B, N)
    loss_inline = torch.mean(1 - img_score_inline) + torch.mean(1 - pc_score_inline)
    loss_outline = torch.mean(img_score_outline) + torch.mean(pc_score_outline)
    return loss_inline + loss_outline
def cal_acc(img_features, pc_features, mask):
    """Matching accuracy between image and point-cloud descriptors.

    A column j counts as correct when its matched (positive) pair is
    strictly closer than the closest negative in that column.

    Fix: the original return line had corrupted metadata tokens fused onto
    it (`| 5,553 | 54.54 | ... |`), which made the statement invalid; the
    dead triple-quoted debug block is removed as well.

    :param img_features: (B, C, N) image descriptors
    :param pc_features: (B, C, N) point-cloud descriptors
    :param mask: (B, N, N) positive-pair mask (1 at matched pairs)
    :return: (B,) per-batch accuracy in [0, 1]
    """
    # Squared Euclidean distance for every (image, point) pair: (B, N, N)
    dist = torch.sum((img_features.unsqueeze(-1) - pc_features.unsqueeze(-2)) ** 2, dim=1)
    furthest_positive, _ = torch.max(dist * mask, dim=1)
    # Adding a large constant at positive entries excludes them from the min.
    closest_negative, _ = torch.min(dist + 1e5 * mask, dim=1)
    diff = furthest_positive - closest_negative
    accuracy = (diff < 0).sum(dim=1) / dist.size(1)
    return accuracy
import torch
import torch.nn as nn
import math
from typing import Tuple, List
import operations
class Swish(nn.Module):
    """Swish activation: f(x) = x * sigmoid(x), applied element-wise."""

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return swish(x); output has the same shape as the input."""
        return torch.sigmoid(x) * x
class MyLinear(nn.Module):
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 normalization: str='batch',
                 norm_momentum: float = 0.1,
                 activation: str = 'relu',
                 dropout_rate: float=None):
        """
        Customized Linear module that integrates pytorch Linear, normalization and activation functions
        :param in_channels: C of input tensor
        :param out_channels: C of output tensor
        :param normalization: normalization method, 'batch', 'instance'
        :param norm_momentum: momentum in normalization layer
        :param activation: activation method, 'relu', 'elu', 'swish', 'leakyrelu', 'selu'
        :param dropout_rate: drop percentage
        """
        super(MyLinear, self).__init__()
        self.activation = activation
        self.normalization = normalization
        # Dropout is only enabled for a rate strictly inside (0, 1).
        if dropout_rate is not None and dropout_rate > 0 and dropout_rate < 1:
            self.dropout = nn.Dropout(p=dropout_rate)
        else:
            self.dropout = None
        self.linear = nn.Linear(in_channels, out_channels, bias=True)
        # NOTE(review): a normalization/activation string outside the supported
        # set leaves self.norm / self.act undefined; forward() only guards
        # against None, so an unknown non-None string raises AttributeError there.
        if self.normalization == 'batch':
            self.norm = nn.BatchNorm1d(out_channels, momentum=norm_momentum, affine=True)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(out_channels, momentum=norm_momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif 'elu' == activation:
            self.act = nn.ELU(alpha=1.0)
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.01)
        elif 'selu' == self.activation:
            self.act = nn.SELU()
        self.weight_init()
    def weight_init(self):
        """
        Weight initialization: He-style normal (std = sqrt(2 / fan_in)) for
        Linear weights with zero bias; normalization layers start as
        identity (weight 1, bias 0).
        :return: None
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                n = m.in_features
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.fill_(0)
            elif isinstance(m, nn.BatchNorm2d) \
                    or isinstance(m, nn.BatchNorm1d) \
                    or isinstance(m, nn.BatchNorm3d) \
                    or isinstance(m, nn.InstanceNorm2d) \
                    or isinstance(m, nn.InstanceNorm1d) \
                    or isinstance(m, nn.InstanceNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Linear -> normalization -> activation -> dropout
        :param x: <torch.FloatTensor, BxC> Input pytorch tensor
        :return: torch.Tensor, BxC
        """
        x = self.linear(x)
        if self.normalization is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.act(x)
        if self.dropout is not None:
            x = self.dropout(x)
        return x
class MyConv2d(nn.Module):
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: int or Tuple,
                 stride: int=1,
                 padding: int=0,
                 bias: bool=True,
                 normalization: str = 'batch' or None,
                 norm_momentum: float = 0.1,
                 activation: str = 'relu' or None):
        """
        Customized nn.Conv2d module that integrates pytorch Conv2d, normalization and activation functions
        :param in_channels: C of input tensor
        :param out_channels: C of output tensor
        :param kernel_size: kernel size of 2d convolution, int or Tuple[int, int]
        :param stride: stride of 2d convolution, int or Tuple[int, int]
        :param padding: padding, int or Tuple[int, int]
        :param bias: whether to perform bias
        :param normalization: normalization method, 'batch', 'instance'
        :param norm_momentum: momentum in normazliation layer
        :param activation: activation method, 'relu', 'elu', 'swish', 'leakyrelu', 'selu'
        """
        super(MyConv2d, self).__init__()
        self.activation = activation
        self.normalization = normalization
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
        # NOTE(review): an unsupported normalization/activation string leaves
        # self.norm / self.act undefined; forward() only guards against None.
        if self.normalization == 'batch':
            self.norm = nn.BatchNorm2d(out_channels, momentum=norm_momentum, affine=True)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm2d(out_channels, momentum=norm_momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif self.activation == 'elu':
            self.act = nn.ELU(alpha=1.0)
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.01)
        elif 'selu' == self.activation:
            self.act = nn.SELU()
        self.weight_init()
    def weight_init(self):
        """
        Weight initialization: He-style normal with fan-in computed from the
        kernel area times in_channels; zero bias; normalization layers start
        as identity.
        :return: None
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.fill_(0)
            elif isinstance(m, nn.BatchNorm2d) \
                    or isinstance(m, nn.BatchNorm1d) \
                    or isinstance(m, nn.BatchNorm3d) \
                    or isinstance(m, nn.InstanceNorm2d) \
                    or isinstance(m, nn.InstanceNorm1d) \
                    or isinstance(m, nn.InstanceNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Conv2d -> normalization -> activation
        :param x: <torch.FloatTensor, BxCxHxW>
        :return: <torch.FloatTensor, BxCxHxW>
        """
        x = self.conv(x)
        if self.normalization is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.act(x)
        return x
class UpConv(nn.Module):
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 scale_factor: float=2.0,
                 mode: str='bilinear',
                 kernel_size: int=3,
                 stride: int=1,
                 padding: int=1,
                 normalization: str=None,
                 activation: str=None):
        """
        This is a upsampling module. Instead of transposed convolution, we use Upsampling + Conv2d.
        Note that the kernel_size, stride, padding should be tuned to acquire correct output size
        :param in_channels: C of input tensor
        :param out_channels: C of output tensor
        :param scale_factor: upsampling scale factor
        :param mode: the upsampling algorithm: one of 'nearest', 'linear', 'bilinear', 'bicubic' and 'trilinear'.
        :param kernel_size: kernel size of conv2d
        :param stride: stride of conv2d
        :param padding: padding of conv2d
        :param normalization: normalization method, 'batch', 'instance'
        :param activation: activation method, 'relu', 'elu', 'swish', 'leakyrelu', 'selu'
        """
        super(UpConv, self).__init__()
        self.activation = activation
        self.normalization = normalization
        self.up_sample = nn.Upsample(scale_factor=scale_factor, mode=mode)
        self.conv = MyConv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=True,
                             normalization=normalization, activation=activation)
        self.weight_init()
    def weight_init(self):
        """
        Weight initialization.
        NOTE: this runs after the inner MyConv2d's own weight_init and
        overrides it: std here is fan-out based (out_channels) instead of
        fan-in based, and the bias is set to 0.001 instead of 0.
        :return:
        """
        for m in self.modules():
            if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.fill_(0.001)
            elif isinstance(m, nn.BatchNorm2d) \
                    or isinstance(m, nn.BatchNorm1d) \
                    or isinstance(m, nn.BatchNorm3d) \
                    or isinstance(m, nn.InstanceNorm2d) \
                    or isinstance(m, nn.InstanceNorm1d) \
                    or isinstance(m, nn.InstanceNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self, x):
        """
        nn.Upsample -> MyConv2d
        :param x: <torch.FloatTensor, BxCxHxW>
        :return: <torch.FloatTensor, BxCxH'xW'> (spatial size scaled by scale_factor, then convolved)
        """
        x = self.up_sample(x)
        x = self.conv(x)
        return x
class EquivariantLayer(nn.Module):
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 normalization: str = 'batch',
                 norm_momentum: float=0.1,
                 activation: str = 'relu',
                 dropout_rate: float = None):
        """
        This is the building block of PointNet, i.e., kernel size 1 Conv1d
        :param in_channels: C of input tensor
        :param out_channels: C of output tensor
        :param normalization: normalization method, 'batch', 'instance'
        :param norm_momentum: momentum in normazliation layer
        :param activation: activation method, 'relu', 'elu', 'swish', 'leakyrelu', 'selu'
        :param dropout_rate: drop percentage; only values strictly inside (0, 1) enable dropout
        """
        super(EquivariantLayer, self).__init__()
        self.activation = activation
        self.normalization = normalization
        # 1x1 Conv1d == shared per-point fully-connected layer.
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
        if 'batch' == self.normalization:
            self.norm = nn.BatchNorm1d(out_channels, momentum=norm_momentum, affine=True)
        elif 'instance' == self.normalization:
            self.norm = nn.InstanceNorm1d(out_channels, momentum=norm_momentum, affine=True)
        if 'relu' == self.activation:
            self.act = nn.ReLU()
        elif 'elu' == self.activation:
            self.act = nn.ELU(alpha=1.0)
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.01)
        elif 'selu' == self.activation:
            self.act = nn.SELU()
        if dropout_rate is not None and dropout_rate > 0 and dropout_rate < 1:
            self.dropout = nn.Dropout(p=dropout_rate)
        else:
            self.dropout = None
        self.weight_init()
    def weight_init(self):
        """
        Weight initialization: He-style normal for the Conv1d weight
        (fan-in = kernel_size * in_channels), zero bias; normalization
        layers start as identity.
        :return:
        """
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                n = m.kernel_size[0] * m.in_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.fill_(0)
            elif isinstance(m, nn.BatchNorm2d) \
                    or isinstance(m, nn.BatchNorm1d) \
                    or isinstance(m, nn.BatchNorm3d) \
                    or isinstance(m, nn.InstanceNorm2d) \
                    or isinstance(m, nn.InstanceNorm1d) \
                    or isinstance(m, nn.InstanceNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        This is the building block of PointNet, i.e., kernel size 1 Conv1d, followed by normalization and activation
        :param x: <torch.FloatTensor, BxCxL>
        :return: <torch.FloatTensor, BxCxL>
        """
        x = self.conv(x)
        if self.normalization is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.act(x)
        if self.dropout is not None:
            x = self.dropout(x)
        return x
class PointNet(nn.Module):
    def __init__(self,
                 in_channels: int,
                 out_channels_list: List[int],
                 normalization: str='batch',
                 norm_momentum: float=0.1,
                 activation: str='relu',
                 output_init_radius: float=None,
                 norm_act_at_last: bool=False,
                 dropout_list: List[float]=None):
        """
        PointNet: a stack of EquivariantLayer (1x1 Conv1d) blocks.
        :param in_channels: C of the input tensor
        :param out_channels_list: intermediate and final output channels
        :param normalization: normalization method, 'batch', 'instance'
        :param norm_momentum: momentum of the normalization layers
        :param activation: 'relu', 'elu', 'swish', 'leakyrelu', 'selu'
        :param output_init_radius: if given, the last layer's bias is drawn
            uniformly from [-output_init_radius, output_init_radius]
        :param norm_act_at_last: whether the final layer also gets
            normalization and activation
        :param dropout_list: per-layer dropout rates; None disables dropout
        """
        super(PointNet, self).__init__()
        if dropout_list is None:
            dropout_list = [-1] * len(out_channels_list)
        self.layers = nn.ModuleList()
        prev_channels = in_channels
        last_idx = len(out_channels_list) - 1
        for i, width in enumerate(out_channels_list):
            # The final layer is built "plain" unless norm_act_at_last is set.
            if i == last_idx and not norm_act_at_last:
                layer = EquivariantLayer(prev_channels,
                                         width,
                                         normalization=None,
                                         norm_momentum=None,
                                         activation=None,
                                         dropout_rate=dropout_list[i])
            else:
                layer = EquivariantLayer(prev_channels,
                                         width,
                                         normalization=normalization,
                                         norm_momentum=norm_momentum,
                                         activation=activation,
                                         dropout_rate=dropout_list[i])
            self.layers.append(layer)
            prev_channels = width
        # Optionally bound the initial output range via the last layer's bias.
        if output_init_radius is not None:
            self.layers[last_idx].conv.bias.data.uniform_(-1*output_init_radius, output_init_radius)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Apply every layer in order.
        :param x: <torch.FloatTensor, BxCxN>
        :return: <torch.FloatTensor, BxCxN>
        """
        out = x
        for block in self.layers:
            out = block(out)
        return out
class PointNetConv2d(nn.Module):
    def __init__(self,
                 in_channels: int,
                 out_channels_list: List[int],
                 normalization: str='batch',
                 norm_momentum: float=0.1,
                 activation: str='relu',
                 output_init_radius: float=None):
        """
        PointNet built from 1x1 MyConv2d layers, operating on BxCxMxN tensors.
        :param in_channels: C of the input tensor
        :param out_channels_list: intermediate and final output channels
        :param normalization: normalization method, 'batch', 'instance'
        :param norm_momentum: momentum of the normalization layers
        :param activation: 'relu', 'elu', 'swish', 'leakyrelu', 'selu'
        :param output_init_radius: if given, the last layer's bias is drawn
            uniformly from [-output_init_radius, output_init_radius]
        """
        super(PointNetConv2d, self).__init__()
        self.layers = nn.ModuleList()
        prev_channels = in_channels
        for width in out_channels_list:
            self.layers.append(MyConv2d(prev_channels,
                                        width,
                                        kernel_size=(1, 1),
                                        stride=1,
                                        padding=0,
                                        bias=True,
                                        normalization=normalization,
                                        norm_momentum=norm_momentum,
                                        activation=activation))
            prev_channels = width
        # Optionally bound the initial output range via the last layer's bias.
        if output_init_radius is not None:
            self.layers[len(out_channels_list)-1].conv.bias.data.uniform_(-1*output_init_radius, output_init_radius)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Apply every 1x1 conv block in order.
        :param x: <torch.FloatTensor, BxCxMxN>
        :return: <torch.FloatTensor, BxCxMxN>
        """
        out = x
        for block in self.layers:
            out = block(out)
        return out
class PointResNet(nn.Module):
    def __init__(self,
                 in_channels: int,
                 out_channels_list: List[int],
                 normalization: str='batch',
                 norm_momentum: float=0.1,
                 activation: str='relu'):
        """
        PointNet with skip connection
        in -> out[0]
        out[0] -> out[1]  ----
        out[1] -> out[2]     |
        ...      ...         |
        out[k-2]+out[1] -> out[k-1]  <---
        NOTE(review): forward() requires len(out_channels_list) >= 3; with
        fewer entries the intermediate variable is never assigned and the
        final concatenation raises NameError.
        :param in_channels: C of input tensor
        :param out_channels_list: List of channels of PointNet
        :param normalization: normalization method, 'batch', 'instance'
        :param norm_momentum: momentum in normazliation layer
        :param activation: activation method, 'relu', 'elu', 'swish', 'leakyrelu', 'selu'
        """
        super(PointResNet, self).__init__()
        self.out_channels_list = out_channels_list
        self.layers = nn.ModuleList()
        previous_out_channels = in_channels
        for i, c_out in enumerate(out_channels_list):
            self.layers.append(EquivariantLayer(previous_out_channels,
                                                c_out,
                                                norm_momentum=norm_momentum,
                                                normalization=normalization,
                                                activation=activation))
            previous_out_channels = c_out
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        PointNet with skip connection: the first layer's output is
        concatenated (channel-wise) with the last intermediate output and
        fed to the final layer.
        NOTE(review): the final layer's in_channels must equal
        out[0] + out[k-2] for the concatenation to match — verify at call site.
        :param x: <torch.FloatTensor, BxCxN>
        :return: <torch.FloatTensor, BxCxN>
        """
        layer0_out = self.layers[0](x)  # BxCxN
        for l in range(1, len(self.out_channels_list)-1):
            if l == 1:
                x_tmp = self.layers[l](layer0_out)
            else:
                x_tmp = self.layers[l](x_tmp)
        # Skip connection: concat first-layer features with the deepest intermediate.
        layer_final_out = self.layers[len(self.out_channels_list)-1](torch.cat((layer0_out, x_tmp), dim=1))
        return layer_final_out
class PointNetFusion(nn.Module):
    def __init__(self,
                 in_channels: int,
                 out_channels_list_before: List[int],
                 out_channels_list_after: List[int],
                 normalization: str='batch',
                 norm_momentum: float=0.1,
                 activation: str='relu',
                 act_norm_at_endof_pn1=True):
        """
        This is a modified PointNet. The maxpool output of the first PN is expanded and concatenate to
        the output (before maxpool) of the first PN. The concatenated features are forwarded into a second PN.
        :param in_channels: C of input tensor
        :param out_channels_list_before: List of channels in first PN
        :param out_channels_list_after: List of channels in second PN
        :param normalization: normalization method, 'batch', 'instance'
        :param norm_momentum: momentum in normazliation layer
        :param activation: activation method, 'relu', 'elu', 'swish', 'leakyrelu', 'selu'
        :param act_norm_at_endof_pn1: whether to apply activation and normalization at the last layer of first PointNet
        """
        super(PointNetFusion, self).__init__()
        self.layers_before = nn.ModuleList()
        previous_out_channels = in_channels
        for i, c_out in enumerate(out_channels_list_before):
            if act_norm_at_endof_pn1 or (i != len(out_channels_list_before)-1):
                self.layers_before.append(EquivariantLayer(previous_out_channels,
                                                           c_out,
                                                           normalization=normalization,
                                                           norm_momentum=norm_momentum,
                                                           activation=activation))
            else:
                # Last layer of the first PN is plain when act_norm_at_endof_pn1 is False.
                self.layers_before.append(EquivariantLayer(previous_out_channels,
                                                           c_out,
                                                           normalization=None,
                                                           activation=None))
            previous_out_channels = c_out
        self.layers_after = nn.ModuleList()
        # The second PN consumes per-point features concatenated with the
        # expanded global max feature, hence 2x channels.
        previous_out_channels = 2 * previous_out_channels
        for i, c_out in enumerate(out_channels_list_after):
            if i != len(out_channels_list_after)-1:
                self.layers_after.append(EquivariantLayer(previous_out_channels,
                                                          c_out,
                                                          normalization=normalization,
                                                          norm_momentum=norm_momentum,
                                                          activation=activation))
            else:
                # The final layer of the second PN is always plain (no norm/act).
                self.layers_after.append(EquivariantLayer(previous_out_channels,
                                                          c_out,
                                                          normalization=None,
                                                          activation=None))
            previous_out_channels = c_out
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Two PointNets
        :param x: <torch.FloatTensor BxCxN>
        :return: <torch.FloatTensor BxCxN>
        """
        for layer in self.layers_before:
            x = layer(x)
        # BxCxN -> BxCx1
        x_max, _ = torch.max(x, dim=2, keepdim=True)  # BxCx1
        x_max_expanded = x_max.expand(x.size())  # BxCxN
        # BxCxN -> Bx(C+C)xN
        y = torch.cat((x, x_max_expanded), dim=1)
        for layer in self.layers_after:
            y = layer(y)
        # BxCxN
        return y
class PointNetFusionConv2d(nn.Module):
    def __init__(self,
                 in_channels,
                 out_channels_list_before,
                 out_channels_list_after,
                 normalization='batch',
                 norm_momentum=0.1,
                 activation='relu',
                 act_norm_at_endof_pn1=True):
        """
        This is a modified PointNet. The maxpool output of the first PN is expanded and concatenated to
        the output (before maxpool) of the first PN. The concatenated features are forwarded into a second PN.
        The difference between this class and PointNetFusion is that:
        This class is implemented using Conv2d instead of EquivariantLayer,
        The input / output of this class is BxCxMxK / BxCxMx1,
        That is, M point clouds, each point cloud has K points.
        :param in_channels: C of input tensor
        :param out_channels_list_before: List of channels in first PN
        :param out_channels_list_after: List of channels in second PN
        :param normalization: normalization method, 'batch', 'instance'
        :param norm_momentum: momentum in normazliation layer
        :param activation: activation method, 'relu', 'elu', 'swish', 'leakyrelu', 'selu'
        :param act_norm_at_endof_pn1: whether to apply activation and normalization at the last layer of first PointNet
        """
        super(PointNetFusionConv2d, self).__init__()
        self.layers_before = nn.ModuleList()
        previous_out_channels = in_channels
        for i, c_out in enumerate(out_channels_list_before):
            if act_norm_at_endof_pn1 or (i != len(out_channels_list_before) - 1):
                self.layers_before.append(MyConv2d(previous_out_channels,
                                                   c_out,
                                                   kernel_size=(1, 1),
                                                   stride=1,
                                                   padding=0,
                                                   bias=True,
                                                   normalization=normalization,
                                                   norm_momentum=norm_momentum,
                                                   activation=activation))
            else:
                # Last layer of the first PN is plain when act_norm_at_endof_pn1 is False.
                self.layers_before.append(MyConv2d(previous_out_channels,
                                                   c_out,
                                                   kernel_size=(1, 1),
                                                   stride=1,
                                                   padding=0,
                                                   bias=True,
                                                   normalization=None,
                                                   activation=None))
            previous_out_channels = c_out
        self.layers_after = nn.ModuleList()
        # The second PN consumes per-point features concatenated with the
        # expanded per-cloud max feature, hence 2x channels.
        previous_out_channels = 2 * previous_out_channels
        for i, c_out in enumerate(out_channels_list_after):
            if i != len(out_channels_list_after)-1:
                self.layers_after.append(MyConv2d(previous_out_channels,
                                                  c_out,
                                                  kernel_size=(1, 1),
                                                  stride=1,
                                                  padding=0,
                                                  bias=True,
                                                  normalization=normalization,
                                                  norm_momentum=norm_momentum,
                                                  activation=activation))
            else:
                # The final layer of the second PN is always plain (no norm/act).
                self.layers_after.append(MyConv2d(previous_out_channels,
                                                  c_out,
                                                  kernel_size=(1, 1),
                                                  stride=1,
                                                  padding=0,
                                                  bias=True,
                                                  normalization=None,
                                                  activation=None))
            previous_out_channels = c_out
    def forward(self, x) -> torch.Tensor:
        """
        PointNetFusion that works for M point clouds, each point cloud has K points.
        :param x: <torch.FloatTensor, BxCxMxK>
        :return: <torch.FloatTensor, BxCxMx1>
        """
        for layer in self.layers_before:
            x = layer(x)
        # BxCxMxK -> BxCxMx1
        x_max, _ = torch.max(x, dim=3, keepdim=True)
        x_max_expanded = x_max.expand(x.size())  # BxCxMxK
        # BxCxMxK -> Bx(C+C)xMxK
        y = torch.cat((x, x_max_expanded), dim=1)
        for layer in self.layers_after:
            y = layer(y)
        y_max, _ = torch.max(y, dim=3, keepdim=True)  # BxCxMx1
        return y_max
class KNNModule(nn.Module):
    """Aggregate each node's K-nearest-neighborhood with a shared 1x1 conv
    stack followed by a per-neighborhood max-pool."""
    def __init__(self, in_channels, out_channels_list, activation, normalization, norm_momentum=0.1):
        super(KNNModule, self).__init__()
        self.layers = nn.ModuleList()
        previous_out_channels = in_channels
        for c_out in out_channels_list:
            self.layers.append(MyConv2d(previous_out_channels, c_out, kernel_size=1, stride=1, padding=0, bias=True,
                                        activation=activation, normalization=normalization,
                                        norm_momentum=norm_momentum))
            previous_out_channels = c_out
    def forward(self, coordinate, x, precomputed_knn_I, K, center_type):
        '''
        :param coordinate: Bx3xM Variable
        :param x: BxCxM Variable
        :param precomputed_knn_I: BxMxK'
        :param K: K neighbors
        :param center_type: 'center' or 'avg'.
            NOTE(review): any other value sets neighbors_center to None and
            the subtraction below raises TypeError.
        :return: (neighbors_center Bx3xM, feature BxCxM)
        '''
        # 0. compute knn
        # 1. for each node, calculate the center of its k neighborhood
        # 2. normalize nodes with the corresponding center
        # 3. fc for these normalized points
        # 4. maxpool for each neighborhood
        coordinate_tensor = coordinate.data  # Bx3xM
        if precomputed_knn_I is not None:
            assert precomputed_knn_I.size()[2] >= K
            knn_I = precomputed_knn_I[:, :, 0:K]
        else:
            coordinate_Mx1 = coordinate_tensor.unsqueeze(3)  # Bx3xMx1
            coordinate_1xM = coordinate_tensor.unsqueeze(2)  # Bx3x1xM
            norm = torch.sum((coordinate_Mx1 - coordinate_1xM) ** 2, dim=1)  # BxMxM, each row corresponds to each coordinate - other coordinates
            knn_D, knn_I = torch.topk(norm, k=K, dim=2, largest=False, sorted=True)  # BxMxK
        # debug
        # print(knn_D[0])
        # print(knn_I[0])
        # assert False
        neighbors = operations.knn_gather_wrapper(coordinate_tensor, knn_I)  # Bx3xMxK
        if center_type == 'avg':
            neighbors_center = torch.mean(neighbors, dim=3, keepdim=True)  # Bx3xMx1
        elif center_type == 'center':
            neighbors_center = coordinate_tensor.unsqueeze(3)  # Bx3xMx1
        else:
            neighbors_center = None
        # Detach: the de-centering geometry is treated as constant w.r.t. gradients.
        neighbors_decentered = (neighbors - neighbors_center).detach()
        neighbors_center = neighbors_center.squeeze(3).detach()
        # debug
        # print(neighbors[0, 0])
        # print(neighbors_avg[0, 0])
        # print(neighbors_decentered[0, 0])
        # assert False
        x_neighbors = operations.knn_gather_by_indexing(x, knn_I)  # BxCxMxK
        x_augmented = torch.cat((neighbors_decentered, x_neighbors), dim=1)  # Bx(3+C)xMxK
        for layer in self.layers:
            x_augmented = layer(x_augmented)
        feature, _ = torch.max(x_augmented, dim=3, keepdim=False)
        return neighbors_center, feature
class GeneralKNNFusionModule(nn.Module):
    """Query-to-database KNN aggregation: for each query point, gather its K
    nearest database points (coords + features), run a two-stage shared-MLP
    with a mid-way max-pool fusion, and max-pool to one feature per query."""
    def __init__(self, in_channels, out_channels_list_before, out_channels_list_after,
                 activation, normalization, norm_momentum=0.1):
        super(GeneralKNNFusionModule, self).__init__()
        self.layers_before = nn.ModuleList()
        previous_out_channels = in_channels
        for i, c_out in enumerate(out_channels_list_before):
            self.layers_before.append(
                MyConv2d(previous_out_channels, c_out, kernel_size=1, stride=1, padding=0, bias=True,
                         activation=activation, normalization=normalization,
                         norm_momentum=norm_momentum))
            previous_out_channels = c_out
        self.layers_after = nn.ModuleList()
        # Second stage consumes [global-max feature ; per-neighbor feature].
        previous_out_channels = 2 * previous_out_channels
        for i, c_out in enumerate(out_channels_list_after):
            self.layers_after.append(
                MyConv2d(previous_out_channels, c_out, kernel_size=1, stride=1, padding=0, bias=True,
                         activation=activation, normalization=normalization,
                         norm_momentum=norm_momentum))
            previous_out_channels = c_out
    def forward(self, query, database, database_features, K):
        '''
        :param query: Bx3xM FloatTensor
        :param database: Bx3xN FloatTensor
        :param database_features: BxCxN FloatTensor
        :param K: K neighbors
        :return: BxCxM FloatTensor, one aggregated feature per query point
        '''
        # 1. compute knn, query -> database
        # 2. for each query, normalize neighbors with its coordinate
        # 3. FC for these normalized points
        # 4. maxpool for each query
        B, M, N, C = query.size()[0], query.size()[2], database.size()[2], database_features.size()[1]
        query_Mx1 = query.detach().unsqueeze(3)  # Bx3xMx1
        database_1xN = database.detach().unsqueeze(2)  # Bx3x1xN
        norm = torch.norm(query_Mx1 - database_1xN, dim=1, keepdim=False)  # Bx3xMxN -> BxMxN
        knn_D, knn_I = torch.topk(norm, k=K, dim=2, largest=False, sorted=True)  # BxMxK, BxMxK
        # Expand indices over the channel dim so torch.gather can fetch coords/features.
        knn_I_3 = knn_I.unsqueeze(1).expand(B, 3, M, K).contiguous().view(B, 3, M*K)  # Bx3xMxK -> Bx3xM*K
        knn_I_C = knn_I.unsqueeze(1).expand(B, C, M, K).contiguous().view(B, C, M*K)  # BxCxMxK -> BxCxM*K
        query_neighbor_coord = torch.gather(database, dim=2, index=knn_I_3).view(B, 3, M, K)  # Bx3xMxK
        query_neighbor_feature = torch.gather(database_features, dim=2, index=knn_I_C).view(B, C, M, K)  # BxCxMxK
        query_neighbor_coord_decentered = (query_neighbor_coord - query_Mx1).detach()
        query_neighbor = torch.cat((query_neighbor_coord_decentered, query_neighbor_feature), dim=1)  # Bx(3+C)xMxK
        for layer in self.layers_before:
            query_neighbor = layer(query_neighbor)
        feature, _ = torch.max(query_neighbor, dim=3, keepdim=True)  # BxCxMx1
        y = torch.cat((feature.expand_as(query_neighbor), query_neighbor), dim=1)  # Bx2CxMxK
        for layer in self.layers_after:
            y = layer(y)
        feature, _ = torch.max(y, dim=3, keepdim=False)  # BxCxM
        return feature
class KNNFusionModule(nn.Module):
    """KNN aggregation with a two-stage PointNet over each node's neighborhood.

    For every node, its K nearest neighbors are gathered and de-centered,
    concatenated with their features, passed through ``layers_before``,
    max-pooled, fused back with the per-neighbor features, passed through
    ``layers_after`` and max-pooled again.

    Fixes vs. the original:
    - The final ``return`` line had corrupted metadata tokens fused onto it
      (``| 38,392 | 41.8... |``), which made the statement invalid.
    - An invalid ``center_type`` now raises an explicit ValueError instead of
      crashing later with a NameError on ``neighbors_center``.
    """
    def __init__(self, in_channels, out_channels_list_before, out_channels_list_after,
                 activation, normalization, norm_momentum=0.1):
        super(KNNFusionModule, self).__init__()
        self.layers_before = nn.ModuleList()
        previous_out_channels = in_channels
        for i, c_out in enumerate(out_channels_list_before):
            self.layers_before.append(
                MyConv2d(previous_out_channels, c_out, kernel_size=1, stride=1, padding=0, bias=True,
                         activation=activation, normalization=normalization,
                         norm_momentum=norm_momentum))
            previous_out_channels = c_out
        self.layers_after = nn.ModuleList()
        # Second stage consumes [neighborhood-max feature ; per-neighbor feature].
        previous_out_channels = 2 * previous_out_channels
        for i, c_out in enumerate(out_channels_list_after):
            self.layers_after.append(
                MyConv2d(previous_out_channels, c_out, kernel_size=1, stride=1, padding=0, bias=True,
                         activation=activation, normalization=normalization,
                         norm_momentum=norm_momentum))
            previous_out_channels = c_out
    def forward(self, coordinate, x, precomputed_knn_I, K, center_type):
        '''
        :param coordinate: Bx3xM Variable
        :param x: BxCxM Variable
        :param precomputed_knn_I: BxMxK' precomputed neighbor indices, or None
        :param K: K neighbors
        :param center_type: 'center' or 'avg'
        :return: (neighbors_center Bx3xM, feature BxCxM)
        :raises ValueError: if center_type is neither 'center' nor 'avg'
        '''
        coordinate_tensor = coordinate.data  # Bx3xM
        if precomputed_knn_I is not None:
            assert precomputed_knn_I.size()[2] >= K
            knn_I = precomputed_knn_I[:, :, 0:K]
        else:
            coordinate_Mx1 = coordinate_tensor.unsqueeze(3)  # Bx3xMx1
            coordinate_1xM = coordinate_tensor.unsqueeze(2)  # Bx3x1xM
            norm = torch.sum((coordinate_Mx1 - coordinate_1xM) ** 2, dim=1)  # BxMxM
            knn_D, knn_I = torch.topk(norm, k=K, dim=2, largest=False, sorted=True)  # BxMxK
        neighbors = operations.knn_gather_wrapper(coordinate_tensor, knn_I)  # Bx3xMxK
        if center_type == 'avg':
            neighbors_center = torch.mean(neighbors, dim=3, keepdim=True)  # Bx3xMx1
        elif center_type == 'center':
            neighbors_center = coordinate_tensor.unsqueeze(3)  # Bx3xMx1
        else:
            raise ValueError("center_type must be 'center' or 'avg', got %r" % (center_type,))
        # Detach: de-centering geometry is treated as constant w.r.t. gradients.
        neighbors_decentered = (neighbors - neighbors_center).detach()
        neighbors_center = neighbors_center.squeeze(3).detach()
        x_neighbors = operations.knn_gather_by_indexing(x, knn_I)  # BxCxMxK
        x_augmented = torch.cat((neighbors_decentered, x_neighbors), dim=1)  # Bx(3+C)xMxK
        for layer in self.layers_before:
            x_augmented = layer(x_augmented)
        feature, _ = torch.max(x_augmented, dim=3, keepdim=True)  # BxCxMx1
        y = torch.cat((feature.expand_as(x_augmented), x_augmented), dim=1)  # Bx2CxMxK
        for layer in self.layers_after:
            y = layer(y)
        feature, _ = torch.max(y, dim=3, keepdim=False)  # BxCxM
        return neighbors_center, feature
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
import time
import operations
from layers_pc import *
from options import Options
import index_max
class PCEncoder(nn.Module):
    def __init__(self, opt: Options, Ca: int, Cb: int, Cg: int):
        """
        Point-cloud encoder backbone.
        :param opt: global options (activation, normalization, momentum,
            node counts, input point count, ...)
        :param Ca: channel width of the first (node_a level) stage
        :param Cb: channel width of the second (node_b level) stage
        :param Cg: channel width of the final global stage
        """
        super(PCEncoder, self).__init__()
        self.opt = opt
        # first PointNet: input is decentered xyz (3) + intensity (1) + surface normal (3)
        self.first_pointnet = PointNet(3 + 1 + 3,
                                       [int(Ca / 2), int(Ca / 2), int(Ca / 2)],
                                       activation=self.opt.activation,
                                       normalization=self.opt.normalization,
                                       norm_momentum=opt.norm_momentum,
                                       norm_act_at_last=True)
        self.second_pointnet = PointNet(Ca, [Ca, Ca], activation=self.opt.activation,
                                        normalization=self.opt.normalization,
                                        norm_momentum=opt.norm_momentum,
                                        norm_act_at_last=True)
        # KNN fusion from node_a level features (3 + Ca in) to Cb-wide features.
        self.knnlayer = GeneralKNNFusionModule(3 + Ca, (int(Cb), int(Cb)),
                                               (Cb*2, Cb),
                                               activation=self.opt.activation,
                                               normalization=self.opt.normalization,
                                               norm_momentum=opt.norm_momentum)
        self.final_pointnet = PointNet(3+Cb, [int(Cg/2), Cg], activation=self.opt.activation,
                                       normalization=self.opt.normalization,
                                       norm_momentum=opt.norm_momentum,
                                       norm_act_at_last=True)
        # Constant index grid used in forward() to build the point-to-node
        # assignment mask (1 x N x Ma).
        node_idx_list = torch.from_numpy(np.arange(self.opt.node_a_num).astype(np.int64)).long()  # ma LongTensor
        self.node_idx_1NMa = node_idx_list.unsqueeze(0).unsqueeze(1).expand(1, self.opt.input_pt_num, self.opt.node_a_num)  # 1xNxMa
def forward(self, pc, intensity, sn, node_a, node_b):
'''
:param pc: Bx3xN Tensor
:param intensity: Bx1xN Tensor
:param sn: Bx3xN Tensor
:param node_a: Bx3xMa FloatTensor
:param node_b: Bx3xMb FloatTensor
:param keypoint_anchor_idx: BxK IntTensor
:return:
'''
B, N, Ma, Mb = pc.size(0), pc.size(2), node_a.size(2), node_b.size(2)
# modify the pc according to the node_a, minus the center
pc_B3NMa = pc.unsqueeze(3).expand(B, 3, N, Ma) #(B,3,N,Ma)
node_a_B3NMa = node_a.unsqueeze(2).expand(B, 3, N, Ma) #(B,3,N,Ma)
diff = torch.norm(pc_B3NMa - node_a_B3NMa, dim=1, p=2, keepdim=False) # BxNxMa
_, min_k_idx = torch.topk(diff, k=self.opt.k_interp_point_a, dim=2, largest=False, sorted=True) # BxNxk0
min_idx = min_k_idx[:, :, 0] # BxN
mask = torch.eq(min_idx.unsqueeze(2).expand(B, N, Ma),
self.node_idx_1NMa.to(device=min_idx.device, dtype=torch.long).expand(B, N, Ma)) # BxNxMa
mask_row_max, _ = torch.max(mask, dim=1, keepdim=False) # BxMa, this indicates whether the node has nearby points
mask_row_max_B1Ma_float = mask_row_max.unsqueeze(1).to(dtype=torch.float) #(B,1,Ma)
mask_B1NMa_float = mask.unsqueeze(1).to(dtype=torch.float) # Bx1xNxMa
mask_row_sum = torch.sum(mask_B1NMa_float, dim=2, keepdim=False) # Bx1xMa
# calculate the center of the cluster
pc_masked = pc.unsqueeze(3) * mask_B1NMa_float # BxCxNxMa
cluster_mean = torch.sum(pc_masked, dim=2) / (mask_row_sum + 1e-5).detach() # Bx3xMa
# assign each point with a center
pc_centers = torch.gather(cluster_mean,
index=min_idx.unsqueeze(1).expand(B, 3, N),
dim=2) # Bx3xN
pc_decentered = (pc - pc_centers).detach() # Bx3xN
# go through the first PointNet
pc_augmented = torch.cat((pc_decentered, intensity, sn), dim=1) # Bx7xN
first_pn_out = self.first_pointnet(pc_augmented) #(B,C,N)
with torch.cuda.device(first_pn_out.get_device()):
first_gather_index = index_max.forward_cuda_shared_mem(first_pn_out.detach(), min_idx.int(),
Ma).detach().long()
first_pn_out_masked_max = first_pn_out.gather(dim=2,
index=first_gather_index) * mask_row_max_B1Ma_float # BxCxMa
# scatter the masked_max back to the N points
scattered_first_masked_max = torch.gather(first_pn_out_masked_max,
dim=2,
index=min_idx.unsqueeze(1).expand(B, first_pn_out.size(1), N)) # BxCxN
first_pn_out_fusion = torch.cat((first_pn_out, scattered_first_masked_max), dim=1) # Bx2CxN
second_pn_out = self.second_pointnet(first_pn_out_fusion)
with torch.cuda.device(second_pn_out.get_device()):
second_gather_index = index_max.forward_cuda_shared_mem(second_pn_out, min_idx.int(), Ma).detach().long()
node_a_features = second_pn_out.gather(dim=2,
index=second_gather_index) * mask_row_max_B1Ma_float # BxCaxMa
# knn module, knn search on nodes: ----------------------------------
node_b_features = self.knnlayer(query=node_b,
database=cluster_mean,
database_features=node_a_features,
# database_features=torch.cat((cluster_mean, second_pn_out_masked_max), dim=1),
K=self.opt.k_ab) # BxCbxM
# get global feature
final_pn_out = self.final_pointnet(torch.cat((node_b, node_b_features), dim=1)) # BxCgxN
global_feature, _ = torch.max(final_pn_out, dim=2, keepdim=True) # BxCgx1
return pc_centers,\
cluster_mean,\
min_k_idx, \
first_pn_out, \
second_pn_out, \
node_a_features, \
node_b_features, \
global_feature
def gather_topk_features(self, min_k_idx, features):
"""
:param min_k_idx: BxNxk
:param features: BxCxM
:return:
"""
B, N, k = min_k_idx.size(0), min_k_idx.size(1), min_k_idx.size(2)
C, M = features.size(1), features.size(2)
return torch.gather(features.unsqueeze(3).expand(B, C, M, k),
index=min_k_idx.unsqueeze(1).expand(B, C, N, k),
dim=2) # BxCxNxk
| 6,833 | 48.165468 | 132 | py |
CorrI2P | CorrI2P-main/options.py | import numpy as np
import math
import torch
class Options:
    """Central configuration object for the CorrI2P pipeline.

    All values are plain attributes assigned in ``__init__``; there is no
    behaviour here beyond recording defaults.
    """

    def __init__(self):
        # --- debugging / task switches -------------------------------------
        self.is_debug = False
        self.is_fine_resolution = True
        self.is_remove_ground = False

        # --- point-cloud accumulation over neighbouring frames --------------
        self.accumulation_frame_num = 3
        self.accumulation_frame_skip = 6
        self.delta_ij_max = 40
        self.translation_max = 10.0

        # --- image geometry --------------------------------------------------
        self.crop_original_top_rows = 50
        self.img_scale = 0.5
        self.img_H = 160  # 320 * 0.5
        self.img_W = 512  # 1224 * 0.5
        # the fine resolution is img_H/scale x img_W/scale
        self.img_fine_resolution_scale = 32

        # --- point-cloud sampling / node graph -------------------------------
        self.input_pt_num = 40960
        self.pc_min_range = -1.0
        self.pc_max_range = 80.0
        self.node_a_num = 128
        self.node_b_num = 128
        self.k_ab = 16
        self.k_interp_ab = 3
        self.k_interp_point_a = 3
        self.k_interp_point_b = 3

        # --- random pose perturbation amplitudes (CAM coordinate frame) ------
        self.P_tx_amplitude = 0
        self.P_ty_amplitude = 0
        self.P_tz_amplitude = 0
        self.P_Rx_amplitude = 0.0 * math.pi / 12.0
        self.P_Ry_amplitude = 2.0 * math.pi
        self.P_Rz_amplitude = 0.0 * math.pi / 12.0

        # --- runtime / optimisation -------------------------------------------
        self.dataloader_threads = 10
        self.batch_size = 8
        self.gpu_ids = [0]
        self.device = torch.device('cuda', self.gpu_ids[0])
        self.normalization = 'batch'
        self.norm_momentum = 0.1
        self.activation = 'relu'
        self.lr = 0.001
        self.lr_decay_step = 20
        self.lr_decay_scale = 0.5
        self.vis_max_batch = 4

        # Weight of the coarse classification loss term.
        self.coarse_loss_alpha = 50 if self.is_fine_resolution else 1
| 1,651 | 26.081967 | 59 | py |
CorrI2P | CorrI2P-main/kitti_pc_img_dataloader.py | import os
import torch
import torch.utils.data as data
from torchvision import transforms
import numpy as np
from PIL import Image
import random
import math
import open3d as o3d
import cv2
import struct
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.sparse import coo_matrix
class KittiCalibHelper:
    """Loads KITTI odometry calibration files into an in-memory dict.

    For every sequence under ``<root_path>/calib/<seq>/calib.txt`` it stores:
      - ``'Tr'``:       4x4 velodyne-to-camera extrinsic matrix
      - ``'<Pk>_K'``:   3x3 camera intrinsic matrix for projection key Pk
      - ``'<Pk>'``:     4x4 extrinsic recovered from the 3x4 projection matrix
    keyed first by the integer sequence id, then by the matrix key.
    """
    def __init__(self, root_path):
        self.root_path = root_path
        self.calib_matrix_dict = self.read_calib_files()
    def read_calib_files(self):
        """Parse all ``calib.txt`` files; returns {seq_int: {key: matrix}}."""
        seq_folders = [name for name in os.listdir(
            os.path.join(self.root_path, 'calib'))]
        calib_matrix_dict = {}
        for seq in seq_folders:
            calib_file_path = os.path.join(
                self.root_path, 'calib', seq, 'calib.txt')
            with open(calib_file_path, 'r') as f:
                for line in f.readlines():
                    seq_int = int(seq)
                    if calib_matrix_dict.get(seq_int) is None:
                        calib_matrix_dict[seq_int] = {}
                    # Each line is '<2-char key>: <12 floats>' forming a 3x4 matrix.
                    key = line[0:2]
                    mat = np.fromstring(line[4:], sep=' ').reshape(
                        (3, 4)).astype(np.float32)
                    if 'Tr' == key:
                        # Velodyne->camera transform: embed 3x4 into a 4x4.
                        P = np.identity(4)
                        P[0:3, :] = mat
                        calib_matrix_dict[seq_int][key] = P
                    else:
                        # Projection matrix P = K [I | t_cam]; split into K and t.
                        K = mat[0:3, 0:3]
                        calib_matrix_dict[seq_int][key + '_K'] = K
                        fx = K[0, 0]
                        fy = K[1, 1]
                        cx = K[0, 2]
                        cy = K[1, 2]
                        # mat[0, 3] = fx*tx + cx*tz
                        # mat[1, 3] = fy*ty + cy*tz
                        # mat[2, 3] = tz
                        # Invert the relations above to recover the translation.
                        tz = mat[2, 3]
                        tx = (mat[0, 3] - cx * tz) / fx
                        ty = (mat[1, 3] - cy * tz) / fy
                        P = np.identity(4)
                        P[0:3, 3] = np.asarray([tx, ty, tz])
                        calib_matrix_dict[seq_int][key] = P
        return calib_matrix_dict
    def get_matrix(self, seq: int, matrix_key: str):
        """Return the stored matrix for sequence ``seq`` and key ``matrix_key``."""
        return self.calib_matrix_dict[seq][matrix_key]
class FarthestSampler:
    """Greedy farthest-point sampling over a (dim, N) column-major point array."""

    def __init__(self, dim=3):
        self.dim = dim

    def calc_distances(self, p0, points):
        """Squared Euclidean distance from column vector p0 (dim, 1) to every
        column of points (dim, N); returns an (N,) array."""
        return ((p0 - points) ** 2).sum(axis=0)

    def sample(self, pts, k):
        """Select k mutually far-apart points.

        :param pts: (dim, N) array of points, N >= k
        :param k: number of samples to draw
        :return: (farthest_pts (dim, k), farthest_pts_idx (k,) integer indices into pts)
        """
        farthest_pts = np.zeros((self.dim, k))
        # `np.int` was removed in NumPy 1.24; the builtin `int` is the
        # documented replacement (same platform-default integer dtype).
        farthest_pts_idx = np.zeros(k, dtype=int)
        # BUG FIX: the seed point must be drawn uniformly from all N columns.
        # The previous `np.random.randint(len(pts))` used len() of a 2-D array,
        # which is the number of ROWS (dim), restricting the seed to the first
        # `dim` points.
        init_idx = np.random.randint(pts.shape[1])
        farthest_pts[:, 0] = pts[:, init_idx]
        farthest_pts_idx[0] = init_idx
        distances = self.calc_distances(farthest_pts[:, 0:1], pts)
        for i in range(1, k):
            # Pick the point farthest from the current sample set, then
            # refresh the running minimum distances.
            idx = np.argmax(distances)
            farthest_pts[:, i] = pts[:, idx]
            farthest_pts_idx[i] = idx
            distances = np.minimum(distances, self.calc_distances(farthest_pts[:, i:i+1], pts))
        return farthest_pts, farthest_pts_idx
class kitti_pc_img_dataset(data.Dataset):
    """KITTI odometry dataset yielding paired (image, point cloud) samples
    for image-to-point-cloud registration.

    Each item contains the RGB crop, the (randomly transformed) point cloud
    with intensity and normals, camera intrinsics, the ground-truth pose,
    in/out-of-frustum masks, keypoint index sets and two FPS node sets.
    """
    def __init__(self, root_path, mode, num_pc,
                 P_tx_amplitude=5, P_ty_amplitude=5, P_tz_amplitude=5,
                 P_Rx_amplitude=0, P_Ry_amplitude=2.0 * math.pi, P_Rz_amplitude=0,num_kpt=512,is_front=False):
        super(kitti_pc_img_dataset, self).__init__()
        self.root_path = root_path
        self.mode = mode                      # 'train' or 'val'
        self.dataset = self.make_kitti_dataset(root_path, mode)
        self.calibhelper = KittiCalibHelper(root_path)
        self.num_pc = num_pc                  # points kept per sample
        self.img_H = 160                      # crop height (half-scale pixels)
        self.img_W = 512                      # crop width (half-scale pixels)
        # Amplitudes of the random SE(3) perturbation applied to the cloud.
        self.P_tx_amplitude = P_tx_amplitude
        self.P_ty_amplitude = P_ty_amplitude
        self.P_tz_amplitude = P_tz_amplitude
        self.P_Rx_amplitude = P_Rx_amplitude
        self.P_Ry_amplitude = P_Ry_amplitude
        self.P_Rz_amplitude = P_Rz_amplitude
        self.num_kpt=num_kpt                  # keypoints sampled per side
        self.farthest_sampler = FarthestSampler(dim=3)
        self.node_a_num=256
        self.node_b_num=256
        self.is_front=is_front                # NOTE(review): stored but never used below
        print('load data complete')
    def read_velodyne_bin(self, path):
        """Read a raw KITTI velodyne .bin file; returns a (4, N) float32 array
        of (x, y, z, reflectance) columns."""
        pc_list = []
        with open(path, 'rb') as f:
            content = f.read()
            pc_iter = struct.iter_unpack('ffff', content)
            for idx, point in enumerate(pc_iter):
                pc_list.append([point[0], point[1], point[2], point[3]])
        return np.asarray(pc_list, dtype=np.float32).T
    def make_kitti_dataset(self, root_path, mode):
        """Enumerate all (image folder, pc folder, K folder, seq, frame, cam) tuples.

        Sequences 0-8 are used for training, 9-10 for validation; both the
        P2 and P3 cameras are added for every frame.
        """
        dataset = []
        if mode == 'train':
            seq_list = list(range(9))
        elif 'val' == mode:
            seq_list = [9, 10]
        else:
            raise Exception('Invalid mode.')
        skip_start_end = 0
        for seq in seq_list:
            img2_folder = os.path.join(
                root_path, 'sequences', '%02d' % seq, 'img_P2')
            img3_folder = os.path.join(
                root_path, 'sequences', '%02d' % seq, 'img_P3')
            pc_folder = os.path.join(
                root_path, 'sequences', '%02d' % seq, 'pc_npy_with_normal')
            K2_folder = os.path.join(
                root_path, 'sequences', '%02d' % seq, 'K_P2')
            K3_folder = os.path.join(
                root_path, 'sequences', '%02d' % seq, 'K_P3')
            sample_num = round(len(os.listdir(img2_folder)))
            for i in range(skip_start_end, sample_num - skip_start_end):
                dataset.append((img2_folder, pc_folder,
                                K2_folder, seq, i, 'P2', sample_num))
                dataset.append((img3_folder, pc_folder,
                                K3_folder, seq, i, 'P3', sample_num))
        return dataset
    def downsample_with_intensity_sn(self, pointcloud, intensity, sn, voxel_grid_downsample_size):
        """Voxel-grid downsample while carrying intensity (stored in the red
        color channel) and surface normals through Open3D."""
        pcd=o3d.geometry.PointCloud()
        pcd.points=o3d.utility.Vector3dVector(np.transpose(pointcloud))
        intensity_max=np.max(intensity)
        # Smuggle the (normalised) intensity through the color attribute so
        # voxel averaging also averages intensities.
        fake_colors=np.zeros((pointcloud.shape[1],3))
        fake_colors[:,0:1]=np.transpose(intensity)/intensity_max
        pcd.colors=o3d.utility.Vector3dVector(fake_colors)
        pcd.normals=o3d.utility.Vector3dVector(np.transpose(sn))
        down_pcd=pcd.voxel_down_sample(voxel_size=voxel_grid_downsample_size)
        down_pcd_points=np.transpose(np.asarray(down_pcd.points))
        pointcloud=down_pcd_points
        intensity=np.transpose(np.asarray(down_pcd.colors)[:,0:1])*intensity_max
        sn=np.transpose(np.asarray(down_pcd.normals))
        return pointcloud, intensity, sn
    def downsample_np(self, pc_np, intensity_np, sn_np):
        """Sample exactly ``self.num_pc`` columns; subsamples without
        replacement when there are enough points, otherwise repeats points."""
        if pc_np.shape[1] >= self.num_pc:
            choice_idx = np.random.choice(pc_np.shape[1], self.num_pc, replace=False)
        else:
            fix_idx = np.asarray(range(pc_np.shape[1]))
            while pc_np.shape[1] + fix_idx.shape[0] < self.num_pc:
                fix_idx = np.concatenate((fix_idx, np.asarray(range(pc_np.shape[1]))), axis=0)
            random_idx = np.random.choice(pc_np.shape[1], self.num_pc - fix_idx.shape[0], replace=False)
            choice_idx = np.concatenate((fix_idx, random_idx), axis=0)
        pc_np = pc_np[:, choice_idx]
        intensity_np = intensity_np[:, choice_idx]
        sn_np=sn_np[:,choice_idx]
        return pc_np, intensity_np, sn_np
    def camera_matrix_cropping(self, K: np.ndarray, dx: float, dy: float):
        """Shift the principal point to account for cropping the image at (dx, dy)."""
        K_crop = np.copy(K)
        K_crop[0, 2] -= dx
        K_crop[1, 2] -= dy
        return K_crop
    def camera_matrix_scaling(self, K: np.ndarray, s: float):
        """Scale the intrinsics for an image resized by factor ``s``."""
        K_scale = s * K
        K_scale[2, 2] = 1
        return K_scale
    def augment_img(self, img_np):
        """Random color jitter (brightness/contrast/saturation/hue) for training."""
        brightness = (0.8, 1.2)
        contrast = (0.8, 1.2)
        saturation = (0.8, 1.2)
        hue = (-0.1, 0.1)
        color_aug = transforms.ColorJitter(
            brightness, contrast, saturation, hue)
        img_color_aug_np = np.array(color_aug(Image.fromarray(img_np)))
        return img_color_aug_np
    def angles2rotation_matrix(self, angles):
        """Compose a rotation matrix R = Rz @ Ry @ Rx from (rx, ry, rz) Euler angles."""
        Rx = np.array([[1, 0, 0],
                       [0, np.cos(angles[0]), -np.sin(angles[0])],
                       [0, np.sin(angles[0]), np.cos(angles[0])]])
        Ry = np.array([[np.cos(angles[1]), 0, np.sin(angles[1])],
                       [0, 1, 0],
                       [-np.sin(angles[1]), 0, np.cos(angles[1])]])
        Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0],
                       [np.sin(angles[2]), np.cos(angles[2]), 0],
                       [0, 0, 1]])
        R = np.dot(Rz, np.dot(Ry, Rx))
        return R
    def generate_random_transform(self):
        """
        Draw a random SE(3) perturbation within the configured amplitudes.
        :return: 4x4 float32 transform matrix
        """
        t = [random.uniform(-self.P_tx_amplitude, self.P_tx_amplitude),
             random.uniform(-self.P_ty_amplitude, self.P_ty_amplitude),
             random.uniform(-self.P_tz_amplitude, self.P_tz_amplitude)]
        angles = [random.uniform(-self.P_Rx_amplitude, self.P_Rx_amplitude),
                  random.uniform(-self.P_Ry_amplitude, self.P_Ry_amplitude),
                  random.uniform(-self.P_Rz_amplitude, self.P_Rz_amplitude)]
        rotation_mat = self.angles2rotation_matrix(angles)
        P_random = np.identity(4, dtype=np.float32)
        P_random[0:3, 0:3] = rotation_mat
        P_random[0:3, 3] = t
        # print('t',t)
        # print('angles',angles)
        return P_random
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, index):
        """Build one training/validation sample; see the class docstring for keys."""
        img_folder, pc_folder, K_folder, seq, seq_i, key, _ = self.dataset[index]
        img = np.load(os.path.join(img_folder, '%06d.npy' % seq_i))
        data = np.load(os.path.join(pc_folder, '%06d.npy' % seq_i))
        intensity = data[3:4, :]
        sn = data[4:, :]
        pc = data[0:3, :]
        # Move the cloud from the velodyne frame into the selected camera frame.
        P_Tr = np.dot(self.calibhelper.get_matrix(seq, key),
                      self.calibhelper.get_matrix(seq, 'Tr'))
        pc = np.dot(P_Tr[0:3, 0:3], pc) + P_Tr[0:3, 3:]
        sn = np.dot(P_Tr[0:3, 0:3], sn)
        K = np.load(os.path.join(K_folder, '%06d.npy' % seq_i))
        pc, intensity, sn = self.downsample_with_intensity_sn(pc, intensity, sn, voxel_grid_downsample_size=0.1)
        pc, intensity, sn = self.downsample_np(pc, intensity,sn)
        # Half-resolution image; scale intrinsics to match.
        img = cv2.resize(img,
                         (int(round(img.shape[1] * 0.5)),
                          int(round((img.shape[0] * 0.5)))),
                         interpolation=cv2.INTER_LINEAR)
        K = self.camera_matrix_scaling(K, 0.5)
        # Random crop during training, center crop during validation.
        if 'train' == self.mode:
            img_crop_dx = random.randint(0, img.shape[1] - self.img_W)
            img_crop_dy = random.randint(0, img.shape[0] - self.img_H)
        else:
            img_crop_dx = int((img.shape[1] - self.img_W) / 2)
            img_crop_dy = int((img.shape[0] - self.img_H) / 2)
        img = img[img_crop_dy:img_crop_dy + self.img_H,
              img_crop_dx:img_crop_dx + self.img_W, :]
        K = self.camera_matrix_cropping(K, dx=img_crop_dx, dy=img_crop_dy)
        #1/4 scale
        K_4=self.camera_matrix_scaling(K,0.25)
        if 'train' == self.mode:
            img = self.augment_img(img)
        # Project points into the 1/4-scale image to find the in-frustum set.
        pc_ = np.dot(K_4, pc)
        pc_mask = np.zeros((1, np.shape(pc)[1]), dtype=np.float32)
        pc_[0:2, :] = pc_[0:2, :] / pc_[2:, :]
        xy = np.floor(pc_[0:2, :])
        is_in_picture = (xy[0, :] >= 0) & (xy[0, :] <= (self.img_W*0.25 - 1)) & (xy[1, :] >= 0) & (xy[1, :] <= (self.img_H*0.25 - 1)) & (pc_[2, :] > 0)
        pc_mask[:, is_in_picture] = 1.
        # NOTE(review): `index` below shadows the function parameter; harmless
        # here because the parameter is no longer needed, but fragile.
        pc_kpt_idx=np.where(pc_mask.squeeze()==1)[0]
        index=np.random.permutation(len(pc_kpt_idx))[0:self.num_kpt]
        pc_kpt_idx=pc_kpt_idx[index]
        pc_outline_idx=np.where(pc_mask.squeeze()==0)[0]
        index=np.random.permutation(len(pc_outline_idx))[0:self.num_kpt]
        pc_outline_idx=pc_outline_idx[index]
        # Rasterise projected points into a 1/4-scale occupancy mask.
        xy2 = xy[:, is_in_picture]
        img_mask = coo_matrix((np.ones_like(xy2[0, :]), (xy2[1, :], xy2[0, :])), shape=(int(self.img_H*0.25), int(self.img_W*0.25))).toarray()
        img_mask = np.array(img_mask)
        img_mask[img_mask > 0] = 1.
        # Flattened pixel index (row-major) of each point keypoint.
        img_kpt_index=xy[1,pc_kpt_idx]*self.img_W*0.25 +xy[0,pc_kpt_idx]
        img_outline_index=np.where(img_mask.squeeze().reshape(-1)==0)[0]
        index=np.random.permutation(len(img_outline_index))[0:self.num_kpt]
        img_outline_index=img_outline_index[index]
        # Apply the random pose perturbation; ground truth is its inverse.
        P = self.generate_random_transform()
        pc = np.dot(P[0:3, 0:3], pc) + P[0:3, 3:]
        sn = np.dot(P[0:3, 0:3], sn)
        # Two FPS node sets sampled from 8x-oversampled random subsets.
        node_a_np, _ = self.farthest_sampler.sample(pc[:, np.random.choice( pc.shape[1],
                                                                            self.node_a_num * 8,
                                                                            replace=False)],
                                                    k=self.node_a_num)
        node_b_np, _ = self.farthest_sampler.sample(pc[:, np.random.choice( pc.shape[1],
                                                                            self.node_b_num * 8,
                                                                            replace=False)],
                                                    k=self.node_b_num)
        return {'img': torch.from_numpy(img.astype(np.float32) / 255.).permute(2, 0, 1).contiguous(),
                'pc': torch.from_numpy(pc.astype(np.float32)),
                'intensity': torch.from_numpy(intensity.astype(np.float32)),
                'sn': torch.from_numpy(sn.astype(np.float32)),
                'K': torch.from_numpy(K_4.astype(np.float32)),
                'P': torch.from_numpy(np.linalg.inv(P).astype(np.float32)),
                'pc_mask': torch.from_numpy(pc_mask).float(),      #(1,20480)
                'img_mask': torch.from_numpy(img_mask).float(),    #(40,128)
                'pc_kpt_idx': torch.from_numpy(pc_kpt_idx),        #512
                'pc_outline_idx':torch.from_numpy(pc_outline_idx), #512
                'img_kpt_idx':torch.from_numpy(img_kpt_index).long() ,      #512
                'img_outline_index':torch.from_numpy(img_outline_index).long(),
                'node_a':torch.from_numpy(node_a_np).float(),
                'node_b':torch.from_numpy(node_b_np).float()
                }
if __name__ == '__main__':
    # Manual smoke test: load one sample from a hard-coded dataset path,
    # print its tensor shapes and dump a few arrays for offline inspection.
    dataset = kitti_pc_img_dataset('/gpfs1/scratch/siyuren2/dataset/', 'val', 20480)
    data = dataset[4000]
    '''img=data[0].numpy() #full size
    pc=data[1].numpy()
    intensity=data[2].numpy()
    sn=data[3].numpy()
    K=data[4].numpy()
    P=data[5].numpy()
    pc_mask=data[6].numpy()
    img_mask=data[7].numpy() #1/4 size
    pc_kpt_idx=data[8].numpy() #(B,512)
    pc_outline_idx=data[9].numpy()
    img_kpt_idx=data[10].numpy()
    img_outline_idx=data[11].numpy()
    np.save('./test_data/img.npy',img)
    np.save('./test_data/pc.npy',pc)
    np.save('./test_data/intensity.npy',intensity)
    np.save('./test_data/sn.npy',sn)
    np.save('./test_data/K.npy',K)
    np.save('./test_data/P.npy',P)
    np.save('./test_data/pc_mask.npy',pc_mask)
    np.save('./test_data/img_mask.npy',img_mask)
    '''
    '''for i,data in enumerate(dataset):
        print(i,data['pc'].size())'''
    print(len(dataset))
    print(data['pc'].size())
    print(data['img'].size())
    print(data['pc_mask'].size())
    print(data['intensity'].size())
    # Dump selected tensors so they can be inspected/visualised elsewhere.
    np.save('./test_data/pc.npy', data['pc'].numpy())
    np.save('./test_data/P.npy', data['P'].numpy())
    np.save('./test_data/img.npy', data['img'].numpy())
    np.save('./test_data/img_mask.npy', data['img_mask'].numpy())
    np.save('./test_data/pc_mask.npy', data['pc_mask'].numpy())
    np.save('./test_data/K.npy', data['K'].numpy())
    """
    img = dict['img'].numpy()
    img_mask = dict['img_mask'].numpy()
    img = img.transpose(1, 2, 0)
    cv2.imwrite('img.png',np.uint8(img*255))
    cv2.imwrite('img_mask.png', np.uint8(img_mask * 255))
    cv2.imshow('img', cv2.resize(img,(512,160)))
    cv2.imshow('img_mask', cv2.resize(img_mask,(512,160)))
    color = []
    for i in range(np.shape(pc_data)[1]):
        if pc_mask[0, i] > 0:
            color.append([0, 1, 1])
        else:
            color.append([0, 0, 1])
    color = np.asarray(color, dtype=np.float64)
    print(color.shape)
    print(np.sum(pc_mask), np.sum(img_mask))
    pointcloud = o3d.geometry.PointCloud()
    pointcloud.points = o3d.utility.Vector3dVector(pc_data.T)
    pointcloud.colors = o3d.utility.Vector3dVector(color)
    o3d.visualization.draw_geometries([pointcloud])
    # plt.imshow(dict['img'].permute(1,2,0).numpy())
    # plt.show()"""
| 16,785 | 37.856481 | 151 | py |
CorrI2P | CorrI2P-main/train.py | import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import torch
import argparse
from network import CorrI2P
from kitti_pc_img_dataloader import kitti_pc_img_dataset
import loss
import numpy as np
import datetime
import logging
import math
import options
import cv2
from scipy.spatial.transform import Rotation
def get_P_diff(P_pred_np, P_gt_np):
    """Pose error between a predicted and a ground-truth 4x4 transform.

    Returns (translation error in the same units as the poses,
    sum of absolute Euler angles in degrees of the residual rotation).
    """
    # Residual transform that maps the prediction onto the ground truth.
    residual = np.dot(np.linalg.inv(P_pred_np), P_gt_np)
    t_diff = np.linalg.norm(residual[0:3, 3])
    residual_rotation = Rotation.from_matrix(residual[0:3, 0:3])
    angles_diff = np.sum(np.abs(residual_rotation.as_euler('xzy', degrees=True)))
    return t_diff, angles_diff
def test_acc(model,testdataloader,args):
    """Evaluate registration accuracy on every 5th batch of the loader.

    For each evaluated sample: threshold image/point scores, match features
    by cosine distance, run EPnP + RANSAC, and accumulate translation and
    rotation errors against the ground-truth pose.

    Returns (mean translation error, mean rotation error in degrees).

    NOTE(review): inference runs without torch.no_grad(), so activations are
    kept for backprop needlessly; also `is_success` from solvePnPRansac is
    never checked — a failed RANSAC would propagate garbage poses.
    """
    t_diff_set=[]
    angles_diff_set=[]
    for step,data in enumerate(testdataloader):
        if step%5==0:
            model.eval()
            img=data['img'].cuda()              #full size
            pc=data['pc'].cuda()
            intensity=data['intensity'].cuda()
            sn=data['sn'].cuda()
            K=data['K'].cuda()
            P=data['P'].cuda()
            pc_mask=data['pc_mask'].cuda()
            img_mask=data['img_mask'].cuda()        #1/4 size
            pc_kpt_idx=data['pc_kpt_idx'].cuda()                #(B,512)
            pc_outline_idx=data['pc_outline_idx'].cuda()
            img_kpt_idx=data['img_kpt_idx'].cuda()
            img_outline_idx=data['img_outline_index'].cuda()
            node_a=data['node_a'].cuda()
            node_b=data['node_b'].cuda()
            img_features,pc_features,img_score,pc_score=model(pc,intensity,sn,img,node_a,node_b)    #64 channels feature
            # Only the first sample of the batch is evaluated.
            img_score=img_score[0].data.cpu().numpy()
            pc_score=pc_score[0].data.cpu().numpy()
            img_feature=img_features[0].data.cpu().numpy()
            pc_feature=pc_features[0].data.cpu().numpy()
            pc=pc[0].data.cpu().numpy()
            P=P[0].data.cpu().numpy()
            K=K[0].data.cpu().numpy()
            # Build the (2, H*W) grid of pixel coordinates for the score map.
            img_x=np.linspace(0,np.shape(img_feature)[-1]-1,np.shape(img_feature)[-1]).reshape(1,-1).repeat(np.shape(img_feature)[-2],0).reshape(1,np.shape(img_score)[-2],np.shape(img_score)[-1])
            img_y=np.linspace(0,np.shape(img_feature)[-2]-1,np.shape(img_feature)[-2]).reshape(-1,1).repeat(np.shape(img_feature)[-1],1).reshape(1,np.shape(img_score)[-2],np.shape(img_score)[-1])
            img_xy=np.concatenate((img_x,img_y),axis=0)
            img_xy_flatten=img_xy.reshape(2,-1)
            img_feature_flatten=img_feature.reshape(np.shape(img_feature)[0],-1)
            img_score_flatten=img_score.squeeze().reshape(-1)
            # Keep only confidently-scored pixels / points.
            img_index=(img_score_flatten>args.img_thres)
            #topk_img_index=np.argsort(-img_score_flatten)[:args.num_kpt]
            img_xy_flatten_sel=img_xy_flatten[:,img_index]
            img_feature_flatten_sel=img_feature_flatten[:,img_index]
            img_score_flatten_sel=img_score_flatten[img_index]
            pc_index=(pc_score.squeeze()>args.pc_thres)
            #topk_pc_index=np.argsort(-pc_score.squeeze())[:args.num_kpt]
            pc_sel=pc[:,pc_index]
            pc_feature_sel=pc_feature[:,pc_index]
            pc_score_sel=pc_score.squeeze()[pc_index]
            # Cosine distance (features presumably L2-normalised upstream —
            # TODO confirm) between every selected point and pixel.
            dist=1-np.sum(np.expand_dims(pc_feature_sel,axis=2)*np.expand_dims(img_feature_flatten_sel,axis=1),axis=0)
            sel_index=np.argmin(dist,axis=1)
            #sel_index=np.argsort(dist,axis=1)[:,0]
            img_xy_pc=img_xy_flatten_sel[:,sel_index]
            # EPnP inside RANSAC on the 3D-2D correspondences.
            is_success,R,t,inliers=cv2.solvePnPRansac(pc_sel.T,img_xy_pc.T,K,useExtrinsicGuess=False,
                                                        iterationsCount=500,
                                                        reprojectionError=args.dist_thres,
                                                        flags=cv2.SOLVEPNP_EPNP,
                                                        distCoeffs=None)
            # Rodrigues vector -> rotation matrix, assembled into a 4x4 pose.
            R,_=cv2.Rodrigues(R)
            T_pred=np.eye(4)
            T_pred[0:3,0:3]=R
            T_pred[0:3,3:]=t
            t_diff,angles_diff=get_P_diff(T_pred,P)
            t_diff_set.append(t_diff)
            angles_diff_set.append(angles_diff)
    return np.mean(np.array(t_diff_set)),np.mean(np.array(angles_diff_set))
if __name__=='__main__':
    # CorrI2P training entry point: parses CLI options, builds the KITTI
    # dataloaders and the model, then runs the descriptor + detector
    # training loop with periodic validation and checkpointing.
    parser = argparse.ArgumentParser(description='Point Cloud Registration')
    parser.add_argument('--epoch', type=int, default=25, metavar='epoch',
                        help='number of epoch to train')
    parser.add_argument('--train_batch_size', type=int, default=20, metavar='train_batch_size',
                        help='Size of train batch')
    parser.add_argument('--val_batch_size', type=int, default=8, metavar='val_batch_size',
                        help='Size of val batch')
    parser.add_argument('--data_path', type=str, default='/home/siyu_ren/kitti_dataset/', metavar='data_path',
                        help='train and test data path')
    parser.add_argument('--num_point', type=int, default=40960, metavar='num_point',
                        help='point cloud size to train')
    parser.add_argument('--num_workers', type=int, default=8, metavar='num_workers',
                        help='num of CPUs')
    parser.add_argument('--val_freq', type=int, default=1000, metavar='val_freq',
                        help='')
    parser.add_argument('--lr', type=float, default=0.001, metavar='lr',
                        help='')
    parser.add_argument('--min_lr', type=float, default=0.00001, metavar='lr',
                        help='')
    parser.add_argument('--P_tx_amplitude', type=float, default=10, metavar='P_tx_amplitude',
                        help='')
    parser.add_argument('--P_ty_amplitude', type=float, default=0, metavar='P_ty_amplitude',
                        help='')
    parser.add_argument('--P_tz_amplitude', type=float, default=10, metavar='P_tz_amplitude',
                        help='')
    parser.add_argument('--P_Rx_amplitude', type=float, default=2*math.pi*0, metavar='P_Rx_amplitude',
                        help='')
    parser.add_argument('--P_Ry_amplitude', type=float, default=2*math.pi, metavar='P_Ry_amplitude',
                        help='')
    parser.add_argument('--P_Rz_amplitude', type=float, default=2*math.pi*0, metavar='P_Rz_amplitude',
                        help='')
    parser.add_argument('--save_path', type=str, default='./log_xy_40960_128', metavar='save_path',
                        help='path to save log and model')
    '''parser.add_argument('--save_path', type=str, default='./only_test', metavar='save_path',
                        help='path to save log and model')'''
    parser.add_argument('--num_kpt', type=int, default=512, metavar='num_kpt',
                        help='')
    parser.add_argument('--dist_thres', type=float, default=1, metavar='num_kpt',
                        help='')
    parser.add_argument('--img_thres', type=float, default=0.9, metavar='img_thres',
                        help='')
    parser.add_argument('--pc_thres', type=float, default=0.9, metavar='pc_thres',
                        help='')
    parser.add_argument('--pos_margin', type=float, default=0.2, metavar='pos_margin',
                        help='')
    parser.add_argument('--neg_margin', type=float, default=1.8, metavar='neg_margin',
                        help='')
    args = parser.parse_args()

    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    logdir=os.path.join(args.save_path, 'dist_thres_%0.2f_pos_margin_%0.2f_neg_margin_%0.2f'%(args.dist_thres,args.pos_margin,args.neg_margin,))
    try:
        os.makedirs(logdir)
    except OSError:  # narrowed from a bare `except:` — only directory errors are expected
        print('mkdir failue')

    # File logger for loss curves and validation metrics.
    logger=logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    formatter=logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/log.txt' % (logdir))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    train_dataset = kitti_pc_img_dataset(args.data_path, 'train', args.num_point,
                                         P_tx_amplitude=args.P_tx_amplitude,
                                         P_ty_amplitude=args.P_ty_amplitude,
                                         P_tz_amplitude=args.P_tz_amplitude,
                                         P_Rx_amplitude=args.P_Rx_amplitude,
                                         P_Ry_amplitude=args.P_Ry_amplitude,
                                         P_Rz_amplitude=args.P_Rz_amplitude,num_kpt=args.num_kpt,is_front=False)
    test_dataset = kitti_pc_img_dataset(args.data_path, 'val', args.num_point,
                                        P_tx_amplitude=args.P_tx_amplitude,
                                        P_ty_amplitude=args.P_ty_amplitude,
                                        P_tz_amplitude=args.P_tz_amplitude,
                                        P_Rx_amplitude=args.P_Rx_amplitude,
                                        P_Ry_amplitude=args.P_Ry_amplitude,
                                        P_Rz_amplitude=args.P_Rz_amplitude,num_kpt=args.num_kpt,is_front=False)
    assert len(train_dataset) > 10
    assert len(test_dataset) > 10
    trainloader=torch.utils.data.DataLoader(train_dataset,batch_size=args.train_batch_size,shuffle=True,drop_last=True,num_workers=args.num_workers)
    testloader=torch.utils.data.DataLoader(test_dataset,batch_size=args.val_batch_size,shuffle=False,drop_last=True,num_workers=args.num_workers)
    opt=options.Options()
    model=CorrI2P(opt)
    model=model.cuda()

    current_lr=args.lr
    learnable_params=filter(lambda p:p.requires_grad,model.parameters())
    optimizer=torch.optim.Adam(learnable_params,lr=current_lr)
    #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.9)
    #scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epoch, eta_min=args.lr)

    logger.info(args)

    global_step=0
    best_t_diff=1000
    best_r_diff=1000
    for epoch in range(args.epoch):
        for step,data in enumerate(trainloader):
            global_step+=1
            model.train()
            optimizer.zero_grad()
            img=data['img'].cuda()                  #full size
            pc=data['pc'].cuda()
            intensity=data['intensity'].cuda()
            sn=data['sn'].cuda()
            K=data['K'].cuda()
            P=data['P'].cuda()
            pc_mask=data['pc_mask'].cuda()
            img_mask=data['img_mask'].cuda()        #1/4 size
            B=img_mask.size(0)
            pc_kpt_idx=data['pc_kpt_idx'].cuda()    #(B,512)
            pc_outline_idx=data['pc_outline_idx'].cuda()
            img_kpt_idx=data['img_kpt_idx'].cuda()
            img_outline_idx=data['img_outline_index'].cuda()
            node_a=data['node_a'].cuda()
            node_b=data['node_b'].cuda()
            # Pixel-coordinate grids for the 1/4-scale feature map.
            img_x=torch.linspace(0,img_mask.size(-1)-1,img_mask.size(-1)).view(1,-1).expand(img_mask.size(-2),img_mask.size(-1)).unsqueeze(0).expand(img_mask.size(0),img_mask.size(-2),img_mask.size(-1)).unsqueeze(1).cuda()
            img_y=torch.linspace(0,img_mask.size(-2)-1,img_mask.size(-2)).view(-1,1).expand(img_mask.size(-2),img_mask.size(-1)).unsqueeze(0).expand(img_mask.size(0),img_mask.size(-2),img_mask.size(-1)).unsqueeze(1).cuda()
            img_xy=torch.cat((img_x,img_y),dim=1)

            img_features,pc_features,img_score,pc_score=model(pc,intensity,sn,img,node_a,node_b)    #64 channels feature
            # Gather in-frustum (inline) / out-of-frustum (outline) features and scores.
            pc_features_inline=torch.gather(pc_features,index=pc_kpt_idx.unsqueeze(1).expand(B,pc_features.size(1),args.num_kpt),dim=-1)
            pc_features_outline=torch.gather(pc_features,index=pc_outline_idx.unsqueeze(1).expand(B,pc_features.size(1),args.num_kpt),dim=-1)
            pc_xyz_inline=torch.gather(pc,index=pc_kpt_idx.unsqueeze(1).expand(B,3,args.num_kpt),dim=-1)
            pc_score_inline=torch.gather(pc_score,index=pc_kpt_idx.unsqueeze(1),dim=-1)
            pc_score_outline=torch.gather(pc_score,index=pc_outline_idx.unsqueeze(1),dim=-1)
            img_features_flatten=img_features.contiguous().view(img_features.size(0),img_features.size(1),-1)
            img_score_flatten=img_score.contiguous().view(img_score.size(0),img_score.size(1),-1)
            img_xy_flatten=img_xy.contiguous().view(img_features.size(0),2,-1)
            img_features_flatten_inline=torch.gather(img_features_flatten,index=img_kpt_idx.unsqueeze(1).expand(B,img_features_flatten.size(1),args.num_kpt),dim=-1)
            img_xy_flatten_inline=torch.gather(img_xy_flatten,index=img_kpt_idx.unsqueeze(1).expand(B,2,args.num_kpt),dim=-1)
            img_score_flatten_inline=torch.gather(img_score_flatten,index=img_kpt_idx.unsqueeze(1),dim=-1)
            img_features_flatten_outline=torch.gather(img_features_flatten,index=img_outline_idx.unsqueeze(1).expand(B,img_features_flatten.size(1),args.num_kpt),dim=-1)
            img_score_flatten_outline=torch.gather(img_score_flatten,index=img_outline_idx.unsqueeze(1),dim=-1)

            # Project the inline 3D keypoints with the GT pose to get 2D targets.
            pc_xyz_projection=torch.bmm(K,(torch.bmm(P[:,0:3,0:3],pc_xyz_inline)+P[:,0:3,3:]))
            #pc_xy_projection=torch.floor(pc_xyz_projection[:,0:2,:]/pc_xyz_projection[:,2:,:]).float()
            pc_xy_projection=pc_xyz_projection[:,0:2,:]/pc_xyz_projection[:,2:,:]

            correspondence_mask=(torch.sqrt(torch.sum(torch.square(img_xy_flatten_inline.unsqueeze(-1)-pc_xy_projection.unsqueeze(-2)),dim=1))<=args.dist_thres).float()

            loss_desc,dists=loss.desc_loss(img_features_flatten_inline,pc_features_inline,correspondence_mask,pos_margin=args.pos_margin,neg_margin=args.neg_margin)
            #loss_det=loss2.det_loss(img_score_flatten_inline.squeeze(),img_score_flatten_outline.squeeze(),pc_score_inline,pc_score_outline.squeeze())
            loss_det=loss.det_loss2(img_score_flatten_inline.squeeze(),img_score_flatten_outline.squeeze(),pc_score_inline.squeeze(),pc_score_outline.squeeze(),dists,correspondence_mask)
            # BUG FIX: the total was previously assigned to the name `loss`,
            # which shadowed the imported `loss` module and made
            # `loss.desc_loss(...)` raise AttributeError on the next step.
            loss_total=loss_desc+loss_det*0.5
            #loss_total=loss_desc
            loss_total.backward()
            optimizer.step()
            #torch.cuda.empty_cache()
            if global_step%6==0:
                logger.info('%s-%d-%d, loss: %f, loss desc: %f, loss det: %f'%('train',epoch,global_step,loss_total.data.cpu().numpy(),loss_desc.data.cpu().numpy(),loss_det.data.cpu().numpy()))

            # Periodic validation after a warm-up of 5 epochs; keep the best
            # translation/rotation checkpoints separately.
            if global_step%args.val_freq==0 and epoch>5:
                t_diff,r_diff=test_acc(model,testloader,args)
                if t_diff<=best_t_diff:
                    torch.save(model.state_dict(),os.path.join(logdir,'mode_best_t.t7'))
                    best_t_diff=t_diff
                if r_diff<=best_r_diff:
                    torch.save(model.state_dict(),os.path.join(logdir,'mode_best_r.t7'))
                    best_r_diff=r_diff
                logger.info('%s-%d-%d, t_error: %f, r_error: %f'%('test',epoch,global_step,t_diff,r_diff))

        torch.save(model.state_dict(),os.path.join(logdir,'mode_last.t7'))
        # Step-decay of the learning rate every 5 epochs, clamped at min_lr.
        if epoch%5==0 and epoch>0:
            current_lr=current_lr*0.25
            if current_lr<args.min_lr:
                current_lr=args.min_lr
            for param_group in optimizer.param_groups:
                param_group['lr']=current_lr
            logger.info('%s-%d-%d, updata lr, current lr is %f'%('train',epoch,global_step,current_lr))
        torch.save(model.state_dict(),os.path.join(logdir,'mode_epoch_%d.t7'%epoch))
CorrI2P | CorrI2P-main/imagenet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.utils import load_state_dict_from_url
import numpy as np
# Public API of this torchvision-derived ResNet module.
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
           'wide_resnet50_2', 'wide_resnet101_2']
# Download URLs of the official ImageNet-pretrained checkpoints.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Bias-free 3x3 convolution; padding equals dilation, so the spatial
    size is preserved at stride 1."""
    conv = nn.Conv2d(in_planes, out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=dilation,
                     dilation=dilation,
                     groups=groups,
                     bias=False)
    return conv
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free pointwise (1x1) convolution used for channel projection."""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=1,
                     stride=stride,
                     bias=False)
class BasicBlock(nn.Module):
    """Two-layer residual block used by ResNet-18/34.

    Structure: conv3x3 -> BN -> ReLU -> conv3x3 -> BN, summed with the
    identity (or `downsample`d input) and passed through a final ReLU.
    """
    # Output channel multiplier: out_channels == planes * expansion.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Keep the input around for the residual addition.
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        # Project the skip path when shape/stride changed on the main path.
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand residual block (expansion factor 4)."""

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        norm = norm_layer if norm_layer is not None else nn.BatchNorm2d
        # Width of the middle 3x3 conv scales with base_width and groups
        # (this is what the ResNeXt / wide-ResNet variants tune).
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (together with the optional downsample branch) performs the
        # spatial downsampling when stride != 1.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """Torchvision-style ResNet with two modifications:

    * the stem accepts an arbitrary ``in_channels`` instead of fixed RGB;
    * ``forward`` returns the whole feature pyramid (a list of six tensors)
      rather than classification logits, so the net serves as an encoder.
      The ``fc`` head is still constructed but never applied in ``forward``.
    """

    def __init__(self, in_channels, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64  # running channel count, mutated by _make_layer
        self.dilation = 1   # running dilation, mutated when dilate=True
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Classification head: built (and initialised) but unused by forward().
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        # Build one residual stage; mutates self.inplanes / self.dilation.
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the stage's stride for dilation (keeps spatial resolution).
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when spatial size or channel count changes.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Return the multi-scale feature list: strides /2, /4, /8, /16, /32,
        # then the globally average-pooled Cx1x1 feature. No fc / logits.
        out = []
        x = self.conv1(x)  # /2
        x = self.bn1(x)
        x = self.relu(x)
        out.append(x)
        x = self.maxpool(x)
        x = self.layer1(x)  # /4
        out.append(x)
        x = self.layer2(x)  # /8
        out.append(x)
        x = self.layer3(x)  # /16
        out.append(x)
        x = self.layer4(x)  # /32
        out.append(x)
        x = self.avgpool(x)  # Cx1x1
        out.append(x)
        return out
def _resnet(in_channels, arch, block, layers, pretrained, progress, **kwargs):
    """Construct a ResNet and optionally load the official weights for *arch*.

    NOTE(review): the published checkpoints assume a 3-channel stem and the
    default num_classes; loading fails for other configurations -- confirm
    callers only combine pretrained=True with in_channels=3.
    """
    net = ResNet(in_channels, block, layers, **kwargs)
    if not pretrained:
        return net
    state = load_state_dict_from_url(model_urls[arch], progress=progress)
    net.load_state_dict(state)
    return net
def resnet18(in_channels=3, pretrained=False, progress=True, **kwargs):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        in_channels (int): Number of input image channels (pretrained weights assume 3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet(in_channels, 'resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)
def resnet34(in_channels=3, pretrained=False, progress=True, **kwargs):
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        in_channels (int): Number of input image channels (pretrained weights assume 3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet(in_channels, 'resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet50(in_channels=3, pretrained=False, progress=True, **kwargs):
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        in_channels (int): Number of input image channels (pretrained weights assume 3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet(in_channels, 'resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)
def resnet101(in_channels=3, pretrained=False, progress=True, **kwargs):
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        in_channels (int): Number of input image channels (pretrained weights assume 3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet(in_channels, 'resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                   **kwargs)
def resnet152(in_channels=3, pretrained=False, progress=True, **kwargs):
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        in_channels (int): Number of input image channels (pretrained weights assume 3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet(in_channels, 'resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
                   **kwargs)
def resnext50_32x4d(in_channels=3, pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_

    Args:
        in_channels (int): Number of input image channels (pretrained weights assume 3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 4 in every bottleneck's 3x3 conv.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet(in_channels, 'resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def resnext101_32x8d(in_channels=3, pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_

    Args:
        in_channels (int): Number of input image channels (pretrained weights assume 3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 8 in every bottleneck's 3x3 conv.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet(in_channels, 'resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
def wide_resnet50_2(in_channels=3, pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        in_channels (int): Number of input image channels (pretrained weights assume 3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet(in_channels, 'wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def wide_resnet101_2(in_channels=3, pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        in_channels (int): Number of input image channels (pretrained weights assume 3)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet(in_channels, 'wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
class ImageEncoder(nn.Module):
    """Thin wrapper around a randomly initialised ResNet-34 that exposes its
    multi-scale feature maps for an RGB input batch.

    (A leftover no-op string literal referencing an undefined ``self.opt``
    mesh-grid setup was removed; it was never executed as code.)
    """

    def __init__(self):
        super(ImageEncoder, self).__init__()
        self.backbone = resnet34(in_channels=3, pretrained=False, progress=True)

    def forward(self, x):
        """Return the list of intermediate ResNet feature maps.

        :param x: Bx3xHxW image batch.
        :return: list of six tensors -- feature maps at strides /2, /4, /8,
            /16, /32 plus the globally average-pooled Cx1x1 feature
            (see ResNet.forward).
        """
        return self.backbone(x)
class ResidualConv(nn.Module):
    """Residual block whose skip path is always a projected convolution.

    NOTE(review): when kernel_1=True the skip conv ignores *stride*, so the
    two paths only match spatially for stride == 1 -- confirm intended usage.
    """

    def __init__(self, inplanes, planes, stride=1, kernel_1=False):
        super(ResidualConv, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Skip projection: 1x1 (stride ignored) or strided 3x3, plus BN.
        if kernel_1:
            skip_conv = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        else:
            skip_conv = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                                  padding=1, bias=False)
        self.conv_skip = nn.Sequential(skip_conv, nn.BatchNorm2d(planes))
        self.stride = stride

    def forward(self, x):
        shortcut = self.conv_skip(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class attention_pc2img(nn.Module):
    """Attention-based fusion of per-point features onto the image grid.

    A per-pixel softmax over ``output_channel`` scores weights a sum of the
    point-cloud features at every image location. Shapes, as inferred from the
    broadcasting in ``forward`` (TODO confirm against callers):

      * pc_global_feature: B x Cg (global point-cloud descriptor)
      * img_local_feature: B x Ci x H x W  (with Cg + Ci == in_channel)
      * pc_local_feature:  B x C x N, where N must equal output_channel

    Returns a B x C x H x W fused feature map.

    (A no-op string literal holding an older conv definition and two debug
    print comments were removed.)
    """

    def __init__(self, in_channel, output_channel):
        super(attention_pc2img, self).__init__()
        self.conv = nn.Sequential(ResidualConv(in_channel, in_channel),
                                  ResidualConv(in_channel, in_channel),
                                  nn.Conv2d(in_channel, output_channel, 1),
                                  nn.BatchNorm2d(output_channel),
                                  nn.ReLU())

    def forward(self, pc_global_feature, img_local_feature, pc_local_feature):
        B, _, H, W = img_local_feature.size()
        # Tile the global point-cloud descriptor over the image grid and score
        # every pixel against every point.
        feature = torch.cat([img_local_feature,
                             pc_global_feature.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, H, W)],
                            dim=1)
        feature = self.conv(feature)
        attention = F.softmax(feature, dim=1)  # per-pixel distribution over points
        # attention: Bx1xNxHxW, pc_local_feature: BxCxNx1x1 -> weighted sum over N.
        feature_fusion = torch.sum(attention.unsqueeze(1) * pc_local_feature.unsqueeze(-1).unsqueeze(-1),
                                   dim=2)
        return feature_fusion
class ImageUpSample(nn.Module):
    """Decoder stage: bilinearly upsample the coarse map 2x, concatenate the
    skip feature map, and fuse with two residual conv blocks.

    (Removed: a commented-out ConvTranspose2d alternative and a no-op string
    literal holding an older conv definition.)
    """

    def __init__(self, in_channel, output_channel):
        super(ImageUpSample, self).__init__()
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        self.conv = nn.Sequential(ResidualConv(in_channel, output_channel),
                                  ResidualConv(output_channel, output_channel))

    def forward(self, x1, x2):
        """Upsample x1 (the coarser map), concat with x2 along channels, fuse.

        ``in_channel`` must equal channels(x1) + channels(x2).
        """
        x1 = self.up(x1)
        return self.conv(torch.cat((x1, x2), dim=1))
if __name__ == '__main__':
    # Smoke test: run a random batch through the encoder and print the shape
    # of every pyramid level. Falls back to CPU when CUDA is unavailable
    # (the previous unconditional .cuda() crashed on CPU-only machines).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dummy = torch.rand(10, 3, 160, 512).to(device)
    model = ImageEncoder().to(device)
    for feat in model(dummy):
        print(feat.size())
CorrI2P | CorrI2P-main/operations.py | import time
import numpy as np
import math
import torch
# CUDA shared-memory tile dimensions. Nothing in this file reads them --
# presumably leftovers from a custom CUDA kernel; confirm before removing.
# generalized batch size
CUDA_SHARED_MEM_DIM_X = 24
# size of SOM (self-organizing map)
CUDA_SHARED_MEM_DIM_Y = 512
def knn_gather_wrapper(som_node, som_node_knn_I):
    """Gather the 3-D coordinates of each node's K nearest neighbours.

    Thin shape-checked wrapper over :func:`knn_gather_by_indexing`. The
    previous version computed B/N/K locals it never used, and its docstring
    listed the return value as a parameter.

    :param som_node: Bx3xN node coordinates.
    :param som_node_knn_I: BxNxK integer indices into the N dimension.
    :return: Bx3xNxK gathered neighbour coordinates.
    """
    assert som_node.size()[1] == 3  # wrapper is specific to 3-D coordinates
    return knn_gather_by_indexing(som_node, som_node_knn_I)
def knn_gather_by_indexing(som_node, som_node_knn_I):
    """Gather features along the point dimension.

    :param som_node: BxCxN feature tensor.
    :param som_node_knn_I: BxNxK integer indices into the N dimension.
    :return: BxCxNxK tensor with out[b, c, n, k] = som_node[b, c, I[b, n, k]].
    """
    B, C, N = som_node.size()
    K = som_node_knn_I.size()[2]
    # Broadcast the indices over the channel dimension, flatten N*K so a
    # single torch.gather call suffices, then restore the NxK layout.
    flat_index = som_node_knn_I.unsqueeze(1).expand(B, C, N, K).contiguous().view(B, C, N * K)
    return torch.gather(som_node, dim=2, index=flat_index).view(B, C, N, K)
if __name__ == '__main__':
    # from kitti.options_detector import Options
    # opt = Options().parse()  # set CUDA_VISIBLE_DEVICES before import torch
    print('Done.')
    # (Removed a trailing dataset-dump artifact line that was not valid Python.)
CorrI2P | CorrI2P-main/nuScenes/options.py | import numpy as np
import math
import torch
import random
class Options:
    """Hard-coded configuration for nuScenes CorrI2P experiments."""

    def __init__(self):
        # --- paths / experiment identity ---
        self.dataroot = '/extssd/jiaxin/nuscenes'
        # self.dataroot = '/data/personal/jiaxin/datasets/kitti'
        self.checkpoints_dir = 'checkpoints'
        self.version = '3.3'
        self.is_debug = False
        self.is_fine_resolution = True
        self.is_remove_ground = False
        # --- lidar sweep accumulation / pose sampling ranges ---
        self.accumulation_frame_num = 3  # 3
        self.accumulation_frame_skip = 4
        self.translation_max = 10.0
        self.test_translation_max = 10.0
        self.range_radius = 100
        # --- image preprocessing ---
        self.crop_original_top_rows = 100
        self.img_scale = 0.2
        self.img_H = 160  # after scale before crop 800 * 0.4 = 320
        self.img_W = 320  # after scale before crop 1600 * 0.4 = 640
        # the fine resolution is img_H/scale x img_W/scale
        self.img_fine_resolution_scale = 32
        # --- point cloud / SOM nodes ---
        self.num_kpt = 512
        self.input_pt_num = 40960
        self.node_a_num = 256
        self.node_b_num = 256
        self.k_ab = 32
        self.k_interp_ab = 3
        self.k_interp_point_a = 3
        self.k_interp_point_b = 3
        # --- random pose perturbation amplitudes (ENU coordinate) ---
        self.P_tx_amplitude = 10
        self.P_ty_amplitude = 0
        self.P_tz_amplitude = 10
        self.P_Rx_amplitude = 0
        self.P_Ry_amplitude = 2.0 * math.pi
        self.P_Rz_amplitude = 0
        # --- training hyper-parameters ---
        self.dataloader_threads = 10
        self.batch_size = 12
        self.gpu_ids = [1]
        self.device = torch.device('cuda', self.gpu_ids[0])
        self.normalization = 'batch'
        self.norm_momentum = 0.1
        self.activation = 'relu'
        self.lr = 0.001
        self.lr_decay_step = 15
        self.lr_decay_scale = 0.5
        self.vis_max_batch = 4
        # Weight on the coarse classification loss; larger when the fine
        # resolution head is enabled.
        if self.is_fine_resolution:
            self.coarse_loss_alpha = 50
        else:
            self.coarse_loss_alpha = 1
if __name__ == '__main__':
    # Smoke test: repeatedly sample a camera name. Only the front camera is
    # currently enabled. (Removed a trailing dataset-dump artifact line that
    # was not valid Python.)
    camera_name_list = ['CAM_FRONT',
                        # 'CAM_FRONT_LEFT',
                        # 'CAM_FRONT_RIGHT',
                        # 'CAM_BACK',
                        # 'CAM_BACK_LEFT',
                        # 'CAM_BACK_RIGHT'
                        ]
    for i in range(100):
        print(i, random.choice(camera_name_list))
CorrI2P | CorrI2P-main/nuScenes_script/make_dataset.py | import open3d
import torch.utils.data as data
import random
import numbers
import os
import os.path
import numpy as np
import struct
import math
import torch
import torchvision
import cv2
from PIL import Image
from torchvision import transforms
import pickle
from pyquaternion import Quaternion
import matplotlib
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from data import augmentation
from util import vis_tools
from nuscenes_t import options
from data.kitti_helper import FarthestSampler, camera_matrix_cropping, camera_matrix_scaling, projection_pc_img
from nuscenes.utils.data_classes import LidarPointCloud
from nuscenes.nuscenes import NuScenes
def downsample_with_reflectance(pointcloud, reflectance, voxel_grid_downsample_size):
    """Voxel-downsample a point cloud while carrying reflectance along.

    Reflectance is smuggled through Open3D's colour channel (normalised into
    the red component), since voxel_down_sample averages points and colours
    but has no notion of intensity.

    :param pointcloud: 3xN (or larger first dim; only rows 0..2 are used).
    :param reflectance: length-N intensity array.
    :param voxel_grid_downsample_size: voxel edge length.
    :return: (3xM downsampled points, length-M reflectance).
    """
    cloud = open3d.geometry.PointCloud()
    cloud.points = open3d.utility.Vector3dVector(np.transpose(pointcloud[0:3, :]))
    peak = np.max(reflectance)
    colors = np.zeros((pointcloud.shape[1], 3))
    colors[:, 0] = reflectance / peak
    cloud.colors = open3d.utility.Vector3dVector(colors)
    down = cloud.voxel_down_sample(voxel_size=voxel_grid_downsample_size)
    down_points = np.transpose(np.asarray(down.points))  # 3xM
    down_reflectance = np.asarray(down.colors)[:, 0] * peak
    return down_points, down_reflectance
def load_dataset_info(filepath):
    """Unpickle and return the dataset index stored at *filepath*."""
    with open(filepath, 'rb') as handle:
        return pickle.load(handle)
def make_nuscenes_dataset(root_path):
    """Load the precomputed index list from <root_path>/dataset_info.list.

    Each item is indexed below as (lidar_token, nearby_camera_token_dict).
    """
    dataset = load_dataset_info(os.path.join(root_path, 'dataset_info.list'))
    return dataset
def get_sample_data_ego_pose_P(nusc, sample_data):
    """Return the 4x4 ego-pose transform of a sample_data record.

    Call sites name the result P_oi (global <- ego, presumably -- confirm
    against the nuScenes convention).
    """
    sample_data_pose = nusc.get('ego_pose', sample_data['ego_pose_token'])
    # nuScenes stores the rotation as a quaternion; convert to a matrix.
    sample_data_pose_R = np.asarray(Quaternion(sample_data_pose['rotation']).rotation_matrix).astype(np.float32)
    sample_data_pose_t = np.asarray(sample_data_pose['translation']).astype(np.float32)
    sample_data_pose_P = get_P_from_Rt(sample_data_pose_R, sample_data_pose_t)
    return sample_data_pose_P
def get_calibration_P(nusc, sample_data):
    """Return the 4x4 sensor calibration transform of a sample_data record
    (vehicle <- sensor, per its use as P_vehicle_lidar below)."""
    calib = nusc.get('calibrated_sensor', sample_data['calibrated_sensor_token'])
    R = np.asarray(Quaternion(calib['rotation']).rotation_matrix).astype(np.float32)
    t = np.asarray(calib['translation']).astype(np.float32)
    P = get_P_from_Rt(R, t)
    return P
def get_P_from_Rt(R, t):
    """Assemble the 4x4 homogeneous transform [[R, t], [0, 1]] from a 3x3
    rotation and a length-3 translation."""
    transform = np.eye(4)
    transform[:3, :3] = R
    transform[:3, 3] = t
    return transform
def get_camera_K(nusc, camera):
    """Return the 3x3 float32 camera intrinsic matrix of a camera sample_data record."""
    calib = nusc.get('calibrated_sensor', camera['calibrated_sensor_token'])
    return np.asarray(calib['camera_intrinsic']).astype(np.float32)
def transform_pc_np(P, pc_np):
    """Apply a 4x4 homogeneous transform to a 3xN point cloud.

    :param P: 4x4 transform.
    :param pc_np: 3xN points.
    :return: 3xN transformed points.
    """
    ones = np.ones((1, pc_np.shape[1]), dtype=pc_np.dtype)
    pc_homo = np.concatenate((pc_np, ones), axis=0)
    transformed = np.dot(P, pc_homo)
    return transformed[0:3, :]
def get_lidar_pc_intensity_by_token(nusc, lidar_token):
    """Load one lidar sweep and its intensities, with ego-vehicle returns removed.

    :param nusc: NuScenes handle.
    :param lidar_token: sample_data token of the sweep.
    :return: (3xN points in the lidar frame, 1xN intensities, 4x4 ego pose P_oi).
    """
    lidar = nusc.get('sample_data', lidar_token)
    pc = LidarPointCloud.from_file(os.path.join(nusc.dataroot, lidar['filename']))
    pc_np = pc.points[0:3, :]
    intensity_np = pc.points[3:4, :]
    # Remove points that fall on the ego car: drop everything inside the
    # |x| < 0.8, |y| < 2.7 box around the sensor (the ego-vehicle footprint).
    x_inside = np.logical_and(pc_np[0, :] < 0.8, pc_np[0, :] > -0.8)
    y_inside = np.logical_and(pc_np[1, :] < 2.7, pc_np[1, :] > -2.7)
    inside_mask = np.logical_and(x_inside, y_inside)
    outside_mask = np.logical_not(inside_mask)
    pc_np = pc_np[:, outside_mask]
    intensity_np = intensity_np[:, outside_mask]
    P_oi = get_sample_data_ego_pose_P(nusc, lidar)
    return pc_np, intensity_np, P_oi
def lidar_frame_accumulation(nusc, opt, lidar, P_io, P_lidar_vehicle, P_vehicle_lidar,
                             direction,
                             pc_np_list, intensity_np_list):
    """Walk the sweep chain in *direction* ('next' or 'prev') and append up to
    opt.accumulation_frame_num sweeps, each transformed into the anchor lidar
    frame i.

    Only every opt.accumulation_frame_skip-th visited sweep is kept; skipped
    sweeps still advance the chain (the early 'continue' branch below).

    :param P_io: 4x4 inverse ego pose of the anchor sweep (ego_i <- global).
    :param P_lidar_vehicle: inverse lidar calibration (lidar <- vehicle).
    :param P_vehicle_lidar: lidar calibration (vehicle <- lidar).
    :return: the two lists, extended in place and returned.
    """
    counter = 1
    accumulated_counter = 0
    while accumulated_counter < opt.accumulation_frame_num:
        if lidar[direction] == '':
            break  # reached the end of the sweep chain
        if counter % opt.accumulation_frame_skip != 0:
            # Skip this sweep but still advance along the chain.
            counter += 1
            lidar = nusc.get('sample_data', lidar[direction])
            continue
        pc_np_j, intensity_np_j, P_oj = get_lidar_pc_intensity_by_token(nusc, lidar[direction])
        # P_ij maps sweep j's ego frame into the anchor ego frame i; conjugate
        # with the calibration so it acts between the two *lidar* frames.
        P_ij = np.dot(P_io, P_oj)
        P_ij_trans = np.dot(np.dot(P_lidar_vehicle, P_ij), P_vehicle_lidar)
        pc_np_j_transformed = transform_pc_np(P_ij_trans, pc_np_j)
        pc_np_list.append(pc_np_j_transformed)
        intensity_np_list.append(intensity_np_j)
        counter += 1
        lidar = nusc.get('sample_data', lidar[direction])
        accumulated_counter += 1
    # print('accumulation %s %d' % (direction, counter))
    return pc_np_list, intensity_np_list
def accumulate_lidar_points(nusc, lidar):
    """Accumulate the anchor sweep plus neighbouring sweeps (both directions)
    into one point cloud expressed in the anchor lidar frame.

    NOTE(review): this reads the module-level global ``opt`` (defined only in
    the __main__ block below) instead of taking it as a parameter -- it will
    raise NameError if imported and called from another module.

    :return: (3xN accumulated points, 1xN intensities).
    """
    pc_np_list = []
    intensity_np_list = []
    # load itself
    pc_np_i, intensity_np_i, P_oi = get_lidar_pc_intensity_by_token(nusc, lidar['token'])
    pc_np_list.append(pc_np_i)
    intensity_np_list.append(intensity_np_i)
    P_io = np.linalg.inv(P_oi)
    P_vehicle_lidar = get_calibration_P(nusc, lidar)
    P_lidar_vehicle = np.linalg.inv(P_vehicle_lidar)
    # load next
    pc_np_list, intensity_np_list = lidar_frame_accumulation(nusc, opt, lidar, P_io, P_lidar_vehicle, P_vehicle_lidar,
                                                             'next',
                                                             pc_np_list, intensity_np_list)
    # load prev
    pc_np_list, intensity_np_list = lidar_frame_accumulation(nusc, opt, lidar, P_io, P_lidar_vehicle, P_vehicle_lidar,
                                                             'prev',
                                                             pc_np_list, intensity_np_list)
    pc_np = np.concatenate(pc_np_list, axis=1)
    intensity_np = np.concatenate(intensity_np_list, axis=1)
    return pc_np, intensity_np
def downsample_np(pc_np, intensity_np, k):
    """Randomly sample exactly *k* columns from a point cloud and its intensities.

    When the cloud has at least k points, sampling is without replacement.
    When it has fewer (previously np.random.choice raised ValueError), every
    point is kept and the remainder is padded with repeats, so the output
    always has exactly k columns.

    :param pc_np: 3xN points.
    :param intensity_np: CxN intensities (same N).
    :param k: number of columns to keep.
    :return: (3xk points, Cxk intensities).
    """
    n = pc_np.shape[1]
    if n >= k:
        choice_idx = np.random.choice(n, k, replace=False)
    else:
        # Keep all points, then pad with whole repeats plus a random remainder.
        fix_idx = np.arange(n)
        while n + fix_idx.shape[0] < k:
            fix_idx = np.concatenate((fix_idx, np.arange(n)), axis=0)
        random_idx = np.random.choice(n, k - fix_idx.shape[0], replace=False)
        choice_idx = np.concatenate((fix_idx, random_idx), axis=0)
    return pc_np[:, choice_idx], intensity_np[:, choice_idx]
def make_dataset(root, output_path, mode, opt: options.Options):
    """Convert raw nuScenes data into the (PC, K, img) training/test layout.

    For every (lidar, nearby-camera) pair in the precomputed index this
    accumulates lidar sweeps, voxel-downsamples them, projects the points into
    the front camera, and counts pairs with sufficient image overlap.

    :param root: directory containing the 'trainval' / 'test' nuScenes splits.
    :param output_path: destination root; creates <output_path>/<mode>/{PC,K,img}.
    :param mode: 'train' or 'test'.
    :param opt: option object (crop / scale / point-count settings).
    """
    save_path = os.path.join(output_path, mode)
    pc_save_path = os.path.join(save_path, 'PC')
    K_save_path = os.path.join(save_path, 'K')
    img_save_path = os.path.join(save_path, 'img')
    # os.makedirs replaces the previous try/os.mkdir/except-pass blocks: it
    # creates intermediates and does not silently mask unrelated OSErrors.
    for directory in (output_path, save_path, pc_save_path, K_save_path, img_save_path):
        os.makedirs(directory, exist_ok=True)
    i = 0  # counter of qualifying samples (used for output file names)
    if mode == 'train':
        nuscenes_path = os.path.join(root, 'trainval')
        version = 'v1.0-trainval'
    else:
        nuscenes_path = os.path.join(root, 'test')
        version = 'v1.0-test'
    dataset = make_nuscenes_dataset(nuscenes_path)
    nusc = NuScenes(version=version, dataroot=nuscenes_path, verbose=True)
    for index in range(len(dataset)):
        print('%d/%d' % (index, len(dataset)))
        item = dataset[index]
        lidar_token = item[0]
        nearby_cam_token_dict = item[1]
        lidar = nusc.get('sample_data', lidar_token)
        pc_np, intensity_np = accumulate_lidar_points(nusc, lidar)
        pc_np, intensity_np = downsample_with_reflectance(pc_np, intensity_np[0],
                                                          voxel_grid_downsample_size=0.3)
        print('after sample', pc_np.shape[1])
        # NOTE(review): this opens a blocking Open3D window per sample --
        # debugging leftover; disable for unattended dataset generation.
        pointcloud = open3d.geometry.PointCloud()
        pointcloud.points = open3d.utility.Vector3dVector(pc_np.T)
        open3d.visualization.draw_geometries([pointcloud])
        if pc_np.shape[1] < 45000:
            continue  # too sparse after downsampling
        intensity_np = np.expand_dims(intensity_np, axis=0)
        pc_np = pc_np.astype(np.float32)
        intensity_np = intensity_np.astype(np.float32)
        lidar_calib_P = get_calibration_P(nusc, lidar)
        lidar_pose_P = get_sample_data_ego_pose_P(nusc, lidar)
        camera_name = 'CAM_FRONT'
        nearby_camera_token = random.choice(nearby_cam_token_dict[camera_name])
        camera = nusc.get('sample_data', nearby_camera_token)
        img = np.array(Image.open(os.path.join(nusc.dataroot, camera['filename'])))
        K = get_camera_K(nusc, camera)
        # Crop the sky rows, then rescale image and intrinsics together.
        img = img[opt.crop_original_top_rows:, :, :]
        K = camera_matrix_cropping(K, dx=0, dy=opt.crop_original_top_rows)
        img = cv2.resize(img,
                         (int(round(img.shape[1] * opt.img_scale)),
                          int(round((img.shape[0] * opt.img_scale)))),
                         interpolation=cv2.INTER_LINEAR)
        K = camera_matrix_scaling(K, opt.img_scale)
        # Chain lidar -> ego -> world -> ego -> camera into one transform.
        camera_calib_P = get_calibration_P(nusc, camera)
        camera_pose_P = get_sample_data_ego_pose_P(nusc, camera)
        camera_pose_P_inv = np.linalg.inv(camera_pose_P)
        camera_calib_P_inv = np.linalg.inv(camera_calib_P)
        P_cam_pc = np.dot(camera_calib_P_inv,
                          np.dot(camera_pose_P_inv, np.dot(lidar_pose_P, lidar_calib_P)))
        pc_np = np.dot(P_cam_pc[0:3, 0:3], pc_np) + P_cam_pc[0:3, 3:]
        pc_np_down, intensity_np_down = downsample_np(pc_np, intensity_np, opt.input_pt_num)
        # Project the downsampled cloud and count points landing inside the image.
        H = img.shape[0]
        W = img.shape[1]
        uvz = np.dot(K, pc_np_down)
        depth = uvz[2, :]
        uv = uvz[0:2, :] / uvz[2:, :]
        in_img = (depth > 0) & (uv[0, :] >= 0) & (uv[0, :] <= W - 1) \
                 & (uv[1, :] >= 0) & (uv[1, :] <= H - 1)
        if np.sum(in_img) > 6000:
            # NOTE(review): the actual np.save export is still disabled -- only
            # the qualifying-sample counter advances. Re-enable to write data:
            # np.save(os.path.join(pc_save_path, '%06d.npy' % i),
            #         np.concatenate((pc_np, intensity_np), axis=0).astype(np.float32))
            # np.save(os.path.join(K_save_path, '%06d.npy' % i), K.astype(np.float32))
            # np.save(os.path.join(img_save_path, '%06d.npy' % i), img.astype(np.float32))
            i = i + 1
if __name__ == '__main__':
    opt = options.Options()
    # Both splits currently point at the same local copy of the raw data.
    # (Removed a trailing dataset-dump artifact line that was not valid Python.)
    root1 = 'D:\\nuscene\\train_val_keyframes\\nuscene'  # 39125
    root2 = 'D:\\nuscene\\train_val_keyframes\\nuscene'
    make_dataset(root1, 'F:\\nuscenes', 'train', opt)
    make_dataset(root2, 'F:\\nuscenes', 'test', opt)
CorrI2P | CorrI2P-main/nuScenes_script/nuscenes/prediction/models/backbone.py | # nuScenes dev-kit.
# Code written by Freddy Boulton 2020.
from typing import Tuple
import torch
from torch import nn
from torchvision.models import (mobilenet_v2, resnet18, resnet34, resnet50,
resnet101, resnet152)
def trim_network_at_index(network: nn.Module, index: int = -1) -> nn.Module:
    """Re-wrap *network*'s children into an nn.Sequential, dropping the last
    ``-index`` of them.

    :param network: module whose direct children are kept.
    :param index: negative cut position, counted from the last child layer.
    """
    assert index < 0, f"Param index must be negative. Received {index}."
    kept_layers = list(network.children())[:index]
    return nn.Sequential(*kept_layers)
def calculate_backbone_feature_dim(backbone, input_shape: Tuple[int, int, int]) -> int:
    """Infer the flattened feature size a backbone produces for *input_shape*
    by pushing a single dummy batch of ones through it."""
    dummy = torch.ones(1, *input_shape)
    features = backbone.forward(dummy)
    return features.shape[-1]
# Maps the version string accepted by ResNetBackbone to the corresponding
# torchvision constructor.
RESNET_VERSION_TO_MODEL = {'resnet18': resnet18, 'resnet34': resnet34,
                           'resnet50': resnet50, 'resnet101': resnet101,
                           'resnet152': resnet152}
class ResNetBackbone(nn.Module):
    """
    Outputs tensor after last convolution before the fully connected layer.

    Allowed versions: resnet18, resnet34, resnet50, resnet101, resnet152.
    Note: the torchvision constructor is called with default arguments, so
    the weights are whatever that default produces (no explicit pretrained
    flag is passed here -- confirm the intended initialisation).
    """

    def __init__(self, version: str):
        """
        Inits ResNetBackbone
        :param version: resnet version to use.
        """
        super().__init__()

        if version not in RESNET_VERSION_TO_MODEL:
            raise ValueError(f'Parameter version must be one of {list(RESNET_VERSION_TO_MODEL.keys())}'
                             f'. Received {version}.')

        # Drop the final fc layer; keep everything up to (and incl.) avgpool.
        self.backbone = trim_network_at_index(RESNET_VERSION_TO_MODEL[version](), -1)

    def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
        """
        Outputs features after last convolution.
        :param input_tensor:  Shape [batch_size, n_channels, length, width].
        :return: Tensor of shape [batch_size, n_convolution_filters]. For resnet50,
            the shape is [batch_size, 2048].
        """
        backbone_features = self.backbone(input_tensor)
        # avgpool leaves a [B, C, 1, 1] tensor; flatten to [B, C].
        return torch.flatten(backbone_features, start_dim=1)
class MobileNetBackbone(nn.Module):
    """
    Outputs tensor after last convolution before the fully connected layer.

    Allowed versions: mobilenet_v2.
    """

    def __init__(self, version: str):
        """
        Inits MobileNetBackbone.
        :param version: mobilenet version to use.
        """
        super().__init__()

        if version != 'mobilenet_v2':
            raise NotImplementedError(f'Only mobilenet_v2 has been implemented. Received {version}.')

        # Drop the classifier; keep the convolutional feature extractor.
        self.backbone = trim_network_at_index(mobilenet_v2(), -1)

    def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
        """
        Outputs features after last convolution.
        :param input_tensor:  Shape [batch_size, n_channels, length, width].
        :return: Tensor of shape [batch_size, n_convolution_filters]. For mobilenet_v2,
            the shape is [batch_size, 1280].
        """
        backbone_features = self.backbone(input_tensor)
        # Global average pool over the spatial dimensions (H, W).
        return backbone_features.mean([2, 3])
CorrI2P | CorrI2P-main/nuScenes_script/nuscenes/prediction/models/covernet.py | # nuScenes dev-kit.
# Code written by Freddy Boulton, Tung Phan 2020.
from typing import List, Tuple, Callable, Union
import numpy as np
import torch
from torch import nn
from torch.nn import functional as f
from nuscenes.prediction.models.backbone import calculate_backbone_feature_dim
# Number of entries in Agent State Vector (the extra per-agent features that
# are concatenated to the image features in CoverNet.forward).
ASV_DIM = 3
class CoverNet(nn.Module):
    """ Implementation of CoverNet https://arxiv.org/pdf/1911.10298.pdf """

    def __init__(self, backbone: nn.Module, num_modes: int,
                 n_hidden_layers: List[int] = None,
                 input_shape: Tuple[int, int, int] = (3, 500, 500)):
        """
        Inits Covernet.
        :param backbone: Backbone model. Typically ResNetBackBone or MobileNetBackbone
        :param num_modes: Number of modes in the lattice
        :param n_hidden_layers: List of dimensions in the fully connected layers after the backbones.
            If None, set to [4096]
        :param input_shape: Shape of image input. Used to determine the dimensionality of the feature
            vector after the CNN backbone.
        """

        if n_hidden_layers and not isinstance(n_hidden_layers, list):
            raise ValueError(f"Param n_hidden_layers must be a list. Received {type(n_hidden_layers)}")

        super().__init__()

        if not n_hidden_layers:
            n_hidden_layers = [4096]

        self.backbone = backbone

        backbone_feature_dim = calculate_backbone_feature_dim(backbone, input_shape)
        # Head layer sizes: [image features + agent state] -> hidden -> num_modes.
        n_hidden_layers = [backbone_feature_dim + ASV_DIM] + n_hidden_layers + [num_modes]

        linear_layers = [nn.Linear(in_dim, out_dim)
                         for in_dim, out_dim in zip(n_hidden_layers[:-1], n_hidden_layers[1:])]
        self.head = nn.ModuleList(linear_layers)

    def forward(self, image_tensor: torch.Tensor,
                agent_state_vector: torch.Tensor) -> torch.Tensor:
        """
        :param image_tensor: Tensor of images in the batch.
        :param agent_state_vector: Tensor of agent state vectors in the batch
        :return: Logits for the batch.
        """
        backbone_features = self.backbone(image_tensor)

        logits = torch.cat([backbone_features, agent_state_vector], dim=1)

        # As written, the linear layers are applied back-to-back with no
        # activation between them -- confirm against the reference model.
        for linear in self.head:
            logits = linear(logits)

        return logits
def mean_pointwise_l2_distance(lattice: torch.Tensor, ground_truth: torch.Tensor) -> torch.Tensor:
    """
    Computes the index of the closest trajectory in the lattice, measured by
    mean pointwise *l2* (Euclidean) distance. (The previous docstring said
    "l1 distance", but the code squares, sums over the state dim, and takes
    a square root -- that is l2.)
    :param lattice: Lattice of pre-generated trajectories. Shape [num_modes, n_timesteps, state_dim]
    :param ground_truth: Ground truth trajectory of agent. Shape [1, n_timesteps, state_dim].
    :return: Index of closest mode in the lattice.
    """
    stacked_ground_truth = ground_truth.repeat(lattice.shape[0], 1, 1)
    # Per-timestep Euclidean norm, averaged over time, argmin over modes.
    return torch.pow(lattice - stacked_ground_truth, 2).sum(dim=2).sqrt().mean(dim=1).argmin()
class ConstantLatticeLoss:
    """
    Computes the loss for a constant lattice CoverNet model.
    """

    def __init__(self, lattice: Union[np.ndarray, torch.Tensor],
                 similarity_function: Callable[[torch.Tensor, torch.Tensor], int] = mean_pointwise_l2_distance):
        """
        Inits the loss.
        :param lattice: numpy array of shape [n_modes, n_timesteps, state_dim]
        :param similarity_function: Function that computes the index of the closest trajectory in the lattice
            to the actual ground truth trajectory of the agent.
        """
        self.lattice = torch.Tensor(lattice)
        self.similarity_func = similarity_function

    def __call__(self, batch_logits: torch.Tensor, batch_ground_truth_trajectory: torch.Tensor) -> torch.Tensor:
        """
        Computes the loss on a batch.
        :param batch_logits: Tensor of shape [batch_size, n_modes]. Output of a linear layer since this class
            uses nn.functional.cross_entropy.
        :param batch_ground_truth_trajectory: Tensor of shape [batch_size, 1, n_timesteps, state_dim]
        :return: Average element-wise loss on the batch.
        """

        # If using GPU, need to copy the lattice to the GPU if haven't done so already
        # This ensures we only copy it once
        if self.lattice.device != batch_logits.device:
            self.lattice = self.lattice.to(batch_logits.device)

        # Classification target per sample: the lattice mode closest to the
        # ground-truth trajectory under the configured similarity function.
        # (The per-sample Python loop could be vectorised, at some readability cost.)
        batch_losses = torch.Tensor().requires_grad_(True).to(batch_logits.device)
        for logit, ground_truth in zip(batch_logits, batch_ground_truth_trajectory):
            closest_lattice_trajectory = self.similarity_func(self.lattice, ground_truth)
            label = torch.LongTensor([closest_lattice_trajectory]).to(batch_logits.device)
            classification_loss = f.cross_entropy(logit.unsqueeze(0), label)
            batch_losses = torch.cat((batch_losses, classification_loss.unsqueeze(0)), 0)

        return batch_losses.mean()
| 4,935 | 39.793388 | 112 | py |
CorrI2P | CorrI2P-main/nuScenes_script/nuscenes/prediction/models/mtp.py | # nuScenes dev-kit.
# Code written by Freddy Boulton, Elena Corina Grigore 2020.
import math
import random
from typing import List, Tuple
import torch
from torch import nn
from torch.nn import functional as f
from nuscenes.prediction.models.backbone import calculate_backbone_feature_dim
# Number of entries in Agent State Vector
ASV_DIM = 3
class MTP(nn.Module):
    """
    Multiple-Trajectory Prediction (MTP) network,
    following https://arxiv.org/pdf/1809.10732.pdf
    """
    def __init__(self, backbone: nn.Module, num_modes: int,
                 seconds: float = 6, frequency_in_hz: float = 2,
                 n_hidden_layers: int = 4096, input_shape: Tuple[int, int, int] = (3, 500, 500)):
        """
        Inits the MTP network.
        :param backbone: CNN Backbone to use.
        :param num_modes: Number of predicted paths to estimate for each agent.
        :param seconds: Number of seconds into the future to predict.
            Default for the challenge is 6.
        :param frequency_in_hz: Frequency between timesteps in the prediction (in Hz).
            Highest frequency in nuScenes is 2 Hz.
        :param n_hidden_layers: Size of fully connected layer after the CNN
            backbone processes the image.
        :param input_shape: Shape of the input expected by the network. Needed because
            the size of the fully connected layer after the backbone depends on the
            backbone and its version.
        Note:
            Although seconds and frequency_in_hz are typed as floats, their
            product should be an int.
        """
        super().__init__()
        self.backbone = backbone
        self.num_modes = num_modes
        feature_dim = calculate_backbone_feature_dim(backbone, input_shape)
        self.fc1 = nn.Linear(feature_dim + ASV_DIM, n_hidden_layers)
        # Every mode emits an (x, y) pair per timestep, and each mode also gets one logit.
        n_timesteps = int(seconds * frequency_in_hz)
        self.fc2 = nn.Linear(n_hidden_layers, int(num_modes * (n_timesteps * 2) + num_modes))
    def forward(self, image_tensor: torch.Tensor,
                agent_state_vector: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the model.
        :param image_tensor: Tensor of images shape [batch_size, n_channels, length, width].
        :param agent_state_vector: Tensor of floats representing the agent state.
            [batch_size, 3].
        :return: Tensor of dimension [batch_size, number_of_modes * number_of_predictions_per_mode + number_of_modes]
            holding the predicted trajectories followed by the mode probabilities.
            Mode probabilities are normalized to sum to 1 during inference.
        """
        image_features = self.backbone(image_tensor)
        joint_features = torch.cat([image_features, agent_state_vector], dim=1)
        raw_output = self.fc2(self.fc1(joint_features))
        trajectories = raw_output[:, :-self.num_modes]
        mode_logits = raw_output[:, -self.num_modes:].clone()
        if not self.training:
            # At inference time the mode logits become a proper probability distribution.
            mode_logits = f.softmax(mode_logits, dim=-1)
        return torch.cat((trajectories, mode_logits), 1)
class MTPLoss:
    """ Computes the loss for the MTP model. """
    def __init__(self,
                 num_modes: int,
                 regression_loss_weight: float = 1.,
                 angle_threshold_degrees: float = 5.):
        """
        Inits MTP loss.
        :param num_modes: How many modes are being predicted for each agent.
        :param regression_loss_weight: Coefficient applied to the regression loss to
                balance classification and regression performance.
        :param angle_threshold_degrees: Minimum angle needed between a predicted trajectory
                and the ground to consider it a match.
        """
        self.num_modes = num_modes
        self.num_location_coordinates_predicted = 2  # We predict x, y coordinates at each timestep.
        self.regression_loss_weight = regression_loss_weight
        self.angle_threshold = angle_threshold_degrees
    def _get_trajectory_and_modes(self,
                                  model_prediction: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Splits the predictions from the model into mode probabilities and trajectory.
        :param model_prediction: Tensor of shape [batch_size, n_timesteps * n_modes * 2 + n_modes].
        :return: Tuple of tensors. First item is the trajectories of shape [batch_size, n_modes, n_timesteps, 2].
                Second item are the mode probabilities of shape [batch_size, num_modes].
        """
        # The mode logits occupy the last num_modes entries; everything before them
        # is the flattened (x, y) trajectory for every mode.
        mode_probabilities = model_prediction[:, -self.num_modes:].clone()
        desired_shape = (model_prediction.shape[0], self.num_modes, -1, self.num_location_coordinates_predicted)
        trajectories_no_modes = model_prediction[:, :-self.num_modes].clone().reshape(desired_shape)
        return trajectories_no_modes, mode_probabilities
    @staticmethod
    def _angle_between(ref_traj: torch.Tensor,
                       traj_to_compare: torch.Tensor) -> float:
        """
        Computes the angle between the last points of the two trajectories.
        The resulting angle is in degrees and is an angle in the [0; 180) interval.
        :param ref_traj: Tensor of shape [n_timesteps, 2].
        :param traj_to_compare: Tensor of shape [n_timesteps, 2].
        :return: Angle between the trajectories.
        """
        EPSILON = 1e-5
        if (ref_traj.ndim != 2 or traj_to_compare.ndim != 2 or
                ref_traj.shape[1] != 2 or traj_to_compare.shape[1] != 2):
            raise ValueError('Both tensors should have shapes (-1, 2).')
        # A NaN endpoint makes the angle undefined; treat it as the worst possible match.
        if torch.isnan(traj_to_compare[-1]).any() or torch.isnan(ref_traj[-1]).any():
            return 180. - EPSILON
        traj_norms_product = float(torch.norm(ref_traj[-1]) * torch.norm(traj_to_compare[-1]))
        # If either of the vectors described in the docstring has norm 0, return 0 as the angle.
        if math.isclose(traj_norms_product, 0):
            return 0.
        # We apply the max and min operations below to ensure there is no value
        # returned for cos_angle that is greater than 1 or less than -1.
        # This should never be the case, but the check is in place for cases where
        # we might encounter numerical instability.
        dot_product = float(ref_traj[-1].dot(traj_to_compare[-1]))
        angle = math.degrees(math.acos(max(min(dot_product / traj_norms_product, 1), -1)))
        if angle >= 180:
            return angle - EPSILON
        return angle
    @staticmethod
    def _compute_ave_l2_norms(tensor: torch.Tensor) -> float:
        """
        Compute the average of l2 norms of each row in the tensor.
        :param tensor: Shape [1, n_timesteps, 2].
        :return: Average l2 norm. Float.
        """
        l2_norms = torch.norm(tensor, p=2, dim=2)
        avg_distance = torch.mean(l2_norms)
        return avg_distance.item()
    def _compute_angles_from_ground_truth(self, target: torch.Tensor,
                                          trajectories: torch.Tensor) -> List[Tuple[float, int]]:
        """
        Compute angle between the target trajectory (ground truth) and the predicted trajectories.
        :param target: Shape [1, n_timesteps, 2].
        :param trajectories: Shape [n_modes, n_timesteps, 2].
        :return: List of angle, index tuples.
        """
        angles_from_ground_truth = []
        for mode, mode_trajectory in enumerate(trajectories):
            # For each mode, we compute the angle between the last point of the predicted trajectory for that
            # mode and the last point of the ground truth trajectory.
            angle = self._angle_between(target[0], mode_trajectory)
            angles_from_ground_truth.append((angle, mode))
        return angles_from_ground_truth
    def _compute_best_mode(self,
                           angles_from_ground_truth: List[Tuple[float, int]],
                           target: torch.Tensor, trajectories: torch.Tensor) -> int:
        """
        Finds the index of the best mode given the angles from the ground truth.
        :param angles_from_ground_truth: List of (angle, mode index) tuples.
        :param target: Shape [1, n_timesteps, 2]
        :param trajectories: Shape [n_modes, n_timesteps, 2]
        :return: Integer index of best mode.
        """
        # We first sort the modes based on the angle to the ground truth (ascending order), and keep track of
        # the index corresponding to the biggest angle that is still smaller than a threshold value.
        angles_from_ground_truth = sorted(angles_from_ground_truth)
        max_angle_below_thresh_idx = -1
        for angle_idx, (angle, mode) in enumerate(angles_from_ground_truth):
            if angle <= self.angle_threshold:
                max_angle_below_thresh_idx = angle_idx
            else:
                break
        # We choose the best mode at random IF there are no modes with an angle less than the threshold.
        if max_angle_below_thresh_idx == -1:
            best_mode = random.randint(0, self.num_modes - 1)
        # We choose the best mode to be the one that provides the lowest ave of l2 norms between the
        # predicted trajectory and the ground truth, taking into account only the modes with an angle
        # less than the threshold IF there is at least one mode with an angle less than the threshold.
        else:
            # Out of the selected modes above, we choose the final best mode as that which returns the
            # smallest ave of l2 norms between the predicted and ground truth trajectories.
            distances_from_ground_truth = []
            for angle, mode in angles_from_ground_truth[:max_angle_below_thresh_idx + 1]:
                norm = self._compute_ave_l2_norms(target - trajectories[mode, :, :])
                distances_from_ground_truth.append((norm, mode))
            distances_from_ground_truth = sorted(distances_from_ground_truth)
            best_mode = distances_from_ground_truth[0][1]
        return best_mode
    def __call__(self, predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        """
        Computes the MTP loss on a batch.
        The predictions are of shape [batch_size, n_ouput_neurons of last linear layer]
        and the targets are of shape [batch_size, 1, n_timesteps, 2]
        :param predictions: Model predictions for batch.
        :param targets: Targets for batch.
        :return: zero-dim tensor representing the loss on the batch.
        """
        trajectories, modes = self._get_trajectory_and_modes(predictions)
        # Collect per-example losses in a list and stack once at the end:
        # repeatedly torch.cat-ing onto a growing tensor is quadratic in batch size.
        batch_losses = []
        for batch_idx in range(predictions.shape[0]):
            angles = self._compute_angles_from_ground_truth(target=targets[batch_idx],
                                                            trajectories=trajectories[batch_idx])
            best_mode = self._compute_best_mode(angles,
                                                target=targets[batch_idx],
                                                trajectories=trajectories[batch_idx])
            best_mode_trajectory = trajectories[batch_idx, best_mode, :].unsqueeze(0)
            regression_loss = f.smooth_l1_loss(best_mode_trajectory, targets[batch_idx])
            mode_probabilities = modes[batch_idx].unsqueeze(0)
            best_mode_target = torch.tensor([best_mode], device=predictions.device)
            classification_loss = f.cross_entropy(mode_probabilities, best_mode_target)
            # The total loss trades off picking the right mode against matching its trajectory.
            loss = classification_loss + self.regression_loss_weight * regression_loss
            batch_losses.append(loss)
        return torch.mean(torch.stack(batch_losses))
| 11,960 | 44.135849 | 117 | py |
CorrI2P | CorrI2P-main/nuScenes_script/nuscenes/prediction/tests/test_mtp_loss.py |
import math
import unittest
try:
import torch
except ModuleNotFoundError:
raise unittest.SkipTest('Skipping test as torch was not found!')
from nuscenes.prediction.models import mtp
class TestMTPLoss(unittest.TestCase):
    """
    Test each component of MTPLoss as well as the
    __call__ method.
    """
    def test_get_trajectories_and_modes(self):
        """Checks that flat predictions split into trajectories and mode probabilities."""
        loss_n_modes_5 = mtp.MTPLoss(5, 0, 0)
        loss_n_modes_1 = mtp.MTPLoss(1, 0, 0)
        xy_pred = torch.arange(60).view(1, -1).repeat(1, 5).view(-1, 60)
        mode_pred = torch.arange(5).view(1, -1)
        prediction_bs_1 = torch.cat([xy_pred.reshape(1, -1), mode_pred], dim=1)
        prediction_bs_2 = prediction_bs_1.repeat(2, 1)
        # Testing many modes with batch size 1.
        traj, modes = loss_n_modes_5._get_trajectory_and_modes(prediction_bs_1)
        self.assertTrue(torch.allclose(traj, xy_pred.unsqueeze(0).reshape(1, 5, 30, 2)))
        self.assertTrue(torch.allclose(modes, mode_pred))
        # Testing many modes with batch size > 1.
        traj, modes = loss_n_modes_5._get_trajectory_and_modes(prediction_bs_2)
        self.assertTrue(torch.allclose(traj, xy_pred.repeat(1, 2).unsqueeze(0).reshape(2, 5, 30, 2)))
        self.assertTrue(torch.allclose(modes, mode_pred.repeat(2, 1)))
        xy_pred = torch.arange(60).view(1, -1).repeat(1, 1).view(-1, 60)
        mode_pred = torch.arange(1).view(1, -1)
        prediction_bs_1 = torch.cat([xy_pred.reshape(1, -1), mode_pred], dim=1)
        prediction_bs_2 = prediction_bs_1.repeat(2, 1)
        # Testing one mode with batch size 1.
        traj, modes = loss_n_modes_1._get_trajectory_and_modes(prediction_bs_1)
        self.assertTrue(torch.allclose(traj, xy_pred.unsqueeze(0).reshape(1, 1, 30, 2)))
        self.assertTrue(torch.allclose(modes, mode_pred))
        # Testing one mode with batch size > 1.
        traj, modes = loss_n_modes_1._get_trajectory_and_modes(prediction_bs_2)
        self.assertTrue(torch.allclose(traj, xy_pred.repeat(1, 2).unsqueeze(0).reshape(2, 1, 30, 2)))
        self.assertTrue(torch.allclose(modes, mode_pred.repeat(2, 1)))
    def test_angle_between_trajectories(self):
        """Checks _angle_between against hand-computed angles (degrees)."""
        def make_trajectory(last_point):
            # Only the last point matters for _angle_between.
            traj = torch.zeros((12, 2))
            traj[-1] = torch.Tensor(last_point)
            return traj
        loss = mtp.MTPLoss(0, 0, 0)
        # test angle is 0.
        self.assertEqual(loss._angle_between(make_trajectory([0, 0]), make_trajectory([0, 0])), 0.)
        self.assertEqual(loss._angle_between(make_trajectory([15, 15]), make_trajectory([15, 15])), 0.)
        # test angle is 15.
        self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 1]),
                                                   make_trajectory([math.sqrt(3)/2, 0.5])), 15., places=4)
        # test angle is 30.
        self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 0]),
                                                   make_trajectory([math.sqrt(3)/2, 0.5])), 30., places=4)
        # test angle is 45.
        self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 1]),
                                                   make_trajectory([0, 1])), 45., places=4)
        # test angle is 90.
        self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 1]),
                                                   make_trajectory([-1, 1])), 90., places=4)
        self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 0]),
                                                   make_trajectory([0, 1])), 90., places=4)
        # test angle is 180.
        self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 0]),
                                                   make_trajectory([-1, 0])), 180., places=4)
        self.assertAlmostEqual(loss._angle_between(make_trajectory([0, 1]),
                                                   make_trajectory([0, -1])), 180., places=4)
        self.assertAlmostEqual(loss._angle_between(make_trajectory([3, 1]),
                                                   make_trajectory([-3, -1])), 180., places=4)
    def test_compute_best_mode_nothing_below_threshold(self):
        """With no mode under the angle threshold, a random valid mode is chosen."""
        angles = [(90, 0), (80, 1), (70, 2)]
        target = None
        traj = None
        loss = mtp.MTPLoss(3, 0, 5)
        self.assertTrue(loss._compute_best_mode(angles, target, traj) in {0, 1, 2})
        loss = mtp.MTPLoss(3, 0, 65)
        self.assertTrue(loss._compute_best_mode(angles, target, traj) in {0, 1, 2})
    def test_compute_best_mode_only_one_below_threshold(self):
        """The single mode under the threshold must be chosen."""
        angles = [(30, 1), (3, 0), (25, 2)]
        target = torch.ones((1, 6, 2))
        trajectory = torch.zeros((3, 6, 2))
        loss = mtp.MTPLoss(3, 0, 5)
        self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 0)
    def test_compute_best_mode_multiple_below_threshold(self):
        """Among candidates under the threshold, the closest-by-l2 mode wins."""
        angles = [(2, 2), (4, 1), (10, 0)]
        target = torch.ones((1, 6, 2))
        trajectory = torch.zeros((3, 6, 2))
        trajectory[1] = 1
        loss = mtp.MTPLoss(3, 0, 5)
        self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 1)
    def test_compute_best_mode_only_one_mode(self):
        """With a single mode, it is always chosen regardless of the angle."""
        angles = [(25, 0)]
        target = torch.ones((1, 6, 2))
        trajectory = torch.zeros((1, 6, 2))
        loss = mtp.MTPLoss(1, 0, 5)
        self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 0)
        trajectory[0] = 1
        self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 0)
    def test_loss_single_mode(self):
        """End-to-end loss values in the single-mode (regression-only) case."""
        targets = torch.zeros((16, 1, 30, 2))
        targets[:, :, :, 1] = torch.arange(start=0, end=3, step=0.1)
        predictions = torch.ones((16, 61))
        predictions[:, :60] = targets[0, 0, :, :].reshape(-1, 60)
        predictions[:, 60] = 1/10
        loss = mtp.MTPLoss(1, 1, angle_threshold_degrees=20)
        # Only regression loss in single mode case.
        self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
                               0, places=4)
        # Now the best mode differs by 1 from the ground truth.
        # Smooth l1 loss subtracts 0.5 from l1 norm if diff >= 1.
        predictions[:, :60] += 1
        self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()), 0.5,
                               places=4)
        # In this case, one element has perfect regression, the others are off by 1.
        predictions[1, :60] -= 1
        self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
                               (15/16)*0.5,
                               places=4)
    def test_loss_many_modes(self):
        """End-to-end loss values with 10 modes (classification + regression)."""
        targets = torch.zeros((16, 1, 30, 2))
        targets[:, :, :, 1] = torch.arange(start=0, end=3, step=0.1)
        predictions = torch.ones((16, 610))
        predictions[:, 540:600] = targets[0, 0, :, :].reshape(-1, 60)
        predictions[:, -10:] = 1/10
        loss = mtp.MTPLoss(10, 1, angle_threshold_degrees=20)
        # Since one mode exactly matches gt, loss should only be classification error.
        self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
                               -math.log(1/10), places=4)
        # Now the best mode differs by 1 from the ground truth.
        # Smooth l1 loss subtracts 0.5 from l1 norm if diff >= 1.
        predictions[:, 540:600] += 1
        self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
                               -math.log(1/10) + 0.5,
                               places=4)
        # In this case, one element has perfect regression, the others are off by 1.
        predictions[1, 540:600] -= 1
        self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
                               -math.log(1/10) + (15/16)*0.5,
                               places=4)
| 7,907 | 41.28877 | 106 | py |
CorrI2P | CorrI2P-main/nuScenes_script/nuscenes/prediction/tests/test_covernet.py | # nuScenes dev-kit.
# Code written by Freddy Boulton, 2020.
import math
import unittest
try:
import torch
from torch.nn.functional import cross_entropy
except ModuleNotFoundError:
raise unittest.SkipTest('Skipping test as torch was not found!')
from nuscenes.prediction.models.backbone import ResNetBackbone
from nuscenes.prediction.models.covernet import mean_pointwise_l2_distance, ConstantLatticeLoss, CoverNet
class TestCoverNet(unittest.TestCase):
    """Smoke test: the CoverNet forward pass yields one logit per mode."""
    def test_shapes_in_forward_pass_correct(self):
        resnet = ResNetBackbone('resnet50')
        covernet = CoverNet(resnet, 5, n_hidden_layers=[4096], input_shape=(3, 100, 100))
        image = torch.zeros(4, 3, 100, 100)
        # Agent state vectors filled with random integers in [0, 12).
        asv = torch.empty(4, 3).random_(12)
        logits = covernet(image, asv)
        # Batch of 4 with 5 modes -> logits of shape (4, 5).
        self.assertTupleEqual(logits.shape, (4, 5))
class TestConstantLatticeLoss(unittest.TestCase):
    """Tests for the similarity function and the ConstantLatticeLoss itself."""
    def test_l1_distance(self):
        # NOTE(review): despite its name, this exercises mean_pointwise_l2_distance.
        lattice = torch.zeros(3, 6, 2)
        lattice[0] = torch.arange(1, 13).reshape(6, 2)
        lattice[1] = torch.arange(1, 13).reshape(6, 2) * 3
        lattice[2] = torch.arange(1, 13).reshape(6, 2) * 6
        # Should select the first mode
        ground_truth = torch.arange(1, 13, dtype=torch.float).reshape(6, 2).unsqueeze(0) + 2
        self.assertEqual(mean_pointwise_l2_distance(lattice, ground_truth), 0)
        # Should select the second mode
        ground_truth = torch.arange(1, 13, dtype=torch.float).reshape(6, 2).unsqueeze(0) * 3 + 4
        self.assertEqual(mean_pointwise_l2_distance(lattice, ground_truth), 1)
        # Should select the third mode
        ground_truth = torch.arange(1, 13, dtype=torch.float).reshape(6, 2).unsqueeze(0) * 6 + 10
        self.assertEqual(mean_pointwise_l2_distance(lattice, ground_truth), 2)
    def test_constant_lattice_loss(self):
        """Loss must equal cross entropy against the closest-mode labels."""
        def generate_trajectory(theta: float) -> torch.Tensor:
            # Straight 6-step ray from the origin heading at angle theta (radians).
            trajectory = torch.zeros(6, 2)
            trajectory[:, 0] = torch.arange(6, dtype=torch.float) * math.cos(theta)
            trajectory[:, 1] = torch.arange(6, dtype=torch.float) * math.sin(theta)
            return trajectory
        lattice = torch.zeros(3, 6, 2)
        lattice[0] = generate_trajectory(math.pi / 2)
        lattice[1] = generate_trajectory(math.pi / 4)
        lattice[2] = generate_trajectory(3 * math.pi / 4)
        # Each ground truth heading is nearest (by mean l2) to a known lattice mode.
        ground_truth = torch.zeros(5, 1, 6, 2)
        ground_truth[0, 0] = generate_trajectory(0.2)
        ground_truth[1, 0] = generate_trajectory(math.pi / 3)
        ground_truth[2, 0] = generate_trajectory(5 * math.pi / 6)
        ground_truth[3, 0] = generate_trajectory(6 * math.pi / 11)
        ground_truth[4, 0] = generate_trajectory(4 * math.pi / 9)
        logits = torch.Tensor([[2, 10, 5],
                               [-3, 4, 5],
                               [-4, 2, 7],
                               [8, -2, 3],
                               [10, 3, 6]])
        answer = cross_entropy(logits, torch.LongTensor([1, 1, 2, 0, 0]))
        loss = ConstantLatticeLoss(lattice, mean_pointwise_l2_distance)
        loss_value = loss(logits, ground_truth)
        self.assertAlmostEqual(float(loss_value.detach().numpy()), float(answer.detach().numpy()))
| 3,212 | 36.8 | 105 | py |
CorrI2P | CorrI2P-main/nuScenes_script/nuscenes/prediction/tests/run_covernet.py | # nuScenes dev-kit.
# Code written by Freddy Boulton, 2020.
"""
Regression test to see if CoverNet implementation can overfit on a single example.
"""
import argparse
import math
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, IterableDataset
from nuscenes.prediction.models.backbone import MobileNetBackbone
from nuscenes.prediction.models.covernet import CoverNet, ConstantLatticeLoss
def generate_trajectory(theta: float) -> torch.Tensor:
    """Return a straight 6-step ray from the origin heading at angle *theta* (radians)."""
    steps = torch.arange(6, dtype=torch.float)
    return torch.stack((steps * math.cos(theta), steps * math.sin(theta)), dim=1)
class Dataset(IterableDataset):
    """ Implements an infinite dataset of the same input image, agent state vector and ground truth label. """
    def __iter__(self,):
        # Yield the identical (image, agent state, ground truth) triple forever;
        # the ground truth is always the straight trajectory heading pi / 2.
        while True:
            image = torch.zeros((3, 100, 100))
            agent_state_vector = torch.ones(3)
            ground_truth = generate_trajectory(math.pi / 2)
            yield image, agent_state_vector, ground_truth.unsqueeze(0)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run CoverNet to make sure it overfits on a single test case.')
    parser.add_argument('--use_gpu', type=int, help='Whether to use gpu', default=0)
    args = parser.parse_args()
    if args.use_gpu:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device('cpu')
    dataset = Dataset()
    dataloader = DataLoader(dataset, batch_size=16, num_workers=0)
    backbone = MobileNetBackbone('mobilenet_v2')
    model = CoverNet(backbone, num_modes=3, input_shape=(3, 100, 100))
    model = model.to(device)
    # Fixed lattice of three straight-line modes; the dataset's ground truth
    # always matches lattice[0] (heading pi / 2).
    lattice = torch.zeros(3, 6, 2)
    lattice[0] = generate_trajectory(math.pi / 2)
    lattice[1] = generate_trajectory(math.pi / 4)
    lattice[2] = generate_trajectory(3 * math.pi / 4)
    loss_function = ConstantLatticeLoss(lattice)
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    n_iter = 0
    minimum_loss = 0
    for img, agent_state_vector, ground_truth in dataloader:
        img = img.to(device)
        agent_state_vector = agent_state_vector.to(device)
        ground_truth = ground_truth.to(device)
        optimizer.zero_grad()
        logits = model(img, agent_state_vector)
        loss = loss_function(logits, ground_truth)
        loss.backward()
        optimizer.step()
        current_loss = loss.cpu().detach().numpy()
        print(f"Current loss is {current_loss:.2f}")
        # Stop once the loss is near zero, i.e. the model has overfit the single example.
        if np.allclose(current_loss, minimum_loss, atol=1e-2):
            print(f"Achieved near-zero loss after {n_iter} iterations.")
            break
        n_iter += 1
| 2,733 | 28.397849 | 112 | py |
CorrI2P | CorrI2P-main/nuScenes_script/nuscenes/prediction/tests/test_mtp.py | import unittest
try:
import torch
except ModuleNotFoundError:
raise unittest.SkipTest('Skipping test as torch was not found!')
from nuscenes.prediction.models import backbone
from nuscenes.prediction.models import mtp
class TestMTP(unittest.TestCase):
    """Smoke tests: MTP forward pass shapes for every supported backbone."""
    def setUp(self):
        # Batches of size 1 and 5 are used to check shape handling.
        self.image = torch.ones((1, 3, 100, 100))
        self.agent_state_vector = torch.ones((1, 3))
        self.image_5 = torch.ones((5, 3, 100, 100))
        self.agent_state_vector_5 = torch.ones((5, 3))
    def _run(self, model):
        """Runs forward passes in train and eval mode and checks the output shapes."""
        pred = model(self.image, self.agent_state_vector)
        pred_5 = model(self.image_5, self.agent_state_vector_5)
        self.assertTupleEqual(pred.shape, (1, 75))
        self.assertTupleEqual(pred_5.shape, (5, 75))
        model.training = False
        pred = model(self.image, self.agent_state_vector)
        # In eval mode the last 3 entries are mode probabilities and must sum to 1.
        self.assertTrue(torch.allclose(pred[:, -3:].sum(axis=1), torch.ones(pred.shape[0])))
    def test_works_with_resnet_18(self,):
        rn_18 = backbone.ResNetBackbone('resnet18')
        model = mtp.MTP(rn_18, 3, 6, 2, input_shape=(3, 100, 100))
        self._run(model)
    def test_works_with_resnet_34(self,):
        rn_34 = backbone.ResNetBackbone('resnet34')
        model = mtp.MTP(rn_34, 3, 6, 2, input_shape=(3, 100, 100))
        self._run(model)
    def test_works_with_resnet_50(self,):
        rn_50 = backbone.ResNetBackbone('resnet50')
        model = mtp.MTP(rn_50, 3, 6, 2, input_shape=(3, 100, 100))
        self._run(model)
    def test_works_with_resnet_101(self,):
        rn_101 = backbone.ResNetBackbone('resnet101')
        model = mtp.MTP(rn_101, 3, 6, 2, input_shape=(3, 100, 100))
        self._run(model)
    def test_works_with_resnet_152(self,):
        rn_152 = backbone.ResNetBackbone('resnet152')
        model = mtp.MTP(rn_152, 3, 6, 2, input_shape=(3, 100, 100))
        self._run(model)
    def test_works_with_mobilenet_v2(self,):
        mobilenet = backbone.MobileNetBackbone('mobilenet_v2')
        model = mtp.MTP(mobilenet, 3, 6, 2, input_shape=(3, 100, 100))
        self._run(model)
| 2,085 | 32.111111 | 92 | py |
CorrI2P | CorrI2P-main/nuScenes_script/nuscenes/prediction/tests/test_backbone.py | import unittest
try:
import torch
from torchvision.models.resnet import BasicBlock, Bottleneck
except ModuleNotFoundError:
raise unittest.SkipTest('Skipping test as torch was not found!')
from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone
class TestBackBones(unittest.TestCase):
    """Shape and depth sanity checks for the ResNet and MobileNet backbones."""
    def count_layers(self, model):
        """
        Counts the weighted layers of a ResNet given as the list of its children.
        Children 4-7 are the residual stages; each block contributes 2 (BasicBlock)
        or 3 (Bottleneck) convolutions, plus 2 for the stem conv and the final fc.
        """
        if isinstance(model[4][0], BasicBlock):
            n_convs = 2
        elif isinstance(model[4][0], Bottleneck):
            n_convs = 3
        else:
            raise ValueError("Backbone layer block not supported!")
        return sum([len(model[i]) for i in range(4, 8)]) * n_convs + 2
    def test_resnet(self):
        """Checks feature dimension and layer count for every supported ResNet."""
        rn_18 = ResNetBackbone('resnet18')
        rn_34 = ResNetBackbone('resnet34')
        rn_50 = ResNetBackbone('resnet50')
        rn_101 = ResNetBackbone('resnet101')
        rn_152 = ResNetBackbone('resnet152')
        tensor = torch.ones((1, 3, 100, 100))
        self.assertEqual(rn_18(tensor).shape[1], 512)
        self.assertEqual(rn_34(tensor).shape[1], 512)
        self.assertEqual(rn_50(tensor).shape[1], 2048)
        self.assertEqual(rn_101(tensor).shape[1], 2048)
        # Fixed: was assertAlmostEqual, inconsistent with the sibling assertions
        # and unnecessary for an exact integer dimension.
        self.assertEqual(rn_152(tensor).shape[1], 2048)
        self.assertEqual(self.count_layers(list(rn_18.backbone.children())), 18)
        self.assertEqual(self.count_layers(list(rn_34.backbone.children())), 34)
        self.assertEqual(self.count_layers(list(rn_50.backbone.children())), 50)
        self.assertEqual(self.count_layers(list(rn_101.backbone.children())), 101)
        self.assertEqual(self.count_layers(list(rn_152.backbone.children())), 152)
        with self.assertRaises(ValueError):
            ResNetBackbone('resnet51')
    def test_mobilenet(self):
        """Checks the MobileNet v2 feature dimension."""
        mobilenet = MobileNetBackbone('mobilenet_v2')
        tensor = torch.ones((1, 3, 100, 100))
        self.assertEqual(mobilenet(tensor).shape[1], 1280)
CorrI2P | CorrI2P-main/nuScenes_script/nuscenes/prediction/tests/run_image_generation.py | import argparse
from typing import List
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from nuscenes import NuScenes
from nuscenes.prediction import PredictHelper
from nuscenes.prediction.input_representation.static_layers import StaticLayerRasterizer
from nuscenes.prediction.models.backbone import ResNetBackbone
from nuscenes.prediction.models.mtp import MTP, MTPLoss
class TestDataset(Dataset):
    """Map-style dataset that rasterizes the static map layers for each token and
    pairs the image with a dummy agent state vector and a fixed ground truth."""
    def __init__(self, tokens: List[str], helper: PredictHelper):
        # Each token is an "instance-token_sample-token" string.
        self.tokens = tokens
        self.static_layer_representation = StaticLayerRasterizer(helper)
    def __len__(self):
        return len(self.tokens)
    def __getitem__(self, index: int):
        token = self.tokens[index]
        instance_token, sample_token = token.split("_")
        image = self.static_layer_representation.make_representation(instance_token, sample_token)
        # HWC -> CHW for the CNN backbone.
        image = torch.Tensor(image).permute(2, 0, 1)
        agent_state_vector = torch.ones((3))
        # Dummy straight-ahead ground truth; this script only stresses image generation.
        ground_truth = torch.ones((1, 12, 2))
        ground_truth[:, :, 1] = torch.arange(0, 6, step=0.5)
        return image, agent_state_vector, ground_truth
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Makes sure image generation code can run on gpu "
                                                 "with multiple workers")
    parser.add_argument('--data_root', type=str)
    # NOTE(review): argparse's type=bool treats any non-empty string as True
    # (e.g. "--use_gpu False" still enables the GPU); consider type=int as in run_covernet.py.
    parser.add_argument('--use_gpu', type=bool, help='Whether to use gpu', default=False)
    args = parser.parse_args()
    NUM_MODES = 1
    if args.use_gpu:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device('cpu')
    # "instance-token_sample-token" pairs used to build the rasterized inputs.
    tokens = ['bd26c2cdb22d4bb1834e808c89128898_ca9a282c9e77460f8360f564131a8af5',
              '085fb7c411914888907f7198e998a951_ca9a282c9e77460f8360f564131a8af5',
              'bc38961ca0ac4b14ab90e547ba79fbb6_ca9a282c9e77460f8360f564131a8af5',
              '56a71c208ac6472f90b6a82529a6ce61_ca9a282c9e77460f8360f564131a8af5',
              '85246a44cc6340509e3882e2ff088391_ca9a282c9e77460f8360f564131a8af5',
              '42641eb6adcb4f8f8def8ef129d9e843_ca9a282c9e77460f8360f564131a8af5',
              '4080c30aa7104d91ad005a50b18f6108_ca9a282c9e77460f8360f564131a8af5',
              'c1958768d48640948f6053d04cffd35b_ca9a282c9e77460f8360f564131a8af5',
              '4005437c730645c2b628dc1da999e06a_39586f9d59004284a7114a68825e8eec',
              'a017fe4e9c3d445784aae034b1322006_356d81f38dd9473ba590f39e266f54e5',
              'a0049f95375044b8987fbcca8fda1e2b_c923fe08b2ff4e27975d2bf30934383b',
              '61dd7d03d7ad466d89f901ed64e2c0dd_e0845f5322254dafadbbed75aaa07969',
              '86ed8530809d4b1b8fbc53808f599339_39586f9d59004284a7114a68825e8eec',
              '2a80b29c0281435ca4893e158a281ce0_2afb9d32310e4546a71cbe432911eca2',
              '8ce4fe54af77467d90c840465f69677f_de7593d76648450e947ba0c203dee1b0',
              'f4af7fd215ee47aa8b64bac0443d7be8_9ee4020153674b9e9943d395ff8cfdf3']
    tokens = tokens * 32
    nusc = NuScenes('v1.0-trainval', dataroot=args.data_root)
    helper = PredictHelper(nusc)
    dataset = TestDataset(tokens, helper)
    dataloader = DataLoader(dataset, batch_size=16, num_workers=16)
    backbone = ResNetBackbone('resnet18')
    model = MTP(backbone, NUM_MODES)
    model = model.to(device)
    loss_function = MTPLoss(NUM_MODES, 1, 5)
    current_loss = 10000
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    n_iter = 0
    minimum_loss = 0
    # Train forever; this script only verifies image generation keeps up with training.
    while True:
        for img, agent_state_vector, ground_truth in dataloader:
            img = img.to(device)
            agent_state_vector = agent_state_vector.to(device)
            ground_truth = ground_truth.to(device)
            optimizer.zero_grad()
            prediction = model(img, agent_state_vector)
            loss = loss_function(prediction, ground_truth)
            loss.backward()
            optimizer.step()
            current_loss = loss.cpu().detach().numpy()
            print(f"Current loss is {current_loss:.4f}")
            if n_iter % 32 == 0:
                print(f"Number of iterations: {n_iter}.")
            n_iter += 1
| 4,227 | 34.830508 | 99 | py |
CorrI2P | CorrI2P-main/nuScenes_script/nuscenes/prediction/tests/run_mtp.py | # nuScenes dev-kit.
# Code written by Freddy Boulton, 2020.
"""
Regression test to see if MTP can overfit on a single example.
"""
import argparse
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, IterableDataset
from nuscenes.prediction.models.backbone import ResNetBackbone
from nuscenes.prediction.models.mtp import MTP, MTPLoss
class Dataset(IterableDataset):
    """
    Infinite synthetic dataset: the input image and agent state are constant,
    while the target path heads forward with 75% probability and backward with
    25% probability (always forward when only a single mode is requested).
    """
    def __init__(self, num_modes: int = 1):
        self.num_modes = num_modes
    def __iter__(self,):
        while True:
            img = torch.zeros((3, 100, 100))
            asv = torch.ones(3)
            gt = torch.ones((1, 12, 2))
            # Single-mode training always goes forward; otherwise sample the direction.
            forward = True if self.num_modes == 1 else np.random.rand() > 0.25
            sign = 1.0 if forward else -1.0
            gt[:, :, 1] = sign * torch.arange(0, 6, step=0.5)
            yield img, asv, gt
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run MTP to make sure it overfits on a single test case.')
    parser.add_argument('--num_modes', type=int, help='How many modes to learn.', default=1)
    # NOTE(review): argparse `type=bool` converts ANY non-empty string to True,
    # so `--use_gpu False` would still select the GPU branch — confirm intended.
    parser.add_argument('--use_gpu', type=bool, help='Whether to use gpu', default=False)
    args = parser.parse_args()
    if args.use_gpu:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device('cpu')
    dataset = Dataset(args.num_modes)
    dataloader = DataLoader(dataset, batch_size=16, num_workers=0)
    backbone = ResNetBackbone('resnet18')  # image encoder for the MTP head
    model = MTP(backbone, args.num_modes)
    model = model.to(device)
    loss_function = MTPLoss(args.num_modes, 1, 5)
    current_loss = 10000  # sentinel; overwritten on the first iteration
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    n_iter = 0
    minimum_loss = 0  # expected loss floor used as the convergence target below
    if args.num_modes == 2:
        # We expect to see 75% going_forward and
        # 25% going backward. So the minimum
        # classification loss is expected to be
        # 0.56234
        minimum_loss += 0.56234
    # Dataset is infinite, so this loop runs until the loss reaches the floor.
    for img, agent_state_vector, ground_truth in dataloader:
        img = img.to(device)
        agent_state_vector = agent_state_vector.to(device)
        ground_truth = ground_truth.to(device)
        optimizer.zero_grad()
        prediction = model(img, agent_state_vector)
        loss = loss_function(prediction, ground_truth)
        loss.backward()
        optimizer.step()
        current_loss = loss.cpu().detach().numpy()
        print(f"Current loss is {current_loss:.4f}")
        if np.allclose(current_loss, minimum_loss, atol=1e-4):
            print(f"Achieved near-zero loss after {n_iter} iterations.")
            break
        n_iter += 1
| 3,074 | 26.954545 | 107 | py |
be_great | be_great-main/be_great/great_utils.py | import typing as tp
import numpy as np
import pandas as pd
import torch
from transformers import AutoTokenizer
def _array_to_dataframe(
data: tp.Union[pd.DataFrame, np.ndarray], columns=None
) -> pd.DataFrame:
"""Converts a Numpy Array to a Pandas DataFrame
Args:
data: Pandas DataFrame or Numpy NDArray
columns: If data is a Numpy Array, columns needs to be a list of all column names
Returns:
Pandas DataFrame with the given data
"""
if isinstance(data, pd.DataFrame):
return data
assert isinstance(
data, np.ndarray
), "Input needs to be a Pandas DataFrame or a Numpy NDArray"
assert (
columns
), "To convert the data into a Pandas DataFrame, a list of column names has to be given!"
assert len(columns) == len(
data[0]
), "%d column names are given, but array has %d columns!" % (
len(columns),
len(data[0]),
)
return pd.DataFrame(data=data, columns=columns)
def _get_column_distribution(df: pd.DataFrame, col: str) -> tp.Union[list, dict]:
"""Returns the distribution of a given column. If continuous, returns a list of all values.
If categorical, returns a dictionary in form {"A": 0.6, "B": 0.4}
Args:
df: pandas DataFrame
col: name of the column
Returns:
Distribution of the column
"""
if df[col].dtype == "float":
col_dist = df[col].to_list()
else:
col_dist = df[col].value_counts(1).to_dict()
return col_dist
def _convert_tokens_to_text(
tokens: tp.List[torch.Tensor], tokenizer: AutoTokenizer
) -> tp.List[str]:
"""Decodes the tokens back to strings
Args:
tokens: List of tokens to decode
tokenizer: Tokenizer used for decoding
Returns:
List of decoded strings
"""
# Convert tokens to text
text_data = [tokenizer.decode(t) for t in tokens]
# Clean text
text_data = [d.replace("<|endoftext|>", "") for d in text_data]
text_data = [d.replace("\n", " ") for d in text_data]
text_data = [d.replace("\r", "") for d in text_data]
return text_data
def _convert_text_to_tabular_data(
text: tp.List[str], df_gen: pd.DataFrame
) -> pd.DataFrame:
"""Converts the sentences back to tabular data
Args:
text: List of the tabular data in text form
df_gen: Pandas DataFrame where the tabular data is appended
Returns:
Pandas DataFrame with the tabular data from the text appended
"""
columns = df_gen.columns.to_list()
# Convert text to tabular data
for t in text:
features = t.split(",")
td = dict.fromkeys(columns)
# Transform all features back to tabular data
for f in features:
values = f.strip().split(" is ")
if values[0] in columns and not td[values[0]]:
try:
td[values[0]] = [values[1]]
except IndexError:
# print("An Index Error occurred - if this happends a lot, consider fine-tuning your model further.")
pass
df_gen = pd.concat([df_gen, pd.DataFrame(td)], ignore_index=True, axis=0)
return df_gen
def _encode_row_partial(row, shuffle=True):
"""Function that takes a row and converts all columns into the text representation that are not NaN."""
num_cols = len(row.index)
if not shuffle:
idx_list = np.arange(num_cols)
else:
idx_list = np.random.permutation(num_cols)
lists = ", ".join(
sum(
[
[f"{row.index[i]} is {row[row.index[i]]}"]
if not pd.isna(row[row.index[i]])
else []
for i in idx_list
],
[],
)
)
return lists
# Now append first NaN attribute
def _get_random_missing(row):
"""Return a random missing column or None if all columns are filled."""
nans = list(row[pd.isna(row)].index)
return np.random.choice(nans) if len(nans) > 0 else None
def _partial_df_to_promts(partial_df: pd.DataFrame):
    """Turn a DataFrame with missing values into GReaT starting prompts.

    For each row, the filled cells are encoded as "<col> is <val>" clauses
    (in random order) and one randomly chosen missing column is appended as
    the open "<col> is" continuation point.

    Args:
        partial_df: Pandas DataFrame where missing values are encoded by NaN.

    Returns:
        List of strings with the starting prompt for each sample.
    """
    encoded = list(partial_df.apply(lambda r: _encode_row_partial(r, True), axis=1))
    first_missing = list(partial_df.apply(_get_random_missing, axis=1))
    prompts = []
    for enc, fst in zip(encoded, first_missing):
        # Edge case: a fully-missing row encodes to "", so no leading comma.
        prefix = enc + ", " if len(enc) > 0 else ""
        suffix = fst + " is" if fst is not None else ""
        prompts.append(prefix + suffix)
    return prompts
class bcolors:
    """
    We love colors, you?

    ANSI terminal escape sequences used to colorize console messages.
    """

    HEADER = "\033[95m"  # bright magenta
    OKBLUE = "\033[94m"  # bright blue
    OKGREEN = "\033[92m"  # bright green
    WARNING = "\033[93m"  # bright yellow
    FAIL = "\033[91m"  # bright red
    ENDC = "\033[0m"  # reset all styling
| 5,134 | 28.176136 | 121 | py |
be_great | be_great-main/be_great/great_trainer.py | import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from transformers import Trainer
def _seed_worker(_):
"""
Helper function to set worker seed during Dataloader initialization.
"""
worker_seed = torch.initial_seed() % 2**32
random.seed(worker_seed)
np.random.seed(worker_seed)
torch.manual_seed(worker_seed)
torch.cuda.manual_seed_all(worker_seed)
class GReaTTrainer(Trainer):
    """GReaT Trainer

    Overwrites the get_train_dataloader method of the HuggingFace Trainer to not remove the "unused" columns -
    they are needed later!
    """

    def get_train_dataloader(self) -> DataLoader:
        """Build the training DataLoader without dropping dataset columns.

        Returns:
            DataLoader over ``self.train_dataset`` configured from the
            trainer's batch size, sampler, collator and dataloader arguments.

        Raises:
            ValueError: If no training dataset was given to the trainer.
        """
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")
        data_collator = self.data_collator
        # Deliberately skip Trainer._remove_unused_columns: the "unused"
        # columns carry the tabular fields the collator needs later.
        train_dataset = (
            self.train_dataset
        )  # self._remove_unused_columns(self.train_dataset, description="training")
        train_sampler = self._get_train_sampler()
        return DataLoader(
            train_dataset,
            batch_size=self._train_batch_size,
            sampler=train_sampler,
            collate_fn=data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            # Re-seed each worker so augmentation/shuffling is reproducible.
            worker_init_fn=_seed_worker,
        )
| 1,416 | 28.520833 | 111 | py |
be_great | be_great-main/be_great/great.py | import os
import warnings
import json
import typing as tp
import logging
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments
from be_great.great_dataset import GReaTDataset, GReaTDataCollator
from be_great.great_start import (
GReaTStart,
CategoricalStart,
ContinuousStart,
RandomStart,
_pad_tokens,
)
from be_great.great_trainer import GReaTTrainer
from be_great.great_utils import (
_array_to_dataframe,
_get_column_distribution,
_convert_tokens_to_text,
_convert_text_to_tabular_data,
_partial_df_to_promts,
bcolors,
)
class GReaT:
    """GReaT Class

    The GReaT class handles the whole generation flow. It is used to fine-tune a large language model for tabular data,
    and to sample synthetic tabular data.

    Attributes:
        llm (str): HuggingFace checkpoint of a pretrained large language model, used a basis of our model
        tokenizer (AutoTokenizer): Tokenizer, automatically downloaded from llm-checkpoint
        model (AutoModelForCausalLM): Large language model, automatically downloaded from llm-checkpoint
        experiment_dir (str): Directory, where the training checkpoints will be saved
        epochs (int): Number of epochs to fine-tune the model
        batch_size (int): Batch size used for fine-tuning
        train_hyperparameters (dict): Additional hyperparameters added to the TrainingArguments used by the
         HuggingFaceLibrary, see here the full list of all possible values
         https://huggingface.co/docs/transformers/main/en/main_classes/trainer#transformers.TrainingArguments
        columns (list): List of all features/columns of the tabular dataset
        num_cols (list): List of all numerical features/columns of the tabular dataset
        conditional_col (str): Name of a feature/column on which the sampling can be conditioned
        conditional_col_dist (dict | list): Distribution of the feature/column specified by condtional_col
    """

    def __init__(
        self,
        llm: str,
        experiment_dir: str = "trainer_great",
        epochs: int = 100,
        batch_size: int = 8,
        efficient_finetuning: str = "",
        **train_kwargs,
    ):
        """Initializes GReaT.

        Args:
            llm: HuggingFace checkpoint of a pretrained large language model, used a basis of our model
            experiment_dir:  Directory, where the training checkpoints will be saved
            epochs: Number of epochs to fine-tune the model
            batch_size: Batch size used for fine-tuning
            efficient_finetuning: Indication of fune-tuning method
            train_kwargs: Additional hyperparameters added to the TrainingArguments used by the HuggingFaceLibrary,
             see here the full list of all possible values
             https://huggingface.co/docs/transformers/main/en/main_classes/trainer#transformers.TrainingArguments
        """
        # Load Model and Tokenizer from HuggingFace
        self.efficient_finetuning = efficient_finetuning
        self.llm = llm
        self.tokenizer = AutoTokenizer.from_pretrained(self.llm)
        # GPT-2-style checkpoints ship without a pad token; reuse EOS for padding.
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.model = AutoModelForCausalLM.from_pretrained(self.llm)

        if self.efficient_finetuning == "lora":
            # Lazy importing
            try:
                from peft import (
                    LoraConfig,
                    get_peft_model,
                    prepare_model_for_int8_training,
                    TaskType,
                )
            except ImportError:
                raise ImportError(
                    "This function requires the 'peft' package. Please install it with - pip install peft"
                )

            # Define LoRA Config
            lora_config = LoraConfig(
                r=16,  # only training 0.16% of the parameters of the model
                lora_alpha=32,
                target_modules=[
                    "c_attn"
                ],  # this is specific for gpt2 model, to be adapted
                lora_dropout=0.05,
                bias="none",
                task_type=TaskType.CAUSAL_LM,  # this is specific for gpt2 model, to be adapted
            )
            # prepare int-8 model for training
            self.model = prepare_model_for_int8_training(self.model)
            # add LoRA adaptor
            self.model = get_peft_model(self.model, lora_config)
            self.model.print_trainable_parameters()

        # Set the training hyperparameters
        self.experiment_dir = experiment_dir
        self.epochs = epochs
        self.batch_size = batch_size
        self.train_hyperparameters = train_kwargs

        # Needed for the sampling process
        self.columns = None
        self.num_cols = None
        self.conditional_col = None
        self.conditional_col_dist = None

    def fit(
        self,
        data: tp.Union[pd.DataFrame, np.ndarray],
        column_names: tp.Optional[tp.List[str]] = None,
        conditional_col: tp.Optional[str] = None,
        resume_from_checkpoint: tp.Union[bool, str] = False,
    ) -> GReaTTrainer:
        """Fine-tune GReaT using tabular data.

        Args:
            data: Pandas DataFrame or Numpy Array that contains the tabular data
            column_names: If data is Numpy Array, the feature names have to be defined. If data is Pandas
            DataFrame, the value is ignored
            conditional_col: If given, the distribution of this column is saved and used as a starting
            point for the generation process later. If None, the last column is considered as conditional feature
            resume_from_checkpoint: If True, resumes training from the latest checkpoint in the experiment_dir.
            If path, resumes the training from the given checkpoint (has to be a valid HuggingFace checkpoint!)

        Returns:
            GReaTTrainer used for the fine-tuning process
        """
        df = _array_to_dataframe(data, columns=column_names)
        self._update_column_information(df)
        self._update_conditional_information(df, conditional_col)

        # Convert DataFrame into HuggingFace dataset object
        logging.info("Convert data into HuggingFace dataset object...")
        great_ds = GReaTDataset.from_pandas(df)
        great_ds.set_tokenizer(self.tokenizer)

        # Set training hyperparameters
        logging.info("Create GReaT Trainer...")
        training_args = TrainingArguments(
            self.experiment_dir,
            num_train_epochs=self.epochs,
            per_device_train_batch_size=self.batch_size,
            **self.train_hyperparameters,
        )
        great_trainer = GReaTTrainer(
            self.model,
            training_args,
            train_dataset=great_ds,
            tokenizer=self.tokenizer,
            data_collator=GReaTDataCollator(self.tokenizer),
        )

        # Start training
        logging.info("Start training...")
        great_trainer.train(resume_from_checkpoint=resume_from_checkpoint)
        return great_trainer

    def sample(
        self,
        n_samples: int,
        start_col: tp.Optional[str] = "",
        start_col_dist: tp.Optional[tp.Union[dict, list]] = None,
        temperature: float = 0.7,
        k: int = 100,
        max_length: int = 100,
        device: str = "cuda",
    ) -> pd.DataFrame:
        """Generate synthetic tabular data samples

        Args:
            n_samples: Number of synthetic samples to generate
            start_col: Feature to use as starting point for the generation process. If not given, the target
             learned during the fitting is used as starting point
            start_col_dist: Feature distribution of the starting feature. Should have the format
             "{F1: p1, F2: p2, ...}" for discrete columns or be a list of possible values for continuous columns.
             If not given, the target distribution learned during the fitting is used as starting point
            temperature: The generation samples each token from the probability distribution given by a softmax
             function. The temperature parameter controls the softmax function. A low temperature makes it sharper
             (0 equals greedy search), a high temperature brings more diversity but also uncertainty into the output.
             See this blog article (https://huggingface.co/blog/how-to-generate) to read more about the generation
             process
            k: Sampling Batch Size. Set as high as possible. Speeds up the generation process significantly
            max_length: Maximal number of tokens to generate - has to be long enough to not cut any information!
            device: Set to "cpu" if the GPU should not be used. You can also specify the concrete GPU

        Returns:
            Pandas DataFrame with n_samples rows of generated data
        """
        great_start = self._get_start_sampler(start_col, start_col_dist)

        # Move model to device
        self.model.to(device)

        # Init empty DataFrame for the generated samples
        df_gen = pd.DataFrame(columns=self.columns)

        # Start generation process
        with tqdm(total=n_samples) as pbar:
            already_generated = 0
            _cnt = 0
            try:
                while n_samples > df_gen.shape[0]:
                    start_tokens = great_start.get_start_tokens(k)
                    start_tokens = torch.tensor(start_tokens).to(device)

                    # Generate tokens
                    tokens = self.model.generate(
                        input_ids=start_tokens,
                        max_length=max_length,
                        do_sample=True,
                        temperature=temperature,
                        pad_token_id=50256,  # GPT-2's <|endoftext|> token id
                    )

                    # Convert tokens back to tabular data
                    text_data = _convert_tokens_to_text(tokens, self.tokenizer)
                    df_gen = _convert_text_to_tabular_data(text_data, df_gen)

                    # Remove rows with flawed numerical values
                    for i_num_cols in self.num_cols:
                        df_gen = df_gen[
                            pd.to_numeric(df_gen[i_num_cols], errors="coerce").notnull()
                        ]
                    df_gen[self.num_cols] = df_gen[self.num_cols].astype(float)

                    # Remove rows with missing values
                    df_gen = df_gen.drop(df_gen[df_gen.isna().any(axis=1)].index)

                    # Update process bar
                    pbar.update(df_gen.shape[0] - already_generated)
                    already_generated = df_gen.shape[0]

                    # Check if we actually generating synth samples and if not break everything
                    _cnt += 1
                    if _cnt > 13 and already_generated == 0:  # (:
                        raise Exception("Breaking the generation loop!")
            except Exception as e:
                print(f"{bcolors.FAIL}An error has occurred: {str(e)}{bcolors.ENDC}")
                print(
                    f"{bcolors.WARNING}To address this issue, consider fine-tuning the GReaT model for an longer period. This can be achieved by increasing the number of epochs.{bcolors.ENDC}"
                )
                print(
                    f"{bcolors.WARNING}Alternatively, you might consider increasing the max_length parameter within the sample function. For example: model.sample(n_samples=10, max_length=2000){bcolors.ENDC}"
                )
                print(
                    f"{bcolors.OKBLUE}If the problem persists despite these adjustments, feel free to raise an issue on our GitHub page at: https://github.com/kathrinse/be_great/issues{bcolors.ENDC}"
                )

        df_gen = df_gen.reset_index(drop=True)
        return df_gen.head(n_samples)

    def great_sample(
        self,
        starting_prompts: tp.Union[str, list[str]],
        temperature: float = 0.7,
        max_length: int = 100,
        device: str = "cuda",
    ) -> pd.DataFrame:
        """Generate synthetic tabular data samples conditioned on a given input.

        Args:
            starting_prompts: String or List of Strings on which the output is conditioned.
             For example, "Sex is female, Age is 26"
            temperature: The generation samples each token from the probability distribution given by a softmax
             function. The temperature parameter controls the softmax function. A low temperature makes it sharper
             (0 equals greedy search), a high temperature brings more diversity but also uncertainty into the output.
             See this blog article (https://huggingface.co/blog/how-to-generate) to read more about the generation
             process.
            max_length: Maximal number of tokens to generate - has to be long enough to not cut any information
            device: Set to "cpu" if the GPU should not be used. You can also specify the concrete GPU.

        Returns:
            Pandas DataFrame with synthetic data generated based on starting_prompts
        """
        # ToDo: Add n_samples argument to generate more samples for one conditional input.
        self.model.to(device)
        starting_prompts = (
            [starting_prompts]
            if isinstance(starting_prompts, str)
            else starting_prompts
        )
        generated_data = []

        # Generate a sample for each starting point
        if len(starting_prompts) > 1:
            loop_iter = tqdm(starting_prompts)
        else:
            loop_iter = starting_prompts
        for prompt in loop_iter:
            start_token = torch.tensor(self.tokenizer(prompt)["input_ids"]).to(device)

            # Generate tokens
            gen = self.model.generate(
                input_ids=torch.unsqueeze(start_token, 0),
                max_length=max_length,
                do_sample=True,
                temperature=temperature,
                pad_token_id=50256,  # GPT-2's <|endoftext|> token id
            )
            generated_data.append(torch.squeeze(gen))

        # Convert Text back to Tabular Data
        decoded_data = _convert_tokens_to_text(generated_data, self.tokenizer)
        df_gen = _convert_text_to_tabular_data(
            decoded_data, pd.DataFrame(columns=self.columns)
        )

        return df_gen

    def impute(
        self,
        df_miss: pd.DataFrame,
        temperature: float = 0.7,
        k: int = 100,
        max_length: int = 100,
        max_retries=15,
        device: str = "cuda",
    ) -> pd.DataFrame:
        """Impute a DataFrame with missing values using a trained GReaT model.

        Args:
            df_miss: pandas data frame of the exact same format (column names, value ranges/types) as the data that
             was used to train the GReaT model, however some values might be missing, which is indicated by the value of NaN.
             This function will sample the missing values conditioned on the remaining values.
            temperature: The generation samples each token from the probability distribution given by a softmax
             function. The temperature parameter controls the softmax function. A low temperature makes it sharper
             (0 equals greedy search), a high temperature brings more diversity but also uncertainty into the output.
             See this blog article (https://huggingface.co/blog/how-to-generate) to read more about the generation
             process
            k: Sampling Batch Size. Set as high as possible. Speeds up the generation process significantly
            max_length: Maximal number of tokens to generate - has to be long enough to not cut any information!
            device: Set to "cpu" if the GPU should not be used. You can also specify the specific GPU to run on.

        Returns:
            Pandas DataFrame with n_samples rows of generated data
        """
        # Check DataFrame passed.
        if set(df_miss.columns) != set(self.columns):
            raise ValueError(
                "The column names in the DataFrame passed to impute do not match the columns of the GReaT model."
            )

        self.model.to(device)

        index = 0
        df_list = []
        with tqdm(total=len(df_miss)) as pbar:
            while index < len(df_miss):
                is_complete = False
                retries = 0
                df_curr = df_miss.iloc[[index]]
                org_index = df_curr.index  # Keep index in new DataFrame
                while not is_complete:
                    # Generate text promt from current features.
                    starting_prompts = _partial_df_to_promts(df_curr)
                    df_curr = self.great_sample(
                        starting_prompts, temperature, max_length, device=device
                    )

                    # Convert numerical values to float, flawed numerical values to NaN
                    for i_num_cols in self.num_cols:
                        df_curr[i_num_cols] = pd.to_numeric(
                            df_curr[i_num_cols], errors="coerce"
                        )
                    # FIX: `np.float` was removed in NumPy 1.24; use the builtin.
                    df_curr[self.num_cols] = df_curr[self.num_cols].astype(float)

                    # Check for missing values
                    if not df_curr.isna().any().any():
                        is_complete = True
                        df_list.append(df_curr.set_index(org_index))
                    else:
                        retries += 1
                        if retries == max_retries:
                            warnings.warn("Max retries reached.")
                            break
                index += 1
                pbar.update(1)
        return pd.concat(df_list, axis=0)

    def save(self, path: str):
        """Save GReaT Model

        Saves the model weights and a configuration file in the given directory.

        Args:
            path: Path where to save the model
        """
        # Make directory
        if os.path.isdir(path):
            warnings.warn(f"Directory {path} already exists and is overwritten now.")
        else:
            os.mkdir(path)

        # Save attributes
        with open(path + "/config.json", "w") as f:
            attributes = self.__dict__.copy()
            # tokenizer and model are saved separately / reconstructed on load.
            attributes.pop("tokenizer")
            attributes.pop("model")

            # NDArray is not JSON serializable and therefore has to be converted into a list.
            if isinstance(attributes["conditional_col_dist"], np.ndarray):
                attributes["conditional_col_dist"] = list(
                    attributes["conditional_col_dist"]
                )

            json.dump(attributes, f)

        # Save model weights
        torch.save(self.model.state_dict(), path + "/model.pt")

    def load_finetuned_model(self, path: str):
        """Load fine-tuned model

        Load the weights of a fine-tuned large language model into the GReaT pipeline

        Args:
            path: Path to the fine-tuned model
        """
        self.model.load_state_dict(torch.load(path))

    @classmethod
    def load_from_dir(cls, path: str):
        """Load GReaT class

        Load trained GReaT model from directory.

        Args:
            path: Directory where GReaT model is saved

        Returns:
            New instance of GReaT loaded from directory
        """
        assert os.path.isdir(path), f"Directory {path} does not exist."

        # Load attributes
        with open(path + "/config.json", "r") as f:
            attributes = json.load(f)

        # Create new be_great model instance
        great = cls(attributes["llm"])

        # Set all attributes
        for k, v in attributes.items():
            setattr(great, k, v)

        # Load model weights
        great.model.load_state_dict(torch.load(path + "/model.pt", map_location="cpu"))

        return great

    def _update_column_information(self, df: pd.DataFrame):
        # Update the column names (and numerical columns for some sanity checks after sampling)
        self.columns = df.columns.to_list()
        self.num_cols = df.select_dtypes(include=np.number).columns.to_list()

    def _update_conditional_information(
        self, df: pd.DataFrame, conditional_col: tp.Optional[str] = None
    ):
        assert conditional_col is None or isinstance(
            conditional_col, str
        ), f"The column name has to be a string and not {type(conditional_col)}"
        assert (
            conditional_col is None or conditional_col in df.columns
        ), f"The column name {conditional_col} is not in the feature names of the given dataset"

        # Take the distribution of the conditional column for a starting point in the generation process
        self.conditional_col = conditional_col if conditional_col else df.columns[-1]
        self.conditional_col_dist = _get_column_distribution(df, self.conditional_col)

    def _get_start_sampler(
        self,
        start_col: tp.Optional[str],
        start_col_dist: tp.Optional[tp.Union[tp.Dict, tp.List]],
    ) -> GReaTStart:
        if start_col and start_col_dist is None:
            raise ValueError(
                f"Start column {start_col} was given, but no corresponding distribution."
            )
        if start_col_dist is not None and not start_col:
            raise ValueError(
                f"Start column distribution {start_col} was given, the column name is missing."
            )

        assert start_col is None or isinstance(
            start_col, str
        ), f"The column name has to be a string and not {type(start_col)}"
        assert (
            start_col_dist is None
            or isinstance(start_col_dist, dict)
            or isinstance(start_col_dist, list)
        ), f"The distribution of the start column on has to be a list or a dict and not {type(start_col_dist)}"

        start_col = start_col if start_col else self.conditional_col
        start_col_dist = start_col_dist if start_col_dist else self.conditional_col_dist

        if isinstance(start_col_dist, dict):
            return CategoricalStart(self.tokenizer, start_col, start_col_dist)
        elif isinstance(start_col_dist, list):
            return ContinuousStart(self.tokenizer, start_col, start_col_dist)
        else:
            return RandomStart(self.tokenizer, self.columns)
| 22,761 | 42.028355 | 208 | py |
FLAC | FLAC-main/flac.py | import torch
import numpy as np
def pairwise_distances(a, b=None, eps=1e-6):
    """
    Calculates the pairwise distances between matrices a and b (or a and a, if b is not set)
    :param a: (n_a, d) tensor of row vectors
    :param b: optional (n_b, d) tensor of row vectors; defaults to a
    :param eps: small constant added before the square root for numerical stability
    :return: (n_a, n_b) tensor of Euclidean distances
    """
    if b is None:
        b = a
    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y, via broadcasting.
    sq_a = torch.sum(a**2, dim=1).unsqueeze(1)  # (n_a, 1)
    sq_b = torch.sum(b**2, dim=1).unsqueeze(0)  # (1, n_b)
    cross = torch.mm(a, b.transpose(0, 1))
    squared = sq_a + sq_b - 2 * cross
    # Clamp away tiny negatives caused by floating-point cancellation.
    squared = torch.clamp(squared, min=0, max=np.inf)
    return torch.sqrt(squared + eps)
def flac_loss(protected_attr_features: torch.Tensor, features: torch.Tensor, labels: torch.Tensor, d: float = 1) -> torch.Tensor:
    """FLAC fairness regularizer.

    Matches the pairwise-similarity distribution of the target features to the
    (inverted) similarity distribution of the protected-attribute features on a
    selected set of sample pairs, using a Jeffreys (symmetric KL) divergence.

    :param protected_attr_features: per-sample features of the protected attribute
    :param features: per-sample target features of the model being trained
    :param labels: class labels, shape (batch,)
    :param d: exponent applied to the pairwise distances inside the kernel
    :return: scalar loss (0 if no pair qualifies for the mask)
    """
    # Protected attribute features kernel
    protected_d = pairwise_distances(protected_attr_features)
    protected_s = 1.0 / (1 + protected_d**d)
    # Target features kernel
    features_d = pairwise_distances(features)
    features_s = 1.0 / (1 + features_d**d)
    # Threshold: midpoint between the extreme protected similarities.
    th = (torch.max(protected_s) + torch.min(protected_s)) / 2
    # calc the mask: keep pairs with the same label but LOW protected similarity,
    # or different labels but HIGH protected similarity (`&` binds tighter than `|`).
    mask = (labels[:, None] == labels) & (protected_s < th) | (
        labels[:, None] != labels
    ) & (protected_s > th)
    mask = mask.to(labels.device)
    # if mask is empty, return zero
    if sum(sum(mask)) == 0:
        return torch.tensor(0.0).to(labels.device)
    # similarity to distance
    protected_s = 1 - protected_s
    # convert to probabilities (row-wise normalization over masked entries only)
    protected_s = protected_s / (
        torch.sum(protected_s * mask.int().float(), dim=1, keepdim=True) + 1e-7
    )
    features_s = features_s / (
        torch.sum(features_s * mask.int().float(), dim=1, keepdim=True) + 1e-7
    )
    # Jeffrey's divergence between the two masked distributions:
    # (p - q) * (log p - log q), averaged over masked pairs.
    loss = (protected_s[mask] - features_s[mask]) * (
        torch.log(protected_s[mask]) - torch.log(features_s[mask])
    )
    return torch.mean(loss)
| 1,798 | 27.109375 | 92 | py |
FLAC | FLAC-main/train_imagenet.py | import argparse
import datetime
import logging
import os
import time
from pathlib import Path
import numpy as np
import torch
from torch import nn
from datasets.imagenet import get_imagenet
from models.imagenet_models import resnet18
from utils.logging import set_logging
from utils.utils import AverageMeter, pretty_dict, save_model, set_seed
from flac import flac_loss
from tqdm import tqdm
def parse_option():
    """Parse the command-line options for FLAC ImageNet training.

    Also restricts CUDA to the requested GPU via CUDA_VISIBLE_DEVICES as a
    side effect, so the rest of the script can address device 0.

    Returns:
        argparse.Namespace with the parsed options
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp_name", type=str, default="test")
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--print_freq", type=int, default=300, help="print frequency")
    parser.add_argument("--save_freq", type=int, default=200, help="save frequency")
    parser.add_argument("--epochs", type=int, default=250, help="number of training epochs")
    parser.add_argument("--seed", type=int, default=1)
    parser.add_argument("--alpha", type=float, default=1000)
    parser.add_argument("--bs", type=int, default=128, help="batch_size")
    parser.add_argument("--lr", type=float, default=1e-3)
    # hyperparameters
    parser.add_argument("--weight", type=float, default=1.0)
    parser.add_argument("--ratio", type=int, default=0)
    parser.add_argument("--aug", type=int, default=1)

    opt = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu)
    return opt
def set_model():
    """Build the classifier and its loss.

    Returns a ResNet-18 with a 9-way head (the 9 ImageNet super-classes used
    in this experiment) together with a cross-entropy criterion.
    NOTE(review): `.cuda()` assumes a GPU is available.
    """
    model = resnet18(num_classes=9).cuda()
    criterion = nn.CrossEntropyLoss()
    return model, criterion
def train(train_loader, model, criterion, optimizer, epoch, opt):
    """Run one training epoch with the FLAC regularizer.

    Args:
        train_loader: yields (images, labels, _, _, protected-attribute features)
        model: network returning (logits, features)
        criterion: classification loss (cross-entropy)
        optimizer: optimizer updating `model`
        epoch: current epoch index (kept for interface compatibility; unused here)
        opt: parsed options; `opt.alpha` weights the FLAC term

    Returns:
        Tuple of epoch-averaged (ce_loss, flac_loss, total_loss)
    """
    model.train()
    avg_ce_loss = AverageMeter()
    avg_con_loss = AverageMeter()
    avg_loss = AverageMeter()
    # Iterate the loader directly; the previous enumerate(iter(...)) wrapper
    # produced an unused index.
    for images, labels, _, _, pr_feat in tqdm(train_loader):
        bsz = labels.shape[0]
        labels = labels.cuda()
        pr_feat = pr_feat.cuda()
        images = images.cuda()

        logits, features = model(images)
        # FLAC term decorrelates the learned features from the protected attribute.
        loss_mi_div = opt.alpha * (flac_loss(pr_feat, features, labels, 0.5))
        loss_cl = criterion(logits, labels)
        loss = loss_cl + loss_mi_div

        avg_ce_loss.update(loss_cl.item(), bsz)
        avg_con_loss.update(loss_mi_div.item(), bsz)
        avg_loss.update(loss.item(), bsz)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return avg_ce_loss.avg, avg_con_loss.avg, avg_loss.avg
def imagenet_unbiased_accuracy(
    outputs, labels, cluster_labels, num_correct, num_instance, num_cluster_repeat=3
):
    """Accumulate per-(class, cluster) hit and instance counts.

    For each of the `num_cluster_repeat` clusterings, every sample's top-1
    prediction is compared against its label and tallied into the cell
    [label][cluster] of the corresponding count matrices.

    Returns:
        The updated (num_correct, num_instance) pair.
    """
    for j in range(num_cluster_repeat):
        for i in range(outputs.size(0)):
            label = labels[i]
            cluster = cluster_labels[j][i]
            # Top-1 prediction for this single sample's score vector.
            _, pred = outputs[i].topk(1, 0, largest=True, sorted=True)
            hit = pred.eq(label).view(-1).float()
            num_correct[j][label][cluster] += hit.item()
            num_instance[j][label][cluster] += 1
    return num_correct, num_instance
def n_correct(pred, labels):
    """Count batch samples whose top-scoring class equals the label."""
    _, top_class = torch.max(pred.data, 1)
    matches = top_class == labels
    return matches.sum().item()
def validate(
    val_loader, model, num_classes=9, num_clusters=9, num_cluster_repeat=3, key=None
):
    """Evaluate `model` on `val_loader`.

    For `key == "unbiased"` the accuracy is averaged over (class, cluster)
    cells with at least 10 instances, then averaged over the cluster
    repetitions; otherwise plain top-1 accuracy is returned.

    Returns:
        Accuracy in [0, 1].
    """
    model.eval()
    total = 0
    f_correct = 0
    num_correct = [
        np.zeros([num_classes, num_clusters]) for _ in range(num_cluster_repeat)
    ]
    num_instance = [
        np.zeros([num_classes, num_clusters]) for _ in range(num_cluster_repeat)
    ]

    # FIX: disable autograd during evaluation — avoids building computation
    # graphs for every batch and greatly reduces memory use.
    with torch.no_grad():
        for images, labels, bias_labels, index, _ in val_loader:
            images, labels = images.cuda(), labels.cuda()
            for bias_label in bias_labels:
                # NOTE(review): `.cuda()` returns a copy that is discarded here,
                # so this is a no-op; bias labels stay on their original device.
                bias_label.cuda()
            output, _ = model(images)

            batch_size = labels.size(0)
            total += batch_size

            if key == "unbiased":
                num_correct, num_instance = imagenet_unbiased_accuracy(
                    output.data,
                    labels,
                    bias_labels,
                    num_correct,
                    num_instance,
                    num_cluster_repeat,
                )
            else:
                f_correct += n_correct(output, labels)

    if key == "unbiased":
        for k in range(num_cluster_repeat):
            x, y = [], []
            _num_correct, _num_instance = (
                num_correct[k].flatten(),
                num_instance[k].flatten(),
            )
            for i in range(_num_correct.shape[0]):
                __num_correct, __num_instance = _num_correct[i], _num_instance[i]
                # Skip sparsely-populated cells to keep the estimate stable.
                if __num_instance >= 10:
                    x.append(__num_instance)
                    y.append(__num_correct / __num_instance)
            f_correct += sum(y) / len(x)

        return f_correct / num_cluster_repeat
    else:
        return f_correct / total
def main():
    """Train FLAC on biased ImageNet and track best unbiased / ImageNet-A accuracy."""
    opt = parse_option()
    exp_name = f"flac-imagenet-{opt.exp_name}-lr{opt.lr}-bs{opt.bs}-alpha{opt.alpha}-ratio{opt.ratio}-aug{opt.aug}-seed{opt.seed}-epochs{opt.epochs}"
    opt.exp_name = exp_name
    output_dir = f"results/{exp_name}"
    save_path = Path(output_dir)
    save_path.mkdir(parents=True, exist_ok=True)
    set_logging(exp_name, "INFO", str(save_path))
    set_seed(opt.seed)
    logging.info(f"save_path: {save_path}")
    np.set_printoptions(precision=3)
    torch.set_printoptions(precision=3)
    imagenet_path = "../../data/imagenet"
    imagenet_a_path = "../../data/imagenet-a"
    feat_root = "../imagenet_biased_feats"
    # Training data with precomputed bias features (see get_imagenet_bias_features.py).
    train_loader = get_imagenet(
        f"{imagenet_path}/train",
        f"{imagenet_path}/train",
        batch_size=opt.bs,
        train=True,
        bias_feature_root=feat_root,
        load_bias_feature=True,
        aug=1,
    )
    # "biased" and "unbiased" loaders use identical data; only the ``key``
    # passed to validate() changes the metric computed over it.
    val_loaders = {}
    val_loaders["biased"] = get_imagenet(
        f"{imagenet_path}/val",
        f"{imagenet_path}/val",
        batch_size=128,
        train=False,
        aug=False,
    )
    val_loaders["unbiased"] = get_imagenet(
        f"{imagenet_path}/val",
        f"{imagenet_path}/val",
        batch_size=128,
        train=False,
        aug=False,
    )
    val_loaders["ImageNet-A"] = get_imagenet(
        imagenet_a_path,
        imagenet_a_path,
        batch_size=128,
        train=False,
        val_data="ImageNet-A",
    )
    model, criterion = set_model()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, weight_decay=1e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.epochs)
    (save_path / "checkpoints").mkdir(parents=True, exist_ok=True)
    # Only "unbiased" and "ImageNet-A" are tracked for best-checkpoint saving.
    best_accs = pretty_dict(**{"unbiased": 0, "ImageNet-A": 0})
    best_epochs = pretty_dict(**{"unbiased": 0, "ImageNet-A": 0})
    best_stats = pretty_dict()
    start_time = time.time()
    for epoch in range(1, opt.epochs + 1):
        logging.info(
            f"[{epoch} / {opt.epochs}] Learning rate: {scheduler.get_last_lr()[0]}"
        )
        ce_loss, con_loss, loss = train(
            train_loader, model, criterion, optimizer, epoch, opt
        )
        logging.info(
            f"[{epoch} / {opt.epochs}] Loss: {loss} CE Loss: {ce_loss} Con Loss: {con_loss}"
        )
        scheduler.step()
        stats = pretty_dict()
        for key, val_loader in val_loaders.items():
            val_acc = validate(val_loader, model, key=key)
            stats[f"valid/acc_{key}"] = val_acc
        logging.info(f"[{epoch} / {opt.epochs}] current: {stats}")
        # Checkpoint whenever a tracked split reaches a new best accuracy.
        for key in best_accs.keys():
            if stats[f"valid/acc_{key}"] > best_accs[key]:
                best_accs[key] = stats[f"valid/acc_{key}"]
                best_epochs[key] = epoch
                best_stats[key] = stats
                save_file = save_path / "checkpoints" / f"best_{key}.pth"
                save_model(model, optimizer, opt, epoch, save_file)
            logging.info(
                f"[{epoch} / {opt.epochs}] best {key} accuracy: {best_accs[key]} at epoch {best_epochs[key]} \n best_stats: {best_stats[key]}"
            )
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    logging.info(f"Total training time: {total_time_str}")
    save_file = save_path / "checkpoints" / f"last.pth"
    save_model(model, optimizer, opt, opt.epochs, save_file)


if __name__ == "__main__":
    main()
| 8,376 | 29.461818 | 149 | py |
FLAC | FLAC-main/train_biased_mnist.py | import argparse
import datetime
import logging
import os
import time
from pathlib import Path
import numpy as np
import torch
from torch import nn, optim
from flac import flac_loss
from datasets.biased_mnist import get_color_mnist
from models.simple_conv import SimpleConvNet
from utils.logging import set_logging
from utils.utils import (
AverageMeter,
MultiDimAverageMeter,
accuracy,
load_model,
pretty_dict,
save_model,
set_seed,
)
def parse_option():
    """Build the biased-MNIST training CLI and pin the visible GPU."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add(
        "--exp_name",
        type=str,
        default="test",
    )
    add("--gpu", type=int, default=0)
    add(
        "--color_classifier",
        type=str,
        default="./bias_capturing_classifiers/bcc_biased_mnist.pth",
    )
    add("--print_freq", type=int, default=300, help="print frequency")
    add("--save_freq", type=int, default=200, help="save frequency")
    add("--epochs", type=int, default=80, help="number of training epochs")
    add("--seed", type=int, default=1)
    add("--corr", type=float, default=0.999)
    add("--alpha", type=float, default=1000)
    add("--bs", type=int, default=128, help="batch_size")
    add("--lr", type=float, default=1e-3)
    add("--d", type=float, default=0.5)
    add("--th", type=float, default=0.7)
    args = parser.parse_args()
    # Restrict CUDA to the requested device before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    return args
def set_model(opt):
    """Return (main model, CE criterion, frozen color bias-capturing classifier)."""
    main_net = SimpleConvNet().cuda()
    ce_criterion = nn.CrossEntropyLoss()
    # Bias-capturing classifier pre-trained to predict the color bias.
    bias_net = SimpleConvNet()
    bias_net.load_state_dict(load_model(opt.color_classifier))
    bias_net.cuda()
    return main_net, ce_criterion, bias_net
def train(train_loader, model, criterion, optimizer, protected_net, opt):
    """Run one FLAC training epoch on biased MNIST.

    Returns:
        (avg total loss, avg CE loss, avg FLAC loss) over the epoch.
    """
    model.train()
    # The bias-capturing classifier stays frozen; only used for bias features.
    protected_net.eval()
    avg_loss = AverageMeter()
    avg_clloss = AverageMeter()
    avg_miloss = AverageMeter()
    train_iter = iter(train_loader)
    for idx, (images, labels, biases, _) in enumerate(train_iter):
        bsz = labels.shape[0]
        labels, biases = labels.cuda(), biases.cuda()
        images = images.cuda()
        logits, features = model(images)
        with torch.no_grad():
            # Bias features from the frozen classifier; no gradients needed.
            pr_l, pr_feat = protected_net(images)
        # FLAC regularizer scaled by alpha; 0.5 is presumably a flac_loss
        # hyperparameter (temperature/delta) -- confirm against flac.py.
        loss_mi_div = opt.alpha * flac_loss(pr_feat, features, labels, 0.5)
        loss_cl = criterion(logits, labels)
        loss = loss_cl + loss_mi_div
        avg_loss.update(loss.item(), bsz)
        avg_clloss.update(loss_cl.item(), bsz)
        avg_miloss.update(loss_mi_div.item(), bsz)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return avg_loss.avg, avg_clloss.avg, avg_miloss.avg
def validate(val_loader, model):
    """Evaluate on ``val_loader``.

    Returns:
        (avg top-1 accuracy, unbiased accuracy over the (digit, color) grid).
    """
    model.eval()
    top1 = AverageMeter()
    groupwise = MultiDimAverageMeter(dims=(10, 10))
    with torch.no_grad():
        for images, labels, biases, _ in val_loader:
            images = images.cuda()
            labels = labels.cuda()
            biases = biases.cuda()
            n = labels.shape[0]
            output, _ = model(images)
            preds = output.data.max(1, keepdim=True)[1].squeeze(1)
            (acc1,) = accuracy(output, labels, topk=(1,))
            top1.update(acc1[0], n)
            hits = (preds == labels).long()
            # Bucket correctness by the (target digit, color bias) pair.
            groupwise.add(hits.cpu(), torch.stack([labels.cpu(), biases.cpu()], dim=1))
    return top1.avg, groupwise.get_unbiased_acc()
def main():
    """End-to-end FLAC training on biased MNIST: data, model, loop, checkpoints."""
    opt = parse_option()
    exp_name = f"flac-color_mnist_corr{opt.corr}-{opt.exp_name}-lr{opt.lr}-alpha{opt.alpha}-bs{opt.bs}-seed{opt.seed}"
    opt.exp_name = exp_name
    output_dir = f"results/{exp_name}"
    save_path = Path(output_dir)
    save_path.mkdir(parents=True, exist_ok=True)
    set_logging(exp_name, "INFO", str(save_path))
    set_seed(opt.seed)
    logging.info(f"save_path: {save_path}")
    np.set_printoptions(precision=3)
    torch.set_printoptions(precision=3)
    root = "../data/biased_mnist"
    # Training split with the requested color/label correlation (opt.corr).
    train_loader = get_color_mnist(
        root,
        batch_size=opt.bs,
        data_label_correlation=opt.corr,
        n_confusing_labels=9,
        split="train",
        seed=opt.seed,
        aug=False,
    )
    logging.info(
        f"confusion_matrix - \n original: {train_loader.dataset.confusion_matrix_org}, \n normalized: {train_loader.dataset.confusion_matrix}"
    )
    # Evaluation loaders use a weak (0.1) correlation, i.e. mostly bias-conflicting.
    val_loaders = {}
    val_loaders["valid"] = get_color_mnist(
        root,
        batch_size=256,
        data_label_correlation=0.1,
        n_confusing_labels=9,
        split="train_val",
        seed=opt.seed,
        aug=False,
    )
    val_loaders["test"] = get_color_mnist(
        root,
        batch_size=256,
        data_label_correlation=0.1,
        n_confusing_labels=9,
        split="valid",
        seed=opt.seed,
        aug=False,
    )
    model, criterion, protected_net = set_model(opt)
    # Step-decay LR at 1/3 and 2/3 of training.
    decay_epochs = [opt.epochs // 3, opt.epochs * 2 // 3]
    optimizer = optim.Adam(model.parameters(), lr=opt.lr, weight_decay=1e-4)
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=decay_epochs, gamma=0.1
    )
    logging.info(f"decay_epochs: {decay_epochs}")
    (save_path / "checkpoints").mkdir(parents=True, exist_ok=True)
    best_accs = {"valid": 0, "test": 0}
    best_epochs = {"valid": 0, "test": 0}
    best_stats = {}
    start_time = time.time()
    for epoch in range(1, opt.epochs + 1):
        logging.info(
            f"[{epoch} / {opt.epochs}] Learning rate: {scheduler.get_last_lr()[0]}"
        )
        loss, cllossp, milossp = train(
            train_loader, model, criterion, optimizer, protected_net, opt
        )
        logging.info(
            f"[{epoch} / {opt.epochs}] Loss: {loss} Loss CE: {cllossp} Loss MI: {milossp}"
        )
        scheduler.step()
        stats = pretty_dict(epoch=epoch)
        _, acc_unbiased_train = validate(train_loader, model)
        logging.info(f"/acc_unbiased_train {acc_unbiased_train.item() * 100}")
        for key, val_loader in val_loaders.items():
            _, acc_unbiased = validate(val_loader, model)
            stats[f"{key}/acc_unbiased"] = acc_unbiased.item() * 100
        logging.info(f"[{epoch} / {opt.epochs}] {stats}")
        # Checkpoint whenever a split reaches a new best unbiased accuracy.
        for tag in best_accs.keys():
            if stats[f"{tag}/acc_unbiased"] > best_accs[tag]:
                best_accs[tag] = stats[f"{tag}/acc_unbiased"]
                best_epochs[tag] = epoch
                best_stats[tag] = pretty_dict(
                    **{f"best_{tag}_{k}": v for k, v in stats.items()}
                )
                save_file = save_path / "checkpoints" / f"best_{tag}.pth"
                save_model(model, optimizer, opt, epoch, save_file)
            logging.info(
                f"[{epoch} / {opt.epochs}] best {tag} accuracy: {best_accs[tag]:.3f} at epoch {best_epochs[tag]} \n best_stats: {best_stats[tag]}"
            )
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    logging.info(f"Total training time: {total_time_str}")
    save_file = save_path / "checkpoints" / f"last.pth"
    save_model(model, optimizer, opt, opt.epochs, save_file)


if __name__ == "__main__":
    main()
| 7,471 | 30.394958 | 146 | py |
FLAC | FLAC-main/train_celeba.py | import argparse
import datetime
import logging
import os
import time
from pathlib import Path
import numpy as np
import torch
from torch import nn
from flac import flac_loss
from datasets.celeba import get_celeba
from models.resnet import ResNet18
from utils.logging import set_logging
from utils.utils import (
AverageMeter,
MultiDimAverageMeter,
accuracy,
load_model,
pretty_dict,
save_model,
set_seed,
)
from tqdm import tqdm
def parse_option():
    """Build the CelebA training CLI and pin the visible GPU."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("--exp_name", type=str, default="test")
    add("--gpu", type=int, default=0)
    add("--task", type=str, default="blonde")
    add(
        "--gender_classifier",
        type=str,
        default="./bias_capturing_classifiers/bcc_gender.pth",
    )
    add("--epochs", type=int, default=40)
    add("--seed", type=int, default=1)
    add("--bs", type=int, default=128, help="batch_size")
    add("--lr", type=float, default=1e-3)
    add("--alpha", type=float, default=20000)
    args = parser.parse_args()
    # Restrict CUDA to the requested device before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    return args
def set_model(opt):
    """Return (ImageNet-pretrained target model, CE criterion, frozen gender classifier)."""
    target_net = ResNet18(pretrained=True).cuda()
    ce_criterion = nn.CrossEntropyLoss()
    # Bias-capturing classifier pre-trained to predict the gender attribute.
    gender_net = ResNet18()
    gender_net.load_state_dict(load_model(opt.gender_classifier))
    gender_net.cuda()
    return target_net, ce_criterion, gender_net
def train(train_loader, model, criterion, optimizer, protected_net, opt):
    """Run one FLAC training epoch on CelebA.

    Returns:
        (avg total loss, avg CE loss, avg FLAC loss) over the epoch.
    """
    model.train()
    protected_net.eval()
    avg_loss = AverageMeter()
    avg_clloss = AverageMeter()
    avg_miloss = AverageMeter()
    # NOTE(review): total_b_pred/total track how often the bias classifier
    # matches the bias label, but the tallies are never returned or logged.
    total_b_pred = 0
    total = 0
    train_iter = iter(train_loader)
    for idx, (images, labels, biases, _) in enumerate(tqdm(train_iter)):
        bsz = labels.shape[0]
        labels, biases = labels.cuda(), biases.cuda()
        images = images.cuda()
        logits, features = model(images)
        with torch.no_grad():
            # Bias features/logits from the frozen classifier; no grads needed.
            pr_l, pr_feat = protected_net(images)
        predicted_race = pr_l.argmax(dim=1, keepdim=True)
        predicted_race = predicted_race.T
        total_b_pred += predicted_race.eq(biases.view_as(predicted_race)).sum().item()
        total += bsz
        # FLAC regularizer scaled by alpha; 0.5 is presumably a flac_loss
        # hyperparameter -- confirm against flac.py.
        loss_mi_div = opt.alpha * (flac_loss(pr_feat, features, labels, 0.5))
        # CE deliberately down-weighted (0.01) relative to the large-alpha FLAC term.
        loss_cl = 0.01 * criterion(logits, labels)
        loss = loss_cl + loss_mi_div
        avg_loss.update(loss.item(), bsz)
        avg_clloss.update(loss_cl.item(), bsz)
        avg_miloss.update(loss_mi_div.item(), bsz)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return avg_loss.avg, avg_clloss.avg, avg_miloss.avg
def validate(val_loader, model):
    """Evaluate on ``val_loader``.

    Returns:
        (avg top-1 accuracy, mean accuracy over the (label, bias) 2x2 grid).
    """
    model.eval()
    top1 = AverageMeter()
    groupwise = MultiDimAverageMeter(dims=(2, 2))
    with torch.no_grad():
        for images, labels, biases, _ in val_loader:
            images = images.cuda()
            labels = labels.cuda()
            biases = biases.cuda()
            n = labels.shape[0]
            output, _ = model(images)
            preds = output.data.max(1, keepdim=True)[1].squeeze(1)
            (acc1,) = accuracy(output, labels, topk=(1,))
            top1.update(acc1[0], n)
            hits = (preds == labels).long()
            # Bucket correctness by the (target label, bias attribute) pair.
            groupwise.add(hits.cpu(), torch.stack([labels.cpu(), biases.cpu()], dim=1))
    return top1.avg, groupwise.get_mean()
def main():
    """End-to-end FLAC training on CelebA (blonde/makeup vs. gender bias)."""
    opt = parse_option()
    exp_name = f"flac-celeba_{opt.task}-{opt.exp_name}-lr{opt.lr}-alpha{opt.alpha}-bs{opt.bs}-seed{opt.seed}"
    opt.exp_name = exp_name
    # Epoch budget is fixed per task, overriding the CLI value.
    if opt.task == "makeup":
        opt.epochs = 40
    elif opt.task == "blonde":
        opt.epochs = 10
    else:
        raise AttributeError()
    output_dir = f"results/{exp_name}"
    save_path = Path(output_dir)
    save_path.mkdir(parents=True, exist_ok=True)
    set_logging(exp_name, "INFO", str(save_path))
    logging.info(f"Set seed: {opt.seed}")
    set_seed(opt.seed)
    logging.info(f"save_path: {save_path}")
    np.set_printoptions(precision=3)
    torch.set_printoptions(precision=3)
    root = "../data/"
    train_loader = get_celeba(
        root,
        batch_size=opt.bs,
        target_attr=opt.task,
        split="train",
        aug=False,
    )
    val_loaders = {}
    val_loaders["valid"] = get_celeba(
        root, batch_size=256, target_attr=opt.task, split="train_valid", aug=False
    )
    val_loaders["test"] = get_celeba(
        root, batch_size=256, target_attr=opt.task, split="valid", aug=False
    )
    model, criterion, protected_net = set_model(opt)
    # Step-decay LR at 1/3 and 2/3 of training.
    decay_epochs = [opt.epochs // 3, opt.epochs * 2 // 3]
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, weight_decay=1e-4)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=decay_epochs, gamma=0.1
    )
    logging.info(f"decay_epochs: {decay_epochs}")
    (save_path / "checkpoints").mkdir(parents=True, exist_ok=True)
    best_accs = {"valid": 0, "test": 0}
    best_epochs = {"valid": 0, "test": 0}
    best_stats = {}
    start_time = time.time()
    for epoch in range(1, opt.epochs + 1):
        logging.info(
            f"[{epoch} / {opt.epochs}] Learning rate: {scheduler.get_last_lr()[0]}"
        )
        # loss = train(train_loader, model, criterion, optimizer)
        # logging.info(f'[{epoch} / {opt.epochs}] Loss: {loss:.4f}')
        loss, cllossp, milossp = train(
            train_loader, model, criterion, optimizer, protected_net, opt
        )
        logging.info(
            f"[{epoch} / {opt.epochs}] Loss: {loss} Loss CE: {cllossp} Loss MI: {milossp}"
        )
        scheduler.step()
        stats = pretty_dict(epoch=epoch)
        for key, val_loader in val_loaders.items():
            accs, valid_attrwise_accs = validate(val_loader, model)
            stats[f"{key}/acc"] = accs.item()
            stats[f"{key}/acc_unbiased"] = torch.mean(valid_attrwise_accs).item() * 100
            # Diagonal of the 2x2 grid -> "skew", off-diagonal -> "align".
            # NOTE(review): train_utk_face.py uses the opposite convention --
            # confirm which cells are bias-aligned for each dataset.
            eye_tsr = torch.eye(2)
            stats[f"{key}/acc_skew"] = (
                valid_attrwise_accs[eye_tsr > 0.0].mean().item() * 100
            )
            stats[f"{key}/acc_align"] = (
                valid_attrwise_accs[eye_tsr == 0.0].mean().item() * 100
            )
        logging.info(f"[{epoch} / {opt.epochs}] {valid_attrwise_accs} {stats}")
        # Checkpoint whenever a split reaches a new best unbiased accuracy.
        for tag in val_loaders.keys():
            if stats[f"{tag}/acc_unbiased"] > best_accs[tag]:
                best_accs[tag] = stats[f"{tag}/acc_unbiased"]
                best_epochs[tag] = epoch
                best_stats[tag] = pretty_dict(
                    **{f"best_{tag}_{k}": v for k, v in stats.items()}
                )
                save_file = save_path / "checkpoints" / f"best_{tag}.pth"
                save_model(model, optimizer, opt, epoch, save_file)
            logging.info(
                f"[{epoch} / {opt.epochs}] best {tag} accuracy: {best_accs[tag]:.3f} at epoch {best_epochs[tag]} \n best_stats: {best_stats[tag]}"
            )
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    logging.info(f"Total training time: {total_time_str}")
    save_file = save_path / "checkpoints" / f"last.pth"
    save_model(model, optimizer, opt, opt.epochs, save_file)


if __name__ == "__main__":
    main()
| 7,494 | 30.893617 | 146 | py |
FLAC | FLAC-main/train_utk_face.py | import argparse
import datetime
import logging
import os
import time
from pathlib import Path
import numpy as np
import torch
from torch import nn
from flac import flac_loss
from datasets.utk_face import get_utk_face
from models.resnet import ResNet18
from utils.logging import set_logging
from utils.utils import (
AverageMeter,
MultiDimAverageMeter,
accuracy,
load_model,
pretty_dict,
save_model,
set_seed,
)
def parse_option():
    """Build the UTKFace training CLI and pin the visible GPU."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("--exp_name", type=str, default="test")
    add("--gpu", type=int, default=0)
    add("--task", type=str, default="race")
    add("--epochs", type=int, default=20)
    add("--seed", type=int, default=1)
    add("--bs", type=int, default=128, help="batch_size")
    add("--lr", type=float, default=1e-3)
    add("--alpha", type=float, default=1000)
    args = parser.parse_args()
    # Restrict CUDA to the requested device before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    return args
def set_model(opt):
    """Return (main model, CE criterion, frozen bias-capturing classifier).

    The bias classifier checkpoint is chosen by ``opt.task``.

    Raises:
        ValueError: if ``opt.task`` is neither "race" nor "age".
    """
    model = ResNet18().cuda()
    criterion1 = nn.CrossEntropyLoss()
    protected_net = ResNet18()
    if opt.task == "race":
        protected_attr_model = "./bias_capturing_classifiers/bcc_race.pth"
    elif opt.task == "age":
        protected_attr_model = "./bias_capturing_classifiers/bcc_age.pth"
    else:
        # BUG FIX: previously an unknown task fell through and crashed with
        # an opaque NameError on ``protected_attr_model``.
        raise ValueError(f"unknown task: {opt.task!r} (expected 'race' or 'age')")
    dd = load_model(protected_attr_model)
    protected_net.load_state_dict(dd)
    protected_net.cuda()
    return model, criterion1, protected_net
def train(train_loader, model, criterion, optimizer, protected_net, opt):
    """Run one FLAC training epoch on UTKFace.

    Returns:
        (avg total loss, avg CE loss, avg FLAC loss) over the epoch.
    """
    model.train()
    # CONSISTENCY FIX: the sibling train() functions (biased-MNIST, CelebA)
    # put the frozen bias classifier in eval mode; without this, its
    # BatchNorm running stats drift every forward pass even under no_grad.
    protected_net.eval()
    avg_loss = AverageMeter()
    avg_clloss = AverageMeter()
    avg_miloss = AverageMeter()
    for images, labels, biases, _ in train_loader:
        bsz = labels.shape[0]
        labels, biases = labels.cuda(), biases.cuda()
        images = images.cuda()
        logits, features = model(images)
        with torch.no_grad():
            # Bias features from the frozen classifier; no gradients needed.
            # (Removed dead bookkeeping: the original computed an argmin-based
            # "predicted_race" accuracy tally that was never read.)
            pr_l, pr_feat = protected_net(images)
        # FLAC regularizer scaled by alpha.
        loss_mi_div = opt.alpha * (flac_loss(pr_feat, features, labels))
        # CE deliberately down-weighted (0.01) relative to the FLAC term.
        loss_cl = 0.01 * criterion(logits, labels)
        loss = loss_cl + loss_mi_div
        avg_loss.update(loss.item(), bsz)
        avg_clloss.update(loss_cl.item(), bsz)
        avg_miloss.update(loss_mi_div.item(), bsz)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return avg_loss.avg, avg_clloss.avg, avg_miloss.avg
def validate(val_loader, model):
    """Evaluate on ``val_loader``.

    Returns:
        (avg top-1 accuracy, mean accuracy over the (label, bias) 2x2 grid).
    """
    model.eval()
    top1 = AverageMeter()
    attrwise_acc_meter = MultiDimAverageMeter(dims=(2, 2))
    with torch.no_grad():
        for idx, (images, labels, biases, _) in enumerate(val_loader):
            images, labels, biases = images.cuda(), labels.cuda(), biases.cuda()
            bsz = labels.shape[0]
            output, _ = model(images)
            preds = output.data.max(1, keepdim=True)[1].squeeze(1)
            (acc1,) = accuracy(output, labels, topk=(1,))
            top1.update(acc1[0], bsz)
            corrects = (preds == labels).long()
            # Bucket correctness by the (target label, bias attribute) pair.
            attrwise_acc_meter.add(
                corrects.cpu(), torch.stack([labels.cpu(), biases.cpu()], dim=1)
            )
    return top1.avg, attrwise_acc_meter.get_mean()
def main():
    """End-to-end FLAC training on UTKFace (gender target, race/age bias)."""
    opt = parse_option()
    exp_name = f"flac-utk_face_{opt.task}-{opt.exp_name}-lr{opt.lr}-alpha{opt.alpha}-bs{opt.bs}-seed{opt.seed}"
    opt.exp_name = exp_name
    output_dir = f"results/{exp_name}"
    save_path = Path(output_dir)
    save_path.mkdir(parents=True, exist_ok=True)
    set_logging(exp_name, "INFO", str(save_path))
    logging.info(f"Set seed: {opt.seed}")
    set_seed(opt.seed)
    logging.info(f"save_path: {save_path}")
    np.set_printoptions(precision=3)
    torch.set_printoptions(precision=3)
    root = "../data/utk_face"
    train_loader = get_utk_face(
        root,
        batch_size=opt.bs,
        bias_attr=opt.task,
        split="train",
        aug=False,
    )
    val_loaders = {}
    val_loaders["valid"] = get_utk_face(
        root, batch_size=256, bias_attr=opt.task, split="valid", aug=False
    )
    val_loaders["test"] = get_utk_face(
        root, batch_size=256, bias_attr=opt.task, split="test", aug=False
    )
    model, criterion, protected_net = set_model(opt)
    # Step-decay LR at 1/3 and 2/3 of training. Note: unlike the CelebA and
    # MNIST scripts, no weight decay is used here.
    decay_epochs = [opt.epochs // 3, opt.epochs * 2 // 3]
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=decay_epochs, gamma=0.1
    )
    logging.info(f"decay_epochs: {decay_epochs}")
    (save_path / "checkpoints").mkdir(parents=True, exist_ok=True)
    best_accs = {"valid": 0, "test": 0}
    best_epochs = {"valid": 0, "test": 0}
    best_stats = {}
    start_time = time.time()
    for epoch in range(1, opt.epochs + 1):
        logging.info(
            f"[{epoch} / {opt.epochs}] Learning rate: {scheduler.get_last_lr()[0]}"
        )
        loss, cllossp, milossp = train(
            train_loader, model, criterion, optimizer, protected_net, opt
        )
        logging.info(
            f"[{epoch} / {opt.epochs}] Loss: {loss} Loss CE: {cllossp} Loss MI: {milossp}"
        )
        scheduler.step()
        stats = pretty_dict(epoch=epoch)
        for key, val_loader in val_loaders.items():
            accs, valid_attrwise_accs = validate(val_loader, model)
            stats[f"{key}/acc"] = accs.item()
            stats[f"{key}/acc_unbiased"] = torch.mean(valid_attrwise_accs).item() * 100
            # Off-diagonal of the 2x2 grid -> "skew", diagonal -> "align".
            # NOTE(review): train_celeba.py uses the opposite convention --
            # confirm which cells are bias-aligned for each dataset.
            eye_tsr = torch.eye(2)
            stats[f"{key}/acc_skew"] = (
                valid_attrwise_accs[eye_tsr == 0.0].mean().item() * 100
            )
            stats[f"{key}/acc_align"] = (
                valid_attrwise_accs[eye_tsr > 0.0].mean().item() * 100
            )
        logging.info(f"[{epoch} / {opt.epochs}] {valid_attrwise_accs} {stats}")
        # Checkpoint whenever a split reaches a new best unbiased accuracy.
        for tag in val_loaders.keys():
            if stats[f"{tag}/acc_unbiased"] > best_accs[tag]:
                best_accs[tag] = stats[f"{tag}/acc_unbiased"]
                best_epochs[tag] = epoch
                best_stats[tag] = pretty_dict(
                    **{f"best_{tag}_{k}": v for k, v in stats.items()}
                )
                save_file = save_path / "checkpoints" / f"best_{tag}.pth"
                save_model(model, optimizer, opt, epoch, save_file)
            logging.info(
                f"[{epoch} / {opt.epochs}] best {tag} accuracy: {best_accs[tag]:.3f} at epoch {best_epochs[tag]} \n best_stats: {best_stats[tag]}"
            )
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    logging.info(f"Total training time: {total_time_str}")
    save_file = save_path / "checkpoints" / f"last.pth"
    save_model(model, optimizer, opt, opt.epochs, save_file)


if __name__ == "__main__":
    main()
| 7,226 | 31.263393 | 146 | py |
FLAC | FLAC-main/get_imagenet_bias_features.py | import argparse
import os
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from sklearn.metrics.pairwise import cosine_similarity
from datasets.imagenet import get_imagenet
from models.imagenet_models import bagnet18
from utils.utils import AverageMeter, accuracy, set_seed
from tqdm import tqdm
def train_biased_model(g_net, tr_loader, n_epochs=120):
    """Train the BagNet bias-capturing classifier on (biased) ImageNet.

    Saves the final weights to ./imagenet_biased_feats/model.pth and returns
    the trained network.
    """
    g_opt = torch.optim.Adam(g_net.parameters(), lr=1e-3)
    g_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(g_opt, n_epochs)
    print(f"train_biased_model - opt: {g_opt}, sched: {g_scheduler}")
    g_net.train()
    # NOTE(review): these meters are never reset, so the per-epoch print shows
    # an accuracy averaged over all epochs so far, not just the current epoch.
    top1 = AverageMeter()
    bias_top1 = AverageMeter()
    for n in range(n_epochs):
        tr_iter = iter(tqdm(tr_loader))
        for x, y, bias, _, _ in tr_iter:
            x, y, bias = x.cuda(), y.cuda(), bias.cuda()
            N = x.size(0)
            pred, _ = g_net(x)
            loss = F.cross_entropy(pred, y)
            g_opt.zero_grad()
            loss.backward()
            g_opt.step()
            (prec1,) = accuracy(pred, y, topk=(1,))
            # Accuracy against the bias label shows how bias-aligned the net is.
            (bias_prec1,) = accuracy(pred, bias, topk=(1,))
            top1.update(prec1.item(), N)
            bias_top1.update(bias_prec1.item(), N)
        g_scheduler.step()
        print(
            f"Training biased model - Epoch: {n} acc: {top1.avg}, bias acc: {bias_top1.avg}"
        )
    torch.save(g_net.state_dict(), "./imagenet_biased_feats/model.pth")
    print(
        f"Training biased model done - final acc: {top1.avg}, bias acc: {bias_top1.avg}"
    )
    return g_net
def parse_option():
    """Build the bias-feature-extraction CLI and pin the visible GPU."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("--gpu", type=int, default=0)
    add("--seed", type=int, default=1)
    add("--bs", type=int, default=64, help="batch_size")
    add("--ckpt", action="store_true")
    args = parser.parse_args()
    # Restrict CUDA to the requested device before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    return args
def get_features(model, dataloader):
    """Run ``model`` over ``dataloader`` and return all features, row-stacked.

    Returns:
        A (num_samples, feat_dim) CPU tensor of the model's second output,
        or None when the loader is empty (matches the original behavior).
    """
    model.eval()
    with torch.no_grad():
        feats = []
        for img, _, _, _, _ in dataloader:
            # BUG FIX: the original tested ``not all_feats == None`` (an
            # equality comparison against None -- works only by accident on
            # tensors) and re-concatenated on every batch, which is O(n^2).
            # Collect batches and concatenate once instead.
            feats.append(model(img.cuda())[1].cpu())
    if not feats:
        return None
    return torch.cat(feats, 0)
def get_marginal(feats, targets, num_classes):
    """Per-sample 'bias marginal': summed cosine *distance* (1 - similarity)
    to a random reference subset of same-class samples.

    Returns:
        A 1-D float tensor aligned with the rows of ``feats``.
    """
    N_total = feats.shape[0]
    marginal = torch.zeros(N_total)
    for n in range(num_classes):
        target_feats = feats[targets == n]
        N = target_feats.shape[0]
        # ROBUSTNESS: np.random.choice(..., replace=False) raises when the
        # class has fewer than 1024 samples; cap the reference-set size.
        N_ref = min(1024, N)
        ref_idx = np.random.choice(N, N_ref, replace=False)
        ref_feats = target_feats[ref_idx]
        # Cosine distance of every class sample to each reference sample.
        mask = 1 - cosine_similarity(target_feats, ref_feats.cpu().numpy())
        marginal[targets == n] = torch.from_numpy(mask).sum(1)
    return marginal
def main():
    """Extract BagNet bias features for biased ImageNet; save feats + marginals."""
    opt = parse_option()
    set_seed(opt.seed)
    root = "./data/imagenet"
    train_loader = get_imagenet(
        f"{root}/train",
        f"{root}/train",
        batch_size=128,
        train=True,
        aug=False,
    )
    model = bagnet18(num_classes=9).cuda()
    model.cuda()
    # model = train_biased_model(model, train_loader)
    # Load the pre-trained bias-capturing classifier instead of retraining.
    model.load_state_dict(torch.load("./bias_capturing_classifiers/bcc_imagenet.pth"))
    all_feats = get_features(model, train_loader)
    # Labels pulled straight from the underlying dataset; assumes loader
    # iteration order matches dataset order (i.e. no shuffling) -- confirm.
    targets = torch.tensor([t for _, t in train_loader.dataset.dataset])
    marginal = get_marginal(all_feats, targets, 9)
    save_path = Path(f"imagenet_biased_feats/imagenet-seed{opt.seed}")
    save_path.mkdir(parents=True, exist_ok=True)
    torch.save(all_feats, save_path / "bias_feats.pt")
    print(f"Saved feats at {save_path / 'bias_feats.pt'}")
    torch.save(marginal, save_path / "marginal.pt")
    print(f"Saved marginal at {save_path / 'marginal.pt'}")


if __name__ == "__main__":
    main()
| 3,966 | 30.23622 | 92 | py |
FLAC | FLAC-main/models/imagenet_models.py | """ResNet and BagNet implementations.
original codes
- https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
- https://github.com/wielandbrendel/bag-of-local-features-models/blob/master/bagnets/pytorchnet.py
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Download URLs for pretrained checkpoints: the original BagNet release
# (bitbucket) and the standard torchvision ResNet family.
MODEL_URLS = {
    "bagnet9": "https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet8-34f4ccd2.pth.tar",
    "bagnet17": "https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet16-105524de.pth.tar",
    "bagnet33": "https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet32-2ddd53ed.pth.tar",
    "resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
    "resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
    "resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
    "resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
    "resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
    "resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
    "resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
    "wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
    "wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
class BasicBlock_(nn.Module):
    """BagNet basic block: (k x k) conv -> 1x1 conv, both unpadded.

    ``kernel_size`` is 3 for the leading block(s) of a stage and 1 otherwise,
    which keeps BagNet's receptive field small. Because the convs are
    unpadded, the identity branch is cropped to match before the residual add.
    """

    expansion = 1
    __constants__ = ["downsample"]

    def __init__(self, inplanes, planes, stride=1, downsample=None, kernel_size=1):
        super(BasicBlock_, self).__init__()
        self.conv1 = nn.Conv2d(
            inplanes, planes, kernel_size=kernel_size, stride=stride, bias=False
        )
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        if identity.size(-1) != out.size(-1):
            # Unpadded conv shrank the map; crop the identity's bottom/right
            # rows and columns so the shapes match for the residual add.
            diff = identity.size(-1) - out.size(-1)
            identity = identity[:, :, :-diff, :-diff]
        out += identity
        out = self.relu(out)
        return out
class Bottleneck_(nn.Module):
    """BagNet bottleneck block: 1x1 -> (k x k, unpadded) -> 1x1 (x4 channels).

    Like BasicBlock_, the residual/identity branch is cropped when the
    unpadded middle conv shrinks the spatial size.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, kernel_size=1):
        super(Bottleneck_, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes,
            planes,
            kernel_size=kernel_size,
            stride=stride,
            padding=0,
            bias=False,
        )  # changed padding from (kernel_size - 1) // 2
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        if residual.size(-1) != out.size(-1):
            # Crop bottom/right to match the unpadded conv output.
            diff = residual.size(-1) - out.size(-1)
            residual = residual[:, :, :-diff, :-diff]
        out += residual
        out = self.relu(out)
        return out
class BagNetDeep(nn.Module):
    """BagNet backbone: ResNet-style stages built from mostly-1x1 blocks.

    ``kernel3[i]`` says how many leading blocks of stage ``i`` use a 3x3
    kernel; remaining blocks use 1x1, bounding the receptive field.
    forward() returns (logits, L2-normalized globally-pooled features).
    """

    def __init__(
        self,
        block,
        layers,
        strides=[2, 2, 2, 1],
        kernel3=[0, 0, 0, 0],
        num_classes=1000,
        feature_pos="post",
        avg_pool=True,
    ):
        # NOTE(review): mutable default args (lists) are shared across calls;
        # harmless here because they are never mutated.
        super(BagNetDeep, self).__init__()
        self.inplanes = 64
        # feature_pos is stored but not consulted in forward() below.
        self.feature_pos = feature_pos
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=0.001)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(
            block, 64, layers[0], stride=strides[0], kernel3=kernel3[0], prefix="layer1"
        )
        self.layer2 = self._make_layer(
            block,
            128,
            layers[1],
            stride=strides[1],
            kernel3=kernel3[1],
            prefix="layer2",
        )
        self.layer3 = self._make_layer(
            block,
            256,
            layers[2],
            stride=strides[2],
            kernel3=kernel3[2],
            prefix="layer3",
        )
        self.layer4 = self._make_layer(
            block,
            512,
            layers[3],
            stride=strides[3],
            kernel3=kernel3[3],
            prefix="layer4",
        )
        # NOTE(review): self.avgpool is defined but forward() builds a fresh
        # AvgPool2d sized to the runtime feature map instead.
        self.avgpool = nn.AvgPool2d(1, stride=1)
        self.dim_in = 512 * block.expansion
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.avg_pool = avg_pool
        self.block = block
        # He-style init for convs; unit-gamma/zero-beta for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, kernel3=0, prefix=""):
        """Build one stage: ``blocks`` blocks; the first ``kernel3`` use 3x3."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection to match channels/stride on the identity branch.
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        kernel = 1 if kernel3 == 0 else 3
        layers.append(
            block(self.inplanes, planes, stride, downsample, kernel_size=kernel)
        )
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            kernel = 1 if kernel3 <= i else 3
            layers.append(block(self.inplanes, planes, kernel_size=kernel))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Global average pool over whatever spatial size remains.
        x_ = nn.AvgPool2d(x.size()[2], stride=1)(x)
        feat = x_.view(x_.size(0), -1)
        x = self.fc(feat)
        # Features are L2-normalized; logits use the pre-normalization features.
        feat = F.normalize(feat, dim=1)
        return x, feat

    def get_features(self, x, layer=2):
        """Return the activation after stage ``layer`` (1-4); if ``layer`` is
        outside that range, fall through and return the logits."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.bn1(x)
        x = self.relu(x)
        for i in range(1, 5):
            # NOTE(review): eval() works but getattr(self, f"layer{i}") would
            # be safer and clearer.
            x = eval(f"self.layer{i}")(x)
            if i == layer:
                return x
        x_ = nn.AvgPool2d(x.size()[2], stride=1)(x)
        x = x_.view(x_.size(0), -1)
        x = self.fc(x)
        return x
def bagnet18(feature_pos="post", num_classes=1000, rf=43):
    """Build a BagNet-18: ResNet-18 layout (2-2-2-2 BasicBlock_ stages) with a
    single 3x3 block in stage 1 and 1x1 kernels elsewhere.

    ``rf`` is accepted for API compatibility but is not used.
    """
    return BagNetDeep(
        BasicBlock_,
        [2, 2, 2, 2],
        strides=[2, 2, 2, 1],
        kernel3=[1, 0, 0, 0],
        num_classes=num_classes,
        feature_pos=feature_pos,
    )
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    conv_kwargs = dict(
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    conv = nn.Conv2d(
        in_planes, out_planes, kernel_size=1, stride=stride, bias=False
    )
    return conv
class BasicBlock(nn.Module):
    """Standard ResNet basic block (3x3 -> 3x3) with optional feature return.

    When ``return_feature`` is True, forward() also returns the pre-residual
    BN output alongside the activated block output.
    """

    expansion = 1
    __constants__ = ["downsample"]

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None,
        return_feature=False,
    ):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        self.return_feature = return_feature

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        # ``feat`` is the second BN output before the residual add/ReLU.
        feat = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out = feat + identity
        out = self.relu(out)
        if self.return_feature:
            return out, feat
        else:
            return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block; output channels are
    planes * expansion (4)."""

    expansion = 4
    __constants__ = ["downsample"]

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None,
    ):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        # conv2 (and downsample, if any) carry the stride
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h += shortcut
        return self.relu(h)
class ResNet(nn.Module):
    """ResNet backbone returning (logits, l2-normalized pooled features).

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks per stage (4 stages).
        feature_mode: if True, the last block of every stage also returns its
            pre-residual feature map, consumed by `get_features`.
        num_classes: classification head size.
        feature_pos / rf: stored / accepted for interface compatibility; not
            used inside this class — TODO confirm callers.

    Fix: `get_features` used eval() for the dynamic layer lookup; replaced
    with getattr (same behavior, no arbitrary-code evaluation).
    """

    def __init__(
        self,
        block,
        layers,
        feature_mode=False,
        num_classes=1000,
        feature_pos="post",
        zero_init_residual=False,
        groups=1,
        width_per_group=64,
        replace_stride_with_dilation=None,
        norm_layer=None,
        rf=None,
    ):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.feature_pos = feature_pos
        self.feature_mode = feature_mode
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(
            3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(
            block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
        )
        self.layer3 = self._make_layer(
            block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
        )
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
        )
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one stage; when `dilate`, trade the stride for dilation."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes,
                planes,
                stride,
                downsample,
                self.groups,
                self.base_width,
                previous_dilation,
                norm_layer,
            )
        )
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            last_block = i == blocks - 1
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    # only the stage's last block reports its feature map
                    return_feature=last_block and self.feature_mode,
                    norm_layer=norm_layer,
                )
            )
        return nn.Sequential(*layers)

    def forward(self, x, b=None):
        # `b` is unused; kept for signature compatibility with existing callers
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x_ = self.avgpool(x)
        x_ = torch.flatten(x_, 1)
        x = self.fc(x_)
        feat = F.normalize(x_, dim=1)
        return x, feat

    def get_features(self, x, layer=2):
        """Return stage `layer`'s pre-residual feature map (requires
        feature_mode); with `layer` outside 1-4, fall through to the logits."""
        assert self.feature_mode
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        for i in range(1, 5):
            # getattr instead of eval: same lookup, no code evaluation
            x, feat = getattr(self, f"layer{i}")(x)
            if i == layer:
                return feat
        x_ = self.avgpool(x)
        x_ = torch.flatten(x_, 1)
        x = self.fc(x_)
        return x
def _resnet(
    arch, block, layers, pretrained, progress, rf, num_classes, feature_pos, **kwargs
):
    """Instantiate a ResNet and optionally load pretrained weights (non-strict,
    so extra/missing keys are tolerated)."""
    net = ResNet(
        block, layers, rf=rf, num_classes=num_classes, feature_pos=feature_pos, **kwargs
    )
    if pretrained:
        weights = load_state_dict_from_url(MODEL_URLS[arch], progress=progress)
        net.load_state_dict(weights, strict=False)
    return net
def resnet18(
    feature_pos="post",
    num_classes=1000,
    rf=43,
    pretrained=False,
    progress=True,
    **kwargs,
):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    depths = [2, 2, 2, 2]
    return _resnet(
        "resnet18",
        BasicBlock,
        depths,
        pretrained,
        progress,
        rf,
        num_classes,
        feature_pos,
        **kwargs,
    )
| 17,605 | 30.161062 | 159 | py |
FLAC | FLAC-main/models/resnet.py | import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet18
class ResNet18(nn.Module):
    """ResNet-18 wrapper returning (logits, l2-normalized features).

    If `model` is given, its trunk (all children except the final fc) and its
    own fc head are reused; otherwise a torchvision resnet18 is built and a
    fresh `nn.Linear(512, num_classes)` head is attached.

    Fix: the original tested `model == None`; identity comparison (`is None`)
    is the correct check and avoids invoking Module.__eq__.
    """

    def __init__(self, num_classes=2, pretrained=True, model=None):
        super().__init__()
        reuse_head = model is not None
        if not reuse_head:
            model = resnet18(pretrained=pretrained)
        # trunk = everything but the classification head; registration order
        # (extractor first, fc second) is preserved so children()[:-1] on an
        # instance of this class still drops only the head
        modules = list(model.children())[:-1]
        self.extractor = nn.Sequential(*modules)
        self.embed_size = 512
        self.num_classes = num_classes
        self.fc = model.fc if reuse_head else nn.Linear(self.embed_size, num_classes)
        print(f"ResNet18 - num_classes: {num_classes} pretrained: {pretrained}")

    def forward(self, x):
        out = self.extractor(x)
        out = out.squeeze(-1).squeeze(-1)
        logits = self.fc(out)
        feat = F.normalize(out, dim=1)
        return logits, feat
| 1,043 | 32.677419 | 80 | py |
FLAC | FLAC-main/models/simple_conv.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleConvNet(nn.Module):
    """Four conv-BN-ReLU stages (3->16->32->64->128) + global average pool +
    10-way linear head; forward returns (logits, l2-normalized features)."""

    def __init__(self, kernel_size=7, **kwargs):
        super(SimpleConvNet, self).__init__()
        pad = kernel_size // 2
        channels = [3, 16, 32, 64, 128]
        stages = []
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            stages += [
                nn.Conv2d(c_in, c_out, kernel_size=kernel_size, padding=pad),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            ]
        self.extracter = nn.Sequential(*stages)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(128, 10)
        self.dim_in = 128
        print(f"SimpleConvNet: kernel_size {kernel_size}")
        # kaiming init for convs, constant init for norm layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def get_last_shared_layer(self):
        return self.fc

    def forward(self, x):
        feat = torch.flatten(self.avgpool(self.extracter(x)), 1)
        logits = self.fc(feat)
        return logits, F.normalize(feat, dim=1)

    def get_feature(self, x):
        return torch.flatten(self.avgpool(self.extracter(x)), 1)
| 1,805 | 31.836364 | 86 | py |
FLAC | FLAC-main/models/bagnets/utils.py | import numpy as np
import matplotlib.pyplot as plt
from skimage import feature, transform
def plot_heatmap(heatmap, original, ax, cmap='RdBu_r',
                 percentile=99, dilation=0.5, alpha=0.25):
    """
    Plots the heatmap on top of the original image
    (which is shown by most important edges).
    Parameters
    ----------
    heatmap : Numpy Array of shape [X, X]
        Heatmap to visualise.
    original : Numpy array of shape [X, X, 3]
        Original image for which the heatmap was computed.
    ax : Matplotlib axis
        Axis onto which the heatmap should be plotted.
    cmap : Matplotlib color map
        Color map for the visualisation of the heatmaps (default: RdBu_r)
    percentile : float between 0 and 100 (default: 99)
        Extreme values outside of the percentile range are clipped.
        This avoids that a single outlier dominates the whole heatmap.
    dilation : float
        Resizing of the original image. Influences the edge detector and
        thus the image overlay.
    alpha : float in [0, 1]
        Opacity of the overlay image.
    """
    # multi-channel heatmaps are collapsed to one channel by averaging
    if len(heatmap.shape) == 3:
        heatmap = np.mean(heatmap, 0)
    # dense coordinate grid only used to derive the imshow extent
    dx, dy = 0.05, 0.05
    xx = np.arange(0.0, heatmap.shape[1], dx)
    yy = np.arange(0.0, heatmap.shape[0], dy)
    xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
    extent = xmin, xmax, ymin, ymax
    # NaN pixels of the edge overlay are rendered fully transparent
    cmap_original = plt.get_cmap('Greys_r')
    cmap_original.set_bad(alpha=0)
    overlay = None
    if original is not None:
        # Compute edges (to overlay to heatmaps later)
        original_greyscale = original if len(original.shape) == 2 else np.mean(original, axis=-1)
        in_image_upscaled = transform.rescale(original_greyscale, dilation, mode='constant',
                                              multichannel=False, anti_aliasing=True)
        edges = feature.canny(in_image_upscaled).astype(float)
        # non-edge pixels and a 5px border become NaN (transparent)
        edges[edges < 0.5] = np.nan
        edges[:5, :] = np.nan
        edges[-5:, :] = np.nan
        edges[:, :5] = np.nan
        edges[:, -5:] = np.nan
        overlay = edges
    # symmetric color range centered at zero, clipped at `percentile`
    abs_max = np.percentile(np.abs(heatmap), percentile)
    abs_min = abs_max
    ax.imshow(heatmap, extent=extent, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)
    if overlay is not None:
        ax.imshow(overlay, extent=extent, interpolation='none', cmap=cmap_original, alpha=alpha)
def generate_heatmap_pytorch(model, image, target, patchsize):
    """
    Generates high-resolution heatmap for a BagNet by decomposing the
    image into all possible patches and by computing the logits for
    each patch.
    Parameters
    ----------
    model : Pytorch Model
        This should be one of the BagNets.
    image : Numpy array of shape [1, 3, X, X]
        The image for which we want to compute the heatmap.
    target : int
        Class for which the heatmap is computed.
    patchsize : int
        The size of the receptive field of the given BagNet.
    """
    import torch
    with torch.no_grad():
        # pad with zeros
        _, c, x, y = image.shape
        padded_image = np.zeros((c, x + patchsize - 1, y + patchsize - 1))
        padded_image[:, (patchsize-1)//2:(patchsize-1)//2 + x, (patchsize-1)//2:(patchsize-1)//2 + y] = image[0]
        image = padded_image[None].astype(np.float32)
        # turn to torch tensor; requires a CUDA device
        input = torch.from_numpy(image).cuda()
        # extract patches: one patchsize x patchsize window per pixel (stride 1)
        patches = input.permute(0, 2, 3, 1)
        patches = patches.unfold(1, patchsize, 1).unfold(2, patchsize, 1)
        num_rows = patches.shape[1]
        num_cols = patches.shape[2]
        patches = patches.contiguous().view((-1, 3, patchsize, patchsize))
        # compute logits for each patch, batched to bound GPU memory
        logits_list = []
        for batch_patches in torch.split(patches, 1000):
            logits = model(batch_patches)
            # keep only the logit of the requested target class
            logits = logits[:, target][:, 0]
            logits_list.append(logits.data.cpu().numpy().copy())
        logits = np.hstack(logits_list)
return logits.reshape((224, 224)) | 4,133 | 36.926606 | 112 | py |
FLAC | FLAC-main/models/bagnets/pytorchnet.py | import torch.nn as nn
import math
import torch
from collections import OrderedDict
from torch.utils import model_zoo
import torch.nn.functional as F
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
__all__ = ['bagnet9', 'bagnet17', 'bagnet33']
model_urls = {
'bagnet9': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet8-34f4ccd2.pth.tar',
'bagnet17': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet16-105524de.pth.tar',
'bagnet33': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet32-2ddd53ed.pth.tar',
}
class Bottleneck(nn.Module):
    """BagNet bottleneck: 1x1 -> kxk (valid, no padding) -> 1x1.  The shortcut
    is cropped when the valid convolution shrinks the spatial size."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, kernel_size=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # padding=0 on purpose: BagNet restricts the receptive field with valid convs
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size, stride=stride,
                               padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x, **kwargs):
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        # valid conv may shrink the map; crop the shortcut's bottom/right to match
        if shortcut.size(-1) != h.size(-1):
            crop = shortcut.size(-1) - h.size(-1)
            shortcut = shortcut[:, :, :-crop, :-crop]
        h += shortcut
        return self.relu(h)
class BagNet(nn.Module):
    """BagNet trunk: a ResNet-like stack built from valid (unpadded) convs so
    each output location sees only a small image patch.  forward returns
    (logits, l2-normalized globally-pooled features)."""
    def __init__(self, block, layers, strides=[1, 2, 2, 2], kernel3=[0, 0, 0, 0], num_classes=1000, avg_pool=True):
        # inplanes must be set before _make_layer is called below
        self.inplanes = 64
        super(BagNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, stride=1, padding=0,
                               bias=False)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=0.001)
        self.relu = nn.ReLU(inplace=True)
        # kernel3[i] = how many leading blocks of stage i use 3x3 convs
        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], kernel3=kernel3[0], prefix='layer1')
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], kernel3=kernel3[1], prefix='layer2')
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], kernel3=kernel3[2], prefix='layer3')
        self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], kernel3=kernel3[3], prefix='layer4')
        # NOTE: forward pools with a size computed at runtime, not this module
        self.avgpool = nn.AvgPool2d(1, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.avg_pool = avg_pool
        self.block = block
        # He-style init for convs; BN starts as identity (weight 1, bias 0)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, kernel3=0, prefix=''):
        """Stack `blocks` units; the first `kernel3` units use 3x3 convs."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        kernel = 1 if kernel3 == 0 else 3
        layers.append(block(self.inplanes, planes, stride, downsample, kernel_size=kernel))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            kernel = 1 if kernel3 <= i else 3
            layers.append(block(self.inplanes, planes, kernel_size=kernel))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # global average pool over the remaining spatial extent
        x = nn.AvgPool2d(x.size()[2], stride=1)(x)
        feat = x.view(x.size(0), -1)
        x = self.fc(feat)
        feat = F.normalize(feat, dim=1)
        return x, feat
def bagnet33(pretrained=False, strides=(2, 2, 2, 1), **kwargs):
    """Constructs a Bagnet-33 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        strides: per-stage strides (fix: immutable tuple default instead of a
            shared mutable list).
    """
    model = BagNet(Bottleneck, [3, 4, 6, 3], strides=strides, kernel3=[1, 1, 1, 1], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['bagnet33']))
    return model
def bagnet17(pretrained=False, strides=(2, 2, 2, 1), **kwargs):
    """Constructs a Bagnet-17 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        strides: per-stage strides (fix: immutable tuple default instead of a
            shared mutable list).
    """
    model = BagNet(Bottleneck, [3, 4, 6, 3], strides=strides, kernel3=[1, 1, 1, 0], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['bagnet17']))
    return model
def bagnet9(pretrained=False, strides=(2, 2, 2, 1), **kwargs):
    """Constructs a Bagnet-9 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        strides: per-stage strides (fix: immutable tuple default instead of a
            shared mutable list).
    """
    model = BagNet(Bottleneck, [3, 4, 6, 3], strides=strides, kernel3=[1, 1, 0, 0], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['bagnet9']))
    return model
| 6,272 | 37.722222 | 167 | py |
FLAC | FLAC-main/models/bagnets/kerasnet.py | import keras
from keras.models import load_model
__all__ = ['bagnet9', 'bagnet17', 'bagnet33']
model_urls = {
'bagnet9': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/d413271344758455ac086992beb579e256447839/bagnet8.h5',
'bagnet17': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/d413271344758455ac086992beb579e256447839/bagnet16.h5',
'bagnet33': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/d413271344758455ac086992beb579e256447839/bagnet32.h5',
}
def bagnet9():
    """Download (if needed) and load the pretrained Keras BagNet-9 model."""
    weights = keras.utils.get_file(
        'bagnet8.h5',
        model_urls['bagnet9'],
        cache_subdir='models',
        file_hash='5b70adc7c4ff77d932dbba485a5ea1d333a65e777a45511010f22e304a2fdd69')
    return load_model(weights)
def bagnet17():
    """Download (if needed) and load the pretrained Keras BagNet-17 model."""
    weights = keras.utils.get_file(
        'bagnet16.h5',
        model_urls['bagnet17'],
        cache_subdir='models',
        file_hash='b262dfee15a86c91e6aa21bfd86505ecd20a539f7f7c72439d5b1d352dd98a1d')
    return load_model(weights)
def bagnet33():
    """Download (if needed) and load the pretrained Keras BagNet-33 model."""
    weights = keras.utils.get_file(
        'bagnet32.h5',
        model_urls['bagnet33'],
        cache_subdir='models',
        file_hash='96d8842eec8b8ce5b3bc6a5f4ff3c8c0278df3722c12bc84408e1487811f8f0f')
    return load_model(weights)
| 1,466 | 37.605263 | 154 | py |
FLAC | FLAC-main/datasets/utk_face.py | import logging
import os
import pickle
from pathlib import Path
import PIL
import numpy as np
import torch
import torch.utils.data
from datasets.utils import TwoCropTransform, get_confusion_matrix
from torch.utils.data.sampler import WeightedRandomSampler
from torchvision import transforms
class UTKFace:
    """UTKFace images with a binary target parsed from the filename.

    Filenames encode four underscore-separated fields; malformed names (not
    exactly four fields) fall back to a default raw value.

    t="age":  target 0 if the age field >= 15, else 1 (malformed -> 15 -> 0)
    t="race": target 0 if the race field == 0, else 1 (malformed -> 10 -> 1)

    Fix: an unsupported `t` previously left `target` unbound and raised
    NameError inside __getitem__; now rejected up front with ValueError.
    """

    def __init__(self, root, transform, t, **kwargs):
        if t not in ("age", "race"):
            raise ValueError(f"unsupported target attribute: {t!r}")
        self.root = Path(root) / "images"
        self.filenames = os.listdir(self.root)
        self.transform = transform
        self.t = t

    def __getitem__(self, index):
        filename = self.filenames[index]
        X = PIL.Image.open(os.path.join(self.root, filename))
        fields = filename.split("_")
        if self.t == "age":
            raw = int(fields[0]) if len(fields) == 4 else 15
            target = 0 if raw >= 15 else 1
        else:  # self.t == "race"
            raw = int(fields[2]) if len(fields) == 4 else 10
            target = 1 if raw > 0 else 0
        if self.transform is not None:
            X = self.transform(X)
        return X, target

    def __len__(self):
        return len(self.filenames)
class BiasedUTKFace:
    """UTKFace split with a controlled spurious correlation between the target
    attribute (fixed to gender) and a bias attribute (race or age).

    On first use the biased split is built from the raw images and cached as
    pickles under <root>/pickles; later runs load the cache.  Each item is
    (image, target, bias, index).

    NOTE(review): when no cache exists yet, the valid/test subsetting branch
    is not executed on that first run (it is nested in the cache-hit branch),
    so split="valid"/"test" then returns the full test set — confirm intended.
    """
    def __init__(
        self, root, transform, split, bias_attr="race", bias_rate=0.9, **kwargs
    ):
        self.root = Path(root) / "images"
        filenames = np.array(os.listdir(self.root))
        # shuffle before the 80/20 train/test partition below
        np.random.shuffle(filenames)
        num_files = len(filenames)
        num_train = int(num_files * 0.8)
        # target attribute is hard-coded to gender
        target_attr = "gender"
        self.transform = transform
        self.target_attr = target_attr
        self.bias_rate = bias_rate
        self.bias_attr = bias_attr
        self.train = split == "train"
        # cache directory is keyed by target/bias attributes and bias rate
        save_path = (
            Path(root)
            / "pickles"
            / f"biased_utk_face-target_{target_attr}-bias_{bias_attr}-{bias_rate}"
        )
        if save_path.is_dir():
            print(f"use existing biased_utk_face from {save_path}")
            data_split = "train" if self.train else "test"
            self.files, self.targets, self.bias_targets = pickle.load(
                open(save_path / f"{data_split}_dataset.pkl", "rb")
            )
            if split in ["valid", "test"]:
                # a fixed random permutation (cached on disk) splits the held-out
                # set 50/50 into valid and test
                save_path = Path(f"../clusters/utk_face_rand_indices_{bias_attr}.pkl")
                if not save_path.exists():
                    rand_indices = torch.randperm(len(self.targets))
                    pickle.dump(rand_indices, open(save_path, "wb"))
                else:
                    rand_indices = pickle.load(open(save_path, "rb"))
                num_total = len(rand_indices)
                num_valid = int(0.5 * num_total)
                if split == "valid":
                    indices = rand_indices[:num_valid]
                elif split == "test":
                    indices = rand_indices[num_valid:]
                indices = indices.numpy()
                self.files = self.files[indices]
                self.targets = self.targets[indices]
                self.bias_targets = self.bias_targets[indices]
        else:
            # first run: build biased train split and unbiased test split, cache both
            train_dataset = self.build(filenames[:num_train], train=True)
            test_dataset = self.build(filenames[num_train:], train=False)
            print(f"save biased_utk_face to {save_path}")
            save_path.mkdir(parents=True, exist_ok=True)
            pickle.dump(train_dataset, open(save_path / f"train_dataset.pkl", "wb"))
            pickle.dump(test_dataset, open(save_path / f"test_dataset.pkl", "wb"))
            self.files, self.targets, self.bias_targets = (
                train_dataset if self.train else test_dataset
            )
        self.targets, self.bias_targets = (
            torch.from_numpy(self.targets).long(),
            torch.from_numpy(self.bias_targets).long(),
        )
        (
            self.confusion_matrix_org,
            self.confusion_matrix,
            self.confusion_matrix_by,
        ) = get_confusion_matrix(
            num_classes=2, targets=self.targets, biases=self.bias_targets
        )
        print(f"Use BiasedUTKFace - target_attr: {target_attr}")
        print(
            f"BiasedUTKFace -- total: {len(self.files)}, target_attr: {self.target_attr}, bias_attr: {self.bias_attr} "
            f"bias_rate: {self.bias_rate}"
        )
        print(
            [
                f"[{split}] target_{i}-bias_{j}: {sum((self.targets == i) & (self.bias_targets == j))}"
                for i in (0, 1)
                for j in (0, 1)
            ]
        )
    def build(self, filenames, train=False):
        """Assemble (files, targets, bias_targets): for training, keep all
        bias-aligned samples and only (1 - bias_rate) of the conflicting ones."""
        # per attribute: (filename field index, filter for class 0, filter for class 1)
        attr_dict = {
            "age": (
                0,
                lambda x: x >= 20,
                lambda x: x <= 10,
            ),
            "gender": (1, lambda x: x == 0, lambda x: x == 1),
            "race": (2, lambda x: x == 0, lambda x: x != 0),
        }
        assert self.target_attr in attr_dict.keys()
        target_cls_idx, *target_filters = attr_dict[self.target_attr]
        bias_cls_idx, *bias_filters = attr_dict[self.bias_attr]
        target_classes = self.get_class_from_filename(filenames, target_cls_idx)
        bias_classes = self.get_class_from_filename(filenames, bias_cls_idx)
        total_files = []
        total_targets = []
        total_bias_targets = []
        for i in (0, 1):
            # major: target class == bias class (aligned); minor: conflicting
            major_idx = np.where(
                target_filters[i](target_classes) & bias_filters[i](bias_classes)
            )[0]
            minor_idx = np.where(
                target_filters[1 - i](target_classes) & bias_filters[i](bias_classes)
            )[0]
            np.random.shuffle(minor_idx)
            num_major = major_idx.shape[0]
            num_minor_org = minor_idx.shape[0]
            if train:
                # subsample conflicting examples to enforce the bias rate
                num_minor = int(num_major * (1 - self.bias_rate))
            else:
                num_minor = minor_idx.shape[0]
            num_minor = min(num_minor, num_minor_org)
            num_total = num_major + num_minor
            majors = filenames[major_idx]
            minors = filenames[minor_idx][:num_minor]
            total_files.append(np.concatenate((majors, minors)))
            total_bias_targets.append(np.ones(num_total) * i)
            total_targets.append(
                np.concatenate((np.ones(num_major) * i, np.ones(num_minor) * (1 - i)))
            )
        files = np.concatenate(total_files)
        targets = np.concatenate(total_targets)
        bias_targets = np.concatenate(total_bias_targets)
        return files, targets, bias_targets
    def get_class_from_filename(self, filenames, cls_idx):
        """Parse field `cls_idx` from each filename; malformed names -> 10."""
        return np.array(
            [
                int(fname.split("_")[cls_idx]) if len(fname.split("_")) == 4 else 10
                for fname in filenames
            ]
        )
    def __getitem__(self, index):
        # returns (image, target, bias, index)
        filename, target, bias = (
            self.files[index],
            int(self.targets[index]),
            int(self.bias_targets[index]),
        )
        X = PIL.Image.open(os.path.join(self.root, filename))
        if self.transform is not None:
            X = self.transform(X)
        return X, target, bias, index
    def __len__(self):
        return len(self.files)
def get_utk_face(
    root,
    batch_size,
    split,
    bias_attr="race",
    bias_rate=0.9,
    num_workers=8,
    aug=False,
    image_size=64,
    two_crop=False,
    ratio=0,
    given_y=True,
):
    """Build a DataLoader over BiasedUTKFace.

    aug: use the heavy (SimCLR-style) augmentation pipeline for training.
    two_crop: wrap the transform so each item yields two augmented views
        (also forces drop_last).
    ratio: if nonzero, use a WeightedRandomSampler that upweights rare
        (target, bias) pairs; positive values additionally clip the weights.
    given_y: choose which confusion matrix indexes the weights
        (bias-given-target vs target-given-bias).
    """
    logging.info(
        f"get_utk_face - split: {split}, aug: {aug}, given_y: {given_y}, ratio: {ratio}"
    )
    # resize-to / center-crop sizes per final image size (eval path only)
    size_dict = {64: 72, 128: 144, 224: 256}
    load_size = size_dict[image_size]
    train = split == "train"
    if train:
        if aug:
            transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(size=image_size, scale=(0.2, 1.0)),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomApply(
                        [transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8
                    ),
                    transforms.RandomGrayscale(p=0.2),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                ]
            )
        else:
            transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(image_size),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
                    ),
                ]
            )
    else:
        transform = transforms.Compose(
            [
                transforms.Resize(load_size),
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
                ),
            ]
        )
    if two_crop:
        transform = TwoCropTransform(transform)
    dataset = BiasedUTKFace(
        root, transform=transform, split=split, bias_rate=bias_rate, bias_attr=bias_attr
    )
    def clip_max_ratio(score):
        # cap sample weights at `ratio` times the smallest weight
        upper_bd = score.min() * ratio
        return np.clip(score, None, upper_bd)
    if ratio != 0:
        # inverse-frequency weights over (target, bias) cells
        if given_y:
            weights = [
                1 / dataset.confusion_matrix_by[c, b]
                for c, b in zip(dataset.targets, dataset.bias_targets)
            ]
        else:
            weights = [
                1 / dataset.confusion_matrix[b, c]
                for c, b in zip(dataset.targets, dataset.bias_targets)
            ]
        if ratio > 0:
            weights = clip_max_ratio(np.array(weights))
        sampler = WeightedRandomSampler(weights, len(weights), replacement=True)
    else:
        sampler = None
    dataloader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True if sampler is None else False,
        sampler=sampler,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=two_crop,
    )
    return dataloader
def get_org_utk_face(
    root,
    batch_size,
    num_workers=8,
    split="train",
    aug=False,
    image_size=64,
    two_crop=False,
    ratio=0,
    given_y=True,
):
    """Build a DataLoader over the original (unbiased) UTKFace dataset with
    target attribute `t`.

    NOTE(review): the ratio != 0 branch reads dataset.confusion_matrix_by /
    confusion_matrix / targets / bias_targets, none of which the UTKFace class
    defines — that path would raise AttributeError.  Confirm it is never used
    with ratio != 0, or compute those attributes in UTKFace.
    """
    logging.info(
        f"get_utk_face - split: {split}, aug: {aug}, given_y: {given_y}, ratio: {ratio}"
    )
    # resize-to / center-crop sizes per final image size (eval path only)
    size_dict = {64: 72, 128: 144, 224: 256}
    load_size = size_dict[image_size]
    train = split == "train"
    if train:
        if aug:
            transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(size=image_size, scale=(0.2, 1.0)),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomApply(
                        [transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8
                    ),
                    transforms.RandomGrayscale(p=0.2),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                ]
            )
        else:
            transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(image_size),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
                    ),
                ]
            )
    else:
        transform = transforms.Compose(
            [
                transforms.Resize(load_size),
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
                ),
            ]
        )
    if two_crop:
        transform = TwoCropTransform(transform)
    dataset = UTKFace(root, transform=transform, t=t)
    def clip_max_ratio(score):
        # cap sample weights at `ratio` times the smallest weight
        upper_bd = score.min() * ratio
        return np.clip(score, None, upper_bd)
    if ratio != 0:
        if given_y:
            weights = [
                1 / dataset.confusion_matrix_by[c, b]
                for c, b in zip(dataset.targets, dataset.bias_targets)
            ]
        else:
            weights = [
                1 / dataset.confusion_matrix[b, c]
                for c, b in zip(dataset.targets, dataset.bias_targets)
            ]
        if ratio > 0:
            weights = clip_max_ratio(np.array(weights))
        sampler = WeightedRandomSampler(weights, len(weights), replacement=True)
    else:
        sampler = None
    dataloader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True if sampler is None else False,
        sampler=sampler,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=two_crop,
    )
    return dataloader
| 13,188 | 31.168293 | 119 | py |
FLAC | FLAC-main/datasets/utils.py | import torch
class TwoCropTransform:
    """Wrap a transform so that each call produces two independent
    augmentations of the same input (a two-view list)."""

    def __init__(self, transform):
        self.transform = transform

    def __call__(self, x):
        first = self.transform(x)
        second = self.transform(x)
        return [first, second]
def get_confusion_matrix(num_classes, targets, biases):
    """Count (bias, target) co-occurrences and return
    (raw counts [bias, target],
     row-normalized counts [bias, target],
     row-normalized counts [target, bias])."""
    counts_bt = torch.zeros(num_classes, num_classes)  # indexed [bias, target]
    counts_tb = torch.zeros(num_classes, num_classes)  # indexed [target, bias]
    for tgt, bias in zip(targets, biases):
        counts_bt[bias.long(), tgt.long()] += 1
        counts_tb[tgt.long(), bias.long()] += 1
    row_norm_bt = counts_bt / counts_bt.sum(1).unsqueeze(1)
    row_norm_tb = counts_tb / counts_tb.sum(1).unsqueeze(1)
    return counts_bt, row_norm_bt, row_norm_tb
def get_unsup_confusion_matrix(num_classes, targets, biases, marginals):
    """Average `marginals` per (bias, target) cell, invert the averages
    (empty cells become 0), and return (inverted matrix, its row-normalized
    version)."""
    weight_sum = torch.zeros(num_classes, num_classes).float()
    pair_count = torch.zeros(num_classes, num_classes).float()
    for tgt, bias, m in zip(targets, biases, marginals):
        weight_sum[bias.long(), tgt.long()] += m
        pair_count[bias.long(), tgt.long()] += 1
    empty = weight_sum == 0
    pair_count[pair_count == 0] = 1  # avoid division by zero for empty cells
    avg = weight_sum / pair_count
    avg[empty] = 1  # placeholder so the inversion below is defined
    inverted = 1 / avg
    inverted[empty] = 0  # empty cells carry zero weight
    normalized = inverted / inverted.sum(1).unsqueeze(1)
    return inverted, normalized
| 1,814 | 38.456522 | 86 | py |
FLAC | FLAC-main/datasets/celeba.py | import logging
import pickle
from pathlib import Path
import numpy as np
import torch
from datasets.utils import TwoCropTransform, get_confusion_matrix
from torch.utils.data import WeightedRandomSampler
from torch.utils.data.dataloader import DataLoader
from torchvision import transforms as T
from torchvision.datasets.celeba import CelebA
class BiasedCelebASplit:
    """CelebA wrapper exposing (image, target, bias, index) items.

    target_attr selects the label column ("blonde" -> attr 9, "makeup" ->
    attr 18); the bias is attr column 20 — presumably the gender attribute in
    CelebA's attribute list, TODO confirm against the dataset docs.  Index
    subsets and random train/train_valid splits are cached on disk as pickles.
    """
    def __init__(self, root, split, transform, target_attr, **kwargs):
        self.transform = transform
        self.target_attr = target_attr
        self.celeba = CelebA(
            root=root,
            # download = True,
            split="train" if split == "train_valid" else split,
            target_type="attr",
            transform=transform,
        )
        # bias attribute column (see class docstring)
        self.bias_idx = 20
        if target_attr == "blonde":
            self.target_idx = 9
            if split in ["train", "train_valid"]:
                # cached subsampling of the (bias=0, target=0) group
                save_path = Path(root) / "pickles" / "blonde"
                if save_path.is_dir():
                    print(f"use existing blonde indices from {save_path}")
                    self.indices = pickle.load(open(save_path / "indices.pkl", "rb"))
                else:
                    self.indices = self.build_blonde()
                    print(f"save blonde indices to {save_path}")
                    save_path.mkdir(parents=True, exist_ok=True)
                    pickle.dump(self.indices, open(save_path / f"indices.pkl", "wb"))
                self.attr = self.celeba.attr[self.indices]
            else:
                self.attr = self.celeba.attr
                self.indices = torch.arange(len(self.celeba))
        elif target_attr == "makeup":
            self.target_idx = 18
            self.attr = self.celeba.attr
            self.indices = torch.arange(len(self.celeba))
        else:
            raise AttributeError
        if split in ["train", "train_valid"]:
            # fixed random permutation (cached) splits 80/20 into train/train_valid
            save_path = Path(f"../clusters/celeba_rand_indices_{target_attr}.pkl")
            if not save_path.exists():
                rand_indices = torch.randperm(len(self.indices))
                pickle.dump(rand_indices, open(save_path, "wb"))
            else:
                rand_indices = pickle.load(open(save_path, "rb"))
            num_total = len(rand_indices)
            num_train = int(0.8 * num_total)
            if split == "train":
                indices = rand_indices[:num_train]
            elif split == "train_valid":
                indices = rand_indices[num_train:]
            self.indices = self.indices[indices]
            self.attr = self.attr[indices]
        self.targets = self.attr[:, self.target_idx]
        self.biases = self.attr[:, self.bias_idx]
        (
            self.confusion_matrix_org,
            self.confusion_matrix,
            self.confusion_matrix_by,
        ) = get_confusion_matrix(
            num_classes=2, targets=self.targets, biases=self.biases
        )
        print(
            f"Use BiasedCelebASplit \n target_attr: {target_attr} split: {split} \n {self.confusion_matrix_org}"
        )
    def build_blonde(self):
        """Keep only 2000 samples of the (bias=0, target=0) group; keep all
        other samples.  Returns the retained index tensor."""
        biases = self.celeba.attr[:, self.bias_idx]
        targets = self.celeba.attr[:, self.target_idx]
        selects = torch.arange(len(self.celeba))[(biases == 0) & (targets == 0)]
        non_selects = torch.arange(len(self.celeba))[~((biases == 0) & (targets == 0))]
        np.random.shuffle(selects)
        indices = torch.cat([selects[:2000], non_selects])
        return indices
    def __getitem__(self, index):
        # image comes transformed from the underlying CelebA dataset
        img, _ = self.celeba.__getitem__(self.indices[index])
        target, bias = self.targets[index], self.biases[index]
        return img, target, bias, index
    def __len__(self):
        return len(self.targets)
def get_celeba(
    root,
    batch_size,
    target_attr="blonde",
    split="train",
    num_workers=8,
    aug=True,
    two_crop=False,
    ratio=0,
    img_size=224,
    given_y=True,
):
    """Build a DataLoader over BiasedCelebASplit.

    ratio == 0 -> plain shuffled loading; ratio != 0 -> WeightedRandomSampler
    with per-sample weights inverse to the (target, bias) cell counts.  When
    ratio > 0 the weights are additionally clipped to at most
    ratio * min(weight).  two_crop wraps the transform so each item yields
    two augmented views (TwoCropTransform).
    """
    logging.info(
        f"get_celeba - split:{split}, aug: {aug}, given_y: {given_y}, ratio: {ratio}"
    )
    if split == "eval":
        # Deterministic eval preprocessing (ImageNet normalization stats).
        transform = T.Compose(
            [
                T.Resize((img_size, img_size)),
                T.ToTensor(),
                T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]
        )
    else:
        if aug:
            # SimCLR-style heavy augmentation stack.
            transform = T.Compose(
                [
                    T.RandomResizedCrop(size=img_size, scale=(0.2, 1.0)),
                    T.RandomHorizontalFlip(),
                    T.RandomApply([T.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
                    T.RandomGrayscale(p=0.2),
                    T.ToTensor(),
                    T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                ]
            )
        else:
            # Light augmentation: resize + horizontal flip only.
            transform = T.Compose(
                [
                    T.Resize((img_size, img_size)),
                    T.RandomHorizontalFlip(),
                    T.ToTensor(),
                    T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                ]
            )
    if two_crop:
        transform = TwoCropTransform(transform)
    dataset = BiasedCelebASplit(
        root=root,
        split=split,
        transform=transform,
        target_attr=target_attr,
    )
    def clip_max_ratio(score):
        # Cap weights at `ratio` times the smallest weight to bound the
        # resampling skew.
        upper_bd = score.min() * ratio
        return np.clip(score, None, upper_bd)
    if ratio != 0:
        if given_y:
            # Inverse frequency of the (target, bias) cell.
            weights = [
                1 / dataset.confusion_matrix_by[c, b]
                for c, b in zip(dataset.targets, dataset.biases)
            ]
        else:
            # Same idea but indexed (bias, target) into the other matrix.
            weights = [
                1 / dataset.confusion_matrix[b, c]
                for c, b in zip(dataset.targets, dataset.biases)
            ]
        if ratio > 0:
            weights = clip_max_ratio(np.array(weights))
        sampler = WeightedRandomSampler(weights, len(weights), replacement=True)
    else:
        sampler = None
    dataloader = DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True if sampler is None else False,
        sampler=sampler,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=two_crop,
    )
    return dataloader
| 6,228 | 31.612565 | 112 | py |
FLAC | FLAC-main/datasets/biased_mnist.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Python implementation of Biased-MNIST.
"""
import logging
import os
import pickle
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from datasets.utils import (
TwoCropTransform,
get_confusion_matrix,
get_unsup_confusion_matrix,
)
from torch.utils import data
from torchvision import transforms
from torchvision.datasets import MNIST
class BiasedMNIST(MNIST):
    """A base class for Biased-MNIST.
    We manually select ten colours to synthetic colour bias. (See `COLOUR_MAP` for the colour configuration)
    Usage is exactly same as torchvision MNIST dataset class.
    You have two paramters to control the level of bias.
    Parameters
    ----------
    root : str
        path to MNIST dataset.
    data_label_correlation : float, default=1.0
        Here, each class has the pre-defined colour (bias).
        data_label_correlation, or `rho` controls the level of the dataset bias.
        A sample is coloured with
        - the pre-defined colour with probability `rho`,
        - coloured with one of the other colours with probability `1 - rho`.
        The number of ``other colours'' is controlled by `n_confusing_labels` (default: 9).
        Note that the colour is injected into the background of the image (see `_binary_to_colour`).
        Hence, we have
        - Perfectly biased dataset with rho=1.0
        - Perfectly unbiased with rho=0.1 (1/10) ==> our ``unbiased'' setting in the test time.
        In the paper, we explore the high correlations but with small hints, e.g., rho=0.999.
    n_confusing_labels : int, default=9
        In the real-world cases, biases are not equally distributed, but highly unbalanced.
        We mimic the unbalanced biases by changing the number of confusing colours for each class.
        In the paper, we use n_confusing_labels=9, i.e., during training, the model can observe
        all colours for each class. However, you can make the problem harder by setting smaller n_confusing_labels, e.g., 2.
        We suggest to researchers considering this benchmark for future researches.
    """
    # One background colour per digit class.
    # NOTE(review): the 225 values (vs 255 elsewhere) look like a typo in the
    # original colour table; keep as-is, since cached pickles were generated
    # with exactly these colours — confirm before changing.
    COLOUR_MAP = [
        [255, 0, 0],
        [0, 255, 0],
        [0, 0, 255],
        [225, 225, 0],
        [225, 0, 225],
        [0, 255, 255],
        [255, 128, 0],
        [255, 0, 128],
        [128, 0, 255],
        [128, 128, 128],
    ]
    def __init__(
        self,
        root,
        bias_feature_root="./biased_feats",
        split="train",
        transform=None,
        target_transform=None,
        download=False,
        data_label_correlation=1.0,
        n_confusing_labels=9,
        seed=1,
        load_bias_feature=False,
        train_corr=None,
    ):
        assert split in ["train", "valid"]
        # torchvision MNIST only knows train/test; "valid" maps to test.
        train = split in ["train"]
        super().__init__(
            root,
            train=train,
            transform=transform,
            target_transform=target_transform,
            download=download,
        )
        self.load_bias_feature = load_bias_feature
        if self.load_bias_feature:
            if train_corr:
                # Features precomputed with a (possibly different) training
                # correlation `train_corr`.
                bias_feature_dir = f"{bias_feature_root}/train{train_corr}-corr{data_label_correlation}-seed{seed}"
                logging.info(f"load bias feature: {bias_feature_dir}")
                self.bias_features = torch.load(f"{bias_feature_dir}/bias_feats.pt")
                self.marginal = torch.load(f"{bias_feature_dir}/marginal.pt")
            else:
                bias_feature_dir = f"{bias_feature_root}/color_mnist-corr{data_label_correlation}-seed{seed}"
                logging.info(f"load bias feature: {bias_feature_dir}")
                self.bias_features = torch.load(f"{bias_feature_dir}/bias_feats.pt")
                self.marginal = torch.load(f"{bias_feature_dir}/marginal.pt")
        # Colourized dataset is cached per (correlation, seed, split).
        save_path = (
            Path(root)
            / "pickles"
            / f"color_mnist-corr{data_label_correlation}-seed{seed}"
            / split
        )
        if save_path.is_dir():
            logging.info(f"use existing color_mnist from {save_path}")
            self.data = pickle.load(open(save_path / "data.pkl", "rb"))
            self.targets = pickle.load(open(save_path / "targets.pkl", "rb"))
            self.biased_targets = pickle.load(
                open(save_path / "biased_targets.pkl", "rb")
            )
        else:
            self.random = True
            self.data_label_correlation = data_label_correlation
            self.n_confusing_labels = n_confusing_labels
            self.data, self.targets, self.biased_targets = self.build_biased_mnist()
            # Shuffle once so samples are not grouped by bias colour.
            indices = np.arange(len(self.data))
            self._shuffle(indices)
            self.data = self.data[indices].numpy()
            self.targets = self.targets[indices]
            self.biased_targets = self.biased_targets[indices]
            logging.info(f"save color_mnist to {save_path}")
            save_path.mkdir(parents=True, exist_ok=True)
            pickle.dump(self.data, open(save_path / "data.pkl", "wb"))
            pickle.dump(self.targets, open(save_path / "targets.pkl", "wb"))
            pickle.dump(
                self.biased_targets, open(save_path / "biased_targets.pkl", "wb")
            )
        if load_bias_feature:
            # Unsupervised variant uses the loaded marginals instead of the
            # per-target matrix (project helpers from datasets.utils).
            (
                self.confusion_matrix_org,
                self.confusion_matrix,
            ) = get_unsup_confusion_matrix(
                num_classes=10,
                targets=self.targets,
                biases=self.biased_targets,
                marginals=self.marginal,
            )
        else:
            (
                self.confusion_matrix_org,
                self.confusion_matrix,
                self.confusion_matrix_by,
            ) = get_confusion_matrix(
                num_classes=10, targets=self.targets, biases=self.biased_targets
            )
    @property
    def raw_folder(self):
        return os.path.join(self.root, "raw")
    @property
    def processed_folder(self):
        return os.path.join(self.root, "processed")
    def _shuffle(self, iteratable):
        # In-place shuffle, only while building (self.random is set in the
        # build branch of __init__).
        if self.random:
            np.random.shuffle(iteratable)
    def _update_bias_indices(self, bias_indices, label):
        """Assign the samples of `label` to bias colours per the correlation."""
        if self.n_confusing_labels > 9 or self.n_confusing_labels < 1:
            raise ValueError(self.n_confusing_labels)
        indices = np.where((self.targets == label).numpy())[0]
        self._shuffle(indices)
        indices = torch.LongTensor(indices)
        n_samples = len(indices)
        # First rho*n samples keep the class's own colour ...
        n_correlated_samples = int(n_samples * self.data_label_correlation)
        # ... the rest are split (near-)evenly over the confusing colours.
        n_decorrelated_per_class = int(
            np.ceil((n_samples - n_correlated_samples) / (self.n_confusing_labels))
        )
        correlated_indices = indices[:n_correlated_samples]
        bias_indices[label] = torch.cat([bias_indices[label], correlated_indices])
        decorrelated_indices = torch.split(
            indices[n_correlated_samples:], n_decorrelated_per_class
        )
        other_labels = [
            _label % 10
            for _label in range(label + 1, label + 1 + self.n_confusing_labels)
        ]
        self._shuffle(other_labels)
        for idx, _indices in enumerate(decorrelated_indices):
            _label = other_labels[idx]
            bias_indices[_label] = torch.cat([bias_indices[_label], _indices])
    def build_biased_mnist(self):
        """Build biased MNIST: returns (coloured data, targets, bias labels)."""
        n_labels = self.targets.max().item() + 1
        bias_indices = {label: torch.LongTensor() for label in range(n_labels)}
        for label in range(n_labels):
            self._update_bias_indices(bias_indices, label)
        data = torch.ByteTensor()
        targets = torch.LongTensor()
        biased_targets = []
        for bias_label, indices in bias_indices.items():
            # _make_biased_mnist is implemented by subclasses (see
            # ColourBiasedMNIST).
            _data, _targets = self._make_biased_mnist(indices, bias_label)
            data = torch.cat([data, _data])
            targets = torch.cat([targets, _targets])
            biased_targets.extend([bias_label] * len(indices))
        biased_targets = torch.LongTensor(biased_targets)
        return data, targets, biased_targets
    def __getitem__(self, index):
        img, target, bias = (
            self.data[index],
            int(self.targets[index]),
            int(self.biased_targets[index]),
        )
        img = Image.fromarray(img.astype(np.uint8), mode="RGB")
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        if self.load_bias_feature:
            bias_feat = self.bias_features[index]
            return img, target, bias, index, bias_feat
        else:
            return img, target, bias, index
class ColourBiasedMNIST(BiasedMNIST):
    """BiasedMNIST variant that injects the bias as a background colour."""
    def __init__(
        self,
        root,
        bias_feature_root="./biased_feats",
        split="train",
        transform=None,
        target_transform=None,
        download=False,
        data_label_correlation=1.0,
        n_confusing_labels=9,
        seed=1,
        load_bias_feature=False,
        train_corr=None,
    ):
        super(ColourBiasedMNIST, self).__init__(
            root,
            bias_feature_root=bias_feature_root,
            split=split,
            transform=transform,
            target_transform=target_transform,
            download=download,
            data_label_correlation=data_label_correlation,
            n_confusing_labels=n_confusing_labels,
            seed=seed,
            load_bias_feature=load_bias_feature,
            train_corr=train_corr,
        )
    def _binary_to_colour(self, data, colour):
        """Binarize grayscale digits and paint the background with `colour`.

        `data` is a batch of single-channel MNIST images (presumably
        N x 28 x 28 ByteTensor — TODO confirm); the result is channels-last
        N x H x W x 3.
        """
        # Foreground: any non-zero pixel becomes pure white.
        fg_data = torch.zeros_like(data)
        fg_data[data != 0] = 255
        fg_data[data == 0] = 0
        fg_data = torch.stack([fg_data, fg_data, fg_data], dim=1)
        # Background mask, stacked channels-last so it can be scaled by the
        # RGB colour, then permuted to match fg_data's N x C x H x W layout.
        bg_data = torch.zeros_like(data)
        bg_data[data == 0] = 1
        bg_data[data != 0] = 0
        bg_data = torch.stack([bg_data, bg_data, bg_data], dim=3)
        bg_data = bg_data * torch.ByteTensor(colour)
        bg_data = bg_data.permute(0, 3, 1, 2)
        data = fg_data + bg_data
        # Return channels-last (N x H x W x 3) for PIL.Image.fromarray.
        return data.permute(0, 2, 3, 1)
    def _make_biased_mnist(self, indices, label):
        return (
            self._binary_to_colour(self.data[indices], self.COLOUR_MAP[label]),
            self.targets[indices],
        )
def get_color_mnist(
    root,
    batch_size,
    data_label_correlation,
    n_confusing_labels=9,
    split="train",
    num_workers=4,
    seed=1,
    aug=True,
    two_crop=False,
    ratio=0,
    bias_feature_root="./biased_feats",
    load_bias_feature=False,
    given_y=True,
    train_corr=None,
):
    """Build a DataLoader over ColourBiasedMNIST.

    split == "train_val" carves a 10% random validation subset out of the
    train split; any other value loads that split directly.  ratio != 0
    enables inverse-frequency weighted resampling (clipped to
    ratio * min(weight) when ratio > 0).
    """
    logging.info(
        f"get_color_mnist - split: {split}, aug: {aug}, given_y: {given_y}, ratio: {ratio}"
    )
    normalize = transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    if aug:
        train_transform = transforms.Compose(
            [
                transforms.RandomRotation(20),
                transforms.RandomApply(
                    [transforms.ColorJitter(0.3, 0.3, 0.3, 0.3)], p=0.8
                ),
                transforms.ToTensor(),
                normalize,
            ]
        )
    else:
        train_transform = transforms.Compose([transforms.ToTensor(), normalize])
    if two_crop:
        train_transform = TwoCropTransform(train_transform)
    if split == "train_val":
        # NOTE(review): the validation subset reuses train_transform, i.e. it
        # is augmented when aug=True — confirm this is intended.
        dataset = ColourBiasedMNIST(
            root,
            split="train",
            transform=train_transform,
            download=True,
            data_label_correlation=data_label_correlation,
            n_confusing_labels=n_confusing_labels,
            seed=seed,
            load_bias_feature=load_bias_feature,
            train_corr=train_corr,
        )
        indices = list(range(len(dataset)))
        # NOTE(review): `split` (the str parameter) is shadowed here by the
        # 10% cut point; harmless, but worth renaming in a follow-up.
        split = int(np.floor(0.1 * len(dataset)))
        np.random.shuffle(indices)
        valid_idx = indices[:split]
        valid_sampler = data.sampler.SubsetRandomSampler(valid_idx)
        dataloader = data.DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=False,
            sampler=valid_sampler,
            num_workers=num_workers,
            pin_memory=True,
            drop_last=False,
        )
        return dataloader
    else:
        dataset = ColourBiasedMNIST(
            root,
            bias_feature_root=bias_feature_root,
            split=split,
            transform=train_transform,
            download=True,
            data_label_correlation=data_label_correlation,
            n_confusing_labels=n_confusing_labels,
            seed=seed,
            load_bias_feature=load_bias_feature,
            train_corr=train_corr,
        )
        def clip_max_ratio(score):
            # Cap weights at `ratio` times the smallest weight.
            upper_bd = score.min() * ratio
            return np.clip(score, None, upper_bd)
        if ratio != 0:
            if load_bias_feature:
                # Precomputed marginals double as sampling weights.
                weights = dataset.marginal
            else:
                if given_y:
                    weights = [
                        1 / dataset.confusion_matrix_by[c, b]
                        for c, b in zip(dataset.targets, dataset.biased_targets)
                    ]
                else:
                    weights = [
                        1 / dataset.confusion_matrix[b, c]
                        for c, b in zip(dataset.targets, dataset.biased_targets)
                    ]
            if ratio > 0:
                weights = clip_max_ratio(np.array(weights))
            sampler = data.WeightedRandomSampler(
                weights, len(weights), replacement=True
            )
        else:
            sampler = None
        dataloader = data.DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=True if sampler is None and split == "train" else False,
            sampler=sampler,
            num_workers=num_workers,
            pin_memory=True,
            drop_last=split == "train",
        )
        return dataloader
| 14,204 | 32.661137 | 124 | py |
FLAC | FLAC-main/datasets/imagenet.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
9-Class ImageNet wrapper. Many codes are borrowed from the official torchvision dataset.
https://github.com/pytorch/vision/blob/master/torchvision/datasets/imagenet.py
The following nine classes are selected to build the subset:
dog, cat, frog, turtle, bird, monkey, fish, crab, insect
"""
import logging
import os
import numpy as np
import torch
import torch.utils.data
from PIL import Image
from datasets.utils import TwoCropTransform
from torch.utils import data
from torchvision import transforms
# Extensions accepted by `is_image_file` (lower- and upper-case spellings).
IMG_EXTENSIONS = [
    ".jpg",
    ".JPG",
    ".jpeg",
    ".JPEG",
    ".png",
    ".PNG",
    ".ppm",
    ".PPM",
    ".bmp",
    ".BMP",
]
# WordNet synset id -> coarse 9-class label, used by `make_dataset` for the
# "ImageNet-A" variant.  Per the module docstring's class order this is
# presumably 0=dog 1=cat 2=frog 3=turtle 4=bird 5=monkey 6=fish 7=crab
# 8=insect — confirm against the synset ids.
CLASS_TO_INDEX = {
    "n01641577": 2,
    "n01644373": 2,
    "n01644900": 2,
    "n01664065": 3,
    "n01665541": 3,
    "n01667114": 3,
    "n01667778": 3,
    "n01669191": 3,
    "n01819313": 4,
    "n01820546": 4,
    "n01833805": 4,
    "n01843383": 4,
    "n01847000": 4,
    "n01978287": 7,
    "n01978455": 7,
    "n01980166": 7,
    "n01981276": 7,
    "n02085620": 0,
    "n02099601": 0,
    "n02106550": 0,
    "n02106662": 0,
    "n02110958": 0,
    "n02123045": 1,
    "n02123159": 1,
    "n02123394": 1,
    "n02123597": 1,
    "n02124075": 1,
    "n02174001": 8,
    "n02177972": 8,
    "n02190166": 8,
    "n02206856": 8,
    "n02219486": 8,
    "n02486410": 5,
    "n02487347": 5,
    "n02488291": 5,
    "n02488702": 5,
    "n02492035": 5,
    "n02607072": 6,
    "n02640242": 6,
    "n02641379": 6,
    "n02643566": 6,
    "n02655020": 6,
}
def is_image_file(filename):
    """Return True if *filename* has a recognized image extension.

    Generalized to be fully case-insensitive: the original compared against
    the all-lower/all-upper pairs in ``IMG_EXTENSIONS`` and therefore
    rejected mixed-case suffixes such as ``.Jpg``.  Every previously accepted
    name is still accepted.
    """
    return filename.lower().endswith((".jpg", ".jpeg", ".png", ".ppm", ".bmp"))
def make_dataset(dir, class_to_idx, data="ImageNet"):
    """Collect (image path, coarse label) pairs for the 9-class subset.

    `class_to_idx` maps synset directory names to full-ImageNet indices;
    this function remaps them to the nine coarse classes and walks `dir`
    gathering image files.  Returns (images, remapped class_to_idx).
    """
    # dog, cat, frog, turtle, bird, monkey, fish, crab, insect
    # Full-ImageNet index ranges for each of the nine coarse classes.
    RESTRICTED_RANGES = [
        (151, 254),
        (281, 285),
        (30, 32),
        (33, 37),
        (89, 97),
        (372, 378),
        (393, 397),
        (118, 121),
        (306, 310),
    ]
    range_sets = [set(range(s, e + 1)) for s, e in RESTRICTED_RANGES]
    class_to_idx_ = {}
    if data == "ImageNet-A":
        # ImageNet-A uses the hard-coded synset table; unknown synsets are
        # silently skipped.
        for class_name, idx in class_to_idx.items():
            try:
                class_to_idx_[class_name] = CLASS_TO_INDEX[class_name]
            except Exception:
                pass
    elif data == "ImageNet-C":
        # TODO
        pass
    else:  # ImageNet
        for class_name, idx in class_to_idx.items():
            for new_idx, range_set in enumerate(range_sets):
                if idx in range_set:
                    # Classes 0 (dog), 4 (bird) and 5 (monkey) are further
                    # restricted to the synsets that overlap with ImageNet-A.
                    if new_idx == 0:  # classes that overlap with ImageNet-A
                        if idx in [151, 207, 234, 235, 254]:
                            class_to_idx_[class_name] = new_idx
                    elif new_idx == 4:
                        if idx in [89, 90, 94, 96, 97]:
                            class_to_idx_[class_name] = new_idx
                    elif new_idx == 5:
                        if idx in [372, 373, 374, 375, 378]:
                            class_to_idx_[class_name] = new_idx
                    else:
                        class_to_idx_[class_name] = new_idx
    images = []
    dir = os.path.expanduser(dir)
    # Sorted walk keeps the sample order deterministic across runs.
    a = sorted(class_to_idx_.keys())
    for target in a:
        d = os.path.join(dir, target)
        if not os.path.isdir(d):
            continue
        for root, _, fnames in sorted(os.walk(d)):
            for fname in fnames:
                if is_image_file(fname):
                    path = os.path.join(root, fname)
                    item = (path, class_to_idx_[target])
                    images.append(item)
    return images, class_to_idx_
def find_classes(dir):
    """List the class subdirectories of *dir*, indexed alphabetically.

    Returns ``(classes, class_to_idx)``: the sorted subdirectory names and a
    mapping from each name to its position in that sorted list.
    """
    subdirs = sorted(
        entry for entry in os.listdir(dir)
        if os.path.isdir(os.path.join(dir, entry))
    )
    return subdirs, {name: position for position, name in enumerate(subdirs)}
def pil_loader(path):
    """Open the image at *path* with PIL and return it converted to RGB."""
    with open(path, "rb") as stream, Image.open(stream) as image:
        return image.convert("RGB")
class ImageFolder(torch.utils.data.Dataset):
    """9-class ImageNet subset yielding (img, target, bias_target, index, bias_feat)."""
    def __init__(
        self,
        orig_root,
        root,
        bias_feature_root="./biased_feats",
        transform=None,
        target_transform=None,
        loader=pil_loader,
        train=True,
        val_data="ImageNet",
        seed=1,
        load_bias_feature=False,
        cluster_root=None,
        cluster_name="cluster",
    ):
        classes, class_to_idx = find_classes(orig_root)
        imgs, class_to_idx_ = make_dataset(orig_root, class_to_idx, val_data)
        if len(imgs) == 0:
            raise (
                RuntimeError(
                    "Found 0 images in subfolders of: " + root + "\n"
                    "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)
                )
            )
        self.orig_root = orig_root
        self.root = root
        self.dataset = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx_
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
        self.train = train
        self.val_data = val_data
        self.clusters = []
        self.load_bias_feature = load_bias_feature
        if self.load_bias_feature:
            # Precomputed bias features + sampling marginals, keyed by seed.
            bias_feat_path = f"{bias_feature_root}/imagenet-seed{seed}"
            logging.info(f"Load bias feature at {bias_feat_path}")
            self.bias_features = torch.load(f"{bias_feat_path}/bias_feats.pt")
            print(torch.count_nonzero(self.bias_features))
            self.marginal = torch.load(f"{bias_feat_path}/marginal.pt")
        # Three fixed texture-cluster assignments used as "bias" labels at
        # validation time (loaded from a hard-coded relative path).
        for i in range(3):
            self.clusters.append(
                torch.load("clusters/cluster_label_{}.pth".format(i + 1))
            )
        # self.split = 'train_' if train else ''
        # self.cluster_root = cluster_root
        # if cluster_root is not None:
        #     cluster_path = f'{cluster_root}/{self.split}'
        #     logging.info(f'ImageFolder cluster_path: {cluster_path}')
        #     cluster_files = glob.glob(f'{cluster_path}{cluster_name}_label_*.pth')
        #     self.clusters = [torch.load(f) for f in cluster_files]
        # else:
        #     self.clusters = []
    def __getitem__(self, index):
        path, target = self.dataset[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        if self.load_bias_feature:
            bias_feat = self.bias_features[index]
        else:
            bias_feat = -1  # sentinel when no bias feature is available
        if not self.train and self.val_data == "ImageNet":
            # At validation, the bias target is the triple of cluster labels.
            bias_target = [
                self.clusters[0][index],
                self.clusters[1][index],
                self.clusters[2][index],
            ]
            return img, target, bias_target, index, bias_feat
        else:
            # During training the class itself stands in for the bias slot.
            return img, target, target, index, bias_feat
    def __len__(self):
        return len(self.dataset)
def get_imagenet(
    orig_root,
    root,
    batch_size,
    bias_feature_root="./biased_feats",
    train=True,
    num_workers=8,
    seed=1,
    load_size=256,
    image_size=224,
    val_data="ImageNet",
    aug=False,
    two_crop=False,
    ratio=0,
    load_bias_feature=False,
    cluster_root=None,
    cluster_name="cluster",
):
    """Create a DataLoader over the 9-class ImageNet subset (ImageFolder).

    When ``ratio != 0`` a WeightedRandomSampler is built from the
    precomputed marginals (requires ``load_bias_feature=True``); with
    ``ratio > 0`` the weights are clipped to at most ``ratio * min(weight)``.
    ``two_crop`` wraps the transform so each item yields two augmented views.
    """
    if train:
        if aug:
            logging.info("get_imagenet - aug")
            # SimCLR-style heavy augmentation stack.
            transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(size=image_size, scale=(0.2, 1.0)),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomApply(
                        [transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8
                    ),
                    transforms.RandomGrayscale(p=0.2),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                ]
            )
        else:
            transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(image_size),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
                    ),
                ]
            )
    else:
        # Deterministic eval preprocessing.
        transform = transforms.Compose(
            [
                transforms.Resize(load_size),
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
                ),
            ]
        )
    if two_crop:
        transform = TwoCropTransform(transform)
    dataset = ImageFolder(
        orig_root,
        root,
        bias_feature_root=bias_feature_root,
        transform=transform,
        train=train,
        seed=seed,
        val_data=val_data,
        load_bias_feature=load_bias_feature,
        cluster_root=cluster_root,
        cluster_name=cluster_name,
    )
    def clip_max_ratio(score):
        # Cap weights at `ratio` times the smallest weight to bound the skew.
        upper_bd = score.min() * ratio
        return np.clip(score, None, upper_bd)
    if ratio != 0:
        if load_bias_feature:
            weights = dataset.marginal
        else:
            raise NotImplementedError()
        logging.info(f"weight ratio max: {weights.max()} min: {weights.min()}")
        if ratio > 0:
            weights = clip_max_ratio(np.array(weights))
        sampler = data.WeightedRandomSampler(weights, len(weights), replacement=True)
    else:
        sampler = None
    dataloader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True if sampler is None else False,
        # BUG FIX: the sampler was constructed above but never passed to the
        # DataLoader, so `ratio != 0` silently fell back to sequential,
        # unweighted loading.  The sibling loaders (get_celeba,
        # get_color_mnist) pass it explicitly; do the same here.
        sampler=sampler,
        num_workers=num_workers,
        pin_memory=True,
    )
    return dataloader
| 9,982 | 28.361765 | 88 | py |
FLAC | FLAC-main/utils/utils.py | from __future__ import print_function
import logging
import math
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
class MultiDimAverageMeter(object):
    """Running mean over the cells of a multi-dimensional grid of groups.

    Each cell of the `dims` grid accumulates a sum and a count; `add` routes
    every value to the cell addressed by its multi-dimensional group index.
    """

    def __init__(self, dims=(2, 2)):
        self.dims = dims
        n_cells = np.prod(dims)
        self.cum = torch.zeros(n_cells)
        self.cnt = torch.zeros(n_cells)
        # Lookup table: multi-dim group index -> position in the flat buffers.
        self.idx_helper = torch.arange(n_cells, dtype=torch.long).reshape(*dims)

    def add(self, vals, idxs):
        """Accumulate vals[i] into the cell addressed by row i of idxs."""
        flat_cells = torch.stack(
            [self.idx_helper[tuple(row)] for row in idxs], dim=0
        )
        flat_vals = vals.view(-1).float()
        self.cum.index_add_(0, flat_cells, flat_vals)
        self.cnt.index_add_(
            0, flat_cells, torch.ones_like(flat_vals, dtype=torch.float)
        )

    def get_mean(self):
        """Per-cell mean, reshaped to `dims` (NaN for never-seen cells)."""
        return (self.cum / self.cnt).reshape(*self.dims)

    def get_unbiased_acc(self):
        """Mean over only the cells that received at least one sample."""
        seen = self.cnt > 0
        return (self.cum[seen] / self.cnt[seen]).mean()

    def reset(self):
        """Zero all accumulated sums and counts in place."""
        self.cum.zero_()
        self.cnt.zero_()
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear every statistic back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += n * val
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (in percent) for each k in `topk`.

    `output` holds per-class scores of shape (batch, classes); `target` the
    true class indices.  Returns a list of 1-element tensors, one per k.
    """
    with torch.no_grad():
        k_max = max(topk)
        n = target.size(0)
        # (batch, k_max) indices of the highest-scoring classes, transposed
        # to (k_max, batch) so row r holds everyone's rank-r prediction.
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
        results = []
        for k in topk:
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            results.append(n_correct.mul_(100.0 / n))
        return results
def set_seed(seed):
    """Seed every RNG in play and force deterministic cuDNN behaviour."""
    logging.info(f"=======> Using Fixed Random Seed: {seed} <========")
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Reproducibility over speed: fixed kernels, no autotuning.
    cudnn.benchmark = False
    cudnn.deterministic = True
def set_optimizer(opt, model):
    """Build the SGD optimizer described by the options namespace `opt`.

    `opt` must expose `learning_rate`, `momentum` and `weight_decay`.
    """
    return optim.SGD(
        model.parameters(),
        lr=opt.learning_rate,
        momentum=opt.momentum,
        weight_decay=opt.weight_decay,
    )
def save_model(model, optimizer, opt, epoch, save_file):
    """Serialize model + optimizer state, run options and epoch to `save_file`."""
    checkpoint = {
        "opt": opt,
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "epoch": epoch,
    }
    torch.save(checkpoint, save_file)
    # Drop the reference promptly; state dicts can be large.
    del checkpoint
def load_model(path):
    """Load a checkpoint written by `save_model` and return its model weights."""
    checkpoint = torch.load(path)
    return checkpoint["model"]
class pretty_dict(dict):
    """dict whose str() rounds float values to 3 decimals for compact logging."""

    def __str__(self):
        rounded = {
            key: round(value, 3) if isinstance(value, float) else value
            for key, value in self.items()
        }
        return str(rounded)
| 3,121 | 24.801653 | 88 | py |
fce | fce-main/fuzzy_binning.py | from utils import *
import argparse
import warnings
import pickle
import numpy as np
import os
import torch
import pandas as pd
from tqdm import tqdm
from calibration_utils import *
warnings.filterwarnings("ignore")
# -------------------------------------------------------------------------------------------------------------------#
# Command-line interface: three pickle files (probabilities, predicted
# labels, true labels) plus the bin count for the calibration metrics.
parser = argparse.ArgumentParser(description='Binning setup')
parser.add_argument('predict_probs_pkl', type=str,
                    help='.pickle file containing softmax prediction probabilities')
parser.add_argument('predicted_labels', type=str,
                    help='.pickle file containing predicted labels')
parser.add_argument('labels', type=str,
                    help='.pickle file containing actual labels')
parser.add_argument('bins', type=int,
                    help='Number of bins to calculate calibration error')
args = parser.parse_args()
predict_probs_pkl = args.predict_probs_pkl
predicted_labels = args.predicted_labels
labels = args.labels
n_bins = args.bins
# -------------------------------------------------------------------------------------------------------------------#
# Analysis of predictions: load the pickled outputs, split them into
# correct/incorrect sets, and compute under/over-confidence, ECE and FCE.
# The *_list / *_breakdown lists each receive a single entry here; they are
# presumably vestiges of a multi-configuration sweep loop — confirm.
ece_list = []
fce_list = []
uf_list = []
of_list = []
bins_list = []
ece_breakdown = []
fce_breakdown = []
print("Binning probabilities...")
with open(predict_probs_pkl, 'rb') as handle:
    soft_preds = pickle.load(handle)
with open(predicted_labels, 'rb') as handle:
    preds = pickle.load(handle)
with open(labels, 'rb') as handle:
    labels = pickle.load(handle)
# NOTE(review): torch.softmax is applied here although the CLI help describes
# the input as softmax probabilities already — confirm the pickles actually
# hold logits (get_predictions.py saves raw model outputs).
incorrect = []
correct = []
for i in range(len(preds)):
    if preds[i] != labels[i]:
        incorrect.append([torch.softmax(soft_preds[i], 0), int(preds[i]), int(labels[i])])
    else:
        correct.append([torch.softmax(soft_preds[i], 0), int(preds[i]), int(labels[i])])
# calculate overconfidence --> expectation of confidence over incorrect predictions
of = []
for i in range(len(incorrect)):
    of.append(max(incorrect[i][0]))
# calculate underconfidence --> expectation of 1-confidence over correct predictions
uf = []
for i in range(len(correct)):
    uf.append(1 - max(correct[i][0]))
uf_list.append(np.mean(uf))
of_list.append(np.mean(of))
# Re-pack everything as numpy/int for the calibration helpers.
ece_dict = {'soft_preds': np.array([np.array(torch.softmax(x, 0)) for x in soft_preds]),
            'preds': [int(x) for x in preds],
            'labels': [int(x) for x in labels]}
print("Calculating ECE...")
ece_vals, ece = expected_calibration_error(np.array(ece_dict['labels']),
                                           ece_dict['soft_preds'], num_bins=n_bins)
bins_list.append(n_bins)
ece_list.append(ece)
ece_breakdown.append(ece_vals)
print("ECE calculations done!")
print("Calculating FCE...")
fce_vals, fce = fuzzy_calibration_error(np.array(ece_dict['labels']), ece_dict['soft_preds'], n_bins)
fce_list.append(fce)
fce_breakdown.append(fce_vals)
print("FCE calculations done!")
print ("ECE: %.3f \nFCE: %.3f " %(ece, fce))
| 2,962 | 29.546392 | 118 | py |
fce | fce-main/paper_demo/get_predictions.py | from datasets import load_dataset
from pytorch_lightning import Trainer, seed_everything
from utils import *
import argparse
import warnings
import pickle
import os
import torch
from calibration_utils import *
warnings.filterwarnings("ignore")
# -------------------------------------------------------------------------------------------------------------------#
# CLI: dataset name, train-subset size and the data/result directories.
parser = argparse.ArgumentParser(description='Data setup')
parser.add_argument('ds_name', type=str,
                    help='Dataset name (news/agnews/imdb)')
parser.add_argument('size', type=int,
                    help='Train data size')
parser.add_argument('data_dir', type=str,
                    help='Directory location to save data')
parser.add_argument('result_dir', type=str,
                    help='Directory location to save results')
args = parser.parse_args()
size = args.size
ds_name = args.ds_name
filename = ds_name + str(size) + ".hf"
data_dir = args.data_dir
result_dir = args.result_dir
# -------------------------------------------------------------------------------------------------------------------#
# load dataset
# NOTE(review): any other ds_name leaves `data` undefined and crashes at
# create_data_split with a NameError rather than a clear message.
if ds_name == 'news':
    data = load_dataset("SetFit/20_newsgroups")
elif ds_name == 'agnews':
    data = load_dataset("ag_news")
elif ds_name == 'imdb':
    data = load_dataset("imdb")
# data = load_dataset("huggingface_dataset") # To load other huggingface datasets
# saving dataset variation
create_data_split(data, size, ds_name, data_dir)
# setup input data
seed_everything(42)
dm = finetuning_data(model_name_or_path="bert-base-cased", data_name=ds_name, filename=filename,
                     data_dir=data_dir)
dm.setup("fit")
print("Train: {}".format(len(dm.dataset['train'])))
print("Test: {}".format(len(dm.dataset['test'])))
# -------------------------------------------------------------------------------------------------------------------#
# define model parameters
model = finetuner(model_name_or_path="bert-base-cased", num_labels=dm.num_labels)
# define training hyperparameters
trainer = Trainer(
    max_epochs=1,
    accelerator="auto",
    devices=1 if torch.cuda.is_available() else None)
# model training
trainer.fit(model, datamodule=dm)
print("Fine-tuning done!")
# model eval
metrs = trainer.test(model, datamodule=dm)
outputs = trainer.predict(model, datamodule=dm, return_predictions=True)
print(metrs)
# Flatten the per-batch prediction tuples: x[0]=scores, x[1]=predicted
# labels, x[2]=true labels.
soft_preds = [y for x in outputs for y in x[0]]
preds = [y for x in outputs for y in x[1]]
labels = [y for x in outputs for y in x[2]]
dir_path = result_dir
# Check whether the specified path exists or not
if not os.path.exists(dir_path):
    os.makedirs(dir_path)
print("Saving prediction probabilities...")
# `filename` is reused here without the ".hf" suffix to key the result files.
filename = ds_name + str(size)
preds_file = 'predicted_labels' + filename + '.pickle'
soft_preds_file = 'predict_probs' + filename + '.pickle'
labels_file = 'labels' + filename + '.pickle'
preds_path = os.path.join(dir_path, preds_file)
soft_preds_path = os.path.join(dir_path, soft_preds_file)
labels_path = os.path.join(dir_path, labels_file)
# save
with open(soft_preds_path, 'wb') as handle:
    pickle.dump(soft_preds, handle)
with open(preds_path, 'wb') as handle:
    pickle.dump(preds, handle)
with open(labels_path, 'wb') as handle:
    pickle.dump(labels, handle)
print("Saved prediction probabilities!")
# -------------------------------------------------------------------------------------------------------------------#
| 3,417 | 30.072727 | 118 | py |
fce | fce-main/paper_demo/utils.py | import os
import datasets
import evaluate
import numpy as np
import pandas as pd
import torch
from datasets import DatasetDict, Dataset
from pytorch_lightning import LightningDataModule, LightningModule, Trainer, seed_everything
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
from typing import Optional
from utils import *
tqdm.pandas()
####----datasetup utils----####
def preprocess(text):
    """Return *text* unchanged if it is a non-blank string, else ``pd.NA``.

    Bug fix: the original called ``text.strip()`` *before* any type check, so
    non-string cells (``None``, ``float('nan')`` produced by pandas) raised
    ``AttributeError`` instead of being mapped to ``pd.NA`` — and since
    ``str.strip`` always returns ``str``, the ``isinstance`` guard could
    never fire.  Behaviour for valid strings is unchanged: blank/whitespace
    strings become ``pd.NA``, everything else passes through untouched.
    """
    if not isinstance(text, str) or not text.strip():
        return pd.NA
    return text
def strat_sampler(split, n):
    """Draw a label-stratified random sample of ~n rows from `split`.

    Each label contributes rows in proportion to its share of `split`
    (rounded); the result is shuffled and re-indexed from 0.
    """
    total = len(split)

    def take_share(group):
        # Per-group quota proportional to the group's frequency.
        return group.sample(int(np.rint(n * len(group) / total)))

    sampled = split.groupby('label', group_keys=False).apply(take_share)
    return sampled.sample(frac=1).reset_index(drop=True)
def drop_empty(data_split):
    """Materialize a dataset split as a DataFrame and drop blank/NA texts.

    Runs every "text" cell through `preprocess` (which maps blank entries to
    ``pd.NA``) and keeps only the rows whose text survived.
    """
    frame = pd.DataFrame(data_split)
    frame["text"] = frame["text"].apply(preprocess)
    return frame[frame["text"].notna()]
def create_data_split(data, size, ds_name, data_dir):
    """Build and persist a dataset variant with a stratified train subset.

    Cleans both splits with `drop_empty`, stratified-samples `size` rows from
    the train split, and saves the resulting DatasetDict to
    ``<data_dir>/<ds_name><size>.hf``.

    Improvements over the original: removed the no-op ``data_dir = data_dir``
    self-assignment, replaced the race-prone exists-then-makedirs pair with
    ``os.makedirs(..., exist_ok=True)``, and used ``os.path.join`` instead of
    manual "/" concatenation.
    """
    data_n = DatasetDict(
        {
            "train": Dataset.from_pandas(
                strat_sampler(drop_empty(data["train"]), size)
            ),
            "test": Dataset.from_pandas(drop_empty(data["test"])),
        }
    )
    # exist_ok avoids the TOCTOU race between the existence check and mkdir.
    os.makedirs(data_dir, exist_ok=True)
    data_n.save_to_disk(os.path.join(data_dir, ds_name + str(size) + ".hf"))
####----training utils----####
class finetuning_data(LightningDataModule):
    """DataModule that loads a HuggingFace DatasetDict saved on disk,
    doubles the train split by concatenating a shuffled copy of itself,
    tokenizes every split, and serves train/test/predict DataLoaders."""
    # task key -> dataset field(s) containing the model input text
    task_field_map = {
        "ft": ["text"],
        "mia": ["predictions"]}
    # dataset name -> number of classification labels
    task_num_labels = {
        "news": 20,
        "agnews": 4,
        "imdb": 2}
    # only these tokenized columns are exposed to the DataLoaders
    loader_columns = [
        "input_ids",
        "token_type_ids",
        "attention_mask",
        "start_positions",
        "end_positions",
        "labels",
    ]
    def __init__(
        self,
        model_name_or_path: str,
        data_dir: str = "./data",
        filename: str = "news1000.hf",
        data_name: str = "news",
        max_seq_length: int = 256,
        train_batch_size: int = 4,
        eval_batch_size: int = 4,
        **kwargs,
    ):
        """Store configuration and build the fast tokenizer for the model."""
        super().__init__()
        self.model_name_or_path = model_name_or_path
        self.data_dir = data_dir
        self.filename = filename
        self.data_name = data_name
        self.train_batch_size = train_batch_size
        self.eval_batch_size = eval_batch_size
        self.max_seq_length = max_seq_length
        self.input_fields = ["text"]
        self.num_labels = self.task_num_labels[self.data_name]
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, use_fast=True)
    def setup(self, stage: str):
        """Load the saved DatasetDict, double the train split with a shuffled
        copy of itself, tokenize all splits and set torch tensor formatting."""
        self.original_dataset = datasets.load_from_disk(os.path.join(self.data_dir, self.filename))
        test_dataset = pd.DataFrame(self.original_dataset['test'])
        train_dataset = pd.DataFrame(self.original_dataset['train'])
        # randomly shuffle rows
        sampled = train_dataset.sample(frac=1).reset_index(drop=True)
        combined_dataset = pd.concat([train_dataset, sampled], ignore_index=True)
        combined_dataset = combined_dataset.sample(frac=1).reset_index(drop=True) # shuffle original and augmented rows
        combined_dataset["label"] = pd.to_numeric(combined_dataset["label"])
        # NOTE(review): test_dataset was already built above; this rebuild is redundant
        test_dataset = pd.DataFrame(self.original_dataset['test'])
        self.dataset = DatasetDict({"train": Dataset.from_pandas(combined_dataset),
                                    "test": Dataset.from_pandas(test_dataset)})
        for split in self.dataset.keys():
            self.dataset[split] = self.dataset[split].map(
                self.convert_to_features,
                batched=True,
                remove_columns=["label"],
            )
            # keep only the tokenizer outputs the model's forward() accepts
            self.columns = [c for c in self.dataset[split].column_names if c in self.loader_columns]
            self.dataset[split].set_format(type="torch", columns=self.columns)
        self.eval_splits = [x for x in self.dataset.keys() if "validation" in x]
    def prepare_data(self):
        # IO-only hook: just verify the dataset can be read from disk
        datasets.load_from_disk(os.path.join(self.data_dir, self.filename))
    def train_dataloader(self):
        return DataLoader(self.dataset["train"], batch_size=self.train_batch_size, shuffle=True)
    def test_dataloader(self):
        return DataLoader(self.dataset["test"], batch_size=self.eval_batch_size)
    def predict_dataloader(self):
        # prediction runs over the same test split
        return DataLoader(self.dataset["test"], batch_size=self.eval_batch_size)
    def convert_to_features(self, example_batch, indices=None):
        """Tokenize a batch (padding/truncating to max_seq_length) and rename
        'label' to 'labels' for the model's forward signature."""
        # Either encode single sentence or sentence pairs
        if len(self.input_fields) > 1:
            texts_or_text_pairs = list(zip(example_batch[self.input_fields[0]], example_batch[self.input_fields[1]]))
        else:
            texts_or_text_pairs = example_batch[self.input_fields[0]]
        # Tokenize the text/text pairs
        features = self.tokenizer.batch_encode_plus(
            texts_or_text_pairs, max_length=self.max_seq_length, padding='max_length', truncation=True
        )
        # Rename label to labels to make it easier to pass to model forward
        features["labels"] = example_batch["label"]
        return features
class finetuner(LightningModule):
    """LightningModule wrapping a HuggingFace sequence-classification model;
    trains with AdamW plus a linear warmup/decay schedule and reports
    accuracy and macro-F1 on the test split."""
    def __init__(
        self,
        model_name_or_path: str,
        num_labels: int,
        learning_rate: float = 2e-5,
        adam_epsilon: float = 1e-8,
        warmup_steps: int = 0,
        weight_decay: float = 0.0,
        train_batch_size: int = 4,
        eval_batch_size: int = 4,
        eval_splits: Optional[list] = None,
        **kwargs,
    ):
        """Build the classification model and load the evaluation metrics."""
        super().__init__()
        self.save_hyperparameters()
        self.config = AutoConfig.from_pretrained(model_name_or_path, num_labels=num_labels)
        self.model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, config=self.config)
        self.acc = evaluate.load("accuracy")
        self.f1 = evaluate.load("f1")
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_idx):
        # HF models return (loss, logits, ...) when 'labels' is in the batch
        outputs = self(**batch)
        loss = outputs[0]
        return loss
    def test_step(self, batch, batch_idx, dataloader_idx=0):
        """Forward a test batch; collect loss, hard predictions and labels."""
        outputs = self(**batch)
        val_loss, logits = outputs[:2]
        if self.hparams.num_labels > 1:
            preds = torch.argmax(logits, axis=1)
        elif self.hparams.num_labels == 1:
            # single-output (regression-style) head: use the raw value
            preds = logits.squeeze()
        labels = batch["labels"]
        return {"loss": val_loss, "preds": preds, "labels": labels}
    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        """Same as test_step but also returns the raw logits."""
        val_loss, logits = self(**batch)[:2]
        if self.hparams.num_labels > 1:
            preds = torch.argmax(logits, axis=1)
        elif self.hparams.num_labels == 1:
            preds = logits.squeeze()
        labels = batch["labels"]
        return [logits, preds, labels]
    def test_epoch_end(self, outputs):
        """Aggregate all test batches and log loss, accuracy and macro-F1."""
        preds = torch.cat([x["preds"] for x in outputs]).detach().cpu().numpy()
        labels = torch.cat([x["labels"] for x in outputs]).detach().cpu().numpy()
        # soft_preds = torch.cat([x["soft_preds"] for x in outputs]).detach().cpu().numpy()
        loss = torch.stack([x["loss"] for x in outputs]).mean()
        # NOTE(review): logged under 'val_loss' although computed on the test split
        self.log("val_loss", loss, prog_bar=True)
        accuracy = self.acc.compute(predictions=preds, references=labels)["accuracy"]
        f1 = self.f1.compute(predictions=preds, references=labels, average='macro')["f1"]
        self.log_dict({"accuracy": accuracy, "f1": f1}, prog_bar=True)
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        # no weight decay for biases and LayerNorm weights
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.hparams.warmup_steps,
            num_training_steps=self.trainer.estimated_stepping_batches,
        )
        # step the LR scheduler every optimizer step
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return [optimizer], [scheduler]
| 9,031 | 33.212121 | 120 | py |
fce | fce-main/paper_demo/calibration_metrics.py | import pickle
import numpy as np
import os
import torch
import pandas as pd
import skfuzzy
from tqdm import tqdm
def expected_calibration_error(y_true, y_pred, num_bins):
    """Expected calibration error over `num_bins` equal-width confidence bins.

    Returns (per_bin, ece): per_bin holds, for every occupied bin with at
    least one correct prediction, the cumulative |accuracy - confidence| gap
    normalized by that bin's correct count (0 when the bin has no correct
    prediction); ece is the cumulative gap over all samples.
    """
    hits = (np.argmax(y_pred, axis=-1) == y_true).astype(np.float32)
    top_conf = np.max(y_pred, axis=-1)
    edges = np.linspace(start=0, stop=1.0, num=num_bins)
    bin_idx = np.digitize(top_conf, bins=edges, right=True)
    gap_total = 0
    per_bin = []
    for k in range(num_bins):
        in_bin = bin_idx == k
        if not np.any(in_bin):
            continue
        gap_total += np.abs(np.sum(hits[in_bin] - top_conf[in_bin]))
        n_correct = sum(hits[in_bin])
        if n_correct > 0:
            per_bin.append(round(gap_total / n_correct, 3))
        else:
            per_bin.append(0)
    return per_bin, round(gap_total / y_pred.shape[0], 3)
def ece_binning(y_true, y_pred, num_bins):
    """Collect top-class confidences per equal-width confidence bin.

    Occupied bins with at least one correct prediction contribute their
    confidence array; occupied bins with none contribute the placeholder
    [0]; empty bins are skipped entirely.
    """
    hits = (np.argmax(y_pred, axis=-1) == y_true).astype(np.float32)
    top_conf = np.max(y_pred, axis=-1)
    edges = np.linspace(start=0, stop=1.0, num=num_bins)
    bin_idx = np.digitize(top_conf, bins=edges, right=True)
    collected = []
    for k in range(num_bins):
        in_bin = bin_idx == k
        if not np.any(in_bin):
            continue
        collected.append(top_conf[in_bin] if sum(hits[in_bin]) > 0 else [0])
    return collected
def expected_calibration_error_plot(y_true, y_pred, num_bins):
    """Reliability-diagram points: for each occupied confidence bin, return
    (summed confidence / N, summed accuracy / N) as parallel x/y lists."""
    hits = (np.argmax(y_pred, axis=-1) == y_true).astype(np.float32)
    top_conf = np.max(y_pred, axis=-1)
    edges = np.linspace(start=0, stop=1.0, num=num_bins)
    bin_idx = np.digitize(top_conf, bins=edges, right=True)
    n_total = y_pred.shape[0]
    xs, ys = [], []
    for k in range(num_bins):
        in_bin = bin_idx == k
        if np.any(in_bin):
            xs.append(np.sum(top_conf[in_bin]) / n_total)
            ys.append(np.sum(hits[in_bin]) / n_total)
    return xs, ys
def intervals(parts, duration):
    """Left edges of `parts` equal subintervals covering [0, duration)."""
    step = duration / parts
    return [step * k for k in range(parts)]
def fuzzy_binning(x, bins):
    """Trapezoidal fuzzy membership of scalar `x` in each of `bins`
    equal-width bins over [0, 1]; returns {bin_index: membership (3 dp)}."""
    edges = intervals(bins, 1)
    edges.append(1.0)
    sample_arr = np.array([x])
    memberships = {}
    for idx in range(len(edges) - 1):
        lo, hi = edges[idx], edges[idx + 1]
        centre = (lo + hi) / 2
        # shoulder points of the trapezoid: plateau spans [shoulder_l, shoulder_r]
        shoulder_l = (lo + centre) / 2
        shoulder_r = (centre + hi) / 2
        foot_l = (2 * lo) - shoulder_l
        foot_r = (2 * hi) - shoulder_r
        trap = skfuzzy.trapmf(sample_arr, np.array([foot_l, shoulder_l, shoulder_r, foot_r]))
        memberships[idx] = round(float(trap), 3)
    return memberships
def fuzzy_conf(g, correct, prob_y):
    """Membership-weighted accuracy and confidence sums taken over the
    correctly classified samples only (correct[i] == 1)."""
    weighted_acc = [g[i] for i in range(len(correct)) if correct[i] == 1]
    weighted_conf = [g[i] * prob_y[i] for i in range(len(correct)) if correct[i] == 1]
    return (sum(weighted_acc), sum(weighted_conf))
def fuzzy_calibration_error(y_true, y_pred, n_bins):
    """Fuzzy-binned calibration error: each sample contributes to every bin
    in proportion to its trapezoidal membership.

    Returns (per_bin_gaps, fce): the per-bin |accuracy - confidence| gaps and
    their membership-weighted average rounded to 3 decimals.
    """
    hits = (np.argmax(y_pred, axis=-1) == y_true).astype(np.float32)
    top_conf = np.max(y_pred, axis=-1)
    # membership of every sample in every fuzzy bin
    memberships = [fuzzy_binning(p, bins=n_bins) for p in top_conf]
    per_bin_gaps = []
    weighted_gap_total = 0
    weight_total = 0
    for b in range(n_bins):
        weights = [m[b] for m in memberships]
        bin_weight = sum(weights)
        acc_sum, conf_sum = fuzzy_conf(weights, hits, top_conf)
        if bin_weight != 0:
            bin_acc = acc_sum / bin_weight
            bin_conf = conf_sum / bin_weight
        else:
            bin_acc = 0
            bin_conf = 0
        gap = abs(bin_acc - bin_conf)
        per_bin_gaps.append(gap)
        weighted_gap_total += bin_weight * gap
        weight_total += bin_weight
    fce = round(float(weighted_gap_total / weight_total), 3)
    return per_bin_gaps, fce
| 4,061 | 24.074074 | 92 | py |
fce | fce-main/paper_demo/binning.py | from utils import *
import argparse
import warnings
import pickle
import numpy as np
import os
import torch
import pandas as pd
from tqdm import tqdm
from calibration_utils import *
warnings.filterwarnings("ignore")
# -------------------------------------------------------------------------------------------------------------------#
# Command-line interface for the calibration binning sweep.
parser = argparse.ArgumentParser(description='Binning setup')
parser.add_argument('ds_name', type=str,
                    help='Dataset name (news/agnews/imdb)')
parser.add_argument('size', type=int,
                    help='Train data size')
parser.add_argument('bins', type=int,
                    help='Number of bins to calculate calibration error')
parser.add_argument('result_dir', type=str,
                    help='Directory location for results')
args = parser.parse_args()
# unpack CLI arguments into module-level names used below
size = args.size
ds_name = args.ds_name
result_dir = args.result_dir
n_bins = args.bins
# -------------------------------------------------------------------------------------------------------------------#
# Analysis of predictions: sweep the bin count from 1..n_bins and record
# under-/over-confidence plus ECE and FCE (with per-bin breakdowns).
#
# Fixes: the three pickle files were re-read from disk on every loop
# iteration, and the correct/incorrect split plus ece_dict were rebuilt
# identically each time — all of that is loop-invariant and is now hoisted;
# the loop variable no longer shadows the CLI argument `n_bins`.
data_name = ds_name + str(size)

# Load the saved prediction artefacts once; they do not change across bin counts.
with open(os.path.join(result_dir, 'predict_probs' + data_name + '.pickle'), 'rb') as handle:
    soft_preds = pickle.load(handle)
with open(os.path.join(result_dir, 'predicted_labels' + data_name + '.pickle'), 'rb') as handle:
    preds = pickle.load(handle)
with open(os.path.join(result_dir, 'labels' + data_name + '.pickle'), 'rb') as handle:
    labels = pickle.load(handle)

# Split the softmaxed predictions into correct / incorrect once.
incorrect = []
correct = []
for i in range(len(preds)):
    row = [torch.softmax(soft_preds[i], 0), int(preds[i]), int(labels[i])]
    if preds[i] != labels[i]:
        incorrect.append(row)
    else:
        correct.append(row)

# overconfidence --> expectation of confidence over incorrect predictions
of = [max(item[0]) for item in incorrect]
# underconfidence --> expectation of 1-confidence over correct predictions
uf = [1 - max(item[0]) for item in correct]

ece_dict = {'soft_preds': np.array([np.array(torch.softmax(x, 0)) for x in soft_preds]),
            'preds': [int(x) for x in preds],
            'labels': [int(x) for x in labels]}

ece_list = []
fce_list = []
uf_list = []
of_list = []
bins_list = []
ece_breakdown = []
fce_breakdown = []
print("Binning probabilities...")
for bin_count in tqdm(range(1, n_bins + 1)):
    # uf/of are constant across bin counts but kept per-row so the output
    # DataFrame columns stay aligned with the original format
    uf_list.append(np.mean(uf))
    of_list.append(np.mean(of))
    ece_vals, ece = expected_calibration_error(np.array(ece_dict['labels']),
                                               ece_dict['soft_preds'], num_bins=bin_count)
    bins_list.append(bin_count)
    ece_list.append(ece)
    ece_breakdown.append(ece_vals)
    fce_vals, fce = fuzzy_calibration_error(np.array(ece_dict['labels']), ece_dict['soft_preds'], bin_count)
    fce_list.append(fce)
    fce_breakdown.append(fce_vals)
print("Done!")
print("Saving calibration evaluation results...")
data = pd.DataFrame(list(zip(bins_list, uf_list, of_list,
                             ece_list, ece_breakdown, fce_list, fce_breakdown)),
                    columns=['bins', 'uf', 'of', 'ece', 'ece_breakdown',
                             'fce', 'fce_breakdown'])
data.to_csv(os.path.join(result_dir, 'calibration_eval.csv'), index=False)
print("Done!")
| 3,474 | 33.068627 | 118 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/adversarial_autoencoder.py | import numpy as np
import tensorflow as tf
from tensorflow.compat.v1.layers import Dense
from tensorflow.nn import leaky_relu
from tensorflow.python.keras.layers import Flatten, Conv2D, Dropout
from models.customlayers import build_unified_decoder, build_unified_encoder
def adversarial_autoencoder(z, x, dropout_rate, dropout, config):
    """Adversarial autoencoder graph (TF1 style).

    Builds encoder -> dropout bottleneck -> decoder for the input batch `x`,
    plus a 3-layer MLP discriminator that scores the encoder's latent code
    ('d_') against the prior sample `z` ('d') and a perturbed code z_hat
    ('d_hat').

    Args:
        z: latent prior sample, shape [batchsize, zDim].
        x: input image batch.
        dropout_rate: dropout probability for the bottleneck Dropout layer.
        dropout: training flag forwarded to the Dropout calls.
        config: provides intermediateResolutions, zDim, outputWidth,
            numChannels, batchsize.

    Returns:
        dict with keys 'z_', 'x_hat', 'd_', 'd', 'z_hat', 'd_hat'.
    """
    outputs = {}
    with tf.variable_scope('Encoder'):
        encoder = build_unified_encoder(x.get_shape().as_list(), config.intermediateResolutions)
        temp_out = x
        for layer in encoder:
            temp_out = layer(temp_out)
    with tf.variable_scope("Bottleneck"):
        # 1x1 convs compress channels 8x before the dense latent projection
        intermediate_conv = Conv2D(temp_out.get_shape().as_list()[3] // 8, 1, padding='same')
        intermediate_conv_reverse = Conv2D(temp_out.get_shape().as_list()[3], 1, padding='same')
        dropout_layer = Dropout(dropout_rate)
        temp_out = intermediate_conv(temp_out)
        reshape = temp_out.get_shape().as_list()[1:]
        z_layer = Dense(config.zDim)
        dec_dense = Dense(np.prod(reshape))
        outputs['z_'] = z_ = dropout_layer(z_layer(Flatten()(temp_out)), dropout)
        reshaped = tf.reshape(dropout_layer(dec_dense(z_), dropout), [-1, *reshape])
        temp_out = intermediate_conv_reverse(reshaped)
    with tf.variable_scope('Decoder'):
        decoder = build_unified_decoder(config.outputWidth, config.intermediateResolutions, config.numChannels)
        # Decode: z -> x_hat
        for layer in decoder:
            temp_out = layer(temp_out)
        outputs['x_hat'] = temp_out
    # Discriminator
    with tf.variable_scope('Discriminator'):
        discriminator = [
            Dense(50, activation=leaky_relu),
            Dense(50, activation=leaky_relu),
            Dense(1)
        ]
        # fake: score the encoder's latent code
        temp_out = z_
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_'] = temp_out
        # real: score the prior sample
        temp_out = z
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d'] = temp_out
        # adding noise
        epsilon = tf.random_uniform([config.batchsize, 1], minval=0., maxval=1.)
        outputs['z_hat'] = z_hat = z + epsilon * (z - z_)
        temp_out = z_hat
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_hat'] = temp_out
    return outputs
| 2,383 | 31.657534 | 111 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/customlayers.py | import math
import tensorflow as tf
from tensorflow.compat.v1.layers import Conv2D, Conv2DTranspose, BatchNormalization
from tensorflow.keras.layers import LeakyReLU, ReLU, LayerNormalization
def sample(dec_dense, decoder, reshape, tensor, zDim):
    """Decode a batch of latent vectors drawn from a standard normal prior;
    batch size is taken from `tensor`'s leading dimension."""
    z_random = tf.random.normal(shape=(tf.shape(tensor)[0], zDim))
    decoded = tf.reshape(dec_dense(z_random), [-1, *reshape])
    for block in decoder:
        decoded = block(decoded)
    return decoded
def build_unified_encoder(input_shape, intermediateResolutions, use_batchnorm=True):
    """Build a list of [strided Conv2D -> norm -> LeakyReLU] stages that
    halve the spatial resolution from input_shape[1] down to
    intermediateResolutions[0]; filters double per stage, capped at 128."""
    num_pooling = int(math.log(input_shape[1], 2) - math.log(float(intermediateResolutions[0]), 2))
    layers = []
    for stage in range(num_pooling):
        n_filters = int(min(128, 32 * (2 ** stage)))
        layers.append(Conv2D(filters=n_filters, kernel_size=5, strides=2, padding='same', name=f'enc_conv2D_{stage}'))
        layers.append(BatchNormalization() if use_batchnorm else LayerNormalization([1, 2]))
        layers.append(LeakyReLU())
    return layers
def build_unified_decoder(outputWidth, intermediateResolutions, outputChannels, final_activation=tf.identity, use_batchnorm=True):
    """Build a list of layers upsampling from intermediateResolutions[0] to
    outputWidth: leading norm+ReLU, then [Conv2DTranspose -> norm ->
    LeakyReLU] stages (filters halving from 128, floored at 32), closed by a
    1x1 Conv2D projecting to outputChannels with `final_activation`."""
    num_upsampling = int(math.log(outputWidth, 2) - math.log(float(intermediateResolutions[0]), 2))
    layers = [
        BatchNormalization() if use_batchnorm else LayerNormalization([1, 2]),
        ReLU(),
    ]
    for stage in range(num_upsampling):
        n_filters = int(max(32, 128 / (2 ** stage)))
        layers.append(Conv2DTranspose(filters=n_filters, kernel_size=5, strides=2, padding='same', name=f'dec_Conv2DT_{stage}'))
        layers.append(BatchNormalization() if use_batchnorm else LayerNormalization([1, 2]))
        layers.append(LeakyReLU())
    layers.append(Conv2D(filters=outputChannels, kernel_size=1, strides=1, padding='same', name='dec_Conv2D_final', activation=final_activation))
    return layers
| 1,884 | 47.333333 | 146 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/constrained_adversarial_autoencoder_Chen.py | import numpy as np
import tensorflow as tf
from bunch import Bunch
from tensorflow.compat.v1.layers import Dense
from tensorflow.nn import leaky_relu
from tensorflow.python.keras.layers import AvgPool2D, ReLU, Add, LayerNormalization
from tensorflow.python.layers.convolutional import Conv2D, Conv2DTranspose
from tensorflow.python.layers.core import Flatten
def constrained_adversarial_autoencoder_Chen(z, x, dropout_rate, dropout, config):
    """Constrained adversarial autoencoder after Chen et al. (TF1 graph).

    Residual encoder/decoder pair plus an MLP discriminator on the latent
    code; the reconstruction is re-encoded ('z_rec') so a constraint loss
    can tie it back to the original code.

    Args:
        z: latent prior sample.
        x: input image batch.
        dropout_rate, dropout: accepted for interface parity with the other
            model builders; not used by this architecture.
        config: provides zDim and batchsize (batchsize unused here).

    Returns:
        dict with keys 'z_' (encoder code), 'x_hat' (reconstruction),
        'z_rec', 'd_'/'d'/'d_hat' (discriminator scores on fake/real/
        interpolated codes) and 'z_hat' (the interpolated code).
    """
    outputs = {}
    dim = 64  # base channel width; blocks use multiples of this
    with tf.variable_scope('Encoder'):
        encoder = Bunch({
            # Model definition
            'enc_conv': Conv2D(filters=dim, kernel_size=3, padding='same'),
            'enc_res1_conv1': Conv2D(filters=2 * dim, kernel_size=3, padding='same'),
            'enc_res1_layernorm1': LayerNormalization([1, 2]),
            'enc_res1_conv2': Conv2D(filters=2 * dim, kernel_size=3, strides=2, padding='same'),
            'enc_res1_layernorm2': LayerNormalization([1, 2]),
            'enc_res1_shortcut1': Conv2D(filters=2 * dim, kernel_size=1, padding='same'),
            'enc_res1_shortcut2': AvgPool2D(),
            'enc_res2_conv1': Conv2D(filters=4 * dim, kernel_size=3, padding='same'),
            'enc_res2_layernorm1': LayerNormalization([1, 2]),
            'enc_res2_conv2': Conv2D(filters=4 * dim, kernel_size=3, strides=2, padding='same'),
            'enc_res2_layernorm2': LayerNormalization([1, 2]),
            'enc_res2_shortcut1': Conv2D(filters=4 * dim, kernel_size=1, padding='same'),
            'enc_res2_shortcut2': AvgPool2D(),
            'enc_res3_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'enc_res3_layernorm1': LayerNormalization([1, 2]),
            'enc_res3_conv2': Conv2D(filters=8 * dim, kernel_size=3, strides=2, padding='same'),
            'enc_res3_layernorm2': LayerNormalization([1, 2]),
            'enc_res3_shortcut1': Conv2D(filters=8 * dim, kernel_size=1, padding='same'),
            'enc_res3_shortcut2': AvgPool2D(),
            'enc_res4_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'enc_res4_layernorm1': LayerNormalization([1, 2]),
            'enc_res4_conv2': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'enc_res4_layernorm2': LayerNormalization([1, 2]),
            'enc_flatten': Flatten(),
            'enc_dense': Dense(config.zDim),
        })
        features, z_ = evaluate_encoder(encoder, x)
        outputs['z_'] = z_
    with tf.variable_scope('Decoder'):
        decoder = Bunch({
            # Model definition
            'dec_1': Dense(np.prod(features.get_shape().as_list()[1:])),
            'dec_res1_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'dec_res1_layernorm1': LayerNormalization([1, 2]),
            'dec_res1_conv2': Conv2DTranspose(filters=8 * dim, kernel_size=3, padding='same'),
            'dec_res1_layernorm2': LayerNormalization([1, 2]),
            'dec_res2_conv1': Conv2D(filters=4 * dim, kernel_size=3, padding='same'),
            'dec_res2_layernorm1': LayerNormalization([1, 2]),
            'dec_res2_conv2': Conv2DTranspose(filters=4 * dim, kernel_size=3, strides=2, padding='same'),
            'dec_res2_layernorm2': LayerNormalization([1, 2]),
            'dec_res2_shortcut': Conv2DTranspose(filters=4 * dim, kernel_size=1, padding='same', strides=2),
            'dec_res3_conv1': Conv2D(filters=2 * dim, kernel_size=3, padding='same'),
            'dec_res3_layernorm1': LayerNormalization([1, 2]),
            'dec_res3_conv2': Conv2DTranspose(filters=2 * dim, kernel_size=3, strides=2, padding='same'),
            'dec_res3_layernorm2': LayerNormalization([1, 2]),
            'dec_res3_shortcut': Conv2DTranspose(filters=2 * dim, kernel_size=1, padding='same', strides=2),
            'dec_res4_conv1': Conv2D(filters=dim, kernel_size=3, padding='same'),
            'dec_res4_layernorm1': LayerNormalization([1, 2]),
            'dec_res4_conv2': Conv2DTranspose(filters=dim, kernel_size=3, strides=2, padding='same'),
            'dec_res4_layernorm2': LayerNormalization([1, 2]),
            'dec_res4_shortcut': Conv2DTranspose(filters=dim, kernel_size=1, padding='same', strides=2),
            # post process
            'dec_layernorm': LayerNormalization([1, 2]),
            'dec_conv': Conv2D(1, 1, padding='same'),
        })
        outputs['x_hat'] = x_hat = evaluate_decoder(decoder, z_, features.get_shape().as_list()[1:])
        # projecting reconstruction to latent space for constrained part
        outputs['z_rec'] = evaluate_encoder(encoder, x_hat)[1]
    # Discriminator
    with tf.variable_scope('Discriminator'):
        discriminator = [
            Dense(400, activation=leaky_relu),
            Dense(200, activation=leaky_relu),
            Dense(1)
        ]
        # fake: score the encoder's latent code
        temp_out = z_
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_'] = temp_out
        # real: score the prior sample
        temp_out = z
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d'] = temp_out
        # reparametrization
        epsilon = tf.random_uniform([], 0.0, 1.0)
        # convex combination of real and fake codes, scored by the discriminator
        outputs['z_hat'] = z_hat = epsilon * z + (1 - epsilon) * z_
        temp_out = z_hat
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_hat'] = temp_out
    return outputs
def evaluate_encoder(encoder, x):
    """Run the Chen encoder layer bundle on `x`.

    Wires the pre-built layers into four pre-activation residual blocks
    (blocks 1-3 downsample via strided conv with a 1x1-conv + avg-pool
    shortcut; block 4 keeps resolution with an identity shortcut).

    Returns:
        (feature_map, latent): the final feature map before flattening and
        the dense latent projection.
    """
    # Evaluate
    output = encoder.enc_conv(x)
    # residual block 1
    output_temp = encoder.enc_res1_conv2(
        ReLU()(encoder.enc_res1_layernorm2(encoder.enc_res1_conv1(ReLU()(encoder.enc_res1_layernorm1(output))))))
    output = Add()([output_temp, encoder.enc_res1_shortcut2(encoder.enc_res1_shortcut1(output))])
    # residual block 2
    output_temp = encoder.enc_res2_conv2(
        ReLU()(encoder.enc_res2_layernorm2(encoder.enc_res2_conv1(ReLU()(encoder.enc_res2_layernorm1(output))))))
    output = Add()([output_temp, encoder.enc_res2_shortcut2(encoder.enc_res2_shortcut1(output))])
    # residual block 3
    output_temp = encoder.enc_res3_conv2(
        ReLU()(encoder.enc_res3_layernorm2(encoder.enc_res3_conv1(ReLU()(encoder.enc_res3_layernorm1(output))))))
    output = Add()([output_temp, encoder.enc_res3_shortcut2(encoder.enc_res3_shortcut1(output))])
    # residual block 4 (identity shortcut, no downsampling)
    output_temp = encoder.enc_res4_conv2(
        ReLU()(encoder.enc_res4_layernorm2(encoder.enc_res4_conv1(ReLU()(encoder.enc_res4_layernorm1(output))))))
    output = Add()([output_temp, output])
    flatten = encoder.enc_flatten(output)
    return output, encoder.enc_dense(flatten)
def evaluate_decoder(decoder, z, reshape):
    """Run the Chen decoder layer bundle on latent `z`.

    Projects `z` to a dense feature map of shape `reshape`, applies four
    pre-activation residual blocks (block 1 has an identity shortcut; blocks
    2-4 upsample via strided transposed conv with a strided 1x1 transposed
    conv shortcut), then a final layernorm + ReLU + 1x1 conv.
    """
    # Evaluate
    output = tf.reshape(decoder.dec_1(z), [-1, *reshape])
    # residual block 1 (identity shortcut, no upsampling)
    output_temp = decoder.dec_res1_conv2(ReLU()(decoder.dec_res1_layernorm2(decoder.dec_res1_conv1(ReLU()(decoder.dec_res1_layernorm1(output))))))
    output = Add()([output_temp, output])
    # residual block 2
    output_temp = decoder.dec_res2_conv2(ReLU()(decoder.dec_res2_layernorm2(decoder.dec_res2_conv1(ReLU()(decoder.dec_res2_layernorm1(output))))))
    output = Add()([output_temp, decoder.dec_res2_shortcut(output)])
    # residual block 3
    output_temp = decoder.dec_res3_conv2(ReLU()(decoder.dec_res3_layernorm2(decoder.dec_res3_conv1(ReLU()(decoder.dec_res3_layernorm1(output))))))
    output = Add()([output_temp, decoder.dec_res3_shortcut(output)])
    # residual block 4
    output_temp = decoder.dec_res4_conv2(ReLU()(decoder.dec_res4_layernorm2(decoder.dec_res4_conv1(ReLU()(decoder.dec_res4_layernorm1(output))))))
    output = Add()([output_temp, decoder.dec_res4_shortcut(output)])
    # final projection to a single output channel
    output = decoder.dec_layernorm(output)
    output = ReLU()(output)
    return decoder.dec_conv(output)
| 7,781 | 46.742331 | 146 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/constrained_autoencoder.py | import numpy as np
import tensorflow as tf
from tensorflow.compat.v1.layers import Dense
from tensorflow.python.keras.layers import Conv2D, Flatten, Dropout
from models.customlayers import build_unified_encoder, build_unified_decoder
def constrained_autoencoder(x, dropout_rate, dropout, config):
    """Convolutional autoencoder with a latent constraint (TF1 graph).

    Encoder -> dropout bottleneck -> decoder; the reconstruction is fed back
    through the encoder so a constraint loss can compare 'z_rec' to 'z'.

    Args:
        x: input image batch.
        dropout_rate: dropout probability for the bottleneck Dropout layer.
        dropout: training flag forwarded to the Dropout calls.
        config: provides intermediateResolutions, zDim, outputWidth, numChannels.

    Returns:
        dict with keys 'z', 'x_hat', 'z_rec'.
    """
    outputs = {}
    with tf.variable_scope('Encoder'):
        encoder = build_unified_encoder(x.get_shape().as_list(), config.intermediateResolutions)
        temp_out = x
        for layer in encoder:
            temp_out = layer(temp_out)
    with tf.variable_scope("Bottleneck"):
        # 1x1 convs compress channels 8x before the dense latent projection
        intermediate_conv = Conv2D(temp_out.get_shape().as_list()[3] // 8, 1, padding='same')
        intermediate_conv_reverse = Conv2D(temp_out.get_shape().as_list()[3], 1, padding='same')
        dropout_layer = Dropout(dropout_rate)
        temp_out = intermediate_conv(temp_out)
        reshape = temp_out.get_shape().as_list()[1:]
        z_layer = Dense(config.zDim)
        dec_dense = Dense(np.prod(reshape))
        outputs['z'] = z = dropout_layer(z_layer(Flatten()(temp_out)), dropout)
        reshaped = tf.reshape(dropout_layer(dec_dense(z), dropout), [-1, *reshape])
        temp_out = intermediate_conv_reverse(reshaped)
    with tf.variable_scope('Decoder'):
        decoder = build_unified_decoder(config.outputWidth, config.intermediateResolutions, config.numChannels)
        # Decode: z -> x_hat
        for layer in decoder:
            temp_out = layer(temp_out)
        outputs['x_hat'] = temp_out
    with tf.variable_scope('Encoder'):
        # mapping reconstruction to latent space for constrained part
        for layer in encoder:
            temp_out = layer(temp_out)
        outputs['z_rec'] = dropout_layer(z_layer(Flatten()(intermediate_conv(temp_out))), dropout)
    return outputs
| 1,813 | 36.020408 | 111 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/fanogan.py | import numpy as np
import tensorflow as tf
from tensorflow import sigmoid
from tensorflow.compat.v1.layers import Conv2D, Flatten
from tensorflow.compat.v1.layers import Dense
from tensorflow.python.keras.layers import Conv2D, Dropout, Flatten
from models.customlayers import build_unified_decoder, build_unified_encoder
def fanogan(z, x, dropout_rate, dropout, config):
    """f-AnoGAN graph (TF1 style): encoder, generator and discriminator.

    The generator decodes both the prior sample `z` (GAN training, 'x_')
    and the encoder's code 'z_enc' (encoder training, 'x_enc'). The
    discriminator scores real images, generated images, a random
    real/generated mix 'x_hat' (used for a gradient-penalty term) and the
    encoder reconstruction, exposing its feature maps for feature-matching
    losses.

    Returns:
        dict with keys 'z_enc', 'x_enc', 'x_', 'd_fake_features', 'd_',
        'd_features', 'd', 'x_hat', 'd_hat_features', 'd_hat',
        'd_enc_features', 'd_enc'.
    """
    outputs = {}
    # Encoder
    with tf.variable_scope('Encoder'):
        encoder = build_unified_encoder(x.get_shape().as_list(), config.intermediateResolutions)
        temp_out = x
        for layer in encoder:
            temp_out = layer(temp_out)
        temp_temp_out = temp_out
        # 1x1 conv compresses channels 8x before the dense latent projection
        intermediate_conv = Conv2D(temp_temp_out.get_shape().as_list()[3] // 8, 1, padding='same')
        dropout_layer = Dropout(dropout_rate)
        temp_out = intermediate_conv(temp_out)
        reshape = temp_out.get_shape().as_list()[1:]
        z_layer = Dense(config.zDim)
        # tanh restricts the encoded code to [-1, 1]
        outputs['z_enc'] = z_enc = tf.nn.tanh(dropout_layer(z_layer(Flatten()(temp_out)), dropout))
    # Generator
    with tf.variable_scope('Generator'):
        intermediate_conv_reverse = Conv2D(temp_temp_out.get_shape().as_list()[3], 1, padding='same')
        dec_dense = Dense(np.prod(reshape))
        generator = build_unified_decoder(config.outputWidth, config.intermediateResolutions, config.numChannels, use_batchnorm=False)
        temp_out_z_enc = intermediate_conv_reverse(tf.reshape(dropout_layer(dec_dense(z_enc), dropout), [-1, *reshape]))
        # encoder training:
        for layer in generator:
            temp_out_z_enc = layer(temp_out_z_enc)
        outputs['x_enc'] = x_enc = sigmoid(temp_out_z_enc)  # recon_img
        # generator training
        temp_out = intermediate_conv_reverse(tf.reshape(dropout_layer(dec_dense(z), dropout), [-1, *reshape]))
        for layer in generator:
            temp_out = layer(temp_out)
        outputs['x_'] = x_ = sigmoid(temp_out)
    # Discriminator
    with tf.variable_scope('Discriminator'):
        discriminator = build_unified_encoder(x_.get_shape().as_list(), config.intermediateResolutions, use_batchnorm=False)
        discriminator_dense = Dense(1)
        # fake:
        temp_out = x_
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_fake_features'] = temp_out
        outputs['d_'] = discriminator_dense(temp_out)
        # real:
        temp_out = x
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_features'] = temp_out  # image_features
        outputs['d'] = discriminator_dense(temp_out)
        # per-sample random mix of real and generated images
        alpha = tf.random_uniform(shape=[config.batchsize, 1], minval=0., maxval=1.)  # eps
        diff = tf.reshape((x_ - x), [config.batchsize, np.prod(x.get_shape().as_list()[1:])])
        outputs['x_hat'] = x_hat = x + tf.reshape(alpha * diff, [config.batchsize, *x.get_shape().as_list()[1:]])
        temp_out = x_hat
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_hat_features'] = temp_out
        outputs['d_hat'] = discriminator_dense(temp_out)
        # encoder training:
        temp_out = x_enc
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_enc_features'] = temp_out  # recon_features
        outputs['d_enc'] = discriminator_dense(temp_out)
    return outputs
| 3,394 | 38.941176 | 134 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/autoencoder.py | import numpy as np
import tensorflow as tf
from tensorflow.compat.v1.layers import Dense
from tensorflow.python.keras.layers import Conv2D, Flatten, Dropout
from models.customlayers import build_unified_encoder, build_unified_decoder
def autoencoder(x, dropout_rate, dropout, config):
    """Plain convolutional autoencoder (TF1 graph).

    Encoder -> dropout bottleneck -> decoder.

    Fix: the decoder-side bottleneck call previously read
    ``dropout_layer(dec_dense(z))`` — without the `dropout` training flag —
    unlike the encoder-side call in this function and the same call in the
    sibling builders (adversarial_autoencoder, constrained_autoencoder),
    which all pass `dropout`. The flag is now forwarded consistently.

    Args:
        x: input image batch.
        dropout_rate: dropout probability for the bottleneck Dropout layer.
        dropout: training flag forwarded to the Dropout calls.
        config: provides intermediateResolutions, zDim, outputWidth, numChannels.

    Returns:
        dict with keys 'z' (latent code) and 'x_hat' (reconstruction).
    """
    outputs = {}
    with tf.variable_scope('Encoder'):
        encoder = build_unified_encoder(x.get_shape().as_list(), config.intermediateResolutions)
        temp_out = x
        for layer in encoder:
            temp_out = layer(temp_out)
    with tf.variable_scope("Bottleneck"):
        # 1x1 convs compress channels 8x before the dense latent projection
        intermediate_conv = Conv2D(temp_out.get_shape().as_list()[3] // 8, 1, padding='same')
        intermediate_conv_reverse = Conv2D(temp_out.get_shape().as_list()[3], 1, padding='same')
        dropout_layer = Dropout(dropout_rate)
        temp_out = intermediate_conv(temp_out)
        reshape = temp_out.get_shape().as_list()[1:]
        z_layer = Dense(config.zDim)
        dec_dense = Dense(np.prod(reshape))
        outputs['z'] = z = dropout_layer(z_layer(Flatten()(temp_out)), dropout)
        # forward the training flag here too (was missing)
        temp_out = intermediate_conv_reverse(tf.reshape(dropout_layer(dec_dense(z), dropout), [-1, *reshape]))
    with tf.variable_scope('Decoder'):
        decoder = build_unified_decoder(config.outputWidth, config.intermediateResolutions, config.numChannels)
        # Decode: z -> x_hat
        for layer in decoder:
            temp_out = layer(temp_out)
        outputs['x_hat'] = temp_out
    return outputs
| 1,485 | 35.243902 | 111 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/constrained_adversarial_autoencoder.py | import numpy as np
import tensorflow as tf
from tensorflow.compat.v1.layers import Dense
from tensorflow.nn import leaky_relu
from tensorflow.python.keras.layers import Flatten, Conv2D, Dropout
from models.customlayers import build_unified_decoder, build_unified_encoder
def constrained_adversarial_autoencoder(z, x, dropout_rate, dropout, config):
    """Constrained adversarial autoencoder (TF1 graph).

    Encoder -> dropout bottleneck -> decoder, an MLP discriminator on the
    latent code, and a re-encoding of the reconstruction ('z_rec') for the
    constraint loss.

    Fix: the decoder-side and re-encoding Dropout calls previously omitted
    the `dropout` training flag, unlike the encoder-side call in this same
    function and the matching calls in the sibling builders
    (adversarial_autoencoder, constrained_autoencoder). The flag is now
    forwarded consistently.

    Args:
        z: latent prior sample, shape [batchsize, zDim].
        x: input image batch.
        dropout_rate: dropout probability for the bottleneck Dropout layer.
        dropout: training flag forwarded to the Dropout calls.
        config: provides intermediateResolutions, zDim, outputWidth,
            numChannels, batchsize.

    Returns:
        dict with keys 'z_', 'x_hat', 'z_rec', 'd_', 'd', 'z_hat', 'd_hat'.
    """
    outputs = {}
    with tf.variable_scope('Encoder'):
        encoder = build_unified_encoder(x.get_shape().as_list(), config.intermediateResolutions)
        temp_out = x
        for layer in encoder:
            temp_out = layer(temp_out)
        temp_temp_out = temp_out
        # 1x1 conv compresses channels 8x before the dense latent projection
        intermediate_conv = Conv2D(temp_temp_out.get_shape().as_list()[3] // 8, 1, padding='same')
        dropout_layer = Dropout(dropout_rate)
        temp_out = intermediate_conv(temp_out)
        reshape = temp_out.get_shape().as_list()[1:]
        z_layer = Dense(config.zDim)
        dec_dense = Dense(np.prod(reshape))
        outputs['z_'] = z_ = dropout_layer(z_layer(Flatten()(temp_out)), dropout)
    with tf.variable_scope('Decoder'):
        decoder = build_unified_decoder(config.outputWidth, config.intermediateResolutions, config.numChannels)
        intermediate_conv_reverse = Conv2D(temp_temp_out.get_shape().as_list()[3], 1, padding='same')
        # forward the training flag here too (was missing)
        reshaped = tf.reshape(dropout_layer(dec_dense(z_), dropout), [-1, *reshape])
        temp_out = intermediate_conv_reverse(reshaped)
        # Decode: z -> x_hat
        for layer in decoder:
            temp_out = layer(temp_out)
        outputs['x_hat'] = temp_out
    with tf.variable_scope('Encoder'):
        # mapping reconstruction to latent space for constrained part
        for layer in encoder:
            temp_out = layer(temp_out)
        # forward the training flag here too (was missing)
        outputs['z_rec'] = dropout_layer(z_layer(Flatten()(intermediate_conv(temp_out))), dropout)
    # Discriminator
    with tf.variable_scope('Discriminator'):
        discriminator = [
            Dense(100, activation=leaky_relu),
            Dense(50, activation=leaky_relu),
            Dense(1)
        ]
        # fake: score the encoder's latent code
        temp_out = z_
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_'] = temp_out
        # real: score the prior sample
        temp_out = z
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d'] = temp_out
        # adding noise
        epsilon = tf.random_uniform([config.batchsize, 1], minval=0., maxval=1.)
        outputs['z_hat'] = z_hat = z + epsilon * (z - z_)
        temp_out = z_hat
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_hat'] = temp_out
    return outputs
| 2,658 | 32.2375 | 111 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/fanogan_schlegl.py | import numpy as np
import tensorflow as tf
from bunch import Bunch
from tensorflow.compat.v1.layers import Conv2D, Conv2DTranspose, Dense
from tensorflow.keras.layers import ReLU, Add, LayerNormalization, AvgPool2D
from tensorflow.python.keras.layers import Flatten
from models.customlayers import build_unified_encoder
def fanogan_schlegl(z, x, dropout_rate, dropout, config):
    """Build the f-AnoGAN graph (Schlegl et al.): an encoder mapping images to
    latent codes, a residual WGAN generator, and a residual critic, plus the
    gradient-penalty interpolate. Returns a dict of named tensors consumed by
    the training losses.

    Args:
        z: latent input tensor for the generator (sampled prior).
        x: input image batch, NHWC.
        dropout_rate, dropout: accepted for a uniform model signature; not used
            in this architecture.
        config: settings object providing intermediateResolutions, zDim,
            batchsize.
    """
    outputs = {}
    dim = 64  # base channel width; residual stages use multiples of it
    # Encoder
    with tf.variable_scope('Encoder'):
        encoder = build_unified_encoder(x.get_shape().as_list(), intermediateResolutions=config.intermediateResolutions)
        enc_dense = Dense(config.zDim)
        temp_out = x
        for layer in encoder:
            temp_out = layer(temp_out)
        outputs['z_enc'] = z_enc = tf.nn.tanh(enc_dense(Flatten()(temp_out))) # restricting encoder outputs to range [-1;1]
    # Generator
    with tf.variable_scope('Generator'):
        # All generator layers are created once in a Bunch and reused for both
        # the prior sample z and the encoder output z_enc (weight sharing).
        generator = Bunch({
            # Model definition
            'gen_1': Dense(np.prod(config.intermediateResolutions) * 8 * dim),
            'gen_res1_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'gen_res1_layernorm1': LayerNormalization([1, 2]),
            'gen_res1_conv2': Conv2DTranspose(filters=8 * dim, kernel_size=3, padding='same'),
            'gen_res1_layernorm2': LayerNormalization([1, 2]),
            'gen_res2_conv1': Conv2D(filters=4 * dim, kernel_size=3, padding='same'),
            'gen_res2_layernorm1': LayerNormalization([1, 2]),
            'gen_res2_conv2': Conv2DTranspose(filters=4 * dim, kernel_size=3, strides=2, padding='same'),
            'gen_res2_layernorm2': LayerNormalization([1, 2]),
            'gen_res2_shortcut': Conv2DTranspose(filters=4 * dim, kernel_size=1, padding='same', strides=2),
            'gen_res3_conv1': Conv2D(filters=2 * dim, kernel_size=3, padding='same'),
            'gen_res3_layernorm1': LayerNormalization([1, 2]),
            'gen_res3_conv2': Conv2DTranspose(filters=2 * dim, kernel_size=3, strides=2, padding='same'),
            'gen_res3_layernorm2': LayerNormalization([1, 2]),
            'gen_res3_shortcut': Conv2DTranspose(filters=2 * dim, kernel_size=1, padding='same', strides=2),
            'gen_res4_conv1': Conv2D(filters=1 * dim, kernel_size=3, padding='same'),
            'gen_res4_layernorm1': LayerNormalization([1, 2]),
            'gen_res4_conv2': Conv2DTranspose(filters=1 * dim, kernel_size=3, strides=2, padding='same'),
            'gen_res4_layernorm2': LayerNormalization([1, 2]),
            'gen_res4_shortcut': Conv2DTranspose(filters=1 * dim, kernel_size=1, padding='same', strides=2),
            # post process
            'gen_layernorm': LayerNormalization([1, 2]),
            'gen_conv': Conv2D(1, 1, padding='same', activation='tanh')
        })
        outputs['x_'] = x_ = evaluate_generator(generator, z, config.intermediateResolutions, dim)
        # encoder training:
        outputs['x_enc'] = x_enc = evaluate_generator(generator, z_enc, config.intermediateResolutions, dim)
    # Discriminator
    with tf.variable_scope('Discriminator'):
        # Critic layers, likewise shared across all four evaluations below.
        discriminator = Bunch({
            # Model definition
            'dis_conv': Conv2D(dim, 3, padding='same'),
            'dis_res1_conv1': Conv2D(filters=2 * dim, kernel_size=3, padding='same'),
            'dis_res1_layernorm1': LayerNormalization([1, 2]),
            'dis_res1_conv2': Conv2D(filters=2 * dim, kernel_size=3, strides=2, padding='same'),
            'dis_res1_layernorm2': LayerNormalization([1, 2]),
            'dis_res1_shortcut1': Conv2D(filters=2 * dim, kernel_size=1, padding='same'),
            'dis_res1_shortcut2': AvgPool2D(),
            'dis_res2_conv1': Conv2D(filters=4 * dim, kernel_size=3, padding='same'),
            'dis_res2_layernorm1': LayerNormalization([1, 2]),
            'dis_res2_conv2': Conv2D(filters=4 * dim, kernel_size=3, strides=2, padding='same'),
            'dis_res2_layernorm2': LayerNormalization([1, 2]),
            'dis_res2_shortcut1': Conv2D(filters=4 * dim, kernel_size=1, padding='same'),
            'dis_res2_shortcut2': AvgPool2D(),
            'dis_res3_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'dis_res3_layernorm1': LayerNormalization([1, 2]),
            'dis_res3_conv2': Conv2D(filters=8 * dim, kernel_size=3, strides=2, padding='same'),
            'dis_res3_layernorm2': LayerNormalization([1, 2]),
            'dis_res3_shortcut1': Conv2D(filters=8 * dim, kernel_size=1, padding='same'),
            'dis_res3_shortcut2': AvgPool2D(),
            'dis_res4_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'dis_res4_layernorm1': LayerNormalization([1, 2]),
            'dis_res4_conv2': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'dis_res4_layernorm2': LayerNormalization([1, 2]),
            # post process
            # 'dis_flatten': Flatten(),
            'dis_dense': Dense(1),
        })
        # fake:
        outputs['d_fake_features'], outputs['d_'] = evaluate_discriminator(discriminator, x_)
        # real:
        outputs['d_features'], outputs['d'] = evaluate_discriminator(discriminator, x)
        # add noise
        # WGAN-GP interpolate: x_hat = x + alpha * (x_ - x), alpha ~ U[0,1) per sample
        alpha = tf.random_uniform(shape=[config.batchsize, 1], minval=0., maxval=1.) # eps
        diff = tf.reshape((x_ - x), [config.batchsize, np.prod(x.get_shape().as_list()[1:])])
        outputs['x_hat'] = x_hat = x + tf.reshape(alpha * diff, [config.batchsize, *x.get_shape().as_list()[1:]])
        outputs['d_hat_features'], outputs['d_hat'] = evaluate_discriminator(discriminator, x_hat)
        # encoder training:
        outputs['d_enc_features'], outputs['d_enc'] = evaluate_discriminator(discriminator, x_enc)
    return outputs
def evaluate_generator(generator, z, intermediateResolutions, dim):
    """Run the (shared) generator layer collection on a latent batch.

    Projects z to the coarsest 8*dim-channel feature map, applies four
    pre-activation residual blocks (blocks 2-4 upsample and use learned
    shortcuts), then a final layernorm / ReLU / 1x1 tanh conv.
    """

    def _res_block(feat, conv1, norm1, conv2, norm2, shortcut=None):
        # pre-activation residual unit: (LN -> ReLU -> conv) twice, plus skip
        branch = conv1(ReLU()(norm1(feat)))
        branch = conv2(ReLU()(norm2(branch)))
        skip = feat if shortcut is None else shortcut(feat)
        return Add()([branch, skip])

    height, width = intermediateResolutions[0], intermediateResolutions[1]
    feat = tf.reshape(generator.gen_1(z), [-1, height, width, 8 * dim])
    feat = _res_block(feat,
                      generator.gen_res1_conv1, generator.gen_res1_layernorm1,
                      generator.gen_res1_conv2, generator.gen_res1_layernorm2)
    feat = _res_block(feat,
                      generator.gen_res2_conv1, generator.gen_res2_layernorm1,
                      generator.gen_res2_conv2, generator.gen_res2_layernorm2,
                      generator.gen_res2_shortcut)
    feat = _res_block(feat,
                      generator.gen_res3_conv1, generator.gen_res3_layernorm1,
                      generator.gen_res3_conv2, generator.gen_res3_layernorm2,
                      generator.gen_res3_shortcut)
    feat = _res_block(feat,
                      generator.gen_res4_conv1, generator.gen_res4_layernorm1,
                      generator.gen_res4_conv2, generator.gen_res4_layernorm2,
                      generator.gen_res4_shortcut)
    feat = ReLU()(generator.gen_layernorm(feat))
    return generator.gen_conv(feat)
def evaluate_discriminator(discriminator, x):
    """Run the (shared) critic layer collection on an image batch.

    Returns a tuple (features, score): the 4-D feature map after the last
    residual block and the dense critic output computed from it (the original
    flatten step is intentionally disabled, so the dense head acts on the map).
    """

    def _res_block(feat, conv1, norm1, conv2, norm2, short1=None, short2=None):
        # pre-activation residual unit; blocks 1-3 downsample via conv+avgpool skip
        branch = conv1(ReLU()(norm1(feat)))
        branch = conv2(ReLU()(norm2(branch)))
        skip = feat if short1 is None else short2(short1(feat))
        return Add()([branch, skip])

    feat = discriminator.dis_conv(x)
    feat = _res_block(feat,
                      discriminator.dis_res1_conv1, discriminator.dis_res1_layernorm1,
                      discriminator.dis_res1_conv2, discriminator.dis_res1_layernorm2,
                      discriminator.dis_res1_shortcut1, discriminator.dis_res1_shortcut2)
    feat = _res_block(feat,
                      discriminator.dis_res2_conv1, discriminator.dis_res2_layernorm1,
                      discriminator.dis_res2_conv2, discriminator.dis_res2_layernorm2,
                      discriminator.dis_res2_shortcut1, discriminator.dis_res2_shortcut2)
    feat = _res_block(feat,
                      discriminator.dis_res3_conv1, discriminator.dis_res3_layernorm1,
                      discriminator.dis_res3_conv2, discriminator.dis_res3_layernorm2,
                      discriminator.dis_res3_shortcut1, discriminator.dis_res3_shortcut2)
    feat = _res_block(feat,
                      discriminator.dis_res4_conv1, discriminator.dis_res4_layernorm1,
                      discriminator.dis_res4_conv2, discriminator.dis_res4_layernorm2)
    return feat, discriminator.dis_dense(feat)
| 8,423 | 51 | 154 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/autoencoder_spatial.py | import tensorflow as tf
from tensorflow.python.keras.layers import Dropout
from models.customlayers import build_unified_encoder, build_unified_decoder
def autoencoder_spatial(x, dropout_rate, dropout, config):
    """Spatial autoencoder: conv encoder -> dropout on the spatial code -> conv decoder.

    Args:
        x: input image batch, NHWC.
        dropout_rate: drop probability for the bottleneck dropout layer.
        dropout: boolean tensor/flag fed as the dropout layer's training switch.
        config: provides intermediateResolutions, outputWidth, numChannels.

    Returns:
        dict with 'z' (spatial bottleneck code) and 'x_hat' (reconstruction).
    """
    net = {}
    with tf.variable_scope('Encoder'):
        enc_layers = build_unified_encoder(x.get_shape().as_list(), config.intermediateResolutions)
        bottleneck_dropout = Dropout(dropout_rate)
        h = x
        for enc_layer in enc_layers:
            h = enc_layer(h)
        # dropout is only active when the `dropout` training switch is on
        h = bottleneck_dropout(h, training=dropout)
        net['z'] = h
    with tf.variable_scope('Decoder'):
        dec_layers = build_unified_decoder(config.outputWidth, config.intermediateResolutions, config.numChannels)
        # Decode: z -> x_hat
        for dec_layer in dec_layers:
            h = dec_layer(h)
        net['x_hat'] = h
    return net
| 900 | 31.178571 | 111 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/anovaegan.py | import numpy as np
import tensorflow as tf
from tensorflow.compat.v1.layers import Conv2D, Flatten
from tensorflow.compat.v1.layers import Dense
from tensorflow.python.keras.layers import Conv2D, Dropout, Flatten
from models.customlayers import build_unified_decoder, build_unified_encoder
def anovaegan(x, dropout_rate, dropout, config):
    """Build the AnoVAEGAN graph: a VAE (encoder + bottleneck + generator/decoder)
    plus a convolutional discriminator evaluated on real, reconstructed, and
    gradient-penalty-interpolated inputs.

    Args:
        x: input image batch, NHWC.
        dropout_rate: drop probability for the bottleneck dropout layer.
        dropout: training switch fed to the dropout calls on the latent heads.
        config: provides intermediateResolutions, zDim, outputWidth,
            numChannels, batchsize.

    Returns:
        dict of named tensors used by the training losses.
    """
    outputs = {}
    # Encoder
    with tf.variable_scope('Encoder'):
        encoder = build_unified_encoder(x.get_shape().as_list(), config.intermediateResolutions)
        temp_out = x
        for layer in encoder:
            temp_out = layer(temp_out)
        # keep a handle on the pre-bottleneck tensor; its channel count sizes
        # both the 1x1 squeeze conv here and the reverse conv in the generator
        temp_temp_out = temp_out
        intermediate_conv = Conv2D(temp_temp_out.get_shape().as_list()[3] // 8, 1, padding='same')
        dropout_layer = Dropout(dropout_rate)
        temp_out = intermediate_conv(temp_out)
        reshape = temp_out.get_shape().as_list()[1:]
        mu_layer = Dense(config.zDim)
        sigma_layer = Dense(config.zDim)
        flatten = Flatten()(temp_out)
        outputs['z_mu'] = z_mu = dropout_layer(mu_layer(flatten), dropout)
        outputs['z_log_sigma'] = z_log_sigma = dropout_layer(sigma_layer(flatten), dropout)
        outputs['z_sigma'] = z_sigma = tf.exp(z_log_sigma)
        # reparameterization trick: z = mu + eps * sigma, eps ~ N(0, I)
        z_vae = z_mu + tf.random_normal(tf.shape(z_sigma)) * z_sigma
    with tf.variable_scope("Generator"):
        intermediate_conv_reverse = Conv2D(temp_temp_out.get_shape().as_list()[3], 1, padding='same')
        dec_dense = Dense(np.prod(reshape))
        decoder = build_unified_decoder(outputWidth=config.outputWidth, intermediateResolutions=config.intermediateResolutions,
                                        outputChannels=config.numChannels,
                                        use_batchnorm=False)
        reshaped = tf.reshape(dropout_layer(dec_dense(z_vae)), [-1, *reshape])
        temp_out = intermediate_conv_reverse(reshaped)
        # Decode: z -> x_hat
        for layer in decoder:
            temp_out = layer(temp_out)
        outputs['out'] = temp_out
    # Discriminator
    with tf.variable_scope('Discriminator'):
        # discriminator layers are shared across the fake, real, and interpolated passes
        discriminator = build_unified_encoder(temp_out.get_shape().as_list(), intermediateResolutions=config.intermediateResolutions, use_batchnorm=False)
        discriminator_dense = Dense(1)
        # fake/reconstructed:
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_fake_features'] = temp_out
        outputs['d_'] = discriminator_dense(temp_out)
        # real:
        temp_out = x
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_features'] = temp_out # image_features
        outputs['d'] = discriminator_dense(temp_out)
        # for GP
        # gradient-penalty interpolate: x_hat = x + alpha * (reconstruction - x)
        alpha = tf.random_uniform(shape=[config.batchsize, 1], minval=0., maxval=1.) # eps
        diff = tf.reshape((outputs['out'] - x), [config.batchsize, np.prod(x.get_shape().as_list()[1:])])
        outputs['x_hat'] = x_hat = x + tf.reshape(alpha * diff, [config.batchsize, *x.get_shape().as_list()[1:]])
        temp_out = x_hat
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_hat_features'] = temp_out
        outputs['d_hat'] = discriminator_dense(temp_out)
    return outputs
| 3,258 | 39.234568 | 154 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/gaussian_mixture_variational_autoencoder.py | import numpy as np
import tensorflow as tf
from tensorflow.compat.v1.layers import Conv2D
from tensorflow.compat.v1.layers import Dense
from tensorflow.nn import relu
from tensorflow.python.keras.layers import Flatten, Dropout
from models.customlayers import build_unified_encoder, build_unified_decoder
def gaussian_mixture_variational_autoencoder(x, dropout_rate, dropout, config):
    """Build the GMVAE graph: encoder heads q(z|x) and q(w|x), the mixture
    posterior p(z|w,c), the decoder p(x|z), and the cluster prior p(c).

    Args:
        x: input image batch, NHWC.
        dropout_rate: drop probability for the bottleneck dropout layer.
        dropout: training switch fed to the dropout calls on the latent heads.
        config: provides intermediateResolutions, dim_w, dim_z, dim_c,
            outputWidth, numChannels.

    Returns:
        dict of named tensors used by the training losses.
    """
    layers = {}
    # encoding network q(z|x) and q(w|x)
    with tf.variable_scope('Encoder'):
        encoder = build_unified_encoder(x.get_shape().as_list(), config.intermediateResolutions)
        temp_out = x
        for layer in encoder:
            temp_out = layer(temp_out)
    with tf.variable_scope("Bottleneck"):
        # 1x1 convs squeeze the channel count before the dense heads and
        # restore it afterwards
        intermediate_conv = Conv2D(temp_out.get_shape().as_list()[3] // 8, 1, padding='same')
        intermediate_conv_reverse = Conv2D(temp_out.get_shape().as_list()[3], 1, padding='same')
        dropout_layer = Dropout(dropout_rate)
        temp_out = intermediate_conv(temp_out)
        reshape = temp_out.get_shape().as_list()[1:]
        w_mu_layer = Dense(config.dim_w)
        w_log_sigma_layer = Dense(config.dim_w)
        z_mu_layer = Dense(config.dim_z)
        z_log_sigma_layer = Dense(config.dim_z)
        dec_dense = Dense(np.prod(reshape))
        flatten = Flatten()(temp_out)
        layers['w_mu'] = w_mu = dropout_layer(w_mu_layer(flatten), dropout)
        layers['w_log_sigma'] = w_log_sigma = dropout_layer(w_log_sigma_layer(flatten), dropout)
        # reparameterization: w = mu + eps * exp(0.5 * log_sigma)
        layers['w_sampled'] = w_sampled = w_mu + tf.random_normal(tf.shape(w_log_sigma)) * tf.exp(0.5 * w_log_sigma)
        layers['z_mu'] = z_mu = dropout_layer(z_mu_layer(flatten), dropout)
        # BUGFIX: pass the `dropout` training switch here as well. Previously
        # this was the only bottleneck head whose dropout call omitted it, so
        # z_log_sigma silently fell back to the layer's default training phase,
        # inconsistent with w_mu / w_log_sigma / z_mu above.
        layers['z_log_sigma'] = z_log_sigma = dropout_layer(z_log_sigma_layer(flatten), dropout)
        layers['z_sampled'] = z_sampled = z_mu + tf.random_normal(tf.shape(z_log_sigma)) * tf.exp(0.5 * z_log_sigma)
        temp_out = intermediate_conv_reverse(tf.reshape(dropout_layer(dec_dense(z_sampled), dropout), [-1, *reshape]))
        # posterior p(z|w,c)
        z_wc_mu_layer = Dense(config.dim_z * config.dim_c)
        z_wc_log_sigma_layer = Dense(config.dim_z * config.dim_c)
        z_wc_mu = z_wc_mu_layer(w_sampled)
        z_wc_log_sigma = z_wc_log_sigma_layer(w_sampled)
        # small constant bias keeps the inverse-variance head away from zero at init
        z_wc_log_sigma_inv = tf.nn.bias_add(z_wc_log_sigma, bias=tf.Variable(tf.constant(0.1, shape=[z_wc_log_sigma.get_shape()[-1]], dtype=tf.float32)))
        layers['z_wc_mus'] = z_wc_mus = tf.reshape(z_wc_mu, [-1, config.dim_z, config.dim_c])
        layers['z_wc_log_sigma_invs'] = z_wc_log_sigma_invs = tf.reshape(z_wc_log_sigma_inv, [-1, config.dim_z, config.dim_c])
        # reparametrization
        layers['z_wc_sampled'] = z_wc_mus + tf.random_normal(tf.shape(z_wc_log_sigma_invs)) * tf.exp(z_wc_log_sigma_invs)
    # decoder p(x|z)
    with tf.variable_scope('Decoder'):
        decoder = build_unified_decoder(config.outputWidth, config.intermediateResolutions, config.numChannels)
        for layer in decoder:
            temp_out = layer(temp_out)
        layers['xz_mu'] = temp_out
    # prior p(c)
    # per-cluster Gaussian log-likelihood of z_sampled, summed over z dims,
    # softmaxed into cluster responsibilities
    z_sample = tf.tile(tf.expand_dims(z_sampled, -1), [1, 1, config.dim_c])
    loglh = -0.5 * (tf.squared_difference(z_sample, z_wc_mus) * tf.exp(z_wc_log_sigma_invs)) - z_wc_log_sigma_invs + tf.log(np.pi)
    loglh_sum = tf.reduce_sum(loglh, 1)
    layers['pc_logit'] = loglh_sum
    layers['pc'] = tf.nn.softmax(loglh_sum)
    return layers
| 3,390 | 43.618421 | 149 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/variational_autoencoder.py | import numpy as np
import tensorflow as tf
from tensorflow.compat.v1.layers import Dense
from tensorflow.python.keras.layers import Conv2D, Dropout, Flatten
from models.customlayers import build_unified_decoder, build_unified_encoder
def variational_autoencoder(x, dropout_rate, dropout, config):
    """Convolutional VAE: encoder -> dense (mu, log_sigma) bottleneck with the
    reparameterization trick -> decoder.

    Args:
        x: input image batch, NHWC.
        dropout_rate: drop probability for the bottleneck dropout layer.
        dropout: training switch fed to the dropout calls.
        config: provides intermediateResolutions, zDim, outputWidth, numChannels.

    Returns:
        dict with 'z_mu', 'z_log_sigma', 'z_sigma' and 'x_hat'.
    """
    net = {}
    with tf.variable_scope('Encoder'):
        enc_layers = build_unified_encoder(x.get_shape().as_list(), config.intermediateResolutions)
        h = x
        for enc_layer in enc_layers:
            h = enc_layer(h)
    with tf.variable_scope("Bottleneck"):
        channels = h.get_shape().as_list()[3]
        # 1x1 convs squeeze channels before the dense heads, then restore them
        squeeze_conv = Conv2D(channels // 8, 1, padding='same')
        expand_conv = Conv2D(channels, 1, padding='same')
        drop = Dropout(dropout_rate)
        h = squeeze_conv(h)
        code_shape = h.get_shape().as_list()[1:]
        mu_head = Dense(config.zDim)
        log_sigma_head = Dense(config.zDim)
        dec_dense = Dense(np.prod(code_shape))
        flat = Flatten()(h)
        net['z_mu'] = z_mu = drop(mu_head(flat), dropout)
        net['z_log_sigma'] = z_log_sigma = drop(log_sigma_head(flat), dropout)
        net['z_sigma'] = z_sigma = tf.exp(z_log_sigma)
        # reparameterization: z = mu + eps * sigma, eps ~ N(0, I)
        z_vae = z_mu + tf.random_normal(tf.shape(z_sigma)) * z_sigma
        h = expand_conv(tf.reshape(drop(dec_dense(z_vae), dropout), [-1, *code_shape]))
    with tf.variable_scope('Decoder'):
        dec_layers = build_unified_decoder(config.outputWidth, config.intermediateResolutions, config.numChannels)
        # Decode: z -> x_hat
        for dec_layer in dec_layers:
            h = dec_layer(h)
        net['x_hat'] = h
    return net
| 1,834 | 37.229167 | 111 | py |
Unsupervised_Anomaly_Detection_Brain_MRI | Unsupervised_Anomaly_Detection_Brain_MRI-master/models/context_encoder_variational_autoencoder.py | import numpy as np
import tensorflow as tf
from tensorflow.compat.v1.layers import Dense
from tensorflow.python.keras.layers import Conv2D, Dropout, Flatten
from models.customlayers import build_unified_decoder, build_unified_encoder
def context_encoder_variational_autoencoder(x, x_ce, dropout_rate, dropout, config):
    """Build a context-encoding VAE: the same encoder/bottleneck/decoder weights
    process both the full image x (VAE path, sampled latent) and the
    context-masked image x_ce (deterministic path through z_mu only).

    Args:
        x: input image batch, NHWC.
        x_ce: context-masked version of the batch.
        dropout_rate: drop probability for the bottleneck dropout layer.
        dropout: training switch fed to the dropout calls.
        config: provides intermediateResolutions, zDim, outputWidth, numChannels.

    Returns:
        dict with 'z_mu', 'z_mu_ce', 'z_log_sigma', 'z_sigma', 'x_hat',
        'x_hat_ce'.
    """
    outputs = {}
    with tf.variable_scope('Encoder'):
        encoder = build_unified_encoder(x.get_shape().as_list(), config.intermediateResolutions)
        temp_out = x
        for layer in encoder:
            temp_out = layer(temp_out)
        # second pass reuses the same layer objects -> shared weights
        temp_out_ce = x_ce
        for layer in encoder:
            temp_out_ce = layer(temp_out_ce)
    with tf.variable_scope("Bottleneck"):
        intermediate_conv = Conv2D(temp_out.get_shape().as_list()[3] // 8, 1, padding='same')
        intermediate_conv_reverse = Conv2D(temp_out.get_shape().as_list()[3], 1, padding='same')
        dropout_layer = Dropout(dropout_rate)
        temp_out = intermediate_conv(temp_out)
        temp_out_ce = intermediate_conv(temp_out_ce)
        reshape = temp_out.get_shape().as_list()[1:]
        mu_layer = Dense(config.zDim)
        sigma_layer = Dense(config.zDim)
        dec_dense = Dense(np.prod(reshape))
        flatten = Flatten()(temp_out)
        outputs['z_mu'] = z_mu = dropout_layer(mu_layer(flatten), dropout)
        outputs['z_mu_ce'] = z_mu_ce = dropout_layer(mu_layer(Flatten()(temp_out_ce)), dropout)
        outputs['z_log_sigma'] = z_log_sigma = dropout_layer(sigma_layer(flatten), dropout)
        outputs['z_sigma'] = z_sigma = tf.exp(z_log_sigma)
        # reparameterization trick on the full-image path only; the context
        # path decodes deterministically from z_mu_ce
        z_vae = z_mu + tf.random_normal(tf.shape(z_sigma)) * z_sigma
        reshaped = tf.reshape(dropout_layer(dec_dense(z_vae), dropout), [-1, *reshape])
        temp_out = intermediate_conv_reverse(reshaped)
        temp_out_ce = intermediate_conv_reverse(tf.reshape(dropout_layer(dec_dense(z_mu_ce), dropout), [-1, *reshape]))
    with tf.variable_scope('Decoder'):
        decoder = build_unified_decoder(config.outputWidth, config.intermediateResolutions, config.numChannels)
        # Decode: z -> x_hat
        for layer in decoder:
            temp_out = layer(temp_out)
        outputs['x_hat'] = temp_out
        # shared decoder weights for the context reconstruction
        for layer in decoder:
            temp_out_ce = layer(temp_out_ce)
        outputs['x_hat_ce'] = temp_out_ce
    return outputs
| 2,347 | 38.133333 | 119 | py |
MIAT | MIAT-main/train_MI_estimator_only_max.py | # This version max Natural MI of x and max Adversarial MI of x_adv
import os
import argparse
import numpy as np
import torch.optim as optim
from torch.optim import lr_scheduler, Adam
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import data_dataset
from models.resnet_new import ResNet18
from models.estimator import Estimator
from models.discriminators import MI1x1ConvNet, MIInternalConvNet, MIInternallastConvNet
from compute_MI import compute_loss
# Command-line configuration for training the MI estimators.
parser = argparse.ArgumentParser(description='PyTorch CIFAR MI AT')
# Dataset file locations (pre-exported .npy arrays).
parser.add_argument('--nat-img-train', type=str, help='natural training data', default='./data/train_images.npy')
parser.add_argument('--nat-label-train', type=str, help='natural training label', default='./data/train_labels.npy')
parser.add_argument('--nat-img-test', type=str, help='natural test data', default='./data/test_images.npy')
parser.add_argument('--nat-label-test', type=str, help='natural test label', default='./data/test_labels.npy')
# Optimization schedule for the estimator networks.
parser.add_argument('--batch-size', type=int, default=200, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=50, metavar='N',
                    help='number of epochs to train')
parser.add_argument('--lr-mi', type=float, default=1e-2, metavar='LR',
                    help='learning rate')
# PGD attack parameters used when crafting adversarial examples.
parser.add_argument('--epsilon', default=8/255,
                    help='perturbation')
parser.add_argument('--num-steps', default=20,
                    help='perturb number of steps')
parser.add_argument('--step-size', default=0.007,
                    help='perturb step size')
# Pretrained target classifier checkpoint (frozen during estimator training).
parser.add_argument('--pre-target', default='./checkpoint/resnet_18/standard_AT/best_model.pth',
                    help='directory of model for saving checkpoint')
# MI estimator variant / architecture switches.
parser.add_argument('--va-mode', choices=['nce', 'fd', 'dv'], default='dv')
parser.add_argument('--va-fd-measure', default='JSD')
parser.add_argument('--va-hsize', type=int, default=2048)
# NOTE(review): argparse with type=bool converts any non-empty string to True,
# so e.g. `--is_internal False` still yields True; action='store_true' is the
# usual fix — confirm intended usage before changing.
parser.add_argument('--is_internal', type=bool, default=False)
parser.add_argument('--is_internal_last', type=bool, default=False)
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
# Output/bookkeeping.
parser.add_argument('--model-dir', default='./checkpoint/resnet_18/MI_estimator/alpha',
                    help='directory of model for saving checkpoint')
parser.add_argument('--print_freq', default=50, type=int)
parser.add_argument('--save-freq', default=1, type=int, metavar='N', help='save frequency')
args = parser.parse_args()
class AverageMeter(object):
    """Running (weighted) mean tracker.

    Tracks the most recent value (`val`), the weighted sum (`sum`), the total
    weight (`count`) and the running mean (`avg`).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # start from an empty accumulator
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # fold in `val`, observed with weight `n`
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def setup_seed(seed):
    """Seed numpy and torch (including every CUDA device) for reproducibility."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def make_optimizer(model, lr):
    """Return an Adam optimizer over all parameters of `model` at rate `lr`."""
    return Adam(model.parameters(), lr)
def make_optimizer_and_schedule(model, lr):
    """Return (Adam optimizer, MultiStepLR schedule) for `model`.

    The schedule halves the learning rate at epochs 20, 30 and 40.
    """
    opt = Adam(model.parameters(), lr)
    decay_epochs = [20, 30, 40]
    sched = lr_scheduler.MultiStepLR(opt, milestones=decay_epochs, gamma=0.5)
    return opt, sched
def craft_adversarial_example_pgd(model, x_natural, y, step_size=0.007,
                                  epsilon=0.031, perturb_steps=20, distance='l_inf'):
    """Craft adversarial examples with PGD against `model` (put in eval mode).

    Args:
        model: classifier under attack; gradients flow only through the attack.
        x_natural: clean inputs (assumed to live in [0, 1] — see the clamps).
        y: ground-truth labels used in the cross-entropy attack objective.
        step_size: per-iteration step for the l_inf attack.
        epsilon: perturbation budget (l_inf radius or l_2 norm cap).
        perturb_steps: number of attack iterations.
        distance: 'l_inf' or 'l_2'; any other value returns the initial
            randomly-perturbed input, merely clamped to [0, 1].

    Returns:
        adversarial batch x_adv, same shape as x_natural. Requires CUDA.
    """
    model.eval()
    # random start: tiny Gaussian jitter around the clean input
    x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cuda().detach()
    if distance == 'l_inf':
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                logits = model(x_adv)
                loss_ce = F.cross_entropy(logits, y)
            grad = torch.autograd.grad(loss_ce, [x_adv])[0]
            # signed-gradient ascent step, then project to the eps-ball and [0,1]
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif distance == 'l_2':
        batch_size = len(x_natural)
        delta = 0.001 * torch.randn(x_natural.shape).cuda().detach()
        delta = Variable(delta.data, requires_grad=True)
        # Setup optimizers
        optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
        for _ in range(perturb_steps):
            adv = x_natural + delta
            # optimize
            # SGD minimizes, so negate the cross-entropy to ascend it
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = (-1) * F.cross_entropy(model(adv), y)
            loss.backward()
            # renorming gradient
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()
            # projection
            # keep x + delta inside [0,1], then cap delta's per-sample l_2 norm
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        # unknown distance: return the jittered input, clamped to valid range
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    return x_adv
def MI_loss_nat(i, model, x_natural, y, x_adv, local_n, global_n, epoch):
    """Estimator loss for natural MI, averaged over correctly classified samples.

    Args:
        i: batch index (drives periodic logging).
        model: frozen target classifier (kept in eval mode).
        x_natural, y: clean batch and its labels.
        x_adv: unused here; kept for signature symmetry with MI_loss_adv.
        local_n, global_n: MI discriminator networks being trained.
        epoch: current epoch (for logging only).

    Returns:
        scalar MI loss tensor over the selected samples.
    """
    model.eval()
    local_n.train()
    global_n.train()
    # BUGFIX: softmax over the class dimension (dim=1), not the batch dimension.
    # With dim=0 each class column is normalized across the batch, so the
    # row-wise argmax can disagree with the logits' argmax and the pseudo-labels
    # are wrong. Softmax over dim=1 is monotone per sample, so the argmax now
    # matches the raw logits.
    pseudo_label = F.softmax(model(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pseudo_label == y)
    n_selected = torch.nonzero(index).size(0)
    if n_selected != 0:
        # average only over samples the classifier predicts correctly
        loss_n = (compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=model,
                               dim_local=local_n, dim_global=global_n, v_out=True) * index).sum() / n_selected
    else:
        # no correct prediction in this batch: fall back to a plain mean
        loss_n = compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=model,
                              dim_local=local_n, dim_global=global_n, v_out=True).mean()
    if (i + 1) % args.print_freq == 0:
        print('select right samples:' + str(n_selected))
        print('Epoch [%d], Iter [%d/%d] Train MI estimator. Natural MI: -n %.4f'
              % (epoch, i + 1, 50000 // args.batch_size, loss_n.item()))
    return loss_n
def MI_loss_adv(i, model, x_natural, y, x_adv, local_n, global_n, epoch):
    """Estimator loss for adversarial MI, averaged over misclassified adversarial
    samples. The MI is estimated between the perturbation (x_adv - x_natural)
    and the adversarial input.

    Args:
        i: batch index (drives periodic logging).
        model: frozen target classifier (kept in eval mode).
        x_natural, y: clean batch and its labels.
        x_adv: adversarial counterparts of x_natural.
        local_n, global_n: MI discriminator networks being trained.
        epoch: current epoch (for logging only).

    Returns:
        scalar MI loss tensor over the selected samples.
    """
    model.eval()
    local_n.train()
    global_n.train()
    # BUGFIX: softmax over the class dimension (dim=1), not the batch dimension.
    # dim=0 normalizes each class column across the batch, which can change the
    # row-wise argmax and corrupt the pseudo-labels; dim=1 preserves the
    # per-sample argmax of the logits.
    pseudo_label = F.softmax(model(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pseudo_label != y)
    n_selected = torch.nonzero(index).size(0)
    if n_selected != 0:
        # average only over adversarial samples that fool the classifier
        loss_a = (compute_loss(args=args, former_input=x_adv-x_natural, latter_input=x_adv, encoder=model,
                               dim_local=local_n, dim_global=global_n, v_out=True) * index).sum() / n_selected
    else:
        # nothing fooled the model in this batch: fall back to a plain mean
        loss_a = compute_loss(args=args, former_input=x_adv-x_natural, latter_input=x_adv, encoder=model,
                              dim_local=local_n, dim_global=global_n, v_out=True).mean()
    if (i + 1) % args.print_freq == 0:
        print('select wrong samples:' + str(n_selected))
        print('Epoch [%d], Iter [%d/%d] Train MI estimator. Adversasrial MI: -n %.4f'
              % (epoch, i + 1, 50000 // args.batch_size, loss_a.item()))
    return loss_a
def evaluate_mi_nat(encoder, x_natural, y, x_adv, local_n, global_n):
    """Evaluate the natural-MI estimator on samples that are classified
    correctly on clean inputs AND misclassified on adversarial inputs.

    Returns:
        (loss_n, loss_a): MI losses on (x_natural, x_natural) and
        (x_natural, x_adv) pairs, averaged over the selected samples.
    """
    encoder.eval()
    local_n.eval()
    global_n.eval()
    # NOTE(review): softmax with dim=0 normalizes across the batch rather than
    # across classes, so the row-wise argmax can differ from the logits'
    # argmax; dim=1 looks intended — confirm against the training code.
    pesudo_label = F.softmax(encoder(x_natural), dim=0).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(encoder(x_adv), dim=0).max(1, keepdim=True)[1].squeeze()
    # keep samples: right on x_natural, wrong on x_adv
    index = index * (pesudo_label != y)
    # NOTE(review): if no sample satisfies the mask the divisor is zero and the
    # losses become nan/inf — callers appear to assume a non-empty selection.
    loss_n = (compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=encoder,
                           dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    loss_a = (compute_loss(args=args, former_input=x_natural, latter_input=x_adv, encoder=encoder,
                           dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    return loss_n, loss_a
def evaluate_mi_adv(encoder, x_natural, y, x_adv, local_n, global_n):
    """Evaluate the adversarial-MI estimator (perturbation x_adv - x_natural as
    the former input) on samples classified correctly on clean inputs AND
    misclassified on adversarial inputs.

    Returns:
        (loss_n, loss_a): MI losses pairing the perturbation with x_natural and
        with x_adv respectively, averaged over the selected samples.
    """
    encoder.eval()
    local_n.eval()
    global_n.eval()
    # NOTE(review): softmax with dim=0 normalizes across the batch rather than
    # across classes; the row-wise argmax can differ from the logits' argmax.
    # dim=1 looks intended — confirm against the training code.
    pesudo_label = F.softmax(encoder(x_natural), dim=0).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(encoder(x_adv), dim=0).max(1, keepdim=True)[1].squeeze()
    # keep samples: right on x_natural, wrong on x_adv
    index = index * (pesudo_label != y)
    # NOTE(review): an empty selection makes the divisor zero (nan/inf losses).
    loss_n = (compute_loss(args=args, former_input=x_adv-x_natural, latter_input=x_natural, encoder=encoder,
                           dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    loss_a = (compute_loss(args=args, former_input=x_adv-x_natural, latter_input=x_adv, encoder=encoder,
                           dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    return loss_n, loss_a
def eval_test(model, device, test_loader, local_n, global_n, local_a, global_a):
    """Evaluate clean/robust accuracy and the four MI estimator losses on the
    test set. Adversarial examples use a 40-step l_inf PGD attack.

    Returns:
        robust (adversarial) accuracy as a float in [0, 1].
    """
    model.eval()
    local_n.eval()
    global_n.eval()
    local_a.eval()
    global_a.eval()
    cnt = 0
    correct = 0
    correct_adv = 0
    # running sums of the per-batch MI losses (natural/adversarial estimator
    # crossed with natural/adversarial inputs)
    losses_n_n = 0
    losses_n_a = 0
    losses_a_n = 0
    losses_a_a = 0
    for data, target in test_loader:
        cnt += 1
        data, target = data.to(device), target.to(device)
        data_adv = craft_adversarial_example_pgd(model=model, x_natural=data, y=target,
                                                 step_size=0.007, epsilon=8/255,
                                                 perturb_steps=40, distance='l_inf')
        with torch.no_grad():
            output = model(data)
            output_adv = model(data_adv)
        pred = output.max(1, keepdim=True)[1]
        pred_adv = output_adv.max(1, keepdim=True)[1]
        test_loss_n_n, test_loss_n_a = evaluate_mi_nat(encoder=model, x_natural=data, y=target, x_adv=data_adv,
                                                       local_n=local_n, global_n=global_n)
        test_loss_a_n, test_loss_a_a = evaluate_mi_adv(encoder=model, x_natural=data, y=target, x_adv=data_adv,
                                                       local_n=local_a, global_n=global_a)
        correct += pred.eq(target.view_as(pred)).sum().item()
        correct_adv += pred_adv.eq(target.view_as(pred_adv)).sum().item()
        losses_n_n += test_loss_n_n.item()
        losses_n_a += test_loss_n_a.item()
        losses_a_n += test_loss_a_n.item()
        losses_a_a += test_loss_a_a.item()
    test_accuracy = correct_adv / len(test_loader.dataset)
    print('Test: Accuracy: {}/{} ({:.2f}%), Robust Accuracy: {}/{} ({:.2f}%)'.format(correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset), correct_adv, len(test_loader.dataset),
        100. * correct_adv / len(test_loader.dataset)))
    print('Test: Natural MI: -n: {:.4f}, -a: {:.4f}'.format(
        losses_n_n/cnt, losses_n_a/cnt))
    print('Test: Adversarial MI: -n: {:.4f}, -a: {:.4f}'.format(
        losses_a_n / cnt, losses_a_a / cnt))
    return test_accuracy
def main():
    """Train the four MI estimator networks (natural/adversarial x local/global)
    against a frozen, pretrained target classifier. Requires CUDA.
    """
    # settings
    setup_seed(args.seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)
    device = torch.device("cuda")
    # setup data loader
    trans_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])
    trans_test = transforms.Compose([
        transforms.ToTensor()
    ])
    trainset = data_dataset(img_path=args.nat_img_train, clean_label_path=args.nat_label_train,
                            transform=trans_train)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, drop_last=False,
                                               shuffle=True, num_workers=4, pin_memory=True)
    testset = data_dataset(img_path=args.nat_img_test, clean_label_path=args.nat_label_test, transform=trans_test)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, drop_last=False, shuffle=False,
                                              num_workers=4, pin_memory=True)
    # load MI estimation model
    # Estimator part 1: X or layer3 to H space
    local_n = Estimator(args.va_hsize)
    local_a = Estimator(args.va_hsize)
    # estimator part 2: Z to H space
    # the global discriminator's input width depends on which layer it taps
    if args.is_internal == True:
        if args.is_internal_last == True:
            z_size = 512
            global_n = MIInternallastConvNet(z_size, args.va_hsize)
            global_a = MIInternallastConvNet(z_size, args.va_hsize)
        else:
            z_size = 256
            global_n = MIInternalConvNet(z_size, args.va_hsize)
            global_a = MIInternalConvNet(z_size, args.va_hsize)
    else:
        z_size = 10
        global_n = MI1x1ConvNet(z_size, args.va_hsize)
        global_a = MI1x1ConvNet(z_size, args.va_hsize)
    print('----------------Start training-------------')
    target_model = ResNet18(10)
    # copy matching keys from the pretrained checkpoint into a fresh state dict
    state_dic = torch.load(args.pre_target)
    new_state = target_model.state_dict()
    for k in state_dic.keys():
        if k in new_state.keys():
            new_state[k] = state_dic[k]
            # print(k)
        else:
            break
    target_model.load_state_dict(new_state)
    target_model = torch.nn.DataParallel(target_model).cuda()
    local_n = torch.nn.DataParallel(local_n).cuda()
    global_n = torch.nn.DataParallel(global_n).cuda()
    local_a = torch.nn.DataParallel(local_a).cuda()
    global_a = torch.nn.DataParallel(global_a).cuda()
    cudnn.benchmark = True
    # one Adam + MultiStepLR pair per estimator network
    opt_local_n, schedule_local_n = make_optimizer_and_schedule(local_n, lr=args.lr_mi)
    opt_global_n, schedule_global_n = make_optimizer_and_schedule(global_n, lr=args.lr_mi)
    opt_local_a, schedule_local_a = make_optimizer_and_schedule(local_a, lr=args.lr_mi)
    opt_global_a, schedule_global_a = make_optimizer_and_schedule(global_a, lr=args.lr_mi)
    # Train
    for epoch in range(1, args.epochs + 1):
        loss_n_all = 0
        loss_a_all = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            # craft adversarial examples
            adv = craft_adversarial_example_pgd(model=target_model, x_natural=data, y=target)
            # Train MI estimator
            # natural-MI estimator update
            loss_n = MI_loss_nat(i=batch_idx, model=target_model, x_natural=data, y=target, x_adv=adv,
                                 local_n=local_n, global_n=global_n, epoch=epoch)
            loss_n_all += loss_n
            opt_local_n.zero_grad()
            opt_global_n.zero_grad()
            loss_n.backward()
            opt_local_n.step()
            opt_global_n.step()
            # adversarial-MI estimator update
            loss_a = MI_loss_adv(i=batch_idx, model=target_model, x_natural=data, y=target, x_adv=adv,
                                 local_n=local_a, global_n=global_a, epoch=epoch)
            loss_a_all += loss_a
            opt_local_a.zero_grad()
            opt_global_a.zero_grad()
            loss_a.backward()
            opt_local_a.step()
            opt_global_a.step()
        schedule_local_n.step()
        schedule_global_n.step()
        schedule_local_a.step()
        schedule_global_a.step()
        loss_n_all = loss_n_all / (batch_idx +1)
        loss_a_all = loss_a_all / (batch_idx + 1)
        # evaluation
        print('================================================================')
        # _ = eval_train(model=target_model, device=device, test_loader=train_loader, local_n=local_n,
        #                global_n=global_n)
        test_accuracy = eval_test(model=target_model, device=device, test_loader=test_loader, local_n=local_n,
                                  global_n=global_n, local_a=local_a,
                                  global_a=global_a)
        # save checkpoint
        if epoch % args.save_freq == 0:
            '''
            torch.save(model.module.state_dict(),
                       os.path.join(model_dir, 'model-epoch{}.pt'.format(epoch)))
            '''
            torch.save(local_n.module.state_dict(),
                       os.path.join(args.model_dir, 'local_n_model.pth'))
            torch.save(global_n.module.state_dict(),
                       os.path.join(args.model_dir, 'global_n_model.pth'))
            torch.save(local_a.module.state_dict(),
                       os.path.join(args.model_dir, 'local_a_model.pth'))
            torch.save(global_a.module.state_dict(),
                       os.path.join(args.model_dir, 'global_a_model.pth'))
            print('save the model')
        print('================================================================')
# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()
| 17,182 | 37.440716 | 123 | py |
MIAT | MIAT-main/train_MIAT.py | import os
import argparse
import numpy as np
import torch.optim as optim
from torch.optim import lr_scheduler, Adam
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import data_dataset
# from models.resnet_new import ResNet18
from models.wideresnet_new import WideResNet
from models.estimator import Estimator
from models.discriminators import MI1x1ConvNet, MIInternalConvNet, MIInternallastConvNet
from compute_MI import compute_loss
parser = argparse.ArgumentParser(description='PyTorch CIFAR MI AT')
parser.add_argument('--nat-img-train', type=str, help='natural training data', default='./data/train_images.npy')
parser.add_argument('--nat-label-train', type=str, help='natural training label', default='./data/train_labels.npy')
parser.add_argument('--nat-img-test', type=str, help='natural test data', default='./data/test_images.npy')
parser.add_argument('--nat-label-test', type=str, help='natural test label', default='./data/test_labels.npy')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train')
# parser.add_argument('--lr-mi', type=float, default=1e-3, metavar='LR',
# help='learning rate')
parser.add_argument('--lr', type=float, default=1e-1, metavar='LR',
help='learning rate')
parser.add_argument('--weight-decay', '--wd', default=2e-4,
type=float, metavar='W')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum')
parser.add_argument('--epsilon', default=8/255,
help='perturbation')
parser.add_argument('--num-steps', default=10,
help='perturb number of steps')
parser.add_argument('--step-size', default=0.007,
help='perturb step size')
parser.add_argument('--warm-up', type=bool, default=True,
help='warm up the MI estimator')
parser.add_argument('--warm-epochs', type=int, default=20, metavar='N',
help='number of epochs to train')
'''
parser.add_argument('--pretrain-model', default='./checkpoint/resnet_18/ori/best_model.pth',
help='directory of model for saving checkpoint')
'''
parser.add_argument('--pre-local-n', default='./checkpoint/resnet_18/MI_estimator/beta_final_l2/local_n_model.pth',
help='directory of model for saving checkpoint')
parser.add_argument('--pre-global-n', default='./checkpoint/resnet_18/MI_estimator/beta_final_l2/global_n_model.pth',
help='directory of model for saving checkpoint')
parser.add_argument('--pre-local-a', default='./checkpoint/resnet_18/MI_estimator/beta_final_l2/local_a_model.pth',
help='directory of model for saving checkpoint')
parser.add_argument('--pre-global-a', default='./checkpoint/resnet_18/MI_estimator/beta_final_l2/global_a_model.pth',
help='directory of model for saving checkpoint')
parser.add_argument('--va-mode', choices=['nce', 'fd', 'dv'], default='dv')
parser.add_argument('--va-fd-measure', default='JSD')
parser.add_argument('--va-hsize', type=int, default=2048)
parser.add_argument('--is_internal', type=bool, default=False)
parser.add_argument('--is_internal_last', type=bool, default=False)
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--model-dir', default='./checkpoint/wideresnet/MIAT_standard',
help='directory of model for saving checkpoint')
parser.add_argument('--print_freq', type=int, default=50)
parser.add_argument('--save-freq', default=2, type=int, metavar='N', help='save frequency')
args = parser.parse_args()
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def setup_seed(seed):
    """Seed numpy and torch (CPU + all CUDA devices) for reproducibility."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def make_optimizer_and_schedule(model, lr):
    """Build an Adam optimizer plus a MultiStepLR schedule (x0.1 at epochs 75, 90)."""
    opt = Adam(model.parameters(), lr)
    return opt, lr_scheduler.MultiStepLR(opt, milestones=[75, 90], gamma=0.1)
def adjust_learning_rate(optimizer, epoch):
    """Step-decay the LR from the module-level args.lr: x0.1 at 75, x0.01 at 90."""
    if epoch >= 90:
        factor = 0.01
    elif epoch >= 75:
        factor = 0.1
    else:
        factor = 1.0
    new_lr = args.lr * factor
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def craft_adversarial_example_pgd(model, x_natural, y, step_size=0.007, epsilon=0.031, perturb_steps=20,
                                  distance='l_inf'):
    """Craft PGD adversarial examples for a batch.

    model: classifier under attack (switched to eval mode).
    x_natural, y: clean images in [0, 1] and their labels.
    step_size / epsilon / perturb_steps: PGD step, budget, and iterations.
    distance: 'l_inf' (sign-gradient ascent + box projection) or 'l_2'
        (SGD on a perturbation variable with per-sample renorm); any other
        value only clamps the randomly jittered input without attacking.
    Returns the adversarial batch (detached from the graph).
    NOTE(review): .cuda() hard-codes GPU execution -- confirm before CPU use.
    """
    model.eval()
    # Start from a small random perturbation of the clean input.
    x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cuda().detach()
    if distance == 'l_inf':
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                logits = model(x_adv)
                loss_ce = F.cross_entropy(logits, y)
            grad = torch.autograd.grad(loss_ce, [x_adv])[0]
            # Ascend the CE loss, then project into the eps-ball and [0, 1].
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif distance == 'l_2':
        batch_size = len(x_natural)
        delta = 0.001 * torch.randn(x_natural.shape).cuda().detach()
        delta = Variable(delta.data, requires_grad=True)
        # Setup optimizers
        optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
        for _ in range(perturb_steps):
            adv = x_natural + delta
            # optimize (maximize CE by minimizing its negation)
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = (-1) * F.cross_entropy(model(adv), y)
            loss.backward()
            # renorming gradient: normalize per-sample gradient to unit l2 norm
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()
            # projection: keep x_natural + delta inside [0, 1] and the l2 ball
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        # Unknown distance: return the clamped random start (no attack).
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    return x_adv
def MI_loss_nat(i, model, x_natural, y, x_adv, local_n, global_n, epoch):
    """Adversarial cross-entropy plus a natural-MI alignment penalty.

    The MI term is computed only over samples the model classifies correctly
    on the natural input but misclassifies on the adversarial one; it pulls
    the adversarial MI score vector toward the natural one.

    Args:
        i: batch index (periodic logging only).
        model: target classifier (train mode; gradients flow through it).
        x_natural, y: clean batch and labels.
        x_adv: adversarial counterpart of x_natural.
        local_n, global_n: frozen natural-MI estimator networks (eval mode).
        epoch: current epoch (logging only).

    Returns:
        Scalar tensor loss_ce + loss_mi, suitable for backward().
    """
    model.train()
    local_n.eval()
    global_n.eval()
    logits_adv = model(x_adv)
    loss_ce = F.cross_entropy(logits_adv, y)
    # Pseudo-labels are the per-sample argmax over classes. Softmax must run
    # over the class dimension (dim=1); the previous dim=0 normalized across
    # the batch, which changes the row-wise argmax and corrupts the selection.
    pesudo_label = F.softmax(model(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(model(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    # Keep samples that are right on the natural input but wrong on the adversary.
    index = index * (pesudo_label != y)
    if torch.nonzero(index).size(0) != 0:
        loss_n = compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=model,
                              dim_local=local_n, dim_global=global_n, v_out=True) * index
        loss_a = compute_loss(args=args, former_input=x_natural, latter_input=x_adv, encoder=model,
                              dim_local=local_n, dim_global=global_n, v_out=True) * index
        # Drive the cosine similarity of the two MI score vectors toward 1.
        loss_mea = torch.abs(torch.tensor(1.0).cuda() - torch.cosine_similarity(loss_n, loss_a, dim=0))
        loss_a = loss_a.sum() / torch.nonzero(index).size(0)
        loss_mi = loss_mea + 0.1 * loss_a
    else:
        # Tensor zero (not float 0.0): the .item() call in the log below would
        # otherwise raise AttributeError on batches with no selected samples.
        loss_mi = torch.zeros((), device=x_adv.device)
    loss_all = loss_ce + loss_mi
    if (i + 1) % args.print_freq == 0:
        print('select samples:' + str(torch.nonzero(index).size(0)))
        print('Epoch [%d], Iter [%d/%d] Train target model. Natural MI: %.4f; Loss_ce: %.4f; Loss_all: %.4f'
              % (epoch, i + 1, 50000 // args.batch_size, loss_mi.item(), loss_ce.item(), loss_all.item()))
    return loss_all
def MI_loss(i, model, x_natural, y, x_adv, local_n, global_n, local_a, global_a, epoch):
    """Adversarial cross-entropy plus natural- and adversarial-MI alignment terms.

    For samples correct on the natural input but flipped by the attack, two
    cosine-alignment penalties are added: one on the natural-MI estimators
    (local_n/global_n) and one on the adversarial-MI estimators
    (local_a/global_a, fed with the perturbation x_adv - x_natural).

    Returns a scalar tensor loss_ce + 5.0 * loss_mi for backward().
    """
    model.train()
    local_n.eval()
    global_n.eval()
    local_a.eval()
    global_a.eval()
    logits_adv = model(x_adv)
    loss_ce = F.cross_entropy(logits_adv, y)
    # Pseudo-labels: softmax over the class dimension (dim=1). The previous
    # dim=0 normalized over the batch and corrupted the per-sample argmax.
    pesudo_label = F.softmax(model(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(model(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    # Samples correct on natural input but misclassified under attack.
    index = index * (pesudo_label != y)
    if torch.nonzero(index).size(0) != 0:
        loss_n = compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=model,
                              dim_local=local_n, dim_global=global_n, v_out=True) * index
        loss_a = compute_loss(args=args, former_input=x_natural, latter_input=x_adv, encoder=model,
                              dim_local=local_n, dim_global=global_n, v_out=True) * index
        # Natural-MI alignment: cosine similarity toward 1.
        loss_mea_n = torch.abs(torch.tensor(1.0).cuda() - torch.cosine_similarity(loss_n, loss_a, dim=0))
        loss_a = compute_loss(args=args, former_input=x_adv - x_natural, latter_input=x_adv, encoder=model,
                              dim_local=local_a, dim_global=global_a, v_out=True) * index
        loss_n = compute_loss(args=args, former_input=x_adv - x_natural, latter_input=x_natural, encoder=model,
                              dim_local=local_a, dim_global=global_a, v_out=True) * index
        # Adversarial-MI alignment: cosine similarity toward 1.
        loss_mea_a = torch.abs(torch.tensor(1.0).cuda() - torch.cosine_similarity(loss_n, loss_a, dim=0))
        loss_mi = loss_mea_n + loss_mea_a
    else:
        # Tensor zero (not float 0.0) so loss_mi.item() below cannot raise
        # AttributeError on batches with no selected samples.
        loss_mi = torch.zeros((), device=x_adv.device)
    loss_all = loss_ce + 5.0 * loss_mi
    if (i + 1) % args.print_freq == 0:
        print('select samples:' + str(torch.nonzero(index).size(0)))
        print('Epoch [%d], Iter [%d/%d] Train target model. Natural MI: %.4f; Loss_ce: %.4f; Loss_all: %.4f'
              % (epoch, i + 1, 50000 // args.batch_size, loss_mi.item(), loss_ce.item(), loss_all.item()))
    return loss_all
def evaluate_mi_nat(encoder, x_natural, y, x_adv, local_n, global_n):
    """Mean natural-MI scores, split by attack outcome.

    Returns (loss_r_n, loss_r_a, loss_w_n, loss_w_a): natural/adversarial MI
    averaged over samples the model keeps right under attack (r_*) and over
    samples flipped by the attack (w_*). Selection requires the natural
    prediction to be correct in both cases.
    """
    encoder.eval()
    local_n.eval()
    global_n.eval()
    # Softmax over the class axis (dim=1); dim=0 normalized over the batch and
    # changed the per-sample argmax, corrupting the selection masks.
    pesudo_label = F.softmax(encoder(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(encoder(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    index = index * (pesudo_label == y)
    loss_r_n = (compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    loss_r_a = (compute_loss(args=args, former_input=x_natural, latter_input=x_adv, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    pesudo_label = F.softmax(encoder(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(encoder(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    index = index * (pesudo_label != y)
    loss_w_n = (compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    loss_w_a = (compute_loss(args=args, former_input=x_natural, latter_input=x_adv, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    return loss_r_n, loss_r_a, loss_w_n, loss_w_a
def evaluate_mi_adv(encoder, x_natural, y, x_adv, local_n, global_n):
    """Mean adversarial-MI scores (perturbation x_adv - x_natural as input),
    split by attack outcome.

    Returns (loss_r_n, loss_r_a, loss_w_n, loss_w_a) analogous to
    evaluate_mi_nat, but scored with the perturbation-based estimators.
    """
    encoder.eval()
    local_n.eval()
    global_n.eval()
    # Softmax over the class axis (dim=1); dim=0 normalized over the batch and
    # corrupted the per-sample argmax used for the selection masks.
    pesudo_label = F.softmax(encoder(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(encoder(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    index = index * (pesudo_label == y)
    loss_r_n = (compute_loss(args=args, former_input=x_adv - x_natural, latter_input=x_natural, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    loss_r_a = (compute_loss(args=args, former_input=x_adv - x_natural, latter_input=x_adv, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    pesudo_label = F.softmax(encoder(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(encoder(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    index = index * (pesudo_label != y)
    loss_w_n = (compute_loss(args=args, former_input=x_adv - x_natural, latter_input=x_natural, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    loss_w_a = (compute_loss(args=args, former_input=x_adv - x_natural, latter_input=x_adv, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    return loss_r_n, loss_r_a, loss_w_n, loss_w_a
def eval_test(model, device, test_loader, local_n, global_n, local_a, global_a):
    """Evaluate clean/robust accuracy and natural/adversarial MI statistics.

    Runs a 40-step l_inf PGD attack per batch, then reports clean and robust
    accuracy plus MI estimates for robust vs. flipped samples, under both the
    natural (local_n/global_n) and adversarial (local_a/global_a) estimators.

    Returns the mean of clean and robust accuracy (used for checkpointing).
    """
    model.eval()
    local_n.eval()
    global_n.eval()
    local_a.eval()
    global_a.eval()
    cnt = 0
    correct = 0
    correct_adv = 0
    losses_r_n = 0
    losses_r_a = 0
    losses_w_n = 0
    losses_w_a = 0
    losses_r_n_1 = 0
    losses_r_a_1 = 0
    losses_w_n_1 = 0
    losses_w_a_1 = 0
    for data, target in test_loader:
        cnt += 1
        data, target = data.to(device), target.to(device)
        data_adv = craft_adversarial_example_pgd(model=model, x_natural=data, y=target,
                                                 step_size=0.007, epsilon=8/255,
                                                 perturb_steps=40, distance='l_inf')
        with torch.no_grad():
            output = model(data)
            output_adv = model(data_adv)
        pred = output.max(1, keepdim=True)[1]
        pred_adv = output_adv.max(1, keepdim=True)[1]
        test_loss_r_n, test_loss_r_a, test_loss_w_n, test_loss_w_a = evaluate_mi_nat(
            encoder=model, x_natural=data, y=target, x_adv=data_adv, local_n=local_n, global_n=global_n)
        # Bug fix: the adversarial estimators (local_a/global_a) are trained on
        # perturbation inputs (x_adv - x_natural), so they must be scored with
        # evaluate_mi_adv (as in the estimator-training script), not
        # evaluate_mi_nat, which was otherwise dead code in this file.
        test_loss_r_n_1, test_loss_r_a_1, test_loss_w_n_1, test_loss_w_a_1 = evaluate_mi_adv(
            encoder=model, x_natural=data, y=target, x_adv=data_adv, local_n=local_a, global_n=global_a)
        correct += pred.eq(target.view_as(pred)).sum().item()
        correct_adv += pred_adv.eq(target.view_as(pred_adv)).sum().item()
        losses_r_n += test_loss_r_n.item()
        losses_r_a += test_loss_r_a.item()
        losses_w_n += test_loss_w_n.item()
        losses_w_a += test_loss_w_a.item()
        losses_r_n_1 += test_loss_r_n_1.item()
        losses_r_a_1 += test_loss_r_a_1.item()
        losses_w_n_1 += test_loss_w_n_1.item()
        losses_w_a_1 += test_loss_w_a_1.item()
    test_accuracy = (correct_adv + correct) / (2.0 * len(test_loader.dataset))
    print('Test: Accuracy: {}/{} ({:.2f}%), Robust Accuracy: {}/{} ({:.2f}%)'.format(
        correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset),
        correct_adv, len(test_loader.dataset), 100. * correct_adv / len(test_loader.dataset)))
    print('Test: Natural MI Right: -n: {:.4f}, -a: {:.4f}'.format(
        losses_r_n/cnt, losses_r_a/cnt))
    print('Test: Natural MI Wrong: -n: {:.4f}, -a: {:.4f}'.format(
        losses_w_n / cnt, losses_w_a / cnt))
    print('Test: Adv MI Right: -n: {:.4f}, -a: {:.4f}'.format(
        losses_r_n_1/cnt, losses_r_a_1/cnt))
    print('Test: Adv MI Wrong: -n: {:.4f}, -a: {:.4f}'.format(
        losses_w_n_1 / cnt, losses_w_a_1 / cnt))
    return test_accuracy
def main():
    """End-to-end MIAT training driver.

    Loads CIFAR-style numpy datasets, builds the WideResNet target model and
    the four pretrained MI-estimator networks, warms the classifier up with
    two epochs of natural training, then runs PGD-based adversarial training
    with the MI alignment loss, checkpointing on best (clean+robust) accuracy.
    """
    # settings
    setup_seed(args.seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)
    device = torch.device("cuda")
    # setup data loader
    trans_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])
    trans_test = transforms.Compose([
        transforms.ToTensor()
    ])
    trainset = data_dataset(img_path=args.nat_img_train, clean_label_path=args.nat_label_train,
                            transform=trans_train)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, drop_last=False,
                                               shuffle=True, num_workers=4, pin_memory=True)
    testset = data_dataset(img_path=args.nat_img_test, clean_label_path=args.nat_label_test, transform=trans_test)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, drop_last=False, shuffle=False,
                                              num_workers=4, pin_memory=True)
    # Estimator part 1: X or layer3 to H space
    local_n = Estimator(args.va_hsize)
    local_a = Estimator(args.va_hsize)
    # estimator part 2: Z to H space
    # z_size is the channel size of the encoder output the global estimator
    # consumes: internal layer (256/512) or logits (10).
    if args.is_internal == True:
        if args.is_internal_last == True:
            z_size = 512
            global_n = MIInternallastConvNet(z_size, args.va_hsize)
            global_a = MIInternallastConvNet(z_size, args.va_hsize)
        else:
            z_size = 256
            global_n = MIInternalConvNet(z_size, args.va_hsize)
            global_a = MIInternalConvNet(z_size, args.va_hsize)
    else:
        z_size = 10
        global_n = MI1x1ConvNet(z_size, args.va_hsize)
        global_a = MI1x1ConvNet(z_size, args.va_hsize)
    # target_model = ResNet18(10)
    target_model = WideResNet(34, 10, 10)
    target_model = torch.nn.DataParallel(target_model).cuda()
    # Load the four pretrained MI estimators, then wrap them for multi-GPU.
    local_n.load_state_dict(torch.load(args.pre_local_n))
    global_n.load_state_dict(torch.load(args.pre_global_n))
    local_a.load_state_dict(torch.load(args.pre_local_a))
    global_a.load_state_dict(torch.load(args.pre_global_a))
    local_n = torch.nn.DataParallel(local_n).cuda()
    global_n = torch.nn.DataParallel(global_n).cuda()
    local_a = torch.nn.DataParallel(local_a).cuda()
    global_a = torch.nn.DataParallel(global_a).cuda()
    cudnn.benchmark = True
    optimizer = optim.SGD(target_model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # opt_local_n, schedule_local_n = make_optimizer_and_schedule(local_n, lr=args.lr_mi)
    # opt_global_n, schedule_global_n = make_optimizer_and_schedule(global_n, lr=args.lr_mi)
    # opt_local_a, schedule_local_a = make_optimizer_and_schedule(local_a, lr=args.lr_mi)
    # opt_global_a, schedule_global_a = make_optimizer_and_schedule(global_a, lr=args.lr_mi)
    # warm up: two epochs of plain natural training before adversarial training
    # (sic: 'epocah' is a typo for 'epoch'; the loop variable is unused)
    print('--------Warm up--------')
    for epocah in range(0, 2):
        for batch_idx, (data, target) in enumerate(train_loader):
            target_model.train()
            data, target = data.to(device), target.to(device)
            logits_nat = target_model(data)
            loss = F.cross_entropy(logits_nat, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    # Train
    best_accuracy = 0
    for epoch in range(1, args.epochs + 1):
        adjust_learning_rate(optimizer, epoch)
        print('--------Train the target model--------')
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            # craft adversarial examples
            adv = craft_adversarial_example_pgd(model=target_model, x_natural=data, y=target, step_size=0.007,
                                                epsilon=8/255, perturb_steps=40, distance='l_inf')
            # Train MI estimator
            loss = MI_loss(i=batch_idx, model=target_model, x_natural=data, y=target, x_adv=adv, local_n=local_n,
                           global_n=global_n, local_a=local_a, global_a=global_a, epoch=epoch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # evaluation
        print('--------Evaluate the target model--------')
        test_accuracy = eval_test(model=target_model, device=device, test_loader=test_loader, local_n=local_n,
                                  global_n=global_n, local_a=local_a, global_a=global_a)
        # save checkpoint (keep only the best model by mean clean/robust accuracy)
        if test_accuracy >= best_accuracy:  # epoch % args.save_freq == 0:
            best_accuracy = test_accuracy
            '''
            torch.save(model.module.state_dict(),
                       os.path.join(model_dir, 'model-epoch{}.pt'.format(epoch)))
            '''
            torch.save(target_model.module.state_dict(),
                       os.path.join(args.model_dir, 'target_model.pth'))
            '''
            torch.save(local_n.module.state_dict(),
                       os.path.join(args.model_dir, 'local_model.pth'))
            torch.save(global_n.module.state_dict(),
                       os.path.join(args.model_dir, 'global_model.pth'))
            '''
            print('save the model')
        print('================================================================')
# Script entry point: run MIAT training when executed directly.
if __name__ == '__main__':
    main()
| 22,429 | 40.080586 | 127 | py |
MIAT | MIAT-main/train_MI_estimator.py | # This version use cosine distance to enhance the difference between the MI of adv and the MI of nat.
import os
import argparse
import numpy as np
import torch.optim as optim
from torch.optim import lr_scheduler, Adam
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import data_dataset
from models.resnet_new import ResNet18
from models.estimator import Estimator
from models.discriminators import MI1x1ConvNet, MIInternalConvNet, MIInternallastConvNet
from compute_MI import compute_loss
# Command-line configuration for MI-estimator pretraining.
# NOTE(review): argparse with type=bool is a footgun -- bool('False') is
# truthy, so --is_internal/--is_internal_last can only be flipped via defaults.
parser = argparse.ArgumentParser(description='PyTorch CIFAR MI AT')
# Dataset paths: pre-extracted CIFAR numpy arrays (images + labels).
parser.add_argument('--nat-img-train', type=str, help='natural training data', default='./data/train_images.npy')
parser.add_argument('--nat-label-train', type=str, help='natural training label', default='./data/train_labels.npy')
parser.add_argument('--nat-img-test', type=str, help='natural test data', default='./data/test_images.npy')
parser.add_argument('--nat-label-test', type=str, help='natural test label', default='./data/test_labels.npy')
# NOTE(review): help text says "default: 128" but the actual default is 400.
parser.add_argument('--batch-size', type=int, default=400, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=25, metavar='N',
                    help='number of epochs to train')
parser.add_argument('--lr-mi', type=float, default=1e-2, metavar='LR',
                    help='learning rate')
# Attack parameters (epsilon 0.5 is an l_2 budget here; see eval_test).
parser.add_argument('--epsilon', default=0.5,
                    help='perturbation')
parser.add_argument('--num-steps', default=20,
                    help='perturb number of steps')
parser.add_argument('--step-size', default=0.007,
                    help='perturb step size')
# Frozen, adversarially pretrained target classifier used as the encoder.
parser.add_argument('--pre-target', default='./checkpoint/resnet_18/standard_AT_pre_l2/best_model.pth',
                    help='directory of model for saving checkpoint')
# MI-estimation settings consumed by compute_MI.compute_loss.
parser.add_argument('--va-mode', choices=['nce', 'fd', 'dv'], default='dv')
parser.add_argument('--va-fd-measure', default='JSD')
parser.add_argument('--va-hsize', type=int, default=2048)
parser.add_argument('--is_internal', type=bool, default=False)
parser.add_argument('--is_internal_last', type=bool, default=False)
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--model-dir', default='./checkpoint/resnet_18/MI_estimator/beta_final_l2',
                    help='directory of model for saving checkpoint')
parser.add_argument('--print_freq', default=50, type=int)
parser.add_argument('--save-freq', default=1, type=int, metavar='N', help='save frequency')
args = parser.parse_args()
class AverageMeter(object):
    """Running-average helper: keeps the last value, sum, count, and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in `val` with weight `n` and recompute the mean."""
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.val = val
        self.avg = self.sum / self.count
def setup_seed(seed):
    """Make the torch (CPU + CUDA) and numpy RNGs deterministic."""
    for seed_fn in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed):
        seed_fn(seed)
def make_optimizer(model, lr):
    """Return a plain Adam optimizer over all of the model's parameters."""
    return Adam(model.parameters(), lr)
def make_optimizer_and_schedule(model, lr):
    """Adam optimizer plus a MultiStepLR schedule decaying x0.1 at epochs 10 and 20."""
    opt = Adam(model.parameters(), lr)
    sched = lr_scheduler.MultiStepLR(opt, milestones=[10, 20], gamma=0.1)
    return opt, sched
def craft_adversarial_example_pgd(model, x_natural, y, step_size=0.007,
                                  epsilon=0.031, perturb_steps=20, distance='l_inf'):
    """Craft PGD adversarial examples for a batch.

    distance='l_inf' runs sign-gradient ascent with eps-ball and [0,1]
    projection; 'l_2' optimizes a perturbation variable with SGD and a
    per-sample l2 renorm; any other value only clamps the random start.
    Returns the adversarial batch (detached).
    NOTE(review): .cuda() hard-codes GPU execution -- confirm before CPU use.
    """
    model.eval()
    # Start from a small random perturbation of the clean input.
    x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cuda().detach()
    if distance == 'l_inf':
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                logits = model(x_adv)
                loss_ce = F.cross_entropy(logits, y)
            grad = torch.autograd.grad(loss_ce, [x_adv])[0]
            # Ascend the CE loss, then project into the eps-ball and [0, 1].
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif distance == 'l_2':
        batch_size = len(x_natural)
        delta = 0.001 * torch.randn(x_natural.shape).cuda().detach()
        delta = Variable(delta.data, requires_grad=True)
        # Setup optimizers
        optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
        for _ in range(perturb_steps):
            adv = x_natural + delta
            # optimize (maximize CE by minimizing its negation)
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = (-1) * F.cross_entropy(model(adv), y)
            loss.backward()
            # renorming gradient: per-sample unit l2 gradient
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()
            # projection: keep x_natural + delta in [0, 1] and the l2 ball
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        # Unknown distance: return the clamped random start (no attack).
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    return x_adv
def MI_loss_nat(i, model, x_natural, y, x_adv, local_n, global_n, epoch):
    """Training loss for the *natural* MI estimator (local_n/global_n).

    The target classifier is frozen (eval); only the estimators train.
    Samples are split by prediction outcome:
      * natural correct & adversarial correct -> pull the natural and
        adversarial MI score vectors together (cosine toward 1) while
        maximizing natural MI;
      * natural correct & adversarial wrong -> push them apart (cosine
        toward 0) while maximizing natural MI.
    If no natural sample is classified correctly, fall back to the mean MI loss.
    Returns a scalar tensor for the estimator optimizers.
    """
    model.eval()
    local_n.train()
    global_n.train()
    # Per-sample predicted class: softmax over the class axis (dim=1). The
    # previous dim=0 normalized over the batch and corrupted the argmax.
    pesudo_label = F.softmax(model(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    index_s = index
    if torch.nonzero(index).size(0) != 0:
        pesudo_label = F.softmax(model(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
        index_s = index * (pesudo_label == y)
        loss_n = 0
        if torch.nonzero(index_s).size(0) != 0:
            loss_n = (compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=model,
                                   dim_local=local_n, dim_global=global_n, v_out=True) * index_s).sum() / torch.nonzero(
                index).size(0)
            loss_n_s = compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=model,
                                    dim_local=local_n, dim_global=global_n, v_out=True) * index_s
            loss_a_s = compute_loss(args=args, former_input=x_natural, latter_input=x_adv, encoder=model,
                                    dim_local=local_n, dim_global=global_n, v_out=True) * index_s
            # Cosine toward 1 for samples that stay correctly classified.
            loss_mea = torch.abs(torch.tensor(1.0).cuda() - torch.cosine_similarity(loss_n_s, loss_a_s, dim=0))
            loss_r = 5.0 * loss_mea + loss_n
        else:
            loss_r = 0
        pesudo_label = F.softmax(model(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
        index_s = index * (pesudo_label != y)
        if torch.nonzero(index_s).size(0) != 0:
            loss_n = (compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=model,
                                   dim_local=local_n, dim_global=global_n, v_out=True) * index_s).sum() / torch.nonzero(
                index).size(0)
            loss_n_s = compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=model,
                                    dim_local=local_n, dim_global=global_n, v_out=True) * index_s
            loss_a_s = compute_loss(args=args, former_input=x_natural, latter_input=x_adv, encoder=model,
                                    dim_local=local_n, dim_global=global_n, v_out=True) * index_s
            # Cosine toward 0 for samples the attack flips.
            loss_mea = torch.abs(torch.cosine_similarity(loss_n_s, loss_a_s, dim=0))
            loss_w = 5.0 * loss_mea + loss_n
        else:
            loss_w = 0
        loss_all = 1.0 * loss_w + 0.5 * loss_r
    else:
        loss_n = compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=model,
                              dim_local=local_n, dim_global=global_n, v_out=True).mean()
        loss_all = loss_n
    if (i + 1) % args.print_freq == 0:
        print('select right nat samples; wrong adv samples:' + str(torch.nonzero(index).size(0)) + ';' + str(torch.nonzero(index_s).size(0)))
        print('Epoch [%d], Iter [%d/%d] Train MI estimator. Natural MI: -n %.4f; Loss: -n %.4f'
              % (epoch, i + 1, 50000 // args.batch_size, loss_n.item(), loss_all.item()))
    return loss_all
def MI_loss_adv(i, model, x_natural, y, x_adv, local_n, global_n, epoch):
    """Training loss for the *adversarial* MI estimator.

    Here local_n/global_n are the adversarial estimators, fed with the
    perturbation x_adv - x_natural as the former input. Samples flipped by the
    attack push the natural/adversarial score vectors apart (cosine toward 0)
    while maximizing adversarial MI; samples that stay correct pull them
    together (cosine toward 1). Falls back to the mean MI loss when no natural
    sample is classified correctly. Returns a scalar tensor.
    """
    model.eval()
    local_n.train()
    global_n.train()
    # Per-sample predicted class: softmax over the class axis (dim=1). The
    # previous dim=0 normalized over the batch and corrupted the argmax.
    pesudo_label = F.softmax(model(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    index_s = index
    loss_a = 0
    if torch.nonzero(index).size(0) != 0:
        pesudo_label = F.softmax(model(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
        index_s = index * (pesudo_label != y)
        if torch.nonzero(index_s).size(0) != 0:
            loss_a = compute_loss(args=args, former_input=x_adv - x_natural, latter_input=x_adv, encoder=model,
                                  dim_local=local_n, dim_global=global_n, v_out=True) * index_s
            loss_n = compute_loss(args=args, former_input=x_adv - x_natural, latter_input=x_natural, encoder=model,
                                  dim_local=local_n, dim_global=global_n, v_out=True) * index_s
            # Cosine toward 0 for flipped samples.
            loss_mea = torch.abs(torch.cosine_similarity(loss_a, loss_n, dim=0))
            loss_a = loss_a.sum()/torch.nonzero(index_s).size(0)
            loss_all_w = 5.0 * loss_mea + loss_a
        else:
            loss_all_w = 0
        pesudo_label = F.softmax(model(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
        index_s = index * (pesudo_label == y)
        if torch.nonzero(index_s).size(0) != 0:
            loss_a = compute_loss(args=args, former_input=x_adv - x_natural, latter_input=x_adv, encoder=model,
                                  dim_local=local_n, dim_global=global_n, v_out=True) * index_s
            loss_n = compute_loss(args=args, former_input=x_adv - x_natural, latter_input=x_natural, encoder=model,
                                  dim_local=local_n, dim_global=global_n, v_out=True) * index_s
            # Cosine toward 1 for samples that stay correctly classified.
            loss_mea = torch.abs(torch.tensor(1.0).cuda() - torch.cosine_similarity(loss_n, loss_a, dim=0))
            loss_a = loss_a.sum() / torch.nonzero(index_s).size(0)
            loss_all_r = 8.0 * loss_mea - 0.1 * loss_a
        else:
            loss_all_r = 0
        loss_all = 1.0 * loss_all_w + 0.5 * loss_all_r
    else:
        loss_a = compute_loss(args=args, former_input=x_adv-x_natural, latter_input=x_adv, encoder=model,
                              dim_local=local_n, dim_global=global_n, v_out=True).mean()
        loss_all = loss_a
    if (i + 1) % args.print_freq == 0:
        print('select right natural samples; right adv samples:' + str(torch.nonzero(index).size(0)) + ';' + str(torch.nonzero(index_s).size(0)))
        print('Epoch [%d], Iter [%d/%d] Train MI estimator. Adversasrial MI: -n %.4f; Loss: -n %.4f'
              % (epoch, i + 1, 50000 // args.batch_size, loss_a.item(), loss_all.item()))
    return loss_all
def evaluate_mi_nat(encoder, x_natural, y, x_adv, local_n, global_n):
    """Mean natural-MI scores, split by attack outcome.

    Returns (loss_r_n, loss_r_a, loss_w_n, loss_w_a): MI on natural/adversarial
    inputs averaged over samples kept correct under attack (r_*) and over
    samples flipped by the attack (w_*).
    """
    encoder.eval()
    local_n.eval()
    global_n.eval()
    # Softmax over the class axis (dim=1); dim=0 normalized over the batch and
    # corrupted the per-sample argmax used for the selection masks.
    pesudo_label = F.softmax(encoder(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(encoder(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    index = index * (pesudo_label == y)
    loss_r_n = (compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    loss_r_a = (compute_loss(args=args, former_input=x_natural, latter_input=x_adv, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    pesudo_label = F.softmax(encoder(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(encoder(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    index = index * (pesudo_label != y)
    loss_w_n = (compute_loss(args=args, former_input=x_natural, latter_input=x_natural, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    loss_w_a = (compute_loss(args=args, former_input=x_natural, latter_input=x_adv, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    return loss_r_n, loss_r_a, loss_w_n, loss_w_a
def evaluate_mi_adv(encoder, x_natural, y, x_adv, local_n, global_n):
    """Mean adversarial-MI scores (perturbation x_adv - x_natural as input),
    split by attack outcome; see evaluate_mi_nat for the (r_*, w_*) layout.
    """
    encoder.eval()
    local_n.eval()
    global_n.eval()
    # Softmax over the class axis (dim=1); dim=0 normalized over the batch and
    # corrupted the per-sample argmax used for the selection masks.
    pesudo_label = F.softmax(encoder(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(encoder(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    index = index * (pesudo_label == y)
    loss_r_n = (compute_loss(args=args, former_input=x_adv-x_natural, latter_input=x_natural, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    loss_r_a = (compute_loss(args=args, former_input=x_adv-x_natural, latter_input=x_adv, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    pesudo_label = F.softmax(encoder(x_natural), dim=1).max(1, keepdim=True)[1].squeeze()
    index = (pesudo_label == y)
    pesudo_label = F.softmax(encoder(x_adv), dim=1).max(1, keepdim=True)[1].squeeze()
    index = index * (pesudo_label != y)
    loss_w_n = (compute_loss(args=args, former_input=x_adv-x_natural, latter_input=x_natural, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    loss_w_a = (compute_loss(args=args, former_input=x_adv-x_natural, latter_input=x_adv, encoder=encoder,
                             dim_local=local_n, dim_global=global_n, v_out=True) * index).sum()/torch.nonzero(index).size(0)
    return loss_r_n, loss_r_a, loss_w_n, loss_w_a
def eval_test(model, device, test_loader, local_n, global_n, local_a, global_a):
    """Report natural/robust accuracy and batch-averaged MI statistics.

    Crafts an L2 PGD adversary per batch, then prints classification accuracy
    on natural and adversarial inputs plus the MI losses of the natural and
    adversarial estimators, each split into right/wrong prediction groups.
    Returns the robust (adversarial) accuracy.
    """
    for net in (model, local_n, global_n, local_a, global_a):
        net.eval()
    n_batches = 0
    n_correct = 0
    n_correct_adv = 0
    # Order: n_n_r, n_a_r, n_n_w, n_a_w, a_n_r, a_a_r, a_n_w, a_a_w
    mi_sums = [0.0] * 8
    for inputs, labels in test_loader:
        n_batches += 1
        inputs, labels = inputs.to(device), labels.to(device)
        adv_inputs = craft_adversarial_example_pgd(model=model, x_natural=inputs, y=labels,
                                                   step_size=0.007, epsilon=0.5,
                                                   perturb_steps=40, distance='l_2')
        with torch.no_grad():
            preds = model(inputs).max(1, keepdim=True)[1]
            preds_adv = model(adv_inputs).max(1, keepdim=True)[1]
        mi_nat = evaluate_mi_nat(encoder=model, x_natural=inputs, y=labels, x_adv=adv_inputs,
                                 local_n=local_n, global_n=global_n)
        mi_adv = evaluate_mi_adv(encoder=model, x_natural=inputs, y=labels, x_adv=adv_inputs,
                                 local_n=local_a, global_n=global_a)
        n_correct += preds.eq(labels.view_as(preds)).sum().item()
        n_correct_adv += preds_adv.eq(labels.view_as(preds_adv)).sum().item()
        for k, term in enumerate(tuple(mi_nat) + tuple(mi_adv)):
            mi_sums[k] += term.item()
    total = len(test_loader.dataset)
    test_accuracy = n_correct_adv / total
    print('Test: Accuracy: {}/{} ({:.2f}%), Robust Accuracy: {}/{} ({:.2f}%)'.format(
        n_correct, total, 100. * n_correct / total, n_correct_adv, total,
        100. * n_correct_adv / total))
    avgs = [s / n_batches for s in mi_sums]
    print('Test: Natural MI: Right samples: -n: {:.4f}, -a: {:.4f}; Wrong samples: -n: {:.4f}, -a: {:.4f}'.format(
        avgs[0], avgs[1], avgs[2], avgs[3]))
    print('Test: Adversarial MI: Right samples: -n: {:.4f}, -a: {:.4f}; Wrong samples: -n: {:.4f}, -a: {:.4f}'.format(
        avgs[4], avgs[5], avgs[6], avgs[7]))
    return test_accuracy
def main():
    """Train natural and adversarial MI estimators against a frozen target model."""
    setup_seed(args.seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2"
    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)
    device = torch.device("cuda")

    # Data loaders: CIFAR-style augmentation for training, plain tensors for test.
    trans_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])
    trans_test = transforms.Compose([
        transforms.ToTensor()
    ])
    trainset = data_dataset(img_path=args.nat_img_train, clean_label_path=args.nat_label_train,
                            transform=trans_train)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, drop_last=False,
                                               shuffle=True, num_workers=4, pin_memory=True)
    testset = data_dataset(img_path=args.nat_img_test, clean_label_path=args.nat_label_test, transform=trans_test)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, drop_last=False, shuffle=False,
                                              num_workers=4, pin_memory=True)

    # MI estimators. Part 1 maps X (or an internal layer) into H space;
    # part 2 maps the encoder output Z into H space.
    local_n = Estimator(args.va_hsize)
    local_a = Estimator(args.va_hsize)
    if args.is_internal:
        if args.is_internal_last:
            z_size = 512
            global_n = MIInternallastConvNet(z_size, args.va_hsize)
            global_a = MIInternallastConvNet(z_size, args.va_hsize)
        else:
            z_size = 256
            global_n = MIInternalConvNet(z_size, args.va_hsize)
            global_a = MIInternalConvNet(z_size, args.va_hsize)
    else:
        z_size = 10
        global_n = MI1x1ConvNet(z_size, args.va_hsize)
        global_a = MI1x1ConvNet(z_size, args.va_hsize)

    print('----------------Start training-------------')
    # Load the pretrained target model, copying only parameters whose names
    # match. BUG FIX: the original loop did `break` on the first unmatched key,
    # silently discarding every remaining parameter; unmatched keys are now
    # simply skipped.
    target_model = ResNet18(10)
    state_dic = torch.load(args.pre_target)
    new_state = target_model.state_dict()
    for k in state_dic.keys():
        if k in new_state.keys():
            new_state[k] = state_dic[k]
    target_model.load_state_dict(new_state)

    target_model = torch.nn.DataParallel(target_model).cuda()
    local_n = torch.nn.DataParallel(local_n).cuda()
    global_n = torch.nn.DataParallel(global_n).cuda()
    local_a = torch.nn.DataParallel(local_a).cuda()
    global_a = torch.nn.DataParallel(global_a).cuda()
    cudnn.benchmark = True

    opt_local_n, schedule_local_n = make_optimizer_and_schedule(local_n, lr=args.lr_mi)
    opt_global_n, schedule_global_n = make_optimizer_and_schedule(global_n, lr=args.lr_mi)
    opt_local_a, schedule_local_a = make_optimizer_and_schedule(local_a, lr=args.lr_mi)
    opt_global_a, schedule_global_a = make_optimizer_and_schedule(global_a, lr=args.lr_mi)

    for epoch in range(1, args.epochs + 1):
        loss_n_all = 0
        loss_a_all = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            # Craft L2 PGD adversarial examples against the frozen target model.
            adv = craft_adversarial_example_pgd(model=target_model, x_natural=data, y=target, step_size=0.007,
                                                epsilon=0.5, perturb_steps=20, distance='l_2')
            # Update the natural-MI estimator.
            loss_n = MI_loss_nat(i=batch_idx, model=target_model, x_natural=data, y=target, x_adv=adv,
                                 local_n=local_n, global_n=global_n, epoch=epoch)
            loss_n_all += loss_n
            opt_local_n.zero_grad()
            opt_global_n.zero_grad()
            loss_n.backward()
            opt_local_n.step()
            opt_global_n.step()
            # Update the adversarial-MI estimator.
            loss_a = MI_loss_adv(i=batch_idx, model=target_model, x_natural=data, y=target, x_adv=adv,
                                 local_n=local_a, global_n=global_a, epoch=epoch)
            loss_a_all += loss_a
            opt_local_a.zero_grad()
            opt_global_a.zero_grad()
            loss_a.backward()
            opt_local_a.step()
            opt_global_a.step()
        # Per-epoch LR schedule steps for all four optimizers.
        schedule_local_n.step()
        schedule_global_n.step()
        schedule_local_a.step()
        schedule_global_a.step()
        loss_n_all = loss_n_all / (batch_idx + 1)
        loss_a_all = loss_a_all / (batch_idx + 1)

        print('================================================================')
        test_accuracy = eval_test(model=target_model, device=device, test_loader=test_loader, local_n=local_n,
                                  global_n=global_n, local_a=local_a,
                                  global_a=global_a)
        # Checkpoint the four estimator heads (latest weights overwrite).
        if epoch % args.save_freq == 0:
            torch.save(local_n.module.state_dict(),
                       os.path.join(args.model_dir, 'local_n_model.pth'))
            torch.save(global_n.module.state_dict(),
                       os.path.join(args.model_dir, 'global_n_model.pth'))
            torch.save(local_a.module.state_dict(),
                       os.path.join(args.model_dir, 'local_a_model.pth'))
            torch.save(global_a.module.state_dict(),
                       os.path.join(args.model_dir, 'global_a_model.pth'))
            print('save the model')
        print('================================================================')


if __name__ == '__main__':
    main()
| 23,492 | 40.580531 | 153 | py |
MIAT | MIAT-main/compute_MI.py | import torch
from functions.dim_losses import donsker_varadhan_loss, infonce_loss, fenchel_dual_loss
def compute_loss(args, former_input, latter_input, encoder, dim_local, dim_global, v_out=False, with_latent=False,
                 fake_relu=False, no_relu=False):
    """Validate relu-related flags, then estimate MI between the two inputs.

    Thin wrapper around `compute_out`; `with_latent`, `fake_relu` and `no_relu`
    are only sanity-checked here.

    Raises:
        ValueError: if both `no_relu` and `fake_relu` are requested.
    """
    if no_relu and (not with_latent):
        # BUG FIX: the original message closed the quote mid-flag
        # ("'with_latent is False.").
        print("WARNING: 'no_relu' has no visible effect if 'with_latent' is False.")
    if no_relu and fake_relu:
        raise ValueError("Options 'no_relu' and 'fake_relu' are exclusive")
    output = compute_out(args=args, former_input=former_input, latter_input=latter_input, encoder=encoder,
                         dim_local=dim_local, dim_global=dim_global, v_out=v_out)
    return output
def compute_out(args, former_input, latter_input, encoder, dim_local, dim_global, v_out):
    '''
    Compute the DIM (mutual-information estimation) loss between `former_input`
    and the encoder representation of `latter_input`.

    :param args: namespace providing is_internal, is_internal_last,
        va_fd_measure and va_mode.
    :param former_input: x, fed to the local network as-is.
    :param latter_input: x', encoded by `encoder` to produce the global feature.
    :param encoder: feature encoder; called with the two `is_internal*` flags.
    :param dim_local: network mapping `former_input` into H space.
    :param dim_global: network mapping the encoder output into H space.
    :param v_out: forwarded to `cal_dim`; with the DV mode it requests
        per-sample (vector) output instead of a scalar.
    :return: MI estimation loss (see `cal_dim`).
    '''
    rep_out = encoder(latter_input, args.is_internal, args.is_internal_last)
    out_local, out_global = extract(former_input, rep_out, dim_local, dim_global)
    va_fd_measure = args.va_fd_measure
    va_mode = args.va_mode
    loss_encoder_dim = cal_dim(out_local, out_global, va_fd_measure, va_mode, scale=1.0, v_out=v_out)
    return loss_encoder_dim
def sample_locations(enc, n_samples):
    '''Randomly subsample spatial locations from localized features.

    Draws `n_samples` locations per batch element, uniformly with replacement.
    Used for saving memory downstream.

    Args:
        enc: Feature tensor of shape (batch, units, n_locs).
        n_samples: Number of locations to draw per batch element.
    Returns:
        torch.Tensor of shape (batch, units, n_samples).
    '''
    n_locs = enc.size(2)
    batch_size = enc.size(0)
    # Uniform weights over the location axis; multinomial picks indices.
    uniform = torch.tensor([1. / n_locs] * n_locs, dtype=torch.float)
    idx = torch.multinomial(uniform, n_samples * batch_size, replacement=True) \
        .view(batch_size, n_samples)
    rows = torch.arange(0, batch_size).long()
    picked = enc.transpose(1, 2)[rows[:, None], idx]
    return picked.transpose(1, 2)
def extract(input, outs, local_net=None, global_net=None, local_samples=None,
            global_samples=None):
    '''Encode local and global activations into a shared space.

    Args:
        input: Raw (local) activations; detached before encoding.
        outs: Encoder output used as the global activation.
        local_net (nn.Module): Network encoding local activations.
        global_net (nn.Module): Network encoding global activations.
        local_samples: If set, number of local locations to subsample.
        global_samples: If set, number of global locations to subsample.
    Returns:
        tuple: (local, global) outputs, each shaped (N, units, n_locations).
    '''
    G = outs
    # A flat (N, C) global vector is promoted to an (N, C, 1, 1) feature map.
    if len(G.size()[1:]) == 1:
        G = G[:, :, None, None]
    # Local features are detached so gradients do not flow into the input path.
    L = local_net(input.detach())
    G = global_net(G)
    N, local_units = L.size()[:2]
    L = L.view(N, local_units, -1)
    G = G.view(N, local_units, -1)
    # Optional location subsampling to save memory.
    if global_samples is not None:
        G = sample_locations(G, global_samples)
    if local_samples is not None:
        L = sample_locations(L, local_samples)
    return (L, G)
def cal_dim(L, G, measure='JSD', mode='fd', scale=1.0, act_penalty=0., v_out=False):
    '''Dispatch to the configured MI lower-bound estimator.

    Args:
        L: Local features, (N, units, n_locals).
        G: Global features, (N, units, n_multis).
        measure: Type of f-divergence, used with mode `fd`.
        mode: `fd` (Fenchel dual), `nce` (infoNCE) or `dv` (Donsker-Varadhan).
        scale: Weight on the MI term (`beta` in the DIM paper).
        act_penalty: Optional L2 penalty on global activations; can improve
            stability.
        v_out: Forwarded to the DV estimator.
    Returns:
        Weighted estimator loss plus the activation penalty.
    '''
    if mode == 'fd':
        mi_term = fenchel_dual_loss(L, G, measure=measure)
    elif mode == 'nce':
        mi_term = infonce_loss(L, G)
    elif mode == 'dv':
        mi_term = donsker_varadhan_loss(L, G, v_out)
    else:
        raise NotImplementedError(mode)
    penalty = act_penalty * (G ** 2).sum(1).mean() if act_penalty > 0. else 0.
    return scale * mi_term + penalty
| 3,956 | 29.206107 | 114 | py |
MIAT | MIAT-main/data.py | import numpy as np
import torch.utils.data as Data
from PIL import Image
# import tools
import torch
class data_noise_dataset(Data.Dataset):
    """Dataset pairing images with both noisy and clean labels.

    All three arrays are loaded eagerly from .npy files; images are stored as
    float32 in (B, C, H, W) layout.
    """

    def __init__(self, img_path, noisy_label_path, clean_label_path):
        self.train_data = np.load(img_path).astype(np.float32)  # B C H W
        self.train_noisy_labels = np.load(noisy_label_path)
        self.train_clean_labels = np.load(clean_label_path)

    def __getitem__(self, index):
        """Return (image, noisy_label, clean_label, index) as tensors."""
        image = torch.from_numpy(self.train_data[index])
        noisy = torch.from_numpy(np.array(self.train_noisy_labels[index])).long()
        clean = torch.from_numpy(np.array(self.train_clean_labels[index])).long()
        return image, noisy, clean, index

    def __len__(self):
        return len(self.train_data)
class data_dataset(Data.Dataset):
    """Image dataset with clean labels and an optional torchvision transform.

    Images stay in their raw numpy form until __getitem__, where they are
    converted to PIL so standard torchvision transforms can be applied.
    """

    def __init__(self, img_path, clean_label_path, transform=None):
        self.transform = transform
        self.train_data = np.load(img_path)
        labels = np.load(clean_label_path).astype(np.float32)
        self.train_clean_labels = torch.from_numpy(labels).long()

    def __getitem__(self, index):
        """Return (transformed_image, clean_label) for one sample."""
        image = Image.fromarray(self.train_data[index])
        if self.transform is not None:
            image = self.transform(image)
        return image, self.train_clean_labels[index]

    def __len__(self):
        return len(self.train_data)
class distilled_dataset(Data.Dataset):
    """In-memory dataset of distilled images with Bayes-optimal and noisy labels."""

    def __init__(self, distilled_images, distilled_noisy_labels, distilled_bayes_labels):
        self.distilled_images = distilled_images
        self.distilled_noisy_labels = distilled_noisy_labels
        self.distilled_bayes_labels = distilled_bayes_labels

    def __getitem__(self, index):
        """Return (image, bayes_label, noisy_label, index) as tensors."""
        image = torch.from_numpy(self.distilled_images[index])
        bayes = torch.from_numpy(np.array(self.distilled_bayes_labels[index])).long()
        noisy = torch.from_numpy(np.array(self.distilled_noisy_labels[index])).long()
        return image, bayes, noisy, index

    def __len__(self):
        return len(self.distilled_images)
| 2,595 | 31.45 | 144 | py |
MIAT | MIAT-main/test_comparison.py | from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import torch.backends.cudnn as cudnn
from data import data_dataset# , data_noise_dataset, distilled_dataset
from models.vggnet import VGGNet19
from models.resnet_new import ResNet18
# from models import resnet_transition
# from models import resnet
import numpy as np
from torch.autograd import Variable
import torch.optim as optim
from utils_attack.autoattack import AutoAttack
from advertorch.attacks import LinfPGDAttack, CarliniWagnerL2Attack, DDNL2Attack, SpatialTransformAttack
from utils_attack.FWA import LinfFWA
from utils_attack.ti_dim_gpu import TIDIM_Attack
# Command-line configuration for the attack-comparison evaluation script.
parser = argparse.ArgumentParser(description='PyTorch CIFAR TRADES Adversarial Training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--batch-size', type=int, default=1000, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--dataset', type=str, help='fmnist,cifar10,svhn', default='cifar10')
# Attack hyper-parameters (L-inf radius and PGD step configuration).
parser.add_argument('--epsilon', default=0.031, help='perturbation')
parser.add_argument('--num-steps', default=10, help='perturb number of steps')
parser.add_argument('--step-size', default=0.007, help='perturb step size')
parser.add_argument('--model-dir', default='./checkpoint/resnet_18/MIAT_mart',
                    help='directory of model for saving checkpoint')
parser.add_argument('--print_freq', type=int, default=1)
# Paths to the pre-exported numpy arrays of images/labels.
parser.add_argument('--nat-img-train', type=str, help='natural training data', default='./data/train_images.npy')
parser.add_argument('--nat-label-train', type=str, help='natural training label', default='./data/train_labels.npy')
parser.add_argument('--nat-img-test', type=str, help='natural test data', default='./data/test_images.npy')
parser.add_argument('--nat-label-test', type=str, help='natural test label', default='./data/test_labels.npy')
args = parser.parse_args()
def setup_seed(seed):
    """Seed torch (CPU and all GPUs) and numpy for reproducibility."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def craft_adversarial_example_pgd(model, x_natural, y, step_size=0.003,
                                  epsilon=0.031, perturb_steps=10, distance='l_inf'):
    """Craft PGD adversarial examples maximizing cross-entropy against `model`.

    Supports an L-inf sign-gradient attack and an L2 attack that optimizes a
    perturbation `delta` with SGD and projects it back onto the epsilon-ball.
    Assumes CUDA is available (noise tensors are created with .cuda()).
    For any other `distance` the (noised) input is merely clamped to [0, 1].
    Returns the adversarial batch.
    """
    # Start from the natural input plus small Gaussian noise.
    x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cuda().detach()
    if distance == 'l_inf':
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                logits = model(x_adv)
                loss_kl = F.cross_entropy(logits, y)  # plain CE despite the name
            grad = torch.autograd.grad(loss_kl, [x_adv])[0]
            # Ascent step, then project into the eps-ball and the valid pixel range.
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif distance == 'l_2':
        batch_size = len(x_natural)
        delta = 0.001 * torch.randn(x_natural.shape).cuda().detach()
        delta = Variable(delta.data, requires_grad=True)
        # Setup optimizers
        optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
        for _ in range(perturb_steps):
            adv = x_natural + delta
            # optimize
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = (-1) * F.cross_entropy(model(adv), y)
            loss.backward()
            # renorming gradient to unit L2 norm per sample
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()
            # projection: keep x_natural + delta in [0, 1] and ||delta||_2 <= epsilon
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        # Unknown distance: return the (clamped) noised input unchanged.
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    return x_adv
def craft_adversarial_example(model, x_natural, y, step_size=0.003,
                              epsilon=8/255, perturb_steps=10, distance='l_inf'):
    """Craft adversarial examples with the currently enabled attack (TI-DIM).

    The triple-quoted blocks below are disabled alternative adversaries
    (AutoAttack, DDN, C&W, FWA, spatial transform) kept for experimentation.
    NOTE(review): `step_size`, `epsilon`, `perturb_steps` and `distance` are
    ignored here — the TIDIM_Attack construction hard-codes its own settings.
    """
    '''
    adversary = AutoAttack(model, norm='Linf', eps=8 / 255, version='standard')
    '''
    '''
    adversary = DDNL2Attack(model, nb_iter=20, gamma=0.05, init_norm=1.0, quantize=True, levels=16, clip_min=0.0,
                            clip_max=1.0, targeted=False, loss_fn=None)
    '''
    '''
    adversary = CarliniWagnerL2Attack(
        model, 10, clip_min=0.0, clip_max=1.0, max_iterations=10, confidence=1, initial_const=1, learning_rate=1e-2,
        binary_search_steps=4, targeted=False)
    '''
    '''
    adversary = LinfFWA(predict=model, loss_fn=nn.CrossEntropyLoss(reduction="sum"),
                        eps=8/255, kernel_size=4, lr=0.007, nb_iter=40, dual_max_iter=15, grad_tol=1e-4,
                        int_tol=1e-4, device="cuda", postprocess=False, verbose=True)
    '''
    '''
    adversary = SpatialTransformAttack(
        model, 10, clip_min=0.0, clip_max=1.0, max_iterations=10, search_steps=5, targeted=False)
    '''
    '''
    adversary = TIDIM_Attack(model,
                             decay_factor=1, prob=0.5,
                             epsilon=8/255, steps=40, step_size=0.01,
                             image_resize=33,
                             random_start=False)
    '''
    # Active attack: translation-invariant diverse-input method (TI-DIM).
    adversary = TIDIM_Attack(eps=8/255, steps=40, step_size=0.007, momentum=0.1, prob=0.5, clip_min=0.0, clip_max=1.0,
                             device=torch.device('cuda'), low=32, high=32)
    # x_adv = adversary.run_standard_evaluation(x_natural, y, bs=args.batch_size)
    # x_adv = adversary.perturb(x_natural, y)
    x_adv = adversary.perturb(model, x_natural, y)
    return x_adv
def evaluate_at(i, data, label, model):
    """Natural and robust accuracy (percent) of `model` on one batch.

    Crafts adversarial examples with the module-level attack, then returns
    [natural_accuracy, adversarial_accuracy], printing them every
    `args.print_freq` batches.
    """
    model.eval()  # Change model to 'eval' mode.
    data_adv = craft_adversarial_example(model=model, x_natural=data, y=label, step_size=args.step_size,
                                         epsilon=args.epsilon, perturb_steps=40, distance='l_inf')

    def _batch_accuracy(batch):
        # Softmax is monotone per row, so argmax equals the logits' argmax.
        probs = F.softmax(model(batch), dim=1)
        _, predicted = torch.max(probs.data, 1)
        hits = (predicted == label).sum()
        return 100 * float(hits) / batch.size(0)

    acc1 = _batch_accuracy(data)
    acc_adv = _batch_accuracy(data_adv)
    if (i + 1) % args.print_freq == 0:
        print(
            'Iter [%d/%d] Test classifier: Nat Acc: %.4f; Adv Acc: %.4f'
            % (i + 1, 10000 // data.size(0), acc1, acc_adv))
    return [acc1, acc_adv]
def main(args):
    """Load the target classifier and report natural/robust accuracy over the test set."""
    # settings
    setup_seed(args.seed)
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2"
    model_dir = args.model_dir
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # setup data loader
    trans = transforms.Compose([
        transforms.ToTensor()
    ])
    testset = data_dataset(img_path=args.nat_img_test, clean_label_path=args.nat_label_test, transform=trans)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, drop_last=False, shuffle=False,
                                              num_workers=4, pin_memory=True)

    # classifier = resnet.ResNet18(10)
    classifier = ResNet18(10)
    # classifier = VGGNet19()
    classifier.load_state_dict(torch.load(os.path.join(model_dir, 'target_model.pth')))
    classifier = torch.nn.DataParallel(classifier).to(device)
    cudnn.benchmark = True

    # Adversarial test
    print('------Starting test------')
    total_num = 0.
    test_nat_correct = 0.
    test_adv_correct = 0.
    for i, (data, labels) in enumerate(test_loader):
        # NOTE(review): .cuda() here ignores `use_cuda`; on a CPU-only machine
        # this raises even though `device` was set to cpu above — confirm intent.
        data = data.cuda()
        labels = labels.cuda()
        classifier.eval()
        test_acc = evaluate_at(i, data, labels, classifier)
        total_num += 1
        test_nat_correct += test_acc[0]
        test_adv_correct += test_acc[1]
    # NOTE(review): this averages per-batch accuracy percentages; with
    # drop_last=False the final (possibly smaller) batch is weighted the same
    # as full batches, slightly biasing the reported accuracy.
    test_nat_correct = test_nat_correct / total_num
    test_adv_correct = test_adv_correct / total_num
    print('Test Classifer: Nat ACC: %.4f; Adv ACC: %.4f' % (test_nat_correct, test_adv_correct))


if __name__ == '__main__':
    main(args)
| 8,587 | 33.629032 | 118 | py |
MIAT | MIAT-main/train_standard.py | from __future__ import print_function
import os
import argparse
import torch
# import torch.nn as nn
import torch.nn.functional as F
# import torchvision
import torch.optim as optim
from torchvision import datasets, transforms
from models.wideresnet import WideResNet
# from models.resnet import ResNet18
from utils.standard_loss import standard_loss
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import data_dataset
import numpy as np
# Command-line configuration for standard adversarial training.
parser = argparse.ArgumentParser(description='PyTorch CIFAR TRADES Adversarial Training')
# Paths to the pre-exported numpy arrays of images/labels.
parser.add_argument('--nat-img-train', type=str, help='natural training data', default='./data/train_images.npy')
parser.add_argument('--nat-label-train', type=str, help='natural training label', default='./data/train_labels.npy')
parser.add_argument('--nat-img-test', type=str, help='natural test data', default='./data/test_images.npy')
parser.add_argument('--nat-label-test', type=str, help='natural test label', default='./data/test_labels.npy')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
                    help='number of epochs to train')
parser.add_argument('--weight-decay', '--wd', default=2e-4,
                    type=float, metavar='W')
parser.add_argument('--lr', type=float, default=1e-1, metavar='LR',
                    help='learning rate')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                    help='SGD momentum')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
# PGD attack hyper-parameters used during training.
parser.add_argument('--epsilon', default=8/255,
                    help='perturbation')
parser.add_argument('--num-steps', default=10,
                    help='perturb number of steps')
parser.add_argument('--step-size', default=0.007,
                    help='perturb step size')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--model-dir', default='./checkpoint/wideresnet/standard_AT',
                    help='directory of model for saving checkpoint')
parser.add_argument('--save-freq', '-s', default=2, type=int, metavar='N',
                    help='save frequency')
args = parser.parse_args()
def setup_seed(seed):
    """Make the torch (CPU/GPU) and numpy RNGs deterministic for this run."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed):
        seeder(seed)
def adjust_learning_rate(optimizer, epoch):
    """decrease the learning rate"""
    # Step schedule relative to args.lr: x0.1 from epoch 75, x0.01 from epoch 90.
    if epoch >= 90:
        lr = args.lr * 0.01
    elif epoch >= 75:
        lr = args.lr * 0.1
    else:
        lr = args.lr
    for group in optimizer.param_groups:
        group['lr'] = lr
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one epoch of adversarial training.

    Builds the per-batch objective with `standard_loss` and logs progress every
    `args.log_interval` batches. (A dead triple-quoted string duplicating the
    logging call at the end of the original function has been removed — it was
    a no-op expression, never executed as code.)
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()

        # calculate robust loss
        loss = standard_loss(model=model, x_natural=data, y=target, optimizer=optimizer, step_size=args.step_size,
                             epsilon=args.epsilon, perturb_steps=args.num_steps, distance='l_inf')
        loss.backward()
        optimizer.step()

        # print progress
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                100. * (batch_idx + 1) / len(train_loader), loss.item()))
def eval_train(model, device, train_loader):
    """Evaluate average cross-entropy loss and accuracy on the training set.

    Returns:
        (train_loss, training_accuracy): mean per-sample loss and the fraction
        of correctly classified samples.
    """
    model.eval()
    train_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in train_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # `size_average=False` is deprecated in modern torch;
            # reduction='sum' is the exact equivalent (summed per-sample loss).
            train_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    train_loss /= len(train_loader.dataset)
    print('Training: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        train_loss, correct, len(train_loader.dataset),
        100. * correct / len(train_loader.dataset)))
    training_accuracy = correct / len(train_loader.dataset)
    return train_loss, training_accuracy
def craft_adversarial_example(model, x_natural, y, step_size=0.003,
                              epsilon=0.031, perturb_steps=10, distance='l_inf'):
    """Craft PGD adversarial examples maximizing cross-entropy against `model`.

    Supports an L-inf sign-gradient attack and an L2 attack that optimizes a
    perturbation `delta` with SGD and projects it back onto the epsilon-ball.
    Assumes CUDA is available (noise tensors are created with .cuda()).
    For any other `distance` the (noised) input is merely clamped to [0, 1].
    Returns the adversarial batch.
    """
    # Start from the natural input plus small Gaussian noise.
    x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cuda().detach()
    if distance == 'l_inf':
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                logits = model(x_adv)
                loss_kl = F.cross_entropy(logits, y)  # plain CE despite the name
            grad = torch.autograd.grad(loss_kl, [x_adv])[0]
            # Ascent step, then project into the eps-ball and the valid pixel range.
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif distance == 'l_2':
        batch_size = len(x_natural)
        delta = 0.001 * torch.randn(x_natural.shape).cuda().detach()
        delta = Variable(delta.data, requires_grad=True)
        # Setup optimizers
        optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
        for _ in range(perturb_steps):
            adv = x_natural + delta
            # optimize
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = (-1) * F.cross_entropy(model(adv), y)
            loss.backward()
            # renorming gradient to unit L2 norm per sample
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()
            # projection: keep x_natural + delta in [0, 1] and ||delta||_2 <= epsilon
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        # Unknown distance: return the (clamped) noised input unchanged.
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    return x_adv
def eval_test(model, device, test_loader):
    """Evaluate natural and robust (40-step L-inf PGD) accuracy on the test set.

    Gradients stay enabled because the PGD attack needs them.

    Returns:
        (test_loss, test_accuracy): mean natural cross-entropy loss and
        natural accuracy (the robust accuracy is printed, not returned).
    """
    model.eval()
    test_loss = 0
    correct = 0
    correct_adv = 0
    # no torch.no_grad() here: craft_adversarial_example needs gradients
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        data_adv = craft_adversarial_example(model=model, x_natural=data, y=target,
                                             step_size=0.007, epsilon=8/255,
                                             perturb_steps=40, distance='l_inf')
        output = model(data)
        output_adv = model(data_adv)
        # `size_average=False` is deprecated; reduction='sum' is equivalent.
        test_loss += F.cross_entropy(output, target, reduction='sum').item()
        pred = output.max(1, keepdim=True)[1]
        pred_adv = output_adv.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).sum().item()
        correct_adv += pred_adv.eq(target.view_as(pred_adv)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('Test: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%), Robust Accuracy: {}/{} ({:.0f}%)'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset), correct_adv, len(test_loader.dataset),
        100. * correct_adv / len(test_loader.dataset)))
    test_accuracy = correct / len(test_loader.dataset)
    return test_loss, test_accuracy
def main():
    """Adversarially train WideResNet-34-10 on the exported dataset and checkpoint periodically."""
    # settings
    setup_seed(args.seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    model_dir = args.model_dir
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)  # NOTE(review): redundant with setup_seed above
    device = torch.device("cuda" if use_cuda else "cpu")

    # setup data loader
    trans_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])
    trans_test = transforms.Compose([
        transforms.ToTensor()
    ])
    trainset = data_dataset(img_path=args.nat_img_train, clean_label_path=args.nat_label_train,
                            transform=trans_train)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, drop_last=False,
                                               shuffle=True, num_workers=4, pin_memory=True)
    testset = data_dataset(img_path=args.nat_img_test, clean_label_path=args.nat_label_test, transform=trans_test)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, drop_last=False, shuffle=False,
                                              num_workers=4, pin_memory=True)

    # init model, ResNet18() can be also used here for training
    #model = ResNet18(10).to(device)
    model = WideResNet(34, 10, 10).to(device)
    model = torch.nn.DataParallel(model)
    cudnn.benchmark = True

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    for epoch in range(1, args.epochs + 1):
        # adjust learning rate for SGD
        adjust_learning_rate(optimizer, epoch)

        # adversarial training
        train(args, model, device, train_loader, optimizer, epoch)

        # evaluation on natural examples
        print('================================================================')
        # eval_train(model, device, train_loader)
        _, test_accuracy = eval_test(model, device, test_loader)

        # save checkpoint. NOTE(review): each save overwrites 'best_model.pth'
        # regardless of test_accuracy — the latest, not the best, model is kept.
        if epoch % args.save_freq == 0:
            '''
            torch.save(model.module.state_dict(),
                       os.path.join(model_dir, 'model-epoch{}.pt'.format(epoch)))
            '''
            torch.save(model.module.state_dict(),
                       os.path.join(model_dir, 'best_model.pth'))
            print('save the model')
        print('================================================================')


if __name__ == '__main__':
    main()
| 10,812 | 38.900369 | 116 | py |
MIAT | MIAT-main/functions/gan_losses.py | '''Losses for training basic GANs.
Most of this was taken out of the f-GAN paper. WGAN (IPM-style) is also supported.
'''
import math
import torch
import torch.nn.functional as F
from functions.misc import log_sum_exp
def raise_measure_error(measure):
    """Raise NotImplementedError listing the supported f-divergence measures."""
    supported = ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1']
    message = 'Measure `{}` not supported. Supported: {}'.format(measure, supported)
    raise NotImplementedError(message)
def get_positive_expectation(p_samples, measure, average=True):
    """Positive-sample term of a divergence / difference measure.

    Args:
        p_samples: Scores on positive (joint) samples.
        measure: One of GAN, JSD, X2, KL, RKL, DV, H2, W1.
        average: If True return the mean over samples, else the elementwise term.
    Returns:
        torch.Tensor
    """
    if measure == 'GAN':
        Ep = -F.softplus(-p_samples)
    elif measure == 'JSD':
        # Shifted by log 2 so the JSD bound is non-negative at the optimum.
        Ep = math.log(2.) - F.softplus(-p_samples)
    elif measure == 'X2':
        Ep = p_samples ** 2
    elif measure in ('KL', 'DV', 'W1'):
        Ep = p_samples
    elif measure == 'RKL':
        Ep = -torch.exp(-p_samples)
    elif measure == 'H2':
        Ep = 1. - torch.exp(-p_samples)
    else:
        raise_measure_error(measure)
    return Ep.mean() if average else Ep
def get_negative_expectation(q_samples, measure, average=True):
    """Negative-sample term of a divergence / difference measure.

    Args:
        q_samples: Scores on negative (product-of-marginals) samples.
        measure: One of GAN, JSD, X2, KL, RKL, DV, H2, W1.
        average: If True return the mean over samples, else the elementwise term.
    Returns:
        torch.Tensor
    """
    if measure == 'GAN':
        Eq = F.softplus(-q_samples) + q_samples
    elif measure == 'JSD':
        # Shifted by log 2 to match the positive term's shift.
        Eq = F.softplus(-q_samples) + q_samples - math.log(2.)
    elif measure == 'X2':
        Eq = -0.5 * ((torch.sqrt(q_samples ** 2) + 1.) ** 2)
    elif measure == 'KL':
        Eq = torch.exp(q_samples - 1.)
    elif measure == 'RKL':
        Eq = q_samples - 1.
    elif measure == 'DV':
        Eq = log_sum_exp(q_samples, 0) - math.log(q_samples.size(0))
    elif measure == 'H2':
        Eq = torch.exp(q_samples) - 1.
    elif measure == 'W1':
        Eq = q_samples
    else:
        raise_measure_error(measure)
    return Eq.mean() if average else Eq
def generator_loss(q_samples, measure, loss_type=None):
    """Computes the loss for the generator of a GAN.

    Args:
        q_samples: Critic scores on fake samples.
        measure: Measure to compute loss for.
        loss_type: None or 'minimax' for the basic minimax loss,
            'non-saturating' for the non-saturating variant.
    Raises:
        NotImplementedError: for any other `loss_type`.
    """
    if not loss_type or loss_type == 'minimax':
        return get_negative_expectation(q_samples, measure)
    if loss_type == 'non-saturating':
        return -get_positive_expectation(q_samples, measure)
    # BUG FIX: the original raised with unfilled `{}` placeholders (the
    # .format call was missing) and listed options that don't match the code.
    raise NotImplementedError(
        'Generator loss type `{}` not supported. '
        'Supported: [None, minimax, non-saturating]'.format(loss_type))
MIAT | MIAT-main/functions/dim_losses.py | '''cortex_DIM losses.
'''
import math
import torch
import torch.nn.functional as F
from functions.gan_losses import get_positive_expectation, get_negative_expectation
def fenchel_dual_loss(l, m, measure=None):
    '''Computes the f-divergence distance between positive and negative joint distributions.

    Note that vectors should be sent as 1x1.
    Divergences supported are Jensen-Shannon `JSD`, `GAN` (equivalent to JSD),
    Squared Hellinger `H2`, Chi-squeared `X2`, `KL`, and reverse KL `RKL`.

    Args:
        l: Local feature map, shape (N, units, n_locals).
        m: Multiple globals feature map, shape (N, units, n_multis).
        measure: f-divergence measure.

    Returns:
        torch.Tensor: Loss (scalar).
    '''
    N, units, n_locals = l.size()
    n_multis = m.size(2)
    # Flatten spatial locations into the batch dimension so each local /
    # global vector becomes one row: (N*n_locals, units) and (N*n_multis, units).
    l = l.view(N, units, n_locals)
    l = l.permute(0, 2, 1)
    l = l.reshape(-1, units)
    m = m.view(N, units, n_multis)
    m = m.permute(0, 2, 1)
    m = m.reshape(-1, units)
    # Outer product, we want a N x N x n_local x n_multi tensor of all
    # pairwise local/global scores.
    u = torch.mm(m, l.t())
    u = u.reshape(N, n_multis, N, n_locals).permute(0, 2, 3, 1)
    # Since we have a big tensor with both positive and negative samples, we
    # need to mask: the batch diagonal holds the positive (same-sample) pairs.
    mask = torch.eye(N).to(l.device)
    n_mask = 1 - mask
    # Compute the positive and negative score. Average the spatial locations.
    # E_pos = get_positive_expectation(u, measure, average=False).min(2)[0].mean(2)
    # E_neg = get_negative_expectation(u, measure, average=False).max(2)[0].mean(2)
    E_pos = get_positive_expectation(u, measure, average=False).mean(2).mean(2)
    E_neg = get_negative_expectation(u, measure, average=False).mean(2).mean(2)
    # Mask positive and negative terms for positive and negative parts of loss.
    E_pos = (E_pos * mask).sum() / mask.sum()
    E_neg = (E_neg * n_mask).sum() / n_mask.sum()
    loss = E_neg - E_pos
    return loss
def infonce_loss(l, m):
    '''Computes the noise contrastive estimation-based loss, a.k.a. infoNCE.

    Note that vectors should be sent as 1x1.

    Args:
        l: Local feature map, shape (N, units, n_locals).
        m: Multiple globals feature map, shape (N, units, n_multis).

    Returns:
        torch.Tensor: Loss (scalar).
    '''
    N, units, n_locals = l.size()  # e.g. 16, 2048, 2048
    _, _ , n_multis = m.size()  # e.g. 16, 2048, 128
    # First we make the input tensors the right shape.
    l_p = l.permute(0, 2, 1)  # (N, n_locals, units)
    m_p = m.permute(0, 2, 1)  # (N, n_multis, units)
    l_n = l_p.reshape(-1, units)  # (N*n_locals, units)
    m_n = m_p.reshape(-1, units)  # (N*n_multis, units)
    # Inner product for positive samples. Outer product for negative. We need
    # to do it this way for the multiclass loss. For the outer product, we
    # want a N x N x n_local x n_multi tensor.
    u_p = torch.matmul(l_p, m).unsqueeze(2)  # (N, n_locals, 1, n_multis)
    u_n = torch.mm(m_n, l_n.t())
    u_n = u_n.reshape(N, n_multis, N, n_locals).permute(0, 2, 3, 1)  # (N, N, n_locals, n_multis)
    # We need to mask the diagonal part of the negative tensor.
    mask = torch.eye(N)[:, :, None, None].to(l.device)
    n_mask = 1 - mask
    # Masking is done by shifting the diagonal down by 10 before exp, so
    # "self" pairs contribute (almost) nothing to the softmax denominator.
    u_n = (n_mask * u_n) - 10. * mask  # mask out "self" examples
    u_n = u_n.reshape(N, N * n_locals, n_multis).unsqueeze(dim=1).expand(-1, n_locals, -1, -1)
    # Since this is multiclass, we concat the positive along the class
    # dimension before performing log softmax.
    pred_lgt = torch.cat([u_p, u_n], dim=2)
    pred_log = F.log_softmax(pred_lgt, dim=2)
    # The positive score is the first element of the log softmax.
    loss = -pred_log[:, :, 0].mean()
    return loss
def donsker_varadhan_loss(l, m, v_out):
    '''Donsker-Varadhan style MI lower-bound loss.

    Note that vectors should be sent as 1x1.

    Args:
        l: Local feature map, shape (N, units, n_locals).
        m: Multiple globals feature map, shape (N, units, n_multis).
        v_out: If truthy, return the per-sample loss vector instead of the mean.

    Returns:
        torch.Tensor: Loss.
    '''
    N, units, n_locals = l.size()
    n_multis = m.size(2)
    # Flatten spatial locations into the batch dimension.
    l = l.view(N, units, n_locals)
    l = l.permute(0, 2, 1)
    l = l.reshape(-1, units)
    m = m.view(N, units, n_multis)
    m = m.permute(0, 2, 1)
    m = m.reshape(-1, units)
    # Outer product, we want a N x N x n_local x n_multi tensor of scores.
    u = torch.mm(m, l.t())
    u = u.reshape(N, n_multis, N, n_locals).permute(0, 2, 3, 1)
    # Since we have a big tensor with both positive and negative samples, we
    # need to mask; the batch diagonal marks positive pairs.
    mask = torch.eye(N).to(l.device).unsqueeze(2)
    n_mask = 1 - mask
    # Positive term is just the average of the diagonal.
    # E_pos = (u.mean(2) * mask).sum() / mask.sum()
    # NOTE(review): squeeze() assumes the local/multi dims are singleton here
    # (1x1 vectors, per the docstring) — confirm callers always pass that.
    u = u.squeeze()
    # u = u.mean(2).mean(2)
    # E_pos = (u.mean(2) * mask).sum() / mask.sum()
    E_pos = (u * mask).sum((1)) / mask.sum((1))
    E_neg = torch.logsumexp(u * n_mask, (1)) - torch.log(n_mask.sum((1)))
    # Negative term is the log sum exp of the off-diagonal terms. Mask out the positive.
    # u -= 10. * (1 - n_mask)
    # u_max = torch.max(u)
    # E_neg = torch.log((n_mask * torch.exp(u - u_max)).sum() + 1e-6) \
    #     + u_max - math.log(n_mask.sum())
    loss = (E_neg - E_pos).mean(dim=1)
    if not v_out:
        loss = loss.mean()
return loss | 5,148 | 30.981366 | 111 | py |
MIAT | MIAT-main/functions/dim_losses_post.py | '''cortex_DIM losses.
'''
import math
import torch
import torch.nn.functional as F
from functions.gan_losses import get_positive_expectation, get_negative_expectation
def fenchel_dual_loss(l, m, measure=None):
    '''Computes the f-divergence distance between positive and negative joint distributions.

    Note that vectors should be sent as 1x1.
    Divergences supported are Jensen-Shannon `JSD`, `GAN` (equivalent to JSD),
    Squared Hellinger `H2`, Chi-squeared `X2`, `KL`, and reverse KL `RKL`.

    Args:
        l: Local feature map, shape (N, units, n_locals).
        m: Multiple globals feature map, shape (N, units, n_multis).
        measure: f-divergence measure.

    Returns:
        torch.Tensor: Loss (scalar).
    '''
    N, units, n_locals = l.size()
    n_multis = m.size(2)
    # Flatten spatial locations into the batch dimension so each local /
    # global vector becomes one row.
    l = l.view(N, units, n_locals)
    l = l.permute(0, 2, 1)
    l = l.reshape(-1, units)
    m = m.view(N, units, n_multis)
    m = m.permute(0, 2, 1)
    m = m.reshape(-1, units)
    # Outer product, we want a N x N x n_local x n_multi tensor of all
    # pairwise local/global scores.
    u = torch.mm(m, l.t())
    u = u.reshape(N, n_multis, N, n_locals).permute(0, 2, 3, 1)
    # Since we have a big tensor with both positive and negative samples, we
    # need to mask: the batch diagonal holds the positive (same-sample) pairs.
    mask = torch.eye(N).to(l.device)
    n_mask = 1 - mask
    # Compute the positive and negative score. Average the spatial locations.
    # E_pos = get_positive_expectation(u, measure, average=False).min(2)[0].mean(2)
    # E_neg = get_negative_expectation(u, measure, average=False).max(2)[0].mean(2)
    E_pos = get_positive_expectation(u, measure, average=False).mean(2).mean(2)
    E_neg = get_negative_expectation(u, measure, average=False).mean(2).mean(2)
    # Mask positive and negative terms for positive and negative parts of loss.
    E_pos = (E_pos * mask).sum() / mask.sum()
    E_neg = (E_neg * n_mask).sum() / n_mask.sum()
    loss = E_neg - E_pos
    return loss
def infonce_loss(l, m):
    '''Computes the noise contrastive estimation-based loss, a.k.a. infoNCE.

    Note that vectors should be sent as 1x1.

    Args:
        l: Local feature map, shape (N, units, n_locals).
        m: Multiple globals feature map, shape (N, units, n_multis).

    Returns:
        torch.Tensor: Loss (scalar).
    '''
    N, units, n_locals = l.size()  # e.g. 16, 2048, 2048
    _, _ , n_multis = m.size()  # e.g. 16, 2048, 128
    # First we make the input tensors the right shape.
    l_p = l.permute(0, 2, 1)  # (N, n_locals, units)
    m_p = m.permute(0, 2, 1)  # (N, n_multis, units)
    l_n = l_p.reshape(-1, units)  # (N*n_locals, units)
    m_n = m_p.reshape(-1, units)  # (N*n_multis, units)
    # Inner product for positive samples. Outer product for negative. We need
    # to do it this way for the multiclass loss. For the outer product, we
    # want a N x N x n_local x n_multi tensor.
    u_p = torch.matmul(l_p, m).unsqueeze(2)  # (N, n_locals, 1, n_multis)
    u_n = torch.mm(m_n, l_n.t())
    u_n = u_n.reshape(N, n_multis, N, n_locals).permute(0, 2, 3, 1)  # (N, N, n_locals, n_multis)
    # We need to mask the diagonal part of the negative tensor.
    mask = torch.eye(N)[:, :, None, None].to(l.device)
    n_mask = 1 - mask
    # Masking is done by shifting the diagonal down by 10 before exp, so
    # "self" pairs contribute (almost) nothing to the softmax denominator.
    u_n = (n_mask * u_n) - 10. * mask  # mask out "self" examples
    u_n = u_n.reshape(N, N * n_locals, n_multis).unsqueeze(dim=1).expand(-1, n_locals, -1, -1)
    # Since this is multiclass, we concat the positive along the class
    # dimension before performing log softmax.
    pred_lgt = torch.cat([u_p, u_n], dim=2)
    pred_log = F.log_softmax(pred_lgt, dim=2)
    # The positive score is the first element of the log softmax.
    loss = -pred_log[:, :, 0].mean()
    return loss
def donsker_varadhan_loss(l, m, v_out):
    '''Donsker-Varadhan style MI lower-bound loss (batched-matmul variant).

    Note that vectors should be sent as 1x1.

    Args:
        l: Local feature map, shape (N, units, n_locals).
        m: Multiple globals feature map, shape (N, units, n_multis).
        v_out: If truthy, return the per-unit loss vector instead of the sum.

    Returns:
        torch.Tensor: Loss.
    '''
    N, units, n_locals = l.size()
    n_multis = m.size(2)
    # First we make the input tensors the right shape.
    l = l.view(N, units, n_locals)
    l = l.permute(1, 0, 2)  # hide units as batch -> (units, N, n_locals)
    # l = l.permute(2, 0, 1)
    # l = l.permute(0, 2, 1)
    # l = l.reshape(-1, units)
    m = m.view(N, units, n_multis)
    m = m.permute(1, 2, 0)  # (units, n_multis, N)
    # m = m.permute(2, 1, 0)
    # m = m.permute(0, 2, 1)
    # m = m.reshape(-1, units)
    # Per-unit outer product over the batch: u has one N x N score matrix per unit.
    # NOTE(review): bmm requires n_locals == n_multis here (1x1 vectors per the
    # docstring make both 1) — confirm callers respect that.
    u = torch.bmm(l, m)
    # u = torch.mm(m, l.t())
    # u = u.reshape(N, n_multis, N, n_locals).permute(0, 2, 3, 1)
    # Since we have a big tensor with both positive and negative samples, we
    # need to mask; the diagonal of each N x N matrix marks positive pairs.
    mask = torch.eye(N).to(l.device).unsqueeze(0)
    n_mask = 1 - mask
    # Positive term is just the average of the diagonal.
    # E_pos = (u.mean(2) * mask).sum() / mask.sum()
    # u = u.squeeze()
    # u = u.mean(2).mean(2)
    E_pos = (u * mask).sum((1, 2)) / mask.sum((1, 2))
    # Negative term is the log sum exp of the off-diagonal terms. Mask out the
    # positive by shifting the diagonal far down, then use the max-shift trick
    # for numerical stability.
    u -= 10. * (1 - n_mask)
    u_max = u.max(1, keepdim=True)[0].max(2, keepdim=True)[0]
    # u_max = torch.max(u)
    E_neg = torch.log((n_mask * torch.exp(u - u_max)).sum((1, 2)) + 1e-6) \
        + u_max.squeeze() - math.log(n_mask.sum((1, 2)))
    loss = E_neg - E_pos
    if not v_out:
        loss = loss.sum()
return loss | 5,264 | 30.716867 | 111 | py |
MIAT | MIAT-main/functions/gradient_penalty.py | '''Gradient penalty functions.
'''
import torch
from torch import autograd
def contrastive_gradient_penalty(network, input, penalty_amount=1.):
    """Contrastive gradient penalty.

    This is essentially the loss introduced by Mescheder et al 2018.

    Args:
        network: Network to apply penalty through.
        input: Input or list of inputs for network.
        penalty_amount: Amount of penalty.

    Returns:
        torch.Tensor: gradient penalty loss.
    """
    def _get_gradient(inp, output):
        # d(output)/d(inp). create_graph/retain_graph keep the penalty itself
        # differentiable so it can be trained through.
        gradient = autograd.grad(outputs=output, inputs=inp,
                                 grad_outputs=torch.ones_like(output),
                                 create_graph=True, retain_graph=True,
                                 only_inputs=True, allow_unused=True)[0]
        return gradient

    if not isinstance(input, (list, tuple)):
        input = [input]
    # Detach from any upstream graph, then re-enable grads on these leaves so
    # the gradient is taken w.r.t. the inputs alone.
    input = [inp.detach() for inp in input]
    input = [inp.requires_grad_() for inp in input]

    with torch.set_grad_enabled(True):
        # Penalise the gradient of the network's last output head only.
        output = network(*input)[-1]
    gradient = _get_gradient(input, output)
    gradient = gradient.view(gradient.size()[0], -1)
    # Mean over the batch of the squared gradient norm.
    penalty = (gradient ** 2).sum(1).mean()
    return penalty * penalty_amount
| 1,238 | 27.813953 | 72 | py |
MIAT | MIAT-main/functions/misc.py | """Miscilaneous functions.
"""
import math
import torch
def log_sum_exp(x, axis=None):
    """Numerically stable log-sum-exp.

    Args:
        x: Input tensor.
        axis: Axis over which to perform the reduction; ``None`` reduces over
            all elements.

    Returns:
        torch.Tensor: log sum exp
    """
    # Bug fix: the original manual max-shift (`x - torch.max(x, axis)[0]`)
    # only broadcast correctly for axis=0 and crashed for axis=None
    # (torch.max(x, None) is invalid). torch.logsumexp applies the same
    # stabilisation internally and handles any axis.
    if axis is None:
        return torch.logsumexp(x.reshape(-1), dim=0)
    return torch.logsumexp(x, dim=axis)
def random_permute(X):
    """Randomly permutes a tensor along its batch dimension, independently at
    every location.

    Args:
        X: Input tensor of shape (batch, units, locations).

    Returns:
        torch.Tensor: same shape as X, with entries shuffled across the batch
        dimension independently per location (shared across units).
    """
    X = X.transpose(1, 2)
    # Random scores on the same device as the input (the original hard-coded
    # .cuda(), which broke on CPU tensors).
    b = torch.rand((X.size(0), X.size(1)), device=X.device)
    # argsort of iid uniforms == a uniformly random permutation per column.
    idx = b.sort(0)[1]
    # Bug fix: torch.range is deprecated (inclusive endpoint, float dtype);
    # torch.arange produces the same long indices directly.
    adx = torch.arange(X.size(1), device=X.device)
    X = X[idx, adx[None, :]].transpose(1, 2)
    return X
def ms_ssim(X_a, X_b, window_size=11, size_average=True, C1=0.01**2, C2=0.03**2):
    """SSIM between two image batches using a Gaussian window (CUDA only).

    Taken from Po-Hsun-Su/pytorch-ssim.

    Args:
        X_a: First image batch, shape (N, C, H, W).
        X_b: Second image batch, same shape as X_a.
        window_size: Side length of the Gaussian window.
        size_average: If True return a scalar mean, else a per-sample mean.
        C1, C2: SSIM stabilisation constants.

    Returns:
        torch.Tensor: SSIM score(s).
    """
    channel = X_a.size(1)

    def gaussian(sigma=1.5):
        # 1D Gaussian profile over the window, normalised to sum to 1.
        gauss = torch.Tensor(
            [math.exp(-(x - window_size // 2) **
                      2 / float(2 * sigma ** 2)) for x in range(window_size)])
        return gauss / gauss.sum()

    def create_window():
        # NOTE(review): gaussian() takes *sigma* as its only argument, so this
        # call passes window_size (11) as sigma; upstream pytorch-ssim uses
        # sigma=1.5 — confirm this flattened window is intentional.
        _1D_window = gaussian(window_size).unsqueeze(1)
        # Outer product -> 2D window, replicated per channel for grouped conv.
        _2D_window = _1D_window.mm(
            _1D_window.t()).float().unsqueeze(0).unsqueeze(0)
        window = torch.Tensor(
            _2D_window.expand(channel, 1, window_size,
                              window_size).contiguous())
        return window.cuda()

    window = create_window()
    # Local means via depthwise (grouped) Gaussian filtering.
    mu1 = torch.nn.functional.conv2d(X_a, window,
                                     padding=window_size // 2, groups=channel)
    mu2 = torch.nn.functional.conv2d(X_b, window,
                                     padding=window_size // 2, groups=channel)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2
    # Local (co)variances: E[x^2] - E[x]^2 etc., under the same window.
    sigma1_sq = torch.nn.functional.conv2d(
        X_a * X_a, window, padding=window_size // 2, groups=channel) - mu1_sq
    sigma2_sq = torch.nn.functional.conv2d(
        X_b * X_b, window, padding=window_size // 2, groups=channel) - mu2_sq
    sigma12 = torch.nn.functional.conv2d(
        X_a * X_b, window, padding=window_size // 2, groups=channel) - mu1_mu2
    # Standard SSIM map formula.
    ssim_map = (((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) /
                ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)))
    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)
| 2,417 | 25.571429 | 81 | py |
MIAT | MIAT-main/models/discriminators.py | import numpy as np
import torch
import torch.nn as nn
class PriorDisc(nn.Module):
    """Three-layer MLP discriminator for prior matching: 64 -> 1000 -> 200 -> 1,
    with a ReLU after every linear layer (including the last)."""

    def __init__(self):
        super().__init__()
        self.layer0 = nn.Sequential(
            nn.Linear(64, 1000),
            nn.ReLU(),
        )
        self.layer1 = nn.Sequential(
            nn.Linear(1000, 200),
            nn.ReLU(),
        )
        self.layer2 = nn.Sequential(
            nn.Linear(200, 1),
            nn.ReLU(),
        )
        # Kept as a plain list (not nn.ModuleList) on purpose: the stages are
        # already registered as attributes above.
        self.layers = [self.layer0, self.layer1, self.layer2]

    def forward(self, x: torch.Tensor, return_full_list=False, clip_grad=False):
        '''Run the discriminator.

        Args:
            x: Input of shape (batch, 64).
            return_full_list: Optional, returns all layer outputs.
            clip_grad: If truthy, clamp the gradient flowing through each
                stage's activation to [-clip_grad, clip_grad] via a hook.

        Returns:
            torch.Tensor or list of torch.Tensor.
        '''
        def _with_clipped_grad(value, low, high):
            clipped = value.expand_as(value)
            clipped.register_hook(lambda g: g.clamp(low, high))
            return clipped

        activations = []
        h = x
        for stage in self.layers:
            h = stage(h)
            if clip_grad:
                h = _with_clipped_grad(h, -clip_grad, clip_grad)
            activations.append(h)
        return activations if return_full_list else activations[-1]
class MIInternalConvNet(nn.Module):
    """Strided conv head (three stride-2 convs, 8x spatial downsample) with a
    conv shortcut and channel-wise LayerNorm, used for MI estimation on
    internal feature maps.
    """

    def __init__(self, n_input, n_units,):
        """
        Args:
            n_input: Number of input units (channels).
            n_units: Number of output units (channels).
        """
        super().__init__()
        # Nonlinear branch: widen channels while downsampling 3x by stride 2,
        # then project with a final 1x1 conv (the only biased conv).
        self.block_nonlinear = nn.Sequential(
            nn.Conv2d(n_input, n_input*2, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(n_input*2),
            nn.ReLU(),
            nn.Conv2d(n_input*2, n_input*4, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(n_input*4),
            nn.ReLU(),
            nn.Conv2d(n_input * 4, n_units, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(n_units),
            nn.ReLU(),
            nn.Conv2d(n_units, n_units, kernel_size=1, stride=1, padding=0, bias=True),
        )
        # LayerNorm over the channel dim: move channels last, normalise, move back.
        self.block_ln = nn.Sequential(
            Permute(0, 2, 3, 1),
            nn.LayerNorm(n_units),
            Permute(0, 3, 1, 2)
        )
        # NOTE(review): the shortcut uses padding=0 while the main branch uses
        # padding=1 — confirm both branches yield matching spatial sizes for
        # the intended input resolution (required by the addition in forward).
        self.linear_shortcut = nn.Sequential(
            nn.Conv2d(n_input, n_input * 4, kernel_size=4, stride=2, padding=0, bias=False),
            nn.Conv2d(n_input * 4, n_units, kernel_size=3, stride=1, padding=0, bias=False),
            nn.Conv2d(n_units, n_units, kernel_size=1, stride=1, padding=0, bias=False))

        # initialize shortcut to be like identity (if possible)

    def forward(self, x):
        """
        Args:
            x: Input tensor (N, n_input, H, W).

        Returns:
            torch.Tensor: network output (N, n_units, H', W').
        """
        h = self.block_ln(self.block_nonlinear(x) + self.linear_shortcut(x))
        return h
class MIInternallastConvNet(nn.Module):
    """Strided conv head (two stride-2 convs, 4x spatial downsample) with a
    conv shortcut and channel-wise LayerNorm, used for MI estimation on the
    last internal feature map.
    """

    def __init__(self, n_input, n_units,):
        """
        Args:
            n_input: Number of input units (channels).
            n_units: Number of output units (channels).
        """
        super().__init__()
        # Nonlinear branch: two stride-2 convs, then a biased 1x1 projection.
        self.block_nonlinear = nn.Sequential(
            nn.Conv2d(n_input, n_input*2, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(n_input*2),
            nn.ReLU(),
            nn.Conv2d(n_input * 2, n_units, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(n_units),
            nn.ReLU(),
            nn.Conv2d(n_units, n_units, kernel_size=1, stride=1, padding=0, bias=True),
        )
        # LayerNorm over the channel dim: move channels last, normalise, move back.
        self.block_ln = nn.Sequential(
            Permute(0, 2, 3, 1),
            nn.LayerNorm(n_units),
            Permute(0, 3, 1, 2)
        )
        # NOTE(review): shortcut is a single stride-1 4x4 conv (padding=0) —
        # confirm its output spatial size matches the stride-2 main branch for
        # the intended input resolution.
        self.linear_shortcut = nn.Sequential(
            nn.Conv2d(n_input, n_units, kernel_size=4, stride=1, padding=0, bias=False),
            nn.Conv2d(n_units, n_units, kernel_size=1, stride=1, padding=0, bias=False))

        # initialize shortcut to be like identity (if possible)

    def forward(self, x):
        """
        Args:
            x: Input tensor (N, n_input, H, W).

        Returns:
            torch.Tensor: network output (N, n_units, H', W').
        """
        h = self.block_ln(self.block_nonlinear(x) + self.linear_shortcut(x))
        return h
class MI1x1ConvNet(nn.Module):
    """Simple custom 1x1 convnet: a nonlinear 1x1-conv branch plus a 1x1-conv
    shortcut initialised as a noisy copy of the identity, followed by
    channel-wise LayerNorm.
    """

    def __init__(self, n_input, n_units,):
        """
        Args:
            n_input: Number of input units (channels).
            n_units: Number of output units (channels).
        """
        super().__init__()
        self.block_nonlinear = nn.Sequential(
            nn.Conv2d(n_input, n_units, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(n_units),
            nn.ReLU(),
            nn.Conv2d(n_units, n_units, kernel_size=1, stride=1, padding=0, bias=True),
        )
        # LayerNorm over the channel dim: move channels last, normalise, move back.
        self.block_ln = nn.Sequential(
            Permute(0, 2, 3, 1),
            nn.LayerNorm(n_units),
            Permute(0, 3, 1, 2)
        )
        self.linear_shortcut = nn.Conv2d(n_input, n_units, kernel_size=1,
                                         stride=1, padding=0, bias=False)

        # initialize shortcut to be like identity (if possible): small uniform
        # noise everywhere, exact 1s on the leading diagonal channels.
        if n_units >= n_input:
            eye_mask = np.zeros((n_units, n_input, 1, 1), dtype=np.uint8)
            for i in range(n_input):
                eye_mask[i, i, 0, 0] = 1
            self.linear_shortcut.weight.data.uniform_(-0.01, 0.01)
            self.linear_shortcut.weight.data.masked_fill_(torch.tensor(eye_mask).bool(), 1.)

    def forward(self, x):
        """
        Args:
            x: Input tensor (N, n_input, H, W).

        Returns:
            torch.Tensor: network output (N, n_units, H, W).
        """
        h = self.block_ln(self.block_nonlinear(x) + self.linear_shortcut(x))
        return h
class MIFCNet(nn.Module):
    """Simple custom network for computing MI: a nonlinear MLP branch plus a
    linear shortcut whose weight is initialised as a noisy copy of the
    identity.
    """

    def __init__(self, n_input, n_units, bn=False):
        """
        Args:
            n_input: Number of input units.
            n_units: Number of output units (must be >= n_input).
            bn: Apply a LayerNorm to the output.
        """
        super().__init__()
        self.bn = bn

        assert n_units >= n_input

        self.linear_shortcut = nn.Linear(n_input, n_units)
        self.block_nonlinear = nn.Sequential(
            nn.Linear(n_input, n_units, bias=False),
            nn.BatchNorm1d(n_units),
            nn.ReLU(),
            nn.Linear(n_units, n_units)
        )

        # initialize the initial projection to a sort of noisy copy: small
        # uniform noise everywhere, exact 1s on the leading diagonal.
        eye_mask = np.zeros((n_units, n_input), dtype=np.uint8)
        for i in range(n_input):
            eye_mask[i, i] = 1

        self.linear_shortcut.weight.data.uniform_(-0.01, 0.01)
        # Bug fix: masked_fill_ requires a bool mask on modern PyTorch; the
        # sibling MI1x1ConvNet already converts with .bool(), this class
        # passed a uint8 tensor.
        self.linear_shortcut.weight.data.masked_fill_(torch.tensor(eye_mask).bool(), 1.)
        self.block_ln = nn.LayerNorm(n_units)

    def forward(self, x):
        """
        Args:
            x: Input tensor of shape (batch, n_input).

        Returns:
            torch.Tensor: network output of shape (batch, n_units).
        """
        h = self.block_nonlinear(x) + self.linear_shortcut(x)
        if self.bn:
            h = self.block_ln(h)
        return h
class Permute(torch.nn.Module):
    """Module wrapper around ``Tensor.permute`` with a fixed axis order, so an
    axis permutation can be placed inside an ``nn.Sequential``."""

    def __init__(self, *perm):
        """
        Args:
            *perm: Target ordering of the axes.
        """
        super().__init__()
        self.perm = perm

    def forward(self, input):
        """Reorder the axes of the input according to ``self.perm``.

        Args:
            input: Input tensor.

        Returns:
            torch.Tensor: permuted tensor (a view).
        """
        axes = self.perm
        return input.permute(*axes)
MIAT | MIAT-main/models/resnet.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 22:46:26 2020
@author: pc-3
"""
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
class Linear(nn.Module):
    """Bias-free linear layer whose weight is a plain randn-initialised
    parameter of shape (in_features, out_features)."""

    def __init__(self, in_features, out_features):
        super(Linear, self).__init__()
        self.w = nn.Parameter(torch.randn(in_features, out_features))

    def forward(self, x):
        # (batch, in_features) @ (in_features, out_features)
        return x.mm(self.w)
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style)."""

    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Identity shortcut unless shape changes; then a 1x1 conv projection.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style)."""

    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        # Identity shortcut unless shape changes; then a 1x1 conv projection.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet classifier (3-channel input, 3x3 stem)."""

    def __init__(self, block, num_blocks, num_classes):
        super(ResNet, self).__init__()
        self.in_planes = 64

        # NOTE: padding=0 in the stem (unlike the common padding=1 variant),
        # so spatial size shrinks by 2 before the residual stages.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block may downsample (given stride); the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        # Flatten pooled features, then classify.
        out_1 = out.view(out.size(0), -1)
        out_2 = self.linear(out_1)
        return out_2
class ResNet_F(nn.Module):
    """Single-channel-input variant of ResNet (stem takes 1 channel instead
    of 3); otherwise identical to ResNet above."""

    def __init__(self, block, num_blocks, num_classes):
        super(ResNet_F, self).__init__()
        self.in_planes = 64

        # 1 input channel (e.g. grayscale); padding=0 as in ResNet above.
        self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block may downsample (given stride); the rest use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        # Flatten pooled features, then classify.
        out_1 = out.view(out.size(0), -1)
        out_2 = self.linear(out_1)
        return out_2
# Factory helpers: standard ResNet depths built from BasicBlock / Bottleneck.
def ResNet18(num_classes):
    """ResNet-18: BasicBlock, layer counts [2, 2, 2, 2]."""
    return ResNet(BasicBlock, [2,2,2,2], num_classes)

def ResNet18_F(num_classes):
    """Single-channel-input ResNet-18 (uses ResNet_F)."""
    return ResNet_F(BasicBlock, [2,2,2,2], num_classes)

def ResNet34(num_classes):
    """ResNet-34: BasicBlock, layer counts [3, 4, 6, 3]."""
    return ResNet(BasicBlock, [3,4,6,3], num_classes)

def ResNet50(num_classes):
    """ResNet-50: Bottleneck, layer counts [3, 4, 6, 3]."""
    return ResNet(Bottleneck, [3,4,6,3], num_classes)

def ResNet101(num_classes):
    """ResNet-101: Bottleneck, layer counts [3, 4, 23, 3]."""
    return ResNet(Bottleneck, [3,4,23,3], num_classes)

def ResNet152(num_classes):
    """ResNet-152: Bottleneck, layer counts [3, 8, 36, 3]."""
    return ResNet(Bottleneck, [3,8,36,3], num_classes) | 5,724 | 35.006289 | 102 | py |
MIAT | MIAT-main/models/resnet_new.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 22:46:26 2020
@author: pc-3
"""
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
class Linear(nn.Module):
    """Minimal bias-free linear map; the weight is a randn-initialised
    parameter with shape (in_features, out_features)."""

    def __init__(self, in_features, out_features):
        super(Linear, self).__init__()
        self.w = nn.Parameter(torch.randn(in_features, out_features))

    def forward(self, x):
        # Matrix-multiply the batch against the stored weight.
        return x.mm(self.w)
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style)."""

    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Identity shortcut unless shape changes; then a 1x1 conv projection.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style)."""

    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        # Identity shortcut unless shape changes; then a 1x1 conv projection.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet classifier that can optionally expose internal
    feature maps (used by the MI estimators)."""

    def __init__(self, block, num_blocks, num_classes):
        super(ResNet, self).__init__()
        self.in_planes = 64

        # NOTE: padding=0 in the stem, so spatial size shrinks by 2 up front.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block may downsample (given stride); the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, is_internal=False, is_internal_last=False):
        """Forward pass.

        Args:
            x: Input image batch.
            is_internal: If True, return a feature map instead of logits.
            is_internal_last: With is_internal, return the layer4 feature map
                instead of the layer3 one.

        Returns:
            torch.Tensor: layer3 features, layer4 features, or class logits.
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        if is_internal == True:
            if is_internal_last == False:
                # Early exit with layer3 features.
                return out
            else:
                out = self.layer4(out)
                return out
        else:
            out = self.layer4(out)
            out = self.avgpool(out)
            out = out.view(out.size(0), -1)
            out = self.linear(out)
            return out
# Factory helpers: standard ResNet depths built from BasicBlock / Bottleneck.
def ResNet18(num_classes):
    """ResNet-18: BasicBlock, layer counts [2, 2, 2, 2]."""
    return ResNet(BasicBlock, [2,2,2,2], num_classes)

def ResNet34(num_classes):
    """ResNet-34: BasicBlock, layer counts [3, 4, 6, 3]."""
    return ResNet(BasicBlock, [3,4,6,3], num_classes)

def ResNet50(num_classes):
    """ResNet-50: Bottleneck, layer counts [3, 4, 6, 3]."""
    return ResNet(Bottleneck, [3,4,6,3], num_classes)

def ResNet101(num_classes):
    """ResNet-101: Bottleneck, layer counts [3, 4, 23, 3]."""
    return ResNet(Bottleneck, [3,4,23,3], num_classes)

def ResNet152(num_classes):
    """ResNet-152: Bottleneck, layer counts [3, 8, 36, 3]."""
    return ResNet(Bottleneck, [3,8,36,3], num_classes) | 4,523 | 33.8 | 102 | py |
MIAT | MIAT-main/models/wideresnet_new.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-activation wide-ResNet basic block (BN-ReLU-conv ordering) with
    optional dropout between the two convs."""

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        # When channels match, the shortcut is the raw input; otherwise a 1x1
        # conv applied to the pre-activated input.
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None

    def forward(self, x):
        if not self.equalInOut:
            # Pre-activate x itself so both the main path and the conv
            # shortcut see the normalised input.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A stage of `nb_layers` stacked blocks; only the first block applies
    the given stride (and the channel change)."""

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        # int(nb_layers): callers may pass a float (e.g. (depth-4)/6).
        for i in range(int(nb_layers)):
            layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet (depth/widen_factor parameterisation) that can optionally
    expose the block2 / block3 feature maps."""

    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        # Depth must satisfy depth = 6n + 4.
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 1st sub-block — NOTE: registered (so it appears in state_dict) but
        # never used in forward below.
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style init for convs; unit-gain BN; zero biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x, is_internal=False, is_internal_last=False):
        """Forward pass.

        Args:
            x: Input image batch.
            is_internal: If True, return a feature map instead of logits.
            is_internal_last: With is_internal, return the block3 feature map
                instead of the block2 one.

        Returns:
            torch.Tensor: block2 features, block3 features, or class logits.
        """
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        if is_internal == True:
            if is_internal_last == False:
                # Early exit with block2 features.
                return out
            else:
                out = self.block3(out)
                return out
        else:
            out = self.block3(out)
            out = self.relu(self.bn1(out))
            out = F.avg_pool2d(out, 8)
            out = out.view(-1, self.nChannels)
            return self.fc(out)
| 4,162 | 39.813725 | 116 | py |
MIAT | MIAT-main/models/extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style)."""

    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Identity shortcut unless shape changes; then a 1x1 conv projection.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style)."""

    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)

        # Identity shortcut unless shape changes; then a 1x1 conv projection.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet classifier (3x3 stem with padding=1, fixed 4x4
    average pooling before the classifier)."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block may downsample (given stride); the rest use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Fixed 4x4 pooling (assumes 4x4 feature maps, e.g. 32x32 inputs).
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
class ResNet_Internal(nn.Module):
    """ResNet feature extractor truncated after stage 3 (no layer4, no head).

    Returns the stage-3 feature map rather than class logits; used to expose
    intermediate representations.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet_Internal, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first one may downsample."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        """Return the stage-3 feature map for a batch of RGB images."""
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            h = stage(h)
        return h
class ResNet_Internal_last(nn.Module):
    """ResNet feature extractor through stage 4 (no pooling, no linear head).

    Returns the stage-4 feature map rather than class logits.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet_Internal_last, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first one may downsample."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        """Return the stage-4 feature map for a batch of RGB images."""
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        return h
def ResNet18():
    """18-layer ResNet classifier (BasicBlock, two blocks per stage)."""
    return ResNet(BasicBlock, [2, 2, 2, 2])


def ResNet18_Internal():
    """ResNet-18 feature extractor truncated after stage 3."""
    return ResNet_Internal(BasicBlock, [2, 2, 2, 2])


def ResNet18_Internal_last():
    """ResNet-18 feature extractor through stage 4 (no head)."""
    return ResNet_Internal_last(BasicBlock, [2, 2, 2, 2])


def ResNet34():
    """34-layer ResNet classifier (BasicBlock)."""
    return ResNet(BasicBlock, [3, 4, 6, 3])


def ResNet50():
    """50-layer ResNet classifier (Bottleneck)."""
    return ResNet(Bottleneck, [3, 4, 6, 3])


def ResNet101():
    """101-layer ResNet classifier (Bottleneck)."""
    return ResNet(Bottleneck, [3, 4, 23, 3])


def ResNet152():
    """152-layer ResNet classifier (Bottleneck)."""
    return ResNet(Bottleneck, [3, 8, 36, 3])
def test():
    """Smoke-test: run ResNet18 on one random CIFAR-sized input and print the output size."""
    model = ResNet18()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
| 6,178 | 33.138122 | 104 | py |
MIAT | MIAT-main/models/vggnet.py | import torch.nn as nn
def conv_init(m):
    """Initialize a conv module: Xavier-uniform weights (gain sqrt(2)), zero bias.

    No-op for modules whose class name does not contain 'Conv', so it can be
    passed to ``nn.Module.apply``.

    Fixes vs. original: the original called the bare names ``init`` and ``np``,
    neither of which is imported in this module (NameError at call time), and
    would crash on convs created with ``bias=False``.
    """
    import math  # local import: this module only imports torch.nn at the top
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # In-place nn.init API replaces the deprecated init.xavier_uniform/constant.
        nn.init.xavier_uniform_(m.weight, gain=math.sqrt(2))
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
def cfg(depth):
    """Return the per-layer channel plan for a VGG of the given depth.

    Integers are output channel counts for 3x3 conv layers; the string
    'mp' marks a 2x2 max-pool.  Valid depths: 11, 13, 16, 19.
    """
    depth_lst = [11, 13, 16, 19]
    assert (depth in depth_lst), "Error : VGGnet depth should be either 11, 13, 16, 19"
    plans = {
        11: [64, 'mp', 128, 'mp', 256, 256, 'mp', 512, 512, 'mp', 512, 512, 'mp'],
        13: [64, 64, 'mp', 128, 128, 'mp', 256, 256, 'mp', 512, 512, 'mp',
             512, 512, 'mp'],
        16: [64, 64, 'mp', 128, 128, 'mp', 256, 256, 256, 'mp',
             512, 512, 512, 'mp', 512, 512, 512, 'mp'],
        19: [64, 64, 'mp', 128, 128, 'mp', 256, 256, 256, 256, 'mp',
             512, 512, 512, 512, 'mp', 512, 512, 512, 512, 'mp'],
    }
    return plans[depth]
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and a bias term."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=True)
class VGG(nn.Module):
    """VGG classifier for 32x32 inputs: Conv-BN-ReLU feature stack + linear head."""

    def __init__(self, depth, num_classes):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg(depth))
        self.linear = nn.Linear(512, num_classes)

    def forward(self, x):
        """Return class logits for a batch of images."""
        h = self.features(x)
        h = h.view(h.size(0), -1)
        return self.linear(h)

    def _make_layers(self, cfg):
        """Translate a channel plan into Conv-BN-ReLU / MaxPool modules."""
        modules = []
        channels = 3
        for spec in cfg:
            if spec == 'mp':
                modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                modules.extend([conv3x3(channels, spec),
                                nn.BatchNorm2d(spec),
                                nn.ReLU(inplace=True)])
                channels = spec
        # Trailing 1x1/stride-1 average pool is an identity op, kept for
        # parity with the reference implementation.
        modules.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*modules)
def VGGNet16():
    """VGG-16 with a 10-class head (CIFAR-10)."""
    return VGG(16, 10)


def VGGNet19():
    """VGG-19 with a 10-class head (CIFAR-10)."""
    return VGG(19, 10)
| 2,135 | 23.837209 | 95 | py |
MIAT | MIAT-main/models/estimator.py | import numpy as np
import torch
import torch.nn as nn
class Estimator(nn.Module):
    """Small conv feature estimator with a 1x1 MLP head and identity-ish shortcut.

    Two strided conv stages (3->64->128 channels, each halving resolution)
    feed a 1x1-conv residual head whose output is layer-normalized over the
    channel dimension.  ``n_output`` sets the head width; ``cnn_input`` must
    match the channel count produced by the conv stack (128 by default).
    """
    def __init__(self, n_output, cnn_input=128):
        n_input = cnn_input   # channels entering the 1x1 head
        n_units = n_output    # channels produced by the head
        super().__init__()
        # Stage 0: 3 -> 64 channels, spatial size halved.
        self.layer0 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        # Stage 1: 64 -> 128 channels, spatial size halved again.
        self.layer1 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        # Deeper stages from an earlier revision, kept disabled:
        # self.layer2 = nn.Sequential(
        #     nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1, bias=False),
        #     nn.BatchNorm2d(256),
        #     nn.ReLU(),
        # )
        # self.layer3 = View(-1, 256 * 4 * 4)
        # self.layer4 = nn.Sequential(
        #     nn.Linear(4096, 1024),
        #     nn.BatchNorm1d(1024),
        #     nn.ReLU(),
        # )
        # self.layer5 = nn.Linear(1024, 64)
        # ModuleList so forward() can iterate and optionally stop early.
        self.layers = nn.ModuleList([self.layer0, self.layer1])
        # self.layers = [self.layer0, self.layer1, self.layer2, self.layer3,
        #                self.layer4, self.layer5]
        # Nonlinear branch of the 1x1 residual head.
        self.block_nonlinear = nn.Sequential(
            nn.Conv2d(n_input, n_units, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(n_units),
            nn.ReLU(),
            nn.Conv2d(n_units, n_units, kernel_size=1, stride=1, padding=0, bias=True),
        )
        # LayerNorm over channels: permute to NHWC, normalize last dim, permute back.
        self.block_ln = nn.Sequential(
            Permute(0, 2, 3, 1),
            nn.LayerNorm(n_units),
            Permute(0, 3, 1, 2)
        )
        # Linear branch of the residual head.
        self.linear_shortcut = nn.Conv2d(n_input, n_units, kernel_size=1,
                                         stride=1, padding=0, bias=False)
        # initialize shortcut to be like identity (if possible):
        # small random weights, then force a 1 on each (i, i) diagonal entry
        # of the 1x1 kernel so input channels pass through unchanged.
        if n_units >= n_input:
            eye_mask = np.zeros((n_units, n_input, 1, 1), dtype=np.uint8)
            for i in range(n_input):
                eye_mask[i, i, 0, 0] = 1
            self.linear_shortcut.weight.data.uniform_(-0.01, 0.01)
            self.linear_shortcut.weight.data.masked_fill_(torch.tensor(eye_mask).bool(), 1.)
    def forward(self, x: torch.Tensor, return_full_list=False, clip_grad=False,
                prop_limit=None):
        '''Forward pass
        Args:
            x: Input.
            return_full_list: Optional, returns all layer outputs.
                (Currently unused: the check below is commented out and the
                last layer output is always selected.)
            clip_grad: If truthy, clamp backward gradients of each stage's
                output to [-clip_grad, clip_grad] via a backward hook.
            prop_limit: Optional, stop after this many stages.
        Returns:
            torch.Tensor or list of torch.Tensor.
        '''
        def _clip_grad(v, min, max):
            # Register a backward hook that clamps incoming gradients.
            v_tmp = v.expand_as(v)
            v_tmp.register_hook(lambda g: g.clamp(min, max))
            return v_tmp
        out = []
        for i, layer in enumerate(self.layers):
            if prop_limit is not None and i >= prop_limit:
                break
            x = layer(x)
            if clip_grad:
                x = _clip_grad(x, -clip_grad, clip_grad)
            out.append(x)
        # if not return_full_list:
        out = out[-1]
        # Residual 1x1 head followed by channel LayerNorm.
        out = self.block_ln(self.block_nonlinear(out) + self.linear_shortcut(out))
        return out
class MINIConvNet(nn.Module):
    """Two-conv classifier head: Conv-BN-ReLU, Conv-BN-ReLU-MaxPool, Linear.

    The max-pool halves the spatial size, so the linear layer expects
    ``(img_size // 2)**2 * n_units_1`` flattened features.
    """

    def __init__(self, img_size, n_input, n_units_0, n_units_1, n_output):
        """
        Args:
            img_size: Input spatial size (assumed square).
            n_input: Input channel count.
            n_units_0: Channels after the first conv.
            n_units_1: Channels after the second conv.
            n_output: Output feature/class count.
        """
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(n_input, n_units_0, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(n_units_0),
            nn.ReLU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(n_units_0, n_units_1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(n_units_1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        )
        self.fc = nn.Sequential(
            nn.Linear((img_size // 2)**2 * n_units_1, n_output)
        )

    def forward(self, x):
        """Return the network output for input batch ``x``."""
        h = self.conv2(self.conv1(x))
        return self.fc(h.view(h.shape[0], -1))
class View(torch.nn.Module):
    """Module that reshapes its input to a fixed target shape."""

    def __init__(self, *shape):
        """
        Args:
            *shape: Target shape (may contain -1 wildcards).
        """
        super().__init__()
        self.shape = shape

    def forward(self, input):
        """Return ``input`` viewed with the stored shape.

        Args:
            input: Input tensor.
        Returns:
            torch.Tensor: reshaped tensor (shares storage with the input).
        """
        target = self.shape
        return input.view(*target)
class Permute(torch.nn.Module):
    """Module that permutes the axes of its input."""

    def __init__(self, *perm):
        """
        Args:
            *perm: Axis permutation (as accepted by ``Tensor.permute``).
        """
        super().__init__()
        self.perm = perm

    def forward(self, input):
        """Return ``input`` with axes permuted.

        Args:
            input: Input tensor.
        Returns:
            torch.Tensor: permuted view of the input.
        """
        axes = self.perm
        return input.permute(*axes)
MIAT | MIAT-main/models/wideresnet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-activation residual block for WideResNet (BN-ReLU-Conv ordering)."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        # When input and output widths match, the shortcut is the identity;
        # otherwise a 1x1 strided projection is used (and/or idiom: falsy -> conv).
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        # Pre-activation trick: when widths differ, rebind x to BN-ReLU(x) so
        # that BOTH conv1 and the projection shortcut consume the activated
        # input; when widths match, keep the raw x for the identity shortcut.
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            # Dropout between the two convs, active only in training mode.
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # Residual sum: identity shortcut if widths match, else 1x1 projection.
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A WideResNet stage: ``nb_layers`` blocks, only the first may downsample."""

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        """Build the stage: block 0 maps in->out with ``stride``, the rest are out->out."""
        blocks = []
        for idx in range(int(nb_layers)):
            src_planes = in_planes if idx == 0 else out_planes
            blk_stride = stride if idx == 0 else 1
            blocks.append(block(src_planes, out_planes, blk_stride, dropRate))
        return nn.Sequential(*blocks)

    def forward(self, x):
        """Run the whole stage."""
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide Residual Network (WRN-depth-widen_factor) for 32x32 inputs.

    ``depth`` must satisfy (depth - 4) % 6 == 0; each of the three stages
    then contains (depth - 4) / 6 BasicBlocks.
    """
    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        # Channel widths of stem and the three stages.
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) / 6  # blocks per stage
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 1st sub-block -- NOTE: never used in forward(); presumably kept so
        # externally-trained checkpoints load without key errors (TODO confirm).
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block (halves resolution)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block (halves resolution again)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style init for convs, unit-gamma/zero-beta for BN, zero bias for FC.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        """Return class logits for a batch of 32x32 RGB images."""
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # 8x8 global average pool (32 / 2 / 2 = 8), then flatten.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
| 3,898 | 40.924731 | 116 | py |
MIAT | MIAT-main/utils/mart_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def mart_loss(model, x_natural, y, x_adv, beta=6.0):
    """MART training loss: misclassification-aware adversarial loss.

    Combines (a) CE + boosted-margin loss on the adversarial examples and
    (b) a KL(natural || adversarial) regularizer reweighted by how unsure
    the model is about each clean example.

    Args:
        model: classifier mapping inputs to logits.
        x_natural: clean input batch.
        y: ground-truth labels.
        x_adv: precomputed adversarial counterparts of ``x_natural``.
        beta: weight of the KL regularizer.
    Returns:
        Scalar loss tensor.
    """
    kl_div = nn.KLDivLoss(reduction='none')
    n = len(x_natural)
    logits_nat = model(x_natural)
    logits_adv = model(x_adv)
    probs_adv = F.softmax(logits_adv, dim=1)
    # Most-likely wrong class: the runner-up when the top prediction is the
    # true label, otherwise the top prediction itself.
    ranked = torch.argsort(probs_adv, dim=1)[:, -2:]
    wrong_cls = torch.where(ranked[:, -1] == y, ranked[:, -2], ranked[:, -1])
    # Boosted CE: plain CE plus a margin term pushing down the wrong class.
    loss_adv = F.cross_entropy(logits_adv, y) + F.nll_loss(
        torch.log(1.0001 - probs_adv + 1e-12), wrong_cls)
    probs_nat = F.softmax(logits_nat, dim=1)
    true_probs = torch.gather(probs_nat, 1, (y.unsqueeze(1)).long()).squeeze()
    # KL regularizer, emphasized on samples the model is unsure about.
    loss_robust = (1.0 / n) * torch.sum(
        torch.sum(kl_div(torch.log(probs_adv + 1e-12), probs_nat), dim=1)
        * (1.0000001 - true_probs))
    return loss_adv + float(beta) * loss_robust
| 901 | 27.1875 | 104 | py |
MIAT | MIAT-main/utils/trades_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
def squared_l2_norm(x):
    """Sum of squared entries of the WHOLE tensor, returned with shape (1,).

    Note ``x.unsqueeze(0).shape[0]`` is always 1, so the tensor is flattened
    into a single row before squaring and summing.
    """
    rows = x.unsqueeze(0).shape[0]  # always 1
    flat = x.view(rows, -1)
    return (flat ** 2).sum(1)


def l2_norm(x):
    """Euclidean norm of the whole tensor, as a 1-element vector."""
    return squared_l2_norm(x).sqrt()
def trades_loss(model, x_natural, y, optimizer, step_size=0.003, epsilon=0.031, perturb_steps=10, beta=1.0,
                distance='l_inf'):
    """TRADES loss: CE on clean inputs + beta * KL(adv || natural) robustness term.

    Crafts adversarial examples maximizing the KL divergence (PGD for
    'l_inf', normalized-gradient SGD with projection for 'l_2'), then
    returns ``(total_loss, beta * robust_loss)``.  Requires CUDA (the
    perturbation noise is allocated with ``.cuda()``).
    """
    # define KL-loss; size_average=False is the deprecated spelling of reduction='sum'
    criterion_kl = nn.KLDivLoss(size_average=False)
    model.eval()  # freeze BN/dropout statistics while crafting the attack
    batch_size = len(x_natural)
    # generate adversarial example, starting from a small random perturbation
    x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cuda().detach()
    if distance == 'l_inf':
        # PGD on the KL divergence, clipped to the epsilon-ball and [0, 1].
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1),
                                       F.softmax(model(x_natural), dim=1))
            grad = torch.autograd.grad(loss_kl, [x_adv])[0]
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif distance == 'l_2':
        delta = 0.001 * torch.randn(x_natural.shape).cuda().detach()
        delta = Variable(delta.data, requires_grad=True)
        # Setup optimizers: ascend the KL by minimizing its negation w.r.t. delta
        optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
        for _ in range(perturb_steps):
            adv = x_natural + delta
            # optimize
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = (-1) * criterion_kl(F.log_softmax(model(adv), dim=1),
                                           F.softmax(model(x_natural), dim=1))
            loss.backward()
            # renorming gradient: normalize per-sample gradient to unit l2 norm
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()
            # projection: keep x+delta in [0, 1] and delta inside the l2 epsilon-ball
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        # Unknown distance: fall back to the clamped random start.
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    model.train()  # restore training mode for the actual update
    x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)
    # zero gradient
    optimizer.zero_grad()
    # calculate robust loss
    logits = model(x_natural)
    loss_natural = F.cross_entropy(logits, y)
    loss_robust = (1.0 / batch_size) * criterion_kl(F.log_softmax(model(x_adv), dim=1),
                                                    F.softmax(model(x_natural), dim=1))
    loss = loss_natural + beta * loss_robust
    return loss, beta * loss_robust
def trades_loss_extend(model, x_natural, y, optimizer, step_size=0.003, epsilon=0.031, perturb_steps=10, beta=1.0,
                       distance='l_inf'):
    """TRADES loss for a model whose forward supports ``logit_out=True``.

    Like ``trades_loss`` but the inner l_inf attack also maximizes CE on the
    model's second ("original") logit head, and that head's CE is added to
    the final objective.  Returns ``(total_loss, beta * robust_loss)``.
    Requires CUDA (perturbation noise is allocated with ``.cuda()``).
    """
    # define KL-loss; size_average=False is the deprecated spelling of reduction='sum'
    criterion_kl = nn.KLDivLoss(size_average=False)
    model.eval()  # freeze BN/dropout statistics while crafting the attack
    batch_size = len(x_natural)
    # generate adversarial example from a small random start
    x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cuda().detach()
    if distance == 'l_inf':
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                # model(x, logit_out=True) returns (noisy posterior, original logits)
                noisy_post, ori_logits = model(x_adv, logit_out=True)
                loss_kl = criterion_kl(F.log_softmax(noisy_post, dim=1), F.softmax(model(x_natural), dim=1)) \
                          + F.cross_entropy(ori_logits, y)
            grad = torch.autograd.grad(loss_kl, [x_adv])[0]
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif distance == 'l_2':
        delta = 0.001 * torch.randn(x_natural.shape).cuda().detach()
        delta = Variable(delta.data, requires_grad=True)
        # Setup optimizers: ascend the KL by minimizing its negation w.r.t. delta
        optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
        for _ in range(perturb_steps):
            adv = x_natural + delta
            # optimize
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = (-1) * criterion_kl(F.log_softmax(model(adv), dim=1),
                                           F.softmax(model(x_natural), dim=1))
            loss.backward()
            # renorming gradient: normalize per-sample gradient to unit l2 norm
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()
            # projection: keep x+delta in [0, 1] and delta inside the l2 epsilon-ball
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    model.train()  # restore training mode for the actual update
    x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)
    # zero gradient
    optimizer.zero_grad()
    # calculate robust loss (CE on both heads for clean input + KL robustness term)
    logits, logits_ori = model(x_natural, logit_out=True)
    loss_natural = F.cross_entropy(logits, y)
    loss_robust = (1.0 / batch_size) * criterion_kl(F.log_softmax(model(x_adv), dim=1),
                                                    F.softmax(model(x_natural), dim=1))
    loss = loss_natural + beta * loss_robust + F.cross_entropy(logits_ori, y)
    return loss, beta * loss_robust
| 5,958 | 39.815068 | 114 | py |
MIAT | MIAT-main/utils/mma_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
def squared_l2_norm(x):
    """Squared l2 norm over ALL entries of ``x`` (shape-(1,) result).

    ``x.unsqueeze(0).shape[0]`` is 1 by construction, so everything is
    flattened into one row before summing the squares.
    """
    flat = x.reshape(x.unsqueeze(0).shape[0], -1)
    return flat.pow(2).sum(1)


def l2_norm(x):
    """l2 norm over all entries of ``x`` (shape-(1,) result)."""
    return torch.sqrt(squared_l2_norm(x))
def mma_loss(model,
             x_natural,
             y,
             optimizer,
             step_size=0.003,
             epsilon=0.031,
             perturb_steps=10,
             distance='l_inf'):
    """Misclassification-aware loss: CE on adv examples for correctly
    classified samples + CE on clean examples for misclassified ones.

    Crafts adversarial examples with PGD ('l_inf') or projected l2 ascent
    ('l_2'), then weights the two CE terms by the clean prediction's
    correctness.  Returns ``(loss, loss_robust)``.  Requires CUDA (the
    perturbation noise is allocated with ``.cuda()``).

    Bug fix vs. original: in the 'l_2' branch the inner loss used
    ``model(x_adv)`` — a tensor fixed before the loop — so the loss did not
    depend on ``delta``, ``delta.grad`` stayed ``None`` and the next line
    raised.  It now uses ``model(adv)``, matching ``standard_loss``.
    """
    model.eval()  # freeze BN/dropout statistics while crafting the attack
    batch_size = len(x_natural)
    # generate adversarial example from a small random start
    x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cuda().detach()
    if distance == 'l_inf':
        # PGD ascent on CE, clipped to the epsilon-ball and [0, 1].
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                loss_ce = F.cross_entropy(model(x_adv), y)
            grad = torch.autograd.grad(loss_ce, [x_adv])[0]
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif distance == 'l_2':
        delta = 0.001 * torch.randn(x_natural.shape).cuda().detach()
        delta = Variable(delta.data, requires_grad=True)
        # Setup optimizers: ascend CE by minimizing its negation w.r.t. delta
        optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
        for _ in range(perturb_steps):
            adv = x_natural + delta
            # optimize
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                # FIX: was model(x_adv), which is independent of delta.
                loss = (-1) * F.cross_entropy(model(adv), y)
            loss.backward()
            # renorming gradient: normalize per-sample gradient to unit l2 norm
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()
            # projection: keep x+delta in [0, 1] and delta inside the l2 epsilon-ball
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    model.train()  # restore training mode for the actual update
    x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)
    # zero gradient
    optimizer.zero_grad()
    # calculate robust loss
    logits = model(x_natural)
    pred = logits.max(1, keepdim=True)[1]
    logits_adv = model(x_adv)
    # Per-sample masks: clean prediction correct vs. wrong.
    same_weight = (pred.squeeze() == y).float()
    dif_weight = (pred.squeeze() != y).float()
    # Clean CE on misclassified samples, adversarial CE on correct ones.
    loss_natural = (1.0 / batch_size) * torch.sum(
        F.cross_entropy(logits, y, reduction='none') * dif_weight)
    loss_robust = (1.0 / batch_size) * torch.sum(
        F.cross_entropy(logits_adv, y, reduction='none') * same_weight)
    loss = loss_natural + loss_robust
    return loss, loss_robust
| 3,073 | 31.702128 | 91 | py |
MIAT | MIAT-main/utils/standard_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
def standard_loss(model,
                  x_natural,
                  y,
                  optimizer,
                  step_size=0.007,
                  epsilon=0.031,
                  perturb_steps=10,
                  distance='l_inf'):
    """Standard adversarial-training (Madry-style) loss: CE on PGD examples.

    Crafts adversarial examples with PGD ('l_inf') or projected l2 ascent
    ('l_2'), then returns the cross-entropy on them.  Requires CUDA (the
    perturbation noise is allocated with ``.cuda()``).
    """
    model.eval()  # freeze BN/dropout statistics while crafting the attack
    # generate adversarial example from a small random start
    x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cuda().detach()
    if distance == 'l_inf':
        # PGD ascent on CE, clipped to the epsilon-ball and [0, 1].
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                loss_ce = F.cross_entropy(model(x_adv), y)
            grad = torch.autograd.grad(loss_ce, [x_adv])[0]
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif distance == 'l_2':
        batch_size = len(x_natural)
        delta = 0.001 * torch.randn(x_natural.shape).cuda().detach()
        delta = Variable(delta.data, requires_grad=True)
        # Setup optimizers: ascend CE by minimizing its negation w.r.t. delta
        optimizer_delta = optim.SGD([delta], lr=epsilon / perturb_steps * 2)
        for _ in range(perturb_steps):
            adv = x_natural + delta
            # optimize
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = (-1) * F.cross_entropy(model(adv), y)
            loss.backward()
            # renorming gradient: normalize per-sample gradient to unit l2 norm
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            # avoid nan or inf if gradient is 0
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()
            # projection: keep x+delta in [0, 1] and delta inside the l2 epsilon-ball
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    model.train()  # restore training mode for the actual update
    x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)
    # zero gradient
    optimizer.zero_grad()
    # logits_nat = model(x_natural)
    logits_adv = model(x_adv)
    # Earlier revision mixed clean and adversarial CE; current form is pure adv CE:
    # loss = 0.2 * F.cross_entropy(logits_nat, y) + 0.8 * F.cross_entropy(logits_adv, y)
    loss = F.cross_entropy(logits_adv, y)
    return loss
def standard_loss_extend(model,
                         x_natural,
                         y,
                         optimizer,
                         step_size=0.007,
                         epsilon=0.031,
                         perturb_steps=10,
                         distance='l_inf'):
    """Adversarial-training loss with an extra clean-CE term.

    Only 'l_inf' PGD is implemented; any other ``distance`` falls back to a
    clamped random perturbation.  The model's forward must support
    ``logit_out=True`` returning (posterior, original logits).  Requires
    CUDA (the perturbation noise is allocated with ``.cuda()``).
    """
    model.eval()  # freeze BN/dropout statistics while crafting the attack
    # generate adversarial example from a small random start
    x_adv = x_natural.detach() + 0.001 * torch.randn(x_natural.shape).cuda().detach()
    if distance == 'l_inf':
        # PGD ascent on CE, clipped to the epsilon-ball and [0, 1].
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                loss_ce = F.cross_entropy(model(x_adv), y)
            grad = torch.autograd.grad(loss_ce, [x_adv])[0]
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    else:
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    model.train()  # restore training mode for the actual update
    x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)
    # zero gradient
    optimizer.zero_grad()
    logits_adv = model(x_adv)
    logits_nat, logits_ori = model(x_natural, logit_out=True)
    # logits_ori is fetched but its CE term is currently disabled (see comment).
    loss_adv = F.cross_entropy(logits_adv, y) + F.cross_entropy(logits_nat, y)# + F.cross_entropy(logits_ori, y)
    return loss_adv
| 3,813 | 33.36036 | 112 | py |
MIAT | MIAT-main/utils/dataload.py | import torch
from torch.utils.data import Dataset, DataLoader
import re
import pickle
from PIL import Image
import os
import numpy as np
def sort_key(s):
    """Natural-order sort key: digit runs in ``s`` compare numerically.

    Splits the string on digit runs and converts those runs to ints, so
    e.g. 'img2' sorts before 'img10'.
    """
    parts = re.compile(r'(\d+)').split(s)
    parts[1::2] = [int(p) for p in parts[1::2]]
    return parts
def load_variavle(filename):
    """Unpickle and return the object stored in ``filename``.

    Uses a ``with`` block so the file handle is closed even if unpickling
    raises (the original leaked the handle on error).  Only call on trusted
    files: ``pickle.load`` can execute arbitrary code.
    """
    with open(filename, 'rb') as f:
        return pickle.load(f)
class DatasetIMG(Dataset):
    """Triplet image dataset: (natural, adversarial-1, adversarial-2).

    The three directories must contain identically named files; items are
    loaded as single-channel ('L') PIL images in natural filename order.
    """

    def __init__(self, imgnat_dirs, imgadv1_dirs, imgadv2_dirs, transform=None):
        self.imgnat_dirs = imgnat_dirs
        self.imgadv1_dirs = imgadv1_dirs
        self.imgadv2_dirs = imgadv2_dirs
        self.img_names = self.__get_imgnames__()
        self.transform = transform

    def __get_imgnames__(self):
        """Full natural-image paths in natural filename order."""
        names = os.listdir(self.imgnat_dirs)
        names.sort(key=sort_key)
        return [os.path.join(self.imgnat_dirs, name) for name in names]

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        nat_path = self.img_names[idx]
        # Adversarial counterparts share the filename, only the dir differs.
        adv1_path = nat_path.replace(self.imgnat_dirs, self.imgadv1_dirs)
        adv2_path = nat_path.replace(self.imgnat_dirs, self.imgadv2_dirs)
        nat = Image.open(nat_path).convert('L')
        adv1 = Image.open(adv1_path).convert('L')
        adv2 = Image.open(adv2_path).convert('L')
        if self.transform:
            nat = self.transform(nat)
            adv1 = self.transform(adv1)
            adv2 = self.transform(adv2)
        return nat, adv1, adv2
class DatasetIMG_Label(Dataset):
    """Labelled triplet image dataset: (natural, adv-1, adv-2, label).

    Labels come from a pickled sequence whose order must match the
    naturally sorted filenames of the natural-image directory.
    """

    def __init__(self, imgnat_dirs, imgadv1_dirs, imgadv2_dirs, label_dirs, transform=None):
        self.imgnat_dirs = imgnat_dirs
        self.imgadv1_dirs = imgadv1_dirs
        self.imgadv2_dirs = imgadv2_dirs
        self.label_dirs = label_dirs
        self.img_names = self.__get_imgnames__()
        self.label = self.__get_label__()
        self.transform = transform

    def __get_imgnames__(self):
        """Full natural-image paths in natural filename order."""
        names = os.listdir(self.imgnat_dirs)
        names.sort(key=sort_key)
        return [os.path.join(self.imgnat_dirs, name) for name in names]

    def __get_label__(self):
        """Load the pickled label sequence as a torch tensor."""
        return torch.from_numpy(np.array(load_variavle(self.label_dirs)))

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        nat_path = self.img_names[idx]
        adv1_path = nat_path.replace(self.imgnat_dirs, self.imgadv1_dirs)
        adv2_path = nat_path.replace(self.imgnat_dirs, self.imgadv2_dirs)
        nat = Image.open(nat_path).convert('L')
        adv1 = Image.open(adv1_path).convert('L')
        adv2 = Image.open(adv2_path).convert('L')
        if self.transform:
            nat = self.transform(nat)
            adv1 = self.transform(adv1)
            adv2 = self.transform(adv2)
        return nat, adv1, adv2, self.label[idx]
class DatasetIMG_Dual_Lable(Dataset):
    """Labelled image pair dataset: (natural, adversarial, label).

    Labels come from a pickled sequence whose order must match the
    naturally sorted filenames of the natural-image directory.
    """

    def __init__(self, imgnat_dirs, imgadv_dirs, label_dirs, transform=None):
        self.imgnat_dirs = imgnat_dirs
        self.imgadv_dirs = imgadv_dirs
        self.label_dirs = label_dirs
        self.img_names = self.__get_imgnames__()
        self.label = self.__get_label__()
        self.transform = transform

    def __get_imgnames__(self):
        """Full natural-image paths in natural filename order."""
        names = os.listdir(self.imgnat_dirs)
        names.sort(key=sort_key)
        return [os.path.join(self.imgnat_dirs, name) for name in names]

    def __get_label__(self):
        """Load the pickled label sequence as a torch tensor."""
        return torch.from_numpy(np.array(load_variavle(self.label_dirs)))

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        nat_path = self.img_names[idx]
        adv_path = nat_path.replace(self.imgnat_dirs, self.imgadv_dirs)
        nat = Image.open(nat_path).convert('L')
        adv = Image.open(adv_path).convert('L')
        if self.transform:
            nat = self.transform(nat)
            adv = self.transform(adv)
        return nat, adv, self.label[idx]
class DatasetIMG_Dual(Dataset):
    """Image pair dataset: (natural, adversarial), matched by filename."""

    def __init__(self, imgnat_dirs, imgadv_dirs, transform=None):
        self.imgnat_dirs = imgnat_dirs
        self.imgadv_dirs = imgadv_dirs
        self.img_names = self.__get_imgnames__()
        self.transform = transform

    def __get_imgnames__(self):
        """Full natural-image paths in natural filename order."""
        names = os.listdir(self.imgnat_dirs)
        names.sort(key=sort_key)
        return [os.path.join(self.imgnat_dirs, name) for name in names]

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        nat_path = self.img_names[idx]
        adv_path = nat_path.replace(self.imgnat_dirs, self.imgadv_dirs)
        nat = Image.open(nat_path).convert('L')
        adv = Image.open(adv_path).convert('L')
        if self.transform:
            nat = self.transform(nat)
            adv = self.transform(adv)
        return nat, adv
class DatasetIMG_test(Dataset):
    """Single-directory image dataset, loaded grayscale in natural filename order."""

    def __init__(self, img_dirs, transform=None):
        self.img_dirs = img_dirs
        self.img_names = self.__get_imgnames__()
        self.transform = transform

    def __get_imgnames__(self):
        """Full image paths in natural filename order."""
        names = os.listdir(self.img_dirs)
        names.sort(key=sort_key)
        return [os.path.join(self.img_dirs, name) for name in names]

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        image = Image.open(self.img_names[idx]).convert('L')
        if self.transform:
            image = self.transform(image)
        return image
class DatasetNPY(Dataset):
    """Triplet .npy dataset: (natural, adv-1, adv-2) as float32 arrays.

    The three directories must contain identically named .npy files;
    items are returned in natural filename order.
    """

    def __init__(self, nat_dirs, adv1_dirs, adv2_dirs, transform=None):
        self.nat_dirs = nat_dirs
        self.adv1_dirs = adv1_dirs
        self.adv2_dirs = adv2_dirs
        self.npy_names = self.__get_npynames__()
        self.transform = transform

    def __get_npynames__(self):
        """Full natural-sample paths in natural filename order."""
        names = os.listdir(self.nat_dirs)
        names.sort(key=sort_key)
        return [os.path.join(self.nat_dirs, name) for name in names]

    def __len__(self):
        return len(self.npy_names)

    def __getitem__(self, idx):
        nat_path = self.npy_names[idx]
        adv1_path = nat_path.replace(self.nat_dirs, self.adv1_dirs)
        adv2_path = nat_path.replace(self.nat_dirs, self.adv2_dirs)
        nat = np.load(nat_path).astype(np.float32)
        adv1 = np.load(adv1_path).astype(np.float32)
        adv2 = np.load(adv2_path).astype(np.float32)
        if self.transform:
            nat = self.transform(nat)
            adv1 = self.transform(adv1)
            adv2 = self.transform(adv2)
        return nat, adv1, adv2
class DatasetNPY_Label(Dataset):
    """Labelled triplet .npy dataset: (natural, adv-1, adv-2, label).

    Labels come from a pickled sequence whose order must match the
    naturally sorted filenames of the natural-sample directory.
    """

    def __init__(self, nat_dirs, adv1_dirs, adv2_dirs, label_dirs, transform=None):
        self.nat_dirs = nat_dirs
        self.adv1_dirs = adv1_dirs
        self.adv2_dirs = adv2_dirs
        self.npy_names = self.__get_npynames__()
        self.label_dirs = label_dirs
        self.label = self.__get_label__()
        self.transform = transform

    def __get_npynames__(self):
        """Full natural-sample paths in natural filename order."""
        names = os.listdir(self.nat_dirs)
        names.sort(key=sort_key)
        return [os.path.join(self.nat_dirs, name) for name in names]

    def __get_label__(self):
        """Load the pickled label sequence as a torch tensor."""
        return torch.from_numpy(np.array(load_variavle(self.label_dirs)))

    def __len__(self):
        return len(self.npy_names)

    def __getitem__(self, idx):
        nat_path = self.npy_names[idx]
        adv1_path = nat_path.replace(self.nat_dirs, self.adv1_dirs)
        adv2_path = nat_path.replace(self.nat_dirs, self.adv2_dirs)
        nat = np.load(nat_path).astype(np.float32)
        adv1 = np.load(adv1_path).astype(np.float32)
        adv2 = np.load(adv2_path).astype(np.float32)
        if self.transform:
            nat = self.transform(nat)
            adv1 = self.transform(adv1)
            adv2 = self.transform(adv2)
        return nat, adv1, adv2, self.label[idx]
class DatasetNPY_Dual_Label(Dataset):
    """Labelled .npy pair dataset: (natural, adversarial, label)."""

    def __init__(self, nat_dirs, adv_dirs, label_dirs, transform=None):
        self.nat_dirs = nat_dirs
        self.adv_dirs = adv_dirs
        self.npy_names = self.__get_npynames__()
        self.label_dirs = label_dirs
        self.label = self.__get_label__()
        self.transform = transform

    def __get_npynames__(self):
        """Full natural-sample paths in natural filename order."""
        names = os.listdir(self.nat_dirs)
        names.sort(key=sort_key)
        return [os.path.join(self.nat_dirs, name) for name in names]

    def __get_label__(self):
        """Load the pickled label sequence as a torch tensor."""
        return torch.from_numpy(np.array(load_variavle(self.label_dirs)))

    def __len__(self):
        return len(self.npy_names)

    def __getitem__(self, idx):
        nat_path = self.npy_names[idx]
        adv_path = nat_path.replace(self.nat_dirs, self.adv_dirs)
        nat = np.load(nat_path).astype(np.float32)
        adv = np.load(adv_path).astype(np.float32)
        if self.transform:
            nat = self.transform(nat)
            adv = self.transform(adv)
        return nat, adv, self.label[idx]
class DatasetNPY_Dual(Dataset):
    """.npy pair dataset: (natural, adversarial), matched by filename."""

    def __init__(self, nat_dirs, adv_dirs, transform=None):
        self.nat_dirs = nat_dirs
        self.adv_dirs = adv_dirs
        self.npy_names = self.__get_npynames__()
        self.transform = transform

    def __get_npynames__(self):
        """Full natural-sample paths in natural filename order."""
        names = os.listdir(self.nat_dirs)
        names.sort(key=sort_key)
        return [os.path.join(self.nat_dirs, name) for name in names]

    def __len__(self):
        return len(self.npy_names)

    def __getitem__(self, idx):
        nat_path = self.npy_names[idx]
        adv_path = nat_path.replace(self.nat_dirs, self.adv_dirs)
        nat = np.load(nat_path).astype(np.float32)
        adv = np.load(adv_path).astype(np.float32)
        if self.transform:
            nat = self.transform(nat)
            adv = self.transform(adv)
        return nat, adv
class DatasetNPY_test(Dataset):
    """Plain dataset over one directory of .npy files, yielded as float32 arrays."""

    def __init__(self, npy_dirs, transform=None):
        self.nat_dirs = npy_dirs
        self.npy_names = self.__get_npynames__()
        self.transform = transform

    def __get_npynames__(self):
        """List this directory's .npy files (sorted by sort_key) as full paths."""
        names = os.listdir(self.nat_dirs)
        names.sort(key=sort_key)
        return [os.path.join(self.nat_dirs, name) for name in names]

    def __len__(self):
        return len(self.npy_names)

    def __getitem__(self, idx):
        """Return sample ``idx`` as a float32 array (transformed if configured)."""
        sample = np.load(self.npy_names[idx]).astype(np.float32)
        if self.transform:
            sample = self.transform(sample)
        return sample
| 12,207 | 29.292804 | 92 | py |
STR | STR-master/main.py | import os
import pathlib
import random
import shutil
import time
import json
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.tensorboard import SummaryWriter
from utils.logging import AverageMeter, ProgressMeter
from utils.net_utils import save_checkpoint, get_lr, LabelSmoothing
from utils.schedulers import get_policy
from utils.conv_type import STRConv
from utils.conv_type import sparseFunction
from args import args
from trainer import train, validate
import data
import models
def main():
    """Entry point: print the run configuration, seed all RNGs, then train."""
    print(args)
    if args.seed is not None:
        # Seed Python's RNG plus CPU-torch and every CUDA device for
        # reproducible runs.
        for seeder in (random.seed, torch.manual_seed,
                       torch.cuda.manual_seed, torch.cuda.manual_seed_all):
            seeder(args.seed)
    # Hand everything off to the single-process worker.
    main_worker(args)
def main_worker(args):
    """Full training driver.

    Builds the model/optimizer/data pipeline, optionally loads pretrained or
    resumed weights, trains for ``args.epochs`` epochs with per-epoch
    validation and checkpointing, and records sparsity statistics for
    STRConv-based models (TensorBoard scalars plus JSON dumps at the end).
    """
    args.gpu = None
    # NOTE(review): args.gpu is forced to None just above, so this branch is
    # dead and set_gpu() always takes the DataParallel path — confirm intent.
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    # create model and optimizer
    model = get_model(args)
    model = set_gpu(args, model)
    # Set up directories
    run_base_dir, ckpt_base_dir, log_base_dir = get_directories(args)
    # Loading pretrained model
    if args.pretrained:
        pretrained(args, model)
    # Saving a DenseConv (nn.Conv2d) compatible model, then exit early.
    if args.dense_conv_model:
        print(f"==> DenseConv compatible model, saving at {ckpt_base_dir / 'model_best.pth'}")
        save_checkpoint(
            {
                "epoch": 0,
                "arch": args.arch,
                "state_dict": model.state_dict(),
            },
            True,
            filename=ckpt_base_dir / f"epoch_pretrained.state",
            save=True,
        )
        return
    optimizer = get_optimizer(args, model)
    data = get_dataset(args)
    lr_policy = get_policy(args.lr_policy)(optimizer, args)
    if args.label_smoothing is None:
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = LabelSmoothing(smoothing=args.label_smoothing)
    # optionally resume from a checkpoint
    best_acc1 = 0.0
    best_acc5 = 0.0
    best_train_acc1 = 0.0
    best_train_acc5 = 0.0
    if args.resume:
        best_acc1 = resume(args, model, optimizer)
    # Evaluation-only mode: run a single validation pass and exit.
    if args.evaluate:
        acc1, acc5 = validate(
            data.val_loader, model, criterion, args, writer=None, epoch=args.start_epoch
        )
        return
    writer = SummaryWriter(log_dir=log_base_dir)
    # Wall-clock meters (minutes) for coarse per-epoch timing diagnostics.
    epoch_time = AverageMeter("epoch_time", ":.4f", write_avg=False)
    validation_time = AverageMeter("validation_time", ":.4f", write_avg=False)
    train_time = AverageMeter("train_time", ":.4f", write_avg=False)
    progress_overall = ProgressMeter(
        1, [epoch_time, validation_time, train_time], prefix="Overall Timing"
    )
    end_epoch = time.time()
    args.start_epoch = args.start_epoch or 0
    acc1 = None
    # Save the initial state
    save_checkpoint(
        {
            "epoch": 0,
            "arch": args.arch,
            "state_dict": model.state_dict(),
            "best_acc1": best_acc1,
            "best_acc5": best_acc5,
            "best_train_acc1": best_train_acc1,
            "best_train_acc5": best_train_acc5,
            "optimizer": optimizer.state_dict(),
            "curr_acc1": acc1 if acc1 else "Not evaluated",
        },
        False,
        filename=ckpt_base_dir / f"initial.state",
        save=False,
    )
    # Start training
    for epoch in range(args.start_epoch, args.epochs):
        lr_policy(epoch, iteration=None)
        cur_lr = get_lr(optimizer)
        # Gradual pruning in GMP experiments: ramp each module's prune rate
        # toward its target with a cubic schedule over the prune window.
        if args.conv_type == "GMPConv" and epoch >= args.init_prune_epoch and epoch <= args.final_prune_epoch:
            total_prune_epochs = args.final_prune_epoch - args.init_prune_epoch + 1
            for n, m in model.named_modules():
                if hasattr(m, 'set_curr_prune_rate'):
                    prune_decay = (1 - ((epoch - args.init_prune_epoch)/total_prune_epochs))**3
                    curr_prune_rate = m.prune_rate - (m.prune_rate*prune_decay)
                    m.set_curr_prune_rate(curr_prune_rate)
        # train for one epoch
        start_train = time.time()
        train_acc1, train_acc5 = train(
            data.train_loader, model, criterion, optimizer, epoch, args, writer=writer
        )
        train_time.update((time.time() - start_train) / 60)
        # evaluate on validation set
        start_validation = time.time()
        acc1, acc5 = validate(data.val_loader, model, criterion, args, writer, epoch)
        validation_time.update((time.time() - start_validation) / 60)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        best_acc5 = max(acc5, best_acc5)
        best_train_acc1 = max(train_acc1, best_train_acc1)
        best_train_acc5 = max(train_acc5, best_train_acc5)
        save = ((epoch % args.save_every) == 0) and args.save_every > 0
        if is_best or save or epoch == args.epochs - 1:
            if is_best:
                print(f"==> New best, saving at {ckpt_base_dir / 'model_best.pth'}")
            save_checkpoint(
                {
                    "epoch": epoch + 1,
                    "arch": args.arch,
                    "state_dict": model.state_dict(),
                    "best_acc1": best_acc1,
                    "best_acc5": best_acc5,
                    "best_train_acc1": best_train_acc1,
                    "best_train_acc5": best_train_acc5,
                    "optimizer": optimizer.state_dict(),
                    "curr_acc1": acc1,
                    "curr_acc5": acc5,
                },
                is_best,
                filename=ckpt_base_dir / f"epoch_{epoch}.state",
                save=save,
            )
        epoch_time.update((time.time() - end_epoch) / 60)
        progress_overall.display(epoch)
        progress_overall.write_to_tensorboard(
            writer, prefix="diagnostics", global_step=epoch
        )
        writer.add_scalar("test/lr", cur_lr, epoch)
        end_epoch = time.time()
        # Storing sparsity and threshold statistics for STRConv models
        if args.conv_type == "STRConv":
            count = 0
            sum_sparse = 0.0
            for n, m in model.named_modules():
                if isinstance(m, STRConv):
                    sparsity, total_params, thresh = m.getSparsity()
                    writer.add_scalar("sparsity/{}".format(n), sparsity, epoch)
                    writer.add_scalar("thresh/{}".format(n), thresh, epoch)
                    # Accumulate the count of *non-zero* weights and the total
                    # parameter count to derive overall sparsity below.
                    sum_sparse += int(((100 - sparsity) / 100) * total_params)
                    count += total_params
            total_sparsity = 100 - (100 * sum_sparse / count)
            writer.add_scalar("sparsity/total", total_sparsity, epoch)
        # NOTE(review): the two lines below duplicate the add_scalar/end_epoch
        # statements already executed above — presumably a copy-paste leftover.
        writer.add_scalar("test/lr", cur_lr, epoch)
        end_epoch = time.time()
    write_result_to_csv(
        best_acc1=best_acc1,
        best_acc5=best_acc5,
        best_train_acc1=best_train_acc1,
        best_train_acc5=best_train_acc5,
        prune_rate=args.prune_rate,
        curr_acc1=acc1,
        curr_acc5=acc5,
        base_config=args.config,
        name=args.name,
    )
    # Dump per-layer sparsity/threshold JSON files for STR runs.
    if args.conv_type == "STRConv":
        json_data = {}
        json_thres = {}
        # NOTE(review): sum_sparse/count are reused here from the last training
        # epoch without being reset to 0, so json_data["total"] double-counts
        # parameters (and this raises NameError when no epoch ran) — confirm
        # and reset both to 0 before this loop.
        for n, m in model.named_modules():
            if isinstance(m, STRConv):
                sparsity = m.getSparsity()
                json_data[n] = sparsity[0]
                sum_sparse += int(((100 - sparsity[0]) / 100) * sparsity[1])
                count += sparsity[1]
                json_thres[n] = sparsity[2]
        json_data["total"] = 100 - (100 * sum_sparse / count)
        if not os.path.exists("runs/layerwise_sparsity"):
            os.mkdir("runs/layerwise_sparsity")
        if not os.path.exists("runs/layerwise_threshold"):
            os.mkdir("runs/layerwise_threshold")
        with open("runs/layerwise_sparsity/{}.json".format(args.name), "w") as f:
            json.dump(json_data, f)
        with open("runs/layerwise_threshold/{}.json".format(args.name), "w") as f:
            json.dump(json_thres, f)
def set_gpu(args, model):
    """Place the model on GPU(s): one device when args.gpu is set, else DataParallel.

    Side effects: sets the current CUDA device, fills in args.gpu with the
    primary device in the multi-GPU case, and enables cudnn.benchmark.
    """
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        print(f"=> Parallelizing on {args.multigpu} gpus")
        primary = args.multigpu[0]
        torch.cuda.set_device(primary)
        args.gpu = primary
        model = torch.nn.DataParallel(model, device_ids=args.multigpu).cuda(primary)
    cudnn.benchmark = True
    return model
def resume(args, model, optimizer):
    """Restore model/optimizer state from the checkpoint at ``args.resume``.

    Also fills in ``args.start_epoch`` from the checkpoint when it is unset.

    Returns:
        The checkpoint's recorded best top-1 accuracy, or 0.0 when no
        checkpoint file exists. (Previously the missing-file path implicitly
        returned None, so the caller's ``best_acc1 = resume(...)`` made the
        later ``acc1 > best_acc1`` comparison raise a TypeError.)
    """
    if os.path.isfile(args.resume):
        print(f"=> Loading checkpoint '{args.resume}'")
        checkpoint = torch.load(args.resume)
        if args.start_epoch is None:
            print(f"=> Setting new start epoch at {checkpoint['epoch']}")
            args.start_epoch = checkpoint["epoch"]
        best_acc1 = checkpoint["best_acc1"]
        model.load_state_dict(checkpoint["state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        print(f"=> Loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})")
        return best_acc1
    print(f"=> No checkpoint found at '{args.resume}'")
    # Fall back to a neutral best accuracy so training can proceed normally.
    return 0.0
def pretrained(args, model):
    """Load weights from ``args.pretrained`` into ``model``.

    Only keys whose shapes match the current model are copied. When the
    target conv type is not STRConv, stored STR (weight, sparseThreshold)
    pairs are collapsed into already-sparsified dense weights via
    sparseFunction. Optionally (args.use_budget) derives per-layer density
    budgets from the STR checkpoint and pushes them into modules that expose
    set_prune_rate (e.g. for DNW/GMP runs).
    """
    if os.path.isfile(args.pretrained):
        print("=> loading pretrained weights from '{}'".format(args.pretrained))
        # Map the checkpoint onto the primary GPU of this run.
        pretrained = torch.load(
            args.pretrained,
            map_location=torch.device("cuda:{}".format(args.multigpu[0])),
        )["state_dict"]
        model_state_dict = model.state_dict()
        if not args.ignore_pretrained_weights:
            # Keep only keys that exist in the model with identical shapes.
            pretrained_final = {
                k: v
                for k, v in pretrained.items()
                if (k in model_state_dict and v.size() == model_state_dict[k].size())
            }
            if args.conv_type != "STRConv":
                # Non-STR targets have no sparseThreshold parameters, so apply
                # the stored thresholds to the weights up front.
                for k, v in pretrained.items():
                    if 'sparseThreshold' in k:
                        wkey = k.split('sparse')[0] + 'weight'
                        weight = pretrained[wkey]
                        pretrained_final[wkey] = sparseFunction(weight, v)
            model_state_dict.update(pretrained_final)
            model.load_state_dict(model_state_dict)
        # Using the budgets of STR models for other models like DNW and GMP
        if args.use_budget:
            budget = {}
            for k, v in pretrained.items():
                if 'sparseThreshold' in k:
                    wkey = k.split('sparse')[0] + 'weight'
                    weight = pretrained[wkey]
                    sparse_weight = sparseFunction(weight, v)
                    # Fraction of non-zero weights after thresholding = density.
                    budget[wkey] = (sparse_weight.abs() > 0).float().mean().item()
            for n, m in model.named_modules():
                if hasattr(m, 'set_prune_rate'):
                    pr = 1 - budget[n + '.weight']
                    m.set_prune_rate(pr)
                    print('set prune rate', n, pr)
    else:
        print("=> no pretrained weights found at '{}'".format(args.pretrained))
def get_dataset(args):
    """Instantiate the dataset class named by ``args.set`` from the data package."""
    print(f"=> Getting {args.set} dataset")
    dataset_cls = getattr(data, args.set)
    return dataset_cls(args)
def get_model(args):
    """Build the architecture named by ``args.arch`` and apply sparsity setup.

    For non-dense conv types the global prune rate is pushed into every module
    exposing ``set_prune_rate``; with ``args.freeze_weights`` the weights and
    biases of masked modules are excluded from gradient updates.
    """
    if args.first_layer_dense:
        args.first_layer_type = "DenseConv"
    print("=> Creating model '{}'".format(args.arch))
    model_ctor = models.__dict__[args.arch]
    model = model_ctor()
    param_count = sum(p.numel() for p in model.parameters())
    print(f"=> Num model params {param_count}")

    # applying sparsity to the network
    if args.conv_type != "DenseConv":
        print(f"==> Setting prune rate of network to {args.prune_rate}")

        def _apply_prune_rate(module):
            if hasattr(module, "set_prune_rate"):
                module.set_prune_rate(args.prune_rate)

        model.apply(_apply_prune_rate)

    # freezing the weights if we are only doing mask training
    if args.freeze_weights:
        print(f"=> Freezing model weights")

        def _freeze_masked(module):
            if hasattr(module, "mask"):
                module.weight.requires_grad = False
                if hasattr(module, "bias") and module.bias is not None:
                    module.bias.requires_grad = False

        model.apply(_freeze_masked)

    return model
def get_optimizer(args, model):
    """Create the optimizer named by ``args.optimizer`` ("sgd" or "adam").

    For SGD, trainable parameters are split into three groups so weight decay
    can be controlled independently:
      * BatchNorm params ("bn" in name): no decay when ``args.no_bn_decay``;
      * STR thresholds ("sparseThreshold"): ``args.st_decay`` when set,
        otherwise the global ``args.weight_decay``;
      * everything else: ``args.weight_decay``.

    Adam uses a single group over all trainable parameters.

    Returns:
        The configured torch.optim optimizer.

    NOTE(review): an unrecognized ``args.optimizer`` falls through and raises
    UnboundLocalError on return — same as the original behavior; confirm
    whether an explicit ValueError would be preferable.
    """
    # (Removed a dead debugging loop that iterated named_parameters and did
    # nothing in either branch.)
    if args.optimizer == "sgd":
        parameters = list(model.named_parameters())
        sparse_thresh = [v for n, v in parameters if ("sparseThreshold" in n) and v.requires_grad]
        bn_params = [v for n, v in parameters if ("bn" in n) and v.requires_grad]
        rest_params = [
            v for n, v in parameters
            if ("bn" not in n) and ("sparseThreshold" not in n) and v.requires_grad
        ]
        optimizer = torch.optim.SGD(
            [
                {
                    "params": bn_params,
                    "weight_decay": 0 if args.no_bn_decay else args.weight_decay,
                },
                {
                    "params": sparse_thresh,
                    "weight_decay": args.st_decay if args.st_decay is not None else args.weight_decay,
                },
                {"params": rest_params, "weight_decay": args.weight_decay},
            ],
            args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            nesterov=args.nesterov,
        )
    elif args.optimizer == "adam":
        optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr
        )
    return optimizer
def _run_dir_exists(run_base_dir):
log_base_dir = run_base_dir / "logs"
ckpt_base_dir = run_base_dir / "checkpoints"
return log_base_dir.exists() or ckpt_base_dir.exists()
def get_directories(args):
    """Resolve (and create) the run/checkpoint/log directories for this run.

    Layout: <log_dir or "runs">/<config stem>/<name>/prune_rate=<rate>
    [/width_mult=<w>][/<n>] — the numeric suffix disambiguates repeats.
    Creates the run dir, writes the args to settings.txt, and returns
    (run_base_dir, ckpt_base_dir, log_base_dir).
    """
    if args.config is None or args.name is None:
        raise ValueError("Must have name and config")
    config = pathlib.Path(args.config).stem
    root = "runs" if args.log_dir is None else args.log_dir
    run_base_dir = pathlib.Path(
        f"{root}/{config}/{args.name}/prune_rate={args.prune_rate}"
    )
    if args.width_mult != 1.0:
        run_base_dir = run_base_dir / "width_mult={}".format(str(args.width_mult))

    def _occupied(candidate):
        # A directory counts as taken once it has logs/ or checkpoints/ inside.
        return (candidate / "logs").exists() or (candidate / "checkpoints").exists()

    if _occupied(run_base_dir):
        # Append 0, 1, 2, ... until we find an unused repeat sub-directory.
        rep_count = 0
        while _occupied(run_base_dir / str(rep_count)):
            rep_count += 1
        run_base_dir = run_base_dir / str(rep_count)
    log_base_dir = run_base_dir / "logs"
    ckpt_base_dir = run_base_dir / "checkpoints"
    if not run_base_dir.exists():
        os.makedirs(run_base_dir)
    (run_base_dir / "settings.txt").write_text(str(args))
    return run_base_dir, ckpt_base_dir, log_base_dir
def write_result_to_csv(**kwargs):
    """Append one finished-run summary row to runs/results.csv.

    Creates the header row on first use. Expected kwargs: base_config, name,
    prune_rate, curr_acc1, curr_acc5, best_acc1, best_acc5, best_train_acc1,
    best_train_acc5 (accuracies are formatted to two decimals).
    """
    results = pathlib.Path("runs") / "results.csv"
    if not results.exists():
        columns = [
            "Date Finished", "Base Config", "Name", "Prune Rate",
            "Current Val Top 1", "Current Val Top 5",
            "Best Val Top 1", "Best Val Top 5",
            "Best Train Top 1", "Best Train Top 5",
        ]
        results.write_text(", ".join(columns) + "\n")
    now = time.strftime("%m-%d-%y_%H:%M:%S")
    row = (
        f"{now}, {kwargs['base_config']}, {kwargs['name']}, {kwargs['prune_rate']}, "
        f"{kwargs['curr_acc1']:.02f}, {kwargs['curr_acc5']:.02f}, "
        f"{kwargs['best_acc1']:.02f}, {kwargs['best_acc5']:.02f}, "
        f"{kwargs['best_train_acc1']:.02f}, {kwargs['best_train_acc5']:.02f}\n"
    )
    with open(results, "a+") as f:
        f.write(row)
if __name__ == "__main__":
    # Run training only when executed as a script, not on import.
    main()
| 16,340 | 32.213415 | 122 | py |
STR | STR-master/trainer.py | import time
import torch
import tqdm
from utils.eval_utils import accuracy
from utils.logging import AverageMeter, ProgressMeter
__all__ = ["train", "validate"]
def train(train_loader, model, criterion, optimizer, epoch, args, writer):
    """Train ``model`` for one epoch over ``train_loader``.

    Logs timing/loss/accuracy meters to stdout and TensorBoard every
    ``args.print_freq`` batches; returns (avg top-1, avg top-5) accuracy.
    """
    batch_time = AverageMeter("Time", ":6.3f")
    data_time = AverageMeter("Data", ":6.3f")
    losses = AverageMeter("Loss", ":.3f")
    top1 = AverageMeter("Acc@1", ":6.2f")
    top5 = AverageMeter("Acc@5", ":6.2f")
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, losses, top1, top5],
        prefix=f"Epoch: [{epoch}]",
    )
    # switch to train mode
    model.train()
    batch_size = train_loader.batch_size
    num_batches = len(train_loader)
    end = time.time()
    for i, (images, target) in tqdm.tqdm(
        enumerate(train_loader), ascii=True, total=len(train_loader)
    ):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True).long()
        # compute output
        output = model(images)
        loss = criterion(output, target.view(-1))
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1.item(), images.size(0))
        top5.update(acc5.item(), images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            # TensorBoard global step counts samples seen so far.
            t = (num_batches * epoch + i) * batch_size
            progress.display(i)
            progress.write_to_tensorboard(writer, prefix="train", global_step=t)
    return top1.avg, top5.avg
def validate(val_loader, model, criterion, args, writer, epoch):
    """Evaluate ``model`` on ``val_loader`` without gradients.

    Returns (avg top-1, avg top-5) accuracy; writes meters to TensorBoard
    (keyed by ``epoch``) when ``writer`` is provided.
    """
    batch_time = AverageMeter("Time", ":6.3f", write_val=False)
    losses = AverageMeter("Loss", ":.3f", write_val=False)
    top1 = AverageMeter("Acc@1", ":6.2f", write_val=False)
    top5 = AverageMeter("Acc@5", ":6.2f", write_val=False)
    progress = ProgressMeter(
        len(val_loader), [batch_time, losses, top1, top5], prefix="Test: "
    )
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in tqdm.tqdm(
            enumerate(val_loader), ascii=True, total=len(val_loader)
        ):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True).long()
            # compute output
            output = model(images)
            loss = criterion(output, target.view(-1))
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1.item(), images.size(0))
            top5.update(acc5.item(), images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
        # Final summary line after the last batch.
        progress.display(len(val_loader))
    if writer is not None:
        progress.write_to_tensorboard(writer, prefix="test", global_step=epoch)
    return top1.avg, top5.avg
| 3,536 | 29.491379 | 83 | py |
STR | STR-master/models/resnet.py | import torch.nn as nn
from utils.builder import get_builder
from args import args
# BasicBlock {{{
class BasicBlock(nn.Module):
    """Two-3x3-conv residual block (ResNet-18/34 style).

    Convs, BatchNorms, and activations come from the injected ``builder`` so
    the same topology can be instantiated with sparse or dense conv types.
    """

    M = 2           # number of convs in the block
    expansion = 1   # output channels = planes * expansion

    def __init__(self, builder, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = builder.conv3x3(inplanes, planes, stride)
        self.bn1 = builder.batchnorm(planes)
        self.relu = builder.activation()
        self.conv2 = builder.conv3x3(planes, planes)
        self.bn2 = builder.batchnorm(planes, last_bn=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # The skip path is either the identity or a learned downsample.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.conv1(x)
        if self.bn1 is not None:
            out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        if self.bn2 is not None:
            out = self.bn2(out)
        out += identity
        return self.relu(out)
# BasicBlock }}}
# Bottleneck {{{
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 bottleneck residual block (ResNet-50+), 4x channel expansion.

    All convs/BNs are produced by the injected ``builder``.
    """

    M = 3           # number of convs in the block
    expansion = 4   # output channels = planes * expansion

    def __init__(self, builder, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = builder.conv1x1(inplanes, planes)
        self.bn1 = builder.batchnorm(planes)
        self.conv2 = builder.conv3x3(planes, planes, stride=stride)
        self.bn2 = builder.batchnorm(planes)
        self.conv3 = builder.conv1x1(planes, planes * self.expansion)
        self.bn3 = builder.batchnorm(planes * self.expansion, last_bn=True)
        self.relu = builder.activation()
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # The skip path is either the identity or a learned downsample.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
# Bottleneck }}}
# ResNet {{{
class ResNet(nn.Module):
    """Builder-parameterized ResNet.

    Differences from torchvision's ResNet: the stem conv and classifier come
    from the injected ``builder`` (so they can be sparse), and the classifier
    is a 1x1 conv applied to the pooled feature map instead of nn.Linear.
    """

    def __init__(self, builder, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        if args.first_layer_dense:
            print("FIRST LAYER DENSE!!!!")
            self.conv1 = nn.Conv2d(
                3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
            )
        else:
            self.conv1 = builder.conv7x7(3, 64, stride=2, first_layer=True)
        self.bn1 = builder.batchnorm(64)
        self.relu = builder.activation()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(builder, block, 64, layers[0])
        self.layer2 = self._make_layer(builder, block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(builder, block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(builder, block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # Classifier: a 1x1 conv standing in for the usual fully-connected head.
        if args.last_layer_dense:
            self.fc = nn.Conv2d(512 * block.expansion, args.num_classes, 1)
        else:
            self.fc = builder.conv1x1(512 * block.expansion, num_classes)

    def _make_layer(self, builder, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample the skip."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            dconv = builder.conv1x1(
                self.inplanes, planes * block.expansion, stride=stride
            )
            dbn = builder.batchnorm(planes * block.expansion)
            downsample = nn.Sequential(dconv, dbn) if dbn is not None else dconv
        stages = [block(builder, self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stages.extend(block(builder, self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stages)

    def forward(self, x):
        out = self.conv1(x)
        if self.bn1 is not None:
            out = self.bn1(out)
        out = self.maxpool(self.relu(out))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.fc(self.avgpool(out))
        return out.view(out.size(0), -1)
# ResNet }}}
def ResNet18(pretrained=False):
    """ResNet-18: BasicBlock with [2, 2, 2, 2] stages, 1000 classes.

    ``pretrained`` is accepted for API symmetry but not implemented yet (TODO).
    """
    stage_sizes = [2, 2, 2, 2]
    return ResNet(get_builder(), BasicBlock, stage_sizes, 1000)
def ResNet50(pretrained=False):
    """ResNet-50: Bottleneck with [3, 4, 6, 3] stages, 1000 classes.

    ``pretrained`` is accepted for API symmetry but not implemented yet (TODO).
    """
    stage_sizes = [3, 4, 6, 3]
    return ResNet(get_builder(), Bottleneck, stage_sizes, 1000)
| 4,838 | 28.506098 | 80 | py |
STR | STR-master/models/mobilenetv1.py | import torch.nn as nn
from utils.builder import get_builder
class MobileNetV1(nn.Module):
    """MobileNet v1 backbone built from builder convs; classifier is a 1x1 conv."""

    def __init__(self):
        super(MobileNetV1, self).__init__()
        builder = get_builder()

        def conv_bn(inp, oup, stride):
            # Standard stem block: 3x3 conv -> BN -> ReLU.
            return nn.Sequential(
                builder.conv2d(inp, oup, 3, stride, 1, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True),
            )

        def conv_dw(inp, oup, stride):
            # Depthwise-separable block: depthwise 3x3 then pointwise 1x1.
            return nn.Sequential(
                builder.conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU(inplace=True),
                builder.conv2d(inp, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True),
            )

        # (in_channels, out_channels, stride) for each depthwise-separable stage.
        dw_cfg = [
            (32, 64, 1), (64, 128, 2), (128, 128, 1), (128, 256, 2),
            (256, 256, 1), (256, 512, 2), (512, 512, 1), (512, 512, 1),
            (512, 512, 1), (512, 512, 1), (512, 512, 1), (512, 1024, 2),
            (1024, 1024, 1),
        ]
        stages = [conv_bn(3, 32, 2)]
        stages.extend(conv_dw(inp, oup, s) for inp, oup, s in dw_cfg)
        stages.append(nn.AvgPool2d(7))
        self.model = nn.Sequential(*stages)
        self.fc = builder.conv1x1(1024, 1000)

    def forward(self, x):
        feats = self.model(x)
        logits = self.fc(feats)
        return logits.view(-1, 1000)
| 1,526 | 27.277778 | 79 | py |
STR | STR-master/utils/bn_type.py | import torch.nn as nn
# Standard BatchNorm2d with learnable affine scale/shift, under the repo's naming.
LearnedBatchNorm = nn.BatchNorm2d
class NonAffineBatchNorm(nn.BatchNorm2d):
    """BatchNorm2d with the affine transform disabled (no learnable gamma/beta)."""

    def __init__(self, dim):
        # Identical to BatchNorm2d except affine=False: normalization only.
        super().__init__(dim, affine=False)
| 198 | 21.111111 | 67 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.