repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
GAN-STEM-Conv2MultiSlice | GAN-STEM-Conv2MultiSlice-master/pix2pix/pix2pixGray.py | from __future__ import print_function, division
import scipy
#from keras.datasets import mnist
#from keras_contrib.layers.normalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import datetime
import matplotlib.pyplot as plt
from matplotlib import gridspec
import sys
from data_loader_gray import DataLoader
import numpy as np
import tensorflow as tf
import os
plt.switch_backend('agg')
class Pix2Pix():
    """Pix2Pix conditional GAN for 256x256 grayscale STEM images.

    A U-Net generator maps convolution images (B) to multislice images (A);
    a PatchGAN discriminator judges (image, condition) pairs.  Data pairs
    come from data_loader_gray.DataLoader.
    """

    def __init__(self):
        # Input shape
        self.img_rows = 256
        self.img_cols = 256
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        # Configure data loader
        self.dataset_name = 'stem_gray_5'
        self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                      img_res=(self.img_rows, self.img_cols))

        # Output shape of D (PatchGAN): four stride-2 convs shrink the
        # input by 2**4 in each spatial dimension.
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='mse',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # -------------------------
        #  Construct Computational
        #    Graph of Generator
        # -------------------------
        self.generator = self.build_generator()

        # Input images and their conditioning images
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # By conditioning on B generate a fake version of A
        fake_A = self.generator(img_B)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # Discriminator determines validity of translated image / condition pairs
        valid = self.discriminator([fake_A, img_B])

        self.combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A])
        # NOTE(review): loss_weights [50, 50] weight adversarial and L1 terms
        # equally (standard pix2pix uses [1, 100]) -- presumably tuned for
        # this dataset; confirm before changing.
        self.combined.compile(loss=['mse', 'mae'],
                              loss_weights=[50, 50],
                              optimizer=optimizer)
        # Per-sample-interval fsRMSE history, appended by sample_images().
        self.errors = []

    def build_generator(self):
        """U-Net generator: 7 stride-2 down blocks, 7 up blocks with skips."""

        def conv2d(layer_input, filters, f_size=7, bn=True):
            """Downsampling block: stride-2 conv -> LeakyReLU -> optional BN."""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            print("d: ", d.shape.dims)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Upsampling block: 2x upsample -> conv -> optional dropout -> BN -> skip concat."""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = BatchNormalization(momentum=0.8)(u)
            print("u: ", u.shape.dims, "skip_input: ", skip_input.shape.dims)
            u = Concatenate()([u, skip_input])
            return u

        # (Removed unused conv2d_flat/deconv2d_flat copies; they live in
        # build_generator_flat where they are actually used.)

        # Image input
        d0 = Input(shape=self.img_shape)

        # Downsampling: 256 -> 2 spatial resolution
        d1 = conv2d(d0, self.gf, bn=False)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)
        d5 = conv2d(d4, self.gf*8)
        d6 = conv2d(d5, self.gf*8)
        d7 = conv2d(d6, self.gf*8)

        # Upsampling with symmetric skip connections
        u1 = deconv2d(d7, d6, self.gf*8)
        u2 = deconv2d(u1, d5, self.gf*8)
        u3 = deconv2d(u2, d4, self.gf*8)
        u4 = deconv2d(u3, d3, self.gf*4)
        u5 = deconv2d(u4, d2, self.gf*2)
        u6 = deconv2d(u5, d1, self.gf)

        u7 = UpSampling2D(size=2)(u6)
        # tanh output in [-1, 1]; sample_images/train rescale by 0.5x + 0.5.
        output_img = Conv2D(self.channels, kernel_size=7, strides=1, padding='same', activation='tanh')(u7)

        return Model(d0, output_img)

    def build_generator_flat(self):
        """Stride-1 variant of the generator (no real down/up-sampling).

        Currently unused -- __init__ builds build_generator().
        """

        def conv2d_flat(layer_input, filters, f_size=7, bn=True):
            """Stride-1 conv -> LeakyReLU -> optional BN."""
            d = Conv2D(filters, kernel_size=f_size, strides=1, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            print("d: ", d.shape.dims)
            return d

        def deconv2d_flat(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Stride-1 'up' block with skip concatenation (size=1 upsample is a no-op)."""
            u = UpSampling2D(size=1)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = BatchNormalization(momentum=0.8)(u)
            print("u: ", u.shape.dims, "skip_input: ", skip_input.shape.dims)
            u = Concatenate()([u, skip_input])
            return u

        # Image input
        d0 = Input(shape=self.img_shape)

        d1 = conv2d_flat(d0, self.gf, bn=False)
        d2 = conv2d_flat(d1, self.gf * 2)
        d3 = conv2d_flat(d2, self.gf * 4)
        d4 = conv2d_flat(d3, self.gf * 8)
        d5 = conv2d_flat(d4, self.gf * 8)
        d6 = conv2d_flat(d5, self.gf * 8)
        d7 = conv2d_flat(d6, self.gf * 8)
        # BUG FIX: d8-d11 previously all consumed d6 (copy-paste error);
        # chain them so the skip pairings below (u11<->d7, u10<->d8, ...)
        # line up as intended.
        d8 = conv2d_flat(d7, self.gf * 8)
        d9 = conv2d_flat(d8, self.gf * 8)
        d10 = conv2d_flat(d9, self.gf * 8)
        d11 = conv2d_flat(d10, self.gf * 8)

        u8 = deconv2d_flat(d11, d10, self.gf * 8)
        u9 = deconv2d_flat(u8, d9, self.gf * 8)
        u10 = deconv2d_flat(u9, d8, self.gf * 8)
        u11 = deconv2d_flat(u10, d7, self.gf * 8)
        u1 = deconv2d_flat(u11, d6, self.gf * 8)
        u2 = deconv2d_flat(u1, d5, self.gf * 8)
        u3 = deconv2d_flat(u2, d4, self.gf * 8)
        u4 = deconv2d_flat(u3, d3, self.gf * 4)
        u5 = deconv2d_flat(u4, d2, self.gf * 2)
        u6 = deconv2d_flat(u5, d1, self.gf)

        u7 = UpSampling2D(size=1)(u6)
        output_img = Conv2D(self.channels, kernel_size=7, strides=1, padding='same', activation='tanh')(u7)

        return Model(d0, output_img)

    def build_discriminator(self):
        """PatchGAN discriminator over channel-concatenated (A, B) pairs."""

        def d_layer(layer_input, filters, f_size=4, bn=True):
            """Discriminator layer: stride-2 conv -> LeakyReLU -> optional BN."""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # Concatenate image and conditioning image by channels to produce input
        combined_imgs = Concatenate(axis=-1)([img_A, img_B])

        d1 = d_layer(combined_imgs, self.df, bn=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)

        # Per-patch validity map (no activation; trained with MSE -> LSGAN style).
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

        return Model([img_A, img_B], validity)

    def train(self, epochs, batch_size=1, sample_interval=50):
        """Alternate D and G training, then dump final train/test predictions.

        Args:
            epochs: number of passes over the dataset.
            batch_size: images per batch.
            sample_interval: batches between sample_images() calls.
        """
        start_time = datetime.datetime.now()

        # Adversarial loss ground truths, shaped like the PatchGAN output.
        valid = np.ones((batch_size,) + self.disc_patch)
        fake = np.zeros((batch_size,) + self.disc_patch)

        for epoch in range(epochs):
            for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)):
                # ---------------------
                #  Train Discriminator
                # ---------------------
                # Condition on B and generate a translated version
                fake_A = self.generator.predict(imgs_B)

                # Train the discriminator (original images = real / generated = fake)
                d_loss_real = self.discriminator.train_on_batch([imgs_A, imgs_B], valid)
                d_loss_fake = self.discriminator.train_on_batch([fake_A, imgs_B], fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                # -----------------
                #  Train Generator
                # -----------------
                g_loss = self.combined.train_on_batch([imgs_A, imgs_B], [valid, imgs_A])

                elapsed_time = datetime.datetime.now() - start_time
                # Plot the progress
                print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %f] time: %s" % (epoch, epochs,
                                                                                                      batch_i, self.data_loader.n_batches,
                                                                                                      d_loss[0], 100*d_loss[1],
                                                                                                      g_loss[0],
                                                                                                      elapsed_time))

                # If at save interval => save generated image samples
                if batch_i % sample_interval == 0:
                    self.sample_images(epoch, batch_i)

        # NOTE(review): raises ValueError if sample_images() was never called
        # (empty self.errors); with sample_interval=50 this runs on batch 0.
        print('Error: ', min(self.errors))

        # Dump final outputs.  NOTE: np.save appends ".npy" to these names;
        # the ".png" suffix is misleading but kept for path compatibility.
        multi, imgs = self.data_loader.load_data(batch_size=35)
        fakes = self.generator.predict(imgs)
        residuals = multi - fakes
        for i in range(len(fakes)):
            np.save("images/102419/fakes/train/fake_%d.png" % (i), fakes[i])
            np.save("images/102419/multislice/train/multi_%d.png" % (i), multi[i])
            np.save("images/102419/convolution/train/conv_%d.png" % (i), imgs[i])
            np.save("images/102419/residuals/train/residual_%d.png" % (i), residuals[i])

        test_multi, test_imgs = self.data_loader.load_data(batch_size=11, is_testing=True)
        # BUG FIX: previously predicted from the *training* images (imgs),
        # so the "test" dump did not reflect the test set.
        test_fakes = self.generator.predict(test_imgs)
        test_residuals = test_multi - test_fakes
        # BUG FIX: iterate over the test batch, not the 35-image train batch.
        for i in range(len(test_fakes)):
            np.save("images/102419/fakes/test/fake_%d.png" % (i), test_fakes[i])
            np.save("images/102419/multislice/test/multi_%d.png" % (i), test_multi[i])
            np.save("images/102419/convolution/test/conv_%d.png" % (i), test_imgs[i])
            np.save("images/102419/residuals/test/residual_%d.png" % (i), test_residuals[i])

    def sample_images(self, epoch, batch_i):
        """Record test fsRMSE and save a 3x3 comparison figure.

        Figure rows: convolution inputs, generated images, ground-truth
        multislice; RMSE annotations compare rows 2 and 3.
        """
        os.makedirs('images/%s' % self.dataset_name, exist_ok=True)
        r, c = 3, 3

        errors = []
        imgs_A, imgs_B = self.data_loader.load_data(batch_size=11, is_testing=True)
        fake_A = self.generator.predict(imgs_B)
        # Rescale from tanh range [-1, 1] to [0, 1]
        imgs_A = 0.5 * imgs_A + 0.5
        fake_A = 0.5 * fake_A + 0.5
        std = np.std(np.array(imgs_A))
        mean = np.mean(np.array(imgs_A))
        for i in range(11):
            rmse = self.calculate_rmse(fake_A[i], imgs_A[i])
            errors.append(rmse / std)
        print('epoch fsrmse =', np.average(errors))

        imgs_A, imgs_B = self.data_loader.load_data(batch_size=3, is_testing=True)
        fake_A = self.generator.predict(imgs_B)

        # gen_imgs stores a batch of convolutions, then a batch of generations,
        # then a batch of multislice; with batch_size=3 the generated images
        # are at indices 3, 4 and 5.
        gen_imgs = np.concatenate([imgs_B, fake_A, imgs_A])

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        titles = ['Convolution', '"Fake" Multislice', 'Multislice']
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt][:, :, 0])
                axs[i, j].set_title(titles[i])
                if cnt in [3, 4, 5]:
                    # Compare each generated image with its ground truth
                    # (3 positions later in gen_imgs).
                    rmse = self.calculate_rmse(gen_imgs[cnt], gen_imgs[cnt + 3])
                    fig.text(0.5 + 0.29 * (j - 1), 0.07, "RMSE = " + str(rmse * 100)[:5], ha='center')
                    fig.text(0.5 + 0.29 * (j - 1), 0.04, "fmRMSE = " + str(rmse * 100 / mean)[:5] + "%", ha='center')
                    fig.text(0.5 + 0.29 * (j - 1), 0.01, "fsRMSE = " + str(rmse * 100 / std)[:5] + "%", ha='center')
                axs[i, j].axis('off')
                cnt += 1
        self.errors.append(np.average(errors))
        # saves the whole plot
        fig.savefig("images/%s/%d_%d.png" % (self.dataset_name, epoch, batch_i))
        plt.close()

    def _crop_bounds(self, targets):
        """Return (row, column) counts of leading all-zero rows/columns.

        BUG FIX: the original methods compared `targets[row] is not [0]*n`,
        an identity check against a fresh list that is always True, so the
        intended zero-border cropping never happened.  This helper performs
        the value comparison the code intended, and is shared by all metrics.
        """
        targets = np.asarray(targets)
        row = 0
        while row < targets.shape[0] and not np.any(targets[row]):
            row += 1
        column = 0
        while column < targets.shape[1] and not np.any(targets[:, column]):
            column += 1
        return row, column

    def _crop_slices(self, targets):
        """Symmetric slices that trim the zero border found in `targets`."""
        row, column = self._crop_bounds(targets)
        return (slice(row, self.img_rows - row), slice(column, self.img_cols - column))

    def calculate_rmse(self, predictions, targets):
        """Root-mean-square error over the zero-border-cropped region."""
        sl = self._crop_slices(targets)
        return np.sqrt(np.mean((predictions[sl] - targets[sl]) ** 2))

    def calculate_gray_rmse(self, predictions, targets):
        """RMSE between grayscale reductions of RGB images.

        NOTE(review): predictions use an unweighted channel mean while
        targets use Rec.601 luminance weights -- confirm the asymmetry is
        intentional.
        """
        grayPredictions = (predictions[:, :, 0] + predictions[:, :, 1] + predictions[:, :, 2]) / 3
        grayTargets = (targets[:, :, 0] * .299 + targets[:, :, 1] * .587 + targets[:, :, 2] * .114)
        return self.calculate_rmse(grayPredictions, grayTargets)

    def calculate_fm_rmse(self, predictions, targets):
        """RMSE normalized by the range (max - min) of the cropped predictions."""
        sl = self._crop_slices(targets)
        p = predictions[sl]
        return np.sqrt(np.mean((p - targets[sl]) ** 2)) / (p.max() - p.min())

    def calculate_fs_rmse(self, predictions, targets):
        """RMSE normalized by the standard deviation of the cropped predictions."""
        sl = self._crop_slices(targets)
        p = predictions[sl]
        return np.sqrt(np.mean((p - targets[sl]) ** 2)) / p.std()

    def calculate_mae(self, predictions, targets):
        """Mean absolute error over the zero-border-cropped region."""
        sl = self._crop_slices(targets)
        return np.mean(np.abs(predictions[sl] - targets[sl]))
if __name__ == '__main__':
    # Train the grayscale Pix2Pix GAN end to end; sample figures are written
    # every 100 batches (see Pix2Pix.sample_images).
    gan = Pix2Pix()
    gan.train(epochs=10000, batch_size=8, sample_interval=100)
| 18,816 | 39.729437 | 148 | py |
LSTA | LSTA-master/main_rgb.py | from __future__ import print_function, division
from attentionModel import *
from spatial_transforms import (Compose, ToTensor, CenterCrop, Scale, Normalize, MultiScaleCornerCrop,
RandomHorizontalFlip)
from tensorboardX import SummaryWriter
from makeDataset import *
import sys
import argparse
from gen_splits import *
#TODO: create separate dirs for stage1 and stage 2
def main_run(dataset, stage, root_dir, out_dir, seqLen, trainBatchSize, numEpochs, lr1, decay_factor,
             decay_step, memSize, outPool_size, split, evalInterval):
    """Train the LSTA attention model on RGB frames.

    Stage 1 trains only the LSTA cell and classifier on top of a frozen
    ResNet-34; stage 2 resumes from the stage-1 checkpoint and additionally
    fine-tunes the ResNet layer4 convolutions and fc layer.  Logs go to
    text files and TensorBoard under out_dir/dataset/split.

    NOTE(review): this code uses pre-0.4 PyTorch idioms (Variable, volatile,
    loss.data[0]) and `targets.cuda(async=True)`; `async` is a reserved word
    from Python 3.7, so this requires an older interpreter/PyTorch stack.
    """
    # ImageNet channel statistics used to normalize input frames.
    mean=[0.485, 0.456, 0.406]
    std=[0.229, 0.224, 0.225]
    normalize = Normalize(mean=mean, std=std)

    # NOTE(review): these self-assignments are redundant; kept as-is.
    stage = stage
    test_split = split
    seqLen = seqLen
    memSize = memSize
    c_cam_classes = outPool_size
    dataset = dataset
    best_acc = 0

    # Both stages currently use identical hyperparameter wiring; the test
    # batch size always mirrors the training batch size.
    if stage == 1:
        trainBatchSize = trainBatchSize
        testBatchSize = trainBatchSize
        lr1 = lr1
        decay_factor = decay_factor
        decay_step = decay_step
        numEpochs = numEpochs
    elif stage == 2:
        trainBatchSize = trainBatchSize
        testBatchSize = trainBatchSize
        lr1 = lr1
        decay_factor = decay_factor
        decay_step = decay_step
        numEpochs = numEpochs

    # Class counts per supported dataset.
    if dataset == 'gtea_61':
        num_classes = 61
    elif dataset == 'gtea_71':
        num_classes = 71
    elif dataset == 'egtea_gaze+':
        num_classes = 106
    else:
        print('Wrong dataset')
        sys.exit()

    dataset_dir = os.path.join(root_dir, dataset)
    model_folder = os.path.join('.', out_dir, dataset, str(test_split))
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)

    # Record the hyperparameters used for this run.
    note_fl = open(model_folder + '/note.txt', 'w')
    note_fl.write('Number of Epochs = {}\n'
                  'lr = {}\n'
                  'Train Batch Size = {}\n'
                  'Sequence Length = {}\n'
                  'Decay steps = {}\n'
                  'Decay factor = {}\n'
                  'Memory size = {}\n'
                  'Memory cam classes = {}\n'.format(numEpochs, lr1, trainBatchSize, seqLen, decay_step, decay_factor,
                                                     memSize, c_cam_classes))
    note_fl.close()

    # Log files
    writer = SummaryWriter(model_folder)
    train_log_loss = open((model_folder + '/train_log_loss.txt'), 'w')
    train_log_acc = open((model_folder + '/train_log_acc.txt'), 'w')
    train_log_loss_batch = open((model_folder + '/train_log_loss_batch.txt'), 'w')
    test_log_loss = open((model_folder + '/test_log_loss.txt'), 'w')
    test_log_acc = open((model_folder + '/test_log_acc.txt'), 'w')

    # Training-time augmentation: scale, flip, multi-scale corner crop to 224.
    spatial_transform = Compose([Scale(256), RandomHorizontalFlip(), MultiScaleCornerCrop([1, 0.875, 0.75, 0.65625], 224),
                                 ToTensor(), normalize])

    print('Preparing dataset...')
    if dataset == 'egtea_gaze+':
        trainDatasetF, testDatasetF, trainLabels, testLabels, trainNumFrames, testNumFrames = gen_split_egtea_gazePlus(dataset_dir,
                                                                                                                      test_split)
    else:
        trainDatasetF, testDatasetF, trainLabels, testLabels, trainNumFrames, testNumFrames, _ = gen_split(dataset_dir,
                                                                                                           test_split)

    vid_seq_train = makeDataset(trainDatasetF, trainLabels, trainNumFrames,
                                spatial_transform=spatial_transform,
                                fmt='.jpg', seqLen=seqLen)
    print('Number of train samples = {}'.format(vid_seq_train.__len__()))
    # NOTE(review): no shuffle=True here -- training order is fixed; confirm
    # whether that is intended.
    train_loader = torch.utils.data.DataLoader(vid_seq_train, batch_size=trainBatchSize, num_workers=4, pin_memory=True)

    # Test pipeline uses deterministic center crop.
    vid_seq_test = makeDataset(testDatasetF, testLabels, testNumFrames,
                               spatial_transform=Compose([Scale(256), CenterCrop(224), ToTensor(), normalize]),
                               fmt='.jpg', seqLen=seqLen)
    print('Number of test samples = {}'.format(vid_seq_test.__len__()))
    test_loader = torch.utils.data.DataLoader(vid_seq_test, batch_size=testBatchSize,
                                              shuffle=False, num_workers=2, pin_memory=True)

    train_params = []
    if stage == 1:
        # Stage 1: fresh model, backbone fully frozen.
        model = attentionModel(num_classes=num_classes, mem_size=memSize, c_cam_classes=c_cam_classes)
        model.train(False)
        for params in model.parameters():
            params.requires_grad = False
    elif stage == 2:
        # Stage 2: resume from stage-1 checkpoint, then selectively unfreeze
        # the layer4 convolutions and the backbone fc.
        model = attentionModel(num_classes=num_classes, mem_size=memSize, c_cam_classes=c_cam_classes)
        checkpoint_path = os.path.join(model_folder, 'last_checkpoint_stage' + str(1) + '.pth.tar')
        if os.path.exists(checkpoint_path):
            print('Loading weights from checkpoint file {}'.format(checkpoint_path))
        else:
            print('Checkpoint file {} does not exist'.format(checkpoint_path))
            sys.exit()
        last_checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(last_checkpoint['model_state_dict'])
        model.train(False)
        for params in model.parameters():
            params.requires_grad = False
        for params in model.resNet.layer4[0].conv1.parameters():
            params.requires_grad = True
            train_params += [params]
        for params in model.resNet.layer4[0].conv2.parameters():
            params.requires_grad = True
            train_params += [params]
        for params in model.resNet.layer4[1].conv1.parameters():
            params.requires_grad = True
            train_params += [params]
        for params in model.resNet.layer4[1].conv2.parameters():
            params.requires_grad = True
            train_params += [params]
        for params in model.resNet.layer4[2].conv1.parameters():
            params.requires_grad = True
            train_params += [params]
        for params in model.resNet.layer4[2].conv2.parameters():
            params.requires_grad = True
            train_params += [params]
        for params in model.resNet.fc.parameters():
            params.requires_grad = True
            train_params += [params]

    # In both stages the LSTA cell and classifier are trained.
    for params in model.lsta_cell.parameters():
        params.requires_grad = True
        train_params += [params]
    for params in model.classifier.parameters():
        params.requires_grad = True
        train_params += [params]

    model.classifier.train(True)
    model.cuda()

    loss_fn = nn.CrossEntropyLoss()
    optimizer_fn = torch.optim.Adam(train_params, lr=lr1, weight_decay=5e-4, eps=1e-4)
    optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer_fn, milestones=decay_step, gamma=decay_factor)

    train_iter = 0
    for epoch in range(numEpochs):
        # NOTE(review): scheduler stepped at epoch start -- the pre-1.1
        # PyTorch ordering; matches the framework version this targets.
        optim_scheduler.step()
        epoch_loss = 0
        numCorrTrain = 0
        trainSamples = 0
        iterPerEpoch = 0
        model.classifier.train(True)
        writer.add_scalar('lr', optimizer_fn.param_groups[0]['lr'], epoch+1)
        for i, (inputs, targets) in enumerate(train_loader):
            train_iter += 1
            iterPerEpoch += 1
            optimizer_fn.zero_grad()
            # (batch, seq, C, H, W) -> (seq, batch, C, H, W) for the model.
            inputVariable = Variable(inputs.permute(1, 0, 2, 3, 4).cuda())
            labelVariable = Variable(targets.cuda())
            trainSamples += inputs.size(0)
            output_label, _ = model(inputVariable)
            loss = loss_fn(output_label, labelVariable)
            loss.backward()
            optimizer_fn.step()
            _, predicted = torch.max(output_label.data, 1)
            numCorrTrain += (predicted == targets.cuda()).sum()
            if train_iter%10 == 0:
                # loss.data[0] is the pre-0.4 way to read a scalar loss.
                print('Training loss after {} iterations = {} '.format(train_iter, loss.data[0]))
                train_log_loss_batch.write('Training loss after {} iterations = {}\n'.format(train_iter, loss.data[0]))
                writer.add_scalar('train/iter_loss', loss.data[0], train_iter)
            epoch_loss += loss.data[0]
        avg_loss = epoch_loss/iterPerEpoch
        trainAccuracy = (numCorrTrain / trainSamples) * 100
        print('Average training loss after {} epoch = {} '.format(epoch+1, avg_loss))
        print('Training accuracy after {} epoch = {}% '.format(epoch+1, trainAccuracy))
        writer.add_scalar('train/epoch_loss', avg_loss, epoch+1)
        writer.add_scalar('train/accuracy', trainAccuracy, epoch+1)
        train_log_loss.write('Training loss after {} epoch = {}\n'.format(epoch+1, avg_loss))
        train_log_acc.write('Training accuracy after {} epoch = {}\n'.format(epoch+1, trainAccuracy))

        # Always save the latest checkpoint for this stage.
        save_path_model = os.path.join(model_folder, 'last_checkpoint_stage' + str(stage) + '.pth.tar')
        save_file = {
            'epoch': epoch + 1,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer_fn.state_dict(),
            'best_acc': best_acc,
        }
        torch.save(save_file, save_path_model)

        # Periodic evaluation on the held-out split.
        if (epoch+1) % evalInterval == 0:
            print('Testing...')
            model.train(False)
            test_loss_epoch = 0
            test_iter = 0
            test_samples = 0
            numCorr = 0
            for j, (inputs, targets) in enumerate(test_loader):
                print('testing inst = {}'.format(j))
                test_iter += 1
                test_samples += inputs.size(0)
                # volatile=True disables autograd history (pre-0.4 API).
                inputVariable = Variable(inputs.permute(1, 0, 2, 3, 4).cuda(), volatile=True)
                labelVariable = Variable(targets.cuda(async=True), volatile=True)
                output_label, _ = model(inputVariable)
                test_loss = loss_fn(output_label, labelVariable)
                test_loss_epoch += test_loss.data[0]
                _, predicted = torch.max(output_label.data, 1)
                numCorr += (predicted == targets.cuda()).sum()
            test_accuracy = (numCorr / test_samples) * 100
            avg_test_loss = test_loss_epoch / test_iter
            print('Test Loss after {} epochs, loss = {}'.format(epoch + 1,avg_test_loss))
            print('Test Accuracy after {} epochs = {}%'.format(epoch + 1, test_accuracy))
            writer.add_scalar('test/epoch_loss', avg_test_loss, epoch + 1)
            writer.add_scalar('test/accuracy', test_accuracy, epoch + 1)
            test_log_loss.write('Test Loss after {} epochs = {}\n'.format(epoch + 1, avg_test_loss))
            test_log_acc.write('Test Accuracy after {} epochs = {}%\n'.format(epoch + 1, test_accuracy))
            # Keep a separate checkpoint for the best test accuracy so far.
            if test_accuracy > best_acc:
                best_acc = test_accuracy
                save_path_model = os.path.join(model_folder, 'best_checkpoint_stage' + str(stage) + '.pth.tar')
                save_file = {
                    'epoch': epoch + 1,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer_fn.state_dict(),
                    'best_acc': best_acc,
                }
                torch.save(save_file, save_path_model)

    train_log_loss.close()
    train_log_acc.close()
    test_log_acc.close()
    train_log_loss_batch.close()
    test_log_loss.close()
    writer.export_scalars_to_json(model_folder + "/all_scalars.json")
    writer.close()
def __main__():
    """Parse command-line options and launch main_run with them."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='gtea61', help='Dataset')
    parser.add_argument('--stage', type=int, default=1, help='Training stage')
    parser.add_argument('--root_dir', type=str, default='dataset',
                        help='Dataset directory')
    parser.add_argument('--outDir', type=str, default='experiments', help='Directory to save results')
    parser.add_argument('--seqLen', type=int, default=25, help='Length of sequence')
    parser.add_argument('--trainBatchSize', type=int, default=32, help='Training batch size')
    parser.add_argument('--numEpochs', type=int, default=300, help='Number of epochs')
    parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
    parser.add_argument('--stepSize', type=float, default=[25, 75, 150], nargs="+", help='Learning rate decay step')
    parser.add_argument('--decayRate', type=float, default=0.1, help='Learning rate decay rate')
    parser.add_argument('--memSize', type=int, default=512, help='ConvLSTM hidden state size')
    parser.add_argument('--outPoolSize', type=int, default=100, help='Output pooling size')
    parser.add_argument('--evalInterval', type=int, default=5, help='Evaluation interval')
    parser.add_argument('--split', type=int, default=1, help='Split')

    args = parser.parse_args()

    # Forward the parsed options straight into the training entry point.
    main_run(dataset=args.dataset,
             stage=args.stage,
             root_dir=args.root_dir,
             out_dir=args.outDir,
             seqLen=args.seqLen,
             trainBatchSize=args.trainBatchSize,
             numEpochs=args.numEpochs,
             lr1=args.lr,
             decay_factor=args.decayRate,
             decay_step=args.stepSize,
             memSize=args.memSize,
             outPool_size=args.outPoolSize,
             evalInterval=args.evalInterval,
             split=args.split)
__main__() | 13,449 | 41.698413 | 131 | py |
LSTA | LSTA-master/resNetNew.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with unit padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block.

    With noBN=True, forward returns a pair: the usual post-BN activated
    output and the residual sum taken *before* the second batch norm.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, noBN=False):
        super(BasicBlock, self).__init__()
        self.noBN = noBN
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch (projected when a downsample module is given).
        shortcut = self.downsample(x) if self.downsample is not None else x

        # Main branch up to, but not including, the second batch norm.
        pre_bn = self.conv2(self.relu(self.bn1(self.conv1(x))))

        # Standard output: BN -> add shortcut -> ReLU.
        post_bn = self.relu(self.bn2(pre_bn) + shortcut)

        if self.noBN:
            # Also expose the un-normalized residual sum (no ReLU applied).
            return post_bn, pre_bn + shortcut
        return post_bn
class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce, 3x3, 1x1 expand (x4)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch (projected when a downsample module is given).
        shortcut = x if self.downsample is None else self.downsample(x)

        # conv1 -> bn1 -> relu -> conv2 -> bn2 -> relu -> conv3 -> bn3
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """ResNet backbone with an optional `noBN` mode.

    With noBN=True, the final block of layer4 is built with noBN=True
    (BasicBlock only), so layer4 -- and forward -- additionally return the
    residual sum taken before that block's last batch norm.  forward always
    returns the classifier logits together with the layer4 feature map(s).
    """

    def __init__(self, block, layers, num_classes=1000, noBN=False):
        # `block` is BasicBlock or Bottleneck; `layers` gives the block count
        # for each of the four stages.
        self.inplanes = 64
        self.noBN = noBN
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # Only layer4 receives the noBN flag (see _make_layer).
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, noBN=self.noBN)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He-style init for conv weights; BN scale 1, bias 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, noBN=False):
        """Stack `blocks` residual blocks; with noBN the last block is built
        with noBN=True so the stage returns both post-BN and pre-BN outputs.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match spatial size / channel count.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        if noBN is False:
            for i in range(1, blocks):
                layers.append(block(self.inplanes, planes))
        else:
            if blocks > 2:
                for i in range(1, blocks-1):
                    layers.append(block(self.inplanes, planes))
                layers.append(block(self.inplanes, planes, noBN=True))
            else:
                # NOTE(review): for blocks == 1 this still appends a second
                # (noBN) block, giving 2 blocks in total -- confirm intended.
                layers.append(block(self.inplanes, planes, noBN=True))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        if self.noBN:
            # layer4's last block returns (post-BN, pre-BN) feature maps.
            conv_layer4BN, conv_layer4NBN = self.layer4(x)
        else:
            conv_layer4BN = self.layer4(x)

        # Classification head always pools the post-BN features.
        x = self.avgpool(conv_layer4BN)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        if self.noBN:
            return x, conv_layer4BN, conv_layer4NBN
        else:
            return x, conv_layer4BN
def resnet18(pretrained=False, noBN=False, **kwargs):
    """Construct a ResNet-18 model.

    Args:
        pretrained (bool): If True, load ImageNet weights (non-strictly).
        noBN (bool): Forwarded to ResNet to expose pre-BN layer4 features.
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], noBN=noBN, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
    return net
def resnet34(pretrained=False, noBN=False, **kwargs):
    """Construct a ResNet-34 model.

    Args:
        pretrained (bool): If True, load ImageNet weights (non-strictly).
        noBN (bool): Forwarded to ResNet to expose pre-BN layer4 features.
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], noBN=noBN, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)
    return net
def resnet50(pretrained=False, **kwargs):
    """Construct a ResNet-50 model.

    Args:
        pretrained (bool): If True, load ImageNet weights.
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return net
def resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101 model.

    Args:
        pretrained (bool): If True, load ImageNet weights.
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return net
def resnet152(pretrained=False, **kwargs):
    """Construct a ResNet-152 model.

    Args:
        pretrained (bool): If True, load ImageNet weights.
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return net
| 7,689 | 31.175732 | 87 | py |
LSTA | LSTA-master/attentionModel.py | import resNetNew
from torch.autograd import Variable
from MyConvLSTACell import *
class attentionModel(nn.Module):
    """Recurrent attention network: ResNet-34 backbone driving a conv-LSTA cell.

    For every frame, a class activation map (CAM) built from the backbone's
    own top-1 prediction steers the LSTA cell's attention; after the last
    frame the cell's memory is average-pooled and classified.
    """

    def __init__(self, num_classes=51, mem_size=512, c_cam_classes=1000):
        super(attentionModel, self).__init__()
        self.num_classes = num_classes
        # Backbone returns (logits, layer4 with BN, layer4 without BN).
        self.resNet = resNetNew.resnet34(True, True)
        self.mem_size = mem_size
        self.lsta_cell = MyConvLSTACell(512, mem_size, c_cam_classes)
        self.avgpool = nn.AvgPool2d(7)
        self.dropout = nn.Dropout(0.7)
        self.fc = nn.Linear(mem_size, self.num_classes)
        self.classifier = nn.Sequential(self.dropout, self.fc)

    def forward(self, inputVariable):
        """Run a clip shaped (time, batch, C, H, W); return (logits, features)."""
        batch = inputVariable.size(1)
        # Attention state (a, s) and input state (c, o); all are 7x7 maps.
        state_att = (Variable(torch.zeros(batch, 1, 7, 7).cuda()),
                     Variable(torch.zeros(batch, 1, 7, 7).cuda()))
        state_inp = (Variable(torch.zeros((batch, self.mem_size, 7, 7)).cuda()),
                     Variable(torch.zeros((batch, self.mem_size, 7, 7)).cuda()))
        for t in range(inputVariable.size(0)):
            logit, feature_conv, x = self.resNet(inputVariable[t])
            bz, nc, h, w = feature_conv.size()
            flat_conv = feature_conv.view(bz, nc, h * w)
            # CAM for the backbone's current top-1 class.
            _, idxs = logit.sort(1, True)
            top1 = idxs[:, 0]
            cam = torch.bmm(self.resNet.fc.weight[top1].unsqueeze(1),
                            flat_conv).view(x.size(0), 1, 7, 7)
            state_att, state_inp, _ = self.lsta_cell(x, cam, state_att, state_inp)
        feats = self.avgpool(state_inp[0]).view(state_inp[0].size(0), -1)
        logits = self.classifier(feats)
        return logits, feats
| 1,669 | 48.117647 | 114 | py |
LSTA | LSTA-master/test_rgb.py | from __future__ import print_function, division
from attentionModel import *
from spatial_transforms import (Compose, ToTensor, CenterCrop, Scale, Normalize, MultiScaleCornerCrop,
RandomHorizontalFlip)
from tensorboardX import SummaryWriter
from makeDataset import *
import sys
import argparse
from gen_splits import *
#TODO: create separate dirs for stage1 and stage 2
def main_run(dataset, root_dir, checkpoint_path, seqLen, testBatchSize, memSize, outPool_size, split):
    """Evaluate a trained LSTA RGB model on a dataset's test split.

    Args:
        dataset: one of 'gtea_61', 'gtea_71', 'egtea_gaze+'.
        root_dir: directory containing the dataset folders.
        checkpoint_path: path to the saved checkpoint (.pth.tar).
        seqLen: number of frames sampled per video.
        testBatchSize: evaluation batch size.
        memSize: LSTA hidden-state channel count.
        outPool_size: number of output-pooling (CAM) classes.
        split: test split index.

    Exits via ``sys.exit()`` on an unknown dataset or missing checkpoint.
    """
    mean = [0.485, 0.456, 0.406]  # ImageNet normalisation statistics
    std = [0.229, 0.224, 0.225]
    normalize = Normalize(mean=mean, std=std)
    test_split = split
    c_cam_classes = outPool_size
    if dataset == 'gtea_61':
        num_classes = 61
    elif dataset == 'gtea_71':
        num_classes = 71
    elif dataset == 'egtea_gaze+':
        num_classes = 106
    else:
        print('Wrong dataset')
        sys.exit()
    dataset_dir = os.path.join(root_dir, dataset)
    print('Preparing dataset...')
    if dataset == 'egtea_gaze+':
        trainDatasetF, testDatasetF, trainLabels, testLabels, trainNumFrames, testNumFrames = gen_split_egtea_gazePlus(
            dataset_dir, test_split)
    else:
        trainDatasetF, testDatasetF, trainLabels, testLabels, trainNumFrames, testNumFrames, _ = gen_split(
            dataset_dir, test_split)
    vid_seq_test = makeDataset(testDatasetF, testLabels, testNumFrames,
                               spatial_transform=Compose([Scale(256), CenterCrop(224), ToTensor(), normalize]),
                               fmt='.jpg', seqLen=seqLen)
    print('Number of test samples = {}'.format(vid_seq_test.__len__()))
    test_loader = torch.utils.data.DataLoader(vid_seq_test, batch_size=testBatchSize,
                                              shuffle=False, num_workers=4, pin_memory=True)
    model = attentionModel(num_classes=num_classes, mem_size=memSize, c_cam_classes=c_cam_classes)
    if os.path.exists(checkpoint_path):
        print('Loading weights from checkpoint file {}'.format(checkpoint_path))
    else:
        print('Checkpoint file {} does not exist'.format(checkpoint_path))
        sys.exit()
    last_checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(last_checkpoint['model_state_dict'])
    model.cuda()
    model.train(False)
    print('Testing...')
    test_iter = 0
    test_samples = 0
    numCorr = 0
    for j, (inputs, targets) in enumerate(test_loader):
        print('testing inst = {}'.format(j))
        test_iter += 1
        test_samples += inputs.size(0)
        # volatile=True is the legacy (torch < 0.4) no-grad inference mode;
        # kept for compatibility with the rest of this codebase.
        inputVariable = Variable(inputs.permute(1, 0, 2, 3, 4).cuda(), volatile=True)
        output_label, _ = model(inputVariable)
        _, predicted = torch.max(output_label.data, 1)
        numCorr += (predicted == targets.cuda()).sum()
    if test_samples == 0:
        print('No test samples found')
        return
    # float() guards against integer tensor division: on torch >= 0.4,
    # .sum() on a bool/byte comparison yields a LongTensor and integer
    # '/' floors, which would always report 0% accuracy.
    test_accuracy = (float(numCorr) / test_samples) * 100
    print('Test Accuracy after = {}%'.format(test_accuracy))
def __main__():
    """Parse the command line and launch evaluation via ``main_run``."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='gtea61', help='Dataset')
    parser.add_argument('--checkpoint', type=str, default='model_epoch150.pth.tar', help='Dataset')
    parser.add_argument('--root_dir', type=str, default='./dataset/',
                        help='Dataset directory')
    parser.add_argument('--seqLen', type=int, default=25, help='Length of sequence')
    parser.add_argument('--testBatchSize', type=int, default=32, help='Training batch size')
    parser.add_argument('--memSize', type=int, default=512, help='ConvLSTM hidden state size')
    parser.add_argument('--outPoolSize', type=int, default=100, help='Output pooling size')
    parser.add_argument('--split', type=int, default=1, help='Split')
    args = parser.parse_args()
    main_run(dataset=args.dataset,
             root_dir=args.root_dir,
             seqLen=args.seqLen,
             testBatchSize=args.testBatchSize,
             memSize=args.memSize,
             outPool_size=args.outPoolSize,
             split=args.split,
             checkpoint_path=args.checkpoint)
__main__() | 4,491 | 39.107143 | 131 | py |
LSTA | LSTA-master/makeDataset.py | import os
import torch
from torch.utils.data import Dataset
from PIL import Image
import numpy as np
import random
class makeDataset(Dataset):
    """Frame-sequence dataset sampling ``seqLen`` evenly spaced frames per video.

    Each item is a (seqLen, C, H, W) tensor built by applying the shared
    spatial transform to every sampled frame, together with the class label
    (and the video path when ``mode == 'eval'``).
    """

    def __init__(self, dataset, labels, numFrames, spatial_transform=None, seqLen=30,
                 train=True, mulSeg=False, numSeg=1, fmt='.jpg', mode='train'):
        """
        Args:
            dataset: list of per-video frame directories.
            labels: per-video class labels (parallel to ``dataset``).
            numFrames: per-video frame counts (parallel to ``dataset``).
            spatial_transform: transform applied to each frame; randomised
                once per clip so all frames share the same augmentation.
            seqLen: number of frames sampled per clip.
            fmt: frame-file extension.
            mode: 'eval' additionally returns the video path.
        """
        self.spatial_transform = spatial_transform
        self.train = train
        self.mulSeg = mulSeg
        self.numSeg = numSeg
        self.images = dataset
        self.labels = labels
        self.numFrames = numFrames
        self.seqLen = seqLen
        self.fmt = fmt
        self.mode = mode

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        vid_name = self.images[idx]
        label = self.labels[idx]
        numFrame = self.numFrames[idx]
        # One augmentation draw shared by every frame of this clip.
        self.spatial_transform.randomize_parameters()
        frames = []
        for i in np.linspace(1, numFrame, self.seqLen, endpoint=True):
            fl_name = vid_name + '/' + 'image_' + str(int(np.floor(i))).zfill(5) + self.fmt
            img = Image.open(fl_name)
            frames.append(self.spatial_transform(img.convert('RGB')))
        inpSeq = torch.stack(frames, 0)
        if self.mode == 'eval':
            return inpSeq, label, vid_name
        return inpSeq, label
| 1,597 | 33 | 91 | py |
LSTA | LSTA-master/MyConvLSTACell.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class MyConvLSTACell(nn.Module):
    """Convolutional LSTA cell with a built-in attention recurrence.

    Maintains two recurrent states: a 1-channel spatial *attention* state
    (a, s) updated from the incoming class activation map (CAM), and a
    ``memory_size``-channel *input/memory* state (c, o) updated from the
    attention-weighted input features. An internal linear classifier over the
    pooled memory produces an output-pooling CAM used to gate the output.
    """

    def __init__(self, input_size, memory_size, c_cam_classes=100, kernel_size=3,
                 stride=1, padding=1, zero_init=False):
        # input_size: channels of the per-frame feature map x.
        # memory_size: channels of the cell memory c.
        # c_cam_classes: classes of the internal output-pooling classifier.
        # zero_init: initialise all gate convolutions to zero instead of
        #            Xavier-normal (biases are zero in both schemes).
        super(MyConvLSTACell, self).__init__()
        self.input_size = input_size
        self.memory_size = memory_size
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        # Classifier over the pooled memory; coupling_fc adds a contribution
        # from the pooled attended input (both bias-free so CAM weights apply).
        self.c_classifier = nn.Linear(memory_size, c_cam_classes, bias=False)
        self.coupling_fc = nn.Linear(memory_size, c_cam_classes, bias=False)
        self.avgpool = nn.AvgPool2d(7)
        # Attention params
        # 1-channel convolutions over the attention state s (with bias) and
        # the incoming CAM (bias-free), one pair per gate i/f/a/o.
        self.conv_i_s = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=stride, padding=padding)
        self.conv_i_cam = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        self.conv_f_s = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=stride, padding=padding)
        self.conv_f_cam = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        self.conv_a_s = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=stride, padding=padding)
        self.conv_a_cam = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        self.conv_o_s = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=stride, padding=padding)
        self.conv_o_cam = nn.Conv2d(1, 1, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        if zero_init:
            torch.nn.init.constant(self.conv_i_s.weight, 0)
            torch.nn.init.constant(self.conv_i_s.bias, 0)
            torch.nn.init.constant(self.conv_i_cam.weight, 0)
            torch.nn.init.constant(self.conv_f_s.weight, 0)
            torch.nn.init.constant(self.conv_f_s.bias, 0)
            torch.nn.init.constant(self.conv_f_cam.weight, 0)
            torch.nn.init.constant(self.conv_a_s.weight, 0)
            torch.nn.init.constant(self.conv_a_s.bias, 0)
            # NOTE(review): conv_a_cam is NOT zero-initialised here, unlike
            # its peers and unlike the Xavier branch below — confirm intended.
            torch.nn.init.constant(self.conv_o_s.weight, 0)
            torch.nn.init.constant(self.conv_o_s.bias, 0)
            torch.nn.init.constant(self.conv_o_cam.weight, 0)
        else:
            # NOTE(review): torch.nn.init.constant/xavier_normal are the
            # pre-0.4 (non-underscore) initialiser names.
            torch.nn.init.xavier_normal(self.conv_i_s.weight)
            torch.nn.init.constant(self.conv_i_s.bias, 0)
            torch.nn.init.xavier_normal(self.conv_i_cam.weight)
            torch.nn.init.xavier_normal(self.conv_f_s.weight)
            torch.nn.init.constant(self.conv_f_s.bias, 0)
            torch.nn.init.xavier_normal(self.conv_f_cam.weight)
            torch.nn.init.xavier_normal(self.conv_a_s.weight)
            torch.nn.init.constant(self.conv_a_s.bias, 0)
            torch.nn.init.xavier_normal(self.conv_a_cam.weight)
            torch.nn.init.xavier_normal(self.conv_o_s.weight)
            torch.nn.init.constant(self.conv_o_s.bias, 0)
            torch.nn.init.xavier_normal(self.conv_o_cam.weight)
        # Memory params
        # Full-width convolutions over the attended input x (with bias) and
        # the gated memory o*c (bias-free), one pair per gate i/f/c/o.
        self.conv_i_x = nn.Conv2d(input_size, memory_size, kernel_size=kernel_size, stride=stride, padding=padding)
        self.conv_i_c = nn.Conv2d(memory_size, memory_size, kernel_size=kernel_size, stride=stride, padding=padding,
                                  bias=False)
        self.conv_f_x = nn.Conv2d(input_size, memory_size, kernel_size=kernel_size, stride=stride, padding=padding)
        self.conv_f_c = nn.Conv2d(memory_size, memory_size, kernel_size=kernel_size, stride=stride, padding=padding,
                                  bias=False)
        self.conv_c_x = nn.Conv2d(input_size, memory_size, kernel_size=kernel_size, stride=stride, padding=padding)
        self.conv_c_c = nn.Conv2d(memory_size, memory_size, kernel_size=kernel_size, stride=stride, padding=padding,
                                  bias=False)
        self.conv_o_x = nn.Conv2d(input_size, memory_size, kernel_size=kernel_size, stride=stride, padding=padding)
        self.conv_o_c = nn.Conv2d(memory_size, memory_size, kernel_size=kernel_size, stride=stride, padding=padding,
                                  bias=False)
        if zero_init:
            torch.nn.init.constant(self.conv_i_x.weight, 0)
            torch.nn.init.constant(self.conv_i_x.bias, 0)
            torch.nn.init.constant(self.conv_i_c.weight, 0)
            torch.nn.init.constant(self.conv_f_x.weight, 0)
            torch.nn.init.constant(self.conv_f_x.bias, 0)
            torch.nn.init.constant(self.conv_f_c.weight, 0)
            torch.nn.init.constant(self.conv_c_x.weight, 0)
            torch.nn.init.constant(self.conv_c_x.bias, 0)
            torch.nn.init.constant(self.conv_c_c.weight, 0)
            torch.nn.init.constant(self.conv_o_x.weight, 0)
            torch.nn.init.constant(self.conv_o_x.bias, 0)
            torch.nn.init.constant(self.conv_o_c.weight, 0)
        else:
            torch.nn.init.xavier_normal(self.conv_i_x.weight)
            torch.nn.init.constant(self.conv_i_x.bias, 0)
            torch.nn.init.xavier_normal(self.conv_i_c.weight)
            torch.nn.init.xavier_normal(self.conv_f_x.weight)
            torch.nn.init.constant(self.conv_f_x.bias, 0)
            torch.nn.init.xavier_normal(self.conv_f_c.weight)
            torch.nn.init.xavier_normal(self.conv_c_x.weight)
            torch.nn.init.constant(self.conv_c_x.bias, 0)
            torch.nn.init.xavier_normal(self.conv_c_c.weight)
            torch.nn.init.xavier_normal(self.conv_o_x.weight)
            torch.nn.init.constant(self.conv_o_x.bias, 0)
            torch.nn.init.xavier_normal(self.conv_o_c.weight)

    def forward(self, x, cam, state_att, state_inp, x_flow_i=0, x_flow_f=0, x_flow_c=0, x_flow_o=0):
        """Advance the cell one step.

        Args:
            x: input feature map (batch, input_size, 7, 7).
            cam: class activation map (batch, 1, 7, 7) steering attention.
            state_att: previous attention state [a, s].
            state_inp: previous input state [atanh(c), o].
            x_flow_*: optional additive gate contributions from a flow
                stream (default 0, i.e. no-ops).

        Returns:
            (new state_att, new state_inp, attention-weighted input x_att).
        """
        # state_att = [a, s]
        # state_inp = [atanh(c), o]
        a_t_1 = state_att[0]
        s_t_1 = state_att[1]
        c_t_1 = F.tanh(state_inp[0])
        o_t_1 = state_inp[1]
        # Attention recurrence
        # Standard LSTM-style gates computed on the 1-channel attention maps.
        i_s = F.sigmoid(self.conv_i_s(s_t_1) + self.conv_i_cam(cam))
        f_s = F.sigmoid(self.conv_f_s(s_t_1) + self.conv_f_cam(cam))
        o_s = F.sigmoid(self.conv_o_s(s_t_1) + self.conv_o_cam(cam))
        a_tilde = F.tanh(self.conv_a_s(s_t_1) + self.conv_a_cam(cam))
        a = (f_s * a_t_1) + (i_s * a_tilde)
        s = o_s * F.tanh(a)
        # Softmax over all 49 spatial positions yields the attention weights.
        u = s + cam # hidden state + cam
        u = F.softmax(u.view(u.size(0), -1), 1)
        u = u.view(u.size(0), 1, 7, 7)
        x_att = x * u.expand_as(x)
        # Memory recurrence driven by the attended input and the gated memory.
        i_x = F.sigmoid(self.conv_i_c(o_t_1 * c_t_1) + self.conv_i_x(x_att) + x_flow_i)
        f_x = F.sigmoid(self.conv_f_c(o_t_1 * c_t_1) + self.conv_f_x(x_att) + x_flow_f)
        c_tilde = F.tanh(self.conv_c_c(o_t_1 * c_t_1) + self.conv_c_x(x_att) + x_flow_c)
        c = (f_x * state_inp[0]) + (i_x * c_tilde)
        # Output pooling: classify the pooled memory, then weight the memory
        # by the top-1 class's classifier weights to form an internal CAM.
        c_vec = self.avgpool(c).view(c.size(0), -1)
        c_logits = self.c_classifier(c_vec) + self.coupling_fc(self.avgpool(x_att).view(x_att.size(0), -1))
        c_probs, c_idxs = c_logits.sort(1, True)
        c_class_idx = c_idxs[:, 0]
        c_cam = self.c_classifier.weight[c_class_idx].unsqueeze(2).unsqueeze(2) * c
        o_x = F.sigmoid(self.conv_o_x(o_t_1 * c_t_1) + self.conv_o_c(c_cam))
        state_att = [a, s]
        state_inp = [c, o_x]
return state_att, state_inp, x_att | 7,249 | 45.774194 | 116 | py |
LSTA | LSTA-master/spatial_transforms.py | import random
import math
import numbers
import collections
import numpy as np
import torch
from PIL import Image, ImageOps
try:
import accimage
except ImportError:
accimage = None
class Compose(object):
    """Chain several transforms; each is invoked as ``t(img, inv, flow)``.

    Example:
        >>> transforms.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.ToTensor(),
        >>> ])
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, inv=False, flow=False):
        result = img
        for transform in self.transforms:
            result = transform(result, inv, flow)
        return result

    def randomize_parameters(self):
        # Draw fresh random parameters for every child transform (called once
        # per clip so all frames share one augmentation).
        for transform in self.transforms:
            transform.randomize_parameters()
class ToTensor(object):
    """Convert a ``PIL.Image`` or ``numpy.ndarray`` to tensor.
    Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
    """
    def __init__(self, norm_value=255):
        # Divisor applied to byte images (255 maps to 1.0).
        self.norm_value = norm_value
    def __call__(self, pic, inv, flow):
        """
        Args:
            pic (PIL.Image or numpy.ndarray): Image to be converted to tensor.
        Returns:
            Tensor: Converted image.

        Note: ``inv`` and ``flow`` are accepted for pipeline compatibility
        but unused here.
        """
        if isinstance(pic, np.ndarray):
            # handle numpy array
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            # backward compatibility
            return img.float().div(self.norm_value)
        if accimage is not None and isinstance(pic, accimage.Image):
            # accimage decodes straight into a float32 CHW buffer (no scaling).
            nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
            pic.copyto(nppic)
            return torch.from_numpy(nppic)
        # handle PIL Image
        if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            # Raw byte buffer for 8-bit-per-channel modes.
            img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)
        # put it from HWC to CHW format
        # yikes, this transpose takes 80% of the loading time/CPU
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        if isinstance(img, torch.ByteTensor):
            # Byte images are scaled to [0, 1]; the int tensors built for
            # modes I / I;16 are returned unscaled.
            return img.float().div(self.norm_value)
        else:
            return img
    def randomize_parameters(self):
        # Stateless transform; nothing to randomise.
        pass
class Normalize(object):
    """Channel-wise in-place normalisation: channel = (channel - mean) / std.

    For flow inputs (``flow=True``) a single grey-scale statistic — the
    average of the per-channel means/stds — is used instead.

    Args:
        mean (sequence): per-channel means (R, G, B).
        std (sequence): per-channel standard deviations (R, G, B).
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor, inv, flow):
        """Normalise a (C, H, W) tensor in place and return it."""
        if flow is True:
            channel_means = [np.mean(self.mean)]
            channel_stds = [np.mean(self.std)]
        else:
            channel_means = self.mean
            channel_stds = self.std
        for channel, m, s in zip(tensor, channel_means, channel_stds):
            channel.sub_(m).div_(s)
        return tensor

    def randomize_parameters(self):
        # Stateless transform; nothing to randomise.
        pass
class Scale(object):
    """Rescale the input PIL.Image to the given size.

    Args:
        size (sequence or int): Desired output size. If size is a sequence like
            (w, h), output size will be matched to this. If size is an int,
            smaller edge of the image will be matched to this number.
            i.e, if height > width, then image will be rescaled to
            (size * height / width, size)
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        # collections.Iterable was removed in Python 3.10; the abc location
        # has existed since 3.3, so this is backward-compatible.
        from collections.abc import Iterable
        assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img, inv, flow):
        """Return *img* resized; an int size scales the shorter edge to it.

        Images whose shorter edge already equals the int size are returned
        unchanged; a (w, h) sequence size is applied directly.
        """
        if isinstance(self.size, int):
            w, h = img.size
            if (w <= h and w == self.size) or (h <= w and h == self.size):
                return img
            if w < h:
                ow = self.size
                oh = int(self.size * h / w)
                return img.resize((ow, oh), self.interpolation)
            else:
                oh = self.size
                ow = int(self.size * w / h)
                return img.resize((ow, oh), self.interpolation)
        else:
            return img.resize(self.size, self.interpolation)

    def randomize_parameters(self):
        # Deterministic transform; nothing to randomise.
        pass
class CenterCrop(object):
    """Crop the given PIL.Image at its centre.

    Args:
        size (sequence or int): Desired output size of the crop. An int
            produces a square (size, size) crop.
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, img, inv, flow):
        """Return the centred (th, tw) crop of *img*."""
        w, h = img.size
        th, tw = self.size
        left = int(round((w - tw) / 2.))
        top = int(round((h - th) / 2.))
        return img.crop((left, top, left + tw, top + th))

    def randomize_parameters(self):
        # Deterministic transform; nothing to randomise.
        pass
class RandomHorizontalFlip(object):
    """Horizontally flip the given PIL.Image with probability 0.5.

    The decision is drawn once in ``randomize_parameters`` so every frame of
    a clip shares the same outcome. With ``inv=True`` a flipped image is
    additionally colour-inverted (flow convention).
    """

    def __call__(self, img, inv, flow):
        if self.p < 0.5:
            flipped = img.transpose(Image.FLIP_LEFT_RIGHT)
            if inv is True:
                flipped = ImageOps.invert(flipped)
            return flipped
        return img

    def randomize_parameters(self):
        self.p = random.random()
class MultiScaleCornerCrop(object):
    """Randomly-scaled corner/centre crop, resized to a fixed square.

    ``randomize_parameters`` draws one scale from *scales* and one position
    from {centre, four corners}; ``__call__`` then crops accordingly and
    resizes the crop to (size, size).

    Args:
        scales: candidate fractions of the image's shorter edge.
        size: edge length of the final square output.
        interpolation: PIL resampling filter. Default: PIL.Image.BILINEAR
    """

    def __init__(self, scales, size, interpolation=Image.BILINEAR):
        self.scales = scales
        self.size = size
        self.interpolation = interpolation
        self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br']

    def __call__(self, img, inv, flow):
        shorter_edge = min(img.size[0], img.size[1])
        crop_size = int(shorter_edge * self.scale)
        width = img.size[0]
        height = img.size[1]
        if self.crop_position == 'c':
            half = crop_size // 2
            cx = width // 2
            cy = height // 2
            box = (cx - half, cy - half, cx + half, cy + half)
        elif self.crop_position == 'tl':
            box = (0, 0, crop_size, crop_size)
        elif self.crop_position == 'tr':
            # NOTE(review): y1 = 1 (not 0) reproduces the original code;
            # looks like an off-by-one carried over from upstream — confirm.
            box = (width - crop_size, 1, width, crop_size)
        elif self.crop_position == 'bl':
            # NOTE(review): x1 = 1 (not 0), as in the original — confirm.
            box = (1, height - crop_size, crop_size, height)
        elif self.crop_position == 'br':
            box = (width - crop_size, height - crop_size, width, height)
        img = img.crop(box)
        return img.resize((self.size, self.size), self.interpolation)

    def randomize_parameters(self):
        self.scale = self.scales[random.randint(0, len(self.scales) - 1)]
        self.crop_position = self.crop_positions[random.randint(0, len(self.crop_positions) - 1)]
class FiveCrops(object):
    """Crop the given PIL.Image to randomly selected size.
    A crop of size is selected from scales of the original size.
    A position of cropping is randomly selected from 4 corners and 1 center.
    This crop is finally resized to given size.
    Args:
        scales: cropping scales of the original size
        size: size of the smaller edge
        interpolation: Default: PIL.Image.BILINEAR
    """
    def __init__(self, size, mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0], interpolation=Image.BILINEAR, tenCrops=False):
        # NOTE(review): mutable default arguments (mean/std lists) are shared
        # across instances; harmless here since they are only read.
        self.size = size
        self.interpolation = interpolation
        self.mean = mean
        self.std = std
        self.to_Tensor = ToTensor()
        self.normalize = Normalize(self.mean, self.std)
        # tenCrops=True additionally returns the horizontal flip of each crop.
        self.tenCrops = tenCrops
    def __call__(self, img, inv, flow):
        # Deterministic test-time transform: 5 fixed crops (centre + 4
        # corners) resized to (size, size), tensorised and normalised.
        # print(img.size[0])
        crop_size = self.size
        image_width = img.size[0]
        image_height = img.size[1]
        crop_positions = []
        # center
        center_x = image_width // 2
        center_y = image_height // 2
        box_half = crop_size // 2
        x1 = center_x - box_half
        y1 = center_y - box_half
        x2 = center_x + box_half
        y2 = center_y + box_half
        crop_positions += [[x1, y1, x2, y2]]
        # tl
        x1 = 0
        y1 = 0
        x2 = crop_size
        y2 = crop_size
        crop_positions += [[x1, y1, x2, y2]]
        # tr
        # NOTE(review): y1 = 1 here (and x1 = 1 in 'bl' below) look like
        # off-by-one quirks carried over from upstream — confirm intended.
        x1 = image_width - crop_size
        y1 = 1
        x2 = image_width
        y2 = crop_size
        crop_positions += [[x1, y1, x2, y2]]
        # bl
        x1 = 1
        y1 = image_height - crop_size
        x2 = crop_size
        y2 = image_height
        crop_positions += [[x1, y1, x2, y2]]
        # br
        x1 = image_width - crop_size
        y1 = image_height - crop_size
        x2 = image_width
        y2 = image_height
        crop_positions += [[x1, y1, x2, y2]]
        cropped_imgs = [img.crop(crop_positions[i]).resize((self.size, self.size), self.interpolation) for i in range(5)]
        # cropped_imgs = [img.resize(self.size, self.size, self.interpolation) for img in cropped_imgs]
        if self.tenCrops is True:
            # Flipped copies of flow images are also colour-inverted so the
            # horizontal component keeps a consistent sign convention.
            if inv is True:
                flipped_imgs = [ImageOps.invert(cropped_imgs[i].transpose(Image.FLIP_LEFT_RIGHT)) for i in range(5)]
            else:
                flipped_imgs = [cropped_imgs[i].transpose(Image.FLIP_LEFT_RIGHT) for i in range(5)]
            cropped_imgs += flipped_imgs
            # cropped_imgs.append(img1.transpose(Image.FLIP_LEFT_RIGHT))
        tensor_imgs = [self.to_Tensor(img, inv, flow) for img in cropped_imgs]
        normalized_imgs = [self.normalize(img, inv, flow) for img in tensor_imgs]
        fiveCropImgs = torch.stack(normalized_imgs, 0)
        # Shape: (5, C, H, W), or (10, C, H, W) when tenCrops is enabled.
        return fiveCropImgs
    def randomize_parameters(self):
        # Deterministic test-time transform; nothing to randomise.
        pass
class TenCrops(object):
    """Ten-crop evaluation transform: the five standard crops plus their
    horizontal flips, returned as one stacked tensor.

    Implemented by delegating to ``FiveCrops`` with ``tenCrops=True``.

    Args:
        size: edge length of each square crop.
        mean / std: normalisation statistics applied to every crop.
        interpolation: PIL resampling filter. Default: PIL.Image.BILINEAR
    """

    def __init__(self, size, mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0], interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation
        self.mean = mean
        self.std = std
        self.fiveCrops = FiveCrops(self.size, self.mean, self.std, self.interpolation, True)

    def __call__(self, img, inv, flow):
        return self.fiveCrops(img, inv, flow)

    def randomize_parameters(self):
        # Deterministic test-time transform; nothing to randomise.
        pass
class FlippedImagesTest(object):
"""Image and its horizontally flipped versions
"""
def __init__(self, mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]):
self.mean = mean
self.std = std
self.to_Tensor = ToTensor()
self.normalize = Normalize(self.mean, self.std)
def __call__(self, img, inv, flow):
# print(img.size[0])
img_flipped = img.transpose(Image.FLIP_LEFT_RIGHT)
if inv is True:
img_flipped = ImageOps.invert(img_flipped)
# center
tensor_img = self.to_Tensor(img, inv, flow)
tensor_img_flipped = self.to_Tensor(img_flipped, inv, flow)
normalized_img = self.normalize(tensor_img, inv, flow)
normalized_img_flipped = self.normalize(tensor_img_flipped, inv, flow)
horFlippedTest_imgs = [normalized_img, normalized_img_flipped]
horFlippedTest_imgs = torch.stack(horFlippedTest_imgs, 0)
return horFlippedTest_imgs
def randomize_parameters(self):
pass | 13,813 | 31.734597 | 121 | py |
AutoCAT | AutoCAT-main/src/models/dnn_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import ModelConfigDict, TensorType
from models.dnn import DNNEncoder
class DNNModel(TorchModelV2, nn.Module):
    """Residual-MLP RLlib model over windowed cache-attack observations.

    Each observation row is (latency, victim_access, action, step); the first
    two fields are one-hot encoded, the last two embedded. A value of -1 in
    any field marks padding and encodes to all zeros. The whole window is
    flattened and passed through a DNNEncoder backbone shared by the policy
    head (``linear_a``) and the value head (``linear_v``).
    """

    def __init__(self, obs_space: gym.spaces.Space,
                 action_space: gym.spaces.Space, num_outputs: int,
                 model_config: ModelConfigDict, name: str, **kwargs) -> None:
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        # Direct kwargs take precedence over the RLlib-provided config dict.
        cfg = kwargs if len(kwargs) > 0 else model_config["custom_model_config"]
        self.latency_dim = cfg["latency_dim"]
        self.victim_acc_dim = cfg["victim_acc_dim"]
        self.action_dim = cfg["action_dim"]
        self.step_dim = cfg["step_dim"]
        self.window_size = cfg["window_size"]
        self.action_embed_dim = cfg["action_embed_dim"]
        self.step_embed_dim = cfg["step_embed_dim"]
        # Per-step feature width times window size (window is flattened).
        self.input_dim = (self.latency_dim + self.victim_acc_dim +
                          self.action_embed_dim + self.step_embed_dim) * self.window_size
        self.hidden_dim = cfg["hidden_dim"]
        self.output_dim = num_outputs
        self.num_blocks = cfg.get("num_blocks", 1)
        self.action_embed = nn.Embedding(self.action_dim,
                                         self.action_embed_dim)
        self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
        self.backbone = DNNEncoder(self.input_dim, self.hidden_dim,
                                   self.hidden_dim, self.num_blocks)
        self.linear_a = nn.Linear(self.hidden_dim, self.output_dim)  # policy head
        self.linear_v = nn.Linear(self.hidden_dim, 1)                # value head
        self._device = None
        self._features = None  # backbone output cached for value_function()

    def make_one_hot(self, src: torch.Tensor,
                     num_classes: int) -> torch.Tensor:
        """One-hot encode *src*; entries equal to -1 become all-zero rows."""
        pad = (src == -1)
        encoded = F.one_hot(src.masked_fill(pad, 0), num_classes)
        return encoded.masked_fill(pad.unsqueeze(-1), 0.0)

    def make_embedding(self, src: torch.Tensor,
                       embed: nn.Embedding) -> torch.Tensor:
        """Embed *src*; entries equal to -1 become all-zero vectors."""
        pad = (src == -1)
        embedded = embed(src.masked_fill(pad, 0))
        return embedded.masked_fill(pad.unsqueeze(-1), 0.0)

    @override(TorchModelV2)
    def forward(self, input_dict: Dict[str,
                                       TensorType], state: List[TensorType],
                seq_lens: TensorType) -> Tuple[TensorType, List[TensorType]]:
        """Encode the observation window and return policy logits."""
        if self._device is None:
            self._device = next(self.parameters()).device
        obs = input_dict["obs"].to(self._device).to(torch.int64)
        assert obs.dim() == 3
        batch_size = obs.size(0)
        latency, victim, action, step = torch.unbind(obs, dim=-1)
        features = torch.cat((
            self.make_one_hot(latency, self.latency_dim),
            self.make_one_hot(victim, self.victim_acc_dim),
            self.make_embedding(action, self.action_embed),
            self.make_embedding(step, self.step_embed),
        ), dim=-1)
        hidden = self.backbone(features.view(batch_size, -1))
        self._features = hidden
        return self.linear_a(hidden), state

    @override(TorchModelV2)
    def value_function(self) -> TensorType:
        """Return the value estimate from the features cached by forward()."""
        assert self._features is not None
        return self.linear_v(self._features).squeeze(1)
# Make this model selectable from RLlib configs under the name "dnn_model".
ModelCatalog.register_custom_model("dnn_model", DNNModel)
| 4,084 | 36.477064 | 88 | py |
AutoCAT | AutoCAT-main/src/models/transformer_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import ModelConfigDict, TensorType
from models.dnn import DNNEncoder
class TransformerModel(TorchModelV2, nn.Module):
    """Transformer-encoder RLlib model over windowed cache-attack observations.

    Each observation row is (latency, victim_access, action, step); the first
    two fields are one-hot encoded, the last two embedded. A value of -1 in
    any field marks padding and encodes to all zeros. Unlike the MLP model,
    the window is treated as a sequence: each position is projected to
    ``hidden_dim``, run through a Transformer encoder, and mean-pooled over
    the sequence dimension for the policy and value heads.
    """

    def __init__(self, obs_space: gym.spaces.Space,
                 action_space: gym.spaces.Space, num_outputs: int,
                 model_config: ModelConfigDict, name: str, **kwargs) -> None:
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        # Direct kwargs take precedence over the RLlib-provided config dict.
        cfg = kwargs if len(kwargs) > 0 else model_config["custom_model_config"]
        self.latency_dim = cfg["latency_dim"]
        self.victim_acc_dim = cfg["victim_acc_dim"]
        self.action_dim = cfg["action_dim"]
        self.step_dim = cfg["step_dim"]
        self.window_size = cfg["window_size"]
        self.action_embed_dim = cfg["action_embed_dim"]
        self.step_embed_dim = cfg["step_embed_dim"]
        # Per-position feature width (the window stays a sequence here).
        self.input_dim = (self.latency_dim + self.victim_acc_dim +
                          self.action_embed_dim + self.step_embed_dim)
        self.hidden_dim = cfg["hidden_dim"]
        self.output_dim = num_outputs
        self.num_blocks = cfg.get("num_blocks", 1)
        self.action_embed = nn.Embedding(self.action_dim,
                                         self.action_embed_dim)
        self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
        self.linear_i = nn.Linear(self.input_dim, self.hidden_dim)
        # self.linear_o = nn.Linear(self.hidden_dim * self.window_size,
        #                           self.hidden_dim)
        encoder_layer = nn.TransformerEncoderLayer(d_model=self.hidden_dim,
                                                   nhead=8)
        self.encoder = nn.TransformerEncoder(encoder_layer, self.num_blocks)
        self.linear_a = nn.Linear(self.hidden_dim, self.output_dim)  # policy head
        self.linear_v = nn.Linear(self.hidden_dim, 1)                # value head
        self._device = None
        self._features = None  # pooled features cached for value_function()

    def make_one_hot(self, src: torch.Tensor,
                     num_classes: int) -> torch.Tensor:
        """One-hot encode *src*; entries equal to -1 become all-zero rows."""
        pad = (src == -1)
        encoded = F.one_hot(src.masked_fill(pad, 0), num_classes)
        return encoded.masked_fill(pad.unsqueeze(-1), 0.0)

    def make_embedding(self, src: torch.Tensor,
                       embed: nn.Embedding) -> torch.Tensor:
        """Embed *src*; entries equal to -1 become all-zero vectors."""
        pad = (src == -1)
        embedded = embed(src.masked_fill(pad, 0))
        return embedded.masked_fill(pad.unsqueeze(-1), 0.0)

    @override(TorchModelV2)
    def forward(self, input_dict: Dict[str,
                                       TensorType], state: List[TensorType],
                seq_lens: TensorType) -> Tuple[TensorType, List[TensorType]]:
        """Encode the observation window and return policy logits."""
        if self._device is None:
            self._device = next(self.parameters()).device
        obs = input_dict["obs"].to(self._device).to(torch.int64)
        assert obs.dim() == 3
        latency, victim, action, step = torch.unbind(obs, dim=-1)
        seq = torch.cat((
            self.make_one_hot(latency, self.latency_dim),
            self.make_one_hot(victim, self.victim_acc_dim),
            self.make_embedding(action, self.action_embed),
            self.make_embedding(step, self.step_embed),
        ), dim=-1)
        # (batch, window, feat) -> (window, batch, hidden): the encoder
        # expects the sequence dimension first.
        seq = self.linear_i(seq).transpose(0, 1).contiguous()
        # h = self.linear_o(h.view(batch_size, -1))
        pooled = self.encoder(seq).mean(dim=0)
        self._features = pooled
        return self.linear_a(pooled), state

    @override(TorchModelV2)
    def value_function(self) -> TensorType:
        """Return the value estimate from the features cached by forward()."""
        assert self._features is not None
        return self.linear_v(self._features).squeeze(1)
# Make this model selectable from RLlib configs under the name "transformer_model".
ModelCatalog.register_custom_model("transformer_model", TransformerModel)
| 4,470 | 36.889831 | 77 | py |
AutoCAT | AutoCAT-main/src/models/backbone.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from models.dnn import DNNEncoder
class CacheBackbone(nn.Module):
    """Encoder backbone for windowed cache-attack observations.

    Each observation row is (latency, victim_access, action, step); the first
    two fields are one-hot encoded, the last two embedded. A value of -1 in
    any field marks padding and encodes to all zeros. The window is flattened
    and passed through a residual DNN encoder.
    """

    def __init__(self,
                 latency_dim: int,
                 victim_acc_dim: int,
                 action_dim: int,
                 step_dim: int,
                 window_size: int,
                 action_embed_dim: int,
                 step_embed_dim: int,
                 hidden_dim: int,
                 num_blocks: int = 1) -> None:
        super().__init__()
        self.latency_dim = latency_dim
        self.victim_acc_dim = victim_acc_dim
        self.action_dim = action_dim
        self.step_dim = step_dim
        self.window_size = window_size
        self.action_embed_dim = action_embed_dim
        self.step_embed_dim = step_embed_dim
        # Per-step feature width times window size (window is flattened).
        self.input_dim = (self.latency_dim + self.victim_acc_dim +
                          self.action_embed_dim +
                          self.step_embed_dim) * self.window_size
        self.hidden_dim = hidden_dim
        self.num_blocks = num_blocks
        self.action_embed = nn.Embedding(self.action_dim,
                                         self.action_embed_dim)
        self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
        self.dnn_encoder = DNNEncoder(self.input_dim, self.hidden_dim,
                                      self.hidden_dim, self.num_blocks)

    def make_one_hot(self, src: torch.Tensor,
                     num_classes: int) -> torch.Tensor:
        """One-hot encode *src*; entries equal to -1 become all-zero rows."""
        pad = (src == -1)
        encoded = F.one_hot(src.masked_fill(pad, 0), num_classes)
        return encoded.masked_fill(pad.unsqueeze(-1), 0.0)

    def make_embedding(self, src: torch.Tensor,
                       embed: nn.Embedding) -> torch.Tensor:
        """Embed *src*; entries equal to -1 become all-zero vectors."""
        pad = (src == -1)
        embedded = embed(src.masked_fill(pad, 0))
        return embedded.masked_fill(pad.unsqueeze(-1), 0.0)

    def forward(self, obs: torch.Tensor) -> torch.Tensor:
        """Encode a (batch, window, 4) integer observation tensor."""
        obs = obs.to(torch.int64)
        assert obs.dim() == 3
        batch_size = obs.size(0)
        latency, victim, action, step = torch.unbind(obs, dim=-1)
        features = torch.cat((
            self.make_one_hot(latency, self.latency_dim),
            self.make_one_hot(victim, self.victim_acc_dim),
            self.make_embedding(action, self.action_embed),
            self.make_embedding(step, self.step_embed),
        ), dim=-1)
        return self.dnn_encoder(features.view(batch_size, -1))
| 2,768 | 32.768293 | 76 | py |
AutoCAT | AutoCAT-main/src/models/dnn.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
    """A two-layer MLP with a skip connection: y = x + f(x), width `dim`."""

    def __init__(self, dim: int) -> None:
        super(ResidualBlock, self).__init__()
        self.dim = dim
        # ReLU -> Linear -> ReLU -> Linear, all at constant width `dim`.
        self.layers = nn.Sequential(
            nn.ReLU(),
            nn.Linear(dim, dim),
            nn.ReLU(),
            nn.Linear(dim, dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Residual connection around the small MLP.
        return x + self.layers(x)
class DNNEncoder(nn.Module):
    """MLP encoder: input Linear, `num_blocks` residual blocks, ReLU, output Linear."""

    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_blocks: int = 1) -> None:
        super(DNNEncoder, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.num_blocks = num_blocks
        # Same module-creation order as a hand-rolled append loop, so
        # parameter initialization order is unchanged.
        residual_stack = [ResidualBlock(hidden_dim) for _ in range(num_blocks)]
        self.layers = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            *residual_stack,
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.layers(x)
| 1,521 | 29.44 | 73 | py |
AutoCAT | AutoCAT-main/src/rllib/run_gym_rllib_example_multicore_largel3.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.11
Function: An example rllib training script
'''
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
import signal
import numpy as np
if __name__ == "__main__":
    # Fixes in this block:
    # 1. Removed dataset-dump residue that was fused onto the final line
    #    ("| 3,186 | 30.87 | 95 | py |"), which made the file unparseable.
    # 2. The original called ray.init(...) and then immediately shut it
    #    down when initialized, discarding the configured session. Shut
    #    down any stale session first, then initialize.
    if ray.is_initialized():
        ray.shutdown()
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
    tune.register_env("cache_guessing_game_env", CacheGuessingGameEnv)
    # PPO + environment configuration: two per-core L1 caches with next-line
    # prefetchers sharing a larger L2.
    config = {
        'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
        'env_config': {
            'verbose': 1,
            #'super_verbose': 1,
            "rerandomize_victim": False,
            "force_victim_hit": False,
            'flush_inst': False,
            "allow_victim_multi_access": True,#False,
            "allow_empty_victim_access": False,
            "attacker_addr_s": 8,
            "attacker_addr_e": 23,#4,#11,#15,
            "victim_addr_s": 0,
            "victim_addr_e": 7,#7,
            "reset_limit": 1,
            "cache_configs": {
                # YAML config file for cache simulaton
                "architecture": {
                  "word_size": 1, #bytes
                  "block_size": 1, #bytes
                  "write_back": True
                },
                "cache_1": {#required
                  "blocks": 4,#4,
                  "associativity": 1,
                  "hit_time": 1, #cycles
                  "prefetcher": "nextline"
                },
                "cache_1_core_2": {#required
                  "blocks": 4,#4,
                  "associativity": 1,
                  "hit_time": 1, #cycles
                  "prefetcher": "nextline"
                },
                "cache_2": {
                  "blocks": 16,
                  "associativity": 2,
                  "hit_time": 16,
                },
                "mem": {#required
                  "hit_time": 1000 #cycles
                }
            }
        },
        #'gamma': 0.9,
        'num_gpus': 1,
        'num_workers': 1,
        'num_envs_per_worker': 1,
        #'entropy_coeff': 0.001,
        #'num_sgd_iter': 5,
        #'vf_loss_coeff': 1e-05,
        'model': {
            #'custom_model': 'test_model',#'rnn',
            #'max_seq_len': 20,
            #'custom_model_config': {
            #    'cell_size': 32
            #    }
        },
        'framework': 'torch',
    }
    #tune.run(PPOTrainer, config=config)
    trainer = PPOTrainer(config=config)

    def signal_handler(sig, frame):
        # Save a checkpoint on Ctrl+C before exiting.
        print('You pressed Ctrl+C!')
        checkpoint = trainer.save()
        print("checkpoint saved at", checkpoint)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    # Train indefinitely; interrupt with Ctrl+C to checkpoint and exit.
    while True:
        result = trainer.train()
AutoCAT | AutoCAT-main/src/rllib/run_gym_rllib_example.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.11
Function: An example rllib training script
'''
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
import signal
import numpy as np
if __name__ == "__main__":
    # Fixes in this block:
    # 1. Removed dataset-dump residue fused onto the final line
    #    ("| 2,755 | ... | py |"), which made the file unparseable.
    # 2. The original called ray.init(...) and then immediately shut it
    #    down when initialized, discarding the configured session. Shut
    #    down any stale session first, then initialize.
    if ray.is_initialized():
        ray.shutdown()
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
    tune.register_env("cache_guessing_game_env", CacheGuessingGameEnv)
    # PPO + environment configuration: single 4-way L1 cache with a
    # next-line prefetcher; the victim can be empty-accessed.
    config = {
        'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
        'env_config': {
            'verbose': 1,
            "rerandomize_victim": False,
            "force_victim_hit": False,
            'flush_inst': False,
            "allow_victim_multi_access": True,#False,
            "allow_empty_victim_access": True,
            "attacker_addr_s": 0,
            "attacker_addr_e": 8,#4,#11,#15,
            "victim_addr_s": 0,
            "victim_addr_e": 0,#7,
            "reset_limit": 1,
            "cache_configs": {
                # YAML config file for cache simulaton
                "architecture": {
                  "word_size": 1, #bytes
                  "block_size": 1, #bytes
                  "write_back": True
                },
                "cache_1": {#required
                  "blocks": 4,#4,
                  "associativity": 4,
                  "hit_time": 1, #cycles
                  "prefetcher": "nextline"
                },
                "mem": {#required
                  "hit_time": 1000 #cycles
                }
            }
        },
        #'gamma': 0.9,
        'num_gpus': 1,
        'num_workers': 1,
        'num_envs_per_worker': 1,
        #'entropy_coeff': 0.001,
        #'num_sgd_iter': 5,
        #'vf_loss_coeff': 1e-05,
        'model': {
            #'custom_model': 'test_model',#'rnn',
            #'max_seq_len': 20,
            #'custom_model_config': {
            #    'cell_size': 32
            #    }
        },
        'framework': 'torch',
    }
    #tune.run(PPOTrainer, config=config)
    trainer = PPOTrainer(config=config)

    def signal_handler(sig, frame):
        # Save a checkpoint on Ctrl+C before exiting.
        print('You pressed Ctrl+C!')
        checkpoint = trainer.save()
        print("checkpoint saved at", checkpoint)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    # Train indefinitely; interrupt with Ctrl+C to checkpoint and exit.
    while True:
        result = trainer.train()
AutoCAT | AutoCAT-main/src/rllib/run_gym_rllib_example_multicore_flush.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.11
Function: An example rllib training script
'''
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
import signal
import numpy as np
if __name__ == "__main__":
    # Fix: the original called ray.init(...) and then immediately shut it
    # down when initialized, discarding the configured session. Shut down
    # any stale session first, then initialize.
    if ray.is_initialized():
        ray.shutdown()
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
    tune.register_env("cache_guessing_game_env", CacheGuessingGameEnv)
    # PPO + environment configuration: direct-mapped L1 with LRU
    # replacement and clflush-style instructions enabled.
    config = {
        'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
        'env_config': {
            'verbose': 1,
            #'super_verbose': 1,
            "rerandomize_victim": False,
            "force_victim_hit": False,
            'flush_inst': True,
            "allow_victim_multi_access": False,
            "allow_empty_victim_access": False,
            "attacker_addr_s": 0,
            "attacker_addr_e": 3,#4,#11,#15,
            "victim_addr_s": 0,
            "victim_addr_e": 3,#7,
            "reset_limit": 1,
            "cache_configs": {
                # YAML config file for cache simulaton
                "architecture": {
                  "word_size": 1, #bytes
                  "block_size": 1, #bytes
                  "write_back": True
                },
                "cache_1": {#required
                  "blocks": 4,#4,
                  "associativity": 1,
                  "hit_time": 1, #cycles
                  "rep_policy": "lru"
                  #"prefetcher": "nextline"
                },
                ##"cache_1_core_2": {#required
                ##  "blocks": 4,#4,
                ##  "associativity": 1,
                ##  "hit_time": 1, #cycles
                ##  "prefetcher": "nextline"
                ##},
                ##"cache_2": {
                ##  "blocks": 4,
                ##  "associativity": 1,
                ##  "hit_time": 16,
                ##},
                "mem": {#required
                  "hit_time": 1000 #cycles
                }
            }
        },
        #'gamma': 0.9,
        'num_gpus': 1,
        'num_workers': 1,
        'num_envs_per_worker': 1,
        #'entropy_coeff': 0.001,
        #'num_sgd_iter': 5,
        #'vf_loss_coeff': 1e-05,
        'model': {
            #'custom_model': 'test_model',#'rnn',
            #'max_seq_len': 20,
            #'custom_model_config': {
            #    'cell_size': 32
            #    }
        },
        'framework': 'torch',
    }
    #tune.run(PPOTrainer, config=config)
    trainer = PPOTrainer(config=config)

    def signal_handler(sig, frame):
        # Save a checkpoint on Ctrl+C before exiting.
        print('You pressed Ctrl+C!')
        checkpoint = trainer.save()
        print("checkpoint saved at", checkpoint)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    # Train indefinitely; interrupt with Ctrl+C to checkpoint and exit.
    while True:
        result = trainer.train()
| 3,239 | 30.764706 | 95 | py |
AutoCAT | AutoCAT-main/src/rllib/test_custom_policy_diversity_works.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# using ray 1.92 to run
# python 3.9
from ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy
from ray.rllib.agents.a3c.a3c_torch_policy import A3CTorchPolicy
from ray.rllib.agents.a3c.a2c import A2CTrainer
from ray.rllib.agents.ppo import PPOTrainer
import gym
import ray.tune as tune
from torch.nn import functional as F
from typing import Optional, Dict
import torch.nn as nn
import ray
from collections import deque
#from ray.rllib.agents.ppo.ppo_torch_policy import ValueNetworkMixin
from ray.rllib.evaluation.episode import MultiAgentEpisode
from ray.rllib.evaluation.postprocessing import compute_gae_for_sample_batch, \
Postprocessing
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
#from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.policy_template import build_policy_class
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import Deprecated
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_ops import apply_grad_clipping, sequence_mask
from ray.rllib.utils.typing import TrainerConfigDict, TensorType, \
PolicyID, LocalOptimizer
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
import copy
import numpy as np
import sys
import math
sys.path.append("../src")
torch, nn = try_import_torch()
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
from categorization_parser import *
def custom_init(policy: Policy, obs_space: gym.spaces.Space,
                action_space: gym.spaces.Space, config: TrainerConfigDict)->None:
    """Attach diversity-tracking state to the policy before training starts:
    a bounded history of past models and a training-step counter."""
    policy.past_len = 5
    policy.timestep = 0
    policy.past_models = deque(maxlen=policy.past_len)
def copy_model(model: ModelV2) -> ModelV2:
    """Return a fresh TorchModelV2 built with the same spec as `model`.

    Note: this constructs a new (re-initialized) model wrapper from the
    same spaces/config; it does not copy `model`'s weights.
    """
    # Bug fix: the result was assigned to a misspelled name
    # (`copdied_model`) while the function returned `copied_model`,
    # which raised NameError on every call.
    copied_model = TorchModelV2(
        obs_space = model.obs_space,
        action_space = model.action_space,
        num_outputs = model.num_outputs,
        model_config = model.model_config,
        name = 'copied')
    return copied_model
def compute_div_loss(policy: Policy, model: ModelV2,
                     dist_class: ActionDistribution,
                     train_batch: SampleBatch):
    """Average, time-decayed KL divergence between the current policy's action
    distribution and the distributions of models stored in
    `policy.past_models` on this batch. Returns 0 when the history is empty.
    """
    #original_weight = copy.deepcopy(policy.get_weights())
    logits, _ = model.from_batch(train_batch)
    values = model.value_function()
    valid_mask = torch.ones_like(values, dtype=torch.bool)
    # Current policy's action distribution on this batch.
    dist = dist_class(logits, model)
    #log_probs = dist.logp(train_batch[SampleBatch.ACTIONS])#.reshape(-1)
    print('log_probs')
    #print(log_probs)
    divs = []
    #div_metric = nn.KLDivLoss(size_average=False, reduce=False)
    # NOTE(review): div_metric is created here but never used below; the
    # divergence is computed via dist.kl() instead.
    div_metric = nn.KLDivLoss(reduction = 'batchmean')
    #div_metric = nn.CrossEntropyLoss()
    #if len(policy.past_models) > 1:
    #    assert(policy.past_models[0].state_dict() == policy.past_models[1].state_dict())
    for idx, past_model in enumerate(policy.past_models):
        #for idx, past_weights in enumerate(policy.past_weights):
        #temp_policy = pickle.loads(pickle.dumps(policy))
        #temp_policy.set_weights(past_weights)
        #temp_model = pickle.loads(pickle.dumps(policy.model))
        #temp_model.load_state_dict(past_weights)
        #past_model.load_state_dict(policy.past_weights[i])
        #past_model = temp_model.set_weights(past_weights)
        #assert(False)
        past_logits, _ = past_model.from_batch(train_batch)
        past_values = past_model.value_function()
        past_valid_mask = torch.ones_like(past_values, dtype=torch.bool)
        # NOTE(review): the past distribution is built from the batch's
        # stored ACTION_DIST_INPUTS, not from past_logits computed above —
        # confirm this is intentional.
        past_dist = dist_class(train_batch[SampleBatch.ACTION_DIST_INPUTS], past_model)
        # Weight the KL term by how long ago the past model was saved:
        # atan(age) grows with age, exp(-age/saved_step) decays it.
        div = math.atan( - policy.timestep_array[idx] + policy.timestep ) * math.exp( ( policy.timestep_array[idx] - policy.timestep ) / policy.timestep_array[idx]) * dist.kl(past_dist)
        ###print(div)
        ###print(dist)
        ###print(past_dist)
        ###print(train_batch[SampleBatch.ACTION_DIST_INPUTS])
        #print(train_batch[SampleBatch.ACTIONS])
        #print(log_probs)
        #print(past_log_probs)
        #print(train_batch[Postprocessing.ADVANTAGES])
        #print(log_probs * train_batch[Postprocessing.ADVANTAGES])
        #print(past_log_probs * train_batch[Postprocessing.ADVANTAGES])
        #div = dist.multi_kl(past_dist) * train_batch[Postprocessing.ADVANTAGES]
        #assert(
        if idx == 0 and True:#policy.timestep % 10 == 0:
            print('past_model.state_dict()')
            #print(past_model.state_dict())
            print('model.state_dict()')
            #print(model.state_dict())
        #div = past_dist.multi_kl(dist)
        print('div')
        #print(div)
        # Reduce the per-sample KL to a scalar for this past model.
        div = div.sum().mean(0)
        divs.append(div)
    print('divs')
    #print(divs)
    div_loss = 0
    div_loss_orig = 0
    for div in divs:
        div_loss += div
        div_loss_orig += div
    if len(policy.past_models) > 0:
        div_loss = div_loss / len(policy.past_models)#policy.past_len
    print('len(policy.past_models)')
    print(len(policy.past_models))
    #policy.set_weights(original_weight)
    return div_loss
def compute_div_loss_weight(policy: Policy, weight,
                   dist_class: ActionDistribution,
                   train_batch: SampleBatch):
    """Variant of compute_div_loss that diversifies against stored weight
    snapshots (`policy.past_weights`) by temporarily loading each into the
    policy's own model.

    NOTE(review): `original_weight` is saved but the restore at the end is
    commented out, so the policy is left holding the last past weights —
    confirm whether this is intentional.
    """
    original_weight = copy.deepcopy(policy.get_weights())
    # Load the candidate weights so the current distribution reflects them.
    policy.set_weights(weight)
    model = policy.model
    logits, _ = model.from_batch(train_batch)
    values = model.value_function()
    valid_mask = torch.ones_like(values, dtype=torch.bool)
    dist = dist_class(logits, model)
    log_probs = dist.logp(train_batch[SampleBatch.ACTIONS])#.reshape(-1)
    print('log_probs')
    #print(log_probs)
    divs = []
    div_metric = nn.KLDivLoss(size_average=False, reduce=False)
    #div_metric = nn.CrossEntropyLoss()
    #if len(policy.past_models) > 1:
    #    assert(policy.past_models[0].state_dict() == policy.past_models[1].state_dict())
    for idx, past_weight in enumerate(policy.past_weights):
        #assert(False)
        # Swap in the past snapshot to evaluate its distribution on the batch.
        policy.set_weights(past_weight)
        past_model = policy.model
        past_logits, _ = past_model.from_batch(train_batch)
        past_values = past_model.value_function()
        past_valid_mask = torch.ones_like(past_values, dtype=torch.bool)
        past_dist = dist_class(past_logits, past_model)
        past_log_probs = past_dist.logp(train_batch[SampleBatch.ACTIONS])#.reshape(-1)
        # KL between advantage-weighted log-probs of current vs. past weights.
        div = div_metric(log_probs * train_batch[Postprocessing.ADVANTAGES], past_log_probs* train_batch[Postprocessing.ADVANTAGES])
        #div = div_metric(log_probs, past_log_probs) * train_batch[Postprocessing.ADVANTAGES]
        #div = dist.multi_kl(past_dist) * train_batch[Postprocessing.ADVANTAGES]
        #assert(
        if idx == 0 and True:#policy.timestep % 10 == 0:
            print('past_model.state_dict()')
            #print(past_model.state_dict())
            print('model.state_dict()')
            #print(model.state_dict())
        #div = past_dist.multi_kl(dist)
        print('div')
        #print(div)
        div = div.mean(0)
        divs.append(div)
    print('divs')
    #print(divs)
    div_loss = 0
    div_loss_orig = 0
    for div in divs:
        div_loss += div
        div_loss_orig += div
    if len(policy.past_weights) > 0:
        div_loss = div_loss / len(policy.past_weights)#policy.past_len
    #print('len(policy.past_weights)')
    #print(len(policy.past_weights))
    #policy.set_weights(original_weight)
    return div_loss
import pickle
def custom_loss(policy: Policy, model: ModelV2,
                dist_class: ActionDistribution,
                train_batch: SampleBatch) -> TensorType:
    """A3C-style loss (policy gradient + value error - entropy) minus a
    heavily weighted diversity bonus against past model snapshots.

    Every 100 calls a pickled copy of the current model is pushed onto
    policy.past_models for future diversity comparisons.
    """
    logits, _ = model.from_batch(train_batch)
    values = model.value_function()
    policy.timestep += 1
    #if len(policy.devices) > 1:
    # copy weights of main model (tower-0) to all other towers type
    if policy.timestep % 100 == 0:
        # Snapshot the current model (deep copy via pickle round-trip).
        copied_model = pickle.loads(pickle.dumps(model))
        copied_model.load_state_dict(model.state_dict())
        policy.past_models.append(copied_model)
    if policy.is_recurrent():
        # Mask out padded timesteps for recurrent models.
        B = len(train_batch[SampleBatch.SEQ_LENS])
        max_seq_len = logits.shape[0] // B
        mask_orig = sequence_mask(train_batch[SampleBatch.SEQ_LENS],
                                  max_seq_len)
        valid_mask = torch.reshape(mask_orig, [-1])
    else:
        valid_mask = torch.ones_like(values, dtype=torch.bool)
    dist = dist_class(logits, model)
    log_probs = dist.logp(train_batch[SampleBatch.ACTIONS]).reshape(-1)
    #print('log_probs')
    #print(log_probs)
    # Policy-gradient term: -sum(log pi(a|s) * advantage) over valid steps.
    pi_err = -torch.sum(
        torch.masked_select(log_probs * train_batch[Postprocessing.ADVANTAGES],
                            valid_mask))
    # Compute a value function loss.
    if policy.config["use_critic"]:
        value_err = 0.5 * torch.sum(
            torch.pow(
                torch.masked_select(
                    values.reshape(-1) -
                    train_batch[Postprocessing.VALUE_TARGETS], valid_mask),
                2.0))
    # Ignore the value function.
    else:
        value_err = 0.0
    entropy = torch.sum(torch.masked_select(dist.entropy(), valid_mask))
    div_loss = compute_div_loss(policy, model, dist_class, train_batch)
    # Subtracting div_loss (weight 1000) rewards divergence from past models.
    total_loss = (pi_err + value_err * policy.config["vf_loss_coeff"] -
                  entropy * policy.config["entropy_coeff"] - 1000 * div_loss )
    print('pi_err')
    #print(pi_err)
    print('value_err')
    #print(value_err)
    print('div_loss')
    print(div_loss)
    print('pi_err')
    print(pi_err)
    print('total_loss')
    print(total_loss)
    # Store values for stats function in model (tower), such that for
    # multi-GPU, we do not override them during the parallel loss phase.
    model.tower_stats["entropy"] = entropy
    model.tower_stats["pi_err"] = pi_err
    model.tower_stats["value_err"] = value_err
    return total_loss
# A3C policy variant wired to the diversity-aware loss and init above.
CustomPolicy = A3CTorchPolicy.with_updates(
    name="MyCustomA3CTorchPolicy",
    loss_fn=custom_loss,
    #make_model= make_model,
    before_init=custom_init)
# A2C trainer that always uses the custom A3C policy.
CustomTrainer = A2CTrainer.with_updates(
    get_policy_class=lambda _: CustomPolicy)
#PPOCustomPolicy = PPOTorchPolicy.with_updates(
#    name="MyCustomA3CTorchPolicy",
#    loss_fn=custom_loss,
#    #make_model= make_model,
#    before_init=custom_init)
from typing import Dict, List, Type, Union
from ray.rllib.utils.annotations import override
class CustomPPOTorchPolicy(PPOTorchPolicy):
    """PPO policy extended with a bounded history of past model snapshots,
    used to check whether a newly trained attack pattern duplicates an
    earlier one (agent-diversity bookkeeping)."""
    def __init__(self, observation_space, action_space, config):
        # History of pickled model snapshots and the training step at which
        # each was saved (both bounded to the last `past_len` entries).
        self.past_len = 10
        #self.categorization_parser = CategorizationParser()
        self.past_models = deque(maxlen =self.past_len)
        #self.past_weights = deque(maxlen= self.past_len)
        self.timestep = 0
        self.timestep_array = deque(maxlen=self.past_len)
        super(CustomPPOTorchPolicy, self).__init__(observation_space, action_space, config)
    #@override(PPOTorchPolicy)
    def loss(self, model: ModelV2, dist_class: Type[ActionDistribution],
             train_batch: SampleBatch, extern_trigger = True ) -> Union[TensorType, List[TensorType]]:
        """Standard PPO loss; the diversity term is currently disabled
        (div_loss is hard-coded to 0), so ret_loss equals the PPO loss."""
        #return custom_loss(self, model, dist_class, train_batch)
        self.timestep += 1
        # With the default extern_trigger=True this snapshot branch never
        # runs; snapshots are pushed externally via push_current_model().
        if self.timestep % 20 == 0 and extern_trigger == False:
            copied_model = pickle.loads(pickle.dumps(model))
            copied_model.load_state_dict(model.state_dict())
            self.past_models.append(copied_model)
        total_loss = PPOTorchPolicy.loss(self, model, dist_class, train_batch)
        #self.past_len
        div_loss = 0 #compute_div_loss(self, model, dist_class, train_batch)
        #div_loss = compute_div_loss_weight(self, copy.deepcopy(self.get_weights()), dist_class, train_batch)
        print('total_loss')
        print(total_loss)
        print('div_loss')
        print(div_loss)
        #assert(False)
        ret_loss = total_loss - 0.03 * div_loss
        return ret_loss
    '''
    new_loss = []
    if issubclass(type(total_loss),TensorType):
        return total_loss - compute_div_loss(self, model, dist_class, train_batch)
    else:
        for loss in total_loss:
            new_loss.append(loss - compute_div_loss(self, model, dist_class, train_batch))
        return new_loss
    '''
    def replay_agent(self, env):
        """Replay the deterministic policy against every victim address and
        return (guess accuracy, list of (victim_addr, actions, correct)).

        NOTE(review): `pprint` is used here but not imported in this file —
        confirm it is available at runtime.
        """
        # no cache randomization
        # rangomized inference ( 10 times)
        pattern_buffer = []
        num_guess = 0
        num_correct = 0
        for victim_addr in range(env.victim_address_min, env.victim_address_max + 1):
            for repeat in range(1):
                obs = env.reset(victim_address=victim_addr)
                action_buffer = []
                done = False
                while done == False:
                    print(f"-> Sending observation {obs}")
                    action = self.compute_single_action(obs, explore=False) # randomized inference
                    print(f"<- Received response {action}")
                    obs, reward, done, info = env.step(action)
                    action_buffer.append((action, obs[0]))
                # A positive terminal reward marks a correct guess.
                if reward > 0:
                    correct = True
                    num_correct += 1
                else:
                    correct = False
                num_guess += 1
                pattern_buffer.append((victim_addr, action_buffer, correct))
        pprint.pprint(pattern_buffer)
        return 1.0 * num_correct / num_guess, pattern_buffer
    def push_current_model(self):
        """Deep-copy the current model and record it (with the current
        training step) into the snapshot history."""
        #print('len(self.past_weights)')
        #print(len(self.past_weights))
        model = pickle.loads(pickle.dumps(self.model))
        model.load_state_dict(copy.deepcopy(self.model.state_dict()))
        self.past_models.append(model)
        self.timestep_array.append(self.timestep)
        #self.past_weights.append(copy.deepcopy(self.get_weights()))
        #self.past_weights.append(copy.deepcopy(agent.get_weights()))
        return
    #TODO(Mulong): is there an standard initialization condition???
    #def is_same_agent(self, weight1, weight2, env, trainer):
    def is_same_agent(self, model1, model2, env, trainer):
        """Compare the attack patterns produced by two model snapshots on
        every victim address; True if all patterns categorize the same.

        NOTE(review): after the first while-loop `done` is True, so the
        second loop never executes and `seq2` stays empty; also the second
        loop appends to `seq1`, not `seq2`. Looks like a latent bug —
        confirm intended behavior before relying on this comparison.
        """
        categorization_parser = CategorizationParser(env)
        original_state_dict = copy.deepcopy(self.model.state_dict())
        #original_weights = copy.deepcopy(self.get_weights())
        for victim_addr in range(env.victim_address_min, env.victim_address_max + 1):
            obs = env.reset(victim_address=victim_addr)
            #from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
            #pp = trainer.workers.local_worker().preprocessors[DEFAULT_POLICY_ID]
            #obs = pp.transform(obs)
            done = False
            #while done == False:
            #    self.model.load_state_dict(model1.state_dict())
            #    #self.set_weights(weight1)
            #    action1 = trainer.compute_single_action(obs, explore=False) # randomized inference
            #    self.model.load_state_dict(model2.state_dict())
            #    #self.set_weights(weight2)
            #    action2 = trainer.compute_single_action(obs, explore=False) # randomized inference
            #    if action1 != action2:
            #        self.model.load_state_dict(original_state_dict)
            #        #self.set_weights(original_weights)
            #        return False
            #    else:
            #        action = action1
            #    obs, reward, done, info = env.step(action)
            seq1 = []
            while done == False:
                self.model.load_state_dict(model1.state_dict())
                action1 = trainer.compute_single_action(obs, explore=False) # randomized inference
                seq1.append(action1)
                obs, reward, done, info = env.step(action1)
            seq2 = []
            while done == False:
                self.model.load_state_dict(model2.state_dict())
                action2 = trainer.compute_single_action(obs, explore=False) # randomized inference
                seq1.append(action2)
                obs, reward, done, info = env.step(action2)
            if categorization_parser.is_same_base_pattern(seq1, seq2) == False:
                return False
        self.model.load_state_dict(original_state_dict)
        #self.set_weights(original_weights)
        return True
    def existing_agent(self, env, trainer):
        """True if the current model reproduces the attack pattern of any
        stored past snapshot (i.e., the agent is not novel)."""
        print('existing_agent')
        current_model = pickle.loads(pickle.dumps(self.model))
        #current_weights = copy.deepcopy(self.get_weights())
        #current_model.load_state_dict(self.model.state_dict())
        for idx, past_model in enumerate(self.past_models):
            #for idx, past_weights in enumerate(self.past_weights):
            print(idx)
            if self.is_same_agent(current_model, past_model, env, trainer):
                #if self.is_same_agent(current_weights, past_weights, env, trainer):
                return True
        return False
# PPO trainer that always instantiates the diversity-aware custom policy.
PPOCustomTrainer = PPOTrainer.with_updates(
    get_policy_class=lambda _: CustomPPOTorchPolicy)
import models.dnn_model
#tune.run(CustomTrainer, config={"env": 'Frostbite-v0', "num_gpus":0})#, 'model': { 'custom_model': 'test_model' }})
tune.register_env("cache_guessing_game_env_fix", CacheGuessingGameEnv)#Fix)
# Two ways of training
# method 2b
# Tune/PPO run configuration: 8-way fully-associative L1, attacker address
# range 0-15, victim range 0-7, shaped rewards for the guessing game.
config = {
    'env': 'cache_guessing_game_env_fix', #'cache_simulator_diversity_wrapper',
    "evaluation_num_workers": 1,
    "evaluation_interval": 5,
    'env_config': {
        'verbose': 1,
        "force_victim_hit": False,
        'flush_inst': False,#True,
        "allow_victim_multi_access": True, #False,
        "attacker_addr_s": 0,
        "attacker_addr_e": 15,
        "victim_addr_s": 0,
        "victim_addr_e": 7,
        "reset_limit": 1,
        "length_violation_reward": -1,
        "double_victim_access_reward": -0.001, # must be large value if not allow victim multi access
        "victim_access_reward": -0.001,
        "correct_reward": 0.02,
        "wrong_reward": -1,
        "step_reward": -0.001,
        "cache_configs": {
            # YAML config file for cache simulaton
            "architecture": {
              "word_size": 1, #bytes
              "block_size": 1, #bytes
              "write_back": True
            },
            "cache_1": {#required
              "blocks": 8,
              "associativity": 8,
              "hit_time": 1 #cycles
            },
            "mem": {#required
              "hit_time": 1000 #cycles
            }
        }
    },
    #'gamma': 0.9,
    'num_gpus': 1,
    'num_workers': 1,
    'num_envs_per_worker': 1,
    'lr': 1e-3, # decrease lr if unstable
    #'entropy_coeff': 0.001,
    #'num_sgd_iter': 5,
    #'vf_loss_coeff': 1e-05,
    'model': {
        ### 'custom_model': 'dnn_model',#'rnn',
        ### 'custom_model_config': {
        ###     'window_size': 40, #16, #need to match
        ###     'latency_dim': 3,
        ###     'victim_acc_dim': 2,
        ###     'action_dim': 200, # need to be precise
        ###     'step_dim': 80,#40, # need to be precise
        ###     'action_embed_dim': 32,#,8, # can be increased 32
        ###     'step_embed_dim': 6,#4, # can be increased less than 16
        ###     'hidden_dim': 32,
        ###     'num_blocks': 1
        ### }
    },
    'framework': 'torch',
}
if __name__ == "__main__":
    tune.run(PPOCustomTrainer, config=config)#config={"env": 'Freeway-v0', "num_gpus":1})
| 19,897 | 38.558648 | 185 | py |
AutoCAT | AutoCAT-main/src/rllib/cache_query_env.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.12
Usage: wrapper for cachequery that interact with the gym environment
the observation space and action space should be the same as the original autocat
'''
from collections import deque
import signal
import numpy as np
import random
import os
import yaml, logging
import sys
from itertools import permutations
import gym
from gym import spaces
import os, cmd, sys, getopt, re, subprocess, configparser
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
#sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))+ '/third_party/cachequery/tool/')
from cache_query_wrapper import CacheQueryWrapper as CacheQuery
class CacheQueryEnv(gym.Env):
    """Gym wrapper that pairs the simulated CacheGuessingGameEnv with a real
    hardware CacheQuery backend.

    The agent's action space is extended by one "reveal" action: latencies
    stay masked (-1) until the agent reveals, at which point the accumulated
    access sequence is executed on real hardware via CacheQuery and the
    measured hit/miss results are written back into the observation. After
    revealing, the agent must guess the victim address.
    """
    def __init__(self, env_config):
        #sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
        self.env = CacheGuessingGameEnv(env_config)
        self.action_space_size = self.env.action_space.n + 1 # increase the action space by one
        self.action_space = spaces.Discrete(self.action_space_size)
        self.observation_space = self.env.observation_space
        self.revealed = False # initially
        done = False
        reward = 0
        info = {}
        state = self.env.reset()
        # Last (state, reward, done, info) before latency masking was applied.
        self.last_unmasked_tuple = (state, reward, done, info)
        '''
        instantiate the CacheQuery
        '''
        # flags
        output = None
        verbose = False
        interactive = False
        # options
        config_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))+ '/third_party/cachequery/tool/cachequery.ini' # default path
        batch = None
        # config overwrite
        cacheset = None
        level = None
        cacheset='34'
        level = 'L2' # for 4-way cache
        # read config
        try:
            config = configparser.ConfigParser()
            config.read(config_path)
            # add method for dynamic cache check
            def cache(self, prop):
                return self.get(self.get('General', 'level'), prop)
            def set_cache(self, prop, val):
                return self.set(self.get('General', 'level'), prop, val)
            setattr(configparser.ConfigParser, 'cache', cache)
            setattr(configparser.ConfigParser, 'set_cache', set_cache)
        except:
            print("[!] Error: invalid config file")
            sys.exit(1)
        # overwrite options
        if level:
            config.set('General', 'level', level)
        if cacheset:
            config.set_cache('set', cacheset)
        if output:
            config.set('General', 'log_file', output)
        # instantiate cq
        self.CQ = CacheQuery(config)
        self.cq_command = "A B C D E F G H A B" #establish the address alphabet to number mapping
    def reset(self):
        """Reset the wrapped env, the reveal flag, and the CacheQuery command."""
        self.revealed = False # reset the revealed
        done = False
        reward = 0
        info = {}
        state = self.env.reset()
        self.last_unmasked_tuple = (state, reward, done, info)
        #reset CacheQuery Command
        self.cq_command = "A B C D E F G H A B"
        return state
    def step(self, action):
        """One step: the extra action index reveals latencies via hardware
        CacheQuery; all other actions are forwarded to the simulated env
        with the latency column masked until reveal."""
        if action == self.action_space_size - 1:
            # Reveal action. Revealing twice is an error and terminates.
            if self.revealed == True:
                self.env.vprint("double reveal! terminated!")
                state, reward, done, info = self.last_unmasked_tuple
                reward = self.env.wrong_reward
                done = True
                return state, reward, done, info
            self.revealed = True
            # return the revealed obs, reward,# return the revealed obs, reward,
            state, reward, done, info = self.last_unmasked_tuple
            reward = 0 # reveal action does not cost anything
            self.env.vprint("reveal observation")
            # when doing reveal, launch the actual cachequery
            #self.CQ.command(self.cq_command)
            answer = self.CQ.run(self.cq_command)[0]
            #print(answer)
            if answer != None:
                # Latencies follow the '->' token in CacheQuery's answer;
                # walk them backwards and overwrite the masked latency column.
                lat_cq = answer.split()[answer.split().index('->')+1:]
                lat_cq_cnt = len(lat_cq) - 1
                for i in range(len(state)):
                    if state[i][0] != 2 and lat_cq_cnt >= 0:
                        if int(lat_cq[lat_cq_cnt]) > 50: # hit
                            state[i][0] = 0
                        else: # miss
                            state[i][0] = 1
                        lat_cq_cnt -= 1
            print(state)
            return state, reward, done, info
        elif action < self.action_space_size - 1: # this time the action must be smaller than sction_space_size -1
            tmpaction = self.env.parse_action(action)
            address = hex(tmpaction[0]+self.env.attacker_address_min)[2:] # attacker address in attacker_address_space
            is_guess = tmpaction[1] # check whether to guess or not
            is_victim = tmpaction[2] # check whether to invoke victim
            is_flush = tmpaction[3] # check whether to flush
            victim_addr = hex(tmpaction[4] + self.env.victim_address_min)[2:] # victim address
            # need to check if revealed first
            # if revealed, must make a guess
            # if not revealed can do any thing
            if self.revealed == True:
                if is_guess == 0: # revealed but not guess # huge penalty
                    self.env.vprint("reveal but no guess! terminate")
                    done = True
                    reward = self.env.wrong_reward
                    info = {}
                    state = self.env.reset()
                    return state, reward, done, info
                elif is_guess != 0: # this must be guess and terminate
                    done = True
                    #return self.env.step(action)
                    if int(victim_addr,16) == self.env.victim_address:
                        reward = self.env.correct_reward
                    else:
                        reward = self.env.wrong_reward
                    info = {}
                    state = self.env.reset()
                    return state, reward, done, info
            elif self.revealed == False:
                if is_guess != 0:
                    # guess without revewl --> huge penalty
                    self.env.vprint("guess without reward! terminate")
                    done = True
                    reward = self.env.wrong_reward
                    info = {}
                    state = self.env.reset()
                    return state, reward, done, info
                else:
                    state, reward, done, info = self.env.step(action)
                    # append to the cq_command
                    if is_victim == True:
                        self.cq_command += (' ' + chr(ord('A') + self.env.victim_address))
                    elif is_flush == True:
                        self.cq_command += (' ' + chr(ord('A') + int(address, 16)) + '!')
                    else:
                        self.cq_command += (' ' + chr(ord('A') + int(address, 16)) + '?')
                    self.last_unmasked_tuple = ( state.copy(), reward, done, info )
                    # mask the state so that nothing is revealed
                    state[:,0] = - np.ones((state.shape[0],)) # use -1 as the default (unrevealed value)
                    #print(state)
                    return state, reward, done, info
if __name__ == "__main__":
    # Start ray in local mode (single process, easier debugging); GPU enabled.
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
    # NOTE(review): shutting down immediately after init looks suspicious --
    # presumably intended to tear down a stale ray instance; confirm intent.
    if ray.is_initialized():
        ray.shutdown()
    # Register the wrapped env under the name referenced by config['env'].
    tune.register_env("cache_guessing_game_env", CacheQueryEnv)
    # PPO configuration: single 4-block 4-way cache with a next-line
    # prefetcher; attacker addresses 0-7, victim secret drawn from 0-3.
    config = {
        'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
        'env_config': {
            'verbose': 1,
            "prefetcher": "nextline",
            "rerandomize_victim": False,
            "force_victim_hit": False,
            'flush_inst': False,
            "allow_victim_multi_access": True,#False,
            "allow_empty_victim_access": False,
            "attacker_addr_s": 0,
            "attacker_addr_e": 7,#4,#11,#15,
            "victim_addr_s": 0,
            "victim_addr_e": 3,#7,
            "reset_limit": 1,
            "cache_configs": {
                # YAML config file for cache simulaton
                "architecture": {
                  "word_size": 1, #bytes
                  "block_size": 1, #bytes
                  "write_back": True
                },
                # for L2 cache of Intel i7-6700
                # it is a 4-way cache, this should not be changed
                "cache_1": {#required
                  "blocks": 4,#4,
                  "associativity": 4,
                  "hit_time": 1 #cycles
                },
                "mem": {#required
                  "hit_time": 1000 #cycles
                }
            }
        },
        #'gamma': 0.9,
        'num_gpus': 1,
        'num_workers': 1,
        'num_envs_per_worker': 1,
        #'entropy_coeff': 0.001,
        #'num_sgd_iter': 5,
        #'vf_loss_coeff': 1e-05,
        'model': {
            #'custom_model': 'test_model',#'rnn',
            #'max_seq_len': 20,
            #'custom_model_config': {
            #    'cell_size': 32
            #   }
        },
        'framework': 'torch',
    }
    #tune.run(PPOTrainer, config=config)
    trainer = PPOTrainer(config=config)
    # Save a checkpoint when the user interrupts training with Ctrl+C.
    def signal_handler(sig, frame):
        print('You pressed Ctrl+C!')
        checkpoint = trainer.save()
        print("checkpoint saved at", checkpoint)
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    # Train forever; the SIGINT handler above checkpoints and exits.
    while True:
result = trainer.train() | 10,303 | 38.478927 | 160 | py |
AutoCAT | AutoCAT-main/src/rllib/run_gym_rllib_agent_blacklist.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# look at https://github.com/ray-project/ray/blob/ea2bea7e309cd60457aa0e027321be5f10fa0fe5/rllib/examples/custom_env.py#L2
#from CacheSimulator.src.gym_cache.envs.cache_simulator_wrapper import CacheSimulatorWrapper
#from CacheSimulator.src.replay_checkpint import replay_agent
import gym
import ray
import ray.tune as tune
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.modelv2 import restore_original_dimensions
import torch.nn as nn
import numpy as np
from ray.rllib.models import ModelCatalog
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.agents.sac import SACTrainer
import json
import sys
import copy
import torch
def replay_agent(trainer, env, randomize_init=False, non_deterministic=False, repeat_time=-1):
    """Replay a trained agent against every victim address and measure accuracy.

    Args:
        trainer: RLlib trainer whose policy is queried via compute_single_action.
        env: CacheGuessingGameEnv instance used for the rollouts.
        randomize_init: if True, randomize the cache state after each reset.
        non_deterministic: if True, sample actions (explore) instead of acting
            greedily.
        repeat_time: rollouts per victim address when randomization/exploration
            is enabled; -1 selects the default of 50.

    Returns:
        (accuracy, pattern_buffer): accuracy is the fraction of episodes ending
        in a correct guess; pattern_buffer records
        (victim_addr, [(action, latency), ...], correct) per episode.
    """
    import pprint  # local import: pprint is not imported at module level

    pattern_buffer = []
    num_guess = 0
    num_correct = 0
    if randomize_init == False and non_deterministic == False:
        # deterministic policy + fixed initial state: one rollout suffices
        repeat_times = 1
    elif repeat_time == -1:
        repeat_times = 50
    else:
        # bug fix: repeat_times was previously left undefined (NameError) when
        # an explicit repeat_time was passed with randomization enabled
        repeat_times = repeat_time
    for victim_addr in range(env.victim_address_min, env.victim_address_max + 1):
        for repeat in range(repeat_times):
            obs = env.reset(victim_address=victim_addr)
            if randomize_init:
                env._randomize_cache("union")
            action_buffer = []
            done = False
            while done == False:
                print(f"-> Sending observation {obs}")
                action = trainer.compute_single_action(obs, explore=non_deterministic)  # randomized inference
                print(f"<- Received response {action}")
                obs, reward, done, info = env.step(action)
                action_buffer.append((action, obs[0]))
            # a positive terminal reward marks a correct guess
            if reward > 0:
                correct = True
                num_correct += 1
            else:
                correct = False
            # bug fix: count every finished episode as a guess; previously only
            # wrong guesses were counted, so accuracy was inflated and an
            # all-correct run divided by zero
            num_guess += 1
            pattern_buffer.append((victim_addr, action_buffer, correct))
    pprint.pprint(pattern_buffer)
    return 1.0 * num_correct / num_guess, pattern_buffer
if __name__ == "__main__":
    import signal
    import sys
    import pickle
    # Wildcard import supplies CacheGuessingGameEnv and (presumably) a default
    # `config` used when no config file is given -- TODO confirm.
    from test_custom_policy_diversity_works import *
    if len(sys.argv) > 1:
        # First CLI argument: path to a JSON trainer/env config file.
        config_name = sys.argv[1]
        print(config_name)
        # bug fix: use a context manager so the config file handle is closed
        with open(config_name) as f:
            config = json.load(f)
        if len(sys.argv) == 5:
            # Optional overrides: number of sets, ways, and sharing option.
            nset = int(sys.argv[2])
            nway = int(sys.argv[3])
            nopt = int(sys.argv[4])
            config["env_config"]["cache_configs"]["cache_1"]["associativity"] = nway
            config["env_config"]["cache_configs"]["cache_1"]["blocks"] = nset * nway
            config["env_config"]["victim_addr_s"] = 0
            config["env_config"]["victim_addr_e"] = nset * nway - 1
            # NOTE(review): the comments and flush_inst settings below look
            # inconsistent (e.g. "clflush allowed" sets flush_inst False);
            # verify the intended nopt -> flush_inst mapping.
            if nopt == 0: # shared
                config["env_config"]["attacker_addr_s"] = 0
                config["env_config"]["attacker_addr_e"] = nset * nway - 1
                config["env_config"]["flush_inst"] = True
            elif nopt == 1: # not shared
                config["env_config"]["attacker_addr_s"] = nset * nway
                config["env_config"]["attacker_addr_e"] = 2 * nset * nway - 1
                config["env_config"]["flush_inst"] = False
            elif nopt == 2: # all + clflush allowed
                config["env_config"]["attacker_addr_s"] = 0
                config["env_config"]["attacker_addr_e"] = 2 * nset * nway - 1
                config["env_config"]["flush_inst"] = False
            elif nopt == 3: # all + clflush not allowed
                config["env_config"]["attacker_addr_s"] = 0
                config["env_config"]["attacker_addr_e"] = 2 * nset * nway - 1
                config["env_config"]["flush_inst"] = True
        #print(config)
        #exit(0)
    # NOTE(review): this branch is only reachable with no argument, in which
    # case len(sys.argv) == 1 != 2 always holds, so the "default
    # configurations" else-branch below is dead code; confirm intended check.
    elif len(sys.argv)!= 2:
        print("not correct number of argument. Exit!!!")
        exit(-1)
    else:
        print("(warning) config file not specified! use default configrations!")
    #tune.run(PPOTrainer, config=config)#config={"env": 'Freeway-v0', "num_gpus":1})
    from ray.tune.logger import pretty_print
    #tune.register_env("cache_guessing_game_env_fix", CacheSimulatorMultiGuessWrapper)
    #from run_gym_rllib_simd import *
    #config['num_workers'] = 6
    #config['num_envs_per_worker']= 2
    print(config)
    # Environment used for replay-based verification of trained agents.
    env = CacheGuessingGameEnv(config["env_config"])
    #env = CacheSimulatorMultiGuessWrapper(config["env_config"])
    trainer = PPOTrainer(config=config)
    #trainer = SACTrainer(config=config)
    # On Ctrl+C: checkpoint the trainer and dump the blacklisted past models.
    def signal_handler(sig, frame):
        print('You pressed Ctrl+C!')
        checkpoint = trainer.save()
        print("checkpoint saved at", checkpoint)
        i = checkpoint.rfind('/')
        config_name = checkpoint[0:i] + '/../env.config'
        print("env config saved ad ", config_name)
        #### dump the binary config file
        ###with open(config_name, 'wb') as handle:
        ###    pickle.dump(config["env_config"], handle)
        #### dump the txt config file
        ###with open(config_name + '.txt', 'w') as txtfile:
        ###    txtfile.write(json.dumps(config["env_config"]))
        policy = trainer.get_policy()
        for model in policy.past_models:
            # bug fix: print() does not accept a `protocol` keyword; the stray
            # pickle argument raised TypeError whenever Ctrl+C was pressed
            print(model.state_dict()['_hidden_layers.1._model.0.weight'])
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    i = 0
    thre =0.95 #0.98
    #buf = []
    all_raw_patterns = []
    all_categorized_patterns = []
    while True:
        # Perform one iteration of training the policy with PPO
        result = trainer.train()
        print(pretty_print(result))
        i += 1
        if i % 1 == 0: # give enought interval to achieve small verificaiton overhead
            accuracy, patterns = replay_agent(trainer, env, randomize_init=True, non_deterministic=True)
            if i == 1:
                checkpoint = trainer.save()
                print("Initial checkpoint saved at", checkpoint)
                # bug fix: a separate name is used for the path index; the
                # original reassigned the loop counter i here, corrupting the
                # training-iteration count
                slash = checkpoint.rfind('/')
                config_name = checkpoint[0:slash] + '/../env.config'
                print("env config saved ad ", config_name)
                # dump the binary config file
                with open(config_name, 'wb') as handle:
                    pickle.dump(config["env_config"], handle)
                # dump the txt config file
                #import pprint
                #pp = pprint.PrettyPrinter(indent=4)
                #pp.pprint(config["env_config"])
                with open(config_name + '.txt', 'w') as txtfile:
                    #txtfile.write(pp.pprint(config["env_config"]))
                    txtfile.write(json.dumps(config, indent=4, sort_keys=True))
            # just with lower reward
            # HOW TO PREVENT THE SAME AGENT FROM BEING ADDED TWICE????
            # HOW TO TELL IF THEY ARE CONSIDERED THE SAME AGENT?
            # HOW TO FORCE TRAINER TO KNOW THAT THEY ARE STILL DISCOVERING THE SAME AGENT???
            if accuracy > thre:
                # if the agent is different from the known agent
                policy = trainer.get_policy()
                if policy.existing_agent(env, trainer) == False:
                    checkpoint = trainer.save()
                    print("checkpoint saved at", checkpoint)
                # this agent might have high accuracy but
                # it ccould be that it is still the same agent
                # add this agent to blacklist
                trainer.get_policy().push_current_model()
                #buf.append(copy.deepcopy(trainer.get_weights()))
                policy = trainer.get_policy()
                for model in policy.past_models:
                    print(model.state_dict()['_hidden_layers.1._model.0.weight'])
                #for weight in policy.past_weights:
                #    print(weight['_value_branch._model.0.bias'])
                    #print(weight['default_policy']['_value_branch._model.0.bias'])
                #print(policy.model.state_dict()['_hidden_layers.1._model.0.weight'])
#for w in buf:
# print(w['default_policy']['_value_branch._model.0.bias']) | 8,329 | 42.385417 | 122 | py |
AutoCAT | AutoCAT-main/src/rllib/run_gym_rllib_reveal_action.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.10
Function: Add one reveal action so that the agent has to explicit reveal the secret,
once the secret is revealed, it must make a guess immediately
'''
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
import signal
from sklearn import svm
from sklearn.model_selection import cross_val_score
import numpy as np
class CacheGuessingGameWithRevealEnv(gym.Env):
    """Wrapper that hides the latency observation behind an explicit 'reveal' action.

    The wrapped CacheGuessingGameEnv's action space is extended by one action
    (the last index).  Until the agent plays it, the latency column of every
    observation is masked with zeros; once revealed, the agent must guess
    immediately or it receives the wrong-guess penalty.
    """
    def __init__(self, env_config):
        # Imported lazily so this module can be loaded without the simulator.
        from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
        self.env = CacheGuessingGameEnv(env_config)
        self.action_space_size = self.env.action_space.n + 1 # increase the action space by one
        self.action_space = spaces.Discrete(self.action_space_size)
        self.observation_space = self.env.observation_space
        self.revealed = False # initially
        done = False
        reward = 0
        info = {}
        state = self.env.reset()
        # Most recent UNMASKED (state, reward, done, info); returned on reveal.
        self.last_unmasked_tuple = (state, reward, done, info)
    def reset(self):
        """Reset the wrapped env and clear the revealed flag."""
        self.revealed = False # reset the revealed
        done = False
        reward = 0
        info = {}
        state = self.env.reset()
        self.last_unmasked_tuple = (state, reward, done, info)
        return state
    def step(self, action):
        """Step the wrapped env, masking latencies until a reveal action."""
        # The extra (last) action index is the "reveal" action.
        if action == self.action_space_size - 1:
            if self.revealed == True:
                # Revealing twice is illegal: terminate with wrong-guess penalty.
                self.env.vprint("double reveal! terminated!")
                state, reward, done, info = self.last_unmasked_tuple
                reward = self.env.wrong_reward
                done = True
                return state, reward, done, info
            self.revealed = True
            self.env.vprint("reveal observation")
            # return the revealed obs, reward,# return the revealed obs, reward,
            state, reward, done, info = self.last_unmasked_tuple
            reward = 0 # reveal action does not cost anything
            return state, reward, done, info
        elif action < self.action_space_size - 1: # this time the action must be smaller than sction_space_size -1
            _, is_guess, _, _, _ = self.env.parse_action(action)
            # need to check if revealed first
            # if revealed, must make a guess
            # if not revealed can do any thing
            if self.revealed == True:
                if is_guess == 0: # revealed but not guess # huge penalty
                    self.env.vprint("reveal but no guess! terminate")
                    done = True
                    reward = self.env.wrong_reward
                    info = {}
                    state = self.env.reset()
                    return state, reward, done, info
                elif is_guess != 0: # this must be guess and terminate
                    return self.env.step(action)
            elif self.revealed == False:
                if is_guess != 0:
                    # guess without reveal --> huge penalty
                    self.env.vprint("guess without reward! terminate")
                    done = True
                    reward = self.env.wrong_reward
                    info = {}
                    state = self.env.reset()
                    return state, reward, done, info
                else:
                    state, reward, done, info = self.env.step(action)
                    # Remember the true tuple so a later reveal can return it.
                    self.last_unmasked_tuple = ( state.copy(), reward, done, info )
                    # mask the state so that nothing is revealed
                    state[:,0] = np.zeros((state.shape[0],))
                    return state, reward, done, info
if __name__ == "__main__":
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
    # NOTE(review): shutting down immediately after init looks suspicious --
    # presumably intended to tear down a stale ray instance; confirm intent.
    if ray.is_initialized():
        ray.shutdown()
    tune.register_env("cache_guessing_game_env", CacheGuessingGameWithRevealEnv)
    # PPO config: 4-block 4-way cache with next-line prefetcher; the single
    # victim address 0, attacker range 0-8, empty victim access allowed.
    config = {
        'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
        'env_config': {
            'verbose': 1,
            "rerandomize_victim": False,
            "force_victim_hit": False,
            'flush_inst': False,
            "allow_victim_multi_access": True,#False,
            "allow_empty_victim_access": True,
            "attacker_addr_s": 0,
            "attacker_addr_e": 8,#4,#11,#15,
            "victim_addr_s": 0,
            "victim_addr_e": 0,#7,
            "reset_limit": 1,
            "cache_configs": {
                # YAML config file for cache simulaton
                "architecture": {
                  "word_size": 1, #bytes
                  "block_size": 1, #bytes
                  "write_back": True
                },
                "cache_1": {#required
                  "blocks": 4,#4,
                  "associativity": 4,
                  "hit_time": 1, #cycles
                  "prefetcher": "nextline"
                },
                "mem": {#required
                  "hit_time": 1000 #cycles
                }
            }
        },
        #'gamma': 0.9,
        'num_gpus': 1,
        'num_workers': 1,
        'num_envs_per_worker': 1,
        #'entropy_coeff': 0.001,
        #'num_sgd_iter': 5,
        #'vf_loss_coeff': 1e-05,
        'model': {
            #'custom_model': 'test_model',#'rnn',
            #'max_seq_len': 20,
            #'custom_model_config': {
            #    'cell_size': 32
            #   }
        },
        'framework': 'torch',
    }
    #tune.run(PPOTrainer, config=config)
    trainer = PPOTrainer(config=config)
    # Save a checkpoint on Ctrl+C before exiting.
    def signal_handler(sig, frame):
        print('You pressed Ctrl+C!')
        checkpoint = trainer.save()
        print("checkpoint saved at", checkpoint)
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    # Train indefinitely; interrupt with Ctrl+C to checkpoint and exit.
    while True:
result = trainer.train() | 6,095 | 35.945455 | 114 | py |
AutoCAT | AutoCAT-main/src/rllib/run_gym_rllib_example_multicore_largel2.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.11
Function: An example rllib training script
'''
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
import signal
import numpy as np
if __name__ == "__main__":
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
    # NOTE(review): shutdown immediately after init -- presumably meant to
    # tear down a stale ray instance; confirm intent.
    if ray.is_initialized():
        ray.shutdown()
    tune.register_env("cache_guessing_game_env", CacheGuessingGameEnv)
    # Two-core setup: a private direct-mapped L1 per core plus a larger shared
    # 2-way L2 (16 blocks); attacker addresses 8-23, victim secret in 0-7.
    config = {
        'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
        'env_config': {
            'verbose': 1,
            #'super_verbose': 1,
            "rerandomize_victim": False,
            "force_victim_hit": False,
            'flush_inst': False,
            "allow_victim_multi_access": True,#False,
            "allow_empty_victim_access": False,
            "attacker_addr_s": 8,
            "attacker_addr_e": 23,#4,#11,#15,
            "victim_addr_s": 0,
            "victim_addr_e": 7,#7,
            "reset_limit": 1,
            "cache_configs": {
                # YAML config file for cache simulaton
                "architecture": {
                  "word_size": 1, #bytes
                  "block_size": 1, #bytes
                  "write_back": True
                },
                "cache_1": {#required
                  "blocks": 4,#4,
                  "associativity": 1,
                  "hit_time": 1, #cycles
                  "prefetcher": "nextline"
                },
                "cache_1_core_2": {#required
                  "blocks": 4,#4,
                  "associativity": 1,
                  "hit_time": 1, #cycles
                  "prefetcher": "nextline"
                },
                "cache_2": {
                  "blocks": 16,
                  "associativity": 2,
                  "hit_time": 16,
                },
                "mem": {#required
                  "hit_time": 1000 #cycles
                }
            }
        },
        #'gamma': 0.9,
        'num_gpus': 1,
        'num_workers': 1,
        'num_envs_per_worker': 1,
        #'entropy_coeff': 0.001,
        #'num_sgd_iter': 5,
        #'vf_loss_coeff': 1e-05,
        'model': {
            #'custom_model': 'test_model',#'rnn',
            #'max_seq_len': 20,
            #'custom_model_config': {
            #    'cell_size': 32
            #   }
        },
        'framework': 'torch',
    }
    #tune.run(PPOTrainer, config=config)
    trainer = PPOTrainer(config=config)
    # Checkpoint on Ctrl+C, then exit.
    def signal_handler(sig, frame):
        print('You pressed Ctrl+C!')
        checkpoint = trainer.save()
        print("checkpoint saved at", checkpoint)
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    # Train until interrupted.
    while True:
result = trainer.train() | 3,186 | 30.87 | 95 | py |
AutoCAT | AutoCAT-main/src/rllib/run_gym_rllib_guessability.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.10
Description:
split the agent into two different agent
P1: just generate the sequence but not the guess
P2: just make the guess, given the memory access sequence and observations
P1: action space: autoCAT's memory access
observation space: guessability
P2: action space: NOP
observation space: original observation space
P1 wrapper of CacheGuessingGameEnv
blocking the guess action or just have one guess action
when guess is structed, calculate the guessability as the reward
observation space becomes concatenated observations
reward becomes agregated reward
'''
from random import random
import sys
import os
import gym
import sys
import numpy as np
from gym import spaces
import signal
from sklearn import svm
from sklearn.model_selection import cross_val_score
class CacheSimulatorP1Wrapper(gym.Env):
    """P1 wrapper: the agent produces only the access sequence; the guess is
    scored by an oracle.

    One CacheGuessingGameEnv copy is kept per possible secret (times `copy`),
    all stepped with the same actions.  The per-copy latency traces are
    recorded in latency_buffer, and when the single merged "guess" action is
    taken, the reward is the guessability computed by P2oracle (fraction of
    distinguishable secrets) rather than an actual guess.
    """
    def __init__(self, env_config):
        #sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
        # for offline training, the environment returns filler observations and zero reward
        # until the guess
        # the step reward is also temporarily accumulated until the end
        self.offline_training = True
        self.copy = 1
        self.env_list = []
        self.env_config = env_config
        self.cache_state_reset = False # has to force no reset
        self.env = CacheGuessingGameEnv(env_config)
        self.victim_address_min = self.env.victim_address_min
        self.victim_address_max = self.env.victim_address_max
        self.window_size = self.env.window_size
        self.secret_size = self.victim_address_max - self.victim_address_min + 1
        self.max_box_value = self.env.max_box_value
        self.feature_size = self.env.feature_size
        # expand the observation space: one feature block per (secret, copy)
        self.observation_space = spaces.Box(low=-1, high=self.max_box_value, shape=(self.window_size, self.feature_size * self.secret_size * self.copy))
        # merge all guessing into one action (the last action index)
        self.action_space_size = (self.env.action_space.n - self.secret_size+1)
        print(self.env.action_space.n)
        print(self.env.get_act_space_dim())
        self.action_space = spaces.Discrete(self.action_space_size)
        # instantiate the environment (only the first copy is verbose)
        self.env_list.append(CacheGuessingGameEnv(env_config))
        self.env_config['verbose'] = False
        for _ in range(1,self.secret_size * self.copy):
            self.env_list.append(CacheGuessingGameEnv(env_config))
        # instantiate the latency_buffer
        # for each permuted secret, latency_buffer stores the latency
        self.latency_buffer = []
        for i in range(0, self.secret_size * self.copy):
            self.latency_buffer.append([])
        #permute the victim addresses
        # NOTE(review): this permutation is immediately overwritten by the
        # sequential list below -- confirm whether permutation was intended.
        self.victim_addr_arr = np.random.permutation(range(self.env.victim_address_min, self.env.victim_address_max+1))
        self.victim_addr_arr = []
        for i in range(self.victim_address_min, self.victim_address_max+1):
            self.victim_addr_arr.append(i)
        # reset the addresses
        self.env_config['verbose'] = True
        self.env_list[0].reset(self.victim_addr_arr[0])
        self.env_config['verbose'] = False
        self.reset_state = np.array([[]] * self.window_size)
        # initialize the offline_state as filler state if we use offline training
        if self.offline_training == True:
            self.offline_state = self.env.reset(seed=-1)
            self.offline_reward = 0
            self.offline_action_buffer = []
            self.last_offline_state = self.env.reset()
        for cp in range(0, self.copy):
            seed = -1#random.randint(1, 1000000)
            for i in range(0, len(self.victim_addr_arr)):
                state = self.env_list[i + cp * len(self.victim_addr_arr)].reset(victim_address = self.victim_addr_arr[i], seed= seed)
                self.reset_state = np.concatenate((self.reset_state, state), axis=1)
    # same seed ensures the initial states are the same
    def reset(self):
        """Reset every sub-env (one per secret) and clear the latency buffers.

        Returns the concatenated initial observation of all sub-envs.
        """
        # permute the victim addresses
        #self.victim_addr_arr = np.random.permutation(range(self.env.victim_address_min, self.env.victim_address_max+1))
        self.victim_addr_arr = []
        for i in range(self.victim_address_min, self.victim_address_max+1):
            self.victim_addr_arr.append(i)
        # restore the total state
        total_state = np.array([[]] * self.window_size)
        for i in range(len(self.env_list)):
            seed = -1#random.randint(1, 1000000)
            env = self.env_list[i]
            state = env.reset(victim_address = self.victim_addr_arr[i % len(self.victim_addr_arr)], seed = seed)
            total_state = np.concatenate((total_state, state), axis=1)
        if self.offline_training == True:
            state = self.offline_state
            self.offline_action_buffer = []
        # reset the latency_buffer
        self.latency_buffer = []
        for i in range(0, self.secret_size * self.copy):
            self.latency_buffer.append([])
        self.last_offline_state = self.env.reset()
        return total_state
        #return self.reset_state
    # feed the actions to all subenv with different secret
    def step(self, action):
        """Broadcast `action` to every sub-env.

        The merged last action index is the "guess": in offline mode the
        buffered actions are replayed on every sub-env and the episode ends
        with the P2oracle guessability reward.  Any other action is either
        buffered (offline mode, with masked filler observations) or stepped
        directly (online mode).
        """
        early_done_reward = 0
        total_reward = 0
        total_state = []
        total_done = False
        done_arr = []
        total_state = np.array([[]] * self.window_size)
        #parsed_orig_action = action #self.env.parse_action(action)
        if action == self.action_space_size - 1: # guessing action
            info = {}
            # for offline training the total_reward needs to include the history reward
            if self.offline_training == True:
                # just similate all actions here
                i = 0
                print(self.offline_action_buffer)
                for env in self.env_list:
                    for act in self.offline_action_buffer:
                        #print('simulate in offline_action_buffer')
                        state, reward, done, info = env.step(act)
                        total_reward += reward
                        latency = state[0][0]
                        self.latency_buffer[i].append(latency) #
                        if done == True:
                            break
                    i += 1
                # TODO(MUlong): need to think whether the last observation is needt for the agent
                total_state = self.reset_state
                self.offline_action_buffer = []
                total_reward = self.P2oracle()
            else:
                #calculate the reward and terminate
                for env in self.env_list:
                    state, reward, done, info = env.step(action)
                    #total_state = np.concatenate((total_state, state), axis=1)
                total_state = self.reset_state
                total_reward = self.P2oracle()
            total_done = True
        else: # use the action and collect and concatenate observation
            ### for offline RL, we need to mask the state and accumulate reward
            # for offline RL, just store the action
            if self.offline_training == True:
                total_reward = 0
                self.offline_action_buffer.append(action)
                # referring to cache_guessing_game_env_impl.py to create an empty next state
                step_count = 1 + self.last_offline_state[0][3]
                if step_count == self.env.window_size:
                    print('length violation!!!')
                    total_done = True
                    #total_reward = len(self.env_list) * self.env.length_violation_reward
                    i = 0
                    #print(self.offline_action_buffer)
                    for env in self.env_list:
                        for act in self.offline_action_buffer:
                            #print('simulate in offline_action_buffer')
                            state, reward, done, info = env.step(act)
                            total_reward += reward
                            latency = state[0][0]
                            self.latency_buffer[i].append(latency) #
                            if done == True:
                                break
                        i += 1
                        total_done = done
                    print(total_reward)
                original_action = action #self.last_offline_state[0][2]
                _, _, is_victim, _, _ = self.env.parse_action(action)
                # victim_accessed is sticky: once set in an episode it stays set
                if is_victim == 1:
                    victim_accessed = 1
                else:
                    if self.last_offline_state[0][1] == 1:
                        victim_accessed = 1
                    else:
                        victim_accessed = 0
                r = self.last_offline_state[0][0]
                new_obs = np.array([[r, victim_accessed, original_action, step_count]])
                #del self.last_offline_state[-1]
                self.last_offline_state = np.concatenate((new_obs, self.last_offline_state[0:-1,]), axis= 0)
                state = self.last_offline_state
                # state is a n * 4 matrix
                # r, victim_accesesd, original_action, self.step_count
                # we only need to mask the r
                state[:,0] = self.offline_state[:, 0]
                for env in self.env_list:
                    total_state = np.concatenate((total_state, state), axis=1)
                #print(total_state)
                #print('step')
                info={}
            else: #online RL
                i = 0
                for env in self.env_list:
                    state, reward, done, info = env.step(action)
                    latency = state[0][0]
                    # length violation or other type of violation
                    if done == True:
                        env.reset()
                        total_done = True
                    self.latency_buffer[i].append(latency) #
                    total_reward += reward
                    total_state = np.concatenate((total_state, state), axis=1)
                    i += 1
                info = {}
                total_reward = total_reward * 1.0 / len(self.env_list)#self.secret_size
        return total_state, total_reward, total_done, info
    # given the existing sequence, calculate the P2 oracle reward
    # calculate the expected guessing correctness
    def P2oracle(self):
        """Score the recorded latency traces by how many are distinct.

        Returns a reward interpolated between correct_reward and wrong_reward
        by the fraction of distinguishable secrets.
        """
        # score
        # calculate the total score
        # which correspond to the number of distinguishable secret
        latency_dict = {}
        for i in range(0, len(self.latency_buffer)):
            latency_dict[tuple(self.latency_buffer[i])] = 1
        score = 1.0 * len(latency_dict) / len(self.latency_buffer)
        print(self.latency_buffer)
        print(' P2oracle score %f'% score)
        return score * self.env.correct_reward + ( 1 - score ) * self.env.wrong_reward
    # use SVM to evaluate the guessability (oracle guessing correctness rate)
    def P2SVMOracle(self):
        """Estimate guessability with an SVM classifier.

        Cross-validated accuracy of predicting the secret from the latency
        trace is mapped onto the correct/wrong reward range.
        """
        if len(self.latency_buffer[0]) == 0:
            score = 0
        else:
            X = self.latency_buffer
            y = []
            for cp in range(0, self.copy):
                for sec in range(0, len(self.victim_addr_arr)):
                    y.append(self.victim_addr_arr[sec])
            clf = svm.SVC(random_state=0)
            print(len(X))
            print(len(y))
            #print(X)
            #print(y)
            ans = cross_val_score(clf, X, y, cv=4, scoring='accuracy')
            score = ans.mean()
        print("P2 SVM accuracy %f" % score)
        return score * self.env.correct_reward + ( 1 - score ) * self.env.wrong_reward
if __name__ == "__main__":
    from ray.rllib.agents.ppo import PPOTrainer
    import ray
    import ray.tune as tune
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1)
    # NOTE(review): shutdown immediately after init -- presumably meant to
    # tear down a stale ray instance; confirm intent.
    if ray.is_initialized():
        ray.shutdown()
    #tune.register_env("cache_guessing_game_env_fix", CacheSimulatorSIMDWrapper)#
    tune.register_env("cache_guessing_game_env_fix", CacheSimulatorP1Wrapper)
    # PPO config: direct-mapped 4-block cache; attacker 0-7, victim 0-3,
    # clflush enabled.
    config = {
        'env': 'cache_guessing_game_env_fix', #'cache_simulator_diversity_wrapper',
        'env_config': {
            'verbose': 1,
            "force_victim_hit": False,
            'flush_inst': True,#False,
            "allow_victim_multi_access": True,#False,
            "attacker_addr_s": 0,
            "attacker_addr_e": 7,
            "victim_addr_s": 0,
            "victim_addr_e": 3,
            "reset_limit": 1,
            "cache_configs": {
                # YAML config file for cache simulaton
                "architecture": {
                  "word_size": 1, #bytes
                  "block_size": 1, #bytes
                  "write_back": True
                },
                "cache_1": {#required
                  "blocks": 4,
                  "associativity": 1,
                  "hit_time": 1 #cycles
                },
                "mem": {#required
                  "hit_time": 1000 #cycles
                }
            }
        },
        #'gamma': 0.9,
        'num_gpus': 1,
        'num_workers': 1,
        'num_envs_per_worker': 1,
        #'entropy_coeff': 0.001,
        #'num_sgd_iter': 5,
        #'vf_loss_coeff': 1e-05,
        'model': {
            #'custom_model': 'test_model',#'rnn',
            #'max_seq_len': 20,
            #'custom_model_config': {
            #    'cell_size': 32
            #   }
        },
        'framework': 'torch',
    }
    #tune.run(PPOTrainer, config=config)
    trainer = PPOTrainer(config=config)
    # Save a checkpoint on Ctrl+C before exiting.
    def signal_handler(sig, frame):
        print('You pressed Ctrl+C!')
        checkpoint = trainer.save()
        print("checkpoint saved at", checkpoint)
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    # Train until interrupted.
    while True:
result = trainer.train() | 14,432 | 40.474138 | 152 | py |
AutoCAT | AutoCAT-main/src/rllib/run_gym_rllib_example_multicore.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
Author: Mulong Luo
Date: 2022.7.11
Function: An example rllib training script
'''
from random import random
import sys
import os
###sys.path.append('../src')
from ray.rllib.agents.ppo import PPOTrainer
import ray
import ray.tune as tune
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
import signal
import numpy as np
if __name__ == "__main__":
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1, local_mode=True)
    # NOTE(review): shutdown immediately after init -- presumably meant to
    # tear down a stale ray instance; confirm intent.
    if ray.is_initialized():
        ray.shutdown()
    tune.register_env("cache_guessing_game_env", CacheGuessingGameEnv)
    # Two-core setup: a private direct-mapped L1 per core plus a small shared
    # L2; attacker addresses 4-7, victim secret in 0-3.
    config = {
        'env': 'cache_guessing_game_env', #'cache_simulator_diversity_wrapper',
        'env_config': {
            'verbose': 1,
            #'super_verbose': 1,
            "rerandomize_victim": False,
            "force_victim_hit": False,
            'flush_inst': False,
            "allow_victim_multi_access": True,#False,
            "allow_empty_victim_access": False,
            "attacker_addr_s": 4,
            "attacker_addr_e": 7,#4,#11,#15,
            "victim_addr_s": 0,
            "victim_addr_e": 3,#7,
            "reset_limit": 1,
            "cache_configs": {
                # YAML config file for cache simulaton
                "architecture": {
                  "word_size": 1, #bytes
                  "block_size": 1, #bytes
                  "write_back": True
                },
                "cache_1": {#required
                  "blocks": 4,#4,
                  "associativity": 1,
                  "hit_time": 1, #cycles
                  "prefetcher": "nextline"
                },
                "cache_1_core_2": {#required
                  "blocks": 4,#4,
                  "associativity": 1,
                  "hit_time": 1, #cycles
                  "prefetcher": "nextline"
                },
                "cache_2": {
                  "blocks": 4,
                  "associativity": 1,
                  "hit_time": 16,
                },
                "mem": {#required
                  "hit_time": 1000 #cycles
                }
            }
        },
        #'gamma': 0.9,
        'num_gpus': 1,
        'num_workers': 1,
        'num_envs_per_worker': 1,
        #'entropy_coeff': 0.001,
        #'num_sgd_iter': 5,
        #'vf_loss_coeff': 1e-05,
        'model': {
            #'custom_model': 'test_model',#'rnn',
            #'max_seq_len': 20,
            #'custom_model_config': {
            #    'cell_size': 32
            #   }
        },
        'framework': 'torch',
    }
    #tune.run(PPOTrainer, config=config)
    trainer = PPOTrainer(config=config)
    # Checkpoint on Ctrl+C, then exit.
    def signal_handler(sig, frame):
        print('You pressed Ctrl+C!')
        checkpoint = trainer.save()
        print("checkpoint saved at", checkpoint)
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    # Train until interrupted.
    while True:
result = trainer.train() | 3,184 | 30.85 | 95 | py |
AutoCAT | AutoCAT-main/src/rllib/run_gym_rllib_simd.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
'''
CacheSimulatorSIMDWrapper
wraps multiple environment with different initialization into a single env
'''
#from msilib.schema import DuplicateFile
from random import random
import sys
import os
import gym
from gym import spaces
from cache_guessing_game_env_wrapper import CacheGuessingGameEnvWrapper as CacheGuessingGameEnv
#from cache_guessing_game_env_impl import *
import pdb
import sys
import signal
# random initialization
# same secret
class CacheSimulatorSIMDWrapper(gym.Env):
    """Step `duplicate` copies of CacheGuessingGameEnv in lockstep.

    All copies share the same victim address (secret) but get independently
    randomized cache states on reset.  Each action is broadcast to every copy;
    the step result is the concatenated observation and the summed reward, and
    the episode ends as soon as any copy is done.
    """
    def __init__(self, env_config, duplicate = 1, victim_addr = -1):
        """Create `duplicate` sub-environments.

        Args:
            env_config: config dict forwarded to every CacheGuessingGameEnv.
            duplicate: number of lockstep copies.
            victim_addr: fixed victim address, or -1 to draw one at reset time.
        """
        self.duplicate = duplicate
        self.env_list = []
        self.env_config = env_config
        self.victim_addr = victim_addr
        self.env = CacheGuessingGameEnv(env_config)
        self.victim_address_min = self.env.victim_address_min
        self.victim_address_max = self.env.victim_address_max
        # observation space is the per-env space tiled `duplicate` times
        self.observation_space = spaces.MultiDiscrete(list(self.env.observation_space.nvec) * self.duplicate)
        self.action_space = self.env.action_space
        # only the first sub-env keeps verbose output
        self.env_list.append(CacheGuessingGameEnv(env_config))
        self.env_config['verbose'] = False
        for _ in range(1,self.duplicate):
            self.env_list.append(CacheGuessingGameEnv(env_config))
    def reset(self, victim_addr = -1):
        """Reset all copies to the same victim address and return the
        concatenated initial observation."""
        # bug fix: the module does `from random import random`, binding the
        # name `random` to a function, so `random.randint` raised
        # AttributeError; the function-scope import shadows it with the module
        import random
        total_state = []
        # same victim_addr (secret) for all environments
        if self.victim_addr == -1 and victim_addr == -1:
            victim_addr = random.randint(self.env.victim_address_min, self.env.victim_address_max)
        elif victim_addr == -1:
            victim_addr = self.victim_addr
        for env in self.env_list:
            state = env.reset(victim_addr)
            env._randomize_cache()#mode="union")
            total_state += list(state)
        return total_state
    def step(self, action):
        """Broadcast `action` to every copy; return the concatenated state,
        summed reward, and done=True as soon as any copy finishes."""
        early_done_reward = 0
        total_reward = 0
        total_state = []
        total_done = False
        done_arr = []
        for env in self.env_list:
            state, reward, done, info = env.step(action)
            total_reward += reward
            total_state += list(state)
            done_arr.append(done)
            if done:
                total_done = True
        if total_done:
            # penalize copies that had not finished yet (penalty currently 0)
            for done in done_arr:
                if done == False:
                    total_reward -= early_done_reward
        info = {}
        return total_state, total_reward, total_done, info
# multiple initialization
# multiple secret
class CacheSimulatorMultiGuessWrapper(gym.Env):
    """Gym env that bundles several SIMD-duplicated cache environments.

    Each of the ``block_duplicate`` sub-environments is initialized with its
    own randomly drawn secret (victim address).  The agent issues one shared
    cache action plus one guess per sub-environment in a single composite
    action vector.
    """
    def __init__(self, env_config):
        # Number of SIMD copies inside each sub-environment.
        self.duplicate = 4
        # Number of sub-environments (== number of independent secrets).
        self.block_duplicate = 4
        self.env_list = []
        self.env_config = env_config
        # Template env, used only to read address ranges and spaces.
        self.env = CacheSimulatorSIMDWrapper(env_config, duplicate=self.duplicate)
        #permute the victim addresses
        self.secret_size = self.env.victim_address_max - self.env.victim_address_min + 1
        self.victim_addr_arr = [] #np.random.permutation(range(self.env.victim_address_min, self.env.victim_address_max+1))
        # Draw one secret per sub-environment (with replacement).
        for _ in range(self.block_duplicate):
            #for _ in range(self.secret_size):
            rand = random.randint(self.env.victim_address_min, self.env.victim_address_max )
            self.victim_addr_arr.append(rand)
        # Observation is the concatenation of all sub-env observations.
        self.observation_space = spaces.MultiDiscrete(list(self.env.observation_space.nvec) * self.block_duplicate )
        # Action = [shared cache action, guess_0, ..., guess_{B-1}].
        self.action_space = spaces.MultiDiscrete([self.env.action_space.n] + [self.secret_size] * self.block_duplicate)
        # Only the first sub-environment logs verbosely.
        self.env_config['verbose'] = True
        self.env_list.append(CacheSimulatorSIMDWrapper(env_config, duplicate=self.duplicate, victim_addr=self.victim_addr_arr[0]))
        self.env_config['verbose'] = False
        for i in range(1, len(self.victim_addr_arr)):
            #for victim_addr in self.victim_addr_arr:
            #self.env_list.append(CacheSimulatorSIMDWrapper(env_config, duplicate=self.duplicate, victim_addr = victim_addr))
            #self.env_config['verbose'] = False
            #for _ in range(0,self.block_duplicate):
            self.env_list.append(CacheSimulatorSIMDWrapper(env_config, duplicate=self.duplicate, victim_addr=self.victim_addr_arr[i]))
    def reset(self):
        """Redraw one secret per sub-environment and reset all of them.

        Returns the concatenated initial observations.
        """
        total_state = []
        # same victim_addr (secret) for all environments
        #self.victim_addr_arr = np.random.permutation(range(self.env.victim_address_min, self.env.victim_address_max+1))
        self.victim_addr_arr = [] #np.random.permutation(range(self.env.victim_address_min, self.env.victim_address_max+1))
        for _ in range(self.block_duplicate):
            #for _ in range(self.secret_size):
            rand = random.randint(self.env.victim_address_min, self.env.victim_address_max)
            #print('self.env.victim_address_min')
            #print(self.env.victim_address_min)
            #print('self.env.victim_address_max')
            #print(self.env.victim_address_max)
            #print('rand')
            #print(rand)
            #pdb.set_trace()
            #exit(0)
            self.victim_addr_arr.append(rand)
        # Reset each sub-environment with its freshly drawn secret.
        for i in range(len(self.env_list)):
            env = self.env_list[i]
            #print('len(self.env_list)')
            #print(len(self.env_list))
            #print('i')
            #print(i)
            #print('victim_addr_arr')
            #print(len(self.victim_addr_arr))
            state = env.reset(self.victim_addr_arr[i])
            total_state += list(state)
        return total_state
    def step(self, action):
        """Apply the shared cache action; on a guess, apply per-env guesses.

        Rewards are averaged over duplicate * block_duplicate copies.
        """
        early_done_reward = 0
        total_reward = 0
        total_state = []
        total_done = False
        done_arr = []
        orig_action = action[0] # first digit is the original action
        # NOTE(review): assumes parse_action returns a tuple whose indices
        # 1 and 2 are is_guess / is_victim flags -- confirm against the
        # underlying CacheGuessingGameEnv implementation.
        parsed_orig_action = self.env.env.parse_action(orig_action)
        is_guess = parsed_orig_action[1] # check whether to guess or not
        is_victim = parsed_orig_action[2] # check whether to invoke victim
        #is_flush = orig_action[3] # check if it is a guess
        if is_victim != True and is_guess == True:
            # Guess step: rewrite the action so each sub-environment receives
            # its own guessed address, and terminate the episode.
            guess_addrs = action[1:]
            for i in range(0, len(self.env_list)):
                env = self.env_list[i]
                #pdb.set_trace()
                action = orig_action - orig_action % self.secret_size + guess_addrs[i] - self.env.env.victim_address_min
                _, is_guesss, _, _, _ = self.env.env.parse_action(action)
                state, reward, done, info = env.step(action)
                assert(is_guesss == True)
                assert(done == True)
                total_reward += reward
                total_state += list(state)
            info = {}
            return total_state, total_reward * 1.0 / self.duplicate / self.block_duplicate, True, info
        # Non-guess step: broadcast the same action to every sub-environment.
        for env in self.env_list:
            state, reward, done, info = env.step(orig_action)
            total_reward += reward
            total_state += list(state)
            done_arr.append(done)
            if done:
                total_done = True
        info = {}
        return total_state, total_reward * 1.0 / self.duplicate / self.block_duplicate , total_done, info
if __name__ == "__main__":
    # Train a PPO attacker on the multi-guess wrapper with Ray RLlib.
    from ray.rllib.agents.ppo import PPOTrainer
    import ray
    import ray.tune as tune
    # NOTE(review): ray is initialized and then immediately shut down when
    # the init succeeded; PPOTrainer below re-initializes Ray implicitly.
    # This looks like a leftover workaround -- confirm intent.
    ray.init(include_dashboard=False, ignore_reinit_error=True, num_gpus=1)
    if ray.is_initialized():
        ray.shutdown()
    #tune.register_env("cache_guessing_game_env_fix", CacheSimulatorSIMDWrapper)#
    tune.register_env("cache_guessing_game_env_fix", CacheSimulatorMultiGuessWrapper)
    config = {
        'env': 'cache_guessing_game_env_fix', #'cache_simulator_diversity_wrapper',
        'env_config': {
            'verbose': 1,
            "force_victim_hit": False,
            'flush_inst': True,#False,
            "allow_victim_multi_access": True,#False,
            "attacker_addr_s": 0,
            "attacker_addr_e": 7,
            "victim_addr_s": 0,
            "victim_addr_e": 3,
            "reset_limit": 1,
            "cache_configs": {
                # YAML config file for cache simulation
                "architecture": {
                  "word_size": 1, #bytes
                  "block_size": 1, #bytes
                  "write_back": True
                },
                "cache_1": {#required
                  "blocks": 4,
                  "associativity": 1,
                  "hit_time": 1 #cycles
                },
                "mem": {#required
                  "hit_time": 1000 #cycles
                }
            }
        },
        #'gamma': 0.9,
        'num_gpus': 1,
        'num_workers': 1,
        'num_envs_per_worker': 1,
        #'entropy_coeff': 0.001,
        #'num_sgd_iter': 5,
        #'vf_loss_coeff': 1e-05,
        'model': {
            #'custom_model': 'test_model',#'rnn',
            #'max_seq_len': 20,
            #'custom_model_config': {
            #    'cell_size': 32
            #   }
        },
        'framework': 'torch',
    }
    #tune.run(PPOTrainer, config=config)
    trainer = PPOTrainer(config=config)
    # Save a checkpoint on Ctrl+C before exiting.
    def signal_handler(sig, frame):
        print('You pressed Ctrl+C!')
        checkpoint = trainer.save()
        print("checkpoint saved at", checkpoint)
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    # Train forever; interrupt with Ctrl+C to checkpoint and exit.
    while True:
result = trainer.train() | 9,661 | 40.114894 | 134 | py |
AutoCAT | AutoCAT-main/src/rlmeta/sample_cchunter.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
import os
import sys
from typing import Dict, Optional, Sequence, Union
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from autocorrelation import autocorrelation
def batch_obs(timestep: TimeStep) -> TimeStep:
    """Return a copy of *timestep* whose observation has a leading batch dim."""
    fields = list(timestep)
    fields[0] = fields[0].unsqueeze(0)  # obs tensor -> shape (1, ...)
    return TimeStep(*fields)
def unbatch_action(action: Action) -> Action:
    """Strip the leading batch dimension from a model-produced action."""
    act, info = action
    act.squeeze_(0)  # in-place squeeze of the action tensor
    squeezed_info = nested_utils.map_nested(lambda t: t.squeeze(0), info)
    return Action(act, squeezed_info)
def max_autocorr(data: Sequence[int], n: int) -> float:
    """Largest autocorrelation coefficient of *data* over lags 1..min(len(data), n)-1."""
    num_lags = min(len(data), n)
    series = np.asarray(data)
    # Skip lag 0 (trivially 1.0); clamp NaNs from degenerate windows to 0.
    coeffs = np.nan_to_num(
        np.asarray([autocorrelation(series, lag) for lag in range(1, num_lags)]))
    return coeffs.max()
def run_loop(
        env: Env,
        agent: PPOAgent,
        victim_addr: int = -1,
        threshold: Union[float, Sequence[float]] = 0.75) -> Dict[str, float]:
    """Play one episode and report attack / CC-Hunter detection metrics.

    Args:
        env: CC-Hunter wrapped cache environment.
        agent: Trained PPO attacker.
        victim_addr: Fixed victim address, or -1 to let the env pick one.
        threshold: One or more autocorrelation thresholds; an episode counts
            as "detected" for a threshold when the peak autocorrelation of
            the cache-conflict trace reaches it.

    Returns:
        Metric dict with episode stats, guess accuracy, bandwidth, the peak
        autocorrelation, and one ``detect_rate-<t>`` entry per threshold.
    """
    episode_length = 0
    episode_return = 0.0
    num_guess = 0
    num_correct = 0
    if victim_addr == -1:
        timestep = env.reset()
    else:
        timestep = env.reset(victim_address=victim_addr)
    agent.observe_init(timestep)
    while not (timestep.terminated or timestep.truncated):
        # Model server requires a batch_dim, so unsqueeze here for local runs.
        timestep = batch_obs(timestep)
        action = agent.act(timestep)
        # Unbatch the action.
        action = unbatch_action(action)
        timestep = env.step(action)
        agent.observe(action, timestep)
        episode_length += 1
        episode_return += timestep.reward
        if "guess_correct" in timestep.info:
            num_guess += 1
            if timestep.info["guess_correct"]:
                num_correct += 1
    # CC-Hunter inspects a window proportional to the cache size.
    autocorr_n = (env.env.env._env.cache_size *
                  env.env.env.cc_hunter_check_length)
    max_ac = max_autocorr(env.env.cc_hunter_history, autocorr_n)
    if isinstance(threshold, float):
        threshold = (threshold, )
    detect = [max_ac >= t for t in threshold]
    metrics = {
        "episode_length": episode_length,
        "episode_return": episode_return,
        "num_guess": num_guess,
        "num_correct": num_correct,
        # Guard against episodes in which the agent never guessed; the
        # original code raised ZeroDivisionError in that case.
        "correct_rate": num_correct / num_guess if num_guess else 0.0,
        "bandwith": num_guess / episode_length,
        "max_autocorr": max_ac,
    }
    for t, d in zip(threshold, detect):
        metrics[f"detect_rate-{t}"] = d
    return metrics
def run_loops(env: Env,
              agent: PPOAgent,
              num_episodes: int = -1,
              seed: int = 0,
              reset_cache_state: bool = False,
              threshold: Union[float, Sequence[float]] = 0.75) -> StatsDict:
    """Run many episodes and aggregate their metrics.

    With ``num_episodes == -1`` every victim address in the env's range is
    swept once (plus the empty access, if allowed); otherwise
    ``num_episodes`` episodes run with env-chosen victim addresses.
    """
    # env.seed(seed)
    env.reset(seed=seed)
    metrics = StatsDict()
    num_guess = 0
    num_correct = 0
    tot_length = 0
    if num_episodes == -1:
        start = env.env.victim_address_min
        stop = env.env.victim_address_max + 1 + int(
            env.env._env.allow_empty_victim_access)
        victim_addrs = range(start, stop)
    else:
        # -1 makes run_loop let the environment draw a random secret.
        victim_addrs = [-1] * num_episodes
    for victim_addr in victim_addrs:
        cur_metrics = run_loop(env,
                               agent,
                               victim_addr=victim_addr,
                               threshold=threshold)
        num_guess += cur_metrics["num_guess"]
        num_correct += cur_metrics["num_correct"]
        tot_length += cur_metrics["episode_length"]
        metrics.extend(cur_metrics)
    # Guard against degenerate runs with no guesses / zero total length;
    # the original code raised ZeroDivisionError in those cases.
    metrics.add("overall_correct_rate",
                num_correct / num_guess if num_guess else 0.0)
    metrics.add("overall_bandwith",
                num_guess / tot_length if tot_length else 0.0)
    return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
    """Sample a trained PPO attacker against the CC-Hunter detector.

    Loads the checkpoint named in *cfg*, rolls out episodes, and logs the
    aggregated metrics table (guess accuracy, bandwidth, detection rates).
    """
    # Create env
    cfg.env_config.verbose = 1
    env_fac = CacheEnvCCHunterWrapperFactory(
        OmegaConf.to_container(cfg.env_config))
    env = env_fac(index=0)
    # Load model
    model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
                                  env.action_space.n, cfg.checkpoint)
    model.eval()
    # Create agent
    agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
    # Run loops
    metrics = run_loops(env,
                        agent,
                        cfg.num_episodes,
                        cfg.seed,
                        threshold=cfg.threshold)
    logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
    main()
| 5,516 | 28.821622 | 79 | py |
AutoCAT | AutoCAT-main/src/rlmeta/sample_cchunter_textbook.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
import os
import sys
from typing import Dict, Optional, Sequence, Union
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
from textbook_attacker import TextbookAgent
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from autocorrelation import autocorrelation
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from autocorrelation import autocorrelation
def batch_obs(timestep: TimeStep) -> TimeStep:
    """Return *timestep* with a leading batch dimension on the observation."""
    parts = list(timestep)
    parts[0] = parts[0].unsqueeze(0)  # (obs_shape) -> (1, obs_shape)
    return TimeStep(*parts)
def unbatch_action(action: Action) -> Action:
    """Squeeze the batch dim from the action *info* tensors.

    NOTE(review): unlike the RL sampler scripts, the raw action tensor is
    deliberately left untouched here (the textbook agent emits unbatched
    actions) -- confirm against TextbookAgent.act.
    """
    act, info = action
    squeezed = nested_utils.map_nested(lambda t: t.squeeze(0), info)
    return Action(act, squeezed)
def max_autocorr(data: Sequence[int], n: int) -> float:
    """Peak autocorrelation coefficient of *data* over lags 1..min(len(data), n)-1."""
    lag_count = min(n, len(data))
    arr = np.asarray(data)
    # Lag 0 is always 1.0 and is excluded; NaNs are treated as 0.
    lagged = [autocorrelation(arr, lag) for lag in range(1, lag_count)]
    return np.nan_to_num(np.asarray(lagged)).max()
def run_loop(
        env: Env,
        agent: PPOAgent,
        victim_addr: int = -1,
        threshold: Union[float, Sequence[float]] = 0.75) -> Dict[str, float]:
    """Play one episode with the textbook agent and report metrics.

    Args:
        env: CC-Hunter wrapped cache environment.
        agent: Agent to roll out (textbook or PPO).
        victim_addr: Fixed victim address, or -1 to let the env pick one.
        threshold: One or more autocorrelation thresholds for detection.

    Returns:
        Metric dict with episode stats, guess accuracy, bandwidth, the peak
        autocorrelation, and one ``detect_rate-<t>`` entry per threshold.
    """
    episode_length = 0
    episode_return = 0.0
    num_guess = 0
    num_correct = 0
    if victim_addr == -1:
        timestep = env.reset()
    else:
        timestep = env.reset(victim_address=victim_addr)
    agent.observe_init(timestep)
    while not (timestep.terminated or timestep.truncated):
        # Model server requires a batch_dim, so unsqueeze here for local runs.
        timestep = batch_obs(timestep)
        action = agent.act(timestep)
        # Unbatch the action.
        action = unbatch_action(action)
        timestep = env.step(action)
        agent.observe(action, timestep)
        episode_length += 1
        episode_return += timestep.reward
        if "guess_correct" in timestep.info:
            num_guess += 1
            if timestep.info["guess_correct"]:
                num_correct += 1
    # CC-Hunter inspects a window proportional to the cache size.
    autocorr_n = (env.env.env._env.cache_size *
                  env.env.env.cc_hunter_check_length)
    max_ac = max_autocorr(env.env.cc_hunter_history, autocorr_n)
    if isinstance(threshold, float):
        threshold = (threshold, )
    detect = [max_ac >= t for t in threshold]
    metrics = {
        "episode_length": episode_length,
        "episode_return": episode_return,
        "num_guess": num_guess,
        "num_correct": num_correct,
        # Guard against episodes in which the agent never guessed; the
        # original code raised ZeroDivisionError in that case.
        "correct_rate": num_correct / num_guess if num_guess else 0.0,
        "bandwith": num_guess / episode_length,
        "max_autocorr": max_ac,
    }
    for t, d in zip(threshold, detect):
        metrics[f"detect_rate-{t}"] = d
    return metrics
def run_loops(env: Env,
              agent: PPOAgent,
              num_episodes: int = -1,
              seed: int = 0,
              reset_cache_state: bool = False,
              threshold: Union[float, Sequence[float]] = 0.75) -> StatsDict:
    """Run many episodes and aggregate their metrics.

    With ``num_episodes == -1`` every victim address in the env's range is
    swept once (plus the empty access, if allowed); otherwise
    ``num_episodes`` episodes run with env-chosen victim addresses.
    """
    # env.seed(seed)
    env.reset(seed=seed)
    metrics = StatsDict()
    num_guess = 0
    num_correct = 0
    tot_length = 0
    if num_episodes == -1:
        start = env.env.victim_address_min
        stop = env.env.victim_address_max + 1 + int(
            env.env._env.allow_empty_victim_access)
        victim_addrs = range(start, stop)
    else:
        # -1 makes run_loop let the environment draw a random secret.
        victim_addrs = [-1] * num_episodes
    for victim_addr in victim_addrs:
        cur_metrics = run_loop(env,
                               agent,
                               victim_addr=victim_addr,
                               threshold=threshold)
        num_guess += cur_metrics["num_guess"]
        num_correct += cur_metrics["num_correct"]
        tot_length += cur_metrics["episode_length"]
        metrics.extend(cur_metrics)
    # Guard against degenerate runs with no guesses / zero total length;
    # the original code raised ZeroDivisionError in those cases.
    metrics.add("overall_correct_rate",
                num_correct / num_guess if num_guess else 0.0)
    metrics.add("overall_bandwith",
                num_guess / tot_length if tot_length else 0.0)
    return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
    """Sample the hand-written textbook attacker against CC-Hunter.

    Same harness as the PPO sampler, but the policy is the deterministic
    TextbookAgent instead of a learned model.
    """
    # Create env
    cfg.env_config.verbose = 1
    env_fac = CacheEnvCCHunterWrapperFactory(
        OmegaConf.to_container(cfg.env_config))
    env = env_fac(index=0)
    ### Load model
    ##model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
    ##                              env.action_space.n, cfg.checkpoint)
    ##model.eval()
    # Create agent
    #agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
    agent = TextbookAgent(cfg.env_config)
    # Run loops
    # NOTE(review): unlike the PPO sampler, cfg.threshold is not forwarded
    # here, so the default detection threshold (0.75) is used -- confirm
    # this is intended.
    metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed)
    logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
    main()
| 5,675 | 29.191489 | 81 | py |
AutoCAT | AutoCAT-main/src/rlmeta/train_ppo_cchunter.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import copy
import logging
import os
import time
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.multiprocessing as mp
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
import model_utils
from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
from metric_callbacks import CCHunterMetricCallbacks
@hydra.main(config_path="./config", config_name="ppo_cchunter")
def main(cfg):
    """Distributed PPO training loop against the CC-Hunter detector.

    Spins up rlmeta model / replay-buffer / controller servers plus parallel
    train and eval loops, then alternates train/eval epochs, saving a model
    checkpoint after every eval phase.
    """
    if cfg.seed is not None:
        random_utils.manual_seed(cfg.seed)
    print(f"workding_dir = {os.getcwd()}")
    my_callbacks = CCHunterMetricCallbacks()
    logging.info(hydra_utils.config_to_json(cfg))
    env_fac = CacheEnvCCHunterWrapperFactory(
        OmegaConf.to_container(cfg.env_config))
    env = env_fac(index=0)
    # The training model lives on the train device; a frozen deep copy on
    # the inference device serves rollout workers' act() requests.
    train_model = model_utils.get_model(
        cfg.model_config, cfg.env_config.window_size,
        env.action_space.n).to(cfg.train_device)
    infer_model = copy.deepcopy(train_model).to(cfg.infer_device)
    infer_model.eval()
    optimizer = make_optimizer(train_model.parameters(), **cfg.optimizer)
    ctrl = Controller()
    rb = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
                      UniformSampler())
    # One RPC server per service: model pool, replay buffer, controller.
    m_server = Server(cfg.m_server_name, cfg.m_server_addr)
    r_server = Server(cfg.r_server_name, cfg.r_server_addr)
    c_server = Server(cfg.c_server_name, cfg.c_server_addr)
    m_server.add_service(RemotableModelPool(infer_model, seed=cfg.seed))
    r_server.add_service(rb)
    c_server.add_service(ctrl)
    servers = ServerList([m_server, r_server, c_server])
    # Remote handles: "a_" for the learner agent, "t_" for train rollouts,
    # "e_" for eval rollouts.
    a_model = wrap_downstream_model(train_model, m_server)
    t_model = make_remote_model(infer_model, m_server)
    e_model = make_remote_model(infer_model, m_server)
    a_ctrl = remote_utils.make_remote(ctrl, c_server)
    t_ctrl = remote_utils.make_remote(ctrl, c_server)
    e_ctrl = remote_utils.make_remote(ctrl, c_server)
    a_rb = make_remote_replay_buffer(rb, r_server, prefetch=cfg.prefetch)
    t_rb = make_remote_replay_buffer(rb, r_server)
    agent = PPOAgent(a_model,
                     replay_buffer=a_rb,
                     controller=a_ctrl,
                     optimizer=optimizer,
                     batch_size=cfg.batch_size,
                     learning_starts=cfg.get("learning_starts", None),
                     entropy_coeff=cfg.get("entropy_coeff", 0.01),
                     model_push_period=cfg.model_push_period)
    t_agent_fac = AgentFactory(PPOAgent, t_model, replay_buffer=t_rb)
    e_agent_fac = AgentFactory(PPOAgent, e_model, deterministic_policy=True)
    # Train loop feeds the replay buffer; eval loop runs greedy rollouts.
    t_loop = ParallelLoop(env_fac,
                          t_agent_fac,
                          t_ctrl,
                          running_phase=Phase.TRAIN,
                          should_update=True,
                          num_rollouts=cfg.num_train_rollouts,
                          num_workers=cfg.num_train_workers,
                          seed=cfg.seed,
                          episode_callbacks=my_callbacks)
    e_loop = ParallelLoop(env_fac,
                          e_agent_fac,
                          e_ctrl,
                          running_phase=Phase.EVAL,
                          should_update=False,
                          num_rollouts=cfg.num_eval_rollouts,
                          num_workers=cfg.num_eval_workers,
                          seed=(None if cfg.seed is None else cfg.seed +
                                cfg.num_train_rollouts),
                          episode_callbacks=my_callbacks)
    loops = LoopList([t_loop, e_loop])
    servers.start()
    loops.start()
    agent.connect()
    start_time = time.perf_counter()
    for epoch in range(cfg.num_epochs):
        stats = agent.train(cfg.steps_per_epoch)
        cur_time = time.perf_counter() - start_time
        info = f"T Epoch {epoch}"
        if cfg.table_view:
            logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
        else:
            logging.info(
                stats.json(info, phase="Train", epoch=epoch, time=cur_time))
        time.sleep(1)
        stats = agent.eval(cfg.num_eval_episodes)
        cur_time = time.perf_counter() - start_time
        info = f"E Epoch {epoch}"
        if cfg.table_view:
            logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
        else:
            logging.info(
                stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
        # Checkpoint after every epoch's eval phase.
        torch.save(train_model.state_dict(), f"ppo_agent-{epoch}.pth")
        time.sleep(1)
    loops.terminate()
    servers.terminate()
if __name__ == "__main__":
    mp.set_start_method("spawn")
    main()
| 5,617 | 36.205298 | 77 | py |
AutoCAT | AutoCAT-main/src/rlmeta/sample_attack.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
from typing import Dict, Optional
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvWrapperFactory
def batch_obs(timestep: TimeStep) -> TimeStep:
    """Prepend a batch dimension to the observation of *timestep*."""
    values = list(timestep)
    values[0] = values[0].unsqueeze(0)  # add batch dim to the obs tensor
    return TimeStep(*values)
def unbatch_action(action: Action) -> Action:
    """Remove the leading batch dimension from a model-produced action."""
    act, info = action
    act.squeeze_(0)  # in-place squeeze of the action tensor
    return Action(act, nested_utils.map_nested(lambda t: t.squeeze(0), info))
def run_loop(env: Env,
             agent: PPOAgent,
             victim_addr: int = -1,
             reset_cache_state: bool = False) -> Dict[str, float]:
    """Play one attack episode and report its length, return, and success.

    Args:
        env: Wrapped cache guessing-game environment.
        agent: Trained PPO attacker.
        victim_addr: Fixed victim address, or -1 for an env-chosen one.
        reset_cache_state: Whether to also randomize the cache state on reset.
    """
    episode_length = 0
    episode_return = 0.0
    if victim_addr == -1:
        timestep = env.reset(reset_cache_state=reset_cache_state)
    else:
        timestep = env.reset(victim_address=victim_addr,
                             reset_cache_state=reset_cache_state)
    agent.observe_init(timestep)
    # BUGFIX: the condition previously read
    # "not timestep.terminated or timestep.truncated", which parses as
    # "(not terminated) or truncated" and keeps stepping a finished episode
    # whenever it is truncated. Parenthesized to match the sibling samplers.
    while not (timestep.terminated or timestep.truncated):
        # Model server requires a batch_dim, so unsqueeze here for local runs.
        timestep = batch_obs(timestep)
        action = agent.act(timestep)
        # Unbatch the action.
        action = unbatch_action(action)
        timestep = env.step(action)
        agent.observe(action, timestep)
        episode_length += 1
        episode_return += timestep.reward
    # Only correct guess has positive reward.
    correct_rate = float(episode_return > 0.0)
    metrics = {
        "episode_length": episode_length,
        "episode_return": episode_return,
        "correct_rate": correct_rate,
    }
    return metrics
def run_loops(env: Env,
              agent: PPOAgent,
              num_episodes: int = -1,
              seed: int = 0,
              reset_cache_state: bool = False) -> StatsDict:
    """Aggregate metrics over many episodes.

    With ``num_episodes == -1`` every victim address in the env's range is
    swept once (plus the empty access, if allowed); otherwise the env picks
    a random victim address each episode.
    """
    # env.seed(seed)
    env.reset(seed=seed)
    metrics = StatsDict()
    if num_episodes == -1:
        lo = env.env.victim_address_min
        hi = env.env.victim_address_max + 1 + int(
            env.env.allow_empty_victim_access)
        addr_schedule = range(lo, hi)
    else:
        addr_schedule = [-1] * num_episodes  # -1 => env draws the secret
    for addr in addr_schedule:
        metrics.extend(
            run_loop(env,
                     agent,
                     victim_addr=addr,
                     reset_cache_state=reset_cache_state))
    return metrics
@hydra.main(config_path="./config", config_name="sample_attack")
def main(cfg):
    """Sample a trained PPO attacker on the cache guessing game.

    Loads the checkpoint named in *cfg*, rolls out episodes, and logs the
    aggregated metrics table (episode length, return, correct-guess rate).
    """
    # Create env
    cfg.env_config.verbose = 1
    env_fac = CacheEnvWrapperFactory(OmegaConf.to_container(cfg.env_config))
    env = env_fac(index=0)
    # Load model
    model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
                                  env.action_space.n, cfg.checkpoint)
    model.eval()
    # Create agent
    agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
    # Run loops
    metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed,
                        cfg.reset_cache_state)
    logging.info("\n\n" + metrics.table(info="sample") + "\n")
if __name__ == "__main__":
    main()
| 3,914 | 28.659091 | 79 | py |
AutoCAT | AutoCAT-main/src/rlmeta/cache_ppo_mlp_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
from rlmeta.agents.ppo.ppo_model import PPOModel
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from models.backbone import CacheBackbone
class CachePPOMlpModel(PPOModel):
    """PPO actor-critic over an MLP cache backbone.

    The shared backbone embeds the cache observation window; two linear
    heads produce the policy logits and the scalar value estimate.
    """
    def __init__(self,
                 latency_dim: int,
                 victim_acc_dim: int,
                 action_dim: int,
                 step_dim: int,
                 window_size: int,
                 action_embed_dim: int,
                 step_embed_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int = 1) -> None:
        super().__init__()
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        # Shared feature extractor for both policy and value heads.
        self.backbone = CacheBackbone(latency_dim, victim_acc_dim, action_dim,
                                      step_dim, window_size, action_embed_dim,
                                      step_embed_dim, hidden_dim, num_layers)
        # Policy head (action logits) and value head (scalar baseline).
        self.linear_a = nn.Linear(self.hidden_dim, self.output_dim)
        self.linear_v = nn.Linear(self.hidden_dim, 1)
        # Lazily resolved on the first act() call.
        self._device = None
    def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (log-policy, value) for a batch of observations."""
        h = self.backbone(obs)
        p = self.linear_a(h)
        logpi = F.log_softmax(p, dim=-1)
        v = self.linear_v(h)
        return logpi, v
    @remote.remote_method(batch_size=128)
    def act(
        self, obs: torch.Tensor, deterministic_policy: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Select actions for a (remote-batched) batch of observations.

        Per sample, picks argmax when its deterministic_policy flag is set,
        otherwise samples from the policy distribution.  Returns the chosen
        actions, their log-probabilities, and the value estimates.
        """
        if self._device is None:
            self._device = next(self.parameters()).device
        with torch.no_grad():
            logpi, v = self.forward(obs)
            greedy_action = logpi.argmax(-1, keepdim=True)
            sample_action = logpi.exp().multinomial(1, replacement=True)
            action = torch.where(deterministic_policy, greedy_action,
                                 sample_action)
            # Log-probability of the action actually taken.
            logpi = logpi.gather(dim=-1, index=action)
        return action, logpi, v
| 2,350 | 30.77027 | 78 | py |
AutoCAT | AutoCAT-main/src/rlmeta/model_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
from cache_ppo_mlp_model import CachePPOMlpModel
from cache_ppo_lstm_model import CachePPOLstmModel
from cache_ppo_transformer_model import CachePPOTransformerModel
def get_model(cfg: Dict[str, Any],
              window_size: int,
              output_dim: int,
              checkpoint: Optional[str] = None) -> nn.Module:
    """Build a cache PPO model from a Hydra/OmegaConf model config.

    Mutates ``cfg.args`` in place to inject the runtime-determined
    ``step_dim`` / ``window_size`` / ``output_dim`` before instantiation.

    Args:
        cfg: Model config with ``type`` ("mlp" | "lstm" | "transformer")
            and constructor ``args``.
        window_size: Observation history length of the environment.
        output_dim: Size of the action space.
        checkpoint: Optional path to a saved state-dict to load.

    Returns:
        The constructed model, or ``None`` for an unrecognized ``cfg.type``.
    """
    cfg.args.step_dim = window_size
    if "window_size" in cfg.args:
        cfg.args.window_size = window_size
    cfg.args.output_dim = output_dim
    model = None
    if cfg.type == "mlp":
        model = CachePPOMlpModel(**cfg.args)
    elif cfg.type == "lstm":
        model = CachePPOLstmModel(**cfg.args)
    elif cfg.type == "transformer":
        model = CachePPOTransformerModel(**cfg.args)
    if model is not None and checkpoint is not None:
        # map_location keeps CPU-only hosts from failing on checkpoints
        # saved from CUDA; callers move the model to their device after.
        params = torch.load(checkpoint, map_location="cpu")
        model.load_state_dict(params)
    return model
| 1,126 | 28.657895 | 73 | py |
AutoCAT | AutoCAT-main/src/rlmeta/plot_cchunter.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# script for plotting figure on paper
import logging
from typing import Dict
#import hydra
#import torch
#import torch.nn
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# sys.path.append("/home/mulong/RL_SCA/src/CacheSimulator/src")
#import rlmeta.utils.nested_utils as nested_utils
import numpy as np
#from rlmeta.agents.ppo.ppo_agent import PPOAgent
#from rlmeta.core.types import Action
#from rlmeta.envs.env import Env
#from rlmeta.utils.stats_dict import StatsDict
#from cchunter_wrapper import CCHunterWrapper
#from cache_env_wrapper import CacheEnvWrapperFactory
#from cache_ppo_model import CachePPOModel
#from cache_ppo_transformer_model import CachePPOTransformerModel
#from textbook_attacker import TextbookAgent
# from cache_guessing_game_env_impl import CacheGuessingGameEnv
# from cchunter_wrapper import CCHunterWrapper
#from cache_env_wrapper import CacheEnvWrapperFactory
#from cache_ppo_model import CachePPOModel
#from cache_ppo_transformer_model import CachePPOTransformerModel
#from cache_ppo_transformer_periodic_model import CachePPOTransformerPeriodicModel
import matplotlib.pyplot as plt
import pandas as pd
#from cache_env_wrapper import CacheEnvCCHunterWrapperFactory
import matplotlib.font_manager as font_manager
from autocorrelation import autocorrelation
# Shared matplotlib font settings for axis labels, titles, and legends.
fontaxes = {
    'family': 'Arial',
#    'color':  'black',
    'weight': 'bold',
    #'size': 6,
}
fontaxes_title = {
    'family': 'Arial',
#    'color':  'black',
    'weight': 'bold',
   # 'size': 9,
}
# FontProperties object for legend / text elements.
font = font_manager.FontProperties(family='Arial',
                                   weight='bold',
                                   style='normal')
def autocorrelation_plot_forked(series, ax=None, n_lags=None, change_deno=False, change_core=False, **kwds):
    """
    Autocorrelation plot for time series.

    Parameters:
    -----------
    series: Time series
    ax: Matplotlib axis object, optional
    n_lags: maximum number of lags to show. Default is min(len(series), 100)
    change_deno: unused; kept for call-site compatibility
    change_core: if True, use pandas ``Series.autocorr`` instead of the
        local ``autocorrelation`` helper
    kwds : keywords
        Options to pass to matplotlib plotting method

    Returns:
    -----------
    class:`matplotlib.axis.Axes`
    """
    import matplotlib.pyplot as plt
    n_full = len(series)
    if n_full <= 2:
        raise ValueError("""len(series) = %i but should be > 2
        to maintain at least 2 points of intersection when autocorrelating
        with lags"""%n_full)
    # Calculate the maximum number of lags permissible
    # Subtract 2 to keep at least 2 points of intersection,
    # otherwise pandas.Series.autocorr will throw a warning about insufficient
    # degrees of freedom
    n_maxlags = n_full #- 2
    # calculate the actual number of lags
    if n_lags is None:
        # Choosing a reasonable number of lags varies between datasets,
        # but if the data longer than 200 points, limit this to 100 lags as a
        # reasonable default for plotting when n_lags is not specified
        n_lags = min(n_maxlags, 100)
    else:
        if n_lags > n_maxlags:
            raise ValueError("n_lags should be < %i (i.e. len(series)-2)"%n_maxlags)
    if ax is None:
        # BUGFIX: plt.gca() stopped accepting Axes kwargs in matplotlib >= 3.6,
        # so set the limits explicitly on the current axes instead.
        ax = plt.gca()
        ax.set_xlim(0, n_lags)
        ax.set_ylim(-1.1, 1.6)
    if not change_core:
        data = np.asarray(series)
        def r(h: int) -> float:
            return autocorrelation(data, h)
    else:
        def r(h):
            return series.autocorr(lag=h)
    # x = np.arange(n_lags) + 1
    x = np.arange(n_lags)
    # y = lmap(r, x)
    y = np.array([r(xi) for xi in x])
    print(y)
    print(f"y = {y}")
    print(f"y_max = {np.max(y[1:])}")
    z95 = 1.959963984540054
    z99 = 2.5758293035489004
    # ax.axhline(y=-z95 / np.sqrt(n_full), color='grey')
    # ax.axhline(y=-z99 / np.sqrt(n_full), linestyle='--', color='grey')
    ax.set_xlabel("Lag (p)", fontdict = fontaxes)
    ax.set_ylabel("Autocorrelation \n Coefficient", fontdict = fontaxes)
    ax.plot(x, y, **kwds)
    if 'label' in kwds:
        ax.legend()
    ax.grid()
    return ax
def main():
plt.figure(num=None, figsize=(5, 2), dpi=300, facecolor='w')
series_human = [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
#series_baseline = [1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# sampled from python sample_cchunter.py checkpoint=/home/ml2558/CacheSimulator/src/rlmeta/data/table8/hpca_ae_exp_8_baseline_new/exp1/ppo_agent-499.pth num_episodes=1
series_baseline = [0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
#series_l2 = [0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1]
# sampled from python sample_cchunter.py checkpoint=/home/ml2558/CacheSimulator/src/rlmeta/data/table8/hpca_ae_exp_8_autocor_new/exp1/ppo_agent-499.pth num_episodes=1
series_l2 = [0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0]
# --- Build the two-panel "event_train" figure ---------------------------------
# NOTE(review): `series_human`, `series_baseline`, `series_l2`, `font`,
# `fontaxes`, `pd`, and `autocorrelation_plot_forked` are defined earlier in
# this script (outside this excerpt) — confirm against the full file.
#series_l2 = [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1]
# Vertically offset the baseline (+1.2) and autocor (+2.4) binary traces so the
# three series occupy separate bands on the shared left axes.
for i in range(0, len(series_baseline)):
    series_baseline[i] += 1.2
for i in range(0, len(series_l2)):
    series_l2[i] += 2.4
# Keep only the first 50 events of each trace for plotting.
series_human = series_human[0:50]
series_baseline = series_baseline[0:50]
series_l2 = series_l2[0:50]
# Left panel: the raw hit/miss event traces.
ax = plt.subplot(121)
ax.set_xlim([0, 48] )
ax.set_ylim([-0.1, 3.7])
ax.set_yticks([])
plt.tick_params(left=False)
# Label the A->V / V->A direction of each of the three offset bands, placed to
# the left of the axis (negative x).
text_x = -10
ax.text(text_x, 0.15, 'A->V', fontproperties=font)
ax.text(text_x, 0.85, 'V->A', fontproperties=font)
ax.text(text_x, 0.15+1.2, 'A->V',fontproperties=font)
ax.text(text_x, 0.85+1.2, 'V->A',fontproperties=font)
ax.text(text_x, 0.15+2.4, 'A->V', fontproperties=font)
ax.text(text_x, 0.85+2.4, 'V->A',fontproperties=font)
#ax.set_xlim([0, 60])
ax.plot(series_human)#, linewidth=4 )
ax.plot(series_baseline)
ax.plot(series_l2)
ax.set_xlabel("Number of cache conflicts", fontdict = fontaxes)
# NOTE(review): this first legend call is immediately replaced by the one
# below; presumably only the second (with explicit labels/placement) was
# intended.
ax.legend(prop={'size': 6, 'family': 'Arial', 'weight':'bold'})
ax.legend(['textbook', 'RL_baseline', 'RL_autocor'], ncol=3,bbox_to_anchor=(2.2,1.28), prop=font)
# Right panel: autocorrelation of each trace (a CC-Hunter-style detector view).
data_human = pd.Series(series_human)
data_baseline = pd.Series(series_baseline)
data_l2 = pd.Series(series_l2)
cache_size = 4
#plt.figure(num=None, figsize=(5.2, 2), dpi=300, facecolor='w')
#plt.subplots_adjust(right = 0.98, top =0.97, bottom=0.24,left=0.13,wspace=0, hspace=0.2)
ax = plt.subplot(122)
# 8 * cache_size lags; change_deno alters the ACF denominator in the forked
# implementation (see autocorrelation_plot_forked's definition).
autocorrelation_plot_forked(data_human,ax=ax, n_lags= 8 * cache_size, change_deno=True) #consider removing -2
autocorrelation_plot_forked(data_baseline, ax=ax,n_lags= 8 * cache_size, change_deno=True) #consider removing -2
autocorrelation_plot_forked(data_l2, ax=ax, n_lags= 8 * cache_size, change_deno=True) #consider removing -2
#plt.legend(['textbook', 'RL_baseline', 'RL_autocor'], ncol=3, prop=font)
# Dashed grey line: detection threshold at 0.75; solid black line: zero axis.
plt.plot([0,40],[0.75,0.75], linestyle='--', color='grey')
# ax.axhline(y=z95 / np.sqrt(n_full), color='grey')
plt.plot([0,40],[0,0], color='black')
ax.set_xlim([0, 32] )
ax.yaxis.set_label_coords(-0.09, .5)
#plt.savefig('cchunter_compare.pdf')
#plt.savefig('cchunter_compare.png')
plt.subplots_adjust(right = 0.999, top =0.85, bottom=0.22,left=0.085,wspace=0.28, hspace=0.2)
# Emit both vector and raster versions of the figure.
plt.savefig('event_train.pdf')
plt.savefig('event_train.png')
# Script entry point: build and save the figure when run directly.
if __name__ == "__main__":
    main()
'''
human
Reset...(also the cache state)
victim address 3
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:0.79
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.8
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.81
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:0.82
Reset...(cache state the same)
victim address 1
victim_address! 3 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.83
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.84
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.85
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.86
Reset...(cache state the same)
victim address 2
victim_address! 1 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.87
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.88
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.89
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.9
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.91
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:0.92
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.93
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.94
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.95
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:0.96
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.97
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:0.98
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:0.99
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
acceee 5 miss
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 1 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 5 hit
Step...
acceee 6 miss
Step...
access 7 hit
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 5 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Episode number of guess: 26
Episode number of corrects: 26
correct rate: 1.0
bandwidth rate: 0.1625
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
/home/mulong/RL_SCA/src/CacheSimulator/src/rlmeta/sample_cchunter.py:75: MatplotlibDeprecationWarning: Calling gca() with keyword arguments was deprecated in Matplotlib 3.4. Starting two minor releases later, gca() will take no keyword arguments. The gca() function should only be used to get the current axes, or if no axes exist, create new axes with default keyword arguments. To create a new axes with non-default arguments, use plt.axes() or plt.subplot().
ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
y = [ 1. -0.98113208 0.96223727 -0.94339623 0.92447455 -0.90566038
0.88671182 -0.86792453 0.84894909 -0.83018868 0.81118637 -0.79245283
0.77342364 -0.75471698 0.73566091 -0.71698113 0.69789819 -0.67924528
0.66013546 -0.64150943 0.62237274 -0.60377358 0.58461001 -0.56603774
0.54684728 -0.52830189 0.50908456 -0.49056604 0.47132183 -0.45283019
0.4335591 -0.41509434]
y_max = 0.9622372735580283
Figure saved as 'cchunter_hit_trace_3_acf.png
Total number of guess: 104
Total number of corrects: 104
Episode total: 640
correct rate: 1.0
bandwidth rate: 0.1625
'''
'''
l2
Reset...(also the cache state)
victim address 3
Step...
victim access 3
Step...
acceee 5 miss
Step...
acceee 4 miss
Step...
acceee 6 miss
Step...
victim access 3
Step...
access 5 hit
Step...
access 4 hit
Step...
access 6 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 5 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 4 hit
Step...
access 6 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 6 hit
Step...
access 4 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
acceee 7 miss
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 5 hit
Step...
access 4 hit
Step...
access 6 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
access 5 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
acceee 7 miss
Step...
victim access 1
Step...
access 6 hit
Step...
access 4 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 1 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 6 hit
Step...
access 4 hit
Step...
access 5 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
access 6 hit
Step...
access 4 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
access 6 hit
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
acceee 7 miss
Step...
victim access 1
Step...
victim access 1
Step...
access 6 hit
Step...
access 4 hit
Step...
acceee 5 miss
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 1 correct guess! True
Step...
victim access 3
Step...
access 6 hit
Step...
access 4 hit
Step...
access 5 hit
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 7 miss
Step...
victim access 0
Step...
victim access 0
Step...
access 6 hit
Step...
acceee 4 miss
Step...
victim access 0
Step...
victim access 0
Step...
victim access 0
Step...
acceee 4 miss
Step...
access 4 hit
Step...
access 4 hit
Step...
access 4 hit
Step...
access 4 hit
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Episode number of guess: 32
Episode number of corrects: 32
correct rate: 1.0
bandwidth rate: 0.19753086419753085
[0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1]
/home/mulong/RL_SCA/src/CacheSimulator/src/rlmeta/sample_cchunter.py:75: MatplotlibDeprecationWarning: Calling gca() with keyword arguments was deprecated in Matplotlib 3.4. Starting two minor releases later, gca() will take no keyword arguments. The gca() function should only be used to get the current axes, or if no axes exist, create new axes with default keyword arguments. To create a new axes with non-default arguments, use plt.axes() or plt.subplot().
ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
y = [ 1. -0.6823596 0.42214715 -0.34085761 0.25558463 -0.17101461
0.0498516 -0.0011716 -0.01648051 -0.03524565 0.08539014 -0.13729204
0.1872608 -0.30731079 0.42507615 -0.40935718 0.35528782 -0.30748653
0.25324143 -0.13764352 0.01525033 0.03219948 0.01689057 -0.00187456
-0.01718347 -0.06820667 0.08468718 -0.10228072 0.11858549 -0.1040967
0.08451144 -0.13817074]
y_max = 0.42507615402640003
Figure saved as 'cchunter_hit_trace_3_acf.png
Total number of guess: 134
Total number of corrects: 134
Episode total: 648
correct rate: 1.0
bandwidth rate: 0.20679012345679013
'''
'''
baseline
Reset...(also the cache state)
victim address 3
Step...
acceee 4 miss
Step...
acceee 7 miss
Step...
acceee 6 miss
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 3 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 0 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 1 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 2 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 3 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 6 hit
Step...
access 7 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
access 6 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 0 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 7 hit
Step...
access 6 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 1
victim_address! 2 correct guess! True
Step...
victim access 1
Step...
access 4 hit
Step...
access 7 hit
Step...
access 6 hit
Step...
correct guess 1
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 1 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 2 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 3 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 0 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 2 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Step...
victim access 0
Step...
acceee 4 miss
Step...
correct guess 0
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 0 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 3
victim_address! 3 correct guess! True
Step...
victim access 3
Step...
access 4 hit
Step...
acceee 7 miss
Step...
correct guess 3
correct rate:1.0
Reset...(cache state the same)
victim address 2
victim_address! 3 correct guess! True
Step...
victim access 2
Step...
access 4 hit
Step...
access 7 hit
Step...
acceee 6 miss
Step...
correct guess 2
correct rate:1.0
Reset...(cache state the same)
victim address 0
victim_address! 2 correct guess! True
Episode number of guess: 38
Episode number of corrects: 38
correct rate: 1.0
bandwidth rate: 0.2360248447204969
[1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
/home/mulong/RL_SCA/src/CacheSimulator/src/rlmeta/sample_cchunter.py:75: MatplotlibDeprecationWarning: Calling gca() with keyword arguments was deprecated in Matplotlib 3.4. Starting two minor releases later, gca() will take no keyword arguments. The gca() function should only be used to get the current axes, or if no axes exist, create new axes with default keyword arguments. To create a new axes with non-default arguments, use plt.axes() or plt.subplot().
ax = plt.gca(xlim=(1, n_lags), ylim=(-1.0, 1.0))
y = [ 1. -0.92995169 0.94312692 -0.92874396 0.91403162 -0.89975845
0.88493632 -0.87077295 0.85584102 -0.84178744 0.82674572 -0.81280193
0.79765042 -0.78381643 0.76855512 -0.75483092 0.73945982 -0.72584541
0.71036451 -0.6968599 0.68126921 -0.6678744 0.65217391 -0.63888889
0.62307861 -0.60990338 0.59398331 -0.58091787 0.56488801 -0.55193237
0.53579271 -0.52294686]
y_max = 0.9431269213877909
Figure saved as 'cchunter_hit_trace_3_acf.png
Total number of guess: 147
Total number of corrects: 147
Episode total: 643
correct rate: 1.0
bandwidth rate: 0.2286158631415241
'''
| 36,087 | 21.153468 | 461 | py |
AutoCAT | AutoCAT-main/src/rlmeta/train_ppo_attack.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import copy
import logging
import os
import time
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.multiprocessing as mp
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
import model_utils
from cache_env_wrapper import CacheEnvWrapperFactory
from metric_callbacks import MetricCallbacks
@hydra.main(config_path="./config", config_name="ppo_attack")
def main(cfg):
    """Train a PPO attacker agent on the cache-guessing environment.

    Builds the environment factory, a train/infer pair of policy models, a
    replay buffer, and three rlmeta RPC servers (model pool, replay buffer,
    phase controller). Parallel rollout loops feed the replay buffer while
    the learner alternates ``cfg.num_epochs`` train/eval epochs, saving a
    model checkpoint after every epoch.

    Args:
        cfg: Hydra-resolved configuration (see ./config/ppo_attack.yaml).
    """
    # Seed all RNGs up front for reproducibility (no-op when seed is unset).
    if cfg.seed is not None:
        random_utils.manual_seed(cfg.seed)

    # NOTE(review): "workding_dir" typo is in the runtime output string;
    # left untouched here.
    print(f"workding_dir = {os.getcwd()}")
    my_callbacks = MetricCallbacks()
    logging.info(hydra_utils.config_to_json(cfg))

    # One throwaway env instance is created only to read the action-space
    # size needed for the model's output head.
    env_fac = CacheEnvWrapperFactory(OmegaConf.to_container(cfg.env_config))
    env = env_fac(index=0)

    # Training model on the train device; a frozen deep copy serves remote
    # inference requests on the (possibly different) infer device.
    train_model = model_utils.get_model(
        cfg.model_config, cfg.env_config.window_size,
        env.action_space.n).to(cfg.train_device)
    infer_model = copy.deepcopy(train_model).to(cfg.infer_device)
    infer_model.eval()

    optimizer = make_optimizer(train_model.parameters(), **cfg.optimizer)

    ctrl = Controller()
    rb = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
                      UniformSampler())

    # Three RPC servers: m = model pool, r = replay buffer, c = controller.
    m_server = Server(cfg.m_server_name, cfg.m_server_addr)
    r_server = Server(cfg.r_server_name, cfg.r_server_addr)
    c_server = Server(cfg.c_server_name, cfg.c_server_addr)
    m_server.add_service(RemotableModelPool(infer_model, seed=cfg.seed))
    r_server.add_service(rb)
    c_server.add_service(ctrl)
    servers = ServerList([m_server, r_server, c_server])

    # Remote handles: a_* for the learner (agent), t_* for training rollouts,
    # e_* for evaluation rollouts.
    a_model = wrap_downstream_model(train_model, m_server)
    t_model = make_remote_model(infer_model, m_server)
    e_model = make_remote_model(infer_model, m_server)

    a_ctrl = remote_utils.make_remote(ctrl, c_server)
    t_ctrl = remote_utils.make_remote(ctrl, c_server)
    e_ctrl = remote_utils.make_remote(ctrl, c_server)

    a_rb = make_remote_replay_buffer(rb, r_server, prefetch=cfg.prefetch)
    t_rb = make_remote_replay_buffer(rb, r_server)

    agent = PPOAgent(a_model,
                     replay_buffer=a_rb,
                     controller=a_ctrl,
                     optimizer=optimizer,
                     batch_size=cfg.batch_size,
                     learning_starts=cfg.get("learning_starts", None),
                     entropy_coeff=cfg.get("entropy_coeff", 0.01),
                     model_push_period=cfg.model_push_period)
    # Training actors write to the replay buffer; eval actors act greedily
    # and do not.
    t_agent_fac = AgentFactory(PPOAgent, t_model, replay_buffer=t_rb)
    e_agent_fac = AgentFactory(PPOAgent, e_model, deterministic_policy=True)

    # Evaluation uses a seed offset by num_train_rollouts so its episode
    # stream does not duplicate the training workers'.
    t_loop = ParallelLoop(env_fac,
                          t_agent_fac,
                          t_ctrl,
                          running_phase=Phase.TRAIN,
                          should_update=True,
                          num_rollouts=cfg.num_train_rollouts,
                          num_workers=cfg.num_train_workers,
                          seed=cfg.seed,
                          episode_callbacks=my_callbacks)
    e_loop = ParallelLoop(env_fac,
                          e_agent_fac,
                          e_ctrl,
                          running_phase=Phase.EVAL,
                          should_update=False,
                          num_rollouts=cfg.num_eval_rollouts,
                          num_workers=cfg.num_eval_workers,
                          seed=(None if cfg.seed is None else cfg.seed +
                                cfg.num_train_rollouts),
                          episode_callbacks=my_callbacks)
    loops = LoopList([t_loop, e_loop])

    # Startup order matters: servers first, then the rollout loops that
    # connect to them, and finally the learner.
    servers.start()
    loops.start()
    agent.connect()

    start_time = time.perf_counter()
    for epoch in range(cfg.num_epochs):
        # --- Train phase -------------------------------------------------
        stats = agent.train(cfg.steps_per_epoch)
        cur_time = time.perf_counter() - start_time
        info = f"T Epoch {epoch}"
        if cfg.table_view:
            logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
        else:
            logging.info(
                stats.json(info, phase="Train", epoch=epoch, time=cur_time))
        time.sleep(1)

        # --- Eval phase (training loops kept alive) + checkpoint ---------
        stats = agent.eval(cfg.num_eval_episodes, keep_training_loops=True)
        cur_time = time.perf_counter() - start_time
        info = f"E Epoch {epoch}"
        if cfg.table_view:
            logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
        else:
            logging.info(
                stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
        torch.save(train_model.state_dict(), f"ppo_agent-{epoch}.pth")
        time.sleep(1)

    loops.terminate()
    servers.terminate()


if __name__ == "__main__":
    # NOTE(review): "spawn" start method — presumably required for CUDA /
    # rlmeta worker compatibility; confirm before changing.
    mp.set_start_method("spawn")
    main()
| 5,600 | 36.34 | 77 | py |
AutoCAT | AutoCAT-main/src/rlmeta/cache_ppo_transformer_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
from rlmeta.agents.ppo.ppo_model import PPOModel
class CachePPOTransformerModel(PPOModel):
    """Transformer-based PPO policy/value network for cache observations.

    Each observation step carries four integer fields
    (latency, victim-access flag, action, step index); steps whose step
    index is -1 are treated as padding and contribute zero features.
    The four fields are one-hot encoded / embedded, projected to
    ``hidden_dim``, run through a Transformer encoder, mean-pooled over
    time, and fed to separate policy and value heads.
    """

    def __init__(self,
                 latency_dim: int,
                 victim_acc_dim: int,
                 action_dim: int,
                 step_dim: int,
                 action_embed_dim: int,
                 step_embed_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int = 1) -> None:
        super().__init__()

        # Cardinalities of the raw integer observation fields.
        self.latency_dim = latency_dim
        self.victim_acc_dim = victim_acc_dim
        self.action_dim = action_dim
        self.step_dim = step_dim
        # Embedding widths for the action and step fields.
        self.action_embed_dim = action_embed_dim
        self.step_embed_dim = step_embed_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.num_layers = num_layers
        # Per-step feature width: two one-hot fields plus two embeddings.
        self.input_dim = (latency_dim + victim_acc_dim + action_embed_dim +
                          step_embed_dim)

        self.action_embed = nn.Embedding(action_dim, action_embed_dim)
        self.step_embed = nn.Embedding(step_dim, step_embed_dim)
        self.linear_i = nn.Linear(self.input_dim, hidden_dim)
        layer = nn.TransformerEncoderLayer(d_model=hidden_dim,
                                           nhead=8,
                                           dropout=0.0)
        self.encoder = nn.TransformerEncoder(layer, num_layers)
        # Policy head (logits over actions) and scalar value head.
        self.linear_a = nn.Linear(hidden_dim, output_dim)
        self.linear_v = nn.Linear(hidden_dim, 1)

        self._device = None

    def make_one_hot(self, src: torch.Tensor, num_classes: int,
                     mask: torch.Tensor) -> torch.Tensor:
        """One-hot encode ``src``; rows flagged by ``mask`` become all-zero."""
        encoded = F.one_hot(src.masked_fill(mask, 0), num_classes)
        return encoded.masked_fill(mask.unsqueeze(-1), 0.0)

    def make_embedding(self, src: torch.Tensor, embed: nn.Embedding,
                       mask: torch.Tensor) -> torch.Tensor:
        """Embed ``src`` via ``embed``; rows flagged by ``mask`` become zero."""
        vectors = embed(src.masked_fill(mask, 0))
        return vectors.masked_fill(mask.unsqueeze(-1), 0.0)

    def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (log-policy, value) for a (batch, seq, 4) observation."""
        obs = obs.to(torch.int64)
        assert obs.dim() == 3

        latency, victim_acc, action, step = torch.unbind(obs, dim=-1)
        # Step index -1 marks padded positions.
        pad = (step == -1)
        features = torch.cat(
            (self.make_one_hot(latency, self.latency_dim, pad),
             self.make_one_hot(victim_acc, self.victim_acc_dim, pad),
             self.make_embedding(action, self.action_embed, pad),
             self.make_embedding(step, self.step_embed, pad)),
            dim=-1)

        # Project, move time to dim 0 for the encoder, then mean-pool time.
        seq = self.linear_i(features).transpose(0, 1).contiguous()
        pooled = self.encoder(seq).mean(dim=0)

        logpi = F.log_softmax(self.linear_a(pooled), dim=-1)
        value = self.linear_v(pooled)
        return logpi, value

    @remote.remote_method(batch_size=128)
    def act(
        self, obs: torch.Tensor, deterministic_policy: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Select an action per observation.

        Picks the argmax action where ``deterministic_policy`` is true,
        otherwise samples from the policy distribution. Returns the chosen
        action, its log-probability, and the value estimate.
        """
        with torch.no_grad():
            logpi, value = self.forward(obs)
            greedy = logpi.argmax(-1, keepdim=True)
            sampled = logpi.exp().multinomial(1, replacement=True)
            action = torch.where(deterministic_policy, greedy, sampled)
            logpi = logpi.gather(dim=-1, index=action)
        return action, logpi, value
| 4,143 | 34.118644 | 78 | py |
AutoCAT | AutoCAT-main/src/rlmeta/sample_cyclone.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
from typing import Dict, Optional, Sequence
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
def batch_obs(timestep: TimeStep) -> TimeStep:
    """Return a copy of *timestep* whose observation carries a leading batch dim."""
    obs = timestep.observation
    return TimeStep(obs.unsqueeze(0), timestep.reward, timestep.terminated,
                    timestep.truncated, timestep.info)
def unbatch_action(action: Action) -> Action:
    """Strip the leading batch dimension from an action and every tensor in its info."""
    def _drop_batch(t):
        return t.squeeze(0)
    act, info = action
    act.squeeze_(0)  # in-place: removes the batch dim on the action tensor
    return Action(act, nested_utils.map_nested(_drop_batch, info))
def run_loop(env: Env,
             agent: PPOAgent,
             victim_addr: int = -1) -> Dict[str, float]:
    """Run one evaluation episode and collect attack metrics.

    Args:
        env: Environment to run.
        agent: PPO agent that selects the actions.
        victim_addr: Fixed victim address for this episode, or -1 to let the
            environment pick one on reset.

    Returns:
        Per-episode metrics: length, return, guess counts, guess accuracy,
        guess bandwidth, and the number of steps flagged as a cyclone attack.
    """
    episode_length = 0
    episode_return = 0.0
    num_guess = 0
    num_correct = 0
    cyclone_attack = 0
    if victim_addr == -1:
        timestep = env.reset()
    else:
        timestep = env.reset(victim_address=victim_addr)
    agent.observe_init(timestep)
    while not (timestep.terminated or timestep.truncated):
        # Model server requires a batch_dim, so unsqueeze here for local runs.
        timestep = batch_obs(timestep)
        action = agent.act(timestep)
        # Unbatch the action.
        action = unbatch_action(action)
        timestep = env.step(action)
        agent.observe(action, timestep)
        episode_length += 1
        episode_return += timestep.reward
        if "guess_correct" in timestep.info:
            num_guess += 1
            if timestep.info["guess_correct"]:
                num_correct += 1
        cyclone_attack += timestep.info.get("cyclone_attack", 0)
    metrics = {
        "episode_length": episode_length,
        "episode_return": episode_return,
        "num_guess": num_guess,
        "num_correct": num_correct,
        # Guard against ZeroDivisionError when the episode contains no guess
        # (or terminates immediately).
        "correct_rate": num_correct / num_guess if num_guess > 0 else 0.0,
        "bandwith": num_guess / episode_length if episode_length > 0 else 0.0,
        "cyclone_attack": cyclone_attack,
    }
    return metrics
def run_loops(env: Env,
              agent: PPOAgent,
              num_episodes: int = -1,
              seed: int = 0,
              reset_cache_state: bool = False) -> StatsDict:
    """Run evaluation episodes and aggregate attack metrics.

    With ``num_episodes == -1``, sweeps every possible victim address once;
    otherwise runs ``num_episodes`` episodes with environment-chosen victim
    addresses.
    """
    # env.seed(seed)
    env.reset(seed=seed)
    metrics = StatsDict()
    num_guess = 0
    num_correct = 0
    tot_length = 0
    if num_episodes == -1:
        start = env.env.victim_address_min
        # One extra address when empty victim accesses are allowed.
        stop = env.env.victim_address_max + 1 + int(
            env.env._env.allow_empty_victim_access)
        for victim_addr in range(start, stop):
            cur_metrics = run_loop(env, agent, victim_addr=victim_addr)
            num_guess += cur_metrics["num_guess"]
            num_correct += cur_metrics["num_correct"]
            tot_length += cur_metrics["episode_length"]
            metrics.extend(cur_metrics)
    else:
        for _ in range(num_episodes):
            cur_metrics = run_loop(env, agent, victim_addr=-1)
            num_guess += cur_metrics["num_guess"]
            num_correct += cur_metrics["num_correct"]
            tot_length += cur_metrics["episode_length"]
            metrics.extend(cur_metrics)
    # Guard against ZeroDivisionError when no guesses/steps were recorded.
    metrics.add("overall_correct_rate",
                num_correct / num_guess if num_guess > 0 else 0.0)
    metrics.add("overall_bandwith",
                num_guess / tot_length if tot_length > 0 else 0.0)
    return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
    """Evaluate a trained PPO cache-attack agent in the cyclone-wrapped env.

    NOTE(review): the hydra config_name is "sample_cchunter" although this
    script samples the cyclone wrapper — confirm the config is shared.
    """
    # Create env
    cfg.env_config.verbose = 1
    env_fac = CacheEnvCycloneWrapperFactory(
        OmegaConf.to_container(cfg.env_config))
    env = env_fac(index=0)
    # Load model
    model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
                                  env.action_space.n, cfg.checkpoint)
    model.eval()
    # Create agent
    agent = PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
    # Run loops
    metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed)
    logging.info("\n\n" + metrics.table(info="sample") + "\n")
# Script entry point.
if __name__ == "__main__":
    main()
| 4,357 | 28.053333 | 79 | py |
AutoCAT | AutoCAT-main/src/rlmeta/train_ppo_cyclone.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import copy
import logging
import os
import time
import hydra
from omegaconf import DictConfig, OmegaConf
import torch
import torch.multiprocessing as mp
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.types import Action, TimeStep
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
import model_utils
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
from metric_callbacks import CycloneMetricCallbacks
@hydra.main(config_path="./config", config_name="ppo_cyclone")
def main(cfg):
    """Distributed PPO training against the cyclone-wrapped cache environment.

    Sets up model/replay-buffer/controller RPC servers, parallel train and
    eval rollout loops, then alternates train/eval epochs, checkpointing the
    model after every epoch.
    """
    if cfg.seed is not None:
        random_utils.manual_seed(cfg.seed)
    print(f"workding_dir = {os.getcwd()}")
    my_callbacks = CycloneMetricCallbacks()
    logging.info(hydra_utils.config_to_json(cfg))
    env_fac = CacheEnvCycloneWrapperFactory(
        OmegaConf.to_container(cfg.env_config))
    env = env_fac(index=0)
    # Training model lives on the train device; a frozen copy serves
    # inference requests from rollout workers.
    train_model = model_utils.get_model(
        cfg.model_config, cfg.env_config.window_size,
        env.action_space.n).to(cfg.train_device)
    infer_model = copy.deepcopy(train_model).to(cfg.infer_device)
    infer_model.eval()
    optimizer = make_optimizer(train_model.parameters(), **cfg.optimizer)
    ctrl = Controller()
    rb = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
                      UniformSampler())
    # One RPC server each for the model pool, replay buffer and controller.
    m_server = Server(cfg.m_server_name, cfg.m_server_addr)
    r_server = Server(cfg.r_server_name, cfg.r_server_addr)
    c_server = Server(cfg.c_server_name, cfg.c_server_addr)
    m_server.add_service(RemotableModelPool(infer_model, seed=cfg.seed))
    r_server.add_service(rb)
    c_server.add_service(ctrl)
    servers = ServerList([m_server, r_server, c_server])
    # Remote handles: "a_" = learner agent, "t_" = train loop, "e_" = eval loop.
    a_model = wrap_downstream_model(train_model, m_server)
    t_model = make_remote_model(infer_model, m_server)
    e_model = make_remote_model(infer_model, m_server)
    a_ctrl = remote_utils.make_remote(ctrl, c_server)
    t_ctrl = remote_utils.make_remote(ctrl, c_server)
    e_ctrl = remote_utils.make_remote(ctrl, c_server)
    a_rb = make_remote_replay_buffer(rb, r_server, prefetch=cfg.prefetch)
    t_rb = make_remote_replay_buffer(rb, r_server)
    agent = PPOAgent(a_model,
                     replay_buffer=a_rb,
                     controller=a_ctrl,
                     optimizer=optimizer,
                     batch_size=cfg.batch_size,
                     learning_starts=cfg.get("learning_starts", None),
                     entropy_coeff=cfg.get("entropy_coeff", 0.01),
                     model_push_period=cfg.model_push_period)
    t_agent_fac = AgentFactory(PPOAgent, t_model, replay_buffer=t_rb)
    e_agent_fac = AgentFactory(PPOAgent, e_model, deterministic_policy=True)
    t_loop = ParallelLoop(env_fac,
                          t_agent_fac,
                          t_ctrl,
                          running_phase=Phase.TRAIN,
                          should_update=True,
                          num_rollouts=cfg.num_train_rollouts,
                          num_workers=cfg.num_train_workers,
                          seed=cfg.seed,
                          episode_callbacks=my_callbacks)
    e_loop = ParallelLoop(env_fac,
                          e_agent_fac,
                          e_ctrl,
                          running_phase=Phase.EVAL,
                          should_update=False,
                          num_rollouts=cfg.num_eval_rollouts,
                          num_workers=cfg.num_eval_workers,
                          seed=(None if cfg.seed is None else cfg.seed +
                                cfg.num_train_rollouts),
                          episode_callbacks=my_callbacks)
    loops = LoopList([t_loop, e_loop])
    servers.start()
    loops.start()
    agent.connect()
    start_time = time.perf_counter()
    for epoch in range(cfg.num_epochs):
        stats = agent.train(cfg.steps_per_epoch)
        cur_time = time.perf_counter() - start_time
        info = f"T Epoch {epoch}"
        if cfg.table_view:
            logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
        else:
            logging.info(
                stats.json(info, phase="Train", epoch=epoch, time=cur_time))
        time.sleep(1)
        stats = agent.eval(cfg.num_eval_episodes)
        cur_time = time.perf_counter() - start_time
        info = f"E Epoch {epoch}"
        if cfg.table_view:
            logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
        else:
            logging.info(
                stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
        # Checkpoint after every epoch so training can be resumed.
        torch.save(train_model.state_dict(), f"ppo_agent-{epoch}.pth")
        time.sleep(1)
    loops.terminate()
    servers.terminate()
if __name__ == "__main__":
    # "spawn" is required so worker processes can safely share CUDA tensors.
    mp.set_start_method("spawn")
    main()
| 5,612 | 36.172185 | 77 | py |
AutoCAT | AutoCAT-main/src/rlmeta/sample_cyclone_textbook.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
import os
import sys
from typing import Dict, Optional, Sequence, Union
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
import logging
from typing import Dict, Optional, Sequence
import hydra
from omegaconf import DictConfig, OmegaConf
import numpy as np
import torch
import torch.nn
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
import model_utils
from cache_env_wrapper import CacheEnvCycloneWrapperFactory
from textbook_attacker import TextbookAgent
def batch_obs(timestep: TimeStep) -> TimeStep:
    """Add a leading batch dimension to the observation of *timestep*."""
    batched = timestep.observation.unsqueeze(0)
    return TimeStep(batched, timestep.reward, timestep.terminated,
                    timestep.truncated, timestep.info)
def unbatch_action(action: Action) -> Action:
    """Squeeze the batch dim out of every tensor in the action's info.

    The action tensor itself is left untouched (the textbook agent already
    emits unbatched actions).
    """
    act, info = action
    return Action(act, nested_utils.map_nested(lambda t: t.squeeze(0), info))
def _autocorrelation(x: "np.ndarray", lag: int) -> float:
    """Normalized autocorrelation of *x* at the given *lag*.

    Returns 0.0 for a zero-variance (constant) sequence instead of NaN.
    """
    if lag == 0:
        return 1.0
    centered = x - x.mean()
    denom = float(np.dot(centered, centered))
    if denom == 0.0:
        return 0.0
    return float(np.dot(centered[:-lag], centered[lag:])) / denom


def max_autocorr(data: Sequence[int], n: int) -> float:
    """Maximum autocorrelation of *data* over lags 1 .. min(len(data), n) - 1.

    Bug fix: the original body called an undefined ``autocorrelation``
    function (NameError at runtime); the helper is now defined above.  Also
    returns 0.0 instead of crashing when fewer than two lags are available.
    """
    n = min(len(data), n)
    if n <= 1:
        # No lag >= 1 to evaluate; original would crash on an empty max().
        return 0.0
    x = np.asarray(data, dtype=np.float64)
    corr = [_autocorrelation(x, i) for i in range(n)]
    corr = np.asarray(corr[1:])
    corr = np.nan_to_num(corr)
    return corr.max()
def run_loop(env: Env,
             agent: PPOAgent,
             victim_addr: int = -1) -> Dict[str, float]:
    """Run one evaluation episode and collect attack metrics.

    Args:
        env: Environment to run.
        agent: Agent (PPO or textbook) that selects the actions.
        victim_addr: Fixed victim address for this episode, or -1 to let the
            environment pick one on reset.

    Returns:
        Per-episode metrics: length, return, guess counts, guess accuracy,
        guess bandwidth, and the number of steps flagged as a cyclone attack.
    """
    episode_length = 0
    episode_return = 0.0
    num_guess = 0
    num_correct = 0
    cyclone_attack = 0
    if victim_addr == -1:
        timestep = env.reset()
    else:
        timestep = env.reset(victim_address=victim_addr)
    agent.observe_init(timestep)
    while not (timestep.terminated or timestep.truncated):
        # Model server requires a batch_dim, so unsqueeze here for local runs.
        timestep = batch_obs(timestep)
        action = agent.act(timestep)
        # Unbatch the action.
        action = unbatch_action(action)
        timestep = env.step(action)
        agent.observe(action, timestep)
        episode_length += 1
        episode_return += timestep.reward
        if "guess_correct" in timestep.info:
            num_guess += 1
            if timestep.info["guess_correct"]:
                num_correct += 1
        cyclone_attack += timestep.info.get("cyclone_attack", 0)
    metrics = {
        "episode_length": episode_length,
        "episode_return": episode_return,
        "num_guess": num_guess,
        "num_correct": num_correct,
        # Guard against ZeroDivisionError when the episode contains no guess
        # (or terminates immediately).
        "correct_rate": num_correct / num_guess if num_guess > 0 else 0.0,
        "bandwith": num_guess / episode_length if episode_length > 0 else 0.0,
        "cyclone_attack": cyclone_attack,
    }
    return metrics
def run_loops(env: Env,
              agent: PPOAgent,
              num_episodes: int = -1,
              seed: int = 0,
              reset_cache_state: bool = False) -> StatsDict:
    """Run evaluation episodes and aggregate attack metrics.

    With ``num_episodes == -1``, sweeps every possible victim address once;
    otherwise runs ``num_episodes`` episodes with environment-chosen victim
    addresses.
    """
    # env.seed(seed)
    env.reset(seed=seed)
    metrics = StatsDict()
    num_guess = 0
    num_correct = 0
    tot_length = 0
    if num_episodes == -1:
        start = env.env.victim_address_min
        # One extra address when empty victim accesses are allowed.
        stop = env.env.victim_address_max + 1 + int(
            env.env._env.allow_empty_victim_access)
        for victim_addr in range(start, stop):
            cur_metrics = run_loop(env, agent, victim_addr=victim_addr)
            num_guess += cur_metrics["num_guess"]
            num_correct += cur_metrics["num_correct"]
            tot_length += cur_metrics["episode_length"]
            metrics.extend(cur_metrics)
    else:
        for _ in range(num_episodes):
            cur_metrics = run_loop(env, agent, victim_addr=-1)
            num_guess += cur_metrics["num_guess"]
            num_correct += cur_metrics["num_correct"]
            tot_length += cur_metrics["episode_length"]
            metrics.extend(cur_metrics)
    # Guard against ZeroDivisionError when no guesses/steps were recorded.
    metrics.add("overall_correct_rate",
                num_correct / num_guess if num_guess > 0 else 0.0)
    metrics.add("overall_bandwith",
                num_guess / tot_length if tot_length > 0 else 0.0)
    return metrics
@hydra.main(config_path="./config", config_name="sample_cchunter")
def main(cfg):
    """Evaluate the hand-written textbook attacker in the cyclone-wrapped env.

    Unlike the PPO sampler, no model checkpoint is loaded — the agent follows
    a fixed textbook attack strategy.
    """
    # Create env
    cfg.env_config.verbose = 1
    env_fac = CacheEnvCycloneWrapperFactory(
        OmegaConf.to_container(cfg.env_config))
    env = env_fac(index=0)
    # Load model
    #model = model_utils.get_model(cfg.model_config, cfg.env_config.window_size,
    #                              env.action_space.n, cfg.checkpoint)
    #model.eval()
    # Create agent
    agent = TextbookAgent(
        cfg.env_config
    )  #PPOAgent(model, deterministic_policy=cfg.deterministic_policy)
    # Run loops
    metrics = run_loops(env, agent, cfg.num_episodes, cfg.seed)
    logging.info("\n\n" + metrics.table(info="sample") + "\n")
# Script entry point.
if __name__ == "__main__":
    main()
| 5,143 | 26.508021 | 80 | py |
AutoCAT | AutoCAT-main/src/rlmeta/cache_ppo_lstm_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import os
import sys
from typing import Dict, List, Tuple
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
from rlmeta.agents.ppo.ppo_model import PPOModel
class CachePPOLstmModel(PPOModel):
    """Actor-critic PPO model for the cache-attack environment using an LSTM
    encoder over the observation history.

    Each observation step is a ``(latency, victim_access, action, step)``
    tuple; latency/victim_access are one-hot encoded and the action is
    embedded (the step field is ignored by this model).  The sequence is
    reversed so that padding sits at the front (pre-padding), encoded by a
    bias-free LSTM, and the layer-averaged hidden and cell states are
    concatenated before the policy and value heads.
    """
    def __init__(self,
                 latency_dim: int,
                 victim_acc_dim: int,
                 action_dim: int,
                 step_dim: int,
                 action_embed_dim: int,
                 step_embed_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 num_layers: int = 1) -> None:
        super().__init__()
        self.latency_dim = latency_dim
        self.victim_acc_dim = victim_acc_dim
        self.action_dim = action_dim
        self.step_dim = step_dim
        # self.window_size = window_size
        self.action_embed_dim = action_embed_dim
        # self.step_embed_dim = step_embed_dim
        # self.input_dim = (self.latency_dim + self.victim_acc_dim +
        #                   self.action_embed_dim + self.step_embed_dim)
        self.input_dim = (self.latency_dim + self.victim_acc_dim +
                          self.action_embed_dim)
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.num_layers = num_layers
        self.action_embed = nn.Embedding(self.action_dim,
                                         self.action_embed_dim)
        # self.step_embed = nn.Embedding(self.step_dim, self.step_embed_dim)
        self.linear_i = nn.Linear(self.input_dim, self.hidden_dim)
        self.encoder = nn.LSTM(
            self.hidden_dim,
            self.hidden_dim,
            self.num_layers,
            bias=False,  # Disable bias for pre-padding sequence
            bidirectional=False)
        # Heads take [h ; c] concatenated, hence 2 * hidden_dim inputs.
        self.linear_a = nn.Linear(2 * self.hidden_dim, self.output_dim)
        self.linear_v = nn.Linear(2 * self.hidden_dim, 1)
        self._device = None
    def make_one_hot(self, src: torch.Tensor,
                     num_classes: int) -> torch.Tensor:
        """One-hot encode *src*; padding entries (-1) become all-zero rows."""
        mask = (src == -1)
        src = src.masked_fill(mask, 0)
        ret = F.one_hot(src, num_classes)
        return ret.masked_fill(mask.unsqueeze(-1), 0.0)
    def make_embedding(self, src: torch.Tensor,
                       embed: nn.Embedding) -> torch.Tensor:
        """Embed *src* via *embed*; padding entries (-1) become all-zero rows."""
        mask = (src == -1)
        src = src.masked_fill(mask, 0)
        ret = embed(src)
        return ret.masked_fill(mask.unsqueeze(-1), 0.0)
    def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (log-policy, value) for a batch of observation windows.

        *obs* has shape (batch, seq_len, 4); -1 marks padding entries.
        """
        obs = obs.to(torch.int64)
        assert obs.dim() == 3
        # batch_size = obs.size(0)
        obs = torch.flip(obs, dims=(1, ))  # Reverse input to pre-padding
        l, v, act, _ = torch.unbind(obs, dim=-1)
        l = self.make_one_hot(l, self.latency_dim)
        v = self.make_one_hot(v, self.victim_acc_dim)
        act = self.make_embedding(act, self.action_embed)
        # stp = self.make_embedding(stp, self.step_embed)
        x = torch.cat((l, v, act), dim=-1)
        x = self.linear_i(x)
        # LSTM expects (seq_len, batch, hidden).
        x = x.transpose(0, 1).contiguous()
        _, (h, c) = self.encoder(x)
        # Average hidden/cell states over LSTM layers, then concatenate.
        h = h.mean(dim=0)
        c = c.mean(dim=0)
        h = torch.cat((h, c), dim=-1)
        p = self.linear_a(h)
        logpi = F.log_softmax(p, dim=-1)
        v = self.linear_v(h)
        return logpi, v
    @remote.remote_method(batch_size=128)
    def act(
        self, obs: torch.Tensor, deterministic_policy: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Pick an action per observation: argmax if deterministic, else sample.

        Returns (action, log-prob of action, value estimate).
        """
        with torch.no_grad():
            logpi, v = self.forward(obs)
            greedy_action = logpi.argmax(-1, keepdim=True)
            sample_action = logpi.exp().multinomial(1, replacement=True)
            action = torch.where(deterministic_policy, greedy_action,
                                 sample_action)
            logpi = logpi.gather(dim=-1, index=action)
        return action, logpi, v
| 4,131 | 32.322581 | 78 | py |
AutoCAT | AutoCAT-main/src/rlmeta/cyclone_svm_trainer.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# Author: Mulong Luo
# date: 2022.6.28
# usage: to train the svm classifier of cycloen by feeding
# the date from TextbookAgent as malicious traces
# and spec traces for benign traces
import logging
from typing import Dict
import hydra
import torch
import torch.nn
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# sys.path.append("/home/mulong/RL_SCA/src/CacheSimulator/src")
import rlmeta.utils.nested_utils as nested_utils
import numpy as np
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.types import Action
from rlmeta.envs.env import Env
from rlmeta.utils.stats_dict import StatsDict
from textbook_attacker import TextbookAgent
# from cache_guessing_game_env_impl import CacheGuessingGameEnv
# from cchunter_wrapper import CCHunterWrapper
from cache_env_wrapper import CacheEnvWrapperFactory, CacheEnvCycloneWrapperFactory
from cyclone_wrapper import CycloneWrapper
def signal_handler(sig, frame):
    """Ctrl+C handler: save a checkpoint, then exit.

    NOTE(review): ``trainer`` is not defined anywhere in this module, so this
    handler would raise NameError if it ever fired — confirm it is meant to
    reference a global installed elsewhere before registering it.
    """
    print('You pressed Ctrl+C!')
    checkpoint = trainer.save()
    print("checkpoint saved at", checkpoint)
    sys.exit(0)
class SpecAgent():
    """Replays a two-domain SPEC memory trace as benign cache accesses.

    The first domain id seen in the trace file is treated as the attacker;
    the next distinct domain id (if any) as the victim.  ``act`` converts
    each trace line into a cache-set access action; victim lines report the
    victim address through the action info instead.
    """
    def __init__(self, env_config, trace_file):
        self.local_step = 0
        self.lat = []
        self.no_prime = False # set to true after first prime
        if "cache_configs" in env_config:
            #self.logger.info('Load config from JSON')
            self.configs = env_config["cache_configs"]
            self.num_ways = self.configs['cache_1']['associativity']
            self.cache_size = self.configs['cache_1']['blocks']
            attacker_addr_s = env_config["attacker_addr_s"] if "attacker_addr_s" in env_config else 4
            attacker_addr_e = env_config["attacker_addr_e"] if "attacker_addr_e" in env_config else 7
            victim_addr_s = env_config["victim_addr_s"] if "victim_addr_s" in env_config else 0
            victim_addr_e = env_config["victim_addr_e"] if "victim_addr_e" in env_config else 3
            flush_inst = env_config["flush_inst"] if "flush_inst" in env_config else False
            self.allow_empty_victim_access = env_config["allow_empty_victim_access"] if "allow_empty_victim_access" in env_config else False
            assert(self.num_ways == 1) # currently only support direct-map cache
            assert(flush_inst == False) # do not allow flush instruction
            assert(attacker_addr_e - attacker_addr_s == victim_addr_e - victim_addr_s ) # address space must be shared
            #must be no shared address space
            assert( ( attacker_addr_e + 1 == victim_addr_s ) or ( victim_addr_e + 1 == attacker_addr_s ) )
            assert(self.allow_empty_victim_access == False)
        self.trace_file = trace_file
        # Scan the trace for the first two distinct domain ids.
        self.fp = open(self.trace_file)
        line = self.fp.readline().split()
        self.domain_id_0 = line[0]
        self.domain_id_1 = line[0]
        line = self.fp.readline().split()
        # BUG FIX: ``line`` is a list, so the original ``while line != ''``
        # was always true, and a trace with a single domain id would index
        # ``line[0]`` on the empty list at EOF (IndexError).  An empty list
        # is falsy, so ``while line`` terminates correctly at end of file.
        while line:
            self.domain_id_1 = line[0]
            if self.domain_id_1 != self.domain_id_0:
                break
            line = self.fp.readline().split()
        self.fp.close()
        # Re-open so act() replays the trace from the beginning.
        self.fp = open(self.trace_file)
    def act(self, timestep):
        """Return (action, info) for the next trace line.

        Attacker-domain lines map to a cache-set access action; victim-domain
        lines map to the sentinel action ``cache_size`` with the victim
        address in info.  At EOF, info contains ``{"file_done": True}``.
        """
        info = {}
        line = self.fp.readline().split()
        if len(line) == 0:
            action = self.cache_size
            addr = 0#addr % self.cache_size
            info={"file_done" : True}
            return action, info
        domain_id = line[0]
        cache_line_size = 8
        # Column 3 holds the hex byte address; convert to a cache-line index.
        addr = int( int(line[3], 16) / cache_line_size )
        print(addr)
        if domain_id == self.domain_id_0: # attacker access
            action = addr % self.cache_size
            info ={}
        else: # domain_id = self.domain_id_1: # victim access
            action = self.cache_size
            addr = addr % self.cache_size
            info={"reset_victim_addr": True, "victim_addr": addr}
        return action, info
@hydra.main(config_path="./config", config_name="sample_cyclone")
def main(cfg):
    """Generate SVM training data for the cyclone detector.

    Runs the textbook attacker for ``repeat`` episodes against the
    cyclone-wrapped cache environment and appends each episode's cyclone
    feature vector (labelled malicious) to ``svm_data_path``.  The benign
    SPEC-trace branch below is currently disabled (triple-quoted out).
    """
    repeat = 80000
    trace_file = '/home/mulong/remix3.txt'
    svm_data_path = 'autocat.svm.txt' #trace_file + '.svm.txt'
    #create env
    cfg.env_config['verbose'] = 1
    # generate dataset for malicious traces
    cfg.env_config['cyclone_collect_data'] = True
    cfg.env_config['cyclone_malicious_trace'] = True
    env_fac = CacheEnvCycloneWrapperFactory(cfg.env_config)
    env = env_fac(index=0)
    env.svm_data_path = svm_data_path
    # Truncate any stale output file before appending episode data.
    fp = open(svm_data_path,'w')
    fp.close()
    agent = TextbookAgent(cfg.env_config)
    episode_length = 0
    episode_return = 0.0
    for i in range(repeat):
        timestep = env.reset()
        num_guess = 0
        num_correct = 0
        while not timestep.done:
            # Model server requires a batch_dim, so unsqueeze here for local runs.
            timestep.observation.unsqueeze_(0)
            action, info = agent.act(timestep)
            action = Action(action, info)
            # unbatch the action
            victim_addr = env._env.victim_address
            timestep = env.step(action)
            obs, reward, done, info = timestep
            if "guess_correct" in info:
                num_guess += 1
                if info["guess_correct"]:
                    print(f"victim_address! {victim_addr} correct guess! {info['guess_correct']}")
                    num_correct += 1
                else:
                    correct = False
            agent.observe(action, timestep)
            episode_length += 1
            episode_return += timestep.reward
        env.reset(save_data=True) # save data to file
    # generate benign traces
    '''
    cfg.env_config['cyclone_collect_data'] = True
    cfg.env_config['cyclone_malicious_trace'] = False
    env_fac = CacheEnvCycloneWrapperFactory(cfg.env_config)
    env = env_fac(index=0)
    print("mix.txt opened!")
    agent = SpecAgent(cfg.env_config, trace_file)
    episode_length = 0
    episode_return = 0.0
    file_done = False
    # generate dataset for benign traces
    iter = 0
    while not file_done:
    #for i in range(repeat):
        timestep = env.reset()
        num_guess = 0
        num_correct = 0
        done = False
        count = 0
        iter += 1
        while not done:
            # Model server requires a batch_dim, so unsqueeze here for local runs.
            timestep.observation.unsqueeze_(0)
            action, info = agent.act(timestep)
            if "file_done" in info:
                file_done = True
                break
            if "victim_addr" in info:
                print(info["victim_addr"])
                #env.set_victim(info["victim_addr"])
                env._env.set_victim(info["victim_addr"])
                action = Action(action, info)
            else:
                action = Action(action, info)
            # unbatch the action
            victim_addr = env._env.victim_address
            timestep = env.step(action)
            obs, reward, done, info = timestep
            count += 1
            #if count % 10 == 0:
            #action = Action(agent.cache_size * 2, {})
            #timestep = env.step(action)
            #obs, reward, done, info = timestep
            if count == 160:
                action = Action(agent.cache_size * 2, {})
                timestep = env.step(action)
                obs, reward, done, info = timestep
                done = True
                count = 0
            #if "guess_correct" in info:
            #    num_guess += 1
            #    if info["guess_correct"]:
            #        print(f"victim_address! {victim_addr} correct guess! {info['guess_correct']}")
            #        num_correct += 1
            #    else:
            #        correct = False
            #agent.observe(action, timestep)
            episode_length += 1
            episode_return += timestep.reward
        env.reset(save_data=True) # save data to file
    '''
    #cfg.env_config['cyclone_malicious_trace'] = False
    #env_fac = CacheEnvCCHunterWrapperFactory(cfg.env_config)
    #env = env_fac(index=0)
# Script entry point.
if __name__ == "__main__":
    main()
| 8,416 | 35.437229 | 140 | py |
second.pytorch | second.pytorch-master/second/script_server.py | from second.pytorch.train import train, evaluate
from google.protobuf import text_format
from second.protos import pipeline_pb2
from pathlib import Path
from second.utils import config_tool, model_tool
import datetime
from second.data.all_dataset import get_dataset_class
def _div_up(a, b):
return (a + b - 1) // b
def _get_config(path):
    """Load a TrainEvalPipelineConfig from a text-format protobuf file."""
    config = pipeline_pb2.TrainEvalPipelineConfig()
    with open(path, "r") as f:
        text_format.Merge(f.read(), config)
    return config
def _nuscenes_modify_step(config,
                          epochs,
                          eval_epoch,
                          data_sample_factor,
                          num_examples=28130):
    """Derive train_config.steps / steps_per_eval from epoch counts and the
    chosen nuScenes subsampling factor.

    ``data_sample_factor`` selects the NuScenesDatasetD<k> class (every k-th
    sample); the step counts come from the actual subsampled dataset length.
    ``num_examples`` is kept for interface compatibility but unused.
    """
    train_input = config.train_input_reader
    dataset_name = {
        1: "NuScenesDataset",
        2: "NuScenesDatasetD2",
        3: "NuScenesDatasetD3",
        4: "NuScenesDatasetD4",
        5: "NuScenesDatasetD5",
        6: "NuScenesDatasetD6",
        7: "NuScenesDatasetD7",
        8: "NuScenesDatasetD8",
    }[data_sample_factor]
    train_input.dataset.dataset_class_name = dataset_name
    dataset = get_dataset_class(dataset_name)(
        root_path=train_input.dataset.kitti_root_path,
        info_path=train_input.dataset.kitti_info_path,
    )
    steps_per_epoch = _div_up(len(dataset), train_input.batch_size)
    config.train_config.steps = steps_per_epoch * epochs
    config.train_config.steps_per_eval = steps_per_epoch * eval_epoch
def train_nuscenes_lite():
    """Fine-tune the lite car model on nuScenes (D8 subsample, 50 epochs,
    eval every 5) from a KITTI-pretrained checkpoint.

    NOTE(review): the checkpoint and model-dir paths are machine-specific.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/car.lite.nu.config"
    ckpt_path = "/home/yy/deeplearning/voxelnet_torch_sparse/car_lite_small_v1/voxelnet-15500.tckpt"
    # config = Path(__file__).resolve().parent() / "configs/car.fhd.nu.config"
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "car_lite_with_pretrain" / ("test_" + date_str),
        pretrained_path=ckpt_path)
def train_nuscenes():
    """Fine-tune the FHD car model on nuScenes (D8 subsample, 50 epochs,
    eval every 5) from a KITTI-pretrained checkpoint.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/car.fhd.nu.config"
    ckpt_path = "/home/yy/deeplearning/voxelnet_torch_sparse/car_fhd_small_v1/voxelnet-27855.tckpt"
    # config = Path(__file__).resolve().parent() / "configs/car.fhd.nu.config"
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "car_fhd_with_pretrain" / ("test_" + date_str),
        pretrained_path=ckpt_path)
def train_nuscenes_pp():
    """Fine-tune the PointPillars car model on nuScenes (D8 subsample,
    50 epochs, eval every 5) from a KITTI-pretrained checkpoint.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/pp.nu.config"
    ckpt_path = "/home/yy/deeplearning/model_dirs/kitti/car_pp_long_v0/voxelnet-296960.tckpt"
    # config = Path(__file__).resolve().parent() / "configs/car.fhd.nu.config"
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "car_pp_with_pretrain" / ("test_" + date_str),
        pretrained_path=ckpt_path)
def train_nuscenes_all():
    """Train the all-class FHD model on nuScenes from scratch (D8 subsample,
    50 epochs, eval every 5).

    Cleanup: removed an unused ``ckpt_path`` local that was assigned but
    never passed to ``train()``.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/all.fhd.config"
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "all_fhd" / ("test_" + date_str))
def train_nuscenes_pp_all():
    """Fine-tune the all-class PointPillars model on nuScenes (D8 subsample,
    50 epochs, eval every 5) from a KITTI car PointPillars checkpoint.

    Cleanup: removed a duplicated identical ``ckpt_path`` assignment.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/all.pp.config"
    ckpt_path = "/home/yy/deeplearning/model_dirs/kitti/car_pp_long_v0/voxelnet-296960.tckpt"
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "all_pp" / ("test_" + date_str),
        pretrained_path=ckpt_path)
def train_nuscenes_pp_all_sample():
    """Fine-tune the all-class PointPillars model (with DB sampling config)
    on nuScenes (D8 subsample, 50 epochs) from a KITTI checkpoint.

    Cleanup: removed a duplicated identical ``ckpt_path`` assignment.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/all.pp.sample.config"
    ckpt_path = "/home/yy/deeplearning/model_dirs/kitti/car_pp_long_v0/voxelnet-296960.tckpt"
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "all_pp_sample" / ("test_" + date_str),
        pretrained_path=ckpt_path)
def train_nuscenes_pp_all_sample_v2():
    """Train the all-class PointPillars v2 sampling config on nuScenes from
    scratch (D8 subsample, 50 epochs).

    Cleanup: removed a twice-assigned ``ckpt_path`` local that was never
    passed to ``train()``.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/all.pp.sample.v2.config"
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "all_pp_sample_v2" / ("test_" + date_str))
def train_nuscenes_pp_all_v2():
    """Train the all-class PointPillars v2 config on nuScenes from scratch
    (D8 subsample, 50 epochs).

    Cleanup: removed a twice-assigned ``ckpt_path`` local that was never
    passed to ``train()``.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/all.pp.v2.config"
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "all_pp_v2" / ("test_" + date_str))
def train_nuscenes_pp_vel():
    """Train the all-class PointPillars model with velocity prediction on
    nuScenes from scratch (D8 subsample, 50 epochs).

    Cleanup: removed a twice-assigned ``ckpt_path`` local that was never
    passed to ``train()``.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/all.pp.vel.config"
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "pp_vel" / ("test_" + date_str))
def train_nuscenes_pp_vel_v2():
    """Train the v2 velocity-prediction PointPillars config on nuScenes from
    scratch (D8 subsample, 50 epochs).

    Cleanup: removed a twice-assigned ``ckpt_path`` local that was never
    passed to ``train()``.
    NOTE(review): this saves into the same "pp_vel" directory as the non-v2
    variant — confirm that is intended.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/all.pp.vel.v2.config"
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "pp_vel" / ("test_" + date_str))
def train_nuscenes_pp_car():
    """Train the car-only PointPillars model on nuScenes.

    Loads ``configs/nuscenes/car.pp.config``, applies
    ``_nuscenes_modify_step(config, 50, 5, 8)`` and launches training in a
    fresh, timestamped model directory under ``pp_car``.
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/car.pp.config"
    # Removed two duplicated, unused ckpt_path assignments (dead locals).
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    train(
        config,
        model_dir_root / "pp_car" / ("test_" + date_str))
def resume_nuscenes_pp_all():
    """Resume the all-class PointPillars run in ``all_pp/test_190424_232942``.

    Re-loads ``configs/nuscenes/all.pp.config``, re-applies the same step
    schedule, and calls ``train(..., resume=True)`` on the existing run
    directory (no new timestamp is generated).
    """
    config = Path(
        __file__).resolve().parent / "configs/nuscenes/all.pp.config"
    # Removed two duplicated, unused ckpt_path assignments (dead locals);
    # resume=True restores state from the model dir itself.
    config = _get_config(config)
    _nuscenes_modify_step(config, 50, 5, 8)
    model_dir_root = Path("/home/yy/deeplearning/model_dirs/nuscene")
    train(
        config,
        model_dir_root / "all_pp" / ("test_190424_232942"), resume=True)
if __name__ == "__main__":
    # Drop model dirs of runs that died before producing anything useful,
    # then resume the interrupted all-class PointPillars run.
    # (Fixed: the last line carried fused dataset-dump metadata that made the
    # module unparsable.)
    model_tool.rm_invalid_model_dir("/home/yy/deeplearning/model_dirs/nuscene")
    # train_nuscenes_lite_hrz()
    resume_nuscenes_pp_all()
from second.pytorch.train import train, evaluate
from google.protobuf import text_format
from second.protos import pipeline_pb2
from pathlib import Path
from second.utils import config_tool
def train_multi_rpn_layer_num():
    """Sweep the RPN depth: train car.lite once per layer count in [2,4,7,9].

    Each run gets its own model directory ``car_lite_L<n>`` under
    ``model_root``; the single parsed config is mutated in place between runs.
    """
    config_path = "./configs/car.lite.config"
    model_root = Path.home() / "second_test"  # don't forget to change this.
    config = pipeline_pb2.TrainEvalPipelineConfig()
    with open(config_path, "r") as f:
        proto_str = f.read()
        text_format.Merge(proto_str, config)
    # Removed unused local `input_cfg`; only the model config is mutated.
    model_cfg = config.model.second
    layer_nums = [2, 4, 7, 9]
    for num in layer_nums:  # renamed from `l` (ambiguous, PEP 8 E741)
        model_dir = str(model_root / f"car_lite_L{num}")
        model_cfg.rpn.layer_nums[:] = [num]
        train(config, model_dir)
def eval_multi_threshold():
    """Evaluate one checkpoint under each NMS score threshold in the sweep."""
    config_path = "./configs/car.fhd.config"
    ckpt_name = "/path/to/your/model_ckpt"  # don't forget to change this.
    # Refuse to run while the placeholder checkpoint path is still in place.
    assert "/path/to/your" not in ckpt_name
    config = pipeline_pb2.TrainEvalPipelineConfig()
    with open(config_path, "r") as f:
        text_format.Merge(f.read(), config)
    model_cfg = config.model.second
    for thresh in [0.3]:
        model_cfg.nms_score_threshold = thresh
        # don't forget to change this.
        result_path = Path.home() / f"second_test_eval_{thresh:.2f}"
        evaluate(
            config,
            result_path=result_path,
            ckpt_path=str(ckpt_name),
            batch_size=1,
            measure_time=True)
if __name__ == "__main__":
    # Fixed: the call line carried fused dataset-dump metadata that made the
    # module unparsable.
    eval_multi_threshold()
import io as sysio
import json
import os
import pickle
import sys
import time
from functools import partial
from pathlib import Path
import datetime
import fire
import matplotlib.pyplot as plt
import numba
import numpy as np
import OpenGL.GL as pygl
import pyqtgraph.opengl as gl
import skimage
from matplotlib.backends.backend_qt5agg import \
FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import \
NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from PyQt5 import QtCore, QtGui
from PyQt5.QtCore import QTimer, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QIcon, QMouseEvent, QPainter
from PyQt5.QtWidgets import (
QApplication, QCheckBox, QComboBox, QDialog, QFormLayout, QGroupBox,
QHBoxLayout, QLabel, QLineEdit, QMainWindow, QPlainTextEdit, QTextEdit,
QPushButton, QSizePolicy, QVBoxLayout, QWidget, QProgressBar)
from shapely.geometry import Polygon
from skimage import io
import second.core.box_np_ops as box_np_ops
import second.core.preprocess as prep
import second.kittiviewer.control_panel as panel
from second.core.anchor_generator import AnchorGeneratorStride
from second.core.box_coders import GroundBox3dCoder
from second.core.point_cloud.point_cloud_ops import points_to_voxel
from second.core.region_similarity import (
DistanceSimilarity, NearestIouSimilarity, RotateIouSimilarity)
from second.core.sample_ops import (
sample_from_database_v2, sample_from_database_v3, sample_from_database_v4,
DataBaseSamplerV2)
from second.core.target_assigner import TargetAssigner
from second.data import kitti_common as kitti
from second.kittiviewer.glwidget import KittiGLViewWidget
from second.protos import pipeline_pb2
from second.utils import bbox_plot
from second.utils.bbox_plot import GLColor
from second.utils.eval import get_coco_eval_result, get_official_eval_result
from second.pytorch.inference import TorchInferenceContext
from second.utils.progress_bar import list_bar
"""
from wavedata.tools.obj_detection import obj_utils
from avod.core.anchor_generators import grid_anchor_3d_generator
"""
class KittiDrawControl(panel.ControlPanel):
    """Settings panel for the KITTI viewer.

    Declares every tunable option (point/box drawing, voxelization, anchor
    generation, sampling/augmentation) on a four-tab control panel; values
    are later read back by name, e.g. ``self.w_config.get("CoorsRange")``.
    """

    def __init__(self, title, parent=None):
        """Build the four tabs: common, inference, anchors, sample/aug."""
        super().__init__(column_nums=[2, 1, 1, 2], tab_num=4, parent=parent)
        self.setWindowTitle(title)
        with self.tab(0, "common"):
            with self.column(0):
                # Point-cloud and GT/DT box rendering options.
                self.add_listedit("UsedClass", str)
                self.add_fspinbox("PointSize", 0.01, 0.5, 0.01, 0.05)
                self.add_fspinbox("PointAlpha", 0.0, 1.0, 0.05, 0.5)
                self.add_colorbutton("PointColor",
                                     bbox_plot.gl_color(GLColor.Gray))
                self.add_fspinbox("GTPointSize", 0.01, 0.5, 0.01, 0.2)
                self.add_fspinbox("GTPointAlpha", 0.0, 1.0, 0.05, 0.5)
                self.add_colorbutton("GTPointColor",
                                     bbox_plot.gl_color(GLColor.Purple))
                self.add_checkbox("WithReflectivity")
                self.add_checkbox("DrawGTBoxes")
                self.add_checkbox("DrawGTLabels")
                self.add_colorbutton("GTBoxColor",
                                     bbox_plot.gl_color(GLColor.Green))
                self.add_fspinbox("GTBoxAlpha", 0.0, 1.0, 0.05, 0.5)
                self.add_checkbox("DrawDTBoxes")
                self.add_checkbox("DrawDTLabels")
                self.add_checkbox("DTScoreAsAlpha")
                self.add_fspinbox("DTScoreThreshold", 0.0, 1.0, 0.01, 0.3)
                self.add_colorbutton("DTBoxColor",
                                     bbox_plot.gl_color(GLColor.Blue))
                self.add_fspinbox("DTBoxAlpha", 0.0, 1.0, 0.05, 0.5)
                self.add_fspinbox("DTBoxLineWidth", 0.25, 10.0, 0.25, 1.0)
            with self.column(1):
                # Voxelization range/size and voxel rendering options.
                self.add_arrayedit("CoorsRange", np.float64,
                                   [-40, -40, -2, 40, 40, 4], [6])
                self.add_arrayedit("VoxelSize", np.float64, [0.2, 0.2, 0.4],
                                   [3])
                self.add_checkbox("DrawVoxels")
                self.add_colorbutton("PosVoxelColor",
                                     bbox_plot.gl_color(GLColor.Yellow))
                self.add_fspinbox("PosVoxelAlpha", 0.0, 1.0, 0.05, 0.5)
                self.add_colorbutton("NegVoxelColor",
                                     bbox_plot.gl_color(GLColor.Purple))
                self.add_fspinbox("NegVoxelAlpha", 0.0, 1.0, 0.05, 0.5)
                self.add_checkbox("DrawPositiveVoxelsOnly")
                self.add_checkbox("RemoveOutsidePoint")
        with self.tab(1, "inference"):
            with self.column(0):
                self.add_checkbox("TensorflowInference")
        with self.tab(2, "anchors"):
            with self.column(0):
                # Anchor-grid visualization parameters.
                self.add_checkbox("DrawAnchors")
                self.add_arrayedit("AnchorSize", np.float64, [1.6, 3.9, 1.56],
                                   [3])
                self.add_arrayedit("AnchorOffset", np.float64,
                                   [0, -39.8, -1.0], [3])
                self.add_arrayedit("AnchorStride", np.float64, [0.4, 0.4, 0.0],
                                   [3])
                self.add_fspinbox("MatchThreshold", 0.0, 1.0, 0.1)
                self.add_fspinbox("UnMatchThreshold", 0.0, 1.0, 0.1)
                self.add_combobox("IoUMethod", ["RotateIoU", "NearestIoU"])
        with self.tab(3, "sample and augmentation"):
            with self.column(0):
                self.add_checkbox("EnableSample")
                self.add_jsonedit("SampleGroups")
                self.add_arrayedit("SampleGlobleRotRange", np.float64, [0.78, 2.35],
                                   [2])
            with self.column(1):
                self.add_checkbox("EnableAugmentation")
                self.add_checkbox("GroupNoisePerObject")
class Settings:
    """Tiny JSON-backed key/value store persisted at ``cfg_path``.

    The backing file is created (empty) on first use and rewritten in full
    on every :meth:`set`. Values must be JSON-serializable.
    """

    def __init__(self, cfg_path):
        self._cfg_path = cfg_path
        self._settings = {}
        self._setting_defaultvalue = {}
        if not Path(self._cfg_path).exists():
            # First run: materialize an empty settings file.
            with open(self._cfg_path, 'w') as f:
                f.write(json.dumps(self._settings, indent=2, sort_keys=True))
        else:
            with open(self._cfg_path, 'r') as f:
                self._settings = json.loads(f.read())

    def set(self, name, value):
        """Store *value* under *name* and write the store back immediately."""
        self._settings[name] = value
        with open(self._cfg_path, 'w') as f:
            f.write(json.dumps(self._settings, indent=2, sort_keys=True))

    def get(self, name, default_value=None):
        """Return the stored value for *name*, or *default_value*.

        Raises ValueError when the key is absent and no default was given.
        """
        if name in self._settings:
            return self._settings[name]
        if default_value is None:
            raise ValueError("name not exist")
        return default_value

    def save(self, path):
        """Dump the current settings to an arbitrary *path*."""
        with open(path, 'w') as f:
            f.write(json.dumps(self._settings, indent=2, sort_keys=True))

    def load(self, path):
        """Replace the in-memory settings with those read from *path*.

        BUG fix: this previously read ``self._cfg_path`` and silently
        ignored the *path* argument, making it the inverse of :meth:`save`
        in name only.
        """
        with open(path, 'r') as f:
            self._settings = json.loads(f.read())
def _riou3d_shapely(rbboxes1, rbboxes2):
    """Pairwise rotated-3D IoU computed with shapely (reference/debug path).

    Box layout (per the slicing below): columns 0:2 are the BEV center,
    3:5 the BEV size, 6 the yaw; column 2 is the vertical base and column 5
    the height, so the z-overlap is taken over [z, z + h].  Runs a pure
    Python O(N*K) double loop — fine for visualization-sized inputs only.

    Returns:
        (N, K) float32 IoU matrix; entries stay 0 where boxes do not overlap.
    """
    N, K = rbboxes1.shape[0], rbboxes2.shape[0]
    corners1 = box_np_ops.center_to_corner_box2d(
        rbboxes1[:, :2], rbboxes1[:, 3:5], rbboxes1[:, 6])
    corners2 = box_np_ops.center_to_corner_box2d(
        rbboxes2[:, :2], rbboxes2[:, 3:5], rbboxes2[:, 6])
    iou = np.zeros([N, K], dtype=np.float32)
    for i in range(N):
        for j in range(K):
            # Vertical intersection height; <= 0 means no 3D overlap at all.
            iw = (min(rbboxes1[i, 2] + rbboxes1[i, 5],
                      rbboxes2[j, 2] + rbboxes2[j, 5]) - max(
                          rbboxes1[i, 2], rbboxes2[j, 2]))
            if iw > 0:
                p1 = Polygon(corners1[i])
                p2 = Polygon(corners2[j])
                # Intersection volume = BEV polygon overlap * overlap height.
                inc = p1.intersection(p2).area * iw
                # inc = p1.intersection(p2).area
                if inc > 0:
                    # Union volume = vol1 + vol2 - intersection.
                    iou[i, j] = inc / (p1.area * rbboxes1[i, 5] +
                                       p2.area * rbboxes2[j, 5] - inc)
                # iou[i, j] = inc / (p1.area + p2.area - inc)
    return iou
def kitti_anno_to_corners(info, annos=None):
    """Convert KITTI camera-frame annotations into lidar-frame boxes.

    Uses the calibration stored in ``info["calib"]``; falls back to
    ``info['annos']`` when *annos* is not supplied.

    Returns:
        (boxes_corners, scores, boxes_lidar) — *scores* is None when the
        annotations carry no 'score' field.
    """
    calib = info["calib"]
    rect = calib['R0_rect']
    P2 = calib['P2']
    velo2cam = calib['Tr_velo_to_cam']
    if annos is None:
        annos = info['annos']
    scores = annos['score'] if 'score' in annos else None
    # Stack location, dimensions and rotation_y column-wise into (N, 7)
    # camera-frame boxes.
    boxes_camera = np.concatenate(
        [annos['location'], annos['dimensions'],
         annos['rotation_y'][..., np.newaxis]],
        axis=1)
    boxes_lidar = box_np_ops.box_camera_to_lidar(boxes_camera, rect,
                                                 velo2cam)
    boxes_corners = box_np_ops.center_to_corner_box3d(
        boxes_lidar[:, :3],
        boxes_lidar[:, 3:6],
        boxes_lidar[:, 6],
        origin=[0.5, 0.5, 0.5],
        axis=2)
    return boxes_corners, scores, boxes_lidar
class MatPlotLibView(FigureCanvas):
    """A matplotlib figure embedded as a Qt widget (single subplot)."""

    def __init__(self, parent=None, rect=[5, 4], dpi=100):
        # NOTE(review): mutable default list for `rect`; it is only read
        # here, so this is harmless, but a tuple would be safer.
        # super().__init__()
        self.fig = Figure(figsize=(rect[0], rect[1]), dpi=dpi)
        self.ax = self.fig.add_subplot(1, 1, 1)
        # self.ax.axis('off')
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        #self.axes.set_ylim([-1,1])
        #self.axes.set_xlim([0,31.4159*2])
        FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding,
                                   QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        self.draw()

    def reset_plot(self):
        """Clear the figure and recreate a single empty subplot as self.ax."""
        self.fig.clf()
        self.ax = self.fig.add_subplot(1, 1, 1)
class MatPlotLibViewTab(QWidget):
    """Tab-hosted variant of MatPlotLibView (single-subplot figure).

    NOTE(review): mirrors MatPlotLibView, but derives from QWidget while
    calling ``FigureCanvas`` methods on ``self`` — confirm the intended
    base class; it looks like a copy of MatPlotLibView with the base edited.
    """

    def __init__(self, num_rect=(5, 4), dpi=100, parent=None):
        # BUG fix: the body read an undefined name `rect` while the
        # parameter is `num_rect`, raising NameError on construction.
        # Also switched the default to an immutable tuple.
        self.fig = Figure(figsize=(num_rect[0], num_rect[1]), dpi=dpi)
        self.ax = self.fig.add_subplot(1, 1, 1)
        # self.ax.axis('off')
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding,
                                   QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        self.draw()

    def reset_plot(self):
        """Clear the figure and recreate a single empty subplot as self.ax."""
        self.fig.clf()
        self.ax = self.fig.add_subplot(1, 1, 1)
class MatPlotLibWidget(QWidget):
    """Composite widget: a MatPlotLibView canvas plus its navigation toolbar."""

    def __init__(self, parent=None, rect=(5, 4), dpi=100):
        # BUG fix: the QWidget base was never initialized and the layout
        # was built but never attached to the widget; also switched the
        # mutable default list to a tuple.
        super().__init__(parent)
        self.w_plot = MatPlotLibView(self, rect, dpi)
        self.w_plt_toolbar = NavigationToolbar(self.w_plot, self)
        plt_layout = QVBoxLayout()
        plt_layout.addWidget(self.w_plot)
        plt_layout.addWidget(self.w_plt_toolbar)
        self.setLayout(plt_layout)

    def reset_plot(self):
        """Clear the embedded figure and recreate its single subplot."""
        return self.w_plot.reset_plot()

    @property
    def axis(self):
        """The current matplotlib Axes of the embedded canvas."""
        return self.w_plot.ax
class KittiPointCloudView(KittiGLViewWidget):
    """OpenGL point-cloud view with debug overlays.

    Renders frustums, anchor grids, voxel grids and bounding boxes on top of
    the base GL widget. Voxelization parameters default to a pedestrian/
    cyclist-style grid and can be overridden per instance.
    """

    def __init__(self,
                 config,
                 parent=None,
                 voxel_size=None,
                 coors_range=None,
                 max_voxels=50000,
                 max_num_points=35):
        """Store the shared config panel and voxelization parameters."""
        super().__init__(parent=parent)
        if voxel_size is None:
            voxel_size = [0.2, 0.2, 0.4]
        if coors_range is None:
            coors_range = [0, -40, -3, 70.4, 40, 1]
        self.w_config = config  # KittiDrawControl; queried via .get(name)
        self._voxel_size = voxel_size
        self._coors_range = coors_range
        self._max_voxels = max_voxels
        self._max_num_points = max_num_points
        # Light-gray background, converted from 0-1 floats to 0-255 ints.
        bk_color = (0.8, 0.8, 0.8, 1.0)
        bk_color = list([int(v * 255) for v in bk_color])
        self.setBackgroundColor(*bk_color)
        # self.setBackgroundColor('w')
        self.mousePressed.connect(self.on_mousePressed)
        self.setCameraPosition(distance=20, azimuth=-180, elevation=30)

    def on_mousePressed(self, pos):
        """Mouse-press hook; intentionally a no-op for now."""
        pass

    def reset_camera(self):
        """Return the camera to the default pose and redraw."""
        self.set_camera_position(
            center=(5, 0, 0), distance=20, azimuth=-180, elevation=30)
        self.update()

    def draw_frustum(self, bboxes, rect, Trv2c, P2):
        """Draw camera frustums for 2D boxes, transformed into lidar frame."""
        # Y = C(R @ (rect @ Trv2c @ X) + T)
        # uv = [Y0/Y2, Y1/Y2]
        frustums = []
        C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
        frustums = box_np_ops.get_frustum_v2(bboxes, C)
        frustums -= T
        # frustums = np.linalg.inv(R) @ frustums.T
        frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
        frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
        self.boxes3d('frustums', frustums, colors=GLColor.Write, alpha=0.5)

    def draw_cropped_frustum(self, bboxes, rect, Trv2c, P2):
        """Draw randomly cropped frustums (augmentation visualization)."""
        # Y = C(R @ (rect @ Trv2c @ X) + T)
        # uv = [Y0/Y2, Y1/Y2]
        self.boxes3d(
            'cropped_frustums',
            prep.random_crop_frustum(bboxes, rect, Trv2c, P2),
            colors=GLColor.Write,
            alpha=0.5)

    def draw_anchors(self,
                     gt_boxes_lidar,
                     points=None,
                     image_idx=0,
                     gt_names=None):
        """Generate a car anchor grid, assign targets, and draw the result.

        Ignored anchors are drawn gray, positives cyan; negatives are
        filtered out.  NOTE(review): when *points* is None, ``anchors_mask``
        is never bound and the later ``assign`` call would raise
        NameError — confirm callers always pass points.
        """
        # print(gt_names)
        voxel_size = np.array(self._voxel_size, dtype=np.float32)
        # voxel_size = np.array([0.2, 0.2, 0.4], dtype=np.float32)
        coors_range = np.array(self._coors_range, dtype=np.float32)
        # coors_range = np.array([0, -40, -3, 70.4, 40, 1], dtype=np.float32)
        grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size
        grid_size = np.round(grid_size).astype(np.int64)
        # print(grid_size)
        bv_range = coors_range[[0, 1, 3, 4]]
        # Three candidate generators are configured; only the car-sized one
        # (anchor_generator2) is actually used below.
        anchor_generator = AnchorGeneratorStride(
            # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],
            sizes=[0.6, 1.76, 1.73],
            anchor_strides=[0.4, 0.4, 0.0],
            anchor_offsets=[0.2, -39.8, -1.465],
            rotations=[0, 1.5707963267948966],
            match_threshold=0.5,
            unmatch_threshold=0.35,
        )
        anchor_generator1 = AnchorGeneratorStride(
            # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],
            sizes=[0.6, 0.8, 1.73],
            anchor_strides=[0.4, 0.4, 0.0],
            anchor_offsets=[0.2, -39.8, -1.465],
            rotations=[0, 1.5707963267948966],
            match_threshold=0.5,
            unmatch_threshold=0.35,
        )
        anchor_generator2 = AnchorGeneratorStride(
            # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],
            sizes=[1.6, 3.9, 1.56],
            anchor_strides=[0.4, 0.4, 0.0],
            anchor_offsets=[0.2, -39.8, -1.55442884],
            rotations=[0, 1.5707963267948966],
            # rotations=[0],
            match_threshold=0.6,
            unmatch_threshold=0.45,
        )
        anchor_generators = [anchor_generator2]
        box_coder = GroundBox3dCoder()
        # similarity_calc = DistanceSimilarity(1.0)
        similarity_calc = NearestIouSimilarity()
        target_assigner = TargetAssigner(box_coder, anchor_generators,
                                         similarity_calc)
        # anchors = box_np_ops.create_anchors_v2(
        #     bv_range, grid_size[:2] // 2, sizes=anchor_dims)
        # matched_thresholds = [0.45, 0.45, 0.6]
        # unmatched_thresholds = [0.3, 0.3, 0.45]
        t = time.time()
        # Feature map is the BEV grid downsampled by 2, reversed to
        # [1, H, W]-style ordering for the anchor generator.
        feature_map_size = grid_size[:2] // 2
        feature_map_size = [*feature_map_size, 1][::-1]
        print(feature_map_size)
        # """
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
            anchors[:, [0, 1, 3, 4, 6]])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        print(f"num_anchors_ {len(anchors)}")
        if points is not None:
            # Mask anchors that cover at least ~2 occupied BEV cells, using
            # a 2D cumulative-sum (integral image) over the voxel counts.
            voxels, coors, num_points = points_to_voxel(
                points,
                self._voxel_size,
                # self._coors_range,
                coors_range,
                self._max_num_points,
                reverse_index=True,
                max_voxels=self._max_voxels)
            # print(np.min(coors, 0), np.max(coors, 0))
            dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
                coors, tuple(grid_size[::-1][1:]))
            dense_voxel_map = dense_voxel_map.cumsum(0)
            dense_voxel_map = dense_voxel_map.cumsum(1)
            anchors_mask = box_np_ops.fused_get_anchors_area(
                dense_voxel_map, anchors_bv, voxel_size, coors_range,
                grid_size) > 1
            print(np.sum(anchors_mask), anchors_mask.shape)
        class_names = [
            'Car', "Pedestrian", "Cyclist", 'Van', 'Truck', "Tram", 'Misc',
            'Person_sitting'
        ]
        # 1-based class ids (0 is background).
        gt_classes = np.array(
            [class_names.index(n) + 1 for n in gt_names], dtype=np.int32)
        t = time.time()
        target_dict = target_assigner.assign(
            anchors,
            gt_boxes_lidar,
            anchors_mask,
            gt_classes=gt_classes,
            matched_thresholds=matched_thresholds,
            unmatched_thresholds=unmatched_thresholds)
        labels = target_dict["labels"]
        reg_targets = target_dict["bbox_targets"]
        reg_weights = target_dict["bbox_outside_weights"]
        # print(labels[labels > 0])
        # decoded_reg_targets = box_np_ops.second_box_decode(reg_targets, anchors)
        # print(decoded_reg_targets.reshape(-1, 7)[labels > 0])
        print("target time", (time.time() - t))
        print(f"num_pos={np.sum(labels > 0)}")
        # Gray for ignored (-1), cyan for positives; negatives dropped.
        colors = np.zeros([anchors.shape[0], 4])
        ignored_color = bbox_plot.gl_color(GLColor.Gray, 0.5)
        pos_color = bbox_plot.gl_color(GLColor.Cyan, 0.5)
        colors[labels == -1] = ignored_color
        colors[labels > 0] = pos_color
        cared_anchors_mask = np.logical_and(labels != 0, anchors_mask)
        colors = colors[cared_anchors_mask]
        anchors_not_neg = box_np_ops.rbbox3d_to_corners(anchors)[
            cared_anchors_mask]
        self.boxes3d("anchors", anchors_not_neg, colors=colors)

    def draw_anchors_trunk(self,
                           gt_boxes_lidar,
                           points=None,
                           image_idx=0,
                           gt_names=None):
        """Same as :meth:`draw_anchors` but with three truck-sized generators.

        Uses an extended class list ("car", "tractor", "trailer") and draws
        under the key "anchors_trunk".  Shares the NameError caveat about
        ``anchors_mask`` when *points* is None.
        """
        # print(gt_names)
        voxel_size = np.array(self._voxel_size, dtype=np.float32)
        # voxel_size = np.array([0.2, 0.2, 0.4], dtype=np.float32)
        coors_range = np.array(self._coors_range, dtype=np.float32)
        # coors_range = np.array([0, -40, -3, 70.4, 40, 1], dtype=np.float32)
        grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size
        grid_size = np.round(grid_size).astype(np.int64)
        # print(grid_size)
        bv_range = coors_range[[0, 1, 3, 4]]
        ag0 = AnchorGeneratorStride(
            # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],
            sizes=[2.0210402, 4.50223291, 0.75530619],
            anchor_strides=[0.4, 0.4, 0.0],
            anchor_offsets=[-39.8, -39.8, -1.465],
            rotations=[0, 1.5707963267948966],
            match_threshold=0.6,
            unmatch_threshold=0.45,
        )
        ag1 = AnchorGeneratorStride(
            # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],
            sizes=[2.49181956, 4.36252121, 2.07457216],
            anchor_strides=[0.4, 0.4, 0.0],
            anchor_offsets=[-39.8, -39.8, -1.465],
            rotations=[0, 1.5707963267948966],
            match_threshold=0.6,
            unmatch_threshold=0.45,
        )
        ag2 = AnchorGeneratorStride(
            # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],
            sizes=[2.59346555, 12.12584471, 2.70386401],
            anchor_strides=[0.4, 0.4, 0.0],
            anchor_offsets=[-39.8, -39.8, -1.55442884],
            rotations=[0, 1.5707963267948966],
            # rotations=[0],
            match_threshold=0.6,
            unmatch_threshold=0.45,
        )
        anchor_generators = [ag0, ag1, ag2]
        box_coder = GroundBox3dCoder()
        # similarity_calc = DistanceSimilarity(1.0)
        similarity_calc = NearestIouSimilarity()
        target_assigner = TargetAssigner(box_coder, anchor_generators,
                                         similarity_calc)
        # anchors = box_np_ops.create_anchors_v2(
        #     bv_range, grid_size[:2] // 2, sizes=anchor_dims)
        # matched_thresholds = [0.45, 0.45, 0.6]
        # unmatched_thresholds = [0.3, 0.3, 0.45]
        t = time.time()
        feature_map_size = grid_size[:2] // 2
        feature_map_size = [*feature_map_size, 1][::-1]
        print(feature_map_size)
        # """
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
            anchors[:, [0, 1, 3, 4, 6]])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        print(f"num_anchors_ {len(anchors)}")
        anchor_threshold = 1
        if points is not None:
            # Occupancy mask via integral image over voxel counts, as in
            # draw_anchors.
            voxels, coors, num_points = points_to_voxel(
                points,
                voxel_size,
                # self._coors_range,
                coors_range,
                self._max_num_points,
                reverse_index=True,
                max_voxels=self._max_voxels)
            # print(np.min(coors, 0), np.max(coors, 0))
            dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
                coors, tuple(grid_size[::-1][1:]))
            dense_voxel_map = dense_voxel_map.cumsum(0)
            dense_voxel_map = dense_voxel_map.cumsum(1)
            anchors_mask = box_np_ops.fused_get_anchors_area(
                dense_voxel_map, anchors_bv, voxel_size, coors_range,
                grid_size) > anchor_threshold
        class_names = [
            'Car', "Pedestrian", "Cyclist", 'Van', 'Truck', "Tram", 'Misc',
            'Person_sitting', "car", "tractor", "trailer"
        ]
        gt_classes = np.array(
            [class_names.index(n) + 1 for n in gt_names], dtype=np.int32)
        t = time.time()
        target_dict = target_assigner.assign(
            anchors,
            gt_boxes_lidar,
            anchors_mask,
            gt_classes=gt_classes,
            matched_thresholds=matched_thresholds,
            unmatched_thresholds=unmatched_thresholds)
        labels = target_dict["labels"]
        reg_targets = target_dict["bbox_targets"]
        reg_weights = target_dict["bbox_outside_weights"]
        # print(labels[labels > 0])
        # decoded_reg_targets = box_np_ops.second_box_decode(reg_targets, anchors)
        # print(decoded_reg_targets.reshape(-1, 7)[labels > 0])
        print("target time", (time.time() - t))
        print(f"num_pos={np.sum(labels > 0)}")
        colors = np.zeros([anchors.shape[0], 4])
        ignored_color = bbox_plot.gl_color(GLColor.Gray, 0.5)
        pos_color = bbox_plot.gl_color(GLColor.Cyan, 0.5)
        colors[labels == -1] = ignored_color
        colors[labels > 0] = pos_color
        cared_anchors_mask = np.logical_and(labels != 0, anchors_mask)
        colors = colors[cared_anchors_mask]
        anchors_not_neg = box_np_ops.rbbox3d_to_corners(anchors)[
            cared_anchors_mask]
        self.boxes3d("anchors_trunk", anchors_not_neg, colors=colors)

    def draw_bounding_box(self):
        """Draw the configured CoorsRange as a green wireframe box."""
        bbox = box_np_ops.minmax_to_corner_3d(np.array([self.w_config.get("CoorsRange")]))
        self.boxes3d("bound", bbox, GLColor.Green)

    def draw_voxels(self, points, gt_boxes=None):
        """Voxelize *points* and draw voxel boxes.

        With *gt_boxes*, voxels inside a GT box are drawn in the positive
        color and the rest in the negative color; "DrawPositiveVoxelsOnly"
        hides negatives (and hides everything when no GT is given).
        """
        # Colors come from the config panel: RGB from the color button,
        # alpha from the matching spinbox.
        pos_color = self.w_config.get("PosVoxelColor")[:3]
        pos_color = (*pos_color, self.w_config.get("PosVoxelAlpha"))
        neg_color = self.w_config.get("NegVoxelColor")[:3]
        neg_color = (*neg_color, self.w_config.get("NegVoxelAlpha"))
        voxel_size = np.array(self.w_config.get("VoxelSize"), dtype=np.float32)
        coors_range = np.array(
            self.w_config.get("CoorsRange"), dtype=np.float32)
        voxels, coors, num_points = points_to_voxel(
            points,
            voxel_size,
            coors_range,
            self._max_num_points,
            reverse_index=True,
            max_voxels=self._max_voxels)
        # print("num_voxels", num_points.shape[0])
        """
        total_num_points = 0
        for i in range(self._max_num_points):
            num = np.sum(num_points.astype(np.int64) == i)
            total_num_points += num * i
            if num > 0:
                print(f"num={i} have {num} voxels")
        print("total_num_points", points.shape[0], total_num_points)
        """
        grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size
        grid_size = np.round(grid_size).astype(np.int64)
        # Voxel coords are stored reversed (reverse_index=True), hence the
        # [:, ::-1] when converting back to metric min/max boxes.
        shift = coors_range[:3]
        voxel_origins = coors[:, ::-1] * voxel_size + shift
        voxel_maxs = voxel_origins + voxel_size
        voxel_boxes = np.concatenate([voxel_origins, voxel_maxs], axis=1)
        voxel_box_corners = box_np_ops.minmax_to_corner_3d(voxel_boxes)
        pos_only = self.w_config.get("DrawPositiveVoxelsOnly")
        if gt_boxes is not None:
            labels = box_np_ops.assign_label_to_voxel(
                gt_boxes, coors, voxel_size, coors_range).astype(np.bool)
            if pos_only:
                voxel_box_corners = voxel_box_corners[labels]
            colors = np.zeros([voxel_box_corners.shape[0], 4])
            if pos_only:
                colors[:] = pos_color
            else:
                colors[np.logical_not(labels)] = neg_color
                colors[labels] = pos_color
        else:
            # No GT: either everything is "negative", or nothing is drawn.
            if not pos_only:
                colors = np.zeros([voxel_box_corners.shape[0], 4])
                colors[:] = neg_color
            else:
                voxel_box_corners = np.zeros((0, 8, 3))
                colors = np.zeros((0, 4))
        self.boxes3d("voxels", voxel_box_corners, colors)
class KittiViewer(QMainWindow):
    def __init__(self):
        """Initialize empty viewer state, load persisted settings, build UI."""
        super().__init__()
        self.title = 'KittiViewer'
        self.bbox_window = [10, 10, 1600, 900]  # x, y, width, height
        self.sstream = sysio.StringIO()  # scratch buffer used by print_str()
        self.json_setting = Settings(str(Path.home() / ".kittiviewerrc"))
        # Dataset / detection state; populated by the load handlers.
        self.kitti_infos = None
        self.detection_annos = None
        self.image_idxes = None
        self.root_path = None
        self.current_idx = 0
        self.dt_image_idxes = None
        # Per-frame state for the currently plotted sample.
        self.current_image = None
        self.kitti_info = None
        self.points = None
        self.gt_boxes = None
        self.gt_names = None
        self.difficulty = None
        self.group_ids = None
        self.inference_ctx = None  # TorchInferenceContext once a net is built
        self.init_ui()
    def init_ui(self):
        """Build the whole window: GL view on the left, control column right.

        Widget defaults (paths, last image index, serialized panel config)
        are restored from ``self.json_setting``; ends by showing the window
        and immediately attempting to load the configured info file.
        """
        self.setWindowTitle(self.title)
        self.setGeometry(*self.bbox_window)
        # self.statusBar().showMessage('Message in statusbar.')
        control_panel_layout = QVBoxLayout()
        # --- dataset path / index inputs, prefilled from settings ---
        root_path = self.json_setting.get("kitti_root_path", "")
        self.w_root_path = QLineEdit(root_path)
        iamge_idx = self.json_setting.get("image_idx", "0")
        self.w_imgidx = QLineEdit(iamge_idx)
        info_path = self.json_setting.get("latest_info_path", "")
        self.w_info_path = QLineEdit(info_path)
        det_path = self.json_setting.get("latest_det_path", "")
        self.w_det_path = QLineEdit(det_path)
        # self.w_cmd = QLineEdit()
        # self.w_cmd.returnPressed.connect(self.on_CmdReturnPressed)
        self.w_load = QPushButton('load info')
        self.w_load.clicked.connect(self.on_loadButtonPressed)
        self.w_load_det = QPushButton('load detection')
        self.w_load_det.clicked.connect(self.on_loadDetPressed)
        # Drawing-control panel; restore its serialized state if present.
        self.w_config = KittiDrawControl('ctrl')
        config = self.json_setting.get("config", "")
        if config != "":
            self.w_config.loads(config)
        self.w_config.configChanged.connect(self.on_configchanged)
        self.w_plot = QPushButton('plot')
        self.w_plot.clicked.connect(self.on_plotButtonPressed)
        self.w_plot_all = QPushButton('plot all')
        self.w_plot_all.clicked.connect(self.on_plotAllButtonPressed)
        self.w_show_panel = QPushButton('control panel')
        self.w_show_panel.clicked.connect(self.on_panel_clicked)
        center_widget = QWidget(self)
        self.w_output = QTextEdit()
        self.w_config_gbox = QGroupBox("Read Config")
        layout = QFormLayout()
        layout.addRow(QLabel("root path:"), self.w_root_path)
        layout.addRow(QLabel("info path:"), self.w_info_path)
        layout.addRow(QLabel("image idx:"), self.w_imgidx)
        layout.addRow(QLabel("det path:"), self.w_det_path)
        self.w_config_gbox.setLayout(layout)
        # --- embedded matplotlib plot + toolbar ---
        self.w_plt = MatPlotLibView()
        self.w_plt_toolbar = NavigationToolbar(self.w_plt, center_widget)
        # self.w_plt.ax.set_axis_off()
        # self.w_plt.ax.set_yticklabels([])
        # self.w_plt.ax.set_xticklabels([])
        plt_layout = QVBoxLayout()
        plt_layout.addWidget(self.w_plt)
        plt_layout.addWidget(self.w_plt_toolbar)
        control_panel_layout.addWidget(self.w_config_gbox)
        # control_panel_layout.addWidget(self.w_info_path)
        h_layout = QHBoxLayout()
        h_layout.addWidget(self.w_load)
        h_layout.addWidget(self.w_load_det)
        control_panel_layout.addLayout(h_layout)
        h_layout = QHBoxLayout()
        h_layout.addWidget(self.w_plot)
        h_layout.addWidget(self.w_plot_all)
        control_panel_layout.addLayout(h_layout)
        control_panel_layout.addWidget(self.w_show_panel)
        # --- VoxelNet build/inference/eval controls ---
        vcfg_path = self.json_setting.get("latest_vxnet_cfg_path", "")
        self.w_vconfig_path = QLineEdit(vcfg_path)
        vckpt_path = self.json_setting.get("latest_vxnet_ckpt_path", "")
        self.w_vckpt_path = QLineEdit(vckpt_path)
        layout = QFormLayout()
        layout.addRow(QLabel("VoxelNet config path:"), self.w_vconfig_path)
        layout.addRow(QLabel("VoxelNet ckpt path:"), self.w_vckpt_path)
        control_panel_layout.addLayout(layout)
        self.w_build_net = QPushButton('Build VoxelNet')
        self.w_build_net.clicked.connect(self.on_BuildVxNetPressed)
        self.w_load_ckpt = QPushButton('load VoxelNet checkpoint')
        self.w_load_ckpt.clicked.connect(self.on_loadVxNetCkptPressed)
        h_layout = QHBoxLayout()
        h_layout.addWidget(self.w_build_net)
        h_layout.addWidget(self.w_load_ckpt)
        control_panel_layout.addLayout(h_layout)
        self.w_inference = QPushButton('inferenct VoxelNet')
        self.w_inference.clicked.connect(self.on_InferenceVxNetPressed)
        control_panel_layout.addWidget(self.w_inference)
        self.w_load_infer = QPushButton('Load and Inferenct VoxelNet')
        self.w_load_infer.clicked.connect(self.on_LoadInferenceVxNetPressed)
        control_panel_layout.addWidget(self.w_load_infer)
        self.w_eval_net = QPushButton('Evaluation VoxelNet')
        self.w_eval_net.clicked.connect(self.on_EvalVxNetPressed)
        control_panel_layout.addWidget(self.w_eval_net)
        # --- per-class browsing: GT-class vs detection-class indexing ---
        layout = QFormLayout()
        self.w_cb_gt_curcls = QCheckBox("Indexed by GroundTruth Class")
        self.w_cb_gt_curcls.setChecked(True)
        self.w_cb_gt_curcls.stateChanged.connect(
            self.on_gt_checkbox_statechanged)
        self.gt_combobox = QComboBox()
        self.gt_combobox.addItem("All")
        for cls_name in kitti.get_classes():
            self.gt_combobox.addItem(cls_name)
        self._current_gt_cls_ids = None
        self._current_gt_cls_idx = 0
        self.gt_combobox.currentTextChanged.connect(
            self.on_gt_combobox_changed)
        layout.addRow(self.w_cb_gt_curcls, self.gt_combobox)
        self.w_cb_dt_curcls = QCheckBox("Indexed by Detection Class")
        self.w_cb_dt_curcls.setChecked(False)
        self.w_cb_dt_curcls.stateChanged.connect(
            self.on_dt_checkbox_statechanged)
        self.dt_combobox = QComboBox()
        self.dt_combobox.addItem("All")
        self._current_dt_cls_ids = None
        self._current_dt_cls_idx = 0
        self.dt_combobox.currentTextChanged.connect(
            self.on_dt_combobox_changed)
        layout.addRow(self.w_cb_dt_curcls, self.dt_combobox)
        control_panel_layout.addLayout(layout)
        # --- frame navigation buttons (global, then per-class) ---
        self.w_next = QPushButton('next')
        self.w_next.clicked.connect(
            partial(self.on_nextOrPrevPressed, prev=False))
        self.w_prev = QPushButton('prev')
        self.w_prev.clicked.connect(
            partial(self.on_nextOrPrevPressed, prev=True))
        layout = QHBoxLayout()
        layout.addWidget(self.w_prev)
        layout.addWidget(self.w_next)
        control_panel_layout.addLayout(layout)
        # NOTE: w_next/w_prev are rebound here, so the attributes end up
        # referencing the per-class buttons.
        self.w_next = QPushButton('next current class')
        self.w_next.clicked.connect(
            partial(self.on_nextOrPrevCurClsPressed, prev=False))
        self.w_prev = QPushButton('prev current class')
        self.w_prev.clicked.connect(
            partial(self.on_nextOrPrevCurClsPressed, prev=True))
        layout = QHBoxLayout()
        layout.addWidget(self.w_prev)
        layout.addWidget(self.w_next)
        control_panel_layout.addLayout(layout)
        control_panel_layout.addLayout(plt_layout)
        # --- image saving + log output ---
        save_image_path = self.json_setting.get("save_image_path", "")
        self.w_image_save_path = QLineEdit(save_image_path)
        # self.w_cmd = QLineEdit()
        # self.w_cmd.returnPressed.connect(self.on_CmdReturnPressed)
        self.w_save_image = QPushButton('save image')
        self.w_save_image.clicked.connect(self.on_saveimg_clicked)
        control_panel_layout.addWidget(self.w_image_save_path)
        control_panel_layout.addWidget(self.w_save_image)
        # control_panel_layout.addWidget(self.w_cmd)
        control_panel_layout.addWidget(self.w_output)
        # --- central layout: GL point-cloud view (2/3) + controls (1/3) ---
        self.center_layout = QHBoxLayout()
        self.w_pc_viewer = KittiPointCloudView(
            self.w_config, coors_range=self.w_config.get("CoorsRange"))
        self.center_layout.addWidget(self.w_pc_viewer)
        self.center_layout.addLayout(control_panel_layout)
        self.center_layout.setStretch(0, 2)
        self.center_layout.setStretch(1, 1)
        center_widget.setLayout(self.center_layout)
        self.setCentralWidget(center_widget)
        self.show()
        self.on_loadButtonPressed()
        # self.on_plotButtonPressed()
def on_panel_clicked(self):
if self.w_config.isHidden():
self.w_config.show()
else:
self.w_config.hide()
    def on_saveimg_clicked(self):
        """Save the currently displayed image (plus a point-cloud grab)."""
        self.save_image(self.current_image)
    def on_gt_checkbox_statechanged(self):
        """Select GT-class indexing, making it exclusive with DT indexing.

        NOTE(review): setChecked re-fires stateChanged on the sibling box;
        presumably the states converge and this settles — confirm.
        """
        self.w_cb_gt_curcls.setChecked(True)
        self.w_cb_dt_curcls.setChecked(False)
    def on_dt_checkbox_statechanged(self):
        """Select detection-class indexing, exclusive with GT indexing.

        NOTE(review): setChecked re-fires stateChanged on the sibling box;
        presumably the states converge and this settles — confirm.
        """
        self.w_cb_gt_curcls.setChecked(False)
        self.w_cb_dt_curcls.setChecked(True)
    def on_gt_combobox_changed(self):
        """Reset the per-class cursor and reload infos for the new GT class."""
        self._current_gt_cls_idx = 0
        self.on_loadButtonPressed()
def on_dt_combobox_changed(self):
self._current_dt_cls_idx = 0
annos = kitti.filter_empty_annos(self.detection_annos)
if self.dt_image_idxes is not None and annos is not None:
current_class = self.dt_combobox.currentText()
if current_class == "All":
self._current_dt_cls_ids = self.dt_image_idxes
else:
self._current_dt_cls_ids = [
anno["image_idx"][0] for anno in annos
if current_class in anno["name"]
]
def message(self, value, *arg, color="Black"):
colorHtml = f"<font color=\"{color}\">"
endHtml = "</font><br>"
msg = self.print_str(value, *arg)
self.w_output.insertHtml(colorHtml + msg + endHtml)
self.w_output.verticalScrollBar().setValue(
self.w_output.verticalScrollBar().maximum())
def error(self, value, *arg):
time_str = datetime.datetime.now().strftime("[%H:%M:%S]")
return self.message(time_str, value, *arg, color="Red")
def info(self, value, *arg):
time_str = datetime.datetime.now().strftime("[%H:%M:%S]")
return self.message(time_str, value, *arg, color="Black")
def warning(self, value, *arg):
time_str = datetime.datetime.now().strftime("[%H:%M:%S]")
return self.message(time_str, value, *arg, color="Yellow")
    def save_image(self, image):
        """Write *image* to the configured path, plus a "<stem>_pc.jpg" grab
        of the GL point-cloud view; the path is persisted to settings."""
        img_path = self.w_image_save_path.text()
        self.json_setting.set("save_image_path", img_path)
        # Only write the camera image if a frame has actually been plotted.
        if self.current_image is not None:
            io.imsave(img_path, image)
        # p = self.w_pc_viewer.grab()
        p = self.w_pc_viewer.grabFrameBuffer()
        # p = QtGui.QPixmap.grabWindow(self.w_pc_viewer)
        pc_img_path = str(
            Path(img_path).parent / (str(Path(img_path).stem) + "_pc.jpg"))
        # p.save(pc_img_path, 'jpg')
        p.save(pc_img_path, 'jpg')
        self.info("image saved to", img_path)
def print_str(self, value, *arg):
#self.strprint.flush()
self.sstream.truncate(0)
self.sstream.seek(0)
print(value, *arg, file=self.sstream)
return self.sstream.getvalue()
    def on_nextOrPrevPressed(self, prev):
        """Step the global image cursor backward (prev is True) or forward,
        clamped to [0, len(image_idxes) - 1], then plot the new frame."""
        if prev is True:
            self.current_idx = max(self.current_idx - 1, 0)
        else:
            info_len = len(self.image_idxes)
            self.current_idx = min(self.current_idx + 1, info_len - 1)
        image_idx = self.image_idxes[self.current_idx]
        self.w_imgidx.setText(str(image_idx))
        self.plot_all(image_idx)
    def on_nextOrPrevCurClsPressed(self, prev):
        """Step the per-class cursor over DT or GT image indexes (whichever
        checkbox is active), clamped to the list bounds, then plot.

        NOTE(review): if neither checkbox is checked, image_idx is never
        assigned and plot_all raises NameError — confirm the UI guarantees
        one box is always checked (see on_*_checkbox_statechanged).
        """
        if self.w_cb_dt_curcls.isChecked():
            if prev is True:
                self._current_dt_cls_idx = max(self._current_dt_cls_idx - 1, 0)
            else:
                info_len = len(self._current_dt_cls_ids)
                self._current_dt_cls_idx = min(self._current_dt_cls_idx + 1,
                                               info_len - 1)
            image_idx = self._current_dt_cls_ids[self._current_dt_cls_idx]
            self.info("current dt image idx:", image_idx)
        elif self.w_cb_gt_curcls.isChecked():
            if prev is True:
                self._current_gt_cls_idx = max(self._current_gt_cls_idx - 1, 0)
            else:
                info_len = len(self._current_gt_cls_ids)
                self._current_gt_cls_idx = min(self._current_gt_cls_idx + 1,
                                               info_len - 1)
            image_idx = self._current_gt_cls_ids[self._current_gt_cls_idx]
            self.info("current gt image idx:", image_idx)
        self.plot_all(image_idx)
    def on_CmdReturnPressed(self):
        """Echo the command line's text into the output pane.

        NOTE(review): uses self.cmd / self.output while the logging helpers
        use self.w_output — confirm these attributes exist on the class.
        """
        cmd = self.print_str(self.cmd.text())
        self.output.insertPlainText(cmd)
    def on_loadButtonPressed(self):
        """Load KITTI infos (and the optional GT database) from the root path.

        Reads the info pickle (defaulting to kitti_infos_val.pkl under root),
        optionally builds a DataBaseSamplerV2 from kitti_dbinfos_train.pkl,
        caches all image indexes, and precomputes the per-class GT index list
        for the currently selected GT class.

        NOTE(review): a bad root path only logs an error and continues —
        confirm whether this should return early like the info-path check.
        """
        self.root_path = Path(self.w_root_path.text())
        if not (self.root_path / "training").exists():
            self.error("ERROR: your root path is incorrect.")
        self.json_setting.set("kitti_root_path", str(self.root_path))
        info_path = self.w_info_path.text()
        if info_path == '':
            info_path = self.root_path / 'kitti_infos_val.pkl'
        else:
            info_path = Path(info_path)
        if not info_path.exists():
            self.error("ERROR: info file not exist")
            return
        self.json_setting.set("latest_info_path", str(info_path))
        with open(info_path, 'rb') as f:
            self.kitti_infos = pickle.load(f)
        # database sampler is optional; only built when dbinfos pickle exists
        db_infos_path = Path(self.root_path) / "kitti_dbinfos_train.pkl"
        if db_infos_path.exists():
            with open(db_infos_path, 'rb') as f:
                self.db_infos = pickle.load(f)
            global_rot_range = self.w_config.get("SampleGlobleRotRange")
            groups = self.w_config.get("SampleGroups")
            self.info("init database sampler with group:")
            self.info(groups)
            self.db_sampler = DataBaseSamplerV2(self.db_infos, groups, global_rot_range=global_rot_range)
            self.info("load db_infos.")
        self.image_idxes = [info["image"]['image_idx'] for info in self.kitti_infos]
        self.info("load", len(self.kitti_infos), "infos.")
        current_class = self.gt_combobox.currentText()
        if current_class == "All":
            self._current_gt_cls_ids = self.image_idxes
        else:
            self._current_gt_cls_ids = [
                info["image_idx"] for info in self.kitti_infos
                if current_class in info["annos"]["name"]
            ]
        self._current_gt_cls_idx = 0
    def on_loadDetPressed(self):
        """Load detection annotations from a pickle file or a label directory.

        Populates self.detection_annos, the non-empty detection image index
        list, the DT class combobox (from the classes actually present), and
        the per-class DT index list for the current selection.
        """
        det_path = self.w_det_path.text()
        # a file is treated as a pickled anno list; a directory as KITTI label files
        if Path(det_path).is_file():
            with open(det_path, "rb") as f:
                dt_annos = pickle.load(f)
        else:
            dt_annos = kitti.get_label_annos(det_path)
        if len(dt_annos) == 0:
            self.warning("detection path contain nothing.")
            return
        self.detection_annos = dt_annos
        self.info(f"load {len(dt_annos)} detections.")
        self.json_setting.set("latest_det_path", det_path)
        annos = kitti.filter_empty_annos(self.detection_annos)
        self.dt_image_idxes = [anno["image_idx"][0] for anno in annos]
        # get class in dt
        available_cls = []
        for anno in self.detection_annos:
            for name in anno["name"]:
                if name not in available_cls:
                    available_cls.append(name)
        self.dt_combobox.clear()
        self.dt_combobox.addItem("All")
        for cls_name in available_cls:
            self.dt_combobox.addItem(cls_name)
        current_class = self.dt_combobox.currentText()
        if current_class == "All":
            self._current_dt_cls_ids = self.dt_image_idxes
        else:
            self._current_dt_cls_ids = [
                anno["image_idx"][0] for anno in annos
                if anno["name"] == current_class
            ]
        self._current_dt_cls_idx = 0
        """
        if self.kitti_infos is not None:
            t = time.time()
            gt_annos = [info["annos"] for info in self.kitti_infos]
            self.message(get_official_eval_result(gt_annos, dt_annos, 0))
            self.message(f"eval use time: {time.time() - t:.4f}")
        """
    def sample_to_current_data(self):
        """Augment the current frame by pasting sampled GT objects from the
        database sampler into self.points / self.gt_boxes / self.gt_names.

        Concatenates sampled boxes, names, points, difficulty (and group ids
        when present) onto the current frame's arrays, then filters everything
        by the combined validity mask. Requires a loaded frame and a built
        db_sampler; logs an error otherwise.
        """
        if self.kitti_info is None:
            self.error("you must load infos and choose a existing image idx first.")
            return
        sampled_difficulty = []
        # class_names = ["Car"]
        pc_info = self.kitti_info["point_cloud"]
        calib = self.kitti_info["calib"]
        rect = calib['R0_rect']
        P2 = calib['P2']
        Trv2c = calib['Tr_velo_to_cam']
        num_features = 4
        if 'num_features' in pc_info:
            num_features = pc_info['num_features']
        # class_names = self.w_config.get("UsedClass")
        # class_names_group = [["trailer", "tractor"]]
        if self.db_sampler is not None:
            # gt_boxes_mask = np.array(
            #     [n in class_names for n in self.gt_names], dtype=np.bool_)
            # all existing boxes are kept; mask only extended by sampled masks
            gt_boxes_mask = np.ones((self.gt_names.shape[0],), np.bool_)
            sampled_dict = self.db_sampler.sample_all(
                self.root_path,
                self.gt_boxes,
                self.gt_names,
                num_features,
                False,
                gt_group_ids=self.group_ids)
            if sampled_dict is not None:
                sampled_gt_names = sampled_dict["gt_names"]
                sampled_gt_boxes = sampled_dict["gt_boxes"]
                sampled_points = sampled_dict["points"]
                sampled_gt_masks = sampled_dict["gt_masks"]
                sampled_difficulty = sampled_dict["difficulty"]
                # gt_names = gt_names[gt_boxes_mask].tolist()
                self.gt_names = np.concatenate(
                    [self.gt_names, sampled_gt_names], axis=0)
                # gt_names += [s["name"] for s in sampled]
                self.gt_boxes = np.concatenate(
                    [self.gt_boxes, sampled_gt_boxes])
                gt_boxes_mask = np.concatenate(
                    [gt_boxes_mask, sampled_gt_masks], axis=0)
                self.difficulty = np.concatenate(
                    [self.difficulty, sampled_difficulty], axis=0)
                # sampled points are prepended so they render beneath originals
                self.points = np.concatenate(
                    [sampled_points, self.points], axis=0)
                sampled_group_ids = sampled_dict["group_ids"]
                if self.group_ids is not None:
                    self.group_ids = np.concatenate(
                        [self.group_ids, sampled_group_ids])
            '''
            prep.noise_per_object_(
                self.gt_boxes,
                self.points,
                gt_boxes_mask,
                rotation_perturb=[-1.57, 1.57],
                center_noise_std=[1.0, 1.0, 1.0],
                num_try=50)'''
            # should remove unrelated objects after noise per object
            self.gt_boxes = self.gt_boxes[gt_boxes_mask]
            self.gt_names = self.gt_names[gt_boxes_mask]
            self.difficulty = self.difficulty[gt_boxes_mask]
            if self.group_ids is not None:
                self.group_ids = self.group_ids[gt_boxes_mask]
        else:
            self.error("you enable sample but not provide a database")
    def data_augmentation(self):
        """Apply per-object noise augmentation to the loaded frame in place.

        Seeds numpy with a fresh random seed (logged so runs can be
        reproduced), then perturbs self.gt_boxes / self.points via
        prep.noise_per_object_v3_, optionally grouped by object group ids.
        """
        if self.kitti_info is None:
            self.error("you must load infos and choose a existing image idx first.")
            return
        seed = np.random.randint(5000000)
        np.random.seed(seed)
        # seed = 1798767
        self.info(f"prep random seed: {seed}")
        t = time.time()
        group_ids = None
        if self.w_config.get("GroupNoisePerObject"):
            group_ids = self.group_ids
        prep.noise_per_object_v3_(
            self.gt_boxes,
            self.points,
            # rotation_perturb=0.0,
            # center_noise_std=0,
            global_random_rot_range=[np.pi / 4, np.pi / 4 * 3],
            # global_random_rot_range=[0, 6.28],
            group_ids=group_ids,
            num_try=100)
        self.info("noise time", time.time() - t)
        # self.gt_boxes, self.points = prep.random_flip(
        #     self.gt_boxes, self.points)
        # self.gt_boxes, self.points = prep.global_rotation(
        #     self.gt_boxes, self.points)
        # self.gt_boxes[:, 6] = box_np_ops.limit_angles(self.gt_boxes[:, 6])
        # self.gt_boxes, self.points = prep.global_scaling(
        #     self.gt_boxes, self.points)
        # mask = prep.filter_gt_box_outside_range(
        #     self.gt_boxes, [0, -40, 70.4, 40])
        # self.gt_boxes = self.gt_boxes[mask]
    def draw_gt_in_image(self):
        """Project the lidar-frame GT boxes into the camera image and draw
        them as blue 3D wireframes on the matplotlib axes."""
        if self.kitti_info is None:
            self.error("you must load infos and choose a existing image idx first.")
            return
        calib = self.kitti_info["calib"]
        rect = calib['R0_rect']
        P2 = calib['P2']
        Trv2c = calib['Tr_velo_to_cam']
        # lidar boxes -> camera frame -> 8 corners -> pixel coordinates
        gt_boxes_camera = box_np_ops.box_lidar_to_camera(
            self.gt_boxes, rect, Trv2c)
        boxes_3d = box_np_ops.center_to_corner_box3d(gt_boxes_camera[:, :3],
                                                     gt_boxes_camera[:, 3:6],
                                                     gt_boxes_camera[:, 6],
                                                     origin=[0.5, 1.0, 0.5],
                                                     axis=1)
        boxes_3d = boxes_3d.reshape((-1, 3))
        boxes_3d_p2 = box_np_ops.project_to_image(boxes_3d, P2)
        boxes_3d_p2 = boxes_3d_p2.reshape([-1, 8, 2])
        if self.current_image is not None:
            bbox_plot.draw_3d_bbox_in_ax(
                self.w_plt.ax, boxes_3d_p2, colors='b')
    def draw_detection(self, detection_anno, label_color=GLColor.Blue):
        """Render one frame's detections in the 3D point-cloud viewer.

        Filters detections below the configured score threshold, optionally
        computes per-detection IoU against the current GT boxes, and draws
        labeled 3D boxes whose alpha can encode the detection score.
        """
        if self.kitti_info is None:
            self.error("you must load infos and choose a existing image idx first.")
            return
        dt_box_color = self.w_config.get("DTBoxColor")[:3]
        dt_box_color = (*dt_box_color, self.w_config.get("DTBoxAlpha"))
        pc_info = self.kitti_info["point_cloud"]
        calib = self.kitti_info["calib"]
        rect = calib['R0_rect']
        P2 = calib['P2']
        Trv2c = calib['Tr_velo_to_cam']
        # detection_anno = kitti.remove_low_height(detection_anno, 25)
        detection_anno = kitti.remove_low_score(detection_anno, self.w_config.get("DTScoreThreshold"))
        dt_bboxes = detection_anno["bbox"]
        dt_boxes_corners, scores, dt_box_lidar = kitti_anno_to_corners(
            self.kitti_info, detection_anno)
        print("DEBUG", dt_box_lidar)
        print("DEBUG Scores", scores)
        if self.gt_boxes is not None:
            iou = _riou3d_shapely(self.gt_boxes, dt_box_lidar)
            if iou.shape[0] != 0:
                dt_to_gt_box_iou = iou.max(0)
            else:
                dt_to_gt_box_iou = np.zeros([0, 0])
        num_dt = dt_box_lidar.shape[0]
        # project corners for image overlay (computed here, drawn elsewhere)
        dt_boxes_corners_cam = box_np_ops.lidar_to_camera(
            dt_boxes_corners, rect, Trv2c)
        dt_boxes_corners_cam = dt_boxes_corners_cam.reshape((-1, 3))
        dt_boxes_corners_cam_p2 = box_np_ops.project_to_image(
            dt_boxes_corners_cam, P2)
        dt_boxes_corners_cam_p2 = dt_boxes_corners_cam_p2.reshape([-1, 8, 2])
        dt_labels = detection_anno["name"]
        dt_scores_text = None
        if scores is not None:
            if self.gt_boxes is not None:
                dt_scores_text = [
                    f'score={s:.2f}, iou={i:.2f}'
                    for s, i in zip(scores, dt_to_gt_box_iou)
                ]
            else:
                dt_scores_text = [
                    f'score={s:.2f}, z={z:.2f}'
                    for s, z in zip(scores, dt_box_lidar[:, 2])
                ]
            if self.w_config.get("DrawDTLabels"):
                self.w_pc_viewer.labels("dt_boxes/labels",
                                        dt_boxes_corners[:, 1, :], dt_scores_text,
                                        label_color, 15)
        dt_box_color = np.tile(np.array(dt_box_color)[np.newaxis, ...], [num_dt, 1])
        if self.w_config.get("DTScoreAsAlpha") and scores is not None:
            dt_box_color = np.concatenate([dt_box_color[:, :3], scores[..., np.newaxis]], axis=1)
        self.w_pc_viewer.boxes3d("dt_boxes", dt_boxes_corners, dt_box_color,
                                 self.w_config.get("DTBoxLineWidth"), 1.0)
    def plot_gt_boxes_in_pointcloud(self):
        """Draw the GT 3D boxes (with optional index/class/difficulty labels)
        in the point-cloud viewer, using the configured color and alpha."""
        if self.kitti_info is None:
            self.error("you must load infos and choose a existing image idx first.")
            return
        if 'annos' in self.kitti_info:
            gt_box_color = self.w_config.get("GTBoxColor")[:3]
            gt_box_color = (*gt_box_color, self.w_config.get("GTBoxAlpha"))
            diff = self.difficulty.tolist()
            diff_to_name = {-1: "unk", 0: "easy", 1: "moderate", 2: "hard"}
            diff_names = [diff_to_name[d] for d in diff]
            label_idx = list(range(self.gt_names.shape[0]))
            # label format: "<index>:<class>, <difficulty>"
            labels_ = [
                f'{i}:{l}, {d}'
                for i, l, d in zip(label_idx, self.gt_names, diff_names)
            ]
            boxes_corners = box_np_ops.center_to_corner_box3d(
                self.gt_boxes[:, :3],
                self.gt_boxes[:, 3:6],
                self.gt_boxes[:, 6],
                origin=[0.5, 0.5, 0.5],
                axis=2)
            # print(self.gt_boxes[:, 6])
            # print(self.gt_boxes[:, :3])
            self.w_pc_viewer.boxes3d("gt_boxes", boxes_corners, gt_box_color,
                                     3.0, 1.0)
            if self.w_config.get("DrawGTLabels"):
                self.w_pc_viewer.labels("gt_boxes/labels", boxes_corners[:, 0, :],
                                        labels_, GLColor.Green, 15)
    def plot_pointcloud(self):
        """Scatter the current frame's points in the 3D viewer.

        Points inside GT boxes get the configured GT color/size; reflectivity
        can be mapped to alpha. Also re-draws detections for this frame (after
        clearing the old ones) when detections are loaded and enabled.
        """
        if self.kitti_info is None:
            self.error("you must load infos and choose a existing image idx first.")
            return
        pc_info = self.kitti_info["point_cloud"]
        image_info = self.kitti_info["image"]
        point_color = self.w_config.get("PointColor")[:3]
        point_color = (*point_color, self.w_config.get("PointAlpha"))
        # per-point color/size arrays so GT points can be highlighted below
        point_color = np.tile(np.array(point_color), [self.points.shape[0], 1])
        # self.w_pc_viewer.reset_camera()
        point_size = np.full(
            [self.points.shape[0]],
            self.w_config.get("PointSize"),
            dtype=np.float32)
        # self.w_pc_viewer.draw_point_cloud(self.points, color=points_rgb, with_reflectivity=False, size=0.1)
        # self.w_pc_viewer.draw_bounding_box()
        idx = self.image_idxes.index(image_info["image_idx"])
        if 'annos' in self.kitti_info:
            # poses = np.zeros([self.gt_boxes.shape[0], 3])
            # self.w_pc_viewer.circles(
            #     "circles", poses, np.linalg.norm(
            #         self.gt_boxes[:, :3], axis=-1))
            # self.w_pc_viewer.draw_anchors_trunk(
            #     self.gt_boxes, self.points, gt_names=gt_names)
            # self.w_pc_viewer.draw_anchors_v1(
            #     self.gt_boxes, self.points, gt_names=gt_names)
            # self.w_pc_viewer.draw_frustum(bboxes, rect, Trv2c, P2)
            # self.w_pc_viewer.draw_cropped_frustum(bboxes, rect, Trv2c, P2)
            gt_point_mask = box_np_ops.points_in_rbbox(self.points,
                                                       self.gt_boxes).any(1)
            point_size[gt_point_mask] = self.w_config.get("GTPointSize")
            gt_point_color = self.w_config.get("GTPointColor")
            gt_point_color = (*gt_point_color[:3],
                              self.w_config.get("GTPointAlpha"))
            point_color[gt_point_mask] = gt_point_color
        self.w_pc_viewer.remove("dt_boxes/labels")
        self.w_pc_viewer.remove("dt_boxes")
        if self.detection_annos is not None and self.w_config.get("DrawDTBoxes"):
            detection_anno = self.detection_annos[idx]
            self.draw_detection(detection_anno)
        if self.w_config.get("WithReflectivity"):
            if self.points.shape[1] < 4:
                self.error("Your pointcloud don't contain reflectivity.")
            else:
                # reflectivity in [0,1] mapped to alpha in [0.2, 1.0]
                point_color = np.concatenate(
                    [point_color[:, :3], self.points[:, 3:4] * 0.8 + 0.2],
                    axis=1)
        self.w_pc_viewer.scatter(
            "pointcloud", self.points[:, :3], point_color, size=point_size)
        """
        coors_range = np.array(self.w_config.get("CoorsRange"), dtype=np.float32)
        bv_range = coors_range[[0, 1, 3, 4]]
        voxel_size = np.array(self.w_config.get("VoxelSize"), dtype=np.float32)
        grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size
        grid_size = np.round(grid_size).astype(np.int64)
        foo_map_size = grid_size[:2]
        # foo_map_size = [200, 200]
        x = np.arange(foo_map_size[0])
        y = np.arange(foo_map_size[1])
        shift = coors_range[:2]
        x = x * voxel_size[0] + shift[0] + 0.5 * voxel_size[0]
        y = y * voxel_size[1] + shift[1] + 0.5 * voxel_size[1]
        xy1, xy2 = np.meshgrid(x, y)
        def gaussian2d(x, y, A, ux=0, uy=0, stdx=1, stdy=1):
            return A * np.exp(-0.5 * ((x - ux) / stdx) ** 2 - 0.5 * ((y - uy) / stdy) ** 2)
        z = gaussian2d(xy1, xy2, 0, 0, 2, 2) - 20
        self.w_pc_viewer.surface("test", x, y, z, GLColor.Purple, 0.5)
        """
    def load_info(self, image_idx):
        """Load one frame (image, point cloud, calib, GT annotations) into the
        viewer state for the given image index.

        Populates self.kitti_info, self.current_image, self.points and — when
        'annos' is present — self.gt_boxes (converted to lidar frame, center
        moved to the box middle), self.gt_names, self.difficulty and
        self.group_ids. Optionally removes points outside the image frustum
        and applies sampling / augmentation per the config.

        Returns False when image_idx is unknown; otherwise returns None.
        """
        if self.kitti_infos is None:
            self.error("you must load infos first.")
            return
        if image_idx not in self.image_idxes:
            self.error(f"index{image_idx} not exist.")
            return False
        self.json_setting.set("image_idx", str(image_idx))
        idx = self.image_idxes.index(image_idx)
        self.kitti_info = self.kitti_infos[idx]
        pc_info = self.kitti_info["point_cloud"]
        image_info = self.kitti_info["image"]
        calib = self.kitti_info["calib"]
        if "timestamp" in self.kitti_info:
            self.message("timestamp", self.kitti_info["timestamp"])
        image = None
        if 'image_path' in image_info:
            img_path = image_info['image_path']
            if img_path != "":
                image = io.imread(str(self.root_path / img_path))
                self.current_image = image
            else:
                self.current_image = None
        else:
            self.current_image = None
        v_path = str(self.root_path / pc_info['velodyne_path'])
        num_features = 4
        if 'num_features' in pc_info:
            num_features = pc_info['num_features']
        points = np.fromfile(
            v_path, dtype=np.float32, count=-1).reshape([-1, num_features])
        self.points = points
        rect = calib['R0_rect'].astype(np.float32)
        Trv2c = calib['Tr_velo_to_cam'].astype(np.float32)
        P2 = calib['P2'].astype(np.float32)
        image_shape = None
        if 'image_shape' in image_info:
            image_shape = image_info['image_shape']
        # self.info("num_points before remove:", self.points.shape[0])
        if self.w_config.get("RemoveOutsidePoint"):
            self.points = box_np_ops.remove_outside_points(
                self.points, rect, Trv2c, P2, image_shape)
        # self.info("num_points after remove:", self.points.shape[0])
        # default the save path for this frame to "<same dir>/<idx>.jpg"
        img_path = self.w_image_save_path.text()
        img_path = str(Path(img_path).parent / f"{image_idx}.jpg")
        self.w_image_save_path.setText(img_path)
        self.json_setting.set("save_image_path", img_path)
        if 'annos' in self.kitti_info:
            annos = self.kitti_info['annos']
            # annos = kitti.filter_kitti_anno(annos,
            #                                 self.w_config.get("UsedClass"))
            labels = annos['name']
            # KITTI convention: real objects come first, DontCare entries last
            num_obj = len([n for n in annos['name'] if n != 'DontCare'])
            # print(annos["group_ids"].shape)
            dims = annos['dimensions'][:num_obj]
            loc = annos['location'][:num_obj]
            rots = annos['rotation_y'][:num_obj]
            self.difficulty = annos["difficulty"][:num_obj]
            self.gt_names = labels[:num_obj]
            gt_boxes_camera = np.concatenate(
                [loc, dims, rots[..., np.newaxis]], axis=1)
            self.gt_boxes = box_np_ops.box_camera_to_lidar(
                gt_boxes_camera, rect, Trv2c)
            box_np_ops.change_box3d_center_(self.gt_boxes, [0.5, 0.5, 0], [0.5, 0.5, 0.5])
            if 'group_ids' in annos:
                self.group_ids = annos['group_ids'][:num_obj]
        if self.w_config.get("EnableSample"):
            self.sample_to_current_data()
        if self.w_config.get("EnableAugmentation"):
            self.data_augmentation()
    def plot_image(self):
        """Show the current camera image on the matplotlib axes and overlay
        the filtered GT 2D bounding boxes with "<index>: <class>" labels.

        Returns False when no frame is loaded; otherwise returns None.
        """
        if self.kitti_info is None:
            self.error("you need to load the info first before plot image")
            return False
        if self.current_image is not None:
            self.w_plt.ax.imshow(self.current_image)
        if 'annos' in self.kitti_info:
            annos = self.kitti_info['annos']
            annos = kitti.filter_kitti_anno(annos,
                                            self.w_config.get("UsedClass"))
            print("DEBUG", self.w_config.get("UsedClass"))
            print("DEBUG", len(annos['name']))
            labels = annos['name']
            num_obj = len([n for n in annos['name'] if n != 'DontCare'])
            # first num_obj boxes drawn green, the rest (DontCare) blue
            bbox_plot.draw_bbox_in_ax(
                self.w_plt.ax,
                annos['bbox'],
                edgecolors=['g'] * num_obj + ['b'] * num_obj,
                labels=[f'{i}: {labels[i]}' for i in range(len(labels))])
    def plot_all(self, image_idx):
        """Load the frame for image_idx and redraw everything: image overlay,
        GT-in-image wireframes, point cloud, and (per config) GT boxes and
        voxels in the 3D viewer. Returns True."""
        self.w_plt.reset_plot()
        self.load_info(image_idx)
        self.plot_image()
        self.draw_gt_in_image()
        self.w_plt.draw()  # this isn't supported in ubuntu.
        self.plot_pointcloud()
        if self.w_config.get("DrawGTBoxes"):
            self.plot_gt_boxes_in_pointcloud()
        if self.w_config.get("DrawVoxels"):
            self.w_pc_viewer.draw_voxels(self.points, self.gt_boxes)
        return True
    def on_plotButtonPressed(self):
        """Qt slot: plot the frame whose index is typed in the index textbox,
        and sync the global cursor on success."""
        if self.kitti_infos is None:
            self.error("you must load Kitti Infos first.")
            return
        image_idx = int(self.w_imgidx.text())
        if self.plot_all(image_idx):
            self.current_idx = self.image_idxes.index(image_idx)
    def on_plotAllButtonPressed(self):
        """Qt slot: render every loaded frame and save a screenshot of the
        point-cloud viewer as "<save dir>/<idx>.jpg" for each one."""
        for idx in self.image_idxes:
            self.plot_all(idx)
            # force a repaint so the grabbed framebuffer is up to date
            self.update()
            self.w_pc_viewer.updateGL()
            p = self.w_pc_viewer.grabFrameBuffer()
            img_path = self.w_image_save_path.text()
            img_path = str(Path(img_path).parent / f"{idx}.jpg")
            p.save(img_path, 'jpg', 100)
            self.info("image saved to", img_path)
            print("image saved to", img_path)
    def closeEvent(self, event):
        """Persist the current config to the JSON settings file on window close."""
        config_str = self.w_config.dumps()
        self.json_setting.set("config", config_str)
        return super().closeEvent(event)
    def on_configchanged(self, msg):
        """Qt slot: persist the config on every change and redraw only the
        parts of the scene affected by the changed setting (msg.name)."""
        # self.warning(msg.name, msg.value)
        # save config to file
        idx = self.image_idxes.index(self.kitti_info["image"]["image_idx"])
        config_str = self.w_config.dumps()
        self.json_setting.set("config", config_str)
        # group config keys by which redraw they require
        pc_redraw_msgs = ["PointSize", "PointAlpha", "GTPointSize"]
        pc_redraw_msgs += ["GTPointAlpha", "WithReflectivity"]
        pc_redraw_msgs += ["PointColor", "GTPointColor"]
        box_redraw = ["GTBoxColor", "GTBoxAlpha"]
        dt_redraw = ["DTBoxColor", "DTBoxAlpha", "DrawDTLabels", "DTScoreAsAlpha", "DTScoreThreshold", "DTBoxLineWidth"]
        vx_redraw_msgs = ["DrawPositiveVoxelsOnly", "DrawVoxels"]
        vx_redraw_msgs += ["PosVoxelColor", "PosVoxelAlpha"]
        vx_redraw_msgs += ["NegVoxelColor", "NegVoxelAlpha"]
        all_redraw_msgs = ["RemoveOutsidePoint"]
        if msg.name in vx_redraw_msgs:
            if self.w_config.get("DrawVoxels"):
                self.w_pc_viewer.draw_voxels(self.points, self.gt_boxes)
            else:
                self.w_pc_viewer.remove("voxels")
        elif msg.name in pc_redraw_msgs:
            self.plot_pointcloud()
        elif msg.name in all_redraw_msgs:
            self.on_plotButtonPressed()
        elif msg.name in box_redraw:
            self.plot_gt_boxes_in_pointcloud()
        elif msg.name in dt_redraw:
            if self.detection_annos is not None and self.w_config.get("DrawDTBoxes"):
                detection_anno = self.detection_annos[idx]
                self.draw_detection(detection_anno)
    def on_loadVxNetCkptPressed(self):
        """Qt slot: restore network weights from the checkpoint path textbox
        into the already-built inference context."""
        ckpt_path = Path(self.w_vckpt_path.text())
        self.json_setting.set("latest_vxnet_ckpt_path",
                              self.w_vckpt_path.text())
        self.inference_ctx.restore(ckpt_path)
        # self.w_load_ckpt.setText(self.w_load_ckpt.text() + f": {ckpt_path.stem}")
        self.info("load VoxelNet ckpt succeed.")
    def on_BuildVxNetPressed(self):
        """Qt slot: build the inference context (TF or Torch, per config) from
        the network config file path textbox."""
        if self.w_config.get("TensorflowInference"):
            self.inference_ctx = TFInferenceContext()
        else:
            self.inference_ctx = TorchInferenceContext()
        vconfig_path = Path(self.w_vconfig_path.text())
        self.inference_ctx.build(vconfig_path)
        self.json_setting.set("latest_vxnet_cfg_path", str(vconfig_path))
        self.info("Build VoxelNet ckpt succeed.")
        # self.w_load_config.setText(self.w_load_config.text() + f": {vconfig_path.stem}")
    def on_InferenceVxNetPressed(self):
        """Qt slot: run the network on the current frame's points and draw
        the resulting detections, logging prep and inference timings."""
        t = time.time()
        inputs = self.inference_ctx.get_inference_input_dict(
            self.kitti_info, self.points)
        self.info("input preparation time:", time.time() - t)
        t = time.time()
        with self.inference_ctx.ctx():
            det_annos = self.inference_ctx.inference(inputs)
        self.info("detection time:", time.time() - t)
        self.draw_detection(det_annos[0])
    def on_LoadInferenceVxNetPressed(self):
        """Qt slot: convenience — build the network, load weights, and run
        inference on the current frame in one click."""
        self.on_BuildVxNetPressed()
        self.on_loadVxNetCkptPressed()
        self.on_InferenceVxNetPressed()
    def on_EvalVxNetPressed(self):
        """Qt slot: run inference over every loaded frame and log the official
        KITTI evaluation result against the GT annotations.

        NOTE(review): the missing-'annos' check only logs and continues;
        gt_annos extraction below would then raise — confirm intended.
        """
        if "annos" not in self.kitti_infos[0]:
            self.error("ERROR: infos don't contain gt label.")
        t = time.time()
        det_annos = []
        input_cfg = self.inference_ctx.config.eval_input_reader
        model_cfg = self.inference_ctx.config.model.second
        class_names = list(input_cfg.class_names)
        num_features = model_cfg.num_point_features
        with self.inference_ctx.ctx():
            for info in list_bar(self.kitti_infos):
                v_path = self.root_path / info['velodyne_path']
                # v_path = v_path.parent.parent / (
                #     v_path.parent.stem + "_reduced") / v_path.name
                points = np.fromfile(
                    str(v_path), dtype=np.float32,
                    count=-1).reshape([-1, num_features])
                rect = info['calib/R0_rect']
                P2 = info['calib/P2']
                Trv2c = info['calib/Tr_velo_to_cam']
                image_shape = info['img_shape']
                if self.w_config.get("RemoveOutsidePoint"):
                    points = box_np_ops.remove_outside_points(
                        points, rect, Trv2c, P2, image_shape)
                inputs = self.inference_ctx.get_inference_input_dict(
                    info, points)
                det_annos += self.inference_ctx.inference(inputs)
        self.info("total detection time:", time.time() - t)
        gt_annos = [i["annos"] for i in self.kitti_infos]
        self.info(get_official_eval_result(gt_annos, det_annos, class_names))
@staticmethod
def get_simpify_labels(labels):
label_map = {
"Car": "V",
"Pedestrian": "P",
"Cyclist": "C",
"car": "C",
"tractor": "T1",
"trailer": "T2",
}
label_count = {
"Car": 0,
"Pedestrian": 0,
"Cyclist": 0,
"car": 0,
"tractor": 0,
"trailer": 0,
}
ret = []
for i, name in enumerate(labels):
count = 0
if name in label_count:
count = label_count[name]
label_count[name] += 1
else:
label_count[name] = 0
ret.append(f"{label_map[name]}{count}")
return ret
    @staticmethod
    def get_false_pos_neg(gt_boxes, dt_boxes, labels, fp_thresh=0.1):
        """Classify each GT box as TP(0), FP-ish(1) or missed(2) by max IoU
        against the detections, using per-class TP thresholds.

        Returns (ret, assigned_dt): ret[i] is 0 when the best-matching
        detection's IoU >= the class threshold, 1 when it is between
        fp_thresh and the threshold, 2 otherwise (the default fill value);
        assigned_dt marks detections matched to some GT with IoU >= fp_thresh.
        """
        iou = _riou3d_shapely(gt_boxes, dt_boxes)
        ret = np.full([len(gt_boxes)], 2, dtype=np.int64)
        assigned_dt = np.zeros([len(dt_boxes)], dtype=np.bool_)
        label_thresh_map = {
            "Car": 0.7,
            "Pedestrian": 0.5,
            "Cyclist": 0.5,
            "car": 0.7,
            "tractor": 0.7,
            "trailer": 0.7,
        }
        tp_thresh = np.array([label_thresh_map[n] for n in labels])
        if len(gt_boxes) != 0 and len(dt_boxes) != 0:
            iou_max_dt_for_gt = iou.max(1)
            dt_iou_max_dt_for_gt = iou.argmax(1)
            ret[iou_max_dt_for_gt >= tp_thresh] = 0
            ret[np.logical_and(iou_max_dt_for_gt < tp_thresh,
                               iou_max_dt_for_gt > fp_thresh)] = 1  # FP
            assigned_dt_inds = dt_iou_max_dt_for_gt
            assigned_dt_inds = assigned_dt_inds[iou_max_dt_for_gt >= fp_thresh]
            assigned_dt[assigned_dt_inds] = True
        return ret, assigned_dt
if __name__ == '__main__':
    # Launch the Qt application and hand control to its event loop.
    app = QApplication(sys.argv)
    ex = KittiViewer()
    # Removed leftover debug prints ("++++++????" and the initial
    # kitti_info dump) that polluted stdout on startup.
    sys.exit(app.exec_())
| 68,112 | 41.838365 | 120 | py |
second.pytorch | second.pytorch-master/second/kittiviewer/backend/main.py | """This backend now only support lidar. camera is no longer supported.
"""
import base64
import datetime
import io as sysio
import json
import pickle
import time
from pathlib import Path
import fire
import torch
import numpy as np
import skimage
from flask import Flask, jsonify, request
from flask_cors import CORS
from google.protobuf import text_format
from skimage import io
from second.data import kitti_common as kitti
from second.data.all_dataset import get_dataset_class
from second.protos import pipeline_pb2
from second.pytorch.builder import (box_coder_builder, input_reader_builder,
lr_scheduler_builder, optimizer_builder,
second_builder)
from second.pytorch.train import build_network, example_convert_to_torch
app = Flask("second")
CORS(app)
class SecondBackend:
    """Mutable holder for the global inference state shared by the flask
    endpoints: dataset, network, device, detections and paths."""

    def __init__(self):
        # Every field starts unset; the endpoints populate them on demand.
        for attr in ("root_path", "image_idxes", "dt_annos",
                     "dataset", "net", "device"):
            setattr(self, attr, None)


BACKEND = SecondBackend()
def error_response(msg):
    """Print an error to stdout and return a JSON-serializable error payload."""
    text = "[ERROR]" + msg
    print(text)
    return {"status": "error", "message": text}
@app.route('/api/readinfo', methods=['POST'])
def readinfo():
    """Flask endpoint: open a dataset from root_path/info_path/class name in
    the request JSON and return the available image indexes."""
    global BACKEND
    instance = request.json
    root_path = Path(instance["root_path"])
    response = {"status": "normal"}
    BACKEND.root_path = root_path
    info_path = Path(instance["info_path"])
    dataset_class_name = instance["dataset_class_name"]
    BACKEND.dataset = get_dataset_class(dataset_class_name)(root_path=root_path, info_path=info_path)
    BACKEND.image_idxes = list(range(len(BACKEND.dataset)))
    response["image_indexes"] = BACKEND.image_idxes
    response = jsonify(results=[response])
    response.headers['Access-Control-Allow-Headers'] = '*'
    return response
@app.route('/api/read_detection', methods=['POST'])
def read_detection():
    """Flask endpoint: load detection annotations from a pickle file or a
    KITTI label directory into the global backend state."""
    global BACKEND
    instance = request.json
    det_path = Path(instance["det_path"])
    response = {"status": "normal"}
    if BACKEND.root_path is None:
        return error_response("root path is not set")
    # a file is a pickled anno list; a directory is parsed as label files
    if Path(det_path).is_file():
        with open(det_path, "rb") as f:
            dt_annos = pickle.load(f)
    else:
        dt_annos = kitti.get_label_annos(det_path)
    BACKEND.dt_annos = dt_annos
    response = jsonify(results=[response])
    response.headers['Access-Control-Allow-Headers'] = '*'
    return response
@app.route('/api/get_pointcloud', methods=['POST'])
def get_pointcloud():
    """Flask endpoint: return one frame's point cloud (base64-encoded, xyz
    only) plus its GT boxes/labels when annotations are present.

    With enable_int16, points are scaled by int16_factor and cast to int16
    to shrink the payload.
    """
    global BACKEND
    instance = request.json
    response = {"status": "normal"}
    if BACKEND.root_path is None:
        return error_response("root path is not set")
    image_idx = instance["image_idx"]
    enable_int16 = instance["enable_int16"]
    idx = BACKEND.image_idxes.index(image_idx)
    sensor_data = BACKEND.dataset.get_sensor_data(idx)
    # img_shape = image_info["image_shape"] # hw
    if 'annotations' in sensor_data["lidar"]:
        annos = sensor_data["lidar"]['annotations']
        gt_boxes = annos["boxes"].copy()
        response["locs"] = gt_boxes[:, :3].tolist()
        response["dims"] = gt_boxes[:, 3:6].tolist()
        # yaw is negated and padded to a 3-vector for the frontend
        rots = np.concatenate([np.zeros([gt_boxes.shape[0], 2], dtype=np.float32), -gt_boxes[:, 6:7]], axis=1)
        response["rots"] = rots.tolist()
        response["labels"] = annos["names"].tolist()
    # response["num_features"] = sensor_data["lidar"]["points"].shape[1]
    # hard-coded to 3 to match the [:, :3] slice below (xyz only)
    response["num_features"] = 3
    points = sensor_data["lidar"]["points"][:, :3]
    if enable_int16:
        int16_factor = instance["int16_factor"]
        points *= int16_factor
        points = points.astype(np.int16)
    pc_str = base64.b64encode(points.tobytes())
    response["pointcloud"] = pc_str.decode("utf-8")
    # if "score" in annos:
    #     response["score"] = score.tolist()
    response = jsonify(results=[response])
    response.headers['Access-Control-Allow-Headers'] = '*'
    print("send response with size {}!".format(len(pc_str)))
    return response
@app.route('/api/get_image', methods=['POST'])
def get_image():
    """Flask endpoint: return one frame's camera image as a base64 data URI,
    or an empty string when the dataset provides no image data."""
    global BACKEND
    instance = request.json
    response = {"status": "normal"}
    if BACKEND.root_path is None:
        return error_response("root path is not set")
    image_idx = instance["image_idx"]
    idx = BACKEND.image_idxes.index(image_idx)
    query = {
        "lidar": {
            "idx": idx
        },
        "cam": {}
    }
    sensor_data = BACKEND.dataset.get_sensor_data(query)
    if "cam" in sensor_data and "data" in sensor_data["cam"] and sensor_data["cam"]["data"] is not None:
        image_str = sensor_data["cam"]["data"]
        response["image_b64"] = base64.b64encode(image_str).decode("utf-8")
        response["image_b64"] = 'data:image/{};base64,'.format(sensor_data["cam"]["datatype"]) + response["image_b64"]
        print("send an image with size {}!".format(len(response["image_b64"])))
    else:
        response["image_b64"] = ""
    response = jsonify(results=[response])
    response.headers['Access-Control-Allow-Headers'] = '*'
    return response
@app.route('/api/build_network', methods=['POST'])
def build_network_():
    """Flask endpoint: build the network from a config file, load checkpoint
    weights, and build the eval dataset — all stored in the global backend.

    NOTE(review): `device = device = ...` is a harmless duplicated assignment.
    """
    global BACKEND
    instance = request.json
    cfg_path = Path(instance["config_path"])
    ckpt_path = Path(instance["checkpoint_path"])
    response = {"status": "normal"}
    if BACKEND.root_path is None:
        return error_response("root path is not set")
    if not cfg_path.exists():
        return error_response("config file not exist.")
    if not ckpt_path.exists():
        return error_response("ckpt file not exist.")
    config = pipeline_pb2.TrainEvalPipelineConfig()
    with open(cfg_path, "r") as f:
        proto_str = f.read()
        text_format.Merge(proto_str, config)
    device = device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = build_network(config.model.second).to(device).float().eval()
    net.load_state_dict(torch.load(ckpt_path))
    eval_input_cfg = config.eval_input_reader
    BACKEND.dataset = input_reader_builder.build(
        eval_input_cfg,
        config.model.second,
        training=False,
        voxel_generator=net.voxel_generator,
        target_assigner=net.target_assigner).dataset
    BACKEND.net = net
    BACKEND.config = config
    BACKEND.device = device
    response = jsonify(results=[response])
    response.headers['Access-Control-Allow-Headers'] = '*'
    print("build_network successful!")
    return response
@app.route('/api/inference_by_idx', methods=['POST'])
def inference_by_idx():
    """Flask endpoint: run the built network on one example and return the
    predicted box locations, dims, yaws, labels and scores as lists."""
    global BACKEND
    instance = request.json
    response = {"status": "normal"}
    if BACKEND.root_path is None:
        return error_response("root path is not set")
    image_idx = instance["image_idx"]
    # remove_outside = instance["remove_outside"]
    idx = BACKEND.image_idxes.index(image_idx)
    example = BACKEND.dataset[idx]
    # don't forget to pad batch idx in coordinates
    example["coordinates"] = np.pad(
        example["coordinates"], ((0, 0), (1, 0)),
        mode='constant',
        constant_values=0)
    # don't forget to add newaxis for anchors
    example["anchors"] = example["anchors"][np.newaxis, ...]
    example_torch = example_convert_to_torch(example, device=BACKEND.device)
    pred = BACKEND.net(example_torch)[0]
    box3d = pred["box3d_lidar"].detach().cpu().numpy()
    locs = box3d[:, :3]
    dims = box3d[:, 3:6]
    # yaw negated and padded to 3-vector, matching get_pointcloud's format
    rots = np.concatenate([np.zeros([locs.shape[0], 2], dtype=np.float32), -box3d[:, 6:7]], axis=1)
    response["dt_locs"] = locs.tolist()
    response["dt_dims"] = dims.tolist()
    response["dt_rots"] = rots.tolist()
    response["dt_labels"] = pred["label_preds"].detach().cpu().numpy().tolist()
    response["dt_scores"] = pred["scores"].detach().cpu().numpy().tolist()
    response = jsonify(results=[response])
    response.headers['Access-Control-Allow-Headers'] = '*'
    return response
def main(port=16666):
    """Run the flask backend on localhost at the given port (threaded)."""
    app.run(host='127.0.0.1', threaded=True, port=port)
if __name__ == '__main__':
    # Expose this module's functions (e.g. main) as a CLI via python-fire.
    fire.Fire()
| 8,121 | 34.313043 | 118 | py |
second.pytorch | second.pytorch-master/second/data/dataset.py | import pathlib
import pickle
import time
from functools import partial
import numpy as np
from second.core import box_np_ops
from second.core import preprocess as prep
from second.data import kitti_common as kitti
# Global name -> class registry populated via the register_dataset decorator.
REGISTERED_DATASET_CLASSES = {}


def register_dataset(cls, name=None):
    """Class decorator that records `cls` in the global dataset registry.

    The registry key defaults to the class name; registering the same key
    twice is an error. Returns `cls` unchanged so it can decorate directly.
    """
    global REGISTERED_DATASET_CLASSES
    key = cls.__name__ if name is None else name
    assert key not in REGISTERED_DATASET_CLASSES, f"exist class: {REGISTERED_DATASET_CLASSES}"
    REGISTERED_DATASET_CLASSES[key] = cls
    return cls
def get_dataset_class(name):
    """Return the dataset class registered under `name`; asserts it exists."""
    global REGISTERED_DATASET_CLASSES
    assert name in REGISTERED_DATASET_CLASSES, f"available class: {REGISTERED_DATASET_CLASSES}"
    return REGISTERED_DATASET_CLASSES[name]
class Dataset(object):
    """Abstract pytorch-style dataset interface.

    Subclasses override ``__len__`` (dataset size), ``__getitem__`` (integer
    indexing in ``[0, len(self))``), ``get_sensor_data`` and ``evaluation``.
    """
    # Number of features per lidar point; -1 means "unspecified" and must be
    # overridden by concrete subclasses.
    NumPointFeatures = -1

    def __getitem__(self, index):
        """Preprocess one example into the network-input dict.

        The dict contains: anchors, voxels, num_points, coordinates; when
        training it also carries labels and reg_targets. Optional keys:
        anchors_mask (slow in SECOND v1.5 — avoid) and metadata (for KITTI
        the image index is stored there).
        """
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def get_sensor_data(self, query):
        """Unified accessor for raw sensor data.

        Args:
            query: int or dict. Must support int for training; an int returns
                all sensor data. A dict selects sensors by name:
                ``{sensor_name: {sensor_meta}}``.

        Returns:
            dict keyed by sensor name plus ``metadata`` (dataset-specific;
            for KITTI it must contain ``image_idx`` for label-file
            generation) and, for KITTI only, an optional ``calib`` entry.

            "lidar" entry (all lidar clouds concatenated into one array)::

                {
                    "points": [N, 3+] array,
                    # optional:
                    "annotations": {
                        "boxes": [N, 7] locs/dims/yaw in the lidar frame —
                            verify with second.utils.simplevis or the web
                            tool,
                        "names": array of str,
                    },
                }

            camera entry (not used yet)::

                {
                    "data": image bytes (arrays are too large),
                    # optional:
                    "annotations": {"boxes": [N, 4] 2d bbox,
                                    "names": array of str},
                }
        """
        raise NotImplementedError

    def evaluation(self, dt_annos, output_dir):
        """Evaluate detection annotations against this dataset's ground truth."""
        raise NotImplementedError
| 3,922 | 33.716814 | 95 | py |
second.pytorch | second.pytorch-master/second/pytorch/inference.py | from pathlib import Path
import numpy as np
import torch
import torchplus
from second.core import box_np_ops
from second.core.inference import InferenceContext
from second.builder import target_assigner_builder, voxel_builder
from second.pytorch.builder import box_coder_builder, second_builder
from second.pytorch.models.voxelnet import VoxelNet
from second.pytorch.train import predict_to_kitti_label, example_convert_to_torch
class TorchInferenceContext(InferenceContext):
    """Inference context wrapping a SECOND VoxelNet for single-example prediction.

    Lifecycle: ``_build`` constructs the network and caches anchors from
    ``self.config``; ``_restore`` loads a torchplus checkpoint;
    ``_inference`` runs one example and returns kitti-format annotations.
    """

    def __init__(self):
        super().__init__()
        self.net = None
        self.anchor_cache = None

    def _build(self):
        """Build voxel generator, target assigner, network and anchor cache."""
        config = self.config
        model_cfg = config.model.second
        train_cfg = config.train_config
        voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
        # Indices (0, 1, 3, 4) of the point cloud range — presumably the
        # bird's-eye-view (x_min, y_min, x_max, y_max); confirm upstream.
        bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
        grid_size = voxel_generator.grid_size
        self.voxel_generator = voxel_generator
        box_coder = box_coder_builder.build(model_cfg.box_coder)
        target_assigner_cfg = model_cfg.target_assigner
        target_assigner = target_assigner_builder.build(
            target_assigner_cfg, bv_range, box_coder)
        self.target_assigner = target_assigner
        # Total downsampling from RPN strides plus the middle extractor.
        out_size_factor = model_cfg.rpn.layer_strides[0] / model_cfg.rpn.upsample_strides[0]
        out_size_factor *= model_cfg.middle_feature_extractor.downsample_factor
        out_size_factor = int(out_size_factor)
        self.net = second_builder.build(model_cfg, voxel_generator,
                                        target_assigner)
        self.net.cuda().eval()
        if train_cfg.enable_mixed_precision:
            self.net.half()
            self.net.metrics_to_float()
            self.net.convert_norm_to_float(self.net)
        feature_map_size = grid_size[:2] // out_size_factor
        feature_map_size = [*feature_map_size, 1][::-1]
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        # Axis-aligned BEV boxes used for fast anchor masking.
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
            anchors[:, [0, 1, 3, 4, 6]])
        anchor_cache = {
            "anchors": anchors,
            "anchors_bv": anchors_bv,
            "matched_thresholds": matched_thresholds,
            "unmatched_thresholds": unmatched_thresholds,
            "anchors_dict": anchors_dict,
        }
        self.anchor_cache = anchor_cache

    def _restore(self, ckpt_path):
        """Restore network weights from a torchplus ``.tckpt`` checkpoint."""
        ckpt_path = Path(ckpt_path)
        assert ckpt_path.suffix == ".tckpt"
        torchplus.train.restore(str(ckpt_path), self.net)

    def _inference(self, example):
        """Run the network on one example and return kitti-format annotations."""
        model_cfg = self.config.model.second
        example_torch = example_convert_to_torch(example)
        result_annos = predict_to_kitti_label(
            self.net, example_torch, list(
                self.target_assigner.classes),
            model_cfg.post_center_limit_range, model_cfg.lidar_input)
        return result_annos

    def _ctx(self):
        # No special context needed for torch inference.
        return None
| 3,452 | 39.623529 | 92 | py |
second.pytorch | second.pytorch-master/second/pytorch/train.py | import copy
import json
import os
from pathlib import Path
import pickle
import shutil
import time
import re
import fire
import numpy as np
import torch
from google.protobuf import text_format
import second.data.kitti_common as kitti
import torchplus
from second.builder import target_assigner_builder, voxel_builder
from second.core import box_np_ops
from second.data.preprocess import merge_second_batch, merge_second_batch_multigpu
from second.protos import pipeline_pb2
from second.pytorch.builder import (box_coder_builder, input_reader_builder,
lr_scheduler_builder, optimizer_builder,
second_builder)
from second.utils.log_tool import SimpleModelLog
from second.utils.progress_bar import ProgressBar
import psutil
def example_convert_to_torch(example, dtype=torch.float32,
                             device=None) -> dict:
    """Convert a numpy example dict into torch tensors on *device*.

    Float fields are created as fp32 first and then cast to *dtype*
    (building fp16 tensors directly from fp32 data is slow). Integer
    fields become int32, ``anchors_mask`` uint8, ``calib`` entries are
    converted per-key, ``num_voxels`` keeps its inferred dtype, and any
    unrecognized field is passed through unchanged.
    """
    device = device or torch.device("cuda:0")
    float_keys = {
        "voxels", "anchors", "reg_targets", "reg_weights", "bev_map", "importance"
    }
    int_keys = {"coordinates", "labels", "num_points"}
    converted = {}
    for key, value in example.items():
        if key in float_keys:
            # slow when directly provide fp32 data with dtype=torch.half
            converted[key] = torch.tensor(
                value, dtype=torch.float32, device=device).to(dtype)
        elif key in int_keys:
            converted[key] = torch.tensor(
                value, dtype=torch.int32, device=device)
        elif key == "anchors_mask":
            converted[key] = torch.tensor(
                value, dtype=torch.uint8, device=device)
        elif key == "calib":
            converted[key] = {
                name: torch.tensor(mat, dtype=dtype, device=device).to(dtype)
                for name, mat in value.items()
            }
        elif key == "num_voxels":
            converted[key] = torch.tensor(value)
        else:
            converted[key] = value
    return converted
def build_network(model_cfg, measure_time=False):
    """Construct a SECOND network from its model config.

    Builds the voxel generator, box coder and target assigner, propagates
    the anchor generator's extra dimension count to the box coder, then
    assembles the network via ``second_builder``.
    """
    voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
    # Indices (0, 1, 3, 4) of the point cloud range — presumably the
    # bird's-eye-view (x_min, y_min, x_max, y_max); confirm upstream.
    bev_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
    box_coder = box_coder_builder.build(model_cfg.box_coder)
    target_assigner = target_assigner_builder.build(
        model_cfg.target_assigner, bev_range, box_coder)
    box_coder.custom_ndim = target_assigner._anchor_generators[0].custom_ndim
    return second_builder.build(
        model_cfg, voxel_generator, target_assigner, measure_time=measure_time)
def _worker_init_fn(worker_id):
time_seed = np.array(time.time(), dtype=np.int32)
np.random.seed(time_seed + worker_id)
print(f"WORKER {worker_id} seed:", np.random.get_state()[1][0])
def freeze_params(params: dict, include: str=None, exclude: str=None):
    """Return the parameter values left after applying freeze filters.

    A parameter is dropped when its name matches *include*; when *exclude*
    is given, only names matching *exclude* are kept. (These asymmetric
    semantics mirror the original SECOND behavior.)
    """
    assert isinstance(params, dict)
    include_re = re.compile(include) if include is not None else None
    exclude_re = re.compile(exclude) if exclude is not None else None
    remaining = []
    for name, param in params.items():
        if include_re is not None and include_re.match(name) is not None:
            continue
        if exclude_re is not None and exclude_re.match(name) is None:
            continue
        remaining.append(param)
    return remaining
def freeze_params_v2(params: dict, include: str=None, exclude: str=None):
    """Disable gradients on selected parameters, in place.

    A parameter is frozen (``requires_grad = False``) when its name matches
    *include*, or when *exclude* is given and its name does NOT match it.
    Parameters selected by neither filter are left untouched.
    """
    assert isinstance(params, dict)
    include_re = re.compile(include) if include is not None else None
    exclude_re = re.compile(exclude) if exclude is not None else None
    for name, param in params.items():
        hit_include = include_re is not None and include_re.match(name) is not None
        missed_exclude = exclude_re is not None and exclude_re.match(name) is None
        if hit_include or missed_exclude:
            param.requires_grad = False
def filter_param_dict(state_dict: dict, include: str=None, exclude: str=None):
    """Return the subset of *state_dict* selected by the regex filters.

    A key is kept when it matches *include* (if given) and does not match
    *exclude* (if given).
    """
    assert isinstance(state_dict, dict)
    include_re = re.compile(include) if include is not None else None
    exclude_re = re.compile(exclude) if exclude is not None else None
    return {
        key: value
        for key, value in state_dict.items()
        if (include_re is None or include_re.match(key) is not None)
        and (exclude_re is None or exclude_re.match(key) is None)
    }
def train(config_path,
          model_dir,
          result_path=None,
          create_folder=False,
          display_step=50,
          summary_step=5,
          pretrained_path=None,
          pretrained_include=None,
          pretrained_exclude=None,
          freeze_include=None,
          freeze_exclude=None,
          multi_gpu=False,
          measure_time=False,
          resume=False):
    """train a VoxelNet model specified by a config file.

    Args:
        config_path: path of a TrainEvalPipelineConfig text proto, or an
            already-parsed config object.
        model_dir: directory for checkpoints, logs and a backup copy of the
            config. Must not already exist unless ``resume`` is True.
        result_path: directory for periodic evaluation output; defaults to
            ``model_dir / 'results'``.
        create_folder: if True and ``model_dir`` exists, let
            ``torchplus.train.create_folder`` pick a fresh folder instead.
        display_step: log training metrics every this many global steps.
        summary_step: NOTE(review): never read in this function.
        pretrained_path: optional checkpoint whose entries (filtered by the
            ``pretrained_include``/``pretrained_exclude`` regexes and by
            shape match) are loaded before training.
        freeze_include, freeze_exclude: regexes forwarded to
            ``freeze_params_v2`` to turn off ``requires_grad``.
        multi_gpu: train with ``torch.nn.DataParallel`` on all visible GPUs.
        measure_time: collect and print per-module timing statistics.
        resume: allow ``model_dir`` to exist; latest checkpoint is restored.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_dir = str(Path(model_dir).resolve())
    if create_folder:
        if Path(model_dir).exists():
            model_dir = torchplus.train.create_folder(model_dir)
    model_dir = Path(model_dir)
    if not resume and model_dir.exists():
        raise ValueError("model dir exists and you don't specify resume.")
    model_dir.mkdir(parents=True, exist_ok=True)
    if result_path is None:
        result_path = model_dir / 'results'
    config_file_bkp = "pipeline.config"
    if isinstance(config_path, str):
        # Read and parse the text-format pipeline proto from disk.
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
    else:
        # A config object was provided directly. This is usually used when
        # you want to train with several different parameters in one script.
        config = config_path
        proto_str = text_format.MessageToString(config, indent=2)
    # Back up the effective config next to the checkpoints.
    with (model_dir / config_file_bkp).open("w") as f:
        f.write(proto_str)
    input_cfg = config.train_input_reader
    eval_input_cfg = config.eval_input_reader
    model_cfg = config.model.second
    train_cfg = config.train_config
    net = build_network(model_cfg, measure_time).to(device)
    # if train_cfg.enable_mixed_precision:
    #     net.half()
    #     net.metrics_to_float()
    #     net.convert_norm_to_float(net)
    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator
    print("num parameters:", len(list(net.parameters())))
    torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
    if pretrained_path is not None:
        # Load only the pretrained entries that pass the include/exclude
        # filters AND whose shapes match the freshly built network, then
        # optionally freeze parameters and reset step/metric counters.
        model_dict = net.state_dict()
        pretrained_dict = torch.load(pretrained_path)
        pretrained_dict = filter_param_dict(pretrained_dict, pretrained_include, pretrained_exclude)
        new_pretrained_dict = {}
        for k, v in pretrained_dict.items():
            if k in model_dict and v.shape == model_dict[k].shape:
                new_pretrained_dict[k] = v
        print("Load pretrained parameters:")
        for k, v in new_pretrained_dict.items():
            print(k, v.shape)
        model_dict.update(new_pretrained_dict)
        net.load_state_dict(model_dict)
        freeze_params_v2(dict(net.named_parameters()), freeze_include, freeze_exclude)
        net.clear_global_step()
        net.clear_metrics()
    if multi_gpu:
        net_parallel = torch.nn.DataParallel(net)
    else:
        net_parallel = net
    optimizer_cfg = train_cfg.optimizer
    loss_scale = train_cfg.loss_scale_factor
    fastai_optimizer = optimizer_builder.build(
        optimizer_cfg,
        net,
        mixed=False,
        loss_scale=loss_scale)
    # A negative loss scale selects apex's dynamic loss scaling below.
    if loss_scale < 0:
        loss_scale = "dynamic"
    if train_cfg.enable_mixed_precision:
        max_num_voxels = input_cfg.preprocess.max_number_of_voxels * input_cfg.batch_size
        assert max_num_voxels < 65535, "spconv fp16 training only support this"
        from apex import amp
        net, amp_optimizer = amp.initialize(net, fastai_optimizer,
                                            opt_level="O2",
                                            keep_batchnorm_fp32=True,
                                            loss_scale=loss_scale
                                            )
        net.metrics_to_float()
    else:
        amp_optimizer = fastai_optimizer
    # NOTE(review): the optimizer checkpoint is restored into
    # fastai_optimizer while later saves use amp_optimizer — under mixed
    # precision these may differ; confirm intended.
    torchplus.train.try_restore_latest_checkpoints(model_dir,
                                                   [fastai_optimizer])
    lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, amp_optimizer,
                                              train_cfg.steps)
    if train_cfg.enable_mixed_precision:
        float_dtype = torch.float16
    else:
        float_dtype = torch.float32
    if multi_gpu:
        num_gpu = torch.cuda.device_count()
        print(f"MULTI-GPU: use {num_gpu} gpu")
        collate_fn = merge_second_batch_multigpu
    else:
        collate_fn = merge_second_batch
        num_gpu = 1
    ######################
    # PREPARE INPUT
    ######################
    dataset = input_reader_builder.build(
        input_cfg,
        model_cfg,
        training=True,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        multi_gpu=multi_gpu)
    eval_dataset = input_reader_builder.build(
        eval_input_cfg,
        model_cfg,
        training=False,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=input_cfg.batch_size * num_gpu,
        shuffle=True,
        num_workers=input_cfg.preprocess.num_workers * num_gpu,
        pin_memory=False,
        collate_fn=collate_fn,
        worker_init_fn=_worker_init_fn,
        drop_last=not multi_gpu)
    eval_dataloader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=eval_input_cfg.batch_size, # only support multi-gpu train
        shuffle=False,
        num_workers=eval_input_cfg.preprocess.num_workers,
        pin_memory=False,
        collate_fn=merge_second_batch)
    ######################
    # TRAINING
    ######################
    model_logging = SimpleModelLog(model_dir)
    model_logging.open()
    model_logging.log_text(proto_str + "\n", 0, tag="config")
    start_step = net.get_global_step()
    total_step = train_cfg.steps
    t = time.time()
    steps_per_eval = train_cfg.steps_per_eval
    clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch
    amp_optimizer.zero_grad()
    step_times = []
    step = start_step
    try:
        while True:
            if clear_metrics_every_epoch:
                net.clear_metrics()
            for example in dataloader:
                lr_scheduler.step(net.get_global_step())
                # "metrics" carries per-example preprocessing timings; pop it
                # so it is not converted to tensors below.
                time_metrics = example["metrics"]
                example.pop("metrics")
                example_torch = example_convert_to_torch(example, float_dtype)
                batch_size = example["anchors"].shape[0]
                ret_dict = net_parallel(example_torch)
                cls_preds = ret_dict["cls_preds"]
                loss = ret_dict["loss"].mean()
                cls_loss_reduced = ret_dict["cls_loss_reduced"].mean()
                loc_loss_reduced = ret_dict["loc_loss_reduced"].mean()
                cls_pos_loss = ret_dict["cls_pos_loss"].mean()
                cls_neg_loss = ret_dict["cls_neg_loss"].mean()
                loc_loss = ret_dict["loc_loss"]
                cls_loss = ret_dict["cls_loss"]
                cared = ret_dict["cared"]
                labels = example_torch["labels"]
                if train_cfg.enable_mixed_precision:
                    # apex scales the loss to keep fp16 gradients in range.
                    with amp.scale_loss(loss, amp_optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)
                amp_optimizer.step()
                amp_optimizer.zero_grad()
                net.update_global_step()
                net_metrics = net.update_metrics(cls_loss_reduced,
                                                 loc_loss_reduced, cls_preds,
                                                 labels, cared)
                step_time = (time.time() - t)
                step_times.append(step_time)
                t = time.time()
                metrics = {}
                # Positive/negative anchor counts of the first sample only.
                num_pos = int((labels > 0)[0].float().sum().cpu().numpy())
                num_neg = int((labels == 0)[0].float().sum().cpu().numpy())
                if 'anchors_mask' not in example_torch:
                    num_anchors = example_torch['anchors'].shape[1]
                else:
                    num_anchors = int(example_torch['anchors_mask'][0].sum())
                global_step = net.get_global_step()
                if global_step % display_step == 0:
                    if measure_time:
                        for name, val in net.get_avg_time_dict().items():
                            print(f"avg {name} time = {val * 1000:.3f} ms")
                    # Per-dimension localization loss, averaged over the batch.
                    loc_loss_elem = [
                        float(loc_loss[:, :, i].sum().detach().cpu().numpy() /
                              batch_size) for i in range(loc_loss.shape[-1])
                    ]
                    metrics["runtime"] = {
                        "step": global_step,
                        "steptime": np.mean(step_times),
                    }
                    metrics["runtime"].update(time_metrics[0])
                    step_times = []
                    metrics.update(net_metrics)
                    metrics["loss"]["loc_elem"] = loc_loss_elem
                    metrics["loss"]["cls_pos_rt"] = float(
                        cls_pos_loss.detach().cpu().numpy())
                    metrics["loss"]["cls_neg_rt"] = float(
                        cls_neg_loss.detach().cpu().numpy())
                    if model_cfg.use_direction_classifier:
                        dir_loss_reduced = ret_dict["dir_loss_reduced"].mean()
                        metrics["loss"]["dir_rt"] = float(
                            dir_loss_reduced.detach().cpu().numpy())
                    metrics["misc"] = {
                        "num_vox": int(example_torch["voxels"].shape[0]),
                        "num_pos": int(num_pos),
                        "num_neg": int(num_neg),
                        "num_anchors": int(num_anchors),
                        "lr": float(amp_optimizer.lr),
                        "mem_usage": psutil.virtual_memory().percent,
                    }
                    model_logging.log_metrics(metrics, global_step)
                if global_step % steps_per_eval == 0:
                    # Periodic checkpoint + full evaluation pass.
                    torchplus.train.save_models(model_dir, [net, amp_optimizer],
                                                net.get_global_step())
                    net.eval()
                    result_path_step = result_path / f"step_{net.get_global_step()}"
                    result_path_step.mkdir(parents=True, exist_ok=True)
                    model_logging.log_text("#################################",
                                           global_step)
                    model_logging.log_text("# EVAL", global_step)
                    model_logging.log_text("#################################",
                                           global_step)
                    model_logging.log_text("Generate output labels...", global_step)
                    t = time.time()
                    detections = []
                    prog_bar = ProgressBar()
                    net.clear_timer()
                    prog_bar.start((len(eval_dataset) + eval_input_cfg.batch_size - 1)
                            // eval_input_cfg.batch_size)
                    for example in iter(eval_dataloader):
                        example = example_convert_to_torch(example, float_dtype)
                        detections += net(example)
                        prog_bar.print_bar()
                    sec_per_ex = len(eval_dataset) / (time.time() - t)
                    model_logging.log_text(
                        f'generate label finished({sec_per_ex:.2f}/s). start eval:',
                        global_step)
                    result_dict = eval_dataset.dataset.evaluation(
                        detections, str(result_path_step))
                    for k, v in result_dict["results"].items():
                        model_logging.log_text("Evaluation {}".format(k), global_step)
                        model_logging.log_text(v, global_step)
                    model_logging.log_metrics(result_dict["detail"], global_step)
                    with open(result_path_step / "result.pkl", 'wb') as f:
                        pickle.dump(detections, f)
                    net.train()
                step += 1
                if step >= total_step:
                    break
            if step >= total_step:
                break
    except Exception as e:
        # Log the offending example's metadata and save an emergency
        # checkpoint before re-raising.
        print(json.dumps(example["metadata"], indent=2))
        model_logging.log_text(str(e), step)
        model_logging.log_text(json.dumps(example["metadata"], indent=2), step)
        torchplus.train.save_models(model_dir, [net, amp_optimizer],
                                    step)
        raise e
    finally:
        model_logging.close()
    # Final checkpoint after normal completion.
    torchplus.train.save_models(model_dir, [net, amp_optimizer],
                                net.get_global_step())
def evaluate(config_path,
             model_dir=None,
             result_path=None,
             ckpt_path=None,
             measure_time=False,
             batch_size=None,
             **kwargs):
    """Evaluate a trained model on the eval dataset of a pipeline config.

    Don't support pickle_result anymore. if you want to generate kitti label file,
    please use kitti_anno_to_label_file and convert_detection_to_kitti_annos
    in second.data.kitti_dataset.

    Args:
        config_path: path of a TrainEvalPipelineConfig text proto, or a
            parsed config object.
        model_dir: checkpoint directory; required when ``ckpt_path`` is not
            given. Also the default base for results.
        result_path: output directory; defaults to ``model_dir/eval_results``.
        ckpt_path: explicit ``.tckpt`` checkpoint to restore instead of the
            latest one in ``model_dir``.
        measure_time: print preprocessing and per-module timing statistics.
        batch_size: overrides the batch size from the config when given.
    """
    # Reject any unknown keyword arguments (e.g. the removed pickle_result).
    assert len(kwargs) == 0
    model_dir = str(Path(model_dir).resolve())
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    result_name = 'eval_results'
    if result_path is None:
        model_dir = Path(model_dir)
        result_path = model_dir / result_name
    else:
        result_path = Path(result_path)
    if isinstance(config_path, str):
        # Read and parse the text-format pipeline proto from disk.
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
    else:
        # A config object was provided directly. This is usually used when
        # you want to eval with several different parameters in one script.
        config = config_path
    input_cfg = config.eval_input_reader
    model_cfg = config.model.second
    train_cfg = config.train_config
    net = build_network(model_cfg, measure_time=measure_time).to(device)
    if train_cfg.enable_mixed_precision:
        # Run the whole network in fp16 for inference.
        net.half()
        print("half inference!")
        net.metrics_to_float()
        net.convert_norm_to_float(net)
    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator
    if ckpt_path is None:
        assert model_dir is not None
        torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
    else:
        torchplus.train.restore(ckpt_path, net)
    batch_size = batch_size or input_cfg.batch_size
    eval_dataset = input_reader_builder.build(
        input_cfg,
        model_cfg,
        training=False,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner)
    eval_dataloader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=input_cfg.preprocess.num_workers,
        pin_memory=False,
        collate_fn=merge_second_batch)
    if train_cfg.enable_mixed_precision:
        float_dtype = torch.float16
    else:
        float_dtype = torch.float32
    net.eval()
    result_path_step = result_path / f"step_{net.get_global_step()}"
    result_path_step.mkdir(parents=True, exist_ok=True)
    t = time.time()
    detections = []
    print("Generate output labels...")
    bar = ProgressBar()
    bar.start((len(eval_dataset) + batch_size - 1) // batch_size)
    prep_example_times = []
    prep_times = []
    t2 = time.time()
    for example in iter(eval_dataloader):
        if measure_time:
            # t2..now covers dataloader preprocessing for this batch.
            prep_times.append(time.time() - t2)
            torch.cuda.synchronize()
            t1 = time.time()
        example = example_convert_to_torch(example, float_dtype)
        if measure_time:
            torch.cuda.synchronize()
            prep_example_times.append(time.time() - t1)
        with torch.no_grad():
            detections += net(example)
        bar.print_bar()
        if measure_time:
            t2 = time.time()
    sec_per_example = len(eval_dataset) / (time.time() - t)
    print(f'generate label finished({sec_per_example:.2f}/s). start eval:')
    if measure_time:
        print(
            f"avg example to torch time: {np.mean(prep_example_times) * 1000:.3f} ms"
        )
        print(f"avg prep time: {np.mean(prep_times) * 1000:.3f} ms")
        for name, val in net.get_avg_time_dict().items():
            print(f"avg {name} time = {val * 1000:.3f} ms")
    # Persist raw detections, then run the dataset's own evaluation.
    with open(result_path_step / "result.pkl", 'wb') as f:
        pickle.dump(detections, f)
    result_dict = eval_dataset.dataset.evaluation(detections,
                                                  str(result_path_step))
    if result_dict is not None:
        for k, v in result_dict["results"].items():
            print("Evaluation {}".format(k))
            print(v)
def helper_tune_target_assigner(config_path, target_rate=None, update_freq=200, update_delta=0.01, num_tune_epoch=5):
    """get information of target assign to tune thresholds in anchor generator.

    Iterates the training data and counts, per class, how many ground-truth
    objects and how many positive anchors occur. When ``target_rate`` (a
    ``{class_name: desired anchors-per-object rate}`` dict) is given, the
    anchor generators' match/unmatch thresholds are nudged by
    ``update_delta`` every ``update_freq`` observed objects for
    ``num_tune_epoch`` epochs, then final per-class statistics are printed.

    Args:
        config_path: path of a TrainEvalPipelineConfig text proto, or a
            parsed config object.
        target_rate: optional desired positive-anchor rate per class; when
            None, the tuning epochs are skipped and only statistics are
            printed.
        update_freq: number of objects of a class between threshold updates.
        update_delta: threshold adjustment step.
        num_tune_epoch: number of passes over the data used for tuning.
    """
    if isinstance(config_path, str):
        # Read and parse the text-format pipeline proto from disk.
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
    else:
        # A config object was provided directly. This is usually used when
        # you want to run with several different parameters in one script.
        config = config_path
    proto_str = text_format.MessageToString(config, indent=2)
    input_cfg = config.train_input_reader
    eval_input_cfg = config.eval_input_reader
    model_cfg = config.model.second
    train_cfg = config.train_config
    net = build_network(model_cfg, False)
    # if train_cfg.enable_mixed_precision:
    #     net.half()
    #     net.metrics_to_float()
    #     net.convert_norm_to_float(net)
    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator
    dataset = input_reader_builder.build(
        input_cfg,
        model_cfg,
        training=True,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        multi_gpu=False)
    # Single-process, batch-size-1 loader so labels map 1:1 to examples.
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        num_workers=0,
        pin_memory=False,
        collate_fn=merge_second_batch,
        worker_init_fn=_worker_init_fn,
        drop_last=False)
    class_count = {}
    anchor_count = {}
    class_count_tune = {}
    anchor_count_tune = {}
    for c in target_assigner.classes:
        class_count[c] = 0
        anchor_count[c] = 0
        class_count_tune[c] = 0
        anchor_count_tune[c] = 0
    step = 0
    classes = target_assigner.classes
    if target_rate is None:
        num_tune_epoch = 0
    # Phase 1: tuning epochs — adjust match/unmatch thresholds toward the
    # requested positive-anchor rate per class.
    for epoch in range(num_tune_epoch):
        for example in dataloader:
            gt_names = example["gt_names"]
            for name in gt_names:
                class_count_tune[name] += 1
            labels = example['labels']
            # Label value i corresponds to classes[i - 1] (0 is background).
            for i in range(1, len(classes) + 1):
                anchor_count_tune[classes[i - 1]] += int(np.sum(labels == i))
            if target_rate is not None:
                for name, rate in target_rate.items():
                    if class_count_tune[name] > update_freq:
                        # calc rate
                        current_rate = anchor_count_tune[name] / class_count_tune[name]
                        # Too many positives -> raise thresholds; too few ->
                        # lower them. Counters reset after every adjustment.
                        if current_rate > rate:
                            target_assigner._anchor_generators[classes.index(name)].match_threshold += update_delta
                            target_assigner._anchor_generators[classes.index(name)].unmatch_threshold += update_delta
                        else:
                            target_assigner._anchor_generators[classes.index(name)].match_threshold -= update_delta
                            target_assigner._anchor_generators[classes.index(name)].unmatch_threshold -= update_delta
                        anchor_count_tune[name] = 0
                        class_count_tune[name] = 0
            step += 1
    for c in target_assigner.classes:
        class_count[c] = 0
        anchor_count[c] = 0
    # Phase 2: one measurement pass with the (possibly tuned) thresholds.
    total_voxel_gene_time = 0
    count = 0
    for example in dataloader:
        gt_names = example["gt_names"]
        total_voxel_gene_time += example["metrics"][0]["voxel_gene_time"]
        count += 1
        for name in gt_names:
            class_count[name] += 1
        labels = example['labels']
        for i in range(1, len(classes) + 1):
            anchor_count[classes[i - 1]] += int(np.sum(labels == i))
    print("avg voxel gene time", total_voxel_gene_time / count)
    print(json.dumps(class_count, indent=2))
    print(json.dumps(anchor_count, indent=2))
    if target_rate is not None:
        for ag in target_assigner._anchor_generators:
            if ag.class_name in target_rate:
                print(ag.class_name, ag.match_threshold, ag.unmatch_threshold)
def mcnms_parameters_search(config_path,
                            model_dir,
                            preds_path):
    # Placeholder — not implemented. Presumably intended to search
    # multi-class NMS parameters over saved predictions; TODO confirm.
    pass
if __name__ == '__main__':
    # Expose this module's functions (train, evaluate, ...) as CLI
    # subcommands via python-fire.
    fire.Fire()
| 26,138 | 38.365964 | 117 | py |
second.pytorch | second.pytorch-master/second/pytorch/core/ghm_loss.py | #####################
# THIS LOSS IS NOT WORKING!!!!
#####################
"""
The implementation of GHM-C and GHM-R losses.
Details can be found in the paper `Gradient Harmonized Single-stage Detector`:
https://arxiv.org/abs/1811.05181
Copyright (c) 2018 Multimedia Laboratory, CUHK.
Licensed under the MIT License (see LICENSE for details)
Written by Buyu Li
"""
from second.pytorch.core.losses import Loss, _sigmoid_cross_entropy_with_logits
import torch
class GHMCLoss(Loss):
    """GHM-C classification loss (gradient-harmonized cross-entropy).

    Down-weights examples whose gradient norm falls in densely populated
    histogram bins. NOTE(review): the banner at the top of this file marks
    this loss as NOT WORKING — verify before use.
    """
    def __init__(self, bins=10, momentum=0):
        # bins: number of gradient-norm histogram buckets over [0, 1].
        self.bins = bins
        # momentum: EMA factor for per-bin counts; 0 disables the EMA.
        self.momentum = momentum
        self.edges = [float(x) / bins for x in range(bins+1)]
        # Nudge the last edge so g == 1.0 lands in the final bin.
        self.edges[-1] += 1e-6
        if momentum > 0:
            self.acc_sum = [0.0 for _ in range(bins)]
        # Debug countdown: bin occupancies are printed every 50 calls.
        self.count = 50
    def _compute_loss(self,
                      prediction_tensor,
                      target_tensor,
                      weights,
                      class_indices=None):
        """Compute the per-element harmonized sigmoid cross-entropy.

        Args:
            prediction_tensor: logits, [batch, num_anchors, num_class].
            target_tensor: binary (0/1) targets, same shape as predictions.
            weights: [batch, num_anchors]; entries > 0 mark valid anchors.
            class_indices: unused.
        Returns:
            loss tensor of shape [batch, num_anchors, num_class].
        """
        input = prediction_tensor
        target = target_tensor
        batch_size = prediction_tensor.shape[0]
        num_anchors = prediction_tensor.shape[1]
        num_class = prediction_tensor.shape[2]
        edges = self.edges
        weights_ghm = torch.zeros_like(input).view(-1, num_class)
        per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
            labels=target_tensor, logits=prediction_tensor))
        # gradient length: |sigmoid(logit) - target| per element.
        g = torch.abs(input.sigmoid().detach() - target).view(-1, num_class)
        valid = weights.view(-1, 1).expand(-1, num_class) > 0
        num_examples = max(valid.float().sum().item(), 1.0)
        num_valid_bins = 0  # n valid bins
        self.count -= 1
        num_bins = []
        for i in range(self.bins):
            # Valid elements whose gradient norm falls into bin i.
            inds = (g >= edges[i]) & (g < edges[i+1]) & valid
            num_in_bin = inds.sum().item()
            num_bins.append(num_in_bin)
            if num_in_bin > 0:
                if self.momentum > 0:
                    # EMA of the bin population; weight by its inverse.
                    self.acc_sum[i] = self.momentum * self.acc_sum[i] \
                            + (1 - self.momentum) * num_in_bin
                    weights_ghm[inds] = num_examples / self.acc_sum[i]
                else:
                    weights_ghm[inds] = num_examples / num_in_bin
                num_valid_bins += 1
        if self.count <= 0:
            print("GHMC loss bins:", num_bins)
            self.count = 50
        if num_valid_bins > 0:
            weights_ghm = weights_ghm / num_valid_bins
        return per_entry_cross_ent * weights_ghm.view(batch_size, num_anchors, num_class) / num_examples
class GHMRLoss(Loss):
    """GHM-R regression loss (gradient-harmonized authentic smooth L1).

    NOTE(review): the banner at the top of this file marks these losses as
    NOT WORKING — verify before use.
    """
    def __init__(self, mu=0.02, bins=10, momentum=0, code_weights=None):
        # mu: smoothing constant of the ASL1 loss sqrt(d^2 + mu^2) - mu.
        self.mu = mu
        # bins: number of gradient-norm histogram buckets.
        self.bins = bins
        self.edges = [float(x) / bins for x in range(bins+1)]
        # Stretch the last edge so every gradient norm falls in some bin.
        self.edges[-1] = 1e3
        # momentum: EMA factor for per-bin counts; 0 disables the EMA.
        self.momentum = momentum
        if momentum > 0:
            self.acc_sum = [0.0 for _ in range(bins)]
        # NOTE(review): code_weights is accepted but never used.
        self._codewise = True
    def _compute_loss(self,
                      prediction_tensor,
                      target_tensor,
                      weights):
        """Compute the per-element harmonized ASL1 regression loss.

        Args:
            prediction_tensor: [batch, num_anchors, code_size] predictions.
            target_tensor: regression targets, same shape as predictions.
            weights: [batch, num_anchors]; entries > 0 mark valid anchors.
        Returns:
            loss tensor of shape [batch, num_anchors, code_size].
        """
        # ASL1 loss
        diff = prediction_tensor - target_tensor
        loss = torch.sqrt(diff * diff + self.mu * self.mu) - self.mu
        batch_size = prediction_tensor.shape[0]
        num_anchors = prediction_tensor.shape[1]
        num_codes = prediction_tensor.shape[2]
        # gradient length: |d / sqrt(mu^2 + d^2)|, the ASL1 derivative.
        g = torch.abs(diff / torch.sqrt(self.mu * self.mu + diff * diff)).detach().view(-1, num_codes)
        weights_ghm = torch.zeros_like(g)
        valid = weights.view(-1, 1).expand(-1, num_codes) > 0
        # print(g.shape, prediction_tensor.shape, valid.shape)
        num_examples = max(valid.float().sum().item() / num_codes, 1.0)
        num_valid_bins = 0  # n: valid bins
        for i in range(self.bins):
            # Valid elements whose gradient norm falls into bin i.
            inds = (g >= self.edges[i]) & (g < self.edges[i+1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                num_valid_bins += 1
                if self.momentum > 0:
                    # EMA of the bin population; weight by its inverse.
                    self.acc_sum[i] = self.momentum * self.acc_sum[i] \
                            + (1 - self.momentum) * num_in_bin
                    weights_ghm[inds] = num_examples / self.acc_sum[i]
                else:
                    weights_ghm[inds] = num_examples / num_in_bin
        if num_valid_bins > 0:
            weights_ghm /= num_valid_bins
        weights_ghm = weights_ghm.view(batch_size, num_anchors, num_codes)
        loss = loss * weights_ghm / num_examples
        return loss
| 5,148 | 39.226563 | 104 | py |
second.pytorch | second.pytorch-master/second/pytorch/core/losses.py | """Classification and regression loss functions for object detection.
Localization losses:
* WeightedL2LocalizationLoss
* WeightedSmoothL1LocalizationLoss
Classification losses:
* WeightedSigmoidClassificationLoss
* WeightedSoftmaxClassificationLoss
* BootstrappedSigmoidClassificationLoss
"""
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
import torchplus
def indices_to_dense_vector(indices,
                            size,
                            indices_value=1.,
                            default_value=0,
                            dtype=np.float32):
    """Creates dense vector with indices set to specific value and rest to zeros.

    This function exists because it is unclear if it is safe to use
    tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
    with indices which are not ordered.
    This function accepts a dynamic size (e.g. tf.shape(tensor)[0])

    Args:
      indices: 1d Tensor with integer indices which are to be set to
          indices_values.
      size: scalar with size (integer) of output Tensor.
      indices_value: values of elements specified by indices in the output vector
      default_value: values of other elements in the output vector.
      dtype: numpy data type of the output tensor.

    Returns:
      dense 1D Tensor of shape [size] with indices set to indices_values and the
      rest set to default_value.
    """
    # Build the vector in numpy so the requested dtype is honored (the
    # previous implementation ignored `dtype` and always returned float32),
    # then hand it to torch without copying.
    dense = torch.from_numpy(np.full(size, default_value, dtype=dtype))
    dense[indices] = indices_value
    return dense
class Loss(object, metaclass=ABCMeta):
    """Abstract base class for loss functions.

    Subclasses implement :meth:`_compute_loss`; instances are invoked
    directly via ``__call__``.

    Fix: the original declared ``__metaclass__ = ABCMeta``, which is
    Python 2 syntax and a no-op in Python 3, so ``@abstractmethod`` was
    never enforced; the metaclass is now passed with Python 3 syntax.
    """

    def __call__(self,
                 prediction_tensor,
                 target_tensor,
                 ignore_nan_targets=False,
                 scope=None,
                 **params):
        """Call the loss function.

        Args:
          prediction_tensor: an N-d tensor of shape [batch, anchors, ...]
            representing predicted quantities.
          target_tensor: an N-d tensor of shape [batch, anchors, ...] representing
            regression or classification targets.
          ignore_nan_targets: whether to ignore nan targets in the loss computation.
            E.g. can be used if the target tensor is missing groundtruth data that
            shouldn't be factored into the loss.
          scope: unused; kept for interface compatibility.
          **params: Additional keyword arguments for specific implementations of
            the Loss.

        Returns:
          loss: a tensor representing the value of the loss function.
        """
        if ignore_nan_targets:
            # Replace NaN targets by the corresponding predictions so that
            # missing groundtruth does not contribute to the loss.
            target_tensor = torch.where(torch.isnan(target_tensor),
                                        prediction_tensor,
                                        target_tensor)
        return self._compute_loss(prediction_tensor, target_tensor, **params)

    @abstractmethod
    def _compute_loss(self, prediction_tensor, target_tensor, **params):
        """Method to be overridden by implementations.

        Args:
          prediction_tensor: a tensor representing predicted quantities
          target_tensor: a tensor representing regression or classification targets
          **params: Additional keyword arguments for specific implementations of
            the Loss.

        Returns:
          loss: an N-d tensor of shape [batch, anchors, ...] containing the loss per
            anchor
        """
        pass
class WeightedL2LocalizationLoss(Loss):
    """L2 localization loss with anchorwise output support.

    Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2

    Optional per-code-dimension weights can be supplied at construction
    time and are applied to the residual before squaring.
    """

    def __init__(self, code_weights=None):
        super().__init__()
        if code_weights is None:
            self._code_weights = None
        else:
            self._code_weights = torch.from_numpy(
                np.array(code_weights, dtype=np.float32))

    def _compute_loss(self, prediction_tensor, target_tensor, weights):
        """Compute the per-anchor weighted squared error.

        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            code_size] representing the (encoded) predicted locations of objects.
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            code_size] representing the regression targets
          weights: a float tensor of shape [batch_size, num_anchors]

        Returns:
          loss: a float tensor of shape [batch_size, num_anchors] tensor
            representing the value of the loss function.
        """
        residual = prediction_tensor - target_tensor
        if self._code_weights is not None:
            # Lazily move/cast the code weights to match the predictions and
            # broadcast them over batch and anchor dimensions.
            self._code_weights = self._code_weights.type_as(
                prediction_tensor).to(prediction_tensor.device)
            self._code_weights = self._code_weights.view(1, 1, -1)
            residual = self._code_weights * residual
        weighted_residual = residual * weights.unsqueeze(-1)
        return (0.5 * weighted_residual * weighted_residual).sum(2)
class WeightedSmoothL1LocalizationLoss(Loss):
    """Smooth L1 localization loss function.
    The smooth L1_loss is defined elementwise as .5 x^2 if |x|<1 and |x|-.5
    otherwise, where x is the difference between predictions and target.
    See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015)
    """

    def __init__(self, sigma=3.0, code_weights=None, codewise=True):
        """Constructor.
        Args:
          sigma: controls the quadratic/linear crossover point, which sits at
            |x| = 1 / sigma**2 (see _compute_loss).
          code_weights: optional per-dimension weights of length code_size.
          codewise: if True, return per-code losses [b, a, code_size];
            if False, sum over the code dimension to [b, a].
        """
        super().__init__()
        self._sigma = sigma
        if code_weights is not None:
            self._code_weights = np.array(code_weights, dtype=np.float32)
            self._code_weights = torch.from_numpy(self._code_weights)
        else:
            self._code_weights = None
        self._codewise = codewise

    def _compute_loss(self, prediction_tensor, target_tensor, weights=None):
        """Compute loss function.
        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            code_size] representing the (encoded) predicted locations of objects.
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            code_size] representing the regression targets
          weights: a float tensor of shape [batch_size, num_anchors]
        Returns:
          loss: a float tensor of shape [batch_size, num_anchors] tensor
            representing the value of the loss function.
        """
        diff = prediction_tensor - target_tensor
        if self._code_weights is not None:
            # Cast into a local; the stored weights stay on their original device.
            code_weights = self._code_weights.type_as(prediction_tensor).to(target_tensor.device)
            diff = code_weights.view(1, 1, -1) * diff
        abs_diff = torch.abs(diff)
        # Select quadratic branch where |diff| <= 1/sigma^2, linear branch elsewhere.
        abs_diff_lt_1 = torch.le(abs_diff, 1 / (self._sigma**2)).type_as(abs_diff)
        loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * self._sigma, 2) \
            + (abs_diff - 0.5 / (self._sigma**2)) * (1. - abs_diff_lt_1)
        if self._codewise:
            anchorwise_smooth_l1norm = loss
            if weights is not None:
                # In-place multiply; `loss` is local so no external state changes.
                anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
        else:
            anchorwise_smooth_l1norm = torch.sum(loss, 2)# * weights
            if weights is not None:
                anchorwise_smooth_l1norm *= weights
        return anchorwise_smooth_l1norm
def _sigmoid_cross_entropy_with_logits(logits, labels):
# to be compatible with tensorflow, we don't use ignore_idx
loss = torch.clamp(logits, min=0) - logits * labels.type_as(logits)
loss += torch.log1p(torch.exp(-torch.abs(logits)))
# loss = nn.BCEWithLogitsLoss(reduce="none")(logits, labels.type_as(logits))
# transpose_param = [0] + [param[-1]] + param[1:-1]
# logits = logits.permute(*transpose_param)
# loss_ftor = nn.NLLLoss(reduce=False)
# loss = loss_ftor(F.logsigmoid(logits), labels)
return loss
def _softmax_cross_entropy_with_logits(logits, labels):
param = list(range(len(logits.shape)))
transpose_param = [0] + [param[-1]] + param[1:-1]
logits = logits.permute(*transpose_param) # [N, ..., C] -> [N, C, ...]
loss_ftor = nn.CrossEntropyLoss(reduction='none')
loss = loss_ftor(logits, labels.max(dim=-1)[1])
return loss
class WeightedSigmoidClassificationLoss(Loss):
    """Sigmoid cross entropy classification loss function."""

    def _compute_loss(self,
                      prediction_tensor,
                      target_tensor,
                      weights,
                      class_indices=None):
        """Compute loss function.
        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of shape [batch_size, num_anchors]
          class_indices: (Optional) A 1-D integer tensor of class indices.
            If provided, computes loss only for the specified class indices.
        Returns:
          loss: a float tensor of shape [batch_size, num_anchors, num_classes]
            representing the value of the loss function.
        """
        weights = weights.unsqueeze(-1)
        if class_indices is not None:
            # NOTE(review): `weights` is a view of the caller's tensor and this
            # in-place multiply broadcasts [b, a, 1] against [1, 1, C]; an
            # in-place op cannot grow its target — confirm this branch is
            # actually exercised anywhere.
            weights *= indices_to_dense_vector(class_indices,
                prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
        per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
            labels=target_tensor, logits=prediction_tensor))
        return per_entry_cross_ent * weights
class SigmoidFocalClassificationLoss(Loss):
    """Sigmoid focal cross entropy loss.
    Focal loss down-weights well classified examples and focusses on the hard
    examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
    """

    def __init__(self, gamma=2.0, alpha=0.25):
        """Constructor.
        Args:
          gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
          alpha: optional alpha weighting factor to balance positives vs negatives.
        """
        self._alpha = alpha
        self._gamma = gamma

    def _compute_loss(self,
                      prediction_tensor,
                      target_tensor,
                      weights,
                      class_indices=None):
        """Compute loss function.
        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of shape [batch_size, num_anchors]
          class_indices: (Optional) A 1-D integer tensor of class indices.
            If provided, computes loss only for the specified class indices.
        Returns:
          loss: a float tensor of shape [batch_size, num_anchors, num_classes]
            representing the value of the loss function.
        """
        weights = weights.unsqueeze(2)
        if class_indices is not None:
            # NOTE(review): in-place multiply of a [b, a, 1] view by a
            # [1, 1, C] mask would require the target to grow, which in-place
            # ops disallow — confirm this branch is actually used.
            weights *= indices_to_dense_vector(class_indices,
                prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
        per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
            labels=target_tensor, logits=prediction_tensor))
        prediction_probabilities = torch.sigmoid(prediction_tensor)
        # p_t: probability the model assigns to the ground-truth label.
        p_t = ((target_tensor * prediction_probabilities) +
               ((1 - target_tensor) * (1 - prediction_probabilities)))
        modulating_factor = 1.0
        if self._gamma:
            # (1 - p_t)^gamma down-weights easy, well-classified entries.
            modulating_factor = torch.pow(1.0 - p_t, self._gamma)
        alpha_weight_factor = 1.0
        if self._alpha is not None:
            # alpha for positives, (1 - alpha) for negatives.
            alpha_weight_factor = (target_tensor * self._alpha +
                                   (1 - target_tensor) * (1 - self._alpha))
        focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
                                    per_entry_cross_ent)
        return focal_cross_entropy_loss * weights
class SoftmaxFocalClassificationLoss(Loss):
    """Softmax focal cross entropy loss.
    Focal loss down-weights well classified examples and focusses on the hard
    examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
    """

    def __init__(self, gamma=2.0, alpha=0.25):
        """Constructor.
        Args:
          gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
          alpha: optional alpha weighting factor to balance positives vs negatives.
        """
        self._alpha = alpha
        self._gamma = gamma

    def _compute_loss(self,
                      prediction_tensor,
                      target_tensor,
                      weights,
                      class_indices=None):
        """Compute loss function.
        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of shape [batch_size, num_anchors]
          class_indices: (Optional) A 1-D integer tensor of class indices.
            If provided, computes loss only for the specified class indices.
        Returns:
          loss: a float tensor of shape [batch_size, num_anchors, num_classes]
            representing the value of the loss function.
        """
        weights = weights.unsqueeze(2)
        if class_indices is not None:
            # NOTE(review): same in-place broadcast caveat as the sigmoid
            # variants — a [b, a, 1] view cannot grow in place to [b, a, C];
            # confirm this branch is actually used.
            weights *= indices_to_dense_vector(class_indices,
                prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
        per_entry_cross_ent = (_softmax_cross_entropy_with_logits(
            labels=target_tensor, logits=prediction_tensor))
        # convert [N, num_anchors] to [N, num_anchors, num_classes]
        per_entry_cross_ent = per_entry_cross_ent.unsqueeze(-1) * target_tensor
        prediction_probabilities = F.softmax(prediction_tensor, dim=-1)
        # p_t: probability the model assigns to the ground-truth label.
        p_t = ((target_tensor * prediction_probabilities) +
               ((1 - target_tensor) * (1 - prediction_probabilities)))
        modulating_factor = 1.0
        if self._gamma:
            modulating_factor = torch.pow(1.0 - p_t, self._gamma)
        alpha_weight_factor = 1.0
        if self._alpha is not None:
            # Channel 0 is treated as background: it gets (1 - alpha), every
            # other (foreground) entry gets alpha.
            alpha_weight_factor = torch.where(target_tensor[..., 0] == 1,
                torch.tensor(1 - self._alpha).type_as(per_entry_cross_ent),
                torch.tensor(self._alpha).type_as(per_entry_cross_ent))
        focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
                                    per_entry_cross_ent)
        return focal_cross_entropy_loss * weights
class WeightedSoftmaxClassificationLoss(Loss):
    """Softmax loss function."""

    def __init__(self, logit_scale=1.0):
        """Constructor.
        Args:
          logit_scale: When this value is high, the prediction is "diffused" and
            when this value is low, the prediction is made peakier.
            (default 1.0)
        """
        self._logit_scale = logit_scale

    def _compute_loss(self, prediction_tensor, target_tensor, weights):
        """Compute loss function.
        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of shape [batch_size, num_anchors]
        Returns:
          loss: a float tensor of shape [batch_size, num_anchors]
            representing the value of the loss function.
        """
        num_classes = prediction_tensor.shape[-1]
        # Temperature-scale the logits before the softmax cross entropy.
        prediction_tensor = torch.div(
            prediction_tensor, self._logit_scale)
        # Flatten to [batch * anchors, num_classes] for the cross entropy,
        # then restore the [batch_size, num_anchors] layout via weights.shape.
        per_row_cross_ent = (_softmax_cross_entropy_with_logits(
            labels=target_tensor.view(-1, num_classes),
            logits=prediction_tensor.view(-1, num_classes)))
        return per_row_cross_ent.view(weights.shape) * weights
class BootstrappedSigmoidClassificationLoss(Loss):
    """Bootstrapped sigmoid cross entropy classification loss function.
    This loss uses a convex combination of training labels and the current model's
    predictions as training targets in the classification loss. The idea is that
    as the model improves over time, its predictions can be trusted more and we
    can use these predictions to mitigate the damage of noisy/incorrect labels,
    because incorrect labels are likely to be eventually highly inconsistent with
    other stimuli predicted to have the same label by the model.
    In "soft" bootstrapping, we use all predicted class probabilities, whereas in
    "hard" bootstrapping, we use the single class favored by the model.
    See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by
    Reed et al. (ICLR 2015).
    """

    def __init__(self, alpha, bootstrap_type='soft'):
        """Constructor.
        Args:
          alpha: a float32 scalar tensor between 0 and 1 representing interpolation
            weight
          bootstrap_type: set to either 'hard' or 'soft' (default)
        Raises:
          ValueError: if bootstrap_type is not either 'hard' or 'soft'
        """
        if bootstrap_type != 'hard' and bootstrap_type != 'soft':
            raise ValueError('Unrecognized bootstrap_type: must be one of '
                             '\'hard\' or \'soft.\'')
        self._alpha = alpha
        self._bootstrap_type = bootstrap_type

    def _compute_loss(self, prediction_tensor, target_tensor, weights):
        """Compute loss function.
        Args:
          prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing the predicted logits for each class
          target_tensor: A float tensor of shape [batch_size, num_anchors,
            num_classes] representing one-hot encoded classification targets
          weights: a float tensor of shape [batch_size, num_anchors]
        Returns:
          loss: a float tensor of shape [batch_size, num_anchors, num_classes]
            representing the value of the loss function.
        """
        if self._bootstrap_type == 'soft':
            # Soft: blend labels with the full predicted probabilities.
            bootstrap_target_tensor = self._alpha * target_tensor + (
                1.0 - self._alpha) * torch.sigmoid(prediction_tensor)
        else:
            # Hard: blend labels with thresholded 0/1 predictions.
            bootstrap_target_tensor = self._alpha * target_tensor + (
                1.0 - self._alpha) * (torch.sigmoid(prediction_tensor) > 0.5).float()
        per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
            labels=bootstrap_target_tensor, logits=prediction_tensor))
        return per_entry_cross_ent * weights.unsqueeze(2)
| 18,114 | 38.988962 | 101 | py |
second.pytorch | second.pytorch-master/second/pytorch/core/box_torch_ops.py | import math
from functools import reduce
import numpy as np
import torch
from torch import stack as tstack
import torchplus
from torchplus.tools import torch_to_np_dtype
from second.core.non_max_suppression.nms_gpu import (nms_gpu_cc, rotate_iou_gpu,
rotate_nms_gpu)
from second.core.non_max_suppression.nms_cpu import rotate_nms_cc
import spconv
def second_box_encode(boxes, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """box encode for VoxelNet
    Args:
        boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
        anchors ([N, 7] Tensor): anchors
        encode_angle_to_vector (bool): encode the rotation residual as
            (cos, sin) components instead of a single angle difference.
        smooth_dim (bool): use relative targets (d/da - 1) for dimensions
            instead of log-ratio targets.
    Returns:
        Tensor of encoded regression targets, same leading shape as boxes.
    """
    box_ndim = anchors.shape[-1]
    cas, cgs = [], []
    if box_ndim > 7:
        # Any attributes beyond the 7 box parameters are carried through and
        # encoded as plain differences (cts below).
        xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
        xg, yg, zg, wg, lg, hg, rg, *cgs = torch.split(boxes, 1, dim=-1)
    else:
        xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1)
        xg, yg, zg, wg, lg, hg, rg = torch.split(boxes, 1, dim=-1)
    # Normalize the xy offset by the anchor's BEV diagonal and the z offset
    # by the anchor height (SECOND / VoxelNet convention).
    diagonal = torch.sqrt(la**2 + wa**2)
    xt = (xg - xa) / diagonal
    yt = (yg - ya) / diagonal
    zt = (zg - za) / ha
    cts = [g - a for g, a in zip(cgs, cas)]
    if smooth_dim:
        lt = lg / la - 1
        wt = wg / wa - 1
        ht = hg / ha - 1
    else:
        lt = torch.log(lg / la)
        wt = torch.log(wg / wa)
        ht = torch.log(hg / ha)
    if encode_angle_to_vector:
        rgx = torch.cos(rg)
        rgy = torch.sin(rg)
        rax = torch.cos(ra)
        ray = torch.sin(ra)
        rtx = rgx - rax
        rty = rgy - ray
        return torch.cat([xt, yt, zt, wt, lt, ht, rtx, rty, *cts], dim=-1)
    else:
        rt = rg - ra
        return torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts], dim=-1)
def second_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """box decode for VoxelNet in lidar
    Args:
        box_encodings ([N, 7+] Tensor): encoded regression targets produced by
            second_box_encode.
        anchors ([N, 7] Tensor): anchors: x, y, z, w, l, h, r
        encode_angle_to_vector (bool): must match the flag used when encoding.
        smooth_dim (bool): must match the flag used when encoding.
    Returns:
        Tensor of decoded boxes x, y, z, w, l, h, r (+ extra attributes).
    """
    box_ndim = anchors.shape[-1]
    cas, cts = [], []
    if box_ndim > 7:
        # Extra attributes beyond the 7 box parameters decode as plain sums.
        xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
        if encode_angle_to_vector:
            xt, yt, zt, wt, lt, ht, rtx, rty, *cts = torch.split(
                box_encodings, 1, dim=-1)
        else:
            xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
    else:
        xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1)
        if encode_angle_to_vector:
            xt, yt, zt, wt, lt, ht, rtx, rty = torch.split(
                box_encodings, 1, dim=-1)
        else:
            xt, yt, zt, wt, lt, ht, rt = torch.split(box_encodings, 1, dim=-1)
    # za = za + ha / 2
    # xt, yt, zt, wt, lt, ht, rt = torch.split(box_encodings, 1, dim=-1)
    # Invert the encode-side normalization: xy by the anchor BEV diagonal,
    # z by the anchor height.
    diagonal = torch.sqrt(la**2 + wa**2)
    xg = xt * diagonal + xa
    yg = yt * diagonal + ya
    zg = zt * ha + za
    if smooth_dim:
        lg = (lt + 1) * la
        wg = (wt + 1) * wa
        hg = (ht + 1) * ha
    else:
        lg = torch.exp(lt) * la
        wg = torch.exp(wt) * wa
        hg = torch.exp(ht) * ha
    if encode_angle_to_vector:
        rax = torch.cos(ra)
        ray = torch.sin(ra)
        rgx = rtx + rax
        rgy = rty + ray
        # Recover the angle from its (cos, sin) residual representation.
        rg = torch.atan2(rgy, rgx)
    else:
        rg = rt + ra
    cgs = [t + a for t, a in zip(cts, cas)]
    return torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs], dim=-1)
def bev_box_encode(boxes, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """box encode for VoxelNet (bird's-eye-view, 5-dim boxes)
    Args:
        boxes ([N, 5] Tensor): normal boxes: x, y, w, l, r
        anchors ([N, 5] Tensor): anchors
        encode_angle_to_vector (bool): encode the rotation residual as
            (cos, sin) components instead of a single angle difference.
        smooth_dim (bool): use relative targets (d/da - 1) for dimensions
            instead of log-ratio targets.
    Returns:
        [N, 5] (or [N, 6] when encode_angle_to_vector) encoded targets.
    """
    xa, ya, wa, la, ra = torch.split(anchors, 1, dim=-1)
    xg, yg, wg, lg, rg = torch.split(boxes, 1, dim=-1)
    # Normalize the xy offset by the anchor's BEV diagonal.
    diagonal = torch.sqrt(la**2 + wa**2)
    xt = (xg - xa) / diagonal
    yt = (yg - ya) / diagonal
    if smooth_dim:
        lt = lg / la - 1
        wt = wg / wa - 1
    else:
        lt = torch.log(lg / la)
        wt = torch.log(wg / wa)
    if encode_angle_to_vector:
        rgx = torch.cos(rg)
        rgy = torch.sin(rg)
        rax = torch.cos(ra)
        ray = torch.sin(ra)
        rtx = rgx - rax
        rty = rgy - ray
        return torch.cat([xt, yt, wt, lt, rtx, rty], dim=-1)
    else:
        rt = rg - ra
        return torch.cat([xt, yt, wt, lt, rt], dim=-1)
    # rt = rg - ra
    # return torch.cat([xt, yt, zt, wt, lt, ht, rt], dim=-1)
def bev_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """box decode for VoxelNet in lidar (bird's-eye-view, 5-dim boxes)
    Args:
        box_encodings ([N, 5 or 6] Tensor): targets from bev_box_encode.
        anchors ([N, 5] Tensor): anchors: x, y, w, l, r
        encode_angle_to_vector (bool): must match the flag used when encoding.
        smooth_dim (bool): must match the flag used when encoding.
    Returns:
        [N, 5] decoded boxes: x, y, w, l, r
    """
    xa, ya, wa, la, ra = torch.split(anchors, 1, dim=-1)
    if encode_angle_to_vector:
        xt, yt, wt, lt, rtx, rty = torch.split(
            box_encodings, 1, dim=-1)
    else:
        xt, yt, wt, lt, rt = torch.split(box_encodings, 1, dim=-1)
    # xt, yt, zt, wt, lt, ht, rt = torch.split(box_encodings, 1, dim=-1)
    # Invert the encode-side xy normalization by the anchor BEV diagonal.
    diagonal = torch.sqrt(la**2 + wa**2)
    xg = xt * diagonal + xa
    yg = yt * diagonal + ya
    if smooth_dim:
        lg = (lt + 1) * la
        wg = (wt + 1) * wa
    else:
        lg = torch.exp(lt) * la
        wg = torch.exp(wt) * wa
    if encode_angle_to_vector:
        rax = torch.cos(ra)
        ray = torch.sin(ra)
        rgx = rtx + rax
        rgy = rty + ray
        # Recover the angle from its (cos, sin) residual representation.
        rg = torch.atan2(rgy, rgx)
    else:
        rg = rt + ra
    return torch.cat([xg, yg, wg, lg, rg], dim=-1)
def corners_nd(dims, origin=0.5):
    """generate relative box corners based on length per dim and
    origin point.
    Args:
        dims (float array, shape=[N, ndim]): array of length per dim
        origin (list or array or float): origin point relate to smallest point.
        dtype (output dtype, optional): Defaults to np.float32
    Returns:
        float array, shape=[N, 2 ** ndim, ndim]: returned corners.
        point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
            (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
            where x0 < x1, y0 < y1, z0 < z1
    """
    ndim = int(dims.shape[1])
    # The corner template is built in numpy, then moved back to a tensor that
    # matches dims' dtype/device via type_as below.
    dtype = torch_to_np_dtype(dims.dtype)
    if isinstance(origin, float):
        origin = [origin] * ndim
    # Enumerate all 2**ndim sign combinations as corner coordinates in {0, 1}.
    corners_norm = np.stack(
        np.unravel_index(np.arange(2**ndim), [2] * ndim), axis=1).astype(dtype)
    # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1
    # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
    # so need to convert to a format which is convenient to do other computing.
    # for 2d boxes, format is clockwise start from minimum point
    # for 3d boxes, please draw them by your hand.
    if ndim == 2:
        # generate clockwise box corners
        corners_norm = corners_norm[[0, 1, 3, 2]]
    elif ndim == 3:
        corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
    # Shift so `origin` becomes the box reference point, then scale by dims.
    corners_norm = corners_norm - np.array(origin, dtype=dtype)
    corners_norm = torch.from_numpy(corners_norm).type_as(dims)
    corners = dims.view(-1, 1, ndim) * corners_norm.view(1, 2**ndim, ndim)
    return corners
def corners_2d(dims, origin=0.5):
    """Generate relative 2d box corners from per-dimension lengths.

    Thin convenience wrapper over :func:`corners_nd` for the 2d case.

    Args:
        dims (float array, shape=[N, 2]): length per dimension.
        origin (list or array or float): origin point relative to the
            smallest corner.

    Returns:
        float array, shape=[N, 4, 2]: corners laid out clockwise from the
        minimum point: x0y0, x0y1, x1y1, x1y0.
    """
    return corners_nd(dims, origin=origin)
def corner_to_standup_nd(boxes_corner):
    """Collapse corner boxes to axis-aligned "standup" boxes.

    Args:
        boxes_corner: float tensor [N, num_corners, ndim].

    Returns:
        float tensor [N, 2 * ndim]: per-dimension minima followed by
        per-dimension maxima.
    """
    ndim = boxes_corner.shape[2]
    mins = [boxes_corner[:, :, d].min(dim=1)[0] for d in range(ndim)]
    maxs = [boxes_corner[:, :, d].max(dim=1)[0] for d in range(ndim)]
    return torch.stack(mins + maxs, dim=1)
def rotation_3d_in_axis(points, angles, axis=0):
    """Rotate batched point sets around a single coordinate axis.

    Args:
        points: float tensor [N, point_size, 3].
        angles: float tensor [N], one rotation angle per batch item.
        axis: axis index to rotate around (0, 1, or 2 / -1).

    Returns:
        float tensor [N, point_size, 3] of rotated points.

    Raises:
        ValueError: if ``axis`` is not in {-1, 0, 1, 2}.
    """
    # points: [N, point_size, 3]
    # angles: [N]
    rot_sin = torch.sin(angles)
    rot_cos = torch.cos(angles)
    ones = torch.ones_like(rot_cos)
    zeros = torch.zeros_like(rot_cos)
    # rot_mat_T has shape [3, 3, N]: a transposed rotation matrix per item.
    if axis == 1:
        rot_mat_T = tstack([
            tstack([rot_cos, zeros, -rot_sin]),
            tstack([zeros, ones, zeros]),
            tstack([rot_sin, zeros, rot_cos])
        ])
    elif axis == 2 or axis == -1:
        rot_mat_T = tstack([
            tstack([rot_cos, -rot_sin, zeros]),
            tstack([rot_sin, rot_cos, zeros]),
            tstack([zeros, zeros, ones])
        ])
    elif axis == 0:
        rot_mat_T = tstack([
            tstack([zeros, rot_cos, -rot_sin]),
            tstack([zeros, rot_sin, rot_cos]),
            tstack([ones, zeros, zeros])
        ])
    else:
        raise ValueError("axis should in range")
    # print(points.shape, rot_mat_T.shape)
    # Batched matmul: out[a, i, k] = sum_j points[a, i, j] * rot_mat_T[j, k, a]
    return torch.einsum('aij,jka->aik', points, rot_mat_T)
def rotation_points_single_angle(points, angle, axis=0):
    """Rotate an [N, 3] point set by one scalar angle around ``axis``.

    Args:
        points: float tensor [N, 3].
        angle: python scalar rotation angle (radians).
        axis: axis index to rotate around (0, 1, or 2 / -1).

    Returns:
        float tensor [N, 3] of rotated points.

    Raises:
        ValueError: if ``axis`` is not in {-1, 0, 1, 2}.
    """
    # points: [N, 3]
    rot_sin = math.sin(angle)
    rot_cos = math.cos(angle)
    # point_type constructs rows with the same tensor class as `points`.
    point_type = torchplus.get_tensor_class(points)
    if axis == 1:
        rot_mat_T = torch.stack([
            point_type([rot_cos, 0, -rot_sin]),
            point_type([0, 1, 0]),
            point_type([rot_sin, 0, rot_cos])
        ])
    elif axis == 2 or axis == -1:
        rot_mat_T = torch.stack([
            point_type([rot_cos, -rot_sin, 0]),
            point_type([rot_sin, rot_cos, 0]),
            point_type([0, 0, 1])
        ])
    elif axis == 0:
        rot_mat_T = torch.stack([
            point_type([1, 0, 0]),
            point_type([0, rot_cos, -rot_sin]),
            point_type([0, rot_sin, rot_cos])
        ])
    else:
        raise ValueError("axis should in range")
    # Right-multiply by the transposed rotation matrix.
    return points @ rot_mat_T
def rotation_2d(points, angles):
    """rotation 2d points based on origin point clockwise when angle positive.
    Args:
        points (float array, shape=[N, point_size, 2]): points to be rotated.
        angles (float array, shape=[N]): rotation angle.
    Returns:
        float array: same shape as points
    """
    cos_a = torch.cos(angles)
    sin_a = torch.sin(angles)
    # rot_mat_T has shape [2, 2, N]: one transposed rotation matrix per item.
    top_row = tstack([cos_a, -sin_a])
    bottom_row = tstack([sin_a, cos_a])
    rot_mat_T = torch.stack([top_row, bottom_row])
    return torch.einsum('aij,jka->aik', (points, rot_mat_T))
def center_to_corner_box3d(centers,
                           dims,
                           angles,
                           origin=(0.5, 0.5, 0.5),
                           axis=1):
    """convert kitti locations, dimensions and angles to corners
    Args:
        centers (float array, shape=[N, 3]): locations in kitti label file.
        dims (float array, shape=[N, 3]): dimensions in kitti label file.
        angles (float array, shape=[N]): rotation_y in kitti label file.
        origin (list or array or float): origin point relate to smallest point.
            use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.
        axis (int): rotation axis. 1 for camera and 2 for lidar.
    Returns:
        float array, shape=[N, 8, 3]: rotated and translated box corners.
    """
    # 'length' in kitti format is in x axis.
    # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
    # center in kitti format is [0.5, 1.0, 0.5] in xyz.
    # Build corners relative to the origin, rotate them, then translate.
    corners = corners_nd(dims, origin=origin)
    # corners: [N, 8, 3]
    corners = rotation_3d_in_axis(corners, angles, axis=axis)
    corners += centers.view(-1, 1, 3)
    return corners
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
    """convert kitti locations, dimensions and angles to corners
    Args:
        centers (float array, shape=[N, 2]): locations in kitti label file.
        dims (float array, shape=[N, 2]): dimensions in kitti label file.
        angles (float array, shape=[N]): rotation_y in kitti label file.
            If None, boxes are kept axis-aligned.
        origin (list or array or float): origin point relative to the
            smallest corner.
    Returns:
        float array, shape=[N, 4, 2]: rotated and translated box corners.
    """
    # 'length' in kitti format is in x axis.
    # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
    # center in kitti format is [0.5, 1.0, 0.5] in xyz.
    # Build corners relative to the origin, optionally rotate, then translate.
    corners = corners_nd(dims, origin=origin)
    # corners: [N, 4, 2]
    if angles is not None:
        corners = rotation_2d(corners, angles)
    corners += centers.view(-1, 1, 2)
    return corners
def project_to_image(points_3d, proj_mat):
    """Project 3d points onto the image plane with a projection matrix.

    Args:
        points_3d: float tensor [..., 3].
        proj_mat: projection matrix multiplied against 4-vectors.

    Returns:
        float tensor [..., 2] of pixel coordinates after perspective division.
    """
    points_num = list(points_3d.shape)[:-1]
    points_shape = np.concatenate([points_num, [1]], axis=0).tolist()
    # NOTE(review): the homogeneous coordinate is padded with zeros, not ones,
    # so any translation column of proj_mat is ignored — confirm this is
    # intended for the matrices callers pass in.
    points_4 = torch.cat(
        [points_3d, torch.zeros(*points_shape).type_as(points_3d)], dim=-1)
    # point_2d = points_4 @ tf.transpose(proj_mat, [1, 0])
    point_2d = torch.matmul(points_4, proj_mat.t())
    # Perspective divide by the projected depth.
    point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]
    return point_2d_res
def limit_period(val, offset=0.5, period=np.pi):
    """Wrap ``val`` into one period of width ``period``.

    With the default offset of 0.5 the result lies in
    [-period / 2, period / 2).
    """
    whole_turns = torch.floor(val / period + offset)
    return val - whole_turns * period
def camera_to_lidar(points, r_rect, velo2cam):
    """Transform camera-frame points into the lidar frame.

    Args:
        points: float tensor [N, 3] in the camera frame.
        r_rect: [4, 4] rectification matrix.
        velo2cam: [4, 4] lidar-to-camera extrinsic matrix.

    Returns:
        float tensor [N, 3] in the lidar frame.
    """
    ones = torch.ones(points.shape[0], 1).type_as(points)
    homogeneous = torch.cat([points, ones], dim=-1)
    # Apply the inverse of the (rectify @ extrinsic) chain.
    lidar_points = homogeneous @ torch.inverse((r_rect @ velo2cam).t())
    return lidar_points[..., :3]
def lidar_to_camera(points, r_rect, velo2cam):
    """Transform lidar-frame points into the camera frame.

    Args:
        points: float tensor [N, 3] in the lidar frame.
        r_rect: [4, 4] rectification matrix.
        velo2cam: [4, 4] lidar-to-camera extrinsic matrix.

    Returns:
        float tensor [N, 3] in the camera frame.
    """
    ones = torch.ones(points.shape[0], 1).type_as(points)
    homogeneous = torch.cat([points, ones], dim=-1)
    camera_points = homogeneous @ (r_rect @ velo2cam).t()
    return camera_points[..., :3]
def box_camera_to_lidar(data, r_rect, velo2cam):
    """Convert camera-frame boxes [..., 7] (x, y, z, l, h, w, r) to
    lidar-frame boxes (x, y, z, w, l, h, r)."""
    xyz_cam = data[..., 0:3]
    l = data[..., 3:4]
    h = data[..., 4:5]
    w = data[..., 5:6]
    r = data[..., 6:7]
    xyz = camera_to_lidar(xyz_cam, r_rect, velo2cam)
    # Dimension order switches from (l, h, w) to (w, l, h) in the lidar frame.
    return torch.cat([xyz, w, l, h, r], dim=-1)
def box_lidar_to_camera(data, r_rect, velo2cam):
    """Convert lidar-frame boxes [..., 7] (x, y, z, w, l, h, r) to
    camera-frame boxes (x, y, z, l, h, w, r)."""
    xyz_lidar = data[..., 0:3]
    w = data[..., 3:4]
    l = data[..., 4:5]
    h = data[..., 5:6]
    r = data[..., 6:7]
    xyz_cam = lidar_to_camera(xyz_lidar, r_rect, velo2cam)
    # Dimension order switches from (w, l, h) to (l, h, w) in the camera frame.
    return torch.cat([xyz_cam, l, h, w, r], dim=-1)
def multiclass_nms(nms_func,
                   boxes,
                   scores,
                   num_class,
                   pre_max_size=None,
                   post_max_size=None,
                   score_thresh=0.0,
                   iou_threshold=0.5):
    """Run NMS independently per class and collect the kept indices.

    Args:
        nms_func: callable (boxes, scores, pre_max_size, post_max_size,
            iou_threshold) returning a tensor of kept indices.
        boxes: [N, num_cls, 7] boxes, or [N, 1, 7] for class-agnostic boxes.
        scores: [N, num_cls] per-class scores.
        num_class: overwritten from scores.shape[1] below (kept for API
            compatibility).
        pre_max_size: optional top-k limit applied before NMS by nms_func.
        post_max_size: optional limit on kept boxes after NMS.
        score_thresh: drop detections scoring below this before NMS.
        iou_threshold: NMS overlap threshold.

    Returns:
        list with one entry per class: a tensor of kept row indices into the
        original ``boxes``/``scores``, or None when nothing survives.
    """
    # only output [selected] * num_class, please slice by your self
    selected_per_class = []
    assert len(boxes.shape) == 3, "bbox must have shape [N, num_cls, 7]"
    assert len(scores.shape) == 2, "score must have shape [N, num_cls]"
    num_class = scores.shape[1]
    if not (boxes.shape[1] == scores.shape[1] or boxes.shape[1] == 1):
        raise ValueError('second dimension of boxes must be either 1 or equal '
                         'to the second dimension of scores')
    num_boxes = boxes.shape[0]
    num_scores = scores.shape[0]
    num_classes = scores.shape[1]
    # With class-agnostic boxes ([N, 1, 7]) every class reuses column 0.
    boxes_ids = (range(num_classes)
                 if boxes.shape[1] > 1 else [0] * num_classes)
    for class_idx, boxes_idx in zip(range(num_classes), boxes_ids):
        # for class_idx in range(1, num_class):
        class_scores = scores[:, class_idx]
        class_boxes = boxes[:, boxes_idx]
        if score_thresh > 0.0:
            class_scores_keep = torch.nonzero(class_scores >= score_thresh)
            if class_scores_keep.shape[0] != 0:
                class_scores_keep = class_scores_keep[:, 0]
            else:
                selected_per_class.append(None)
                continue
            class_scores = class_scores[class_scores_keep]
        if class_scores.shape[0] != 0:
            if score_thresh > 0.0:
                class_boxes = class_boxes[class_scores_keep]
            keep = nms_func(class_boxes, class_scores, pre_max_size,
                            post_max_size, iou_threshold)
            if keep.shape[0] != 0:
                if score_thresh > 0.0:
                    # Map indices from the filtered subset back to the
                    # original row space.
                    selected_per_class.append(class_scores_keep[keep])
                else:
                    selected_per_class.append(keep)
            else:
                selected_per_class.append(None)
        else:
            selected_per_class.append(None)
    return selected_per_class
def nms(bboxes,
        scores,
        pre_max_size=None,
        post_max_size=None,
        iou_threshold=0.5):
    """Axis-aligned NMS backed by the compiled nms_gpu_cc kernel.

    Args:
        bboxes: [N, 4] boxes.
        scores: [N] detection scores.
        pre_max_size: if set, keep only the top-k scoring boxes before NMS.
        post_max_size: if set, keep at most this many boxes after NMS.
        iou_threshold: suppression overlap threshold.

    Returns:
        LongTensor of kept indices into the original ``bboxes`` (possibly
        empty).
    """
    if pre_max_size is not None:
        num_keeped_scores = scores.shape[0]
        pre_max_size = min(num_keeped_scores, pre_max_size)
        scores, indices = torch.topk(scores, k=pre_max_size)
        bboxes = bboxes[indices]
    dets = torch.cat([bboxes, scores.unsqueeze(-1)], dim=1)
    dets_np = dets.data.cpu().numpy()
    if len(dets_np) == 0:
        keep = np.array([], dtype=np.int64)
    else:
        ret = np.array(nms_gpu_cc(dets_np, iou_threshold), dtype=np.int64)
        # ret[:None] keeps everything when post_max_size is None.
        keep = ret[:post_max_size]
    if keep.shape[0] == 0:
        return torch.zeros([0]).long().to(bboxes.device)
    if pre_max_size is not None:
        # Translate indices from the top-k subset back to original rows.
        keep = torch.from_numpy(keep).long().to(bboxes.device)
        return indices[keep]
    else:
        return torch.from_numpy(keep).long().to(bboxes.device)
def nms_v2(bboxes,
           scores,
           pre_max_size=None,
           post_max_size=None,
           iou_threshold=0.5):
    """NMS via spconv's compiled op; -1 disables the size limits.

    Boxes and scores are moved to CPU for the op; the kept indices are
    returned on the boxes' original device.
    """
    pre_limit = -1 if pre_max_size is None else pre_max_size
    post_limit = -1 if post_max_size is None else post_max_size
    keep = spconv.ops.nms(bboxes.cpu(), scores.cpu(), pre_limit, post_limit,
                          iou_threshold, 1.0)
    return keep.to(bboxes.device)
def rotate_nms(rbboxes,
               scores,
               pre_max_size=None,
               post_max_size=None,
               iou_threshold=0.5):
    """Rotated-box NMS backed by the compiled rotate_nms_cc kernel.

    Args:
        rbboxes: [N, 5] rotated boxes.
        scores: [N] detection scores.
        pre_max_size: if set, keep only the top-k scoring boxes before NMS.
        post_max_size: if set, keep at most this many boxes after NMS.
        iou_threshold: suppression overlap threshold.

    Returns:
        LongTensor of kept indices into the original ``rbboxes`` (possibly
        empty).
    """
    if pre_max_size is not None:
        num_keeped_scores = scores.shape[0]
        pre_max_size = min(num_keeped_scores, pre_max_size)
        scores, indices = torch.topk(scores, k=pre_max_size)
        rbboxes = rbboxes[indices]
    dets = torch.cat([rbboxes, scores.unsqueeze(-1)], dim=1)
    dets_np = dets.data.cpu().numpy()
    if len(dets_np) == 0:
        keep = np.array([], dtype=np.int64)
    else:
        ret = np.array(rotate_nms_cc(dets_np, iou_threshold), dtype=np.int64)
        # ret[:None] keeps everything when post_max_size is None.
        keep = ret[:post_max_size]
    if keep.shape[0] == 0:
        return torch.zeros([0]).long().to(rbboxes.device)
    if pre_max_size is not None:
        # Translate indices from the top-k subset back to original rows.
        keep = torch.from_numpy(keep).long().to(rbboxes.device)
        return indices[keep]
    else:
        return torch.from_numpy(keep).long().to(rbboxes.device)
| 18,421 | 34.70155 | 101 | py |
second.pytorch | second.pytorch-master/second/pytorch/core/box_coders.py | import torch
from second.core.box_coders import BevBoxCoder, GroundBox3dCoder
from second.pytorch.core import box_torch_ops
class GroundBox3dCoderTorch(GroundBox3dCoder):
    """Torch variant of GroundBox3dCoder.

    Delegates to box_torch_ops so encode/decode can run on GPU tensors
    inside the network; vec_encode and linear_dim come from the base class.
    """

    def encode_torch(self, boxes, anchors):
        # Encode ground-truth boxes against anchors (SECOND 7-dim encoding).
        return box_torch_ops.second_box_encode(boxes, anchors, self.vec_encode,
                                               self.linear_dim)

    def decode_torch(self, boxes, anchors):
        # Inverse of encode_torch.
        return box_torch_ops.second_box_decode(boxes, anchors, self.vec_encode,
                                               self.linear_dim)
class BevBoxCoderTorch(BevBoxCoder):
    """Torch variant of BevBoxCoder.

    Encodes only the bird's-eye-view parameters (x, y, w, l, r); z and h are
    not regressed and are re-inserted as fixed values when decoding.
    """

    def encode_torch(self, boxes, anchors):
        # Drop z (index 2) and h (index 5) before BEV encoding.
        anchors = anchors[..., [0, 1, 3, 4, 6]]
        boxes = boxes[..., [0, 1, 3, 4, 6]]
        return box_torch_ops.bev_box_encode(boxes, anchors, self.vec_encode,
                                            self.linear_dim)

    def decode_torch(self, encodings, anchors):
        anchors = anchors[..., [0, 1, 3, 4, 6]]
        ret = box_torch_ops.bev_box_decode(encodings, anchors, self.vec_encode,
                                           self.linear_dim)
        # Re-insert the fixed z and h values configured on the base coder.
        z_fixed = torch.full([*ret.shape[:-1], 1],
                             self.z_fixed,
                             dtype=ret.dtype,
                             device=ret.device)
        h_fixed = torch.full([*ret.shape[:-1], 1],
                             self.h_fixed,
                             dtype=ret.dtype,
                             device=ret.device)
        return torch.cat(
            [ret[..., :2], z_fixed, ret[..., 2:4], h_fixed, ret[..., 4:]],
            dim=-1)
| 1,598 | 40 | 79 | py |
second.pytorch | second.pytorch-master/second/pytorch/models/voxelnet.py | import time
from enum import Enum
from functools import reduce
import contextlib
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import torchplus
from second.pytorch.core import box_torch_ops
from second.pytorch.core.losses import (WeightedSigmoidClassificationLoss,
WeightedSmoothL1LocalizationLoss,
WeightedSoftmaxClassificationLoss)
from second.pytorch.models import middle, pointpillars, rpn, voxel_encoder
from torchplus import metrics
from second.pytorch.utils import torch_timer
def _get_pos_neg_loss(cls_loss, labels):
# cls_loss: [N, num_anchors, num_class]
# labels: [N, num_anchors]
batch_size = cls_loss.shape[0]
if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:
cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(
batch_size, -1)
cls_neg_loss = (labels == 0).type_as(cls_loss) * cls_loss.view(
batch_size, -1)
cls_pos_loss = cls_pos_loss.sum() / batch_size
cls_neg_loss = cls_neg_loss.sum() / batch_size
else:
cls_pos_loss = cls_loss[..., 1:].sum() / batch_size
cls_neg_loss = cls_loss[..., 0].sum() / batch_size
return cls_pos_loss, cls_neg_loss
# Global registry mapping a name to its network class; populated by the
# @register_voxelnet decorator and queried by get_voxelnet_class.
REGISTERED_NETWORK_CLASSES = {}


def register_voxelnet(cls, name=None):
    """Class decorator registering ``cls`` under ``name``.

    ``name`` defaults to the class's own name. Registering the same name
    twice is an error.
    """
    global REGISTERED_NETWORK_CLASSES
    key = cls.__name__ if name is None else name
    assert key not in REGISTERED_NETWORK_CLASSES, f"exist class: {REGISTERED_NETWORK_CLASSES}"
    REGISTERED_NETWORK_CLASSES[key] = cls
    return cls


def get_voxelnet_class(name):
    """Look up a previously registered network class by name."""
    global REGISTERED_NETWORK_CLASSES
    assert name in REGISTERED_NETWORK_CLASSES, f"available class: {REGISTERED_NETWORK_CLASSES}"
    return REGISTERED_NETWORK_CLASSES[name]
class LossNormType(Enum):
    """Strategies for normalizing the per-batch training loss."""
    NormByNumPositives = "norm_by_num_positives"
    NormByNumExamples = "norm_by_num_examples"
    NormByNumPosNeg = "norm_by_num_pos_neg"
    DontNorm = "dont_norm"
@register_voxelnet
class VoxelNet(nn.Module):
    """Voxel-based 3D object detector.

    Pipeline: voxel feature encoder (VFE) -> middle feature extractor
    -> 2D RPN producing class, box and (optional) direction predictions.
    ``forward`` returns a loss dict in training mode and a list of decoded
    per-sample prediction dicts in eval mode.
    """
    def __init__(self,
                 output_shape,
                 num_class=2,
                 num_input_features=4,
                 vfe_class_name="VoxelFeatureExtractor",
                 vfe_num_filters=[32, 128],
                 with_distance=False,
                 middle_class_name="SparseMiddleExtractor",
                 middle_num_input_features=-1,
                 middle_num_filters_d1=[64],
                 middle_num_filters_d2=[64, 64],
                 rpn_class_name="RPN",
                 rpn_num_input_features=-1,
                 rpn_layer_nums=[3, 5, 5],
                 rpn_layer_strides=[2, 2, 2],
                 rpn_num_filters=[128, 128, 256],
                 rpn_upsample_strides=[1, 2, 4],
                 rpn_num_upsample_filters=[256, 256, 256],
                 use_norm=True,
                 use_groupnorm=False,
                 num_groups=32,
                 use_direction_classifier=True,
                 use_sigmoid_score=False,
                 encode_background_as_zeros=True,
                 use_rotate_nms=True,
                 multiclass_nms=False,
                 nms_score_thresholds=None,
                 nms_pre_max_sizes=None,
                 nms_post_max_sizes=None,
                 nms_iou_thresholds=None,
                 target_assigner=None,
                 cls_loss_weight=1.0,
                 loc_loss_weight=1.0,
                 pos_cls_weight=1.0,
                 neg_cls_weight=1.0,
                 direction_loss_weight=1.0,
                 loss_norm_type=LossNormType.NormByNumPositives,
                 encode_rad_error_by_sin=False,
                 loc_loss_ftor=None,
                 cls_loss_ftor=None,
                 measure_time=False,
                 voxel_generator=None,
                 post_center_range=None,
                 dir_offset=0.0,
                 sin_error_factor=1.0,
                 nms_class_agnostic=False,
                 num_direction_bins=2,
                 direction_limit_offset=0,
                 name='voxelnet'):
        """Build the VFE, middle extractor and RPN sub-networks.

        Sub-network classes are resolved by name from their respective
        registries (voxel_encoder, middle, rpn). ``target_assigner`` must
        provide ``box_coder`` and ``num_anchors_per_location``.
        """
        super().__init__()
        self.name = name
        self._sin_error_factor = sin_error_factor
        self._num_class = num_class
        self._use_rotate_nms = use_rotate_nms
        self._multiclass_nms = multiclass_nms
        self._nms_score_thresholds = nms_score_thresholds
        self._nms_pre_max_sizes = nms_pre_max_sizes
        self._nms_post_max_sizes = nms_post_max_sizes
        self._nms_iou_thresholds = nms_iou_thresholds
        self._use_sigmoid_score = use_sigmoid_score
        self._encode_background_as_zeros = encode_background_as_zeros
        self._use_direction_classifier = use_direction_classifier
        self._num_input_features = num_input_features
        self._box_coder = target_assigner.box_coder
        self.target_assigner = target_assigner
        self.voxel_generator = voxel_generator
        self._pos_cls_weight = pos_cls_weight
        self._neg_cls_weight = neg_cls_weight
        self._encode_rad_error_by_sin = encode_rad_error_by_sin
        self._loss_norm_type = loss_norm_type
        self._dir_loss_ftor = WeightedSoftmaxClassificationLoss()
        self._diff_loc_loss_ftor = WeightedSmoothL1LocalizationLoss()
        self._dir_offset = dir_offset
        self._loc_loss_ftor = loc_loss_ftor
        self._cls_loss_ftor = cls_loss_ftor
        self._direction_loss_weight = direction_loss_weight
        self._cls_loss_weight = cls_loss_weight
        self._loc_loss_weight = loc_loss_weight
        self._post_center_range = post_center_range or []
        self.measure_time = measure_time
        self._nms_class_agnostic = nms_class_agnostic
        self._num_direction_bins = num_direction_bins
        self._dir_limit_offset = direction_limit_offset
        # Sub-networks are looked up by name in their registries.
        self.voxel_feature_extractor = voxel_encoder.get_vfe_class(vfe_class_name)(
            num_input_features,
            use_norm,
            num_filters=vfe_num_filters,
            with_distance=with_distance,
            voxel_size=self.voxel_generator.voxel_size,
            pc_range=self.voxel_generator.point_cloud_range,
        )
        self.middle_feature_extractor = middle.get_middle_class(middle_class_name)(
            output_shape,
            use_norm,
            num_input_features=middle_num_input_features,
            num_filters_down1=middle_num_filters_d1,
            num_filters_down2=middle_num_filters_d2)
        self.rpn = rpn.get_rpn_class(rpn_class_name)(
            use_norm=True,
            num_class=num_class,
            layer_nums=rpn_layer_nums,
            layer_strides=rpn_layer_strides,
            num_filters=rpn_num_filters,
            upsample_strides=rpn_upsample_strides,
            num_upsample_filters=rpn_num_upsample_filters,
            num_input_features=rpn_num_input_features,
            num_anchor_per_loc=target_assigner.num_anchors_per_location,
            encode_background_as_zeros=encode_background_as_zeros,
            use_direction_classifier=use_direction_classifier,
            use_groupnorm=use_groupnorm,
            num_groups=num_groups,
            box_code_size=target_assigner.box_coder.code_size,
            num_direction_bins=self._num_direction_bins)
        # Running training metrics (accuracy / precision-recall / losses).
        self.rpn_acc = metrics.Accuracy(
            dim=-1, encode_background_as_zeros=encode_background_as_zeros)
        self.rpn_precision = metrics.Precision(dim=-1)
        self.rpn_recall = metrics.Recall(dim=-1)
        self.rpn_metrics = metrics.PrecisionRecall(
            dim=-1,
            thresholds=[0.1, 0.3, 0.5, 0.7, 0.8, 0.9, 0.95],
            use_sigmoid_score=use_sigmoid_score,
            encode_background_as_zeros=encode_background_as_zeros)
        self.rpn_cls_loss = metrics.Scalar()
        self.rpn_loc_loss = metrics.Scalar()
        self.rpn_total_loss = metrics.Scalar()
        # Persisted with the checkpoint so training can resume at the
        # correct step.
        self.register_buffer("global_step", torch.LongTensor(1).zero_())
        self._time_dict = {}
        self._time_total_dict = {}
        self._time_count_dict = {}
    def start_timer(self, *names):
        """Record a start time for each named timer (no-op unless measure_time)."""
        if not self.measure_time:
            return
        torch.cuda.synchronize()
        for name in names:
            self._time_dict[name] = time.time()
    def end_timer(self, name):
        """Accumulate elapsed time for ``name`` (no-op unless measure_time)."""
        if not self.measure_time:
            return
        torch.cuda.synchronize()
        time_elapsed = time.time() - self._time_dict[name]
        if name not in self._time_count_dict:
            self._time_count_dict[name] = 1
            self._time_total_dict[name] = time_elapsed
        else:
            self._time_count_dict[name] += 1
            self._time_total_dict[name] += time_elapsed
        self._time_dict[name] = 0
    def clear_timer(self):
        """Reset all timing accumulators."""
        self._time_count_dict.clear()
        self._time_dict.clear()
        self._time_total_dict.clear()
    @contextlib.contextmanager
    def profiler(self):
        # Temporarily force timing on inside a ``with net.profiler():`` block.
        old_measure_time = self.measure_time
        self.measure_time = True
        yield
        self.measure_time = old_measure_time
    def get_avg_time_dict(self):
        """Return mean elapsed seconds per named timer."""
        ret = {}
        for name, val in self._time_total_dict.items():
            count = self._time_count_dict[name]
            ret[name] = val / max(1, count)
        return ret
    def update_global_step(self):
        self.global_step += 1
    def get_global_step(self):
        return int(self.global_step.cpu().numpy()[0])
    def clear_global_step(self):
        self.global_step.zero_()
    def loss(self, example, preds_dict):
        """Compute the weighted detection loss.

        Combines localization and classification losses (plus direction
        loss when the direction classifier is enabled) into a scalar
        ``loss`` and returns it with its unreduced components.
        """
        box_preds = preds_dict["box_preds"]
        cls_preds = preds_dict["cls_preds"]
        batch_size_dev = cls_preds.shape[0]
        self.start_timer("loss forward")
        labels = example['labels']
        reg_targets = example['reg_targets']
        importance = example['importance']
        self.start_timer("prepare weight forward")
        cls_weights, reg_weights, cared = prepare_loss_weights(
            labels,
            pos_cls_weight=self._pos_cls_weight,
            neg_cls_weight=self._neg_cls_weight,
            loss_norm_type=self._loss_norm_type,
            dtype=box_preds.dtype)
        # Zero out targets for anchors marked "don't care" (label < 0).
        cls_targets = labels * cared.type_as(labels)
        cls_targets = cls_targets.unsqueeze(-1)
        self.end_timer("prepare weight forward")
        self.start_timer("create_loss forward")
        loc_loss, cls_loss = create_loss(
            self._loc_loss_ftor,
            self._cls_loss_ftor,
            box_preds=box_preds,
            cls_preds=cls_preds,
            cls_targets=cls_targets,
            cls_weights=cls_weights * importance,
            reg_targets=reg_targets,
            reg_weights=reg_weights * importance,
            num_class=self._num_class,
            encode_rad_error_by_sin=self._encode_rad_error_by_sin,
            encode_background_as_zeros=self._encode_background_as_zeros,
            box_code_size=self._box_coder.code_size,
            sin_error_factor=self._sin_error_factor,
            num_direction_bins=self._num_direction_bins,
        )
        loc_loss_reduced = loc_loss.sum() / batch_size_dev
        loc_loss_reduced *= self._loc_loss_weight
        cls_pos_loss, cls_neg_loss = _get_pos_neg_loss(cls_loss, labels)
        cls_pos_loss /= self._pos_cls_weight
        cls_neg_loss /= self._neg_cls_weight
        cls_loss_reduced = cls_loss.sum() / batch_size_dev
        cls_loss_reduced *= self._cls_loss_weight
        loss = loc_loss_reduced + cls_loss_reduced
        self.end_timer("create_loss forward")
        if self._use_direction_classifier:
            dir_targets = get_direction_target(
                example['anchors'],
                reg_targets,
                dir_offset=self._dir_offset,
                num_bins=self._num_direction_bins)
            dir_logits = preds_dict["dir_cls_preds"].view(
                batch_size_dev, -1, self._num_direction_bins)
            # Direction loss only applies to positive anchors, scaled by
            # per-anchor importance and normalized per example.
            weights = (labels > 0).type_as(dir_logits) * importance
            weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
            dir_loss = self._dir_loss_ftor(
                dir_logits, dir_targets, weights=weights)
            dir_loss = dir_loss.sum() / batch_size_dev
            loss += dir_loss * self._direction_loss_weight
        self.end_timer("loss forward")
        res = {
            "loss": loss,
            "cls_loss": cls_loss,
            "loc_loss": loc_loss,
            "cls_pos_loss": cls_pos_loss,
            "cls_neg_loss": cls_neg_loss,
            "cls_preds": cls_preds,
            "cls_loss_reduced": cls_loss_reduced,
            "loc_loss_reduced": loc_loss_reduced,
            "cared": cared,
        }
        if self._use_direction_classifier:
            res["dir_loss_reduced"] = dir_loss
        return res
    def network_forward(self, voxels, num_points, coors, batch_size):
        """this function is used for subclass.
        you can add custom network architecture by subclass VoxelNet class
        and override this function.
        Returns:
            preds_dict: {
                box_preds: ...
                cls_preds: ...
                dir_cls_preds: ...
            }
        """
        self.start_timer("voxel_feature_extractor")
        voxel_features = self.voxel_feature_extractor(voxels, num_points,
                                                      coors)
        self.end_timer("voxel_feature_extractor")
        self.start_timer("middle forward")
        spatial_features = self.middle_feature_extractor(
            voxel_features, coors, batch_size)
        self.end_timer("middle forward")
        self.start_timer("rpn forward")
        preds_dict = self.rpn(spatial_features)
        self.end_timer("rpn forward")
        return preds_dict
    def forward(self, example):
        """module's forward should always accept dict and return loss.

        In training mode returns the loss dict; in eval mode returns the
        decoded predictions from ``predict``.
        """
        voxels = example["voxels"]
        num_points = example["num_points"]
        coors = example["coordinates"]
        if len(num_points.shape) == 2:  # multi-gpu
            # Multi-GPU batches arrive padded per sample; strip the padding
            # and re-concatenate into flat voxel tensors.
            num_voxel_per_batch = example["num_voxels"].cpu().numpy().reshape(
                -1)
            voxel_list = []
            num_points_list = []
            coors_list = []
            for i, num_voxel in enumerate(num_voxel_per_batch):
                voxel_list.append(voxels[i, :num_voxel])
                num_points_list.append(num_points[i, :num_voxel])
                coors_list.append(coors[i, :num_voxel])
            voxels = torch.cat(voxel_list, dim=0)
            num_points = torch.cat(num_points_list, dim=0)
            coors = torch.cat(coors_list, dim=0)
        batch_anchors = example["anchors"]
        batch_size_dev = batch_anchors.shape[0]
        # features: [num_voxels, max_num_points_per_voxel, 7]
        # num_points: [num_voxels]
        # coors: [num_voxels, 4]
        preds_dict = self.network_forward(voxels, num_points, coors, batch_size_dev)
        # need to check size.
        box_preds = preds_dict["box_preds"].view(batch_size_dev, -1, self._box_coder.code_size)
        err_msg = f"num_anchors={batch_anchors.shape[1]}, but num_output={box_preds.shape[1]}. please check size"
        assert batch_anchors.shape[1] == box_preds.shape[1], err_msg
        if self.training:
            return self.loss(example, preds_dict)
        else:
            self.start_timer("predict")
            with torch.no_grad():
                res = self.predict(example, preds_dict)
            self.end_timer("predict")
            return res
    def predict(self, example, preds_dict):
        """start with v1.6.0, this function don't contain any kitti-specific code.
        Returns:
            predict: list of pred_dict.
            pred_dict: {
                box3d_lidar: [N, 7] 3d box.
                scores: [N]
                label_preds: [N]
                metadata: meta-data which contains dataset-specific information.
                    for kitti, it contains image idx (label idx),
                    for nuscenes, sample_token is saved in it.
            }
        """
        batch_size = example['anchors'].shape[0]
        if "metadata" not in example or len(example["metadata"]) == 0:
            meta_list = [None] * batch_size
        else:
            meta_list = example["metadata"]
        batch_anchors = example["anchors"].view(batch_size, -1,
                                                example["anchors"].shape[-1])
        if "anchors_mask" not in example:
            batch_anchors_mask = [None] * batch_size
        else:
            batch_anchors_mask = example["anchors_mask"].view(batch_size, -1)
        t = time.time()
        batch_box_preds = preds_dict["box_preds"]
        batch_cls_preds = preds_dict["cls_preds"]
        batch_box_preds = batch_box_preds.view(batch_size, -1,
                                               self._box_coder.code_size)
        num_class_with_bg = self._num_class
        if not self._encode_background_as_zeros:
            num_class_with_bg = self._num_class + 1
        batch_cls_preds = batch_cls_preds.view(batch_size, -1,
                                               num_class_with_bg)
        # Decode box regression residuals against their anchors.
        batch_box_preds = self._box_coder.decode_torch(batch_box_preds,
                                                       batch_anchors)
        if self._use_direction_classifier:
            batch_dir_preds = preds_dict["dir_cls_preds"]
            batch_dir_preds = batch_dir_preds.view(batch_size, -1,
                                                   self._num_direction_bins)
        else:
            batch_dir_preds = [None] * batch_size
        predictions_dicts = []
        post_center_range = None
        if len(self._post_center_range) > 0:
            post_center_range = torch.tensor(
                self._post_center_range,
                dtype=batch_box_preds.dtype,
                device=batch_box_preds.device).float()
        # Per-sample postprocessing: score thresholding, NMS, direction fix.
        for box_preds, cls_preds, dir_preds, a_mask, meta in zip(
                batch_box_preds, batch_cls_preds, batch_dir_preds,
                batch_anchors_mask, meta_list):
            if a_mask is not None:
                box_preds = box_preds[a_mask]
                cls_preds = cls_preds[a_mask]
            box_preds = box_preds.float()
            cls_preds = cls_preds.float()
            if self._use_direction_classifier:
                if a_mask is not None:
                    dir_preds = dir_preds[a_mask]
                dir_labels = torch.max(dir_preds, dim=-1)[1]
            if self._encode_background_as_zeros:
                # this don't support softmax
                assert self._use_sigmoid_score is True
                total_scores = torch.sigmoid(cls_preds)
            else:
                # encode background as first element in one-hot vector
                if self._use_sigmoid_score:
                    total_scores = torch.sigmoid(cls_preds)[..., 1:]
                else:
                    total_scores = F.softmax(cls_preds, dim=-1)[..., 1:]
            # Apply NMS in birdeye view
            if self._use_rotate_nms:
                nms_func = box_torch_ops.rotate_nms
            else:
                nms_func = box_torch_ops.nms
            feature_map_size_prod = batch_box_preds.shape[
                1] // self.target_assigner.num_anchors_per_location
            if self._multiclass_nms:
                # Per-class NMS with per-class thresholds.
                assert self._encode_background_as_zeros is True
                boxes_for_nms = box_preds[:, [0, 1, 3, 4, 6]]
                if not self._use_rotate_nms:
                    box_preds_corners = box_torch_ops.center_to_corner_box2d(
                        boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],
                        boxes_for_nms[:, 4])
                    boxes_for_nms = box_torch_ops.corner_to_standup_nd(
                        box_preds_corners)
                selected_boxes, selected_labels, selected_scores = [], [], []
                selected_dir_labels = []
                scores = total_scores
                boxes = boxes_for_nms
                selected_per_class = []
                score_threshs = self._nms_score_thresholds
                pre_max_sizes = self._nms_pre_max_sizes
                post_max_sizes = self._nms_post_max_sizes
                iou_thresholds = self._nms_iou_thresholds
                for class_idx, score_thresh, pre_ms, post_ms, iou_th in zip(
                        range(self._num_class),
                        score_threshs,
                        pre_max_sizes, post_max_sizes, iou_thresholds):
                    if self._nms_class_agnostic:
                        class_scores = total_scores.view(
                            feature_map_size_prod, -1,
                            self._num_class)[..., class_idx]
                        class_scores = class_scores.contiguous().view(-1)
                        class_boxes_nms = boxes.view(-1,
                                                     boxes_for_nms.shape[-1])
                        class_boxes = box_preds
                        class_dir_labels = dir_labels
                    else:
                        # Restrict to the anchor range assigned to this class.
                        anchors_range = self.target_assigner.anchors_range(class_idx)
                        class_scores = total_scores.view(
                            -1,
                            self._num_class)[anchors_range[0]:anchors_range[1], class_idx]
                        class_boxes_nms = boxes.view(-1,
                            boxes_for_nms.shape[-1])[anchors_range[0]:anchors_range[1], :]
                        class_scores = class_scores.contiguous().view(-1)
                        class_boxes_nms = class_boxes_nms.contiguous().view(
                            -1, boxes_for_nms.shape[-1])
                        class_boxes = box_preds.view(-1,
                            box_preds.shape[-1])[anchors_range[0]:anchors_range[1], :]
                        class_boxes = class_boxes.contiguous().view(
                            -1, box_preds.shape[-1])
                        if self._use_direction_classifier:
                            class_dir_labels = dir_labels.view(-1)[anchors_range[0]:anchors_range[1]]
                            class_dir_labels = class_dir_labels.contiguous(
                            ).view(-1)
                    if score_thresh > 0.0:
                        class_scores_keep = class_scores >= score_thresh
                        if class_scores_keep.shape[0] == 0:
                            selected_per_class.append(None)
                            continue
                        class_scores = class_scores[class_scores_keep]
                    if class_scores.shape[0] != 0:
                        if score_thresh > 0.0:
                            class_boxes_nms = class_boxes_nms[
                                class_scores_keep]
                            class_boxes = class_boxes[class_scores_keep]
                            class_dir_labels = class_dir_labels[
                                class_scores_keep]
                        keep = nms_func(class_boxes_nms, class_scores, pre_ms,
                                        post_ms, iou_th)
                        if keep.shape[0] != 0:
                            selected_per_class.append(keep)
                        else:
                            selected_per_class.append(None)
                    else:
                        selected_per_class.append(None)
                    selected = selected_per_class[-1]
                    if selected is not None:
                        selected_boxes.append(class_boxes[selected])
                        selected_labels.append(
                            torch.full([class_boxes[selected].shape[0]],
                                       class_idx,
                                       dtype=torch.int64,
                                       device=box_preds.device))
                        if self._use_direction_classifier:
                            selected_dir_labels.append(
                                class_dir_labels[selected])
                        selected_scores.append(class_scores[selected])
                # NOTE(review): torch.cat raises on an empty list, so this
                # path assumes at least one class kept a detection — confirm.
                selected_boxes = torch.cat(selected_boxes, dim=0)
                selected_labels = torch.cat(selected_labels, dim=0)
                selected_scores = torch.cat(selected_scores, dim=0)
                if self._use_direction_classifier:
                    selected_dir_labels = torch.cat(selected_dir_labels, dim=0)
            else:
                # get highest score per prediction, than apply nms
                # to remove overlapped box.
                if num_class_with_bg == 1:
                    top_scores = total_scores.squeeze(-1)
                    top_labels = torch.zeros(
                        total_scores.shape[0],
                        device=total_scores.device,
                        dtype=torch.long)
                else:
                    top_scores, top_labels = torch.max(
                        total_scores, dim=-1)
                if self._nms_score_thresholds[0] > 0.0:
                    top_scores_keep = top_scores >= self._nms_score_thresholds[0]
                    top_scores = top_scores.masked_select(top_scores_keep)
                if top_scores.shape[0] != 0:
                    if self._nms_score_thresholds[0] > 0.0:
                        box_preds = box_preds[top_scores_keep]
                        if self._use_direction_classifier:
                            dir_labels = dir_labels[top_scores_keep]
                        top_labels = top_labels[top_scores_keep]
                    boxes_for_nms = box_preds[:, [0, 1, 3, 4, 6]]
                    if not self._use_rotate_nms:
                        box_preds_corners = box_torch_ops.center_to_corner_box2d(
                            boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],
                            boxes_for_nms[:, 4])
                        boxes_for_nms = box_torch_ops.corner_to_standup_nd(
                            box_preds_corners)
                    # the nms in 3d detection just remove overlap boxes.
                    selected = nms_func(
                        boxes_for_nms,
                        top_scores,
                        pre_max_size=self._nms_pre_max_sizes[0],
                        post_max_size=self._nms_post_max_sizes[0],
                        iou_threshold=self._nms_iou_thresholds[0],
                    )
                else:
                    # Nothing above threshold: empty index list makes the
                    # selections below empty tensors.
                    selected = []
                # if selected is not None:
                selected_boxes = box_preds[selected]
                if self._use_direction_classifier:
                    selected_dir_labels = dir_labels[selected]
                selected_labels = top_labels[selected]
                selected_scores = top_scores[selected]
            # finally generate predictions.
            if selected_boxes.shape[0] != 0:
                box_preds = selected_boxes
                scores = selected_scores
                label_preds = selected_labels
                if self._use_direction_classifier:
                    # Resolve the yaw's 180-degree ambiguity with the
                    # predicted direction bin.
                    dir_labels = selected_dir_labels
                    period = (2 * np.pi / self._num_direction_bins)
                    dir_rot = box_torch_ops.limit_period(
                        box_preds[..., 6] - self._dir_offset,
                        self._dir_limit_offset, period)
                    box_preds[
                        ...,
                        6] = dir_rot + self._dir_offset + period * dir_labels.to(
                            box_preds.dtype)
                final_box_preds = box_preds
                final_scores = scores
                final_labels = label_preds
                if post_center_range is not None:
                    # Drop boxes whose centers fall outside the valid range.
                    mask = (final_box_preds[:, :3] >=
                            post_center_range[:3]).all(1)
                    mask &= (final_box_preds[:, :3] <=
                             post_center_range[3:]).all(1)
                    predictions_dict = {
                        "box3d_lidar": final_box_preds[mask],
                        "scores": final_scores[mask],
                        "label_preds": label_preds[mask],
                        "metadata": meta,
                    }
                else:
                    predictions_dict = {
                        "box3d_lidar": final_box_preds,
                        "scores": final_scores,
                        "label_preds": label_preds,
                        "metadata": meta,
                    }
            else:
                # NOTE(review): ``top_labels`` is only defined on the
                # single-class NMS path; reaching this branch with
                # multiclass NMS would raise NameError — confirm unreachable.
                dtype = batch_box_preds.dtype
                device = batch_box_preds.device
                predictions_dict = {
                    "box3d_lidar":
                    torch.zeros([0, box_preds.shape[-1]],
                                dtype=dtype,
                                device=device),
                    "scores":
                    torch.zeros([0], dtype=dtype, device=device),
                    "label_preds":
                    torch.zeros([0], dtype=top_labels.dtype, device=device),
                    "metadata":
                    meta,
                }
            predictions_dicts.append(predictions_dict)
        return predictions_dicts
    def metrics_to_float(self):
        """Cast all running metric accumulators to float32."""
        self.rpn_acc.float()
        self.rpn_metrics.float()
        self.rpn_cls_loss.float()
        self.rpn_loc_loss.float()
        self.rpn_total_loss.float()
    def update_metrics(self, cls_loss, loc_loss, cls_preds, labels, sampled):
        """Update running metrics and return a nested dict of floats
        (losses, accuracy, precision/recall per threshold) for logging."""
        batch_size = cls_preds.shape[0]
        num_class = self._num_class
        if not self._encode_background_as_zeros:
            num_class += 1
        cls_preds = cls_preds.view(batch_size, -1, num_class)
        rpn_acc = self.rpn_acc(labels, cls_preds, sampled).numpy()[0]
        prec, recall = self.rpn_metrics(labels, cls_preds, sampled)
        prec = prec.numpy()
        recall = recall.numpy()
        rpn_cls_loss = self.rpn_cls_loss(cls_loss).numpy()[0]
        rpn_loc_loss = self.rpn_loc_loss(loc_loss).numpy()[0]
        ret = {
            "loss": {
                "cls_loss": float(rpn_cls_loss),
                "cls_loss_rt": float(cls_loss.data.cpu().numpy()),
                'loc_loss': float(rpn_loc_loss),
                "loc_loss_rt": float(loc_loss.data.cpu().numpy()),
            },
            "rpn_acc": float(rpn_acc),
            "pr": {},
        }
        for i, thresh in enumerate(self.rpn_metrics.thresholds):
            ret["pr"][f"prec@{int(thresh*100)}"] = float(prec[i])
            ret["pr"][f"rec@{int(thresh*100)}"] = float(recall[i])
        return ret
    def clear_metrics(self):
        """Reset all running metric accumulators."""
        self.rpn_acc.clear()
        self.rpn_metrics.clear()
        self.rpn_cls_loss.clear()
        self.rpn_loc_loss.clear()
        self.rpn_total_loss.clear()
    @staticmethod
    def convert_norm_to_float(net):
        '''
        BatchNorm layers to have parameters in single precision.
        Find all layers and convert them back to float. This can't
        be done with built in .apply as that function will apply
        fn to all modules, parameters, and buffers. Thus we wouldn't
        be able to guard the float conversion based on the module type.
        '''
        if isinstance(net, torch.nn.modules.batchnorm._BatchNorm):
            net.float()
        for child in net.children():
            VoxelNet.convert_norm_to_float(child)
        return net
def add_sin_difference(boxes1, boxes2, boxes1_rot, boxes2_rot, factor=1.0):
    """Encode the rotation channel so an L1 loss measures sin(r1 - r2).

    Uses sin(a - b) = sin(a)cos(b) - cos(a)sin(b): channel 6 of ``boxes1``
    becomes sin(r1)cos(r2) and channel 6 of ``boxes2`` becomes
    cos(r1)sin(r2), so subtracting the encoded boxes yields sin(r1 - r2)
    in that channel. ``factor`` optionally rescales both rotations first.
    """
    if factor != 1.0:
        boxes1_rot = boxes1_rot * factor
        boxes2_rot = boxes2_rot * factor
    pred_encoding = torch.sin(boxes1_rot) * torch.cos(boxes2_rot)
    target_encoding = torch.cos(boxes1_rot) * torch.sin(boxes2_rot)
    encoded1 = torch.cat(
        [boxes1[..., :6], pred_encoding, boxes1[..., 7:]], dim=-1)
    encoded2 = torch.cat(
        [boxes2[..., :6], target_encoding, boxes2[..., 7:]], dim=-1)
    return encoded1, encoded2
def create_loss(loc_loss_ftor,
                cls_loss_ftor,
                box_preds,
                cls_preds,
                cls_targets,
                cls_weights,
                reg_targets,
                reg_weights,
                num_class,
                encode_background_as_zeros=True,
                encode_rad_error_by_sin=True,
                sin_error_factor=1.0,
                box_code_size=7,
                num_direction_bins=2):
    """Apply the localization and classification loss functors.

    Reshapes predictions to per-anchor form, one-hot encodes class
    targets (dropping the background column when
    ``encode_background_as_zeros``), optionally sin-encodes the rotation
    channel, and returns the unreduced (loc_losses, cls_losses) pair.
    """
    batch_size = int(box_preds.shape[0])
    box_preds = box_preds.view(batch_size, -1, box_code_size)
    if encode_background_as_zeros:
        cls_preds = cls_preds.view(batch_size, -1, num_class)
    else:
        cls_preds = cls_preds.view(batch_size, -1, num_class + 1)
    cls_targets = cls_targets.squeeze(-1)
    one_hot_targets = torchplus.nn.one_hot(
        cls_targets, depth=num_class + 1, dtype=box_preds.dtype)
    if encode_background_as_zeros:
        # Drop the background column so targets align with cls_preds.
        one_hot_targets = one_hot_targets[..., 1:]
    if encode_rad_error_by_sin:
        # sin(a - b) = sinacosb-cosasinb
        # reg_tg_rot = box_torch_ops.limit_period(
        #     reg_targets[..., 6:7], 0.5, 2 * np.pi / num_direction_bins)
        box_preds, reg_targets = add_sin_difference(box_preds, reg_targets, box_preds[..., 6:7], reg_targets[..., 6:7],
                                                    sin_error_factor)
    loc_losses = loc_loss_ftor(
        box_preds, reg_targets, weights=reg_weights)  # [N, M]
    cls_losses = cls_loss_ftor(
        cls_preds, one_hot_targets, weights=cls_weights)  # [N, M]
    return loc_losses, cls_losses
def prepare_loss_weights(labels,
                         pos_cls_weight=1.0,
                         neg_cls_weight=1.0,
                         loss_norm_type=LossNormType.NormByNumPositives,
                         dtype=torch.float32):
    """Derive per-anchor classification/regression weights from labels.

    Args:
        labels: [N, num_anchors]; >0 positive, ==0 negative, <0 ignored.
        pos_cls_weight / neg_cls_weight: raw class-weight multipliers.
        loss_norm_type: one of LossNormType, selecting the normalizer.
        dtype: dtype of the returned weight tensors.

    Returns:
        (cls_weights, reg_weights, cared) where ``cared`` is a boolean
        mask of anchors with label >= 0.
    """
    cared = labels >= 0
    positives = labels > 0
    negatives = labels == 0
    pos_f = positives.type(dtype)
    neg_f = negatives.type(dtype)
    cls_weights = neg_cls_weight * neg_f + pos_cls_weight * pos_f
    reg_weights = pos_f
    if loss_norm_type == LossNormType.NormByNumExamples:
        num_examples = torch.clamp(
            cared.type(dtype).sum(1, keepdim=True), min=1.0)
        cls_weights /= num_examples
        bbox_normalizer = positives.sum(1, keepdim=True).type(dtype)
        reg_weights /= torch.clamp(bbox_normalizer, min=1.0)
    elif loss_norm_type == LossNormType.NormByNumPositives:  # for focal loss
        pos_normalizer = torch.clamp(
            positives.sum(1, keepdim=True).type(dtype), min=1.0)
        reg_weights /= pos_normalizer
        cls_weights /= pos_normalizer
    elif loss_norm_type == LossNormType.NormByNumPosNeg:
        pos_neg = torch.stack([positives, negatives], dim=-1).type(dtype)
        normalizer = pos_neg.sum(1, keepdim=True)  # [N, 1, 2]
        # Per-anchor: divide by the count of its own group (pos or neg).
        cls_normalizer = torch.clamp((pos_neg * normalizer).sum(-1), min=1.0)
        normalizer = torch.clamp(normalizer, min=1.0)
        reg_weights /= normalizer[:, 0:1, 0]
        cls_weights /= cls_normalizer
    elif loss_norm_type == LossNormType.DontNorm:  # support ghm loss
        # Regression weights are still normalized here (kept for GHM loss).
        pos_normalizer = torch.clamp(
            positives.sum(1, keepdim=True).type(dtype), min=1.0)
        reg_weights /= pos_normalizer
    else:
        raise ValueError(
            f"unknown loss norm type. available: {list(LossNormType)}")
    return cls_weights, reg_weights, cared
def assign_weight_to_each_class(labels,
                                weight_per_class,
                                norm_by_num=True,
                                dtype=torch.float32):
    """Build a per-anchor weight tensor from (label, weight) pairs.

    Each ``(label, weight)`` entry contributes ``weight`` wherever
    ``labels == label``; with ``norm_by_num`` that contribution is divided
    by the count of matching anchors (clamped to at least 1).
    """
    total = torch.zeros(labels.shape, dtype=dtype, device=labels.device)
    for label, weight in weight_per_class:
        mask = (labels == label).type(dtype)
        contribution = weight * mask
        if norm_by_num:
            count = torch.clamp(mask.sum(), min=1.0)
            contribution = contribution / count
        total += contribution
    return total
def get_direction_target(anchors,
                         reg_targets,
                         one_hot=True,
                         dir_offset=0,
                         num_bins=2):
    """Compute direction-classifier targets from rotation residuals.

    Ground-truth yaw is recovered as anchor yaw plus regression residual,
    shifted by ``dir_offset``, wrapped into [0, 2*pi) and discretized into
    ``num_bins`` equal bins. Returns bin indices, or one-hot targets when
    ``one_hot`` is True.
    """
    batch_size = reg_targets.shape[0]
    anchors = anchors.view(batch_size, -1, anchors.shape[-1])
    rot_gt = reg_targets[..., 6] + anchors[..., 6]
    offset_rot = box_torch_ops.limit_period(rot_gt - dir_offset, 0, 2 * np.pi)
    bin_size = 2 * np.pi / num_bins
    dir_cls_targets = torch.clamp(
        torch.floor(offset_rot / bin_size).long(), min=0, max=num_bins - 1)
    if one_hot:
        dir_cls_targets = torchplus.nn.one_hot(
            dir_cls_targets, num_bins, dtype=anchors.dtype)
    return dir_cls_targets
| 37,009 | 43.64415 | 119 | py |
second.pytorch | second.pytorch-master/second/pytorch/models/resnet.py | import spconv
from torch import nn
from torch.nn import functional as F
from torchplus.nn import Empty, GroupNorm, Sequential
def conv3x3(in_planes, out_planes, stride=1, indice_key=None):
    """3x3x3 submanifold sparse convolution, padding 1, no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes, kernel_size=3, stride=stride,
        padding=1, bias=False, indice_key=indice_key)
def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
    """1x1x1 submanifold sparse convolution, no bias.

    NOTE(review): padding=1 is unusual for a 1x1 kernel (dense ResNet uses
    padding=0); for SubMConv3d the active output sites mirror the input so
    it appears harmless, but confirm it is intentional.
    """
    return spconv.SubMConv3d(
        in_planes, out_planes, kernel_size=1, stride=stride,
        padding=1, bias=False, indice_key=indice_key)
class SparseBasicBlock(spconv.SparseModule):
    # Sparse 3D analogue of torchvision's BasicBlock: two 3x3 submanifold
    # convolutions with BatchNorm1d/ReLU over the sparse feature matrix,
    # plus an identity (or downsampled) residual connection.
    expansion = 1
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 indice_key=None):
        """Build the two-conv residual block.

        ``downsample`` (optional module) transforms the skip path when the
        shape changes; ``indice_key`` lets spconv reuse indice structures.
        """
        super(SparseBasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride, indice_key=indice_key)
        self.bn1 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU()
        self.conv2 = conv3x3(planes, planes, indice_key=indice_key)
        self.bn2 = nn.BatchNorm1d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """conv-bn-relu, conv-bn, add residual features, final relu."""
        identity = x
        out = self.conv1(x)
        # BN/ReLU operate on the dense [num_active, C] feature matrix of
        # the sparse tensor, not on the tensor object itself.
        out.features = self.bn1(out.features)
        out.features = self.relu(out.features)
        out = self.conv2(out)
        out.features = self.bn2(out.features)
        if self.downsample is not None:
            identity = self.downsample(x)
        out.features += identity.features
        out.features = self.relu(out.features)
        return out
class SparseBottleneck(spconv.SparseModule):
    """Sparse 3D analogue of torchvision's Bottleneck block.

    1x1 reduce -> 3x3 -> 1x1 expand (by ``expansion``), each followed by
    BatchNorm1d over the sparse feature matrix, with a residual skip
    connection (optionally transformed by ``downsample``).
    """
    expansion = 4
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 indice_key=None):
        super(SparseBottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes, indice_key=indice_key)
        self.bn1 = nn.BatchNorm1d(planes)
        self.conv2 = conv3x3(planes, planes, stride, indice_key=indice_key)
        self.bn2 = nn.BatchNorm1d(planes)
        self.conv3 = conv1x1(
            planes, planes * self.expansion, indice_key=indice_key)
        self.bn3 = nn.BatchNorm1d(planes * self.expansion)
        self.relu = nn.ReLU()
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """1x1-bn-relu, 3x3-bn-relu, 1x1-bn, add residual, final relu."""
        identity = x
        out = self.conv1(x)
        out.features = self.bn1(out.features)
        out.features = self.relu(out.features)
        out = self.conv2(out)
        out.features = self.bn2(out.features)
        out.features = self.relu(out.features)
        out = self.conv3(out)
        out.features = self.bn3(out.features)
        if self.downsample is not None:
            identity = self.downsample(x)
        # Bug fix: add the residual on the sparse feature matrices, as
        # SparseBasicBlock does. The previous ``out += identity`` tried to
        # add two spconv.SparseConvTensor objects, which is not a supported
        # element-wise operation.
        out.features += identity.features
        out.features = self.relu(out.features)
        return out
| 3,059 | 26.567568 | 77 | py |
second.pytorch | second.pytorch-master/second/pytorch/models/middle.py | import time
import numpy as np
import spconv
import torch
from torch import nn
from torch.nn import functional as F
from second.pytorch.models.resnet import SparseBasicBlock
from torchplus.nn import Empty, GroupNorm, Sequential
from torchplus.ops.array_ops import gather_nd, scatter_nd
from torchplus.tools import change_default_args
from second.pytorch.utils import torch_timer
# Global registry mapping middle-extractor name -> class.
REGISTERED_MIDDLE_CLASSES = {}
def register_middle(cls, name=None):
    """Class decorator recording ``cls`` in the middle-extractor registry.

    The registry key defaults to the class name. Registering the same key
    twice is a programming error and raises AssertionError.
    """
    global REGISTERED_MIDDLE_CLASSES
    key = cls.__name__ if name is None else name
    assert key not in REGISTERED_MIDDLE_CLASSES, f"exist class: {REGISTERED_MIDDLE_CLASSES}"
    REGISTERED_MIDDLE_CLASSES[key] = cls
    return cls
def get_middle_class(name):
    """Return the middle-extractor class registered under ``name``.

    Raises AssertionError when ``name`` was never registered.
    """
    assert name in REGISTERED_MIDDLE_CLASSES, f"available class: {REGISTERED_MIDDLE_CLASSES}"
    return REGISTERED_MIDDLE_CLASSES[name]
@register_middle
class SparseMiddleExtractor(nn.Module):
    """Sparse 3D middle feature extractor.

    Scatters voxel features into a sparse tensor of shape
    ``output_shape[1:4] + [1, 0, 0]``, applies two groups of submanifold
    convolutions each followed by a depth-downsampling sparse conv, then
    densifies and flattens depth into channels for the 2D RPN.
    """
    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 num_filters_down1=[64],
                 num_filters_down2=[64, 64],
                 name='SparseMiddleExtractor'):
        super(SparseMiddleExtractor, self).__init__()
        self.name = name
        if use_norm:
            BatchNorm1d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
        else:
            BatchNorm1d = Empty
        # Bug fix: removed ``self.scn_input = scn.InputLayer(...)`` —
        # ``scn`` (SparseConvNet) is never imported in this module, so
        # constructing this class raised NameError; the attribute was a
        # dead leftover and never used. Also dropped the unused ``Linear``
        # alias that was built alongside BatchNorm1d.
        sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
        print(sparse_shape)
        self.sparse_shape = sparse_shape
        self.voxel_output_shape = output_shape
        middle_layers = []
        # First group: submanifold convs, then downsample depth by 2.
        num_filters = [num_input_features] + num_filters_down1
        filters_pairs_d1 = [[num_filters[i], num_filters[i + 1]]
                            for i in range(len(num_filters) - 1)]
        for i, o in filters_pairs_d1:
            middle_layers.append(
                spconv.SubMConv3d(i, o, 3, bias=False, indice_key="subm0"))
            middle_layers.append(BatchNorm1d(o))
            middle_layers.append(nn.ReLU())
        middle_layers.append(
            spconv.SparseConv3d(
                num_filters[-1],
                num_filters[-1], (3, 1, 1), (2, 1, 1),
                bias=False))
        middle_layers.append(BatchNorm1d(num_filters[-1]))
        middle_layers.append(nn.ReLU())
        # Second group: more submanifold convs, then another depth
        # downsample by 2.
        if len(num_filters_down1) == 0:
            num_filters = [num_filters[-1]] + num_filters_down2
        else:
            num_filters = [num_filters_down1[-1]] + num_filters_down2
        filters_pairs_d2 = [[num_filters[i], num_filters[i + 1]]
                            for i in range(len(num_filters) - 1)]
        for i, o in filters_pairs_d2:
            middle_layers.append(
                spconv.SubMConv3d(i, o, 3, bias=False, indice_key="subm1"))
            middle_layers.append(BatchNorm1d(o))
            middle_layers.append(nn.ReLU())
        middle_layers.append(
            spconv.SparseConv3d(
                num_filters[-1],
                num_filters[-1], (3, 1, 1), (2, 1, 1),
                bias=False))
        middle_layers.append(BatchNorm1d(num_filters[-1]))
        middle_layers.append(nn.ReLU())
        self.middle_conv = spconv.SparseSequential(*middle_layers)
    def forward(self, voxel_features, coors, batch_size):
        """Run the sparse conv stack and densify to [N, C*D, H, W]."""
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.middle_conv(ret)
        ret = ret.dense()
        N, C, D, H, W = ret.shape
        # Fold the (small) remaining depth dimension into channels so the
        # result is a BEV feature map for the 2D RPN.
        ret = ret.view(N, C * D, H, W)
        return ret
@register_middle
class SpMiddleFHD(nn.Module):
    # Full-HD sparse middle extractor: a fixed pyramid of submanifold and
    # strided sparse 3D convolutions reducing [1600, 1200, 41] down to
    # [200, 150, 2], densified and flattened to a BEV map for the RPN.
    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 num_filters_down1=[64],
                 num_filters_down2=[64, 64],
                 name='SpMiddleFHD'):
        """Build the fixed conv pyramid.

        NOTE(review): num_filters_down1/num_filters_down2 are accepted for
        registry-interface compatibility but unused — the channel widths
        below are hard-coded. Confirm this is intentional.
        """
        super(SpMiddleFHD, self).__init__()
        self.name = name
        if use_norm:
            BatchNorm2d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
            BatchNorm1d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            Conv2d = change_default_args(bias=False)(nn.Conv2d)
            SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
            SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
            ConvTranspose2d = change_default_args(bias=False)(
                nn.ConvTranspose2d)
        else:
            BatchNorm2d = Empty
            BatchNorm1d = Empty
            Conv2d = change_default_args(bias=True)(nn.Conv2d)
            SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
            SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
            ConvTranspose2d = change_default_args(bias=True)(
                nn.ConvTranspose2d)
        # Add one cell of depth padding so repeated stride-2 depth
        # reductions come out even.
        sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
        # sparse_shape[0] = 11
        print(sparse_shape)
        self.sparse_shape = sparse_shape
        self.voxel_output_shape = output_shape
        # input: # [1600, 1200, 41]
        self.middle_conv = spconv.SparseSequential(
            SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
            BatchNorm1d(16),
            nn.ReLU(),
            SubMConv3d(16, 16, 3, indice_key="subm0"),
            BatchNorm1d(16),
            nn.ReLU(),
            SpConv3d(16, 32, 3, 2,
                     padding=1),  # [1600, 1200, 41] -> [800, 600, 21]
            BatchNorm1d(32),
            nn.ReLU(),
            SubMConv3d(32, 32, 3, indice_key="subm1"),
            BatchNorm1d(32),
            nn.ReLU(),
            SubMConv3d(32, 32, 3, indice_key="subm1"),
            BatchNorm1d(32),
            nn.ReLU(),
            SpConv3d(32, 64, 3, 2,
                     padding=1),  # [800, 600, 21] -> [400, 300, 11]
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm2"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm2"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm2"),
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, 3, 2,
                     padding=[0, 1, 1]),  # [400, 300, 11] -> [200, 150, 5]
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm3"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm3"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm3"),
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, (3, 1, 1),
                     (2, 1, 1)),  # [200, 150, 5] -> [200, 150, 2]
            BatchNorm1d(64),
            nn.ReLU(),
        )
        self.max_batch_size = 6
        # self.grid = torch.full([self.max_batch_size, *sparse_shape], -1, dtype=torch.int32).cuda()
    def forward(self, voxel_features, coors, batch_size):
        """Scatter voxels, run the sparse pyramid, densify to [N, C*D, H, W]."""
        # coors[:, 1] += 1
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        # t = time.time()
        # torch.cuda.synchronize()
        ret = self.middle_conv(ret)
        # torch.cuda.synchronize()
        # print("spconv forward time", time.time() - t)
        ret = ret.dense()
        N, C, D, H, W = ret.shape
        # Fold remaining depth into channels to form the BEV feature map.
        ret = ret.view(N, C * D, H, W)
        return ret
@register_middle
class SpMiddleFHDPeople(nn.Module):
    """Sparse 3D middle feature extractor (FHD "people" variant).

    Consumes per-voxel features plus their integer grid coordinates, runs a
    stack of submanifold and strided sparse 3D convolutions, densifies the
    result and folds the depth axis into channels to yield a BEV feature map.

    Note: ``num_filters_down1`` / ``num_filters_down2`` are accepted for
    interface compatibility with sibling classes but are not used here.
    """

    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 num_filters_down1=[64],
                 num_filters_down2=[64, 64],
                 name='SpMiddleFHD'):
        super(SpMiddleFHDPeople, self).__init__()
        self.name = name
        # Only BatchNorm1d and the two sparse 3D conv types are used below;
        # the 2D conv / transpose-conv aliases carried over from the original
        # template were dead code and have been removed (cf. SpMiddleFHDHRZ).
        if use_norm:
            # Conv bias is redundant when the conv is followed by a norm layer.
            BatchNorm1d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
            SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
        else:
            BatchNorm1d = Empty
            SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
            SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
        # One cell of headroom on the vertical axis; shape order is [D, H, W].
        sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
        print(sparse_shape)
        self.sparse_shape = sparse_shape
        self.voxel_output_shape = output_shape
        self.middle_conv = spconv.SparseSequential(
            SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
            BatchNorm1d(16),
            nn.ReLU(),
            SubMConv3d(16, 16, 3, indice_key="subm0"),
            BatchNorm1d(16),
            nn.ReLU(),
            SpConv3d(16, 32, 3, 2,
                     padding=1),  # [1600, 1200, 21] -> [800, 600, 11]
            BatchNorm1d(32),
            nn.ReLU(),
            SubMConv3d(32, 32, 3, indice_key="subm1"),
            BatchNorm1d(32),
            nn.ReLU(),
            SubMConv3d(32, 32, 3, indice_key="subm1"),
            BatchNorm1d(32),
            nn.ReLU(),
            SpConv3d(32, 64, 3, 2,
                     padding=[0, 1, 1]),  # [800, 600, 11] -> [400, 300, 5]
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm2"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm2"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm2"),
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, (3, 1, 1),
                     (2, 1, 1)),  # [400, 300, 5] -> [400, 300, 2]
            BatchNorm1d(64),
            nn.ReLU(),
        )
        self.max_batch_size = 6
        # self.grid = torch.full([self.max_batch_size, *sparse_shape], -1, dtype=torch.int32).cuda()

    def forward(self, voxel_features, coors, batch_size):
        """Return a dense (N, C*D, H, W) BEV feature map."""
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.middle_conv(ret)
        ret = ret.dense()
        N, C, D, H, W = ret.shape
        # Fold the depth axis into the channel axis.
        ret = ret.view(N, C * D, H, W)
        return ret
@register_middle
class SpMiddle2K(nn.Module):
    """Sparse 3D middle extractor for a double-resolution ("2K") voxel grid.

    Runs a deep stack of submanifold and strided sparse 3D convolutions over
    the voxelized point cloud, then densifies the output and folds the depth
    axis into channels to produce a BEV feature map.

    Note: ``num_filters_down1`` / ``num_filters_down2`` are accepted for
    interface compatibility with sibling classes but are not used here.
    """
    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 num_filters_down1=[64],
                 num_filters_down2=[64, 64],
                 name='SpMiddle2K'):
        super(SpMiddle2K, self).__init__()
        self.name = name
        if use_norm:
            # With normalization enabled the conv bias is redundant.
            BatchNorm2d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
            BatchNorm1d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            Conv2d = change_default_args(bias=False)(nn.Conv2d)
            SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
            SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
            ConvTranspose2d = change_default_args(bias=False)(
                nn.ConvTranspose2d)
        else:
            BatchNorm2d = Empty
            BatchNorm1d = Empty
            Conv2d = change_default_args(bias=True)(nn.Conv2d)
            SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
            SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
            ConvTranspose2d = change_default_args(bias=True)(
                nn.ConvTranspose2d)
        # One cell of headroom on the vertical axis; shape order is [D, H, W].
        sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
        # sparse_shape[0] = 11
        print(sparse_shape)
        self.sparse_shape = sparse_shape
        self.voxel_output_shape = output_shape
        # input: # [1600, 1200, 41]
        self.middle_conv = spconv.SparseSequential(
            SubMConv3d(
                num_input_features, 8, 3,
                indice_key="subm0"),  # [3200, 2400, 81] -> [1600, 1200, 41]
            BatchNorm1d(8),
            nn.ReLU(),
            SubMConv3d(8, 8, 3, indice_key="subm0"),
            BatchNorm1d(8),
            nn.ReLU(),
            SpConv3d(8, 16, 3, 2,
                     padding=1),  # [1600, 1200, 41] -> [800, 600, 21]
            BatchNorm1d(16),
            nn.ReLU(),
            SubMConv3d(16, 16, 3, indice_key="subm1"),
            BatchNorm1d(16),
            nn.ReLU(),
            SubMConv3d(16, 16, 3, indice_key="subm1"),
            BatchNorm1d(16),
            nn.ReLU(),
            SpConv3d(16, 32, 3, 2,
                     padding=1),  # [1600, 1200, 41] -> [800, 600, 21]
            BatchNorm1d(32),
            nn.ReLU(),
            SubMConv3d(32, 32, 3, indice_key="subm2"),
            BatchNorm1d(32),
            nn.ReLU(),
            SubMConv3d(32, 32, 3, indice_key="subm2"),
            BatchNorm1d(32),
            nn.ReLU(),
            SpConv3d(32, 64, 3, 2,
                     padding=1),  # [800, 600, 21] -> [400, 300, 11]
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm3"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm3"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm3"),
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, 3, 2,
                     padding=[0, 1, 1]),  # [400, 300, 11] -> [200, 150, 5]
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm4"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm4"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm4"),
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, (3, 1, 1),
                     (2, 1, 1)),  # [200, 150, 5] -> [200, 150, 2]
            BatchNorm1d(64),
            nn.ReLU(),
        )
        self.max_batch_size = 3
        # Preallocated index grid reused by every forward call.
        # NOTE(review): allocated on CUDA at construction time, so this class
        # requires a GPU even to be instantiated — confirm intended.
        self.grid = torch.full([self.max_batch_size, *sparse_shape],
                               -1,
                               dtype=torch.int32).cuda()
    def forward(self, voxel_features, coors, batch_size):
        """Return a dense (N, C*D, H, W) BEV feature map."""
        # coors[:, 1] += 1
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size, self.grid)
        # t = time.time()
        # torch.cuda.synchronize()
        ret = self.middle_conv(ret)
        # torch.cuda.synchronize()
        # print("spconv forward time", time.time() - t)
        ret = ret.dense()
        N, C, D, H, W = ret.shape
        # Fold the depth axis into the channel axis.
        ret = ret.view(N, C * D, H, W)
        return ret
@register_middle
class SpMiddleFHDLite(nn.Module):
    """Lightweight sparse 3D middle extractor.

    Uses only strided sparse convolutions (no submanifold refinement
    layers), densifies the result and folds the depth axis into channels to
    produce a BEV feature map.

    Note: ``num_filters_down1`` / ``num_filters_down2`` are accepted for
    interface compatibility with sibling classes but are not used here.
    """

    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 num_filters_down1=[64],
                 num_filters_down2=[64, 64],
                 name='SpMiddleFHDLite'):
        super(SpMiddleFHDLite, self).__init__()
        self.name = name
        # Only BatchNorm1d and SparseConv3d are used below; the 2D conv,
        # transpose-conv and submanifold aliases carried over from the
        # original template were dead code and have been removed.
        if use_norm:
            # Conv bias is redundant when the conv is followed by a norm layer.
            BatchNorm1d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
        else:
            BatchNorm1d = Empty
            SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
        # One cell of headroom on the vertical axis; shape order is [D, H, W].
        sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
        print(sparse_shape)
        self.sparse_shape = sparse_shape
        self.voxel_output_shape = output_shape
        # input: # [1600, 1200, 41]
        self.middle_conv = spconv.SparseSequential(
            SpConv3d(num_input_features, 16, 3, 2,
                     padding=1),  # [1600, 1200, 41] -> [800, 600, 21]
            BatchNorm1d(16),
            nn.ReLU(),
            SpConv3d(16, 32, 3, 2,
                     padding=1),  # [800, 600, 21] -> [400, 300, 11]
            BatchNorm1d(32),
            nn.ReLU(),
            SpConv3d(32, 64, 3, 2,
                     padding=[0, 1, 1]),  # [400, 300, 11] -> [200, 150, 5]
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, (3, 1, 1),
                     (2, 1, 1)),  # [200, 150, 5] -> [200, 150, 2]
            BatchNorm1d(64),
            nn.ReLU(),
        )

    def forward(self, voxel_features, coors, batch_size):
        """Return a dense (N, C*D, H, W) BEV feature map."""
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.middle_conv(ret)
        ret = ret.dense()
        N, C, D, H, W = ret.shape
        # Fold the depth axis into the channel axis.
        ret = ret.view(N, C * D, H, W)
        return ret
@register_middle
class SpMiddleFHDLiteHRZ(nn.Module):
    """Lightweight sparse 3D middle extractor for a high-resolution-Z grid.

    Same structure as ``SpMiddleFHDLite`` (strided sparse convs only) but
    with one extra vertical downsampling stage to handle the taller grid.

    Note: ``num_filters_down1`` / ``num_filters_down2`` are accepted for
    interface compatibility with sibling classes but are not used here.
    """

    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 num_filters_down1=[64],
                 num_filters_down2=[64, 64],
                 # NOTE(review): default looks copy-pasted from
                 # SpMiddleFHDLite; kept unchanged for checkpoint/interface
                 # compatibility.
                 name='SpMiddleFHDLite'):
        super(SpMiddleFHDLiteHRZ, self).__init__()
        self.name = name
        # Only BatchNorm1d and SparseConv3d are used below; the 2D conv,
        # transpose-conv and submanifold aliases carried over from the
        # original template were dead code and have been removed.
        if use_norm:
            # Conv bias is redundant when the conv is followed by a norm layer.
            BatchNorm1d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
        else:
            BatchNorm1d = Empty
            SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
        # One cell of headroom on the vertical axis; shape order is [D, H, W].
        sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
        print(sparse_shape)
        self.sparse_shape = sparse_shape
        self.voxel_output_shape = output_shape
        self.middle_conv = spconv.SparseSequential(
            SpConv3d(num_input_features, 32, 3, 2,
                     padding=1),  # [1600, 1200, 81] -> [800, 600, 41]
            BatchNorm1d(32),
            nn.ReLU(),
            SpConv3d(32, 64, 3, 2,
                     padding=1),  # [800, 600, 41] -> [400, 300, 21]
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, 3, 2,
                     padding=1),  # [400, 300, 21] -> [200, 150, 11]
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, (3, 1, 1),
                     (2, 1, 1)),  # [200, 150, 11] -> [200, 150, 5]
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, (3, 1, 1),
                     (2, 1, 1)),  # [200, 150, 5] -> [200, 150, 2]
            BatchNorm1d(64),
            nn.ReLU(),
        )

    def forward(self, voxel_features, coors, batch_size):
        """Return a dense (N, C*D, H, W) BEV feature map."""
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.middle_conv(ret)
        ret = ret.dense()
        N, C, D, H, W = ret.shape
        # Fold the depth axis into the channel axis.
        ret = ret.view(N, C * D, H, W)
        return ret
@register_middle
class SpMiddleFHDHRZ(nn.Module):
    """Sparse 3D middle extractor (FHD) for a high-resolution-Z voxel grid.

    Alternates submanifold refinement convs with strided sparse convs, then
    densifies the output and folds the depth axis into channels to produce a
    BEV feature map.

    Note: ``num_filters_down1`` / ``num_filters_down2`` are accepted for
    interface compatibility with sibling classes but are not used here.
    """
    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 num_filters_down1=[64],
                 num_filters_down2=[64, 64],
                 name='SpMiddleFHD'):
        super(SpMiddleFHDHRZ, self).__init__()
        self.name = name
        if use_norm:
            # Conv bias is redundant when the conv is followed by a norm layer.
            BatchNorm1d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
            SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
        else:
            BatchNorm1d = Empty
            SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
            SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
        # One cell of headroom on the vertical axis; shape order is [D, H, W].
        sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
        # sparse_shape[0] = 11
        print(sparse_shape)
        self.sparse_shape = sparse_shape
        self.voxel_output_shape = output_shape
        # input: # [1600, 1200, 41]
        self.middle_conv = spconv.SparseSequential(
            SubMConv3d(num_input_features, 16, 3, indice_key="subm0"),
            BatchNorm1d(16),
            nn.ReLU(),
            SubMConv3d(16, 16, 3, indice_key="subm0"),
            BatchNorm1d(16),
            nn.ReLU(),
            SpConv3d(16, 32, 3, 2,
                     padding=1),  # [1600, 1200, 81] -> [800, 600, 41]
            BatchNorm1d(32),
            nn.ReLU(),
            SubMConv3d(32, 32, 3, indice_key="subm1"),
            BatchNorm1d(32),
            nn.ReLU(),
            SubMConv3d(32, 32, 3, indice_key="subm1"),
            BatchNorm1d(32),
            nn.ReLU(),
            SpConv3d(32, 64, 3, 2,
                     padding=1),  # [800, 600, 41] -> [400, 300, 21]
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm2"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm2"),
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, 3, 2,
                     padding=1),  # [400, 300, 21] -> [200, 150, 11]
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm3"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm3"),
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, (3, 1, 1),
                     (2, 1, 1)),  # [200, 150, 11] -> [200, 150, 5]
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm4"),
            BatchNorm1d(64),
            nn.ReLU(),
            SubMConv3d(64, 64, 3, indice_key="subm4"),
            BatchNorm1d(64),
            nn.ReLU(),
            SpConv3d(64, 64, (3, 1, 1),
                     (2, 1, 1)),  # [200, 150, 5] -> [200, 150, 2]
            BatchNorm1d(64),
            nn.ReLU(),
        )
    def forward(self, voxel_features, coors, batch_size):
        """Return a dense (N, C*D, H, W) BEV feature map."""
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.middle_conv(ret)
        ret = ret.dense()
        N, C, D, H, W = ret.shape
        # Fold the depth axis into the channel axis.
        ret = ret.view(N, C * D, H, W)
        return ret
| 25,183 | 38.166407 | 100 | py |
second.pytorch | second.pytorch-master/second/pytorch/models/rpn.py | import time
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models import resnet
from torchplus.nn import Empty, GroupNorm, Sequential
from torchplus.tools import change_default_args
# Global name -> class registry for RPN implementations.
REGISTERED_RPN_CLASSES = {}


def register_rpn(cls, name=None):
    """Class decorator: add *cls* to the RPN registry.

    The registry key defaults to the class's own name; registering the same
    key twice is an error.
    """
    key = cls.__name__ if name is None else name
    assert key not in REGISTERED_RPN_CLASSES, f"exist class: {REGISTERED_RPN_CLASSES}"
    REGISTERED_RPN_CLASSES[key] = cls
    return cls
def get_rpn_class(name):
    """Return the RPN class registered under *name*.

    Raises AssertionError (listing the available classes) when *name* was
    never registered.
    """
    assert name in REGISTERED_RPN_CLASSES, f"available class: {REGISTERED_RPN_CLASSES}"
    return REGISTERED_RPN_CLASSES[name]
@register_rpn
class RPN(nn.Module):
    """Three-stage conv/deconv backbone with detection heads (SECOND v1.0).

    Deprecated; kept only so old checkpoints keep loading. Each stage
    downsamples with a strided conv and is upsampled back by a matching
    transposed conv; the three upsampled maps are concatenated before the
    1x1 prediction heads.
    """

    def __init__(self,
                 use_norm=True,
                 num_class=2,
                 layer_nums=(3, 5, 5),
                 layer_strides=(2, 2, 2),
                 num_filters=(128, 128, 256),
                 upsample_strides=(1, 2, 4),
                 num_upsample_filters=(256, 256, 256),
                 num_input_features=128,
                 num_anchor_per_loc=2,
                 encode_background_as_zeros=True,
                 use_direction_classifier=True,
                 use_groupnorm=False,
                 num_groups=32,
                 box_code_size=7,
                 num_direction_bins=2,
                 name='rpn'):
        """deprecated. exists for checkpoint backward compilability (SECOND v1.0)
        """
        super(RPN, self).__init__()
        self._num_anchor_per_loc = num_anchor_per_loc
        self._use_direction_classifier = use_direction_classifier
        # BUGFIX: self._use_rc_net was read below and in forward() but was
        # never assigned, so constructing this class raised AttributeError.
        # The rc-net branch is a leftover of a removed feature; disable it.
        self._use_rc_net = False
        assert len(layer_nums) == 3
        assert len(layer_strides) == len(layer_nums)
        assert len(num_filters) == len(layer_nums)
        assert len(upsample_strides) == len(layer_nums)
        assert len(num_upsample_filters) == len(layer_nums)
        upsample_strides = [
            np.round(u).astype(np.int64) for u in upsample_strides
        ]
        # Every stage's (downsample / upsample) ratio must agree so the three
        # upsampled maps share one spatial resolution and can be concatenated.
        factors = []
        for i in range(len(layer_nums)):
            assert int(np.prod(
                layer_strides[:i + 1])) % upsample_strides[i] == 0
            factors.append(
                np.prod(layer_strides[:i + 1]) // upsample_strides[i])
        assert all([x == factors[0] for x in factors])
        if use_norm:
            if use_groupnorm:
                BatchNorm2d = change_default_args(
                    num_groups=num_groups, eps=1e-3)(GroupNorm)
            else:
                BatchNorm2d = change_default_args(
                    eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
            # Conv bias is redundant when the conv is followed by a norm layer.
            Conv2d = change_default_args(bias=False)(nn.Conv2d)
            ConvTranspose2d = change_default_args(bias=False)(
                nn.ConvTranspose2d)
        else:
            BatchNorm2d = Empty
            Conv2d = change_default_args(bias=True)(nn.Conv2d)
            ConvTranspose2d = change_default_args(bias=True)(
                nn.ConvTranspose2d)
        # note that when stride > 1, conv2d with same padding isn't
        # equal to pad-conv2d. we should use pad-conv2d.
        block2_input_filters = num_filters[0]
        self.block1 = Sequential(
            nn.ZeroPad2d(1),
            Conv2d(
                num_input_features, num_filters[0], 3,
                stride=layer_strides[0]),
            BatchNorm2d(num_filters[0]),
            nn.ReLU(),
        )
        for i in range(layer_nums[0]):
            self.block1.add(
                Conv2d(num_filters[0], num_filters[0], 3, padding=1))
            self.block1.add(BatchNorm2d(num_filters[0]))
            self.block1.add(nn.ReLU())
        self.deconv1 = Sequential(
            ConvTranspose2d(
                num_filters[0],
                num_upsample_filters[0],
                upsample_strides[0],
                stride=upsample_strides[0]),
            BatchNorm2d(num_upsample_filters[0]),
            nn.ReLU(),
        )
        self.block2 = Sequential(
            nn.ZeroPad2d(1),
            Conv2d(
                block2_input_filters,
                num_filters[1],
                3,
                stride=layer_strides[1]),
            BatchNorm2d(num_filters[1]),
            nn.ReLU(),
        )
        for i in range(layer_nums[1]):
            self.block2.add(
                Conv2d(num_filters[1], num_filters[1], 3, padding=1))
            self.block2.add(BatchNorm2d(num_filters[1]))
            self.block2.add(nn.ReLU())
        self.deconv2 = Sequential(
            ConvTranspose2d(
                num_filters[1],
                num_upsample_filters[1],
                upsample_strides[1],
                stride=upsample_strides[1]),
            BatchNorm2d(num_upsample_filters[1]),
            nn.ReLU(),
        )
        self.block3 = Sequential(
            nn.ZeroPad2d(1),
            Conv2d(num_filters[1], num_filters[2], 3, stride=layer_strides[2]),
            BatchNorm2d(num_filters[2]),
            nn.ReLU(),
        )
        for i in range(layer_nums[2]):
            self.block3.add(
                Conv2d(num_filters[2], num_filters[2], 3, padding=1))
            self.block3.add(BatchNorm2d(num_filters[2]))
            self.block3.add(nn.ReLU())
        self.deconv3 = Sequential(
            ConvTranspose2d(
                num_filters[2],
                num_upsample_filters[2],
                upsample_strides[2],
                stride=upsample_strides[2]),
            BatchNorm2d(num_upsample_filters[2]),
            nn.ReLU(),
        )
        if encode_background_as_zeros:
            num_cls = num_anchor_per_loc * num_class
        else:
            # One extra channel per anchor for the explicit background class.
            num_cls = num_anchor_per_loc * (num_class + 1)
        self.conv_cls = nn.Conv2d(sum(num_upsample_filters), num_cls, 1)
        self.conv_box = nn.Conv2d(
            sum(num_upsample_filters), num_anchor_per_loc * box_code_size, 1)
        if use_direction_classifier:
            self.conv_dir_cls = nn.Conv2d(
                sum(num_upsample_filters),
                num_anchor_per_loc * num_direction_bins, 1)
        if self._use_rc_net:
            self.conv_rc = nn.Conv2d(
                sum(num_upsample_filters), num_anchor_per_loc * box_code_size,
                1)

    def forward(self, x):
        """Return box/class (and optionally direction) predictions.

        All prediction maps are permuted to channels-last, i.e.
        [N, H, W, C].
        """
        x = self.block1(x)
        up1 = self.deconv1(x)
        x = self.block2(x)
        up2 = self.deconv2(x)
        x = self.block3(x)
        up3 = self.deconv3(x)
        # Fuse all three resolutions along the channel axis.
        x = torch.cat([up1, up2, up3], dim=1)
        box_preds = self.conv_box(x)
        cls_preds = self.conv_cls(x)
        # [N, C, y(H), x(W)] -> [N, H, W, C]
        box_preds = box_preds.permute(0, 2, 3, 1).contiguous()
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        ret_dict = {
            "box_preds": box_preds,
            "cls_preds": cls_preds,
        }
        if self._use_direction_classifier:
            dir_cls_preds = self.conv_dir_cls(x)
            dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
            ret_dict["dir_cls_preds"] = dir_cls_preds
        if self._use_rc_net:
            rc_preds = self.conv_rc(x)
            rc_preds = rc_preds.permute(0, 2, 3, 1).contiguous()
            ret_dict["rc_preds"] = rc_preds
        return ret_dict
class RPNNoHeadBase(nn.Module):
    """Headless multi-stage RPN backbone.

    Builds ``len(layer_nums)`` downsampling stages (via the abstract
    ``_make_layer``) and, for the last ``len(upsample_strides)`` stages, a
    matching upsampling "deblock". The upsampled maps are concatenated and
    returned together with every intermediate stage output. Subclasses only
    supply ``_make_layer``.
    """
    def __init__(self,
                 use_norm=True,
                 num_class=2,
                 layer_nums=(3, 5, 5),
                 layer_strides=(2, 2, 2),
                 num_filters=(128, 128, 256),
                 upsample_strides=(1, 2, 4),
                 num_upsample_filters=(256, 256, 256),
                 num_input_features=128,
                 num_anchor_per_loc=2,
                 encode_background_as_zeros=True,
                 use_direction_classifier=True,
                 use_groupnorm=False,
                 num_groups=32,
                 box_code_size=7,
                 num_direction_bins=2,
                 name='rpn'):
        """upsample_strides support float: [0.25, 0.5, 1]
        if upsample_strides < 1, conv2d will be used instead of convtranspose2d.
        """
        super(RPNNoHeadBase, self).__init__()
        self._layer_strides = layer_strides
        self._num_filters = num_filters
        self._layer_nums = layer_nums
        self._upsample_strides = upsample_strides
        self._num_upsample_filters = num_upsample_filters
        self._num_input_features = num_input_features
        self._use_norm = use_norm
        self._use_groupnorm = use_groupnorm
        self._num_groups = num_groups
        assert len(layer_strides) == len(layer_nums)
        assert len(num_filters) == len(layer_nums)
        assert len(num_upsample_filters) == len(upsample_strides)
        # Upsampling may cover only the trailing stages; this is the index of
        # the first stage that gets a deblock.
        self._upsample_start_idx = len(layer_nums) - len(upsample_strides)
        # All deblock outputs must land on one common resolution so they can
        # be concatenated in forward().
        must_equal_list = []
        for i in range(len(upsample_strides)):
            must_equal_list.append(upsample_strides[i] / np.prod(
                layer_strides[:i + self._upsample_start_idx + 1]))
        for val in must_equal_list:
            assert val == must_equal_list[0]
        if use_norm:
            if use_groupnorm:
                BatchNorm2d = change_default_args(
                    num_groups=num_groups, eps=1e-3)(GroupNorm)
            else:
                BatchNorm2d = change_default_args(
                    eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
            # Conv bias is redundant when the conv is followed by a norm layer.
            Conv2d = change_default_args(bias=False)(nn.Conv2d)
            ConvTranspose2d = change_default_args(bias=False)(
                nn.ConvTranspose2d)
        else:
            BatchNorm2d = Empty
            Conv2d = change_default_args(bias=True)(nn.Conv2d)
            ConvTranspose2d = change_default_args(bias=True)(
                nn.ConvTranspose2d)
        in_filters = [num_input_features, *num_filters[:-1]]
        blocks = []
        deblocks = []
        for i, layer_num in enumerate(layer_nums):
            block, num_out_filters = self._make_layer(
                in_filters[i],
                num_filters[i],
                layer_num,
                stride=layer_strides[i])
            blocks.append(block)
            if i - self._upsample_start_idx >= 0:
                stride = upsample_strides[i - self._upsample_start_idx]
                if stride >= 1:
                    # Stride >= 1: genuine upsampling via transposed conv.
                    stride = np.round(stride).astype(np.int64)
                    deblock = nn.Sequential(
                        ConvTranspose2d(
                            num_out_filters,
                            num_upsample_filters[i - self._upsample_start_idx],
                            stride,
                            stride=stride),
                        BatchNorm2d(
                            num_upsample_filters[i -
                                                 self._upsample_start_idx]),
                        nn.ReLU(),
                    )
                else:
                    # Fractional stride: further downsampling with a strided
                    # conv instead of a transposed conv.
                    stride = np.round(1 / stride).astype(np.int64)
                    deblock = nn.Sequential(
                        Conv2d(
                            num_out_filters,
                            num_upsample_filters[i - self._upsample_start_idx],
                            stride,
                            stride=stride),
                        BatchNorm2d(
                            num_upsample_filters[i -
                                                 self._upsample_start_idx]),
                        nn.ReLU(),
                    )
                deblocks.append(deblock)
        self._num_out_filters = num_out_filters
        self.blocks = nn.ModuleList(blocks)
        self.deblocks = nn.ModuleList(deblocks)
    @property
    def downsample_factor(self):
        """Overall input-to-output spatial downsampling ratio."""
        factor = np.prod(self._layer_strides)
        if len(self._upsample_strides) > 0:
            factor /= self._upsample_strides[-1]
        return factor
    def _make_layer(self, inplanes, planes, num_blocks, stride=1):
        # Subclasses must return (stage_module, out_channels).
        raise NotImplementedError
    def forward(self, x):
        """Return a dict with "out" (fused map), "up{i}" and "stage{i}"."""
        ups = []
        stage_outputs = []
        for i in range(len(self.blocks)):
            x = self.blocks[i](x)
            stage_outputs.append(x)
            if i - self._upsample_start_idx >= 0:
                ups.append(self.deblocks[i - self._upsample_start_idx](x))
        if len(ups) > 0:
            x = torch.cat(ups, dim=1)
        res = {}
        for i, up in enumerate(ups):
            res[f"up{i}"] = up
        for i, out in enumerate(stage_outputs):
            res[f"stage{i}"] = out
        res["out"] = x
        return res
class RPNBase(RPNNoHeadBase):
    """RPN backbone plus 1x1 prediction heads.

    Extends :class:`RPNNoHeadBase` with box-regression, classification and
    (optionally) direction-bin heads applied to the fused "out" feature map.
    Predictions are reshaped to (N, num_anchor_per_loc, H, W, K) with K the
    per-anchor attribute size.
    """
    def __init__(self,
                 use_norm=True,
                 num_class=2,
                 layer_nums=(3, 5, 5),
                 layer_strides=(2, 2, 2),
                 num_filters=(128, 128, 256),
                 upsample_strides=(1, 2, 4),
                 num_upsample_filters=(256, 256, 256),
                 num_input_features=128,
                 num_anchor_per_loc=2,
                 encode_background_as_zeros=True,
                 use_direction_classifier=True,
                 use_groupnorm=False,
                 num_groups=32,
                 box_code_size=7,
                 num_direction_bins=2,
                 name='rpn'):
        """upsample_strides support float: [0.25, 0.5, 1]
        if upsample_strides < 1, conv2d will be used instead of convtranspose2d.
        """
        super(RPNBase, self).__init__(
            use_norm=use_norm,
            num_class=num_class,
            layer_nums=layer_nums,
            layer_strides=layer_strides,
            num_filters=num_filters,
            upsample_strides=upsample_strides,
            num_upsample_filters=num_upsample_filters,
            num_input_features=num_input_features,
            num_anchor_per_loc=num_anchor_per_loc,
            encode_background_as_zeros=encode_background_as_zeros,
            use_direction_classifier=use_direction_classifier,
            use_groupnorm=use_groupnorm,
            num_groups=num_groups,
            box_code_size=box_code_size,
            num_direction_bins=num_direction_bins,
            name=name)
        self._num_anchor_per_loc = num_anchor_per_loc
        self._num_direction_bins = num_direction_bins
        self._num_class = num_class
        self._use_direction_classifier = use_direction_classifier
        self._box_code_size = box_code_size
        if encode_background_as_zeros:
            num_cls = num_anchor_per_loc * num_class
        else:
            # One extra channel per anchor for the explicit background class.
            num_cls = num_anchor_per_loc * (num_class + 1)
        if len(num_upsample_filters) == 0:
            # No deblocks: heads consume the last stage output directly.
            final_num_filters = self._num_out_filters
        else:
            final_num_filters = sum(num_upsample_filters)
        self.conv_cls = nn.Conv2d(final_num_filters, num_cls, 1)
        self.conv_box = nn.Conv2d(final_num_filters,
                                  num_anchor_per_loc * box_code_size, 1)
        if use_direction_classifier:
            self.conv_dir_cls = nn.Conv2d(
                final_num_filters, num_anchor_per_loc * num_direction_bins, 1)
    def forward(self, x):
        """Return a dict of per-anchor box/class (and direction) predictions."""
        res = super().forward(x)
        x = res["out"]
        box_preds = self.conv_box(x)
        cls_preds = self.conv_cls(x)
        # [N, C, y(H), x(W)]: split C into (anchor, attribute) and move the
        # attribute axis last.
        C, H, W = box_preds.shape[1:]
        box_preds = box_preds.view(-1, self._num_anchor_per_loc,
                                   self._box_code_size, H, W).permute(
                                       0, 1, 3, 4, 2).contiguous()
        cls_preds = cls_preds.view(-1, self._num_anchor_per_loc,
                                   self._num_class, H, W).permute(
                                       0, 1, 3, 4, 2).contiguous()
        # box_preds = box_preds.permute(0, 2, 3, 1).contiguous()
        # cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        ret_dict = {
            "box_preds": box_preds,
            "cls_preds": cls_preds,
        }
        if self._use_direction_classifier:
            dir_cls_preds = self.conv_dir_cls(x)
            dir_cls_preds = dir_cls_preds.view(
                -1, self._num_anchor_per_loc, self._num_direction_bins, H,
                W).permute(0, 1, 3, 4, 2).contiguous()
            # dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
            ret_dict["dir_cls_preds"] = dir_cls_preds
        return ret_dict
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 convolution (the standard ResNet shortcut projection)."""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=1,
                     stride=stride,
                     bias=False)
@register_rpn
class ResNetRPN(RPNBase):
    """RPN whose stages are ResNet BasicBlock stacks.

    ``self.inplanes`` must exist before the base constructor runs, because
    the base __init__ calls ``_make_layer``; -1 marks "not yet initialized"
    and is replaced with the input feature count on the first call.
    """
    def __init__(self, *args, **kw):
        self.inplanes = -1
        super(ResNetRPN, self).__init__(*args, **kw)
        # Standard ResNet weight init: He init for convs, unit scale / zero
        # shift for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # if zero_init_residual:
        # Zero the last BN of each residual block so blocks start as identity.
        for m in self.modules():
            if isinstance(m, resnet.Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, resnet.BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, inplanes, planes, num_blocks, stride=1):
        """Build one stage of BasicBlocks; returns (stage, out_channels).

        Note: the ``inplanes`` argument is ignored in favor of the running
        ``self.inplanes`` channel count.
        """
        if self.inplanes == -1:
            self.inplanes = self._num_input_features
        block = resnet.BasicBlock
        downsample = None
        # Project the shortcut when the spatial size or channel count changes.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, num_blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers), self.inplanes
@register_rpn
class RPNV2(RPNBase):
    """RPN whose stages are plain ZeroPad-Conv-BN-ReLU stacks."""

    def _make_layer(self, inplanes, planes, num_blocks, stride=1):
        """Build one backbone stage; returns (stage_module, out_channels).

        A strided 3x3 conv (with explicit zero padding, since "same" padding
        is not equivalent for stride > 1) downsamples the input, followed by
        ``num_blocks`` stride-1 3x3 convs, each wrapped with norm + ReLU.
        """
        if self._use_norm:
            if self._use_groupnorm:
                BatchNorm2d = change_default_args(
                    num_groups=self._num_groups, eps=1e-3)(GroupNorm)
            else:
                BatchNorm2d = change_default_args(
                    eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
            # Conv bias is redundant when the conv is followed by a norm layer.
            Conv2d = change_default_args(bias=False)(nn.Conv2d)
        else:
            BatchNorm2d = Empty
            Conv2d = change_default_args(bias=True)(nn.Conv2d)
        # The ConvTranspose2d alias defined here previously was never used in
        # this method (upsampling lives in the base class) and was removed.
        block = Sequential(
            nn.ZeroPad2d(1),
            Conv2d(inplanes, planes, 3, stride=stride),
            BatchNorm2d(planes),
            nn.ReLU(),
        )
        for _ in range(num_blocks):
            block.add(Conv2d(planes, planes, 3, padding=1))
            block.add(BatchNorm2d(planes))
            block.add(nn.ReLU())
        return block, planes
@register_rpn
class RPNNoHead(RPNNoHeadBase):
    """Headless RPN with plain ZeroPad-Conv-BN-ReLU stages (same stage
    builder as RPNV2)."""

    def _make_layer(self, inplanes, planes, num_blocks, stride=1):
        """Build one backbone stage; returns (stage_module, out_channels).

        A strided 3x3 conv (with explicit zero padding, since "same" padding
        is not equivalent for stride > 1) downsamples the input, followed by
        ``num_blocks`` stride-1 3x3 convs, each wrapped with norm + ReLU.
        """
        if self._use_norm:
            if self._use_groupnorm:
                BatchNorm2d = change_default_args(
                    num_groups=self._num_groups, eps=1e-3)(GroupNorm)
            else:
                BatchNorm2d = change_default_args(
                    eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
            # Conv bias is redundant when the conv is followed by a norm layer.
            Conv2d = change_default_args(bias=False)(nn.Conv2d)
        else:
            BatchNorm2d = Empty
            Conv2d = change_default_args(bias=True)(nn.Conv2d)
        # The ConvTranspose2d alias defined here previously was never used in
        # this method (upsampling lives in the base class) and was removed.
        block = Sequential(
            nn.ZeroPad2d(1),
            Conv2d(inplanes, planes, 3, stride=stride),
            BatchNorm2d(planes),
            nn.ReLU(),
        )
        for _ in range(num_blocks):
            block.add(Conv2d(planes, planes, 3, padding=1))
            block.add(BatchNorm2d(planes))
            block.add(nn.ReLU())
        return block, planes
| 20,512 | 37.703774 | 87 | py |
second.pytorch | second.pytorch-master/second/pytorch/models/net_multi_head.py | import time
from enum import Enum
from functools import reduce
import contextlib
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from second.pytorch.models.voxelnet import register_voxelnet, VoxelNet
from second.pytorch.models import rpn
class SmallObjectHead(nn.Module):
    """Prediction head for small object classes.

    Refines the incoming feature map with three 3x3 Conv-BN-ReLU layers
    before applying 1x1 heads for per-anchor box regression, classification
    and (optionally) direction bins. Each prediction is flattened to
    (batch, num_anchors, K).
    """

    def __init__(self, num_filters, num_class, num_anchor_per_loc,
                 box_code_size, num_direction_bins, use_direction_classifier,
                 encode_background_as_zeros):
        super().__init__()
        self._num_anchor_per_loc = num_anchor_per_loc
        self._num_direction_bins = num_direction_bins
        self._num_class = num_class
        self._use_direction_classifier = use_direction_classifier
        self._box_code_size = box_code_size
        # Background is either implicit (all-zero scores) or an extra class
        # channel per anchor.
        if encode_background_as_zeros:
            cls_channels = num_anchor_per_loc * num_class
        else:
            cls_channels = num_anchor_per_loc * (num_class + 1)
        # Three refinement layers, all projecting to 64 channels.
        refine = []
        in_ch = num_filters
        for _ in range(3):
            refine.append(nn.Conv2d(in_ch, 64, 3, bias=False, padding=1))
            refine.append(nn.BatchNorm2d(64))
            refine.append(nn.ReLU())
            in_ch = 64
        self.net = nn.Sequential(*refine)
        head_channels = 64
        self.conv_cls = nn.Conv2d(head_channels, cls_channels, 1)
        self.conv_box = nn.Conv2d(head_channels,
                                  num_anchor_per_loc * box_code_size, 1)
        if use_direction_classifier:
            self.conv_dir_cls = nn.Conv2d(
                head_channels, num_anchor_per_loc * num_direction_bins, 1)

    def forward(self, x):
        """Return {"box_preds", "cls_preds"[, "dir_cls_preds"]}, each of
        shape (batch, num_anchors, K)."""
        feat = self.net(x)
        batch = feat.shape[0]
        raw_box = self.conv_box(feat)
        raw_cls = self.conv_cls(feat)
        _, h, w = raw_box.shape[1:]

        def _anchor_last(t, per_anchor):
            # (N, A*K, H, W) -> (N, A, H, W, K)
            return t.view(-1, self._num_anchor_per_loc, per_anchor, h,
                          w).permute(0, 1, 3, 4, 2).contiguous()

        preds = {
            "box_preds": _anchor_last(raw_box, self._box_code_size).view(
                batch, -1, self._box_code_size),
            "cls_preds": _anchor_last(raw_cls, self._num_class).view(
                batch, -1, self._num_class),
        }
        if self._use_direction_classifier:
            raw_dir = self.conv_dir_cls(feat)
            preds["dir_cls_preds"] = _anchor_last(
                raw_dir, self._num_direction_bins).view(
                    batch, -1, self._num_direction_bins)
        return preds
class DefaultHead(nn.Module):
    """Plain single-branch prediction head.

    Applies 1x1 convolutions directly to the incoming feature map to produce
    per-anchor box regressions, class scores and (optionally) direction-bin
    logits, each flattened to (batch, num_anchors, K).
    """

    def __init__(self, num_filters, num_class, num_anchor_per_loc,
                 box_code_size, num_direction_bins, use_direction_classifier,
                 encode_background_as_zeros):
        super().__init__()
        self._num_anchor_per_loc = num_anchor_per_loc
        self._num_direction_bins = num_direction_bins
        self._num_class = num_class
        self._use_direction_classifier = use_direction_classifier
        self._box_code_size = box_code_size
        # Background is either implicit (all-zero scores) or an extra class
        # channel per anchor.
        if encode_background_as_zeros:
            cls_channels = num_anchor_per_loc * num_class
        else:
            cls_channels = num_anchor_per_loc * (num_class + 1)
        self.conv_cls = nn.Conv2d(num_filters, cls_channels, 1)
        self.conv_box = nn.Conv2d(num_filters,
                                  num_anchor_per_loc * box_code_size, 1)
        if use_direction_classifier:
            self.conv_dir_cls = nn.Conv2d(
                num_filters, num_anchor_per_loc * num_direction_bins, 1)

    def forward(self, x):
        """Return {"box_preds", "cls_preds"[, "dir_cls_preds"]}, each of
        shape (batch, num_anchors, K)."""
        batch = x.shape[0]
        raw_box = self.conv_box(x)
        raw_cls = self.conv_cls(x)
        _, h, w = raw_box.shape[1:]

        def _anchor_last(t, per_anchor):
            # (N, A*K, H, W) -> (N, A, H, W, K)
            return t.view(-1, self._num_anchor_per_loc, per_anchor, h,
                          w).permute(0, 1, 3, 4, 2).contiguous()

        preds = {
            "box_preds": _anchor_last(raw_box, self._box_code_size).view(
                batch, -1, self._box_code_size),
            "cls_preds": _anchor_last(raw_cls, self._num_class).view(
                batch, -1, self._num_class),
        }
        if self._use_direction_classifier:
            raw_dir = self.conv_dir_cls(x)
            preds["dir_cls_preds"] = _anchor_last(
                raw_dir, self._num_direction_bins).view(
                    batch, -1, self._num_direction_bins)
        return preds
@register_voxelnet
class VoxelNetNuscenesMultiHead(VoxelNet):
    """Two-head VoxelNet variant for the 10-class nuScenes setup.

    Small classes are predicted from the RPN's first, higher-resolution
    stage ("stage0"); large classes from the final RPN output. Both heads'
    predictions are concatenated along the anchor dimension.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Hard-wired for the 10 nuScenes classes; the RPN must be headless
        # because this class owns the detection heads itself.
        assert self._num_class == 10
        assert isinstance(self.rpn, rpn.RPNNoHead)
        # Class split: small objects are detected on the high-resolution map.
        self.small_classes = ["pedestrian", "traffic_cone", "bicycle", "motorcycle", "barrier"]
        self.large_classes = ["car", "truck", "trailer", "bus", "construction_vehicle"]
        # Total anchors per location, summed over each head's class group.
        small_num_anchor_loc = sum([self.target_assigner.num_anchors_per_location_class(c) for c in self.small_classes])
        large_num_anchor_loc = sum([self.target_assigner.num_anchors_per_location_class(c) for c in self.large_classes])
        # Small head consumes the first RPN stage's feature width ...
        self.small_head = SmallObjectHead(
            num_filters=self.rpn._num_filters[0],
            num_class=self._num_class,
            num_anchor_per_loc=small_num_anchor_loc,
            encode_background_as_zeros=self._encode_background_as_zeros,
            use_direction_classifier=self._use_direction_classifier,
            box_code_size=self._box_coder.code_size,
            num_direction_bins=self._num_direction_bins,
        )
        # ... the large head consumes the concatenated upsampled features.
        self.large_head = DefaultHead(
            num_filters=np.sum(self.rpn._num_upsample_filters),
            num_class=self._num_class,
            num_anchor_per_loc=large_num_anchor_loc,
            encode_background_as_zeros=self._encode_background_as_zeros,
            use_direction_classifier=self._use_direction_classifier,
            box_code_size=self._box_coder.code_size,
            num_direction_bins=self._num_direction_bins,
        )
    def network_forward(self, voxels, num_points, coors, batch_size):
        """Run VFE -> middle extractor -> RPN -> both heads.

        Returns a dict with "box_preds", "cls_preds" and (optionally)
        "dir_cls_preds", each concatenated as [large, small] along dim 1.
        """
        self.start_timer("voxel_feature_extractor")
        voxel_features = self.voxel_feature_extractor(voxels, num_points,
                                                      coors)
        self.end_timer("voxel_feature_extractor")
        self.start_timer("middle forward")
        spatial_features = self.middle_feature_extractor(
            voxel_features, coors, batch_size)
        self.end_timer("middle forward")
        self.start_timer("rpn forward")
        rpn_out = self.rpn(spatial_features)
        r1 = rpn_out["stage0"]
        _, _, H, W = r1.shape
        # Crop a 10%-of-H border off the stage0 map before the small head.
        # NOTE(review): the name suggests a 40x40 crop at the stock input
        # resolution, and the symmetric crop assumes H == W -- confirm for
        # non-square configs.
        cropsize40x40 = np.round(H * 0.1).astype(np.int64)
        r1 = r1[:, :, cropsize40x40:-cropsize40x40, cropsize40x40:-cropsize40x40]
        small = self.small_head(r1)
        large = self.large_head(rpn_out["out"])
        self.end_timer("rpn forward")
        # concated preds MUST match order in class_settings in config.
        res = {
            "box_preds": torch.cat([large["box_preds"], small["box_preds"]], dim=1),
            "cls_preds": torch.cat([large["cls_preds"], small["cls_preds"]], dim=1),
        }
        if self._use_direction_classifier:
            res["dir_cls_preds"] = torch.cat([large["dir_cls_preds"], small["dir_cls_preds"]], dim=1)
        return res
| 8,065 | 44.570621 | 120 | py |
second.pytorch | second.pytorch-master/second/pytorch/models/pointpillars.py | """
PointPillars fork from SECOND.
Code written by Alex Lang and Oscar Beijbom, 2018.
Licensed under MIT License [see LICENSE].
"""
import torch
from torch import nn
from torch.nn import functional as F
from second.pytorch.models.voxel_encoder import get_paddings_indicator, register_vfe
from second.pytorch.models.middle import register_middle
from torchplus.nn import Empty
from torchplus.tools import change_default_args
import numpy as np
class PFNLayer(nn.Module):
    """Pillar Feature Net layer: pointwise linear -> BatchNorm -> ReLU,
    followed by a max-pool over each pillar's points.

    The PointPillars paper uses a single PFNLayer; intermediate layers (if
    stacked) concatenate the pooled pillar feature back onto every point
    feature, doubling their output width.

    :param in_channels: <int>. Number of input channels.
    :param out_channels: <int>. Number of output channels.
    :param use_norm: <bool>. Whether to include BatchNorm.
    :param last_layer: <bool>. If last_layer, there is no concatenation of features.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 use_norm=True,
                 last_layer=False):
        super().__init__()
        self.name = 'PFNLayer'
        self.last_vfe = last_layer
        # Intermediate layers use half the requested width because their
        # output is doubled by the concatenation in forward().
        self.units = out_channels if last_layer else out_channels // 2
        if use_norm:
            norm_cls = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            linear_cls = change_default_args(bias=False)(nn.Linear)
        else:
            norm_cls = Empty
            linear_cls = change_default_args(bias=True)(nn.Linear)
        self.linear = linear_cls(in_channels, self.units)
        self.norm = norm_cls(self.units)

    def forward(self, inputs):
        out = self.linear(inputs)
        # BatchNorm1d normalizes dim 1, so swap (points, channels) around it.
        out = self.norm(out.permute(0, 2, 1).contiguous())
        out = F.relu(out.permute(0, 2, 1).contiguous())
        # Max over the points of each pillar.
        pooled = torch.max(out, dim=1, keepdim=True)[0]
        if self.last_vfe:
            return pooled
        tiled = pooled.repeat(1, inputs.shape[1], 1)
        return torch.cat([out, tiled], dim=2)
@register_vfe
class PillarFeatureNetOld(nn.Module):
    # Legacy pillar encoder kept alongside PillarFeatureNet; see the
    # NOTE(review) in forward() for the behavioral difference.
    def __init__(self,
                 num_input_features=4,
                 use_norm=True,
                 num_filters=(64, ),
                 with_distance=False,
                 voxel_size=(0.2, 0.2, 4),
                 pc_range=(0, -40, -3, 70.4, 40, 1)):
        """
        Pillar Feature Net.
        The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
        similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor.
        :param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
        :param use_norm: <bool>. Whether to include BatchNorm.
        :param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
        :param with_distance: <bool>. Whether to include Euclidean distance to points.
        :param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
        :param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
        """
        super().__init__()
        self.name = 'PillarFeatureNetOld'
        assert len(num_filters) > 0
        # +5 decoration channels: xyz offsets from cluster mean and xy
        # offsets from the pillar center.
        num_input_features += 5
        if with_distance:
            num_input_features += 1
        self._with_distance = with_distance
        # Create PillarFeatureNetOld layers
        num_filters = [num_input_features] + list(num_filters)
        pfn_layers = []
        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            # Only the final PFNLayer skips the feature concatenation.
            if i < len(num_filters) - 2:
                last_layer = False
            else:
                last_layer = True
            pfn_layers.append(
                PFNLayer(
                    in_filters, out_filters, use_norm, last_layer=last_layer))
        self.pfn_layers = nn.ModuleList(pfn_layers)
        # Need pillar (voxel) size and x/y offset in order to calculate pillar offset
        self.vx = voxel_size[0]
        self.vy = voxel_size[1]
        self.x_offset = self.vx / 2 + pc_range[0]
        self.y_offset = self.vy / 2 + pc_range[1]
    def forward(self, features, num_voxels, coors):
        # features: [num_pillars, max_points_per_pillar, C] (padded);
        # num_voxels: true point count per pillar; coors: pillar coords
        # laid out as (batch, z, y, x) judging by the indices used below.
        device = features.device
        dtype = features.dtype
        # Find distance of x, y, and z from cluster center
        points_mean = features[:, :, :3].sum(
            dim=1, keepdim=True) / num_voxels.type_as(features).view(-1, 1, 1)
        f_cluster = features[:, :, :3] - points_mean
        # Find distance of x, y, and z from pillar center
        # NOTE(review): f_center is a *view* of features[:, :, :2], so the
        # in-place subtractions below also overwrite the raw x/y columns of
        # `features` before the concatenation. The non-Old PillarFeatureNet
        # uses zeros_like() to avoid this; presumably this class is kept
        # as-is for compatibility with old checkpoints -- do not "fix".
        f_center = features[:, :, :2]
        f_center[:, :, 0] = f_center[:, :, 0] - (
            coors[:, 3].to(dtype).unsqueeze(1) * self.vx + self.x_offset)
        f_center[:, :, 1] = f_center[:, :, 1] - (
            coors[:, 2].to(dtype).unsqueeze(1) * self.vy + self.y_offset)
        # Combine together feature decorations
        features_ls = [features, f_cluster, f_center]
        if self._with_distance:
            points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True)
            features_ls.append(points_dist)
        features = torch.cat(features_ls, dim=-1)
        # The feature decorations were calculated without regard to whether pillar was empty. Need to ensure that
        # empty pillars remain set to zeros.
        voxel_count = features.shape[1]
        mask = get_paddings_indicator(num_voxels, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(features)
        features *= mask
        # Forward pass through PFNLayers
        for pfn in self.pfn_layers:
            features = pfn(features)
        return features.squeeze()
@register_vfe
class PillarFeatureNet(nn.Module):
    def __init__(self,
                 num_input_features=4,
                 use_norm=True,
                 num_filters=(64, ),
                 with_distance=False,
                 voxel_size=(0.2, 0.2, 4),
                 pc_range=(0, -40, -3, 70.4, 40, 1)):
        """
        Pillar Feature Net.
        The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
        similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor. Unlike PillarFeatureNetOld, the
        pillar-center offsets are written into a fresh tensor, so the raw x/y point features are preserved.
        :param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
        :param use_norm: <bool>. Whether to include BatchNorm.
        :param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
        :param with_distance: <bool>. Whether to include Euclidean distance to points.
        :param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
        :param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
        """
        super().__init__()
        # Fixed copy-paste bug: this class previously reported its name as
        # 'PillarFeatureNetOld'.
        self.name = 'PillarFeatureNet'
        assert len(num_filters) > 0
        # +5 decoration channels: xyz offsets from cluster mean and xy
        # offsets from the pillar center.
        num_input_features += 5
        if with_distance:
            num_input_features += 1
        self._with_distance = with_distance
        # Create PillarFeatureNet layers
        num_filters = [num_input_features] + list(num_filters)
        pfn_layers = []
        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            # Only the final PFNLayer skips the feature concatenation.
            if i < len(num_filters) - 2:
                last_layer = False
            else:
                last_layer = True
            pfn_layers.append(
                PFNLayer(
                    in_filters, out_filters, use_norm, last_layer=last_layer))
        self.pfn_layers = nn.ModuleList(pfn_layers)
        # Need pillar (voxel) size and x/y offset in order to calculate pillar offset
        self.vx = voxel_size[0]
        self.vy = voxel_size[1]
        self.x_offset = self.vx / 2 + pc_range[0]
        self.y_offset = self.vy / 2 + pc_range[1]

    def forward(self, features, num_voxels, coors):
        """Decorate each pillar's points with cluster/center offsets, mask
        out padding points, and run the PFN layers.

        :param features: [num_pillars, max_points, C] padded point tensor.
        :param num_voxels: true point count per pillar.
        :param coors: pillar coordinates, indexed as (batch, z, y, x).
        """
        dtype = features.dtype
        # Find distance of x, y, and z from cluster center
        points_mean = features[:, :, :3].sum(
            dim=1, keepdim=True) / num_voxels.type_as(features).view(-1, 1, 1)
        f_cluster = features[:, :, :3] - points_mean
        # Find distance of x, y, and z from pillar center; a fresh tensor is
        # used so `features` itself is not mutated.
        f_center = torch.zeros_like(features[:, :, :2])
        f_center[:, :, 0] = features[:, :, 0] - (
            coors[:, 3].to(dtype).unsqueeze(1) * self.vx + self.x_offset)
        f_center[:, :, 1] = features[:, :, 1] - (
            coors[:, 2].to(dtype).unsqueeze(1) * self.vy + self.y_offset)
        # Combine together feature decorations
        features_ls = [features, f_cluster, f_center]
        if self._with_distance:
            points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True)
            features_ls.append(points_dist)
        features = torch.cat(features_ls, dim=-1)
        # The feature decorations were calculated without regard to whether pillar was empty. Need to ensure that
        # empty pillars remain set to zeros.
        voxel_count = features.shape[1]
        mask = get_paddings_indicator(num_voxels, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(features)
        features *= mask
        # Forward pass through PFNLayers
        for pfn in self.pfn_layers:
            features = pfn(features)
        return features.squeeze()
@register_vfe
class PillarFeatureNetRadius(nn.Module):
    """Pillar Feature Net variant that replaces each point's raw (x, y)
    with its ground-plane radius before decoration.

    Otherwise identical to PillarFeatureNet: points are decorated with
    cluster-mean offsets and pillar-center offsets, padding points are
    zeroed, and the result is passed through the PFN layers.

    :param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
    :param use_norm: <bool>. Whether to include BatchNorm.
    :param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
    :param with_distance: <bool>. Whether to include Euclidean distance to points.
    :param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
    :param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
    """

    def __init__(self,
                 num_input_features=4,
                 use_norm=True,
                 num_filters=(64, ),
                 with_distance=False,
                 voxel_size=(0.2, 0.2, 4),
                 pc_range=(0, -40, -3, 70.4, 40, 1)):
        super().__init__()
        self.name = 'PillarFeatureNetRadius'
        assert len(num_filters) > 0
        # +5 decoration channels, -1 because (x, y) collapses to a radius.
        num_input_features += 4
        if with_distance:
            num_input_features += 1
        self._with_distance = with_distance
        widths = [num_input_features] + list(num_filters)
        last_idx = len(widths) - 2
        self.pfn_layers = nn.ModuleList([
            PFNLayer(widths[i], widths[i + 1], use_norm,
                     last_layer=(i == last_idx))
            for i in range(len(widths) - 1)
        ])
        # Pillar (voxel) size and x/y offset, needed for pillar-center offsets.
        self.vx = voxel_size[0]
        self.vy = voxel_size[1]
        self.x_offset = self.vx / 2 + pc_range[0]
        self.y_offset = self.vy / 2 + pc_range[1]

    def forward(self, features, num_voxels, coors):
        dtype = features.dtype
        counts = num_voxels.type_as(features).view(-1, 1, 1)
        # Offsets from each pillar's mean point (cluster center).
        mean_xyz = features[:, :, :3].sum(dim=1, keepdim=True) / counts
        cluster_offsets = features[:, :, :3] - mean_xyz
        # Offsets from the geometric pillar center; coors is (batch, z, y, x).
        center_offsets = torch.zeros_like(features[:, :, :2])
        center_offsets[:, :, 0] = features[:, :, 0] - (
            coors[:, 3].to(dtype).unsqueeze(1) * self.vx + self.x_offset)
        center_offsets[:, :, 1] = features[:, :, 1] - (
            coors[:, 2].to(dtype).unsqueeze(1) * self.vy + self.y_offset)
        # Replace raw (x, y) with the ground-plane radius.
        radius = torch.norm(features[:, :, :2], p=2, dim=2, keepdim=True)
        polar_features = torch.cat([radius, features[:, :, 2:]], dim=2)
        decorated = [polar_features, cluster_offsets, center_offsets]
        if self._with_distance:
            decorated.append(
                torch.norm(features[:, :, :3], 2, 2, keepdim=True))
        features = torch.cat(decorated, dim=-1)
        # Zero out decorations computed for padding points so empty slots
        # stay at zero.
        mask = get_paddings_indicator(num_voxels, features.shape[1], axis=0)
        features *= torch.unsqueeze(mask, -1).type_as(features)
        for pfn in self.pfn_layers:
            features = pfn(features)
        return features.squeeze()
@register_vfe
class PillarFeatureNetRadiusHeight(nn.Module):
    # Like PillarFeatureNetRadius, but additionally decorates every point
    # with its pillar's z-extent (max z - min z).
    def __init__(self,
                 num_input_features=4,
                 use_norm=True,
                 num_filters=(64, ),
                 with_distance=False,
                 voxel_size=(0.2, 0.2, 4),
                 pc_range=(0, -40, -3, 70.4, 40, 1)):
        """
        Pillar Feature Net.
        The network prepares the pillar features and performs forward pass through PFNLayers. This net performs a
        similar role to SECOND's second.pytorch.voxelnet.VoxelFeatureExtractor.
        :param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
        :param use_norm: <bool>. Whether to include BatchNorm.
        :param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
        :param with_distance: <bool>. Whether to include Euclidean distance to points.
        :param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
        :param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
        """
        super().__init__()
        self.name = 'PillarFeatureNetRadiusHeight'
        assert len(num_filters) > 0
        # +6 decoration channels (xyz cluster offsets, xy center offsets,
        # pillar height), -1 because raw (x, y) collapses to a radius.
        num_input_features += 6
        num_input_features -= 1 # radius xy->r, z->z
        if with_distance:
            num_input_features += 1
        self._with_distance = with_distance
        # Create PillarFeatureNetOld layers
        num_filters = [num_input_features] + list(num_filters)
        pfn_layers = []
        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            # Only the final PFNLayer skips the feature concatenation.
            if i < len(num_filters) - 2:
                last_layer = False
            else:
                last_layer = True
            pfn_layers.append(
                PFNLayer(
                    in_filters, out_filters, use_norm, last_layer=last_layer))
        self.pfn_layers = nn.ModuleList(pfn_layers)
        # Need pillar (voxel) size and x/y offset in order to calculate pillar offset
        self.vx = voxel_size[0]
        self.vy = voxel_size[1]
        self.x_offset = self.vx / 2 + pc_range[0]
        self.y_offset = self.vy / 2 + pc_range[1]
    def forward(self, features, num_voxels, coors):
        # features: [num_pillars, max_points, C] (padded); coors is indexed
        # as (batch, z, y, x) judging by the columns used below.
        device = features.device
        dtype = features.dtype
        # Find distance of x, y, and z from cluster center
        points_mean = features[:, :, :3].sum(
            dim=1, keepdim=True) / num_voxels.type_as(features).view(-1, 1, 1)
        f_cluster = features[:, :, :3] - points_mean
        # Per-pillar z extent, broadcast to every point of the pillar.
        # NOTE(review): min/max run over padded rows too, so empty slots
        # (zeros) can influence the extent -- the mask below only zeroes the
        # decoration afterwards; confirm this is the intended behavior.
        pp_min = features[:, :, 2:3].min(dim=1, keepdim=True)[0]
        pp_max = features[:, :, 2:3].max(dim=1, keepdim=True)[0]
        pp_height = pp_max - pp_min
        # Find distance of x, y, and z from pillar center
        f_height = torch.zeros_like(features[:, :, :1])
        f_height[:] = pp_height
        f_center = torch.zeros_like(features[:, :, :2])
        f_center[:, :, 0] = features[:, :, 0] - (
            coors[:, 3].to(dtype).unsqueeze(1) * self.vx + self.x_offset)
        f_center[:, :, 1] = features[:, :, 1] - (
            coors[:, 2].to(dtype).unsqueeze(1) * self.vy + self.y_offset)
        # Replace raw (x, y) with the ground-plane radius.
        features_radius = torch.norm(features[:, :, :2], p=2, dim=2, keepdim=True)
        features_radius = torch.cat([features_radius, features[:, :, 2:]], dim=2)
        # Combine together feature decorations
        features_ls = [features_radius, f_cluster, f_center, f_height]
        if self._with_distance:
            points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True)
            features_ls.append(points_dist)
        features = torch.cat(features_ls, dim=-1)
        # The feature decorations were calculated without regard to whether pillar was empty. Need to ensure that
        # empty pillars remain set to zeros.
        voxel_count = features.shape[1]
        mask = get_paddings_indicator(num_voxels, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(features)
        features *= mask
        # Forward pass through PFNLayers
        for pfn in self.pfn_layers:
            features = pfn(features)
        return features.squeeze()
@register_middle
class PointPillarsScatter(nn.Module):
    """Point Pillars' scatter step.

    Converts the learned per-pillar features back into a dense
    [N, C, ny, nx] pseudo image, replacing SECOND's sparse middle
    extractor.

    :param output_shape: ([int]: 4). Required output shape of features;
        only indices 2 (ny) and 3 (nx) are used.
    :param num_input_features: <int>. Number of input feature channels.
    """

    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=64,
                 num_filters_down1=[64],
                 num_filters_down2=[64, 64],
                 name='SpMiddle2K'):
        super().__init__()
        self.name = 'PointPillarsScatter'
        self.output_shape = output_shape
        self.ny = output_shape[2]
        self.nx = output_shape[3]
        self.nchannels = num_input_features

    def forward(self, voxel_features, coords, batch_size):
        canvases = []
        for sample_idx in range(batch_size):
            # Flat [C, ny*nx] canvas, same dtype/device as the features.
            canvas = voxel_features.new_zeros(
                (self.nchannels, self.nx * self.ny))
            # Select this sample's non-empty pillars; coords columns are
            # (batch, z, y, x).
            sample_mask = coords[:, 0] == sample_idx
            sample_coords = coords[sample_mask, :]
            flat_idx = (sample_coords[:, 2] * self.nx +
                        sample_coords[:, 3]).type(torch.long)
            # Scatter the pillar features onto the canvas.
            canvas[:, flat_idx] = voxel_features[sample_mask, :].t()
            canvases.append(canvas)
        # [batch, C, ny*nx] -> [batch, C, ny, nx]
        stacked = torch.stack(canvases, 0)
        return stacked.view(batch_size, self.nchannels, self.ny, self.nx)
| 19,857 | 40.631027 | 117 | py |
second.pytorch | second.pytorch-master/second/pytorch/models/voxel_encoder.py | import time
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torchplus.nn import Empty, GroupNorm, Sequential
from torchplus.tools import change_default_args
# Global registry mapping name -> voxel-feature-encoder class.
REGISTERED_VFE_CLASSES = {}


def register_vfe(cls, name=None):
    """Class decorator: record *cls* in the VFE registry under *name*
    (defaults to the class name) and return *cls* unchanged."""
    global REGISTERED_VFE_CLASSES
    key = cls.__name__ if name is None else name
    assert key not in REGISTERED_VFE_CLASSES, f"exist class: {REGISTERED_VFE_CLASSES}"
    REGISTERED_VFE_CLASSES[key] = cls
    return cls


def get_vfe_class(name):
    """Look up a previously registered VFE class by name."""
    global REGISTERED_VFE_CLASSES
    assert name in REGISTERED_VFE_CLASSES, f"available class: {REGISTERED_VFE_CLASSES}"
    return REGISTERED_VFE_CLASSES[name]
def get_paddings_indicator(actual_num, max_num, axis=0):
    """Build a boolean mask marking the valid (non-padding) slots of a
    padded tensor.

    Args:
        actual_num: int tensor of true element counts, e.g. [3, 4, 2].
        max_num: int, padded size along the masked dimension.
        axis: axis of `actual_num` after which the mask dimension is
            inserted.

    Returns:
        Bool tensor where position j along the new dimension is True iff
        j < actual_num, e.g. [[T,T,T,F,F], [T,T,T,T,F], [T,T,F,F,F]].
    """
    counts = torch.unsqueeze(actual_num, axis + 1)
    # Shape the position indices so they broadcast against `counts`.
    index_shape = [1] * len(counts.shape)
    index_shape[axis + 1] = -1
    positions = torch.arange(
        max_num, dtype=torch.int, device=counts.device).view(index_shape)
    # True where the slot index is below the actual count.
    return counts.int() > positions
class VFELayer(nn.Module):
    """Voxel Feature Encoding layer (VoxelNet): pointwise linear -> BN ->
    ReLU, then the per-voxel max feature is concatenated back onto every
    point feature, doubling the output width."""

    def __init__(self, in_channels, out_channels, use_norm=True, name='vfe'):
        super(VFELayer, self).__init__()
        self.name = name
        # Each branch is half width; concatenation restores out_channels.
        self.units = int(out_channels / 2)
        if use_norm:
            norm_cls = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            linear_cls = change_default_args(bias=False)(nn.Linear)
        else:
            norm_cls = Empty
            linear_cls = change_default_args(bias=True)(nn.Linear)
        self.linear = linear_cls(in_channels, self.units)
        self.norm = norm_cls(self.units)

    def forward(self, inputs):
        # inputs: [K, T, C] -> pointwise: [K, T, units]
        num_points = inputs.shape[1]
        x = self.linear(inputs)
        # BatchNorm1d normalizes dim 1, so swap (points, channels) around it.
        x = self.norm(x.permute(0, 2, 1).contiguous())
        pointwise = F.relu(x.permute(0, 2, 1).contiguous())
        # Per-voxel max, tiled back over the points: [K, 1, units].
        aggregated = torch.max(pointwise, dim=1, keepdim=True)[0]
        tiled = aggregated.repeat(1, num_points, 1)
        # [K, T, 2 * units]
        return torch.cat([pointwise, tiled], dim=2)
@register_vfe
class VoxelFeatureExtractor(nn.Module):
    """Two-layer VoxelNet-style voxel feature extractor.

    Points are decorated with their offset from the voxel's mean point
    (and optionally their distance to the origin), passed through two
    VFELayers plus a final linear+BN+ReLU, then max-pooled per voxel.
    """
    def __init__(self,
                 num_input_features=4,
                 use_norm=True,
                 num_filters=[32, 128],
                 with_distance=False,
                 voxel_size=(0.2, 0.2, 4),
                 pc_range=(0, -40, -3, 70.4, 40, 1),
                 name='VoxelFeatureExtractor'):
        super(VoxelFeatureExtractor, self).__init__()
        self.name = name
        if use_norm:
            BatchNorm1d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            Linear = change_default_args(bias=False)(nn.Linear)
        else:
            BatchNorm1d = Empty
            Linear = change_default_args(bias=True)(nn.Linear)
        # Exactly two VFE layers in this (non-V2) variant.
        assert len(num_filters) == 2
        num_input_features += 3  # add mean features
        if with_distance:
            num_input_features += 1
        self._with_distance = with_distance
        self.vfe1 = VFELayer(num_input_features, num_filters[0], use_norm)
        self.vfe2 = VFELayer(num_filters[0], num_filters[1], use_norm)
        self.linear = Linear(num_filters[1], num_filters[1])
        # var_torch_init(self.linear.weight)
        # var_torch_init(self.linear.bias)
        self.norm = BatchNorm1d(num_filters[1])
    def forward(self, features, num_voxels, coors):
        # features: [concated_num_points, num_voxel_size, 3(4)]
        # num_voxels: [concated_num_points]
        # Mean over the true (unpadded) points of each voxel.
        points_mean = features[:, :, :3].sum(
            dim=1, keepdim=True) / num_voxels.type_as(features).view(-1, 1, 1)
        features_relative = features[:, :, :3] - points_mean
        if self._with_distance:
            points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True)
            features = torch.cat([features, features_relative, points_dist],
                                 dim=-1)
        else:
            features = torch.cat([features, features_relative], dim=-1)
        voxel_count = features.shape[1]
        # Mask keeps padding points at zero after every layer below.
        mask = get_paddings_indicator(num_voxels, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(features)
        # mask = features.max(dim=2, keepdim=True)[0] != 0
        x = self.vfe1(features)
        x *= mask
        x = self.vfe2(x)
        x *= mask
        x = self.linear(x)
        # BatchNorm1d normalizes dim 1, hence the permute round-trip.
        x = self.norm(x.permute(0, 2, 1).contiguous()).permute(0, 2,
                                                               1).contiguous()
        x = F.relu(x)
        x *= mask
        # x: [concated_num_points, num_voxel_size, 128]
        # Final per-voxel feature: max over the voxel's points.
        voxelwise = torch.max(x, dim=1)[0]
        return voxelwise
@register_vfe
class VoxelFeatureExtractorV2(nn.Module):
    """VoxelFeatureExtractor with arbitrary number of VFE. deprecated.

    Generalizes VoxelFeatureExtractor to any number of stacked VFELayers
    (num_filters may have any positive length).
    """
    def __init__(self,
                 num_input_features=4,
                 use_norm=True,
                 num_filters=[32, 128],
                 with_distance=False,
                 voxel_size=(0.2, 0.2, 4),
                 pc_range=(0, -40, -3, 70.4, 40, 1),
                 name='VoxelFeatureExtractor'):
        super(VoxelFeatureExtractorV2, self).__init__()
        self.name = name
        if use_norm:
            BatchNorm1d = change_default_args(
                eps=1e-3, momentum=0.01)(nn.BatchNorm1d)
            Linear = change_default_args(bias=False)(nn.Linear)
        else:
            BatchNorm1d = Empty
            Linear = change_default_args(bias=True)(nn.Linear)
        assert len(num_filters) > 0
        # +3 channels for the offset-from-voxel-mean decoration.
        num_input_features += 3
        if with_distance:
            num_input_features += 1
        self._with_distance = with_distance
        # Chain the layer widths: input -> num_filters[0] -> ... -> [-1].
        num_filters = [num_input_features] + num_filters
        filters_pairs = [[num_filters[i], num_filters[i + 1]]
                         for i in range(len(num_filters) - 1)]
        self.vfe_layers = nn.ModuleList(
            [VFELayer(i, o, use_norm) for i, o in filters_pairs])
        self.linear = Linear(num_filters[-1], num_filters[-1])
        # var_torch_init(self.linear.weight)
        # var_torch_init(self.linear.bias)
        self.norm = BatchNorm1d(num_filters[-1])
    def forward(self, features, num_voxels, coors):
        # features: [concated_num_points, num_voxel_size, 3(4)]
        # num_voxels: [concated_num_points]
        # Mean over the true (unpadded) points of each voxel.
        points_mean = features[:, :, :3].sum(
            dim=1, keepdim=True) / num_voxels.type_as(features).view(-1, 1, 1)
        features_relative = features[:, :, :3] - points_mean
        if self._with_distance:
            points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True)
            features = torch.cat([features, features_relative, points_dist],
                                 dim=-1)
        else:
            features = torch.cat([features, features_relative], dim=-1)
        voxel_count = features.shape[1]
        # Mask keeps padding points at zero after every layer below.
        mask = get_paddings_indicator(num_voxels, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(features)
        for vfe in self.vfe_layers:
            features = vfe(features)
            features *= mask
        features = self.linear(features)
        # BatchNorm1d normalizes dim 1, hence the permute round-trip.
        features = self.norm(features.permute(0, 2, 1).contiguous()).permute(
            0, 2, 1).contiguous()
        features = F.relu(features)
        features *= mask
        # x: [concated_num_points, num_voxel_size, 128]
        # Final per-voxel feature: max over the voxel's points.
        voxelwise = torch.max(features, dim=1)[0]
        return voxelwise
@register_vfe
class SimpleVoxel(nn.Module):
    """Trivial voxel encoder: each voxel is represented by the mean of its
    points' first `num_input_features` channels."""

    def __init__(self,
                 num_input_features=4,
                 use_norm=True,
                 num_filters=[32, 128],
                 with_distance=False,
                 voxel_size=(0.2, 0.2, 4),
                 pc_range=(0, -40, -3, 70.4, 40, 1),
                 name='VoxelFeatureExtractor'):
        # All parameters except num_input_features are accepted only for
        # interface compatibility with the other VFE classes.
        super(SimpleVoxel, self).__init__()
        self.name = name
        self.num_input_features = num_input_features

    def forward(self, features, num_voxels, coors):
        # features: [concated_num_points, num_voxel_size, 3(4)]
        # num_voxels: [concated_num_points]
        # Average over the padded point dimension using the true counts;
        # padding rows are zero so they do not perturb the sum.
        counts = num_voxels.type_as(features).view(-1, 1)
        summed = features[:, :, :self.num_input_features].sum(
            dim=1, keepdim=False)
        return (summed / counts).contiguous()
@register_vfe
class SimpleVoxelRadius(nn.Module):
    """Simple voxel encoder. only keep r, z and reflection feature.

    Each voxel's mean point is reduced to (ground-plane radius, z,
    remaining channels)."""

    def __init__(self,
                 num_input_features=4,
                 use_norm=True,
                 num_filters=(32, 128),
                 with_distance=False,
                 voxel_size=(0.2, 0.2, 4),
                 pc_range=(0, -40, -3, 70.4, 40, 1),
                 name='SimpleVoxelRadius'):
        # Only num_input_features is used; the rest keep the shared VFE
        # constructor interface.
        super(SimpleVoxelRadius, self).__init__()
        self.num_input_features = num_input_features
        self.name = name

    def forward(self, features, num_voxels, coors):
        # features: [concated_num_points, num_voxel_size, 3(4)]
        # num_voxels: [concated_num_points]
        counts = num_voxels.type_as(features).view(-1, 1)
        mean = features[:, :, :self.num_input_features].sum(
            dim=1, keepdim=False) / counts
        # Collapse mean (x, y) into a radius; z is kept because it matters
        # for z-position regression while absolute x/y do not.
        radius = torch.norm(mean[:, :2], p=2, dim=1, keepdim=True)
        return torch.cat([radius, mean[:, 2:self.num_input_features]], dim=1)
| 10,090 | 38.417969 | 87 | py |
second.pytorch | second.pytorch-master/second/pytorch/builder/lr_scheduler_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build DetectionModel training optimizers."""
from torchplus.train import learning_schedules_fastai as lsf
import torch
def build(optimizer_config, optimizer, total_step):
    """Create lr scheduler based on config. note that
    lr_scheduler must accept a optimizer that has been restored.

    Args:
        optimizer_config: A Optimizer proto message.
        optimizer: the (already restored) optimizer to schedule.
        total_step: total number of training steps.

    Returns:
        A learning-rate scheduler.

    Raises:
        ValueError: when using an unsupported optimizer type.
    """
    optimizer_type = optimizer_config.WhichOneof('optimizer')
    if optimizer_type == 'rms_prop_optimizer':
        config = optimizer_config.rms_prop_optimizer
    elif optimizer_type == 'momentum_optimizer':
        config = optimizer_config.momentum_optimizer
    elif optimizer_type == 'adam_optimizer':
        config = optimizer_config.adam_optimizer
    else:
        # The original code fell through to an UnboundLocalError on
        # `lr_scheduler` here; fail with an explicit error instead.
        raise ValueError('Optimizer %s not supported.' % optimizer_type)
    # All supported optimizers carry their schedule in `learning_rate`.
    return _create_learning_rate_scheduler(
        config.learning_rate, optimizer, total_step=total_step)
def _create_learning_rate_scheduler(learning_rate_config, optimizer, total_step):
"""Create optimizer learning rate scheduler based on config.
Args:
learning_rate_config: A LearningRate proto message.
Returns:
A learning rate.
Raises:
ValueError: when using an unsupported input data type.
"""
lr_scheduler = None
learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
if learning_rate_type == 'multi_phase':
config = learning_rate_config.multi_phase
lr_phases = []
mom_phases = []
for phase_cfg in config.phases:
lr_phases.append((phase_cfg.start, phase_cfg.lambda_func))
mom_phases.append((phase_cfg.start, phase_cfg.momentum_lambda_func))
lr_scheduler = lsf.LRSchedulerStep(
optimizer,total_step, lr_phases, mom_phases)
if learning_rate_type == 'one_cycle':
config = learning_rate_config.one_cycle
lr_scheduler = lsf.OneCycle(
optimizer, total_step, config.lr_max, list(config.moms), config.div_factor, config.pct_start)
if learning_rate_type == 'exponential_decay':
config = learning_rate_config.exponential_decay
lr_scheduler = lsf.ExponentialDecay(
optimizer, total_step, config.initial_learning_rate, config.decay_length, config.decay_factor, config.staircase)
if learning_rate_type == 'manual_stepping':
config = learning_rate_config.manual_stepping
lr_scheduler = lsf.ManualStepping(
optimizer, total_step, list(config.boundaries), list(config.rates))
if lr_scheduler is None:
raise ValueError('Learning_rate %s not supported.' % learning_rate_type)
return lr_scheduler | 3,518 | 36.83871 | 118 | py |
second.pytorch | second.pytorch-master/second/pytorch/builder/input_reader_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input reader builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wishes to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
from torch.utils.data import Dataset
from second.builder import dataset_builder
from second.protos import input_reader_pb2
class DatasetWrapper(Dataset):
    """Adapt one of our dataset objects to the torch.utils.data.Dataset
    interface by delegating __len__ and __getitem__."""

    def __init__(self, dataset):
        self._dataset = dataset

    def __len__(self):
        return len(self._dataset)

    def __getitem__(self, idx):
        return self._dataset[idx]

    @property
    def dataset(self):
        # Expose the wrapped dataset for callers needing its full API.
        return self._dataset
def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner=None,
          multi_gpu=False) -> DatasetWrapper:
    """Build a torch-compatible dataset from an InputReader config.

    Args:
        input_reader_config: A input_reader_pb2.InputReader object.
        model_config: model configuration, forwarded to dataset_builder.
        training: whether to build the training-time dataset.
        voxel_generator: voxelizer forwarded to dataset_builder.
        target_assigner: optional target assigner.
        multi_gpu: whether the dataset is consumed by multiple GPUs.

    Returns:
        A DatasetWrapper around the dataset described by the config.

    Raises:
        ValueError: On invalid input reader proto.
    """
    # Guard clause: fail fast on a wrong config type.
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    inner_dataset = dataset_builder.build(
        input_reader_config,
        model_config,
        training,
        voxel_generator,
        target_assigner,
        multi_gpu=multi_gpu)
    return DatasetWrapper(inner_dataset)
| 2,448 | 30 | 80 | py |
second.pytorch | second.pytorch-master/second/pytorch/builder/box_coder_builder.py | import numpy as np
from second.protos import box_coder_pb2
from second.pytorch.core.box_coders import (BevBoxCoderTorch,
GroundBox3dCoderTorch)
def build(box_coder_config):
    """Create a box coder based on its protobuf config.

    (The previous docstring was copy-pasted from the optimizer builder
    and described an optimizer; fixed.)

    Args:
        box_coder_config: A box_coder proto message with a ``box_coder``
            oneof selecting the coder type.

    Returns:
        A GroundBox3dCoderTorch or BevBoxCoderTorch instance.

    Raises:
        ValueError: when the oneof holds an unsupported coder type.
    """
    box_coder_type = box_coder_config.WhichOneof('box_coder')
    if box_coder_type == 'ground_box3d_coder':
        cfg = box_coder_config.ground_box3d_coder
        return GroundBox3dCoderTorch(cfg.linear_dim, cfg.encode_angle_vector)
    if box_coder_type == 'bev_box_coder':
        cfg = box_coder_config.bev_box_coder
        return BevBoxCoderTorch(cfg.linear_dim, cfg.encode_angle_vector,
                                cfg.z_fixed, cfg.h_fixed)
    raise ValueError("unknown box_coder type")
| 969 | 32.448276 | 98 | py |
second.pytorch | second.pytorch-master/second/pytorch/builder/second_builder.py | # Copyright 2017 yanyan. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VoxelNet builder.
"""
from second.protos import second_pb2
from second.pytorch.builder import losses_builder
from second.pytorch.models.voxelnet import LossNormType, get_voxelnet_class
def build(model_cfg: second_pb2.VoxelNet, voxel_generator,
          target_assigner, measure_time=False):
    """Build a SECOND (VoxelNet-family) pytorch network instance.

    Args:
        model_cfg: a second_pb2.VoxelNet proto describing the network.
        voxel_generator: voxelizer; its grid_size defines the dense
            feature-map shape.
        target_assigner: passed through to the network constructor.
        measure_time: enable per-stage timing inside the network.

    Returns:
        The constructed network; the concrete class is selected by
        ``model_cfg.network_class_name``.

    Raises:
        ValueError: if model_cfg has the wrong type.
    """
    if not isinstance(model_cfg, second_pb2.VoxelNet):
        raise ValueError('model_cfg not of type ' 'second_pb2.VoxelNet.')
    vfe_num_filters = list(model_cfg.voxel_feature_extractor.num_filters)
    vfe_with_distance = model_cfg.voxel_feature_extractor.with_distance
    grid_size = voxel_generator.grid_size
    # Dense shape: leading batch dim of 1, grid reversed, and the last
    # VFE filter count as the channel dimension.
    dense_shape = [1] + grid_size[::-1].tolist() + [vfe_num_filters[-1]]
    classes_cfg = model_cfg.target_assigner.class_settings
    num_class = len(classes_cfg)
    use_mcnms = [c.use_multi_class_nms for c in classes_cfg]
    use_rotate_nms = [c.use_rotate_nms for c in classes_cfg]
    # For each NMS knob a non-empty global list on the target assigner
    # overrides the per-class settings (and must cover every class).
    if len(model_cfg.target_assigner.nms_pre_max_sizes) != 0:
        nms_pre_max_sizes = list(model_cfg.target_assigner.nms_pre_max_sizes)
        assert len(nms_pre_max_sizes) == num_class
    else:
        nms_pre_max_sizes = [c.nms_pre_max_size for c in classes_cfg]
    if len(model_cfg.target_assigner.nms_post_max_sizes) != 0:
        nms_post_max_sizes = list(model_cfg.target_assigner.nms_post_max_sizes)
        assert len(nms_post_max_sizes) == num_class
    else:
        nms_post_max_sizes = [c.nms_post_max_size for c in classes_cfg]
    if len(model_cfg.target_assigner.nms_score_thresholds) != 0:
        nms_score_thresholds = list(model_cfg.target_assigner.nms_score_thresholds)
        assert len(nms_score_thresholds) == num_class
    else:
        nms_score_thresholds = [c.nms_score_threshold for c in classes_cfg]
    if len(model_cfg.target_assigner.nms_iou_thresholds) != 0:
        nms_iou_thresholds = list(model_cfg.target_assigner.nms_iou_thresholds)
        assert len(nms_iou_thresholds) == num_class
    else:
        nms_iou_thresholds = [c.nms_iou_threshold for c in classes_cfg]
    # Mixed multi-class / rotate NMS across classes is not implemented:
    # the flags must be homogeneous over all classes.
    assert all(use_mcnms) or all([not b for b in use_mcnms]), "not implemented"
    assert all(use_rotate_nms) or all([not b for b in use_rotate_nms]), "not implemented"
    if all([not b for b in use_mcnms]):
        # Plain (class-agnostic) NMS uses one shared parameter set, so
        # all per-class values must agree.
        assert all([e == nms_pre_max_sizes[0] for e in nms_pre_max_sizes])
        assert all([e == nms_post_max_sizes[0] for e in nms_post_max_sizes])
        assert all([e == nms_score_thresholds[0] for e in nms_score_thresholds])
        assert all([e == nms_iou_thresholds[0] for e in nms_iou_thresholds])
    num_input_features = model_cfg.num_point_features
    # Proto enum value -> loss normalization strategy.
    loss_norm_type_dict = {
        0: LossNormType.NormByNumExamples,
        1: LossNormType.NormByNumPositives,
        2: LossNormType.NormByNumPosNeg,
        3: LossNormType.DontNorm,
    }
    loss_norm_type = loss_norm_type_dict[model_cfg.loss_norm_type]
    losses = losses_builder.build(model_cfg.loss)
    encode_rad_error_by_sin = model_cfg.encode_rad_error_by_sin
    cls_loss_ftor, loc_loss_ftor, cls_weight, loc_weight, _ = losses
    pos_cls_weight = model_cfg.pos_class_weight
    neg_cls_weight = model_cfg.neg_class_weight
    direction_loss_weight = model_cfg.direction_loss_weight
    sin_error_factor = model_cfg.sin_error_factor
    if sin_error_factor == 0:
        # Proto default 0 means "unset"; fall back to a factor of 1.
        sin_error_factor = 1.0
    net = get_voxelnet_class(model_cfg.network_class_name)(
        dense_shape,
        num_class=num_class,
        vfe_class_name=model_cfg.voxel_feature_extractor.module_class_name,
        vfe_num_filters=vfe_num_filters,
        middle_class_name=model_cfg.middle_feature_extractor.module_class_name,
        middle_num_input_features=model_cfg.middle_feature_extractor.num_input_features,
        middle_num_filters_d1=list(
            model_cfg.middle_feature_extractor.num_filters_down1),
        middle_num_filters_d2=list(
            model_cfg.middle_feature_extractor.num_filters_down2),
        rpn_class_name=model_cfg.rpn.module_class_name,
        rpn_num_input_features=model_cfg.rpn.num_input_features,
        rpn_layer_nums=list(model_cfg.rpn.layer_nums),
        rpn_layer_strides=list(model_cfg.rpn.layer_strides),
        rpn_num_filters=list(model_cfg.rpn.num_filters),
        rpn_upsample_strides=list(model_cfg.rpn.upsample_strides),
        rpn_num_upsample_filters=list(model_cfg.rpn.num_upsample_filters),
        use_norm=True,
        use_rotate_nms=all(use_rotate_nms),
        multiclass_nms=all(use_mcnms),
        nms_score_thresholds=nms_score_thresholds,
        nms_pre_max_sizes=nms_pre_max_sizes,
        nms_post_max_sizes=nms_post_max_sizes,
        nms_iou_thresholds=nms_iou_thresholds,
        use_sigmoid_score=model_cfg.use_sigmoid_score,
        encode_background_as_zeros=model_cfg.encode_background_as_zeros,
        use_direction_classifier=model_cfg.use_direction_classifier,
        num_input_features=num_input_features,
        num_groups=model_cfg.rpn.num_groups,
        use_groupnorm=model_cfg.rpn.use_groupnorm,
        with_distance=vfe_with_distance,
        cls_loss_weight=cls_weight,
        loc_loss_weight=loc_weight,
        pos_cls_weight=pos_cls_weight,
        neg_cls_weight=neg_cls_weight,
        direction_loss_weight=direction_loss_weight,
        loss_norm_type=loss_norm_type,
        encode_rad_error_by_sin=encode_rad_error_by_sin,
        loc_loss_ftor=loc_loss_ftor,
        cls_loss_ftor=cls_loss_ftor,
        target_assigner=target_assigner,
        measure_time=measure_time,
        voxel_generator=voxel_generator,
        post_center_range=list(model_cfg.post_center_limit_range),
        dir_offset=model_cfg.direction_offset,
        sin_error_factor=sin_error_factor,
        nms_class_agnostic=model_cfg.nms_class_agnostic,
        num_direction_bins=model_cfg.num_direction_bins,
        direction_limit_offset=model_cfg.direction_limit_offset,
    )
    return net
| 6,589 | 48.179104 | 89 | py |
second.pytorch | second.pytorch-master/second/pytorch/builder/optimizer_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build DetectionModel training optimizers."""
from torchplus.train import learning_schedules
from torchplus.train import optim
import torch
from torch import nn
from torchplus.train.fastai_optim import OptimWrapper, FastAIMixedOptim
from functools import partial
def children(m: nn.Module):
    """Return the immediate child modules of ``m`` as a list."""
    return [child for child in m.children()]
def num_children(m: nn.Module) -> int:
    """Count the immediate child modules of ``m``."""
    return sum(1 for _ in m.children())
# Recursively flatten a module tree into the flat list of its leaf
# modules (modules that themselves have no children).
flatten_model = lambda m: sum(map(flatten_model,m.children()),[]) if num_children(m) else [m]
# Wrap all leaves of `m` in one nn.Sequential so the fastai-style
# optimizer treats the whole network as a single layer group.
get_layer_groups = lambda m: [nn.Sequential(*flatten_model(m))]
def build(optimizer_config, net, name=None, mixed=False, loss_scale=512.0):
    """Create an optimizer for ``net`` based on config.

    Args:
        optimizer_config: optimizer proto message whose ``optimizer`` oneof
            selects rms_prop_optimizer / momentum_optimizer / adam_optimizer.
        net: the nn.Module whose parameters will be optimized.
        name: optional name stored on the optimizer for the checkpoint
            system; defaults to the optimizer type.
        mixed: unused here; kept for backward-compatible interface.
        loss_scale: unused here; kept for backward-compatible interface.

    Returns:
        An OptimWrapper around the configured torch optimizer.

    Raises:
        ValueError: when the optimizer type is unsupported, or moving
            average is requested (not supported with pytorch).
    """
    optimizer_type = optimizer_config.WhichOneof('optimizer')
    if optimizer_type == 'rms_prop_optimizer':
        config = optimizer_config.rms_prop_optimizer
        optimizer_func = partial(
            torch.optim.RMSprop,
            alpha=config.decay,
            momentum=config.momentum_optimizer_value,
            eps=config.epsilon)
    elif optimizer_type == 'momentum_optimizer':
        config = optimizer_config.momentum_optimizer
        # Bug fix: torch.optim.SGD has no `eps` parameter; passing
        # `eps=config.epsilon` (as before) raised TypeError when the
        # partial was eventually called.
        optimizer_func = partial(
            torch.optim.SGD,
            momentum=config.momentum_optimizer_value)
    elif optimizer_type == 'adam_optimizer':
        config = optimizer_config.adam_optimizer
        if optimizer_config.fixed_weight_decay:
            optimizer_func = partial(
                torch.optim.Adam, betas=(0.9, 0.99), amsgrad=config.amsgrad)
        else:
            # regular adam
            optimizer_func = partial(
                torch.optim.Adam, amsgrad=config.amsgrad)
    else:
        # Bug fix: previously an unknown type fell through to the create
        # call with `optimizer_func` unbound (NameError) instead of this
        # intended ValueError.
        raise ValueError('Optimizer %s not supported.' % optimizer_type)
    optimizer = OptimWrapper.create(
        optimizer_func,
        3e-3,
        get_layer_groups(net),
        wd=config.weight_decay,
        true_wd=optimizer_config.fixed_weight_decay,
        bn_wd=True)
    if optimizer_config.use_moving_average:
        raise ValueError('torch don\'t support moving average')
    # Assign a name to the optimizer for the checkpoint system.
    if name is None:
        optimizer.name = optimizer_type
    else:
        optimizer.name = name
    return optimizer
| 3,431 | 33.32 | 110 | py |
second.pytorch | second.pytorch-master/second/pytorch/builder/losses_builder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build localization and classification losses from config."""
from second.pytorch.core import losses
from second.pytorch.core.ghm_loss import GHMCLoss, GHMRLoss
from second.protos import losses_pb2
def build(loss_config):
    """Build classification and localization losses from the config.

    Args:
        loss_config: A losses_pb2.Loss object.

    Returns:
        Tuple of (classification_loss, localization_loss,
        classification_weight, localization_weight, hard_example_miner);
        the miner slot is always None for pytorch.

    Raises:
        ValueError: if a hard example miner is configured (unsupported).
    """
    cls_loss = _build_classification_loss(loss_config.classification_loss)
    loc_loss = _build_localization_loss(loss_config.localization_loss)
    if loss_config.HasField('hard_example_miner'):
        raise ValueError('Pytorch don\'t support HardExampleMiner')
    return (cls_loss, loc_loss,
            loss_config.classification_weight,
            loss_config.localization_weight, None)
def build_faster_rcnn_classification_loss(loss_config):
    """Build a Faster RCNN second-stage classification loss from config.

    Args:
        loss_config: A losses_pb2.ClassificationLoss object.

    Returns:
        Loss based on the config.

    Raises:
        ValueError: On invalid loss_config.
    """
    if not isinstance(loss_config, losses_pb2.ClassificationLoss):
        raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
    loss_type = loss_config.WhichOneof('classification_loss')
    if loss_type == 'weighted_sigmoid':
        return losses.WeightedSigmoidClassificationLoss()
    # Both the explicit 'weighted_softmax' case and the default fall-through
    # use the same anchor-wise softmax loss, so they share one code path.
    config = loss_config.weighted_softmax
    return losses.WeightedSoftmaxClassificationLoss(
        logit_scale=config.logit_scale)
def _build_localization_loss(loss_config):
    """Build a localization loss based on the loss config.

    Args:
        loss_config: A losses_pb2.LocalizationLoss object.

    Returns:
        Loss based on the config.

    Raises:
        ValueError: On invalid loss_config.
    """
    if not isinstance(loss_config, losses_pb2.LocalizationLoss):
        raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.')
    loss_type = loss_config.WhichOneof('localization_loss')

    def _code_weight(cfg):
        # An empty repeated field means "no per-dimension weighting".
        return cfg.code_weight if len(cfg.code_weight) != 0 else None

    if loss_type == 'weighted_l2':
        cfg = loss_config.weighted_l2
        return losses.WeightedL2LocalizationLoss(_code_weight(cfg))
    if loss_type == 'weighted_smooth_l1':
        cfg = loss_config.weighted_smooth_l1
        return losses.WeightedSmoothL1LocalizationLoss(cfg.sigma,
                                                       _code_weight(cfg))
    if loss_type == 'weighted_ghm':
        cfg = loss_config.weighted_ghm
        return GHMRLoss(cfg.mu, cfg.bins, cfg.momentum, _code_weight(cfg))
    raise ValueError('Empty loss config.')
def _build_classification_loss(loss_config):
    """Build a classification loss based on the loss config.

    Args:
        loss_config: A losses_pb2.ClassificationLoss object.

    Returns:
        Loss based on the config.

    Raises:
        ValueError: On invalid loss_config.
    """
    if not isinstance(loss_config, losses_pb2.ClassificationLoss):
        raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
    loss_type = loss_config.WhichOneof('classification_loss')
    if loss_type == 'weighted_sigmoid':
        return losses.WeightedSigmoidClassificationLoss()
    if loss_type == 'weighted_sigmoid_focal':
        cfg = loss_config.weighted_sigmoid_focal
        # Proto default 0 means "alpha unset" -> unweighted focal loss.
        alpha = cfg.alpha if cfg.alpha > 0 else None
        return losses.SigmoidFocalClassificationLoss(
            gamma=cfg.gamma, alpha=alpha)
    if loss_type == 'weighted_softmax_focal':
        cfg = loss_config.weighted_softmax_focal
        alpha = cfg.alpha if cfg.alpha > 0 else None
        return losses.SoftmaxFocalClassificationLoss(
            gamma=cfg.gamma, alpha=alpha)
    if loss_type == 'weighted_ghm':
        cfg = loss_config.weighted_ghm
        return GHMCLoss(bins=cfg.bins, momentum=cfg.momentum)
    if loss_type == 'weighted_softmax':
        cfg = loss_config.weighted_softmax
        return losses.WeightedSoftmaxClassificationLoss(
            logit_scale=cfg.logit_scale)
    if loss_type == 'bootstrapped_sigmoid':
        cfg = loss_config.bootstrapped_sigmoid
        bootstrap_type = 'hard' if cfg.hard_bootstrap else 'soft'
        return losses.BootstrappedSigmoidClassificationLoss(
            alpha=cfg.alpha, bootstrap_type=bootstrap_type)
    raise ValueError('Empty loss config.')
| 6,226 | 31.602094 | 80 | py |
second.pytorch | second.pytorch-master/second/pytorch/utils/__init__.py | import time
import contextlib
import torch
@contextlib.contextmanager
def torch_timer(name=''):
    """Context manager that prints the wall time of the enclosed GPU work.

    CUDA is synchronized before and after the block so the measured
    interval covers queued kernels, not just the Python call time.
    """
    torch.cuda.synchronize()
    start = time.time()
    yield
    torch.cuda.synchronize()
    print(name, "time:", time.time() - start)
second.pytorch | second.pytorch-master/torchplus/tools.py | import functools
import inspect
import sys
from collections import OrderedDict
import numba
import numpy as np
import torch
def get_pos_to_kw_map(func):
    """Map positional index -> name for each POSITIONAL_OR_KEYWORD
    parameter of ``func``, in declaration order."""
    params = inspect.signature(func).parameters.values()
    positional = (p for p in params if p.kind is p.POSITIONAL_OR_KEYWORD)
    return {pos: p.name for pos, p in enumerate(positional)}
def get_kw_to_default_map(func):
    """Map name -> default value for every POSITIONAL_OR_KEYWORD
    parameter of ``func`` that declares a default."""
    sig = inspect.signature(func)
    return {
        p.name: p.default
        for p in sig.parameters.values()
        if p.kind is p.POSITIONAL_OR_KEYWORD and p.default is not p.empty
    }
def change_default_args(**kwargs):
    """Return a class decorator that overrides ``__init__`` defaults.

    The returned decorator subclasses the target and injects the given
    keyword values into ``__init__`` whenever the caller did not supply
    that argument, either by keyword or positionally.
    """
    def layer_wrapper(layer_class):
        class DefaultArgLayer(layer_class):
            def __init__(self, *args, **kw):
                pos_of = {name: pos
                          for pos, name in get_pos_to_kw_map(
                              layer_class.__init__).items()}
                for key, default in kwargs.items():
                    # Position 0 is `self`, so `pos > len(args)` means
                    # "this parameter is not covered by *args".
                    if key not in kw and pos_of[key] > len(args):
                        kw[key] = default
                super().__init__(*args, **kw)
        return DefaultArgLayer
    return layer_wrapper
def torch_to_np_dtype(ttype):
    """Convert a torch dtype to the corresponding numpy dtype.

    Args:
        ttype: a torch dtype (e.g. torch.float32).

    Returns:
        The matching numpy.dtype.

    Raises:
        KeyError: if ``ttype`` has no mapping.
    """
    type_map = {
        torch.float16: np.dtype(np.float16),
        torch.float32: np.dtype(np.float32),
        # Bug fix: this entry previously repeated torch.float16 (which
        # clobbered the half-precision mapping and left torch.float64
        # unmapped); it must map torch.float64.
        torch.float64: np.dtype(np.float64),
        torch.int32: np.dtype(np.int32),
        torch.int64: np.dtype(np.int64),
        torch.uint8: np.dtype(np.uint8),
    }
    return type_map[ttype]
| 1,607 | 27.210526 | 70 | py |
second.pytorch | second.pytorch-master/torchplus/metrics.py | import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class Scalar(nn.Module):
    """Running mean of nonzero scalar values (e.g. a loss metric).

    Exact zeros are skipped so steps that contribute nothing do not
    drag the average down. Buffers move with the module's device.
    """

    def __init__(self):
        super().__init__()
        self.register_buffer('total', torch.FloatTensor([0.0]))
        self.register_buffer('count', torch.FloatTensor([0.0]))

    def forward(self, scalar):
        # Skip exact zeros: they mean "no contribution this step".
        if scalar.ne(0.0):
            self.count += 1
            self.total += scalar.data.float()
        return self.value.cpu()

    @property
    def value(self):
        # NOTE: NaN until the first nonzero update (0 / 0).
        return self.total / self.count

    def clear(self):
        self.total.zero_()
        self.count.zero_()
class Accuracy(nn.Module):
    """Streaming (weighted) classification accuracy.

    Accumulates correct-prediction mass and example mass in buffers so
    ``value`` reflects all batches since the last ``clear()``.
    """
    def __init__(self,
                 dim=1,
                 ignore_idx=-1,
                 threshold=0.5,
                 encode_background_as_zeros=True):
        super().__init__()
        # Running numerator (correct) and denominator (weighted examples).
        self.register_buffer('total', torch.FloatTensor([0.0]))
        self.register_buffer('count', torch.FloatTensor([0.0]))
        self._ignore_idx = ignore_idx
        self._dim = dim
        self._threshold = threshold
        self._encode_background_as_zeros = encode_background_as_zeros
    def forward(self, labels, preds, weights=None):
        # labels: [N, ...]
        # preds: [N, C, ...]
        if self._encode_background_as_zeros:
            # Background has no logit channel: argmax+1 gives the
            # foreground class id; predict background (0) when no
            # sigmoid score clears the threshold.
            scores = torch.sigmoid(preds)
            labels_pred = torch.max(preds, dim=self._dim)[1] + 1
            pred_labels = torch.where((scores > self._threshold).any(self._dim),
                                      labels_pred,
                                      torch.tensor(0).type_as(labels_pred))
        else:
            pred_labels = torch.max(preds, dim=self._dim)[1]
        # Flatten all trailing dims so comparison is elementwise [N, D].
        N, *Ds = labels.shape
        labels = labels.view(N, int(np.prod(Ds)))
        pred_labels = pred_labels.view(N, int(np.prod(Ds)))
        if weights is None:
            # Default weighting: ignore entries labeled ignore_idx.
            weights = (labels != self._ignore_idx).float()
        else:
            weights = weights.float()
        num_examples = torch.sum(weights)
        # Clamp so an all-ignored batch cannot divide by zero.
        num_examples = torch.clamp(num_examples, min=1.0).float()
        # NOTE(review): the correctness sum is NOT masked by `weights`,
        # so ignored entries still count in the numerator — confirm
        # whether this is intended.
        total = torch.sum((pred_labels == labels.long()).float())
        self.count += num_examples
        self.total += total
        return self.value.cpu()
        # return (total / num_examples.data).cpu()
    @property
    def value(self):
        # Running accuracy over everything seen since clear().
        return self.total / self.count
    def clear(self):
        self.total.zero_()
        self.count.zero_()
class Precision(nn.Module):
    """Streaming binary precision: TP / (TP + FP), weighted.

    Positive class is any label / prediction > 0.
    """
    def __init__(self, dim=1, ignore_idx=-1, threshold=0.5):
        super().__init__()
        # Running TP (total) and TP+FP (count).
        self.register_buffer('total', torch.FloatTensor([0.0]))
        self.register_buffer('count', torch.FloatTensor([0.0]))
        self._ignore_idx = ignore_idx
        self._dim = dim
        self._threshold = threshold
    def forward(self, labels, preds, weights=None):
        # labels: [N, ...]
        # preds: [N, C, ...]
        if preds.shape[self._dim] == 1:  # BCE: single logit channel
            pred_labels = (torch.sigmoid(preds) >
                           self._threshold).long().squeeze(self._dim)
        else:
            assert preds.shape[
                self._dim] == 2, "precision only support 2 class"
            pred_labels = torch.max(preds, dim=self._dim)[1]
        # Flatten trailing dims to [N, D] for elementwise comparison.
        N, *Ds = labels.shape
        labels = labels.view(N, int(np.prod(Ds)))
        pred_labels = pred_labels.view(N, int(np.prod(Ds)))
        if weights is None:
            # Default weighting: ignore entries labeled ignore_idx.
            weights = (labels != self._ignore_idx).float()
        else:
            weights = weights.float()
        pred_trues = pred_labels > 0
        pred_falses = pred_labels == 0
        trues = labels > 0
        falses = labels == 0
        true_positives = (weights * (trues & pred_trues).float()).sum()
        true_negatives = (weights * (falses & pred_falses).float()).sum()
        false_positives = (weights * (falses & pred_trues).float()).sum()
        false_negatives = (weights * (trues & pred_falses).float()).sum()
        # Precision denominator: everything predicted positive.
        count = true_positives + false_positives
        # print(count, true_positives)
        if count > 0:
            self.count += count
            self.total += true_positives
        return self.value.cpu()
        # return (total / num_examples.data).cpu()
    @property
    def value(self):
        # Running precision since clear(); NaN before first update.
        return self.total / self.count
    def clear(self):
        self.total.zero_()
        self.count.zero_()
class Recall(nn.Module):
    """Streaming binary recall: TP / (TP + FN), weighted.

    NOTE(review): this class treats the positive class as label == 1,
    while the sibling Precision class uses label > 0 — confirm whether
    the asymmetry is intended for multi-valued labels.
    """
    def __init__(self, dim=1, ignore_idx=-1, threshold=0.5):
        super().__init__()
        # Running TP (total) and TP+FN (count).
        self.register_buffer('total', torch.FloatTensor([0.0]))
        self.register_buffer('count', torch.FloatTensor([0.0]))
        self._ignore_idx = ignore_idx
        self._dim = dim
        self._threshold = threshold
    def forward(self, labels, preds, weights=None):
        # labels: [N, ...]
        # preds: [N, C, ...]
        if preds.shape[self._dim] == 1:  # BCE: single logit channel
            pred_labels = (torch.sigmoid(preds) >
                           self._threshold).long().squeeze(self._dim)
        else:
            assert preds.shape[
                self._dim] == 2, "precision only support 2 class"
            pred_labels = torch.max(preds, dim=self._dim)[1]
        # Flatten trailing dims to [N, D] for elementwise comparison.
        N, *Ds = labels.shape
        labels = labels.view(N, int(np.prod(Ds)))
        pred_labels = pred_labels.view(N, int(np.prod(Ds)))
        if weights is None:
            # Default weighting: ignore entries labeled ignore_idx.
            weights = (labels != self._ignore_idx).float()
        else:
            weights = weights.float()
        pred_trues = pred_labels == 1
        pred_falses = pred_labels == 0
        trues = labels == 1
        falses = labels == 0
        true_positives = (weights * (trues & pred_trues).float()).sum()
        true_negatives = (weights * (falses & pred_falses).float()).sum()
        false_positives = (weights * (falses & pred_trues).float()).sum()
        false_negatives = (weights * (trues & pred_falses).float()).sum()
        # Recall denominator: everything actually positive.
        count = true_positives + false_negatives
        if count > 0:
            self.count += count
            self.total += true_positives
        return self.value.cpu()
        # return (total / num_examples.data).cpu()
    @property
    def value(self):
        # Running recall since clear(); NaN before first update.
        return self.total / self.count
    def clear(self):
        self.total.zero_()
        self.count.zero_()
def _calc_binary_metrics(labels,
scores,
weights=None,
ignore_idx=-1,
threshold=0.5):
pred_labels = (scores > threshold).long()
N, *Ds = labels.shape
labels = labels.view(N, int(np.prod(Ds)))
pred_labels = pred_labels.view(N, int(np.prod(Ds)))
pred_trues = pred_labels > 0
pred_falses = pred_labels == 0
trues = labels > 0
falses = labels == 0
true_positives = (weights * (trues & pred_trues).float()).sum()
true_negatives = (weights * (falses & pred_falses).float()).sum()
false_positives = (weights * (falses & pred_trues).float()).sum()
false_negatives = (weights * (trues & pred_falses).float()).sum()
return true_positives, true_negatives, false_positives, false_negatives
class PrecisionRecall(nn.Module):
    """Streaming precision/recall over a list of score thresholds.

    Keeps per-threshold numerators and denominators in buffers;
    ``value`` returns (precisions, recalls) tensors, one entry per
    threshold in ``thresholds``.
    """
    def __init__(self,
                 dim=1,
                 ignore_idx=-1,
                 thresholds=0.5,
                 use_sigmoid_score=False,
                 encode_background_as_zeros=True):
        super().__init__()
        # Accept a single threshold for convenience.
        if not isinstance(thresholds, (list, tuple)):
            thresholds = [thresholds]
        # One accumulator slot per threshold.
        self.register_buffer('prec_total',
                             torch.FloatTensor(len(thresholds)).zero_())
        self.register_buffer('prec_count',
                             torch.FloatTensor(len(thresholds)).zero_())
        self.register_buffer('rec_total',
                             torch.FloatTensor(len(thresholds)).zero_())
        self.register_buffer('rec_count',
                             torch.FloatTensor(len(thresholds)).zero_())
        self._ignore_idx = ignore_idx
        self._dim = dim
        self._thresholds = thresholds
        self._use_sigmoid_score = use_sigmoid_score
        self._encode_background_as_zeros = encode_background_as_zeros
    def forward(self, labels, preds, weights=None):
        # labels: [N, ...]
        # preds: [N, ..., C]
        if self._encode_background_as_zeros:
            # this don't support softmax
            assert self._use_sigmoid_score is True
            total_scores = torch.sigmoid(preds)
            # scores, label_preds = torch.max(total_scores, dim=1)
        else:
            # With an explicit background channel, drop channel 0 and
            # score only the foreground classes.
            if self._use_sigmoid_score:
                total_scores = torch.sigmoid(preds)[..., 1:]
            else:
                total_scores = F.softmax(preds, dim=-1)[..., 1:]
        """
        if preds.shape[self._dim] == 1:  # BCE
            scores = torch.sigmoid(preds)
        else:
            # assert preds.shape[
            # self._dim] == 2, "precision only support 2 class"
            # TODO: add support for [N, C, ...] format.
            # TODO: add multiclass support
            if self._use_sigmoid_score:
                scores = torch.sigmoid(preds)[:, ..., 1:].sum(-1)
            else:
                scores = F.softmax(preds, dim=self._dim)[:, ..., 1:].sum(-1)
        """
        # Binary foreground score: max over class channels.
        scores = torch.max(total_scores, dim=-1)[0]
        if weights is None:
            # Default weighting: ignore entries labeled ignore_idx.
            weights = (labels != self._ignore_idx).float()
        else:
            weights = weights.float()
        # Accumulate per-threshold counts; skip empty denominators so a
        # batch with no positives/predictions leaves the slot untouched.
        for i, thresh in enumerate(self._thresholds):
            tp, tn, fp, fn = _calc_binary_metrics(labels, scores, weights,
                                                  self._ignore_idx, thresh)
            rec_count = tp + fn
            prec_count = tp + fp
            if rec_count > 0:
                self.rec_count[i] += rec_count
                self.rec_total[i] += tp
            if prec_count > 0:
                self.prec_count[i] += prec_count
                self.prec_total[i] += tp
        return self.value
        # return (total / num_examples.data).cpu()
    @property
    def value(self):
        # Clamp denominators so untouched slots yield 0 instead of NaN.
        prec_count = torch.clamp(self.prec_count, min=1.0)
        rec_count = torch.clamp(self.rec_count, min=1.0)
        return ((self.prec_total / prec_count).cpu(),
                (self.rec_total / rec_count).cpu())
    @property
    def thresholds(self):
        return self._thresholds
    def clear(self):
        self.rec_count.zero_()
        self.prec_count.zero_()
        self.prec_total.zero_()
        self.rec_total.zero_()
| 10,431 | 35.992908 | 80 | py |
second.pytorch | second.pytorch-master/torchplus/__init__.py | from . import train
from . import nn
from . import metrics
from . import tools
from .tools import change_default_args
from torchplus.ops.array_ops import scatter_nd, gather_nd
| 177 | 21.25 | 57 | py |
second.pytorch | second.pytorch-master/torchplus/nn/functional.py | import torch
def one_hot(tensor, depth, dim=-1, on_value=1.0, dtype=torch.float32):
    """One-hot encode ``tensor`` by scattering ``on_value``.

    A new axis of size ``depth`` is appended as the last dimension of
    the output shape; ``dim`` selects the scatter axis.
    """
    out_shape = list(tensor.shape) + [depth]
    encoded = torch.zeros(out_shape, dtype=dtype, device=tensor.device)
    encoded.scatter_(dim, tensor.unsqueeze(dim).long(), on_value)
    return encoded
| 286 | 34.875 | 71 | py |
second.pytorch | second.pytorch-master/torchplus/nn/__init__.py | from torchplus.nn.functional import one_hot
from torchplus.nn.modules.common import Empty, Sequential
from torchplus.nn.modules.normalization import GroupNorm
| 159 | 39 | 57 | py |
second.pytorch | second.pytorch-master/torchplus/nn/modules/common.py | import sys
from collections import OrderedDict
import torch
from torch.nn import functional as F
class Empty(torch.nn.Module):
    """No-op module: forward returns its inputs unchanged.

    Zero args -> None, one arg -> that arg, several -> the args tuple.
    """

    def __init__(self, *args, **kwargs):
        super(Empty, self).__init__()

    def forward(self, *args, **kwargs):
        if not args:
            return None
        if len(args) == 1:
            return args[0]
        return args
class Sequential(torch.nn.Module):
    r"""A sequential container.

    Modules run in the order they were registered. They can be passed
    positionally, as a single OrderedDict, or (python 3.6+) as keyword
    arguments::

        model = Sequential(nn.Conv2d(1, 20, 5), nn.ReLU())
        model = Sequential(OrderedDict([('conv1', nn.Conv2d(1, 20, 5)),
                                        ('relu1', nn.ReLU())]))
        model = Sequential(conv1=nn.Conv2d(1, 20, 5), relu1=nn.ReLU())

    Supports integer indexing (including negative indices), ``len``,
    and appending via :meth:`add`.
    """

    def __init__(self, *args, **kwargs):
        super(Sequential, self).__init__()
        if len(args) == 1 and isinstance(args[0], OrderedDict):
            named = args[0].items()
        else:
            # Positional modules get stringified indices as names.
            named = ((str(i), module) for i, module in enumerate(args))
        for key, module in named:
            self.add_module(key, module)
        for name, module in kwargs.items():
            if sys.version_info < (3, 6):
                raise ValueError("kwargs only supported in py36+")
            if name in self._modules:
                raise ValueError("name exists.")
            self.add_module(name, module)

    def __getitem__(self, idx):
        size = len(self)
        if not -size <= idx < size:
            raise IndexError('index {} is out of range'.format(idx))
        if idx < 0:
            idx += size
        return list(self._modules.values())[idx]

    def __len__(self):
        return len(self._modules)

    def add(self, module, name=None):
        """Append ``module``; auto-name it by position when name is None."""
        if name is None:
            name = str(len(self._modules))
        if name in self._modules:
            raise KeyError("name exists")
        self.add_module(name, module)

    def forward(self, input):
        # Thread the input through every registered module in order.
        out = input
        for module in self._modules.values():
            out = module(out)
        return out
second.pytorch | second.pytorch-master/torchplus/nn/modules/normalization.py | import torch
class GroupNorm(torch.nn.GroupNorm):
    """torch.nn.GroupNorm with (num_channels, num_groups) argument order.

    The upstream constructor takes (num_groups, num_channels); this thin
    subclass swaps the first two positional arguments to match torchplus
    call sites.
    """

    def __init__(self, num_channels, num_groups, eps=1e-5, affine=True):
        super().__init__(num_groups, num_channels, eps=eps, affine=affine)
| 273 | 23.909091 | 72 | py |
second.pytorch | second.pytorch-master/torchplus/train/checkpoint.py | import json
import logging
import os
import signal
from pathlib import Path
import torch
class DelayedKeyboardInterrupt(object):
    """Context manager that defers SIGINT (Ctrl-C) until the block exits.

    While active, SIGINT is recorded instead of raising
    KeyboardInterrupt; on exit the previous handler is restored and
    invoked if a signal arrived. Used to avoid writing half-finished
    checkpoint files.
    """
    def __enter__(self):
        self.signal_received = False
        self.old_handler = signal.signal(signal.SIGINT, self.handler)
    def handler(self, sig, frame):
        # Record the interrupt for replay in __exit__ instead of raising.
        self.signal_received = (sig, frame)
        logging.debug('SIGINT received. Delaying KeyboardInterrupt.')
    def __exit__(self, type, value, traceback):
        signal.signal(signal.SIGINT, self.old_handler)
        if self.signal_received:
            # Replay the deferred interrupt through the original handler.
            self.old_handler(*self.signal_received)
def latest_checkpoint(model_dir, model_name):
    """Return the path of the newest checkpoint for ``model_name``.

    Args:
        model_dir: directory holding checkpoints and checkpoints.json.
        model_name: name of the model whose checkpoint to look up.

    Returns:
        Path string of the latest checkpoint, or None when the index
        file, the model entry, or the checkpoint file itself is missing.
    """
    info_path = Path(model_dir) / "checkpoints.json"
    if not info_path.is_file():
        return None
    ckpt_dict = json.loads(info_path.read_text())
    latest_name = ckpt_dict['latest_ckpt'].get(model_name)
    if latest_name is None:
        return None
    ckpt_file = Path(model_dir) / latest_name
    # Guard against a stale index entry pointing at a deleted file.
    return str(ckpt_file) if ckpt_file.is_file() else None
def _ordered_unique(seq):
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))]
def save(model_dir,
         model,
         model_name,
         global_step,
         max_to_keep=8,
         keep_latest=True):
    """Save ``model``'s state dict into model_dir and update the index.

    Writes ``{model_name}-{global_step}.tckpt`` and maintains
    ``checkpoints.json`` (latest_ckpt + all_ckpts bookkeeping),
    pruning old checkpoints beyond ``max_to_keep``.

    Args:
        model_dir: string, indicate your model dir(save ckpts, summarys,
            logs, etc).
        model: torch.nn.Module instance.
        model_name: name of your model. we find ckpts by name
        global_step: int, indicate current global step.
        max_to_keep: int, maximum checkpoints to keep.
        keep_latest: bool, if True and there are too much ckpts,
            will delete oldest ckpt. else will delete ckpt which has
            smallest global step.
    """
    # prevent save incomplete checkpoint due to key interrupt
    with DelayedKeyboardInterrupt():
        ckpt_info_path = Path(model_dir) / "checkpoints.json"
        ckpt_filename = "{}-{}.tckpt".format(model_name, global_step)
        ckpt_path = Path(model_dir) / ckpt_filename
        # Load (or initialize) the JSON index of known checkpoints.
        if not ckpt_info_path.is_file():
            ckpt_info_dict = {'latest_ckpt': {}, 'all_ckpts': {}}
        else:
            with open(ckpt_info_path, 'r') as f:
                ckpt_info_dict = json.loads(f.read())
        ckpt_info_dict['latest_ckpt'][model_name] = ckpt_filename
        if model_name in ckpt_info_dict['all_ckpts']:
            ckpt_info_dict['all_ckpts'][model_name].append(ckpt_filename)
        else:
            ckpt_info_dict['all_ckpts'][model_name] = [ckpt_filename]
        all_ckpts = ckpt_info_dict['all_ckpts'][model_name]
        torch.save(model.state_dict(), ckpt_path)
        # check ckpt in all_ckpts is exist, if not, delete it from all_ckpts
        # (note: surviving entries become absolute paths here).
        all_ckpts_checked = []
        for ckpt in all_ckpts:
            ckpt_path_uncheck = Path(model_dir) / ckpt
            if ckpt_path_uncheck.is_file():
                all_ckpts_checked.append(str(ckpt_path_uncheck))
        all_ckpts = all_ckpts_checked
        if len(all_ckpts) > max_to_keep:
            if keep_latest:
                # Oldest-by-insertion entry; pathlib's `/` with an
                # absolute right-hand side yields that absolute path,
                # so the os.remove below still targets the right file.
                ckpt_to_delete = all_ckpts.pop(0)
            else:
                # delete smallest step
                get_step = lambda name: int(name.split('.')[0].split('-')[1])
                min_step = min([get_step(name) for name in all_ckpts])
                ckpt_to_delete = "{}-{}.tckpt".format(model_name, min_step)
                all_ckpts.remove(ckpt_to_delete)
            os.remove(str(Path(model_dir) / ckpt_to_delete))
        # Store bare filenames (deduplicated) back into the index.
        all_ckpts_filename = _ordered_unique([Path(f).name for f in all_ckpts])
        ckpt_info_dict['all_ckpts'][model_name] = all_ckpts_filename
        with open(ckpt_info_path, 'w') as f:
            f.write(json.dumps(ckpt_info_dict, indent=2))
def restore(ckpt_path, model, map_func=None):
    """Load parameters from a checkpoint file into ``model``.

    Args:
        ckpt_path: path to a ``.tckpt`` file previously written by ``save``.
        model: torch.nn.Module whose state dict will be overwritten.
        map_func: optional callable applied (in place) to the loaded
            state dict before loading, e.g. to rename keys.

    Raises:
        ValueError: if ``ckpt_path`` is not an existing file.
    """
    if not Path(ckpt_path).is_file():
        raise ValueError("checkpoint {} not exist.".format(ckpt_path))
    loaded = torch.load(ckpt_path)
    if map_func is not None:
        map_func(loaded)
    model.load_state_dict(loaded)
    print("Restoring parameters from {}".format(ckpt_path))
def _check_model_names(models):
model_names = []
for model in models:
if not hasattr(model, "name"):
raise ValueError("models must have name attr")
model_names.append(model.name)
if len(model_names) != len(set(model_names)):
raise ValueError("models must have unique name: {}".format(
", ".join(model_names)))
def _get_name_to_model_map(models):
if isinstance(models, dict):
name_to_model = {name: m for name, m in models.items()}
else:
_check_model_names(models)
name_to_model = {m.name: m for m in models}
return name_to_model
def try_restore_latest_checkpoints(model_dir, models, map_func=None):
    """Restore each model from its latest checkpoint, if one exists.

    Models for which ``latest_checkpoint`` finds nothing are silently
    skipped (best-effort restore).
    """
    for name, model in _get_name_to_model_map(models).items():
        ckpt = latest_checkpoint(model_dir, name)
        if ckpt is not None:
            restore(ckpt, model, map_func)
def restore_latest_checkpoints(model_dir, models, map_func=None):
    """Restore each model from its latest checkpoint.

    Unlike ``try_restore_latest_checkpoints``, a missing checkpoint for
    any model is a hard error.

    Raises:
        ValueError: if some model has no checkpoint in ``model_dir``.
    """
    name_to_model = _get_name_to_model_map(models)
    for name, model in name_to_model.items():
        latest_ckpt = latest_checkpoint(model_dir, name)
        if latest_ckpt is None:
            # fixed the original ungrammatical message ("ckpt isn't exist")
            raise ValueError(
                "model {}'s checkpoint doesn't exist".format(name))
        restore(latest_ckpt, model, map_func)
def restore_models(model_dir, models, global_step, map_func=None):
    """Restore every model from its checkpoint at ``global_step``.

    Checkpoint files must follow the "{name}-{global_step}.tckpt" naming
    convention used by ``save``.

    Raises:
        ValueError: (from ``restore``) if a checkpoint file is missing.
    """
    name_to_model = _get_name_to_model_map(models)
    for name, model in name_to_model.items():
        ckpt_filename = "{}-{}.tckpt".format(name, global_step)
        # use pathlib like the rest of this module, instead of manual
        # "/"-joined string concatenation
        ckpt_path = Path(model_dir) / ckpt_filename
        restore(str(ckpt_path), model, map_func)
def save_models(model_dir,
                models,
                global_step,
                max_to_keep=15,
                keep_latest=True):
    """Save every model under ``model_dir`` at ``global_step``.

    The whole batch is written inside a DelayedKeyboardInterrupt guard so
    a Ctrl-C cannot leave a partially-saved set of checkpoints.
    """
    with DelayedKeyboardInterrupt():
        for name, model in _get_name_to_model_map(models).items():
            save(model_dir, model, name, global_step, max_to_keep,
                 keep_latest)
| 6,655 | 36.60452 | 79 | py |
second.pytorch | second.pytorch-master/torchplus/train/optim.py | from collections import defaultdict, Iterable
import torch
from copy import deepcopy
from itertools import chain
from torch.autograd import Variable
required = object()
def param_fp32_copy(params):
    """Create detached FP32 master copies of ``params`` with grad enabled.

    NOTE(review): uses ``torch.cuda.FloatTensor``, so this requires a CUDA
    device; the copies always live on the GPU regardless of where the
    source params are.
    """
    param_copy = [
        param.clone().type(torch.cuda.FloatTensor).detach() for param in params
    ]
    # re-enable autograd on the copies so an optimizer can step them
    for param in param_copy:
        param.requires_grad = True
    return param_copy
def set_grad(params, params_with_grad, scale=1.0):
    """Copy (scaled) gradients from ``params_with_grad`` onto ``params``.

    Each source gradient is divided *in place* by ``scale`` (skipped when
    scale is None) and then checked for NaN/Inf. Returns True immediately
    when an invalid gradient is found (signalling the step should be
    skipped), otherwise copies every gradient and returns False.
    """
    for target, source in zip(params, params_with_grad):
        if target.grad is None:
            # lazily allocate an uninitialized grad buffer of matching shape
            target.grad = torch.nn.Parameter(
                target.data.new().resize_(*target.data.size()))
        grad = source.grad.data
        if scale is not None:
            grad /= scale
        if torch.isnan(grad).any() or torch.isinf(grad).any():
            return True  # invalid grad
        target.grad.data.copy_(grad)
    return False
class MixedPrecisionWrapper(object):
    """Mixed precision optimizer wrapper.

    Keeps FP32 master copies of the wrapped optimizer's parameter groups:
    gradients computed on the model parameters are scaled, validated for
    NaN/Inf, copied onto the masters, stepped in FP32, and the updated
    values copied back to the model parameters.

    Arguments:
        optimizer (torch.optim.Optimizer): an instance of
            :class:`torch.optim.Optimizer`
        scale: (float): a scalar for grad scale.
        auto_scale: (bool): whether enable auto scale.
            The loss-scaling algorithm is described in
            http://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
    """
    def __init__(self,
                 optimizer,
                 scale=None,
                 auto_scale=True,
                 inc_factor=2.0,
                 dec_factor=0.5,
                 num_iters_be_stable=500):
        # if not isinstance(optimizer, torch.optim.Optimizer):
        #     raise ValueError("must provide a torch.optim.Optimizer")
        self.optimizer = optimizer
        if hasattr(self.optimizer, 'name'):
            self.name = self.optimizer.name # for ckpt system
        # build FP32 copies of every param group (all non-'params' keys are
        # carried over unchanged)
        param_groups_copy = []
        for i, group in enumerate(optimizer.param_groups):
            group_copy = {n: v for n, v in group.items() if n != 'params'}
            group_copy['params'] = param_fp32_copy(group['params'])
            param_groups_copy.append(group_copy)
        # switch param_groups, may be dangerous: after this, the wrapped
        # optimizer steps the FP32 copies while self.param_groups still
        # points at the original (model) parameters.
        self.param_groups = optimizer.param_groups
        optimizer.param_groups = param_groups_copy
        self.grad_scale = scale              # current loss scale (None = no scaling)
        self.auto_scale = auto_scale
        self.inc_factor = inc_factor         # scale growth factor after a stable run
        self.dec_factor = dec_factor         # scale shrink factor on NaN/Inf
        self.stable_iter_count = 0           # iterations since last overflow
        self.num_iters_be_stable = num_iters_be_stable
    def __getstate__(self):
        return self.optimizer.__getstate__()
    def __setstate__(self, state):
        return self.optimizer.__setstate__(state)
    def __repr__(self):
        return self.optimizer.__repr__()
    def state_dict(self):
        return self.optimizer.state_dict()
    def load_state_dict(self, state_dict):
        return self.optimizer.load_state_dict(state_dict)
    def zero_grad(self):
        return self.optimizer.zero_grad()
    def step(self, closure=None):
        """Copy model grads to the FP32 masters, step, and copy results back.

        If any gradient is NaN/Inf the step is skipped and (with
        auto_scale) the loss scale is reduced.
        """
        for g, g_copy in zip(self.param_groups, self.optimizer.param_groups):
            invalid = set_grad(g_copy['params'], g['params'], self.grad_scale)
            if invalid:
                if self.grad_scale is None or self.auto_scale is False:
                    raise ValueError("nan/inf detected but auto_scale disabled.")
                self.grad_scale *= self.dec_factor
                print('scale decay to {}'.format(self.grad_scale))
                # skip this step entirely; better luck with a smaller scale
                return
        if self.auto_scale is True:
            self.stable_iter_count += 1
            # after a long stable run, try a larger scale to use more of
            # the FP16 dynamic range
            if self.stable_iter_count > self.num_iters_be_stable:
                if self.grad_scale is not None:
                    self.grad_scale *= self.inc_factor
                self.stable_iter_count = 0
        if closure is None:
            self.optimizer.step()
        else:
            self.optimizer.step(closure)
        # write the updated FP32 master values back into the model params
        for g, g_copy in zip(self.param_groups, self.optimizer.param_groups):
            for p_copy, p in zip(g_copy['params'], g['params']):
                p.data.copy_(p_copy.data)
| 4,081 | 35.774775 | 87 | py |
second.pytorch | second.pytorch-master/torchplus/train/fastai_optim.py | from collections import Iterable, defaultdict
from copy import deepcopy
from itertools import chain
import torch
from torch import nn
from torch._utils import _unflatten_dense_tensors
from torch.autograd import Variable
from torch.nn.utils import parameters_to_vector
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
def split_bn_bias(layer_groups):
    """Split each group in `layer_groups` into a non-batchnorm and a
    batchnorm (`bn_types`) `nn.Sequential`, returning 2 * len(layer_groups)
    groups in [non-bn, bn, non-bn, bn, ...] order."""
    result = []
    for group in layer_groups:
        regular, batchnorm = [], []
        for child in group.children():
            (batchnorm if isinstance(child, bn_types) else regular).append(child)
        result.append(nn.Sequential(*regular))
        result.append(nn.Sequential(*batchnorm))
    return result
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    # bn/bias params are separated out so they can be treated differently
    # by the optimizer (see split_bn_bias)
    split_groups = split_bn_bias(layer_groups)
    model_params = [[
        param for param in lg.parameters() if param.requires_grad
    ] for lg in split_groups]
    if flat_master:
        # one flattened FP32 vector per group instead of per-param copies
        master_params = []
        for lg in model_params:
            if len(lg) != 0:
                mp = parameters_to_vector([param.data.float() for param in lg])
                mp = torch.nn.Parameter(mp, requires_grad=True)
                if mp.grad is None: mp.grad = mp.new(*mp.size())
                master_params.append([mp])
            else:
                master_params.append([])
        return model_params, master_params
    else:
        # per-parameter detached FP32 copies with autograd re-enabled
        master_params = [[param.clone().float().detach() for param in lg]
                         for lg in model_params]
        for mp in master_params:
            for param in mp:
                param.requires_grad = True
        return model_params, master_params
def model_g2master_g(model_params, master_params,
                     flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    if flat_master:
        # flatten all group grads into the single master vector's grad
        for model_group, master_group in zip(model_params, master_params):
            if len(master_group) != 0:
                master_group[0].grad.data.copy_(
                    parameters_to_vector(
                        [p.grad.data.float() for p in model_group]))
    else:
        for model_group, master_group in zip(model_params, master_params):
            for model, master in zip(model_group, master_group):
                if model.grad is not None:
                    if master.grad is None:
                        # lazily allocate an uninitialized grad buffer
                        master.grad = master.data.new(*master.data.size())
                    master.grad.data.copy_(model.grad.data)
                else:
                    # mirror "no grad" state on the master as well
                    master.grad = None
def master2model(model_params, master_params,
                 flat_master: bool = False) -> None:
    "Copy `master_params` to `model_params`."
    if flat_master:
        # unflatten the single master vector back into per-param tensors
        for model_group, master_group in zip(model_params, master_params):
            if len(model_group) != 0:
                for model, master in zip(
                        model_group,
                        _unflatten_dense_tensors(master_group[0].data,
                                                 model_group)):
                    model.data.copy_(master)
    else:
        for model_group, master_group in zip(model_params, master_params):
            for model, master in zip(model_group, master_group):
                model.data.copy_(master.data)
def listify(p=None, q=None):
    """Make `p` listy and the same length as `q`.

    `q` may be an int (target length) or an iterable (its len is the
    target); a length-1 `p` is repeated to match.
    """
    if p is None:
        p = []
    elif isinstance(p, str) or not isinstance(p, Iterable):
        p = [p]
    n = q if type(q) == int else len(p) if q is None else len(q)
    if len(p) == 1:
        p = p * n
    assert len(p) == n, f'List len mismatch ({len(p)} vs {n})'
    return list(p)
def trainable_params(m: nn.Module):
    """Return an iterator over the trainable (requires_grad) params in `m`."""
    return (p for p in m.parameters() if p.requires_grad)
def is_tuple(x) -> bool:
    """True when ``x`` is a tuple (or a tuple subclass)."""
    return isinstance(x, tuple)
# copy from fastai.
class OptimWrapper(torch.optim.Optimizer):
    """Basic wrapper around `opt` to simplify hyper-parameters changes.

    Exposes lr / mom / beta / wd as scalar properties that fan out over
    the wrapped optimizer's param groups. Groups come in (non-bn, bn)
    pairs produced by `split_bn_bias`, hence the [::2] / [1::2] slicing
    throughout. Copied from fastai.
    """
    def __init__(self, opt, wd, true_wd: bool = False, bn_wd: bool = True):
        # super().__init__(opt.param_groups, dict())
        self.opt, self.true_wd, self.bn_wd = opt, true_wd, bn_wd
        # hyper-parameter keys supported by the wrapped optimizer
        self.opt_keys = list(self.opt.param_groups[0].keys())
        self.opt_keys.remove('params')
        self.read_defaults()
        self.wd = wd
    @classmethod
    def create(cls, opt_func, lr, layer_groups, **kwargs):
        "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
        split_groups = split_bn_bias(layer_groups)
        opt = opt_func([{
            'params': trainable_params(l),
            'lr': 0
        } for l in split_groups])
        opt = cls(opt, **kwargs)
        opt.lr, opt.opt_func = listify(lr, layer_groups), opt_func
        return opt
    def new(self, layer_groups):
        "Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."
        opt_func = getattr(self, 'opt_func', self.opt.__class__)
        split_groups = split_bn_bias(layer_groups)
        opt = opt_func([{
            'params': trainable_params(l),
            'lr': 0
        } for l in split_groups])
        return self.create(
            opt_func,
            self.lr,
            layer_groups,
            wd=self.wd,
            true_wd=self.true_wd,
            bn_wd=self.bn_wd)
    def __repr__(self) -> str:
        return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
    # Pytorch optimizer methods
    def step(self) -> None:
        "Set weight decay and step optimizer."
        # weight decay outside of optimizer step (AdamW-style decoupled wd);
        # zero out the wrapped optimizer's own weight_decay so it is not
        # applied twice
        if self.true_wd:
            for lr, wd, pg1, pg2 in zip(self._lr, self._wd,
                                        self.opt.param_groups[::2],
                                        self.opt.param_groups[1::2]):
                for p in pg1['params']:
                    p.data.mul_(1 - wd * lr)
                if self.bn_wd:
                    for p in pg2['params']:
                        p.data.mul_(1 - wd * lr)
            self.set_val('weight_decay', listify(0, self._wd))
        self.opt.step()
    def zero_grad(self) -> None:
        "Clear optimizer gradients."
        self.opt.zero_grad()
    # Passthrough to the inner opt.
    def __getstate__(self):
        return self.opt.__getstate__()
    def __setstate__(self, state):
        return self.opt.__setstate__(state)
    def state_dict(self):
        return self.opt.state_dict()
    def load_state_dict(self, state_dict):
        return self.opt.load_state_dict(state_dict)
    def add_param_group(self, param_group):
        return self.opt.add_param_group(param_group)
    def clear(self):
        "Reset the state of the inner optimizer."
        sd = self.state_dict()
        sd['state'] = {}
        self.load_state_dict(sd)
    @property
    def param_groups(self):
        return self.opt.param_groups
    @property
    def defaults(self):
        return self.opt.defaults
    @property
    def state(self):
        return self.opt.state
    # Hyperparameters as properties; the scalar getter returns the last
    # group's value, the setter broadcasts via listify/set_val.
    @property
    def lr(self) -> float:
        return self._lr[-1]
    @lr.setter
    def lr(self, val: float) -> None:
        self._lr = self.set_val('lr', listify(val, self._lr))
    @property
    def mom(self) -> float:
        return self._mom[-1]
    @mom.setter
    def mom(self, val: float) -> None:
        # SGD calls it 'momentum'; Adam stores it as the first of 'betas'
        if 'momentum' in self.opt_keys:
            self.set_val('momentum', listify(val, self._mom))
        elif 'betas' in self.opt_keys:
            self.set_val('betas', (listify(val, self._mom), self._beta))
        self._mom = listify(val, self._mom)
    @property
    def beta(self) -> float:
        return None if self._beta is None else self._beta[-1]
    @beta.setter
    def beta(self, val: float) -> None:
        "Set beta (or alpha as makes sense for given optimizer)."
        if val is None: return
        if 'betas' in self.opt_keys:
            self.set_val('betas', (self._mom, listify(val, self._beta)))
        elif 'alpha' in self.opt_keys:
            self.set_val('alpha', listify(val, self._beta))
        self._beta = listify(val, self._beta)
    @property
    def wd(self) -> float:
        return self._wd[-1]
    @wd.setter
    def wd(self, val: float) -> None:
        "Set weight decay."
        # with true_wd the decay is applied manually in step(), so the
        # optimizer's own weight_decay field is left alone
        if not self.true_wd:
            self.set_val(
                'weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
        self._wd = listify(val, self._wd)
    # Helper functions
    def read_defaults(self) -> None:
        "Read the values inside the optimizer for the hyper-parameters."
        self._beta = None
        if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
        if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
        if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
        if 'betas' in self.opt_keys:
            self._mom, self._beta = self.read_val('betas')
        if 'weight_decay' in self.opt_keys:
            self._wd = self.read_val('weight_decay')
    def set_val(self, key: str, val, bn_groups: bool = True):
        "Set `val` inside the optimizer dictionary at `key`."
        # tuple values (e.g. Adam betas) arrive as a pair of lists and are
        # re-zipped into per-group pairs
        if is_tuple(val): val = [(v1, v2) for v1, v2 in zip(*val)]
        for v, pg1, pg2 in zip(val, self.opt.param_groups[::2],
                               self.opt.param_groups[1::2]):
            pg1[key] = v
            if bn_groups: pg2[key] = v
        return val
    def read_val(self, key: str):
        "Read a hyperparameter `key` in the optimizer dictionary."
        val = [pg[key] for pg in self.opt.param_groups[::2]]
        # unzip per-group tuples (betas) into two parallel lists
        if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
        return val
class FastAIMixedOptim(OptimWrapper):
    """OptimWrapper variant that performs the optimization step on FP32
    master copies of the parameters, with loss scaling."""
    @classmethod
    def create(cls,
               opt_func,
               lr,
               layer_groups,
               model,
               flat_master=False,
               loss_scale=512.0,
               **kwargs):
        "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
        opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
        opt.model_params, opt.master_params = get_master(
            layer_groups, flat_master)
        opt.flat_master = flat_master
        opt.loss_scale = loss_scale
        opt.model = model
        # Changes the optimizer so that the optimization step is done in FP32.
        # opt = self.learn.opt
        # preserve the hyper-parameters while swapping in the master params
        mom, wd, beta = opt.mom, opt.wd, opt.beta
        # each lr repeats twice because split_bn_bias doubled the groups
        lrs = [lr for lr in opt._lr for _ in range(2)]
        opt_params = [{
            'params': mp,
            'lr': lr
        } for mp, lr in zip(opt.master_params, lrs)]
        opt.opt = opt_func(opt_params)
        opt.mom, opt.wd, opt.beta = mom, wd, beta
        return opt
    def step(self):
        """Copy model grads to the masters, unscale, step in FP32, then
        copy the updated masters back into the model parameters."""
        model_g2master_g(self.model_params, self.master_params,
                         self.flat_master)
        for group in self.master_params:
            for param in group:
                param.grad.div_(self.loss_scale)
        super(FastAIMixedOptim, self).step()
        self.model.zero_grad()
        # Update the params from master to model.
        master2model(self.model_params, self.master_params, self.flat_master)
| 11,480 | 34.65528 | 108 | py |
second.pytorch | second.pytorch-master/torchplus/train/learning_schedules.py | """PyTorch edition of TensorFlow learning schedule in tensorflow object
detection API.
"""
import numpy as np
from torch.optim.optimizer import Optimizer
class _LRSchedulerStep(object):
    """Base class for step-indexed lr schedulers (mirrors the old
    torch.optim.lr_scheduler._LRScheduler interface).

    Subclasses implement `_get_lr_per_group(base_lr)`; `step(step)` writes
    the resulting lr into every param group.
    """
    def __init__(self, optimizer, last_step=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        if last_step == -1:
            # fresh run: remember each group's starting lr
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            # resuming: the checkpointed groups must carry 'initial_lr'
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError(
                        "param 'initial_lr' is not specified "
                        "in param_groups[{}] when resuming an optimizer".
                        format(i))
        self.base_lrs = list(
            map(lambda group: group['initial_lr'], optimizer.param_groups))
        # apply the schedule once; note last_step is then reset below, so
        # the first explicit step() call recomputes last_step + 1
        self.step(last_step + 1)
        self.last_step = last_step
    """
    def get_lr(self):
        raise NotImplementedError
    """
    def get_lr(self):
        # one lr per param group, derived from that group's base lr
        ret = [self._get_lr_per_group(base_lr) for base_lr in self.base_lrs]
        return ret
    def _get_lr_per_group(self, base_lr):
        # subclass hook: map a base lr to the lr for self.last_step
        raise NotImplementedError
    def step(self, step=None):
        if step is None:
            step = self.last_step + 1
        self.last_step = step
        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr
class Constant(_LRSchedulerStep):
    """Scheduler that keeps every group's lr fixed at its base value."""
    def __init__(self, optimizer, last_step=-1):
        super().__init__(optimizer, last_step)

    def _get_lr_per_group(self, base_lr):
        # the lr never changes, whatever the step
        return base_lr
class ManualStepping(_LRSchedulerStep):
    """Pytorch edition of manual_stepping in tensorflow.

    Piecewise-constant lr: rates[0] up to and including boundaries[0],
    rates[i+1] strictly after boundaries[i].
    DON'T SUPPORT PARAM GROUPS.
    """
    def __init__(self, optimizer, boundaries, rates, last_step=-1):
        self._boundaries = boundaries
        self._num_boundaries = len(boundaries)
        self._learning_rates = rates
        # NOTE(review): 'b < 0' admits boundary 0 even though the message
        # demands positive integers
        if any([b < 0 for b in boundaries]) or any(
            [not isinstance(b, int) for b in boundaries]):
            raise ValueError('boundaries must be a list of positive integers')
        if any(
            [bnext <= b for bnext, b in zip(boundaries[1:], boundaries[:-1])]):
            raise ValueError(
                'Entries in boundaries must be strictly increasing.')
        if any([not isinstance(r, float) for r in rates]):
            raise ValueError('Learning rates must be floats')
        if len(rates) != len(boundaries) + 1:
            raise ValueError('Number of provided learning rates must exceed '
                             'number of boundary points by exactly 1.')
        super().__init__(optimizer, last_step)
    def _get_lr_per_group(self, base_lr):
        step = self.last_step
        ret = None
        # pick the rate of the last boundary already passed (strictly >)
        for i, bound in enumerate(self._boundaries):
            if step > bound:
                ret = self._learning_rates[i + 1]
        if ret is not None:
            return ret
        return self._learning_rates[0]
class ExponentialDecayWithBurnin(_LRSchedulerStep):
    """Staircase exponential decay preceded by a constant burn-in lr.

    For steps < ``burnin_steps`` the lr is ``burnin_learning_rate`` (the
    group's base lr when that is 0); afterwards the base lr is multiplied
    by ``learning_rate_decay_factor`` every ``learning_rate_decay_steps``.
    """
    def __init__(self,
                 optimizer,
                 learning_rate_decay_steps,
                 learning_rate_decay_factor,
                 burnin_learning_rate,
                 burnin_steps,
                 last_step=-1):
        self._decay_steps = learning_rate_decay_steps
        self._decay_factor = learning_rate_decay_factor
        self._burnin_learning_rate = burnin_learning_rate
        self._burnin_steps = burnin_steps
        super().__init__(optimizer, last_step)
    def _get_lr_per_group(self, base_lr):
        # bug fix: originally burnin_learning_rate was only assigned inside
        # the "== 0" branch, leaving it unbound otherwise
        burnin_learning_rate = self._burnin_learning_rate
        if burnin_learning_rate == 0:
            burnin_learning_rate = base_lr
        step = self.last_step
        # bug fix: the original used '^' (bitwise XOR) instead of
        # exponentiation for the staircase decay
        post_burnin_learning_rate = (
            base_lr * self._decay_factor**(step // self._decay_steps))
        if step < self._burnin_steps:
            return burnin_learning_rate
        else:
            return post_burnin_learning_rate
class ExponentialDecay(_LRSchedulerStep):
    """Exponential lr decay: base_lr * decay_factor ** (step / decay_steps),
    with the exponent floored to an integer when ``staircase`` is True."""
    def __init__(self,
                 optimizer,
                 learning_rate_decay_steps,
                 learning_rate_decay_factor,
                 staircase=True,
                 last_step=-1):
        self._decay_steps = learning_rate_decay_steps
        self._decay_factor = learning_rate_decay_factor
        self._staircase = staircase
        super().__init__(optimizer, last_step)
    def _get_lr_per_group(self, base_lr):
        step = self.last_step
        if self._staircase:
            # integer division: lr drops in discrete steps
            post_burnin_learning_rate = base_lr * pow(self._decay_factor,
                                                      (step // self._decay_steps))
        else:
            # true division: lr decays smoothly every step
            post_burnin_learning_rate = base_lr * pow(self._decay_factor,
                                                      (step / self._decay_steps))
        return post_burnin_learning_rate
return post_burnin_learning_rate
class CosineDecayWithWarmup(_LRSchedulerStep):
    """Linear warm-up from ``warmup_learning_rate`` to the base lr over
    ``warmup_steps`` steps, followed by a cosine decay to 0 at
    ``total_steps``."""
    def __init__(self,
                 optimizer,
                 total_steps,
                 warmup_learning_rate,
                 warmup_steps,
                 last_step=-1):
        if total_steps < warmup_steps:
            raise ValueError('total_steps must be larger or equal to '
                             'warmup_steps.')
        self._total_steps = total_steps
        self._warmup_learning_rate = warmup_learning_rate
        self._warmup_steps = warmup_steps
        super().__init__(optimizer, last_step)
    def _get_lr_per_group(self, base_lr):
        if base_lr < self._warmup_learning_rate:
            raise ValueError('learning_rate_base must be larger '
                             'or equal to warmup_learning_rate.')
        step = self.last_step
        # half-cosine from base_lr (at warmup end) down to 0 (at total_steps)
        learning_rate = 0.5 * base_lr * (
            1 + np.cos(np.pi *
                       (float(step) - self._warmup_steps
                        ) / float(self._total_steps - self._warmup_steps)))
        if self._warmup_steps > 0:
            # linear ramp warmup_learning_rate -> base_lr
            slope = (base_lr - self._warmup_learning_rate) / self._warmup_steps
            pre_cosine_learning_rate = slope * float(
                step) + self._warmup_learning_rate
        # note: when warmup_steps == 0 this branch is unreachable, so
        # pre_cosine_learning_rate being unset is harmless
        if step < self._warmup_steps:
            return pre_cosine_learning_rate
        else:
            return learning_rate
return learning_rate
class OneCycle(_LRSchedulerStep):
    """One-cycle lr schedule (L. Smith's "super-convergence" policy):
    cosine ramp from ``lr_max / div_factor`` up to ``lr_max`` over the
    first ``pct_start`` fraction of ``total_steps``, then a cosine anneal
    down to ``lr_max / div_factor / 1e4``.

    Bug fix: the original body was an incomplete copy of
    CosineDecayWithWarmup and referenced undefined names
    (``warmup_steps`` / ``warmup_learning_rate``), raising NameError on
    construction. ``moms`` is kept for interface compatibility; like the
    other schedulers in this module, only the lr is scheduled.
    """
    def __init__(self,
                 optimizer,
                 total_steps,
                 lr_max,
                 moms,
                 div_factor=25,
                 pct_start=0.3,
                 last_step=-1):
        if total_steps <= 0:
            raise ValueError('total_steps must be positive.')
        if not 0.0 < pct_start < 1.0:
            raise ValueError('pct_start must be in (0, 1).')
        self._total_steps = total_steps
        self._lr_max = lr_max
        self._moms = moms
        self._div_factor = div_factor
        self._pct_start = pct_start
        super().__init__(optimizer, last_step)
    def _get_lr_per_group(self, base_lr):
        step = self.last_step
        low_lr = self._lr_max / self._div_factor
        up_steps = max(int(self._pct_start * self._total_steps), 1)
        if step < up_steps:
            # cosine warm-up: low_lr -> lr_max
            pct = float(step) / up_steps
            start, end = low_lr, self._lr_max
        else:
            # cosine anneal: lr_max -> low_lr / 1e4
            down_steps = max(self._total_steps - up_steps, 1)
            pct = min(float(step - up_steps) / down_steps, 1.0)
            start, end = self._lr_max, low_lr / 1e4
        return end + (start - end) / 2 * (np.cos(np.pi * pct) + 1)
| 7,996 | 35.35 | 79 | py |
second.pytorch | second.pytorch-master/torchplus/train/learning_schedules_fastai.py | import numpy as np
import math
from functools import partial
import torch
class LRSchedulerStep(object):
    """Phase-based scheduler driving a fastai-style optimizer's `lr`/`mom`.

    Each phase is (start_fraction, func); func maps the within-phase
    progress in [0, 1] to an absolute lr (or momentum) value.
    """
    def __init__(self, fai_optimizer, total_step, lr_phases, mom_phases):
        self.optimizer = fai_optimizer
        self.total_step = total_step
        self.lr_phases = []
        for i, (start, lambda_func) in enumerate(lr_phases):
            if len(self.lr_phases) != 0:
                assert self.lr_phases[-1][0] < int(start * total_step)
            # HACK: string phases are eval'd into callables — only pass
            # trusted config strings here
            if isinstance(lambda_func, str):
                lambda_func = eval(lambda_func)
            if i < len(lr_phases) - 1:
                # phase runs until the next phase's start step
                self.lr_phases.append((int(start * total_step),
                                       int(lr_phases[i + 1][0] * total_step),
                                       lambda_func))
            else:
                self.lr_phases.append((int(start * total_step), total_step,
                                       lambda_func))
        assert self.lr_phases[0][0] == 0
        self.mom_phases = []
        for i, (start, lambda_func) in enumerate(mom_phases):
            if len(self.mom_phases) != 0:
                assert self.mom_phases[-1][0] < int(start * total_step)
            if isinstance(lambda_func, str):
                lambda_func = eval(lambda_func)
            if i < len(mom_phases) - 1:
                self.mom_phases.append((int(start * total_step),
                                        int(mom_phases[i + 1][0] * total_step),
                                        lambda_func))
            else:
                self.mom_phases.append((int(start * total_step), total_step,
                                        lambda_func))
        if len(mom_phases) > 0:
            assert self.mom_phases[0][0] == 0
    def step(self, step):
        # evaluate every phase already started; the last one wins
        lrs = []
        moms = []
        for start, end, func in self.lr_phases:
            if step >= start:
                lrs.append(func((step - start) / (end - start)))
        if len(lrs) > 0:
            self.optimizer.lr = lrs[-1]
        for start, end, func in self.mom_phases:
            if step >= start:
                moms.append(func((step - start) / (end - start)))
                # NOTE(review): this in-loop assignment is redundant — it is
                # overwritten by moms[-1] below
                self.optimizer.mom = func((step - start) / (end - start))
        if len(moms) > 0:
            self.optimizer.mom = moms[-1]
    @property
    def learning_rate(self):
        return self.optimizer.lr
def annealing_cos(start, end, pct):
    """Cosine anneal from `start` to `end` as `pct` goes from 0.0 to 1.0."""
    # cos maps pct in [0, 1] onto a weight in [2, 0]; halved, it blends
    # start (pct=0) into end (pct=1)
    weight = (np.cos(np.pi * pct) + 1) / 2
    return end + (start - end) * weight
class OneCycle(LRSchedulerStep):
    """One-cycle policy: cosine lr ramp lr_max/div_factor -> lr_max over the
    first pct_start fraction of training, then anneal to lr_max/div_factor/1e4;
    momentum is annealed in the opposite direction between moms[0] and moms[1]."""
    def __init__(self, fai_optimizer, total_step, lr_max, moms, div_factor,
                 pct_start):
        self.lr_max = lr_max
        self.moms = moms
        self.div_factor = div_factor
        self.pct_start = pct_start
        a1 = int(total_step * self.pct_start)   # warm-up length (unused below but kept)
        a2 = total_step - a1
        low_lr = self.lr_max / self.div_factor
        lr_phases = ((0, partial(annealing_cos, low_lr, self.lr_max)),
                     (self.pct_start,
                      partial(annealing_cos, self.lr_max, low_lr / 1e4)))
        # momentum goes high -> low during warm-up, low -> high afterwards
        mom_phases = ((0, partial(annealing_cos, *self.moms)),
                      (self.pct_start, partial(annealing_cos,
                                               *self.moms[::-1])))
        fai_optimizer.lr, fai_optimizer.mom = low_lr, self.moms[0]
        super().__init__(fai_optimizer, total_step, lr_phases, mom_phases)
class ExponentialDecay(LRSchedulerStep):
    """Exponential lr decay over phases of length ``decay_length`` (given
    as a fraction of ``total_step``)."""
    def __init__(self,
                 fai_optimizer,
                 total_step,
                 initial_learning_rate,
                 decay_length,
                 decay_factor,
                 staircase=True):
        """
        Args:
            decay_length: must in (0, 1); fraction of total_step after
                which the lr is multiplied by ``decay_factor``.
        """
        assert decay_length > 0
        assert decay_length < 1
        self._decay_steps_unified = decay_length
        self._decay_factor = decay_factor
        self._staircase = staircase
        step = 0
        stage = 1
        lr_phases = []
        if staircase:
            # one constant phase per decay period; bind the phase's lr as a
            # default argument so each lambda keeps its own value
            while step <= total_step:
                func = lambda p, _d=initial_learning_rate * stage: _d
                lr_phases.append((step / total_step, func))
                stage *= decay_factor
                step += int(decay_length * total_step)
        else:
            # bug fix: the smooth branch previously returned only the decay
            # multiplier pow(decay_factor, p / decay_length); it must be
            # scaled by initial_learning_rate like the staircase branch.
            func = lambda p: initial_learning_rate * pow(decay_factor,
                                                         (p / decay_length))
            lr_phases.append((0, func))
        super().__init__(fai_optimizer, total_step, lr_phases, [])
class ManualStepping(LRSchedulerStep):
    """Piecewise-constant lr schedule.

    ``boundaries`` are fractions of total_step in (0, 1); ``rates`` gives
    the lr before/after each boundary (len(rates) == len(boundaries) + 1).
    """
    def __init__(self, fai_optimizer, total_step, boundaries, rates):
        assert all([b > 0 and b < 1 for b in boundaries])
        assert len(boundaries) + 1 == len(rates)
        # bug fix: the original did boundaries.insert(0, 0.0), mutating the
        # caller's list in place; build a fresh list instead
        starts = [0.0] + list(boundaries)
        lr_phases = []
        for start, rate in zip(starts, rates):
            # bind rate as a default argument so each lambda keeps its own value
            func = lambda p, _d=rate: _d
            lr_phases.append((start, func))
        super().__init__(fai_optimizer, total_step, lr_phases, [])
class FakeOptim:
    """Minimal optimizer stand-in exposing only `lr` and `mom`; used below
    to plot schedules without a real model."""

    def __init__(self):
        self.lr = 0
        self.mom = 0
if __name__ == "__main__":
    # quick visual sanity-check of the schedulers: drive a FakeOptim for
    # 100 steps and plot the resulting lr curve
    import matplotlib.pyplot as plt
    opt = FakeOptim()  # 3e-3, wd=0.4, div_factor=10
    # schd = OneCycle(opt, 100, 3e-3, (0.95, 0.85), 10.0, 0.4)
    # NOTE(review): the first schd assignment is immediately overwritten;
    # uncomment/comment lines to choose which schedule to plot
    schd = ExponentialDecay(opt, 100, 3e-4, 0.1, 0.8, staircase=True)
    schd = ManualStepping(opt, 100, [0.8, 0.9], [0.001, 0.0001, 0.00005])
    lrs = []
    moms = []
    for i in range(100):
        schd.step(i)
        lrs.append(opt.lr)
        moms.append(opt.mom)
    plt.plot(lrs)
    # plt.plot(moms)
    # plt.show()
    # plt.plot(moms)
    plt.show()
| 5,671 | 35.127389 | 79 | py |
second.pytorch | second.pytorch-master/torchplus/train/__init__.py | from torchplus.train.checkpoint import (latest_checkpoint, restore,
restore_latest_checkpoints,
restore_models, save, save_models,
try_restore_latest_checkpoints)
from torchplus.train.common import create_folder
from torchplus.train.optim import MixedPrecisionWrapper
| 388 | 54.571429 | 74 | py |
second.pytorch | second.pytorch-master/torchplus/ops/array_ops.py | import ctypes
import math
import time
import torch
def scatter_nd(indices, updates, shape):
    """pytorch edition of tensorflow scatter_nd.

    Writes ``updates`` into a zero tensor of ``shape`` at the coordinate
    rows given by ``indices``.

    this function don't contain except handle code. so use this carefully
    when indice repeats, don't support repeat add which is supported
    in tensorflow.

    Args:
        indices: integer tensor of shape [..., ndim] indexing the first
            ndim dimensions of the output.
        updates: tensor whose leading shape matches indices.shape[:-1].
        shape: list of ints, shape of the output tensor.
    """
    ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
    ndim = indices.shape[-1]
    output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1]:]
    flatted_indices = indices.view(-1, ndim)
    # bug fix: advanced indexing with a *list* of tensors is deprecated
    # (and an error in recent PyTorch); use a tuple instead.
    slices = tuple(flatted_indices[:, i] for i in range(ndim)) + (Ellipsis,)
    ret[slices] = updates.view(*output_shape)
    return ret
def gather_nd(params, indices):
    """pytorch edition of tensorflow gather_nd: gathers the elements of
    ``params`` addressed by the coordinate rows in ``indices``.

    this function has a limit that MAX_ADVINDEX_CALC_DIMS=5

    Args:
        params: source tensor.
        indices: integer tensor of shape [..., ndim] indexing the first
            ndim dimensions of ``params``.
    """
    ndim = indices.shape[-1]
    output_shape = list(indices.shape[:-1]) + list(params.shape[indices.shape[-1]:])
    flatted_indices = indices.view(-1, ndim)
    # bug fix: advanced indexing with a *list* of tensors is deprecated
    # (and an error in recent PyTorch); use a tuple instead.
    slices = tuple(flatted_indices[:, i] for i in range(ndim)) + (Ellipsis,)
    return params[slices].view(*output_shape)
| 1,061 | 33.258065 | 84 | py |
SPTM | SPTM-master/src/common/register_test_setups.py | DATA_PATH = '../../data/'
class TestSetup:
    """One evaluation environment: the WAD level file, recorded demo files
    (memory buffer plus one per goal), the maps used for evaluation and
    exploration, goal locations/names, and the level's bounding box.
    All file paths are resolved relative to DATA_PATH/dir."""

    def __init__(self,
                 dir,
                 wad,
                 memory_buffer_lmp,
                 goal_lmps,
                 maps,
                 exploration_map,
                 goal_locations,
                 goal_names,
                 box):
        prefix = DATA_PATH + dir
        self.wad = prefix + wad
        self.memory_buffer_lmp = prefix + memory_buffer_lmp
        self.goal_lmps = [prefix + lmp for lmp in goal_lmps]
        self.maps = maps
        self.exploration_map = exploration_map
        self.goal_locations = goal_locations
        self.goal_names = goal_names
        self.box = box
# registry of setup name -> TestSetup, populated below and returned by
# register_test_setups()
TEST_SETUPS = {}
# maps used for goal-reaching evaluation vs. the dedicated exploration map
STANDARD_MAPS = ['map02', 'map03', 'map04', 'map05']
EXPLORATION_MAP = 'map06'
# goal object names shared by all setups; order matches each setup's
# goal_lmps and goal_locations lists
STANDARD_GOAL_NAMES = ['tall_red_pillar',
                       'candelabra',
                       'tall_blue_torch',
                       'short_green_pillar']
TEST_SETUPS['deepmind_small'] = \
TestSetup(
dir='Test/deepmind_small/',
wad='deepmind_small.wad_manymaps_test.wad',
memory_buffer_lmp='deepmind_small.lmp',
goal_lmps=['deepmind_small_tall_red_pillar.lmp',
'deepmind_small_candelabra.lmp',
'deepmind_small_tall_blue_torch.lmp',
'deepmind_small_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(-64.0, -192.0),
(64.0, 64.0),
(320.0, -64.0),
(192.0, 64.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-512.0, -384.0, 768.0, 256.0])
TEST_SETUPS['deepmind_small_dm'] = \
TestSetup(
dir='Test/deepmind_small_dm/',
wad='deepmind_small.wad_manymaps_test.wad',
memory_buffer_lmp='deepmind_small.lmp',
goal_lmps=['deepmind_small_tall_red_pillar.lmp',
'deepmind_small_candelabra.lmp',
'deepmind_small_tall_blue_torch.lmp',
'deepmind_small_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(-64.0, -192.0),
(64.0, 64.0),
(320.0, -64.0),
(192.0, 64.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-512.0, -384.0, 768.0, 256.0])
TEST_SETUPS['deepmind_small_autoexplore'] = \
TestSetup(
dir='Test/deepmind_small_autoexplore/',
wad='deepmind_small.wad_manymaps_test.wad',
memory_buffer_lmp='deepmind_small.lmp',
goal_lmps=['deepmind_small_tall_red_pillar.lmp',
'deepmind_small_candelabra.lmp',
'deepmind_small_tall_blue_torch.lmp',
'deepmind_small_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(-64.0, -192.0),
(64.0, 64.0),
(320.0, -64.0),
(192.0, 64.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-512.0, -384.0, 768.0, 256.0])
TEST_SETUPS['open_space_five'] = \
TestSetup(
dir='Test/open_space_five/',
wad='open_space_five.wad_manymaps_test.wad',
memory_buffer_lmp='open_space_five.lmp',
goal_lmps=['open_space_five_tall_red_pillar.lmp',
'open_space_five_candelabra.lmp',
'open_space_five_tall_blue_torch.lmp',
'open_space_five_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(1728.0, 896.0),
(832.0, 1728.0),
(832.0, 128.0),
(1728.0, 1152.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[0.0, 0.0, 1856.0, 1856.0])
TEST_SETUPS['star_maze'] = \
TestSetup(
dir='Test/star_maze/',
wad='star_maze.wad_manymaps_test.wad',
memory_buffer_lmp='star_maze.lmp',
goal_lmps=['star_maze_tall_red_pillar.lmp',
'star_maze_candelabra.lmp',
'star_maze_tall_blue_torch.lmp',
'star_maze_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(-448.0, -992.0),
(-704.0, -320.0),
(736.0, -320.0),
(544.0, 768.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-928.0, -1088.0, 1472.0, 864.0])
TEST_SETUPS['office1'] = \
TestSetup(
dir='Test/office1/',
wad='office1.wad_manymaps_test.wad',
memory_buffer_lmp='office1.lmp',
goal_lmps=['office1_tall_red_pillar.lmp',
'office1_candelabra.lmp',
'office1_tall_blue_torch.lmp',
'office1_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(320.0, 192.0),
(192.0, 192.0),
(960.0, -64.0),
(832.0, -576.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-384.0, -640.0, 1280.0, 256.0])
TEST_SETUPS['office1_dm'] = \
TestSetup(
dir='Test/office1_dm/',
wad='office1.wad_manymaps_test.wad',
memory_buffer_lmp='office1.lmp',
goal_lmps=['office1_tall_red_pillar.lmp',
'office1_candelabra.lmp',
'office1_tall_blue_torch.lmp',
'office1_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(320.0, 192.0),
(192.0, 192.0),
(960.0, -64.0),
(832.0, -576.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-384.0, -640.0, 1280.0, 256.0])
TEST_SETUPS['office1_autoexplore'] = \
TestSetup(
dir='Test/office1_autoexplore/',
wad='office1.wad_manymaps_test.wad',
memory_buffer_lmp='office1.lmp',
goal_lmps=['office1_tall_red_pillar.lmp',
'office1_candelabra.lmp',
'office1_tall_blue_torch.lmp',
'office1_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(320.0, 192.0),
(192.0, 192.0),
(960.0, -64.0),
(832.0, -576.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-384.0, -640.0, 1280.0, 256.0])
TEST_SETUPS['columns'] = \
TestSetup(
dir='Test/columns/',
wad='columns.wad_manymaps_test.wad',
memory_buffer_lmp='columns.lmp',
goal_lmps=['columns_tall_red_pillar.lmp',
'columns_candelabra.lmp',
'columns_tall_blue_torch.lmp',
'columns_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(-672.0, -480.0),
(-224.0, 352.0),
(256.0, 320.0),
(768.0, -448.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-704.0, -512.0, 832.0, 384.0])
TEST_SETUPS['columns_dm'] = \
TestSetup(
dir='Test/columns_dm/',
wad='columns.wad_manymaps_test.wad',
memory_buffer_lmp='columns.lmp',
goal_lmps=['columns_tall_red_pillar.lmp',
'columns_candelabra.lmp',
'columns_tall_blue_torch.lmp',
'columns_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(-672.0, -480.0),
(-224.0, 352.0),
(256.0, 320.0),
(768.0, -448.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-704.0, -512.0, 832.0, 384.0])
TEST_SETUPS['columns_autoexplore'] = \
TestSetup(
dir='Test/columns_autoexplore/',
wad='columns.wad_manymaps_test.wad',
memory_buffer_lmp='columns.lmp',
goal_lmps=['columns_tall_red_pillar.lmp',
'columns_candelabra.lmp',
'columns_tall_blue_torch.lmp',
'columns_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(-672.0, -480.0),
(-224.0, 352.0),
(256.0, 320.0),
(768.0, -448.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-704.0, -512.0, 832.0, 384.0])
TEST_SETUPS['office2'] = \
TestSetup(
dir='Test/office2/',
wad='office2.wad_manymaps_test.wad',
memory_buffer_lmp='office2.lmp',
goal_lmps=['office2_tall_red_pillar.lmp',
'office2_candelabra.lmp',
'office2_tall_blue_torch.lmp',
'office2_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(-384.0, -256.0),
(0.0, 0.0),
(352.0, -480.0),
(768.0, 32.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-576.0, -640.0, 832.0, 320.0])
TEST_SETUPS['topological_star_easier'] = \
TestSetup(
dir='Test/topological_star_easier/',
wad='topological_star_easier.wad_manymaps_test.wad',
memory_buffer_lmp='topological_star_easier.lmp',
goal_lmps=['topological_star_easier_tall_red_pillar.lmp',
'topological_star_easier_candelabra.lmp',
'topological_star_easier_tall_blue_torch.lmp',
'topological_star_easier_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(-832.0, -384.0),
(-704.0, -128.0),
(960.0, -384.0),
(960.0, 128.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-896.0, -448.0, 1024.0, 576.0])
TEST_SETUPS['open_space_two'] = \
TestSetup(
dir='Val/open_space_two/',
wad='open_space_two.wad_manymaps_test.wad',
memory_buffer_lmp='open_space_two.lmp',
goal_lmps=['open_space_two_tall_red_pillar.lmp',
'open_space_two_candelabra.lmp',
'open_space_two_tall_blue_torch.lmp',
'open_space_two_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(1728.0, 1600.0),
(1728.0, 128.0),
(128.0, 1728.0),
(128.0, 128.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[0.0, 0.0, 1856.0, 1856.0])
TEST_SETUPS['branching'] = \
TestSetup(
dir='Val/branching/',
wad='branching.wad_manymaps_test.wad',
memory_buffer_lmp='branching.lmp',
goal_lmps=['branching_tall_red_pillar.lmp',
'branching_candelabra.lmp',
'branching_tall_blue_torch.lmp',
'branching_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(192.0, -448.0),
(64.0, 320.0),
(320.0, -64.0),
(448.0, -320.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-256.0, -768.0, 1024.0, 512.0])
TEST_SETUPS['deepmind_large'] = \
TestSetup(
dir='Val/deepmind_large/',
wad='deepmind_large.wad_manymaps_test.wad',
memory_buffer_lmp='deepmind_large.lmp',
goal_lmps=['deepmind_large_tall_red_pillar.lmp',
'deepmind_large_candelabra.lmp',
'deepmind_large_tall_blue_torch.lmp',
'deepmind_large_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(576.0, -320.0),
(1088.0, -576.0),
(320.0, -192.0),
(704.0, -832.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-640.0, -1024.0, 1280.0, 128.0])
TEST_SETUPS['deepmind_large_dm'] = \
TestSetup(
dir='Val/deepmind_large_dm/',
wad='deepmind_large.wad_manymaps_test.wad',
memory_buffer_lmp='deepmind_large.lmp',
goal_lmps=['deepmind_large_tall_red_pillar.lmp',
'deepmind_large_candelabra.lmp',
'deepmind_large_tall_blue_torch.lmp',
'deepmind_large_short_green_pillar.lmp'],
maps=STANDARD_MAPS,
exploration_map=EXPLORATION_MAP,
goal_locations=[(576.0, -320.0),
(1088.0, -576.0),
(320.0, -192.0),
(704.0, -832.0)],
goal_names=STANDARD_GOAL_NAMES,
box=[-640.0, -1024.0, 1280.0, 128.0])
def register_test_setups():
  """Return the module-level registry of all evaluation setups."""
  return TEST_SETUPS
| 13,019 | 39.560748 | 69 | py |
SPTM | SPTM-master/src/common/resnet.py | #!/usr/bin/env python
#taken from https://github.com/raghakot/keras-resnet/blob/master/resnet.py
from __future__ import division
import six
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers.core import Lambda
from keras.layers.merge import (dot, concatenate)
from keras.layers.convolutional import (
Conv2D,
MaxPooling2D,
AveragePooling2D
)
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
# Hyperparameters of the siamese comparison network.
NUM_EMBEDDING = 512  # dimensionality of the per-frame embedding vector
TOP_HIDDEN = 4  # number of hidden dense+BN+ReLU layers in the comparison head
NORMALIZATION_ON = False  # if True, L2-normalize embeddings before comparison
def _bn_relu(input):
    """Apply batch normalization (over the channel axis) followed by ReLU."""
    normalized = BatchNormalization(axis=CHANNEL_AXIS)(input)
    activated = Activation("relu")(normalized)
    return activated
def _conv_bn_relu(**conv_params):
    """Build a conv -> BN -> relu block (the classic ResNet ordering).

    `filters` and `kernel_size` are required; strides, initializer, padding
    and regularizer fall back to sensible defaults.
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))

    def block(input):
        convolved = Conv2D(filters=filters, kernel_size=kernel_size,
                           strides=strides, padding=padding,
                           kernel_initializer=kernel_initializer,
                           kernel_regularizer=kernel_regularizer)(input)
        return _bn_relu(convolved)

    return block
def _bn_relu_conv(**conv_params):
    """Build a BN -> relu -> conv block.

    This is the pre-activation ordering proposed in
    http://arxiv.org/pdf/1603.05027v2.pdf
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))

    def block(input):
        preactivated = _bn_relu(input)
        return Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding=padding,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer)(preactivated)

    return block
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]
    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    # The projection both matches the channel count and downsamples spatially
    # via its strides so the element-wise add below is well-defined.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001))(input)
    return add([shortcut, residual])
def _residual_block(block_function, filters, repetitions, is_first_layer=False):
    """Stack `repetitions` residual units built by `block_function`.

    The first unit of every stage except the very first one downsamples
    spatially with stride 2.
    """
    def stage(input):
        output = input
        for unit_index in range(repetitions):
            if unit_index == 0 and not is_first_layer:
                init_strides = (2, 2)
            else:
                init_strides = (1, 1)
            output = block_function(filters=filters, init_strides=init_strides,
                                    is_first_block_of_first_layer=(is_first_layer and unit_index == 0))(output)
        return output
    return stage
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
    """Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
    """
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv1 = Conv2D(filters=filters, kernel_size=(3, 3),
                           strides=init_strides,
                           padding="same",
                           kernel_initializer="he_normal",
                           kernel_regularizer=l2(1e-4))(input)
        else:
            conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
                                  strides=init_strides)(input)
        # Second pre-activation conv, then merge with the identity/projection
        # shortcut.
        residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
        return _shortcut(input, residual)
    return f
def bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
    """Bottleneck architecture for > 34 layer resnet.
    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
    Returns:
        A final conv layer of filters * 4
    """
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv_1_1 = Conv2D(filters=filters, kernel_size=(1, 1),
                              strides=init_strides,
                              padding="same",
                              kernel_initializer="he_normal",
                              kernel_regularizer=l2(1e-4))(input)
        else:
            conv_1_1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
                                     strides=init_strides)(input)
        # 1x1 reduce -> 3x3 -> 1x1 expand (to filters * 4), then shortcut-add.
        conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1)
        residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3)
        return _shortcut(input, residual)
    return f
def _handle_dim_ordering():
    """Set the global axis indices according to the Keras backend layout.

    TensorFlow ordering is channels-last (N, H, W, C); otherwise the
    channels-first convention is assumed.
    """
    global ROW_AXIS
    global COL_AXIS
    global CHANNEL_AXIS
    if K.image_dim_ordering() == 'tf':
        ROW_AXIS, COL_AXIS, CHANNEL_AXIS = 1, 2, 3
    else:
        CHANNEL_AXIS, ROW_AXIS, COL_AXIS = 1, 2, 3
def _get_block(identifier):
    """Resolve a block function given either a callable or its global name."""
    if not isinstance(identifier, six.string_types):
        return identifier
    resolved = globals().get(identifier)
    if not resolved:
        raise ValueError('Invalid {}'.format(identifier))
    return resolved
def _bn_relu_for_dense(input):
    """Batch-normalize a flat (dense) tensor and apply ReLU."""
    normalized = BatchNormalization(axis=1)(input)
    return Activation('relu')(normalized)
def _top_network(input):
    """Comparison head: TOP_HIDDEN dense+BN+ReLU layers on the concatenated
    embeddings, finished with a 2-way softmax (class 1 = "close" pair)."""
    raw_result = _bn_relu_for_dense(input)
    for _ in xrange(TOP_HIDDEN):
        raw_result = Dense(units=NUM_EMBEDDING, kernel_initializer='he_normal')(raw_result)
        raw_result = _bn_relu_for_dense(raw_result)
    output = Dense(units=2, activation='softmax', kernel_initializer='he_normal')(raw_result)
    return output
class ResnetBuilder(object):
    """Factory for ResNet variants plus the siamese/pixel-comparison models
    used by SPTM for retrieval (edge prediction)."""
    @staticmethod
    def build(input_shape, num_outputs, block_fn, repetitions, is_classification):
        """Builds a custom ResNet like architecture.
        Args:
            input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols)
            num_outputs: The number of outputs at final softmax layer
            block_fn: The block function to use. This is either `basic_block` or `bottleneck`.
                The original paper used basic_block for layers < 50
            repetitions: Number of repetitions of various block units.
                At each block unit, the number of filters are doubled and the input size is halved
        Returns:
            The keras `Model`.
        """
        _handle_dim_ordering()
        if len(input_shape) != 3:
            raise Exception("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")
        # Permute dimension order if necessary
        if K.image_dim_ordering() == 'tf':
            input_shape = (input_shape[1], input_shape[2], input_shape[0])
        # Load function from str if needed.
        block_fn = _get_block(block_fn)
        input = Input(shape=input_shape)
        conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input)
        pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(conv1)
        block = pool1
        filters = 64
        for i, r in enumerate(repetitions):
            block = _residual_block(block_fn, filters=filters, repetitions=r, is_first_layer=(i == 0))(block)
            filters *= 2
        # Last activation
        block = _bn_relu(block)
        # Classifier block
        block_shape = K.int_shape(block)
        pool2 = AveragePooling2D(pool_size=(block_shape[ROW_AXIS], block_shape[COL_AXIS]),
                                 strides=(1, 1))(block)
        flatten1 = Flatten()(pool2)
        # With is_classification=False the final Dense is linear, so the model
        # outputs an embedding instead of class probabilities.
        last_activation = None
        if is_classification:
            last_activation = "softmax"
        dense = Dense(units=num_outputs, kernel_initializer="he_normal",
                      activation=last_activation)(flatten1)
        model = Model(inputs=input, outputs=dense)
        return model
    @staticmethod
    def build_resnet_18(input_shape, num_outputs, is_classification=True):
        return ResnetBuilder.build(input_shape, num_outputs, basic_block, [2, 2, 2, 2], is_classification)
    @staticmethod
    def build_resnet_34(input_shape, num_outputs):
        return ResnetBuilder.build(input_shape, num_outputs, basic_block, [3, 4, 6, 3])
    @staticmethod
    def build_resnet_50(input_shape, num_outputs):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 6, 3])
    @staticmethod
    def build_resnet_101(input_shape, num_outputs):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 4, 23, 3])
    @staticmethod
    def build_resnet_152(input_shape, num_outputs):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck, [3, 8, 36, 3])
    @staticmethod
    def build_top_network(edge_model):
        """Re-wire the trained comparison head of a siamese `edge_model` into a
        standalone model taking a pair of concatenated embeddings.
        The head consists of TOP_HIDDEN dense+BN+ReLU triples plus the final
        Dense/softmax, i.e. the last 3 + TOP_HIDDEN * 3 layers of edge_model."""
        number_of_top_layers = 3 + TOP_HIDDEN * 3
        input = Input(shape=(2 * NUM_EMBEDDING,))
        output = edge_model.layers[-number_of_top_layers](input)
        for index in xrange(-number_of_top_layers + 1, 0):
            output = edge_model.layers[index](output)
        return Model(inputs=input, outputs=output)
    @staticmethod
    def build_bottom_network(edge_model, input_shape):
        """Extract the shared embedding branch (layer 3 of the siamese
        edge_model) as a standalone frame-to-embedding model."""
        channels, height, width = input_shape
        input = Input(shape=(height, width, channels))
        branch = edge_model.layers[3]
        output = branch(input)
        if NORMALIZATION_ON:
            output = Lambda(lambda x: K.l2_normalize(x, axis=1))(output)
        return Model(inputs=input, outputs=output)
    @staticmethod
    def build_siamese_resnet_18(input_shape, num_outputs):
        """Siamese ResNet-18: both 3-channel halves of the stacked input go
        through a shared embedding branch; the concatenated embeddings feed
        the dense comparison head built by _top_network."""
        channels, height, width = input_shape
        branch_channels = 3 #channels / 2
        branch_input_shape = (branch_channels, height, width)
        branch = ResnetBuilder.build_resnet_18(branch_input_shape, NUM_EMBEDDING, False)
        input = Input(shape=(height, width, channels))
        first_branch = branch(Lambda(lambda x: x[:, :, :, :3])(input))
        second_branch = branch(Lambda(lambda x: x[:, :, :, 3:])(input))
        if NORMALIZATION_ON:
            first_branch = Lambda(lambda x: K.l2_normalize(x, axis=1))(first_branch)
            second_branch = Lambda(lambda x: K.l2_normalize(x, axis=1))(second_branch)
        raw_result = concatenate([first_branch, second_branch])
        output = _top_network(raw_result)
        # Alternative (dot-product) head kept for reference:
        # raw_result = dot([first_branch, second_branch], axes=1)
        # result = Lambda(lambda x: (K.clip(x, 0.5, 1) - 0.5) * 2.0)(raw_result)
        # negated_result = Lambda(lambda x: 1 - x)(result)
        # output = concatenate([negated_result, result])
        return Model(inputs=input, outputs=output)
    @staticmethod
    def build_pixel_comparison_network(input_shape):
        """Baseline: probability of "same place" from the normalized dot
        product of the two flattened single-channel frames."""
        channels, height, width = input_shape
        input = Input(shape=(height, width, channels))
        first = Flatten()(Lambda(lambda x: x[:, :, :, :1])(input))
        second = Flatten()(Lambda(lambda x: x[:, :, :, 1:])(input))
        # second = Lambda(lambda x: -x)(second)
        # difference = add([first, second])
        # raw_result = Lambda(lambda x: K.mean(K.abs(x), axis=1, keepdims=True))(difference)
        # prob_zero = Lambda(lambda x: x / 255.0)(raw_result)
        # prob_one = Lambda(lambda x: 1.0 - x)(prob_zero)
        prob_one = dot([first, second], axes=1, normalize=True)
        prob_zero = Lambda(lambda x: 1.0 - x)(prob_one)
        output = concatenate([prob_zero, prob_one])
        return Model(inputs=input, outputs=output)
| 13,060 | 38.459215 | 109 | py |
SPTM | SPTM-master/src/common/util.py | #!/usr/bin/env python
import cPickle
import cv2
import numpy as np
import h5py
from vizdoom import *
import math
import os
import os.path
import sys
import random
import scipy.misc
from constants import *
from video_writer import *
import cv2
import os
import cPickle
import numpy as np
np.random.seed(DEFAULT_RANDOM_SEED)
import keras
import random
random.seed(DEFAULT_RANDOM_SEED)
def mean(numbers):
  """Arithmetic mean of a sequence; returns 0.0 for an empty sequence."""
  denominator = max(len(numbers), 1)
  return float(sum(numbers)) / denominator
def wait_idle(game, wait_idle_tics=WAIT_IDLE_TICS):
  """Let the agent stand still for the given number of game tics."""
  if wait_idle_tics <= 0:
    return
  game.make_action(STAY_IDLE, wait_idle_tics)
def game_make_action_wrapper(game, action, repeat):
  """Execute `action` for `repeat` tics, then idle for WAIT_IDLE_TICS.

  Returns the reward reported by the game for the action. The previous
  version dropped it and returned None, so StateRecorder.record() was
  logging None for every reward.
  """
  reward = game.make_action(action, repeat)
  wait_idle(game)
  return reward
def save_list_of_arrays_to_hdf5(input, prefix):
  """Stack a list of equally-shaped arrays and write them to <prefix><HDF5_NAME>.

  Uses a context manager so the file is closed even if writing raises
  (the previous version leaked the handle on error).
  """
  stacked = np.array(input)
  with h5py.File(prefix + HDF5_NAME, 'w') as h5f:
    h5f.create_dataset('dataset', data=stacked)
def load_array_from_hdf5(prefix):
  """Read the 'dataset' array back from <prefix><HDF5_NAME>.

  The [:] slice materializes the full dataset in memory before the file is
  closed by the context manager (previously the handle leaked on error).
  """
  with h5py.File(prefix + HDF5_NAME, 'r') as h5f:
    return h5f['dataset'][:]
class StateRecorder():
  """Logs screens, game variables, actions and rewards while driving the
  ViZDoom game, and can dump the log to disk."""
  def __init__(self, game):
    self.game = game
    self.game_variables = []  # per-step game_variables from the game state
    self.actions = []  # per-step action indices into ACTIONS_LIST
    self.rewards = []  # per-step return values of game_make_action_wrapper
    self.screen_buffers = []  # per-step screens, transposed to TF layout
  def record_buffers(self, state):
    self.screen_buffers.append(state.screen_buffer.transpose(VIZDOOM_TO_TF))
  '''records current state, then makes the provided action'''
  def record(self, action_index, repeat):
    state = self.game.get_state()
    self.record_buffers(state)
    self.game_variables.append(state.game_variables)
    # NOTE(review): game_make_action_wrapper as written returns None, so
    # self.rewards collects None entries — confirm whether real rewards
    # were intended here.
    r = game_make_action_wrapper(self.game, ACTIONS_LIST[action_index], repeat)
    self.actions.append(action_index)
    self.rewards.append(r)
  def save_recorded_buffers(self):
    save_list_of_arrays_to_hdf5(self.screen_buffers, SCREEN_BUFFERS_PATH)
  def save_recorded(self):
    # Screens go to HDF5; the lightweight per-step metadata is pickled.
    self.save_recorded_buffers()
    data = (self.game_variables,
            self.actions,
            self.rewards)
    with open(NAVIGATION_RECORDING_PATH, 'wb') as output_file:
      cPickle.dump(data, output_file)
def downsample(input, factor):
  """Halve the image `factor` times via Gaussian pyramid downsampling."""
  result = input
  for _ in range(factor):
    result = cv2.pyrDown(result)
  return result
def double_downsampling(input):
  """Downsample an image by 4x (two pyramid levels)."""
  halved = cv2.pyrDown(input)
  return cv2.pyrDown(halved)
def double_upsampling(input):
  """Upsample an image by 4x (two pyramid levels)."""
  doubled = cv2.pyrUp(input)
  return cv2.pyrUp(doubled)
def color2gray(input):
  """Convert an RGB image to single-channel grayscale."""
  gray = cv2.cvtColor(input, cv2.COLOR_RGB2GRAY)
  return gray
def doom_navigation_setup(seed, wad):
  """Create and initialize a DoomGame on `wad` with the default navigation
  config and the given RNG seed; returns the ready-to-use game object."""
  game = DoomGame()
  game.load_config(DEFAULT_CONFIG)
  game.set_doom_scenario_path(wad)
  game.set_seed(seed)
  game.init()
  return game
def calculate_distance_angle(start_coordinates, current_coordinates):
  """Euclidean distance between two (x, y, z, angle) poses and the smallest
  absolute angle difference in degrees (accounting for 360-degree wrap)."""
  deltas = [start_coordinates[axis] - current_coordinates[axis]
            for axis in range(3)]
  distance = math.sqrt(sum(delta ** 2 for delta in deltas))
  raw_angle = math.fabs(start_coordinates[3] - current_coordinates[3])
  angle = min(raw_angle, 360.0 - raw_angle)
  return distance, angle
def generator(x, y, batch_size, max_action_distance):
  """Infinite batch generator for action prediction training.

  Each sample pairs frame x[i] with a future frame x[i + d], with d drawn
  uniformly from [1, max_action_distance]; the two frames are concatenated
  along the channel axis and labeled with y[i].

  Fixes: the invariant x.shape[0] was recomputed every batch (hoisted out of
  the loop), and Python-2-only `xrange` is replaced by `range`, which
  behaves identically in a for loop on both Python 2 and 3.
  """
  number_of_samples = x.shape[0]
  while True:
    x_list = []
    y_list = []
    for _ in range(batch_size):
      choice = random.randint(0, number_of_samples - max_action_distance - 1)
      distance = random.randint(1, max_action_distance)
      pair = np.concatenate((x[choice], x[choice + distance]), axis=2)
      x_list.append(pair)
      y_list.append(y[choice])
    yield np.array(x_list), np.array(y_list)
def vertically_stack_image_list(input_image_list):
  """Stack images vertically, inserting a black separator strip after each."""
  pieces = []
  for image in input_image_list:
    separator = np.zeros([SHOW_BORDER, image.shape[1], SHOW_CHANNELS],
                         dtype=np.uint8)
    pieces.append(image)
    pieces.append(separator)
  return np.concatenate(pieces, axis=0)
def save_np_array_as_png(input, path):
  """Save an array as an image file at `path`, clipping values to [0, 255].

  NOTE(review): scipy.misc.toimage was deprecated and removed in newer SciPy
  releases — confirm the pinned SciPy version supports it."""
  scipy.misc.toimage(input, cmin=0.0, cmax=255.0).save(path)
class NavigationVideoWriter():
  """Writes side-by-side (current | goal) navigation videos.

  In the default (non-nonstop) mode, the first and last frames of a trial
  are held for several video frames and trials are separated by black
  delimiter frames."""
  def __init__(self, save_path, nonstop=False):
    self.nonstop = nonstop
    self.video_writer = VideoWriter(save_path,
                                    (2 * SHOW_WIDTH + SHOW_BORDER, SHOW_HEIGHT),
                                    mode='replace',
                                    framerate=FPS)
  def side_by_side(self, first, second):
    # Frames are stored at quarter resolution unless HIGH_RESOLUTION_VIDEO;
    # upsample them back before composing the output frame.
    if not HIGH_RESOLUTION_VIDEO:
      first = double_upsampling(first)
      second = double_upsampling(second)
    return np.concatenate((first,
                           np.zeros([SHOW_HEIGHT, SHOW_BORDER, SHOW_CHANNELS], dtype=np.uint8),
                           second), axis=1)
  def write(self, left, right, counter, deep_net_actions):
    # counter == 0 marks the trial start, counter + 1 >= deep_net_actions the
    # trial end; both get repeated "pause" frames in non-nonstop mode.
    side_by_side_screen = self.side_by_side(left, right)
    if not self.nonstop:
      if counter == 0:
        for _ in xrange(START_PAUSE_FRAMES):
          self.video_writer.add_frame(side_by_side_screen)
      elif counter + 1 < deep_net_actions:
        self.video_writer.add_frame(side_by_side_screen)
      else:
        for _ in xrange(END_PAUSE_FRAMES):
          self.video_writer.add_frame(side_by_side_screen)
        for _ in xrange(DELIMITER_FRAMES):
          self.video_writer.add_frame(np.zeros_like(side_by_side_screen))
    else:
      self.video_writer.add_frame(side_by_side_screen)
  def close(self):
    self.video_writer.close()
def make_deep_action(current_screen, goal_screen, model, game, repeat, randomized):
  """Pick an action from the policy network given current and goal screens,
  execute it in the game, and return (action_index, probabilities, screen)."""
  stacked = np.concatenate((current_screen, goal_screen), axis=2)
  batch = np.expand_dims(stacked, axis=0)
  action_probabilities = np.squeeze(model.predict(batch, batch_size=1))
  if randomized:
    # Sample according to the predicted action distribution.
    action_index = np.random.choice(len(ACTIONS_LIST), p=action_probabilities)
  else:
    action_index = np.argmax(action_probabilities)
  game_make_action_wrapper(game, ACTIONS_LIST[action_index], repeat)
  return action_index, action_probabilities, current_screen
def current_make_deep_action(goal_screen, model, game, repeat, randomized):
  """Like make_deep_action, but reads the current screen from the game."""
  screen = game.get_state().screen_buffer.transpose(VIZDOOM_TO_TF)
  return make_deep_action(screen, goal_screen, model, game, repeat, randomized)
def get_deep_prediction(current_screen, goal_screen, model):
  """Run `model` on the channel-concatenated (current, goal) pair and
  return the squeezed prediction."""
  pair = np.concatenate((current_screen, goal_screen), axis=2)
  batch = np.expand_dims(pair, axis=0)
  return np.squeeze(model.predict(batch, batch_size=1))
def current_get_deep_prediction(goal_screen, model, game):
  """Run get_deep_prediction on the game's current screen."""
  screen = game.get_state().screen_buffer.transpose(VIZDOOM_TO_TF)
  return get_deep_prediction(screen, goal_screen, model)
def explore(game, number_of_actions):
  """Simple exploration policy: turn in one random direction for a random
  number of steps, then move forward for the remaining steps."""
  turn_left = random.random() > 0.5
  start_moving_straight = random.randint(0, number_of_actions)
  for step in range(number_of_actions):
    if step >= start_moving_straight:
      action_index = INVERSE_ACTION_NAMES_INDEX['MOVE_FORWARD']
    elif turn_left:
      action_index = INVERSE_ACTION_NAMES_INDEX['TURN_LEFT']
    else:
      action_index = INVERSE_ACTION_NAMES_INDEX['TURN_RIGHT']
    game_make_action_wrapper(game, ACTIONS_LIST[action_index], TEST_REPEAT)
def get_distance(first_point, second_point):
  """Euclidean distance between two 2D points (extra coordinates ignored)."""
  dx = first_point[0] - second_point[0]
  dy = first_point[1] - second_point[1]
  return math.sqrt(dx ** 2 + dy ** 2)
| 7,338 | 32.976852 | 95 | py |
SPTM | SPTM-master/src/test/test_setup.py | import sys
sys.path.append('..')
from common import *
from vizdoom import *
import cv2
import numpy as np
np.random.seed(TEST_RANDOM_SEED)
import keras
import random
random.seed(TEST_RANDOM_SEED)
def test_setup(wad):
  """Start a navigation game on `wad` with the fixed test seed and let the
  environment settle before the trial begins."""
  new_game = doom_navigation_setup(TEST_RANDOM_SEED, wad)
  wait_idle(new_game, WAIT_BEFORE_START_TICS)
  return new_game
# Cap TensorFlow's GPU memory allocation so several test processes can
# share a single GPU; the fraction comes from the test constants.
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = TEST_MEMORY_FRACTION
set_session(tf.Session(config=config))
| 570 | 23.826087 | 73 | py |
SPTM | SPTM-master/src/test/navigator.py | from sptm import *
def check_if_close(first_point, second_point):
  """Return True when two 2D points lie within GOAL_DISTANCE_ALLOWANCE.

  Compares squared distances, avoiding the sqrt. Fixed the
  `if cond: return True else: return False` anti-pattern by returning the
  comparison directly.
  """
  squared_distance = ((first_point[0] - second_point[0]) ** 2 +
                      (first_point[1] - second_point[1]) ** 2)
  return squared_distance <= GOAL_DISTANCE_ALLOWANCE ** 2
class Navigator:
  """Drives a ViZDoom agent through exploration and goal-directed
  navigation trials on top of an SPTM memory graph, while recording
  screens, trajectories and videos."""
  def __init__(self, exploration_model_directory):
    self.exploration_model_directory = exploration_model_directory
    # Locomotion network: 3 stacked frames in, one action class out.
    self.action_model = load_keras_model(3, len(ACTIONS_LIST), ACTION_MODEL_WEIGHTS_PATH)
    self.memory = SPTM()
    self.trial_index = -1
    print 'Navigator ready!'
  def get_screens_and_coordinates(self):
    return self.screens, self.coordinates
  def setup_video(self, movie_path):
    self.record_video = True
    self.navigation_video_writer = NavigationVideoWriter(os.path.join(EVALUATION_PATH, movie_path),
                                                         nonstop=True)
  def setup_trajectories(self, trajectories_name, box):
    self.record_trajectories = True
    self.trajectory_plotter = TrajectoryPlotter(os.path.join(EVALUATION_PATH, trajectories_name), *box)
  def common_setup(self, game):
    # Reset all per-trial bookkeeping.
    self.game = game
    self.screens = []
    self.coordinates = []
    self.steps = 0
    self.just_started = True
    self.termination = False
    self.record_video = False
    self.record_trajectories = False
  def setup_exploration(self, step_budget, game, environment, box):
    self.looking_for_goal = False
    self.step_budget = step_budget
    self.common_setup(game)
    # self.setup_video('movie.mov')
    self.setup_trajectories(environment + '_exploration.pdf', box)
    self.goal_frame = None
  def setup_navigation_test(self, step_budget, game, goal_location, keyframes, keyframe_coordinates, keyframe_actions, goal_frame, movie_path, box, environment):
    """Prepare a navigation trial: copy the walkthrough keyframes, build the
    memory graph and localize the goal; returns the keyframe index where
    the goal was localized."""
    self.trial_index += 1
    self.looking_for_goal = True
    self.step_budget = step_budget
    self.common_setup(game)
    self.goal_location = goal_location
    self.setup_video(movie_path)
    self.setup_trajectories(movie_path + '.pdf', box)
    # Copy the inputs: process_memory appends the goal frame to them.
    self.keyframes = keyframes[:]
    self.keyframe_coordinates = keyframe_coordinates[:]
    self.keyframe_actions = keyframe_actions[:]
    self.goal_frame = goal_frame
    goal_localization_keyframe_index = self.process_memory(environment)
    self.not_localized_count = 0
    return goal_localization_keyframe_index
  def record_all(self, first_frame, second_frame, x, y):
    if self.record_video:
      self.navigation_video_writer.write(first_frame, second_frame, 1, 1)
    if self.record_trajectories:
      self.trajectory_plotter.add_point([x, y])
  def process_memory(self, environment):
    """Build the SPTM graph from the keyframes, localize the goal frame in
    it and precompute shortest paths to the (appended) goal node."""
    self.memory.plot_shortest_path = False #make True for showing the shortest path from the starting position
    self.memory.environment = environment
    self.memory.trial_index = self.trial_index
    self.memory.set_shortcuts_cache_file(environment)
    self.memory.build_graph(self.keyframes, self.keyframe_coordinates)
    best_index, best_probability = self.memory.set_goal(self.goal_frame, self.goal_location, self.keyframe_coordinates)
    print 'Goal localization confidence:', best_probability
    self.keyframes.append(self.goal_frame)
    self.keyframe_coordinates.append(self.goal_location) #NOTE: these are not the exact goal frame coordinates, but close
    self.memory.compute_shortest_paths(len(self.keyframes) - 1)
    return best_index
  def save_recordings(self):
    if self.record_video:
      self.navigation_video_writer.close()
    if self.record_trajectories:
      self.trajectory_plotter.save()
  def get_steps(self):
    return self.steps
  def set_intermediate_reachable_goal(self):
    """Localize the agent in memory and pick the next waypoint keyframe;
    falls back to the goal frame itself when localization fails."""
    current_screen = self.game.get_state().screen_buffer.transpose(VIZDOOM_TO_TF)
    self.target_index, self.nn = self.memory.find_intermediate_reachable_goal(current_screen, self.game.get_state().game_variables, self.keyframe_coordinates)
    if self.target_index is None:
      self.target_index = len(self.keyframes) - 1
      self.not_localized_count = 1
    else:
      self.not_localized_count = 0
  def record_all_during_repeat(self, right_image):
    # Log the last TEST_REPEAT recorded screens/coordinates next to the
    # current target frame (or next to themselves if no target is given).
    for index in xrange(-TEST_REPEAT, 0):
      left_image = self.screens[index]
      if right_image is None:
        right_image = left_image
      self.record_all(left_image,
                      right_image,
                      self.coordinates[index][0],
                      self.coordinates[index][1])
  def random_explore_step(self):
    if self.check_frozen_with_repeat():
      return
    self._random_explore_step_with_repeat()
    self.record_all_during_repeat(self.keyframes[-1])
  def policy_explore_step(self, walkthrough=False):
    if self.check_frozen_with_repeat():
      return
    self._policy_explore_step_with_repeat()
    target_frame = None
    if not walkthrough:
      target_frame = self.keyframes[-1]
    self.record_all_during_repeat(target_frame)
  def policy_navigation_step(self, teach_and_repeat=False):
    """One navigation macro-step: re-localize, then either run the policy
    toward the waypoint (DEEP_NET_ACTIONS times) or, if not localized,
    take 5 exploration actions."""
    self.set_intermediate_reachable_goal()
    if self.not_localized_count == 0:
      action_function = self._align_step_with_repeat
      action_function_arguments = (teach_and_repeat,)
      number_of_actions = DEEP_NET_ACTIONS
    else:
      action_function = self._policy_explore_step_with_repeat
      action_function_arguments = ()
      number_of_actions = 5
    for counter in xrange(number_of_actions):
      if self.check_frozen_with_repeat():
        break
      action_function(*action_function_arguments)
      self.record_all_during_repeat(self.keyframes[self.target_index])
  def log_navigation_state(self):
    self.screens.append(self.game.get_state().screen_buffer.transpose(VIZDOOM_TO_TF))
    self.coordinates.append(self.game.get_state().game_variables)
  def _align_step_with_repeat(self, teach_and_repeat):
    """Choose an action toward the current target keyframe — either by the
    locomotion network (on previous/current/target frames) or, in
    teach-and-repeat mode, by replaying the recorded walkthrough actions —
    and execute it for TEST_REPEAT tics, logging every tic."""
    self.log_navigation_state()
    if self.just_started:
      # Pad the history so screens[-1 - TEST_REPEAT] is valid below.
      for _ in xrange(TEST_REPEAT):
        self.screens.append(self.screens[-1])
        self.coordinates.append(self.coordinates[-1])
      self.just_started = False
    if not teach_and_repeat:
      first_arg = self.screens[-1 - TEST_REPEAT]
      second_arg = self.screens[-1]
      third_arg = self.keyframes[self.target_index]
      if HIGH_RESOLUTION_VIDEO:
        first_arg = double_downsampling(first_arg)
        second_arg = double_downsampling(second_arg)
        third_arg = double_downsampling(third_arg)
      x = np.expand_dims(np.concatenate((first_arg,
                                         second_arg,
                                         third_arg), axis=2), axis=0)
      action_probabilities = np.squeeze(self.action_model.predict(x,
                                                                  batch_size=1))
      action_index = np.random.choice(len(ACTIONS_LIST), p=action_probabilities)
      action = ACTIONS_LIST[action_index]
    else:
      # Teach-and-repeat baseline: replay (or invert) the recorded action
      # at the localized node, with TEACH_AND_REPEAT_RANDOMIZATION chance
      # of acting randomly instead.
      if np.random.rand() < (1.0 - TEACH_AND_REPEAT_RANDOMIZATION):
        if self.target_index > self.nn and self.target_index < len(self.keyframe_actions):
          action = self.keyframe_actions[self.nn]
        elif self.target_index < self.nn:
          action = inverse_action(self.keyframe_actions[self.nn - 1])
        else:
          action_index = np.random.choice(len(ACTIONS_LIST))
          action = ACTIONS_LIST[action_index]
      else:
        action_index = np.random.choice(len(ACTIONS_LIST))
        action = ACTIONS_LIST[action_index]
    for repeat_index in xrange(TEST_REPEAT):
      if repeat_index > 0:
        self.log_navigation_state()
      game_make_action_wrapper(self.game, action, 1)
      self.steps += 1
  def _random_explore_step_with_repeat(self):
    action_index = random.randint(0, len(ACTIONS_LIST) - 1)
    self.game.set_action(ACTIONS_LIST[action_index])
    for repeat_index in xrange(TEST_REPEAT):
      self.log_navigation_state()
      self.game.advance_action(1, True)
      self.steps += 1
  def _policy_explore_step_with_repeat(self):
    # Currently identical to random exploration.
    self._random_explore_step_with_repeat()
  def check_frozen_with_repeat(self):
    # Stop when the goal is reached or the next repeat would exceed the
    # step budget; sets the termination flag as a side effect.
    if (self.check_goal_reached() or
        self.steps + TEST_REPEAT > self.step_budget):
      self.termination = True
      return True
    else:
      return False
  def check_termination(self):
    return self.termination
  def check_goal_reached(self):
    if self.looking_for_goal:
      current_coordinates = self.game.get_state().game_variables
      return check_if_close(current_coordinates, self.goal_location)
    else:
      return False
| 8,430 | 38.397196 | 161 | py |
SPTM | SPTM-master/src/test/sptm.py | from test_setup import *
import os.path
from numpy import mean
from numpy import median
import networkx as nx
from trajectory_plotter import *
def load_keras_model(number_of_input_frames, number_of_actions, path, load_method=resnet.ResnetBuilder.build_resnet_18):
  """Build a network with `load_method`, load its weights from `path` and
  compile it for classification."""
  model = load_method((number_of_input_frames * NET_CHANNELS, NET_HEIGHT, NET_WIDTH),
                      number_of_actions)
  model.load_weights(path)
  model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
  return model
def top_number_to_threshold(n, top_number, values):
  """Value such that roughly `top_number` of the `n` entries in `values`
  lie at or above it (computed via the matching percentile)."""
  top_number = min(top_number, n)
  percent = (n - top_number) * 100 / float(n)
  return np.percentile(values, percent)
def sieve(shortcuts, top_number):
  """Keep the (approximately) `top_number` most confident shortcut rows.

  Column 0 of `shortcuts` holds the confidence; rows at or above the
  percentile-based threshold are kept, so ties can yield slightly more
  than `top_number` rows."""
  if top_number == 0:
    return []
  probabilities = shortcuts[:, 0]
  n = shortcuts.shape[0]
  threshold = top_number_to_threshold(n, top_number, probabilities)
  print 'Confidence threshold for top', top_number, 'out of', n, ':', threshold
  sieved_shortcut_indexes = []
  for index in xrange(n):
    if probabilities[index] >= threshold:
      sieved_shortcut_indexes.append(index)
  return shortcuts[sieved_shortcut_indexes]
class InputProcessor:
  """Wraps the retrieval (edge) network and a memory buffer of keyframes.

  Supports three architectures: a joint two-stream network, a siamese
  network (separate embedding bottom + comparison top) and a pixel
  comparison baseline. For the siamese case memory frames are stored as
  precomputed embeddings; otherwise as preprocessed images."""
  def __init__(self, ):
    if EDGE_NETWORK in [SIAMESE_NETWORK, JOINT_NETWORK]:
      self.edge_model = load_keras_model(2, 2, EDGE_MODEL_WEIGHTS_PATH, EDGE_NETWORK)
      if EDGE_NETWORK == SIAMESE_NETWORK:
        self.siamese = True
        self.bottom_network = resnet.ResnetBuilder.build_bottom_network(self.edge_model, (NET_CHANNELS, NET_HEIGHT, NET_WIDTH))
        self.top_network = resnet.ResnetBuilder.build_top_network(self.edge_model)
      else:
        self.siamese = False
    elif EDGE_NETWORK == PIXEL_COMPARISON_NETWORK:
      self.edge_model = PIXEL_COMPARISON_NETWORK((2 * PIXEL_COMPARISON_CHANNELS,
                                                  PIXEL_COMPARISON_HEIGHT,
                                                  PIXEL_COMPARISON_WIDTH))
      self.siamese = False
    else:
      raise Exception('Unknown architecture')
  def preprocess_input(self, input):
    # Bring high-resolution frames down to network resolution.
    if HIGH_RESOLUTION_VIDEO:
      return double_downsampling(input)
    else:
      return input
  def prepare_for_pixel_comparison(self, frame):
    """Grayscale + downsample a frame and optionally normalize each local
    window to zero mean / unit variance; returns shape (H, W, 1)."""
    frame = self.preprocess_input(frame)
    frame = color2gray(frame)
    downsampled = downsample(frame, PIXEL_COMPARISON_DOWNSAMPLING_FACTOR)
    if PIXEL_COMPARISON_LOCAL_NORMALIZATION:
      # NOTE(review): integer division here — assumes the image dimensions
      # are multiples of PIXEL_COMPARISON_LOCAL_WINDOW (trailing remainder
      # pixels are left unnormalized).
      zero_steps = downsampled.shape[0] / PIXEL_COMPARISON_LOCAL_WINDOW
      one_steps = downsampled.shape[1] / PIXEL_COMPARISON_LOCAL_WINDOW
      for zero in xrange(zero_steps):
        for one in xrange(one_steps):
          zero_start = zero * PIXEL_COMPARISON_LOCAL_WINDOW
          zero_end = (zero + 1) * PIXEL_COMPARISON_LOCAL_WINDOW
          one_start = one * PIXEL_COMPARISON_LOCAL_WINDOW
          one_end = (one + 1) * PIXEL_COMPARISON_LOCAL_WINDOW
          crop = downsampled[zero_start:zero_end, one_start:one_end]
          mean = np.mean(crop)
          std = np.std(crop)
          downsampled[zero_start:zero_end, one_start:one_end] = (crop - mean) / (std + 0.00000001)
    # cv2.imwrite('to_check.png', downsampled)
    return np.expand_dims(downsampled, axis=2)
  def set_memory_buffer(self, keyframes):
    """Precompute the prediction tensor for all keyframes; the first half
    of each row is a placeholder later overwritten by the query input."""
    keyframes = [self.preprocess_input(keyframe) for keyframe in keyframes]
    if EDGE_NETWORK == PIXEL_COMPARISON_NETWORK:
      keyframes = [self.prepare_for_pixel_comparison(frame) for frame in keyframes]
    if not self.siamese:
      list_to_predict = []
      for keyframe in keyframes:
        x = np.concatenate((keyframes[0], keyframe), axis=2)
        list_to_predict.append(x)
      self.tensor_to_predict = np.array(list_to_predict)
    else:
      # Embed all keyframes once; pairs are built in embedding space.
      memory_codes = self.bottom_network.predict(np.array(keyframes))
      list_to_predict = []
      for index in xrange(len(keyframes)):
        x = np.concatenate((memory_codes[0], memory_codes[index]), axis=0)
        list_to_predict.append(x)
      self.tensor_to_predict = np.array(list_to_predict)
  def append_to_memory_buffer(self, keyframe):
    keyframe = self.preprocess_input(keyframe)
    if EDGE_NETWORK == PIXEL_COMPARISON_NETWORK:
      keyframe = self.prepare_for_pixel_comparison(keyframe)
    expanded_keyframe = np.expand_dims(keyframe, axis=0)
    if not self.siamese:
      x = np.concatenate((expanded_keyframe, expanded_keyframe), axis=3)
    else:
      memory_code = self.bottom_network.predict(expanded_keyframe)
      x = np.concatenate((memory_code, memory_code), axis=1)
    self.tensor_to_predict = np.concatenate((self.tensor_to_predict, x), axis=0)
  def predict_single_input(self, input):
    """Score `input` against every memory frame; returns the per-frame
    probability of class 1 ("close")."""
    input = self.preprocess_input(input)
    if EDGE_NETWORK == PIXEL_COMPARISON_NETWORK:
      input = self.prepare_for_pixel_comparison(input)
    if not self.siamese:
      # Overwrite the first-half placeholder channels with the query image.
      for index in xrange(self.tensor_to_predict.shape[0]):
        self.tensor_to_predict[index][:, :, :(input.shape[2])] = input
      probabilities = self.edge_model.predict(self.tensor_to_predict,
                                              batch_size=TESTING_BATch_SIZE)
    else:
      input_code = np.squeeze(self.bottom_network.predict(np.expand_dims(input, axis=0), batch_size=1))
      for index in xrange(self.tensor_to_predict.shape[0]):
        self.tensor_to_predict[index][0:(input_code.shape[0])] = input_code
      probabilities = self.top_network.predict(self.tensor_to_predict,
                                               batch_size=TESTING_BATCH_SIZE)
    return probabilities[:, 1]
  def get_memory_size(self):
    return self.tensor_to_predict.shape[0]
class SPTM:
  """Semi-Parametric Topological Memory: a graph over experience keyframes.

  Nodes are keyframes held by the InputProcessor's memory buffer; edges link
  temporally consecutive frames plus visually similar "shortcut" pairs.
  Navigation localizes the current observation in the graph and follows
  shortest paths towards a goal node.

  NOTE(review): Python 2 module (print statements, xrange, integer
  division); relies on module-level constants and helpers imported elsewhere
  (MEMORY_MAX_FRAMES, sieve, median, get_distance, top_number_to_threshold,
  nx, np, os, ...). Attributes like `plot_shortest_path`, `trial_index` and
  `environment` are presumably assigned by external driver code - confirm.
  """
  def __init__(self):
    self.input_processor = InputProcessor()
  def set_shortcuts_cache_file(self, environment):
    # Cache file name encodes environment, subsampling rate and frame budget.
    # if no limit, MEMORY_MAX_FRAMES is None
    if MEMORY_MAX_FRAMES is None:
      max_frames = -1
    else:
      max_frames = MEMORY_MAX_FRAMES
    self.shortcuts_cache_file = SHORTCUTS_CACHE_FILE_TEMPLATE % (environment, MEMORY_SUBSAMPLING, max_frames)
  def set_memory_buffer(self, keyframes):
    self.input_processor.set_memory_buffer(keyframes)
  def append_to_memory_buffer(self, keyframe):
    self.input_processor.append_to_memory_buffer(keyframe)
  def predict_single_input(self, input):
    # Per-keyframe similarity scores of `input` against the whole memory.
    return self.input_processor.predict_single_input(input)
  def get_memory_size(self):
    return self.input_processor.get_memory_size()
  def add_double_sided_edge(self, first, second):
    self.graph.add_edge(first, second)
    self.graph.add_edge(second, first)
  def add_double_forward_biased_edge(self, first, second):
    # The backward edge gets a huge weight so weighted shortest paths
    # strongly prefer moving forward along the trajectory.
    self.graph.add_edge(first, second)
    self.graph.add_edge(second, first, {'weight' : 1000000000})
  def smooth_shortcuts_matrix(self, shortcuts_matrix, keyframe_coordinates):
    """Symmetrize the similarity matrix and median-filter along the diagonal.

    Returns an np.array of (quality, first, second, distance) rows for all
    index pairs further apart than MIN_SHORTCUT_DISTANCE. `distance` is the
    ground-truth metric distance (evaluation/logging only).
    """
    # Average (i, j) with (j, i); only the upper triangle is read afterwards.
    for first in xrange(len(shortcuts_matrix)):
      for second in xrange(first + 1, len(shortcuts_matrix)):
        shortcuts_matrix[first][second] = (shortcuts_matrix[first][second] +
                                           shortcuts_matrix[second][first]) / 2.0
    shortcuts = []
    for first in xrange(len(shortcuts_matrix)):
      for second in xrange(first + 1 + MIN_SHORTCUT_DISTANCE, len(shortcuts_matrix)):
        # Median over a window sliding along the main diagonal suppresses
        # one-off spurious visual matches.
        values = []
        for shift in xrange(-SHORTCUT_WINDOW, SHORTCUT_WINDOW + 1):
          first_shifted = first + shift
          second_shifted = second + shift
          if first_shifted < len(shortcuts_matrix) and second_shifted < len(shortcuts_matrix) and first_shifted >= 0 and second_shifted >= 0:
            values.append(shortcuts_matrix[first_shifted][second_shifted])
        quality = median(values)
        distance = get_distance(keyframe_coordinates[first],
                                keyframe_coordinates[second])
        shortcuts.append((quality, first, second, distance))
    return np.array(shortcuts)
  def compute_shortcuts(self, keyframes, keyframe_coordinates):
    """Compute (or load from the .npy cache) shortcut edges for the memory."""
    self.set_memory_buffer(keyframes)
    if not os.path.isfile(self.shortcuts_cache_file):
      # Row `first` holds the similarity of keyframe `first` to all frames.
      shortcuts_matrix = []
      for first in xrange(len(keyframes)):
        probabilities = self.predict_single_input(keyframes[first])
        shortcuts_matrix.append(probabilities)
        print 'Finished:', float(first * 100) / float(len(keyframes)), '%'
      shortcuts = self.smooth_shortcuts_matrix(shortcuts_matrix, keyframe_coordinates)
      # Cache a generous candidate pool; runtime selection narrows it below.
      shortcuts = sieve(shortcuts, LARGE_SHORTCUTS_NUMBER)
      np.save(self.shortcuts_cache_file, shortcuts)
    else:
      shortcuts = np.load(self.shortcuts_cache_file)
    self.shortcuts = sieve(shortcuts, SMALL_SHORTCUTS_NUMBER)
  def get_number_of_shortcuts(self):
    return len(self.shortcuts)
  def get_shortcut(self, index):
    # Columns 1 and 2 of a shortcut row are the endpoint keyframe indices.
    return (int(self.shortcuts[index, 1]), int(self.shortcuts[index, 2]))
  def get_shortcuts(self):
    return self.shortcuts
  def build_graph(self, keyframes, keyframe_coordinates):
    """Build the topological graph: a temporal chain plus shortcut edges."""
    self.set_memory_buffer(keyframes)
    memory_size = self.get_memory_size()
    self.graph = nx.Graph()
    self.graph.add_nodes_from(range(memory_size))
    for first in xrange(memory_size - 1):
      # self.add_double_forward_biased_edge(first, first + 1)
      self.add_double_sided_edge(first, first + 1)
    self.compute_shortcuts(keyframes, keyframe_coordinates)
    for index in xrange(self.get_number_of_shortcuts()):
      edge = self.get_shortcut(index)
      first, second = edge
      assert abs(first - second) > MIN_SHORTCUT_DISTANCE
      self.add_double_sided_edge(*edge)
  def find_nn(self, input):
    # Nearest neighbour = keyframe with the highest similarity probability.
    probabilities = self.predict_single_input(input)
    best_index = np.argmax(probabilities)
    best_probability = np.max(probabilities)
    return best_index, best_probability, probabilities
  def set_goal(self, goal_frame, real_goal_coordinates, keyframe_coordinates):
    """Attach the goal frame to the graph as a new node.

    The real goal coordinates are used for logging/evaluation only.
    Returns (anchor keyframe index, anchor probability).
    """
    self.step = 0
    best_index, probabilities, nns = self.find_knn_median_threshold(goal_frame, NUMBER_OF_NEAREST_NEIGHBOURS, 0.0)
    print nns
    print [probabilities[nn] for nn in nns]
    print [get_distance(real_goal_coordinates, keyframe_coordinates[nn]) for nn in nns]
    print [keyframe_coordinates[nn] for nn in nns]
    print real_goal_coordinates
    best_probability = 1.0
    if best_index is None:
      # Fall back to the plain argmax if no median-kNN anchor was found.
      best_index, best_probability, _ = self.find_nn(goal_frame)
    memory_size = self.get_memory_size()
    # The goal becomes node `memory_size`, linked to its visual anchor.
    goal_index = memory_size
    self.graph.add_node(goal_index)
    edge = (best_index, goal_index)
    self.add_double_sided_edge(*edge)
    self.append_to_memory_buffer(goal_frame)
    print 'Real goal distance:', get_distance(real_goal_coordinates, keyframe_coordinates[best_index])
    self.smoothed_memory = None
    self.last_nn = None
    return best_index, best_probability
  def compute_shortest_paths(self, graph_goal):
    # Precompute shortest paths from every node to the goal node.
    self.shortest_paths = nx.shortest_path(self.graph, target=graph_goal, weight='weight')
    self.shortest_distances = [len(value) - 1 for value in self.shortest_paths.values()]
    print 'Mean shortest_distances to goal:', mean(self.shortest_distances)
    print 'Median shortest_distances to goal:', median(self.shortest_distances)
  def get_shortest_paths_and_distances(self):
    return self.shortest_paths, self.shortest_distances
  def _find_neighbours_by_threshold(self, threshold, probabilities):
    # Indices of all keyframes whose similarity reaches `threshold`.
    nns = []
    for index, probability in enumerate(probabilities):
      if probability >= threshold:
        nns.append(index)
    return nns
  def find_neighbours_by_threshold(self, input, threshold):
    probabilities = self.predict_single_input(input)
    return self._find_neighbours_by_threshold(threshold, probabilities)
  def find_knn(self, input, k):
    # Top-k neighbours via an adaptive threshold on the probabilities.
    probabilities = self.predict_single_input(input)
    threshold = top_number_to_threshold(self.get_memory_size(),
                                        k,
                                        probabilities)
    return self._find_neighbours_by_threshold(threshold, probabilities)
  def find_knn_median_threshold(self, input, k, threshold):
    """Return the median-index neighbour among the top-k above `threshold`."""
    probabilities = self.predict_single_input(input)
    knn_threshold = top_number_to_threshold(self.get_memory_size(),
                                            k,
                                            probabilities)
    final_threshold = max([threshold, knn_threshold])
    nns = self._find_neighbours_by_threshold(final_threshold, probabilities)
    nns.sort()
    if nns:
      # Python 2 integer division: picks the middle neighbour by index,
      # which is robust to outliers at either end.
      nn = nns[len(nns) / 2]
      return nn, probabilities, nns
    else:
      return None, probabilities, nns
  def find_nn_threshold(self, input, threshold):
    nn, probability, probabilities = self.find_nn(input)
    if probability < threshold:
      return None, None
    else:
      return nn, probabilities
  def find_nn_on_last_shortest_path(self, input):
    """Re-localize only along the prefix of the previous shortest path."""
    if self.last_nn is None:
      return None, None
    probabilities = self.predict_single_input(input)
    last_shortest_path_prefix = self.shortest_paths[self.last_nn][:(MAX_LOOK_AHEAD + 1)]
    path_probabilities = np.array([probabilities[index] for index in last_shortest_path_prefix])
    best_look_ahead = np.argmax(path_probabilities)
    best_probability = np.max(path_probabilities)
    if best_probability < WEAK_INTERMEDIATE_REACHABLE_GOAL_THRESHOLD:
      return None, None
    return last_shortest_path_prefix[best_look_ahead], probabilities
  def find_smoothed_nn(self, input):
    # Prefer temporally smoothed localization; fall back to global kNN.
    nn = None
    if SMOOTHED_LOCALIZATION:
      nn, probabilities = self.find_nn_on_last_shortest_path(input)
    if nn is None:
      nn, probabilities, _ = self.find_knn_median_threshold(input, NUMBER_OF_NEAREST_NEIGHBOURS, INTERMEDIATE_REACHABLE_GOAL_THRESHOLD)
    return nn, probabilities
  def select_IRG_on_shortest_path(self, nn, probabilities):
    """Pick the intermediate reachable goal: the furthest path node that still
    looks reachable according to `probabilities`."""
    shortest_path = self.shortest_paths[nn]
    print 'Current shortest path:', len(shortest_path) - 1
    if self.plot_shortest_path:
      # NOTE(review): `keyframe_coordinates` and `current_coordinates` are
      # not defined in this scope - this branch raises NameError if
      # self.plot_shortest_path is ever True. They presumably should be
      # passed through from find_intermediate_reachable_goal; confirm.
      plotter = TrajectoryPlotter(os.path.join(EVALUATION_PATH, 'shortest_path%d_%d.pdf' % (self.trial_index, self.step)), *TEST_SETUPS[self.environment].box)
      self.step += 1
      for point in shortest_path:
        plotter.add_point(keyframe_coordinates[point][:2])
      plotter.add_edge((current_coordinates[0],
                        current_coordinates[0],
                        current_coordinates[1],
                        current_coordinates[1]))
      plotter.save()
      self.plot_shortest_path = False
    if SMOOTHED_LOCALIZATION:
      # Search a bounded look-ahead window for the furthest reachable node.
      upper_limit = len(shortest_path) - 1
      valid_min_look_ahead = min(MIN_LOOK_AHEAD, upper_limit)
      valid_max_look_ahead = min(MAX_LOOK_AHEAD, upper_limit)
      best_look_ahead = valid_min_look_ahead
      for look_ahead in xrange(valid_min_look_ahead,
                               valid_max_look_ahead + 1):
        index = shortest_path[look_ahead]
        if probabilities[index] >= INTERMEDIATE_REACHABLE_GOAL_THRESHOLD:
          best_look_ahead = look_ahead
    else:
      # Unbounded: take the furthest reachable node on the whole path.
      best_look_ahead = 0
      for look_ahead, index in enumerate(shortest_path):
        if probabilities[index] >= INTERMEDIATE_REACHABLE_GOAL_THRESHOLD:
          best_look_ahead = look_ahead
    IRG = shortest_path[best_look_ahead]
    print 'Found IRG:', IRG
    return IRG
  def find_intermediate_reachable_goal(self, input, current_coordinates, keyframe_coordinates):
    """Localize the agent, then return (IRG node, localized node) or (None, None)."""
    nn, probabilities = self.find_smoothed_nn(input)
    self.last_nn = nn
    if nn is None:
      print 'Found no IRG!'
      return None, None
    else:
      return self.select_IRG_on_shortest_path(nn, probabilities), nn
| 15,363 | 42.036415 | 158 | py |
SPTM | SPTM-master/src/train/resave_weights.py | from train_setup import *
# Re-save one trained model's weights per invocation: keras has issues
# loading more than one model in the same process.
if __name__ == '__main__':
  mode = sys.argv[1]
  # Map each resave mode to its (full-model path, weights-only path) pair.
  if mode == 'action':
    source_path, weights_path = ACTION_MODEL_PATH, ACTION_MODEL_WEIGHTS_PATH
  elif mode == 'edge':
    source_path, weights_path = EDGE_MODEL_PATH, EDGE_MODEL_WEIGHTS_PATH
  else:
    raise Exception('Unknown resave mode!')
  keras.models.load_model(source_path).save_weights(weights_path)
| 460 | 31.928571 | 54 | py |
SPTM | SPTM-master/src/train/train_edge_predictor.py | from train_setup import *
def data_generator():
  """Endless generator of training batches for the edge (reachability) network.

  Each sample is a pair of frames concatenated along the channel axis with a
  binary label: 1 if the frames are at most MAX_ACTION_DISTANCE random-walk
  steps apart, 0 if they are at least
  NEGATIVE_SAMPLE_MULTIPLIER * MAX_ACTION_DISTANCE steps apart.
  """
  game = doom_navigation_setup(DEFAULT_RANDOM_SEED, TRAIN_WAD)
  while True:
    x_result = []
    y_result = []
    for episode in xrange(EDGE_EPISODES):
      # Start each episode on a random-texture map to diversify visuals.
      game.set_doom_map(MAP_NAME_TEMPLATE % random.randint(MIN_RANDOM_TEXTURE_MAP_INDEX,
                                                           MAX_RANDOM_TEXTURE_MAP_INDEX))
      game.new_episode()
      x = []
      # Record a random-walk trajectory of MAX_CONTINUOUS_PLAY frames.
      for _ in xrange(MAX_CONTINUOUS_PLAY):
        current_x = game.get_state().screen_buffer.transpose(VIZDOOM_TO_TF)
        action_index = random.randint(0, ACTION_CLASSES - 1)
        game_make_action_wrapper(game, ACTIONS_LIST[action_index], TRAIN_REPEAT)
        x.append(current_x)
      first_second_label = []
      current_first = 0
      # Walk along the trajectory, emitting non-overlapping labelled pairs.
      while True:
        y = None
        current_second = None
        if random.random() < 0.5:
          # Positive pair: frames within MAX_ACTION_DISTANCE steps.
          y = 1
          second = current_first + random.randint(1, MAX_ACTION_DISTANCE)
          if second >= MAX_CONTINUOUS_PLAY:
            break
          current_second = second
        else:
          # Negative pair: a frame far away in time, before or after the
          # anchor. `second` is still drawn to keep the stride of the walk
          # identical to the positive case.
          y = 0
          second = current_first + random.randint(1, MAX_ACTION_DISTANCE)
          if second >= MAX_CONTINUOUS_PLAY:
            break
          current_second_before = None
          current_second_after = None
          index_before_max = current_first - NEGATIVE_SAMPLE_MULTIPLIER * MAX_ACTION_DISTANCE
          index_after_min = current_first + NEGATIVE_SAMPLE_MULTIPLIER * MAX_ACTION_DISTANCE
          if index_before_max >= 0:
            current_second_before = random.randint(0, index_before_max)
          if index_after_min < MAX_CONTINUOUS_PLAY:
            current_second_after = random.randint(index_after_min, MAX_CONTINUOUS_PLAY - 1)
          if current_second_before is None:
            current_second = current_second_after
          elif current_second_after is None:
            current_second = current_second_before
          else:
            if random.random() < 0.5:
              current_second = current_second_before
            else:
              current_second = current_second_after
        first_second_label.append((current_first, current_second, y))
        current_first = second + 1
      random.shuffle(first_second_label)
      for first, second, y in first_second_label:
        future_x = x[second]
        current_x = x[first]
        current_y = y
        # Channel-wise concatenation of the two frames forms one input.
        x_result.append(np.concatenate((current_x, future_x), axis=2))
        y_result.append(current_y)
    # Python 2 integer division: number of full batches collected.
    number_of_batches = len(x_result) / BATCH_SIZE
    for batch_index in xrange(number_of_batches):
      from_index = batch_index * BATCH_SIZE
      to_index = (batch_index + 1) * BATCH_SIZE
      yield (np.array(x_result[from_index:to_index]),
             keras.utils.to_categorical(np.array(y_result[from_index:to_index]),
                                        num_classes=EDGE_CLASSES))
if __name__ == '__main__':
  # Prepare per-experiment folders for tensorboard logs and checkpoints.
  tb_logs_dir, checkpoint_path = setup_training_paths(EXPERIMENT_OUTPUT_FOLDER)
  # Input is the current and future frame stacked channel-wise.
  input_shape = ((1 + EDGE_STATE_ENCODING_FRAMES) * NET_CHANNELS, NET_HEIGHT, NET_WIDTH)
  net = EDGE_NETWORK(input_shape, EDGE_CLASSES)
  optimizer = keras.optimizers.Adam(lr=LEARNING_RATE, beta_1=0.9, beta_2=0.999,
                                    epsilon=1e-08, decay=0.0)
  net.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
  training_callbacks = [
      keras.callbacks.TensorBoard(log_dir=tb_logs_dir, write_graph=False),
      keras.callbacks.ModelCheckpoint(checkpoint_path, period=MODEL_CHECKPOINT_PERIOD),
  ]
  net.fit_generator(data_generator(),
                    steps_per_epoch=DUMP_AFTER_BATCHES,
                    epochs=EDGE_MAX_EPOCHS,
                    callbacks=training_callbacks)
| 3,720 | 45.5125 | 110 | py |
SPTM | SPTM-master/src/train/train_action_predictor.py | from train_setup import *
def data_generator():
  """Endless generator of batches for the inverse-dynamics (action) network.

  Each sample concatenates (previous, current, future) frames channel-wise;
  the label is the action taken at the `current` frame. `future` is at most
  MAX_ACTION_DISTANCE random-walk steps ahead.
  """
  game = doom_navigation_setup(DEFAULT_RANDOM_SEED, TRAIN_WAD)
  game.set_doom_map(MAP_NAME_TEMPLATE % random.randint(MIN_RANDOM_TEXTURE_MAP_INDEX,
                                                       MAX_RANDOM_TEXTURE_MAP_INDEX))
  game.new_episode()
  yield_count = 0
  while True:
    # Periodically restart on a fresh random-texture map.
    if yield_count >= ACTION_MAX_YIELD_COUNT_BEFORE_RESTART:
      game.set_doom_map(MAP_NAME_TEMPLATE % random.randint(MIN_RANDOM_TEXTURE_MAP_INDEX,
                                                           MAX_RANDOM_TEXTURE_MAP_INDEX))
      game.new_episode()
      yield_count = 0
    x = []
    y = []
    # Record a random-walk trajectory plus the action taken at each frame.
    for _ in xrange(MAX_CONTINUOUS_PLAY):
      current_x = game.get_state().screen_buffer.transpose(VIZDOOM_TO_TF)
      action_index = random.randint(0, ACTION_CLASSES - 1)
      game_make_action_wrapper(game, ACTIONS_LIST[action_index], TRAIN_REPEAT)
      current_y = action_index
      x.append(current_x)
      y.append(current_y)
    # Sample non-overlapping (first, second) index pairs along the walk.
    first_second_pairs = []
    current_first = 0
    while True:
      distance = random.randint(1, MAX_ACTION_DISTANCE)
      second = current_first + distance
      if second >= MAX_CONTINUOUS_PLAY:
        break
      first_second_pairs.append((current_first, second))
      current_first = second + 1
    random.shuffle(first_second_pairs)
    x_result = []
    y_result = []
    for first, second in first_second_pairs:
      future_x = x[second]
      current_x = x[first]
      # The frame before `first` gives short-term motion context; at the
      # start of the trajectory the current frame is duplicated instead.
      previous_x = current_x
      if first > 0:
        previous_x = x[first - 1]
      current_y = y[first]
      x_result.append(np.concatenate((previous_x, current_x, future_x), axis=2))
      y_result.append(current_y)
      if len(x_result) == BATCH_SIZE:
        yield_count += 1
        yield (np.array(x_result),
               keras.utils.to_categorical(np.array(y_result),
                                          num_classes=ACTION_CLASSES))
        x_result = []
        y_result = []
if __name__ == '__main__':
  # Prepare per-experiment folders for tensorboard logs and checkpoints.
  tb_logs_dir, checkpoint_path = setup_training_paths(EXPERIMENT_OUTPUT_FOLDER)
  # Input is (previous, current, future) frames stacked channel-wise.
  input_shape = ((1 + ACTION_STATE_ENCODING_FRAMES) * NET_CHANNELS, NET_HEIGHT, NET_WIDTH)
  net = ACTION_NETWORK(input_shape, ACTION_CLASSES)
  optimizer = keras.optimizers.Adam(lr=LEARNING_RATE, beta_1=0.9, beta_2=0.999,
                                    epsilon=1e-08, decay=0.0)
  net.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
  training_callbacks = [
      keras.callbacks.TensorBoard(log_dir=tb_logs_dir, write_graph=False),
      keras.callbacks.ModelCheckpoint(checkpoint_path, period=MODEL_CHECKPOINT_PERIOD),
  ]
  net.fit_generator(data_generator(),
                    steps_per_epoch=DUMP_AFTER_BATCHES,
                    epochs=ACTION_MAX_EPOCHS,
                    callbacks=training_callbacks)
| 2,813 | 42.292308 | 116 | py |
SPTM | SPTM-master/src/train/train_setup.py | import sys
# Make the shared `common` module (one directory up) importable.
sys.path.append('..')
from common import *
# limit memory usage
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
# Cap the fraction of GPU memory TF may grab so other jobs can share the GPU.
config.gpu_options.per_process_gpu_memory_fraction = TRAIN_MEMORY_FRACTION
set_session(tf.Session(config=config))
def setup_training_paths(experiment_id):
  """Create the output folder structure for a fresh training run.

  Args:
    experiment_id: name interpolated into the *_PATH_TEMPLATE constants.

  Returns:
    (logs_path, current_model_path): the tensorboard log directory and the
    checkpoint path template for this experiment.

  Raises:
    RuntimeError: if the experiment folder already exists, to avoid
      silently overwriting a previous run.
  """
  experiment_path = EXPERIMENTS_PATH_TEMPLATE % experiment_id
  logs_path = LOGS_PATH_TEMPLATE % experiment_id
  models_path = MODELS_PATH_TEMPLATE % experiment_id
  current_model_path = CURRENT_MODEL_PATH_TEMPLATE % experiment_id
  # An `assert` would be stripped under `python -O`; raise explicitly so the
  # overwrite guard always runs.
  if os.path.exists(experiment_path):
    raise RuntimeError('Experiment folder %s already exists' % experiment_path)
  os.makedirs(experiment_path)
  os.makedirs(logs_path)
  os.makedirs(models_path)
  return logs_path, current_model_path
| 796 | 35.227273 | 103 | py |
AGES | AGES-master/resnet.py | import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
# Public names exported by this module.
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50']
# Official torchvision ImageNet checkpoint URLs (used when pretrained=True).
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution, bias-free, with padding matched to the dilation so
    spatial size is preserved when stride == 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-layer 3x3 residual block (torchvision-style).

    The first conv (and the optional `downsample` path) carries the stride;
    groups/base_width/dilation other than the defaults are rejected because
    the basic block does not support them.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Construction order is kept stable for reproducible seeded init.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or the projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class ResMLPBlock(nn.Module):
    """Residual fully-connected block: ReLU(x + BN(Linear(ReLU(BN(Linear(x)))))).

    Input and output both have `channels` features.
    """
    def __init__(self, channels):
        super().__init__()
        self.fc1 = nn.Sequential(
            nn.Linear(channels, channels),
            nn.BatchNorm1d(channels),
            nn.ReLU(inplace=True)
        )
        self.fc2 = nn.Sequential(
            nn.Linear(channels, channels),
            nn.BatchNorm1d(channels),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        transformed = self.fc2(self.fc1(x))
        # Skip connection followed by the final non-linearity.
        return self.relu(transformed + x)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck block (torchvision-style).

    The inner width scales with `base_width` and `groups`; the 3x3 conv
    (and the optional `downsample` path) carries the stride.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Construction order is kept stable for reproducible seeded init.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or the projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """ResNet backbone (adapted from torchvision) with a custom residual-MLP head.
    Differences from stock torchvision: the number of input channels is
    configurable, and `fc` is a Linear-BN-ReLU -> ResMLPBlock -> Linear head
    projecting to `out_dim` instead of a single classification layer.
    `forward` returns both the head output and the average-pooled feature.
    NOTE(review): because of the custom head, a stock torchvision checkpoint
    cannot be loaded strictly into this model.
    """
    def __init__(self, block, layers, in_channels=3, fc_size=2048, out_dim=64, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # self.fc = nn.Linear(512 * block.expansion, out_dim)
        # Custom projection head replacing torchvision's single Linear.
        self.fc = nn.Sequential(
            nn.Linear(512 * block.expansion, fc_size),
            nn.BatchNorm1d(fc_size),
            nn.ReLU(inplace=True),
            ResMLPBlock(fc_size),
            nn.Linear(fc_size, out_dim)
        )
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        # Builds one ResNet stage: the first block may downsample/expand via
        # `downsample`; the remaining blocks keep the resolution.
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Returns (head_output, pooled_feature): the MLP-head projection and
        # the flattened global-average-pooled backbone feature.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        avepool = self.avgpool(x)
        avepool = torch.flatten(avepool, 1)
        out = self.fc(avepool)
        return out, avepool
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Construct a ResNet and optionally load matching ImageNet backbone weights.

    Args:
        arch: key into ``model_urls`` ('resnet18' / 'resnet34' / 'resnet50').
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks in each of the four stages.
        pretrained: if True, download the torchvision checkpoint and load
            every parameter whose name and shape match this model.
        progress: if True, show a download progress bar.
        **kwargs: forwarded to ``ResNet`` (in_channels, fc_size, out_dim, ...).

    Returns:
        The constructed ``ResNet`` instance.
    """
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        # This ResNet replaces the stock `fc` head (and possibly `conv1` when
        # in_channels != 3), so a strict load would always raise. Keep only
        # checkpoint tensors whose name and shape match, and load those
        # non-strictly so the backbone still gets its pretrained weights.
        own_state = model.state_dict()
        compatible = {k: v for k, v in state_dict.items()
                      if k in own_state and v.shape == own_state[k].shape}
        model.load_state_dict(compatible, strict=False)
    return model
def resnet18(pretrained=False, progress=True, **kwargs):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Two BasicBlocks per stage.
    blocks_per_stage = [2, 2, 2, 2]
    return _resnet('resnet18', BasicBlock, blocks_per_stage, pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # BasicBlock counts for the four stages of ResNet-34.
    blocks_per_stage = [3, 4, 6, 3]
    return _resnet('resnet34', BasicBlock, blocks_per_stage, pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Bottleneck counts for the four stages of ResNet-50.
    blocks_per_stage = [3, 4, 6, 3]
    return _resnet('resnet50', Bottleneck, blocks_per_stage, pretrained, progress, **kwargs)
| 10,067 | 35.478261 | 106 | py |
AGES | AGES-master/bgm.py | from sagan import *
import torchvision.models as models
from resnet import *
import torch.nn.init as init
class ResEncoder(nn.Module):
    r'''ResNet-based image encoder.

    Maps an image to a latent code via a ResNet backbone with an MLP head.

    Args:
        latent_dim: dimensionality of the latent code
        arch: backbone architecture, 'resnet' (ResNet-50) or 'resnet18'
        dist: output distribution - 'deterministic', 'gaussian' (returns a
            (mu, logvar) pair) or 'implicit' (input noise is concatenated to
            the image channel-wise)
        fc_size: width of the fc layers in the backbone head
        noise_dim: number of noise channels for the implicit encoder
    '''
    def __init__(self, latent_dim=64, arch='resnet', dist='gaussian', fc_size=2048, noise_dim=128):
        super().__init__()
        self.latent_dim = latent_dim
        self.dist = dist
        self.noise_dim = noise_dim
        # Implicit encoders see the noise as extra input channels; gaussian
        # encoders emit twice the latent width (mu and logvar).
        in_channels = noise_dim + 3 if dist == 'implicit' else 3
        out_dim = latent_dim * 2 if dist == 'gaussian' else latent_dim
        if arch == 'resnet':
            backbone = resnet50
        else:
            assert arch == 'resnet18'
            backbone = resnet18
        self.encoder = backbone(pretrained=False, in_channels=in_channels, fc_size=fc_size, out_dim=out_dim)

    def forward(self, x, avepool=False):
        '''Encode `x`; if `avepool` is True, return the backbone's pooled
        feature instead (used for downstream tasks).'''
        if self.dist == 'implicit':
            # Spatially broadcast a per-sample noise vector and stack it
            # onto the image channels.
            noise = x.new(x.size(0), self.noise_dim, 1, 1).normal_(0, 1)
            noise = noise.expand(x.size(0), self.noise_dim, x.size(2), x.size(3))
            x = torch.cat([x, noise], dim=1)
        code, pooled = self.encoder(x)
        if avepool:
            return pooled
        if self.dist == 'gaussian':
            mu, logvar = code.chunk(2, dim=1)
            return mu, logvar
        return code
class BigDecoder(nn.Module):
    r'''SAGAN-based "big" generator.

    Args:
        latent_dim: latent dimension
        conv_dim: base number of channels
        image_size: output image resolution
        dist: output distribution - 'deterministic', 'gaussian' or 'implicit'
        g_std: scale on the gaussian generator's standard deviation. Default: 1
    '''
    def __init__(self, latent_dim=64, conv_dim=32, image_size=64, dist='deterministic', g_std=1):
        super().__init__()
        self.latent_dim = latent_dim
        self.dist = dist
        self.g_std = g_std
        # Gaussian heads emit (mu, logvar) -> 6 channels; implicit heads get
        # noise injected inside the generator itself.
        out_channels = 6 if dist == 'gaussian' else 3
        add_noise = dist == 'implicit'
        self.decoder = Generator(latent_dim, conv_dim, image_size, out_channels, add_noise)

    def forward(self, z, mean=False, stats=False):
        decoded = self.decoder(z)
        if self.dist != 'gaussian':
            return decoded
        x_mu, x_logvar = decoded.chunk(2, dim=1)
        if stats:
            return x_mu, x_logvar
        # Sample unconditionally so RNG consumption matches every call path.
        x_sample = reparameterize(x_mu, (x_logvar / 2).exp(), self.g_std)
        return x_mu if mean else x_sample
class BGM(nn.Module):
    r'''Bidirectional generative model
    Args:
        General settings:
            latent_dim: latent dimension
            conv_dim: base number of channels
            image_size: image resolution
            image_channel: number of image channel
        Encoder settings:
            enc_dist: encoder distribution
            enc_arch: encoder architecture
            enc_fc_size: number of nodes in each fc layer in encoder
            enc_noise_dim: dimension of input noise when an implicit encoder is used
        Generator settings:
            dec_dist: generator distribution. Choices: deterministic, implicit
            dec_arch: generator architecture. Choices: sagan, dcgan
    '''
    # NOTE(review): the `type` parameter shadows the builtin; any value other
    # than 'big' or 'dcgan' leaves the model without encoder/decoder.
    def __init__(self, latent_dim=64, conv_dim=32, image_size=64, image_channel=3,
                 enc_dist='gaussian', enc_arch='resnet', enc_fc_size=2048, enc_noise_dim=128,
                 dec_dist='deterministic',
                 type='big', old=False):
        super().__init__()
        self.latent_dim = latent_dim
        self.enc_dist = enc_dist
        self.dec_dist = dec_dist
        if type == 'big':
            self.encoder = ResEncoder(latent_dim, enc_arch, enc_dist, enc_fc_size, enc_noise_dim)
            if old:
                # Legacy checkpoints used the raw SAGAN Generator directly.
                self.decoder = Generator(latent_dim, conv_dim, image_size)
            else:
                self.decoder = BigDecoder(latent_dim, conv_dim, image_size, dec_dist)
        elif type == 'dcgan':
            self.encoder = DCEncoder(latent_dim, conv_dim, image_size, image_channel, enc_dist)
            self.decoder = DCDecoder(latent_dim, conv_dim, image_size, image_channel, dec_dist)
    def forward(self, x=None, z=None, recon=False, infer_mean=True):
        # Dispatch on which inputs are given:
        #   x and z -> training: return (encoded x, decoded z)
        #   x only  -> inference: reconstruction or latent representation
        #   z only  -> generation: return decoded z
        # recon_mean is used for gaussian decoder which we do not use here.
        # Training Mode
        if x is not None and z is not None:
            if self.enc_dist == 'gaussian':
                z_mu, z_logvar = self.encoder(x)
                z_fake = reparameterize(z_mu, (z_logvar / 2).exp())
            else: # deterministic or implicit
                z_fake = self.encoder(x)
            x_fake = self.decoder(z)
            return z_fake, x_fake
        # Inference Mode
        elif x is not None and z is None:
            # Get latent
            if self.enc_dist == 'gaussian':
                z_mu, z_logvar = self.encoder(x)
                z_fake = reparameterize(z_mu, (z_logvar / 2).exp())
            else: # deterministic or implicit
                z_fake = self.encoder(x)
            # Reconstruction
            if recon:
                return self.decoder(z_fake)
            # Representation
            # Mean representation for Gaussian encoder
            elif infer_mean and self.enc_dist == 'gaussian':
                return z_mu
            # Random representation sampled from q_e(z|x)
            else:
                return z_fake
        # Generation Mode
        elif x is None and z is not None:
            return self.decoder(z)
class BigJointDiscriminator(nn.Module):
    r'''SAGAN-based joint discriminator over (image, latent) pairs.

    Scores the image, the latent, and their joint embedding with three
    sub-discriminators, then averages the three scores.

    Args:
        latent_dim: latent dimension
        conv_dim: base number of channels
        image_size: image resolution
        fc_size: number of nodes in each fc layer
    '''
    def __init__(self, latent_dim=64, conv_dim=32, image_size=64, fc_size=1024):
        super().__init__()
        self.discriminator = Discriminator(conv_dim, image_size, in_channels=3, out_feature=True)
        self.discriminator_z = Discriminator_MLP(latent_dim, fc_size)
        self.discriminator_j = Discriminator_MLP(conv_dim * 16 + fc_size, fc_size)

    def forward(self, x, z):
        score_x, feat_x = self.discriminator(x)
        score_z, feat_z = self.discriminator_z(z)
        joint_features = torch.cat((feat_x, feat_z), dim=1)
        score_xz, _ = self.discriminator_j(joint_features)
        return (score_x + score_z + score_xz) / 3
class DCAE(nn.Module):
    """DCGAN-based autoencoder: a DCEncoder plus a DCDecoder.
    `forward` dispatches on which of `x` / `z` are provided:
      * x and z -> training mode: returns (z_fake ~ q(z|x), x_fake = dec(z)).
      * x only  -> inference: reconstruction (recon=True) or representation.
      * z only  -> generation: returns dec(z, mean=gen_mean).
    """
    def __init__(self, latent_dim=64, conv_dim=64, image_size=28, image_channel=3, enc_dist='gaussian',
                 dec_dist='deterministic', tanh=False):
        super().__init__()
        self.latent_dim = latent_dim
        self.enc_dist = enc_dist
        self.dec_dist = dec_dist
        self.encoder = DCEncoder(latent_dim, conv_dim, image_size, image_channel, enc_dist, tanh=tanh)
        self.decoder = DCDecoder(latent_dim, conv_dim, image_size, image_channel, dec_dist)
    def forward(self, x=None, z=None, infer_mean=True, recon=False, gen_mean=True, recon_mean=False):
        # Training Mode (only used in age)
        if x is not None and z is not None:
            if self.enc_dist == 'gaussian':
                z_mu, z_logvar = self.encoder(x)
                z_fake = reparameterize(z_mu, (z_logvar / 2).exp())
            else: #deterministic
                z_fake = self.encoder(x)
            x_fake = self.decoder(z)
            return z_fake, x_fake
        # Inference Mode
        elif x is not None and z is None:
            if self.enc_dist == 'gaussian':
                z_mu, z_logvar = self.encoder(x)
            # Reconstruction
            if recon:
                if self.enc_dist == 'gaussian':
                    z_fake = reparameterize(z_mu, (z_logvar / 2).exp())
                else:
                    z_fake = self.encoder(x)
                if self.dec_dist == 'gaussian':
                    if recon_mean:
                        return self.decoder(z_fake, mean=True)
                    else:
                        # NOTE(review): with a gaussian decoder this returns a
                        # 5-tuple (x_recon, x_mu, x_logvar, z_mu, z_logvar),
                        # unlike the single-tensor return of other branches.
                        x_recon, x_mu, x_logvar = self.decoder(z_fake, stats=True)
                        return x_recon, x_mu, x_logvar, z_mu, z_logvar
                else:
                    return self.decoder(z_fake)
            # Representation
            else:
                if self.enc_dist != 'gaussian':
                    return self.encoder(x)
                else:
                    if infer_mean: # Mean representation
                        return z_mu
                    else: # Sample representation
                        z_fake = reparameterize(z_mu, (z_logvar / 2).exp())
                        return z_fake
        # Generation Mode
        elif x is None and z is not None:
            x_fake = self.decoder(z, mean=gen_mean)
            return x_fake
class DCEncoder(nn.Module):
    '''DCGAN-style convolutional encoder.

    Three stride-2 conv/BN/ReLU stages reduce a 32x32 input to a 4x4 map,
    which a single linear layer projects to the latent space. Depending on
    `dist`, the output is deterministic, a (mu, logvar) gaussian pair, or an
    implicit code conditioned on spatially-broadcast input noise.
    '''
    def __init__(self, latent_dim=64, conv_dim=64, image_size=28, image_channel=3, dist='gaussian', noise_dim=100, tanh=False):
        super().__init__()
        self.dist = dist
        in_channels = image_channel + noise_dim if dist == 'implicit' else image_channel
        self.noise_dim = noise_dim
        # Construction order is kept stable for reproducible seeded init.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, conv_dim, 5, 2, 2),
            nn.BatchNorm2d(conv_dim * 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(conv_dim, conv_dim * 2, 5, 2, 2),
            nn.BatchNorm2d(conv_dim * 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(conv_dim * 2, conv_dim * 4, 5, 2, 2),
            nn.BatchNorm2d(conv_dim * 4),
            nn.ReLU(inplace=True),
        )
        fc_size = latent_dim * 2 if dist == 'gaussian' else latent_dim
        self.fc = nn.Linear(conv_dim * 4 * 4 * 4, fc_size)
        self.add_tanh = tanh
        if tanh:
            self.tanh = nn.Tanh()

    def forward(self, x):
        if self.dist == 'implicit':
            # Broadcast a per-sample noise vector over the spatial grid and
            # stack it onto the image channels.
            batch = x.size(0)
            eps = torch.randn(batch, self.noise_dim, device=x.device)
            eps = eps.view(batch, self.noise_dim, 1, 1).expand(batch, self.noise_dim, x.size(2), x.size(2))
            x = torch.cat([x, eps], dim=1)
        features = self.conv(x).view(x.size(0), -1)
        encoded = self.fc(features)
        if self.dist == 'gaussian':
            # Split the doubled output into (mu, logvar).
            return encoded.chunk(2, dim=1)
        if self.add_tanh:
            return self.tanh(encoded)
        return encoded
class DCDecoder(nn.Module):
    '''DCGAN-style transposed-convolution generator.

    A linear layer maps the latent code to a 4x4 feature map, which three
    transposed convolutions upsample to a 28x28 image in [-1, 1]. `dist`
    selects a deterministic, gaussian (mu/logvar) or noise-injected implicit
    output head.
    '''
    def __init__(self, latent_dim=64, conv_dim=64, image_size=28, image_channel=3, dist='deterministic'):
        super().__init__()
        self.dist = dist
        self.conv_dim = conv_dim
        if dist == 'implicit':
            # Layers are kept separate so NoiseInjection can be interleaved
            # between the upsampling stages.
            self.fc = nn.Linear(latent_dim, conv_dim * 4 * 4 * 4)
            self.bn0 = nn.BatchNorm2d(conv_dim * 4)
            self.conv2 = nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 5, 2, 2)
            self.bn2 = nn.BatchNorm2d(conv_dim * 2)
            self.conv3 = nn.ConvTranspose2d(conv_dim * 2, conv_dim, 5, 2, 2, 1)
            self.bn3 = nn.BatchNorm2d(conv_dim)
            self.toRGB = nn.ConvTranspose2d(conv_dim, image_channel, 5, 2, 2, 1)
            self.relu = nn.ReLU(True)
            self.tanh = nn.Tanh()
            self.noise1 = NoiseInjection(conv_dim * 4, 4)
            self.noise2 = NoiseInjection(conv_dim * 2, 7)
            self.noise3 = NoiseInjection(conv_dim, 14)
        else:
            # A gaussian head doubles the output channels for (mu, logvar).
            out_channels = image_channel if dist == 'deterministic' else image_channel * 2
            self.fc = nn.Sequential(
                nn.Linear(latent_dim, conv_dim * 4 * 4 * 4),
                nn.BatchNorm1d(conv_dim * 4 * 4 * 4),
                nn.ReLU(True)
            )
            self.net = nn.Sequential(
                nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 5, 2, 2),
                nn.BatchNorm2d(conv_dim * 2),
                nn.ReLU(True),
                nn.ConvTranspose2d(conv_dim * 2, conv_dim, 5, 2, 2, 1),
                nn.BatchNorm2d(conv_dim),
                nn.ReLU(True),
                nn.ConvTranspose2d(conv_dim, out_channels, 5, 2, 2, 1),
                nn.Tanh(),
            )

    def forward(self, z, mean=False, stats=False):
        feature_map = self.fc(z).view(-1, self.conv_dim * 4, 4, 4)
        if self.dist == 'gaussian':
            x_mu, x_logvar = self.net(feature_map).chunk(2, dim=1)
            # Sample unconditionally so RNG consumption matches every path.
            x_sample = reparameterize(x_mu, (x_logvar / 2).exp())
            if stats:
                return x_sample, x_mu, x_logvar
            return x_mu if mean else x_sample
        if self.dist == 'implicit':
            out = self.relu(self.bn0(self.noise1(feature_map)))
            out = self.relu(self.bn2(self.noise2(self.conv2(out))))
            out = self.relu(self.bn3(self.noise3(self.conv3(out))))
            return self.tanh(self.toRGB(out))
        return self.net(feature_map)
class DCJointDiscriminator(nn.Module):
    '''DCGAN-style joint discriminator over (image, latent) pairs.

    The image passes through three stride-2 convolutions; the flattened
    features are concatenated with the latent code and scored by an MLP.
    '''
    def __init__(self, latent_dim=64, conv_dim=64, image_size=64, image_channel=3, fc_dim=1024):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(image_channel, conv_dim, 5, 2, 2),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(conv_dim, conv_dim * 2, 5, 2, 2),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(conv_dim * 2, conv_dim * 4, 5, 2, 2),
            nn.LeakyReLU(inplace=True),
        )
        self.fc_j = nn.Sequential(
            nn.Linear(conv_dim * 4 * 4 * 4 + latent_dim, fc_dim),
            nn.LeakyReLU(inplace=True),
            nn.Linear(fc_dim, 1)
        )

    def forward(self, x, z):
        image_features = self.conv(x).view(x.size(0), -1)
        joint = torch.cat([image_features, z], dim=1)
        return self.fc_j(joint)
class ToyAE(nn.Module):
    """Bidirectional generative model (encoder + decoder MLPs) for toy 2-D data.

    forward() dispatches on which of (x, z) are provided:
      * both x and z  -> training mode: returns (E(x), G(z)).
      * only x        -> inference: representation or reconstruction of x.
      * only z        -> generation: G(z).
    """
    def __init__(self, data_dim=2, latent_dim=10, enc_hidden_dim=500, dec_hidden_dim=500,
                 enc_dist='gaussian', dec_dist='gaussian'):
        super().__init__()
        self.latent_dim = latent_dim
        self.enc_dist = enc_dist
        self.dec_dist = dec_dist
        self.encoder = EncoderMLP(data_dim, latent_dim, enc_hidden_dim, enc_dist)
        self.decoder = DecoderMLP(data_dim, latent_dim, dec_hidden_dim, dec_dist)
    def forward(self, x=None, z=None, infer_mean=True, recon=False, gen_mean=False):
        # Training Mode (only used in age)
        if x is not None and z is not None:
            if self.enc_dist == 'gaussian':
                z_mu, z_logvar = self.encoder(x)
                z_fake = reparameterize(z_mu, (z_logvar / 2).exp())
            else: # deterministic or implicit
                z_fake = self.encoder(x)
            x_fake = self.decoder(z)
            return z_fake, x_fake
        # Inference Mode
        elif x is not None and z is None:
            if self.enc_dist == 'gaussian':
                z_mu, z_logvar = self.encoder(x)
                z_fake = reparameterize(z_mu, (z_logvar / 2).exp())
            else: # deterministic or implicit
                z_fake = self.encoder(x)
            # Reconstruction
            if recon:
                if self.dec_dist == 'gaussian':
                    # NOTE(review): z_mu/z_logvar are only bound when
                    # enc_dist == 'gaussian'; a gaussian decoder paired with a
                    # deterministic encoder would raise NameError here — the
                    # training configs presumably always pair them. Confirm.
                    x_recon, x_mu, x_logvar = self.decoder(z_fake, stats=True)
                    return x_recon, x_mu, x_logvar, z_mu, z_logvar
                else:
                    return self.decoder(z_fake)
            # Representation
            elif infer_mean and self.enc_dist == 'gaussian': # Mean representation
                return z_mu
            else: # Sample representation
                return z_fake
        # Generation Mode
        elif x is None and z is not None:
            x_fake = self.decoder(z, mean=gen_mean)
            return x_fake
class DecoderMLP(nn.Module):
    """MLP decoder p(x|z) for toy data.

    Two hidden Linear+BatchNorm+ReLU layers, then a linear head. With
    dist='gaussian' the head emits (mu, logvar) split channel-wise and a
    reparameterized sample is drawn; with dist='deterministic' the head
    output is returned as-is.
    """
    def __init__(self, data_dim=2, latent_dim=10, hidden_dim=500, dist='gaussian'):
        super().__init__()
        self.dist = dist
        layers = []
        width = latent_dim
        for _ in range(2):
            layers += [
                nn.Linear(width, hidden_dim),
                nn.BatchNorm1d(hidden_dim),
                nn.ReLU(inplace=True),
            ]
            width = hidden_dim
        if dist == 'gaussian':
            layers.append(nn.Linear(hidden_dim, data_dim * 2))  # (mu, logvar)
        else:
            assert self.dist == 'deterministic'
            layers.append(nn.Linear(hidden_dim, data_dim))
        self.decoder = nn.Sequential(*layers)
    def forward(self, z, mean=False, stats=False):
        """Decode z; `mean`/`stats` only apply under the gaussian head."""
        out = self.decoder(z)
        if self.dist != 'gaussian':
            return out
        x_mu, x_logvar = out.chunk(2, dim=1)
        x_sample = reparameterize(x_mu, (x_logvar / 2).exp())
        if stats:
            return x_sample, x_mu, x_logvar
        return x_mu if mean else x_sample
class EncoderMLP(nn.Module):
    """MLP encoder q(z|x) for toy data.

    Three hidden Linear+BatchNorm+ReLU layers followed by a linear head.
    With dist='gaussian' the head produces (mu, logvar) and forward returns
    the pair; with dist='deterministic' it returns the latent code directly.
    """
    def __init__(self, data_dim=2, latent_dim=10, hidden_dim=500, dist='gaussian'):
        super().__init__()
        self.dist = dist
        layers = []
        width = data_dim
        for _ in range(3):
            layers += [
                nn.Linear(width, hidden_dim),
                nn.BatchNorm1d(hidden_dim),
                nn.ReLU(inplace=True),
            ]
            width = hidden_dim
        if dist == 'gaussian':
            layers.append(nn.Linear(hidden_dim, latent_dim * 2))  # (mu, logvar)
        else:
            assert self.dist == 'deterministic'
            layers.append(nn.Linear(hidden_dim, latent_dim))
        self.encoder = nn.Sequential(*layers)
    def forward(self, x):
        """Return (mu, logvar) for a gaussian encoder, else the code itself."""
        out = self.encoder(x)
        if self.dist == 'gaussian':
            return out.chunk(2, dim=1)
        return out
class DiscriminatorMLP(nn.Module):
    """Joint discriminator D(x, z) for toy data, averaging three scores:
    a marginal score on z, a marginal score on x, and a joint score on the
    concatenation of the two blocks' penultimate features.
    """
    def __init__(self, data_dim=2, latent_dim=10, hidden_dim_x=400, hidden_dim_z=500, hidden_dim=500):
        super().__init__()
        self.dis_z = DisMLPBlock(latent_dim, hidden_dim_z)
        self.dis_x = DisMLPBlock(data_dim, hidden_dim_x)
        self.dis_j = DisMLPBlock(hidden_dim_z + hidden_dim_x, hidden_dim)
    def forward(self, x, z):
        score_z, feat_z = self.dis_z(z)
        score_x, feat_x = self.dis_x(x)
        score_j, _ = self.dis_j(torch.cat([feat_z, feat_x], dim=1))
        # Average of the two marginal scores and the joint score.
        return (score_z + score_x + score_j) / 3
class DisMLPBlock(nn.Module):
    """Two-layer LeakyReLU MLP used as a discriminator sub-network.

    forward returns (score, feature): a scalar score per sample plus the
    hidden feature it was computed from, so callers can reuse the feature
    for a joint head.
    """
    def __init__(self, input_dim=10, hidden_dim=500):
        super().__init__()
        self.block = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.LeakyReLU(inplace=True),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(inplace=True),
        )
        self.layer = nn.Linear(hidden_dim, 1)
    def forward(self, x):
        hidden = self.block(x)
        score = self.layer(hidden)
        return score, hidden
def reparameterize(mu, sigma, std=1):
    """Reparameterization trick: return mu + sigma * eps with eps ~ N(0, std^2).

    Args:
        mu: mean tensor.
        sigma: standard-deviation tensor; must match ``mu``'s shape.
        std: standard deviation of the auxiliary noise ``eps`` (default 1).

    Returns:
        A sample with the same shape/dtype/device as ``mu``, differentiable
        with respect to both ``mu`` and ``sigma``.
    """
    assert mu.shape == sigma.shape
    # torch.randn_like keeps mu's dtype/device; replaces the legacy
    # mu.new(mu.shape).normal_(0, std) construction.
    eps = torch.randn_like(mu) * std
    return mu + sigma * eps
def kl_div(mu, logvar):
    """KL divergence KL(N(mu, exp(logvar)) || N(0, I)), summed over all elements."""
    return 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1).sum()
def gaussian_nll(x_mu, x_logvar, x):
    """Negative log-likelihood of x under N(x_mu, exp(x_logvar)), summed over all elements."""
    inv_sigma = torch.exp(-x_logvar / 2)
    sq_err = ((x - x_mu) * inv_sigma).pow(2)
    return 0.5 * (x_logvar + sq_err + np.log(2 * np.pi)).sum()
def kaiming_init(m):
    """Module initializer for use with ``model.apply``.

    Linear/Conv2d weights get Kaiming-normal init with zero bias;
    BatchNorm layers get identity init (unit weight, zero bias).
    Exact-type matching is intentional: subclasses are left untouched.
    """
    cls = type(m)
    if cls in (nn.Linear, nn.Conv2d):
        init.kaiming_normal_(m.weight)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif cls in (nn.BatchNorm1d, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.fill_(0)
| 20,262 | 37.376894 | 127 | py |
AGES | AGES-master/utils.py | import numpy as np
import os
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data import TensorDataset, DataLoader
def draw_recon(x, x_recon):
    """Interleave originals and reconstructions row-wise.

    Rows alternate original / reconstruction (x[0], x_recon[0], x[1], ...),
    which makes save_image grids show each input next to its reconstruction.
    Both tensors must have the same number of rows.
    """
    originals = x.tolist()
    recons = x_recon.tolist()
    interleaved = [None] * (len(originals) + len(recons))
    interleaved[0::2] = originals
    interleaved[1::2] = recons
    return torch.FloatTensor(interleaved)
def make_folder(path):
    """Create directory ``path`` (including parents) if it does not exist.

    Uses ``exist_ok=True`` so there is no race between checking for the
    directory and creating it (the previous exists()-then-makedirs pattern
    could raise FileExistsError under concurrent callers).
    """
    os.makedirs(path, exist_ok=True)
def write_config_to_file(config, save_path):
    """Write every attribute of ``config`` as "name: value" lines.

    Output goes to <save_path>/config.txt, overwriting any existing file.
    ``config`` is typically an argparse.Namespace; anything with a
    ``__dict__`` works.
    """
    out_path = os.path.join(save_path, 'config.txt')
    with open(out_path, 'w') as f:
        for name in vars(config):
            f.write(f"{name}: {getattr(config, name)}\n")
def make_dataloader(args):
    """Build (train_loader, test_loader) for the dataset named in args.dataset.

    Supported datasets: 'celeba', 'cifar', 'imagenet', 'mnist',
    'mnist_stack' (a pre-built numpy array on disk) and 'mog' (a synthetic
    mixture of Gaussians on a base x base grid). test_loader is None for
    every dataset except 'mog'. Image datasets are normalized to [-1, 1].
    """
    test_loader = None
    if args.dataset == 'celeba':
        trans_f = transforms.Compose([
            transforms.CenterCrop(args.image_size*2),
            transforms.Resize((args.image_size, args.image_size)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        train_set = datasets.CelebA(args.data_dir, split='train', download=True, transform=trans_f,)
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, pin_memory=False,
                                                   drop_last=False, num_workers=3)
    elif args.dataset == 'cifar':
        trans_f = transforms.Compose([
            transforms.Resize((args.image_size, args.image_size)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        train_set = datasets.CIFAR10(root=args.data_dir, train=True, download=True, transform=trans_f)
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=3)
    elif args.dataset == 'imagenet':
        # Resize slightly larger, center-crop to 64, random horizontal flip.
        trans_f = transforms.Compose([
            transforms.Resize((73, 73)),
            transforms.CenterCrop(64),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        train_set = datasets.ImageFolder(args.data_dir, transform=trans_f)
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, pin_memory=False,
                                                   num_workers=8)
    elif args.dataset == 'mnist':
        trans_f = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ])
        train_set = datasets.MNIST(args.data_dir, train=True, download=False, transform=trans_f)
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, pin_memory=False,
                                                   num_workers=3)
    elif args.dataset == 'mnist_stack':
        # args.data_dir points at a .npy file holding images in [0, 1];
        # rescale to [-1, 1] before wrapping in a TensorDataset.
        train_set = np.load(args.data_dir)
        train_set = (train_set - 0.5) / 0.5
        train_set = TensorDataset(torch.from_numpy(train_set))
        train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, pin_memory=False, num_workers=3)
    else:
        assert args.dataset == 'mog'
        # Synthetic mixture of Gaussians: base**2 components centered on a
        # regular grid with spacing 2, each with std args.mog_std.
        base = args.mog_base
        num_class = base ** 2
        num_each_class = [10000] * num_class
        if args.mog_imbalance:
            # Every other component is undersampled (500 vs 10000 points).
            num_each_class[1::2] = [500] * (num_class // 2)
        means = []
        for i in range(base):
            for j in range(base):
                means.append([(i - (base - 1) / 2) * 2, (j - (base - 1) / 2) * 2])
        std = args.mog_std
        x = torch.randn(sum(num_each_class), 2) * std
        y = torch.ones(sum(num_each_class))
        for i in range(num_class):
            # Shift the i-th contiguous slice to its component mean and
            # label it with the component index.
            x[sum(num_each_class[:i]):sum(num_each_class[:(i + 1)]), :] += torch.Tensor(means[i])
            y[sum(num_each_class[:i]):sum(num_each_class[:(i + 1)])] *= i
        train_set = TensorDataset(x, y)
        train_loader = DataLoader(dataset=train_set, batch_size=args.batch_size, shuffle=True)
        test_loader = DataLoader(dataset=train_set, batch_size=10000, shuffle=True)
    return train_loader, test_loader
| 4,189 | 40.078431 | 121 | py |
AGES | AGES-master/sagan.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import spectral_norm
from torch.nn.init import orthogonal_
def init_weights(m):
    """Module initializer for ``model.apply``: orthogonal weights and zero
    bias for Linear / Conv2d / ConvTranspose2d layers (exact types only)."""
    if type(m) in (nn.Linear, nn.Conv2d, nn.ConvTranspose2d):
        orthogonal_(m.weight)
        m.bias.data.fill_(0.)
def snlinear(in_features, out_features):
    """Fully-connected layer wrapped in spectral normalization."""
    layer = nn.Linear(in_features=in_features, out_features=out_features)
    return spectral_norm(layer)
def snconv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
    """2-D convolution wrapped in spectral normalization."""
    conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                     stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
    return spectral_norm(conv)
def snconvtrans2d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, groups=1, bias=True):
    """Transposed 2-D convolution wrapped in spectral normalization."""
    conv = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, output_padding=output_padding,
                              dilation=dilation, groups=groups, bias=bias)
    return spectral_norm(conv)
class Self_Attn(nn.Module):
    """ Self attention Layer (SAGAN-style).

    Queries (theta) are computed at full resolution; keys (phi) and values
    (g) are max-pooled 2x to quarter the number of key/value positions.
    The attended values are projected back to in_channels and added to the
    input through a learnable residual scale `sigma` (initialized to 0, so
    the layer starts as an identity).
    """
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels
        self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_attn = snconv2d(in_channels=in_channels//2, out_channels=in_channels, kernel_size=1, stride=1, padding=0)
        self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
        self.softmax = nn.Softmax(dim=-1)
        # Residual scale; zero at init so attention fades in during training.
        self.sigma = nn.Parameter(torch.zeros(1))
    def forward(self, x):
        """
        inputs :
            x : input feature maps(B X C X W X H)
        returns :
            out : self attention value + input feature
        """
        _, ch, h, w = x.size()
        # Theta path (queries): (B, C/8, H*W)
        theta = self.snconv1x1_theta(x)
        theta = theta.view(-1, ch//8, h*w)
        # Phi path (keys): pooled to (B, C/8, H*W/4)
        phi = self.snconv1x1_phi(x)
        phi = self.maxpool(phi)
        phi = phi.view(-1, ch//8, h*w//4)
        # Attn map: (B, H*W, H*W/4), softmax over key positions
        attn = torch.bmm(theta.permute(0, 2, 1), phi)
        attn = self.softmax(attn)
        # g path (values): pooled to (B, C/2, H*W/4)
        g = self.snconv1x1_g(x)
        g = self.maxpool(g)
        g = g.view(-1, ch//2, h*w//4)
        # Attn_g: weighted sum of values, reshaped back to (B, C/2, H, W)
        attn_g = torch.bmm(g, attn.permute(0, 2, 1))
        attn_g = attn_g.view(-1, ch//2, h, w)
        attn_g = self.snconv1x1_attn(attn_g)
        # Out: gated residual connection
        out = x + self.sigma*attn_g
        return out
class NoiseInjection(nn.Module):
    """Adds learnable per-channel noise: x + weight * eps, eps ~ N(0, 1).

    `weight` has shape (1, channel, size, size) and starts at zero, so the
    layer is an exact identity at initialization. A single (B, 1, H, W)
    noise map is drawn per call and shared across channels via broadcasting,
    so `size` must equal the input's spatial resolution.
    """
    def __init__(self, channel, size=1):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1, channel, size, size))
    def forward(self, image):
        # Draw one noise channel matching the input batch/spatial dims.
        ref = image[:, :1, :, :]
        noise = ref.new(ref.shape).normal_(0, 1)
        return image + self.weight * noise
class GenIniBlock(nn.Module):
    """Initial generator block: project z to a (out_channels, 4, 4) feature
    map with a spectral-normalized linear layer, optionally adding noise.

    NOTE(review): `size` is forwarded to NoiseInjection and should match the
    4x4 output resolution; callers pass size=4.
    """
    def __init__(self, z_dim, out_channels, size=1, add_noise=True):
        super().__init__()
        self.out_channels = out_channels
        self.add_noise = add_noise
        self.snlinear0 = snlinear(in_features=z_dim, out_features=out_channels * 4 * 4)
        if add_noise:
            self.noise0 = NoiseInjection(out_channels, size)
    def forward(self, z):
        act0 = self.snlinear0(z)
        # Reshape the flat projection into a 4x4 spatial feature map.
        act0 = act0.view(-1, self.out_channels, 4, 4)
        if self.add_noise:
            act0 = self.noise0(act0)
        return act0
class GenBlock(nn.Module):
    """Generator residual block with 2x nearest-neighbor upsampling.

    Main path: BN -> ReLU -> upsample -> conv3x3 (+noise) -> BN -> ReLU ->
    conv3x3 (+noise). Shortcut: upsample -> conv1x1. `size` is the OUTPUT
    spatial resolution, used to size the NoiseInjection weights.
    """
    def __init__(self, in_channels, out_channels, size=1, add_noise=True):
        super().__init__()
        self.conv_1 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.conv_2 = snconv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.add_noise = add_noise
        if add_noise:
            self.noise1 = NoiseInjection(out_channels, size)
            self.noise2 = NoiseInjection(out_channels, size)
        self.conv_0 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.upsample = lambda x: F.interpolate(x, scale_factor=2, mode='nearest')
    def forward(self, x,):
        x0 = x
        x = self.relu(self.bn1(x))
        x = self.upsample(x) # upsample
        x = self.conv_1(x)
        if self.add_noise:
            x = self.noise1(x)
        x = self.relu(self.bn2(x))
        x = self.conv_2(x)
        if self.add_noise:
            x = self.noise2(x)
        # Shortcut path: upsample then 1x1 conv to match out_channels.
        x0 = self.upsample(x0) # upsample
        x0 = self.conv_0(x0)
        out = x + x0
        return out
class Generator(nn.Module):
    r'''SAGAN Generator
    Args:
        latent_dim: latent dimension
        conv_dim: base number of channels
        image_size: image resolution (64, 128, 256 or 512)
        out_channels: number of output channels
        add_noise: whether to add noises to each conv layer
        attn: whether to add self-attention layer

    The latent is projected to 4x4 and doubled in resolution per GenBlock.
    Self-attention (when enabled) is applied at the stage whose output has
    conv_dim*4 channels — the width self_attn1 is constructed with.
    '''
    def __init__(self, latent_dim, conv_dim=32, image_size=128, out_channels=3, add_noise=True, attn=True):
        super().__init__()
        self.latent_dim = latent_dim
        self.conv_dim = conv_dim
        self.image_size = image_size
        self.add_noise = add_noise
        self.attn = attn
        self.block0 = GenIniBlock(latent_dim, conv_dim * 16, 4, add_noise=add_noise)
        self.block1 = GenBlock(conv_dim * 16, conv_dim * 16, size=8, add_noise=add_noise)
        self.block2 = GenBlock(conv_dim * 16, conv_dim * 8, size=16, add_noise=add_noise)
        if image_size == 64:
            self.block3 = GenBlock(conv_dim * 8, conv_dim * 4, size=32, add_noise=add_noise)
            if attn:
                self.self_attn1 = Self_Attn(conv_dim * 4)
            self.block4 = GenBlock(conv_dim * 4, conv_dim * 2, size=64, add_noise=add_noise)
            # The final stage ends at conv_dim*2 channels for 64x64.
            conv_dim = conv_dim * 2
        elif image_size == 128:
            self.block3 = GenBlock(conv_dim * 8, conv_dim * 4, add_noise=add_noise)
            if attn:
                self.self_attn1 = Self_Attn(conv_dim * 4)
            self.block4 = GenBlock(conv_dim * 4, conv_dim * 2, add_noise=add_noise)
            # self.self_attn2 = Self_Attn(conv_dim*2)
            self.block5 = GenBlock(conv_dim * 2, conv_dim, add_noise=add_noise)
        else: # image_size == 256 or 512
            self.block3 = GenBlock(conv_dim * 8, conv_dim * 8, add_noise=add_noise)
            self.block4 = GenBlock(conv_dim * 8, conv_dim * 4, add_noise=add_noise)
            if attn:
                self.self_attn1 = Self_Attn(conv_dim * 4)
            self.block5 = GenBlock(conv_dim * 4, conv_dim * 2, add_noise=add_noise)
            self.block6 = GenBlock(conv_dim * 2, conv_dim, add_noise=add_noise)
            if image_size == 512:
                self.block7 = GenBlock(conv_dim, conv_dim, add_noise=add_noise)
        self.bn = nn.BatchNorm2d(conv_dim, eps=1e-5, momentum=0.0001, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.toRGB = snconv2d(in_channels=conv_dim, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.tanh = nn.Tanh()
        # Weight init
        self.apply(init_weights)
    def forward(self, z):
        out = self.block0(z)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        if self.image_size <= 128:
            # 64/128: block3 outputs conv_dim*4 channels — the width
            # self_attn1 was built for.
            if self.attn:
                out = self.self_attn1(out)
            out = self.block4(out)
        else:
            # 256/512: block4 is the stage with conv_dim*4 output channels.
            # Fix: attention was previously applied after block3, whose
            # conv_dim*8 output did not match self_attn1's expected channel
            # count and raised at runtime.
            out = self.block4(out)
            if self.attn:
                out = self.self_attn1(out)
        if self.image_size > 64:
            out = self.block5(out)
        if self.image_size == 256 or self.image_size == 512:
            out = self.block6(out)
        if self.image_size == 512:
            out = self.block7(out)
        out = self.bn(out)
        out = self.relu(out)
        out = self.toRGB(out)
        out = self.tanh(out)
        return out
class DiscOptBlock(nn.Module):
    # Compared with block, optimized_block always downsamples the spatial resolution of the input vector by a factor of 4.
    # Main path: conv3x3 -> ReLU -> conv3x3 -> avgpool(2).
    # Shortcut:  avgpool(2) -> conv1x1 (channel match).
    # No pre-activation on the very first conv (this block sits at the input).
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv_1 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.conv_2 = snconv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.conv_0 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
        self.relu = nn.ReLU()
        self.downsample = nn.AvgPool2d(2)
    def forward(self, x):
        x0 = x
        x = self.conv_1(x)
        x = self.relu(x)
        x = self.conv_2(x)
        x = self.downsample(x)
        # Shortcut: downsample first, then project channels.
        x0 = self.downsample(x0)
        x0 = self.conv_0(x0)
        out = x + x0
        return out
class DiscBlock(nn.Module):
    """Discriminator residual block with optional 2x average-pool downsampling.

    Pre-activation main path: ReLU -> conv3x3 -> ReLU -> conv3x3 [-> pool].
    The 1x1 shortcut conv is applied only when downsampling or when the
    channel counts differ; otherwise the shortcut is the identity.
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv_1 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.conv_2 = snconv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.conv_0 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
        self.relu = nn.ReLU()
        self.downsample = nn.AvgPool2d(2)
        self.ch_mismatch = False
        if in_channels != out_channels:
            self.ch_mismatch = True
    def forward(self, x, downsample=True):
        x0 = x
        x = self.relu(x)
        x = self.conv_1(x)
        x = self.relu(x)
        x = self.conv_2(x)
        if downsample:
            x = self.downsample(x)
        # Shortcut needs the 1x1 conv only on downsample or channel change.
        if downsample or self.ch_mismatch:
            x0 = self.conv_0(x0)
            if downsample:
                x0 = self.downsample(x0)
        out = x + x0
        return out
class Discriminator(nn.Module):
    """SAGAN discriminator: fromRGB conv, a stack of DiscBlocks with one
    self-attention layer, global sum pooling and a spectral-normalized
    linear score head.

    Args:
        conv_dim: base channel width.
        image_size: input resolution (64, 128, 256 or 512).
        in_channels: image channels.
        out_channels: score dimensionality (usually 1).
        out_feature: when True, forward also returns the pooled features.
    """
    def __init__(self, conv_dim, image_size=128, in_channels=3, out_channels=1, out_feature=False):
        super().__init__()
        self.conv_dim = conv_dim
        self.image_size = image_size
        self.out_feature = out_feature
        self.fromRGB = snconv2d(in_channels, conv_dim, 1, bias=True)
        self.block1 = DiscBlock(conv_dim, conv_dim * 2)
        self.self_attn = Self_Attn(conv_dim*2)
        self.block2 = DiscBlock(conv_dim * 2, conv_dim * 4)
        self.block3 = DiscBlock(conv_dim * 4, conv_dim * 8)
        if image_size == 64:
            self.block4 = DiscBlock(conv_dim * 8, conv_dim * 16)
            self.block5 = DiscBlock(conv_dim * 16, conv_dim * 16)
        elif image_size == 128:
            self.block4 = DiscBlock(conv_dim * 8, conv_dim * 16)
            self.block5 = DiscBlock(conv_dim * 16, conv_dim * 16)
            # Fix: forward() calls self.block6 for 128x128 inputs (the
            # default image_size), but it was never created here, so the
            # model raised AttributeError. Define the final block.
            self.block6 = DiscBlock(conv_dim * 16, conv_dim * 16)
        else:
            self.block4 = DiscBlock(conv_dim * 8, conv_dim * 8)
            self.block5 = DiscBlock(conv_dim * 8, conv_dim * 16)
            self.block6 = DiscBlock(conv_dim * 16, conv_dim * 16)
            # Fix: forward() calls self.block7 for 256/512 inputs, but it
            # was never created here.
            self.block7 = DiscBlock(conv_dim * 16, conv_dim * 16)
        self.relu = nn.ReLU(inplace=True)
        self.snlinear1 = snlinear(in_features=conv_dim*16, out_features=out_channels)
        # Weight init
        self.apply(init_weights)
    def forward(self, x):
        h0 = self.fromRGB(x)
        h1 = self.block1(h0)
        h1 = self.self_attn(h1)
        h2 = self.block2(h1)
        h3 = self.block3(h2)
        h4 = self.block4(h3)
        if self.image_size == 64:
            h5 = self.block5(h4, downsample=False)
            h6 = h5
        elif self.image_size == 128:
            h5 = self.block5(h4)
            h6 = self.block6(h5, downsample=False)
        else:
            h5 = self.block5(h4)
            h6 = self.block6(h5)
            h6 = self.block7(h6, downsample=False)
        h6 = self.relu(h6)
        # Global sum pooling over the spatial dimensions.
        h7 = torch.sum(h6, dim=[2,3])
        out = torch.squeeze(self.snlinear1(h7))
        if self.out_feature:
            return out, h7
        else:
            return out
class DisFinalBlock(nn.Module):
    """Final block for the Discriminator.

    conv3x3 (pad 1, resolution preserved) -> ReLU -> conv4x4 (no padding,
    collapses a 4x4 map to 1x1) -> ReLU -> conv1x1 down to a single score
    channel. All convolutions are spectral-normalized.
    """
    def __init__(self, in_channels):
        super().__init__()
        self.conv_1 = snconv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, padding=1)
        self.conv_2 = snconv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=4)
        self.conv_3 = snconv2d(in_channels=in_channels, out_channels=1, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        out = self.relu(self.conv_1(x))
        out = self.relu(self.conv_2(out))
        return self.conv_3(out)
class SNResMLPBlock(nn.Module):
    """Residual MLP block with spectral-normalized linear layers:
    relu(x + fc2(relu(fc1(x)))). Width is preserved end to end.
    """
    def __init__(self, channels):
        super().__init__()
        self.fc1 = nn.Sequential(
            snlinear(channels, channels),
            nn.ReLU(inplace=True)
        )
        self.fc2 = snlinear(channels, channels)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        residual = self.fc2(self.fc1(x))
        return self.relu(residual + x)
class Discriminator_MLP(nn.Module):
    """MLP discriminator built from spectral-normalized residual blocks.

    Args:
        in_channels: input feature width.
        out_channels: hidden width of the residual trunk.
        out_feature: when True, forward returns (score, feature).
        num_block: number of SNResMLPBlocks in the trunk (1-3; blocks
            beyond num_block are simply not created).
    """
    def __init__(self, in_channels, out_channels, out_feature=True, num_block=3):
        super().__init__()
        self.out_feature = out_feature
        self.num_block = num_block
        self.fc1 = nn.Sequential(
            snlinear(in_channels, out_channels),
            nn.ReLU(inplace=True)
        )
        self.block1 = SNResMLPBlock(out_channels)
        if num_block > 1:
            self.block2 = SNResMLPBlock(out_channels)
        if num_block > 2:
            self.block3 = SNResMLPBlock(out_channels)
        self.fc4 = snlinear(out_channels, 1)
        self.apply(init_weights)
    def forward(self, z):
        out = self.fc1(z)
        f = self.block1(out)
        if self.num_block > 1:
            f = self.block2(f)
        if self.num_block > 2:
            f = self.block3(f)
        out = self.fc4(f)
        if self.out_feature:
            return out, f
        else:
            return out
class DCDiscriminator(nn.Module):
    """DCGAN-style image discriminator: three stride-2 convolutions followed
    by a linear score head.

    The head expects a flattened (conv_dim*4, 4, 4) feature map, i.e. the
    conv stack assumes 32x32 inputs (32 -> 16 -> 8 -> 4).
    """
    def __init__(self, conv_dim=64, image_size=64, image_channel=3):
        super().__init__()
        widths = [image_channel, conv_dim, conv_dim * 2, conv_dim * 4]
        trunk = []
        for c_in, c_out in zip(widths[:-1], widths[1:]):
            trunk.append(nn.Conv2d(c_in, c_out, 5, 2, 2))
            trunk.append(nn.LeakyReLU(inplace=True))
        self.conv = nn.Sequential(*trunk)
        self.fc = nn.Linear(conv_dim * 4 * 4 * 4, 1)
    def forward(self, x):
        feat = self.conv(x).view(x.size(0), -1)
        return self.fc(feat)
| 15,117 | 35.254197 | 137 | py |
AGES | AGES-master/train.py | import sys
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.utils.data import TensorDataset, DataLoader
import argparse
import matplotlib.pyplot as plt
from bgm import *
from sagan import *
from config import *
import os
import random
import utils
def main():
    """Entry point: parse config, build model/discriminator/optimizers for
    the selected dataset, optionally resume from a checkpoint, then run the
    AGE training loop for args.n_epochs epochs.

    Sets module-level globals read by train_age/test/test_toy:
    args, device, num_iter_per_epoch, log_file, fixed_noise.
    """
    global args
    args = get_config()
    # NOTE(review): 'commond' looks like a typo for 'command', but the key is
    # written into config.txt and possibly read elsewhere — leave as-is.
    args.commond = 'python ' + ' '.join(sys.argv)
    # Create saving directory (name encodes the hyperparameter configuration)
    if args.unigen:
        save_dir = './results_unigen/{0}/G{1}_glr{2}_dlr{3}_dstep{4}_zdim{5}_{6}/'.format(
            args.dataset, args.dec_dist, str(args.lr), str(args.lr_d), str(args.d_steps_per_iter),
            str(args.latent_dim), args.div)
    else:
        save_dir = './results/{0}/E{1}_G{2}_glr{3}_dlr{4}_gstep{5}_dstep{6}_zdim{7}_{8}/'.format(
            args.dataset, args.enc_dist, args.dec_dist, str(args.lr), str(args.lr_d),
            str(args.g_steps_per_iter), str(args.d_steps_per_iter), str(args.latent_dim), args.div)
    utils.make_folder(save_dir)
    utils.write_config_to_file(args, save_dir)
    global device
    device = torch.device('cuda')
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # Load datasets
    train_loader, test_loader = utils.make_dataloader(args)
    num_samples = len(train_loader.dataset)
    global num_iter_per_epoch
    num_iter_per_epoch = num_samples // args.batch_size
    # Losses file (append when resuming, truncate otherwise)
    log_file_name = os.path.join(save_dir, 'log.txt')
    global log_file
    if args.resume:
        log_file = open(log_file_name, "at")
    else:
        log_file = open(log_file_name, "wt")
    # Build model: generator-only when args.unigen, otherwise a
    # bidirectional model (encoder + decoder) with a joint discriminator.
    if args.unigen:
        if args.dataset == 'mnist_stack':
            model = DCDecoder(args.latent_dim, 64, args.image_size, 3, args.dec_dist)
            discriminator = DCDiscriminator(args.d_conv_dim, args.image_size)
        else:
            model = Generator(args.latent_dim, args.g_conv_dim, args.image_size)
            discriminator = Discriminator(args.d_conv_dim, args.image_size)
        encoder_optimizer = None
        decoder_optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
        D_optimizer = optim.Adam(discriminator.parameters(), lr=args.lr_d, betas=(args.beta1, args.beta2))
    else:
        if args.dataset == 'mog':
            model = ToyAE(data_dim=2, latent_dim=args.latent_dim, enc_hidden_dim=500, dec_hidden_dim=500,
                          enc_dist=args.enc_dist, dec_dist=args.dec_dist)
            discriminator = DiscriminatorMLP(data_dim=2, latent_dim=args.latent_dim, hidden_dim_x=400,
                                             hidden_dim_z=400, hidden_dim=400)
        elif args.dataset in ['mnist', 'mnist_stack']:
            image_channel = 3 if args.dataset == 'mnist_stack' else 1
            tanh = args.prior == 'uniform' and args.enc_dist == 'deterministic'
            model = DCAE(args.latent_dim, 64, args.image_size, image_channel, args.enc_dist, args.dec_dist, tanh)
            discriminator = DCJointDiscriminator(args.latent_dim, 64, args.image_size, image_channel, args.dis_fc_size)
        else:
            model = BGM(args.latent_dim, args.g_conv_dim, args.image_size, 3,
                        args.enc_dist, args.enc_arch, args.enc_fc_size, args.enc_noise_dim, args.dec_dist)
            discriminator = BigJointDiscriminator(args.latent_dim, args.d_conv_dim, args.image_size, args.dis_fc_size)
        encoder_optimizer = optim.Adam(model.encoder.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
        decoder_optimizer = optim.Adam(model.decoder.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
        D_optimizer = optim.Adam(discriminator.parameters(), lr=args.lr_d, betas=(args.beta1, args.beta2))
    # Load model from checkpoint (before the DataParallel wrap, so the
    # state dicts are the bare-module ones saved below).
    if args.resume:
        ckpt_dir = args.ckpt_dir if args.ckpt_dir != '' else save_dir + 'model' + str(args.start_epoch - 1) + '.sav'
        checkpoint = torch.load(ckpt_dir)
        model.load_state_dict(checkpoint['model'])
        discriminator.load_state_dict(checkpoint['discriminator'])
        del checkpoint
    model = nn.DataParallel(model.to(device))
    discriminator = nn.DataParallel(discriminator.to(device))
    # Fixed noise from prior p_z for generating from G
    global fixed_noise
    if args.prior == 'gaussian':
        fixed_noise = torch.randn(args.save_n_samples, args.latent_dim, device=device)
    else:
        fixed_noise = torch.rand(args.save_n_samples, args.latent_dim, device=device) * 2 - 1
    # Train
    for i in range(args.start_epoch, args.start_epoch + args.n_epochs):
        train_age(i, model, discriminator, encoder_optimizer, decoder_optimizer, D_optimizer, train_loader,
                  args.print_every, save_dir, args.sample_every, test_loader)
        if i % args.save_model_every == 0:
            torch.save({'model': model.module.state_dict(), 'discriminator': discriminator.module.state_dict()},
                       save_dir + 'model' + str(i) + '.sav')
# Training functions
def train_age(epoch, model, discriminator, encoder_optimizer, decoder_optimizer, D_optimizer, train_loader, print_every,
              save_dir, sample_every, test_loader=None):
    '''
    Training the bidirectional generative model using AGE.
    :param epoch: training epoch
    :param model: encoder E(x) and generator G(z)
    :param discriminator: discriminator D(x,z)
    :param encoder_optimizer: optimizer for encoder (None when args.unigen)
    :param decoder_optimizer: optimizer for generator
    :param D_optimizer: optimizer for discriminator
    :param train_loader: training data loader
    :param print_every: print losses every print_every iterations
    :param save_dir: directory for saving sampled images and model
    :param sample_every: test (sample images) every sample_every iterations
    :param test_loader: test data loader (used on MoG)

    Reads module globals set by main(): args, device, num_iter_per_epoch,
    log_file, fixed_noise.
    '''
    model.train()
    discriminator.train()
    for batch_idx, (x) in enumerate(train_loader):
        # Most loaders yield (data, label) tuples; keep only the data.
        if not args.dataset in ['celeba1', 'celeba-1']:
            x = x[0]
        x = x.to(device)
        # ================== TRAIN DISCRIMINATOR ================== #
        for _ in range(args.d_steps_per_iter):
            discriminator.zero_grad()
            # Sample z from prior p_z
            if args.prior == 'gaussian':
                z = torch.randn(x.size(0), args.latent_dim, device=x.device)
            else:
                z = torch.rand(x.size(0), args.latent_dim, device=x.device) * 2 - 1
            # Get inferred latent z = E(x) and generated image x = G(z)
            if args.unigen:
                x_fake = model(z)
            else:
                z_fake, x_fake = model(x, z)
            # Compute D loss
            if args.unigen:
                # Real data score
                encoder_score = discriminator(x)
                # Fake data score
                decoder_score = discriminator(x_fake.detach())
            else:
                encoder_score = discriminator(x, z_fake.detach())
                decoder_score = discriminator(x_fake.detach(), z)
                del z_fake
            del x_fake
            # Non-saturating logistic loss: push encoder pairs up, decoder
            # pairs down.
            loss_d = F.softplus(decoder_score).mean() + F.softplus(-encoder_score).mean()
            loss_d.backward()
            D_optimizer.step()
        for _ in range(args.g_steps_per_iter):
            if args.prior == 'gaussian':
                z = torch.randn(x.size(0), args.latent_dim, device=x.device)
            else:
                z = torch.rand(x.size(0), args.latent_dim, device=x.device) * 2 - 1
            if args.unigen:
                x_fake = model(z)
            else:
                z_fake, x_fake = model(x, z)
            # ================== TRAIN ENCODER ================== #
            if not args.unigen:
                model.zero_grad()
                encoder_score = discriminator(x, z_fake)
                del z_fake
                # Clip the scaling factor (density-ratio estimate r = e^score)
                r_encoder = torch.exp(encoder_score.detach())
                if args.clip:
                    upper = 1 / args.scale_lower if args.scale_upper is None else args.scale_upper
                    r_encoder = r_encoder.clamp(args.scale_lower, upper)
                    # Clipped surrogate: per-divergence scaling of the score.
                    if args.div == 'revkl':
                        s_encoder = 1 / r_encoder
                    elif args.div == 'js':
                        s_encoder = 1 / (1 + r_encoder)
                    elif args.div == 'hellinger':
                        s_encoder = 1 / (2 * torch.sqrt(r_encoder))
                    else:
                        assert args.div in ['all', 'kl']
                        s_encoder = r_encoder.new_ones(r_encoder.shape)
                    loss_encoder = (s_encoder * encoder_score).mean()
                else:
                    # Unclipped exact f-divergence objectives.
                    if args.div == 'revkl':
                        loss_encoder = -torch.exp(-encoder_score).mean()
                    elif args.div == 'js':
                        loss_encoder = -F.softplus(-encoder_score).mean()
                    elif args.div == 'hellinger':
                        loss_encoder = -torch.exp(-encoder_score / 2).mean()
                    else:
                        assert args.div in ['all', 'kl']
                        loss_encoder = encoder_score.mean()
                loss_encoder.backward()
                encoder_optimizer.step()
            # ================== TRAIN GENERATOR ================== #
            model.zero_grad()
            if args.unigen:
                decoder_score = discriminator(x_fake)
            else:
                decoder_score = discriminator(x_fake, z)
            # Clip the scaling factor
            r_decoder = torch.exp(decoder_score.detach())
            if args.clip:
                upper = 1 / args.scale_lower if args.scale_upper is None else args.scale_upper
                r_decoder = r_decoder.clamp(args.scale_lower, upper)
                if args.div == 'kl':
                    s_decoder = r_decoder
                elif args.div == 'js':
                    s_decoder = r_decoder / (r_decoder + 1)
                elif args.div == 'hellinger':
                    s_decoder = torch.sqrt(r_decoder) / 2
                else:
                    assert args.div in ['all', 'revkl']
                    s_decoder = r_decoder.new_ones(r_decoder.shape)
                loss_decoder = -(s_decoder * decoder_score).mean()
            else:
                if args.div == 'kl':
                    loss_decoder = -torch.exp(decoder_score).mean()
                elif args.div == 'js':
                    loss_decoder = -F.softplus(decoder_score).mean()
                elif args.div == 'hellinger':
                    loss_decoder = -torch.exp(decoder_score / 2).mean()
                else:
                    assert args.div in ['all', 'revkl']
                    loss_decoder = -decoder_score.mean()
            loss_decoder.backward()
            decoder_optimizer.step()
        # Print out losses
        if batch_idx == 0 or (batch_idx + 1) % print_every == 0:
            if args.unigen:
                log = ('Train Epoch: {} ({:.0f}%)\tD loss: {:.4f}, Decoder loss: {:.4f}'.format(
                    epoch, 100. * batch_idx / len(train_loader),
                    loss_d.item(), loss_decoder.item()))
            else:
                log = ('Train Epoch: {} ({:.0f}%)\tD loss: {:.4f}, Encoder loss: {:.4f}, Decoder loss: {:.4f}'.format(
                    epoch, 100. * batch_idx / len(train_loader),
                    loss_d.item(), loss_encoder.item(), loss_decoder.item()))
            print(log)
            log_file.write(log + '\n')
            log_file.flush()
        # Sample images
        if (batch_idx + 1) % sample_every == 0:
            if args.dataset != 'mog':
                if epoch % args.sample_every_epoch == 0:
                    test(epoch, batch_idx + 1, model, x[:args.save_n_recons], save_dir)
            else:
                # On MoG, only plot on the last sampling point of the epoch.
                plot = epoch % args.sample_every_epoch == 0 and (batch_idx + 1) + sample_every > num_iter_per_epoch
                test_toy(epoch, batch_idx + 1, model, test_loader, fixed_noise, save_dir, plot)
def test(epoch, i, model, test_data, save_dir):
    """Save image samples (and reconstructions, for bidirectional models).

    Writes recon_<epoch>_<i>.png and gen_<epoch>_<i>.png into save_dir.
    Reads module globals: args, device, fixed_noise. Restores model.train()
    before returning.
    """
    model.eval()
    with torch.no_grad():
        x = test_data.to(device)
        # Reconstruction
        if not args.unigen:
            x_recon = model(x, recon=True)
            # Interleave inputs with their reconstructions for the grid.
            recons = utils.draw_recon(x.cpu(), x_recon.cpu())
            del x_recon
            save_image(recons, save_dir + 'recon_' + str(epoch) + '_' + str(i) + '.png', nrow=args.nrow,
                       normalize=True, scale_each=True)
        # Generation
        sample = model(z=fixed_noise).cpu()
        save_image(sample, save_dir + 'gen_' + str(epoch) + '_' + str(i) + '.png', normalize=True, scale_each=True)
        del sample
    model.train()
def test_toy(epoch, i, model, test_loader, fixed_z, save_dir, plot=False):
    """Evaluate the toy (MoG) model: scatter-plot samples/reconstructions
    and, for gaussian decoders, log per-sample ELBO / KL / NLL.

    Reads module globals: args, device, log_file. Restores model.train()
    before returning.
    """
    model.eval()
    with torch.no_grad():
        x_samp = model(z=fixed_z).detach().cpu()
    x_test, y_test = next(iter(test_loader))
    x_test = x_test.to(device)
    with torch.no_grad():
        if args.dec_dist == 'gaussian':
            x_recon, x_mu, x_logvar, z_mu, z_logvar = model(x=x_test, recon=True)
        else:
            x_recon = model(x=x_test, recon=True)
    x_recon = x_recon.detach().cpu()
    if plot:
        plt.rcParams['figure.figsize'] = [10, 5]
        plt.subplot(121)
        plt.scatter(x_samp[:, 0], x_samp[:, 1], marker='o')
        plt.subplot(122)
        # Color reconstructions by their ground-truth mixture component.
        plt.scatter(x_recon[:, 0], x_recon[:, 1], marker='o', c=y_test)
        plt.savefig(save_dir + str(epoch) + '.png')
        plt.close()
    if args.dec_dist == 'gaussian':
        nll = gaussian_nll(x_mu, x_logvar, x_test)
        kl_d = kl_div(z_mu, z_logvar)
        elbo = kl_d + nll
        log = ('Test Epoch {}, Iter {}\t-ELBO: {:.4f}, KL: {:.4f}, NLL: {:.4f}'.format(
            epoch, i, elbo.item() / len(x_test), kl_d.item() / len(x_test), nll.item() / len(x_test)))
        print(log)
        log_file.write(log + '\n')
        log_file.flush()
    model.train()
# Script entry point: parse the config and launch AGE training.
if __name__ == '__main__':
    main()
| 14,270 | 40.485465 | 120 | py |
submodlib | submodlib-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../submodlib'))
# -- Project information -----------------------------------------------------
project = 'submodlib'
copyright = '2020, Vishal Kaushal'
author = 'Vishal Kaushal'
# The full version, including alpha/beta/rc tags
#version = '0.0.7'
#release = '0.0.7'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
#'numpydoc',
'sphinx.ext.napoleon',
'sphinxcontrib.bibtex'
]
bibtex_bibfiles = ['ref.bib']
#mathjax_path = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js'
autosummary_generate = True
numpydoc_show_class_members = False
class_members_toctree = False
#napoleon_numpy_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
source_suffix = '.rst'
import submodlib
version = submodlib.__version__
release = submodlib.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | 2,861 | 30.450549 | 79 | py |
grabnel | grabnel-master/src/train_model.py | """Code to train a graph classifier model.
To be used to train any model except for the GraphUNet in the paper.
See train_gunet.py for the script to train Graph UNet.
"""
import argparse
import os
from copy import deepcopy
from os.path import join
import pandas as pd
import torch
import torch.optim as optim
from attack.data import Data, ERData
from attack.utils import (classification_loss, get_dataset_split, get_device,
number_of_correct_predictions, setseed)
from models.utils import get_model_class
# ---- command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='IMDB-BINARY')
parser.add_argument('--model', type=str, default='gcn', choices=['gcn', 'gin', 'embedding', 's2v'])
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--wd', type=float, default=0.0001, help='weight decay')
parser.add_argument('--num_epochs', type=int, default=200, help='number of epochs to train for')
parser.add_argument('--gpu', type=str, default=None, help='A gpu device number if available.')
parser.add_argument('--seed', type=int, default=0, help='RNG seed.')
args = parser.parse_args()
setseed(args.seed)
print(vars(args))
# use gpu if available else cpu
device = get_device(args.gpu)
# load data ('er_graphs' is a synthetic dataset with its own loader)
dataset_split = get_dataset_split(args.dataset)
if args.dataset == 'er_graphs':
    data = ERData(seed=args.seed)
else:
    data = Data(dataset_name=args.dataset, dataset_split=dataset_split, seed=args.seed)
# instantiate the chosen classifier architecture
model_class = get_model_class(args.model)
model = model_class(data.feature_dim, data.number_of_labels)
model = model.to(device)
# specify loss function (binary vs multi-class is decided by the dataset)
loss_fn = classification_loss(data.is_binary)
# train model
train_loader, valid_loader = data.training_dataloaders()
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
best_val_acc = 0.
best_model = None
training_logs = []  # rows of [epoch, train_loss, valid_loss, train_acc, valid_acc]
for epoch in range(args.num_epochs):
    # training step
    model.train()
    train_loss, train_acc = 0, 0
    for i, (graphs, labels) in enumerate(train_loader):
        graphs, labels = graphs.to(device), labels.to(device)
        labels = labels.long()
        predictions = model(graphs)
        # GIN models still give a bug here: binary models are expected to emit
        # a single logit, so keep only the first column when more are produced.
        if data.is_binary and predictions.shape[1] > 1:
            predictions = predictions[:, 0]
        loss = loss_fn(predictions, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.detach().item()
        train_acc += number_of_correct_predictions(predictions, labels, data.is_binary).detach().item()
    # average loss over batches; accuracy over individual graphs
    train_loss /= len(train_loader)
    train_acc /= len(train_loader.dataset)
    # evaluation step
    model.eval()
    valid_loss, valid_acc = 0, 0
    with torch.no_grad():
        for i, (graphs, labels) in enumerate(valid_loader):
            graphs, labels = graphs.to(device), labels.to(device)
            labels = labels.long()
            predictions = model(graphs)
            if data.is_binary and predictions.shape[1] > 1:
                predictions = predictions[:, 0]
            loss = loss_fn(predictions, labels)
            valid_loss += loss.detach().item()
            valid_acc += number_of_correct_predictions(predictions, labels, data.is_binary).detach().item()
    valid_loss /= len(valid_loader)
    valid_acc /= len(valid_loader.dataset)
    # save best model (checkpoint selection is by validation accuracy)
    if valid_acc > best_val_acc:
        print('Best val acc recorded at epoch ', epoch)
        best_model = deepcopy(model)
        best_val_acc = valid_acc
    print(epoch, '{:.4f}'.format(train_loss), '{:.4f}'.format(valid_loss),
          '{:.2f}'.format(train_acc), '{:.2f}'.format(valid_acc))
    training_logs.append([epoch, train_loss, valid_loss, train_acc, valid_acc])
# save model
os.makedirs(join('output', 'models'), exist_ok=True)
model_path = join('output', 'models', f'{args.model}_{args.dataset}_{args.seed}.pt')
torch.save(best_model.state_dict(), model_path)
# save training information
os.makedirs(join('output', 'training_logs'), exist_ok=True)
training_logs_path = join('output', 'training_logs', f'{args.model}_{args.dataset}_{args.seed}.csv')
training_logs = pd.DataFrame(training_logs, columns=['epoch', 'train_loss', 'valid_loss', 'train_acc', 'valid_acc'])
training_logs.to_csv(training_logs_path)
| 4,358 | 37.236842 | 116 | py |
grabnel | grabnel-master/src/train_gunet.py | import sys
sys.path.append('../')
sys.path.append('./src/models/gunet')
import argparse
import random
import time
import torch
import numpy as np
from src.models.gunet.network import GNet
from src.models.gunet.trainer import Trainer
from src.models.gunet.utils.data_loader import FileLoader
from src.models.gunet.config import get_parser, update_args_with_default
# Build the argument parser on top of the shared GUNet parser.
parser = get_parser()
parser.add_argument('--save_path', type=str, default='../src/output/models/')
parser.add_argument('--log_path', type=str, default='../src/output/training_logs/')
parser.add_argument('--split_save_path', type=str, default='../data/')
parser.add_argument('-seed', '--seed', type=int, default=1, help='seed')
parser.add_argument('-data', '--data', default='PROTEINS', help='data folder name')
parser.add_argument('--preamble_path', type=str, default='../data/gunet_data/')
# parse_known_args so unrecognised flags (consumed elsewhere) do not error out
args, _ = parser.parse_known_args()
# for known datasets, overwrite with the default hyperparameters setting provided by the GUNet authors
args = update_args_with_default(args)
def set_random(seed):
    """Seed every RNG used during training (stdlib, NumPy, PyTorch) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
def app_run(args, G_data, fold_idx):
    """Train a Graph U-Net on a single cross-validation fold.

    Args:
        args: parsed hyperparameter/config namespace.
        G_data: dataset wrapper; mutated in place to select the fold.
        fold_idx: zero-based fold index.
    """
    G_data.use_fold_data(fold_idx)
    G_data.pickle_data()
    network = GNet(G_data.feat_dim, G_data.num_class, args)
    runner = Trainer(args, network, G_data, save_path=args.save_path, log_path=args.log_path)
    runner.train()
def main():
    """Entry point: seed RNGs, load the dataset, then train one fold or all ten.

    `args.fold == 0` requests full 10-fold cross-validation; any other value
    trains only that (1-indexed) fold.
    """
    print(args)
    set_random(args.seed)
    load_start = time.time()
    G_data = FileLoader(args).load_data(args.preamble_path)
    print('load data using ------>', time.time()-load_start)
    folds = range(10) if args.fold == 0 else [args.fold - 1]
    for fold_idx in folds:
        print('start training ------> fold', fold_idx+1)
        app_run(args, G_data, fold_idx)
# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 1,909 | 31.372881 | 102 | py |
grabnel | grabnel-master/src/evaluate_model.py | """Code to evaluate a graph classifier model."""
import argparse
import os
from os.path import join
import pandas as pd
import torch
from attack.data import Data, ERData
from attack.utils import (classification_loss, correct_predictions,
get_dataset_split, get_device, setseed)
from models.utils import get_model_class
# ---- command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='IMDB-BINARY')
parser.add_argument('--model', type=str, default='gcn', choices=['gcn', 'gin', 's2v'])
parser.add_argument('--gpu', type=str, default=None, help='A gpu device number if available.')
parser.add_argument('--seed', type=int, default=0, help='RNG seed.')
args = parser.parse_args()
setseed(args.seed)
print(vars(args))
# use gpu if available else cpu
device = get_device(args.gpu)
# load data
dataset_split = get_dataset_split(args.dataset)
if args.dataset == 'er_graphs':
    data = ERData(seed=args.seed)
else:
    data = Data(dataset_name=args.dataset, dataset_split=dataset_split, seed=args.seed)
# load model weights saved by train_model.py for the same (model, dataset, seed)
model_class = get_model_class(args.model)
model = model_class(data.feature_dim, data.number_of_labels)
model_path = join(f'output/models/{args.model}_{args.dataset}_{args.seed}.pt')
model.load_state_dict(torch.load(model_path, map_location=device))
model = model.to(device)
model.eval()
# specify loss function
loss_fn = classification_loss(data.is_binary)
# dataframe constructor
def results_to_df(predictions, labels, loss, number_of_labels):
    """Summarise per-graph prediction results as a dataframe.

    Args:
        predictions: (n x l) numpy array of logits, n samples and l labels
            (l == 1 for binary classification).
        labels: 1D array with the true label of each graph.
        loss: 1D array with the per-sample loss.
        number_of_labels: number of classes in the dataset.

    Returns:
        DataFrame with columns [labels, loss, correct_prediction] plus either a
        single `predictions` column (binary) or `predictions_0..predictions_{l-1}`
        (multi-class).
    """
    columns = {
        'labels': labels,
        'loss': loss,
        'correct_prediction': correct_predictions(predictions, labels),
    }
    if number_of_labels == 2:
        columns['predictions'] = predictions.squeeze()
    else:
        for class_label in range(number_of_labels):
            columns[f'predictions_{class_label}'] = predictions[:, class_label]
    return pd.DataFrame(columns)
# datasets to evaluate model on: the two adversarial evaluation splits ('b', 'c')
dataset_b_loader, dataset_c_loader = data.adversarial_dataloaders()
results = []
# compute statistics for dataset b
for i, (graphs, labels) in enumerate(dataset_b_loader):
    with torch.no_grad():
        graphs, labels = graphs.to(device), labels.to(device)
        predictions = model(graphs)
        # GIN models still give a bug here: keep only the first logit column
        # for binary tasks.
        if data.is_binary and predictions.shape[1] > 1:
            predictions = predictions[:, :1]
        loss = loss_fn(predictions, labels, reduction='none')
        df = results_to_df(predictions.cpu().numpy(), labels.cpu().numpy(), loss.cpu().numpy(), data.number_of_labels)
        df['dataset'] = 'b'
        results.append(df)
# compute statistics for dataset c
for i, (graphs, labels) in enumerate(dataset_c_loader):
    with torch.no_grad():
        graphs, labels = graphs.to(device), labels.to(device)
        predictions = model(graphs)
        # GIN models still give a bug here:
        if data.is_binary and predictions.shape[1] > 1:
            predictions = predictions[:, :1]
        loss = loss_fn(predictions, labels, reduction='none')
        df = results_to_df(predictions.cpu().numpy(), labels.cpu().numpy(), loss.cpu().numpy(), data.number_of_labels)
        df['dataset'] = 'c'
        results.append(df)
# test set accuracy and loss, reported separately per split
results = pd.concat(results)
print('Test set accuracy (b)', 100*results.query('dataset=="b"')['correct_prediction'].mean())
print('Test set loss (b)', results.query('dataset=="b"')['loss'].mean())
print('Test set accuracy (c)', 100*results.query('dataset=="c"')['correct_prediction'].mean())
print('Test set loss (c)', results.query('dataset=="c"')['loss'].mean())
# save data
os.makedirs(join('output', 'evaluation_logs'), exist_ok=True)
results_path = os.path.join('output', 'evaluation_logs', f'{args.model}_{args.dataset}_{args.seed}.csv')
results.to_csv(results_path)
| 4,450 | 39.463636 | 124 | py |
grabnel | grabnel-master/src/attack/genetic.py | """Genetic algorithm attack."""
from copy import deepcopy
import dgl
import numpy as np
import pandas as pd
import scipy
import torch
from .base_attack import BaseAttack
from .utils import correct_predictions, population_graphs, random_sample_flip, random_sample_rewire_swap, get_allowed_nodes_k_hop, extrapolate_breakeven
class Genetic(BaseAttack):
    def __init__(self, classifier, loss_fn, population_size: int = 100,
                 crossover_rate: float = 0.1, mutation_rate: float = 0.2,
                 target_class: int = None,
                 mode: str = 'flip'):
        """A genetic algorithm based attack.

        This class stores an unperturbed graph in dgl.DGLGraph format, but the perturbed samples are represented as set.
        Each sample is a set of tuples (u, v) where u < v which represents an undirected edge u ~ v. To realise this
        perturbation each of the edges in the set are flipped. The original graph will be referred to as `graph` and an
        element of the population `sample`.

        Args:
            classifier: see BaseAttack
            loss_fn: see BaseAttack
            population_size: The number of perturbed graph in the population at any one point.
            crossover_rate: `crossover_rate` x `population_size` of the samples will be crossed over in each step.
            mutation_rate: All samples are mutated, `mutation_rate` of the flipped edges will be mutated.
            target_class: if not None, the attack only succeeds when the victim predicts this class.
            mode: 'flip', 'add', 'remove' or 'rewire': allowed edit operations on the edges.
        """
        super().__init__(classifier, loss_fn)
        self.target_class = target_class
        self.population_size = population_size
        self.crossover_rate = crossover_rate
        self.mutation_rate = mutation_rate
        self.population = []
        assert mode in ['flip', 'add', 'remove', 'rewire'], f'mode {mode} is not recognised!'
        self.mode = mode

    def attack(self, graph: dgl.DGLGraph, label: torch.tensor, budget: int, max_queries: int,
               initial_population: list = None):
        """The attack proceeds by rounds of selection, crossover and mutation. The number of rounds is determined by
        the `population_size` and `max_queries`. The population is a list of sets. Each set represents a perturbation.
        The set is of edge pairs (u, v) where u < v.

        Args:
            initial_population: if specified, use this list of samples as initial population. Otherwise we randomly
                sample from the graphs.

        Returns:
            (results dataframe, adversarial example graph or None)
        """
        adv_example = None
        is_edge_weighted = 'weight' in graph.edata.keys()
        if initial_population is not None:
            self.population += initial_population
            if len(self.population) < self.population_size:
                # top the population up with random perturbations
                if self.mode == 'rewire':
                    self.population += [random_sample_rewire_swap(graph, budget, rewire_only=not is_edge_weighted)
                                        for _ in
                                        range(self.population_size - len(self.population))]
                else:
                    self.population += [random_sample_flip(graph, budget) for _ in
                                        range(self.population_size - len(self.population))]
        else:
            self.population = self.initial_population(graph, budget)
        # FIX: `np.int` was removed in NumPy 1.24 -- use the builtin int instead.
        rounds = max(1, int(np.round(max_queries / self.population_size)))
        merged_dfs = None
        best_losses_so_far = []
        for round_no in range(rounds):
            # FIX: snapshot the population that is actually evaluated this round.
            # Previously the success branch below indexed `self.population` *after*
            # selection/crossover/mutation had reshuffled it, so the returned
            # adversarial example could differ from the sample that fooled the model.
            evaluated_population = list(self.population)
            fitness, predictions = self.fitness_of_population(graph, label, evaluated_population)
            fitness = np.nan_to_num(fitness, neginf=0., posinf=0.)
            print(f'Round{round_no}/{rounds}: {np.max(fitness)}')
            self.population = self.select_fittest(self.population, fitness)
            self.population = self.crossover_population(self.population, budget)
            self.population = self.mutate_population(graph, self.population)
            new_df = self.construct_dataframe(fitness, predictions, label.squeeze(), (round_no + 1) * self.population_size)
            if merged_dfs is None:
                merged_dfs = new_df
            else:
                merged_dfs = pd.concat([merged_dfs, new_df])
            # added by xingchen: terminate the run whenever the attack succeeds.
            labels = torch.repeat_interleave(label, len(predictions))
            if (self.target_class is None and np.sum(correct_predictions(predictions.numpy(), labels.numpy())) < len(
                    predictions)) \
                    or (self.target_class is not None and (np.argmax(predictions.numpy(), axis=1) == self.target_class).any()):
                print('Attack succeeded!')
                if self.target_class is None:
                    comps = correct_predictions(predictions.numpy(), labels.numpy())
                    for i, comp in enumerate(comps):
                        if not comp:
                            adv_example = population_graphs(graph, [evaluated_population[i]], mode=self.mode)
                            break
                else:
                    for i, pred in enumerate(predictions):
                        if np.argmax(pred.numpy()) == self.target_class:
                            adv_example = population_graphs(graph, [evaluated_population[i]], mode=self.mode)
                            break
                break
            best_losses_so_far.append(np.max(merged_dfs.losses.values))
            # early termination when the extrapolated query cost of success is hopeless
            if len(best_losses_so_far) > 200 / self.population_size and extrapolate_breakeven(best_losses_so_far) > 1e5 / self.population_size:
                print(f'Predicted breakeven point is {extrapolate_breakeven(best_losses_so_far)} and run terminated')
                break
        return merged_dfs, adv_example

    def initial_population(self, graph: dgl.DGLGraph, budget: int) -> list:
        """Create an initial population using random flips (or rewires) to create perturbations."""
        is_edge_weighted = 'weight' in graph.edata.keys()
        if self.mode == 'rewire':
            population = [random_sample_rewire_swap(graph, budget, rewire_only=not is_edge_weighted) for _ in
                          range(self.population_size - len(self.population))]
        else:
            population = [random_sample_flip(graph, budget) for _ in range(self.population_size)]
        return population

    def fitness_of_population(self, graph: dgl.DGLGraph, label: torch.tensor, population: list) \
            -> (np.array, torch.tensor):
        """Evaluate the fitness of the population.

        Args:
            graph: The original unperturbed graph
            label: The label of the unperturbed graph
            population: A list of perturbed graphs

        Returns:
            fitness: A 1D numpy array where the ith element is the loss of element i in the population
            predictions: A torch array containing logits (1D if its a binary classification task, otherwise an (n x C)
                array where C is the number of classes.
        """
        perturbed_graphs = population_graphs(graph, population, self.mode)
        with torch.no_grad():
            try:
                predictions = self.classifier(dgl.batch(perturbed_graphs))
            except RuntimeError:
                # this is possibly a dgl bug seemingly related to this https://github.com/dmlc/dgl/issues/2310
                # dgl.unbatch() should exactly inverses dgl.batch(), but you might get RuntimeError by doing something
                # like dgl.unbatch(dgl.batch([graphs])).
                predictions = self.classifier(perturbed_graphs)
        labels = torch.repeat_interleave(label, len(perturbed_graphs))
        fitness = self.loss_fn(predictions, labels, reduction='none')
        fitness = fitness.detach().numpy()
        return fitness, predictions

    def select_fittest(self, population: list, fitness: np.array) -> list:
        """Takes half the fittest scores and then samples the other half using softmax weighting on the scores."""
        softmax_fitness = scipy.special.softmax(fitness)
        fittest_idx = np.argsort(-softmax_fitness)[:int(np.floor(self.population_size / 2))]
        random_idx = np.random.choice(np.arange(self.population_size), int(np.ceil(self.population_size / 2)),
                                      replace=True, p=softmax_fitness)
        all_idx = np.concatenate([fittest_idx, random_idx])
        population = [population[idx] for idx in all_idx]
        return population

    def crossover_population(self, population: list, budget: int) -> list:
        """Each sample is crossed over with probability `self.crossover_rate`."""
        for i, sample in enumerate(population):
            # FIX: the comparison was inverted (`self.crossover_rate < np.random.rand()`),
            # which crossed samples over with probability 1 - crossover_rate and
            # contradicted the documented semantics in `__init__`.
            if np.random.rand() < self.crossover_rate:
                population[i] = self.crossover(sample, population, budget)
        return population

    def crossover(self, sample: set, population: list, budget: int) -> set:
        """Cross over of the `sample` and one other random sample from the `population`. The crossover is done by taking
        the union of all flips of the two samples and then sampling `budget` of them to create a new sample."""
        other_sample = np.random.choice(range(self.population_size))
        other_sample = population[other_sample]
        all_flips = list(set(sample).union(set(other_sample)))
        new_sample = np.random.choice(range(len(all_flips)), budget, replace=False)
        new_sample = set([all_flips[i] for i in new_sample])
        return new_sample

    def mutate_population(self, graph: dgl.DGLGraph, population: list) -> list:
        """Mutate all samples in the population."""
        for idx in range(self.population_size):
            population[idx] = self.mutate_sample(graph, population[idx])
        return population

    def mutate_sample(self, graph: dgl.DGLGraph, sample: set, ) -> set:
        """ Mutate the edges in the sample with at a rate of `self.mutation_rate`.

        Args:
            graph: The original unperturbed graph
            sample: The perturbed graph represented as a set of edge flips

        Returns:
            A new perturbed graph (in set format) which is a mutation of `sample`.
        """
        is_edge_weighted = 'weight' in graph.edata.keys()
        new_sample = set()
        # choose edges to mutate
        to_mutate = []
        for i, edge in enumerate(sample):
            if np.random.rand() < self.mutation_rate:
                to_mutate.append(edge)
            else:
                new_sample.add(edge)
        # mutate edges for new sample; resample until the mutated edge is not a duplicate
        for edge in to_mutate:
            new_edge = self.mutate_rewire_triplet(graph, edge, rewire_only=not is_edge_weighted) \
                if self.mode == 'rewire' \
                else self.mutate_edge(graph, edge, )
            while new_edge in new_sample:
                new_edge = self.mutate_rewire_triplet(graph, edge, rewire_only=not is_edge_weighted) \
                    if self.mode == 'rewire' \
                    else self.mutate_edge(graph, edge,)
            new_sample.add(new_edge)
        return new_sample

    @staticmethod
    def mutate_edge(graph, edge, ):
        """Mutate a single edge. The mutation chooses a random end point of the edge and then pairs it with a random
        node in the graph.
        """
        u, v = edge
        if np.random.rand() < 0.5:
            new_u = u
        else:
            new_u = v
        available_nodes = np.arange(graph.number_of_nodes())
        new_v = np.random.choice(available_nodes)
        while new_u == new_v:
            new_v = np.random.choice(available_nodes)
        return min(new_u, new_v), max(new_u, new_v)

    @staticmethod
    def mutate_rewire_triplet(graph, edge, rewire_only: bool = False, swap_only: bool = False):
        """Mutate triplet (u, v, w) used for rewiring operation (i.e. we either rewire u->v to u->w, or for the case
        when (u, w) is already an edge, swap u-v and u-w"""
        # NOTE: deepcopy comes from the module-level import; a redundant local
        # `from copy import deepcopy` was removed.
        if rewire_only and swap_only: raise ValueError(
            'Only either or neither of swap_only and rewire_only can be True!')
        # the index of the triplet to mutate
        patience = 100
        new_edge = deepcopy(edge)
        u, v, w = new_edge
        while patience >= 0:
            rewire_id = np.random.randint(0, len(edge))
            if rewire_id == 0:  # the candidate u is the neighbours of v with index number < v
                new_node = np.random.choice(graph.out_edges(v)[1])
                if new_node in [u, v, w] or new_node > v:
                    patience -= 1
                    continue
                new_edge = (new_node, v, w)
                break
            elif rewire_id == 1:  # the candidate v is the neighbour of u with index number > u
                new_node = np.random.choice(graph.out_edges(u)[1])
                if new_node in [u, v, w] or new_node < u:
                    patience -= 1
                    continue
                new_edge = (u, new_node, w)
                break
            elif rewire_id == 2:
                if swap_only:
                    new_node = np.random.choice(graph.out_edges(u)[1])
                    if new_node in [u, v, w] or new_node < u:
                        patience -= 1
                        continue
                else:
                    new_node = np.random.randint(u, graph.number_of_nodes())
                    if new_node in [u, v, w]:
                        patience -= 1
                        continue
                    if rewire_only and new_node in graph.out_edges(u)[1]:
                        patience -= 1
                        continue
                new_edge = (u, v, new_node)
                break
        if patience <= 0:
            # print(f'Patience exhausted in trying to mutate {edge}!')
            return new_edge
        return new_edge

    @staticmethod
    def construct_dataframe(losses: np.array, predictions: torch.tensor, label: torch.tensor, queries: int) \
            -> pd.DataFrame:
        """Construct a pandas dataframe consistent with the base class. This dataframe is for all samples evaluated
        after exactly `queries` queries."""
        labels = np.tile(label, len(predictions))
        df = pd.DataFrame({'losses': losses,
                           'correct_prediction': correct_predictions(predictions.numpy(), labels),
                           'queries': queries})
        return df
| 14,484 | 49.121107 | 152 | py |
grabnel | grabnel-master/src/attack/randomattack.py | """Random attack."""
from copy import deepcopy
import dgl
import numpy as np
import pandas as pd
import torch
from .base_attack import BaseAttack
from .utils import correct_predictions, random_sample_rewire_swap, random_sample_flip, population_graphs, extrapolate_breakeven
class RandomFlip(BaseAttack):
    def __init__(self, classifier: torch.nn.Module, loss_fn: torch.nn.Module, mode: str = 'flip',
                 target_class: int = None, preserve_disconnected_components=False,
                 **kwargs):
        """A baseline attack that chooses pairs of nodes to flip edges between.
        mode: flip or rewire"""
        super().__init__(classifier, loss_fn)
        assert mode in ['flip', 'add', 'remove', 'rewire'], f'mode {mode} is not recognised!'
        # mode: which edge edit operation a sample performs
        self.mode = mode
        # target_class: if not None, succeed only when the victim predicts this class
        self.target_class = target_class
        # whether sampled edits must keep existing disconnected components intact
        self.preserve_disconnected_components = preserve_disconnected_components
    def attack(self, graph: dgl.DGLGraph, label: torch.tensor, budget: int, max_queries: int):
        """
        Args:
            graph: Unperturbed graph. This graph will be copied before perturbing.
        Returns:
            A perturbed version of the input graph.
        """
        adv_example = None
        best_losses_so_far = []
        merged_dfs = None
        is_edge_weighted = 'weight' in graph.edata.keys()
        # one victim query per iteration; each iteration samples a fresh random edit set
        for i in range(max_queries):
            if i % 100 == 0:
                print(f'Iter: {i} / {max_queries} = {i/max_queries*100} %. Best loss={np.max(merged_dfs.losses.values) if merged_dfs is not None else "NA"}')
            # sample edges
            if self.mode == 'rewire':
                edges = random_sample_rewire_swap(graph, budget, rewire_only=not is_edge_weighted,
                                                  preserve_disconnected_components=self.preserve_disconnected_components)
            else: # flip, add or remove
                edges = random_sample_flip(graph, budget, add_edge_only=self.mode == 'add',
                                           remove_edge_only=self.mode == 'remove',
                                           preserve_disconnected_components=self.preserve_disconnected_components)
            with torch.no_grad():
                perturbed_graph = population_graphs(graph, [edges], mode=self.mode)[0]
                predictions = self.classifier(perturbed_graph).detach()
            if not isinstance(label, torch.Tensor):
                label = torch.tensor(label)
            losses = self.loss_fn(predictions, label, reduction='none').numpy()
            new_df = self.construct_dataframe(losses, predictions, label.squeeze(), i + 1)
            if merged_dfs is None: merged_dfs = new_df
            else: merged_dfs = pd.concat([merged_dfs, new_df])
            best_losses_so_far.append(np.max(merged_dfs.losses.values))
            # predictions = self.classifier(dgl.batch(perturbed_graphs))
            # labels = torch.repeat_interleave(label, len(predictions))
            # success: untargeted -> any misclassification; targeted -> predicted target class
            if (self.target_class is None and np.sum(
                    correct_predictions(predictions.numpy(), label.numpy())) < len(
                predictions)) \
                    or (self.target_class is not None and (
                    np.argmax(predictions.numpy(), axis=1) == self.target_class).any()):
                print(f'Attack succeeded!: recent loss={losses}')
                # NOTE(review): the inner loops below shadow the outer query counter `i`;
                # harmless only because we return immediately afterwards.
                if self.target_class is None:
                    comps = correct_predictions(predictions.numpy(), label.numpy())
                    for i, comp in enumerate(comps):
                        if not comp:
                            adv_example = deepcopy(perturbed_graph)
                else:
                    for i, pred in enumerate(predictions):
                        if np.argmax(pred.numpy()) == self.target_class:
                            adv_example = deepcopy(perturbed_graph)
                return merged_dfs, adv_example
            # give up early when the extrapolated query cost of success is hopeless
            if len(best_losses_so_far) > 200 and extrapolate_breakeven(best_losses_so_far) > 1e5:
                print(f'Predicted breakeven point is {extrapolate_breakeven(best_losses_so_far)} and run terminated')
                return merged_dfs, adv_example
        return merged_dfs, adv_example
    @staticmethod
    def construct_dataframe(losses: np.array, predictions: torch.tensor, label: torch.tensor, queries: int) \
            -> pd.DataFrame:
        """Construct a pandas dataframe consistent with the base class. This dataframe is for all samples evaluated
        after exactly `queries` queries."""
        labels = np.tile(label, len(predictions))
        df = pd.DataFrame({'losses': losses,
                           'correct_prediction': correct_predictions(predictions.numpy(), labels),
                           'queries': queries})
        return df
| 4,924 | 49.255102 | 157 | py |
grabnel | grabnel-master/src/attack/base_attack.py | import dgl
import pandas as pd
import torch
class BaseAttack:
    """Common interface for adversarial attacks against a graph classifier.

    Concrete attacks subclass this and implement :meth:`attack`.

    Attributes:
        classifier: The pytorch victim model to attack.
        loss_fn: The loss function; an attacker tries to maximise it.
        results: Placeholder for attack results, populated by subclasses.
    """
    def __init__(self, classifier, loss_fn):
        self.classifier = classifier
        self.loss_fn = loss_fn
        self.results = None
    def attack(self, graph: dgl.DGLGraph, label: torch.tensor, budget: int, max_queries: int) -> pd.DataFrame:
        """Run the attack on one graph; subclasses override this stub.

        Args:
            graph: The unperturbed input graph.
            label: The label of the unperturbed input graph.
            budget: Total number of edge additions and deletions allowed.
            max_queries: The total number of times the victim model can be queried.

        Returns:
            A pandas dataframe with columns `losses`, `correct_prediction` and `queries`,
            one row per intermediate perturbation: the loss of the perturbed sample,
            whether the model still classifies it correctly, and the query count at
            which it was generated.
        """
        pass
| 1,290 | 35.885714 | 118 | py |
grabnel | grabnel-master/src/attack/utils.py | import random
import dgl
import networkx as nx
import numpy as np
import torch
import torch.nn as nn
from copy import deepcopy
def find_n_hop_neighbour(graph: dgl.DGLGraph, node_idx: int, n_hop: int, undirected=True,
                         exclude_self=True) -> torch.Tensor:
    """Return the indices of all nodes within `n_hop` hops of `node_idx`.

    Args:
        graph: graph to search for neighbours.
        node_idx: index of the centre node.
        n_hop: number of hops to expand from `node_idx`.
        undirected: if True the graph is first simplified via ``dgl.to_simple``
            (NOTE(review): this deduplicates parallel edges; confirm it also
            yields the intended undirected semantics for directed inputs).
        exclude_self: drop `node_idx` itself from the result.

    Returns:
        1D torch.Tensor of unique neighbour indices.
    """
    if undirected:
        graph = dgl.to_simple(deepcopy(graph))
    reached = torch.tensor([node_idx])
    # breadth-style expansion: each pass adds the out-neighbours of everything found so far
    for _ in range(n_hop):
        _, successors = graph.out_edges(reached)
        reached = torch.unique(torch.cat((reached, successors)))
    if exclude_self:
        reached = reached[reached != node_idx]
    return reached
def check_directed(graph: dgl.DGLGraph) -> bool:
    """Return truthy when the graph is directed, i.e. its dense adjacency
    matrix is not symmetric (some edge lacks a reverse counterpart)."""
    adjacency = graph.adjacency_matrix().to_dense()
    is_symmetric = (adjacency == torch.transpose(adjacency, 0, 1)).all()
    return ~is_symmetric
def get_allowed_nodes_k_hop(graph: dgl.DGLGraph, previous_edits, k_hop: int = 1):
    """Return the node indices within `k_hop` hops of any previously edited node.

    When `previous_edits` is empty, every node in the graph is allowed.

    Args:
        graph: the graph being attacked.
        previous_edits: list of previously edited edges (tuples of node indices).
        k_hop: hop distance counted as "neighbouring". Default 1.

    Returns:
        List of all node indices when there are no previous edits, otherwise a
        1D long tensor of unique allowed node indices.
    """
    # flatten the edge tuples into a deduplicated list of endpoint nodes
    touched = list(set(sum(previous_edits, ())))
    if not touched:
        return list(range(graph.number_of_nodes()))
    allowed = torch.tensor([])
    for node in touched:
        hop_neighbours = find_n_hop_neighbour(graph, node, k_hop, exclude_self=False)
        allowed = torch.cat((allowed, hop_neighbours))
    return torch.unique(allowed).long()
def get_node_probabilities(graph: dgl.DGLGraph, node_indices=None):
    """Return a selection-probability vector over nodes proportional to
    1/sqrt(in-degree), normalised to sum to one (lower-degree nodes are more
    likely to be picked). NOTE(review): an isolated node (degree 0) yields an
    infinite weight here -- confirm callers exclude such nodes."""
    degree = graph.in_degrees(node_indices).detach().numpy()
    weights = 1. / np.sqrt(degree)
    return weights / np.sum(weights)
def get_node_probabilities_by_proximity(graph: dgl.DGLGraph, committed_edits=None):
    """Score the nodes based on their proximity to already attacked nodes. Nodes closer to attacked nodes get a higher
    chance of being selected."""
    # NOTE(review): this function appears unfinished -- when `committed_edits`
    # is non-empty it falls through and implicitly returns None as well, so no
    # proximity scoring is actually performed. Confirm intent before relying on it.
    if committed_edits is None or len(committed_edits) == 0:
        return None
from scipy.sparse.csgraph import connected_components
def number_connected_components(dglgraph):
    """Return the number of connected components of a DGL graph, computed on
    its sparse CSR adjacency via scipy's connected_components."""
    adjacency = dglgraph.adjacency_matrix(scipy_fmt="csr")
    n_components, _ = connected_components(adjacency)
    return n_components
def random_sample_flip(graph: dgl.DGLGraph, budget: int,
                       prohibited_nodes: list = None,
                       prohibited_edges: list = None,
                       add_edge_only: bool = False,
                       remove_edge_only: bool = False,
                       n_hop: int = None,
                       allow_disconnected: bool = True,
                       preserve_disconnected_components: bool = False,
                       committed_edges=None, ) -> set:
    """Sample ``budget`` random edge flips of ``graph``.

    Each returned element is a pair (u, v) with u <= v denoting the undirected
    edge to flip (added if absent, removed if present).

    :param graph: the dgl graph to perturb
    :param budget: number of distinct flips to sample
    :param prohibited_nodes: nodes that may not appear in any sampled flip
    :param prohibited_edges: edges that may not be flipped (either orientation)
    :param add_edge_only: only sample flips that add an edge
    :param remove_edge_only: only sample flips that remove an edge
    :param n_hop: when given, v is drawn from the n-hop neighbourhood of u
    :param allow_disconnected: when False, reject flips that disconnect the graph
    :param preserve_disconnected_components: when True, reject flips that change
        the number of connected components
    :param committed_edges: currently unused -- TODO confirm whether it should
        constrain the sampling like previous edits do elsewhere
    :return: set of (u, v) pairs; may contain fewer than ``budget`` pairs when
        sampling patience is exhausted
    """
    assert (add_edge_only and remove_edge_only) is False, \
        'Either (or neither) add_edge_only and remove_edge_only can be True!'
    n_components = number_connected_components(graph)
    edges_to_flip = set()
    while len(edges_to_flip) < budget:
        patience = 100  # attempts allowed per flip before giving up
        while patience > 0:
            all_nodes = range(graph.number_of_nodes())
            allowed_nodes = all_nodes
            if n_hop is None:
                u = np.random.choice(allowed_nodes, replace=False, )
                v = np.random.choice(all_nodes, replace=False, )
            else:
                u = np.random.choice(allowed_nodes, replace=False, )
                # v can only be within the n-hop neighbourhood of u
                v_candidates = find_n_hop_neighbour(graph, u, n_hop)
                if v_candidates.shape[0] == 0:  # u is an isolated node (not supposed to happen for most of the time)
                    patience -= 1
                    continue
                v = np.random.choice(v_candidates)
            if prohibited_nodes is not None:
                if u in prohibited_nodes or v in prohibited_nodes:
                    patience -= 1
                    continue
            if prohibited_edges is not None:
                if (u, v) in prohibited_edges or (v, u) in prohibited_edges:
                    patience -= 1
                    continue
            # canonicalise: each undirected edge is represented with u <= v
            u, v = min(u, v), max(u, v)
            if graph.has_edges_between([u], [v])[0] and add_edge_only:
                patience -= 1
                continue
            if not graph.has_edges_between([u], [v])[0] and remove_edge_only:
                patience -= 1
                continue
            pert_graph = None  # built lazily; only needed for connectivity checks
            if not allow_disconnected:
                if pert_graph is None:
                    pert_graph = population_graphs(graph, [[(u, v)]], mode='flip')[0]
                nx_graph = pert_graph.to_networkx().to_undirected()
                if nx.number_connected_components(nx_graph) > 1:
                    patience -= 1
                    continue
            if preserve_disconnected_components:
                if pert_graph is None:
                    pert_graph = population_graphs(graph, [[(u, v)]], mode='flip')[0]
                new_n_components = number_connected_components(pert_graph)
                if new_n_components != n_components:
                    patience -= 1
                    continue
            edges_to_flip.add((u, v))
            break
        if patience <= 0:
            # Fix: this used to be `if patience < 0: pass`, which is dead code
            # (patience stops at 0) and let the outer loop spin forever when no
            # admissible flip exists. Return the partial result instead,
            # mirroring random_sample_rewire_swap.
            return edges_to_flip
    return edges_to_flip
def random_sample_rewire_swap(graph: dgl.DGLGraph, budget: int, rewire_only: bool = False,
                              swap_only: bool = False,
                              n_hop: int = None,
                              allow_disconnected: bool = False,
                              preserve_disconnected_components: bool = False,
                              ):
    """Sample ``budget`` random rewire/swap operations on ``graph``.

    Each element is a triple (u, v, w) where (u, v) is an existing edge:
      * if (u, w) also exists the operation is a swap (u, v) <-> (u, w) --
        this only changes anything for weighted graphs;
      * otherwise it is a rewiring (u, v) -> (u, w).

    Note there are two edge flips per rewire budget.

    :param rewire_only: reject candidates that would be swaps
    :param swap_only: reject candidates that would be rewires
    :param n_hop: when given, w is drawn from the n-hop neighbourhood of u
    :param allow_disconnected: when False, reject ops that disconnect the graph
    :param preserve_disconnected_components: when True, reject ops that change
        the number of connected components
    :return: set of (u, v, w) triples; fewer than ``budget`` if patience runs out
    """
    edges_to_rewire = set()
    n_components = number_connected_components(graph)
    # all existing (directed) edges as an (n_edges x 2) array
    us, vs = graph.all_edges(order='eid')
    us, vs = us.numpy(), vs.numpy()
    all_edges = np.array([us, vs]).T
    while len(edges_to_rewire) < budget:
        patience = 100  # attempts allowed per operation before giving up
        while patience > 0:
            # select (u, v) among the existing edges
            idx = np.random.randint(all_edges.shape[0])
            (u, v) = all_edges[idx]
            if u == v:  # skip self-loops
                patience -= 1
                continue
            # select the new endpoint w
            if n_hop is None:
                # NOTE(review): w is drawn from [u, num_nodes) rather than the
                # whole node range -- confirm this restriction is intentional.
                w = np.random.randint(u, graph.num_nodes())
            else:
                w_candidates = find_n_hop_neighbour(graph, u, n_hop)
                if w_candidates.shape[0] == 0:
                    patience -= 1
                    continue
                w = np.random.choice(w_candidates)
            if u == w or v == w:
                patience -= 1
                continue
            # check whether (u, w) already exists (swap) or not (rewire)
            is_existing_edge = np.equal(all_edges, np.array([u, w])).all(1).any()
            if is_existing_edge:
                if rewire_only:
                    patience -= 1
                    continue
            else:
                if swap_only:
                    patience -= 1
                    continue
            pert_graph = None  # built lazily; only needed for connectivity checks
            if not allow_disconnected:
                if pert_graph is None:
                    pert_graph = population_graphs(graph, [[(u, v, w)]], mode='rewire')[0]
                nx_graph = pert_graph.to_networkx().to_undirected()
                if nx.number_connected_components(nx_graph) > 1:
                    patience -= 1
                    continue
            if preserve_disconnected_components:
                # Fix: this previously called population_graphs with mode='flip'
                # on a 3-tuple; flip mode unpacks (u, v, w) as flip_edge(g, u, v, w)
                # so the third node was silently passed as edge data. The edit
                # here is a rewire, so build the candidate graph in rewire mode.
                if pert_graph is None:
                    pert_graph = population_graphs(graph, [[(u, v, w)]], mode='rewire')[0]
                new_n_components = number_connected_components(pert_graph)
                if new_n_components != n_components:
                    patience -= 1
                    continue
            edges_to_rewire.add((u, v, w))
            break
        if patience <= 0:
            # patience exhausted: return whatever was sampled so far
            return edges_to_rewire
    return edges_to_rewire
def population_graphs(graph: dgl.DGLGraph, population: list, mode: str, ) -> list:
    """Takes the population and returns them in dgl.Graph format.

    graph: the base graph upon which we make edits
    population: a list of form
        [(n1, n2), (n3, n4) ... ] (flip mode) or
        [(n1, n2, n3), (n4, n5, n6)... ] (rewire mode)
    mode: 'flip' or 'rewire'. See descriptions below:
    for flip mode,
        Recall the population is a list of sets. Each set contains elements (u, v) where (u < v) representing flipping
        the undirected edge u ~ v. This method returns the population as a list of dgl.DGLGraph objects.
    for rewire mode,
        samples [(u, v, w), (u, v, w)...] where we rewire the edge u -> v to u -> w (if edge u -> w already exists,
        we swap the edges u -> v and u -> w)

    :return: one perturbed deep copy of ``graph`` per element of ``population``
    """
    perturbed_graphs = []
    # weighted graphs need their 'weight' feature handled on swap/rewire
    is_edge_attributed = 'weight' in graph.edata.keys()
    for edge_to_edit in population:
        perturbed_graph = deepcopy(graph)  # never mutate the base graph
        for edge in edge_to_edit:
            if mode == 'rewire':
                (u, v, w) = edge
                if perturbed_graph.has_edges_between([u], [w])[0] and is_edge_attributed:
                    # swap for unweighted graph is meaningless
                    # Exchange the weights of u~v and u~w in both directions;
                    # weights are read from the unmodified base graph.
                    # NOTE(review): `edges[u, w][0]['weight']` is NetworkX-style
                    # indexing -- confirm it matches the dgl version in use.
                    perturbed_graph.edges[u, w][0]['weight'] = torch.clone(graph.edges[u, v][0]['weight'])
                    perturbed_graph.edges[w, u][0]['weight'] = torch.clone(graph.edges[u, v][0]['weight'])
                    perturbed_graph.edges[u, v][0]['weight'] = torch.clone(graph.edges[u, w][0]['weight'])
                    perturbed_graph.edges[v, u][0]['weight'] = torch.clone(graph.edges[u, w][0]['weight'])
                else:  # rewire
                    flip_edge(perturbed_graph, u, v)  # delete the edge
                    flip_edge(perturbed_graph, u, w,
                              edata={'weight': graph.edges[u, w][0]['weight']} if is_edge_attributed else None)
            else:
                # flip mode: edge is a (u, v) pair
                flip_edge(perturbed_graph, *edge)
        perturbed_graphs.append(perturbed_graph)
    return perturbed_graphs
def get_stages(max_budget, max_perturbation, mode='equidistant'):
    """
    Partition the total query budget into stages of increasing perturbation size.

    During an attack we may partition the total budget allocated. For example,
    with 100 queries and up to 4 edge edits, we split the queries into 4 stages
    and only increase the number of edits when the next stage is reached
    without a successful attack.

    :param max_budget: maximum number of queries allowed
    :param max_perturbation: maximum number of edge edits allowed
    :param mode: 'equidistant': divide the queries into equally sized stages.
        'exp': successive-halving style division (not implemented yet).
    :return: numpy array of max_perturbation + 1 stage boundaries in [0, max_budget]
    :raises NotImplementedError: when mode == 'exp'
    """
    assert mode in ['equidistant', 'exp']
    if mode == 'exp':
        # todo: implement the successive halving-style schedule
        # Fix: `raise NotImplemented` raised a TypeError (NotImplemented is a
        # sentinel value, not an exception class).
        raise NotImplementedError("'exp' stage schedule is not implemented yet")
    # max_perturbation + 1 equidistant boundaries => max_perturbation stages
    return np.linspace(0, max_budget, max_perturbation + 1)
def get_device(gpu):
    """Resolve the torch device: ``cuda:<gpu>`` when a gpu index is given and
    CUDA is available, otherwise the cpu. The choice is printed."""
    use_cuda = gpu is not None and torch.cuda.is_available()
    if use_cuda:
        device = torch.device(f'cuda:{gpu}')
        print('Using', device, 'with cuda', torch.cuda.get_device_capability(device)[0], flush=True)
        return device
    device = torch.device('cpu')
    print('Using cpu', flush=True)
    return device
def setseed(seed):
    """Seed the stdlib, numpy and (when a seed is given) torch RNGs.

    Passing ``None`` reseeds the stdlib/numpy generators from OS entropy and
    leaves the torch RNG untouched.
    """
    random.seed(seed)
    np.random.seed(seed)
    if seed is not None:
        torch.random.manual_seed(seed)
def nettack_loss(logits, labels, target_class=None, **kwargs):
    """Margin-style attack loss from the Nettack paper.

    For multi-class logits the loss is max over wrong classes of
    (logit_wrong - logit_true); when ``target_class`` is given it becomes the
    targeted margin logit[target] - max(logit). Binary problems use the
    log-odds of the single logit (sign flipped by the true label); the
    targeted variant does not apply there.

    :param logits: (n x l) tensor of model outputs
    :param labels: length-n tensor of true labels
    :param target_class: optional class index for a targeted attack
    :return: a scalar tensor when n == 1, otherwise a length-n tensor
    """

    def _margin(logit, label):
        logit = logit.flatten()
        label = int(label)
        if logit.shape[0] <= 2:
            # Binary: recover pseudo-probabilities from the single logit; the
            # targeted attack is equivalent to the untargeted one here.
            raw = logit[0]
            p = torch.sigmoid(raw)
            log_odds = torch.log(p) - torch.log(1. - p)
            return -log_odds if label > 0 else log_odds
        if target_class is None:
            others = torch.cat([logit[:label], logit[label + 1:]])
            return torch.max(others - logit[label])
        return logit[target_class] - torch.max(logit)

    assert logits.shape[0] == labels.shape[0]
    if logits.shape[0] == 1:
        return _margin(logits, labels)
    return torch.tensor([_margin(logits[i], labels[i]) for i in range(logits.shape[0])])
def nettack_loss_gunet(logits, labels, target_class=None, **kwargs):
    """Nettack-style margin loss adapted to the slightly different GUNet API.

    Untargeted: max over wrong classes of (logit_wrong - logit_true).
    Targeted: logit[target_class] - max(logit); a NaN margin is clamped to
    -100 to keep downstream search numerically sane.

    :param logits: (n x l) tensor of model outputs
    :param labels: length-n tensor of true labels
    :param target_class: optional class index for a targeted attack
    :return: a scalar tensor when n == 1, otherwise a length-n tensor
    """

    def _margin(logit, label):
        logit = logit.flatten()
        label = int(label)
        if target_class is None:
            others = torch.cat([logit[:label], logit[label + 1:]])
            return torch.max(others - logit[label])
        margin = logit[target_class] - torch.max(logit)
        if torch.isnan(margin):
            margin = -100
        return margin

    assert logits.shape[0] == labels.shape[0]
    if logits.shape[0] == 1:
        return _margin(logits, labels)
    return torch.tensor([_margin(logits[i], labels[i]) for i in range(logits.shape[0])])
def classification_loss(is_binary):
"""Returns a loss function for classification tasks."""
if is_binary:
def loss_fn(x, y, **kwargs):
return nn.functional.binary_cross_entropy_with_logits(x.squeeze(), y.float(), **kwargs)
else:
loss_fn = nn.functional.cross_entropy
return loss_fn
def number_of_correct_predictions(predictions, labels, is_binary):
    """Count how many predictions (given as logits) agree with the labels.

    Binary: a positive logit means class 1. Multi-class: argmax over axis 1.
    Returns a 0-dim tensor holding the count.
    """
    if is_binary:
        predicted = (predictions.squeeze() > 0).float()
    else:
        predicted = predictions.argmax(axis=1)
    return (predicted == labels).sum()
def get_dataset_split(dataset):
    """Return the (a, b, c) split proportions to use for a given dataset name."""
    if 'IMDB' in dataset:  # covers both IMDB-BINARY and IMDB-MULTI
        return 0.5, 0.3, 0.2
    if dataset == 'er_graphs':
        return 0.89, 0.1, 0.01
    # default; 0.9/0.08/0.02 was tried before but 0.02 is too small for many datasets
    return 0.9, 0.05, 0.05
def flip_edge(graph: dgl.DGLGraph, u: int, v: int, edata: dict = None, check_directness=False):
    """Flip the edge u ~ v in `graph`, in place.

    This method assumes the graph is undirected (each edge stored in both
    directions) unless ``check_directness`` is set. If the edge u ~ v exists
    it is deleted, otherwise it is added.

    :param graph: graph mutated in place
    :param u: one end-node
    :param v: the other end-node
    :param edata: if supplied, this specifies the edge feature used when adding
    :param check_directness: when True, inspect the adjacency matrix first to
        decide whether to treat the graph as directed (costly on large graphs)
    """
    if check_directness:
        is_directed = check_directed(graph)
    else:
        # default: assume undirected, i.e. every edge is stored bi-directed
        is_directed = False
    if is_directed:
        if graph.has_edges_between([u], [v])[0]:
            edge_to_delete_id = graph.edge_ids(u, v)
            graph.remove_edges([edge_to_delete_id])
        else:
            graph.add_edges(u, v, data=edata)
    else:
        if graph.has_edges_between([u], [v])[0]:
            # remove both orientations of the undirected edge
            edge_to_delete_id = graph.edge_ids(u, v)
            edge_to_delete_reverse_id = graph.edge_ids(v, u)
            graph.remove_edges([edge_to_delete_id, edge_to_delete_reverse_id])
        else:
            # add both orientations, sharing the same edge data
            graph.add_edges([u, v], [v, u], data=edata)
def correct_predictions(predictions: np.array, labels: np.array) -> int:
    """Element-wise correctness mask of predictions against labels.

    Args:
        predictions: (n x l) numpy array of model outputs; l <= 2 is treated as
            binary with column 0 holding the positive-class score
        labels: 1D numpy array of ground-truth labels

    Returns:
        Boolean numpy array of length n, True where the prediction matches the
        label. (Note: despite the ``int`` annotation a mask is returned;
        callers typically sum it.)
    """
    assert isinstance(predictions, np.ndarray)
    assert isinstance(labels, np.ndarray)
    is_binary = predictions.shape[1] <= 2
    if is_binary:
        scores = predictions[:, 0]
        return (scores > 0.0) == labels
    return np.argmax(predictions, axis=1) == labels
def extrapolate_breakeven(historical_loss, using_last: int = 500):
    """Estimate the index at which the loss trend crosses zero.

    Fits a least-squares line to the last ``using_last`` loss values and
    extrapolates it to its x-intercept, expressed in the index space of the
    full (untruncated) history.

    Fixes vs the previous version:
      * the offset back to the original index space was computed *after*
        truncation, so it was always 0;
      * ``using_last=None`` crashed on ``shape[0] - None``;
      * sklearn's LinearRegression is replaced by the equivalent
        ``np.polyfit`` degree-1 fit (same least squares solution, no extra
        dependency).

    :param historical_loss: sequence of loss values (NaN/inf entries dropped)
    :param using_last: number of trailing entries to fit; None or <= 0 uses all
    :return: extrapolated break-even index as a float
    """
    full_history = np.asarray(historical_loss, dtype=float).flatten()
    if using_last is not None and using_last > 0:
        window = full_history[-using_last:]
        # map the window's local x back onto the original index space
        offset = max(full_history.shape[0] - using_last, 0)
    else:
        window = full_history
        offset = 0
    # clear out any nan/inf entries before fitting
    window = window[np.isfinite(window)]
    x = np.arange(len(window))
    slope, intercept = np.polyfit(x, window, 1)
    return float(-intercept / slope) + offset
| 19,512 | 41.144708 | 124 | py |
grabnel | grabnel-master/src/attack/data.py | """
Using the convention of having an a, b, c dataset used in ReWatt.
Dataset a is used for training a model, the method training_dataloaders returns two dataloaders created by splitting
dataset a. The first dataloader is for training and the other for validation
Dataset b is used for training the adversarial attack agent.
Dataset c is used to evaluate the adversarial attack agent.
Datasets b and c are returned when calling adversarial_dataloaders.
"""
import os
import pickle
from pathlib import Path
import dgl
import numpy as np
import pandas as pd
import torch
import random
from dgl import add_self_loop
from dgl.data import MiniGCDataset, TUDataset
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader
from scipy.sparse.csgraph import connected_components
class Data:
def __init__(self, dataset_name='REDDIT-MULTI-5K', dataset=None, dataset_split=(0.9, 0.05, 0.05), valid_size=0.2,
seed=None, generator_specs: dict = {}):
"""
A Dataclass which downloads, stores and handles splitting of a dataset.
:param dataset_name: a TUDataset name (listed at https://chrsmrrs.github.io/datasets/docs/datasets/)
:param dataset: a dataset in the format of [(G1, y1), (G2, y2)...]. If this is supplied, the values input here
will be used as dataset and this overrides any dataset_name specification.
:param dataset_split: relative size of dataset a, b and c (see docstring for what these are)
:param valid_size: proportion of dataset a to assign to validation
:param seed: the seed which determine the dataset splits
:param generator_specs: dict. Used for generative datasets (e.g. the Mini graph classification dataset
in DGL, which expects arguments such as the number of graphs, the min/max nodes and etc.
changed the default data_split -- 0.02 is too small for some datasets.
"""
assert np.isclose(np.sum(dataset_split), 1.0)
self.dataset_name = dataset_name
self.valid_size = valid_size
self.dataset_split = dataset_split
self.seed = seed
if dataset is None and dataset_name is None:
raise ValueError("Either dataset or dataset_name must be provided! but got None for both.")
if dataset is None:
self.dataset = get_dataset(dataset_name, **generator_specs)
else:
self.dataset = dataset
dataset_a, dataset_b, dataset_c = self.three_way_split(self.dataset, self.dataset_split)
self.dataset_a = dataset_a
self.dataset_b = dataset_b
self.dataset_c = dataset_c
dataset_a_train, dataset_a_valid = self.dataset_a_split()
self.dataset_a_train = dataset_a_train
self.dataset_a_valid = dataset_a_valid
if dataset_name.lower() == 'twitter':
self.impute_and_normalise()
self.feature_dim = self.dataset[0][0].ndata['node_attr'].shape[1]
self.number_of_labels = len(np.unique([datapoint[1] for datapoint in self.dataset]))
self.is_binary = self.number_of_labels == 2
self.generator_specs = generator_specs
def three_way_split(self, dataset, dataset_split):
"""
Splits a dataset of the form [(G1, y1), (G2, y2)...] into three stratified datasets of the same form.
:param dataset: An iterable of items (Gi, yi) where Gi is a DGLGraph and yi is an int (the label)
:param dataset_split: A tuple (a, b, c) such that a+b+c=1.0 describing size of each dataset
:return: the three datasets
"""
_, b, c = dataset_split
graphs, labels = map(list, zip(*dataset)) # [(G1, y1), (G2, y2)...] -> [[G1, G2,...]. [y1, y2,...]]
a_graphs, bc_graphs, a_labels, bc_labels = \
train_test_split(graphs, labels, test_size=b + c, stratify=labels, random_state=self.seed)
b_graphs, c_graphs, b_labels, c_labels = \
train_test_split(bc_graphs, bc_labels, test_size=c / (b + c), stratify=bc_labels, random_state=self.seed)
dataset_a = list(zip(a_graphs, a_labels))
dataset_b = list(zip(b_graphs, b_labels))
dataset_c = list(zip(c_graphs, c_labels))
return dataset_a, dataset_b, dataset_c
def dataset_a_split(self):
"""Split dataset_a into train and validation."""
graphs_a, labels_a = map(list, zip(*self.dataset_a)) # [(G1, y1), (G2, y2)...] -> [[G1, G2,...]. [y1, y2,...]]
train_graphs, valid_graphs, train_labels, valid_labels = \
train_test_split(graphs_a, labels_a, test_size=self.valid_size, random_state=self.seed)
train = list(zip(train_graphs, train_labels)) # [G1, G2,...]. [y1, y2,...] -> [(G1, y1), (G2, y2)...]
valid = list(zip(valid_graphs, valid_labels))
return train, valid
def impute_and_normalise(self):
"""Impute and normalise datasets that are returned as dataloaders."""
pipe = self.build_pipe()
self.dataset_a_train = self.apply_pipe_to_dataset(pipe, self.dataset_a_train)
self.dataset_a_valid = self.apply_pipe_to_dataset(pipe, self.dataset_a_valid)
self.dataset_b = self.apply_pipe_to_dataset(pipe, self.dataset_b)
self.dataset_c = self.apply_pipe_to_dataset(pipe, self.dataset_c)
def build_pipe(self):
"""Build a pipe fitted to dataset_a_train."""
graphs = list(zip(*self.dataset_a_train))[0]
features = dgl.batch(graphs).ndata['node_attr'].numpy()
pipe = Pipeline([('impute', SimpleImputer()), ('scale', StandardScaler())])
pipe.fit(features)
return pipe
def apply_pipe_to_dataset(self, pipe, dataset):
"""Apply an sklearn PipeLine to the node_attr of a DGL.DGLGraphs in the dataset."""
return [(self.apple_pipe_to_graph(pipe, graph), label) for (graph, label) in dataset]
@staticmethod
def apple_pipe_to_graph(pipe, graph):
"""Apply an sklearn PipeLine to the node_attr of a DGL.DGLGraph."""
graph.ndata['node_attr'] = torch.FloatTensor(pipe.transform(graph.ndata['node_attr'].numpy()))
return graph
def training_dataloaders(self, batch_size=32):
"""
Returns two dataloaders, one for training a model and one for validating a model. The dataloaders come from
dataset a.
:param valid_size: The proportion of dataset a going into the validation set
:param batch_size: size of batches used in dataloaders
:return: two dataloaders
"""
train_loader = DataLoader(self.dataset_a_train, batch_size=batch_size, shuffle=True, collate_fn=collate)
valid_loader = DataLoader(self.dataset_a_valid, batch_size=batch_size, collate_fn=collate)
return train_loader, valid_loader
def adversarial_dataloaders(self, batch_size=32, shuffle_b=False):
"""
Returns dataset b and c used for training and evaluating an adversarial attack agent.
:param batch_size: size of batches used in dataloaders
:return: wo dataloaders
"""
train_loader = DataLoader(self.dataset_b, batch_size=batch_size, shuffle=shuffle_b, collate_fn=collate)
valid_loader = DataLoader(self.dataset_c, batch_size=batch_size, collate_fn=collate)
return train_loader, valid_loader
class ERData:
    """Erdos-Renyi graphs dataset used in RL-S2V, loaded from pre-generated pickles."""

    def __init__(self, dataset_split=(0.9, 0.09, 0.01), seed=None, **kwargs):
        """ER graphs dataset used in rls2v.

        :param dataset_split: relative sizes of datasets a, b, c; only the b:c
            ratio is used here (the a vs b+c split is fixed by the pickle files)
        :param seed: shuffle seed; required so the split matches the model seed
        :param kwargs: ignored (accepted for interface parity with Data)
        :raises ValueError: when seed is None
        """
        self.dataset_split = dataset_split
        if seed is None:
            raise ValueError('Specify seed to match model seed.')
        data_location = os.path.join(Path(os.path.dirname(os.path.abspath(__file__))).parent.parent, 'data')
        # load datasets (pre-generated DGL pickles under <repo>/data)
        self.dataset_a = pickle.load(open(os.path.join(data_location, 'er_train_dgl.pickle'), 'rb'))
        self.dataset_bc = pickle.load(open(os.path.join(data_location, 'er_test_dgl.pickle'), 'rb'))
        # split in the same way as rls2v code
        random.seed(seed)
        random.shuffle(self.dataset_bc)
        proportion_b = dataset_split[1]/(dataset_split[1]+dataset_split[2])
        size_b = int(len(self.dataset_bc) * proportion_b)
        self.dataset_b = self.dataset_bc[:size_b]
        self.dataset_c = self.dataset_bc[size_b:]
        # set other attributes (fixed for this synthetic dataset: the label is
        # the number of connected components, 3 classes, degree-as-feature)
        self.feature_dim = 1
        self.number_of_labels = 3
        self.is_binary = False

    def training_dataloaders(self, batch_size=32):
        """Dataloaders for training/validating the victim model.

        NOTE(review): training iterates dataset_bc (the attack split), not
        dataset_a -- presumably to mirror the rls2v setup, but confirm.
        """
        train_loader = DataLoader(self.dataset_bc, batch_size=batch_size, shuffle=True, collate_fn=collate)
        valid_loader = DataLoader(self.dataset_c, batch_size=batch_size, collate_fn=collate)
        return train_loader, valid_loader

    def adversarial_dataloaders(self, batch_size=32, shuffle_b=False):
        """Dataloaders over dataset_b (attack training) and dataset_c (attack evaluation)."""
        train_loader = DataLoader(self.dataset_b, batch_size=batch_size, shuffle=shuffle_b, collate_fn=collate)
        valid_loader = DataLoader(self.dataset_c, batch_size=batch_size, collate_fn=collate)
        return train_loader, valid_loader
def get_dataset(dataset_name, **kwargs):
    """
    Load a graph-classification dataset as an iterable of (DGLGraph, int) pairs.

    Node features live in ``graph.ndata['node_attr']``.

    :param dataset_name: a TUDataset name or one of 'er_graphs', 'minigc',
        'twitter', 'mnist' (special names matched case-insensitively)
    :param kwargs: forwarded to the generator for generative datasets (minigc)
    :return: an iterable dataset of (graph, label) pairs
    """
    name = dataset_name.lower()
    if name == 'er_graphs':
        return get_er_dataset()
    if name == 'minigc':
        return get_minigc_dataset(**kwargs)
    if name == 'twitter':
        return get_twitter_dataset()
    if name == 'mnist':
        return get_mnist75sp()
    # anything else is treated as a TUDataset name
    return get_tu_dataset(dataset_name)
def get_er_dataset():
    """
    Erdos-Renyi benchmark used in Dai 2018. Requires data/erdos_renyi.pl to
    exist first (produced by src/data/er_generator.py).

    :return: A list of (DGLGraph, label) pairs with zero-based labels
    """
    with open('data/erdos_renyi.pl', 'rb') as fh:
        raw = pickle.load(fh)
    featured = add_synthetic_features(raw)
    # raw label is the number of connected components (1-based); shift to start at 0
    return [(graph, label - 1) for (graph, label) in featured]
def get_tu_dataset(dataset_name):
    """
    Load a TUDataset in (DGLGraph, int) format, attaching one-hot degree
    features as 'node_attr'.

    :param dataset_name: a name listed at https://chrsmrrs.github.io/datasets/
    :return: A list of (graph, label) pairs
    """
    return add_synthetic_features(TUDataset(dataset_name))
def get_minigc_dataset(num_graphs=1000, min_num_v=80, max_num_v=100):
    """
    Mini graph-classification dataset from DGL with synthetic degree features.
    See https://docs.dgl.ai/en/0.4.x/api/python/data.html#dgl.data.MiniGCDataset.
    """
    raw = MiniGCDataset(num_graphs, min_num_v, max_num_v)
    return add_synthetic_features(raw)
def get_mnist75sp():
    """Load the MNIST-75sp superpixel dataset pickle shipped in <src>/data."""
    data_dir = Path(os.path.abspath(__file__)).parent.parent / 'data'
    with open(os.path.join(data_dir, 'mnist_75sp.p'), 'rb') as fh:
        return pickle.load(fh)
def add_synthetic_features(dataset):
    """
    Attach a one-hot encoding of each node's in-degree as node features.

    :param dataset: An iterable of tuples (graph, label)
    :return: A list of (graph, label) where graph.ndata['node_attr'] is the
        one-hot degree matrix (a pytorch tensor)
    """
    # first pass: collect every degree value observed anywhere in the dataset
    observed_degrees = set()
    for graph, _ in dataset:
        observed_degrees |= set(graph.in_degrees().numpy())
    # NOTE: column assignment follows set iteration order -- arbitrary but
    # fixed within a single run, matching the original behaviour.
    column_of = {degree: col for col, degree in enumerate(observed_degrees)}
    # second pass: build the per-graph one-hot feature matrices
    featured = []
    for graph, label in dataset:
        features = torch.zeros((graph.num_nodes(), len(observed_degrees)))
        for row, degree in enumerate(graph.in_degrees().numpy()):
            features[row][column_of[degree]] = 1.
        graph.ndata['node_attr'] = features
        featured.append((graph, label))
    return featured
def get_twitter_dataset(balance=True, minimum_nodes=5):
    """
    Load the Twitter dataset from https://science.sciencemag.org/.

    Many of the raw graphs are tiny (a large number are a single node) and the
    three labels (TRUE, FALSE, MIXED) are imbalanced, so by default tiny
    graphs are filtered out and the classes balanced by downsampling.

    :param balance: when True, downsample the majority classes
    :param minimum_nodes: keep only graphs with at least this many nodes
    :return: A list of (DGLGraph, int) tuples of the form x_i, y_i
    """
    data_location = os.path.join(Path(os.path.dirname(os.path.abspath(__file__))).parent.parent, 'data')
    with open(os.path.join(data_location, 'twitter.pl'), 'rb') as fh:
        dataset = pickle.load(fh)
    dataset = [sample for sample in dataset if sample[0].num_nodes() >= minimum_nodes]
    return balance_indices(dataset) if balance else dataset
def balance_indices(dataset):
    """Balance a dataset by downsampling every majority class.

    Each non-minority class is randomly downsampled (without replacement) to
    the size of the smallest class; seed numpy for reproducible selection.

    Fixes vs the previous version:
      * ``value_counts().argmin()`` returned a *position* in the counts
        series, not the minority label itself -- ``idxmin()`` is used now;
      * comparing a plain python list against a scalar always yielded False,
        so the minority class's own samples were silently dropped.

    :param dataset: list of (graph, label) tuples
    :return: balanced list of (graph, label) tuples (original order preserved)
    """
    labels = np.asarray([sample[1] for sample in dataset])
    counts = pd.Series(labels).value_counts()
    minority_label = counts.idxmin()
    minority_count = int(counts.min())
    # keep every minority sample, plus a random subset of each other class
    keep = np.where(labels == minority_label)[0]
    for label in counts.index:
        if label == minority_label:
            continue
        candidates = np.where(labels == label)[0]
        keep = np.append(keep, np.random.choice(candidates, size=minority_count, replace=False))
    keep_set = set(keep.tolist())
    return [sample for i, sample in enumerate(dataset) if i in keep_set]
def collate(samples, add_selfloops=True):
    """Batch a list of (graph, label) pairs for a DGL DataLoader.

    :param samples: list of (DGLGraph, label) tuples
    :param add_selfloops: add a self-loop to every node before batching
    :return: (batched graph, label tensor)
    """
    graphs, labels = map(list, zip(*samples))
    if add_selfloops:
        graphs = [add_self_loop(g) for g in graphs]
    return dgl.batch(graphs), torch.tensor(labels)
| 14,300 | 43.551402 | 119 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.