repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
DeepAA | DeepAA-master/resnet.py | import os
import tensorflow as tf
# ref: https://github.com/gahaalt/resnets-in-tensorflow2/blob/master/Models/Resnets.py
_bn_momentum = 0.9
def regularized_padded_conv(*args, **kwargs):
    """Build a 'same'-padded Conv2D wired to the module-level L2 regularizer."""
    return tf.keras.layers.Conv2D(
        *args, **kwargs,
        padding='same',
        kernel_regularizer=_regularizer,
        bias_regularizer=_regularizer,
        kernel_initializer='he_normal',
        use_bias=True,
    )
def bn_relu(x):
    """Apply synchronized batch normalization followed by a ReLU."""
    normalized = tf.keras.layers.experimental.SyncBatchNormalization(momentum=_bn_momentum)(x)
    return tf.keras.layers.ReLU()(normalized)
def shortcut(x, filters, stride, mode):
    """Residual shortcut branch.

    `mode` selects the variant: 'B' = 1x1 projection conv, 'B_original' =
    projection conv followed by batch norm, 'A' = parameter-free subsample +
    zero-padding of the channel dimension. Identity when channels already match.
    """
    if x.shape[-1] == filters:  # maybe and stride==1
        return x
    if mode == 'B':
        return regularized_padded_conv(filters, 1, strides=stride)(x)
    if mode == 'B_original':
        projected = regularized_padded_conv(filters, 1, strides=stride)(x)
        return tf.keras.layers.experimental.SyncBatchNormalization(momentum=_bn_momentum)(projected)
    if mode == 'A':
        # Subsample spatially (if strided), then zero-pad the missing channels.
        pooled = tf.keras.layers.MaxPool2D(1, stride)(x) if stride > 1 else x
        return tf.pad(pooled,
                      paddings=[(0, 0), (0, 0), (0, 0), (0, filters - x.shape[-1])])
    raise KeyError("Parameter shortcut_type not recognized!")
def original_block(x, filters, stride=1, **kwargs):
    """Post-activation (original-style) residual block: conv-bn-relu-conv-bn, add, relu."""
    out = regularized_padded_conv(filters, 3, strides=stride)(x)
    out = regularized_padded_conv(filters, 3)(bn_relu(out))
    out = tf.keras.layers.experimental.SyncBatchNormalization(momentum=_bn_momentum)(out)
    # Mode 'B' maps to the batch-normed projection in the original formulation.
    mode = 'B_original' if _shortcut_type == 'B' else _shortcut_type
    residual = shortcut(x, filters, stride, mode=mode)
    return tf.keras.layers.ReLU()(residual + out)
def preactivation_block(x, filters, stride=1, preact_block=False):
    """Pre-activation residual block: bn-relu-conv(-dropout)-bn-relu-conv + shortcut.

    `preact_block` is accepted for signature compatibility but unused here.
    """
    out = bn_relu(x)
    out = regularized_padded_conv(filters, 3)(out)
    if _dropout:
        out = tf.keras.layers.Dropout(_dropout)(out)
    out = regularized_padded_conv(filters, 3, strides=stride)(bn_relu(out))
    return shortcut(x, filters, stride, mode=_shortcut_type) + out
def bootleneck_block(x, filters, stride=1, preact_block=False):
    """Pre-activation bottleneck block: 1x1 reduce, 3x3, 1x1 expand + shortcut."""
    preact = bn_relu(x)
    if preact_block:
        # Let the shortcut branch start from the pre-activated tensor.
        x = preact
    inner = filters // _bootleneck_width
    out = regularized_padded_conv(inner, 1)(preact)
    out = regularized_padded_conv(inner, 3, strides=stride)(bn_relu(out))
    out = regularized_padded_conv(filters, 1)(bn_relu(out))
    return shortcut(x, filters, stride, mode=_shortcut_type) + out
def group_of_blocks(x, block_type, num_blocks, filters, stride, block_idx=0):
    """Stack `num_blocks` residual blocks; only the first block in the group
    applies `stride` (and may change the channel count).

    Args:
        x: input tensor.
        block_type: block constructor (original/preactivation/bottleneck).
        num_blocks: number of blocks in this group.
        filters: output channels for every block in the group.
        stride: spatial stride of the first block.
        block_idx: index of this group within the network; the very first
            group always gets a pre-activated shortcut.
    """
    # Read-only access to the module-level flag — the original redundant
    # `global _preact_shortcuts` declaration was removed (only needed for writes).
    preact_block = _preact_shortcuts or block_idx == 0
    x = block_type(x, filters, stride, preact_block=preact_block)
    for _ in range(num_blocks - 1):
        x = block_type(x, filters)
    return x
def Resnet(input_shape, n_classes, l2_reg=1e-4, group_sizes=(2, 2, 2), features=(16, 32, 64), strides=(1, 2, 2),
           shortcut_type='B', block_type='preactivated', first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
           dropout=0, cardinality=1, bootleneck_width=4, preact_shortcuts=True,
           final_dense_kernel_initializer=None, final_dense_bias_initializer=None):
    """Build a configurable ResNet/WRN-style Keras model.

    Block behavior is communicated to the block functions through module-level
    globals (set below), so concurrent construction of differently-configured
    models is not safe.

    Args:
        input_shape: input image shape, e.g. (32, 32, 3).
        n_classes: number of output classes.
        l2_reg: L2 weight-decay coefficient for all convs and the final Dense.
        group_sizes: number of blocks in each of the three groups.
        features: output channels per group.
        strides: spatial stride of the first block of each group.
        shortcut_type: 'A' (pad) or 'B' (projection) shortcut variant.
        block_type: 'preactivated', 'bootleneck' (sic), or 'original'.
        first_conv: kwargs for the stem convolution.
            NOTE: mutable default dict — it is only read here, never mutated.
        dropout: dropout rate inside pre-activation blocks (Wide ResNet).
        cardinality: stored globally for ResNeXt blocks (unused by the three
            registered block types).
        bootleneck_width: channel-reduction factor of bottleneck blocks.
        preact_shortcuts: whether non-first blocks use pre-activated shortcuts.
        final_dense_kernel_initializer / final_dense_bias_initializer: optional
            initializers for the classification head (must be set together).

    Returns:
        A tf.keras.Model mapping images to class logits.
    """
    global _regularizer, _shortcut_type, _preact_projection, _dropout, _cardinality, _bootleneck_width, _preact_shortcuts
    _bootleneck_width = bootleneck_width  # used in ResNeXts and bootleneck blocks
    _regularizer = tf.keras.regularizers.l2(l2_reg)
    _shortcut_type = shortcut_type  # used in blocks
    _cardinality = cardinality  # used in ResNeXts
    _dropout = dropout  # used in Wide ResNets
    _preact_shortcuts = preact_shortcuts
    block_types = {'preactivated': preactivation_block,
                   'bootleneck': bootleneck_block,
                   'original': original_block}
    selected_block = block_types[block_type]
    inputs = tf.keras.layers.Input(shape=input_shape)
    flow = regularized_padded_conv(**first_conv)(inputs)
    # Original (post-activation) networks activate right after the stem conv.
    if block_type == 'original':
        flow = bn_relu(flow)
    for block_idx, (group_size, feature, stride) in enumerate(zip(group_sizes, features, strides)):
        flow = group_of_blocks(flow,
                               block_type=selected_block,
                               num_blocks=group_size,
                               block_idx=block_idx,
                               filters=feature,
                               stride=stride)
    # Pre-activation networks need one final bn-relu before pooling.
    if block_type != 'original':
        flow = bn_relu(flow)
    flow = tf.keras.layers.GlobalAveragePooling2D()(flow)
    if final_dense_kernel_initializer is not None:
        assert final_dense_bias_initializer is not None, 'make sure kernel and bias initializer is not None at the same time'
        outputs = tf.keras.layers.Dense(n_classes, kernel_regularizer=_regularizer,
                                        kernel_initializer=final_dense_kernel_initializer,
                                        bias_initializer=final_dense_bias_initializer)(flow)
    else:
        outputs = tf.keras.layers.Dense(n_classes, kernel_regularizer=_regularizer)(flow)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    return model
def load_weights_func(model, model_name):
    """Restore weights from saved_models/<model_name>.tf; no-op when absent."""
    checkpoint_path = os.path.join('saved_models', model_name + '.tf')
    try:
        model.load_weights(checkpoint_path)
    except tf.errors.NotFoundError:
        print("No weights found for this model!")
    return model
def cifar_resnet20(block_type='original', shortcut_type='A', l2_reg=1e-4, load_weights=False, input_shape=None, n_classes=None):
    """CIFAR ResNet-20: three groups of 3 blocks with 16/32/64 filters."""
    model = Resnet(
        input_shape=input_shape,
        n_classes=n_classes,
        l2_reg=l2_reg,
        group_sizes=(3, 3, 3),
        features=(16, 32, 64),
        strides=(1, 2, 2),
        first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
        shortcut_type=shortcut_type,
        block_type=block_type,
        preact_shortcuts=False,
    )
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet20')
    return model
def cifar_resnet32(block_type='original', shortcut_type='A', l2_reg=1e-4, load_weights=False, input_shape=None):
    """CIFAR ResNet-32 (10 classes): three groups of 5 blocks, 16/32/64 filters."""
    model = Resnet(
        input_shape=input_shape,
        n_classes=10,
        l2_reg=l2_reg,
        group_sizes=(5, 5, 5),
        features=(16, 32, 64),
        strides=(1, 2, 2),
        first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
        shortcut_type=shortcut_type,
        block_type=block_type,
        preact_shortcuts=False,
    )
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet32')
    return model
def cifar_resnet44(block_type='original', shortcut_type='A', l2_reg=1e-4, load_weights=False, input_shape=None):
    """CIFAR ResNet-44 (10 classes): three groups of 7 blocks, 16/32/64 filters."""
    model = Resnet(
        input_shape=input_shape,
        n_classes=10,
        l2_reg=l2_reg,
        group_sizes=(7, 7, 7),
        features=(16, 32, 64),
        strides=(1, 2, 2),
        first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
        shortcut_type=shortcut_type,
        block_type=block_type,
        preact_shortcuts=False,
    )
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet44')
    return model
def cifar_resnet56(block_type='original', shortcut_type='A', l2_reg=1e-4, load_weights=False, input_shape=None):
    """CIFAR ResNet-56 (10 classes): three groups of 9 blocks, 16/32/64 filters."""
    model = Resnet(
        input_shape=input_shape,
        n_classes=10,
        l2_reg=l2_reg,
        group_sizes=(9, 9, 9),
        features=(16, 32, 64),
        strides=(1, 2, 2),
        first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
        shortcut_type=shortcut_type,
        block_type=block_type,
        preact_shortcuts=False,
    )
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet56')
    return model
def cifar_resnet110(block_type='preactivated', shortcut_type='B', l2_reg=1e-4, load_weights=False, input_shape=None):
    """CIFAR ResNet-110 (10 classes): three groups of 18 blocks, 16/32/64 filters."""
    model = Resnet(
        input_shape=input_shape,
        n_classes=10,
        l2_reg=l2_reg,
        group_sizes=(18, 18, 18),
        features=(16, 32, 64),
        strides=(1, 2, 2),
        first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
        shortcut_type=shortcut_type,
        block_type=block_type,
        preact_shortcuts=False,
    )
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet110')
    return model
def cifar_resnet164(shortcut_type='B', load_weights=False, l2_reg=1e-4, input_shape=None):
    """CIFAR ResNet-164 (10 classes): bottleneck blocks, 64/128/256 filters."""
    model = Resnet(
        input_shape=input_shape,
        n_classes=10,
        l2_reg=l2_reg,
        group_sizes=(18, 18, 18),
        features=(64, 128, 256),
        strides=(1, 2, 2),
        first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
        shortcut_type=shortcut_type,
        block_type='bootleneck',
        preact_shortcuts=True,
    )
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet164')
    return model
def cifar_resnet1001(shortcut_type='B', load_weights=False, l2_reg=1e-4, input_shape=None):
    """CIFAR ResNet-1001 (10 classes): bottleneck blocks, 111 blocks per group."""
    model = Resnet(
        input_shape=input_shape,
        n_classes=10,
        l2_reg=l2_reg,
        group_sizes=(111, 111, 111),
        features=(64, 128, 256),
        strides=(1, 2, 2),
        first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
        shortcut_type=shortcut_type,
        block_type='bootleneck',
        preact_shortcuts=True,
    )
    if load_weights:
        model = load_weights_func(model, 'cifar_resnet1001')
    return model
def cifar_wide_resnet(N, K, block_type='preactivated', shortcut_type='B', dropout=0, l2_reg=2.5e-4, n_classes=None, preact_shortcuts=False, input_shape=None):
    """Wide ResNet WRN-N-K: depth N (total conv layers), widening factor K."""
    assert (N - 4) % 6 == 0, "N-4 has to be divisible by 6"
    # layers per block - since N is total number of convolutional layers in Wide ResNet
    lpb = (N - 4) // 6
    return Resnet(
        input_shape=input_shape,
        n_classes=n_classes,
        l2_reg=l2_reg,
        group_sizes=(lpb, lpb, lpb),
        features=(16 * K, 32 * K, 64 * K),
        strides=(1, 2, 2),
        first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
        shortcut_type=shortcut_type,
        block_type=block_type,
        dropout=dropout,
        preact_shortcuts=preact_shortcuts,
    )
def cifar_WRN_16_4(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, input_shape=None):
    """Wide ResNet 16-4 (pre-activation blocks)."""
    model = cifar_wide_resnet(16, 4, 'preactivated', shortcut_type,
                              dropout=dropout, l2_reg=l2_reg, input_shape=input_shape)
    if load_weights:
        model = load_weights_func(model, 'cifar_WRN_16_4')
    return model
def cifar_WRN_40_4(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, input_shape=None):
    """Wide ResNet 40-4 (pre-activation blocks)."""
    model = cifar_wide_resnet(40, 4, 'preactivated', shortcut_type,
                              dropout=dropout, l2_reg=l2_reg, input_shape=input_shape)
    if load_weights:
        model = load_weights_func(model, 'cifar_WRN_40_4')
    return model
def cifar_WRN_16_8(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, input_shape=None):
    """Wide ResNet 16-8 (pre-activation blocks)."""
    model = cifar_wide_resnet(16, 8, 'preactivated', shortcut_type,
                              dropout=dropout, l2_reg=l2_reg, input_shape=input_shape)
    if load_weights:
        model = load_weights_func(model, 'cifar_WRN_16_8')
    return model
def cifar_WRN_28_10(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, n_classes=None, preact_shortcuts=False, input_shape=None):
    """Wide ResNet 28-10 (pre-activation blocks).

    Note: `load_weights` is accepted for signature parity but not acted on here.
    """
    return cifar_wide_resnet(28, 10, 'preactivated', shortcut_type,
                             dropout=dropout, l2_reg=l2_reg, n_classes=n_classes,
                             preact_shortcuts=preact_shortcuts, input_shape=input_shape)
def cifar_WRN_28_2(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, n_classes=None, preact_shortcuts=False, input_shape=None):
    """Wide ResNet 28-2 (pre-activation blocks).

    Note: `load_weights` is accepted for signature parity but not acted on here.
    """
    return cifar_wide_resnet(28, 2, 'preactivated', shortcut_type,
                             dropout=dropout, l2_reg=l2_reg, n_classes=n_classes,
                             preact_shortcuts=preact_shortcuts, input_shape=input_shape)
def cifar_WRN_40_2(shortcut_type='B', load_weights=False, dropout=0, l2_reg=2.5e-4, n_classes=None, preact_shortcuts=False, input_shape=None):
    """Wide ResNet 40-2 (pre-activation blocks).

    Note: `load_weights` is accepted for signature parity but not acted on here.
    """
    return cifar_wide_resnet(40, 2, 'preactivated', shortcut_type,
                             dropout=dropout, l2_reg=l2_reg, n_classes=n_classes,
                             preact_shortcuts=preact_shortcuts, input_shape=input_shape)
def cifar_resnext(N, cardinality, width, shortcut_type='B', ):
    """CIFAR ResNeXt-style network with N total conv layers.

    Args:
        N: total conv-layer count; (N - 3) must be divisible by 9.
        cardinality: number of grouped-conv paths (stored for ResNeXt blocks).
        width: widening factor applied to the 16/32/64 base feature sizes.
        shortcut_type: shortcut variant passed through to Resnet().

    BUGFIXES:
      - the assert message claimed "N-4 ... by 6" while the check is (N-3) % 9;
      - the call passed `width=width`, which Resnet() does not accept and which
        raised TypeError unconditionally. `width` still shapes `features`.
    NOTE(review): Resnet() does not register a 'resnext' block type, so this
    constructor still raises KeyError there — the block type needs implementing.
    """
    assert (N - 3) % 9 == 0, "N-3 has to be divisible by 9"
    lpb = (N - 3) // 9  # layers (blocks) per group
    model = Resnet(input_shape=(32, 32, 3), n_classes=10, l2_reg=1e-4, group_sizes=(lpb, lpb, lpb),
                   features=(16 * width, 32 * width, 64 * width),
                   strides=(1, 2, 2), first_conv={"filters": 16, "kernel_size": 3, "strides": 1},
                   shortcut_type=shortcut_type,
                   block_type='resnext', cardinality=cardinality)
    return model
if __name__ == '__main__':
    # Smoke test: build a WRN-28-10 with the settings used in DeepAA search.
    model = cifar_WRN_28_10(dropout=0, l2_reg=5e-4/2., preact_shortcuts=False, n_classes=10)
DeepAA | DeepAA-master/policy.py | import tensorflow as tf
import numpy as np
import math
import json
from tensorflow_probability import distributions as tfd
from resnet import Resnet
# Per-channel (RGB) normalization statistics on the [0, 1] pixel scale.
CIFAR_MEANS = np.array([0.49139968, 0.48215841, 0.44653091], dtype=np.float32)
CIFAR_STDS = np.array([0.2023, 0.1994, 0.2010], dtype=np.float32)
SVHN_MEANS = np.array([0.4379, 0.4440, 0.4729], dtype=np.float32)
SVHN_STDS = np.array([0.1980, 0.2010, 0.1970], dtype=np.float32)
IMAGENET_MEANS = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGENET_STDS = np.array([0.229, 0.224, 0.225], dtype=np.float32)
class DA_Policy_logits(tf.keras.Model):
    """Trainable augmentation policy: a softmax categorical distribution over a
    discrete set of (operation, magnitude) sub-policies, parameterized by a
    single logits vector with one entry per item in `available_policies`.
    """

    def __init__(self, l_ops, l_mags, l_uniq, op_names, ops_mid_magnitude,
                 N_repeat_random, available_policies, policy_init='identity'):
        """
        Args:
            l_ops: number of augmentation operations.
            l_mags: number of discrete magnitude levels (expected odd).
            l_uniq: number of unique (op, magnitude) pairs.
            op_names: operation names; text after ':' is stripped before lookup.
            ops_mid_magnitude: per-op "identity" magnitude ('random', None, or int).
            N_repeat_random: how many times random ops may be repeated.
            available_policies: indices of usable (op, magnitude) pairs.
            policy_init: 'uniform' (flat logits) or 'identity' (mass on entry 0).
        """
        super().__init__()
        self.l_uniq = l_uniq
        self.l_ops = l_ops
        self.l_mags = l_mags
        self.N_repeat_random = N_repeat_random
        self.available_policies = available_policies
        if policy_init == 'uniform':
            init_value = tf.constant([0.0]*len(available_policies), dtype=tf.float32)
        elif policy_init == 'identity':
            # Concentrate the initial probability mass on the first (identity) policy.
            init_value = tf.constant([8.0] + [0.0]*(len(available_policies)-1), dtype=tf.float32)
            init_value = init_value - tf.reduce_mean(init_value)
        else:
            raise Exception
        self.logits = tf.Variable(initial_value=init_value, trainable=True)
        self.ops_mid_magnitude = ops_mid_magnitude
        self.unique_policy = self._get_unique_policy(op_names, l_ops, l_mags)
        self.N_random, self.repeat_cfg, self.reduce_random_mat = self._get_repeat_random(op_names, l_ops, l_mags,
                                                                                         l_uniq, N_repeat_random)
        self.act = tf.nn.softmax

    def sample(self, images_orig, images, onehot_ops_mags, augNum):
        """Draw augNum*batch sub-policy samples from the current distribution.

        Returns (ops, mags, sampled policy indices, sampled probabilities).
        """
        bs = len(images_orig)
        probs = self.act(self.logits, axis=-1)
        dist = tfd.Categorical(probs=probs)
        samples_om = dist.sample(augNum*bs).numpy()  # (augNum, bs)
        ops_dense, mags_dense, reduce_random_mat, ops_mags_idx, probs, probs_exp = self.get_dense_aug(images, repeat_random_ops=False)
        ops = ops_dense[samples_om]
        mags = mags_dense[samples_om]
        ops_mags_idx_sample = ops_mags_idx[samples_om]
        probs_sample = probs.numpy()[samples_om]
        return ops, mags, ops_mags_idx_sample, probs_sample

    def probs(self, images_orig, images, onehot_ops_mags, training):
        """Return the policy distribution, tiled once per image in the batch."""
        bs = len(images_orig)
        probs = self.act(self.logits, axis=-1)
        probs = tf.repeat(probs[tf.newaxis], bs, axis=0)
        return probs

    def get_dense_aug(self, images, repeat_random_ops):
        """Materialize the dense (op, magnitude) tables for the available policies.

        When `repeat_random_ops` is set, rows containing random operations are
        repeated N_repeat_random times and `reduce_random_mat` averages them back.
        """
        ops_uniq, mags_uniq = self.unique_policy
        ops_dense = np.squeeze(ops_uniq)[self.available_policies]
        mags_dense = np.squeeze(mags_uniq)[self.available_policies]
        ops_mags_idx = self.available_policies
        if repeat_random_ops:
            isRepeat = [np.any(np.array(ops_dense == repeat_op_idx), axis=1) for repeat_op_idx in self.repeat_ops_idx]
            isRepeat = np.stack(isRepeat, axis=1)
            isRepeat = np.any(isRepeat, axis=1)
            nRepeat = [self.N_repeat_random if isrepeat else 1 for isrepeat in isRepeat]
            ops_dense = np.repeat(ops_dense, nRepeat, axis=0)
            mags_dense = np.repeat(mags_dense, nRepeat, axis=0)
            reduce_random_mat = np.eye(len(self.available_policies)) / np.array(nRepeat, dtype=np.float32)
            reduce_random_mat = np.repeat(reduce_random_mat, nRepeat, axis=1)
        else:
            nRepeat = [1] * len(self.available_policies)
            reduce_random_mat = np.eye(len(self.available_policies))
        probs = self.act(self.logits)
        probs_exp = np.repeat(probs/np.array(nRepeat, dtype=np.float32), nRepeat, axis=0)
        return ops_dense, mags_dense, reduce_random_mat, ops_mags_idx, probs, probs_exp

    def _get_unique_policy(self, op_names, l_ops, l_mags):
        """Enumerate the unique (op index, magnitude index) pairs.

        Ops with a mid ("identity") magnitude contribute l_mags-1 entries
        (the identity magnitude is dropped); ops with a negative mid value
        contribute all l_mags entries; 'random' and None ops contribute one
        entry at the center magnitude.
        """
        names_modified = [op_name.split(':')[0] for op_name in op_names]
        ops_list, mags_list = [], []
        repeat_ops_idx = []
        for k_name, name in enumerate(names_modified):
            if self.ops_mid_magnitude[name] == 'random':
                repeat_ops_idx.append(k_name)
                ops_sub, mags_sub = np.array([[k_name]], dtype=np.int32), np.array([[(l_mags - 1) // 2]], dtype=np.int32)
            elif self.ops_mid_magnitude[name] is not None and self.ops_mid_magnitude[name] >= 0 and self.ops_mid_magnitude[name] <= l_mags-1:
                ops_sub = k_name * np.ones([l_mags - 1, 1], dtype=np.int32)
                mags_sub = np.array([l for l in range(l_mags) if l != self.ops_mid_magnitude[name]], dtype=np.int32)[:, np.newaxis]
            elif self.ops_mid_magnitude[name] is not None and self.ops_mid_magnitude[name] < 0:  # or self.ops_mid_magnitude[name]>l_mags-1):
                ops_sub = k_name * np.ones([l_mags, 1], dtype=np.int32)
                mags_sub = np.arange(l_mags, dtype=np.int32)[:, np.newaxis]
            elif self.ops_mid_magnitude[name] is None:
                ops_sub, mags_sub = np.array([[k_name]], dtype=np.int32), np.array([[(l_mags - 1) // 2]], dtype=np.int32)
            else:
                raise Exception('Unrecognized middle magnitude')
            ops_list.append(ops_sub)
            mags_list.append(mags_sub)
        ops = np.concatenate(ops_list, axis=0)
        mags = np.concatenate(mags_list, axis=0)
        self.repeat_ops_idx = repeat_ops_idx
        return ops.astype(np.int32), mags.astype(np.int32)

    def _get_repeat_random(self, op_names, l_ops, l_mags, l_uniq, N_repeat_random):
        """Build the per-policy repeat counts and the averaging matrix that
        folds repeated random-op evaluations back to l_uniq entries."""
        names_modified = [op_name.split(':')[0] for op_name in op_names]
        N_random = sum([1 for name in names_modified if self.ops_mid_magnitude[name] == 'random'])
        repeat_cfg = []
        for k_name, name in enumerate(names_modified):
            if self.ops_mid_magnitude[name] == 'random':
                repeat_cfg.append(N_repeat_random)  # we may repeat random operations for N_repeat_random times
            elif self.ops_mid_magnitude[name] is not None and self.ops_mid_magnitude[name] == -1:
                # BUGFIX: was `repeat_cfg.append([1]*l_mags)`, which nested a list
                # inside repeat_cfg and broke np.array(repeat_cfg, dtype=np.int32);
                # this op contributes l_mags unique entries, each repeated once.
                repeat_cfg.extend([1]*l_mags)
            elif self.ops_mid_magnitude[name] is not None and self.ops_mid_magnitude[name] >= 0 and self.ops_mid_magnitude[name] <= l_mags-1:
                repeat_cfg.extend([1]*(l_mags-1))
            elif self.ops_mid_magnitude[name] is None:
                repeat_cfg.append(1)
            else:
                raise Exception
        repeat_cfg = np.array(repeat_cfg, dtype=np.int32)
        # BUGFIX: `np.float` was removed in NumPy >= 1.24; use the builtin float.
        reduce_mat = np.eye(l_uniq)/repeat_cfg[np.newaxis].astype(float)
        reduce_mat = np.repeat(reduce_mat, repeat_cfg, axis=1)
        return N_random, repeat_cfg, reduce_mat

    @property
    def idx_removed_redundant(self):
        # Indices of the first occurrence of each policy after repetition,
        # i.e. the columns that survive when repeated random ops are dropped.
        idx_removed_redundant = np.concatenate([[1] if rep == 1 else [1]+[0]*(rep-1) for rep in self.repeat_cfg]).nonzero()[0]
        assert len(idx_removed_redundant) == self.l_uniq, 'removing the repeated random operations'
        return idx_removed_redundant
# Per-model micro-batch sizes used when accumulating gradients / JVPs below.
_PARALLEL_BATCH_small, _PARALLEL_BATCH_median, _PARALLEL_BATCH_large = 16, 128, 256 # 64
import os
import sys
import numpy as np
import tensorflow as tf
tf.config.threading.set_inter_op_parallelism_threads(0)
# Enable memory growth on every GPU so TF does not pre-allocate all VRAM.
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
import multiprocessing
import argparse
from augmentation import get_mid_magnitude
from DeepAA_utils import test_loss, test_accuracy, train_loss, train_accuracy
from DeepAA_utils import get_model, get_dataset, get_augmentation, get_loss_fun, get_optim_net, get_optim_policy
from DeepAA_utils import get_lops_luniq, get_policy, get_img_size
from DeepAA_utils import PrefetchGenerator, save_policy
from tensorflow.keras.utils import Progbar
import matplotlib
matplotlib.use('Agg')  # headless backend; must be selected before pyplot is imported
from utils import Logger as myLogger
from utils import repeat
# Command-line configuration for DeepAA policy search.
parser = argparse.ArgumentParser()
# pretrain
parser.add_argument('--use_model', default='WRN_28_10', type=str, help='Model used for search')
parser.add_argument('--dataset', default='cifar10', type=str, help='Dataset, e.g., cifar10, imagenet')
# NOTE(review): default n_classes=100 while the default dataset is cifar10 — confirm callers override this.
parser.add_argument('--n_classes', default=100, type=int, help='Number of classes')
parser.add_argument('--nb_epochs', default=45, type=int, help='Number of epochs for pretrain')
parser.add_argument('--pretrain_size', default=5000, type=int, help='Number of images for pretraining')
parser.add_argument('--l_mags', default=13, type=int, help='Number of magnitudes, should be an odd number')
parser.add_argument('--policy_lr', default=0.025, type=float, help='Policy learning rate')
parser.add_argument('--pretrain_lr', default=0.1, type=float, help='maximum learning rate')
parser.add_argument('--batch_size', default=128, type=int, help='Training batch size')
parser.add_argument('--val_batch_size', default=1024, type=int, help='Validation batch size')
parser.add_argument('--test_batch_size', default=512, type=int, help='Testing batch size')
parser.add_argument('--clip_policy_gradient_norm', default=5.0, type=float, help='clipping the policy gradient by norm')
parser.add_argument('--debug', default=False, action='store_true', help='Debugging')
parser.add_argument('--seed', default=1, type=int, help='Random seed')
parser.add_argument('--policy_bn_training', default=False, action='store_true', help='use batchnorm for policy search, Default to False')
# policy-search configuration
parser.add_argument('--n_policies', default=4, type=int, help='Number of policies')
parser.add_argument('--search_bno', default=256, type=int, help='Search steps for each policy')
parser.add_argument('--repeat_random_ops', default=False, action='store_true', help='repeat random operations (randCrop, randFlip, randCutout')
parser.add_argument('--N_repeat_random', default=1, type=int, help='Number to repeats')
parser.add_argument('--use_pool', default=False, action='store_true', help='Using multiprocessing for augmentation')
parser.add_argument('--chunk_size', default=None, type=int, help='Chunk size for augmentation')
parser.add_argument('--EXP_gT_factor', default=4, type=int, help='Expansion factor for calculating gradient')
parser.add_argument('--EXP_G', default=16, type=int, help='Expansion for Jacobian vector product')
parser.add_argument('--train_same_labels', default=16, type=int, help='Sample data from N randomly selected labels')
# Dummy params consumed by some IDE/launcher integrations.
parser.add_argument('--mode', default='client', type=str, help='Dummy params')
parser.add_argument('--port', default=38277, type=int, help='Dummy params')
args=parser.parse_args()
# Pick the gradient-accumulation micro-batch size appropriate for the model.
if args.use_model in ['resnet50']:
    _PARALLEL_BATCH = _PARALLEL_BATCH_small
elif args.use_model in ['WRN_28_10']:
    _PARALLEL_BATCH = _PARALLEL_BATCH_median
elif args.use_model in ['WRN_40_2']:
    _PARALLEL_BATCH = _PARALLEL_BATCH_large
else:
    raise Exception('Unrecognized model {}'.format(args.use_model))
# Optional multiprocessing pool used by the augmentation pipeline.
n_cpus = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=n_cpus) if args.use_pool else None
np.random.seed(int(args.seed))
tf.random.set_seed(int(args.seed))
# Augmentation-search bookkeeping: op magnitudes, op counts, image size, data.
ops_mid_magnitude = get_mid_magnitude(args.l_mags)
args.l_ops, args.l_uniq = get_lops_luniq(args, ops_mid_magnitude)
args.img_size = get_img_size(args)
train_ds, val_ds, test_ds, search_ds = get_dataset(args)
nb_train_steps = len(train_ds)
augmentation_default, augmentation_search, augmentation_test = get_augmentation(args)
_, test_loss_fun, val_loss_fun = get_loss_fun()
mirrored_strategy = tf.distribute.MirroredStrategy()
# NOTE(review): nesting below reconstructed from a flattened source — model,
# losses and optimizers are created under the strategy scope so their variables
# are mirrored across replicas; verify against the upstream repository.
with mirrored_strategy.scope():
    model = get_model(args, args.use_model, args.n_classes)
    checkpoint = tf.train.Checkpoint(model=model)
    train_loss_fun = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,
                                                                   reduction=tf.keras.losses.Reduction.NONE)
    optim_net = get_optim_net(args, nb_train_steps)
assert args.train_same_labels % mirrored_strategy.num_replicas_in_sync == 0, "Make sure val_same_labels can be divided by num_replicas_in_sync"
# One trainable policy (plus optimizer) per requested sub-policy slot.
available_policies = np.arange(args.l_uniq, dtype=np.int32)[:, np.newaxis]
print(available_policies)
all_using_policies, all_using_optim_policies = [], []
for k in range(args.n_policies):
    policy_train_ = get_policy(args, op_names=augmentation_search.op_names, ops_mid_magnitude=ops_mid_magnitude, available_policies= available_policies)
    optim_policy_ = get_optim_policy(args.policy_lr)
    all_using_policies.append(policy_train_)
    all_using_optim_policies.append(optim_policy_)
# Shuffle and materialize the first training iterator.
train_ds.on_epoch_end()
train_ds_iter = iter(train_ds)
def get_pretrain_data():
    """Fetch the next (images, labels) training batch and apply the default
    augmentation pipeline, restarting the epoch when the iterator is exhausted.

    Returns:
        Tuple of (images float32 tensor, labels int32 tensor).
    """
    global train_ds_iter
    try:
        images, labels = next(train_ds_iter)
    except StopIteration:
        # End of epoch: reshuffle and restart the iterator.
        # BUGFIX: was a bare `except:`, which also swallowed unrelated errors
        # (including KeyboardInterrupt) and masked real data-pipeline bugs.
        train_ds.on_epoch_end()
        train_ds_iter = iter(train_ds)
        images, labels = next(train_ds_iter)
    bs = len(labels)
    images, _ = augmentation_default(images, labels,
                                     [None] * bs, [None] * bs,
                                     use_post_aug=True, pool=pool)
    return tf.convert_to_tensor(images, dtype=tf.float32), tf.convert_to_tensor(labels, tf.int32)
@tf.function(
    input_signature=[tf.TensorSpec(shape=(None, *args.img_size), dtype=tf.float32),
                     tf.TensorSpec(shape=(None, ), dtype=tf.int32),
                     tf.TensorSpec(shape=(), dtype=tf.float32)],
)
def train_step(images_aug, labels, clip_gradient_norm):
    """One SGD step on a batch of augmented images; returns (loss, logits)."""
    bs = len(images_aug)  # NOTE(review): unused — kept for compatibility
    with tf.GradientTape() as tape:
        labels_aug_pred = model(images_aug, training=True)
        loss_aug = tf.reduce_mean(train_loss_fun(labels, labels_aug_pred))
        # Include the model's L2 weight-decay regularization losses.
        loss_aug += sum(model.losses)
    grad_net = tape.gradient(loss_aug, model.trainable_variables)
    # Optional global-norm gradient clipping (<= 0 disables it).
    if clip_gradient_norm > 0:
        grad_net, _ = tf.clip_by_global_norm(grad_net, clip_norm=clip_gradient_norm)
    optim_net.apply_gradients(zip(grad_net, model.trainable_variables))
    del tape
    return loss_aug, labels_aug_pred
def pretrain():
    """Pre-train the network for args.nb_epochs epochs on default-augmented
    data, then save and immediately reload the weights.

    The save/load round trip is required so the pretrained weights are
    synchronized across all GPUs (see the FixMe below).
    """
    for epoch in range(args.nb_epochs):
        # BUGFIX: removed an unreachable `if epoch == args.nb_epochs + 1: break`
        # guard — `epoch` never exceeds nb_epochs - 1 inside this loop.
        pbar = Progbar(target=nb_train_steps, interval=0.05, width=30)
        print('\n Pretrain Epoch {} \n'.format(epoch))
        for bno in range(nb_train_steps):
            images, labels = get_pretrain_data()
            loss, labels_pred = train_step(images, labels, clip_gradient_norm=5.)
            train_loss(loss)  # only record the last method's loss and accuracy
            train_accuracy(labels, labels_pred)
            pbar.update(bno + 1)
    # BUGFIX: the message previously reported 'model_ckpt{epoch-1}' while the
    # checkpoint is actually written to 'model_ckpt{nb_epochs}'.
    print('Saving the checkpoint to {}'.format('./results/images/ckpt{}/model_ckpt{}'.format(os.environ['CUDA_VISIBLE_DEVICES'], args.nb_epochs)))
    # FixMe: We need to save and then load the pretrain model, otherwise the pretrained model won't be synchronized across all GPUs
    model.save_weights('./results/images/ckpt{}/model_ckpt{}'.format(os.environ['CUDA_VISIBLE_DEVICES'], args.nb_epochs))
    model.load_weights('./results/images/ckpt{}/model_ckpt{}'.format(os.environ['CUDA_VISIBLE_DEVICES'], args.nb_epochs))
# TensorBoard writers and output folders, keyed by the visible-GPU setting so
# concurrent runs on different GPUs do not clobber each other's results.
search_summary_writer = tf.summary.create_file_writer('./results/images/logs/cuda{}/search'.format(os.environ['CUDA_VISIBLE_DEVICES']))
graph_summary_writer = tf.summary.create_file_writer('./results/images/logs/cuda{}/graph'.format(os.environ['CUDA_VISIBLE_DEVICES']))
save_folder = './results/images/cuda{}'.format(os.environ['CUDA_VISIBLE_DEVICES'])
save_folder_ckpt = './results/images/ckpt{}'.format(os.environ['CUDA_VISIBLE_DEVICES'])
# NOTE(review): os.mkdir assumes './results/images' already exists (the file
# writers above create their own paths); os.makedirs would be safer — confirm.
if not os.path.isdir(save_folder):
    os.mkdir(save_folder)
if not os.path.isdir(save_folder_ckpt):
    os.mkdir(save_folder_ckpt)
if __name__ == '__main__':
    # Mirror stdout to a per-GPU log file.
    sys.stdout = myLogger('./results/images/cuda{}/stdout'.format(os.environ['CUDA_VISIBLE_DEVICES']))
    # pretraining
    if 'imagenet' in args.dataset:
        # ImageNet uses a ready-made pretrained checkpoint instead of pretraining.
        checkpoint.restore('./pretrained_imagenet/imagenet_resnet50_ckpt')
    else:
        pretrain()
    # disable batch normalization updating
    for layer in model.layers:
        if isinstance(layer, tf.keras.layers.experimental.SyncBatchNormalization) or isinstance(layer, tf.keras.layers.BatchNormalization):
            layer.trainable = False
# NOTE(review): nesting reconstructed from a flattened source — gradients_like
# is placed at module level because the tf.function input signatures defined
# below reference it at decoration time; verify against the upstream repository.
gradients_like = tf.nest.map_structure(lambda g: tf.zeros_like(g), model.trainable_variables)
@tf.function(
    input_signature=[tf.TensorSpec(shape=(None, *args.img_size), dtype=tf.float32),
                     tf.TensorSpec(shape=(None, ), dtype=tf.int32),
                     tf.TensorSpec(shape=(), dtype=tf.float32),
                     tf.TensorSpec(shape=(None, ), dtype=tf.float32)]
)
def step2_cal_JVP_vStep(images_aug2, labels, weight_1, weights_2):
    """Weighted backward pass: gradient of the per-sample losses w.r.t. the
    model variables, with per-sample weights weights_2 scaled by weight_1
    supplied as output gradients (a VJP)."""
    if not args.debug:
        # Printed once per retrace — a visible marker that a new graph is built.
        print('*'*40 + ' retracing step2_cal_JVP_vStep ' + '*'*40)
    with tf.GradientTape() as tape:
        labels_aug_pred = model(images_aug2, training=False)
        loss_aug = train_loss_fun(labels, labels_aug_pred)
    grad_new = tape.gradient(loss_aug, model.trainable_variables, output_gradients = weights_2 * weight_1)
    del tape
    return grad_new
@tf.function(
    input_signature=[tf.TensorSpec(shape=(None, *args.img_size), dtype=tf.float32),
                     tf.TensorSpec(shape=(None,), dtype=tf.int32),
                     tf.TensorSpec(shape=(), dtype=tf.float32), tf.TensorSpec(shape=(), dtype=tf.float32),
                     [tf.TensorSpec.from_tensor(v) for v in tf.nest.flatten(gradients_like)]]
)
def step2_cal_JVP_jvpStep(images_aug2, labels, g_norm_train, g_norm_val, tangents):
    """Forward-mode pass: JVP of the per-sample losses along `tangents`,
    normalized by the product of the train/val gradient norms."""
    if not args.debug:
        print('*'*40 + ' retracing step2_cal_JVP_jStep ' + '*'*40)
    with tf.autodiff.ForwardAccumulator(primals=model.trainable_variables, tangents=tangents) as acc:
        labels_aug_pred = model(images_aug2, training=False)
        loss_aug = train_loss_fun(labels, labels_aug_pred)
    grad_importance_new = acc.jvp(loss_aug) / (g_norm_train * g_norm_val)
    del acc
    return grad_importance_new
@tf.function
def policy_gradient_stage1(reduce_random_mat,
                           images_aug, labels_aug,
                           images_val, labels_val,
                           weight_1, weights_2):
    """Stage-1 policy gradient: cosine similarity between the weighted training
    gradient and the validation gradient, plus per-sample importance scores
    computed via a forward-mode JVP along the normalized gradient difference.

    Returns:
        (cos_sim, grad_importance): scalar similarity and per-augmented-sample
        importance values (folded over repeated random ops when enabled).
    """
    reduce_random_mat = tf.squeeze(reduce_random_mat)
    images_aug = tf.squeeze(images_aug)
    labels_aug = tf.squeeze(labels_aug)
    images_val = tf.squeeze(images_val)
    labels_val = tf.squeeze(labels_val)
    weight_1 = tf.squeeze(weight_1)
    weights_2 = tf.squeeze(weights_2)
    bs = _PARALLEL_BATCH
    val_bs = tf.shape(images_val)[0]
    # Scale factor applied and later divided out for numerical stability.
    mult = tf.cast(val_bs, dtype=tf.float32)
    def batching(L, bs, k): # Get Batch Range
        start = k * bs
        if start + bs > L:
            end = L
        else:
            end = start + bs
        return start, end
    # 1) Step1: Get gradients of augmented and clean data
    def one_batch_grad(imgs, labs, w1, w2, grad):
        # Accumulate one micro-batch worth of weighted gradients.
        grad_new = step2_cal_JVP_vStep(imgs, labs, w1, w2)
        grad = tf.nest.map_structure(lambda g1, g2: g1+g2, grad, grad_new)
        return grad
    @tf.function
    def cal_grad(imgs, labs, w1, w2):
        # Sum weighted gradients over all micro-batches of size `bs`.
        L = tf.shape(imgs)[0]
        grad0 = tf.nest.map_structure(lambda g: tf.zeros_like(g), model.trainable_variables)
        grad, _ = tf.while_loop(
            cond = lambda grad_acc, k: tf.cast(k, dtype=tf.int32) < tf.cast(tf.math.ceil(tf.cast(L, dtype=tf.float32)/tf.cast(bs, dtype=tf.float32)), dtype=tf.int32),
            body = lambda grad_acc, k: (one_batch_grad(imgs[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       labs[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       w1,
                                                       w2[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       grad_acc), k+1),
            loop_vars = (grad0, tf.constant(0)),
            back_prop = False,
            parallel_iterations = 1,
        )
        return grad
    # Mean validation gradient and (scaled) weighted training gradient.
    grad_val = cal_grad(images_val, labels_val, tf.constant(1.0, dtype=tf.float32), tf.ones(val_bs, dtype=tf.float32)/tf.cast(val_bs, dtype=tf.float32))
    grad_train = cal_grad(images_aug, labels_aug, weight_1 * mult, weights_2)
    grad_train = tf.nest.map_structure(lambda g: g/mult, grad_train) # for numerical stability
    # 2) compute tangents
    g_norm_val = tf.linalg.global_norm(grad_val)
    g_norm_train = tf.linalg.global_norm(grad_train)
    gradV_gradT = sum([tf.reduce_sum(g1*g2) for g1, g2 in zip(grad_val, grad_train)])
    gradV_gradT_gradTrainNorm2 = gradV_gradT/(g_norm_train**2)
    # Component of grad_val orthogonal to grad_train (up to scaling).
    tangents = tf.nest.map_structure(lambda g1, g2: g1 - g2 * gradV_gradT_gradTrainNorm2, grad_val, grad_train)
    # 3) compute JVP
    def one_step_JVP(grad_importance_array, imgs, labs, k):
        grad_importance_ = tf.stop_gradient(
            step2_cal_JVP_jvpStep(imgs, labs, g_norm_train, g_norm_val, tangents)
        )
        grad_importance_array = grad_importance_array.write(tf.cast(k, dtype=tf.int32), grad_importance_)
        return grad_importance_array
    @tf.function
    def run_JVP(imgs, labs):
        # Collect per-sample JVP values micro-batch by micro-batch.
        L = tf.shape(imgs)[0]
        grad_importance_array = tf.TensorArray(tf.float32, size=0, dynamic_size=True, infer_shape=False, element_shape=[None])
        grad_importance_array, _ = tf.while_loop(
            cond = lambda grad_TA, k: tf.cast(k, dtype=tf.int32) < tf.cast(tf.math.ceil(tf.cast(L, dtype=tf.float32)/tf.cast(bs, dtype=tf.float32)), dtype=tf.int32),
            body = lambda grad_TA, k: (one_step_JVP(grad_TA,
                                                    imgs[batching(L,bs,k)[0]:batching(L,bs,k)[1]],
                                                    labs[batching(L,bs,k)[0]:batching(L,bs,k)[1]],
                                                    k), k+1),
            loop_vars = (grad_importance_array, tf.constant(0)),
            back_prop = False,
            parallel_iterations = 1,
        )
        return grad_importance_array.concat()
    grad_importance = run_JVP(images_aug, labels_aug)
    if args.repeat_random_ops:
        # Average the repeated evaluations of random ops back to unique policies.
        grad_importance = tf.matmul(grad_importance[tf.newaxis], reduce_random_mat, transpose_b=True)[0]
    # 4) compute cosine similarity
    cos_sim = gradV_gradT / (g_norm_train * g_norm_val)
    return cos_sim, grad_importance
@tf.function()
def policy_gradient_stage2(reduce_random_mat, images_aug_s, labels_aug_s, images_aug2, labels, images_val, labels_val, weights_gT, weights_G):
    """Per-replica policy-gradient signal for search stages >= 2.

    Returns (cos_sim, grad_importance): the cosine similarity between the
    train/validation gradients, and a per-candidate-augmentation importance
    score computed via Jacobian-vector products.

    NOTE(review): relies on module-level `model`, `_PARALLEL_BATCH`, `args`
    and the helpers `step2_cal_JVP_vStep` / `step2_cal_JVP_jvpStep` defined
    elsewhere in this file.
    """
    reduce_random_mat = tf.squeeze(reduce_random_mat)
    images_aug_s = tf.squeeze(images_aug_s)
    labels_aug_s = tf.squeeze(labels_aug_s)
    images_val = tf.squeeze(images_val)
    labels_val = tf.squeeze(labels_val)
    weights_gT = tf.squeeze(weights_gT)
    bs = _PARALLEL_BATCH
    val_bs = tf.shape(images_val)[0]
    mult = 1.0
    def batching(L, bs, k): # Get Batch Range
        # [start, end) indices of the k-th micro-batch of size bs, clipped to L.
        start = k * bs
        if start + bs > L:
            end = L
        else:
            end = start + bs
        return start, end
    # 1) Step1: Get gradients of augmented and clean data
    def one_batch_grad(imgs, labs, w1, w2, grad):
        # Accumulate the weighted gradient of one micro-batch into `grad`.
        grad_new = step2_cal_JVP_vStep(imgs, labs, w1, w2)
        grad = tf.nest.map_structure(lambda g1, g2: g1+g2, grad, grad_new)
        return grad
    @tf.function
    def cal_grad(imgs, labs, w1, w2):
        # Gradient over the full set, accumulated micro-batch by micro-batch
        # so peak memory stays bounded by `bs`.
        L = tf.shape(imgs)[0]
        grad0 = tf.nest.map_structure(lambda g: tf.zeros_like(g), model.trainable_variables)
        grad, _ = tf.while_loop(
            cond = lambda grad_acc, k: tf.cast(k, dtype=tf.int32) < tf.cast(tf.math.ceil(tf.cast(L, dtype=tf.float32)/tf.cast(bs, dtype=tf.float32)), dtype=tf.int32),
            body = lambda grad_acc, k: (one_batch_grad(imgs[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       labs[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       w1,
                                                       w2[batching(L, bs, k)[0]:batching(L, bs, k)[1]],
                                                       grad_acc), k+1),
            loop_vars = (grad0, tf.constant(0)),
            back_prop = False,
            parallel_iterations = 1,
        )
        return grad
    grad_val = cal_grad(images_val, labels_val, tf.constant(1.0, dtype=tf.float32), tf.ones(val_bs, dtype=tf.float32)/tf.cast(val_bs, dtype=tf.float32))
    grad_train = cal_grad(images_aug_s, labels_aug_s, tf.constant(mult, dtype=tf.float32), weights_gT)
    grad_train = tf.nest.map_structure(lambda g: g/mult, grad_train) # for numerical stability
    # 2) compute tangents
    g_norm_val = tf.linalg.global_norm(grad_val)
    g_norm_train = tf.linalg.global_norm(grad_train)
    gradV_gradT = sum([tf.reduce_sum(g1*g2) for g1, g2 in zip(grad_val, grad_train)])
    gradV_gradT_gradTrainNorm2 = gradV_gradT/(g_norm_train**2)
    # Component of the validation gradient orthogonal to the train gradient.
    tangents = tf.nest.map_structure(lambda g1, g2: g1 - g2 * gradV_gradT_gradTrainNorm2, grad_val, grad_train)
    # 3) compute JVP
    def one_step_JVP(grad_importance_array, imgs, labs, k):
        # One micro-batch of JVP scores, written into slot k of the TensorArray.
        grad_importance_ = tf.stop_gradient(
            step2_cal_JVP_jvpStep(imgs, labs, g_norm_train, g_norm_val, tangents)
        )
        grad_importance_array = grad_importance_array.write(tf.cast(k, dtype=tf.int32), grad_importance_)
        return grad_importance_array
    @tf.function
    def run_JVP(imgs, labs):
        # JVP over all candidate augmentations, again micro-batched by `bs`.
        L = tf.shape(imgs)[0]
        grad_importance_array = tf.TensorArray(tf.float32, size=0, dynamic_size=True, infer_shape=False, element_shape=[None])
        grad_importance_array, _ = tf.while_loop(
            cond = lambda grad_TA, k: tf.cast(k, dtype=tf.int32) < tf.cast(tf.math.ceil(tf.cast(L, dtype=tf.float32)/tf.cast(bs, dtype=tf.float32)), dtype=tf.int32),
            body = lambda grad_TA, k: (one_step_JVP(grad_TA,
                                                    imgs[batching(L,bs,k)[0]:batching(L,bs,k)[1]],
                                                    labs[batching(L,bs,k)[0]:batching(L,bs,k)[1]],
                                                    k), k+1),
            loop_vars = (grad_importance_array, tf.constant(0)),
            back_prop = False,
            parallel_iterations = 1,
        )
        return grad_importance_array.concat()
    aug_n, l_seq, w, h, c = images_aug2.shape
    images_aug2_ = tf.reshape(images_aug2, [aug_n * l_seq, w, h, c])
    labels_ = tf.reshape(labels, [aug_n * l_seq])
    grad_importance = run_JVP(images_aug2_, labels_)
    grad_importance = tf.reshape(grad_importance, [aug_n, l_seq])
    if args.repeat_random_ops:
        # Fold scores of repeated random ops back onto the unique-op axis.
        grad_importance = tf.matmul(grad_importance, reduce_random_mat, transpose_b=True)
    # 4) compute cosine similarity
    cos_sim = gradV_gradT / (g_norm_train * g_norm_val)
    return cos_sim, grad_importance
@tf.function
def distributed_train_stage1(dist_inputs):
    """Run policy_gradient_stage1 on every replica and gather local results."""
    cos_sim_pr, grad_importance_pr = mirrored_strategy.run(policy_gradient_stage1, args=(*dist_inputs,))
    local = mirrored_strategy.experimental_local_results
    return local(cos_sim_pr), local(grad_importance_pr)
@tf.function
def distributed_train_stage2(dist_inputs):
    """Run policy_gradient_stage2 on every replica and gather local results."""
    cos_sim_pr, grad_importance_pr = mirrored_strategy.run(policy_gradient_stage2, args=(*dist_inputs,))
    local = mirrored_strategy.experimental_local_results
    return local(cos_sim_pr), local(grad_importance_pr)
def train_policy_stage1(stage, images_val_, labels_val_, images_batch, labels_batch):
    """One optimization step of the first-stage policy.

    Evaluates every dense candidate augmentation on the search batch,
    distributes the gradient-importance computation across replicas, and
    applies one policy-gradient update to `all_using_policies[stage-1]`.
    """
    search_bs = len(images_val_)
    val_bs = len(images_val_[0])
    assert search_bs == len(images_batch), 'Check dimensions'
    assert len(images_val_) % search_bs == 0, 'Use different validation batch for different search data point'
    EXP = 1 # expansion factor
    # Validation data goes through the (no-op op/mag) test-time pipeline only.
    images_val_, labels_val_ = augmentation_test(sum(images_val_, []), np.concatenate(labels_val_),
                                                 np.array([[0]]*search_bs*val_bs, dtype=np.int32),
                                                 np.array([[0]]*search_bs*val_bs, dtype=np.float32) / float(args.l_mags - 1),
                                                 use_post_aug=True, pool=pool, chunksize=args.chunk_size)
    images_val_ = np.reshape(images_val_, [search_bs, val_bs, *args.img_size])
    labels_val_ = np.reshape(labels_val_, [search_bs, val_bs])
    images_batch = repeat(images_batch, EXP, axis=0)
    labels_batch = repeat(labels_batch, EXP, axis=0)
    ops_dense, mags_dense, reduce_random_mat, ops_mags_idx, probs, probs_exp = all_using_policies[stage-1].get_dense_aug(None, args.repeat_random_ops)
    # NOTE(review): if images_batch[0] is not a list, images_aug_last below is
    # never assigned and the next reshape raises NameError — confirm callers
    # always pass list-of-lists batches.
    if isinstance(images_batch[0], list):
        # Apply every dense candidate (op, mag) to every search image.
        images_aug_last, labels_aug_last = augmentation_search(repeat(sum(images_batch,[]), len(ops_dense), axis=0), repeat(np.concatenate(labels_batch), len(ops_dense), axis=0),
                                                               np.tile(ops_dense, [search_bs * EXP, 1]), np.tile(mags_dense, [search_bs * EXP, 1]).astype(np.float32)/float(args.l_mags-1),
                                                               use_post_aug=False, pool=pool,
                                                               chunksize=None)
    images_aug_last = np.reshape(images_aug_last, [-1, len(ops_dense), *args.img_size])
    labels_aug_last = np.reshape(labels_aug_last, [-1, len(ops_dense)])
    weights_1 = np.ones(search_bs*EXP, dtype=np.float32)
    weights_2 = probs_exp
    assert search_bs % mirrored_strategy.num_replicas_in_sync == 0, 'Make sure that search_bs is multiples of mirrored_trategy'
    all_local_cos_sim, all_local_grad_importance = [], []
    # One distributed step per group of replicas; each replica gets its own
    # search data point from this window.
    for used_batch in range(0, search_bs, mirrored_strategy.num_replicas_in_sync):
        get_value_fn = lambda ctx: (
            tf.constant(reduce_random_mat, dtype=tf.float32),
            tf.convert_to_tensor(images_aug_last[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.convert_to_tensor(labels_aug_last[ctx.replica_id_in_sync_group + used_batch], dtype=tf.int32),
            tf.convert_to_tensor(images_val_[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.convert_to_tensor(labels_val_[ctx.replica_id_in_sync_group + used_batch], dtype=tf.int32),
            tf.convert_to_tensor(weights_1[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.constant(weights_2, dtype=tf.float32),
        )
        dist_values = mirrored_strategy.experimental_distribute_values_from_function(get_value_fn)
        all_local_cos_sim_, all_local_grad_importance_ = distributed_train_stage1(dist_values)
        all_local_cos_sim.extend(all_local_cos_sim_)
        all_local_grad_importance.extend(all_local_grad_importance_)
    grad_importance = tf.stack(all_local_grad_importance, axis=0)
    # Average the importance scores over all search data points.
    grad_importance = tf.reduce_mean(grad_importance, axis=0)
    mult_factor = 0.25
    with tf.GradientTape() as tape:
        probs = tf.nn.softmax(all_using_policies[stage-1].logits)
        # REINFORCE-style surrogate: maximize expected importance.
        loss_policy_final = -tf.reduce_sum(grad_importance * probs) * mult_factor
    grad_policy = tape.gradient(loss_policy_final, all_using_policies[stage-1].trainable_variables)
    all_using_optim_policies[stage-1].apply_gradients(zip(grad_policy, all_using_policies[stage-1].trainable_variables))
    del tape
def train_policy_stage2(stage, images_val_, labels_val_, images_batch, labels_batch):
    """One optimization step of a later-stage (depth >= 2) policy.

    Samples prefixes from the already-trained earlier stages, expands the
    current stage densely over all candidates, computes distributed
    gradient-importance scores, and applies one policy-gradient update to
    `all_using_policies[stage-1]`.
    """
    assert stage >= 2, 'depth starts from 2'
    search_bs = len(images_val_)
    val_bs = len(images_val_[0])
    assert search_bs == len(images_batch), 'Check dimension'
    assert len(images_val_) % search_bs == 0, 'Use different validation batch for different search data point'
    # Validation data only goes through the (no-op op/mag) test-time pipeline.
    images_val_, labels_val_ = augmentation_test(sum(images_val_, []), np.concatenate(labels_val_),
                                                 np.array([[0]]*search_bs*val_bs, dtype=np.int32),
                                                 np.array([[0]]*search_bs*val_bs, dtype=np.float32) / float(args.l_mags - 1),
                                                 use_post_aug=True, pool=pool, chunksize=args.chunk_size)
    images_val_ = np.reshape(images_val_, [search_bs, val_bs, *args.img_size])
    labels_val_ = np.reshape(labels_val_, [search_bs, val_bs])
    EXP_gT = args.l_uniq * args.EXP_gT_factor # Expansion for calculating gradients
    EXP_G = args.EXP_G # Expansion for calculating JVP
    images_batch_EXPgT = repeat(images_batch, EXP_gT, axis=0)
    labels_batch_EXPgT = repeat(labels_batch, EXP_gT, axis=0)
    images_batch_EXPG = repeat(images_batch, EXP_G, axis=0)
    labels_batch_EXPG = repeat(labels_batch, EXP_G, axis=0)
    # --- gradient branch: sample full-depth (stages 1..stage) augmentations.
    images_aug_s, labels_aug_s = images_batch_EXPgT, labels_batch_EXPgT
    ops_s, mags_s = [], []
    for k_stage in range(1, stage+1):
        dummy_images = [None] * search_bs * EXP_gT
        assert search_bs * EXP_gT == len(images_aug_s)
        assert len(images_aug_s[0]) == 1
        ops_s_, mags_s_, ops_mags_idx_s, probs_sample = all_using_policies[k_stage-1].sample(dummy_images, dummy_images, None, augNum=1)
        ops_s.append(ops_s_)
        mags_s.append(mags_s_)
    ops_s = np.concatenate(ops_s, axis=1)
    mags_s = np.concatenate(mags_s, axis=1)
    images_aug_s, labels_aug_s = augmentation_search(sum(images_aug_s, []), np.concatenate(labels_aug_s, axis=0),
                                                     ops_s, mags_s.astype(np.float32)/float(args.l_mags-1),
                                                     use_post_aug=False, pool=pool,
                                                     chunksize=None)
    images_aug_s = np.reshape(images_aug_s, [search_bs, EXP_gT, *args.img_size])
    labels_aug_s = np.reshape(labels_aug_s, [search_bs, EXP_gT])
    # --- JVP branch: sample prefixes from stages 1..stage-1 only ...
    images_aug_k, labels_aug_k = images_batch_EXPG, labels_batch_EXPG
    ops_k, mags_k = [], []
    for k_stage in range(1, stage):
        dummy_images = [None] * search_bs * EXP_G
        assert search_bs * EXP_G == len(images_aug_k)
        assert len(images_aug_k[0]) == 1
        ops_k_, mags_k_, ops_mags_idx_k, probs_sample = all_using_policies[k_stage-1].sample(dummy_images, dummy_images, None, augNum=1)
        ops_k.append(ops_k_)
        mags_k.append(mags_k_)
    ops_k = np.concatenate(ops_k, axis=1)
    mags_k = np.concatenate(mags_k, axis=1)
    images_aug_k, labels_aug_k = augmentation_search(sum(images_aug_k, []), np.concatenate(labels_aug_k, axis=0),
                                                     ops_k, mags_k.astype(np.float32)/float(args.l_mags-1),
                                                     use_post_aug=False, pool=pool, aug_finish=False, chunksize=args.chunk_size)
    # ... then expand the current stage densely over all candidate (op, mag).
    ops_dense, mags_dense, reduce_random_mat, ops_mags_idx, probs, probs_exp = all_using_policies[stage-1].get_dense_aug(None, repeat_random_ops=args.repeat_random_ops)
    images_aug_k, labels_aug_k = augmentation_search(repeat(images_aug_k, len(ops_dense), axis=0), np.repeat(labels_aug_k, len(ops_dense), axis=0),
                                                     np.tile(ops_dense, [search_bs * EXP_G, 1]), np.tile(mags_dense, [search_bs * EXP_G, 1]).astype(np.float32)/float(args.l_mags-1),
                                                     use_post_aug=False, pool=pool,
                                                     chunksize=None)
    images_aug_k = np.reshape(images_aug_k, [search_bs, EXP_G, len(ops_dense), *args.img_size])
    labels_aug_k = np.reshape(labels_aug_k, [search_bs, EXP_G, len(ops_dense)])
    weights_gT = np.ones(EXP_gT, dtype=np.float32) / float(EXP_gT)
    weights_G = np.ones(EXP_G, dtype=np.float32) / float(EXP_G)
    assert search_bs % mirrored_strategy.num_replicas_in_sync == 0, 'Make sure that search_bs is multiples of mirrored_trategy'
    all_local_cos_sim, all_local_grad_importance = [], []
    for used_batch in range(0, search_bs, mirrored_strategy.num_replicas_in_sync):
        get_value_fn = lambda ctx: (
            tf.convert_to_tensor(reduce_random_mat, dtype=tf.float32),
            tf.convert_to_tensor(images_aug_s[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.convert_to_tensor(labels_aug_s[ctx.replica_id_in_sync_group + used_batch], dtype=tf.int32),
            tf.convert_to_tensor(images_aug_k[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.convert_to_tensor(labels_aug_k[ctx.replica_id_in_sync_group + used_batch], dtype=tf.int32),
            tf.convert_to_tensor(images_val_[ctx.replica_id_in_sync_group + used_batch], dtype=tf.float32),
            tf.convert_to_tensor(labels_val_[ctx.replica_id_in_sync_group + used_batch], dtype=tf.int32),
            tf.convert_to_tensor(weights_gT, dtype=tf.float32),
            tf.convert_to_tensor(weights_G, dtype=tf.float32),
        )
        dist_values = mirrored_strategy.experimental_distribute_values_from_function(get_value_fn)
        all_local_cos_sim_, all_local_grad_importance_ = distributed_train_stage2(dist_values)
        all_local_cos_sim.extend(all_local_cos_sim_)
        all_local_grad_importance.extend(all_local_grad_importance_)
    grad_importance = tf.stack(all_local_grad_importance, axis=0)
    # Average over the EXP_G prefix samples for each search data point.
    grad_importance = tf.reduce_mean(grad_importance, axis=1)
    assert grad_importance.shape == [search_bs, args.l_uniq], 'Check dimension'
    # Mean-minus-std over search points: a pessimistic (variance-penalized) score.
    grad_importance = tf.reduce_mean(grad_importance.numpy(), axis=0) - tf.math.reduce_std(grad_importance.numpy(), axis=0)
    mult_factor = float(search_bs)
    with tf.GradientTape() as tape:
        probs = tf.nn.softmax(all_using_policies[stage - 1].logits)
        loss_policy_final = -tf.reduce_sum(grad_importance * probs) * mult_factor
    grad_policy = tape.gradient(loss_policy_final, all_using_policies[stage - 1].trainable_variables)
    all_using_optim_policies[stage - 1].apply_gradients(zip(grad_policy, all_using_policies[stage - 1].trainable_variables))
    del tape
def search_policy(search_bno, search_bs=16, val_bs=128):
    """Sequentially optimize each policy stage for `search_bno` batches each."""
    prefetcher = PrefetchGenerator(search_ds, val_ds, args.n_classes, search_bs, val_bs)
    for stage in range(1, args.n_policies + 1):
        progress = Progbar(target=search_bno, interval=1, width=30)
        for step in range(search_bno):
            images_val_, labels_val_, images_batch, labels_batch = prefetcher.next()
            # Stage 1 has its own dense update; all deeper stages share one path.
            trainer = train_policy_stage1 if stage == 1 else train_policy_stage2
            trainer(stage, images_val_, labels_val_, images_batch, labels_batch)
            progress.update(step + 1)
if __name__ == '__main__':
    # Run the full policy search, persist the learned policies, then shut the
    # worker pool down cleanly.  (The garbled metadata that was fused onto the
    # final line has been removed.)
    search_policy(search_bno=args.search_bno, search_bs=args.train_same_labels, val_bs=64)
    save_policy(args, all_using_policies, augmentation_search)
    pool.close()
    pool.join()
DeepAA | DeepAA-master/DeepAA_evaluate/lr_scheduler.py | import torch
from theconf import Config as C
def adjust_learning_rate_resnet(optimizer):
    """
    Sets the learning rate to the initial LR decayed by 10 on every predefined epochs
    Ref: AutoAugment
    """
    epochs = C.get()['epoch']
    # Milestone schedules for the three supported training lengths.
    milestones_by_length = {
        90: [30, 60, 80],
        180: [60, 120, 160],
        270: [90, 180, 240],
    }
    if epochs not in milestones_by_length:
        raise ValueError('invalid epoch=%d for resnet scheduler' % epochs)
    return torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones_by_length[epochs])
| 645 | 31.3 | 85 | py |
DeepAA | DeepAA-master/DeepAA_evaluate/augmentations.py | # code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import numpy as np
import torch
from DeepAA_evaluate import autoaugment, fast_autoaugment
import aug_lib
class Lighting(object):
    """Lighting noise(AlexNet - style PCA - based noise)"""

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)

    def __call__(self, img):
        # Zero noise scale: leave the image untouched.
        if self.alphastd == 0:
            return img
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        # Combine eigenvectors weighted by (alpha * eigenvalue) into one RGB shift.
        scaled = self.eigvec.type_as(img).clone()
        scaled = scaled.mul(alpha.view(1, 3).expand(3, 3))
        scaled = scaled.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = scaled.sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
    """
    Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
    """

    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        height, width = img.size(1), img.size(2)
        # Sample the cutout center (y first, then x — order matters for RNG
        # reproducibility); the square is clipped at the image borders.
        center_y = np.random.randint(height)
        center_x = np.random.randint(width)
        half = self.length // 2
        top = np.clip(center_y - half, 0, height)
        bottom = np.clip(center_y + half, 0, height)
        left = np.clip(center_x - half, 0, width)
        right = np.clip(center_x + half, 0, width)
        mask = np.ones((height, width), np.float32)
        mask[top: bottom, left: right] = 0.
        mask = torch.from_numpy(mask).expand_as(img)
        # In-place multiply, matching the reference implementation.
        img *= mask
        return img
def get_randaugment(n,m,weights,bs):
    """Map (n, m) magic codes to an augmentation policy.

    Codes >= 100 select fixed, published policies (AutoAugment /
    Fast-AutoAugment); otherwise (n, m) are the usual RandAugment
    depth/magnitude, with m == 0 meaning uniform (weighted) sampling.
    `bs` is accepted for interface compatibility but unused here.
    """
    if n == 101 and m == 101:
        return autoaugment.CifarAutoAugment(fixed_posterize=False)
    if n == 102 and m == 102:
        return autoaugment.CifarAutoAugment(fixed_posterize=True)
    if n == 201 and m == 201:
        return autoaugment.SVHNAutoAugment(fixed_posterize=False)
    if n == 202 and m == 202:
        # Bug fix: this branch previously duplicated 201 with
        # fixed_posterize=False; mirroring the 101/102 pattern, 202 selects
        # the fixed-posterize variant.
        return autoaugment.SVHNAutoAugment(fixed_posterize=True)
    if n == 301 and m == 301:
        return fast_autoaugment.cifar10_faa
    if n == 401 and m == 401:
        return fast_autoaugment.svhn_faa
    assert m < 100 and n < 100
    if m == 0:
        if weights is not None:
            return aug_lib.UniAugmentWeighted(n, probs=weights)
        elif n == 0:
            return aug_lib.UniAugment()
        else:
            raise ValueError('Wrong RandAug Params.')
    else:
        assert n > 0 and m > 0
        return aug_lib.RandAugment(n, m)
DeepAA | DeepAA-master/DeepAA_evaluate/deep_autoaugment.py | # code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import math
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
import os
import json
import hashlib
import requests
import scipy
from torchvision.transforms.transforms import Compose
random_mirror = True  # geometric ops flip the sign of their magnitude with p=0.5
##########################################################################
# Per-dataset channel statistics (RGB means/stds in [0, 1]).
CIFAR_MEANS = np.array([0.49139968, 0.48215841, 0.44653091], dtype=np.float32)
# CIFAR10_STDS = np.array([0.24703223, 0.24348513, 0.26158784], dtype=np.float32)
CIFAR_STDS = np.array([0.2023, 0.1994, 0.2010], dtype=np.float32)
SVHN_MEANS = np.array([0.4379, 0.4440, 0.4729], dtype=np.float32)
SVHN_STDS = np.array([0.1980, 0.2010, 0.1970], dtype=np.float32)
IMAGENET_MEANS = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGENET_STDS = np.array([0.229, 0.224, 0.225], dtype=np.float32)
# Geometric PIL ops. `v` is the sampled magnitude; when `random_mirror` is
# set, its sign is flipped with probability 0.5.
def ShearX(img, v):  # [-0.3, 0.3]
    assert -0.3 <= v <= 0.3
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v):  # [-0.3, 0.3]
    assert -0.3 <= v <= 0.3
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v):  # [-0.45, 0.45] as a fraction of image width
    assert -0.45 <= v <= 0.45
    if random_mirror and random.random() > 0.5:
        v = -v
    v = v * img.size[0]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v):  # [-0.45, 0.45] as a fraction of image height
    assert -0.45 <= v <= 0.45
    if random_mirror and random.random() > 0.5:
        v = -v
    v = v * img.size[1]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def TranslateXAbs(img, v):  # [0, 10] pixels (the old [-150, 150] comment did not match the assert)
    assert 0 <= v <= 10
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateYAbs(img, v):  # [0, 10] pixels
    assert 0 <= v <= 10
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v):  # [-30, 30] degrees
    assert -30 <= v <= 30
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.rotate(v)
# Color/intensity PIL ops. Ops with signature (img, _) ignore the magnitude.
def AutoContrast(img, _):
    return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
    return PIL.ImageOps.invert(img)
def Equalize(img, _):
    return PIL.ImageOps.equalize(img)
def Flip(img, _):  # not from the paper
    return PIL.ImageOps.mirror(img)
def Solarize(img, v):  # [0, 256]
    assert 0 <= v <= 256
    return PIL.ImageOps.solarize(img, v)
def Posterize(img, v):  # [4, 8]
    assert 4 <= v <= 8
    v = int(v)
    v = max(1, v)  # never posterize below 1 bit
    return PIL.ImageOps.posterize(img, v)
def Posterize2(img, v):  # [0, 4]
    assert 0 <= v <= 4
    v = int(v)
    return PIL.ImageOps.posterize(img, v)
def Contrast(img, v):  # [0.1,1.9]
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Contrast(img).enhance(v)
def Color(img, v):  # [0.1,1.9]
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Color(img).enhance(v)
def Brightness(img, v):  # [0.1,1.9]
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Brightness(img).enhance(v)
def Sharpness(img, v):  # [0.1,1.9]
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Sharpness(img).enhance(v)
def Cutout(img, v):  # [0, 0.2] as a fraction of image width
    assert 0.0 <= v <= 0.2
    if v <= 0.:
        return img
    v = v * img.size[0]
    return Cutout_default(img, v)
def CutoutAbs(img, v):  # v in pixels; the square may extend past the border
    # assert 0 <= v <= 20
    if v < 0:
        return img
    w, h = img.size
    # x0 = np.random.uniform(w)
    # y0 = np.random.uniform(h)
    x0 = random.uniform(0, w)
    y0 = random.uniform(0, h)
    x0 = int(max(0, x0 - v / 2.))
    y0 = int(max(0, y0 - v / 2.))
    x1 = min(w, x0 + v)
    y1 = min(h, y0 + v)
    xy = (x0, y0, x1, y1)
    # color = (125, 123, 114)
    color = (0, 0, 0)
    img = img.copy()
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img
def SamplePairing(imgs):  # [0, 0.4]
    # Returns an op that blends the input with a random image from `imgs`.
    def f(img1, v):
        i = np.random.choice(len(imgs))
        img2 = PIL.Image.fromarray(imgs[i])
        return PIL.Image.blend(img1, img2, v)
    return f
# =============== OPS for DeepAA ==============:
def mean_pad_randcrop(img, v):
    # v: Pad with mean value=[125, 123, 114] by v pixels on each side and then take random crop
    assert v <= 10, 'The maximum shift should be less then 10'
    padded_size = (img.size[0] + 2*v, img.size[1] + 2*v)
    new_img = PIL.Image.new('RGB', padded_size, color=(125, 123, 114))
    # new_img = PIL.Image.new('RGB', padded_size, color=(0, 0, 0))
    new_img.paste(img, (v, v))
    # Random crop offset in [0, 2v] restores the original size.
    top = random.randint(0, v*2)
    left = random.randint(0, v*2)
    new_img = new_img.crop((left, top, left + img.size[0], top + img.size[1]))
    return new_img
def Cutout_default(img, v): # Used in FastAA; unlike CutoutAbs the cutout may be clipped (smaller) at the boundary
    """Cut out a v x v gray square centered at a random point, clipped to the image.

    Behavior for v <= 16 (CIFAR/SVHN) is unchanged, including the 32x32 sanity
    check.  Larger v previously raised NotImplementedError, which made
    RandCutout60 (v=60, ImageNet-sized inputs) unusable; the same
    clipped-rectangle logic now applies for any non-negative v.
    """
    if v < 0:
        return img
    w, h = img.size
    if v <= 16:  # for cutout of cifar and SVHN
        assert w == h == 32
    x = random.uniform(0, w)
    y = random.uniform(0, h)
    x0 = int(min(w, max(0, x - v // 2)))  # clip to the range (0, w)
    x1 = int(min(w, max(0, x + v // 2)))
    y0 = int(min(h, max(0, y - v // 2)))
    y1 = int(min(h, max(0, y + v // 2)))
    xy = (x0, y0, x1, y1)
    color = (125, 123, 114)
    # color = (0, 0, 0)
    img = img.copy()
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img
def RandCrop(img, _):
    # Pad by 4 px with the dataset mean color, then random-crop to original size.
    v = 4
    return mean_pad_randcrop(img, v)
def RandCutout(img, _):
    v = 16  # Cutout 0.5 means 0.5*32=16 pixels as in the FastAA paper
    return Cutout_default(img, v)
def RandCutout60(img, _):
    v = 60  # 60-px cutout intended for ImageNet-sized inputs
    return Cutout_default(img, v)
def RandFlip(img, _):
    # Horizontal flip with probability 0.5.
    if random.random() > 0.5:
        img = Flip(img, None)
    return img
def Identity(img, _):
    return img
# ===================== ops for imagenet =============
def RandResizeCrop_imagenet(img, _):
    # ported from torchvision
    # for ImageNet use only
    scale = (0.08, 1.0)
    ratio = (3. / 4., 4. / 3.)
    size = IMAGENET_SIZE  # (224, 224)
    def get_params(img, scale, ratio):
        # Sample a crop box with relative area in `scale` and aspect ratio in
        # `ratio`; after 10 failed attempts fall back to a central crop.
        width, height = img.size
        area = float(width * height)
        log_ratio = [math.log(r) for r in ratio]
        for _ in range(10):
            target_area = area * random.uniform(scale[0], scale[1])
            aspect_ratio = math.exp(random.uniform(log_ratio[0], log_ratio[1]))
            w = round(math.sqrt(target_area * aspect_ratio))
            h = round(math.sqrt(target_area / aspect_ratio))
            if 0 < w <= width and 0 < h <= height:
                top = random.randint(0, height - h)
                left = random.randint(0, width - w)
                return left, top, w, h
        # fallback to central crop
        in_ratio = float(width) / float(height)
        if in_ratio < min(ratio):
            w = width
            h = round(w / min(ratio))
        elif in_ratio > max(ratio):
            h = height
            w = round(h * max(ratio))
        else:
            w = width
            h = height
        top = (height - h) // 2
        left = (width - w) // 2
        return left, top, w, h
    left, top, w_box, h_box = get_params(img, scale, ratio)
    box = (left, top, left + w_box, top + h_box)
    # Crop `box` and resize to the target size in a single bicubic resample.
    img = img.resize(size=size, resample=PIL.Image.CUBIC, box=box)
    return img
def Resize_imagenet(img, size):
    """Resize `img` bicubically.

    An int `size` scales the short side to `size` keeping aspect ratio;
    a 2-element tuple/list is used as the exact (width, height).
    Raises TypeError (previously a bare Exception) for other types —
    backward compatible for callers catching Exception.
    """
    w, h = img.size
    if isinstance(size, int):
        short, long = (w, h) if w <= h else (h, w)
        if short == size:
            return img  # already the requested short side
        new_short, new_long = size, int(size * long / short)
        new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
        return img.resize((new_w, new_h), PIL.Image.BICUBIC)
    elif isinstance(size, (tuple, list)):
        assert len(size) == 2, 'Check the size {}'.format(size)
        return img.resize(tuple(size), PIL.Image.BICUBIC)
    else:
        raise TypeError('size must be an int or a (w, h) pair, got {!r}'.format(size))
def centerCrop_imagenet(img, _):
    # for ImageNet only
    # https://github.com/pytorch/vision/blob/master/torchvision/transforms/functional.py
    crop_width, crop_height = IMAGENET_SIZE  # (224,224)
    image_width, image_height = img.size
    if crop_width > image_width or crop_height > image_height:
        # Image smaller than the crop in some dimension: pad it up first.
        padding_ltrb = [
            (crop_width - image_width) // 2 if crop_width > image_width else 0,
            (crop_height - image_height) // 2 if crop_height > image_height else 0,
            (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
            (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
        ]
        img = pad(img, padding_ltrb, fill=0)
        image_width, image_height = img.size
        if crop_width == image_width and crop_height == image_height:
            return img
    crop_top = int(round((image_height - crop_height) / 2.))
    crop_left = int(round((image_width - crop_width) / 2.))
    return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))
def _parse_fill(fill, img, name="fillcolor"):
# Process fill color for affine transforms
num_bands = len(img.getbands())
if fill is None:
fill = 0
if isinstance(fill, (int, float)) and num_bands > 1:
fill = tuple([fill] * num_bands)
if isinstance(fill, (list, tuple)):
if len(fill) != num_bands:
msg = ("The number of elements in 'fill' does not match the number of "
"bands of the image ({} != {})")
raise ValueError(msg.format(len(fill), num_bands))
fill = tuple(fill)
return {name: fill}
def pad(img, padding_ltrb, fill=0, padding_mode='constant'):
    # Pad (and, for negative entries, crop) a PIL image.
    # padding_ltrb: (left, top, right, bottom) amounts in pixels.
    if isinstance(padding_ltrb, list):
        padding_ltrb = tuple(padding_ltrb)
    if padding_mode == 'constant':
        opts = _parse_fill(fill, img, name='fill')
        if img.mode == 'P':
            # Palette images: ImageOps.expand drops the palette, so restore it.
            palette = img.getpalette()
            image = PIL.ImageOps.expand(img, border=padding_ltrb, **opts)
            image.putpalette(palette)
            return image
        return PIL.ImageOps.expand(img, border=padding_ltrb, **opts)
    elif len(padding_ltrb) == 4:
        image_width, image_height = img.size
        # Negative padding means cropping that边? -- negative entries crop first.
        cropping = -np.minimum(padding_ltrb, 0)
        if cropping.any():
            crop_left, crop_top, crop_right, crop_bottom = cropping
            img = img.crop((crop_left, crop_top, image_width - crop_right, image_height - crop_bottom))
        pad_left, pad_top, pad_right, pad_bottom = np.maximum(padding_ltrb, 0)
        if img.mode == 'P':
            palette = img.getpalette()
            img = np.asarray(img)
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
            img = PIL.Image.fromarray(img)
            img.putpalette(palette)
            return img
        img = np.asarray(img)
        # RGB image
        if len(img.shape) == 3:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
        # Grayscale image
        if len(img.shape) == 2:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
        return PIL.Image.fromarray(img)
    else:
        raise Exception
def augment_list(for_autoaug=True, for_DeepAA_cifar=True, for_DeepAA_imagenet=True): # 16 oeprations and their ranges
    # Build the (fn, low, high) op table; each flag appends a compatible
    # subset: AutoAugment extras, DeepAA-CIFAR ops, and DeepAA-ImageNet ops.
    l = [
        (ShearX, -0.3, 0.3),  # 0
        (ShearY, -0.3, 0.3),  # 1
        (TranslateX, -0.45, 0.45),  # 2
        (TranslateY, -0.45, 0.45),  # 3
        (Rotate, -30, 30),  # 4
        (AutoContrast, 0, 1),  # 5
        (Invert, 0, 1),  # 6
        (Equalize, 0, 1),  # 7
        (Solarize, 0, 256),  # 8
        (Posterize, 4, 8),  # 9
        (Contrast, 0.1, 1.9),  # 10
        (Color, 0.1, 1.9),  # 11
        (Brightness, 0.1, 1.9),  # 12
        (Sharpness, 0.1, 1.9),  # 13
        (Cutout, 0, 0.2),  # 14
        # (SamplePairing(imgs), 0, 0.4),  # 15
    ]
    if for_autoaug:
        l += [
            (CutoutAbs, 0, 20),  # compatible with auto-augment
            (Posterize2, 0, 4),  # 9
            (TranslateXAbs, 0, 10),  # 9
            (TranslateYAbs, 0, 10),  # 9
        ]
    if for_DeepAA_cifar:
        l += [
            (Identity, 0., 1.0),
            (RandFlip, 0., 1.0),  # Additional 15
            (RandCutout, 0., 1.0),  # 16
            (RandCrop, 0., 1.0),  # 17
        ]
    if for_DeepAA_imagenet:
        l += [
            (RandResizeCrop_imagenet, 0., 1.0),
            (RandCutout60, 0., 1.0)
        ]
    return l
# Name -> (fn, low, high) lookup used by apply_augment; built once at import.
augment_dict = {fn.__name__: (fn, v1, v2) for fn, v1, v2 in augment_list()}
def Cutout16(img, _):
    # Fixed 16-px cutout (FastAA-style, clipped at borders).
    # return CutoutAbs(img, 16)
    return Cutout_default(img, 16)
# TrivialAugment-style op table: (fn, low, high) triples; Invert is excluded.
augmentation_TA_list = [
    (Identity, 0., 1.0),
    (ShearX, -0.3, 0.3),  # 0
    (ShearY, -0.3, 0.3),  # 1
    (TranslateX, -0.45, 0.45),  # 2
    (TranslateY, -0.45, 0.45),  # 3
    (Rotate, -30, 30),  # 4
    (AutoContrast, 0, 1),  # 5
    # (Invert, 0, 1),  # 6
    (Equalize, 0, 1),  # 7
    (Solarize, 0, 256),  # 8
    (Posterize, 4, 8),  # 9
    (Contrast, 0.1, 1.9),  # 10
    (Color, 0.1, 1.9),  # 11
    (Brightness, 0.1, 1.9),  # 12
    (Sharpness, 0.1, 1.9),  # 13
    (Flip, 0., 1.0),  # Additional 15
    (Cutout16, 0, 20),  # (RandCutout, 0, 20), # compatible with auto-augment
    (RandCrop, 0., 1.0),  # 17
]
def get_augment(name):
    """Look up the (fn, low, high) triple for an op by its function name."""
    return augment_dict[name]
def apply_augment(img, name, level):
    """Apply op `name` at normalized magnitude `level` in [0, 1] to a copy of `img`."""
    augment_fn, low, high = get_augment(name)
    magnitude = low + level * (high - low)
    return augment_fn(img.copy(), magnitude)
class Lighting(object):
    """Lighting noise(AlexNet - style PCA - based noise)"""

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)

    def __call__(self, img):
        if self.alphastd == 0:
            # Disabled: return the input unchanged.
            return img
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        weighted = (self.eigvec.type_as(img).clone()
                    .mul(alpha.view(1, 3).expand(3, 3))
                    .mul(self.eigval.view(1, 3).expand(3, 3)))
        rgb_shift = weighted.sum(1).squeeze()
        return img.add(rgb_shift.view(3, 1, 1).expand_as(img))
class Augmentation_DeepAA(object):
    # Applies a learned DeepAA policy: each policy stage samples one
    # (op, magnitude) candidate from its categorical distribution and applies
    # it to the image in sequence.
    def __init__(self, EXP='cifar', use_crop=False):
        self.use_crop = use_crop
        policy_data = np.load('./policy_port/policy_DeepAA_{}.npz'.format(EXP))
        # presumably one row of probabilities per stage over the candidate
        # (op, mag) pairs — confirm against the saved npz layout.
        self.policy_probs = policy_data['policy_probs']
        self.l_ops = policy_data['l_ops']
        self.l_mags = policy_data['l_mags']
        self.ops = policy_data['ops']
        self.mags = policy_data['mags']
        self.op_names = policy_data['op_names']
    def __call__(self, img):
        for k_policy in self.policy_probs:
            # Sample one candidate index per stage, weighted by the policy.
            k_samp = random.choices(range(len(k_policy)), weights=k_policy, k=1)[0]
            op, mag = np.squeeze(self.ops[k_samp]), np.squeeze(self.mags[k_samp]).astype(np.float32)/float(self.l_mags-1)
            op_name = self.op_names[op].split(':')[0]
            img = apply_augment(img, op_name, mag)
        if self.use_crop:
            w, h = img.size
            if w==IMAGENET_SIZE[0] and h==IMAGENET_SIZE[1]:
                return img
            # return centerCrop_imagenet(Resize_imagenet(img, 256), None)
            return centerCrop_imagenet(img, None)
        return img
IMAGENET_SIZE = (224, 224) | 16,098 | 30.879208 | 133 | py |
DeepAA | DeepAA-master/DeepAA_evaluate/utils.py | import torch
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import torchvision.transforms.functional as F
plt.rcParams["savefig.bbox"] = 'tight'
def save_images(imgs, dir):
    """Render one or more image tensors side by side, save to `dir`, return the figure.

    Accepts a single tensor or a list of CHW tensors.  (The garbled metadata
    fused onto the original return line has been removed, and the figure
    variable is named `fig` instead of the misleading `fix`.)
    """
    if not isinstance(imgs, list):
        imgs = [imgs]
    fig, axs = plt.subplots(ncols=len(imgs), squeeze=False)
    for i, img in enumerate(imgs):
        img = img.detach()
        img = F.to_pil_image(img)
        axs[0, i].imshow(np.asarray(img))
        # Hide ticks/labels so only the images show.
        axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
    fig.savefig(dir)
    return fig
DeepAA | DeepAA-master/DeepAA_evaluate/data.py | import logging
import os
import random
from collections import Counter
import torchvision
from PIL import Image
from torch.utils.data import SubsetRandomSampler, Sampler
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.dataset import ConcatDataset, Subset
from torchvision.transforms import transforms
from sklearn.model_selection import StratifiedShuffleSplit
from theconf import Config as C
from DeepAA_evaluate.augmentations import *
from DeepAA_evaluate.common import get_logger, copy_and_replace_transform, stratified_split, denormalize
from DeepAA_evaluate.imagenet import ImageNet
from DeepAA_evaluate.augmentations import Lighting
from DeepAA_evaluate.deep_autoaugment import Augmentation_DeepAA
logger = get_logger('DeepAA_evaluate')
logger.setLevel(logging.INFO)
# PCA eigen-decomposition of ImageNet RGB covariance (AlexNet lighting noise).
_IMAGENET_PCA = {
    'eigval': [0.2175, 0.0188, 0.0045],
    'eigvec': [
        [-0.5675, 0.7192, 0.4009],
        [-0.5808, -0.0045, -0.8140],
        [-0.5836, -0.6948, 0.4203],
    ]
}
_CIFAR_MEAN, _CIFAR_STD = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010) # these are for CIFAR 10, not for cifar100 actually. They are pretty similar, though.
# mean for CIFAR-100: tensor([0.5071, 0.4866, 0.4409])
def expand(num_classes, dtype, tensor, device=None):
    """One-hot encode a 1-D integer label tensor.

    Args:
        num_classes: width of the one-hot encoding.
        dtype: dtype of the returned tensor.
        tensor: (N,) integer class labels (on the same device as the output).
        device: target device; defaults to CUDA, preserving the original
            hard-coded behavior for existing callers.

    Returns:
        (N, num_classes) one-hot tensor on `device`.
    """
    if device is None:
        device = torch.device("cuda")
    e = torch.zeros(
        tensor.size(0), num_classes, dtype=dtype, device=device
    )
    e = e.scatter(1, tensor.unsqueeze(1), 1.0)
    return e
def mixup_data(data, label, alpha):
    """Mixup: convex-combine the batch with a random permutation of itself.

    Returns (mixed_data, label_a, label_b, lam); the effective training target
    is lam * label_a + (1 - lam) * label_b.  alpha <= 0 disables mixing.
    """
    with torch.no_grad():
        lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
        batch_size = data.size()[0]
        perm = torch.randperm(batch_size).to(data.device)
        shuffled = data[perm, :]
        mixed = lam * data + (1.0 - lam) * shuffled
    return mixed, label, label[perm], lam
class PrefetchedWrapper(object):
    # Ref: https://github.com/NVIDIA/DeepLearningExamples/blob/d788e8d4968e72c722c5148a50a7d4692f6e7bd3/PyTorch/Classification/ConvNets/image_classification/dataloaders.py#L405
    def prefetched_loader(loader, num_classes, one_hot):
        # Generator that overlaps host->GPU copies and normalization of the
        # next batch (on a side CUDA stream) with consumption of the current one.
        # NOTE(review): defined without `self`/@staticmethod; it is only ever
        # accessed as PrefetchedWrapper.prefetched_loader(...), which works in
        # Python 3 where class-level functions are plain functions.
        mean = (
            torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255])
            .cuda()
            .view(1, 3, 1, 1)
        )
        std = (
            torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255])
            .cuda()
            .view(1, 3, 1, 1)
        )
        stream = torch.cuda.Stream()
        first = True
        for next_input, next_target in loader:
            with torch.cuda.stream(stream):
                next_input = next_input.cuda(non_blocking=True)
                next_target = next_target.cuda(non_blocking=True)
                next_input = next_input.float()
                if one_hot:
                    # One-hot path is intentionally disabled (num_classes is None).
                    raise Exception('Currently do not use onehot encoding, becasue num_calsses==None')
                    next_target = expand(num_classes, torch.float, next_target)
                next_input = next_input.sub_(mean).div_(std)
            # Yield the previously prepared batch while the next one copies.
            if not first:
                yield input, target
            else:
                first = False
            torch.cuda.current_stream().wait_stream(stream)
            input = next_input
            target = next_target
        # Flush the last prepared batch.
        yield input, target

    def __init__(self, dataloader, start_epoch, num_classes, one_hot):
        self.dataloader = dataloader
        self.epoch = start_epoch
        self.one_hot = one_hot
        self.num_classes = num_classes

    def __iter__(self):
        # Keep distributed shuffling in sync with the epoch counter.
        if self.dataloader.sampler is not None and isinstance(
            self.dataloader.sampler, torch.utils.data.distributed.DistributedSampler
        ):
            self.dataloader.sampler.set_epoch(self.epoch)
        self.epoch += 1
        return PrefetchedWrapper.prefetched_loader(
            self.dataloader, self.num_classes, self.one_hot
        )

    def __len__(self):
        return len(self.dataloader)
def get_dataloaders(dataset, batch, dataroot, split=0.15, split_idx=0, distributed=False, started_with_spawn=False, summary_writer=None):
    """Build the data pipeline for the requested dataset.

    Selects train/test transforms by dataset name and the active augmentation
    policy (``C.get()['aug']``), constructs the datasets, applies optional
    train/valid splitting or distributed sampling, and returns
    ``(train_sampler, trainloader, validloader, testloader, test_trainloader,
    dataset_info)`` where ``dataset_info`` holds mean/std, image dims and the
    number of labels.

    NOTE(review): branch order below matters — e.g. 'smallwidth_imagenet'
    also contains the substring 'imagenet', so the earlier 'imagenet'
    branches must not match it first; verified only for the names used here.
    """
    print(f'started with spawn {started_with_spawn}')
    dataset_info = {}
    # Extra transforms applied *before* the policy augmentation (inserted at
    # position 0 of transform_train further down).
    pre_transform_train = transforms.Compose([])
    # ---- per-dataset (and per-policy) transform selection -----------------
    if 'cifar' in dataset and (C.get()['aug'] in ['DeepAA']):
        # DeepAA supplies crop/flip itself, so the baseline crop/flip stays off.
        transform_train = transforms.Compose([
            # transforms.RandomCrop(32, padding=4),
            # transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        dataset_info['mean'] = _CIFAR_MEAN
        dataset_info['std'] = _CIFAR_STD
        dataset_info['img_dims'] = (3,32,32)
        dataset_info['num_labels'] = 100 if '100' in dataset and 'ten' not in dataset else 10
    elif 'cifar' in dataset:
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        dataset_info['mean'] = _CIFAR_MEAN
        dataset_info['std'] = _CIFAR_STD
        dataset_info['img_dims'] = (3,32,32)
        dataset_info['num_labels'] = 100 if '100' in dataset and 'ten' not in dataset else 10
    elif 'pre_transform_cifar' in dataset:
        # Crop/flip run before the policy augmentation instead of after it.
        pre_transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),])
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
        dataset_info['mean'] = _CIFAR_MEAN
        dataset_info['std'] = _CIFAR_STD
        dataset_info['img_dims'] = (3, 32, 32)
        dataset_info['num_labels'] = 100 if '100' in dataset and 'ten' not in dataset else 10
    elif 'svhn' in dataset:
        svhn_mean = [0.4379, 0.4440, 0.4729]
        svhn_std = [0.1980, 0.2010, 0.1970]
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(svhn_mean, svhn_std),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(svhn_mean, svhn_std),
        ])
        dataset_info['mean'] = svhn_mean
        dataset_info['std'] = svhn_std
        dataset_info['img_dims'] = (3, 32, 32)
        dataset_info['num_labels'] = 10
    elif 'imagenet' in dataset and C.get()['aug'] in ['DeepAA']:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # Image size (224, 224) instead of (224, 244) in TA
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, interpolation=Image.BICUBIC),
            transforms.CenterCrop((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        dataset_info['mean'] = [0.485, 0.456, 0.406]
        dataset_info['std'] = [0.229, 0.224, 0.225]
        dataset_info['img_dims'] = (3,224,224)
        dataset_info['num_labels'] = 1000
    elif 'imagenet' in dataset and C.get()['aug']=='inception':
        transform_train = transforms.Compose([
            transforms.RandomResizedCrop((224,224), scale=(0.08, 1.0), interpolation=Image.BICUBIC), # Image size (224, 224) instead of (224, 244) in TA
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
            ),
            transforms.ToTensor(),
            Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, interpolation=Image.BICUBIC),
            transforms.CenterCrop((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        dataset_info['mean'] = [0.485, 0.456, 0.406]
        dataset_info['std'] = [0.229, 0.224, 0.225]
        dataset_info['img_dims'] = (3,224,224)
        dataset_info['num_labels'] = 1000
    elif 'smallwidth_imagenet' in dataset:
        transform_train = transforms.Compose([
            transforms.RandomResizedCrop((224,224), scale=(0.08, 1.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
            ),
            transforms.ToTensor(),
            Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, interpolation=Image.BICUBIC),
            transforms.CenterCrop((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        dataset_info['mean'] = [0.485, 0.456, 0.406]
        dataset_info['std'] = [0.229, 0.224, 0.225]
        dataset_info['img_dims'] = (3,224,224)
        dataset_info['num_labels'] = 1000
    elif 'ohl_pipeline_imagenet' in dataset:
        # OHL-style pipeline: crop/flip happen before the policy, and only the
        # mean is subtracted (std left at 1).
        pre_transform_train = transforms.Compose([
            transforms.RandomResizedCrop((224, 224), scale=(0.08, 1.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
        ])
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[1.,1.,1.])
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, interpolation=Image.BICUBIC),
            transforms.CenterCrop((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[1.,1.,1.])
        ])
        dataset_info['mean'] = [0.485, 0.456, 0.406]
        dataset_info['std'] = [1.,1.,1.]
        dataset_info['img_dims'] = (3,224,224)
        dataset_info['num_labels'] = 1000
    elif 'largewidth_imagenet' in dataset:
        # NOTE(review): (224, 244) is the non-square "large width" variant the
        # name refers to (matching TA's 244); confirm it is intentional.
        transform_train = transforms.Compose([
            transforms.RandomResizedCrop((224, 244), scale=(0.08, 1.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
            ),
            transforms.ToTensor(),
            Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, interpolation=Image.BICUBIC),
            transforms.CenterCrop((224, 244)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        dataset_info['mean'] = [0.485, 0.456, 0.406]
        dataset_info['std'] = [0.229, 0.224, 0.225]
        dataset_info['img_dims'] = (3, 224, 244)
        dataset_info['num_labels'] = 1000
    else:
        raise ValueError('dataset=%s' % dataset)
    logger.debug('augmentation: %s' % C.get()['aug'])
    # ---- insert the augmentation policy at the front of transform_train ----
    if C.get()['aug'] == 'randaugment':
        assert not C.get()['randaug'].get('corrected_sample_space') and not C.get()['randaug'].get('google_augmentations')
        transform_train.transforms.insert(0, get_randaugment(n=C.get()['randaug']['N'], m=C.get()['randaug']['M'],
                                                             weights=C.get()['randaug'].get('weights',None), bs=C.get()['batch']))
    elif C.get()['aug'] in ['default', 'inception', 'inception320']:
        pass
    elif C.get()['aug'] in ['DeepAA']:
        transform_train.transforms.insert(0, Augmentation_DeepAA(EXP = C.get()['deepaa']['EXP'],
                                                                 use_crop = ('imagenet' in dataset) and C.get()['aug'] == 'DeepAA'
                                                                 ))
    else:
        raise ValueError('not found augmentations. %s' % C.get()['aug'])
    # pre_transform_train runs before the policy (it is inserted last, at 0).
    transform_train.transforms.insert(0, pre_transform_train)
    if C.get()['cutout'] > 0:
        transform_train.transforms.append(CutoutDefault(C.get()['cutout']))
    # When an external preprocessor does normalization, the dataset only
    # crops/centers and hands out raw HWC byte tensors.
    if 'preprocessor' in C.get():
        if 'imagenet' in dataset:
            print("Only using cropping/centering transforms on dataset, since preprocessor active.")
            transform_train = transforms.Compose([
                transforms.RandomResizedCrop(224, scale=(0.08, 1.0), interpolation=Image.BICUBIC),
                PILImageToHWCByteTensor(),
            ])
            transform_test = transforms.Compose([
                transforms.Resize(256, interpolation=Image.BICUBIC),
                transforms.CenterCrop(224),
                PILImageToHWCByteTensor(),
            ])
        else:
            print("Not using any transforms in dataset, since preprocessor is active.")
            transform_train = PILImageToHWCByteTensor()
            transform_test = PILImageToHWCByteTensor()
    # ---- dataset construction ---------------------------------------------
    if dataset in ('cifar10', 'pre_transform_cifar10'):
        total_trainset = torchvision.datasets.CIFAR10(root=dataroot, train=True, download=True, transform=transform_train)
        testset = torchvision.datasets.CIFAR10(root=dataroot, train=False, download=True, transform=transform_test)
    elif dataset in ('cifar100', 'pre_transform_cifar100'):
        total_trainset = torchvision.datasets.CIFAR100(root=dataroot, train=True, download=True, transform=transform_train)
        testset = torchvision.datasets.CIFAR100(root=dataroot, train=False, download=True, transform=transform_test)
    elif dataset == 'svhncore':
        total_trainset = torchvision.datasets.SVHN(root=dataroot, split='train', download=True,
                                                   transform=transform_train)
        testset = torchvision.datasets.SVHN(root=dataroot, split='test', download=True, transform=transform_test)
    elif dataset == 'svhn':
        # 'svhn' = train + extra splits; 'svhncore' above is train only.
        trainset = torchvision.datasets.SVHN(root=dataroot, split='train', download=True, transform=transform_train)
        extraset = torchvision.datasets.SVHN(root=dataroot, split='extra', download=True, transform=transform_train)
        total_trainset = ConcatDataset([trainset, extraset])
        testset = torchvision.datasets.SVHN(root=dataroot, split='test', download=True, transform=transform_test)
    elif dataset in ('imagenet', 'ohl_pipeline_imagenet', 'smallwidth_imagenet'):
        # Ignore archive only means to not to try to extract the files again, because they already are and the zip files
        # are not there no more
        total_trainset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), transform=transform_train, ignore_archive=True)
        testset = ImageNet(root=os.path.join(dataroot, 'imagenet-pytorch'), split='val', transform=transform_test, ignore_archive=True)
        # compatibility
        total_trainset.targets = [lb for _, lb in total_trainset.samples]
    else:
        raise ValueError('invalid dataset name=%s' % dataset)
    # Optionally discard a stratified share of the training set (and possibly
    # reuse the discarded part as a validation/test set).
    if 'throwaway_share_of_ds' in C.get():
        assert 'val_step_trainloader_val_share' not in C.get()
        share = C.get()['throwaway_share_of_ds']['throwaway_share']
        train_subset_inds, rest_inds = stratified_split(total_trainset.targets if hasattr(total_trainset, 'targets') else list(total_trainset.labels),share)
        if C.get()['throwaway_share_of_ds']['use_throwaway_as_val']:
            testset = copy_and_replace_transform(Subset(total_trainset, rest_inds), transform_test)
        total_trainset = Subset(total_trainset, train_subset_inds)
    train_sampler = None
    # Stratified train/valid split; split_idx selects which of the 5 folds.
    if split > 0.0:
        sss = StratifiedShuffleSplit(n_splits=5, test_size=split, random_state=0)
        sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)
        for _ in range(split_idx + 1):
            train_idx, valid_idx = next(sss)
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetSampler(valid_idx)
    else:
        valid_sampler = SubsetSampler([])
    if distributed:
        assert split == 0.0, "Split not supported for distributed training."
        if C.get().get('all_workers_use_the_same_batches', False):
            # num_replicas=1/rank=0 makes every worker draw identical batches.
            train_sampler = DistributedSampler(total_trainset, num_replicas=1, rank=0)
        else:
            train_sampler = DistributedSampler(total_trainset)
        test_sampler = None
        test_train_sampler = None # if these are specified, acc/loss computation is wrong for results.
        # while one has to say, that this setting leads to the test sets being computed seperately on each gpu which
        # might be considered not-very-climate-friendly
    else:
        test_sampler = None
        test_train_sampler = None
    trainloader = torch.utils.data.DataLoader(
        total_trainset, batch_size=batch, shuffle=train_sampler is None, num_workers= os.cpu_count()//8 if distributed else 32, # fix the data loader
        pin_memory=True,
        sampler=train_sampler, drop_last=True, persistent_workers=True)
    validloader = torch.utils.data.DataLoader(
        total_trainset, batch_size=batch, shuffle=False, num_workers=0 if started_with_spawn else 8, pin_memory=True,
        sampler=valid_sampler, drop_last=False)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=batch, shuffle=False, num_workers=16 if started_with_spawn else 8, pin_memory=True,
        drop_last=False, sampler=test_sampler, persistent_workers=True
    )
    # We use this 'hacky' solution s.t. we do not need to keep the dataset twice in memory.
    test_total_trainset = copy_and_replace_transform(total_trainset, transform_test)
    test_trainloader = torch.utils.data.DataLoader(
        test_total_trainset, batch_size=batch, shuffle=False, num_workers=0 if started_with_spawn else 8, pin_memory=True,
        drop_last=False, sampler=test_train_sampler
    )
    test_trainloader.denorm = lambda x: denormalize(x, dataset_info['mean'], dataset_info['std'])
    return train_sampler, trainloader, validloader, testloader, test_trainloader, dataset_info
    # trainloader_prefetch = PrefetchedWrapper(trainloader, start_epoch=0, num_classes=None, one_hot=False)
    # testloader_prefetch = PrefetchedWrapper(testloader, start_epoch=0, num_classes=None, one_hot=False)
    # return train_sampler, trainloader_prefetch, validloader, testloader_prefetch, test_trainloader, dataset_info
class SubsetSampler(Sampler):
    """Yields the given indices deterministically, in order and without
    replacement (unlike SubsetRandomSampler) — used for the validation split.

    Arguments:
        indices (sequence): a sequence of indices
    """
    def __init__(self, indices):
        self.indices = indices
    def __iter__(self):
        yield from self.indices
    def __len__(self):
        return len(self.indices)
DeepAA | DeepAA-master/DeepAA_evaluate/fast_autoaugment.py | # code in this file is adapted from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from torchvision.transforms.transforms import Compose
random_mirror = True
def ShearX(img, v):  # [-0.3, 0.3]
    """Shear horizontally by factor v; sign is randomly mirrored."""
    assert -0.3 <= v <= 0.3
    if random_mirror and random.random() > 0.5:
        v = -v
    # AFFINE data is (a, b, c, d, e, f): x_src = a*x + b*y + c, y_src = d*x + e*y + f.
    return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v):  # [-0.3, 0.3]
    """Shear vertically by factor v; sign is randomly mirrored."""
    assert -0.3 <= v <= 0.3
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v):  # fraction of image width, [-0.45, 0.45]
    """Translate horizontally by v * width pixels; sign is randomly mirrored."""
    assert -0.45 <= v <= 0.45
    if random_mirror and random.random() > 0.5:
        v = -v
    v = v * img.size[0]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v):  # fraction of image height, [-0.45, 0.45]
    """Translate vertically by v * height pixels; sign is randomly mirrored."""
    assert -0.45 <= v <= 0.45
    if random_mirror and random.random() > 0.5:
        v = -v
    v = v * img.size[1]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def TranslateXAbs(img, v):  # absolute shift in pixels, [0, 10]
    """Translate horizontally by v pixels; sign is randomly mirrored."""
    assert 0 <= v <= 10
    if random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateYAbs(img, v):  # absolute shift in pixels, [0, 10]
    """Translate vertically by v pixels; sign is randomly mirrored."""
    assert 0 <= v <= 10
    if random.random() > 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v):  # [-30, 30]
    """Rotate by v degrees; sign is randomly mirrored."""
    assert -30 <= v <= 30
    if random_mirror and random.random() > 0.5:
        v = -v
    return img.rotate(v)
def AutoContrast(img, _):
    """Maximize image contrast; the magnitude argument is ignored."""
    return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
    """Invert all pixel values; the magnitude argument is ignored."""
    return PIL.ImageOps.invert(img)
def Equalize(img, _):
    """Equalize the image histogram; the magnitude argument is ignored."""
    return PIL.ImageOps.equalize(img)
def Flip(img, _):
    """Mirror the image horizontally; the magnitude argument is ignored.
    (Not part of the original paper's op set.)"""
    return PIL.ImageOps.mirror(img)
def Solarize(img, v):
    """Invert all pixels above threshold v, with v in [0, 256]."""
    assert 0 <= v <= 256
    return PIL.ImageOps.solarize(img, v)
def Posterize(img, v):
    """Keep int(v) bits per channel, with v in [4, 8]."""
    assert 4 <= v <= 8
    bits = int(v)
    return PIL.ImageOps.posterize(img, bits)
def Posterize2(img, v):
    """Keep int(v) bits per channel, with v in [0, 4] (AutoAugment variant)."""
    assert 0 <= v <= 4
    bits = int(v)
    return PIL.ImageOps.posterize(img, bits)
def Contrast(img, v):
    """Scale contrast by v in [0.1, 1.9]; v = 1.0 is the identity."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Contrast(img)
    return enhancer.enhance(v)
def Color(img, v):
    """Scale color saturation by v in [0.1, 1.9]; v = 1.0 is the identity."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Color(img)
    return enhancer.enhance(v)
def Brightness(img, v):
    """Scale brightness by v in [0.1, 1.9]; v = 1.0 is the identity."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Brightness(img)
    return enhancer.enhance(v)
def Sharpness(img, v):
    """Scale sharpness by v in [0.1, 1.9]; v = 1.0 is the identity."""
    assert 0.1 <= v <= 1.9
    enhancer = PIL.ImageEnhance.Sharpness(img)
    return enhancer.enhance(v)
def Cutout(img, v):  # fraction of image width, [0, 0.2]
    """Cut out a square with side v * width, filled with the mean color."""
    assert 0.0 <= v <= 0.2
    if v <= 0.:
        return img
    v = v * img.size[0]
    return CutoutAbs(img, v)
def CutoutAbs(img, v):  # v is the cutout side length in pixels
    """Cut out a v x v square at a random center, filled with the CIFAR mean
    color. Only the top-left corner is clipped to the image, so the box may
    extend past the right/bottom edge (the drawn region is then smaller)."""
    # assert 0 <= v <= 20
    if v < 0:
        return img
    w, h = img.size
    x0 = np.random.uniform(w)
    y0 = np.random.uniform(h)
    x0 = int(max(0, x0 - v / 2.))
    y0 = int(max(0, y0 - v / 2.))
    x1 = min(w, x0 + v)
    y1 = min(h, y0 + v)
    xy = (x0, y0, x1, y1)
    color = (125, 123, 114)
    # color = (0, 0, 0)
    img = img.copy()  # do not mutate the caller's image
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img
def SamplePairing(imgs):  # blend weight range [0, 0.4]
    """Return an op that blends its input with a random image from `imgs`
    (SamplePairing augmentation); the op's v is the blend weight."""
    def f(img1, v):
        i = np.random.choice(len(imgs))
        img2 = PIL.Image.fromarray(imgs[i])
        return PIL.Image.blend(img1, img2, v)
    return f
# =============== OPS for DeepAA ==============:
def mean_pad_randcrop(img, v):
    """Pad v pixels on each side with the mean color, then take a random crop
    back to the original size (standard CIFAR-style random crop)."""
    # v: Pad with mean value=[125, 123, 114] by v pixels on each side and then take random crop
    assert v <= 10, 'The maximum shift should be less then 10'
    padded_size = (img.size[0] + 2*v, img.size[1] + 2*v)
    new_img = PIL.Image.new('RGB', padded_size, color=(125, 123, 114))
    new_img.paste(img, (v, v))
    top = random.randint(0, v*2)
    left = random.randint(0, v*2)
    new_img = new_img.crop((left, top, left + img.size[0], top + img.size[1]))
    return new_img
def Cutout_default(img, v): # Used in FastAA, different from CutoutABS, the actual cutout size can be smaller than v on the boundary
    """Cut out a v x v square at a uniformly random center, clipped to the
    image on all four sides (so boundary cutouts shrink)."""
    # Passed random number generation test
    # assert 0 <= v <= 20
    if v < 0:
        return img
    w, h = img.size
    # Uses `random` (not np.random) deliberately — see the RNG-test note above.
    # x = np.random.uniform(w)
    # y = np.random.uniform(h)
    x = random.uniform(0, w)
    y = random.uniform(0, h)
    x0 = int(min(w, max(0, x - v // 2))) # clip to the range (0, w)
    x1 = int(min(w, max(0, x + v // 2)))
    y0 = int(min(h, max(0, y - v // 2)))
    y1 = int(min(h, max(0, y + v // 2)))
    xy = (x0, y0, x1, y1)
    color = (125, 123, 114)
    # color = (0, 0, 0)
    img = img.copy()  # do not mutate the caller's image
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img
def RandCrop(img, _):
    """Random crop after 4-pixel mean padding; magnitude argument ignored."""
    v = 4
    return mean_pad_randcrop(img, v)
def RandCutout(img, _):
    """Cutout with a fixed 16-pixel side; magnitude argument ignored."""
    v = 16 # Cutout 0.5 means 0.5*32=16 pixels as in the FastAA paper
    return Cutout_default(img, v)
def RandFlip(img, _):
    """Horizontal flip with probability 0.5; magnitude argument ignored."""
    if random.random() > 0.5:
        img = Flip(img, None)
    return img
def Identity(img, _):
    """No-op augmentation."""
    return img
def augment_list(for_autoaug=True, for_DeepAA=False):
    """Return the augmentation search space as (op, low, high) triples.

    The base list holds the 15 standard operations with their magnitude
    ranges. `for_autoaug` appends the AutoAugment-compatible variants,
    `for_DeepAA` appends the parameter-free DeepAA operations.
    """
    ops = [
        (ShearX, -0.3, 0.3),        # 0
        (ShearY, -0.3, 0.3),        # 1
        (TranslateX, -0.45, 0.45),  # 2
        (TranslateY, -0.45, 0.45),  # 3
        (Rotate, -30, 30),          # 4
        (AutoContrast, 0, 1),       # 5
        (Invert, 0, 1),             # 6
        (Equalize, 0, 1),           # 7
        (Solarize, 0, 256),         # 8
        (Posterize, 4, 8),          # 9
        (Contrast, 0.1, 1.9),       # 10
        (Color, 0.1, 1.9),          # 11
        (Brightness, 0.1, 1.9),     # 12
        (Sharpness, 0.1, 1.9),      # 13
        (Cutout, 0, 0.2),           # 14
        # (SamplePairing(imgs), 0, 0.4),  # 15 -- needs the image pool, disabled
    ]
    if for_autoaug:
        ops.extend([
            (CutoutAbs, 0, 20),       # compatible with auto-augment
            (Posterize2, 0, 4),
            (TranslateXAbs, 0, 10),
            (TranslateYAbs, 0, 10),
        ])
    if for_DeepAA:
        ops.extend([
            (Identity, 0., 1.0),
            (RandFlip, 0., 1.0),
            (RandCutout, 0., 1.0),
            (RandCrop, 0., 1.0),
        ])
    return ops
# Name -> (fn, low, high) lookup table used by get_augment()/apply_augment().
augment_dict = {fn.__name__: (fn, v1, v2) for fn, v1, v2 in augment_list()}
def get_augment(name):
    """Look up an augmentation by name; returns the (fn, low, high) triple."""
    return augment_dict[name]
def apply_augment(img, name, level):
    """Apply augmentation `name` to a copy of `img`.

    `level` in [0, 1] is linearly mapped onto the op's magnitude range.
    """
    fn, low, high = get_augment(name)
    magnitude = low + level * (high - low)
    return fn(img.copy(), magnitude)
class Lighting(object):
    """AlexNet-style PCA-based lighting (color) noise.

    Draws per-channel coefficients from N(0, alphastd) and adds the
    corresponding combination of the RGB principal components to the image.
    With alphastd == 0 the input is returned unchanged.
    """
    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)
    def __call__(self, img):
        if self.alphastd == 0:
            return img
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        # Weight each eigenvector by its sampled alpha and its eigenvalue,
        # then sum over components to get a per-channel RGB offset.
        weighted = self.eigvec.type_as(img).clone()
        weighted = weighted.mul(alpha.view(1, 3).expand(3, 3))
        weighted = weighted.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = weighted.sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img))
def fa_reduced_cifar10():
p = [[["Contrast", 0.8320659688593578, 0.49884310562180767], ["TranslateX", 0.41849883971249136, 0.394023086494538]], [["Color", 0.3500483749890918, 0.43355143929883955], ["Color", 0.5120716140300229, 0.7508299643325016]], [["Rotate", 0.9447932604389472, 0.29723465088990375], ["Sharpness", 0.1564936149799504, 0.47169309978091745]], [["Rotate", 0.5430015349185097, 0.6518626678905443], ["Color", 0.5694844928020679, 0.3494533005430269]], [["AutoContrast", 0.5558922032451064, 0.783136004977799], ["TranslateY", 0.683914191471972, 0.7597025305860181]], [["TranslateX", 0.03489224481658926, 0.021025488042663354], ["Equalize", 0.4788637403857401, 0.3535481281496117]], [["Sharpness", 0.6428916269794158, 0.22791511918580576], ["Contrast", 0.016014045073950323, 0.26811312269487575]], [["Rotate", 0.2972727228410451, 0.7654251516829896], ["AutoContrast", 0.16005809254943348, 0.5380523650108116]], [["Contrast", 0.5823671057717301, 0.7521166301398389], ["TranslateY", 0.9949449214751978, 0.9612671341689751]], [["Equalize", 0.8372126687702321, 0.6944127225621206], ["Rotate", 0.25393282929784755, 0.3261658365286546]], [["Invert", 0.8222011603194572, 0.6597915864008403], ["Posterize", 0.31858707654447327, 0.9541013715579584]], [["Sharpness", 0.41314621282107045, 0.9437344470879956], ["Cutout", 0.6610495837889337, 0.674411664255093]], [["Contrast", 0.780121736705407, 0.40826152397463156], ["Color", 0.344019192125256, 0.1942922781355767]], [["Rotate", 0.17153139555621344, 0.798745732456474], ["Invert", 0.6010555860501262, 0.320742172554767]], [["Invert", 0.26816063450777416, 0.27152062163148327], ["Equalize", 0.6786829200236982, 0.7469412443514213]], [["Contrast", 0.3920564414367518, 0.7493644582838497], ["TranslateY", 0.8941657805606704, 0.6580846856375955]], [["Equalize", 0.875509207399372, 0.9061130537645283], ["Cutout", 0.4940280679087308, 0.7896229623628276]], [["Contrast", 0.3331423298065147, 0.7170041362529597], ["ShearX", 0.7425484291842793, 0.5285117152426109]], [["Equalize", 
0.97344237365026, 0.4745759720473106], ["TranslateY", 0.055863458430295276, 0.9625142022954672]], [["TranslateX", 0.6810614083109192, 0.7509937355495521], ["TranslateY", 0.3866463019475701, 0.5185481505576112]], [["Sharpness", 0.4751529944753671, 0.550464012488733], ["Cutout", 0.9472914750534814, 0.5584925992985023]], [["Contrast", 0.054606784909375095, 0.17257080196712182], ["Cutout", 0.6077026782754803, 0.7996504165944938]], [["ShearX", 0.328798428243695, 0.2769563264079157], ["Cutout", 0.9037632437023772, 0.4915809476763595]], [["Cutout", 0.6891202672363478, 0.9951490996172914], ["Posterize", 0.06532762462628705, 0.4005246609075227]], [["TranslateY", 0.6908583592523334, 0.725612120376128], ["Rotate", 0.39907735501746666, 0.36505798032223147]], [["TranslateX", 0.10398364107399072, 0.5913918470536627], ["Rotate", 0.7169811539340365, 0.8283850670648724]], [["ShearY", 0.9526373530768361, 0.4482347365639251], ["Contrast", 0.4203947336351471, 0.41526799558953864]], [["Contrast", 0.24894431199700073, 0.09578870500994707], ["Solarize", 0.2273713345927395, 0.6214942914963707]], [["TranslateX", 0.06331228870032912, 0.8961907489444944], ["Cutout", 0.5110007859958743, 0.23704875994050723]], [["Cutout", 0.3769183548846172, 0.6560944580253987], ["TranslateY", 0.7201924599434143, 0.4132476526938319]], [["Invert", 0.6707431156338866, 0.11622795952464149], ["Posterize", 0.12075972752370845, 0.18024933294172307]], [["Color", 0.5010057264087142, 0.5277767327434318], ["Rotate", 0.9486115946366559, 0.31485546630220784]], [["ShearX", 0.31741302466630406, 0.1991215806270692], ["Invert", 0.3744727015523084, 0.6914113986757578]], [["Brightness", 0.40348479064392617, 0.8924182735724888], ["Brightness", 0.1973098763857779, 0.3939288933689655]], [["Color", 0.01208688664030888, 0.6055693000885217], ["Equalize", 0.433259451147881, 0.420711137966155]], [["Cutout", 0.2620018360076487, 0.11594468278143644], ["Rotate", 0.1310401567856766, 0.7244318146544101]], [["ShearX", 0.15249651845933576, 
0.35277277071866986], ["Contrast", 0.28221794032094016, 0.42036586509397444]], [["Brightness", 0.8492912150468908, 0.26386920887886056], ["Solarize", 0.8764208056263386, 0.1258195122766067]], [["ShearX", 0.8537058239675831, 0.8415101816171269], ["AutoContrast", 0.23958568830416294, 0.9889049529564014]], [["Rotate", 0.6463207930684552, 0.8750192129056532], ["Contrast", 0.6865032211768652, 0.8564981333033417]], [["Equalize", 0.8877190311811044, 0.7370995897848609], ["TranslateX", 0.9979660314391368, 0.005683998913244781]], [["Color", 0.6420017551677819, 0.6225337265571229], ["Solarize", 0.8344504978566362, 0.8332856969941151]], [["ShearX", 0.7439332981992567, 0.9747608698582039], ["Equalize", 0.6259189804002959, 0.028017478098245174]], [["TranslateY", 0.39794770293366843, 0.8482966537902709], ["Rotate", 0.9312935630405351, 0.5300586925826072]], [["Cutout", 0.8904075572021911, 0.3522934742068766], ["Equalize", 0.6431186289473937, 0.9930577962126151]], [["Contrast", 0.9183553386089476, 0.44974266209396685], ["TranslateY", 0.8193684583123862, 0.9633741156526566]], [["ShearY", 0.616078299924283, 0.19219314358924766], ["Solarize", 0.1480945914138868, 0.05922109541654652]], [["Solarize", 0.25332455064128157, 0.18853037431947994], ["ShearY", 0.9518390093954243, 0.14603930044061142]], [["Color", 0.8094378664335412, 0.37029830225408433], ["Contrast", 0.29504113617467465, 0.065096365468442]], [["AutoContrast", 0.7075167558685455, 0.7084621693458267], ["Sharpness", 0.03555539453323875, 0.5651948313888351]], [["TranslateY", 0.5969982600930229, 0.9857264201029572], ["Rotate", 0.9898628564873607, 0.1985685534926911]], [["Invert", 0.14915939942810352, 0.6595839632446547], ["Posterize", 0.768535289994361, 0.5997358684618563]], [["Equalize", 0.9162691815967111, 0.3331035307653627], ["Color", 0.8169118187605557, 0.7653910258006366]], [["Rotate", 0.43489185299530897, 0.752215269135173], ["Brightness", 0.1569828560334806, 0.8002808712857853]], [["Invert", 0.931876215328345, 
0.029428644395760872], ["Equalize", 0.6330036052674145, 0.7235531014288485]], [["ShearX", 0.5216138393704968, 0.849272958911589], ["AutoContrast", 0.19572688655120263, 0.9786551568639575]], [["ShearX", 0.9899586208275011, 0.22580547500610293], ["Brightness", 0.9831311903178727, 0.5055159610855606]], [["Brightness", 0.29179117009211486, 0.48003584672937294], ["Solarize", 0.7544252317330058, 0.05806581735063043]], [["AutoContrast", 0.8919800329537786, 0.8511261613698553], ["Contrast", 0.49199446084551035, 0.7302297140181429]], [["Cutout", 0.7079723710644835, 0.032565015538375874], ["AutoContrast", 0.8259782090388609, 0.7860708789468442]], [["Posterize", 0.9980262659801914, 0.6725084224935673], ["ShearY", 0.6195568269664682, 0.5444170291816751]], [["Posterize", 0.8687351834713217, 0.9978004914422602], ["Equalize", 0.4532646848325955, 0.6486748015710573]], [["Contrast", 0.2713928776950594, 0.15255249557027806], ["ShearY", 0.9276834387970199, 0.5266542862333478]], [["AutoContrast", 0.5240786618055582, 0.9325642258930253], ["Cutout", 0.38448627892037357, 0.21219415055662394]], [["TranslateX", 0.4299517937295352, 0.20133751201386152], ["TranslateX", 0.6753468310276597, 0.6985621035400441]], [["Rotate", 0.4006472499103597, 0.6704748473357586], ["Equalize", 0.674161668148079, 0.6528530101705237]], [["Equalize", 0.9139902833674455, 0.9015103149680278], ["Sharpness", 0.7289667720691948, 0.7623606352376232]], [["Cutout", 0.5911267429414259, 0.5953141187177585], ["Rotate", 0.5219064817468504, 0.11085141355857986]], [["TranslateX", 0.3620095133946267, 0.26194039409492476], ["Rotate", 0.3929841359545597, 0.4913406720338047]], [["Invert", 0.5175298901458896, 0.001661410821811482], ["Invert", 0.004656581318332242, 0.8157622192213624]], [["AutoContrast", 0.013609693335051465, 0.9318651749409604], ["Invert", 0.8980844358979592, 0.2268511862780368]], [["ShearY", 0.7717126261142194, 0.09975547983707711], ["Equalize", 0.7808494401429572, 0.4141412091009955]], [["TranslateX", 
0.5878675721341552, 0.29813268038163376], ["Posterize", 0.21257276051591356, 0.2837285296666412]], [["Brightness", 0.4268335108566488, 0.4723784991635417], ["Cutout", 0.9386262901570471, 0.6597686851494288]], [["ShearX", 0.8259423807590159, 0.6215304795389204], ["Invert", 0.6663365779667443, 0.7729669184580387]], [["ShearY", 0.4801338723951297, 0.5220145420100984], ["Solarize", 0.9165803796596582, 0.04299335502862134]], [["Color", 0.17621114853558817, 0.7092601754635434], ["ShearX", 0.9014406936728542, 0.6028711944367818]], [["Rotate", 0.13073284972300658, 0.9088831512880851], ["ShearX", 0.4228105332316806, 0.7985249783662675]], [["Brightness", 0.9182753692730031, 0.0063635477774044436], ["Color", 0.4279825602663798, 0.28727149118585327]], [["Equalize", 0.578218285372267, 0.9611758542158054], ["Contrast", 0.5471552264150691, 0.8819635504027596]], [["Brightness", 0.3208589067274543, 0.45324733565167497], ["Solarize", 0.5218455808633233, 0.5946097503647126]], [["Equalize", 0.3790381278653, 0.8796082535775276], ["Solarize", 0.4875526773149246, 0.5186585878052613]], [["ShearY", 0.12026461479557571, 0.1336953429068397], ["Posterize", 0.34373988646025766, 0.8557727670803785]], [["Cutout", 0.2396745247507467, 0.8123036135209865], ["Equalize", 0.05022807681008945, 0.6648492261984383]], [["Brightness", 0.35226676470748264, 0.5950011514888855], ["Rotate", 0.27555076067000894, 0.9170063321486026]], [["ShearX", 0.320224630647278, 0.9683584649071976], ["Invert", 0.6905585196648905, 0.5929115667894518]], [["Color", 0.9941395717559652, 0.7474441679798101], ["Sharpness", 0.7559998478658021, 0.6656052889626682]], [["ShearY", 0.4004220568345669, 0.5737646992826074], ["Equalize", 0.9983495213746147, 0.8307907033362303]], [["Color", 0.13726809242038207, 0.9378850119950549], ["Equalize", 0.9853362454752445, 0.42670264496554156]], [["Invert", 0.13514636153298576, 0.13516363849081958], ["Sharpness", 0.2031189356693901, 0.6110226359872745]], [["TranslateX", 0.7360305209630797, 
0.41849698571655614], ["Contrast", 0.8972161549144564, 0.7820296625565641]], [["Color", 0.02713118828682548, 0.717110684828096], ["TranslateY", 0.8118759006836348, 0.9120098002024992]], [["Sharpness", 0.2915428949403711, 0.7630303724396518], ["Solarize", 0.22030536162851078, 0.38654526772661757]], [["Equalize", 0.9949114839538582, 0.7193630656062793], ["AutoContrast", 0.00889496657931299, 0.2291400476524672]], [["Rotate", 0.7120948976490488, 0.7804359309791055], ["Cutout", 0.10445418104923654, 0.8022999156052766]], [["Equalize", 0.7941710117902707, 0.8648170634288153], ["Invert", 0.9235642581144047, 0.23810725859722381]], [["Cutout", 0.3669397998623156, 0.42612815083245004], ["Solarize", 0.5896322046441561, 0.40525016166956795]], [["Color", 0.8389858785714184, 0.4805764176488667], ["Rotate", 0.7483931487048825, 0.4731174601400677]], [["Sharpness", 0.19006538611394763, 0.9480745790240234], ["TranslateY", 0.13904429049439282, 0.04117685330615939]], [["TranslateY", 0.9958097661701637, 0.34853788612580905], ["Cutout", 0.2235829624082113, 0.3737887095480745]], [["ShearX", 0.635453761342424, 0.6063917273421382], ["Posterize", 0.8738297843709666, 0.4893042590265556]], [["Brightness", 0.7907245198402727, 0.7082189713070691], ["Color", 0.030313003541849737, 0.6927897798493439]], [["Cutout", 0.6965622481073525, 0.8103522907758203], ["ShearY", 0.6186794303078708, 0.28640671575703547]], [["ShearY", 0.43734910588450226, 0.32549342535621517], ["ShearX", 0.08154980987651872, 0.3286764923112455]], [["AutoContrast", 0.5262462005050853, 0.8175584582465848], ["Contrast", 0.8683217097363655, 0.548776281479276]], [["ShearY", 0.03957878500311707, 0.5102350637943197], ["Rotate", 0.13794708520303778, 0.38035687712954236]], [["Sharpness", 0.634288567312677, 0.6387948309075822], ["AutoContrast", 0.13437288694693272, 0.7150448869023095]], [["Contrast", 0.5198339640088544, 0.9409429390321714], ["Cutout", 0.09489154903321972, 0.6228488803821982]], [["Equalize", 0.8955909061806043, 
0.7727336527163008], ["AutoContrast", 0.6459479564441762, 0.7065467781139214]], [["Invert", 0.07214420843537739, 0.15334721382249505], ["ShearX", 0.9242027778363903, 0.5809187849982554]], [["Equalize", 0.9144084379856188, 0.9457539278608998], ["Sharpness", 0.14337499858300173, 0.5978054365425495]], [["Posterize", 0.18894269796951202, 0.14676331276539045], ["Equalize", 0.846204299950047, 0.0720601838168885]], [["Contrast", 0.47354445405741163, 0.1793650330107468], ["Solarize", 0.9086106327264657, 0.7578807802091502]], [["AutoContrast", 0.11805466892967886, 0.6773620948318575], ["TranslateX", 0.584222568299264, 0.9475693349391936]], [["Brightness", 0.5833017701352768, 0.6892593824176294], ["AutoContrast", 0.9073141314561828, 0.5823085733964589]], [["TranslateY", 0.5711231614144834, 0.6436240447620021], ["Contrast", 0.21466964050052473, 0.8042843954486391]], [["Contrast", 0.22967904487976765, 0.2343103109298762], ["Invert", 0.5502897289159286, 0.386181060792375]], [["Invert", 0.7008423439928628, 0.4234003051405053], ["Rotate", 0.77270460187611, 0.6650852696828039]], [["Invert", 0.050618322309703534, 0.24277027926683614], ["TranslateX", 0.789703489736613, 0.5116446685339312]], [["Color", 0.363898083076868, 0.7870323584210503], ["ShearY", 0.009608425513626617, 0.6188625018465327]], [["TranslateY", 0.9447601615216088, 0.8605867115798349], ["Equalize", 0.24139180127003634, 0.9587337957930782]], [["Equalize", 0.3968589440144503, 0.626206375426996], ["Solarize", 0.3215967960673186, 0.826785464835443]], [["TranslateX", 0.06947339047121326, 0.016705969558222122], ["Contrast", 0.6203392406528407, 0.6433525559906872]], [["Solarize", 0.2479835265518212, 0.6335009955617831], ["Sharpness", 0.6260191862978083, 0.18998095149428562]], [["Invert", 0.9818841924943431, 0.03252098144087934], ["TranslateY", 0.9740718042586802, 0.32038951753031475]], [["Solarize", 0.8795784664090814, 0.7014953994354041], ["AutoContrast", 0.8508018319577783, 0.09321935255338443]], [["Color", 
0.8067046326105318, 0.13732893832354054], ["Contrast", 0.7358549680271418, 0.7880588355974301]], [["Posterize", 0.5005885536838065, 0.7152229305267599], ["ShearX", 0.6714249591308944, 0.7732232697859908]], [["TranslateY", 0.5657943483353953, 0.04622399873706862], ["AutoContrast", 0.2787442688649845, 0.567024378767143]], [["ShearY", 0.7589839214283295, 0.041071003934029404], ["Equalize", 0.3719852873722692, 0.43285778682687326]], [["Posterize", 0.8841266183653291, 0.42441306955476366], ["Cutout", 0.06578801759412933, 0.5961125797961526]], [["Rotate", 0.4057875004314082, 0.20241115848366442], ["AutoContrast", 0.19331542807918067, 0.7175484678480565]], [["Contrast", 0.20331327116693088, 0.17135387852218742], ["Cutout", 0.6282459410351067, 0.6690015305529187]], [["ShearX", 0.4309850328306535, 0.99321178125828], ["AutoContrast", 0.01809604030453338, 0.693838277506365]], [["Rotate", 0.24343531125298268, 0.5326412444169899], ["Sharpness", 0.8663989992597494, 0.7643990609130789]], [["Rotate", 0.9785019204622459, 0.8941922576710696], ["ShearY", 0.3823185048761075, 0.9258854046017292]], [["ShearY", 0.5502613342963388, 0.6193478797355644], ["Sharpness", 0.2212116534610532, 0.6648232390110979]], [["TranslateY", 0.43222920981513757, 0.5657636397633089], ["ShearY", 0.9153733286073634, 0.4868521171273169]], [["Posterize", 0.12246560519738336, 0.9132288825898972], ["Cutout", 0.6058471327881816, 0.6426901876150983]], [["Color", 0.3693970222695844, 0.038929141432555436], ["Equalize", 0.6228052875653781, 0.05064436511347281]], [["Color", 0.7172600331356893, 0.2824542634766688], ["Color", 0.425293116261649, 0.1796441283313972]], [["Cutout", 0.7539608428122959, 0.9896141728228921], ["Solarize", 0.17811081117364758, 0.9064195503634402]], [["AutoContrast", 0.6761242607012717, 0.6484842446399923], ["AutoContrast", 0.1978135076901828, 0.42166879492601317]], [["ShearY", 0.25901666379802524, 0.4770778270322449], ["Solarize", 0.7640963173407052, 0.7548463227094349]], [["TranslateY", 
0.9222487731783499, 0.33658389819616463], ["Equalize", 0.9159112511468139, 0.8877136302394797]], [["TranslateX", 0.8994836977137054, 0.11036053676846591], ["Sharpness", 0.9040333410652747, 0.007266095214664592]], [["Invert", 0.627758632524958, 0.8075245097227242], ["Color", 0.7525387912148516, 0.05950239294733184]], [["TranslateX", 0.43505193292761857, 0.38108822876120796], ["TranslateY", 0.7432578052364004, 0.685678116134759]], [["Contrast", 0.9293507582470425, 0.052266842951356196], ["Posterize", 0.45187123977747456, 0.8228290399726782]], [["ShearX", 0.07240786542746291, 0.8945667925365756], ["Brightness", 0.5305443506561034, 0.12025274552427578]], [["Invert", 0.40157564448143335, 0.5364745514006678], ["Posterize", 0.3316124671813876, 0.43002413237035997]], [["ShearY", 0.7152314630009072, 0.1938339083417453], ["Invert", 0.14102478508140615, 0.41047623580174253]], [["Equalize", 0.19862832613849246, 0.5058521685279254], ["Sharpness", 0.16481208629549782, 0.29126323102770557]], [["Equalize", 0.6951591703541872, 0.7294822018800076], ["ShearX", 0.8726656726111219, 0.3151484225786487]], [["Rotate", 0.17234370554263745, 0.9356543193000078], ["TranslateX", 0.4954374070084091, 0.05496727345849217]], [["Contrast", 0.347405480122842, 0.831553005022885], ["ShearX", 0.28946367213071134, 0.11905898704394013]], [["Rotate", 0.28096672507990683, 0.16181284050307398], ["Color", 0.6554918515385365, 0.8739728050797386]], [["Solarize", 0.05408073374114053, 0.5357087283758337], ["Posterize", 0.42457175211495335, 0.051807130609045515]], [["TranslateY", 0.6216669362331361, 0.9691341207381867], ["Rotate", 0.9833579358130944, 0.12227426932415297]], [["AutoContrast", 0.7572619475282892, 0.8062834082727393], ["Contrast", 0.1447865402875591, 0.40242646573228436]], [["Rotate", 0.7035658783466086, 0.9840285268256428], ["Contrast", 0.04613961510519471, 0.7666683217450163]], [["TranslateX", 0.4580462177951252, 0.6448678609474686], ["AutoContrast", 0.14845695613708987, 0.1581134188537895]], 
[["Color", 0.06795037145259564, 0.9115552821158709], ["TranslateY", 0.9972953449677655, 0.6791016521791214]], [["Cutout", 0.3586908443690823, 0.11578558293480945], ["Color", 0.49083981719164294, 0.6924851425917189]], [["Brightness", 0.7994717831637873, 0.7887316255321768], ["Posterize", 0.01280463502435425, 0.2799086732858721]], [["ShearY", 0.6733451536131859, 0.8122332639516706], ["AutoContrast", 0.20433889615637357, 0.29023346867819966]], [["TranslateY", 0.709913512385177, 0.6538196931503809], ["Invert", 0.06629795606579203, 0.40913219547548296]], [["Sharpness", 0.4704559834362948, 0.4235993305308414], ["Equalize", 0.7578132044306966, 0.9388824249397175]], [["AutoContrast", 0.5281702802395268, 0.8077253610116979], ["Equalize", 0.856446858814119, 0.0479755681647559]], [["Color", 0.8244145826797791, 0.038409264586238945], ["Equalize", 0.4933123249234237, 0.8251940933672189]], [["TranslateX", 0.23949314158035084, 0.13576027004706692], ["ShearX", 0.8547563771688399, 0.8309262160483606]], [["Cutout", 0.4655680937486001, 0.2819807000622825], ["Contrast", 0.8439552665937905, 0.4843617871587037]], [["TranslateX", 0.19142454476784831, 0.7516148119169537], ["AutoContrast", 0.8677128351329768, 0.34967990912346336]], [["Contrast", 0.2997868299880966, 0.919508054854469], ["AutoContrast", 0.3003418493384957, 0.812314984368542]], [["Invert", 0.1070424236198183, 0.614674386498809], ["TranslateX", 0.5010973510899923, 0.20828478805259465]], [["Contrast", 0.6775882415611454, 0.6938564815591685], ["Cutout", 0.4814634264207498, 0.3086844939744179]], [["TranslateY", 0.939427105020265, 0.02531043619423201], ["Contrast", 0.793754257944812, 0.6676072472565451]], [["Sharpness", 0.09833672397575444, 0.5937214638292085], ["Rotate", 0.32530675291753763, 0.08302275740932441]], [["Sharpness", 0.3096455511562728, 0.6726732004553959], ["TranslateY", 0.43268997648796537, 0.8755012330217743]], [["ShearY", 0.9290771880324833, 0.22114736271319912], ["Equalize", 0.5520199288501478, 
0.34269650332060553]], [["AutoContrast", 0.39763980746649374, 0.4597414582725454], ["Contrast", 0.941507852412761, 0.24991270562477041]], [["Contrast", 0.19419400547588095, 0.9127524785329233], ["Invert", 0.40544905179551727, 0.770081532844878]], [["Invert", 0.30473757368608334, 0.23534811781828846], ["Cutout", 0.26090722356706686, 0.5478390909877727]], [["Posterize", 0.49434361308057373, 0.05018423270527428], ["Color", 0.3041910676883317, 0.2603810415446437]], [["Invert", 0.5149061746764011, 0.9507449210221298], ["TranslateY", 0.4458076521892904, 0.8235358255774426]], [["Cutout", 0.7900006753351625, 0.905578861382507], ["Cutout", 0.6707153655762056, 0.8236715672258502]], [["Solarize", 0.8750534386579575, 0.10337670467100568], ["Posterize", 0.6102379615481381, 0.9264503915416868]], [["ShearY", 0.08448689377082852, 0.13981233725811626], ["TranslateX", 0.13979689669329498, 0.768774869872818]], [["TranslateY", 0.35752572266759985, 0.22827299847812488], ["Solarize", 0.3906957174236011, 0.5663314388307709]], [["ShearY", 0.29155240367061563, 0.8427516352971683], ["ShearX", 0.988825367441916, 0.9371258864857649]], [["Posterize", 0.3470780859769458, 0.5467686612321239], ["Rotate", 0.5758606274160093, 0.8843838082656007]], [["Cutout", 0.07825368363221841, 0.3230799425855425], ["Equalize", 0.2319163865298529, 0.42133965674727325]], [["Invert", 0.41972172597448654, 0.34618622513582953], ["ShearX", 0.33638469398198834, 0.9098575535928108]], [["Invert", 0.7322652233340448, 0.7747502957687412], ["Cutout", 0.9643121397298106, 0.7983335094634907]], [["TranslateY", 0.30039942808098496, 0.229018798182827], ["TranslateY", 0.27009499739380194, 0.6435577237846236]], [["Color", 0.38245274994070644, 0.7030758568461645], ["ShearX", 0.4429321461666281, 0.6963787864044149]], [["AutoContrast", 0.8432798685515605, 0.5775214369578088], ["Brightness", 0.7140899735355927, 0.8545854720117658]], [["Rotate", 0.14418935535613786, 0.5637968282213426], ["Color", 0.7115231912479835, 
0.32584796564566776]], [["Sharpness", 0.4023501062807533, 0.4162097130412771], ["Brightness", 0.5536372686153666, 0.03004023273348777]], [["TranslateX", 0.7526053265574295, 0.5365938133399961], ["Cutout", 0.07914142706557492, 0.7544953091603148]], [["TranslateY", 0.6932934644882822, 0.5302211727137424], ["Invert", 0.5040606028391255, 0.6074863635108957]], [["Sharpness", 0.5013938602431629, 0.9572417724333157], ["TranslateY", 0.9160516359783026, 0.41798927975391675]], [["ShearY", 0.5130018836722556, 0.30209438428424185], ["Color", 0.15017170588500262, 0.20653495360587826]], [["TranslateX", 0.5293300090022314, 0.6407011888285266], ["Rotate", 0.4809817860439001, 0.3537850070371702]], [["Equalize", 0.42243081336551014, 0.13472721311046565], ["Posterize", 0.4700309639484068, 0.5197704360874883]], [["AutoContrast", 0.40674959899687235, 0.7312824868168921], ["TranslateX", 0.7397527975920833, 0.7068339877944815]], [["TranslateY", 0.5880995184787206, 0.41294111378078946], ["ShearX", 0.3181387627799316, 0.4810010147143413]], [["Color", 0.9898680233928507, 0.13241525577655167], ["Contrast", 0.9824932511238534, 0.5081145010853807]], [["Invert", 0.1591854062582687, 0.9760371953250404], ["Color", 0.9913399302056851, 0.8388709501056177]], [["Rotate", 0.6427451962231163, 0.9486793975292853], ["AutoContrast", 0.8501937877930463, 0.021326757974406196]], [["Contrast", 0.13611684531087598, 0.3050858709483848], ["Posterize", 0.06618644756084646, 0.8776928511951034]], [["TranslateX", 0.41021065663839407, 0.4965319749091702], ["Rotate", 0.07088831484595115, 0.4435516708223345]], [["Sharpness", 0.3151707977154323, 0.28275482520179296], ["Invert", 0.36980384682133804, 0.20813616084536624]], [["Cutout", 0.9979060206661017, 0.39712948644725854], ["Brightness", 0.42451052896163466, 0.942623075649937]], [["Equalize", 0.5300853308425644, 0.010183500830128867], ["AutoContrast", 0.06930788523716991, 0.5403125318991522]], [["Contrast", 0.010385458959237814, 0.2588311035539086], ["ShearY", 
0.9347048553928764, 0.10439028366854963]], [["ShearY", 0.9867649486508592, 0.8409258132716434], ["ShearX", 0.48031199530836444, 0.7703375364614137]], [["ShearY", 0.04835889473136512, 0.2671081675890492], ["Brightness", 0.7856432618509617, 0.8032169570159564]], [["Posterize", 0.11112884927351185, 0.7116956530752987], ["TranslateY", 0.7339151092128607, 0.3331241226029017]], [["Invert", 0.13527036207875454, 0.8425980515358883], ["Color", 0.7836395778298139, 0.5517059252678862]], [["Sharpness", 0.012541163521491816, 0.013197550692292892], ["Invert", 0.6295957932861318, 0.43276521236056054]], [["AutoContrast", 0.7681480991225756, 0.3634284648496289], ["Brightness", 0.09708289828517969, 0.45016725043529726]], [["Brightness", 0.5839450499487329, 0.47525965678316795], ["Posterize", 0.43096581990183735, 0.9332382960125196]], [["Contrast", 0.9725334964552795, 0.9142902966863341], ["Contrast", 0.12376116410622995, 0.4355916974126801]], [["TranslateX", 0.8572708473690132, 0.02544522678265526], ["Sharpness", 0.37902120723460364, 0.9606092969833118]], [["TranslateY", 0.8907359001296927, 0.8011363927236099], ["Color", 0.7693777154407178, 0.0936768686746503]], [["Equalize", 0.0002657688243309364, 0.08190798535970034], ["Rotate", 0.5215478065240905, 0.5773519995038368]], [["TranslateY", 0.3383007813932477, 0.5733428274739165], ["Sharpness", 0.2436110797174722, 0.4757790814590501]], [["Cutout", 0.0957402176213592, 0.8914395928996034], ["Cutout", 0.4959915628586883, 0.25890349461645246]], [["AutoContrast", 0.594787300189186, 0.9627455357634459], ["ShearY", 0.5136027621132064, 0.10419602450259002]], [["Solarize", 0.4684077211553732, 0.6592850629431414], ["Sharpness", 0.2382385935956325, 0.6589291408243176]], [["Cutout", 0.4478786947325877, 0.6893616643143388], ["TranslateX", 0.2761781720270474, 0.21750622627277727]], [["Sharpness", 0.39476077929016484, 0.930902796668923], ["Cutout", 0.9073012208742808, 0.9881122386614257]], [["TranslateY", 0.0933719180021565, 0.7206252503441172], 
["ShearX", 0.5151400441789256, 0.6307540083648309]], [["AutoContrast", 0.7772689258806401, 0.8159317013156503], ["AutoContrast", 0.5932793713915097, 0.05262217353927168]], [["Equalize", 0.38017352056118914, 0.8084724050448412], ["ShearY", 0.7239725628380852, 0.4246314890359326]], [["Cutout", 0.741157483503503, 0.13244380646497977], ["Invert", 0.03395378056675935, 0.7140036618098844]], [["Rotate", 0.0662727247460636, 0.7099861732415447], ["Rotate", 0.3168532707508249, 0.3553167425022127]], [["AutoContrast", 0.7429303516734129, 0.07117444599776435], ["Posterize", 0.5379537435918104, 0.807221330263993]], [["TranslateY", 0.9788586874795164, 0.7967243851346594], ["Invert", 0.4479103376922362, 0.04260360776727545]], [["Cutout", 0.28318121763188997, 0.7748680701406292], ["AutoContrast", 0.9109258369403016, 0.17126397858002085]], [["Color", 0.30183727885272027, 0.46718354750112456], ["TranslateX", 0.9628952256033627, 0.10269543754135535]], [["AutoContrast", 0.6316709389784041, 0.84287698792044], ["Brightness", 0.5544761629904337, 0.025264772745200004]], [["Rotate", 0.08803313299532567, 0.306059720523696], ["Invert", 0.5222165872425064, 0.045935208620454304]], [["TranslateY", 0.21912346831923835, 0.48529224559004436], ["TranslateY", 0.15466734731903942, 0.8929485418495068]], [["ShearX", 0.17141022847016563, 0.8607600402165531], ["ShearX", 0.6890511341106859, 0.7540899265679949]], [["Invert", 0.9417455522972059, 0.9021733684991224], ["Solarize", 0.7693107057723746, 0.7268007946568782]], [["Posterize", 0.02376991543373752, 0.6768442864453844], ["Rotate", 0.7736875065112697, 0.6706331753139825]], [["Contrast", 0.3623841610390669, 0.15023657344457686], ["Equalize", 0.32975472189318666, 0.05629246869510651]], [["Sharpness", 0.7874882420165824, 0.49535778020457066], ["Posterize", 0.09485578893387558, 0.6170768580482466]], [["Brightness", 0.7099280202949585, 0.021523012961427335], ["Posterize", 0.2076371467666719, 0.17168118578815206]], [["Color", 0.8546367645761538, 
0.832011891505731], ["Equalize", 0.6429734783051777, 0.2618995960561532]], [["Rotate", 0.8780793721476224, 0.5920897827664297], ["ShearX", 0.5338303685064825, 0.8605424531336439]], [["Sharpness", 0.7504493806631884, 0.9723552387375258], ["Sharpness", 0.3206385634203266, 0.45127845905824693]], [["ShearX", 0.23794709526711355, 0.06257530645720066], ["Solarize", 0.9132374030587093, 0.6240819934824045]], [["Sharpness", 0.790583587969259, 0.28551171786655405], ["Contrast", 0.39872982844590554, 0.09644706751019538]], [["Equalize", 0.30681999237432944, 0.5645045018157916], ["Posterize", 0.525966242669736, 0.7360106111256014]], [["TranslateX", 0.4881014179825114, 0.6317220208872226], ["ShearX", 0.2935158995550958, 0.23104608987381758]], [["Rotate", 0.49977116738568395, 0.6610761068306319], ["TranslateY", 0.7396566602715687, 0.09386747830045217]], [["ShearY", 0.5909773790018789, 0.16229529902832718], ["Equalize", 0.06461394468918358, 0.6661349001143908]], [["TranslateX", 0.7218443721851834, 0.04435720302810153], ["Cutout", 0.986686540951642, 0.734771197038724]], [["ShearX", 0.5353800096911666, 0.8120139502148365], ["Equalize", 0.4613239578449774, 0.5159528929124512]], [["Color", 0.0871713897628631, 0.7708895183198486], ["Solarize", 0.5811386808912219, 0.35260648120785887]], [["Posterize", 0.3910857927477053, 0.4329219555775561], ["Color", 0.9115983668789468, 0.6043069944145293]], [["Posterize", 0.07493067637060635, 0.4258000066006725], ["AutoContrast", 0.4740957581389772, 0.49069587151651295]], [["Rotate", 0.34086200894268937, 0.9812149332288828], ["Solarize", 0.6801012471371733, 0.17271491146753837]], [["Color", 0.20542270872895207, 0.5532087457727624], ["Contrast", 0.2718692536563381, 0.20313287569510108]], [["Equalize", 0.05199827210980934, 0.0832859890912212], ["AutoContrast", 0.8092395764794107, 0.7778945136511004]], [["Sharpness", 0.1907689513066838, 0.7705754572256907], ["Color", 0.3911178658498049, 0.41791326933095485]], [["Solarize", 0.19611855804748257, 
0.2407807485604081], ["AutoContrast", 0.5343964972940493, 0.9034209455548394]], [["Color", 0.43586520148538865, 0.4711164626521439], ["ShearY", 0.28635408186820555, 0.8417816793020271]], [["Cutout", 0.09818482420382535, 0.1649767430954796], ["Cutout", 0.34582392911178494, 0.3927982995799828]], [["ShearX", 0.001253882705272269, 0.48661629027584596], ["Solarize", 0.9229221435457137, 0.44374894836659073]], [["Contrast", 0.6829734655718668, 0.8201750485099037], ["Cutout", 0.7886756837648936, 0.8423285219631946]], [["TranslateY", 0.857017093561528, 0.3038537151773969], ["Invert", 0.12809228606383538, 0.23637166191748027]], [["Solarize", 0.9829027723424164, 0.9723093910674763], ["Color", 0.6346495302126811, 0.5405494753107188]], [["AutoContrast", 0.06868643520377715, 0.23758659417688077], ["AutoContrast", 0.6648225411500879, 0.5618315648260103]], [["Invert", 0.44202305603311676, 0.9945938909685547], ["Equalize", 0.7991650497684454, 0.16014142656347097]], [["AutoContrast", 0.8778631604769588, 0.03951977631894088], ["ShearY", 0.8495160088963707, 0.35771447321250416]], [["Color", 0.5365078341001592, 0.21102444169782308], ["ShearX", 0.7168869678248874, 0.3904298719872734]], [["TranslateX", 0.6517203786101899, 0.6467598990650437], ["Invert", 0.26552491504364517, 0.1210812827294625]], [["Posterize", 0.35196021684368994, 0.8420648319941891], ["Invert", 0.7796829363930631, 0.9520895999240896]], [["Sharpness", 0.7391572148971984, 0.4853940393452846], ["TranslateX", 0.7641915295592839, 0.6351349057666782]], [["Posterize", 0.18485880221115913, 0.6117603277356728], ["Rotate", 0.6541660490605724, 0.5704041108375348]], [["TranslateY", 0.27517423188070533, 0.6610080904072458], ["Contrast", 0.6091250547289317, 0.7702443247557892]], [["Equalize", 0.3611798581067118, 0.6623615672642768], ["TranslateX", 0.9537265090885917, 0.06352772509358584]], [["ShearX", 0.09720029389103535, 0.7800423126320308], ["Invert", 0.30314352455858884, 0.8519925470889914]], [["Brightness", 0.06931529763458055, 
0.57760829499712], ["Cutout", 0.637251974467394, 0.7184346129191052]], [["AutoContrast", 0.5026722100286064, 0.32025257156541886], ["Contrast", 0.9667478703047919, 0.14178519432669368]], [["Equalize", 0.5924463845816984, 0.7187610262181517], ["TranslateY", 0.7059479079159405, 0.06551471830655187]], [["Sharpness", 0.18161164512332928, 0.7576138481173385], ["Brightness", 0.19191138767695282, 0.7865880269424701]], [["Brightness", 0.36780861866078696, 0.0677855546737901], ["AutoContrast", 0.8491446654142264, 0.09217782099938121]], [["TranslateY", 0.06011399855120858, 0.8374487034710264], ["TranslateY", 0.8373922962070498, 0.1991295720254297]], [["Posterize", 0.702559916122481, 0.30257509683007755], ["Rotate", 0.249899495398891, 0.9370437251176267]], [["ShearX", 0.9237874098232075, 0.26241907483351146], ["Brightness", 0.7221766836146657, 0.6880749752986671]], [["Cutout", 0.37994098189193104, 0.7836874473657957], ["ShearX", 0.9212861960976824, 0.8140948561570449]], [["Posterize", 0.2584098274786417, 0.7990847652004848], ["Invert", 0.6357731737590063, 0.1066304859116326]], [["Sharpness", 0.4412790857539922, 0.9692465283229825], ["Color", 0.9857401617339051, 0.26755393929808713]], [["Equalize", 0.22348671644912665, 0.7370019910830038], ["Posterize", 0.5396106339575417, 0.5559536849843303]], [["Equalize", 0.8742967663495852, 0.2797122599926307], ["Rotate", 0.4697322053105951, 0.8769872942579476]], [["Sharpness", 0.44279911640509206, 0.07729581896071613], ["Cutout", 0.3589177366154631, 0.2704031551235969]], [["TranslateX", 0.614216412574085, 0.47929659784170453], ["Brightness", 0.6686234118438007, 0.05700784068205689]], [["ShearY", 0.17920614630857634, 0.4699685075827862], ["Color", 0.38251870810870003, 0.7262706923005887]], [["Solarize", 0.4951799001144561, 0.212775278026479], ["TranslateX", 0.8666105646463097, 0.6750496637519537]], [["Color", 0.8110864170849051, 0.5154263861958484], ["Sharpness", 0.2489044083898776, 0.3763372541462343]], [["Cutout", 0.04888193613483871, 
0.06041664638981603], ["Color", 0.06438587718683708, 0.5797881428892969]], [["Rotate", 0.032427448352152166, 0.4445797818376559], ["Posterize", 0.4459357828482998, 0.5879865187630777]], [["ShearX", 0.1617179557693058, 0.050796802246318884], ["Cutout", 0.8142465452060423, 0.3836391305618707]], [["TranslateY", 0.1806857249209416, 0.36697730355422675], ["Rotate", 0.9897576550818276, 0.7483432452225264]], [["Brightness", 0.18278016458098223, 0.952352527690299], ["Cutout", 0.3269735224453044, 0.3924869905012752]], [["ShearX", 0.870832707718742, 0.3214743207190739], ["Cutout", 0.6805560681792573, 0.6984188155282459]], [["TranslateX", 0.4157118388833776, 0.3964216288135384], ["TranslateX", 0.3253012682285006, 0.624835513104391]], [["Contrast", 0.7678168037628158, 0.31033802162621793], ["ShearX", 0.27022424855977134, 0.3773245605126201]], [["TranslateX", 0.37812621869017593, 0.7657993810740699], ["Rotate", 0.18081890120092914, 0.8893511219618171]], [["Posterize", 0.8735859716088367, 0.18243793043074286], ["TranslateX", 0.90435994250313, 0.24116383818819453]], [["Invert", 0.06666709253664793, 0.3881076083593933], ["TranslateX", 0.3783333964963522, 0.14411014979589543]], [["Equalize", 0.8741147867162096, 0.14203839235846816], ["TranslateX", 0.7801536758037405, 0.6952401607812743]], [["Cutout", 0.6095335117944475, 0.5679026063718094], ["Posterize", 0.06433868172233115, 0.07139559616012303]], [["TranslateY", 0.3020364047315408, 0.21459810361176246], ["Cutout", 0.7097677414888889, 0.2942144632587549]], [["Brightness", 0.8223662419048653, 0.195700694016108], ["Invert", 0.09345407040803999, 0.779843655582099]], [["TranslateY", 0.7353462929356228, 0.0468520680237382], ["Cutout", 0.36530918247940425, 0.3897292909049672]], [["Invert", 0.9676896451721213, 0.24473302189463453], ["Invert", 0.7369271521408992, 0.8193267003356975]], [["Sharpness", 0.8691871972054326, 0.4441713912682772], ["ShearY", 0.47385584832119887, 0.23521684584675429]], [["ShearY", 0.9266946026184021, 
0.7611986713358834], ["TranslateX", 0.6195820760253926, 0.14661428669483678]], [["Sharpness", 0.08470870576026868, 0.3380219099907229], ["TranslateX", 0.3062343307496658, 0.7135777338095889]], [["Sharpness", 0.5246448204194909, 0.3193061215236702], ["ShearX", 0.8160637208508432, 0.9720697396582731]], [["Posterize", 0.5249259956549405, 0.3492042382504774], ["Invert", 0.8183138799547441, 0.11107271762524618]], [["TranslateY", 0.210869733350744, 0.7138905840721885], ["Sharpness", 0.7773226404450125, 0.8005353621959782]], [["Posterize", 0.33067522385556025, 0.32046239220630124], ["AutoContrast", 0.18918147708798405, 0.4646281070474484]], [["TranslateX", 0.929502026131094, 0.8029128121556285], ["Invert", 0.7319794306118105, 0.5421878712623392]], [["ShearX", 0.25645940834182723, 0.42754710760160963], ["ShearX", 0.44640695310173306, 0.8132185532296811]], [["Color", 0.018436846416536312, 0.8439313862001113], ["Sharpness", 0.3722867661453415, 0.5103570873163251]], [["TranslateX", 0.7285989086776543, 0.4809027697099264], ["TranslateY", 0.9740807004893643, 0.8241085438636939]], [["Posterize", 0.8721868989693397, 0.5700907310383815], ["Posterize", 0.4219074410577852, 0.8032643572845402]], [["Contrast", 0.9811380092558266, 0.8498397471632105], ["Sharpness", 0.8380884329421594, 0.18351306571903125]], [["TranslateY", 0.3878939366762001, 0.4699103438753077], ["Invert", 0.6055556353233807, 0.8774727658400134]], [["TranslateY", 0.052317005261018346, 0.39471450378745787], ["ShearX", 0.8612486845942395, 0.28834103278807466]], [["Color", 0.511993351208063, 0.07251427040525904], ["Solarize", 0.9898097047354855, 0.299761565689576]], [["Equalize", 0.2721248231619904, 0.6870975927455507], ["Cutout", 0.8787327242363994, 0.06228061428917098]], [["Invert", 0.8931880335225408, 0.49720931867378193], ["Posterize", 0.9619698792159256, 0.17859639696940088]], [["Posterize", 0.0061688075074411985, 0.08082938731035938], ["Brightness", 0.27745128028826993, 0.8638528796903816]], [["ShearY", 
0.9140200609222026, 0.8240421430867707], ["Invert", 0.651734417415332, 0.08871906369930926]], [["Color", 0.45585010413511196, 0.44705070078574316], ["Color", 0.26394624901633146, 0.11242877788650807]], [["ShearY", 0.9200278466372522, 0.2995901331149652], ["Cutout", 0.8445407215116278, 0.7410524214287446]], [["ShearY", 0.9950483746990132, 0.112964468262847], ["ShearY", 0.4118332303218585, 0.44839613407553636]], [["Contrast", 0.7905821952255192, 0.23360046159385106], ["Posterize", 0.8611787233956044, 0.8984260048943528]], [["TranslateY", 0.21448061359312853, 0.8228112806838331], ["Contrast", 0.8992297266152983, 0.9179231590570998]], [["Invert", 0.3924194798946006, 0.31830516468371495], ["Rotate", 0.8399556845248508, 0.3764892022932781]], [["Cutout", 0.7037916990046816, 0.9214620769502728], ["AutoContrast", 0.02913794613018239, 0.07808607528954048]], [["ShearY", 0.6041490474263381, 0.6094184590800105], ["Equalize", 0.2932954517354919, 0.5840888946081727]], [["ShearX", 0.6056801676269449, 0.6948580442549543], ["Cutout", 0.3028001021044615, 0.15117101733894078]], [["Brightness", 0.8011486803860253, 0.18864079729374195], ["Solarize", 0.014965327213230961, 0.8842620292527029]], [["Invert", 0.902244007904273, 0.5634673798052033], ["Equalize", 0.13422913507398349, 0.4110956745883727]], [["TranslateY", 0.9981773319103838, 0.09568550987216096], ["Color", 0.7627662124105109, 0.8494409737419493]], [["Cutout", 0.3013527640416782, 0.03377226729898486], ["ShearX", 0.5727964831614619, 0.8784196638222834]], [["TranslateX", 0.6050722426803684, 0.3650103962378708], ["TranslateX", 0.8392084589130886, 0.6479816470292911]], [["Rotate", 0.5032806606500023, 0.09276980118866307], ["TranslateY", 0.7800234515261191, 0.18896454379343308]], [["Invert", 0.9266027256244017, 0.8246111062199752], ["Contrast", 0.12112023357797697, 0.33870762271759436]], [["Brightness", 0.8688784756993134, 0.17263759696106606], ["ShearX", 0.5133700431071326, 0.6686811994542494]], [["Invert", 0.8347840440941976, 
0.03774897445901726], ["Brightness", 0.24925057499276548, 0.04293631677355758]], [["Color", 0.5998145279485104, 0.4820093200092529], ["TranslateY", 0.6709586184077769, 0.07377334081382858]], [["AutoContrast", 0.7898846202957984, 0.325293526672498], ["Contrast", 0.5156435596826767, 0.2889223168660645]], [["ShearX", 0.08147389674998307, 0.7978924681113669], ["Contrast", 0.7270003309106291, 0.009571215234092656]], [["Sharpness", 0.417607614440786, 0.9532566433338661], ["Posterize", 0.7186586546796782, 0.6936509907073302]], [["ShearX", 0.9555300215926675, 0.1399385550263872], ["Color", 0.9981041061848231, 0.5037462398323248]], [["Equalize", 0.8003487831375474, 0.5413759363796945], ["ShearY", 0.0026607045117773565, 0.019262273030984933]], [["TranslateY", 0.04845391502469176, 0.10063445212118283], ["Cutout", 0.8273170186786745, 0.5045257728554577]], [["TranslateX", 0.9690985344978033, 0.505202991815533], ["TranslateY", 0.7255326592928096, 0.02103609500701631]], [["Solarize", 0.4030771176836736, 0.8424237871457034], ["Cutout", 0.28705805963928965, 0.9601617893682582]], [["Sharpness", 0.16865290353070606, 0.6899673563468826], ["Posterize", 0.3985430034869616, 0.6540651997730774]], [["ShearY", 0.21395578485362032, 0.09519358818949009], ["Solarize", 0.6692821708524135, 0.6462523623552485]], [["AutoContrast", 0.912360598054091, 0.029800239085051583], ["Invert", 0.04319256403746308, 0.7712501517098587]], [["ShearY", 0.9081969961839055, 0.4581560239984739], ["AutoContrast", 0.5313894814729159, 0.5508393335751848]], [["ShearY", 0.860528568424097, 0.8196987216301588], ["Posterize", 0.41134650331494205, 0.3686632018978778]], [["AutoContrast", 0.8753670810078598, 0.3679438326304749], ["Invert", 0.010444228965415858, 0.9581244779208277]], [["Equalize", 0.07071836206680682, 0.7173594756186462], ["Brightness", 0.06111434312497388, 0.16175064669049277]], [["AutoContrast", 0.10522219073562122, 0.9768776621069855], ["TranslateY", 0.2744795945215529, 0.8577967957127298]], 
[["AutoContrast", 0.7628146493166175, 0.996157376418147], ["Contrast", 0.9255565598518469, 0.6826126662976868]], [["TranslateX", 0.017225816199011312, 0.2470332491402908], ["Solarize", 0.44048494909493807, 0.4492422515972162]], [["ShearY", 0.38885252627795064, 0.10272256704901939], ["Equalize", 0.686154959829183, 0.8973517148655337]], [["Rotate", 0.29628991573592967, 0.16639926575004715], ["ShearX", 0.9013782324726413, 0.0838318162771563]], [["Color", 0.04968391374688563, 0.6138600739645352], ["Invert", 0.11177127838716283, 0.10650198522261578]], [["Invert", 0.49655016367624016, 0.8603374164829688], ["ShearY", 0.40625439617553727, 0.4516437918820778]], [["TranslateX", 0.15015718916062992, 0.13867777502116208], ["Brightness", 0.3374464418810188, 0.7613355669536931]], [["Invert", 0.644644393321966, 0.19005804481199562], ["AutoContrast", 0.2293259789431853, 0.30335723256340186]], [["Solarize", 0.004968793254801596, 0.5370892072646645], ["Contrast", 0.9136902637865596, 0.9510587477779084]], [["Rotate", 0.38991518440867123, 0.24796987467455756], ["Sharpness", 0.9911180315669776, 0.5265657122981591]], [["Solarize", 0.3919646484436238, 0.6814994037194909], ["Sharpness", 0.4920838987787103, 0.023425724294012018]], [["TranslateX", 0.25107587874378867, 0.5414936560189212], ["Cutout", 0.7932919623814599, 0.9891303444820169]], [["Brightness", 0.07863012174272999, 0.045175652208389594], ["Solarize", 0.889609658064552, 0.8228793315963948]], [["Cutout", 0.20477096178169596, 0.6535063675027364], ["ShearX", 0.9216318577173639, 0.2908690977359947]], [["Contrast", 0.7035118947423187, 0.45982709058312454], ["Contrast", 0.7130268070749464, 0.8635123354235471]], [["Sharpness", 0.26319477541228997, 0.7451278726847078], ["Rotate", 0.8170499362173754, 0.13998593411788207]], [["Rotate", 0.8699365715164192, 0.8878057721750832], ["Equalize", 0.06682350555715044, 0.7164702080630689]], [["ShearY", 0.3137466057521987, 0.6747433496011368], ["Rotate", 0.42118828936218133, 0.980121180104441]], 
[["Solarize", 0.8470375049950615, 0.15287589264139223], ["Cutout", 0.14438435054693055, 0.24296463267973512]], [["TranslateY", 0.08822241792224905, 0.36163911974799356], ["TranslateY", 0.11729726813270003, 0.6230889726445291]], [["ShearX", 0.7720112337718541, 0.2773292905760122], ["Sharpness", 0.756290929398613, 0.27830353710507705]], [["Color", 0.33825031007968287, 0.4657590047522816], ["ShearY", 0.3566628994713067, 0.859750504071925]], [["TranslateY", 0.06830147433378053, 0.9348778582086664], ["TranslateX", 0.15509346516378553, 0.26320778885339435]], [["Posterize", 0.20266751150740858, 0.008351463842578233], ["Sharpness", 0.06506971109417259, 0.7294471760284555]], [["TranslateY", 0.6278911394418829, 0.8702181892620695], ["Invert", 0.9367073860264247, 0.9219230428944211]], [["Sharpness", 0.1553425337673321, 0.17601557714491345], ["Solarize", 0.7040449681338888, 0.08764313147327729]], [["Equalize", 0.6082233904624664, 0.4177428549911376], ["AutoContrast", 0.04987405274618151, 0.34516208204700916]], [["Brightness", 0.9616085936167699, 0.14561237331885468], ["Solarize", 0.8927707736296572, 0.31176907850205704]], [["Brightness", 0.6707778304730988, 0.9046457117525516], ["Brightness", 0.6801448953060988, 0.20015313057149042]], [["Color", 0.8292680845499386, 0.5181603879593888], ["Brightness", 0.08549161770369762, 0.6567870536463203]], [["ShearY", 0.267802208078051, 0.8388133819588173], ["Sharpness", 0.13453409120796123, 0.10028351311149486]], [["Posterize", 0.775796593610272, 0.05359034561289766], ["Cutout", 0.5067360625733027, 0.054451986840317934]], [["TranslateX", 0.5845238647690084, 0.7507147553486293], ["Brightness", 0.2642051786121197, 0.2578358927056452]], [["Cutout", 0.10787517610922692, 0.8147986902794228], ["Contrast", 0.2190149206329539, 0.902210615462459]], [["TranslateX", 0.5663614214181296, 0.05309965916414028], ["ShearX", 0.9682797885154938, 0.41791929533938466]], [["ShearX", 0.2345325577621098, 0.383780128037189], ["TranslateX", 0.7298083748149163, 
0.644325797667087]], [["Posterize", 0.5138725709682734, 0.7901809917259563], ["AutoContrast", 0.7966018627776853, 0.14529337543427345]], [["Invert", 0.5973031989249785, 0.417399314592829], ["Solarize", 0.9147539948653116, 0.8221272315548086]], [["Posterize", 0.601596043336383, 0.18969646160963938], ["Color", 0.7527275484079655, 0.431793831326888]], [["Equalize", 0.6731483454430538, 0.7866786558207602], ["TranslateX", 0.97574396899191, 0.5970255778044692]], [["Cutout", 0.15919495850169718, 0.8916094305850562], ["Invert", 0.8351348834751027, 0.4029937360314928]], [["Invert", 0.5894085405226027, 0.7283806854157764], ["Brightness", 0.3973976860470554, 0.949681121498567]], [["AutoContrast", 0.3707914135327408, 0.21192068592079616], ["ShearX", 0.28040127351140676, 0.6754553511344856]], [["Solarize", 0.07955132378694896, 0.15073572961927306], ["ShearY", 0.5735850168851625, 0.27147326850217746]], [["Equalize", 0.678653949549764, 0.8097796067861455], ["Contrast", 0.2283048527510083, 0.15507804874474185]], [["Equalize", 0.286013868374536, 0.186785848694501], ["Posterize", 0.16319021740810458, 0.1201304443285659]], [["Sharpness", 0.9601590830563757, 0.06267915026513238], ["AutoContrast", 0.3813920685124327, 0.294224403296912]], [["Brightness", 0.2703246632402241, 0.9168405377492277], ["ShearX", 0.6156009855831097, 0.4955986055846403]], [["Color", 0.9065504424987322, 0.03393612216080133], ["ShearY", 0.6768595880405884, 0.9981068127818191]], [["Equalize", 0.28812842368483904, 0.300387487349145], ["ShearY", 0.28812248704858345, 0.27105076231533964]], [["Brightness", 0.6864882730513477, 0.8205553299102412], ["Cutout", 0.45995236371265424, 0.5422030370297759]], [["Color", 0.34941404877084326, 0.25857961830158516], ["AutoContrast", 0.3451390878441899, 0.5000938249040454]], [["Invert", 0.8268247541815854, 0.6691380821226468], ["Cutout", 0.46489193601530476, 0.22620873109485895]], [["Rotate", 0.17879730528062376, 0.22670425330593935], ["Sharpness", 0.8692795688221834, 
0.36586055020855723]], [["Brightness", 0.31203975139659634, 0.6934046293010939], ["Cutout", 0.31649437872271236, 0.08078625004157935]], [["Cutout", 0.3119482836150119, 0.6397160035509996], ["Contrast", 0.8311248624784223, 0.22897510169718616]], [["TranslateX", 0.7631157841429582, 0.6482890521284557], ["Brightness", 0.12681196272427664, 0.3669813784257344]], [["TranslateX", 0.06027722649179801, 0.3101104512201861], ["Sharpness", 0.5652076706249394, 0.05210008400968136]], [["AutoContrast", 0.39213552101583127, 0.5047021194355596], ["ShearY", 0.7164003055682187, 0.8063370761002899]], [["Solarize", 0.9574307011238342, 0.21472064809226854], ["AutoContrast", 0.8102612285047174, 0.716870148067014]], [["Rotate", 0.3592634277567387, 0.6452602893051465], ["AutoContrast", 0.27188430331411506, 0.06003099168464854]], [["Cutout", 0.9529536554825503, 0.5285505311027461], ["Solarize", 0.08478231903311029, 0.15986449762728216]], [["TranslateY", 0.31176130458018936, 0.5642853506158253], ["Equalize", 0.008890883901317648, 0.5146121040955942]], [["Color", 0.40773645085566157, 0.7110398926612682], ["Color", 0.18233100156439364, 0.7830036002758337]], [["Posterize", 0.5793809197821732, 0.043748553135581236], ["Invert", 0.4479962016131668, 0.7349663010359488]], [["TranslateX", 0.1994882312299382, 0.05216859488899439], ["Rotate", 0.48288726352035416, 0.44713829026777585]], [["Posterize", 0.22122838185154603, 0.5034546841241283], ["TranslateX", 0.2538745835410222, 0.6129055170893385]], [["Color", 0.6786559960640814, 0.4529749369803212], ["Equalize", 0.30215879674415336, 0.8733394611096772]], [["Contrast", 0.47316062430673456, 0.46669538897311447], ["Invert", 0.6514906551984854, 0.3053339444067804]], [["Equalize", 0.6443202625334524, 0.8689731394616441], ["Color", 0.7549183794057628, 0.8889001426329578]], [["Solarize", 0.616709740662654, 0.7792180816399313], ["ShearX", 0.9659155537406062, 0.39436937531179495]], [["Equalize", 0.23694011299406226, 0.027711152164392128], ["TranslateY", 
0.1677339686527083, 0.3482126536808231]], [["Solarize", 0.15234175951790285, 0.7893840414281341], ["TranslateX", 0.2396395768284183, 0.27727219214979715]], [["Contrast", 0.3792017455380605, 0.32323660409845334], ["Contrast", 0.1356037413846466, 0.9127772969992305]], [["ShearX", 0.02642732222284716, 0.9184662576502115], ["Equalize", 0.11504884472142995, 0.8957638893097964]], [["TranslateY", 0.3193812913345325, 0.8828100030493128], ["ShearY", 0.9374975727563528, 0.09909415611083694]], [["AutoContrast", 0.025840721736048122, 0.7941037581373024], ["TranslateY", 0.498518003323313, 0.5777122846572548]], [["ShearY", 0.6042199307830248, 0.44809668754508836], ["Cutout", 0.3243978207701482, 0.9379740926294765]], [["ShearY", 0.6858549297583574, 0.9993252035788924], ["Sharpness", 0.04682428732773203, 0.21698099707915652]], [["ShearY", 0.7737469436637263, 0.8810127181224531], ["ShearY", 0.8995655445246451, 0.4312416220354539]], [["TranslateY", 0.4953094136709374, 0.8144161580138571], ["Solarize", 0.26301211718928097, 0.518345311180405]], [["Brightness", 0.8820246486031275, 0.571075863786249], ["ShearX", 0.8586669146703955, 0.0060476383595142735]], [["Sharpness", 0.20519233710982254, 0.6144574759149729], ["Posterize", 0.07976625267460813, 0.7480145046726968]], [["ShearY", 0.374075419680195, 0.3386105402023202], ["ShearX", 0.8228083637082115, 0.5885174783155361]], [["Brightness", 0.3528780713814561, 0.6999884884306623], ["Sharpness", 0.3680348120526238, 0.16953358258959617]], [["Brightness", 0.24891223104442084, 0.7973853494920095], ["TranslateX", 0.004256803835524736, 0.0470216343108546]], [["Posterize", 0.1947344282646012, 0.7694802711054367], ["Cutout", 0.9594385534844785, 0.5469744140592429]], [["Invert", 0.19012504762806026, 0.7816140211434693], ["TranslateY", 0.17479746932338402, 0.024249345245078602]], [["Rotate", 0.9669262055946796, 0.510166180775991], ["TranslateX", 0.8990602034610352, 0.6657802719304693]], [["ShearY", 0.5453049050407278, 0.8476872739603525], ["Cutout", 
0.14226529093962592, 0.15756960661106634]], [["Equalize", 0.5895291156113004, 0.6797218994447763], ["TranslateY", 0.3541442192192753, 0.05166001155849864]], [["Equalize", 0.39530681662726097, 0.8448335365081087], ["Brightness", 0.6785483272734143, 0.8805568647038574]], [["Cutout", 0.28633258271917905, 0.7750870268336066], ["Equalize", 0.7221097824537182, 0.5865506280531162]], [["Posterize", 0.9044429629421187, 0.4620266401793388], ["Invert", 0.1803008045494473, 0.8073190766288534]], [["Sharpness", 0.7054649148075851, 0.3877207948962055], ["TranslateX", 0.49260224225927285, 0.8987462620731029]], [["Sharpness", 0.11196934729294483, 0.5953704422694938], ["Contrast", 0.13969334315069737, 0.19310569898434204]], [["Posterize", 0.5484346101051778, 0.7914140118600685], ["Brightness", 0.6428044691630473, 0.18811316670808076]], [["Invert", 0.22294834094984717, 0.05173157689962704], ["Cutout", 0.6091129168510456, 0.6280845506243643]], [["AutoContrast", 0.5726444076195267, 0.2799840903601295], ["Cutout", 0.3055752727786235, 0.591639807512993]], [["Brightness", 0.3707116723204462, 0.4049175910826627], ["Rotate", 0.4811601625588309, 0.2710760253723644]], [["ShearY", 0.627791719653608, 0.6877498291550205], ["TranslateX", 0.8751753308366824, 0.011164650018719358]], [["Posterize", 0.33832547954522263, 0.7087039872581657], ["Posterize", 0.6247474435007484, 0.7707784192114796]], [["Contrast", 0.17620186308493468, 0.9946224854942095], ["Solarize", 0.5431896088395964, 0.5867904203742308]], [["ShearX", 0.4667959516719652, 0.8938082224109446], ["TranslateY", 0.7311343008292865, 0.6829842246020277]], [["ShearX", 0.6130281467237769, 0.9924010909612302], ["Brightness", 0.41039241699696916, 0.9753218875311392]], [["TranslateY", 0.0747250386427123, 0.34602725521067534], ["Rotate", 0.5902597465515901, 0.361094672021087]], [["Invert", 0.05234890878959486, 0.36914978664919407], ["Sharpness", 0.42140532878231374, 0.19204058551048275]], [["ShearY", 0.11590485361909497, 0.6518540857972316], 
["Invert", 0.6482444740361704, 0.48256237896163945]], [["Rotate", 0.4931329446923608, 0.037076242417301675], ["Contrast", 0.9097939772412852, 0.5619594905306389]], [["Posterize", 0.7311032479626216, 0.4796364593912915], ["Color", 0.13912123993932402, 0.03997286439663705]], [["AutoContrast", 0.6196602944085344, 0.2531430457527588], ["Rotate", 0.5583937060431972, 0.9893379795224023]], [["AutoContrast", 0.8847753125072959, 0.19123028952580057], ["TranslateY", 0.494361716097206, 0.14232297727461696]], [["Invert", 0.6212360716340707, 0.033898871473033165], ["AutoContrast", 0.30839896957008295, 0.23603569542166247]], [["Equalize", 0.8255583546605049, 0.613736933157845], ["AutoContrast", 0.6357166629525485, 0.7894617347709095]], [["Brightness", 0.33840706322846814, 0.07917167871493658], ["ShearY", 0.15693175752528676, 0.6282773652129153]], [["Cutout", 0.7550520024859294, 0.08982367300605598], ["ShearX", 0.5844942417320858, 0.36051195083380105]]]
return p
def fa_reduced_svhn():
p = [[["TranslateX", 0.001576965129744562, 0.43180488809874773], ["Invert", 0.7395307279252639, 0.7538444307982558]], [["Contrast", 0.5762062225409211, 0.7532431872873473], ["TranslateX", 0.45212523461624615, 0.02451684483019846]], [["Contrast", 0.18962433143225088, 0.29481185671147325], ["Contrast", 0.9998112218299271, 0.813015355163255]], [["Posterize", 0.9633391295905683, 0.4136786222304747], ["TranslateY", 0.8011655496664203, 0.44102126789970797]], [["Color", 0.8231185187716968, 0.4171602946893402], ["TranslateX", 0.8684965619113907, 0.36514568324909674]], [["Color", 0.904075230324581, 0.46319140331093767], ["Contrast", 0.4115196534764559, 0.7773329158740563]], [["Sharpness", 0.6600262774093967, 0.8045637700026345], ["TranslateY", 0.5917663766021198, 0.6844241908520602]], [["AutoContrast", 0.16223989311434306, 0.48169653554195924], ["ShearX", 0.5433173232860344, 0.7460278151912152]], [["ShearX", 0.4913604762760715, 0.83391837859561], ["Color", 0.5580367056511908, 0.2961512691312932]], [["Color", 0.18567091721211237, 0.9296983204905286], ["Cutout", 0.6074026199060156, 0.03303273406448193]], [["Invert", 0.8049054771963224, 0.1340792344927909], ["Color", 0.4208839940504979, 0.7096454840962345]], [["ShearX", 0.7997786664546294, 0.6492629575700173], ["AutoContrast", 0.3142777134084793, 0.6526010594925064]], [["TranslateX", 0.2581027144644976, 0.6997433332894101], ["Rotate", 0.45490480973606834, 0.238620570022944]], [["Solarize", 0.837397161027719, 0.9311141273136286], ["Contrast", 0.640364826293148, 0.6299761518677469]], [["Brightness", 0.3782457347141744, 0.7085036717054278], ["Brightness", 0.5346150083208507, 0.5858930737867671]], [["Invert", 0.48780391510474086, 0.610086407879722], ["Color", 0.5601999247616932, 0.5393836220423195]], [["Brightness", 0.00250086643283564, 0.5003355864896979], ["Brightness", 0.003922153283353616, 0.41107110154584925]], [["TranslateX", 0.4073069009685957, 0.9843435292693372], ["Invert", 0.38837085318721926, 0.9298542033875989]], 
[["ShearY", 0.05479740443795811, 0.9113983424872698], ["AutoContrast", 0.2181108114232728, 0.713996037012164]], [["Brightness", 0.27747508429413903, 0.3217467607288693], ["ShearX", 0.02715239061946995, 0.5430731635396449]], [["Sharpness", 0.08994432959374538, 0.004706443546453831], ["Posterize", 0.10768206853226996, 0.39020299239900236]], [["Cutout", 0.37498679037853905, 0.20784809761469553], ["Color", 0.9825516352194511, 0.7654155662756019]], [["Color", 0.8899349124453552, 0.7797700766409008], ["Rotate", 0.1370222187174981, 0.2622119295138398]], [["Cutout", 0.7088223332663685, 0.7884456023190028], ["Solarize", 0.5362257505160836, 0.6426837537811545]], [["Invert", 0.15686225694987552, 0.5500563899117913], ["Rotate", 0.16315224193260078, 0.4246854030170752]], [["Rotate", 0.005266247922433631, 0.06612026206223394], ["Contrast", 0.06494357829209037, 0.2738420319474947]], [["Cutout", 0.30200619566806275, 0.06558008068236942], ["Rotate", 0.2168576483823022, 0.878645566986328]], [["Color", 0.6358930679444622, 0.613404714161498], ["Rotate", 0.08733206733004326, 0.4348276574435751]], [["Cutout", 0.8834634887239585, 0.0006853845293474659], ["Solarize", 0.38132051231951847, 0.42558752668491195]], [["ShearY", 0.08830136548479937, 0.5522438878371283], ["Brightness", 0.23816560427834074, 0.3033709051157141]], [["Solarize", 0.9015331490756151, 0.9108788708847556], ["Contrast", 0.2057898014670072, 0.03260096030427456]], [["Equalize", 0.9455978685121174, 0.14850077333434056], ["TranslateY", 0.6888705996522545, 0.5300565492007543]], [["Cutout", 0.16942673959343585, 0.7294197201361826], ["TranslateX", 0.41184830642301534, 0.7060207449376135]], [["Color", 0.30133344118702166, 0.24384417956342314], ["Sharpness", 0.4640904544421743, 0.32431840288061864]], [["Sharpness", 0.5195055033472676, 0.9386677467005835], ["Color", 0.9536519432978372, 0.9624043444556467]], [["Rotate", 0.8689597230556101, 0.23955490826730633], ["Contrast", 0.050071600927462656, 0.1309891556004179]], [["Cutout", 
0.5349421090878962, 0.08239510727779054], ["Rotate", 0.46064964710717216, 0.9037689320897339]], [["AutoContrast", 0.5625256909986802, 0.5358003783186498], ["Equalize", 0.09204330691163354, 0.4386906784850649]], [["ShearX", 0.0011061172864470226, 0.07150284682189278], ["AutoContrast", 0.6015956946553209, 0.4375362295530898]], [["ShearY", 0.25294276499800983, 0.7937560397859562], ["Brightness", 0.30834103299704474, 0.21960258701547009]], [["Posterize", 0.7423948904688074, 0.4598609935109695], ["Rotate", 0.5510348811675979, 0.26763724868985933]], [["TranslateY", 0.3208729319318745, 0.945513054853888], ["ShearX", 0.4916473963030882, 0.8743840560039451]], [["ShearY", 0.7557718687011286, 0.3125397104722828], ["Cutout", 0.5565359791865849, 0.5151359251135629]], [["AutoContrast", 0.16652786355571275, 0.1101575800958632], ["Rotate", 0.05108851703032641, 0.2612966401802814]], [["Brightness", 0.380296489835016, 0.0428162454174662], ["ShearX", 0.3911934083168285, 0.18933607362790178]], [["Color", 0.002476250465397678, 0.07795275305347571], ["Posterize", 0.08131841266654188, 0.14843363184306413]], [["Cutout", 0.36664558716104434, 0.20904484995063996], ["Cutout", 0.07986452057223141, 0.9287747671053432]], [["Color", 0.9296812469919231, 0.6634239915141935], ["Rotate", 0.07632463573240006, 0.408624029443747]], [["Cutout", 0.7594470171961278, 0.9834672124229463], ["Solarize", 0.4471371303745053, 0.5751101102286562]], [["Posterize", 0.051186719734032285, 0.5110941294710823], ["Sharpness", 0.040432522797391596, 0.42652298706992164]], [["Sharpness", 0.2645335264327221, 0.8844553189835457], ["Brightness", 0.7229600357932696, 0.16660749270785696]], [["Sharpness", 0.6296376086802589, 0.15564989758083458], ["Sharpness", 0.7913410481400365, 0.7022615408082826]], [["Cutout", 0.5517247347343883, 0.43794888517764674], ["ShearX", 0.6951051782530201, 0.6230992857867065]], [["ShearX", 0.9015708556331022, 0.6322135168527783], ["Contrast", 0.4285629283441831, 0.18158321019502988]], [["Brightness", 
0.9014292329524769, 0.3660463325457713], ["Invert", 0.6700729097206592, 0.16502732071917703]], [["AutoContrast", 0.6432764477303431, 0.9998909112400834], ["Invert", 0.8124063975545761, 0.8149683327882365]], [["Cutout", 0.6023944009428617, 0.9630976951918225], ["ShearX", 0.2734723568803071, 0.3080911542121765]], [["Sharpness", 0.048949115014412806, 0.44497866256845164], ["Brightness", 0.5611832867244329, 0.12994217480426257]], [["TranslateY", 0.4619112333002525, 0.47317728091588396], ["Solarize", 0.618638784910472, 0.9508297099190338]], [["Sharpness", 0.9656274391147018, 0.3402622993963962], ["Cutout", 0.8452511174508919, 0.3094717093312621]], [["ShearX", 0.04942201651478659, 0.6910568465705691], ["AutoContrast", 0.7155342517619936, 0.8565418847743523]], [["Brightness", 0.5222290590721783, 0.6462675303633422], ["Sharpness", 0.7756317511341633, 0.05010730683866704]], [["Contrast", 0.17098396012942796, 0.9128908626236187], ["TranslateY", 0.1523815376677518, 0.4269909829886339]], [["Cutout", 0.7679024720089866, 0.22229116396644455], ["Sharpness", 0.47714827844878843, 0.8242815864830401]], [["Brightness", 0.9321772357292445, 0.11339758604001371], ["Invert", 0.7021078495093375, 0.27507749184928154]], [["ShearY", 0.7069449324510433, 0.07262757954730437], ["Cutout", 0.6298690227159313, 0.8866813664859028]], [["ShearX", 0.8153137620199989, 0.8478194179953927], ["ShearX", 0.7519451353411938, 0.3914579556959725]], [["Cutout", 0.07152574469472753, 0.2629935229222503], ["TranslateX", 0.43728405510089485, 0.2610201002449789]], [["AutoContrast", 0.5824529633013098, 0.5619551536261955], ["Rotate", 0.45434137552116965, 0.7567169855140041]], [["TranslateY", 0.9338431187142137, 0.14230481341042783], ["Cutout", 0.744797723251028, 0.4346601666787713]], [["ShearX", 0.3197252560289169, 0.8770408070016171], ["Color", 0.7657013088540465, 0.2685586719812284]], [["ShearY", 0.6542181749801549, 0.8148188744344297], ["Sharpness", 0.5108985661436543, 0.9926016115463769]], [["ShearY", 
0.39218730620135694, 0.857769946478945], ["Color", 0.39588355914920886, 0.9910530523789284]], [["Invert", 0.4993610396803735, 0.08449723470758526], ["TranslateX", 0.46267456928508305, 0.46691125646493964]], [["Equalize", 0.8640576819821256, 0.3973808869887604], ["ShearY", 0.5491163877063172, 0.422429328786161]], [["Contrast", 0.6146206387722841, 0.8453559854684094], ["TranslateX", 0.7974333014574718, 0.47395476786951773]], [["Contrast", 0.6828704722015236, 0.6952755697785722], ["Brightness", 0.7903069452567497, 0.8350915035109574]], [["Rotate", 0.1211091761531299, 0.9667702562228727], ["Color", 0.47888534537103344, 0.8298620028065332]], [["Equalize", 0.20009722872711086, 0.21851235854853018], ["Invert", 0.4433641154198673, 0.41902203581091935]], [["AutoContrast", 0.6333190204577053, 0.23965630032835372], ["Color", 0.38651217030044804, 0.06447323778198723]], [["Brightness", 0.378274337541471, 0.5482593116308322], ["Cutout", 0.4856574442608347, 0.8889688535495244]], [["Rotate", 0.8201259323479384, 0.7404525573938633], ["Color", 0.28371236449364595, 0.7866003515933161]], [["Brightness", 0.10053196350009105, 0.18814037089411267], ["Sharpness", 0.5572102497672569, 0.04458217557977126]], [["AutoContrast", 0.6445330112376135, 0.48082049184921843], ["TranslateY", 0.378898917914949, 0.9338102625289362]], [["AutoContrast", 0.08482623401924708, 0.25199930695784384], ["Solarize", 0.5981823550521426, 0.19626357596662092]], [["Solarize", 0.4373030803918095, 0.22907881245285625], ["AutoContrast", 0.6383084635487905, 0.29517603235993883]], [["AutoContrast", 0.922112624726991, 0.29398098144910145], ["AutoContrast", 0.8550184811514672, 0.8030331582292343]], [["ShearX", 0.38761582800913896, 0.06304125015084923], ["Contrast", 0.3225758804984975, 0.7089696696094797]], [["TranslateY", 0.27499498563849206, 0.1917583097241206], ["Color", 0.5845853711746438, 0.5353520071667661]], [["ShearY", 0.530881951424285, 0.47961248148116453], ["ShearX", 0.04666387744533289, 0.275772822690165]], 
[["Solarize", 0.5727309318844802, 0.02889734544563341], ["AutoContrast", 0.638852434854615, 0.9819440776921611]], [["AutoContrast", 0.9766868312173507, 0.9651796447738792], ["AutoContrast", 0.3489760216898085, 0.3082182741354106]], [["Sharpness", 0.13693510871346704, 0.08297205456926067], ["Contrast", 0.3155812019005854, 0.031402991638917896]], [["TranslateY", 0.2664707540547008, 0.4838091910041236], ["ShearX", 0.5935665395229432, 0.7813088248538167]], [["ShearY", 0.7578577752251343, 0.5116014090216161], ["ShearX", 0.8332831240873545, 0.26781876290841017]], [["TranslateY", 0.473254381651761, 0.4203181582821155], ["ShearY", 0.732848696900726, 0.47895514793728433]], [["Solarize", 0.6922689176672292, 0.36403255869823725], ["AutoContrast", 0.910654040826914, 0.888651414068326]], [["ShearX", 0.37326536936166244, 0.47830923320699525], ["Equalize", 0.4724702976076929, 0.8176108279939023]], [["Contrast", 0.3839906424759326, 0.09109695563933692], ["Invert", 0.36305435543972325, 0.5701589223795499]], [["Invert", 0.5175591137387999, 0.38815675919253867], ["TranslateY", 0.1354848160153554, 0.41734106283245065]], [["Color", 0.829616006981199, 0.18631472346156963], ["Color", 0.2465115448326214, 0.9439365672808333]], [["Contrast", 0.18207939197942158, 0.39841173152850873], ["ShearX", 0.16723588254695632, 0.2868649619006758]], [["Posterize", 0.1941909136988733, 0.6322499882557473], ["Contrast", 0.6109060391509794, 0.27329598688783296]], [["AutoContrast", 0.9148775146158022, 0.09129288311923844], ["Sharpness", 0.4222442287436423, 0.847961820057229]], [["Color", 0.21084007475489852, 0.008218056412554131], ["Contrast", 0.43996934555301637, 0.500680146508504]], [["ShearY", 0.6745287915240038, 0.6120305524405164], ["Equalize", 0.467403794543269, 0.2207148995882467]], [["Color", 0.7712823974371379, 0.2839161885566902], ["Color", 0.8725368489709752, 0.3349470222415115]], [["Solarize", 0.5563976601161562, 0.540446614847802], ["Invert", 0.14228071175107454, 0.2242332811481905]], 
[["Contrast", 0.34596757983998383, 0.9158971503395041], ["Cutout", 0.6823724203724072, 0.5221518922863516]], [["Posterize", 0.3275475232882672, 0.6520033254468702], ["Color", 0.7434224109271398, 0.0824308188060544]], [["Cutout", 0.7295122229650082, 0.277887573018184], ["Brightness", 0.5303655506515258, 0.28628046739964497]], [["Color", 0.8533293996815943, 0.24909788223027743], ["Color", 0.6915962825167857, 0.33592561040195834]], [["TranslateX", 0.0761441550001345, 0.7043906245420134], ["Equalize", 0.670845297717783, 0.30986063097084215]], [["Contrast", 0.30592723366237995, 0.7365013059287382], ["Color", 0.6173835128817455, 0.6417028717640598]], [["Rotate", 0.05558240682703821, 0.7284722849011761], ["Color", 0.7814801133853666, 0.13335113981884217]], [["ShearY", 0.6521743070190724, 0.6272195913574455], ["Rotate", 0.36278432239870423, 0.2335623679787695]], [["Color", 0.6799351102482663, 0.3850250771244986], ["Brightness", 0.613901077818094, 0.2374900558949702]], [["Color", 0.551451255148252, 0.7284757153447965], ["Solarize", 0.4863815212982878, 0.3857941567681324]], [["Contrast", 0.32516343965159267, 0.689921852601276], ["Cutout", 0.5922142001124506, 0.7709605594115009]], [["Brightness", 0.23760063764495856, 0.6392077018854179], ["Brightness", 0.7288124083714078, 0.4487520490201095]], [["Sharpness", 0.5631112298553713, 0.6803534985114782], ["ShearX", 0.6743791169050775, 0.34039227245151127]], [["AutoContrast", 0.8260911840078349, 0.7705607269534767], ["Rotate", 0.8880749478363638, 0.8182460047684648]], [["ShearY", 0.7037620764408412, 0.5219573160970589], ["Posterize", 0.7186150466761102, 0.6187857686944253]], [["TranslateY", 0.2140494926702246, 0.9104233882669488], ["TranslateX", 0.4096039512896902, 0.9692703030784571]], [["Equalize", 0.5404313549028165, 0.04094078980738014], ["AutoContrast", 0.07870278300673744, 0.841020779977939]], [["ShearY", 0.2684638876128488, 0.5599793678740521], ["Cutout", 0.19537995362704022, 0.2400995206366768]], [["AutoContrast", 
0.19366394417090382, 0.4130755503251951], ["Sharpness", 0.11735660606190662, 0.39276612830651914]], [["Cutout", 0.8313266945081518, 0.37171822186374703], ["Contrast", 0.5088549187459019, 0.2956405118511817]], [["Cutout", 0.28375485371479847, 0.37020183949342683], ["Posterize", 0.718761436947423, 0.2278804627251678]], [["ShearY", 0.6625840735667625, 0.5045065697748213], ["Rotate", 0.5175257698523389, 0.39496923901188824]], [["Color", 0.6498154010188212, 0.38674158604408604], ["Brightness", 0.8157804892728057, 0.05660118670560971]], [["Color", 0.5512855420254102, 0.7812054820692542], ["Solarize", 0.8851292984174468, 0.2808951606943277]], [["Contrast", 0.35258433539074363, 0.8085377169629859], ["Cutout", 0.5197965849563265, 0.8657111726930974]], [["Cutout", 0.23650925054419358, 0.746860862983295], ["Brightness", 0.8842190203336139, 0.4389347348156118]], [["Rotate", 0.8651460526861932, 0.0031372441327392753], ["Equalize", 0.3909498933963822, 0.6221687914603954]], [["TranslateX", 0.5793690303540427, 0.37939687327382987], ["Invert", 0.846172545690258, 0.36950442052945853]], [["Invert", 0.5151721602607067, 0.5860134277259832], ["Contrast", 0.6868708526377458, 0.2188104093363727]], [["Contrast", 0.28019632529718025, 0.8403553410328943], ["Cutout", 0.5238340355491738, 0.6948434115725599]], [["Rotate", 0.1592592617684533, 0.5212044951482974], ["Color", 0.42404215473874546, 0.45894052919059103]], [["AutoContrast", 0.21780978427851283, 0.11813011387113281], ["Contrast", 0.14557770349869537, 0.5468616480449002]], [["Cutout", 0.03573873600256905, 0.8747186430368771], ["AutoContrast", 0.4804465018567564, 0.3968185812087325]], [["ShearY", 0.027192162947493492, 0.35923750027515866], ["Sharpness", 0.03207302705814674, 0.25868625346023777]], [["AutoContrast", 0.9111793886013045, 0.33534571661592005], ["ShearY", 0.31365410004768934, 0.37055495208177025]], [["Color", 0.5119732811716222, 0.10635303813092001], ["Solarize", 0.9828759703639677, 0.33302532900783466]], [["Contrast", 
0.9652840964645487, 0.9550826002089741], ["ShearY", 0.16934262075572262, 0.35893022906919625]], [["Invert", 0.21526903298837538, 0.5491812432380025], ["TranslateX", 0.27691575128765095, 0.9916365493500338]], [["AutoContrast", 0.7223428288831728, 0.3001506080569529], ["Posterize", 0.28280773693692957, 0.5630226986948541]], [["TranslateY", 0.5334698670580152, 0.4329627064903895], ["Solarize", 0.11621274404555687, 0.38564564358937725]], [["Brightness", 0.9001900081991266, 0.15453762529292236], ["Equalize", 0.6749827304986464, 0.2174408558291521]], [["TranslateY", 0.703293071780793, 0.20371204513522137], ["Invert", 0.7921926919880306, 0.2647654009616249]], [["AutoContrast", 0.32650519442680254, 0.5567514700913352], ["ShearY", 0.7627653627354407, 0.5363510886152073]], [["Rotate", 0.364293676091047, 0.4262321334071656], ["Posterize", 0.7284189361001443, 0.6052618047275847]], [["Contrast", 0.004679138490284229, 0.6985327823420937], ["Posterize", 0.25412559986607497, 0.969098825421215]], [["ShearY", 0.6831738973100172, 0.6916463366962687], ["TranslateY", 0.8744153159733203, 0.3667879549647143]], [["Posterize", 0.39138456188265913, 0.8617909225610128], ["TranslateX", 0.5198303654364824, 0.5518823068009463]], [["Invert", 0.6471155996761706, 0.4793957129423701], ["ShearX", 0.8046274258703997, 0.9711394307595065]], [["Solarize", 0.2442520851809611, 0.5518114414771629], ["Sharpness", 0.02324109511463257, 0.18216585433541427]], [["Cutout", 0.7004457278387007, 0.4904439660213413], ["Contrast", 0.6516622044646659, 0.7324290164242575]], [["Brightness", 0.594212018801632, 0.5624822682300464], ["ShearX", 0.47929863548325596, 0.5610640338380719]], [["TranslateX", 0.20863492063218445, 0.23761872077836552], ["Color", 0.9374148559524687, 0.06390809573246009]], [["AutoContrast", 0.5548946725094693, 0.40547561665765874], ["Equalize", 0.26341425401933344, 0.2763692089379619]], [["Invert", 0.8224614398122034, 0.15547159819315676], ["Rotate", 0.4915912924663281, 0.6995695827608112]], 
[["Equalize", 0.05752620481520809, 0.80230125774557], ["Rotate", 0.16338857010673558, 0.8066738989167762]], [["ShearY", 0.5437502855505825, 0.252101665309144], ["Contrast", 0.9268450172095902, 0.13437399256747992]], [["TranslateY", 0.6946438457089812, 0.35376889837139813], ["Sharpness", 0.15438234648960253, 0.2668696344562673]], [["Invert", 0.24506516252953542, 0.1939315433476327], ["Sharpness", 0.8921986990130818, 0.21478051316241717]], [["TranslateY", 0.5292829065905086, 0.6896826369723732], ["Invert", 0.4461047865540309, 0.9854416526561315]], [["Posterize", 0.8085062334285464, 0.4538963572040656], ["Brightness", 0.2623572045603854, 0.16723779221170698]], [["Solarize", 0.1618752496191097, 0.6007634864056693], ["TranslateY", 0.07808851801433346, 0.3951252736249746]], [["TranslateX", 0.35426056783145843, 0.8875451782909476], ["Brightness", 0.5537927990151869, 0.3042790536918476]], [["Cutout", 0.9051584028783342, 0.6050507821593669], ["ShearX", 0.31185875057627255, 0.39145181108334876]], [["Brightness", 0.43157388465566776, 0.45511767545129933], ["ShearY", 0.626464342187273, 0.5251031991594401]], [["Contrast", 0.7978520212540166, 0.45088491126800995], ["ShearY", 0.20415027867560143, 0.24369493783350643]], [["ShearX", 0.48152242363853065, 0.001652619381325604], ["Sharpness", 0.6154899720956758, 0.22465778944283568]], [["Posterize", 0.0008092255557418104, 0.8624848793450179], ["Solarize", 0.7580784903978838, 0.4141187863855049]], [["TranslateY", 0.4829597846471378, 0.6077028815706373], ["ShearX", 0.43316420981872894, 0.007119694447608018]], [["Equalize", 0.2914045973615852, 0.6298874433109889], ["Cutout", 0.18663096101056076, 0.20634383363149222]], [["TranslateX", 0.6909947340830737, 0.40843889682671003], ["ShearX", 0.3693105697811625, 0.070573833710386]], [["Rotate", 0.6184027722396339, 0.6483359499288176], ["AutoContrast", 0.8658233903089285, 0.31462524418660626]], [["Brightness", 0.8165837262133947, 0.38138221738335765], ["Contrast", 0.01566790570443702, 
0.1250581265407818]], [["Equalize", 0.16745169701901802, 0.9239433721204139], ["ShearY", 0.5535908803004554, 0.35879199699526654]], [["Color", 0.9675880875486578, 0.19745998576077994], ["Posterize", 0.641736196661405, 0.5702363593336868]], [["ShearY", 0.27730895136251943, 0.4730273890919014], ["Posterize", 0.35829530316120517, 0.9040968539551122]], [["Cutout", 0.9989158254302966, 0.3210048366589035], ["Equalize", 0.9226385492886618, 0.21132010337062]], [["Posterize", 0.32861829410989934, 0.7608163668499222], ["TranslateY", 0.528381246453454, 0.6837459631017135]], [["ShearY", 0.6786278797045173, 0.49006792710382946], ["ShearX", 0.7860409944610941, 0.7960317025665418]], [["Solarize", 0.4420731874598513, 0.7163961196254427], ["Sharpness", 0.11927615232343353, 0.3649599343067734]], [["Cutout", 0.4606157449857542, 0.4682141505042986], ["Contrast", 0.8955528913735222, 0.8468556570983498]], [["Brightness", 0.5742349576881501, 0.5633914487991978], ["ShearX", 0.8288987143597276, 0.5937556836469728]], [["Posterize", 0.05362153577922808, 0.40072961361335696], ["Rotate", 0.6681795049585278, 0.5348470042353504]], [["TranslateY", 0.6190833866612555, 0.7338431624993972], ["Color", 0.5352400737236565, 0.1598194251940268]], [["Brightness", 0.9942846465176832, 0.11918348505217388], ["Brightness", 0.0659098729688602, 0.6558077481794591]], [["Equalize", 0.34089122700685126, 0.048940774058585546], ["ShearX", 0.5472987107071652, 0.2965222509150173]], [["Sharpness", 0.3660728361470086, 0.37607120931207433], ["Sharpness", 0.9974987257291261, 0.2483317486035219]], [["Posterize", 0.931283270966942, 0.7525022430475327], ["Cutout", 0.6299208568533524, 0.3313382622423058]], [["Invert", 0.5074998650080915, 0.9722820836624784], ["Solarize", 0.13997049847474802, 0.19340041815763026]], [["AutoContrast", 0.6804950477263457, 0.31675149536227815], ["Solarize", 0.800632422196852, 0.09054278636377117]], [["TranslateY", 0.6886579465517867, 0.549118383513461], ["Brightness", 0.7298771973550124, 
0.59421647759784]], [["Equalize", 0.8117050130827859, 0.22494316766261946], ["AutoContrast", 0.5217061631918504, 0.6106946809838144]], [["Equalize", 0.4734718117645248, 0.7746036952254298], ["Posterize", 0.032049205574512685, 0.9681402692267316]], [["Brightness", 0.4724177066851541, 0.7969700024018729], ["Solarize", 0.6930049134926459, 0.3880086567038069]], [["TranslateX", 0.2833979092130342, 0.6873833799104118], ["Rotate", 0.37167767436617366, 0.03249352593350204]], [["Posterize", 0.7080588381354884, 0.03014586990329654], ["Posterize", 0.20883930954891392, 0.1328596635826556]], [["Cutout", 0.1992050307454733, 0.8079881690617468], ["ShearY", 0.3057279570820446, 0.34868823290010564]], [["TranslateY", 0.6204358851346782, 0.24978856155434062], ["ShearX", 0.2403059671388028, 0.6706906799258086]], [["Contrast", 0.5527380063918701, 0.27504242043334765], ["Rotate", 0.37361791978638376, 0.17818567121454373]], [["Cutout", 0.3368229687890997, 0.013512329226772313], ["Contrast", 0.18480406673028238, 0.21653280083721013]], [["AutoContrast", 0.13634047961070397, 0.5322441057075571], ["Posterize", 0.3409948654529233, 0.2562132228604077]], [["Invert", 0.3375636037272626, 0.5417577242453775], ["Sharpness", 0.10271458969925179, 0.5125859420868099]], [["Invert", 0.26465503753231256, 0.7386494688407392], ["AutoContrast", 0.5310106090963371, 0.14699248759273964]], [["Sharpness", 0.8494538270706318, 0.9524607358113082], ["Solarize", 0.21142978953773187, 0.10711867917080763]], [["Equalize", 0.5185117903942263, 0.06342404369282638], ["ShearY", 0.26812877371366156, 0.32386585917978056]], [["TranslateY", 0.42724471339053904, 0.5218262942425845], ["Brightness", 0.7618037699290332, 0.5773256674209075]], [["Solarize", 0.5683461491921462, 0.7988018975591509], ["AutoContrast", 0.21826664523938988, 0.4395073407383595]], [["Posterize", 0.2564295537162734, 0.6778150727248975], ["Equalize", 0.7571361164411801, 0.4281744623444925]], [["Invert", 0.5171620125994946, 0.8719074953677988], ["ShearX", 
0.10216776728552601, 0.20888013515457593]], [["Equalize", 0.934033636879294, 0.7724470445507672], ["ShearX", 0.14671590364536757, 0.06500753170863127]], [["Cutout", 0.48433709681747783, 0.8989915985203363], ["ShearY", 0.5161346572684965, 0.3154078452465332]], [["AutoContrast", 0.4337913490682531, 0.8651407398083308], ["AutoContrast", 0.31402168607643444, 0.5001710653814162]], [["Brightness", 0.4805460794016203, 0.8182812769485313], ["Equalize", 0.6811585495672738, 0.25172380097389147]], [["TranslateX", 0.05384872718386273, 0.7854623644701991], ["Color", 0.12583336502656287, 0.08656304042059215]], [["TranslateX", 0.3949348949001942, 0.0668909826131569], ["ShearX", 0.2895255694762277, 0.23998090792480392]], [["TranslateY", 0.3183346601371876, 0.5869865305603826], ["Cutout", 0.38601500458347904, 0.37785641359408184]], [["Sharpness", 0.3676509660134142, 0.6370727445512337], ["Rotate", 0.17589815946040205, 0.912442427082365]], [["Equalize", 0.46427003979798154, 0.7771177715171392], ["Cutout", 0.6622980582423883, 0.47780927252115374]], [["TranslateX", 0.4535588156726688, 0.9548833090146791], ["ShearY", 0.18609208838268262, 0.034329918652624025]], [["Rotate", 0.4896172340987028, 0.4842683413051553], ["Brightness", 0.08416972178617699, 0.2946109607041465]], [["TranslateY", 0.1443363248914217, 0.7352253161146544], ["ShearX", 0.025210952382823004, 0.6249971039957651]], [["Brightness", 0.08771030702840285, 0.5926338109828604], ["Contrast", 0.629121304110493, 0.36114268164347396]], [["Cutout", 0.003318169533990778, 0.984234627407162], ["Color", 0.5656264894233379, 0.9913705503959709]], [["Cutout", 0.17582168928005226, 0.5163176285036686], ["Sharpness", 0.42976684239235224, 0.9936723374147685]], [["Rotate", 0.13343297511611085, 0.730719022391835], ["Cutout", 0.43419793455016154, 0.9802436121876401]], [["ShearX", 0.8761482122895571, 0.11688364945899332], ["Solarize", 0.6071032746712549, 0.9972373138154098]], [["Contrast", 0.2721995133325574, 0.9467839388553563], ["AutoContrast", 
0.357368427575824, 0.6530359095247653]], [["Equalize", 0.5334298945812708, 0.7157629957411794], ["Brightness", 0.8885107405370157, 0.2909013041171791]], [["Equalize", 0.4907081744271751, 0.9999203497290372], ["ShearX", 0.0055186544890628575, 0.20501406304441697]], [["Color", 0.4865852751351166, 0.14717278223914915], ["TranslateX", 0.0492335566831905, 0.01654291587484527]], [["Contrast", 0.3753662301521211, 0.866484274102244], ["Color", 0.21148416029328898, 0.37861792266657684]], [["TranslateY", 0.03960047686663052, 0.9948086048192006], ["TranslateX", 0.5802633545422445, 0.7696464344779717]], [["Contrast", 0.6456791961464718, 0.6304663998505495], ["Sharpness", 0.594774521429873, 0.8024138008893688]], [["Equalize", 0.5326123709954759, 0.7361990154971826], ["Invert", 0.5337609996065145, 0.06826577456972233]], [["ShearY", 0.7177596430755101, 0.16672206074906565], ["Equalize", 0.1847132768987843, 0.16186121936769876]], [["ShearY", 0.037342495065949534, 0.7762322168034441], ["Rotate", 0.28731231550023495, 0.4605573565280328]], [["Contrast", 0.6815742688289678, 0.04073638022156048], ["Cutout", 0.20201133153964437, 0.048429819360450654]], [["Color", 0.5295323372448824, 0.8591352159356821], ["Posterize", 0.7743900815037675, 0.8308865010050488]], [["Solarize", 0.9325362059095493, 0.4070769736318192], ["Contrast", 0.09359008071252661, 0.2808191171337515]], [["Sharpness", 0.6413241263332543, 0.5493867784897841], ["Solarize", 0.021951790397463734, 0.1045868634597023]], [["Color", 0.006027943433085061, 0.698043169126901], ["TranslateX", 0.06672167045857719, 0.6096719632236709]], [["TranslateX", 0.42167004878865333, 0.8844171486107537], ["Color", 0.12383835252312375, 0.9559595374068695]], [["Posterize", 0.5382560989047361, 0.6014252438301297], ["Color", 0.26197040526014054, 0.3423981550778665]], [["Cutout", 0.33150268513579584, 0.40828564490879615], ["AutoContrast", 0.6907753092981255, 0.05779246756831708]], [["Equalize", 0.31608006376116865, 0.9958870759781376], ["TranslateY", 
0.15842255624921547, 0.5764254535539765]], [["Contrast", 0.19859706438565994, 0.12680764238281503], ["TranslateY", 0.4694115475285127, 0.45831161348904836]], [["TranslateX", 0.18768081492494126, 0.7718605539481094], ["Cutout", 0.2340834739291012, 0.3290460999084155]], [["Posterize", 0.17300123510877463, 0.5276823821218432], ["AutoContrast", 0.5861008799330297, 0.31557924295308126]], [["TranslateX", 0.36140745478517367, 0.4172762477431993], ["Sharpness", 0.6518477061748665, 0.9033991248207786]], [["AutoContrast", 0.1757278990984992, 0.9562490311064124], ["Invert", 0.43712652497757065, 0.26925880337078234]], [["TranslateX", 0.38113274849599377, 0.35742156735271613], ["TranslateY", 0.47708889990018216, 0.7975974044609476]], [["Brightness", 0.39538470887490523, 0.09692156164771923], ["Equalize", 0.876825166573471, 0.0979346217138612]], [["Solarize", 0.07679586061933875, 0.45996163577975313], ["Invert", 0.039726680682847904, 0.23574574397443826]], [["ShearX", 0.9739648414905278, 0.5217986621319772], ["TranslateY", 0.21653455086845896, 0.30415852174016683]], [["TranslateY", 0.26965366633030263, 0.4355259497820251], ["Sharpness", 0.6343493801543757, 0.9337027079656623]], [["Rotate", 0.42301232492240126, 0.07813015342326983], ["AutoContrast", 0.28524730310382906, 0.24127293503900557]], [["Color", 0.826300213905907, 0.008451115447607682], ["Equalize", 0.6770124607838715, 0.2889698349030014]], [["Cutout", 0.3461911530045792, 0.7481322146924341], ["Brightness", 0.1831459184570124, 0.5487074846857195]], [["Brightness", 0.8455429603962046, 0.4838335496721761], ["Cutout", 0.5778222397066808, 0.7789798279724414]], [["Brightness", 0.7859388330361665, 0.5907006126719181], ["Brightness", 0.5299842953874527, 0.008670514958094622]], [["Rotate", 0.9584331504536162, 0.7242692977964363], ["TranslateY", 0.46941406313257866, 0.748911298847083]], [["AutoContrast", 0.5878130357161462, 0.25218818797390996], ["Solarize", 0.815466142337258, 0.20231731395730107]], [["ShearX", 
0.15594838773787617, 0.9764784874102524], ["TranslateY", 0.5805369037495945, 0.1412009058745196]], [["Sharpness", 0.7936370935749524, 0.5142489498674206], ["Sharpness", 0.1544307510097193, 0.3678451501088748]], [["TranslateY", 0.29391437860633873, 0.3520843012638746], ["Brightness", 0.5885278199370352, 0.04915265122854349]], [["AutoContrast", 0.3329771519033218, 0.2459852352278583], ["Equalize", 0.8674782697650298, 0.2900192232303214]], [["Cutout", 0.58997726901359, 0.9910393463442352], ["Contrast", 0.09792234559792412, 0.23341828880112486]], [["Cutout", 0.4643317809492098, 0.3224299097542076], ["TranslateY", 0.7998033586490294, 0.27086436352896565]], [["AutoContrast", 0.13138317155414905, 0.3419742927322439], ["TranslateY", 0.05413070060788905, 0.5504283113763994]], [["Posterize", 0.3645493423712921, 0.10684861674653627], ["Color", 0.6343589365592908, 0.9712261380583729]], [["Color", 0.06539862123316142, 0.34370535435837324], ["Equalize", 0.8098077629435421, 0.1272416658849032]], [["Invert", 0.3600258964493429, 0.7455698641930473], ["Color", 0.4118102215241555, 0.4489347750419333]], [["Sharpness", 0.2230673636976691, 0.2240713255305713], ["AutoContrast", 0.5039292091174429, 0.033700713206763835]], [["ShearX", 0.10611028325684749, 0.4235430688519599], ["Brightness", 0.354597328722803, 0.6835155193055997]], [["ShearX", 0.101313662029975, 0.3048854771395032], ["ShearX", 0.39832929626318425, 0.5569152062399838]], [["ShearX", 0.46033087857932264, 0.5976525683159943], ["Color", 0.8117411866929898, 0.22950658046373415]], [["Cutout", 0.04125062306390376, 0.5021647863925347], ["TranslateY", 0.4949139091550513, 0.40234738545601595]], [["TranslateX", 0.9982425877241792, 0.3912268450702254], ["Cutout", 0.8094853705295444, 0.4628037417520003]], [["Contrast", 0.47154787535001147, 0.5116549800625204], ["Invert", 0.4929108509901112, 0.713690694626014]], [["ShearX", 0.3073913369156325, 0.5912409524756753], ["Equalize", 0.5603975982699875, 0.12046838435247365]], [["TranslateY", 
0.8622939212850868, 0.057802109037417344], ["Invert", 0.7577173459800602, 0.33727019024447835]], [["Cutout", 0.3646694663986778, 0.6285264075514656], ["Color", 0.5589259087346165, 0.6650676195317845]], [["Invert", 0.8563008117600374, 0.6216056385231019], ["AutoContrast", 0.7575002303510038, 0.6906934785154547]], [["ShearX", 0.4415411885102101, 0.301535484182858], ["TranslateY", 0.779716145113622, 0.5792057745092073]], [["Invert", 0.10736083594024397, 0.10640910911300788], ["Posterize", 0.5923391813408784, 0.5437447559328059]], [["Color", 0.4745215286268124, 0.08046291318852558], ["Rotate", 0.1642897827127771, 0.20754337935267492]], [["Invert", 0.3141086213412405, 0.5865422721808763], ["AutoContrast", 0.7551954144793225, 0.5588044000850431]], [["Equalize", 0.979500405577596, 0.6846916489547885], ["Rotate", 0.11257616752512875, 0.8137724117751907]], [["Equalize", 0.6315666801659133, 0.71548254701219], ["Cutout", 0.38805635642306224, 0.29282906744304604]], [["Posterize", 0.022485702859896456, 0.2794994040845844], ["Color", 0.4554990465860552, 0.5842888808848151]], [["Invert", 0.15787502346886398, 0.5137397924063724], ["TranslateY", 0.487638703473969, 0.6428121360825987]], [["Rotate", 0.20473927977443407, 0.6090899892067203], ["Contrast", 0.3794752343740154, 0.8056548374185936]], [["AutoContrast", 0.35889225269685354, 0.7311496777471619], ["Sharpness", 0.10152796686794396, 0.34768639850633193]], [["Rotate", 0.6298704242033275, 0.09649334401126405], ["Solarize", 0.24713244934163017, 0.4292117526982358]], [["Contrast", 0.9851015107131748, 0.30895068679118054], ["Sharpness", 0.7167845732283787, 0.36269175386392893]], [["Equalize", 0.49699932368219435, 0.21262924430159158], ["Contrast", 0.8497731498354579, 0.672321242252727]], [["ShearX", 0.18955591368056923, 0.47178691165954034], ["Sharpness", 0.17732805705271348, 0.5486957094984023]], [["ShearY", 0.5087926728214892, 0.8236809302978783], ["AutoContrast", 0.9661195881001936, 0.1309360428195535]], [["Rotate", 
0.7825835251082691, 0.8292427086033229], ["TranslateX", 0.2034110174253454, 0.4073091408820304]], [["Cutout", 0.33457316681888716, 0.480098511703719], ["Sharpness", 0.8686004956803908, 0.21719357589897192]], [["ShearX", 0.30750577846813, 0.6349236735519613], ["Color", 0.5096781256213182, 0.5367289796478476]], [["Rotate", 0.7881847986981432, 0.846966895144323], ["Posterize", 0.33955649631388407, 0.9484449471562024]], [["Posterize", 0.5154127791998345, 0.8765287012129974], ["Posterize", 0.09621562708431097, 0.42108077474553995]], [["ShearX", 0.5513772653411826, 0.27285892893658015], ["AutoContrast", 0.027608088485522986, 0.1738173285576814]], [["Equalize", 0.7950881609822011, 0.05938388811616446], ["ShearX", 0.7864733097562856, 0.5928584864954718]], [["Equalize", 0.03401947599579436, 0.4936643525799874], ["Solarize", 0.8445332527647407, 0.4695434980914176]], [["AutoContrast", 0.9656295942383031, 0.6330670076537706], ["Brightness", 0.303859679517296, 0.8882002295195086]], [["ShearY", 0.5242765280639856, 0.7977406809732712], ["Rotate", 0.24810823616083127, 0.41392557985700773]], [["Posterize", 0.6824268148168342, 0.21831492475831715], ["ShearY", 0.0008811906288737209, 0.1939566265644924]], [["ShearY", 0.8413370823124643, 0.7075999817793881], ["Brightness", 0.7942266192900009, 0.0384845738170444]], [["ShearY", 0.9003919463843213, 0.5068340457708402], ["AutoContrast", 0.9990937631537938, 0.35323621376481695]], [["Contrast", 0.3266913024108897, 0.5470774782762176], ["Contrast", 0.31235464476196995, 0.5723334696204473]], [["AutoContrast", 0.40137522654585955, 0.4274859892417776], ["Sharpness", 0.6173858127038773, 0.9629236289042568]], [["Sharpness", 0.3728210261025356, 0.7873518787942092], ["Solarize", 0.4319848902062112, 0.799524274852396]], [["Sharpness", 0.009379857090624758, 0.3143858944787348], ["ShearY", 0.20273037650420184, 0.3501104740582885]], [["Color", 0.1837135820716444, 0.5709648984713641], ["Solarize", 0.36312838060628455, 0.3753448575775562]], [["Cutout", 
0.3400431457353702, 0.6871688775988243], ["ShearX", 0.42524570507364123, 0.7108865889616602]], [["Sharpness", 0.30703348499729893, 0.885278643437672], ["Cutout", 0.04407034125935705, 0.6821013415071144]], [["Brightness", 0.7164362367177879, 0.3383891625406651], ["Posterize", 0.002136409392137939, 0.5744439712876557]], [["Rotate", 0.757566991428807, 0.41351586654059386], ["TranslateY", 0.6716670812367449, 0.45381701497377025]], [["Color", 0.29554345831738604, 0.5747484938203239], ["Brightness", 0.6495565535422139, 0.38353714282675055]], [["Color", 0.6552239827844064, 0.6396684879350223], ["Rotate", 0.4078437959841622, 0.8229364582618871]], [["ShearX", 0.3325165311431108, 0.99875651917317], ["Cutout", 0.060614087173980605, 0.8655206968462149]], [["ShearY", 0.8591223614020521, 0.47375809606391645], ["ShearY", 0.09964216351993155, 0.7076762087109618]], [["Color", 0.9353968383925787, 0.5171703648813921], ["Cutout", 0.7542267059402566, 0.4591488152776885]], [["ShearX", 0.6832456179177027, 0.6798505733549863], ["Color", 0.7408439718746301, 0.5061967673457707]], [["Equalize", 0.4451729339243929, 0.9242958562575693], ["Posterize", 0.2426742903818478, 0.7914731845374992]], [["Posterize", 0.6241497285503436, 0.6800650930438693], ["Rotate", 0.8212761169895445, 0.42470879405266637]], [["Sharpness", 0.35467334577635123, 0.4150922293649909], ["Color", 0.38988011871489925, 0.08762395748275534]], [["Invert", 0.20231176261188386, 0.34300045056881756], ["Color", 0.6311643386438919, 0.4311911861691113]], [["Contrast", 0.2892223327756343, 0.533349670629816], ["ShearY", 0.6483243327679983, 0.37584367848303185]], [["Contrast", 0.6516401043089397, 0.3801387361685983], ["Contrast", 0.38470661862567795, 0.994720698440467]], [["Contrast", 0.44558087160644655, 0.4234506152228727], ["AutoContrast", 0.30132391715441104, 0.7758068064149011]], [["ShearY", 0.8336612877669443, 0.6961881064757953], ["TranslateX", 0.111182606133131, 0.7138593872015647]], [["Brightness", 0.7252053408816349, 
0.6883715819669095], ["Cutout", 0.6664014893052573, 0.5118622737562747]], [["TranslateX", 0.04294623433241698, 0.4737274091618545], ["Solarize", 0.15848056715239178, 0.436678451116009]], [["ShearX", 0.41843604414439584, 0.5571669083243844], ["Solarize", 0.31754187268874345, 0.643294796216908]], [["Cutout", 0.308644829376876, 0.9455913104658791], ["Cutout", 0.04221174396591258, 0.8004389485099825]], [["Invert", 0.7644819805649288, 0.393641460630097], ["Posterize", 0.20832144467525543, 0.6449709932505365]], [["ShearY", 0.60954354330238, 0.45193814135157406], ["Rotate", 0.07564178568434804, 0.5700158941616946]], [["Color", 0.47993653910354905, 0.18770437256254732], ["Equalize", 0.16540989366253533, 0.3295832145751728]], [["Sharpness", 0.773656112445468, 0.899183686347773], ["AutoContrast", 0.6225833171499476, 0.8375805811436356]], [["Brightness", 0.3119630413126101, 0.21694186245727698], ["Cutout", 0.08263220622864997, 0.9910421137289533]], [["TranslateY", 0.5200200210314198, 0.44467464167817444], ["Cutout", 0.3466375681433383, 0.22385957813397142]], [["ShearY", 0.4445374219718209, 0.23917745675733915], ["Equalize", 0.32094329607540717, 0.6286388268054685]], [["Invert", 0.6194633221674505, 0.6219326801360905], ["Color", 0.43219405413154555, 0.5463431710956901]], [["ShearX", 0.5491808798436206, 0.4485147269153593], ["ShearX", 0.9624243432991532, 0.581319457926692]], [["Cutout", 0.8486066390061917, 0.48538785811340557], ["Cutout", 0.15945182827781573, 0.4114259503742423]], [["TranslateX", 0.9845485123667319, 0.7590166645874611], ["Solarize", 0.9920857955871512, 0.33259831689209834]], [["Brightness", 0.3985764491687188, 0.3516086190155328], ["Cutout", 0.13907765098725244, 0.42430309616193995]], [["Color", 0.35877942890428727, 0.363294622757879], ["Equalize", 0.4997709941984466, 0.34475754120666147]], [["Sharpness", 0.5234916035905941, 0.8988480410886609], ["AutoContrast", 0.793554237802939, 0.2575758806963965]], [["Brightness", 0.36998588693418133, 0.24144652775222428], 
["Cutout", 0.06610767765334377, 0.9979246311006975]], [["TranslateY", 0.6132425595571164, 0.43952345951359123], ["Cutout", 0.361849532200793, 0.8462247954545264]], [["Posterize", 0.36953849915949677, 0.3144747463577223], ["Equalize", 0.3258985378881982, 0.6314053736452068]], [["TranslateY", 0.35835648104981205, 0.08075066564380576], ["TranslateX", 0.5242389109555177, 0.11959330395816647]], [["ShearX", 0.32773751079554303, 0.9307864751586945], ["Sharpness", 0.006921805496030664, 0.8736511230672348]], [["TranslateY", 0.48202000226401526, 0.7058919195136056], ["ShearY", 0.6998308555145181, 0.21074360071080764]], [["AutoContrast", 0.7615852152325713, 0.24914859158079972], ["Cutout", 0.8270894478252626, 0.5804285538051077]], [["AutoContrast", 0.5391662421077847, 0.5233969710179517], ["Brightness", 0.04205906143049083, 0.382677139318253]], [["Brightness", 0.6904817357054526, 0.9116378156160974], ["Invert", 0.24305250280628815, 0.2384731852843838]], [["TranslateX", 0.2661235046256291, 0.9705982948874188], ["Sharpness", 0.35821873293899625, 0.0030835471296858444]], [["Posterize", 0.39029991982997647, 0.4286238191447004], ["TranslateX", 0.08954883207184736, 0.7263973533121859]], [["Cutout", 0.040284118298638344, 0.0388330236482832], ["Posterize", 0.7807814946471116, 0.5238352731112299]], [["ShearY", 0.43556653451802413, 0.6924037743225071], ["Contrast", 0.001081515338562919, 0.7340363920548519]], [["Sharpness", 0.6966467544442373, 0.10202517317137291], ["Color", 0.18836344735972566, 0.31736252662501935]], [["Contrast", 0.6460000689193517, 0.16242196500430484], ["AutoContrast", 0.6003831047484897, 0.8612141912778188]], [["Brightness", 0.9172874494072921, 0.292364504408795], ["Solarize", 0.344602582555059, 0.7054248176903991]], [["Brightness", 0.020940469451794064, 0.5051042440134866], ["Cutout", 0.569500058123745, 0.9091247933460598]], [["Invert", 0.7367715506799225, 0.636137024500329], ["TranslateY", 0.6186960283294023, 0.37626001619073624]], [["TranslateX", 
0.2863246154089121, 0.7454318730628517], ["ShearY", 0.6649909124084395, 0.37639265910774133]], [["Equalize", 0.34603376919062656, 0.9324026002997775], ["Sharpness", 0.8481669261233902, 0.14545759197862507]], [["Contrast", 0.6184370038862784, 0.8074198580702933], ["TranslateX", 0.07036135693949985, 0.46222686847401306]], [["Invert", 0.9304884364616345, 0.26298808050002387], ["Color", 0.8027813156985396, 0.7748486756116594]], [["Posterize", 0.2887993806199106, 0.9576118517235523], ["Contrast", 0.07498577510121784, 0.09131727137211232]], [["Contrast", 0.8110536569461197, 0.051038215841138386], ["Solarize", 0.8799018446258887, 0.25028365826721977]], [["Cutout", 0.006954733791187662, 0.030507696587206496], ["Brightness", 0.45329597160103124, 0.9623148451520953]], [["TranslateX", 0.7436227980344521, 0.45996857241163086], ["Solarize", 0.9682234479355196, 0.70777684485634]], [["Brightness", 0.2080557865889058, 0.025557286020371328], ["AutoContrast", 0.4786039197123853, 0.9271157120589375]], [["Solarize", 0.1822930503108656, 0.8448222682426465], ["ShearX", 0.6221001240196488, 0.207994745014715]], [["Color", 0.27879201870553094, 0.9112278219836276], ["Color", 0.7508664408516654, 0.14885798940641318]], [["ShearX", 0.5496326925552889, 0.7643918760952656], ["AutoContrast", 0.7887459433195374, 0.5993900500657054]], [["ShearY", 0.7182376017241904, 0.7470412126724141], ["Rotate", 0.7644845975844854, 0.38510752407409893]], [["Contrast", 0.7984591239416293, 0.054767400038152704], ["Posterize", 0.7324315466290486, 0.41749946919991243]], [["Contrast", 0.596887781894766, 0.14832691232456097], ["Contrast", 0.05140651977459313, 0.14459348285712803]], [["TranslateX", 0.32766681876233766, 0.5291103977440215], ["Color", 0.6039423443931029, 0.6280077043167083]], [["Invert", 0.5267106136816635, 0.9429838545064784], ["Sharpness", 0.9999053422304087, 0.24764251340211074]], [["Contrast", 0.495767451313242, 0.6744720418896594], ["Brightness", 0.2220993631062378, 0.023842431692152832]], 
[["Invert", 0.7609399278201697, 0.38010826932678554], ["Color", 0.8454251931688355, 0.5876680099851194]], [["Posterize", 0.24967505238473384, 0.3801835337368412], ["Contrast", 0.15106121477353399, 0.6785384814310887]], [["Invert", 0.49594153211743874, 0.32307787492774986], ["Contrast", 0.46822075688054793, 0.7106858486805577]], [["Sharpness", 0.7204076261101202, 0.5928585438185809], ["Rotate", 0.2922878012111486, 0.2742491027179961]], [["Solarize", 0.2866813728691532, 0.2856363754608978], ["TranslateY", 0.7817609208793659, 0.17156048740523572]], [["Cutout", 0.03345540659323987, 0.30068271036485605], ["ShearY", 0.2556603044234358, 0.32397855468866993]], [["TranslateY", 0.20032231858163152, 0.4577561841994639], ["Cutout", 0.8063563515601337, 0.9224365467344459]], [["TranslateY", 0.27130034613023113, 0.7446375583249849], ["ShearX", 0.8254766023480402, 0.4187078898038131]], [["ShearX", 0.2937536068210411, 0.3864492533047109], ["Contrast", 0.7069611463424469, 0.686695922492015]], [["TranslateX", 0.5869084659063555, 0.7866008068031776], ["Invert", 0.289041613918004, 0.5774431720429087]], [["Posterize", 0.6199250263408456, 0.36010044446077893], ["Color", 0.7216853388297056, 0.18586684958836489]], [["Posterize", 0.16831615585406814, 0.08052519983493259], ["Cutout", 0.7325882891023244, 0.77416439921321]], [["Posterize", 0.3000961100422498, 0.5181759282337892], ["Contrast", 0.40376073196794304, 0.613724714153924]], [["ShearX", 0.32203193464136226, 0.037459860897434916], ["Solarize", 0.961542785512965, 0.5176575408248285]], [["Posterize", 0.8986732529036036, 0.7773257927223327], ["AutoContrast", 0.9765986969928243, 0.2092264330225745]], [["Posterize", 0.7463386563644007, 0.7086671048242543], ["Posterize", 0.6433819807034994, 0.00541136425219968]], [["Contrast", 0.8810746688690078, 0.4821029611474963], ["Invert", 0.5121169325265204, 0.6360694878582249]], [["AutoContrast", 0.457606735372388, 0.6104794570624505], ["Color", 0.0020511991982608124, 0.6488142202778011]], [["Invert", 
0.01744463899367027, 0.9799156424364703], ["ShearY", 0.3448213456605478, 0.04437356383800711]], [["Solarize", 0.28511589596283315, 0.283465265528744], ["Rotate", 0.6831807199089897, 0.0617176467316177]], [["Sharpness", 0.329148970281285, 0.398397318402924], ["Color", 0.9125837011914073, 0.4724426676489746]], [["Posterize", 0.05701522811381192, 0.17109014518445975], ["Cutout", 0.785885656821686, 0.39072624694455804]], [["TranslateY", 0.36644251447248277, 0.5818480868136134], ["Equalize", 0.06162286852923926, 0.710929848709861]], [["ShearY", 0.8667124241442813, 0.7556246528256454], ["ShearY", 0.505190335528531, 0.2935701441277698]], [["Brightness", 0.6369570015916268, 0.5131486964430919], ["Color", 0.4887119711633827, 0.9364572089679907]], [["Equalize", 0.06596702627228657, 0.42632445412423303], ["Equalize", 0.583434672187985, 0.045592788478947655]], [["ShearY", 0.12701084021549092, 0.501622939075192], ["Cutout", 0.7948319202684251, 0.5662618207034569]], [["Posterize", 0.24586808377061664, 0.5178008194277262], ["Contrast", 0.1647040530405073, 0.7459410952796975]], [["Solarize", 0.346601298126444, 0.02933266448415553], ["ShearY", 0.9571781647031095, 0.4992610484566735]], [["Brightness", 0.5174960605130408, 0.4387498174634591], ["AutoContrast", 0.6327403754086753, 0.8279630556620247]], [["Posterize", 0.7591448754183128, 0.6265369743070788], ["Posterize", 0.5030300462943854, 0.00401699185532868]], [["Contrast", 0.02643254602183477, 0.44677741300429646], ["Invert", 0.2921779546234399, 0.732876182854368]], [["TranslateY", 0.3516821152310867, 0.7142224211142528], ["Brightness", 0.07382104862245475, 0.45368581543623165]], [["Invert", 0.21382474908836685, 0.8413922690356168], ["Invert", 0.4082563426777157, 0.17018243778787834]], [["Brightness", 0.9533955059573749, 0.8279651051553477], ["Cutout", 0.6730769221406385, 0.07780554260470988]], [["Brightness", 0.6022173063382547, 0.6008500678386571], ["Sharpness", 0.5051909719558138, 0.002298383273851839]], [["Contrast", 
0.03373395758348563, 0.3343918835437655], ["Sharpness", 0.8933651164916847, 0.21738300404986516]], [["TranslateX", 0.7095755408419822, 0.26445508146225394], ["Equalize", 0.18255527363432034, 0.38857557766574147]], [["Solarize", 0.4045911117686074, 0.009106925727519921], ["Posterize", 0.9380296936271705, 0.5485821516085955]], [["Posterize", 0.20361995432403968, 0.45378735898242406], ["AutoContrast", 0.9020357653982511, 0.7880592087609304]], [["AutoContrast", 0.9921550787672145, 0.7396130723399785], ["Cutout", 0.4203609896071977, 0.13000504717682415]], [["Equalize", 0.1917806394805356, 0.5549114911941102], ["Posterize", 0.27636900597148506, 0.02953514963949344]], [["AutoContrast", 0.5427071893197213, 0.6650127340685553], ["Color", 0.011762461060904839, 0.3793508738225649]], [["Invert", 0.18495006059896424, 0.8561476625981166], ["ShearY", 0.6417068692813954, 0.9908751019535517]], [["Solarize", 0.2992385431633619, 0.33622162977907644], ["Rotate", 0.6070550252540432, 0.010205544695142064]], [["Sharpness", 0.33292787606841845, 0.549446566149951], ["Color", 0.9097665730481233, 0.9947658451503181]], [["Posterize", 0.11207465085954937, 0.23296263754645155], ["Cutout", 0.6159972426858633, 0.38289684517298556]], [["TranslateX", 0.7343689718523805, 0.16303049089087485], ["Equalize", 0.3138385390145809, 0.6096356352129273]], [["Solarize", 0.4807269891506887, 0.28116279654856363], ["Posterize", 0.9753467973380021, 0.6327025372916857]], [["Posterize", 0.837244997106023, 0.5586046483574153], ["AutoContrast", 0.9005775602024721, 0.7983389828641411]], [["AutoContrast", 0.8347112949943837, 0.7321850307727004], ["Cutout", 0.3322676575657192, 0.14409873524237032]], [["Equalize", 0.12285967262649124, 0.5368519477089722], ["Posterize", 0.2693593445898034, 0.15098267759162076]], [["Invert", 0.331021587020619, 0.3140868578915853], ["Cutout", 0.48268387543799884, 0.7642598986625201]], [["Equalize", 0.47573794714622175, 0.8628185952549363], ["Solarize", 0.14860046214144496, 
0.3739284346347912]], [["AutoContrast", 0.6747373196190459, 0.2912917979635714], ["Posterize", 0.27259573208358623, 0.9643671211873469]], [["Sharpness", 0.15019788105901233, 0.7289238028242861], ["ShearY", 0.7998448015985137, 0.5924798900807636]], [["Brightness", 0.7874052186079156, 0.9446398428550358], ["Equalize", 0.5105557539139616, 0.6719808885741001]], [["ShearX", 0.783252331899515, 0.74960184771181], ["ShearX", 0.4327935527932927, 0.29980994764698565]], [["Rotate", 0.03892023906368644, 0.24868635699639904], ["Cutout", 0.6408903979315637, 0.32135851733523907]], [["Invert", 0.9972802027590713, 0.9374194642823106], ["ShearX", 0.20016463162924894, 0.0052278586143255645]], [["AutoContrast", 0.9328687102578992, 0.44280614999256235], ["Color", 0.05637751621265141, 0.26921974769786455]], [["AutoContrast", 0.2798532308065416, 0.5283914274806746], ["Cutout", 0.12930089032151, 0.25624459046884057]], [["Invert", 0.2397428994839993, 0.31011715409282065], ["Cutout", 0.5875151915473042, 0.7454458580264322]], [["Equalize", 0.374815667651982, 0.9502053862625081], ["Solarize", 0.10100323698574426, 0.5124939317648691]], [["AutoContrast", 0.6009889057852652, 0.3080148907275367], ["Posterize", 0.6543352447742621, 0.17498668744492413]], [["Sharpness", 0.14402909409016001, 0.9239239955843186], ["ShearY", 0.8959818090635513, 0.7258262803413784]], [["Brightness", 0.8672271320432974, 0.8241439816189235], ["Equalize", 0.4954433852960082, 0.6687050430971254]], [["Solarize", 0.47813402689782114, 0.9447222576804901], ["TranslateY", 0.32546974113401694, 0.8367777573080345]], [["Sharpness", 0.48098022972519927, 0.2731904819197933], ["Rotate", 0.14601550238940067, 0.3955290089346866]], [["AutoContrast", 0.3777442613874327, 0.9991495158709968], ["TranslateY", 0.2951496731751222, 0.6276755696126608]], [["Cutout", 0.487150344941835, 0.7976642551725155], ["Solarize", 0.643407733524025, 0.6313641977306543]], [["Rotate", 0.35017053741686033, 0.23960877779589906], ["Sharpness", 0.8741761196478873, 
0.12362019972427862]], [["Invert", 0.8849459784626776, 0.48532144354199647], ["Invert", 0.702430443380318, 0.924655906426149]], [["Equalize", 0.6324140359298986, 0.9780539325897597], ["AutoContrast", 0.39105074227907843, 0.3636856607173081]], [["AutoContrast", 0.8049993541952016, 0.3231157206314408], ["ShearY", 0.6675686366141409, 0.7345332792455934]], [["Sharpness", 0.12332351413693327, 0.9345179453120547], ["Solarize", 0.1594280186083361, 0.422049311332906]], [["Rotate", 0.38227253679386375, 0.7664364038099101], ["AutoContrast", 0.5725492572719726, 0.21049701651094446]], [["Brightness", 0.6432891832524184, 0.8243948738979008], ["Equalize", 0.20355899618080098, 0.7983877568044979]], [["ShearY", 0.694393675204811, 0.3686964692262895], ["TranslateX", 0.5593122846101599, 0.3378904046390629]], [["Invert", 0.9139730140623171, 0.7183505086140822], ["Posterize", 0.2675839177893596, 0.21399738931234905]], [["TranslateX", 0.05309461965184896, 0.032983777975422554], ["Sharpness", 0.412621944330688, 0.4752089612268503]], [["Equalize", 0.06901149860261116, 0.27405796188385945], ["AutoContrast", 0.7710451977604326, 0.20474249114426807]], [["ShearX", 0.47416427531072325, 0.2738614239087857], ["Cutout", 0.2820106413231565, 0.6295219975308107]], [["Cutout", 0.19984489885141582, 0.7019895950299546], ["ShearX", 0.4264722378410729, 0.8483962467724536]], [["ShearY", 0.42111446850243256, 0.1837626718066795], ["Brightness", 0.9187856196205942, 0.07478292286531767]], [["Solarize", 0.2832036589192868, 0.8253473638854684], ["Cutout", 0.7279303826662196, 0.615420010694839]], [["ShearX", 0.963251873356884, 0.5625577053738846], ["Color", 0.9637046840298858, 0.9992644813427337]], [["Invert", 0.7976502716811696, 0.43330238739921956], ["ShearY", 0.9113181667853614, 0.9066729024232627]], [["Posterize", 0.5750620807485399, 0.7729691927432935], ["Contrast", 0.4527879467651071, 0.9647739595774402]], [["Posterize", 0.5918751472569104, 0.26467375535556653], ["Posterize", 0.6347402742279589, 
0.7476940787143674]], [["Invert", 0.16552404612306285, 0.9829939598708993], ["Solarize", 0.29886553921638087, 0.22487098773064948]], [["Cutout", 0.24209211313246753, 0.5522928952260516], ["AutoContrast", 0.6212831649673523, 0.4191071063984261]], [["ShearX", 0.4726406722647257, 0.26783614257572447], ["TranslateY", 0.251078162624763, 0.26103450676044304]], [["Cutout", 0.8721775527314426, 0.6284108541347894], ["ShearX", 0.7063325779145683, 0.8467168866724094]], [["ShearY", 0.42226987564279606, 0.18012694533480308], ["Brightness", 0.858499853702629, 0.4738929353785444]], [["Solarize", 0.30039851082582764, 0.8151511479162529], ["Cutout", 0.7228873804059033, 0.6174351379837011]], [["ShearX", 0.4921198221896609, 0.5678998037958154], ["Color", 0.7865298825314806, 0.9309020966406338]], [["Invert", 0.8077821007916464, 0.7375015762124386], ["Cutout", 0.032464574567796195, 0.25405044477004846]], [["Color", 0.6061325441870133, 0.2813794250571565], ["TranslateY", 0.5882949270385848, 0.33262043078220227]], [["ShearX", 0.7877331864215293, 0.8001131937448647], ["Cutout", 0.19828215489868783, 0.5949317580743655]], [["Contrast", 0.529508728421701, 0.36477855845285007], ["Color", 0.7145481740509138, 0.2950794787786947]], [["Contrast", 0.9932891064746089, 0.46930062926732646], ["Posterize", 0.9033014136780437, 0.5745902253320527]]]
return p
class Augmentation(object):
    """Applies one randomly chosen sub-policy from a fixed policy list to an image.

    Each policy is a list of (op_name, probability, magnitude) triples; every
    op in the chosen policy fires independently with its own probability.
    """

    def __init__(self, policies):
        self.policies = policies

    def __call__(self, img):
        sub_policy = random.choice(self.policies)
        for op_name, prob, magnitude in sub_policy:
            # One uniform draw per op decides whether it is applied.
            if random.random() <= prob:
                img = apply_augment(img, op_name, magnitude)
        return img
# Ready-to-use augmentation pipelines built from the Fast AutoAugment
# reduced policy sets defined above in this module.
cifar10_faa = Augmentation(fa_reduced_cifar10())
svhn_faa = Augmentation(fa_reduced_svhn())
DeepAA | DeepAA-master/DeepAA_evaluate/common.py | import logging
import warnings
import random
from copy import copy
from typing import Union
from collections import Counter
import numpy as np
import torch
from torch.utils.checkpoint import check_backward_validity, detach_variable, get_device_states, set_device_states
from torchvision.datasets import VisionDataset, CIFAR10, CIFAR100, ImageFolder
from torch.utils.data import Subset, ConcatDataset
from PIL import Image
# Single log-line format shared by every handler this module configures.
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
# PIL emits this UserWarning for images with slightly malformed EXIF data; silence it.
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
def get_logger(name, level=logging.DEBUG):
    """Return a named logger with exactly one freshly configured stream handler.

    Existing handlers are dropped first, so repeated calls do not cause
    duplicated log lines.
    """
    log = logging.getLogger(name)
    log.handlers.clear()
    log.setLevel(level)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level)
    stream_handler.setFormatter(formatter)
    log.addHandler(stream_handler)
    return log
def add_filehandler(logger, filepath):
    """Attach a DEBUG-level file handler (module-wide format) writing to `filepath`."""
    file_handler = logging.FileHandler(filepath)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
def copy_and_replace_transform(ds: Union[CIFAR10, ImageFolder, Subset], transform):
    """Return a shallow copy of dataset `ds` whose `.transform` is replaced.

    The original dataset object is never mutated; underlying data arrays are
    shared between the copy and the original (only `transform` differs).
    Handles plain datasets, `Subset` (replaces the wrapped dataset's
    transform) and `ConcatDataset` (replaces it on every member).
    """
    # Guard: every wrapped dataset must already use the old-style `.transform`
    # attribute, otherwise replacing it would silently have no effect.
    assert ds.dataset.transform is not None if isinstance(ds,Subset) else (all(d.transform is not None for d in ds.datasets) if isinstance(ds,ConcatDataset) else ds.transform is not None) # make sure still uses old style transform
    if isinstance(ds, Subset):
        # Copy the wrapped dataset too, so the new transform does not leak
        # into other Subsets sharing the same parent dataset.
        new_super_ds = copy(ds.dataset)
        new_super_ds.transform = transform
        new_ds = copy(ds)
        new_ds.dataset = new_super_ds
    elif isinstance(ds, ConcatDataset):
        # Local helper deliberately shadows the outer name; it only handles
        # the plain-dataset case for each member of the concat.
        def copy_and_replace_transform(ds):
            new_ds = copy(ds)
            new_ds.transform = transform
            return new_ds
        new_ds = ConcatDataset([copy_and_replace_transform(d) for d in ds.datasets])
    else:
        new_ds = copy(ds)
        new_ds.transform = transform
    return new_ds
def apply_weightnorm(nn):
    """Wrap every Linear/Conv submodule of `nn` with weight normalization in place."""
    def _maybe_wrap(module):
        # Match by class name so all Conv1d/2d/3d and Linear variants are covered.
        cls_name = type(module).__name__
        if 'Linear' in cls_name or 'Conv' in cls_name:
            torch.nn.utils.weight_norm(module, name='weight', dim=0)
    nn.apply(_maybe_wrap)
def shufflelist_with_seed(lis, seed='2020'):
    """Shuffle `lis` in place deterministically without disturbing the global RNG.

    The global `random` state is saved before seeding and restored afterwards,
    so callers relying on their own RNG sequence are unaffected.
    """
    saved_state = random.getstate()
    random.seed(seed)
    random.shuffle(lis)
    random.setstate(saved_state)
def stratified_split(labels, val_share):
    """Deterministically split sample indices into (train, val) lists.

    The class balance of `labels` is preserved: each label contributes
    roughly `val_share` of its samples to the validation split. All shuffles
    are seeded, so the same inputs always yield the same split.
    """
    assert isinstance(labels, list)
    counter = Counter(labels)
    indices_per_label = {label: [i for i, l in enumerate(labels) if l == label] for label in counter}
    per_label_split = {}
    for label, count in counter.items():
        label_indices = indices_per_label[label]
        assert count == len(label_indices)
        # Seed encodes label and count so each class shuffles reproducibly.
        shufflelist_with_seed(label_indices, f'2020_{label}_{count}')
        cut = round(count * (1. - val_share))
        per_label_split[label] = (label_indices[:cut], label_indices[cut:])
    train_indices, val_indices = [], []
    for train_part, val_part in per_label_split.values():
        train_indices.extend(train_part)
        val_indices.extend(val_part)
    # Final seeded shuffles so batches are not grouped by label.
    shufflelist_with_seed(train_indices, '2020_yoyo')
    shufflelist_with_seed(val_indices, '2020_yo')
    return (train_indices, val_indices)
def denormalize(img, mean, std):
    """Undo per-channel normalization in place (channel-first layout): img * std + mean."""
    mean_t = torch.tensor(mean).to(img.device)
    std_t = torch.tensor(std).to(img.device)
    return img.mul_(std_t[:, None, None]).add_(mean_t[:, None, None])
def normalize(img, mean, std):
    """Apply per-channel normalization in place (channel-first layout): (img - mean) / std.

    Inverse of `denormalize`. Returns the mutated `img` tensor.
    """
    # Note: the source line carried fused dataset-residue tokens after the
    # return statement; they are removed here so the function parses.
    mean, std = torch.tensor(mean).to(img.device), torch.tensor(std).to(img.device)
    return img.sub_(mean[:,None,None]).div_(std[:,None,None])
DeepAA | DeepAA-master/DeepAA_evaluate/metrics.py | import copy
import torch
from collections import defaultdict
from torch import nn
def accuracy(output, target, topk=(1,)):
    """Return top-k accuracy (a 0-dim tensor in [0, 1]) for each k in `topk`."""
    maxk = max(topk)
    batch_size = target.size(0)
    # Indices of the maxk highest logits per sample, shape (maxk, batch).
    _, top_indices = output.topk(maxk, 1, True, True)
    top_indices = top_indices.t()
    hits = top_indices.eq(target.view(1, -1).expand_as(top_indices))
    results = []
    for k in topk:
        num_hits = hits[:k].flatten().float().sum(0)
        results.append(num_hits.mul_(1. / batch_size))
    return results
def cross_entropy_smooth(input, target, size_average=True, label_smoothing=0.1):
    """Label-smoothed cross entropy for 10-class CUDA tensors.

    NOTE(review): hardcodes 10 classes and `.cuda()` — only valid for
    10-class datasets on GPU; confirm before reusing elsewhere.
    NOTE(review): adds 0.5 * label_smoothing to every class, so the smoothed
    targets sum to 1 + 4 * label_smoothing rather than 1 — presumably
    label_smoothing / num_classes was intended; verify against training config.
    """
    y = torch.eye(10).cuda()
    # One-hot encode the integer class labels.
    lb_oh = y[target]
    target = lb_oh * (1 - label_smoothing) + 0.5 * label_smoothing
    # NOTE(review): LogSoftmax() without dim= is deprecated and relies on the
    # legacy default dimension; dim=1 is what the per-sample sum below expects.
    logsoftmax = nn.LogSoftmax()
    if size_average:
        return torch.mean(torch.sum(-target * logsoftmax(input), dim=1))
    else:
        return torch.sum(torch.sum(-target * logsoftmax(input), dim=1))
class Accumulator:
    """Dict-like accumulator of named running sums (per-epoch loss/metric totals).

    Missing keys read as 0.0, so sums can be built incrementally; the
    division helpers turn totals into averages at the end of an epoch.
    """

    def __init__(self):
        # defaultdict lets `add` use += without initializing keys first.
        self.metrics = defaultdict(lambda: 0.)

    def add(self, key, value):
        """Add `value` onto the running sum stored under `key`."""
        self.metrics[key] += value

    def add_dict(self, dict):
        """Merge every (key, value) pair of `dict` into the running sums."""
        for key in dict:
            self.add(key, dict[key])

    def __getitem__(self, item):
        return self.metrics[item]

    def __setitem__(self, key, value):
        self.metrics[key] = value

    def __contains__(self, item):
        return self.metrics.__contains__(item)

    def get_dict(self):
        """Return a deep-copied plain-dict snapshot of the current sums."""
        return copy.deepcopy(dict(self.metrics))

    def items(self):
        return self.metrics.items()

    def __str__(self):
        return str(dict(self.metrics))

    def __truediv__(self, other):
        """Return a new Accumulator with every entry divided by `other`."""
        return self.divide(other)

    def divide(self, divisor, **special_divisors):
        """Divide each entry by `divisor`, or by its override in `special_divisors`."""
        result = Accumulator()
        for key, value in self.items():
            result[key] = value / special_divisors.get(key, divisor)
        return result
class SummaryWriterDummy:
    """No-op stand-in for tensorboardX SummaryWriter when logging is disabled.

    Accepts the same calls as the real writer and silently discards them.
    (The source's last line carried fused dataset-residue tokens after
    `pass`; removed here so the class parses.)
    """

    def __init__(self, log_dir):
        pass

    def add_scalar(self, *args, **kwargs):
        pass

    def add_image(self, *args, **kwargs):
        pass
DeepAA | DeepAA-master/DeepAA_evaluate/train.py | import itertools
import json, csv
import logging
import math
import os
from collections import OrderedDict
import gc
import tempfile
import pickle
from dataclasses import dataclass
import random
from time import time
import numpy as np
import torch
from torch import nn, optim
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torchvision import transforms
from tqdm import tqdm
import yaml
from theconf import Config as C, ConfigArgumentParser
from argparse import ArgumentParser
from DeepAA_evaluate.common import get_logger
from DeepAA_evaluate.data import get_dataloaders, mixup_data
from DeepAA_evaluate.lr_scheduler import adjust_learning_rate_resnet
from DeepAA_evaluate.metrics import accuracy, Accumulator
from DeepAA_evaluate.networks import get_model, num_class
from warmup_scheduler import GradualWarmupScheduler
import aug_lib
# Module-level logger for the evaluation trainer; forced to DEBUG so the
# per-epoch summary lines emitted by run_epoch are never filtered out.
logger = get_logger('DeepAA_evaluate')
logger.setLevel(logging.DEBUG)
def run_epoch(rank, worldsize, model, loader, loss_fn, optimizer, desc_default='', epoch=0, writer=None, verbose=1, scheduler=None,sample_pairing_loader=None):
    """Run one training or evaluation epoch over `loader`.

    Trains (backprop + optimizer steps) when `optimizer` is given, otherwise
    only evaluates. Supports DDP gradient-accumulation packs via the
    `communicate_grad_every` config, mixup, label smoothing and SamplePairing.
    Returns an Accumulator of per-sample-averaged metrics (loss, top1, top5,
    error rates, eval_top1, and `lr` when training).
    """
    tqdm_disable = bool(os.environ.get('TASK_NAME', '')) # KakaoBrain Environment
    if verbose:
        logging_loader = tqdm(loader, disable=tqdm_disable)
        logging_loader.set_description('[%s %04d/%04d]' % (desc_default, epoch, C.get()['epoch']))
    else:
        logging_loader = loader
    metrics = Accumulator()
    cnt = 0         # total samples seen this epoch
    eval_cnt = 0    # samples counted toward eval_top1 (every 2nd step only)
    total_steps = len(loader)
    steps = 0
    # Free cached memory left over from the previous epoch before starting.
    gc.collect()
    torch.cuda.empty_cache()
    #print('mem usage', resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
    communicate_grad_every = C.get().get('communicate_grad_every', 1)
    before_load_time = time()  # NOTE(review): leftover timing variable, never read
    if C.get().get('load_sample_pairing_batch',False) and sample_pairing_loader is not None:
        # SamplePairing: prefetch a batch whose images are blended into the
        # training images by the augmentation library.
        sample_pairing_iter = iter(sample_pairing_loader)
        aug_lib.blend_images = [transforms.ToPILImage()(sample_pairing_loader.denorm(ti)) for ti in
                                next(sample_pairing_iter)[0]]
    for batch_idx, batch in enumerate(logging_loader): # logging loader might be a loader or a loader wrapped into tqdm
        data, label = batch[:2]
        steps += 1
        if C.get().get('load_sample_pairing_batch',False) and sample_pairing_loader is not None:
            # Refresh the blend images once per step; tolerate the pairing
            # loader running out before the main loader does.
            try:
                aug_lib.blend_images = [transforms.ToPILImage()(sample_pairing_loader.denorm(ti)) for ti in next(sample_pairing_iter)[0]]
            except StopIteration:
                print("Blend images iterator ended. If this is printed twice per loop, there is something out-of-order.")
                pass
        if worldsize > 1:
            # Distributed: `rank` doubles as the device index for this process.
            data, label = data.to(rank), label.to(rank)
        else:
            data, label = data.cuda(), label.cuda()
        if C.get().get('mixup', 0) > 0 and 'train' in desc_default:
            # Mixup: convex-combine inputs and compute the matching mixed loss.
            data, label_a, label_b, lam = mixup_data(data, label, C.get().get('mixup', 0))
            preds = model(data)
            loss = lam * loss_fn(preds, label_a) + (1.0 - lam) * loss_fn(preds, label_b)
        else:
            preds = model(data)
            loss = loss_fn(preds, label)
        if C.get().get('label_smooth', 0) > 0 and 'train' in desc_default:
            smooth = C.get().get('label_smooth', 0)
            loss = (1.0-smooth) * loss - smooth * torch.nn.functional.log_softmax(preds, dim=-1).mean()
        # Gradient accumulation "pack": DDP gradients are synchronized only on
        # the last step of each pack (communicate_grad); zero_grad happens on
        # the first step of each pack.
        communicate_grad = steps % communicate_grad_every == 0
        just_communicated_grad = steps % communicate_grad_every == 1 # also is true in first step of each epoch
        if optimizer and (communicate_grad_every == 1 or just_communicated_grad):
            optimizer.zero_grad()
        if optimizer:
            if communicate_grad:
                loss.backward()
            else:
                # Skip DDP all-reduce for intermediate accumulation steps.
                with model.no_sync():
                    loss.backward()
            if C.get()['optimizer'].get('clip', 5) > 0:
                nn.utils.clip_grad_norm_(model.parameters(), C.get()['optimizer'].get('clip', 5))
            if (steps-1) % C.get().get('step_optimizer_every', 1) == C.get().get('step_optimizer_nth_step', 0): # default is to step on the first step of each pack
                optimizer.step()
        #print(f"Time for forward/backward {time()-fb_time}")
        top1, top5 = accuracy(preds, label, (1, 5))
        # Metrics are accumulated as per-sample sums and averaged at the end.
        metrics.add_dict({
            'loss': loss.item() * len(data),
            'top1': top1.item() * len(data),
            'top5': top5.item() * len(data),
            'top1_error': (1.0 - top1.item()) * len(data),
            'top5_error': (1.0 - top5.item()) * len(data),
        })
        if steps % 2 == 0:
            metrics.add('eval_top1', top1.item() * len(data)) # times 2 since it is only recorded every sec step
            eval_cnt += len(data)
        cnt += len(data)
        if verbose:
            # eval_top1 has its own divisor since it is only sampled every 2nd step.
            postfix = metrics.divide(cnt, eval_top1=eval_cnt)
            if optimizer:
                postfix['lr'] = optimizer.param_groups[0]['lr']
            logging_loader.set_postfix(postfix)
        if scheduler is not None:
            # Fractional epoch argument enables smooth per-step LR schedules.
            scheduler.step(epoch - 1 + float(steps) / total_steps)
        # visualize augmented images
        #before_load_time = time()
        # Drop per-batch tensors promptly to keep GPU memory flat.
        del preds, loss, top1, top5, data, label
    if tqdm_disable:
        # tqdm was suppressed, so emit one summary log line per epoch instead.
        if optimizer:
            logger.info('[%s %03d/%03d] %s lr=%.6f', desc_default, epoch, C.get()['epoch'], metrics.divide(cnt, eval_top1=eval_cnt), optimizer.param_groups[0]['lr'])
        else:
            logger.info('[%s %03d/%03d] %s', desc_default, epoch, C.get()['epoch'], metrics.divide(cnt, eval_top1=eval_cnt))
    metrics = metrics.divide(cnt, eval_top1=eval_cnt)
    if optimizer:
        metrics.metrics['lr'] = optimizer.param_groups[0]['lr']
    if verbose:
        for key, value in metrics.items():
            writer.add_scalar(key, value, epoch)
    return metrics
def train_and_eval(rank, worldsize, tag, dataroot, test_ratio=0.0, cv_fold=0, reporter=None, metric='last', save_path=None, only_eval=False):
    """Train a model (optionally resuming from ``save_path``) and evaluate it.

    Returns an OrderedDict with loss/top1/top5 per evaluated split and the
    epoch at which the reported metrics were recorded. ``metric='last'``
    reports the final epoch; otherwise the best epoch w.r.t. ``rs[metric]``.
    """
    if not reporter:
        reporter = lambda **kwargs: 0
    # Only rank 0 with a tag gets a real tensorboard writer; everyone else a dummy.
    if not tag or (worldsize and torch.distributed.get_rank() > 0):
        from DeepAA_evaluate.metrics import SummaryWriterDummy as SummaryWriter
        logger.warning('tag not provided or rank > 0 -> no tensorboard log.')
    else:
        from tensorboardX import SummaryWriter
        os.makedirs('./logs/', exist_ok=True)
    writers = [SummaryWriter(log_dir='./logs/%s/%s' % (tag, x)) for x in ['train', 'valid', 'test', 'testtrain']]
    aug_lib.set_augmentation_space(C.get().get('augmentation_search_space', 'standard'), C.get().get('augmentation_parameter_max', 30), C.get().get('custom_search_space_augs', None))
    max_epoch = C.get()['epoch']
    trainsampler, trainloader, validloader, testloader_, testtrainloader_, dataset_info = get_dataloaders(C.get()['dataset'], C.get()['batch'], dataroot, test_ratio, split_idx=cv_fold, distributed=worldsize>1, started_with_spawn=C.get()['started_with_spawn'], summary_writer=writers[0])
    # create a model & an optimizer
    model_conf = C.get()['model']
    model = get_model(model_conf, C.get()['batch'], num_class(C.get()['dataset']), writer=writers[0]) #
    if worldsize > 1:
        model = DDP(model.to(rank), device_ids=[rank])
    else:
        model = model.to('cuda:0')
    criterion = nn.CrossEntropyLoss()
    # Exempt batch-norm parameters from weight decay (SGD path only).
    bn_parameters = sum([list(m.parameters()) for m in model.modules() if isinstance(m, torch.nn.modules.batchnorm._BatchNorm)], [])
    other_parameters = [param for param in model.parameters() if id(param) not in [id(p) for p in bn_parameters]]
    assert len(list(model.parameters())) == len(bn_parameters) + len(other_parameters), 'Some parameters are missing'
    if C.get()['optimizer']['type'] == 'sgd':
        optimizer = optim.SGD(
            [{'params': bn_parameters, 'weight_decay': 0},
             {'params': other_parameters}],
            lr=C.get()['lr'],
            momentum=C.get()['optimizer'].get('momentum', 0.9),
            weight_decay=C.get()['optimizer']['decay'],
            nesterov=C.get()['optimizer']['nesterov']
        )
    elif C.get()['optimizer']['type'] == 'adam':
        optimizer = optim.Adam(
            model.parameters(),
            lr=C.get()['lr'],
            betas=(C.get()['optimizer'].get('momentum',.9),.999)
        )
    else:
        raise ValueError('invalid optimizer type=%s' % C.get()['optimizer']['type'])
    lr_scheduler_type = C.get()['lr_schedule'].get('type', 'cosine')
    if lr_scheduler_type == 'cosine':
        # Cosine anneals only over the post-warmup epochs.
        warmup_epochs = 0
        if C.get()['lr_schedule'].get('warmup', None):
            warmup_epochs = C.get()['lr_schedule']['warmup']['epoch']
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                               T_max=C.get()['epoch'] - warmup_epochs,
                                                               eta_min=0.)
    elif lr_scheduler_type == 'resnet':
        scheduler = adjust_learning_rate_resnet(optimizer)
    elif lr_scheduler_type == 'constant':
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda e: 1.)
    else:
        raise ValueError('invalid lr_schduler=%s' % lr_scheduler_type)
    if C.get()['lr_schedule'].get('warmup', None):
        # Wrap the base scheduler with linear warmup for the first epochs.
        scheduler = GradualWarmupScheduler(
            optimizer,
            multiplier=C.get()['lr_schedule']['warmup']['multiplier'],
            total_epoch=C.get()['lr_schedule']['warmup']['epoch'],
            after_scheduler=scheduler
        )
    result = OrderedDict()
    epoch_start = 1
    # Resume from checkpoint if one exists at save_path.
    if save_path and os.path.exists(save_path):
        logger.info('%s file found. loading...' % save_path)
        data = torch.load(save_path, map_location='cpu')
        if 'model' in data or 'state_dict' in data:
            key = 'model' if 'model' in data else 'state_dict'
            logger.info('checkpoint epoch@%d' % data['epoch'])
            if C.get().get('load_main_model', False):
                # model.load_state_dict(data[key])
                # Strip/add the DDP 'module.' prefix to match the current wrapper.
                if not isinstance(model, DDP):
                    model.load_state_dict({k.replace('module.', ''): v for k, v in data[key].items()})
                else:
                    model.load_state_dict({k if 'module.' in k else 'module.'+k: v for k, v in data[key].items()})
            optimizer.load_state_dict(data['optimizer'])
            if data['epoch'] < C.get()['epoch']:
                epoch_start = data['epoch'] + 1
            else:
                # Training already finished in the checkpoint; evaluate only.
                only_eval = True
        else:
            #model.load_state_dict({k: v for k, v in data.items()})
            raise ValueError(f"Wrong format of data in save path: {save_path}.")
        del data
    else:
        logger.info('"%s" file not found. skip to pretrain weights...' % save_path)
        if only_eval:
            logger.warning('model checkpoint not found. only-evaluation mode is off.')
            only_eval = False
    if only_eval:
        logger.info('evaluation only+')
        model.eval()
        rs = dict()
        with torch.no_grad():
            rs['train'] = run_epoch(rank, worldsize, model, trainloader, criterion, None, desc_default='train', epoch=0, writer=writers[0])
            #rs['valid'] = run_epoch(rank, worldsize, model, validloader, criterion, None, desc_default='valid', epoch=0, writer=writers[1])
            rs['test'] = run_epoch(rank, worldsize, model, testloader_, criterion, None, desc_default='*test', epoch=0, writer=writers[2])
        for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'test']):
            if setname not in rs:
                continue
            result['%s_%s' % (key, setname)] = rs[setname][key]
        result['epoch'] = 0
        return result
    # train loop
    best_top1 = 0
    for epoch in range(epoch_start, max_epoch + 1):
        if worldsize > 1:
            # Reshuffle shards per epoch for DistributedSampler.
            trainsampler.set_epoch(epoch)
        model.train()
        rs = dict()
        rs['train'] = run_epoch(rank, worldsize,model, trainloader, criterion, optimizer, desc_default='train', epoch=epoch, writer=writers[0], verbose=True, scheduler=scheduler, sample_pairing_loader=testtrainloader_)
        model.eval()
        if math.isnan(rs['train']['loss']):
            raise Exception('train loss is NaN.')
        # Evaluate periodically and always during the final 5 epochs.
        if epoch % C.get().get('test_interval', 20) == 0 or epoch > max_epoch-5:
            with torch.no_grad():
                if C.get().get('compute_testtrain', False):
                    rs['testtrain'] = run_epoch(rank, worldsize, model, testtrainloader_, criterion, None, desc_default='testtrain', epoch=epoch, writer=writers[3], verbose=True)
                rs['test'] = run_epoch(rank, worldsize, model, testloader_, criterion, None, desc_default='*test', epoch=epoch, writer=writers[2], verbose=True)
            if metric == 'last' or rs[metric]['top1'] > best_top1:
                if metric != 'last':
                    best_top1 = rs[metric]['top1']
                for key, setname in itertools.product(['loss', 'top1', 'top5'], ['train', 'test', 'testtrain']):
                    if setname in rs and key in rs[setname]:
                        result['%s_%s' % (key, setname)] = rs[setname][key]
                result['epoch'] = epoch
                #writers[1].add_scalar('valid_top1/best', rs['valid']['top1'], epoch)
                writers[2].add_scalar('test_top1/best', rs['test']['top1'], epoch)
                reporter(
                    loss_valid=rs['test']['loss'], top1_valid=rs['test']['top1'],
                    loss_test=rs['test']['loss'], top1_test=rs['test']['top1']
                )
                # save checkpoint
                if save_path and C.get().get('save_model', True) and (worldsize <= 1 or torch.distributed.get_rank() == 0):
                    logger.info('save model@%d to %s' % (epoch, save_path))
                    # Written twice: once to the fixed path, once to an
                    # epoch/accuracy-tagged path for bookkeeping.
                    torch.save({
                        'epoch': epoch,
                        'log': {
                            'train': rs['train'].get_dict(),
                            'test': rs['test'].get_dict(),
                        },
                        'optimizer': optimizer.state_dict(),
                        'model': model.state_dict()
                    }, save_path)
                    torch.save({
                        'epoch': epoch,
                        'log': {
                            'train': rs['train'].get_dict(),
                            'test': rs['test'].get_dict(),
                        },
                        'optimizer': optimizer.state_dict(),
                        'model': model.state_dict()
                    }, save_path.replace('.pth', '_e%d_top1_%.3f_%.3f' % (epoch, rs['train']['top1'], rs['test']['top1']) + '.pth'))
        early_finish_epoch = C.get().get('early_finish_epoch', None)
        if early_finish_epoch == epoch:
            break
    del model
    return result
def setup(global_rank, local_rank, world_size, port_suffix):
    """Initialize the NCCL process group and return (rank, world_size).

    Three rendezvous modes:
    - port_suffix given, no master_addr in config: localhost + derived port.
    - port_suffix given, master_addr in config: explicit tcp:// init URL.
    - port_suffix None: env:// (launched via torch.distributed.launch).
    """
    torch.cuda.set_device(local_rank)
    if port_suffix is not None:
        if C.get().get('master_addr', None) is None:
            os.environ['MASTER_ADDR'] = 'localhost'
            # Port is '12' + random suffix chosen by the parent process.
            os.environ['MASTER_PORT'] = f'12{port_suffix}'
            # initialize the process group
            dist.init_process_group("nccl", rank=global_rank, world_size=world_size)
        else:
            assert C.get().get('master_port', None) is not None
            # os.environ['MASTER_ADDR'] = C.get()['master_addr']
            # os.environ['MASTER_PORT'] = '12{}'.format(C.get()['master_port'])
            master_url = 'tcp://{}:{}'.format(C.get()['master_addr'], C.get()['master_port'])
            # initialize the process group
            dist.init_process_group("nccl", rank=global_rank, world_size=world_size, init_method=master_url)
        return global_rank, world_size
    else:
        # Rank/world size are supplied through the environment by the launcher.
        dist.init_process_group(backend='NCCL', init_method='env://')
        return torch.distributed.get_rank(), torch.distributed.get_world_size()
def cleanup():
    """Tear down the torch.distributed process group created by setup()."""
    dist.destroy_process_group()
def parse_args():
    """Build the command-line interface for an evaluation run and parse it."""
    parser = ConfigArgumentParser(conflict_handler='resolve')
    # Table-driven registration keeps flag definitions in one place.
    option_table = (
        ('--tag', dict(type=str, default='')),
        ('--dataroot', dict(type=str, default='/data/private/pretrainedmodels',
                            help='torchvision data folder')),
        ('--save', dict(type=str, default='')),
        ('--cv-ratio', dict(type=float, default=0.0)),
        ('--cv', dict(type=int, default=0)),
        ('--only-eval', dict(action='store_true')),
        ('--local_rank', dict(default=None, type=int)),
    )
    for flag, options in option_table:
        parser.add_argument(flag, **options)
    return parser.parse_args()
def spawn_process(global_rank, worldsize, port_suffix, args, config_path=None, communicate_results_with_queue=None, local_rank=None, node_id=None):
    """Per-process entry point (also usable single-process with worldsize 0).

    Loads the config, joins the distributed group when worldsize != 0, runs
    train_and_eval, and on global rank 0 writes the final test top-1 into
    ``communicate_results_with_queue`` (a shared multiprocessing Value).
    """
    if config_path is not None:
        C(config_path)
    if local_rank is None:
        if C.get().get('num_nodes', 1) == 1:
            local_rank = global_rank
        else:
            # Multi-node: the spawned index is the local rank; derive the
            # global rank from the node id.
            local_rank = global_rank
            global_rank = local_rank + n_gpus_per_node * node_id
    # NOTE(review): this print reads C['master_addr']/C['master_port']
    # unconditionally - presumably those keys always exist in multi-process
    # configs; verify for single-node runs.
    print('local_rank={}, global_rank={}, world_size={}, Master={}, 12{}'.format(local_rank, global_rank, worldsize, C.get()['master_addr'], C.get()['master_port']))
    started_with_spawn = worldsize is not None and worldsize > 0
    if worldsize != 0:
        global_rank, worldsize = setup(global_rank, local_rank, worldsize, port_suffix)
        print('dist info', local_rank,global_rank,worldsize)
    #communicate_results_with_queue.value = 1.
    #return
    C.get()['started_with_spawn'] = started_with_spawn
    if worldsize:
        assert worldsize == C.get()['gpus'], f"Did not specify the number of GPUs in Config with which it was started: {worldsize} vs {C.get()['gpus']}"
    else:
        assert 'gpus' not in C.get() or C.get()['gpus'] == 1
    assert (args.only_eval and args.save) or not args.only_eval, 'checkpoint path not provided in evaluation mode.'
    if not args.only_eval:
        if args.save:
            logger.info('checkpoint will be saved at %s' % args.save)
        else:
            logger.warning('Provide --save argument to save the checkpoint. Without it, training result will not be saved!')
    #if args.save:
        #add_filehandler(logger, args.save.replace('.pth', '.log'))
    #logger.info(json.dumps(C.get().conf, indent=4))
    torch.backends.cudnn.benchmark = True
    if 'seed' in C.get():
        # Seed every RNG we use for (best-effort) reproducibility.
        seed = C.get()['seed']
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        random.seed(seed)
        np.random.seed(seed)
        #torch.backends.cudnn.benchmark = False
    import time
    t = time.time()
    result = train_and_eval(local_rank, worldsize, args.tag, args.dataroot, test_ratio=args.cv_ratio, cv_fold=args.cv, save_path=args.save, only_eval=args.only_eval, metric='last')
    elapsed = time.time() - t
    print('done')
    logger.info(f'done on rank {global_rank}.')
    logger.info('model: %s' % C.get()['model'])
    logger.info('augmentation: %s' % C.get()['aug'])
    logger.info('\n' + json.dumps(result, indent=4))
    logger.info('elapsed time: %.3f Hours' % (elapsed / 3600.))
    logger.info('top1 error in testset: %.4f' % (1. - result['top1_test']))
    logger.info(args.save)
    if worldsize:
        cleanup()
    if global_rank == 0 and communicate_results_with_queue is not None:
        #communicate_results_with_queue.put([result])
        communicate_results_with_queue.value = result['top1_test']
@dataclass
class Args:
    """Plain argument container mirroring parse_args() for programmatic runs."""
    tag: str = ''
    dataroot: str = None  # NOTE(review): annotated str but defaults to None (Optional[str] intended)
    save: str = ''  # checkpoint path; '' disables saving
    cv_ratio: float = 0.
    cv: int = 0
    only_eval: bool = False
    local_rank: None = None  # NOTE(review): annotation should arguably be Optional[int]
def run_from_py(dataroot, config_dict, save=''):
    """Run training/evaluation programmatically from a config dictionary.

    Dumps ``config_dict`` to a temporary YAML file, spawns one process per
    visible GPU (or runs inline when only one GPU is present) and returns
    the test top-1 communicated back via a shared multiprocessing Value.
    """
    args = Args(dataroot=dataroot, save=save)
    with tempfile.NamedTemporaryFile(mode='w+') as f, tempfile.NamedTemporaryFile() as result_file:
        path = f.name
        yaml.dump(config_dict, f)
        world_size = torch.cuda.device_count()
        port_suffix = str(random.randint(100, 999))
        #result_queue = mp.get_context('spawn').Queue()
        # Shared double used to pass the scalar result across processes.
        result_queue = mp.get_context('spawn').Value('d',.0)
        if world_size > 1:
            outcome = mp.spawn(spawn_process,
                               args=(world_size, port_suffix, args, path, result_queue),
                               nprocs=world_size,
                               join=True)
        else:
            outcome = spawn_process(0, 0, port_suffix, args, path, result_queue)
        #result = result_queue.get()[0]
        result = result_queue.value
    return result
n_gpus_per_node = torch.cuda.device_count()


if __name__ == '__main__':
    # Pre-parse only --local_rank so we can tell whether we were launched by
    # torch.distributed.launch (local_rank set) or must spawn workers ourselves.
    pre_parser = ArgumentParser()
    pre_parser.add_argument('--local_rank', default=None, type=int)
    args, _ = pre_parser.parse_known_args()
    parsed_args = parse_args()
    # generate CSV file:
    if C.get().get('save_to_csv', False):
        if not os.path.isfile('eval_performance.csv'):
            with open('eval_performance.csv', mode='w') as csv_file:
                fieldnames = ['decay', 'warmup_multiplier', 'epoch', 'top1_test', 'top1_train', 'top5_test', 'top5_train']
                writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
                writer.writeheader()
    if args.local_rank is None:
        print("Spawning processes")
        world_size = n_gpus_per_node * C.get().get('num_nodes', 1)
        assert world_size == C.get()['gpus'], f"Did not specify the number of GPUs in Config with which it was started: {world_size} vs {C.get()['gpus']}"
        port_suffix = str(random.randint(10,99))
        if world_size > 1:
            if C.get().get('num_nodes', 1) == 1:
                outcome = mp.spawn(spawn_process,
                                   args=(world_size,port_suffix,parsed_args, parsed_args.config),
                                   nprocs=world_size,
                                   join=True)
            else:
                port_suffix = C.get()['master_port']
                outcome = mp.spawn(spawn_process,
                                   args=(world_size, port_suffix, parsed_args, parsed_args.config, None, None, C.get()['node_id']),
                                   nprocs=n_gpus_per_node,
                                   join=True)
        else:
            spawn_process(0, 0, None, parsed_args)
        # BUGFIX: pickle files must be opened in binary mode ('rb'); text mode
        # ('r') makes pickle.load fail with a decode/type error.
        # NOTE(review): nothing in this file writes this pickle - confirm the
        # producer exists elsewhere.
        with open(f'/tmp/samshpopt/training_with_portsuffix_{port_suffix}.pkl', 'rb') as f:
            result = pickle.load(f)
    else:
        # Launched by torch.distributed.launch: env:// rendezvous inside setup().
        spawn_process(None, -1, None, parsed_args, local_rank=args.local_rank)
DeepAA | DeepAA-master/DeepAA_evaluate/imagenet.py | from torchvision.datasets.imagenet import *
class ImageNet(ImageFolder):
    """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.
    Copied from torchvision, besides warning below.
    Args:
        root (string): Root directory of the ImageNet Dataset.
        split (string, optional): The dataset split, supports ``train``, or ``val``.
        transform (callable, optional): A function/transform that  takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
     Attributes:
        classes (list): List of the class name tuples.
        class_to_idx (dict): Dict with items (class_name, class_index).
        wnids (list): List of the WordNet IDs.
        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
        imgs (list): List of (image path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    WARN::
        This is the same ImageNet class as in torchvision.datasets.imagenet, but it has the `ignore_archive` argument.
        This allows us to only copy the unzipped files before training.
    """
    def __init__(self, root, split='train', download=None, ignore_archive=False, **kwargs):
        # The download flag is kept only for API compatibility; the dataset
        # can no longer be downloaded automatically.
        if download is True:
            msg = ("The dataset is no longer publicly accessible. You need to "
                   "download the archives externally and place them in the root "
                   "directory.")
            raise RuntimeError(msg)
        elif download is False:
            msg = ("The use of the download flag is deprecated, since the dataset "
                   "is no longer publicly accessible.")
            warnings.warn(msg, RuntimeWarning)
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))
        # ignore_archive=True skips archive extraction and assumes the split
        # folder is already populated.
        if not ignore_archive:
            self.parse_archives()
        wnid_to_classes = load_meta_file(self.root)[0]
        super(ImageNet, self).__init__(self.split_folder, **kwargs)
        self.root = root
        # Remap folder-name classes (wnids) to human-readable class tuples.
        self.wnids = self.classes
        self.wnid_to_idx = self.class_to_idx
        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
        self.class_to_idx = {cls: idx
                             for idx, clss in enumerate(self.classes)
                             for cls in clss}
    def parse_archives(self):
        """Extract devkit/train/val archives if the extracted data is missing."""
        if not check_integrity(os.path.join(self.root, META_FILE)):
            parse_devkit_archive(self.root)
        if not os.path.isdir(self.split_folder):
            if self.split == 'train':
                parse_train_archive(self.root)
            elif self.split == 'val':
                parse_val_archive(self.root)
    @property
    def split_folder(self):
        # e.g. <root>/train or <root>/val
        return os.path.join(self.root, self.split)
    def extra_repr(self):
        return "Split: {split}".format(**self.__dict__)
DeepAA | DeepAA-master/DeepAA_evaluate/networks/resnet.py | # Original code: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
# gamma is initialized ot 0 in the last BN of each residual block
import torch.nn as nn
import math
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with 1-pixel padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet v1).

    The second BN's gamma is zero-initialized so the block starts out as an
    identity mapping (see the file header comment).
    """
    expansion = 1  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        nn.init.zeros_(self.bn2.weight)  # zero-gamma init
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Project the identity branch when shape/stride changes.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Three-conv (1x1 -> 3x3 -> 1x1) residual bottleneck block.

    Output width is ``planes * 4``; the last BN's gamma is zero-initialized
    so the block starts out as an identity mapping.
    """
    expansion = 4  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * Bottleneck.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * Bottleneck.expansion)
        nn.init.zeros_(self.bn3.weight)  # zero-gamma init
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Project the identity branch when shape/stride changes.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet for CIFAR-style (3-stage) or ImageNet-style (4-stage) inputs.

    Args:
        dataset: any name starting with 'cifar' selects the 3-stage variant;
            'imagenet' selects the 4-stage variant.
        depth: network depth; for CIFAR it must satisfy depth = 6n+2
            (BasicBlock) or 9n+2 (Bottleneck); for ImageNet it must be one of
            18/34/50/101/152/200.
        num_classes: size of the final linear classifier.
        bottleneck: CIFAR only - use Bottleneck blocks instead of BasicBlock.
    """
    def __init__(self, dataset, depth, num_classes, bottleneck=False):
        super(ResNet, self).__init__()
        self.dataset = dataset
        if self.dataset.startswith('cifar'):
            self.inplanes = 16
            print(bottleneck)
            if bottleneck == True:
                n = int((depth - 2) / 9)
                block = Bottleneck
            else:
                n = int((depth - 2) / 6)
                block = BasicBlock
            self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(self.inplanes)
            self.relu = nn.ReLU(inplace=True)
            self.layer1 = self._make_layer(block, 16, n)
            self.layer2 = self._make_layer(block, 32, n, stride=2)
            self.layer3 = self._make_layer(block, 64, n, stride=2)
            # self.avgpool = nn.AvgPool2d(8)
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(64 * block.expansion, num_classes)
        elif dataset == 'imagenet':
            blocks ={18: BasicBlock, 34: BasicBlock, 50: Bottleneck, 101: Bottleneck, 152: Bottleneck, 200: Bottleneck}
            layers ={18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]}
            assert layers[depth], 'invalid detph for ResNet (depth should be one of 18, 34, 50, 101, 152, and 200)'
            self.inplanes = 64
            self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
            self.bn1 = nn.BatchNorm2d(64)
            self.relu = nn.ReLU(inplace=True)
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            self.layer1 = self._make_layer(blocks[depth], 64, layers[depth][0])
            self.layer2 = self._make_layer(blocks[depth], 128, layers[depth][1], stride=2)
            self.layer3 = self._make_layer(blocks[depth], 256, layers[depth][2], stride=2)
            self.layer4 = self._make_layer(blocks[depth], 512, layers[depth][3], stride=2)
            # self.avgpool = nn.AvgPool2d(7)
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(512 * blocks[depth].expansion, num_classes)
        # He-style init for convs, unit gamma / zero beta for BN. Note this
        # runs after _make_layer, so it overwrites the blocks' zero-gamma init.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        if self.dataset == 'cifar10' or self.dataset == 'cifar100':
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.avgpool(x)
            x = x.view(x.size(0), -1)
            x = self.fc(x)
        elif self.dataset == 'imagenet':
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.maxpool(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
            x = self.avgpool(x)
            x = x.view(x.size(0), -1)
            x = self.fc(x)
        return x
| 6,492 | 34.288043 | 135 | py |
DeepAA | DeepAA-master/DeepAA_evaluate/networks/mlp.py | import torch
from torch import nn
def MLP(D_out, in_dims, adaptive_dropouter_creator):
    """Build a 3-layer tanh MLP: flatten -> 300 -> 100 -> D_out.

    When ``adaptive_dropouter_creator`` is given, an adaptive dropout module
    of width 100 is inserted after the second linear layer and exposed on
    the returned model via ``model.adaptive_dropouters``.
    """
    print('adaptive dropouter', adaptive_dropouter_creator)
    flat_dim = 1
    for dim in in_dims:
        flat_dim *= dim
    if adaptive_dropouter_creator is not None:
        ada_dropper = adaptive_dropouter_creator(100)
        dropout_layer = ada_dropper
        droppers = [ada_dropper]
    else:
        dropout_layer = nn.Identity()
        droppers = []
    model = nn.Sequential(
        nn.Flatten(),
        nn.Linear(flat_dim, 300),
        nn.Tanh(),
        nn.Linear(300, 100),
        dropout_layer,
        nn.Tanh(),
        nn.Linear(100, D_out),
    )
    model.adaptive_dropouters = droppers
    return model
| 616 | 28.380952 | 101 | py |
DeepAA | DeepAA-master/DeepAA_evaluate/networks/__init__.py | import torch
from torch import nn
from torch.nn import DataParallel
import torch.backends.cudnn as cudnn
# from torchvision import models
from DeepAA_evaluate.networks.resnet import ResNet
from DeepAA_evaluate.networks.shakeshake.shake_resnet import ShakeResNet
from DeepAA_evaluate.networks.wideresnet import WideResNet
from DeepAA_evaluate.networks.shakeshake.shake_resnext import ShakeResNeXt
from DeepAA_evaluate.networks.convnet import SeqConvNet
from DeepAA_evaluate.networks.mlp import MLP
from DeepAA_evaluate.common import apply_weightnorm
# example usage get_model(
def get_model(conf, bs, num_class=10, writer=None):
    """Instantiate a network from a model config dict.

    Args:
        conf: model config; ``conf['type']`` selects the architecture, other
            keys (dropout, groupnorm, weight_norm, ...) tune it.
        bs: batch size (currently unused here).
        num_class: number of output classes.
        writer: summary writer (currently unused here).
    """
    name = conf['type']
    # Placeholder adaptive-dropout creators (disabled).
    ad_creators = (None,None)
    if name == 'resnet50':
        model = ResNet(dataset='imagenet', depth=50, num_classes=num_class, bottleneck=True)
    elif name == 'resnet200':
        model = ResNet(dataset='imagenet', depth=200, num_classes=num_class, bottleneck=True)
    elif name == 'resnet18':
        model = ResNet(dataset='imagenet', depth=18, num_classes=num_class, bottleneck=False)
    elif name == 'wresnet40_2':
        model = WideResNet(40, 2, dropout_rate=conf.get('dropout',0.0), num_classes=num_class, adaptive_dropouter_creator=ad_creators[0],adaptive_conv_dropouter_creator=ad_creators[1], groupnorm=conf.get('groupnorm', False), examplewise_bn=conf.get('examplewise_bn', False), virtual_bn=conf.get('virtual_bn', False))
    elif name == 'wresnet28_10':
        model = WideResNet(28, 10, dropout_rate=conf.get('dropout',0.0), num_classes=num_class, adaptive_dropouter_creator=ad_creators[0],adaptive_conv_dropouter_creator=ad_creators[1], groupnorm=conf.get('groupnorm',False), examplewise_bn=conf.get('examplewise_bn', False), virtual_bn=conf.get('virtual_bn', False))
    elif name == 'wresnet28_2':
        model = WideResNet(28, 2, dropout_rate=conf.get('dropout', 0.0), num_classes=num_class,
                           adaptive_dropouter_creator=ad_creators[0], adaptive_conv_dropouter_creator=ad_creators[1],
                           groupnorm=conf.get('groupnorm', False), examplewise_bn=conf.get('examplewise_bn', False),
                           virtual_bn=conf.get('virtual_bn', False))
    elif name == 'miniconvnet':
        model = SeqConvNet(num_class,adaptive_dropout_creator=ad_creators[0],batch_norm=False)
    elif name == 'mlp':
        model = MLP(num_class, (3,32,32), adaptive_dropouter_creator=ad_creators[0])
    elif name == 'shakeshake26_2x96d':
        model = ShakeResNet(26, 96, num_class)
    elif name == 'shakeshake26_2x112d':
        model = ShakeResNet(26, 112, num_class)
    elif name == 'shakeshake26_2x96d_next':
        model = ShakeResNeXt(26, 96, 4, num_class)
    else:
        raise NameError('no model named, %s' % name)
    if conf.get('weight_norm', False):
        print('Using weight norm.')
        apply_weightnorm(model)
    #model = model.cuda()
    #model = DataParallel(model)
    # Device placement / DDP wrapping is done by the caller.
    cudnn.benchmark = True
    return model
def num_class(dataset):
    """Return the number of target classes for a known dataset name.

    Raises:
        KeyError: if ``dataset`` is not a recognized name.
    """
    class_counts = {
        'cifar10': 10,
        'noised_cifar10': 10,
        'targetnoised_cifar10': 10,
        'reduced_cifar10': 10,
        'cifar10.1': 10,
        'pre_transform_cifar10': 10,
        'cifar100': 100,
        'pre_transform_cifar100': 100,
        'fiftyexample_cifar100': 100,
        'tenclass_cifar100': 10,
        'svhn': 10,
        'svhncore': 10,
        'reduced_svhn': 10,
        'imagenet': 1000,
        'smallwidth_imagenet': 1000,
        'ohl_pipeline_imagenet': 1000,
        'reduced_imagenet': 120,
    }
    return class_counts[dataset]
| 3,545 | 42.243902 | 316 | py |
DeepAA | DeepAA-master/DeepAA_evaluate/networks/convnet.py | import torch
from torch import nn
class SeqConvNet(nn.Module):
    """Small LeNet-style CNN: two conv(5x5)+ReLU+maxpool blocks, two FC
    layers, with optional fixed dropout after every block, an optional
    adaptive dropout before the (optional) BatchNorm, then the classifier."""
    def __init__(self,D_out,fixed_dropout=None,in_channels=3,channels=(64,64),h_dims=(200,100),adaptive_dropout_creator=None,batch_norm=False):
        super().__init__()
        print("Using SeqConvNet")
        assert len(channels) == 2 == len(h_dims)
        # Layer factories so each use creates a fresh module instance.
        pool = lambda: nn.MaxPool2d(2,2)
        dropout = lambda: torch.nn.Dropout(p=fixed_dropout)
        dropout_li = lambda: ([] if fixed_dropout is None else [dropout()])
        relu = lambda: torch.nn.ReLU(inplace=False)
        flatten = lambda l: [item for sublist in l for item in sublist]
        convs = [nn.Conv2d(in_channels, channels[0], 5),nn.Conv2d(channels[0], channels[1], 5)]
        # The 5*5 spatial size matches 32x32 inputs: 32 ->conv5-> 28 ->pool-> 14 ->conv5-> 10 ->pool-> 5.
        fcs = [nn.Linear(channels[1] * 5 * 5, h_dims[0]),nn.Linear(h_dims[0], h_dims[1])]
        self.final_fc = nn.Linear(h_dims[1], D_out)
        self.conv_blocks = nn.Sequential(*flatten([[conv,relu(),pool()] + dropout_li() for conv in convs]))
        self.bn = nn.BatchNorm1d(h_dims[1], momentum=.9) if batch_norm else nn.Identity()
        self.fc_blocks = nn.Sequential(*flatten([[fc,relu()] + dropout_li() for fc in fcs]))
        self.adaptive_dropouters = [adaptive_dropout_creator(h_dims[1])] if adaptive_dropout_creator is not None else []
    def forward(self, x):
        x = self.conv_blocks(x)
        x = torch.nn.Flatten()(x)
        x = self.fc_blocks(x)
        if self.adaptive_dropouters:
            x = self.adaptive_dropouters[0](x)
        x = self.bn(x)
        x = self.final_fc(x)
        return x
| 1,546 | 47.34375 | 143 | py |
DeepAA | DeepAA-master/DeepAA_evaluate/networks/wideresnet.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import numpy as np
_bn_momentum = 0.1
CpG = 8
class ExampleWiseBatchNorm2d(nn.BatchNorm2d):
    """BatchNorm2d variant: in training, the normalization *value* uses batch
    statistics, but a straight-through construction makes gradients flow
    through each example's own per-image statistics instead
    (``local + (batch - local).detach()``). Running stats and eval behavior
    match standard BN."""
    def __init__(self, num_features, eps=1e-5, momentum=0.1,
                 affine=True, track_running_stats=True):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)
    def forward(self, input):
        self._check_input_dim(input)
        exponential_average_factor = 0.0
        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
        # calculate running estimates
        if self.training:
            mean = input.mean([0, 2, 3])
            # use biased var in train
            var = input.var([0, 2, 3], unbiased=False)
            n = input.numel() / input.size(1)
            with torch.no_grad():
                self.running_mean = exponential_average_factor * mean\
                                    + (1 - exponential_average_factor) * self.running_mean
                # update running_var with unbiased var
                self.running_var = exponential_average_factor * var * n / (n - 1)\
                                   + (1 - exponential_average_factor) * self.running_var
            # Straight-through: value == batch stats, gradient == per-example stats.
            local_means = input.mean([2, 3])
            local_global_means = local_means + (mean.unsqueeze(0) - local_means).detach()
            local_vars = input.var([2, 3], unbiased=False)
            local_global_vars = local_vars + (var.unsqueeze(0) - local_vars).detach()
            input = (input - local_global_means[:,:,None,None]) / (torch.sqrt(local_global_vars[:,:,None,None] + self.eps))
        else:
            # Eval: plain normalization with tracked running statistics.
            mean = self.running_mean
            var = self.running_var
            input = (input - mean[None, :, None, None]) / (torch.sqrt(var[None, :, None, None] + self.eps))
        if self.affine:
            input = input * self.weight[None, :, None, None] + self.bias[None, :, None, None]
        return input
class VirtualBatchNorm2d(nn.BatchNorm2d):
    """BatchNorm2d variant that normalizes with *detached* batch statistics
    during training, so no gradient flows through the mean/var estimates
    (only through the input itself and the affine parameters). Running stats
    and eval behavior match standard BN."""
    def __init__(self, num_features, eps=1e-5, momentum=0.1,
                 affine=True, track_running_stats=True):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)
    def forward(self, input):
        self._check_input_dim(input)
        exponential_average_factor = 0.0
        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
        # calculate running estimates
        if self.training:
            mean = input.mean([0, 2, 3])
            # use biased var in train
            var = input.var([0, 2, 3], unbiased=False)
            n = input.numel() / input.size(1)
            with torch.no_grad():
                self.running_mean = exponential_average_factor * mean \
                                    + (1 - exponential_average_factor) * self.running_mean
                # update running_var with unbiased var
                self.running_var = exponential_average_factor * var * n / (n - 1) \
                                   + (1 - exponential_average_factor) * self.running_var
            # Detach stats: they act as constants in the backward pass.
            input = (input - mean.detach()[None, :, None, None]) / (torch.sqrt(var.detach()[None, :, None, None] + self.eps))
        else:
            mean = self.running_mean
            var = self.running_var
            input = (input - mean[None, :, None, None]) / (torch.sqrt(var[None, :, None, None] + self.eps))
        if self.affine:
            input = input * self.weight[None, :, None, None] + self.bias[None, :, None, None]
        return input
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with 1-pixel padding and a bias term."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
def conv_init(m):
    """Initializer for ``model.apply``: Xavier-uniform weights and zero bias
    for Conv layers, unit weight / zero bias for BatchNorm layers."""
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif 'BatchNorm' in layer_name:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class WideBasic(nn.Module):
    """Wide-ResNet pre-activation block: norm-ReLU-conv twice, with dropout
    (fixed or adaptive) between the convolutions and a 1x1 conv shortcut
    whenever the stride or channel count changes."""
    def __init__(self, in_planes, planes, dropout_rate, norm_creator, stride=1, adaptive_dropouter_creator=None):
        super(WideBasic, self).__init__()
        self.bn1 = norm_creator(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        if adaptive_dropouter_creator is None:
            self.dropout = nn.Dropout(p=dropout_rate)
        else:
            # Adaptive dropout parameterized as (channels, kernel, stride, padding).
            self.dropout = adaptive_dropouter_creator(planes, 3, stride, 1)
        self.bn2 = norm_creator(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            # Projection shortcut to match the residual branch's shape.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
            )
    def forward(self, x):
        out = self.dropout(self.conv1(F.relu(self.bn1(x))))
        out = self.conv2(F.relu(self.bn2(out)))
        out += self.shortcut(x)
        return out
class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-k) with a pluggable normalisation layer and
    optional adaptive dropout modules on the first block of each stage and on
    the final features."""

    def __init__(self, depth, widen_factor, dropout_rate, num_classes, adaptive_dropouter_creator, adaptive_conv_dropouter_creator, groupnorm, examplewise_bn, virtual_bn):
        super(WideResNet, self).__init__()
        self.in_planes = 16
        self.adaptive_conv_dropouter_creator = adaptive_conv_dropouter_creator
        assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        # At most one alternative normalisation may be selected.
        assert sum([groupnorm, examplewise_bn, virtual_bn]) <= 1
        blocks_per_stage = int((depth - 4) / 6)
        k = widen_factor
        widths = [16, 16 * k, 32 * k, 64 * k]
        # Plain list (not nn.ModuleList): entries may be plain callables.
        self.adaptive_dropouters = []
        if groupnorm:
            print('Uses group norm.')
            self.norm_creator = lambda c: nn.GroupNorm(max(c // CpG, 1), c)
        elif examplewise_bn:
            print("Uses Example Wise BN")
            self.norm_creator = lambda c: ExampleWiseBatchNorm2d(c, momentum=_bn_momentum)
        elif virtual_bn:
            print("Uses Virtual BN")
            self.norm_creator = lambda c: VirtualBatchNorm2d(c, momentum=_bn_momentum)
        else:
            self.norm_creator = lambda c: nn.BatchNorm2d(c, momentum=_bn_momentum)
        self.conv1 = conv3x3(3, widths[0])
        self.layer1 = self._wide_layer(WideBasic, widths[1], blocks_per_stage, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(WideBasic, widths[2], blocks_per_stage, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(WideBasic, widths[3], blocks_per_stage, dropout_rate, stride=2)
        self.bn1 = self.norm_creator(widths[3])
        self.linear = nn.Linear(widths[3], num_classes)
        # Dropout applied to the pooled features; identity when no creator given.
        if adaptive_dropouter_creator is not None:
            last_dropout = adaptive_dropouter_creator(widths[3])
        else:
            last_dropout = lambda x: x
        self.adaptive_dropouters.append(last_dropout)

    def to(self, *args, **kwargs):
        # Also move the adaptive dropouters, which are tracked outside the
        # module hierarchy.
        super().to(*args, **kwargs)
        print(*args)
        for dropouter in self.adaptive_dropouters:
            if hasattr(dropouter, 'to'):
                dropouter.to(*args, **kwargs)
        return self

    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        # First block downsamples (and may get the adaptive conv dropouter);
        # the rest keep stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        blocks = []
        for idx, cur_stride in enumerate(strides):
            creator = self.adaptive_conv_dropouter_creator if idx == 0 else None
            new_block = block(self.in_planes, planes, dropout_rate, self.norm_creator, cur_stride, adaptive_dropouter_creator=creator)
            blocks.append(new_block)
            if creator is not None:
                self.adaptive_dropouters.append(new_block.dropout)
            self.in_planes = planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn1(out))
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = out.view(out.size(0), -1)
        out = self.adaptive_dropouters[-1](out)
        return self.linear(out)
| 8,885 | 39.949309 | 171 | py |
DeepAA | DeepAA-master/DeepAA_evaluate/networks/shakeshake/shake_resnet.py | # -*- coding: utf-8 -*-
import math
import torch.nn as nn
import torch.nn.functional as F
from DeepAA_evaluate.networks.shakeshake.shakeshake import ShakeShake
from DeepAA_evaluate.networks.shakeshake.shakeshake import Shortcut
class ShakeBlock(nn.Module):
    """Two-branch residual block whose branch outputs are mixed by
    shake-shake regularization before the shortcut is added back."""

    def __init__(self, in_ch, out_ch, stride=1):
        super(ShakeBlock, self).__init__()
        self.equal_io = in_ch == out_ch
        # Identity when input/output widths match, otherwise a learned
        # projection shortcut.
        self.shortcut = (lambda x: x) if self.equal_io else Shortcut(in_ch, out_ch, stride=stride)
        self.branch1 = self._make_branch(in_ch, out_ch, stride)
        self.branch2 = self._make_branch(in_ch, out_ch, stride)

    def forward(self, x):
        mixed = ShakeShake.apply(self.branch1(x), self.branch2(x), self.training)
        return mixed + self.shortcut(x)

    def _make_branch(self, in_ch, out_ch, stride=1):
        # ReLU-Conv-BN repeated twice; only the first conv may downsample.
        layers = [
            nn.ReLU(inplace=False),
            nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=stride, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(out_ch, out_ch, 3, padding=1, stride=1, bias=False),
            nn.BatchNorm2d(out_ch),
        ]
        return nn.Sequential(*layers)
class ShakeResNet(nn.Module):
    """Shake-shake regularized ResNet for small (e.g. CIFAR 32x32) inputs.

    Args:
        depth: total network depth; expected to satisfy depth = 6 * n + 2.
        w_base: channel width of the first residual stage.
        label: number of output classes.
    """

    def __init__(self, depth, w_base, label):
        super(ShakeResNet, self).__init__()
        # Fix: use integer division, consistent with ShakeResNeXt's
        # `(depth - 2) // 9`; the original float division only worked because
        # _make_layer casts with int().
        n_units = (depth - 2) // 6
        in_chs = [16, w_base, w_base * 2, w_base * 4]
        self.in_chs = in_chs
        self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
        self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
        self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
        self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
        self.fc_out = nn.Linear(in_chs[3], label)
        # Initialize parameters: He-style normal for convs, standard BN init,
        # zero bias for the classifier.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        h = self.c_in(x)
        h = self.layer1(h)
        h = self.layer2(h)
        h = self.layer3(h)
        h = F.relu(h)
        # Generalization: adaptive pooling replaces the fixed 8x8 average pool,
        # so inputs other than 32x32 are handled correctly (identical result
        # for 32x32 inputs, where the final feature map is exactly 8x8).
        h = F.adaptive_avg_pool2d(h, (1, 1))
        h = h.view(-1, self.in_chs[3])
        h = self.fc_out(h)
        return h

    def _make_layer(self, n_units, in_ch, out_ch, stride=1):
        # Only the first block of a stage downsamples / widens.
        layers = []
        for _ in range(int(n_units)):
            layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
            in_ch, stride = out_ch, 1
        return nn.Sequential(*layers)
| 2,927 | 32.655172 | 89 | py |
DeepAA | DeepAA-master/DeepAA_evaluate/networks/shakeshake/shake_resnext.py | # -*- coding: utf-8 -*-
import math
import torch.nn as nn
import torch.nn.functional as F
from DeepAA_evaluate.networks.shakeshake.shakeshake import ShakeShake
from DeepAA_evaluate.networks.shakeshake.shakeshake import Shortcut
class ShakeBottleNeck(nn.Module):
    """Grouped (ResNeXt-style) bottleneck block with shake-shake mixing of
    its two parallel branches."""

    def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
        super(ShakeBottleNeck, self).__init__()
        self.equal_io = in_ch == out_ch
        # Projection shortcut only when input/output widths differ.
        if self.equal_io:
            self.shortcut = None
        else:
            self.shortcut = Shortcut(in_ch, out_ch, stride=stride)
        self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)
        self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)

    def forward(self, x):
        mixed = ShakeShake.apply(self.branch1(x), self.branch2(x), self.training)
        residual = x if self.equal_io else self.shortcut(x)
        return mixed + residual

    def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
        # 1x1 reduce -> 3x3 grouped conv (may downsample) -> 1x1 expand.
        stages = [
            nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=False),
            nn.BatchNorm2d(mid_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=cardinary, bias=False),
            nn.BatchNorm2d(mid_ch),
            nn.ReLU(inplace=False),
            nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
            nn.BatchNorm2d(out_ch),
        ]
        return nn.Sequential(*stages)
class ShakeResNeXt(nn.Module):
    """Shake-shake regularized ResNeXt for small (e.g. CIFAR 32x32) inputs."""

    def __init__(self, depth, w_base, cardinary, label):
        super(ShakeResNeXt, self).__init__()
        n_units = (depth - 2) // 9
        n_chs = [64, 128, 256, 1024]
        self.n_chs = n_chs
        self.in_ch = n_chs[0]
        self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
        self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
        self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
        self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
        self.fc_out = nn.Linear(n_chs[3], label)
        # Initialize parameters: He-style normal for convs, standard BN init,
        # zero bias for the classifier.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        h = self.c_in(x)
        h = self.layer1(h)
        h = self.layer2(h)
        h = self.layer3(h)
        h = F.relu(h)
        h = F.avg_pool2d(h, 8)
        h = h.view(-1, self.n_chs[3])
        return self.fc_out(h)

    def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
        # Bottleneck width scales with the base width and cardinality; the
        # block output is always 4x the stage's nominal channel count.
        mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
        blocks = []
        for _ in range(n_units):
            blocks.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch, cardinary, stride=stride))
            self.in_ch, stride = out_ch, 1
        return nn.Sequential(*blocks)
| 3,094 | 35.411765 | 97 | py |
DeepAA | DeepAA-master/DeepAA_evaluate/networks/shakeshake/shakeshake.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
    """Shake-shake mixing of two branch outputs.

    Forward (training): mixes with a per-sample random coefficient alpha in
    [0, 1). Forward (eval): averages the branches (alpha = 0.5). Backward:
    redistributes the incoming gradient with an independent per-sample random
    coefficient beta, so the two branch gradients always sum to grad_output.

    Fix: random coefficients are drawn on the inputs' device and dtype; the
    original hard-coded ``torch.cuda.FloatTensor`` and crashed on CPU tensors.
    The deprecated ``Variable`` wrapper in backward was a no-op and is removed.
    """

    @staticmethod
    def forward(ctx, x1, x2, training=True):
        if training:
            alpha = torch.rand(x1.size(0), device=x1.device, dtype=x1.dtype)
            alpha = alpha.view(-1, 1, 1, 1).expand_as(x1)
        else:
            alpha = 0.5
        return alpha * x1 + (1 - alpha) * x2

    @staticmethod
    def backward(ctx, grad_output):
        beta = torch.rand(grad_output.size(0), device=grad_output.device, dtype=grad_output.dtype)
        beta = beta.view(-1, 1, 1, 1).expand_as(grad_output)
        # No gradient for the boolean `training` flag.
        return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
    """Projection shortcut used by shake-shake networks: two average-pooled
    paths (one spatially shifted by a pixel), each projected to half the
    output channels with a 1x1 conv, concatenated, then batch-normalized."""

    def __init__(self, in_ch, out_ch, stride):
        super(Shortcut, self).__init__()
        self.stride = stride
        self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(out_ch)

    def forward(self, x):
        h = F.relu(x)
        # Path 1: plain strided 1x1 average pooling, then projection.
        path1 = self.conv1(F.avg_pool2d(h, 1, self.stride))
        # Path 2: shift the map by one pixel (negative padding crops the
        # top-left, positive padding pads the bottom-right) before pooling.
        shifted = F.pad(h, (-1, 1, -1, 1))
        path2 = self.conv2(F.avg_pool2d(shifted, 1, self.stride))
        return self.bn(torch.cat((path1, path2), 1))
| 1,413 | 27.857143 | 86 | py |
3DTrans | 3DTrans-master/setup.py | import os
import subprocess
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
def get_git_commit_number():
    """Return the short (7-character) hash of the current git HEAD.

    Falls back to the placeholder '0000000' when the current working
    directory is not a git checkout.
    """
    if not os.path.exists('.git'):
        return '0000000'
    result = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
    return result.stdout.decode('utf-8')[:7]
def make_cuda_ext(name, module, sources):
    """Build a CUDAExtension named ``<module>.<name>`` whose source paths are
    given relative to the module's directory (dotted path -> filesystem path).
    """
    module_dir = os.path.join(*module.split('.'))
    full_sources = [os.path.join(module_dir, src) for src in sources]
    return CUDAExtension(name='%s.%s' % (module, name), sources=full_sources)
def write_version_to_file(version, target_file):
    """Write a single ``__version__ = "<version>"`` line to target_file,
    overwriting any existing content."""
    with open(target_file, 'w') as f:
        f.write('__version__ = "%s"\n' % version)
if __name__ == '__main__':
    # Package version: fixed base plus the short git commit hash of HEAD,
    # materialized into pcdet/version.py so the installed package reports it.
    version = '0.5.2+%s' % get_git_commit_number()
    write_version_to_file(version, 'pcdet/version.py')
    setup(
        name='pcdet',
        version=version,
        description='3DTrans Autonomous Driving Transfer Learning Codebase',
        install_requires=[
            'numpy',
            'llvmlite',
            'numba',
            'tensorboardX',
            'easydict',
            'pyyaml',
            'scikit-image',
            'tqdm',
            'SharedArray',
            # 'spconv', # spconv has different names depending on the cuda version
        ],
        author='3DTrans Development Team',
        author_email='bo.zhangzx@gmail.com',
        license='Apache License 2.0',
        packages=find_packages(exclude=['tools', 'data', 'output']),
        cmdclass={
            # PyTorch's BuildExtension handles nvcc/cxx flags for the
            # CUDA extension modules listed below.
            'build_ext': BuildExtension,
        },
        ext_modules=[
            # 3D IoU computation and rotated NMS kernels.
            make_cuda_ext(
                name='iou3d_nms_cuda',
                module='pcdet.ops.iou3d_nms',
                sources=[
                    'src/iou3d_cpu.cpp',
                    'src/iou3d_nms_api.cpp',
                    'src/iou3d_nms.cpp',
                    'src/iou3d_nms_kernel.cu',
                ]
            ),
            # RoI-aware point feature pooling.
            make_cuda_ext(
                name='roiaware_pool3d_cuda',
                module='pcdet.ops.roiaware_pool3d',
                sources=[
                    'src/roiaware_pool3d.cpp',
                    'src/roiaware_pool3d_kernel.cu',
                ]
            ),
            # RoI point pooling.
            make_cuda_ext(
                name='roipoint_pool3d_cuda',
                module='pcdet.ops.roipoint_pool3d',
                sources=[
                    'src/roipoint_pool3d.cpp',
                    'src/roipoint_pool3d_kernel.cu',
                ]
            ),
            # PointNet++ ops, stacked-batch variant.
            make_cuda_ext(
                name='pointnet2_stack_cuda',
                module='pcdet.ops.pointnet2.pointnet2_stack',
                sources=[
                    'src/pointnet2_api.cpp',
                    'src/ball_query.cpp',
                    'src/ball_query_gpu.cu',
                    'src/group_points.cpp',
                    'src/group_points_gpu.cu',
                    'src/sampling.cpp',
                    'src/sampling_gpu.cu',
                    'src/interpolate.cpp',
                    'src/interpolate_gpu.cu',
                    'src/voxel_query.cpp',
                    'src/voxel_query_gpu.cu',
                    'src/vector_pool.cpp',
                    'src/vector_pool_gpu.cu'
                ],
            ),
            # PointNet++ ops, dense-batch variant.
            make_cuda_ext(
                name='pointnet2_batch_cuda',
                module='pcdet.ops.pointnet2.pointnet2_batch',
                sources=[
                    'src/pointnet2_api.cpp',
                    'src/ball_query.cpp',
                    'src/ball_query_gpu.cu',
                    'src/group_points.cpp',
                    'src/group_points_gpu.cu',
                    'src/interpolate.cpp',
                    'src/interpolate_gpu.cu',
                    'src/sampling.cpp',
                    'src/sampling_gpu.cu',
                ],
            ),
        ],
    )
| 3,945 | 31.344262 | 83 | py |
3DTrans | 3DTrans-master/tools/train_active_CLUE.py | import _init_path
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_active_CLUE import train_active_model_target
from test import repeat_eval_ckpt
import math
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse command-line arguments and load the YAML experiment config.

    Fix: adds the ``--num_epochs_to_eval`` option. ``main()`` reads
    ``args.num_epochs_to_eval`` when selecting checkpoints for the
    post-training evaluation step, but this parser never defined it, so that
    step raised ``AttributeError``. The option matches the sibling training
    scripts (default 0).

    Returns:
        (args, cfg): the parsed argparse namespace and the populated global
        config object (``cfg`` is also mutated in place).
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=2, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=15, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point for active-domain-adaptation (CLUE) training.

    Builds source/target dataloaders, the detector, optimizer and LR
    scheduler, restores checkpoints when available, runs the active training
    loop, and finally evaluates the saved checkpoints on the test split.
    """
    args, cfg = parse_config()
    # Distributed setup: 'none' means plain single-GPU training.
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # Per-GPU batch size: either from the config or split across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # Output layout: output/<exp_group>/<tag>/<extra_tag>/{ckpt,target_list}
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    # Keep a copy of the config file alongside the outputs (rank 0 only).
    if cfg.LOCAL_RANK == 0:
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # fine tune model
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # Explicit checkpoint given on the command line.
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the most recent checkpoint in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    # Scheduler is sized by the annotation budget rather than dataset length.
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=cfg['ANNOTATION_BUDGET'], total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_active_model_target(
        model=model,
        optimizer=optimizer_detector,
        source_train_loader=source_loader,
        target_train_loader=target_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler_detector,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        sample_epoch=cfg.SAMPLE_EPOCHS,
        annotation_budget=cfg.ANNOTATION_BUDGET,
        target_file_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
        sample_save_path=target_list_dir,
        cfg=cfg,
        batch_size=args.batch_size,
        workers=args.workers,
        dist_train=dist_train,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        lr_warmup_scheduler=None,
        ckpt_save_interval=1,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False,
        logger=logger,
        ema_model=None
    )
    # Release shared-memory blocks held by the datasets, if enabled.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): relies on args.num_epochs_to_eval being defined by the
    # argument parser -- confirm the parser provides it before relying on
    # this evaluation step.
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
                           0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point.
if __name__ == '__main__':
    main()
| 10,579 | 42.00813 | 169 | py |
3DTrans | 3DTrans-master/tools/train_multi_db.py | import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader_mdf, build_dataloader
from pcdet.models import build_network_multi_db, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_multi_db_utils import train_model
def parse_config():
    """Parse command-line arguments and load the YAML experiment config.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated global
        config object (``cfg`` is also mutated in place).
    """
    arg_parser = argparse.ArgumentParser(description='arg parser')
    arg_parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    arg_parser.add_argument('--frozen_backbone', action='store_true', default=False, help='froze the backbone when training')
    arg_parser.add_argument('--source_one_name', type=str, default="nusc", help='enter the name of the first dataset of merged datasets')
    arg_parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    arg_parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    arg_parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    arg_parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    arg_parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    arg_parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    arg_parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    arg_parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    arg_parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    arg_parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    arg_parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    arg_parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    arg_parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    arg_parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    arg_parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                            help='set extra config keys if needed')
    arg_parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    arg_parser.add_argument('--start_epoch', type=int, default=0, help='')
    arg_parser.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    arg_parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = arg_parser.parse_args()
    # Populate the global cfg from the YAML file; TAG and EXP_GROUP_PATH are
    # derived from the config file location.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point for multi-dataset (multi-db) joint training.

    Builds one dataloader per source dataset, a multi-db network whose
    dataset-specific layers are switched by a ``db_source`` flag, restores
    checkpoints when available, trains on both loaders, then evaluates on the
    first dataset's test split.
    """
    args, cfg = parse_config()
    if args.source_one_name not in ["waymo", "nusc", "kitti"]:
        raise RuntimeError('Does not exist for source_one_name')
    # Distributed setup: 'none' means plain single-GPU training.
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # Per-GPU batch size: either from the config or split across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    # Output layout: output/<exp_group>/<tag>/<extra_tag>/{ckpt,ps_label}
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    # Keep a copy of the config file alongside the outputs (rank 0 only).
    if cfg.LOCAL_RANK == 0:
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    logger.info('**********************Using Two DataLoader and Merge Loss**********************')
    logger.info('**********************VALUE of source_one_name= %s**********************' % args.source_one_name)
    source_set, source_loader, source_sampler = build_dataloader_mdf(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        drop_last=True,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    source_set_2, source_loader_2, source_sampler_2 = build_dataloader_mdf(
        dataset_cfg=cfg.DATA_CONFIG_SRC_2,
        class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        drop_last=True,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # add the dataset_source flag into uni3d_norm layer, for training stage, we use the default value of 1
    if cfg.MODEL.get('POINT_T', None):
        cfg.MODEL.POINT_T.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_3D', None):
        cfg.MODEL.BACKBONE_3D.update({"db_source": 1})
    if cfg.MODEL.get('DENSE_3D_MoE', None):
        cfg.MODEL.DENSE_3D_MoE.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_2D', None):
        cfg.MODEL.BACKBONE_2D.update({"db_source": 1})
    if cfg.MODEL.get('DENSE_2D_MoE', None):
        cfg.MODEL.DENSE_2D_MoE.update({"db_source": 1})
    if cfg.MODEL.get('PFE', None):
        cfg.MODEL.PFE.update({"db_source": 1})
    model = build_network_multi_db(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), num_class_s2=len(cfg.DATA_CONFIG_SRC_2.CLASS_NAMES), \
        dataset=source_set, dataset_s2=source_set_2, source_one_name=args.source_one_name)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # NOTE(review): n_parameters is computed but not used or logged below.
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # Explicit checkpoint given on the command line.
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the most recent checkpoint in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if args.frozen_backbone:
        # NOTE(review): the '%s' in this message is never filled in; the
        # literal string is logged as-is.
        logger.info('**********************Note that Frozen Backbone: %s**********************')
        model.frozen_model(model)
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()],
                                                    find_unused_parameters=True)
        # model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    # An "epoch" spans the longer of the two source dataloaders.
    max_len_dataset = len(source_loader) if len(source_loader) > len(source_loader_2) else len(source_loader_2)
    total_iters_each_epoch = max_len_dataset if not args.merge_all_iters_to_one_epoch \
        else max_len_dataset // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    train_func = train_model
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_func(
        model,
        optimizer,
        source_loader,
        source_loader_2,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        ps_label_dir=ps_label_dir,
        source_sampler=source_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        logger=logger,
    )
    # Release shared-memory blocks held by the dataset, if enabled.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point.
if __name__ == '__main__':
    main()
3DTrans | 3DTrans-master/tools/train_pointcontrast.py | print('program started',)
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_unsupervised_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from unsupervised_utils.pointcontrast_utils import train_model
def parse_config():
    """Parse the command line and load the YAML experiment configuration.

    The file given via ``--cfg_file`` is merged into the global ``cfg``;
    ``cfg.TAG`` and ``cfg.EXP_GROUP_PATH`` are derived from the config path,
    and any trailing ``--set`` key/value pairs override the YAML contents.

    Returns:
        tuple: (parsed ``argparse.Namespace``, populated ``cfg``)
    """
    cli = argparse.ArgumentParser(description='arg parser')

    # --- experiment / training setup ---
    cli.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    cli.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    cli.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    cli.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    cli.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    cli.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    cli.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')

    # --- distributed / runtime options ---
    cli.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    cli.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    cli.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    cli.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    cli.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    cli.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    cli.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    cli.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    cli.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                     help='set extra config keys if needed')
    cli.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    cli.add_argument('--start_epoch', type=int, default=0, help='')
    cli.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    cli.add_argument('--save_to_file', action='store_true', default=False, help='')

    args = cli.parse_args()

    # Populate the global cfg from the YAML file and derive bookkeeping fields.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # Strip the leading 'cfgs' directory and the trailing 'xxxx.yaml' file name.
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])

    # Command-line --set overrides win over the YAML contents.
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg
def main():
    """Entry point for PointContrast-style unsupervised pre-training.

    Sets up (optionally distributed) training: builds the unlabeled/test
    dataloaders, network, optimizer and LR schedulers, resumes from the latest
    checkpoint if present, runs `train_model`, then evaluates the last
    checkpoints with `repeat_eval_ckpt`.
    """
    args, cfg = parse_config()
    # Single-process run unless a launcher ('pytorch' / 'slurm') is requested;
    # the launcher-specific init helper returns world size and this process' rank.
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # --batch_size is the TOTAL batch size; split it evenly across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        # Offset by rank so each process draws a different (but reproducible) stream.
        common_utils.set_random_seed(666 + cfg.LOCAL_RANK)
    # Output layout: output/<exp_group>/<tag>/<extra_tag>/{ckpt, tensorboard, ...}
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config file next to the run outputs for provenance.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    # TensorBoard writer only on rank 0; other ranks pass tb_log=None downstream.
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # Repurpose args.batch_size as a per-split dict expected by the builder below.
    args.batch_size = {
        'unlabeled': args.batch_size,
        'test': args.batch_size
    }
    # build unsupervised dataloader
    datasets, dataloaders, samplers = build_unsupervised_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        root_path=cfg.DATA_CONFIG.DATA_PATH,
        dist=dist_train, workers=args.workers,
        logger=logger,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=datasets['unlabeled'])
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # NOTE(review): n_parameters is computed but never logged or used below.
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        # Weights only; optimizer state is NOT restored from a pretrained model.
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # No explicit checkpoint: auto-resume from the newest one in ckpt_dir, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # NOTE(review): total_iters_each_epoch is len(dataset), not len(dataloader);
    # confirm the scheduler expects sample count rather than batch count here.
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=len(datasets['unlabeled']), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    voxel_size = cfg.DATA_CONFIG.VOXEL_SIZE
    point_cloud_range = cfg.DATA_CONFIG.POINT_CLOUD_RANGE
    train_model(
        model=model,
        optimizer=optimizer,
        train_loader=dataloaders['unlabeled'],
        lr_scheduler=lr_scheduler,
        cfg=cfg.OPTIMIZATION,
        voxel_size=voxel_size,
        point_cloud_range=point_cloud_range,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        train_sampler=samplers['unlabeled'],
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
    )
    # Release shared-memory buffers held by the dataset, if it used any.
    if hasattr(datasets['unlabeled'], 'use_shared_memory') and datasets['unlabeled'].use_shared_memory:
        datasets['unlabeled'].clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = datasets['test'], dataloaders['test'], samplers['test']
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        # Unwrap the DDP container so repeat_eval_ckpt can reload weights directly.
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
main() | 9,328 | 44.286408 | 169 | py |
3DTrans | 3DTrans-master/tools/test.py | import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
def parse_config():
    """Parse evaluation CLI options and load the YAML config into ``cfg``.

    Also seeds NumPy for reproducible evaluation and applies any ``--set``
    overrides on top of the YAML values.

    Returns:
        tuple: (parsed ``argparse.Namespace``, populated ``cfg``)
    """
    opt = argparse.ArgumentParser(description='arg parser')
    opt.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    opt.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    opt.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    opt.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    opt.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    opt.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    opt.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    opt.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    opt.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                     help='set extra config keys if needed')
    opt.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    opt.add_argument('--start_epoch', type=int, default=0, help='')
    opt.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    opt.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    opt.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    opt.add_argument('--save_to_file', action='store_true', default=False, help='')

    args = opt.parse_args()

    # Load the YAML config and derive bookkeeping fields from its path.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # Strip the leading 'cfgs' directory and the trailing 'xxxx.yaml' file name.
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])

    # Fixed seed so evaluation-time sampling is deterministic.
    np.random.seed(1024)

    # --set overrides take precedence over the YAML contents.
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Evaluate one specific checkpoint (given via ``args.ckpt``) on the test set.

    Args:
        model: network to evaluate (weights are loaded in-place).
        test_loader: test-split dataloader.
        args: CLI namespace; ``args.ckpt`` and ``args.save_to_file`` are used.
        eval_output_dir: directory where evaluation results are written.
        logger: run logger.
        epoch_id: epoch identifier used for result bookkeeping.
        dist_test: whether evaluation runs under distributed testing.
    """
    # load checkpoint (mapped to CPU first when running distributed)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # Fix: route the model summary through the configured logger instead of a
    # stray debug print(), so it lands in the log file and respects rank setup.
    logger.info('Model for evaluation: %s' % model)
    # start evaluation
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
        result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Return the oldest checkpoint in *ckpt_dir* that has not been evaluated.

    Args:
        ckpt_dir: directory containing ``*checkpoint_epoch_<E>.pth`` files.
        ckpt_record_file: text file with one already-evaluated epoch id per line.
        args: namespace providing ``start_epoch``; earlier epochs are skipped.

    Returns:
        (epoch_id_str, ckpt_path) for the first pending checkpoint in
        modification-time order, or ``(-1, None)`` if nothing is pending.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    # Oldest first, so checkpoints are evaluated in the order they were written.
    ckpt_list.sort(key=os.path.getmtime)
    # Fix: use a context manager so the record-file handle is closed
    # deterministically (the original left it open until garbage collection).
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]
    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if len(num_list) == 0:
            continue
        epoch_id = num_list[-1]
        # Skip optimizer-state snapshots ('..._optim.pth'); they hold no weights.
        if 'optim' in epoch_id:
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt
    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll *ckpt_dir* and evaluate every checkpoint that appears.

    Loops until no new checkpoint shows up for ``args.max_waiting_mins``
    minutes (so it can run concurrently with training). Evaluated epoch ids
    are appended to a record file, letting re-runs skip finished epochs;
    per-epoch metrics go to TensorBoard on rank 0.
    """
    # evaluated ckpt record: one epoch id per line; touch it so it exists.
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log (rank 0 only; tb_log is also only read on rank 0 below)
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True
    while True:
        # check whether there is a checkpoint that has not been evaluated yet
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            # Fix: accumulate the actual wait interval instead of a hard-coded 30,
            # so the timeout accounting cannot drift if wait_second is changed.
            total_time += wait_second
            # Give up only after the grace period, and never before the first eval.
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch as evaluated so a restart skips it
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Entry point for standalone evaluation.

    Builds the test dataloader (target-domain config if present), constructs
    the network, and either evaluates a single checkpoint (--ckpt) or keeps
    polling a checkpoint directory (--eval_all).
    """
    args, cfg = parse_config()
    # Single-process unless a launcher is requested; the helper returns
    # world size and this process' rank for distributed testing.
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    # --batch_size is the TOTAL batch size; split it evenly across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # Single-checkpoint mode: derive the epoch id from digits in the ckpt path.
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    logger.info('GPU_NAME=%s' % torch.cuda.get_device_name())
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    # Prefer the target-domain test set (cross-domain setting) when the config
    # declares DATA_CONFIG_TAR; otherwise evaluate on the source-domain split.
    if cfg.get('DATA_CONFIG_TAR', None):
        test_set, test_loader, sampler = build_dataloader(
            dataset_cfg=cfg.DATA_CONFIG_TAR,
            class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_test, workers=args.workers, logger=logger, training=False
        )
    else:
        test_set, test_loader, sampler = build_dataloader(
            dataset_cfg=cfg.DATA_CONFIG,
            class_names=cfg.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_test, workers=args.workers, logger=logger, training=False
        )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            # epoch_id is only defined on this branch (set above when not eval_all).
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
if __name__ == '__main__':
main()
| 8,740 | 40.42654 | 120 | py |
3DTrans | 3DTrans-master/tools/train_ada.py | import os
import math
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from tools.test import repeat_eval_ckpt
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_active_source_utils import train_active_model_source_only
from train_utils.train_active_target_utils import train_active_model_dual_tar
from test import repeat_eval_ckpt
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse active-domain-adaptation CLI options and load the YAML config.

    Returns:
        tuple: (parsed ``argparse.Namespace``, populated ``cfg``)
    """
    ap = argparse.ArgumentParser(description='arg parser')

    # training setup
    ap.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    ap.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    ap.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    ap.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    ap.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    ap.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    ap.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')

    # distributed / runtime
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    ap.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    ap.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    ap.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    ap.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    ap.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    ap.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    ap.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    ap.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set extra config keys if needed')
    ap.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    ap.add_argument('--start_epoch', type=int, default=0, help='')
    ap.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    ap.add_argument('--save_to_file', action='store_true', default=False, help='')

    args = ap.parse_args()

    # Merge the YAML file into the global cfg and derive bookkeeping fields.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])

    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg
def main():
    """Entry point for active domain adaptation (ADA) training.

    Builds source/target dataloaders (plus a sampled-source loader in the
    'TARGET' stage), the detector + discriminator with their own optimizers
    and schedulers, resumes from checkpoints if available, runs the
    stage-appropriate training loop, and finally evaluates the last
    checkpoints on the source-domain test split.
    """
    args, cfg = parse_config()
    # Single-process unless a launcher ('pytorch' / 'slurm') is requested.
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # --batch_size is the TOTAL batch size; split it evenly across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # Output layout: output/<exp_group>/<tag>/<extra_tag>/{ckpt, target_list, ...}
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config next to the run outputs for provenance.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # labeled source-domain dataloader
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    if cfg.get('ADA_STAGE', None) == 'TARGET':
        # Sampled-source loader drawing from the frame list selected in the
        # SOURCE stage (cfg.DATA_CONFIG.FILE_PATH).
        source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
            dataset_cfg=cfg.DATA_CONFIG,
            class_names=cfg.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_train, workers=args.workers,
            logger=logger,
            training=True,
            info_path=cfg.DATA_CONFIG.FILE_PATH,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            total_epochs=args.epochs
        )
    else:
        # Fix: the original `a, b, c = None` raised
        # "TypeError: cannot unpack non-iterable NoneType"; assign each name.
        source_sample_set = source_sample_loader = source_sample_sampler = None
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # Separate optimizers for the detector and its domain discriminator.
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    optimizer_discriminator = build_optimizer(model.discriminator, cfg.OPTIMIZATION.DISCRIMINATOR)
    optimizer_list = [optimizer_detector, optimizer_discriminator]
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        # Weights only; optimizer state is not restored from a pretrained model.
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # No explicit checkpoint: auto-resume from the newest one in ckpt_dir.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    if cfg.get('ADA_STAGE', None) == 'SOURCE':
        # SOURCE stage: iterations derived from the annotation budget.
        total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    else:
        total_iters_each_epoch = len(source_sample_loader)
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    lr_scheduler_discriminator, lr_warmup_scheduler_discriminator = build_scheduler(
        optimizer_discriminator, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.DISCRIMINATOR
    )
    lr_scheduler_list = [lr_scheduler_detector, lr_scheduler_discriminator]
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    if cfg.get('ADA_STAGE', None) == 'SOURCE':
        train_active_model_source_only(
            model=model,
            optimizer=optimizer_list,
            source_train_loader=source_loader,
            target_train_loader=target_loader,
            sample_loader=None,
            model_func=model_fn_decorator(),
            lr_scheduler=lr_scheduler_list,
            optim_cfg=cfg.OPTIMIZATION,
            total_iters_each_epoch=total_iters_each_epoch,
            start_epoch=start_epoch,
            total_epochs=args.epochs,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=ckpt_dir,
            sample_epoch=cfg.SAMPLE_EPOCHS,
            source_budget=cfg.SOURCE_THRESHOD,
            source_file_path=cfg.DATA_CONFIG.FILE_PATH,
            sample_save_path=target_list_dir,
            cfg=cfg,
            batch_size=args.batch_size,
            workers=args.workers,
            dist_train=dist_train,
            source_sampler=source_sampler,
            target_sampler=target_sampler,
            sample_sampler=None,
            lr_warmup_scheduler=None,
            ckpt_save_interval=1,
            max_ckpt_save_num=50,
            merge_all_iters_to_one_epoch=False,
            logger=logger,
            ema_model=None
        )
    else:
        train_active_model_dual_tar(
            model=model,
            optimizer=optimizer_list,
            source_train_loader=source_loader,
            target_train_loader=target_loader,
            source_sample_loader=source_sample_loader,
            model_func=model_fn_decorator(),
            lr_scheduler=lr_scheduler_list,
            optim_cfg=cfg.OPTIMIZATION,
            start_epoch=start_epoch,
            total_epochs=args.epochs,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=ckpt_dir,
            sample_epoch=cfg.SAMPLE_EPOCHS,
            annotation_budget=cfg.ANNOTATION_BUDGET,
            target_file_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
            sample_save_path=target_list_dir,
            cfg=cfg,
            batch_size=args.batch_size,
            workers=args.workers,
            dist_train=dist_train,
            source_sampler=source_sampler,
            target_sampler=target_sampler,
            # Fix: the original passed the DataLoader (source_sample_loader)
            # where the sampler keyword is expected; pass the matching sampler.
            source_sample_sampler=source_sample_sampler,
            lr_warmup_scheduler=None,
            ckpt_save_interval=args.ckpt_save_interval,
            max_ckpt_save_num=50,
            merge_all_iters_to_one_epoch=False,
            logger=logger,
            ema_model=None
        )
    # Release shared-memory buffers held by the datasets, if any were used.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # NOTE(review): evaluation uses the source-domain test split (DATA_CONFIG);
    # confirm whether the target-domain split was intended here.
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
main()
| 13,489 | 42.376206 | 169 | py |
3DTrans | 3DTrans-master/tools/train_uda.py | import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_st_utils import train_model_st
def parse_config():
    """Parse self-training (UDA) CLI options and load the YAML config.

    Returns:
        tuple: (parsed ``argparse.Namespace``, populated ``cfg``)
    """
    argv = argparse.ArgumentParser(description='arg parser')
    argv.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    argv.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    argv.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    argv.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    argv.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    argv.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    argv.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    argv.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    argv.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    argv.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    argv.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    argv.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    argv.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    argv.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    argv.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    argv.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                      help='set extra config keys if needed')
    argv.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    argv.add_argument('--start_epoch', type=int, default=0, help='')
    argv.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    argv.add_argument('--save_to_file', action='store_true', default=False, help='')

    args = argv.parse_args()

    # Merge the YAML file into the global cfg and derive bookkeeping fields.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # Strip the leading 'cfgs' directory and the trailing 'xxxx.yaml' file name.
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])

    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg
def main() -> None:
    """End-to-end training entry point.

    Parses the config, sets up (optional) distributed training, builds the
    source (and, for self-training, target) dataloaders, trains the network
    with `train_model_st`, then evaluates the last few checkpoints on the
    test split.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        # initialize the distributed backend for the chosen launcher (pytorch/slurm)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # --batch_size is the TOTAL batch size; split it evenly across GPUs
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    # output / checkpoint / pseudo-label directories
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # keep a copy of the config next to the outputs for reproducibility
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    if cfg.get('SELF_TRAIN', None):
        # self-training: a second loader supplies target-domain data
        target_set, target_loader, target_sampler = build_dataloader(
            cfg.DATA_CONFIG_TAR, cfg.DATA_CONFIG_TAR.CLASS_NAMES, args.batch_size,
            dist_train, workers=args.workers, logger=logger, training=True
        )
    else:
        target_set = target_loader = target_sampler = None
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # no explicit checkpoint given: resume from the newest one in ckpt_dir, if any
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    # epoch length follows the target loader when self-training, else the source loader
    if cfg.get('SELF_TRAIN', None):
        total_iters_each_epoch = len(target_loader) if not args.merge_all_iters_to_one_epoch \
            else len(target_loader) // args.epochs
    else:
        total_iters_each_epoch = len(source_loader) if not args.merge_all_iters_to_one_epoch \
            else len(source_loader) // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # select proper trainer
    train_func = train_model_st
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_func(
        model,
        optimizer,
        source_loader,
        target_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        ps_label_dir=ps_label_dir,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        logger=logger,
        ema_model=None
    )
    # release shared-memory buffers used by the dataset workers, if enabled
    if cfg.get('SELF_TRAIN', None):
        if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
            target_set.clean_shared_memory()
    else:
        if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
            source_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point. The original final line had dataset-extraction residue
# ("| 10,156 | 42.592275 | 125 | py |") fused onto it, making it a syntax error.
if __name__ == '__main__':
    main()
# === 3DTrans-master/tools/train_semi.py ===
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
import copy
import torch
import torch.distributed as dist
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_semi_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_semi_utils import train_model
from ssl_utils.semi_train_utils import train_ssl_model
from test import repeat_eval_ckpt
from eval_utils.eval_utils import eval_one_epoch
def parse_config():
    """Build the CLI parser, parse argv, and load the YAML config into `cfg`.

    Returns:
        (args, cfg): parsed argparse namespace and the populated global config.
    """
    arg_parser = argparse.ArgumentParser(description='arg parser')
    arg_parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    arg_parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    arg_parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    arg_parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    arg_parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    arg_parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    arg_parser.add_argument('--tcp_port', type=int, default=8888, help='tcp port for distrbuted training')
    arg_parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    arg_parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    arg_parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    arg_parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    arg_parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    arg_parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    arg_parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                            help='set extra config keys if needed')
    arg_parser.add_argument('--max_waiting_mins', type=int, default=1, help='max waiting minutes')
    arg_parser.add_argument('--start_epoch', type=int, default=0, help='')
    arg_parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    arg_parser.add_argument('--runs_on', type=str, default='server', choices=['server', 'cloud'], help='runs on server or cloud')
    args = arg_parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # everything between the leading 'cfgs/' directory and the yaml file name
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
class DistStudent(nn.Module):
    """Runs the student network on a labeled and an unlabeled batch in one
    forward call, so DistributedDataParallel sees a single module invocation.
    """

    def __init__(self, student):
        super().__init__()
        # keep the wrapped network under `onepass` so callers can reach it
        # via model.module.onepass after DDP wrapping
        self.onepass = student

    def forward(self, ld_batch, ud_batch):
        net = self.onepass
        return net(ld_batch), net(ud_batch)
class DistTeacher(nn.Module):
    """Runs the (EMA) teacher network on labeled/unlabeled batches in one
    forward call; the labeled batch is optional (pseudo-labeling mode).
    """

    def __init__(self, teacher):
        super().__init__()
        # exposed as `onepass` so callers can reach it via model.module.onepass
        self.onepass = teacher

    def forward(self, ld_batch, ud_batch):
        if ld_batch is None:
            # pseudo-labeling: only the unlabeled batch goes through the teacher
            return None, self.onepass(ud_batch)
        # labeled batch is evaluated first, matching the original call order
        return self.onepass(ld_batch), self.onepass(ud_batch)
def main():
    """Two-stage semi-supervised training entry point.

    Stage I pre-trains a detector on the labeled split (or, when
    cfg.USE_PRETRAIN_MODEL is set, loads an existing checkpoint and only
    evaluates it). Stage II runs teacher/student semi-supervised learning:
    the student is optimized by gradient descent while the teacher is either
    frozen for pseudo-labeling or EMA-updated for consistency training.
    Finally the last checkpoints of both models are evaluated.

    Bugfix vs. original: `load_params_from_file(..., to_cpu=dist)` passed the
    `torch.distributed` module (always truthy) instead of the `dist_train`
    flag; both call sites now pass `to_cpu=dist_train`.
    """
    args, cfg = parse_config()
    if args.runs_on == 'cloud':
        # on-cloud runs read data from the cloud mirror path
        cfg.DATA_CONFIG.DATA_PATH = cfg.DATA_CONFIG.CLOUD_DATA_PATH
    if args.launcher == 'none':
        dist_train = False
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.fix_random_seed:
        common_utils.set_random_seed(666)

    # ----------------------- output / checkpoint directories -----------------------
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    if args.runs_on == 'cloud':
        output_dir = Path('/cache/output/') / cfg.EXP_GROUP_PATH / cfg.TAG
    pretrain_ckpt_dir = output_dir / 'pretrain_ckpt'
    ssl_ckpt_dir = output_dir / 'ssl_ckpt'
    student_ckpt_dir = output_dir / 'ssl_ckpt' / 'student'
    teacher_ckpt_dir = output_dir / 'ssl_ckpt' / 'teacher'
    output_dir.mkdir(parents=True, exist_ok=True)
    pretrain_ckpt_dir.mkdir(parents=True, exist_ok=True)
    student_ckpt_dir.mkdir(parents=True, exist_ok=True)
    teacher_ckpt_dir.mkdir(parents=True, exist_ok=True)

    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)

    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # keep a copy of the config next to the outputs for reproducibility
        os.system('cp %s %s' % (args.cfg_file, output_dir))

    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None

    # per-split batch sizes: the semi-supervised loaders use different sizes
    batch_size = {
        'pretrain': cfg.OPTIMIZATION.PRETRAIN.BATCH_SIZE_PER_GPU,
        'labeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.LD_BATCH_SIZE_PER_GPU,
        'unlabeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.UD_BATCH_SIZE_PER_GPU,
        'test': cfg.OPTIMIZATION.TEST.BATCH_SIZE_PER_GPU,
    }

    # -----------------------create dataloader & network & optimizer---------------------------
    datasets, dataloaders, samplers = build_semi_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=batch_size,
        dist=dist_train,
        root_path=cfg.DATA_CONFIG.DATA_PATH,
        workers=args.workers,
        logger=logger,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
    )

    # --------------------------------stage I pretraining---------------------------------------
    logger.info('************************Stage I Pretraining************************')
    MODEL_PRETRAINED = copy.deepcopy(cfg.MODEL)
    pretrain_model = build_network(model_cfg=MODEL_PRETRAINED, num_class=len(cfg.CLASS_NAMES), dataset=datasets['pretrain'])
    pretrain_model.set_model_type('origin')
    if cfg.get('USE_PRETRAIN_MODEL', False):
        # a pre-trained checkpoint is supplied: skip stage-I training, just evaluate it
        pretrain_ckpt = cfg.PRETRAIN_CKPT
        if args.runs_on == 'cloud':
            pretrain_ckpt = cfg.CLOUD_PRETRAIN_CKPT
        pretrain_model.load_params_from_file(filename=pretrain_ckpt, logger=logger, to_cpu=dist_train)
        pretrain_model.cuda()
        pretrain_model.eval()  # before wrap to DistributedDataParallel to support fixed some parameters
        if dist_train:
            pretrain_model = nn.parallel.DistributedDataParallel(
                pretrain_model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
        logger.info(pretrain_model)
        eval_pretrain_dir = output_dir / 'eval' / 'eval_with_pretraining'
        eval_pretrain_dir.mkdir(parents=True, exist_ok=True)
        eval_one_epoch(cfg, pretrain_model, dataloaders['test'], -1, logger, dist_test=dist_train, save_to_file=False, result_dir=eval_pretrain_dir)
    else:
        # train from scratch on the pre-training split
        pretrain_model.cuda()
        pretrain_optimizer = build_optimizer(pretrain_model, cfg.OPTIMIZATION.PRETRAIN)
        pretrain_model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
        if dist_train:
            pretrain_model = nn.parallel.DistributedDataParallel(
                pretrain_model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
        logger.info(pretrain_model)
        last_epoch = -1
        start_epoch = it = 0
        pretrain_lr_scheduler, pretrain_lr_warmup_scheduler = build_scheduler(
            pretrain_optimizer, total_iters_each_epoch=len(dataloaders['pretrain']),
            total_epochs=cfg.OPTIMIZATION.PRETRAIN.NUM_EPOCHS,
            last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.PRETRAIN
        )
        logger.info('**********************Start pre-training %s/%s(%s)**********************'
                    % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        train_model(
            pretrain_model,
            pretrain_optimizer,
            dataloaders['pretrain'],
            model_func=model_fn_decorator(),
            lr_scheduler=pretrain_lr_scheduler,
            optim_cfg=cfg.OPTIMIZATION.PRETRAIN,
            start_epoch=start_epoch,
            total_epochs=cfg.OPTIMIZATION.PRETRAIN.NUM_EPOCHS,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=pretrain_ckpt_dir,
            train_sampler=samplers['pretrain'],
            lr_warmup_scheduler=pretrain_lr_warmup_scheduler,
            ckpt_save_interval=args.ckpt_save_interval,
            max_ckpt_save_num=args.max_ckpt_save_num,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
        )
        logger.info('**********************End pre-training %s/%s(%s)**********************\n\n\n'
                    % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        logger.info('**********************Start evaluation for pre-training %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        eval_pretrain_dir = output_dir / 'eval' / 'eval_with_pretraining'
        eval_pretrain_dir.mkdir(parents=True, exist_ok=True)
        # only evaluate the last 10 pre-training epochs
        args.start_epoch = cfg.OPTIMIZATION.PRETRAIN.NUM_EPOCHS - 10
        repeat_eval_ckpt(
            model=pretrain_model.module if dist_train else pretrain_model,
            test_loader=dataloaders['test'],
            args=args,
            eval_output_dir=eval_pretrain_dir,
            logger=logger,
            ckpt_dir=pretrain_ckpt_dir,
            dist_test=dist_train
        )
        logger.info('**********************End evaluation for pre-training %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))

    # --------------------------------stage II SSL training---------------------------------------
    logger.info('************************Stage II SSL training************************')
    MODEL_TEACHER = copy.deepcopy(cfg.MODEL)
    teacher_model = build_network(model_cfg=MODEL_TEACHER, num_class=len(cfg.CLASS_NAMES), dataset=datasets['labeled'])
    MODEL_STUDENT = copy.deepcopy(cfg.MODEL)
    student_model = build_network(model_cfg=MODEL_STUDENT, num_class=len(cfg.CLASS_NAMES), dataset=datasets['labeled'])
    teacher_model.set_model_type('teacher')
    student_model.set_model_type('student')
    teacher_model.cuda()
    student_model.cuda()
    # only update student model by gradient descent, teacher model are updated by EMA
    student_optimizer = build_optimizer(student_model, cfg.OPTIMIZATION.SEMI_SUP_LEARNING.STUDENT)

    # load checkpoint if it is possible (resume an interrupted SSL run)
    last_epoch = -1
    start_epoch = it = 0
    based_on_pretrained = True
    teacher_ckpt_list = glob.glob(str(teacher_ckpt_dir / '*checkpoint_epoch_*.pth'))
    student_ckpt_list = glob.glob(str(student_ckpt_dir / '*checkpoint_epoch_*.pth'))
    if len(teacher_ckpt_list) > 0 and len(student_ckpt_list) > 0:
        based_on_pretrained = False
        teacher_ckpt_list.sort(key=os.path.getmtime)
        student_ckpt_list.sort(key=os.path.getmtime)
        it, start_epoch = teacher_model.load_params_with_optimizer(
            teacher_ckpt_list[-1], to_cpu=dist_train, optimizer=student_optimizer, logger=logger
        )
        it, start_epoch = student_model.load_params_with_optimizer(
            student_ckpt_list[-1], to_cpu=dist_train, optimizer=student_optimizer, logger=logger
        )
        last_epoch = start_epoch + 1
    if based_on_pretrained:
        # no SSL checkpoints yet: initialize both teacher and student from stage-I weights
        if cfg.get('USE_PRETRAIN_MODEL', False):
            pretrained_model = cfg.PRETRAIN_CKPT
            if args.runs_on == 'cloud':
                pretrained_model = cfg.CLOUD_PRETRAIN_CKPT
        else:
            ckpt_list = glob.glob(str(pretrain_ckpt_dir / '*checkpoint_epoch_*.pth'))
            ckpt_list.sort(key=os.path.getmtime)
            pretrained_model = ckpt_list[-1]
        # bugfix: pass the dist_train flag; the original passed the
        # `torch.distributed` module (always truthy) as `to_cpu`
        teacher_model.load_params_from_file(filename=pretrained_model, to_cpu=dist_train, logger=logger)
        student_model.load_params_from_file(filename=pretrained_model, to_cpu=dist_train, logger=logger)

    if dist_train:
        student_model = DistStudent(student_model)  # add wrapper for dist training
        student_model = nn.parallel.DistributedDataParallel(student_model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
        # teacher doesn't need dist train
        teacher_model = DistTeacher(teacher_model)
        teacher_model = nn.parallel.DistributedDataParallel(teacher_model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    student_model.train()
    # Notes: we found for pseudo labels, teacher_model.eval() is better; for EMA update
    # and consistency, teacher_model.train() is better
    if cfg.OPTIMIZATION.SEMI_SUP_LEARNING.TEACHER.NUM_ITERS_PER_UPDATE == -1:  # for pseudo label
        teacher_model.eval()  # Set to eval mode to avoid BN update and dropout
    else:  # for EMA teacher with consistency
        teacher_model.train()  # Set to train mode
    # the teacher is never optimized by backprop
    for t_param in teacher_model.parameters():
        t_param.requires_grad = False
    logger.info(student_model)

    # NOTE(review): the original comment said "use unlabeled data as epoch counter",
    # but the labeled loader's length is used below — verify which is intended
    student_lr_scheduler, student_lr_warmup_scheduler = build_scheduler(
        student_optimizer, total_iters_each_epoch=len(dataloaders['labeled']), total_epochs=cfg.OPTIMIZATION.SEMI_SUP_LEARNING.NUM_EPOCHS,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.SEMI_SUP_LEARNING.STUDENT
    )
    logger.info('**********************Start ssl-training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_ssl_model(
        teacher_model=teacher_model,
        student_model=student_model,
        student_optimizer=student_optimizer,
        labeled_loader=dataloaders['labeled'],
        unlabeled_loader=dataloaders['unlabeled'],
        lr_scheduler=student_lr_scheduler,
        ssl_cfg=cfg.OPTIMIZATION.SEMI_SUP_LEARNING,
        start_epoch=start_epoch,
        total_epochs=cfg.OPTIMIZATION.SEMI_SUP_LEARNING.NUM_EPOCHS,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ssl_ckpt_dir,
        labeled_sampler=samplers['labeled'],
        unlabeled_sampler=samplers['unlabeled'],
        lr_warmup_scheduler=student_lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        dist=dist_train
    )
    logger.info('**********************End ssl-training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))

    # ----------------------- evaluation of the student model -----------------------
    logger.info('**********************Start evaluation for student model %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    eval_ssl_dir = output_dir / 'eval' / 'eval_with_student_model'
    eval_ssl_dir.mkdir(parents=True, exist_ok=True)
    # only evaluate the last 25 SSL epochs
    args.start_epoch = cfg.OPTIMIZATION.SEMI_SUP_LEARNING.NUM_EPOCHS - 25
    repeat_eval_ckpt(
        model=student_model.module.onepass if dist_train else student_model,
        test_loader=dataloaders['test'],
        args=args,
        eval_output_dir=eval_ssl_dir,
        logger=logger,
        ckpt_dir=ssl_ckpt_dir / 'student',
        dist_test=dist_train
    )
    logger.info('**********************End evaluation for student model %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))

    # ----------------------- evaluation of the teacher model -----------------------
    logger.info('**********************Start evaluation for teacher model %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    eval_ssl_dir = output_dir / 'eval' / 'eval_with_teacher_model'
    eval_ssl_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = cfg.OPTIMIZATION.SEMI_SUP_LEARNING.NUM_EPOCHS - 25
    if dist_train:
        teacher_model.module.onepass.set_model_type('origin')  # ret filtered boxes
    else:
        teacher_model.set_model_type('origin')
    for t_param in teacher_model.parameters():  # Add this to avoid errors
        t_param.requires_grad = True
    repeat_eval_ckpt(
        model=teacher_model.module.onepass if dist_train else teacher_model,
        test_loader=dataloaders['test'],
        args=args,
        eval_output_dir=eval_ssl_dir,
        logger=logger,
        ckpt_dir=ssl_ckpt_dir / 'teacher',
        dist_test=dist_train
    )
    logger.info('**********************End evaluation for teacher model %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point. The original final line had dataset-extraction residue
# ("| 17,324 | 46.465753 | 148 | py |") fused onto it, making it a syntax error.
if __name__ == '__main__':
    main()
# === 3DTrans-master/tools/train_multi_db_merge_loss.py ===
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_utils import train_model
from train_utils.train_multi_db_loss_merge import train_multi_db_model
def parse_config():
    """Parse CLI arguments and populate the global `cfg` from the YAML file.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    add = parser.add_argument
    # config / training schedule
    add('--cfg_file', type=str, default=None, help='specify the config for training')
    add('--batch_size', type=int, default=None, required=False, help='batch size for training')
    add('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    add('--workers', type=int, default=8, help='number of workers for dataloader')
    add('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    add('--ckpt', type=str, default=None, help='checkpoint to start from')
    add('--pretrained_model', type=str, default=None, help='pretrained_model')
    # distributed training
    add('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    add('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    add('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    add('--fix_random_seed', action='store_true', default=False, help='')
    # checkpointing / evaluation
    add('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    add('--local_rank', type=int, default=0, help='local rank for distributed training')
    add('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    add('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    add('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
        help='set extra config keys if needed')
    add('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    add('--start_epoch', type=int, default=0, help='')
    add('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    add('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # everything between the leading 'cfgs/' directory and the yaml file name
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Train a detector on one source dataset, or on two source datasets with
    merged losses when cfg.MULTI_DB is set, then evaluate the last checkpoints.

    Bugfix vs. original: the epoch-length computation called
    `len(source_loader_2)` unconditionally, which raised TypeError whenever
    MULTI_DB was disabled (the second loader is None in that case).
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        print("None args.launcher********", args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print("args.launcher********", args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # --batch_size is the TOTAL batch size; split it evenly across GPUs
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        common_utils.set_random_seed(666)

    # output / checkpoint / pseudo-label directories
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)

    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)

    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # keep a copy of the config next to the outputs for reproducibility
        os.system('cp %s %s' % (args.cfg_file, output_dir))

    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None

    # -----------------------create dataloader & network & optimizer---------------------------
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    if cfg.get('MULTI_DB', None):
        logger.info('**********************Using Two DataLoader and Merge Loss**********************')
        source_set_2, source_loader_2, source_sampler_2 = build_dataloader(
            dataset_cfg=cfg.DATA_CONFIG_SRC_2,
            class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_train, workers=args.workers,
            logger=logger,
            training=True,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            total_epochs=args.epochs
        )
    else:
        source_set_2 = source_loader_2 = source_sampler_2 = None

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)

    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # no explicit checkpoint given: resume from the newest one in ckpt_dir, if any
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1

    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        if cfg.get('MULTI_DB', None):
            # alternating-forward training leaves parameters unused per step
            model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()],
                                                        broadcast_buffers=False, find_unused_parameters=True)
        else:
            model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)

    # bugfix: only consult the second loader when MULTI_DB is enabled; the
    # original computed len(source_loader_2) unconditionally and crashed with
    # TypeError (len(None)) in the single-dataset configuration
    if cfg.get('MULTI_DB', None):
        max_len_dataset = max(len(source_loader), len(source_loader_2))
    else:
        max_len_dataset = len(source_loader)
    total_iters_each_epoch = max_len_dataset if not args.merge_all_iters_to_one_epoch \
        else max_len_dataset // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )

    # select proper trainer
    if cfg.get('MULTI_DB', None):
        train_func = train_multi_db_model
    else:
        train_func = train_model

    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    if cfg.get('MULTI_DB', None):
        train_func(
            model,
            optimizer,
            source_loader,
            source_loader_2,
            model_func=model_fn_decorator(),
            lr_scheduler=lr_scheduler,
            optim_cfg=cfg.OPTIMIZATION,
            start_epoch=start_epoch,
            total_epochs=args.epochs,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=ckpt_dir,
            ps_label_dir=ps_label_dir,
            source_sampler=source_sampler,
            target_sampler=source_sampler_2,
            lr_warmup_scheduler=lr_warmup_scheduler,
            ckpt_save_interval=args.ckpt_save_interval,
            max_ckpt_save_num=args.max_ckpt_save_num,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            logger=logger,
            ema_model=None
        )
    else:
        train_model(
            model,
            optimizer,
            source_loader,
            model_func=model_fn_decorator(),
            lr_scheduler=lr_scheduler,
            optim_cfg=cfg.OPTIMIZATION,
            start_epoch=start_epoch,
            total_epochs=args.epochs,
            start_iter=it,
            rank=cfg.LOCAL_RANK,
            tb_log=tb_log,
            ckpt_save_dir=ckpt_dir,
            source_sampler=source_sampler,
            lr_warmup_scheduler=lr_warmup_scheduler,
            ckpt_save_interval=args.ckpt_save_interval,
            max_ckpt_save_num=args.max_ckpt_save_num,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
        )

    # release shared-memory buffers used by the dataset workers, if enabled
    # NOTE(review): in the MULTI_DB branch the second dataset is cleaned only
    # when the FIRST dataset has shared memory enabled — verify this is intended
    if cfg.get('MULTI_DB', None):
        if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
            source_set.clean_shared_memory()
            source_set_2.clean_shared_memory()
    else:
        if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
            source_set.clean_shared_memory()

    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point. The original final line had dataset-extraction residue
# ("| 11,823 | 43.119403 | 125 | py |") fused onto it, making it a syntax error.
if __name__ == '__main__':
    main()
# === 3DTrans-master/tools/demo.py ===
import argparse
import glob
from pathlib import Path
try:
import open3d
from visual_utils import open3d_vis_utils as V
OPEN3D_FLAG = True
except:
import mayavi.mlab as mlab
from visual_utils import visualize_utils as V
OPEN3D_FLAG = False
import numpy as np
import torch
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets import DatasetTemplate
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils
class DemoDataset(DatasetTemplate):
    """Minimal dataset over a directory (or single file) of point clouds.

    Supports raw ``.bin`` files (float32, N x 4) and ``.npy`` arrays and
    feeds them through the standard ``prepare_data`` pipeline.
    """

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):
        """
        Args:
            dataset_cfg: dataset config node.
            class_names: list of class names.
            training: whether the dataset is used for training.
            root_path: a directory of point-cloud files, or one file.
            logger: logger instance.
            ext: file extension to look for ('.bin' or '.npy').
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.root_path = root_path
        self.ext = ext

        # Collect every matching file under the directory; a single file is
        # wrapped into a one-element list.
        if self.root_path.is_dir():
            file_list = glob.glob(str(root_path / f'*{self.ext}'))
        else:
            file_list = [self.root_path]
        file_list.sort()
        self.sample_file_list = file_list

    def __len__(self):
        return len(self.sample_file_list)

    def __getitem__(self, index):
        sample_path = self.sample_file_list[index]
        if self.ext == '.bin':
            points = np.fromfile(sample_path, dtype=np.float32).reshape(-1, 4)
        elif self.ext == '.npy':
            points = np.load(sample_path)
        else:
            raise NotImplementedError
        return self.prepare_data(data_dict={'points': points, 'frame_id': index})
def parse_config():
    """Parse the demo's command-line options and load the YAML config.

    Returns:
        (args, cfg): parsed argparse namespace and the populated global cfg.
    """
    ap = argparse.ArgumentParser(description='arg parser')
    ap.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                    help='specify the config for demo')
    ap.add_argument('--data_path', type=str, default='demo_data',
                    help='specify the point cloud data file or directory')
    ap.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
    ap.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file')

    args = ap.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg
def main():
    """Run the quick demo: load a checkpoint, run inference on each sample
    and visualize the predicted boxes (open3d or mayavi backend)."""
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of 3DTrans-------------------------')

    dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=Path(args.data_path), ext=args.ext, logger=logger
    )
    logger.info(f'Total number of samples: \t{len(dataset)}')

    net = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=dataset)
    net.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    net.cuda()
    net.eval()

    with torch.no_grad():
        for sample_idx, data_dict in enumerate(dataset):
            logger.info(f'Visualized sample index: \t{sample_idx + 1}')
            batch = dataset.collate_batch([data_dict])
            load_data_to_gpu(batch)
            pred_dicts, _ = net.forward(batch)

            V.draw_scenes(
                points=batch['points'][:, 1:], ref_boxes=pred_dicts[0]['pred_boxes'],
                ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels']
            )

            # mayavi needs an explicit blocking show(); open3d blocks inside draw_scenes
            if not OPEN3D_FLAG:
                mlab.show(stop=True)

    logger.info('Demo done.')
# Script entry point.
if __name__ == '__main__':
    main()
# --- end of tools/demo.py ---
# --- 3DTrans-master/tools/test_multi_db_sim.py ---
import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, build_network_multi_db
from pcdet.utils import common_utils
def parse_config():
    """Parse CLI options for multi-db testing and load the YAML config.

    Also derives cfg.TAG / cfg.EXP_GROUP_PATH from the config path and fixes
    the numpy random seed for reproducible evaluation.

    Returns:
        (args, cfg): parsed argparse namespace and the populated global cfg.
    """
    ap = argparse.ArgumentParser(description='arg parser')
    ap.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    ap.add_argument('--source_1', type=int, default=2, help='if test the source_1 data')
    ap.add_argument('--source_one_name', type=str, default="kitti", help='enter the name of the first dataset of merged datasets')
    ap.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    ap.add_argument('--workers', type=int, default=0, help='number of workers for dataloader')
    ap.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    ap.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    ap.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    ap.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    ap.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set extra config keys if needed')
    ap.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    ap.add_argument('--start_epoch', type=int, default=0, help='')
    ap.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    ap.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    ap.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    ap.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = ap.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    np.random.seed(1024)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def eval_single_ckpt_parallel(model, show_db, test_loader, test_loader_s2, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load ``args.ckpt`` into *model* and evaluate it in the merged-dataset setting.

    Args:
        model: network to load the checkpoint into.
        show_db: which source dataset to report on (1 or 2), forwarded to
            ``eval_one_epoch_parallel``.
        test_loader: dataloader of the first source dataset.
        test_loader_s2: dataloader of the second source dataset.
        args: namespace providing ``ckpt`` and ``save_to_file``.
        eval_output_dir: directory where evaluation results are written.
        logger: logger instance.
        epoch_id: epoch tag used for result bookkeeping.
        dist_test: whether evaluation runs distributed (checkpoint is then
            loaded to CPU first).
    """
    # load checkpoint
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # start evaluation
    eval_utils.eval_one_epoch_parallel(
        cfg, model, show_db, test_loader, test_loader_s2, epoch_id, logger, dist_test=dist_test,
        result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Find the oldest checkpoint in *ckpt_dir* that has not been evaluated yet.

    Args:
        ckpt_dir: directory containing ``*checkpoint_epoch_*.pth`` files.
        ckpt_record_file: text file listing one already-evaluated epoch id per line.
        args: namespace providing ``start_epoch``; checkpoints from earlier
            epochs are skipped.

    Returns:
        ``(epoch_id, ckpt_path)`` for the first pending checkpoint (ordered by
        file mtime), or ``(-1, None)`` when there is nothing left to evaluate.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # Use a context manager so the record-file handle is closed deterministically
    # (the original relied on GC to close it).
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]

    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if not num_list:
            continue

        epoch_id = num_list[-1]
        if 'optim' in epoch_id:
            # optimizer-state checkpoints are not evaluation targets
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt

    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll *ckpt_dir* and evaluate every new checkpoint as it appears.

    Loops until no unevaluated checkpoint shows up for ``args.max_waiting_mins``
    minutes (and at least one evaluation pass has happened). Evaluated epoch
    ids are appended to a record file so reruns skip them.

    Args:
        model: network whose parameters are (re)loaded from each checkpoint.
        test_loader: dataloader for the test split.
        args: namespace providing ``start_epoch``, ``max_waiting_mins`` and
            ``save_to_file``.
        eval_output_dir: root directory for per-epoch evaluation results.
        logger: logger instance.
        ckpt_dir: directory watched for ``checkpoint_epoch_*.pth`` files.
        dist_test: whether evaluation runs distributed.
    """
    # evaluated ckpt record: one epoch id per line, created empty if missing
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log (rank 0 only; guarded again before every use below)
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            # give up only after max_waiting_mins of idle polling, and only
            # once at least one checkpoint has been evaluated
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Evaluate a multi-db checkpoint on one of the two merged source datasets.

    Builds dataloaders for both source datasets, tags the relevant model
    sub-configs with a ``db_source`` flag, constructs the multi-db network and
    evaluates ``args.ckpt`` on the dataset selected by ``args.source_1``
    (1 -> first source, 2 -> second source).
    """
    args, cfg = parse_config()

    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True

    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # the CLI value is the total batch size across all GPUs
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus

    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)

    eval_output_dir = output_dir / 'eval'

    # Derive the epoch tag from the checkpoint name up front so that it is
    # also defined when --eval_all is given (previously it was only bound in
    # the `not eval_all` branch, and the eval_single_ckpt_parallel() calls
    # below raised NameError for --eval_all runs).
    num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
    epoch_id = num_list[-1] if len(num_list) > 0 else 'no_number'
    if not args.eval_all:
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'

    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag

    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)

    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    logger.info('GPU_NAME=%s' % torch.cuda.get_device_name())

    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)

    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'

    test_set, test_loader_s1, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    test_set_s2, test_loader_s2, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_SRC_2,
        class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )

    # add the dataset_source flag into Dual_BN layer
    if cfg.MODEL.get('POINT_T', None):
        cfg.MODEL.POINT_T.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_3D', None):
        cfg.MODEL.BACKBONE_3D.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_2D', None):
        cfg.MODEL.BACKBONE_2D.update({"db_source": 1})
    if cfg.MODEL.get('DENSE_2D_MoE', None):
        cfg.MODEL.DENSE_2D_MoE.update({"db_source": 1})
    if cfg.MODEL.get('PFE', None):
        cfg.MODEL.PFE.update({"db_source": 1})

    model = build_network_multi_db(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), num_class_s2=len(cfg.DATA_CONFIG_SRC_2.CLASS_NAMES),
                                   dataset=test_set, dataset_s2=test_set_s2, source_one_name=args.source_one_name)

    if args.source_1 == 1:
        logger.info('**********************Testing Dataset=%s**********************' % test_set.dataset_cfg.DATASET)
        eval_single_ckpt_parallel(model, 1, test_loader_s1, test_loader_s2, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
    elif args.source_1 == 2:
        logger.info('**********************Testing Dataset=%s**********************' % test_set_s2.dataset_cfg.DATASET)
        eval_single_ckpt_parallel(model, 2, test_loader_s1, test_loader_s2, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
if __name__ == '__main__':
    main()
# --- 3DTrans-master/tools/pseudo_label.py ---
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader, build_semi_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_pseudo_label_utils import train_model
def parse_config():
    """Parse CLI options for pseudo-label training and load the YAML config.

    Returns:
        (args, cfg): parsed argparse namespace and the populated global cfg.
    """
    ap = argparse.ArgumentParser(description='arg parser')
    ap.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    ap.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    ap.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    ap.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    ap.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    ap.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    ap.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    ap.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    ap.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    ap.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    ap.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    ap.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    ap.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    ap.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    ap.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set extra config keys if needed')
    ap.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    ap.add_argument('--start_epoch', type=int, default=0, help='')
    ap.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    ap.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = ap.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Train a detector with pseudo-labels (semi-supervised), then evaluate.

    Pipeline: parse config -> optionally init distributed training -> create
    output/ckpt/ps_label dirs and logger -> build labeled/unlabeled/test
    dataloaders -> build network + optimizer -> resume from the newest
    checkpoint if available -> run pseudo-label training -> evaluate the last
    ``args.num_epochs_to_eval`` checkpoints on the test split.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # the CLI value is the total batch size across all GPUs
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # keep a copy of the config next to the results for reproducibility
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # batch_size = {
    #     'pretrain': cfg.OPTIMIZATION.PRETRAIN.BATCH_SIZE_PER_GPU,
    #     'labeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.LD_BATCH_SIZE_PER_GPU,
    #     'unlabeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.UD_BATCH_SIZE_PER_GPU,
    #     'test': cfg.OPTIMIZATION.TEST.BATCH_SIZE_PER_GPU,
    # }
    # NOTE(review): all four splits currently share the same per-GPU batch
    # size; the commented block above is the per-split alternative.
    batch_size = {
        'pretrain': args.batch_size,
        'labeled': args.batch_size,
        'unlabeled': args.batch_size,
        'test': args.batch_size,
    }
    # -----------------------create dataloader & network & optimizer---------------------------
    datasets, dataloaders, samplers = build_semi_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=batch_size,
        dist=dist_train,
        root_path=cfg.DATA_CONFIG.DATA_PATH,
        workers=args.workers,
        logger=logger,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=datasets['labeled'])
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # no explicit checkpoint: resume from the newest one in ckpt_dir, if any
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    # scheduler length is driven by the unlabeled loader
    total_iters_each_epoch = len(dataloaders['unlabeled']) if not args.merge_all_iters_to_one_epoch else len(dataloaders['unlabeled']) // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_model(
        model,
        optimizer,
        dataloaders['labeled'],
        dataloaders['unlabeled'],
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        cfg=cfg,
        dist_train=dist_train,
        ps_label_dir=ps_label_dir,
        labeled_sampler=samplers['labeled'],
        unlabeled_sampler=samplers['unlabeled'],
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        logger=logger,
        ema_model=None
    )
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
    main()
# --- 3DTrans-master/tools/train_multi_db_3db.py ---
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader_mdf, build_dataloader
from pcdet.models import build_network_multi_db_3, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from tools.train_utils.train_multi_db_utils_3cls import train_model
def parse_config():
    """Parse CLI options for 3-dataset multi-db training and load the YAML config.

    Returns:
        (args, cfg): parsed argparse namespace and the populated global cfg.
    """
    ap = argparse.ArgumentParser(description='arg parser')
    ap.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    ap.add_argument('--frozen_backbone', action='store_true', default=False, help='froze the backbone when training')
    ap.add_argument('--source_one_name', type=str, default="kitti", help='enter the name of the first dataset of merged datasets')
    ap.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    ap.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    ap.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    ap.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    ap.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    ap.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    ap.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    ap.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    ap.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    ap.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    ap.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    ap.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    ap.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    ap.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                    help='set extra config keys if needed')
    ap.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    ap.add_argument('--start_epoch', type=int, default=0, help='')
    ap.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    ap.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = ap.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Joint training on three source datasets (multi-db), then evaluation.

    Builds one dataloader per source dataset, tags the relevant model
    sub-configs with a ``db_source`` flag (read by the dual-BN / MoE layers),
    trains with the merged-loss routine, and finally evaluates checkpoints on
    the first source's test split.
    """
    args, cfg = parse_config()
    if args.source_one_name not in ["waymo", "nusc", "kitti"]:
        raise RuntimeError('Does not exist for source_one_name')
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # the CLI value is the total batch size across all GPUs
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # keep a copy of the config next to the results for reproducibility
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    logger.info('**********************Using Two DataLoader and Merge Loss**********************')
    logger.info('**********************VALUE of source_one_name= %s**********************' % args.source_one_name)
    source_set, source_loader, source_sampler = build_dataloader_mdf(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        drop_last=True,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    source_set_2, source_loader_2, source_sampler_2 = build_dataloader_mdf(
        dataset_cfg=cfg.DATA_CONFIG_SRC_2,
        class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        drop_last=True,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    source_set_3, source_loader_3, source_sampler_3 = build_dataloader_mdf(
        dataset_cfg=cfg.DATA_CONFIG_SRC_3,
        class_names=cfg.DATA_CONFIG_SRC_3.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        drop_last=True,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # add the dataset_source flag into Dual_BN layer, for training stage, we use the default value of 1
    if cfg.MODEL.get('POINT_T', None):
        cfg.MODEL.POINT_T.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_3D', None):
        cfg.MODEL.BACKBONE_3D.update({"db_source": 1})
    if cfg.MODEL.get('DENSE_3D_MoE', None):
        cfg.MODEL.DENSE_3D_MoE.update({"db_source": 1})
    if cfg.MODEL.get('BACKBONE_2D', None):
        cfg.MODEL.BACKBONE_2D.update({"db_source": 1})
    if cfg.MODEL.get('DENSE_2D_MoE', None):
        cfg.MODEL.DENSE_2D_MoE.update({"db_source": 1})
    if cfg.MODEL.get('PFE', None):
        cfg.MODEL.PFE.update({"db_source": 1})
    model = build_network_multi_db_3(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), num_class_s2=len(cfg.DATA_CONFIG_SRC_2.CLASS_NAMES), \
        num_class_s3=len(cfg.DATA_CONFIG_SRC_3.CLASS_NAMES), dataset=source_set, dataset_s2=source_set_2, dataset_s3=source_set_3, \
        source_one_name=args.source_one_name, source_1=1)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # no explicit checkpoint: resume from the newest one in ckpt_dir, if any
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if args.frozen_backbone:
        # NOTE(review): the %s below has no argument, so it is logged literally
        logger.info('**********************Note that Frozen Backbone: %s**********************')
        model.frozen_model(model)
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()],
                                                    find_unused_parameters=True)
        # model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    # NOTE(review): only the first two loaders are compared here; if
    # source_loader_3 can be the longest, it is ignored for the schedule
    # length -- confirm this is intended.
    max_len_dataset = len(source_loader) if len(source_loader) > len(source_loader_2) else len(source_loader_2)
    total_iters_each_epoch = max_len_dataset if not args.merge_all_iters_to_one_epoch \
        else max_len_dataset // args.epochs
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    train_func = train_model
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_func(
        model,
        optimizer,
        source_loader,
        source_loader_2,
        source_loader_3,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        ps_label_dir=ps_label_dir,
        source_sampler=source_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        logger=logger,
    )
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
    main()
# --- 3DTrans-master/tools/train_active_dual_target.py ---
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_active_target_utils import train_active_model_dual_tar
from test import repeat_eval_ckpt
import math
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse CLI arguments and merge the YAML config into the global ``cfg``.

    Returns:
        tuple: ``(args, cfg)`` — the parsed ``argparse.Namespace`` and the
        updated global config (``cfg.TAG`` / ``cfg.EXP_GROUP_PATH`` are
        derived from the config-file path).
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    # batch_size/epochs default to None so main() can fall back to the YAML
    # values (cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU / NUM_EPOCHS); hard-coded
    # defaults would make those `is None` fallback branches unreachable.
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # consumed by the post-training evaluation stage (args.num_epochs_to_eval);
    # without this flag that stage raises AttributeError
    parser.add_argument('--num_epochs_to_eval', type=int, default=0,
                        help='number of last checkpoints to evaluate after training')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point: active domain adaptation training with dual targets.

    Pipeline: parse config -> optional distributed init -> build source /
    target / source-sample dataloaders -> build detector + domain
    discriminator with their optimizers and LR schedulers -> restore any
    checkpoint -> run ``train_active_model_dual_tar`` -> evaluate the last
    checkpoints on the source test split.
    """
    args, cfg = parse_config()
    # ----- distributed setup and per-GPU batch size -----
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        # dispatches to common_utils.init_dist_pytorch / init_dist_slurm
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        # fall back to the per-GPU batch size from the YAML config
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        # CLI value is the global batch size; convert to per-GPU
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # ----- output directories and logging -----
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # keep a copy of the config next to the run outputs (rank 0 only)
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # fine tune model
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # Waymo->NuScenes uses a dedicated source-sample config; otherwise the
    # main source config (with its FILE_PATH info list) is reused.
    if cfg['DATA_CONFIG']['DATASET'] == 'ActiveWaymoDataset' and cfg['DATA_CONFIG_TAR']['DATASET'] == 'ActiveNuScenesDataset':
        source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
            dataset_cfg=cfg.DATA_CONFIG_SRC_SAMPLE,
            class_names=cfg.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_train, workers=args.workers,
            logger=logger,
            training=True,
            info_path=None,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            total_epochs=args.epochs
        )
    else:
        source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
            dataset_cfg=cfg.DATA_CONFIG,
            class_names=cfg.CLASS_NAMES,
            batch_size=args.batch_size,
            dist=dist_train, workers=args.workers,
            logger=logger,
            training=True,
            info_path=cfg.DATA_CONFIG.FILE_PATH,
            merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
            total_epochs=args.epochs
        )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # separate optimizers for the detector and the domain discriminator
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    optimizer_discriminator = build_optimizer(model.discriminator, cfg.OPTIMIZATION.DISCRIMINATOR)
    optimizer_list = [optimizer_detector, optimizer_discriminator]
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # resume from the newest checkpoint found in ckpt_dir, if any
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    # detector LR schedule is paced by the sample loader, the discriminator's
    # by the full source loader
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=len(source_sample_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    lr_scheduler_discriminator, lr_warmup_scheduler_discriminator = build_scheduler(
        optimizer_discriminator, total_iters_each_epoch=len(source_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.DISCRIMINATOR
    )
    lr_scheduler_list = [lr_scheduler_detector, lr_scheduler_discriminator]
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_active_model_dual_tar(
        model=model,
        optimizer=optimizer_list,
        source_train_loader=source_loader,
        target_train_loader=target_loader,
        source_sample_loader=source_sample_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler_list,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        sample_epoch=cfg.SAMPLE_EPOCHS,
        annotation_budget=cfg.ANNOTATION_BUDGET,
        target_file_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
        sample_save_path=target_list_dir,
        cfg=cfg,
        batch_size=args.batch_size,
        workers=args.workers,
        dist_train=dist_train,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        # NOTE(review): this passes the DataLoader, not source_sample_sampler —
        # looks like a typo for the sampler unpacked above; confirm downstream.
        source_sample_sampler=source_sample_loader,
        lr_warmup_scheduler=None,
        # NOTE(review): max_ckpt_save_num/merge flag are hard-coded here and
        # ignore the CLI flags of the same name — confirm intentional.
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False,
        logger=logger,
        ema_model=None
    )
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # ----- post-training evaluation of the last checkpoints -----
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): requires a --num_epochs_to_eval CLI flag to exist —
    # verify parse_config defines it, otherwise this raises AttributeError.
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
                           0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point: run active dual-target training when invoked directly.
if __name__ == '__main__':
    main()
| 12,316 | 42.83274 | 169 | py |
3DTrans | 3DTrans-master/tools/train_active_source.py | import _init_path
import os
import math
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from tools.test import eval_single_ckpt
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_active_source_utils import train_active_model_source_only
from test import repeat_eval_ckpt
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse CLI arguments and merge the YAML config into the global ``cfg``.

    Returns:
        tuple: ``(args, cfg)`` — the parsed ``argparse.Namespace`` and the
        updated global config (``cfg.TAG`` / ``cfg.EXP_GROUP_PATH`` are
        derived from the config-file path).
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    # batch_size/epochs default to None so main() can fall back to the YAML
    # values (cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU / NUM_EPOCHS); hard-coded
    # defaults would make those `is None` fallback branches unreachable.
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # consumed by the (currently commented-out) post-training evaluation stage;
    # kept for consistency with the sibling training scripts
    parser.add_argument('--num_epochs_to_eval', type=int, default=0,
                        help='number of last checkpoints to evaluate after training')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point: active-learning training on the source domain only.

    Pipeline: parse config -> optional distributed init -> build source and
    (unsupervised) target dataloaders -> build detector + domain
    discriminator with their optimizers and LR schedulers -> restore any
    checkpoint -> run ``train_active_model_source_only``. The post-training
    evaluation block is currently commented out.
    """
    args, cfg = parse_config()
    # ----- distributed setup and per-GPU batch size -----
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        # dispatches to common_utils.init_dist_pytorch / init_dist_slurm
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.batch_size is None:
        # fall back to the per-GPU batch size from the YAML config
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        # CLI value is the global batch size; convert to per-GPU
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # ----- output directories and logging -----
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # keep a copy of the config next to the run outputs (rank 0 only)
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # fine tune model
    source_set, source_loader, source_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # sample_set, sample_loader, sample_sampler = build_dataloader_ada(
    #     dataset_cfg=cfg.DATA_CONFIG_TAR,
    #     class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
    #     batch_size=args.batch_size,
    #     dist=dist_train, workers=args.workers,
    #     logger=logger,
    #     training=True,
    #     info_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
    #     merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
    #     total_epochs=args.epochs
    # )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # separate optimizers for the detector and the domain discriminator
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    optimizer_discriminator = build_optimizer(model.discriminator, cfg.OPTIMIZATION.DISCRIMINATOR)
    optimizer_list = [optimizer_detector, optimizer_discriminator]
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # resume from the newest checkpoint found in ckpt_dir, if any
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # epoch length is derived from the annotation budget, not the dataset size
    # (note: 'SOURCE_THRESHOD' is the config key as spelled in the YAML)
    total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    lr_scheduler_discriminator, lr_warmup_scheduler_discriminator = build_scheduler(
        optimizer_discriminator, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.DISCRIMINATOR
    )
    lr_scheduler_list = [lr_scheduler_detector, lr_scheduler_discriminator]
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_active_model_source_only(
        model=model,
        optimizer=optimizer_list,
        source_train_loader=source_loader,
        target_train_loader=target_loader,
        sample_loader=None,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler_list,
        optim_cfg=cfg.OPTIMIZATION,
        total_iters_each_epoch=total_iters_each_epoch,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        sample_epoch=cfg.SAMPLE_EPOCHS,
        source_budget=cfg.SOURCE_THRESHOD,
        source_file_path=cfg.DATA_CONFIG.FILE_PATH,
        sample_save_path=target_list_dir,
        cfg=cfg,
        batch_size=args.batch_size,
        workers=args.workers,
        dist_train=dist_train,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        sample_sampler=None,
        lr_warmup_scheduler=None,
        # NOTE(review): ckpt_save_interval/max_ckpt_save_num are hard-coded
        # here and ignore the CLI flags of the same name — confirm intentional.
        ckpt_save_interval=1,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False,
        logger=logger,
        ema_model=None
    )
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # logger.info('**********************Start evaluation %s/%s(%s)**********************' %
    #             (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # test_set, test_loader, sampler = build_dataloader_ada(
    #     dataset_cfg=cfg.DATA_CONFIG,
    #     class_names=cfg.CLASS_NAMES,
    #     batch_size=args.batch_size,
    #     dist=dist_train, workers=args.workers, logger=logger, training=False
    # )
    # eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    # eval_output_dir.mkdir(parents=True, exist_ok=True)
    # args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
    #                        0)  # Only evaluate the last args.num_epochs_to_eval epochs
    # args.ckpt = ckpt_dir / 'checkpoint_epoch_%d.pth' % args.epochs
    # eval_single_ckpt(
    #     model.module if dist_train else model,
    #     test_loader, args, eval_output_dir, logger, ckpt_dir,
    #     dist_test=dist_train
    # )
    # logger.info('**********************End evaluation %s/%s(%s)**********************' %
    #             (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
# Script entry point: run source-only active training when invoked directly.
if __name__ == '__main__':
    main()
| 11,822 | 42.307692 | 169 | py |
3DTrans | 3DTrans-master/tools/test_multi_db.py | import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, build_network_multi_db
from pcdet.utils import common_utils
def parse_config():
    """Parse CLI arguments and merge the YAML config into the global ``cfg``.

    Also seeds NumPy's global RNG (side effect) so evaluation is
    reproducible.

    Returns:
        tuple: ``(args, cfg)`` — the parsed ``argparse.Namespace`` and the
        updated global config (``cfg.TAG`` / ``cfg.EXP_GROUP_PATH`` are
        derived from the config-file path).
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--source_1', type=int, default=2, help='if test the source_1 data')
    parser.add_argument('--source_one_name', type=str, default="kitti", help='enter the name of the first dataset of merged datasets')
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    # fixed seed for deterministic evaluation (global side effect)
    np.random.seed(1024)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Restore the weights named by ``args.ckpt`` and run one evaluation pass.

    The checkpoint is loaded into ``model``, the model is moved to the GPU,
    and ``eval_utils.eval_one_epoch`` is run over ``test_loader``, writing
    results (and optional detection files) under ``eval_output_dir``.
    """
    # Restore the requested checkpoint before moving the model to the GPU.
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # One full evaluation pass over the test split.
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger,
        dist_test=dist_test,
        result_dir=eval_output_dir,
        save_to_file=args.save_to_file,
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Return the first checkpoint in ``ckpt_dir`` that still needs evaluation.

    Scans ``*checkpoint_epoch_*.pth`` files (oldest first by mtime) and skips
    any whose epoch id is already recorded in ``ckpt_record_file``, contains
    'optim' (optimizer-state checkpoints), or is below ``args.start_epoch``.

    Args:
        ckpt_dir: directory containing checkpoint files.
        ckpt_record_file: text file with one already-evaluated epoch id per line.
        args: namespace providing ``start_epoch``.

    Returns:
        tuple: ``(epoch_id, ckpt_path)`` for the first pending checkpoint,
        or ``(-1, None)`` when nothing is pending.
    """
    # Use a context manager so the record file handle is closed promptly
    # (the original relied on GC to close it).
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(line.strip()) for line in f.readlines()]
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # compile once instead of re-scanning the pattern for every checkpoint
    epoch_pattern = re.compile('checkpoint_epoch_(.*).pth')
    for cur_ckpt in ckpt_list:
        num_list = epoch_pattern.findall(cur_ckpt)
        if len(num_list) == 0:
            continue
        epoch_id = num_list[-1]
        if 'optim' in epoch_id:
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt
    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll ``ckpt_dir`` and evaluate each new checkpoint as it appears.

    Keeps checking for unevaluated checkpoints every 30 seconds; after the
    first successful evaluation, gives up once ``args.max_waiting_mins``
    minutes pass with nothing new. Evaluated epoch ids are appended to a
    record file so reruns skip them; per-epoch metrics go to TensorBoard
    (rank 0 only).
    """
    # evaluated ckpt record
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    # touch the record file so get_no_evaluated_ckpt can always open it
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log
    # NOTE(review): tb_log is only bound on rank 0; the add_scalar loop below
    # is likewise rank-0 guarded, so non-zero ranks never touch it.
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            # nothing pending: wait, and bail out only after the waiting
            # budget is exhausted AND at least one evaluation has run
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        # found a pending checkpoint: reset the waiting budget
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Entry point: evaluate a multi-database (dual-source) detector.

    Builds test dataloaders for both source datasets, injects the
    ``db_source`` flag into the Dual-BN-related model config sections,
    builds the multi-db network, then evaluates either one checkpoint
    (``--ckpt``) or all checkpoints in a directory (``--eval_all``) on the
    dataset selected by ``--source_1``.
    """
    args, cfg = parse_config()
    # ----- distributed setup and per-GPU batch size -----
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        # dispatches to common_utils.init_dist_pytorch / init_dist_slurm
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    # ----- output directories -----
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # derive the epoch id from the checkpoint filename's trailing number
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    logger.info('GPU_NAME=%s' % torch.cuda.get_device_name())
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    # ----- test dataloaders for both source datasets -----
    test_set, test_loader_s1, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    test_set_s2, test_loader_s2, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_SRC_2,
        class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    # add the dataset_source flag into Dual_BN layer
    if cfg.MODEL.get('POINT_T', None):
        cfg.MODEL.POINT_T.update({"db_source": args.source_1})
    if cfg.MODEL.get('BACKBONE_3D', None):
        cfg.MODEL.BACKBONE_3D.update({"db_source": args.source_1})
    if cfg.MODEL.get('DENSE_3D_MoE', None):
        cfg.MODEL.DENSE_3D_MoE.update({"db_source": args.source_1})
    if cfg.MODEL.get('BACKBONE_2D', None):
        cfg.MODEL.BACKBONE_2D.update({"db_source": args.source_1})
    if cfg.MODEL.get('DENSE_2D_MoE', None):
        cfg.MODEL.DENSE_2D_MoE.update({"db_source": args.source_1})
    if cfg.MODEL.get('PFE', None):
        cfg.MODEL.PFE.update({"db_source": args.source_1})
    model = build_network_multi_db(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), num_class_s2=len(cfg.DATA_CONFIG_SRC_2.CLASS_NAMES), \
        dataset=test_set, dataset_s2=test_set_s2, source_one_name=args.source_one_name)
    # select which dataset to evaluate on; --source_1 defaults to 2
    # NOTE(review): any other value of --source_1 leaves test_loader unbound
    # and the eval below raises NameError — confirm valid values are {1, 2}.
    if args.source_1 == 1:
        logger.info('**********************Testing Dataset=%s**********************' % test_set.dataset_cfg.DATASET)
        test_loader = test_loader_s1
    elif args.source_1 == 2:
        logger.info('**********************Testing Dataset=%s**********************' % test_set_s2.dataset_cfg.DATASET)
        test_loader = test_loader_s2
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
if __name__ == '__main__':
main() | 10,103 | 42.74026 | 142 | py |
3DTrans | 3DTrans-master/tools/train_random.py | import _init_path
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils, active_learning_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_random_utils import train_model
from test import repeat_eval_ckpt
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse CLI arguments and merge the YAML config into the global ``cfg``.

    Returns:
        tuple: ``(args, cfg)`` — the parsed ``argparse.Namespace`` and the
        updated global config (``cfg.TAG`` / ``cfg.EXP_GROUP_PATH`` are
        derived from the config-file path).
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    # batch_size/epochs default to None so main() can fall back to the YAML
    # values (cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU / NUM_EPOCHS); hard-coded
    # defaults would make those `is None` fallback branches unreachable.
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=0, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # consumed by the post-training evaluation stage (args.num_epochs_to_eval)
    parser.add_argument('--num_epochs_to_eval', type=int, default=0,
                        help='number of last checkpoints to evaluate after training')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point for active-domain-adaptation training.

    Parses the CLI/YAML config, initialises (optionally distributed) training,
    randomly samples annotation budgets from the source and target frame lists,
    builds a dataloader for each, and trains the detector with ``train_model``
    using both loaders.  All outputs (checkpoints, logs, tensorboard, sampled
    frame lists) go under ``output/<exp_group>/<tag>/<extra_tag>``.
    """
    args, cfg = parse_config()
    # --- distributed setup -------------------------------------------------
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        # Dispatches to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # --- batch size / epochs ----------------------------------------------
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # The CLI value is the *total* batch size; split it evenly per GPU.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    # NOTE(review): --epochs defaults to 15 in parse_config, so the config
    # fallback below only fires if args.epochs is explicitly set to None.
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # --- output directories ------------------------------------------------
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    target_list_dir = output_dir / 'target_list'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config file next to the run outputs.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    # Only rank 0 writes tensorboard events.
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # Randomly sample frames from the full source/target lists according to the
    # configured budgets; the sampled list paths feed the dataloaders below.
    source_list = active_learning_utils.get_dataset_list(cfg['DATA_CONFIG']['FILE_PATH'], oss=True)
    target_list = active_learning_utils.get_dataset_list(cfg['DATA_CONFIG_TAR']['FILE_PATH'], oss=True)
    sample_source_path, sample_target_path = active_learning_utils.random_sample(source_list, target_list, cfg['SOURCE_BUDGET'], cfg['ANNOTATION_BUDGET'], target_list_dir)
    source_set, source_loader, source_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        info_path=sample_source_path,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    target_set, target_loader, target_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        info_path=sample_target_path,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # The network is built against the *source* dataset's metadata.
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # NOTE(review): optimizer state is not restored on resume
        # (optimizer=None) -- confirm this is the intended behaviour.
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the most recently written checkpoint, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    lr_scheduler, lr_warmup_scheduler_detector = build_scheduler(
        optimizer, total_iters_each_epoch=len(source_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # NOTE(review): ckpt_save_interval / max_ckpt_save_num /
    # merge_all_iters_to_one_epoch are hard-coded below, so the matching CLI
    # flags parsed above are ignored -- confirm this is intentional.
    train_model(
        model=model,
        optimizer=optimizer,
        train_source_loader=source_loader,
        train_target_loader=target_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        lr_warmup_scheduler=None,
        ckpt_save_interval=1,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False
    )
    # Release shared-memory blocks created by the datasets, if enabled.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # logger.info('**********************Start evaluation %s/%s(%s)**********************' %
    #             (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # test_set, test_loader, sampler = build_dataloader(
    #     dataset_cfg=cfg.DATA_CONFIG,
    #     class_names=cfg.CLASS_NAMES,
    #     batch_size=args.batch_size,
    #     dist=dist_train, workers=args.workers, logger=logger, training=False
    # )
    # eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    # eval_output_dir.mkdir(parents=True, exist_ok=True)
    # args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
    #                        0)  # Only evaluate the last args.num_epochs_to_eval epochs
    # repeat_eval_ckpt(
    #     model.module if dist_train else model,
    #     test_loader, args, eval_output_dir, logger, ckpt_dir,
    #     dist_test=dist_train
    # )
    # logger.info('**********************End evaluation %s/%s(%s)**********************' %
    #             (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
    main()
| 10,606 | 43.195833 | 171 | py |
3DTrans | 3DTrans-master/tools/test_semi.py | import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import copy
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_semi_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
def parse_config():
    """Build the CLI parser for semi-supervised evaluation, parse the
    arguments, and populate the shared global ``cfg`` from the YAML config.

    Side effects: mutates the module-level ``cfg`` and seeds numpy with a
    fixed value so any random sampling during evaluation is reproducible.

    Returns:
        tuple: ``(args, cfg)`` -- the parsed namespace and the config object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    add = parser.add_argument
    add('--cfg_file', type=str, default=None, help='specify the config for training')
    add('--batch_size', type=int, default=None, required=False, help='batch size for training')
    add('--workers', type=int, default=8, help='number of workers for dataloader')
    add('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    add('--ckpt', type=str, default=None, help='checkpoint to start from')
    add('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    add('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    add('--local_rank', type=int, default=0, help='local rank for distributed training')
    add('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
        help='set extra config keys if needed')
    add('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    add('--start_epoch', type=int, default=0, help='')
    add('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    add('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    add('--save_to_file', action='store_true', default=False, help='')

    args = parser.parse_args()

    # Load the YAML config into the shared config object and derive the
    # experiment tag / group path from the config file's location.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    np.random.seed(1024)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load the checkpoint given by ``args.ckpt`` into ``model`` and evaluate it once.

    Args:
        model: detector network to load the checkpoint into.
        test_loader: dataloader over the test split.
        args: parsed CLI namespace (uses ``ckpt`` and ``save_to_file``).
        eval_output_dir: directory where evaluation results are written.
        logger: logger instance.
        epoch_id: epoch identifier used for result bookkeeping.
        dist_test: whether evaluation runs in distributed mode.
    """
    # load checkpoint
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # Route the model dump through the logger (was a leftover raw debug
    # print), consistent with the other tools in this repo.
    logger.info(model)
    # start evaluation
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
        result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Return the oldest checkpoint in ``ckpt_dir`` that has not been evaluated yet.

    Checkpoints are files matching ``*checkpoint_epoch_*.pth``, scanned in
    modification-time order.  Epoch ids already listed (one per line) in
    ``ckpt_record_file`` are skipped, as are optimizer-state checkpoints and
    epochs below ``args.start_epoch``.

    Args:
        ckpt_dir: directory containing checkpoint files.
        ckpt_record_file: text file with one evaluated epoch id per line.
        args: namespace providing ``start_epoch``.

    Returns:
        tuple: ``(epoch_id_str, ckpt_path)`` for the first unevaluated
        checkpoint, or ``(-1, None)`` if none remains.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # Use a context manager so the record file handle is closed
    # deterministically instead of being leaked.
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]
    for cur_ckpt in ckpt_list:
        num_list = re.findall(r'checkpoint_epoch_(.*).pth', cur_ckpt)
        if not num_list:
            continue
        epoch_id = num_list[-1]
        # Skip optimizer-state checkpoints (e.g. 'checkpoint_epoch_5_optim.pth').
        if 'optim' in epoch_id:
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt
    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Evaluate every not-yet-evaluated checkpoint found in ``ckpt_dir``.

    Evaluated epoch ids are appended to a record file under
    ``eval_output_dir`` so that re-running the script skips finished epochs.
    The loop exits as soon as no unevaluated checkpoint at or above
    ``args.start_epoch`` remains.

    Args:
        model: detector network; its parameters are overwritten per checkpoint.
        test_loader: dataloader over the test split.
        args: parsed CLI namespace (uses ``start_epoch`` and ``save_to_file``).
        eval_output_dir: root directory for evaluation results and records.
        logger: logger instance.
        ckpt_dir: directory scanned for ``*checkpoint_epoch_*.pth`` files.
        dist_test: whether evaluation runs in distributed mode.
    """
    # evaluated ckpt record
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    # Touch the record file so later reads/appends always succeed.
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log
    # tb_log is only bound on rank 0; every use below is guarded by the same
    # rank check, so other ranks never reference the unbound name.
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        # NOTE(review): this loop breaks immediately when nothing is pending;
        # args.max_waiting_mins is parsed by the CLI but never used here --
        # confirm waiting for new checkpoints is intentionally disabled.
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            break
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                # NOTE(review): cur_epoch_id is a string here while add_scalar
                # expects an int global_step -- confirm tensorboardX coerces it.
                tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Entry point: evaluate all student and teacher checkpoints of a
    semi-supervised (teacher/student) training run.

    Builds the semi-supervised dataloaders, constructs separate teacher and
    student networks from the same model config, then runs
    ``repeat_eval_ckpt`` over each model's checkpoint directory under
    ``output/<exp_group>/<tag>/<extra_tag>/ssl_ckpt``.
    """
    args, cfg = parse_config()
    # --- distributed setup -------------------------------------------------
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        # Dispatches to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    # --- output directories ------------------------------------------------
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ssl_ckpt_dir = output_dir / 'ssl_ckpt'
    eval_output_dir = output_dir / 'eval'
    # Derive an epoch id from digits in the --ckpt path (for directory naming
    # only; the checkpoints actually evaluated come from ssl_ckpt_dir).
    num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
    epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
    eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    logger.info('GPU_NAME=%s' % torch.cuda.get_device_name())
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * cfg.OPTIMIZATION.TEST.BATCH_SIZE_PER_GPU))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    # Per-split batch sizes required by the semi-supervised dataloader builder.
    batch_size = {
        'pretrain': cfg.OPTIMIZATION.PRETRAIN.BATCH_SIZE_PER_GPU,
        'labeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.LD_BATCH_SIZE_PER_GPU,
        'unlabeled': cfg.OPTIMIZATION.SEMI_SUP_LEARNING.UD_BATCH_SIZE_PER_GPU,
        'test': cfg.OPTIMIZATION.TEST.BATCH_SIZE_PER_GPU,
    }
    # -----------------------create dataloader & network & optimizer---------------------------
    datasets, dataloaders, samplers = build_semi_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=batch_size,
        dist=dist_test,
        root_path=cfg.DATA_CONFIG.DATA_PATH,
        workers=args.workers,
        logger=logger,
    )
    # Teacher and student get independent deep copies of the model config.
    MODEL_TEACHER = copy.deepcopy(cfg.MODEL)
    teacher_model = build_network(model_cfg=MODEL_TEACHER, num_class=len(cfg.CLASS_NAMES), dataset=datasets['labeled'])
    MODEL_STUDENT = copy.deepcopy(cfg.MODEL)
    student_model = build_network(model_cfg=MODEL_STUDENT, num_class=len(cfg.CLASS_NAMES), dataset=datasets['labeled'])
    teacher_model.set_model_type('teacher')
    student_model.set_model_type('student')
    # Evaluation only: no gradients needed anywhere below.
    with torch.no_grad():
        logger.info('**********************Start evaluation for student model %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        eval_ssl_dir = output_dir / 'eval' / 'eval_all' / 'eval_with_student_model'
        eval_ssl_dir.mkdir(parents=True, exist_ok=True)
        repeat_eval_ckpt(
            model = student_model,
            test_loader = dataloaders['test'],
            args = args,
            eval_output_dir = eval_ssl_dir,
            logger = logger,
            ckpt_dir = ssl_ckpt_dir / 'student',
            dist_test=dist_test
        )
        logger.info('**********************End evaluation for student model %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        logger.info('**********************Start evaluation for teacher model %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
        eval_ssl_dir = output_dir / 'eval' / 'eval_all' / 'eval_with_teacher_model'
        eval_ssl_dir.mkdir(parents=True, exist_ok=True)
        # The teacher is evaluated as a plain ('origin') detector.
        teacher_model.set_model_type('origin')
        repeat_eval_ckpt(
            model = teacher_model,
            test_loader = dataloaders['test'],
            args = args,
            eval_output_dir = eval_ssl_dir,
            logger = logger,
            ckpt_dir = ssl_ckpt_dir / 'teacher',
            dist_test=dist_test
        )
        logger.info('**********************End evaluation for teacher model %s/%s(%s)**********************' %
                    (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
    main()
| 9,442 | 41.15625 | 120 | py |
3DTrans | 3DTrans-master/tools/train_random_target.py | import _init_path
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils, active_learning_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_utils import train_model
from test import repeat_eval_ckpt
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Build the CLI parser for random-target active-DA training, parse the
    arguments, and populate the shared global ``cfg`` from the YAML config.

    Returns:
        tuple: ``(args, cfg)`` -- the parsed namespace and the config object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    add = parser.add_argument
    add('--cfg_file', type=str, default=None, help='specify the config for training')
    # NOTE(review): --batch_size and --epochs carry non-None defaults here, so
    # the "fall back to config" branches in main() normally never fire.
    add('--batch_size', type=int, default=2, required=False, help='batch size for training')
    add('--epochs', type=int, default=15, required=False, help='number of epochs to train for')
    add('--workers', type=int, default=0, help='number of workers for dataloader')
    add('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    add('--ckpt', type=str, default=None, help='checkpoint to start from')
    add('--pretrained_model', type=str, default=None, help='pretrained_model')
    add('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    add('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    add('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    add('--fix_random_seed', action='store_true', default=False, help='')
    add('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    add('--local_rank', type=int, default=0, help='local rank for distributed training')
    add('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    add('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    add('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
        help='set extra config keys if needed')
    add('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    add('--start_epoch', type=int, default=0, help='')
    add('--save_to_file', action='store_true', default=False, help='')
    # active domain adaptation args
    add('--annotation_budget', type=int, default=5, help='annotation budget')

    args = parser.parse_args()

    # Load the YAML config into the shared config object and derive the
    # experiment tag / group path from the config file's location.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point: train a detector on a randomly sampled target subset.

    Parses the CLI/YAML config, initialises (optionally distributed) training,
    randomly samples ``cfg['ANNOTATION_BUDGET']`` frames from the target frame
    list, builds one dataloader over that subset, and trains with
    ``train_model``.  Outputs go under ``output/<exp_group>/<tag>/<extra_tag>``.
    """
    args, cfg = parse_config()
    # --- distributed setup -------------------------------------------------
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        # Dispatches to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # --- batch size / epochs ----------------------------------------------
    # NOTE(review): --batch_size defaults to 2 in parse_config, so the None
    # branch below is normally dead; the else branch splits the total batch
    # size across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    # NOTE(review): --epochs defaults to 15, so the config fallback below only
    # fires if args.epochs is explicitly set to None.
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # --- output directories ------------------------------------------------
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    ps_label_dir = output_dir / 'ps_label'
    target_list_dir = output_dir / 'target_list'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config file next to the run outputs.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    # Only rank 0 writes tensorboard events.
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # Randomly sample the annotation budget from the target frame list; the
    # sampled list path feeds the dataloader below.
    target_list = active_learning_utils.get_dataset_list(cfg['DATA_CONFIG']['FILE_PATH'], oss=True)
    sample_target_path = active_learning_utils.random_sample_target(target_list, cfg['ANNOTATION_BUDGET'], target_list_dir)
    target_set, target_loader, target_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.DATA_CONFIG.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        info_path=sample_target_path,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=target_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # NOTE(review): optimizer state is not restored on resume
        # (optimizer=None) -- confirm this is the intended behaviour.
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the most recently written checkpoint, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    lr_scheduler, lr_warmup_scheduler_detector = build_scheduler(
        optimizer, total_iters_each_epoch=len(target_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # NOTE(review): ckpt_save_interval / max_ckpt_save_num /
    # merge_all_iters_to_one_epoch are hard-coded below, so the matching CLI
    # flags parsed above are ignored -- confirm this is intentional.  The
    # target sampler is passed through the source_sampler parameter.
    train_model(
        model=model,
        optimizer=optimizer,
        train_loader=target_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        source_sampler=target_sampler,
        lr_warmup_scheduler=None,
        ckpt_save_interval=1,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False
    )
    # Release shared-memory blocks created by the dataset, if enabled.
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # logger.info('**********************Start evaluation %s/%s(%s)**********************' %
    #             (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    # test_set, test_loader, sampler = build_dataloader(
    #     dataset_cfg=cfg.DATA_CONFIG,
    #     class_names=cfg.CLASS_NAMES,
    #     batch_size=args.batch_size,
    #     dist=dist_train, workers=args.workers, logger=logger, training=False
    # )
    # eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    # eval_output_dir.mkdir(parents=True, exist_ok=True)
    # args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
    #                        0)  # Only evaluate the last args.num_epochs_to_eval epochs
    # repeat_eval_ckpt(
    #     model.module if dist_train else model,
    #     test_loader, args, eval_output_dir, logger, ckpt_dir,
    #     dist_test=dist_train
    # )
    # logger.info('**********************End evaluation %s/%s(%s)**********************' %
    #             (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
    main()
| 9,802 | 42.568889 | 169 | py |
3DTrans | 3DTrans-master/tools/train.py | print('program started',)
import _init_path
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_utils import train_model
def parse_config():
    """Build the CLI parser for standard training, parse the arguments, and
    populate the shared global ``cfg`` from the YAML config.

    Returns:
        tuple: ``(args, cfg)`` -- the parsed namespace and the config object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    add = parser.add_argument
    add('--cfg_file', type=str, default=None, help='specify the config for training')
    add('--batch_size', type=int, default=None, required=False, help='batch size for training')
    add('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    add('--workers', type=int, default=8, help='number of workers for dataloader')
    add('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    add('--ckpt', type=str, default=None, help='checkpoint to start from')
    add('--pretrained_model', type=str, default=None, help='pretrained_model')
    add('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    add('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    add('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    add('--fix_random_seed', action='store_true', default=False, help='')
    add('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    add('--local_rank', type=int, default=0, help='local rank for distributed training')
    add('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    add('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    add('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
        help='set extra config keys if needed')
    add('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    add('--start_epoch', type=int, default=0, help='')
    add('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
    add('--save_to_file', action='store_true', default=False, help='')

    args = parser.parse_args()

    # Load the YAML config into the shared config object and derive the
    # experiment tag / group path from the config file's location.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point: standard (optionally distributed) detector training.

    Parses the CLI/YAML config, builds the training dataloader, network,
    optimizer and schedulers, trains with ``train_model``, then evaluates the
    last ``args.num_epochs_to_eval`` checkpoints via ``repeat_eval_ckpt``.
    """
    args, cfg = parse_config()
    # --- distributed setup -------------------------------------------------
    if args.launcher == 'none':
        dist_train = False
        total_gpus = 1
    else:
        # Dispatches to common_utils.init_dist_pytorch / init_dist_slurm.
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # --- batch size / epochs / seed ----------------------------------------
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        # The CLI value is the *total* batch size; split it evenly per GPU.
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    if args.fix_random_seed:
        # Offset the seed by rank so each process draws a distinct stream.
        common_utils.set_random_seed(666 + cfg.LOCAL_RANK)
    # --- output directories ------------------------------------------------
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        # Keep a copy of the config file next to the run outputs.
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    # Only rank 0 writes tensorboard events.
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    train_set, train_loader, train_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=train_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # NOTE(review): n_parameters is computed but never logged or used below.
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        # Resume explicitly from --ckpt, restoring optimizer state as well.
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        # Otherwise resume from the most recently written checkpoint, if any.
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)
    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=len(train_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_model(
        model,
        optimizer,
        train_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        source_sampler=train_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
    )
    # Release shared-memory blocks created by the dataset, if enabled.
    if hasattr(train_set, 'use_shared_memory') and train_set.use_shared_memory:
        train_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
main() | 9,054 | 43.605911 | 125 | py |
3DTrans | 3DTrans-master/tools/test_multi_db_3db.py | import _init_path
import argparse
import datetime
import glob
import os
import re
import time
from pathlib import Path
import numpy as np
import torch
from tensorboardX import SummaryWriter
from eval_utils import eval_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, build_network_multi_db_3
from pcdet.utils import common_utils
def parse_config():
    """Parse command-line options, load the YAML config into the global ``cfg``,
    and derive the experiment tag / group path from the config file location.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated global config.
    """
    ap = argparse.ArgumentParser(description='arg parser')
    add = ap.add_argument
    add('--cfg_file', type=str, default=None, help='specify the config for training')
    add('--source_1', type=int, default=1, help='if test the source_1 data')
    add('--source_one_name', type=str, default="kitti", help='enter the name of the first dataset of merged datasets')
    add('--batch_size', type=int, default=None, required=False, help='batch size for training')
    add('--workers', type=int, default=8, help='number of workers for dataloader')
    add('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    add('--ckpt', type=str, default=None, help='checkpoint to start from')
    add('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    add('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    add('--local_rank', type=int, default=0, help='local rank for distributed training')
    add('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
        help='set extra config keys if needed')
    add('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    add('--start_epoch', type=int, default=0, help='')
    add('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    add('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    add('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    add('--save_to_file', action='store_true', default=False, help='')
    args = ap.parse_args()

    # Populate the global config from the YAML file; the experiment tag and
    # group path are taken from the config file name / directory layout.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    np.random.seed(1024)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Evaluate one checkpoint (``args.ckpt``) on ``test_loader``.

    Restores the weights, moves the model to GPU, then runs a single
    evaluation pass whose results land in ``eval_output_dir``.
    """
    # Restore weights from the checkpoint given on the command line.
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    # Run one full evaluation pass over the test split.
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger,
        dist_test=dist_test, result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Find the oldest checkpoint in ``ckpt_dir`` that has not been evaluated yet.

    Args:
        ckpt_dir: directory containing ``*checkpoint_epoch_<id>.pth`` files.
        ckpt_record_file: text file listing one already-evaluated epoch id per line.
        args: namespace providing ``start_epoch``; checkpoints from earlier
            epochs are skipped.

    Returns:
        (epoch_id, ckpt_path) for the first unevaluated checkpoint (files are
        visited oldest-first by mtime), or (-1, None) if none is pending.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # Fix: read the record file via a context manager (the original leaked the
    # file handle) and skip blank lines so a trailing newline cannot crash float().
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f if x.strip()]

    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if not num_list:
            continue
        epoch_id = num_list[-1]
        if 'optim' in epoch_id:
            # skip optimizer-state snapshots (e.g. checkpoint_epoch_5_optim.pth)
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt
    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll ``ckpt_dir`` and evaluate every new checkpoint as it appears.

    A record file of already-evaluated epoch ids makes the loop restartable.
    Scalar results go to tensorboard (rank 0 only).  The loop exits once no
    new checkpoint has appeared for ``args.max_waiting_mins`` minutes of
    accumulated waiting — but never before at least one evaluation has run.
    """
    # evaluated ckpt record: touch the file so later reads/appends always succeed
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    with open(ckpt_record_file, 'a'):
        pass
    # tensorboard log (only the rank-0 process writes scalars)
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True
    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            # Nothing to evaluate yet: wait 30 s, then give up once the
            # accumulated idle time exceeds the limit (only after the first eval).
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        # Load the pending checkpoint and evaluate it.
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Entry point: build the three per-source test dataloaders and the
    multi-dataset network, then evaluate either a single checkpoint (--ckpt)
    or every checkpoint in a directory (--eval_all) on the dataset selected
    by --source_1 (1, 2 or 3).
    """
    args, cfg = parse_config()
    # Single-GPU run unless a distributed launcher is requested.
    if args.launcher == 'none':
        dist_test = False
        total_gpus = 1
    else:
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    # --batch_size is the global batch size; split it evenly across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # Single-checkpoint mode: name the result dir after the last number
        # found in the checkpoint filename.
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    logger.info('GPU_NAME=%s' % torch.cuda.get_device_name())
    if dist_test:
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    # One test loader per source dataset; only the one selected by --source_1
    # is evaluated below, but all three sets are needed to build the network.
    test_set, test_loader_s1, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    test_set_s2, test_loader_s2, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_SRC_2,
        class_names=cfg.DATA_CONFIG_SRC_2.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    test_set_s3, test_loader_s3, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_SRC_3,
        class_names=cfg.DATA_CONFIG_SRC_3.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    # add the dataset_source flag into Dual_BN layer
    if cfg.MODEL.get('POINT_T', None):
        cfg.MODEL.POINT_T.update({"db_source": args.source_1})
    if cfg.MODEL.get('BACKBONE_3D', None):
        cfg.MODEL.BACKBONE_3D.update({"db_source": args.source_1})
    if cfg.MODEL.get('BACKBONE_2D', None):
        cfg.MODEL.BACKBONE_2D.update({"db_source": args.source_1})
    if cfg.MODEL.get('PFE', None):
        cfg.MODEL.PFE.update({"db_source": args.source_1})
    #model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    model = build_network_multi_db_3(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), num_class_s2=len(cfg.DATA_CONFIG_SRC_2.CLASS_NAMES), \
        num_class_s3=len(cfg.DATA_CONFIG_SRC_3.CLASS_NAMES), dataset=test_set, dataset_s2=test_set_s2, dataset_s3=test_set_s3, \
        source_one_name=args.source_one_name, source_1=args.source_1)
    # NOTE(review): if --source_1 is outside {1, 2, 3}, test_loader is never
    # bound and the evaluation below raises NameError — confirm intended range.
    if args.source_1 == 1:
        logger.info('**********************Testing Dataset=%s**********************' % test_set.dataset_cfg.DATASET)
        test_loader = test_loader_s1
    elif args.source_1 == 2:
        logger.info('**********************Testing Dataset=%s**********************' % test_set_s2.dataset_cfg.DATASET)
        test_loader = test_loader_s2
    elif args.source_1 == 3:
        logger.info('**********************Testing Dataset=%s**********************' % test_set_s3.dataset_cfg.DATASET)
        test_loader = test_loader_s3
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
if __name__ == '__main__':
main() | 10,576 | 43.441176 | 144 | py |
3DTrans | 3DTrans-master/tools/train_active_TQS.py | import _init_path
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.train_active_TQS import train_active_model_target
from test import repeat_eval_ckpt
import math
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse command-line options and load the YAML config into the global ``cfg``.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated global config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=2, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=15, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    # Fix: main() reads args.num_epochs_to_eval after training; the option was
    # never defined here, which raised AttributeError at the evaluation stage.
    parser.add_argument('--num_epochs_to_eval', type=int, default=0,
                        help='number of last epochs to evaluate after training')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point for active domain adaptation training (TQS): builds source,
    target and active-sampling dataloaders, trains the detector together with
    a domain discriminator and multi-class head, then evaluates the last
    checkpoints on the source test split.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # --batch_size is the global batch size; split it evenly across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # Output layout: output/<group>/<tag>/<extra_tag>/{ckpt,target_list,...}
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # fine tune model
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # sampling dataloader used by the active-selection step
    source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        info_path=cfg.DATA_CONFIG.FILE_PATH,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # Three optimizers: whole detector, domain discriminator, multi-class head.
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    optimizer_discriminator = build_optimizer(model.discriminator, cfg.OPTIMIZATION.DISCRIMINATOR)
    optimizer_mul_cls = build_optimizer(model.roi_head, cfg.OPTIMIZATION.MUL_CLS) #dense head
    optimizer_list = [optimizer_detector, optimizer_discriminator, optimizer_mul_cls]
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # resume from the newest checkpoint in ckpt_dir, if any
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixing some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    # total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=len(source_sample_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    lr_scheduler_discriminator, lr_warmup_scheduler_discriminator = build_scheduler(
        optimizer_discriminator, total_iters_each_epoch=len(source_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.DISCRIMINATOR
    )
    lr_scheduler_mul_cls, lr_warmup_scheduler_mul_cls = build_scheduler(
        optimizer_mul_cls, total_iters_each_epoch=len(source_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.MUL_CLS
    )
    lr_scheduler_list = [lr_scheduler_detector, lr_scheduler_discriminator, lr_scheduler_mul_cls]
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_active_model_target(
        model=model,
        optimizer=optimizer_list,
        source_train_loader=source_loader,
        target_train_loader=target_loader,
        sample_loader=source_sample_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler_list,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        sample_epoch=cfg.SAMPLE_EPOCHS,
        annotation_budget=cfg.ANNOTATION_BUDGET,
        target_file_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
        sample_save_path=target_list_dir,
        cfg=cfg,
        batch_size=args.batch_size,
        workers=args.workers,
        dist_train=dist_train,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        sample_sampler=source_sample_loader,
        lr_warmup_scheduler=None,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False,
        logger=logger,
        ema_model=None
    )
    # Release shared-memory buffers held by the datasets, if used.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): requires --num_epochs_to_eval to be defined by parse_config.
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
                           0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
main()
| 12,020 | 42.554348 | 169 | py |
3DTrans | 3DTrans-master/tools/train_bi3d_st3d.py | import _init_path
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, log_config_to_file, cfg_from_yaml_file, cfg_from_list
from pcdet.utils import common_utils
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.models import build_network, model_fn_decorator
import torch.distributed as dist
from train_utils.optimization import build_optimizer, build_scheduler
from train_utils.active_with_st3d_utils import train_active_with_st3d
from test import repeat_eval_ckpt
import math
from pathlib import Path
import argparse
import datetime
import glob
def parse_config():
    """Parse command-line options and load the YAML config into the global ``cfg``.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated global config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    # Fix: main() reads args.num_epochs_to_eval after training; the option was
    # never defined here, which raised AttributeError at the evaluation stage.
    parser.add_argument('--num_epochs_to_eval', type=int, default=0,
                        help='number of last epochs to evaluate after training')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    # active domain adaptation args
    parser.add_argument('--annotation_budget', type=int, default=5, help='annotation budget')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
def main():
    """Entry point for Bi3D + ST3D active self-training: builds source, target
    and active-sampling dataloaders, trains the detector with a domain
    discriminator and pseudo-labeling, then evaluates the last checkpoints
    on the source test split.
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        print ("None args.launcher********",args.launcher)
        dist_train = False
        total_gpus = 1
    else:
        print ("args.launcher********",args.launcher)
        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    # --batch_size is the global batch size; split it evenly across GPUs.
    if args.batch_size is None:
        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
    else:
        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
        args.batch_size = args.batch_size // total_gpus
    if args.fix_random_seed:
        common_utils.set_random_seed(666)
    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
    # Output layout: output/<group>/<tag>/<extra_tag>/{ckpt,target_list,ps_label,...}
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    ckpt_dir = output_dir / 'ckpt'
    target_list_dir = output_dir / 'target_list'
    ps_label_dir = output_dir / 'ps_label'
    ps_label_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    target_list_dir.mkdir(parents=True, exist_ok=True)
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_train:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    if cfg.LOCAL_RANK == 0:
        os.system('cp %s %s' % (args.cfg_file, output_dir))
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
    # -----------------------create dataloader & network & optimizer---------------------------
    # fine tune model
    source_set, source_loader, source_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # Start without pseudo labels; self-training toggles them later.
    cfg.DATA_CONFIG_TAR.USE_PSEUDO_LABEL = False
    # unsupervised target dataloader
    target_set, target_loader, target_sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG_TAR,
        class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    # sampling dataloader used by the active-selection step
    source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers,
        logger=logger,
        training=True,
        info_path=cfg.DATA_CONFIG.FILE_PATH,
        merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
        total_epochs=args.epochs
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=source_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()
    # Two optimizers: whole detector and domain discriminator.
    optimizer_detector = build_optimizer(model, cfg.OPTIMIZATION)
    optimizer_discriminator = build_optimizer(model.discriminator, cfg.OPTIMIZATION.DISCRIMINATOR)
    optimizer_list = [optimizer_detector, optimizer_discriminator]
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=None,
                                                           logger=logger)
        last_epoch = start_epoch + 1
    else:
        # resume from the newest checkpoint in ckpt_dir, if any
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist_train, optimizer=None, logger=logger
            )
            last_epoch = start_epoch + 1
    model.train()  # before wrap to DistributedDataParallel to support fixing some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()], find_unused_parameters=True, broadcast_buffers=False)
    logger.info(model)
    total_iters_each_epoch = len(target_loader) if not args.merge_all_iters_to_one_epoch else len(target_loader) // args.epochs
    # total_iters_each_epoch = math.ceil(cfg['SOURCE_THRESHOD'] / (args.batch_size * total_gpus))
    lr_scheduler_detector, lr_warmup_scheduler_detector = build_scheduler(
        optimizer_detector, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
    )
    lr_scheduler_discriminator, lr_warmup_scheduler_discriminator = build_scheduler(
        optimizer_discriminator, total_iters_each_epoch=total_iters_each_epoch, total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION.DISCRIMINATOR
    )
    lr_scheduler_list = [lr_scheduler_detector, lr_scheduler_discriminator]
    # -----------------------start training---------------------------
    logger.info('**********************Start training %s/%s(%s)**********************'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    train_active_with_st3d(
        model=model,
        optimizer=optimizer_list,
        source_train_loader=source_loader,
        target_train_loader=target_loader,
        source_sample_loader=source_sample_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler_list,
        optim_cfg=cfg.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        sample_epoch=cfg.SAMPLE_EPOCHS,
        annotation_budget=cfg.ANNOTATION_BUDGET,
        target_file_path=cfg.DATA_CONFIG_TAR.FILE_PATH,
        sample_save_path=target_list_dir,
        ps_label_dir=ps_label_dir,
        cfg=cfg,
        batch_size=args.batch_size,
        workers=args.workers,
        dist_train=dist_train,
        source_sampler=source_sampler,
        target_sampler=target_sampler,
        source_sample_sampler=source_sample_loader,
        lr_warmup_scheduler=None,
        ckpt_save_interval=1,
        max_ckpt_save_num=50,
        merge_all_iters_to_one_epoch=False,
        logger=logger,
        ema_model=None
    )
    # Release shared-memory buffers held by the datasets, if used.
    if hasattr(source_set, 'use_shared_memory') and source_set.use_shared_memory:
        source_set.clean_shared_memory()
    if hasattr(target_set, 'use_shared_memory') and target_set.use_shared_memory:
        target_set.clean_shared_memory()
    logger.info('**********************End training %s/%s(%s)**********************\n\n\n'
                % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    logger.info('**********************Start evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_train, workers=args.workers, logger=logger, training=False
    )
    eval_output_dir = output_dir / 'eval' / 'eval_with_train'
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): requires --num_epochs_to_eval to be defined by parse_config.
    args.start_epoch = max(args.epochs - args.num_epochs_to_eval,
                           0)  # Only evaluate the last args.num_epochs_to_eval epochs
    repeat_eval_ckpt(
        model.module if dist_train else model,
        test_loader, args, eval_output_dir, logger, ckpt_dir,
        dist_test=dist_train
    )
    logger.info('**********************End evaluation %s/%s(%s)**********************' %
                (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
(cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))
if __name__ == '__main__':
print('start')
main()
| 11,970 | 42.530909 | 169 | py |
3DTrans | 3DTrans-master/tools/tools_utils/open3d_vis_utils.py | """
Open3d visualization tool box
Written by Jihan YANG
All rights preserved from 2021 - present.
"""
import open3d
import torch
import matplotlib
import numpy as np
# RGB colors (0-1 floats) used by draw_box to paint boxes, indexed by ref_labels.
box_colormap = [
    [1, 1, 1],
    [0, 1, 0],
    [0, 1, 1],
    [1, 1, 0],
]
def get_coor_colors(obj_labels):
    """
    Args:
        obj_labels: 1 is ground, labels > 1 indicates different instance cluster

    Returns:
        rgb: [N, 3]. color for each point.
    """
    # Draw colors from matplotlib's XKCD palette, one per label value.
    palette = list(matplotlib.colors.XKCD_COLORS.values())
    n_labels = obj_labels.max()
    rgba_table = [matplotlib.colors.to_rgba_array(c) for c in palette[:n_labels + 1]]

    # Look up each point's label and keep only the RGB channels.
    rgb = np.array(rgba_table)[obj_labels]
    return rgb.squeeze()[:, :3]
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_labels=None, ref_scores=None, point_colors=None, draw_origin=True):
    """Visualize a point cloud with optional ground-truth and predicted boxes.

    Opens an Open3D window and blocks until it is closed.

    Args:
        points: (N, >=3) array or torch tensor; only the xyz columns are drawn.
        gt_boxes: (M, 7) boxes [x, y, z, l, w, h, heading], drawn in green.
        ref_boxes: (K, 7) predicted boxes, drawn in blue or per-label colors.
        ref_labels: optional per-box integer labels indexing box_colormap.
        ref_scores: per-box scores (passed to draw_box, which does not render them).
        point_colors: optional (N, 3) per-point RGB; defaults to uniform magenta.
        draw_origin: if True, add an xyz coordinate frame at the origin.
    """
    # Accept torch tensors transparently.
    if isinstance(points, torch.Tensor):
        points = points.cpu().numpy()
    if isinstance(gt_boxes, torch.Tensor):
        gt_boxes = gt_boxes.cpu().numpy()
    if isinstance(ref_boxes, torch.Tensor):
        ref_boxes = ref_boxes.cpu().numpy()
    vis = open3d.visualization.Visualizer()
    vis.create_window()
    vis.get_render_option().point_size = 1.0
    vis.get_render_option().background_color = np.zeros(3)
    # draw origin
    if draw_origin:
        axis_pcd = open3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])
        vis.add_geometry(axis_pcd)
    pts = open3d.geometry.PointCloud()
    pts.points = open3d.utility.Vector3dVector(points[:, :3])
    vis.add_geometry(pts)
    # Colors are assigned after add_geometry; the visualizer keeps a reference.
    if point_colors is None:
        pts.colors = open3d.utility.Vector3dVector(np.repeat(np.array([[1, 0, 1]]), points.shape[0], axis=0))  # (np.zeros((points.shape[0], 3)))
    else:
        pts.colors = open3d.utility.Vector3dVector(point_colors)
    if gt_boxes is not None:
        vis = draw_box(vis, gt_boxes, (0, 1, 0))
    if ref_boxes is not None:
        vis = draw_box(vis, ref_boxes, (0, 0, 1), ref_labels, ref_scores)
    vis.run()
    vis.destroy_window()
def translate_boxes_to_open3d_instance(gt_boxes):
    """Convert one (7,) box [x, y, z, dx, dy, dz, heading] into an Open3D
    LineSet (wireframe, with extra cross lines) and OrientedBoundingBox.

    Corner numbering of the generated box:
             4-------- 6
           /|         /|
          5 -------- 3 .
          | |        | |
          . 7 -------- 1
          |/         |/
          2 -------- 0
    """
    center = gt_boxes[0:3]
    lwh = gt_boxes[3:6]
    # 1e-10 avoids a degenerate zero rotation vector for heading == 0.
    axis_angles = np.array([0, 0, gt_boxes[6] + 1e-10])
    rot = open3d.geometry.get_rotation_matrix_from_axis_angle(axis_angles)
    box3d = open3d.geometry.OrientedBoundingBox(center, rot, lwh)
    line_set = open3d.geometry.LineSet.create_from_oriented_bounding_box(box3d)
    # import ipdb; ipdb.set_trace(context=20)
    lines = np.asarray(line_set.lines)
    # Add two extra diagonals so the heading-facing side is visually marked.
    lines = np.concatenate([lines, np.array([[1, 4], [7, 6]])], axis=0)
    line_set.lines = open3d.utility.Vector2iVector(lines)
    return line_set, box3d
def draw_box(vis, gt_boxes, color=(0, 1, 0), ref_labels=None, score=None):
    """Add one wireframe per box to the visualizer.

    Boxes are painted a uniform `color`, or per-class colors from
    box_colormap when `ref_labels` is given. `score` is currently unused
    (the label-drawing code below is commented out). Returns `vis`.
    """
    for i in range(gt_boxes.shape[0]):
        line_set, box3d = translate_boxes_to_open3d_instance(gt_boxes[i])
        if ref_labels is None:
            line_set.paint_uniform_color(color)
        else:
            line_set.paint_uniform_color(box_colormap[ref_labels[i]])
        vis.add_geometry(line_set)
        # if score is not None:
        #     corners = box3d.get_box_points()
        #     vis.add_3d_label(corners[5], '%.2f' % score[i])
    return vis
| 3,478 | 28.483051 | 145 | py |
3DTrans | 3DTrans-master/tools/tools_utils/dataset.py | from ast import arg
# from http.client import _DataType
import os
import matplotlib.pyplot as plt
import boto3
import io
import pickle
import numpy as np
import argparse
import pickle
import os
from collections import defaultdict
import time, copy
import numpy as np
import torch
import open3d as o3d
import open3d
import matplotlib
from open3d import geometry
import pickle
from itertools import groupby
import open3d_vis_utils as V
import calibration_kitti
class Dataset():
    def __init__(self, args):
        """Store dataset name/root; open an S3 client when a bucket is configured.

        Args:
            args: namespace with dataset_name, data_root and bucket_name.
        """
        super().__init__()
        self.dataset_name = args.dataset_name
        self.data_root = args.data_root
        # NOTE(review): endpoint_url is left empty here — presumably filled in
        # for a specific deployment; verify before using the S3 path.
        if args.bucket_name is not None:
            self.client = boto3.client(service_name='s3', endpoint_url='')
    def get_data(self, args, info):
        """Load one frame's point cloud and its GT boxes for the configured dataset.

        Supports kitti / nuscenes / waymo / once info dicts. Boxes are filtered
        to the classes listed in args.visualize_categories.

        Returns:
            (pointcloud, gt_boxes): numpy arrays; gt_boxes are 7-DoF lidar boxes.
        """
        if self.dataset_name == "kitti":
            lidar_idx = info['point_cloud']['lidar_idx']
            # get image shape
            img_shape = info['image']['image_shape']
            print(lidar_idx)
            pointcloud = self.get_lidar_kitti(args, lidar_idx)[:, :4]
            calib = self.get_calib(args, lidar_idx)
            pts_rect = calib.lidar_to_rect(pointcloud[:, 0:3])
            # FOV_only
            if args.fov:
                # Keep only points that project inside the camera image with
                # non-negative depth (KITTI annotations only cover the FOV).
                pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
                val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
                val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
                val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
                pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
                pointcloud = pointcloud[pts_valid_flag]
            annos = info['annos']
            loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
            # KITTI stores boxes in rect-camera coords; convert to lidar frame.
            gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
            gt_boxes = self.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
            # Boolean mask selecting only the classes requested for display.
            object_idx = []
            for item in info['annos']['name']:
                if item in args.visualize_categories:
                    object_idx.append(True)
                else:
                    object_idx.append(False)
            gt_boxes = gt_boxes[object_idx, :]
        elif self.dataset_name == "nuscenes":
            pointcloud = self.get_lidar_with_sweeps(args, info)[:, :3]
            object_idx = []
            for item in info['gt_names']:
                if item in args.visualize_categories:
                    object_idx.append(True)
                else:
                    object_idx.append(False)
            gt_boxes = info['gt_boxes'][object_idx, :7]
        elif self.dataset_name == "waymo":
            pc_info = info['point_cloud']
            pointcloud = self.get_lidar_waymo(args, pc_info)[:, :3]
            object_idx = []
            for item in info['annos']['name']:
                if item in args.visualize_categories:
                    object_idx.append(True)
                else:
                    object_idx.append(False)
            gt_boxes = info['annos']['gt_boxes_lidar'][object_idx, :7]
        elif self.dataset_name == "once":
            frame_id = info['frame_id']
            sequence_id = info['sequence_id']
            pointcloud = self.get_lidar_once(args, sequence_id, frame_id)
            object_idx = []
            for item in info['annos']['name']:
                if item in args.visualize_categories:
                    object_idx.append(True)
                else:
                    object_idx.append(False)
            gt_boxes = info['annos']['boxes_3d'][object_idx, :]
        # NOTE(review): an unrecognized dataset_name falls through with
        # pointcloud/gt_boxes unbound → NameError here.
        return pointcloud, gt_boxes
    def get_lidar_once(self, args, seq_id, frame_id):
        """Read one ONCE roof-lidar frame as an (N, 4) float32 array, either
        from S3 (when a bucket is configured) or from local disk."""
        if args.bucket_name is not None:
            bin_path = os.path.join("dataset/once/data", seq_id, 'lidar_roof', '{}.bin'.format(frame_id))
            obj = self.client.get_object(Bucket=args.bucket_name, Key=bin_path)
            # .copy() detaches from the read-only buffer returned by frombuffer.
            points = np.frombuffer(io.BytesIO(obj['Body'].read()).read(), dtype=np.float32).reshape(-1, 4).copy()
        else:
            bin_path = os.path.join(self.data_root, seq_id, 'lidar_roof', '{}.bin'.format(frame_id))
            points = np.fromfile(bin_path, dtype=np.float32).reshape(-1, 4)
        return points
    def get_lidar_kitti(self, args, idx):
        """Read one KITTI velodyne frame as an (N, 4) float32 array [x, y, z, r],
        from S3 when a bucket is configured, otherwise from local disk."""
        if args.bucket_name is not None:
            lidar_file = os.path.join("dataset", args.dataset_name, "training", 'velodyne', '%s.bin' % idx)
            obj = self.client.get_object(Bucket=args.bucket_name, Key=lidar_file)
            # .copy() detaches from the read-only buffer returned by frombuffer.
            lidar_points = np.frombuffer(io.BytesIO(obj['Body'].read()).read(), dtype=np.float32).reshape(-1, 4).copy()
        else:
            lidar_file = os.path.join(self.data_root, 'training/velodyne', '%s.bin' % idx)
            lidar_points = np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
        return lidar_points
def get_sweep(self, args, sweep_info):
def remove_ego_points(points, center_radius=1.0):
mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
return points[mask]
if args.bucket_name is not None:
lidar_path = os.path.join("", sweep_info['lidar_path'])
obj = self.client.get_object(Bucket=args.bucket_name, Key=lidar_path)
points_sweep = np.frombuffer(io.BytesIO(obj['Body'].read()).read(), count=-1).reshape([-1, 5])[:, :4].copy()
else:
lidar_path = os.path.join(self.data_root, sweep_info['lidar_path'])
points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
points_sweep = remove_ego_points(points_sweep).T
if sweep_info['transform_matrix'] is not None:
num_points = points_sweep.shape[1]
points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
return points_sweep.T, cur_times.T
def get_lidar_with_sweeps(self, args, info):
if args.bucket_name is not None:
lidar_path = os.path.join("dataset/nuScenes", info['lidar_path'])
obj = self.client.get_object(Bucket=args.bucket_name, Key=lidar_path)
points_pre = np.frombuffer(io.BytesIO(obj['Body'].read()).read(), dtype=np.float32, count=-1).reshape([-1, 5]).copy()
points = points_pre[:, :4]
else:
lidar_path = os.path.join(self.data_root, info['lidar_path'])
points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
sweep_points_list = [points]
sweep_times_list = [np.zeros((points.shape[0], 1))]
for k in np.random.choice(len(info['sweeps']), 1 - 1, replace=False):
points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
sweep_points_list.append(points_sweep)
sweep_times_list.append(times_sweep)
points = np.concatenate(sweep_points_list, axis=0)
times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
points = np.concatenate((points, times), axis=1)
return points
    def get_lidar_waymo(self, args, pc_info):
        """Load one Waymo frame (.npy) and return (N, 5) points with NLZ
        (no-label-zone) returns removed and intensity tanh-normalized."""
        sequence_name = pc_info['lidar_sequence']
        sample_idx = pc_info['sample_idx']
        if args.bucket_name is not None:
            lidar_file = os.path.join("dataset/waymo_0.5.0/waymo_processed_data_v0_5_0", sequence_name, ('%04d.npy' % sample_idx))
            obj = self.client.get_object(Bucket=args.bucket_name, Key=lidar_file)
            lidar_points = np.load(io.BytesIO(obj['Body'].read())).copy()
        else:
            lidar_file = os.path.join(self.data_root, sequence_name, ('%04d.npy' % sample_idx))
            lidar_points = np.load(lidar_file)
        # Column 5 is the NLZ flag; -1 marks points outside no-label zones.
        points_all, NLZ_flag = lidar_points[:, 0:5], lidar_points[:, 5]
        points_all = points_all[NLZ_flag == -1]
        # Squash Waymo's unbounded intensity into (-1, 1).
        points_all[:, 3] = np.tanh(points_all[:, 3])
        return points_all
    def boxes3d_kitti_camera_to_lidar(self, boxes3d_camera, calib):
        """
        Args:
            boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
            calib:
        Returns:
            boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
        """
        # Deep-copy so the caller's array is never mutated.
        boxes3d_camera_copy = copy.deepcopy(boxes3d_camera)
        xyz_camera, r = boxes3d_camera_copy[:, 0:3], boxes3d_camera_copy[:, 6:7]
        l, h, w = boxes3d_camera_copy[:, 3:4], boxes3d_camera_copy[:, 4:5], boxes3d_camera_copy[:, 5:6]
        xyz_lidar = calib.rect_to_lidar(xyz_camera)
        # KITTI camera boxes are anchored at the bottom face; lift by h/2 so the
        # lidar box is centered.
        xyz_lidar[:, 2] += h[:, 0] / 2
        # Reorder dims to (dx=l, dy=w, dz=h) and convert camera ry to lidar heading.
        return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1)
    def get_calib(self, args, idx):
        """Build a KITTI Calibration object for frame `idx`, reading the calib
        text either from S3 (True flag = file-like input) or local disk."""
        if args.bucket_name is not None:
            calib_file = os.path.join("dataset", args.dataset_name, "training", "calib", ('%s.txt' % idx))
            text_bytes = self.client.get_object(Bucket=args.bucket_name, Key=calib_file)
            text_bytes = text_bytes['Body'].read().decode('utf-8')
            calibrated_res = calibration_kitti.Calibration(io.StringIO(text_bytes), True)
        else:
            calib_file = os.path.join(self.data_root, 'calib', ('%s.txt' % idx))
            calibrated_res = calibration_kitti.Calibration(calib_file, False)
        # NOTE(review): the trailing "| 9,518 | ..." text on the next line is
        # extraction residue fused onto the source by the dataset dump.
        return calibrated_res | 9,518 | 43.274419 | 131 | py |
3DTrans | 3DTrans-master/tools/tools_utils/split_kitti_train.py | import os
import torch
import pickle
import json
import copy
import random
# Script: carve a random 5% subset out of a (KITTI-style) train-info pickle and
# dump it for low-data experiments. Other split ratios are kept commented out.
# NOTE(review): the input path is left empty and must be filled in before running.
nuscenes_info_path_train = ""
with open(nuscenes_info_path_train, 'rb') as f:
    infos_train = pickle.load(f)
# Shuffle so the slice below is a random (not sequential) subset.
random.shuffle(infos_train)
total_len = len(infos_train)
# list_01 = infos_train[:int(total_len*0.01)]
list_05 = infos_train[:int(total_len*0.05)]
# list_10 = infos_train[:int(total_len*0.10)]
# list_25 = infos_train[:int(total_len*0.25)]
# list_50 = infos_train[:int(total_len*0.5)]
# list_75 = infos_train[:int(total_len*0.75)]
#list_700 = 6*infos_train
# with open('01_kitti_infos_train.pkl', 'wb') as f:
#     pickle.dump(list_01, f)
with open('05_kitti_infos_train.pkl', 'wb') as f:
    pickle.dump(list_05, f)
# with open('10_kitti_infos_train.pkl', 'wb') as f:
#     pickle.dump(list_10, f)
# with open('25_kitti_infos_train.pkl', 'wb') as f:
#     pickle.dump(list_25, f)
# with open('50_kitti_infos_train.pkl', 'wb') as f:
#     pickle.dump(list_50, f)
# with open('75_kitti_infos_train.pkl', 'wb') as f:
#     pickle.dump(list_75, f)
# with open('700_kitti_infos_train.pkl', 'wb') as f:
#     pickle.dump(list_700, f)
| 1,128 | 24.088889 | 52 | py |
3DTrans | 3DTrans-master/tools/tools_utils/split_nuscenes_location.py | import os
import torch
import pickle
import json
# Script: split nuScenes train/val info pickles into four per-city subsets
# (onenorth / boston-seaport / queenstown / hollandvillage) using a
# logfile->location mapping, dump each subset, and print the split sizes.
# NOTE(review): the three input paths are left empty and must be filled in.
location_info_path = ""
nuscenes_info_path_train = ""
nuscenes_info_path_val = ""
with open(nuscenes_info_path_train, 'rb') as f:
    infos_train = pickle.load(f)
with open(nuscenes_info_path_val, 'rb') as f:
    infos_val = pickle.load(f)
with open(location_info_path, 'rb') as f:
    location_info = json.load(f)
# Map each log token to its recording city.
token2location = {}
for info in location_info:
    token2location[info['logfile']] = info['location']
# Inverse map: city -> list of log tokens (built but only used implicitly).
location2token = {}
for token in token2location.keys():
    if token2location[token] not in location2token.keys():
        location2token[token2location[token]] = []
    location2token[token2location[token]].append(token)
singapore_onenorth_list_train = []
boston_seaport_list_train = []
singapore_queenstown_list_train = []
singapore_hollandvillage_list_train = []
for info in infos_train:
    # The log token is the first underscore-separated field of the image name.
    token = info['cam_front_path'].split('/')[-1].split('_')[0]
    location = token2location[token]
    if location == 'singapore-onenorth':
        singapore_onenorth_list_train.append(info)
    elif location == 'boston-seaport':
        boston_seaport_list_train.append(info)
    elif location =='singapore-queenstown':
        singapore_queenstown_list_train.append(info)
    elif location == 'singapore-hollandvillage':
        singapore_hollandvillage_list_train.append(info)
with open('singapore-onenorth_data_train.pkl', 'wb') as f:
    pickle.dump(singapore_onenorth_list_train, f)
with open('boston-seaport_data_train.pkl', 'wb') as f:
    pickle.dump(boston_seaport_list_train, f)
with open('singapore-queenstown_data_train.pkl', 'wb') as f:
    pickle.dump(singapore_queenstown_list_train, f)
with open('singapore-hollandvillage_data_train.pkl', 'wb') as f:
    pickle.dump(singapore_hollandvillage_list_train, f)
# Repeat the same per-city split for the validation infos.
singapore_onenorth_list_val = []
boston_seaport_list_val = []
singapore_queenstown_list_val = []
singapore_hollandvillage_list_val = []
for info in infos_val:
    token = info['cam_front_path'].split('/')[-1].split('_')[0]
    location = token2location[token]
    if location == 'singapore-onenorth':
        singapore_onenorth_list_val.append(info)
    elif location == 'boston-seaport':
        boston_seaport_list_val.append(info)
    elif location =='singapore-queenstown':
        singapore_queenstown_list_val.append(info)
    elif location == 'singapore-hollandvillage':
        singapore_hollandvillage_list_val.append(info)
with open('singapore-onenorth_data_val.pkl', 'wb') as f:
    pickle.dump(singapore_onenorth_list_val, f)
with open('boston-seaport_data_val.pkl', 'wb') as f:
    pickle.dump(boston_seaport_list_val, f)
with open('singapore-queenstown_data_val.pkl', 'wb') as f:
    pickle.dump(singapore_queenstown_list_val, f)
with open('singapore-hollandvillage_data_val.pkl', 'wb') as f:
    pickle.dump(singapore_hollandvillage_list_val, f)
print('singapore_onenorth_list_train:', len(singapore_onenorth_list_train))
print('singapore_onenorth_list_val:', len(singapore_onenorth_list_val))
print('boston_seaport_list_train:', len(boston_seaport_list_train))
print('boston_seaport_list_val', len(boston_seaport_list_val))
print('singapore_queenstown_list_train:', len(singapore_queenstown_list_train))
print('singapore_queenstown_list_val:', len(singapore_queenstown_list_val))
print('singapore_hollandvillage_list_train:', len(singapore_hollandvillage_list_train))
print('singapore_hollandvillage_list_val:', len(singapore_hollandvillage_list_val))
# print(len(infos_train) + len(infos_val))
# print(len(singapore_onenorth_list)+len(boston_seaport_list)+len(singapore_queenstown_list)+len(singapore_hollandvillage_list)) | 3,656 | 33.828571 | 128 | py |
3DTrans | 3DTrans-master/tools/tools_utils/split_nusc_train.py | import os
import torch
import pickle
import json
import random
import copy
# Script: carve 1% / 5% / 10% subsets out of a ONCE train-info pickle for
# low-data experiments. Input paths are left empty and must be filled in.
nuscenes_info_path_train = ""
once_info_path_train = ""
kitti_info = ""
with open(once_info_path_train, 'rb') as f:
    infos_train = pickle.load(f)
# NOTE(review): shuffling is commented out, so the slices below are the FIRST
# x% of the pickle order, not random subsets — confirm this is intended.
# random.shuffle(infos_train)
total_len = len(infos_train)
N = 10
# NOTE(review): infos_train_enlarge (train set repeated N times) is built but
# never written out — apparently dead code.
infos_train_enlarge = copy.deepcopy(infos_train)
for i in range (1, N):
    infos_train_enlarge.extend(infos_train)
list_01 = infos_train[:int(total_len*0.01)]
list_05 = infos_train[:int(total_len*0.05)]
list_10 = infos_train[:int(total_len*0.10)]
with open('01_once_infos_train_vehicle.pkl', 'wb') as f:
    pickle.dump(list_01, f)
with open('05_once_infos_train_vehicle.pkl', 'wb') as f:
    pickle.dump(list_05, f)
with open('10_once_infos_train_vehicle.pkl', 'wb') as f:
    pickle.dump(list_10, f)
| 801 | 21.914286 | 56 | py |
3DTrans | 3DTrans-master/tools/unsupervised_utils/pointcontrast_utils.py | import os
import glob
# from plotly import data
from pcdet.models import load_data_to_gpu
import torch
import tqdm
from pcdet.models import load_data_to_gpu
from torch.nn.utils import clip_grad_norm_
from ssl_utils.semi_utils import random_world_flip, random_world_rotation, random_world_scaling
from pcdet.models.detectors.unsupervised_model.pvrcnn_plus_backbone import HardestContrastiveLoss
# @torch.no_grad()
# def get_positive_pairs(batch_dict_1, batch_dict_2):
# augmentation_functions = {
# 'random_world_flip': random_world_flip,
# 'random_world_rotation': random_world_rotation,
# 'random_world_scaling': random_world_scaling
# }
# for bs_idx in range(len(batch_dict_1)):
# aug_list_1 = batch_dict_1['augmentation_list'][bs_idx]
# aug_list_2 = batch_dict_2['augmentation_list'][bs_idx]
# aug_param_1 = batch_dict_1['augmentation_params'][bs_idx]
# aug_param_2 = batch_dict_2['augmentation_params'][bs_idx]
def pointcontrast(model, batch_dict_1, batch_dict_2, loss_cfg, dist, voxel_size, point_cloud_range):
    """Run one PointContrast step on a pair of augmented views and return the
    hardest-contrastive loss (positive + negative terms).

    In distributed mode the model consumes both views in a single call;
    otherwise each view is forwarded separately.
    """
    for view in (batch_dict_1, batch_dict_2):
        load_data_to_gpu(view)
    if dist:
        batch_dict_1, batch_dict_2 = model(batch_dict_1, batch_dict_2)
    else:
        batch_dict_1 = model(batch_dict_1)
        batch_dict_2 = model(batch_dict_2)
    criterion = HardestContrastiveLoss(loss_cfg, voxel_size, point_cloud_range)
    pos_loss, neg_loss = criterion.get_hardest_contrastive_loss(batch_dict_1, batch_dict_2)
    return pos_loss + neg_loss
def train_pointcontrast_one_epoch(model, optimizer, data_loader, lr_scheduler,
                                  voxel_size, point_cloud_range,
                                  accumulated_iter, cfg, rank, tbar, total_it_each_epoch,
                                  dataloader_iter, tb_log=None, leave_pbar=False, dist=False):
    """Train the PointContrast model for one epoch.

    Args:
        model / optimizer / data_loader / lr_scheduler: usual training objects.
        accumulated_iter: global iteration counter carried across epochs.
        cfg: config with LOSS_CFG and GRAD_NORM_CLIP.
        rank: process rank; only rank 0 drives progress bars / tensorboard.
        tbar: outer epochs tqdm bar (postfix is refreshed with loss/lr).
        dataloader_iter: live iterator over data_loader (re-created on exhaustion).

    Returns:
        Updated accumulated_iter.
    """
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    disp_dict = {}
    for cur_it in range(total_it_each_epoch):
        try:
            batch_1, batch_2 = next(dataloader_iter)
        except StopIteration:
            # Loader exhausted mid-epoch: restart it and keep going.
            dataloader_iter = iter(data_loader)
            batch_1, batch_2 = next(dataloader_iter)
            print('new sample dataloader')
        # BUG FIX: standard torch optimizers have no `.lr` attribute, so the
        # lookup raises AttributeError — the original caught StopIteration
        # (never raised here) and fell back to the misspelled `param_group`.
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        optimizer.zero_grad()
        loss = pointcontrast(model, batch_1, batch_2, cfg.LOSS_CFG, dist, voxel_size, point_cloud_range)
        loss.backward()
        clip_grad_norm_(model.parameters(), cfg.GRAD_NORM_CLIP)
        optimizer.step()
        lr_scheduler.step(accumulated_iter)
        accumulated_iter += 1
        disp_dict.update({
            'loss': loss.item(),
            'lr': cur_lr
        })
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader, lr_scheduler, cfg, voxel_size, point_cloud_range,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                train_sampler, lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
                merge_all_iters_to_one_epoch=False, dist=False):
    """Multi-epoch PointContrast training loop with periodic checkpointing.

    Uses the warmup scheduler for the first cfg.WARMUP_EPOCH epochs, then the
    main scheduler. Rank 0 saves a checkpoint every `ckpt_save_interval`
    epochs and prunes the oldest ones beyond `max_ckpt_save_num`.
    merge_all_iters_to_one_epoch is not supported (asserted False).
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_loader) # total iterations set to labeled set
        assert merge_all_iters_to_one_epoch is False
        train_loader_iter = iter(train_loader)
        for cur_epoch in tbar:
            # Re-seed the distributed sampler so shuffling differs per epoch.
            if train_sampler is not None:
                train_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_pointcontrast_one_epoch(
                model=model,
                optimizer=optimizer,
                data_loader=train_loader,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter,
                point_cloud_range=point_cloud_range,
                voxel_size=voxel_size, cfg=cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dist = dist,
                dataloader_iter=train_loader_iter
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Prune oldest checkpoints so at most max_ckpt_save_num remain.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor moved to the CPU."""
    cpu_state = type(model_state)()  # preserve ordered-dict type
    cpu_state.update((name, tensor.cpu()) for name, tensor in model_state.items())
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a checkpoint dict {epoch, it, model_state, optimizer_state, version}.

    DDP-wrapped models are unwrapped and their tensors moved to CPU; a plain
    module's state_dict is stored as-is. Missing model/optimizer yield None.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    # Record the pcdet version when available; a bare `except:` here would
    # also swallow KeyboardInterrupt/SystemExit, so catch Exception only.
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize a checkpoint dict to '<filename>.pth' with torch.save.

    The original version carried an `if False and ...` branch that split the
    optimizer state into a separate file; it was unreachable dead code and
    has been removed — behavior is unchanged.
    """
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
def update_ema_variables(model, ema_model, alpha, global_step):
    """In-place EMA update of ema_model's parameters toward model's.

    The effective momentum is min(1 - 1/(step+2), alpha), so early steps use
    (close to) the true average until the exponential average warms up.
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 2), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # Modern keyword form: Tensor.add_(Number, Tensor) is deprecated.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def update_ema_variables_with_fixed_momentum(model, ema_model, alpha):
    """In-place EMA update with a constant momentum `alpha`:
    ema = alpha * ema + (1 - alpha) * param."""
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # Modern keyword form: Tensor.add_(Number, Tensor) is deprecated.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
3DTrans | 3DTrans-master/tools/eval_utils/eval_utils.py | import pickle
import time
import numpy as np
import torch
import tqdm
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
    """Accumulate per-threshold recall counts from one batch into `metric` and
    refresh the progress-bar string for the lowest threshold in `disp_dict`.

    Keys follow OpenPCDet's convention: ret_dict carries 'roi_<t>'/'rcnn_<t>'
    counts and 'gt'; metric carries the running 'recall_roi_<t>',
    'recall_rcnn_<t>' and 'gt_num' totals.
    """
    thresh_list = cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST
    for thresh in thresh_list:
        for stage in ('roi', 'rcnn'):
            metric['recall_%s_%s' % (stage, str(thresh))] += ret_dict.get('%s_%s' % (stage, str(thresh)), 0)
    metric['gt_num'] += ret_dict.get('gt', 0)
    first = thresh_list[0]
    disp_dict['recall_%s' % str(first)] = '(%d, %d) / %d' % (
        metric['recall_roi_%s' % str(first)],
        metric['recall_rcnn_%s' % str(first)],
        metric['gt_num'],
    )
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Run one full evaluation pass: predict on every batch, accumulate recall
    statistics, merge results across ranks (if distributed), dump result.pkl,
    and run the dataset's official evaluation.

    Returns a dict of recall values merged with the dataset's eval metrics
    (empty dict on non-zero ranks).
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / 'final_result' / 'data'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    # Running totals: GT count plus per-threshold roi/rcnn recall counters.
    metric = {
        'gt_num': 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(cur_thresh)] = 0
        metric['recall_rcnn_%s' % str(cur_thresh)] = 0
    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []
    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[local_rank],
                broadcast_buffers=False
        )
    model.eval()
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    for i, batch_dict in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            pred_dicts, ret_dict = model(batch_dict)
        disp_dict = {}
        # Fold this batch's recall counts into the running totals.
        statistics_info(cfg, ret_dict, metric, disp_dict)
        annos = dataset.generate_prediction_dicts(
            batch_dict, pred_dicts, class_names,
            output_path=final_output_dir if save_to_file else None
        )
        det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    if dist_test:
        # Gather detections and metric dicts from all ranks via a shared tmpdir.
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)
    # Only rank 0 finishes the bookkeeping and runs the evaluation.
    if cfg.LOCAL_RANK != 0:
        return {}
    ret_dict = {}
    if dist_test:
        # Sum the per-rank metric dicts element-wise into metric[0].
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
        logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
        ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
        ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects(%d samples): %.3f'
                % (len(det_annos), total_pred_objects / max(1, len(det_annos))))
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    result_str, result_dict = dataset.evaluation(
        det_annos, class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir
    )
    logger.info(result_str)
    ret_dict.update(result_dict)
    logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict
def eval_one_epoch_parallel(cfg, model, show_db, dataloader_s1, dataloader_s2, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Evaluate a dual-dataset (multi-head) model, scoring only one of the two
    sources selected by `show_db` (1 or 2).

    Batches from both loaders are merged into a single batch_dict per step (the
    model forward requires both); the model returns two prediction sets and
    only the one matching show_db is evaluated. Otherwise mirrors
    eval_one_epoch: accumulate recall, merge across ranks, dump result.pkl,
    run the dataset's official evaluation, and return the metrics dict.
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / 'final_result' / 'data'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    metric = {
        'gt_num': 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(cur_thresh)] = 0
        metric['recall_rcnn_%s' % str(cur_thresh)] = 0
    # The dataset under evaluation is the one selected by show_db.
    if show_db == 1:
        dataset = dataloader_s1.dataset
        class_names = dataset.class_names
        det_annos = []
    elif show_db == 2:
        dataset = dataloader_s2.dataset
        class_names = dataset.class_names
        det_annos = []
    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[local_rank],
                broadcast_buffers=False
        )
    model.eval()
    if cfg.LOCAL_RANK == 0:
        if show_db == 1:
            progress_bar = tqdm.tqdm(total=len(dataloader_s1), leave=True, desc='eval', dynamic_ncols=True)
        elif show_db == 2:
            progress_bar = tqdm.tqdm(total=len(dataloader_s2), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    if show_db == 1:
        # Iterate the scored loader; cycle the companion loader as needed so a
        # merged two-source batch can always be built.
        dataloader_iter_2 = iter(dataloader_s2)
        for i, batch_1 in enumerate(dataloader_s1):
            try:
                batch_2 = next(dataloader_iter_2)
            except StopIteration:
                dataloader_iter_2 = iter(dataloader_s2)
                batch_2 = next(dataloader_iter_2)
            batch_dict = common_utils.merge_two_batch_dict(batch_1, batch_2)
            load_data_to_gpu(batch_dict)
            with torch.no_grad():
                # Model emits (preds_1, ret_1, preds_2, ret_2); keep source 1.
                pred_dicts, ret_dict, _, _ = model(batch_dict)
            disp_dict = {}
            statistics_info(cfg, ret_dict, metric, disp_dict)
            annos = dataset.generate_prediction_dicts(
                batch_dict, pred_dicts, class_names,
                output_path=final_output_dir if save_to_file else None
            )
            det_annos += annos
            if cfg.LOCAL_RANK == 0:
                progress_bar.set_postfix(disp_dict)
                progress_bar.update()
    elif show_db == 2:
        # Same loop with the roles of the two loaders swapped; keep source 2.
        dataloader_iter_1 = iter(dataloader_s1)
        for i, batch_2 in enumerate(dataloader_s2):
            try:
                batch_1 = next(dataloader_iter_1)
            except StopIteration:
                dataloader_iter_1 = iter(dataloader_s1)
                batch_1 = next(dataloader_iter_1)
            batch_dict = common_utils.merge_two_batch_dict(batch_1, batch_2)
            load_data_to_gpu(batch_dict)
            with torch.no_grad():
                _, _, pred_dicts, ret_dict = model(batch_dict)
            disp_dict = {}
            statistics_info(cfg, ret_dict, metric, disp_dict)
            annos = dataset.generate_prediction_dicts(
                batch_dict, pred_dicts, class_names,
                output_path=final_output_dir if save_to_file else None
            )
            det_annos += annos
            if cfg.LOCAL_RANK == 0:
                progress_bar.set_postfix(disp_dict)
                progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    if dist_test:
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')
    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataset)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)
    if cfg.LOCAL_RANK != 0:
        return {}
    ret_dict = {}
    if dist_test:
        # Sum the per-rank metric dicts element-wise into metric[0].
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
        logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
        ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
        ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects(%d samples): %.3f'
                % (len(det_annos), total_pred_objects / max(1, len(det_annos))))
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    result_str, result_dict = dataset.evaluation(
        det_annos, class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir
    )
    logger.info(result_str)
    ret_dict.update(result_dict)
    logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict
# This module is import-only; running it directly does nothing.
if __name__ == '__main__':
    pass
| 10,327 | 37.251852 | 151 | py |
3DTrans | 3DTrans-master/tools/show_squence_demo/demo.py | import os
import copy
import pickle
from collections import defaultdict
import json
import numpy as np
from pathlib import Path
import argparse
import torch
from utils import Visualizer, LabelLUT
from utils.base_dataset import DataCollect
from pcdet.ops.roiaware_pool3d.roiaware_pool3d_utils import points_in_boxes_gpu
def sequence_visualize3d(**infos):
    """Play back a sequence of frames (points + GT boxes) in the Visualizer.

    `infos` is the dict produced by load_once/load_nuscenes: per-frame ids,
    points, point labels and GT annotations. Boxes are colored by their
    "gt_<class>" label via a LabelLUT; points are colored by class.
    """
    data_collect = DataCollect(color_attr=[
        "class",
        # "id"
    ],
        text_attr=[
            # "class",
            # "id",
            # "score",
        ],
        show_text=True)
    data_collect.offline_process_infos(**infos)
    lut = LabelLUT()
    # NOTE: a first palette mapping track/gt/detect source categories was dead
    # code here (immediately overwritten by the per-class dict below) and has
    # been removed.
    lut_labels = {
        "gt_Car": [0., 1., 0.],  # once
        "gt_Truck": [0., 1., 0.],
        "gt_Bus": [0., 1., 0.],
        "gt_Pedestrian": [0., 0., 1.],
        "gt_Cyclist": [1., 0.0, 0.0],
        "gt_car": [0., 1., 0.],  # nuscenes
        "gt_traffic_cone": [1.0, 1.0, 0.25],
        "gt_truck": [0., 1., 0.],
        "gt_pedestrian": [0., 0., 1.0],
        "gt_construction_vehicle": [0., 1., 0.],
        "gt_bus": [0., 1., 0.],
        "gt_trailer": [0., 0.68627451, 0.],
        "gt_motorcycle": [1., 0., 0.],
        "gt_bicycle": [1., 0., 0.],
        "gt_barrier": [0.19607843, 0.47058824, 1.],
    }
    for key, val in lut_labels.items():
        lut.add_label(key, key, val)
    # lut = None
    _3dal_vis = Visualizer(fps=4)
    _3dal_vis.visualize_dataset(data_collect, prefix="frame id", lut=lut)
def load_once(data_path, seq_id):
    """Load an annotated ONCE sequence into the visualization info format.

    Reads <data_path>/<seq_id>/<seq_id>.json and each annotated frame's roof
    lidar .bin; labels every point with the index of the GT box containing it
    (GPU op, -1 for background).

    Returns:
        dict with keys idx_names, pts, pts_label, gt (per-frame lists).
    """
    info_path = os.path.join(data_path, seq_id)
    annos_path = os.path.join(info_path, seq_id + '.json')
    frame_ids_list = list()
    pts_list = list()
    pts_label_list = list()
    gt_list = list()
    with open(annos_path, 'r') as f:
        annos = json.load(f)
    frames = annos['frames'][:3] # We only put three once frames here as an example.
    for frame in frames:
        # Skip frames without annotations.
        if 'annos' in frame.keys():
            sequence_id = frame['sequence_id']
            frame_id = frame['frame_id']
            pose = frame['pose']
            annos = frame['annos']
            names = annos['names']
            boxes_3d = np.array(annos['boxes_3d'])
            frame_ids_list.append(frame_id)
            bin_path = os.path.join(info_path, 'lidar_roof', '{}.bin'.format(frame_id))
            points = np.fromfile(bin_path, dtype=np.float32).reshape(-1, 4)[:, :3]
            pts_list.append(points)
            gt_list.append(
                {
                    "bbox": boxes_3d,
                    "class": names,
                })
            # Per-point index of the containing GT box (-1 = outside all boxes).
            box_idxs = points_in_boxes_gpu(
                torch.from_numpy(points).unsqueeze(dim=0).float().cuda(),
                torch.from_numpy(boxes_3d).unsqueeze(dim=0).float().cuda()
            ).long().squeeze(dim=0).cpu().numpy()
            pts_label_list.append(box_idxs)
    info = {
        "idx_names": frame_ids_list,
        "pts": pts_list,
        "pts_label": pts_label_list,
        "gt": gt_list,
    }
    return info
def load_nuscenes(data_path, seq_id):
    """Load one nuScenes sequence from the pre-built 10-sweep info pickle.

    Args:
        data_path: Root directory containing
            ``nuscenes_infos_10sweeps_train.pkl`` and the lidar files.
        seq_id: Scene/sequence name, e.g. "n015-2018-07-18-11-07-57+0800";
            matched against the lidar file name prefix.

    Returns:
        dict with keys "idx_names", "pts", "pts_label", "gt" — parallel lists
        with one entry per kept frame (frames with no points or no boxes are
        skipped).
    """
    info_path = os.path.join(data_path, 'nuscenes_infos_10sweeps_train.pkl')
    annos = pickle.load(open(info_path, "rb"))
    frame_ids_list = list()
    pts_list = list()
    pts_label_list = list()
    gt_list = list()
    for anno in annos:
        lidar_path = anno['lidar_path']
        cur_seq_name = lidar_path.split("__LIDAR_TOP__")[0].split("LIDAR_TOP/")[-1]
        if cur_seq_name != seq_id:
            continue
        gt_names = anno['gt_names']
        gt_boxes = anno['gt_boxes'][:, :7]
        # BUGFIX: the previous `.strip('.pcd.bin')` treated the argument as a
        # character set, not a suffix, and could eat legitimate leading /
        # trailing characters. Take the part before the first dot instead.
        frame_id = lidar_path.split('_')[-1].split('.')[0]
        bin_path = os.path.join(data_path, lidar_path)
        # nuScenes lidar points are stored as (x, y, z, intensity, ring).
        points = np.fromfile(bin_path, dtype=np.float32).reshape([-1, 5])[:, :3]
        print(points.shape)
        boxes_3d = []
        names = []
        for box, name in zip(gt_boxes, gt_names):
            if name != 'ignore':
                boxes_3d.append(box)
                names.append(name)
        boxes_3d = np.array(boxes_3d)
        if len(points) and len(boxes_3d):
            box_idxs = points_in_boxes_gpu(
                torch.from_numpy(points).unsqueeze(dim=0).float().cuda(),
                torch.from_numpy(boxes_3d).unsqueeze(dim=0).float().cuda()
            ).long().squeeze(dim=0).cpu().numpy()
        else:
            # Nothing to label; skip the frame entirely.
            continue
        gt_list.append(
            {
                "bbox": boxes_3d,
                "class": names,
            })
        pts_list.append(points)
        frame_ids_list.append(frame_id)
        pts_label_list.append(box_idxs)
    info = {
        "idx_names": frame_ids_list,
        "pts": pts_list,
        "pts_label": pts_label_list,
        "gt": gt_list,
    }
    return info
if __name__ == '__main__':
    np.set_printoptions(precision=3, linewidth=500,
                        threshold=np.inf, suppress=True)
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--data_file', type=str, default="once_data", help='the data path')
    parser.add_argument('--seq_id', type=str, default="000076", help='the sequence id')
    # parser.add_argument('--data_file', type=str, default="nuscenes_data", help='the data path of nuscenes')
    # parser.add_argument('--seq_id', type=str, default="n015-2018-07-18-11-07-57+0800", help='the sequence id of nuscenes')
    parser.add_argument('--func', type=str, default='once', help='choose the data')
    args = parser.parse_args()
    if args.func == 'once':
        info = load_once(args.data_file, args.seq_id)
    elif args.func == 'nuscenes':
        info = load_nuscenes(args.data_file, args.seq_id)
    else:
        # Fail fast with a clear message; previously an unknown --func left
        # `info` undefined and crashed below with a NameError.
        raise ValueError(
            "Unknown --func '{}'; expected 'once' or 'nuscenes'".format(args.func))
    sequence_visualize3d(**info)
# ---------------------------------------------------------------------------
# File: 3DTrans-master/tools/show_squence_demo/utils/gui.py
# ---------------------------------------------------------------------------
import math
import sys
import numpy as np
import threading
import open3d as o3d
from open3d.visualization import gui
from open3d.visualization import rendering
from collections import deque
from .components import *
import time
import os
class Model:
    """The class that helps build visualization models based on attributes,
    data, and methods.

    Holds, per data item name: a t-geometry point cloud for rendering
    (``tclouds``), camera images (``tcams``), raw attribute arrays
    (``_data``), and bookkeeping for attribute ranges and bounding boxes.
    Subclasses implement ``load``/``unload``.
    """
    # bounding_box_prefix = "Bounding Boxes/"
    bounding_box_prefix = "bbox/"
    class BoundingBoxData:
        """The class to define a bounding box that is used to describe the
        target location.
        Args:
            name: The name of the pointcloud array.
            boxes: The array of pointcloud that define the bounding box.
        """
        def __init__(self, name, boxes):
            self.name = name
            self.boxes = boxes
    def __init__(self):
        # Note: the tpointcloud cannot store the actual data arrays, because
        # the tpointcloud requires specific names for some arrays (e.g.
        # "positions", "colors"). So the tpointcloud exists for rendering and
        # initially only contains the "positions" array.
        self.tclouds = {} # name -> tpointcloud
        self.tcams = {} # name -> tcams
        self.data_names = [] # the order data will be displayed / animated
        self.bounding_box_data = [] # [BoundingBoxData]
        self._data = {} # name -> {attr_name -> numpyarray}
        self._known_attrs = {} # name -> set(attrs)
        self._attr2minmax = {} # only access in _get_attr_minmax()
        # Canonical attribute names: incoming "label"/"feat" keys are renamed.
        self._attr_rename = {"label": "labels", "feat": "feature"}
    def _init_data(self, name):
        # Register an (initially empty) entry for `name` in every table.
        tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
        self.tclouds[name] = tcloud
        tcam = dict()
        self.tcams[name] = tcam
        self._data[name] = {}
        self.data_names.append(name)
    def is_loaded(self, name):
        """Check if the data is loaded."""
        if name in self._data:
            return len(self._data[name]) > 0
        else:
            # if the name isn't in the data, presumably it is loaded
            # (for instance, if this is a bounding box).
            return True
    def load(self, name, fail_if_no_space=False):
        """If data is not loaded, then load the data."""
        assert (False) # pure virtual
    def unload(self, name):
        # Release the data for `name` (subclass responsibility).
        assert (False) # pure virtual
    def create_point_cloud(self, data):
        """Create a point cloud based on the data provided.
        The data should include name and points.
        """
        assert ("name" in data) # name is a required field
        assert ("points" in data) # 'points' is a required field
        name = data["name"]
        pts = self._convert_to_numpy(data["points"])
        tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
        known_attrs = set()
        if pts.shape[1] >= 4:
            # We can't use inplace Tensor creation (e.g. from_numpy())
            # because the resulting arrays won't be contiguous. However,
            # TensorList can be inplace.
            xyz = pts[:, [0, 1, 2]]
            tcloud.point["positions"] = Visualizer._make_tcloud_array(xyz,
                                                                      copy=True)
        else:
            tcloud.point["positions"] = Visualizer._make_tcloud_array(pts)
        if 'pts_label' in data.keys():
            # test_dict = {'bg':0, 'Vehicle':1, 'Pedestrian':2, 'Cyclist':'3'}
            # Default grey; points inside a box take that box's class color.
            pts_color = np.ones_like(pts) * 0.3
            bboxes = data['bounding_boxes']
            for k, bbox in enumerate(bboxes):
                box_class = bbox.label_class
                # NOTE(review): self.lut is only assigned in DatasetModel;
                # reaching this branch via DataModel would raise
                # AttributeError — confirm callers.
                label_color = np.array(self.lut.labels[box_class].color, np.float32)
                # 'pts_label' holds the box index for each point.
                point_indices = (data['pts_label'] == k)
                pts_color[point_indices, :] = label_color
            tcloud.point["colors"] = Visualizer._make_tcloud_array(pts_color)
        self.tclouds[name] = tcloud
        # Add scalar attributes and vector3 attributes
        attrs = {}
        for k, v in data.items():
            attr = self._convert_to_numpy(v)
            if attr is None or isinstance(v, dict):
                continue
            attr_name = k
            if attr_name == "point":
                continue
            new_name = self._attr_rename.get(attr_name)
            if new_name is not None:
                attr_name = new_name
            # Only 1-D / 2-D arrays are usable as per-point attributes.
            if len(attr.shape) == 1 or len(attr.shape) == 2:
                attrs[attr_name] = attr
                known_attrs.add(attr_name)
        self._data[name] = attrs
        self._known_attrs[name] = known_attrs
    def create_cams(self, name, cam_dict, key='img', update=False):
        """Create images based on the data provided.
        The data should include name and cams.

        Args:
            name: Data item name the images belong to.
            cam_dict: camera name -> dict containing an image under `key`.
            key: Which entry of each camera dict to display.
            update: If True, also store `cam_dict` in the raw data table.
        """
        tcam = dict()
        for k, v in cam_dict.items():
            img = self._convert_to_numpy(v[key])
            tcam[k] = o3d.t.geometry.Image(Visualizer._make_tcloud_array(img))
        self.tcams[name] = tcam
        if update:
            self._data[name]['cams'] = cam_dict
    def _convert_to_numpy(self, ary):
        # Best-effort conversion of lists / numpy / tensorflow / torch data
        # to a float32-friendly numpy array; returns None if not convertible.
        if isinstance(ary, list):
            try:
                return np.array(ary, dtype='float32')
            except TypeError:
                return None
        elif isinstance(ary, np.ndarray):
            if len(ary.shape) == 2 and ary.shape[0] == 1:
                ary = ary[0] # "1D" array as 2D: [[1, 2, 3,...]]
            if ary.dtype.name.startswith('int'):
                return np.array(ary, dtype='float32')
            else:
                return ary
        # Optional framework support: silently skip if not installed.
        try:
            import tensorflow as tf
            if isinstance(ary, tf.Tensor):
                return self._convert_to_numpy(ary.numpy())
        except:
            pass
        try:
            import torch
            if isinstance(ary, torch.Tensor):
                return self._convert_to_numpy(ary.detach().cpu().numpy())
        except:
            pass
        return None
    def get_attr(self, name, attr_name):
        """Get an attribute from data based on the name passed."""
        if name in self._data:
            attrs = self._data[name]
            if attr_name in attrs:
                return attrs[attr_name]
        return None
    def get_attr_shape(self, name, attr_name):
        """Get a shape from data based on the name passed."""
        attr = self.get_attr(name, attr_name)
        if attr is not None:
            return attr.shape
        return []
    def get_attr_minmax(self, attr_name, channel):
        """Get the minimum and maximum for an attribute.

        The range is taken over all loaded data items; per-item results are
        memoized in self._attr2minmax.
        """
        attr_key_base = attr_name + ":" + str(channel)
        attr_min = 1e30
        attr_max = -1e30
        for name in self._data.keys():
            key = name + ":" + attr_key_base
            if key not in self._attr2minmax:
                attr = self.get_attr(name, attr_name)
                if attr is None: # clouds may not have all the same attributes
                    continue
                if len(attr.shape) > 1:
                    attr = attr[:, channel]
                self._attr2minmax[key] = (attr.min(), attr.max())
            amin, amax = self._attr2minmax[key]
            attr_min = min(attr_min, amin)
            attr_max = max(attr_max, amax)
        # No data contributed; return a degenerate range.
        if attr_min > attr_max:
            return (0.0, 0.0)
        return (attr_min, attr_max)
    def get_available_attrs(self, names):
        """Get a list of attributes based on the name.

        Returns the sorted intersection of attributes known for all `names`.
        """
        attr_names = None
        for n in names:
            known = self._known_attrs.get(n)
            if known is not None:
                if attr_names is None:
                    attr_names = known
                else:
                    attr_names = attr_names.intersection(known)
        if attr_names is None:
            return []
        return sorted(attr_names)
    def calc_bounds_for(self, name):
        """Calculate the bounds for a pointcloud.

        Returns [(min_x, min_y, min_z), (max_x, max_y, max_z)], or zeros for
        an empty/unknown cloud.
        """
        if name in self.tclouds and not self.tclouds[name].is_empty():
            tcloud = self.tclouds[name]
            # Ideally would simply return tcloud.compute_aabb() here, but it can
            # be very slow on macOS with clang 11.0
            pts = tcloud.point["positions"].numpy()
            min_val = (pts[:, 0].min(), pts[:, 1].min(), pts[:, 2].min())
            max_val = (pts[:, 0].max(), pts[:, 1].max(), pts[:, 2].max())
            return [min_val, max_val]
        else:
            return [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0)]
class DataModel(Model):
    """The class for data i/o and storage of visualization.

    Args:
        userdata: The dataset to be used in the visualization — an iterable of
            dicts, each with at least a "name" key and optionally
            "bounding_boxes".
    """
    def __init__(self, userdata):
        super().__init__()
        # We could just create the TPointCloud here, but that would cause the UI
        # to block. If we do it on load then the loading dialog will display.
        self._name2srcdata = {}
        self.bounding_box_data = []
        for d in userdata:
            name = d["name"]
            while name in self._data: # ensure each name is unique
                name = name + "_"
            self._init_data(name)
            self._name2srcdata[name] = d
            if 'bounding_boxes' in d:
                self.bounding_box_data.append(
                    Model.BoundingBoxData(name, d['bounding_boxes']))
    def load(self, name, fail_if_no_space=False):
        """Load a pointcloud based on the name provided.

        Returns:
            True on success. (In-memory user data cannot run out of space, so
            this always succeeds. The previous implementation fell off the end
            and returned None after creating the cloud, which was inconsistent
            with DatasetModel.load's boolean contract.)
        """
        if self.is_loaded(name):
            return True
        self.create_point_cloud(self._name2srcdata[name])
        return True
    def unload(self, name):
        """Unload a pointcloud (no-op: user data is kept in memory)."""
        pass
class DatasetModel(Model):
    """The class used to manage a dataset model.
    Args:
        dataset: The 3D ML dataset to use. You can use the base dataset, sample datasets , or a custom dataset.
        split: A string identifying the dataset split that is usually one of 'training', 'test', 'validation', or 'all'.
        indices: The indices to be used for the datamodel. This may vary based on the split used.
    """
    def __init__(self, dataset, indices, prefix, lut=None):
        super().__init__()
        self._dataset = None
        self._name2datasetidx = {}
        self._memory_limit = 10240 * 1024 * 1024 # memory limit in bytes
        self._current_memory_usage = 0
        # LRU cache of loaded item names: oldest on the left.
        self._cached_data = deque()
        # LabelLUT used for coloring labeled points in create_point_cloud().
        self.lut = lut
        self._dataset = dataset.get_split(prefix)
        if len(self._dataset) > 0:
            if indices is None:
                indices = range(0, len(self._dataset))
            # Some results from get_split() (like "training") are randomized.
            # Sort, so that the same index always returns the same piece of data.
            # path2idx = {}
            # for i in range(0, len(self._dataset.path_list)):
            # path2idx[self._dataset.path_list[i]] = i
            # real_indices = [path2idx[p] for p in sorted(path2idx.keys())]
            # indices = [real_indices[idx] for idx in indices]
            # SemanticKITTI names its items <sequence#>_<timeslice#>,
            # "mm_nnnnnn". We'd like to use the hierarchical feature of the tree
            # to separate the sequences. We cannot change the name in the dataset
            # because this format is used to report algorithm results, so do it
            # here.
            underscore_to_slash = False
            if dataset.__class__.__name__ == "SemanticKITTI":
                underscore_to_slash = True
            for i in indices:
                info = self._dataset.get_attr(i)
                name = info["name"]
                if underscore_to_slash:
                    name = name.replace("_", "/")
                while name in self._data: # ensure each name is unique
                    name = name + "_"
                self._init_data(name)
                self._name2datasetidx[name] = i
            # These datasets store RGB colors in the "feat" attribute.
            if dataset.__class__.__name__ in [
                    "Toronto3D", "Semantic3D", "S3DIS"
            ]:
                self._attr_rename["feat"] = "colors"
                self._attr_rename["feature"] = "colors"
        else:
            print(
                "[ERROR] Dataset split has no data. Please check that you are pointing to the correct directory for the dataset."
            )
            sys.exit(-1)
    def is_loaded(self, name):
        """Check if the data is loaded."""
        loaded = super().is_loaded(name)
        if loaded and name in self._cached_data:
            # make this point cloud the most recently used
            self._cached_data.remove(name)
            self._cached_data.append(name)
        return loaded
    def load(self, name, fail_if_no_space=False):
        """Check if data is not loaded, and then load the data.

        Returns:
            True on success; False only when `fail_if_no_space` is set and
            the memory budget would be exceeded.
        """
        assert (name in self._name2datasetidx)
        if self.is_loaded(name):
            return True
        idx = self._name2datasetidx[name]
        data = self._dataset.get_data(idx)
        data["name"] = name
        data["points"] = data["point"]
        self.create_point_cloud(data)
        if 'bounding_boxes' in data:
            self.bounding_box_data.append(
                Model.BoundingBoxData(name, data['bounding_boxes']))
        if 'cams' in data:
            # Pre-render the 3D boxes into each camera image.
            for _, val in data['cams'].items():
                lidar2img_rt = val['lidar2img_rt']
                bbox_data = data['bounding_boxes']
                bbox_3d_img = BoundingBox3D.project_to_img(
                    bbox_data, np.copy(val['img']), lidar2img_rt)
                val['bbox_3d'] = bbox_3d_img
            self.create_cams(data['name'], data['cams'], update=True)
        size = self._calc_pointcloud_size(self._data[name], self.tclouds[name],
                                          self.tcams[name])
        if size + self._current_memory_usage > self._memory_limit:
            if fail_if_no_space:
                self.unload(name)
                return False
            else:
                # Remove oldest from cache
                # NOTE(review): only one item is evicted, so the budget can
                # still be exceeded after this — presumably acceptable here.
                remove_name = self._cached_data.popleft()
                remove_size = self._calc_pointcloud_size(
                    self._data[remove_name], self.tclouds[remove_name])
                self._current_memory_usage -= remove_size
                self.unload(remove_name)
                # Add new point cloud to cache
                self._cached_data.append(name)
                self._current_memory_usage += size
                return True
        else:
            self._current_memory_usage += size
            self._cached_data.append(name)
            return True
    def _calc_pointcloud_size(self, raw_data, pcloud, cams={}):
        """Calcute the size of the pointcloud based on the rawdata."""
        # NOTE(review): `cams={}` is a mutable default argument; harmless here
        # because it is never mutated, but worth cleaning up.
        pcloud_size = 0
        for (attr, arr) in raw_data.items():
            if not isinstance(arr, dict):
                pcloud_size += arr.size * 4
        # Point cloud consumes 64 bytes of per point of GPU memory
        pcloud_size += pcloud.point["positions"].num_elements() * 64
        # TODO: add memory for point cloud color and semantics
        # TODO: add memory for cam images
        return pcloud_size
    def unload(self, name):
        """Unload the data (if it was loaded earlier)."""
        # Only unload if this was loadable; we might have an in-memory,
        # user-specified data created directly through create_point_cloud().
        if name in self._name2datasetidx:
            tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
            self.tclouds[name] = tcloud
            self._data[name] = {}
            self.tcams[name] = {}
            # Drop any bounding-box entry registered for this item.
            bbox_name = Model.bounding_box_prefix + name
            for i in range(0, len(self.bounding_box_data)):
                if self.bounding_box_data[i].name == bbox_name:
                    self.bounding_box_data.pop(i)
                    break
class Visualizer:
"""The visualizer class for dataset objects and custom point clouds."""
class LabelLUTEdit:
"""This class includes functionality for managing a labellut (label
look-up-table).
"""
def __init__(self):
self.widget = gui.TreeView()
self._on_changed = None # takes no args, returns no value
self.clear()
def clear(self):
"""Clears the look-up table."""
self.widget.clear()
self._label2color = {}
def is_empty(self):
"""Checks if the look-up table is empty."""
return len(self._label2color) == 0
def get_colors(self):
"""Returns a list of label keys."""
return [
self._label2color[label] for label in self._label2color.keys()
]
def set_on_changed(self, callback): # takes no args, no return value
self._on_changed = callback
def set_labels(self, labellut):
"""Updates the labels based on look-up table passsed."""
self.widget.clear()
root = self.widget.get_root_item()
for key in labellut.labels.keys():
lbl = labellut.labels[key]
color = lbl.color
if len(color) == 3:
color += [1.0]
self._label2color[key] = color
color = gui.Color(lbl.color[0], lbl.color[1], lbl.color[2])
cell = gui.LUTTreeCell(
str(key) + ": " + lbl.name, True, color, None, None)
cell.checkbox.set_on_checked(
self._make_on_checked(key, self._on_label_checked))
cell.color_edit.set_on_value_changed(
self._make_on_color_changed(key,
self._on_label_color_changed))
self.widget.add_item(root, cell)
def _make_on_color_changed(self, label, member_func):
def on_changed(color):
member_func(label, color)
return on_changed
def _on_label_color_changed(self, label, gui_color):
self._label2color[label] = [
gui_color.red, gui_color.green, gui_color.blue,
self._label2color[label][3]
]
if self._on_changed is not None:
self._on_changed()
def _make_on_checked(self, label, member_func):
def on_checked(checked):
member_func(label, checked)
return on_checked
def _on_label_checked(self, label, checked):
if checked:
alpha = 1.0
else:
alpha = 0.0
color = self._label2color[label]
self._label2color[label] = [color[0], color[1], color[2], alpha]
if self._on_changed is not None:
self._on_changed()
class ColormapEdit:
"""This class is used to create a color map for visualization of
points.
"""
def __init__(self, window, em):
self.colormap = None
self.widget = gui.Vert()
self._window = window
self._min_value = 0.0
self._max_value = 1.0
self._on_changed = None # takes no args, no return value
self._itemid2idx = {}
self._min_label = gui.Label("")
self._max_label = gui.Label("")
grid = gui.VGrid(2)
grid.add_child(gui.Label("Range (min):"))
grid.add_child(self._min_label)
grid.add_child(gui.Label("Range (max):"))
grid.add_child(self._max_label)
self.widget.add_child(grid)
self.widget.add_fixed(0.5 * em)
self.widget.add_child(gui.Label("Colormap"))
self._edit = gui.TreeView()
self._edit.set_on_selection_changed(self._on_selection_changed)
self.widget.add_child(self._edit)
self._delete = gui.Button("Delete")
self._delete.horizontal_padding_em = 0.5
self._delete.vertical_padding_em = 0
self._delete.set_on_clicked(self._on_delete)
self._add = gui.Button("Add")
self._add.horizontal_padding_em = 0.5
self._add.vertical_padding_em = 0
self._add.set_on_clicked(self._on_add)
h = gui.Horiz()
h.add_stretch()
h.add_child(self._delete)
h.add_fixed(0.25 * em)
h.add_child(self._add)
h.add_stretch()
self.widget.add_fixed(0.5 * em)
self.widget.add_child(h)
self.widget.add_fixed(0.5 * em)
def set_on_changed(self, callback): # takes no args, no return value
self._on_changed = callback
def update(self, colormap, min_val, max_val):
"""Updates the colormap based on the minimum and maximum values
passed.
"""
self.colormap = colormap
self._min_value = min_val
self._max_value = max_val
self._min_label.text = str(min_val)
self._max_label.text = str(max_val)
if self._min_value >= self._max_value:
self._max_value = self._min_value + 1.0
self._edit.clear()
self._itemid2idx = {}
root_id = self._edit.get_root_item()
for i in range(0, len(self.colormap.points)):
p = self.colormap.points[i]
color = gui.Color(p.color[0], p.color[1], p.color[2])
val = min_val + p.value * (max_val - min_val)
cell = gui.ColormapTreeCell(val, color, None, None)
cell.color_edit.set_on_value_changed(
self._make_on_color_changed(i, self._on_color_changed))
cell.number_edit.set_on_value_changed(
self._make_on_value_changed(i, self._on_value_changed))
item_id = self._edit.add_item(root_id, cell)
self._itemid2idx[item_id] = i
self._update_buttons_enabled()
def _make_on_color_changed(self, idx, member_func):
def on_changed(color):
member_func(idx, color)
return on_changed
def _on_color_changed(self, idx, gui_color):
self.colormap.points[idx].color = [
gui_color.red, gui_color.green, gui_color.blue
]
if self._on_changed is not None:
self._on_changed()
def _make_on_value_changed(self, idx, member_func):
def on_changed(value):
member_func(idx, value)
return on_changed
def _on_value_changed(self, idx, value):
value = (value - self._min_value) / (self._max_value -
self._min_value)
needs_update = False
value = min(1.0, max(0.0, value))
if ((idx > 0 and value < self.colormap.points[idx - 1].value) or
(idx < len(self.colormap.points) - 1 and
value > self.colormap.points[idx + 1].value)):
self.colormap.points[idx].value = value
o = self.colormap.points[idx]
self.colormap.points.sort(key=lambda cmap_pt: cmap_pt.value)
for i in range(0, len(self.colormap.points)):
if self.colormap.points[i] is o:
idx = i
break
needs_update = True
if idx > 0 and value == self.colormap.points[idx - 1].value:
if idx < len(self.colormap.points):
upper = self.colormap.points[idx + 1].value
else:
upper = 1.0
value = value + 0.5 * (upper - value)
needs_update = True
if idx < len(self.colormap.points
) - 1 and value == self.colormap.points[idx + 1].value:
if idx > 0:
lower = self.colormap.points[idx - 1].value
else:
lower = 0.0
value = lower + 0.5 * (value - lower)
needs_update = True
self.colormap.points[idx].value = value
if needs_update:
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_selection_changed(self, item_id):
self._update_buttons_enabled()
def _on_delete(self):
if len(self.colormap.points) > 2:
idx = self._itemid2idx[self._edit.selected_item]
self.colormap.points = self.colormap.points[:
idx] + self.colormap.points[
idx + 1:]
del self._itemid2idx[self._edit.selected_item]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_add(self):
if self._edit.selected_item in self._itemid2idx: # maybe no selection
idx = self._itemid2idx[self._edit.selected_item]
if idx < len(self.colormap.points) - 1:
lower = self.colormap.points[idx]
upper = self.colormap.points[idx + 1]
else:
lower = self.colormap.points[len(self.colormap.points) - 2]
upper = self.colormap.points[len(self.colormap.points) - 1]
add_idx = min(idx + 1, len(self.colormap.points) - 1)
new_value = lower.value + 0.5 * (upper.value - lower.value)
new_color = [
0.5 * lower.color[0] + 0.5 * upper.color[0],
0.5 * lower.color[1] + 0.5 * upper.color[1],
0.5 * lower.color[2] + 0.5 * upper.color[2]
]
new_point = Colormap.Point(new_value, new_color)
self.colormap.points = self.colormap.points[:add_idx] + [
new_point
] + self.colormap.points[add_idx:]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _update_buttons_enabled(self):
if self._edit.selected_item in self._itemid2idx:
self._delete.enabled = len(self.colormap.points) > 2
self._add.enabled = True
else:
self._delete.enabled = False
self._add.enabled = False
def _update_later(self):
def update():
self.update(self.colormap, self._min_value, self._max_value)
self._window.post_redraw() # need to manually request redraw
gui.Application.instance.post_to_main_thread(self._window, update)
class ProgressDialog:
"""This class is used to manage the progress dialog displayed during
visualization.
Args:
title: The title of the dialog box.
window: The window where the progress dialog box should be displayed.
n_items: The maximum number of items.
"""
def __init__(self, title, window, n_items):
self._window = window
self._n_items = n_items
em = window.theme.font_size
self.dialog = gui.Dialog(title)
self._label = gui.Label(title + " ")
self._layout = gui.Vert(0, gui.Margins(em, em, em, em))
self.dialog.add_child(self._layout)
self._layout.add_child(self._label)
self._layout.add_fixed(0.5 * em)
self._progress = gui.ProgressBar()
self._progress.value = 0.0
self._layout.add_child(self._progress)
def set_text(self, text):
"""Set the label text on the dialog box."""
self._label.text = text + " "
def post_update(self, text=None):
"""Post updates to the main thread."""
if text is None:
gui.Application.instance.post_to_main_thread(
self._window, self.update)
else:
def update_with_text():
self.update()
self._label.text = text
gui.Application.instance.post_to_main_thread(
self._window, update_with_text)
def update(self):
"""Enumerate the progress in the dialog box."""
value = min(1.0, self._progress.value + 1.0 / self._n_items)
self._progress.value = value
SOLID_NAME = "Solid Color"
LABELS_NAME = "Label Colormap"
RAINBOW_NAME = "Colormap (Rainbow)"
GREYSCALE_NAME = "Colormap (Greyscale)"
COLOR_NAME = "RGB"
X_ATTR_NAME = "x position"
Y_ATTR_NAME = "y position"
Z_ATTR_NAME = "z position"
    def __init__(self, fps=4):
        """Initialize visualizer state; UI is built later by _init_user_interface().

        Args:
            fps: Animation playback rate in frames per second.
        """
        self._objects = None  # Model subclass; set by _init_dataset()/_init_data()
        self._name2treenode = {}
        self._name2treeid = {}
        self._treeid2name = {}
        self._attrname2lut = {}  # attribute name -> LabelLUT
        self._colormaps = {}  # shader display name -> Colormap
        self._shadername2panelidx = {}
        self._gradient = rendering.Gradient()
        self._scalar_min = 0.0
        self._scalar_max = 1.0
        self._animation_frames = []
        self._last_animation_time = time.time()
        # The +1e-9 guards against division by zero when fps == 0.
        self._animation_delay_secs = 1./(fps + 1e-9)
        self._consolidate_bounding_boxes = False
        self._dont_update_geometry = False
        self._prev_img_mode = 0
def _init_dataset(self, dataset, indices, prefix, lut=None):
self._objects = DatasetModel(dataset, indices, prefix, lut)
self._modality = dict()
self._modality['use_lidar'] = True
self._modality['use_camera'] = False
if hasattr(self._objects._dataset, 'infos'):
if 'lidar_path' in self._objects._dataset.infos[0]:
self._modality['use_lidar'] = True
if 'cams' in self._objects._dataset.infos[0]:
self._modality['use_camera'] = True
self._cam_names = list(
self._objects._dataset.infos[0]['cams'].keys())
def _init_data(self, data):
self._objects = DataModel(data)
self._modality = dict()
for _, val in self._objects._name2srcdata.items():
if isinstance(val, dict):
if 'points' in val or 'point' in val:
self._modality['use_lidar'] = True
if 'cams' in val:
self._modality['use_camera'] = True
self._cam_names = list(
self._objects._dataset.infos[0]['cams'].keys())
    def _init_user_interface(self, title, width, height):
        """Build the window: a 3D scene widget plus the settings panel
        (mouse controls, dataset tree/animation tabs, camera images, and the
        shader/coloring properties)."""
        # Labels overlaid on 3D objects in the scene (populated elsewhere).
        self._obj_3d_labels = []
        self.window = gui.Application.instance.create_window(
            title, width, height)
        self.window.set_on_layout(self._on_layout)
        em = self.window.theme.font_size
        self._3d = gui.SceneWidget()
        self._3d.enable_scene_caching(True) # makes UI _much_ more responsive
        self._3d.scene = rendering.Open3DScene(self.window.renderer)
        self.window.add_child(self._3d)
        self._panel = gui.Vert()
        self.window.add_child(self._panel)
        indented_margins = gui.Margins(em, 0, em, 0)
        # View controls
        ctrl = gui.CollapsableVert("Mouse Controls", 0, indented_margins)
        arcball = gui.Button("Arcball")
        arcball.set_on_clicked(self._on_arcball_mode)
        arcball.horizontal_padding_em = 0.5
        arcball.vertical_padding_em = 0
        fly = gui.Button("Fly")
        fly.set_on_clicked(self._on_fly_mode)
        fly.horizontal_padding_em = 0.5
        fly.vertical_padding_em = 0
        reset = gui.Button("Re-center")
        reset.set_on_clicked(self._on_reset_camera)
        reset.horizontal_padding_em = 0.5
        reset.vertical_padding_em = 0
        h = gui.Horiz(0.25 * em)
        h.add_stretch()
        h.add_child(arcball)
        h.add_child(fly)
        h.add_fixed(em)
        h.add_child(reset)
        h.add_stretch()
        ctrl.add_child(h)
        ctrl.add_fixed(em)
        self._panel.add_child(ctrl)
        # Dataset
        model = gui.CollapsableVert("Dataset", 0, indented_margins)
        vgrid = gui.VGrid(2, 0.25 * em)
        model.add_child(vgrid)
        model.add_fixed(0.5 * em)
        bgcolor = gui.ColorEdit()
        # Background color (default: white).
        bgcolor.color_value = gui.Color(1, 1, 1)
        self._on_bgcolor_changed(bgcolor.color_value)
        bgcolor.set_on_value_changed(self._on_bgcolor_changed)
        vgrid.add_child(gui.Label("BG Color"))
        vgrid.add_child(bgcolor)
        # Index-range selector limiting which items are shown.
        list_selector = gui.CollapsableVert("Selector", 0, indented_margins)
        list_selector_grid = gui.VGrid(4, 0.25 * em)
        list_selector_grid.add_child(gui.Label("lower"))
        list_selector.add_child(list_selector_grid)
        self._lower_val = gui.NumberEdit(gui.NumberEdit.INT)
        self._lower_val.int_value = 0
        self._prev_lower_val = 0
        self._lower_val.set_limits(0, len(self._objects.data_names) - 1)
        self._lower_val.set_on_value_changed(self._on_lower_val)
        list_selector_grid.add_child(self._lower_val)
        list_selector_grid.add_child(gui.Label("upper"))
        self._upper_val = gui.NumberEdit(gui.NumberEdit.INT)
        self._upper_val.int_value = len(self._objects.data_names) - 1
        self._prev_upper_val = 0
        self._upper_val.set_limits(0, len(self._objects.data_names) - 1)
        self._upper_val.set_on_value_changed(self._on_upper_val)
        list_selector_grid.add_child(self._upper_val)
        view_tab = gui.TabControl()
        view_tab.set_on_selected_tab_changed(self._on_display_tab_changed)
        model.add_child(view_tab)
        # ... model list
        self._dataset = gui.TreeView()
        self._dataset.set_on_selection_changed(
            self._on_dataset_selection_changed)
        list_grid = gui.Vert(2)
        list_grid.add_child(list_selector)
        list_grid.add_child(self._dataset)
        # ... animation slider
        v = gui.Vert()
        view_tab.add_tab("Animation", v)
        v.add_fixed(0.25 * em)
        grid = gui.VGrid(2)
        v.add_child(grid)
        # ... select image mode (raw camera image vs. projected 3D boxes)
        self._img_mode = gui.Combobox()
        for item in ["raw", "bbox_3d"]:
            self._img_mode.add_item(item)
        self._img_mode.selected_index = 0
        self._img_mode.set_on_selection_changed(self._on_img_mode_changed)
        grid.add_child(gui.Label("Image Mode"))
        grid.add_child(self._img_mode)
        self._slider = gui.Slider(gui.Slider.INT)
        self._slider.set_limits(0, len(self._objects.data_names))
        self._slider.set_on_value_changed(self._on_animation_slider_changed)
        grid.add_child(gui.Label("Index"))
        grid.add_child(self._slider)
        self._slider_current = gui.Label("")
        grid.add_child(gui.Label("Showing"))
        grid.add_child(self._slider_current)
        v.add_fixed(em)
        # Playback controls: previous / play / next.
        self._play = gui.Button("Play")
        self._play.horizontal_padding_em = 0.5
        self._play.vertical_padding_em = 0
        self._play.set_on_clicked(self._on_start_animation)
        self._next = gui.Button(">")
        self._next.horizontal_padding_em = 0.5
        self._next.vertical_padding_em = 0
        self._next.set_on_clicked(self._on_next)
        self._prev = gui.Button("<")
        self._prev.horizontal_padding_em = 0.5
        self._prev.vertical_padding_em = 0
        self._prev.set_on_clicked(self._on_prev)
        h = gui.Horiz()
        h.add_stretch()
        h.add_child(self._prev)
        h.add_child(self._play)
        h.add_child(self._next)
        h.add_stretch()
        v.add_child(h)
        view_tab.add_tab("List", list_grid)
        # Camera image widgets, one per camera, only if the data has cameras.
        if 'use_camera' in self._modality and self._modality['use_camera']:
            w = gui.CollapsableVert("Cameras", 0, indented_margins)
            cam_grid = gui.VGrid(
                2, 0, indented_margins) # change no. of cam_grid columns here
            self._img = dict()
            w.add_child(cam_grid)
            v.add_child(w)
            for cam in self._cam_names:
                self._img[cam] = gui.ImageWidget(o3d.t.geometry.Image())
                cam_grid.add_child(self._img[cam])
        # Coloring
        properties = gui.CollapsableVert("Properties", 0, indented_margins)
        grid = gui.VGrid(2, 0.25 * em)
        # ... data source
        self._datasource_combobox = gui.Combobox()
        self._datasource_combobox.set_on_selection_changed(
            self._on_datasource_changed)
        self._colormap_channel = gui.Combobox()
        self._colormap_channel.add_item("0")
        self._colormap_channel.set_on_selection_changed(
            self._on_channel_changed)
        h = gui.Horiz()
        h.add_child(self._datasource_combobox)
        h.add_fixed(em)
        h.add_child(gui.Label("Index"))
        h.add_child(self._colormap_channel)
        grid.add_child(gui.Label("Data"))
        grid.add_child(h)
        # ... shader
        self._shader = gui.Combobox()
        self._shader.add_item(self.SOLID_NAME)
        self._shader.add_item(self.LABELS_NAME)
        self._shader.add_item(self.RAINBOW_NAME)
        self._shader.add_item(self.GREYSCALE_NAME)
        self._shader.add_item(self.COLOR_NAME)
        self._colormaps[self.RAINBOW_NAME] = Colormap.make_rainbow()
        self._colormaps[self.GREYSCALE_NAME] = Colormap.make_greyscale()
        self._shader.selected_index = 0
        self._shader.set_on_selection_changed(self._on_shader_changed)
        grid.add_child(gui.Label("Shader"))
        grid.add_child(self._shader)
        properties.add_child(grid)
        # ... add model widget after property widget
        self._panel.add_child(model)
        # ... shader panels (one stacked sub-panel per shader type)
        self._shader_panels = gui.StackedWidget()
        panel_idx = 0
        # ... sub-panel: single color
        self._color_panel = gui.Vert()
        self._shader_panels.add_child(self._color_panel)
        self._shadername2panelidx[self.SOLID_NAME] = panel_idx
        panel_idx += 1
        self._color = gui.ColorEdit()
        self._color.color_value = gui.Color(0.5, 0.5, 0.5)
        self._color.set_on_value_changed(self._on_shader_color_changed)
        h = gui.Horiz()
        h.add_child(gui.Label("Color"))
        h.add_child(self._color)
        self._color_panel.add_child(h)
        # ... sub-panel: labels
        self._labels_panel = gui.Vert()
        self._shader_panels.add_child(self._labels_panel)
        self._shadername2panelidx[self.LABELS_NAME] = panel_idx
        panel_idx += 1
        self._label_edit = self.LabelLUTEdit()
        self._label_edit.set_on_changed(self._on_labels_changed)
        self._labels_panel.add_child(gui.Label("Labels"))
        self._labels_panel.add_child(self._label_edit.widget)
        # ... sub-panel: colormap (shared by rainbow and greyscale)
        self._colormap_panel = gui.Vert()
        self._shader_panels.add_child(self._colormap_panel)
        self._shadername2panelidx[self.RAINBOW_NAME] = panel_idx
        self._shadername2panelidx[self.GREYSCALE_NAME] = panel_idx
        panel_idx += 1
        self._colormap_edit = self.ColormapEdit(self.window, em)
        self._colormap_edit.set_on_changed(self._on_colormap_changed)
        self._colormap_panel.add_child(self._colormap_edit.widget)
        # ... sub-panel: RGB
        self._rgb_panel = gui.Vert()
        self._shader_panels.add_child(self._rgb_panel)
        self._shadername2panelidx[self.COLOR_NAME] = panel_idx
        panel_idx += 1
        self._rgb_combo = gui.Combobox()
        self._rgb_combo.add_item("255")
        self._rgb_combo.add_item("1.0")
        self._rgb_combo.set_on_selection_changed(self._on_rgb_multiplier)
        h = gui.Horiz(0.5 * em)
        h.add_child(gui.Label("Max value"))
        h.add_child(self._rgb_combo)
        self._rgb_panel.add_child(h)
        properties.add_fixed(em)
        properties.add_child(self._shader_panels)
        # Start with the panel expanded.
        properties.set_is_open(True)
        self._panel.add_child(properties)
        # Populate tree, etc.
        for name in self._objects.data_names:
            self._add_tree_name(name)
        self._update_datasource_combobox()
def set_lut(self, attr_name, lut):
    """Register the lookup table used when coloring attribute *attr_name*.

    Args:
        attr_name: Attribute name (string key).
        lut: The LabelLUT instance to associate with that attribute.
    """
    self._attrname2lut[attr_name] = lut
def setup_camera(self):
    """Set up camera for visualization.

    Fits the camera to the axis-aligned bounds of all checked objects.
    """
    selected = self._get_selected_names()
    all_bounds = [self._objects.calc_bounds_for(n) for n in selected]
    lo = [1e30, 1e30, 1e30]
    hi = [-1e30, -1e30, -1e30]
    for b in all_bounds:
        for axis in range(3):
            lo[axis] = min(lo[axis], b[0][axis])
            hi[axis] = max(hi[axis], b[1][axis])
    bounds = o3d.geometry.AxisAlignedBoundingBox(lo, hi)
    self._3d.setup_camera(60, bounds, bounds.get_center())
def show_geometries_under(self, name, show):
    """Show or hide every geometry whose tree name starts with *name*."""
    for node_name, node in self._name2treenode.items():
        if node_name.startswith(name):
            self._3d.scene.show_geometry(node_name, show)
            node.checkbox.checked = show
    self._3d.force_redraw()
def _add_tree_name(self, name, is_geometry=True):
    """Add *name* to the dataset tree, creating any missing parent groups.

    Fix: the parent-checkbox callback previously captured the loop
    variable ``n`` by reference (late-binding closure), so after the loop
    every group checkbox toggled the *last* created group. The prefix is
    now bound as a default argument at definition time.
    """
    names = name.split("/")
    parent = self._dataset.get_root_item()
    for i in range(0, len(names) - 1):
        n = "/".join(names[:i + 1]) + "/"
        if n in self._name2treeid:
            parent = self._name2treeid[n]
        else:
            # Bind the prefix now; a plain closure over `n` would see the
            # final loop value when the callback fires later.
            def on_parent_checked(checked, prefix=n):
                self.show_geometries_under(prefix, checked)
            cell = gui.CheckableTextTreeCell(n, True, on_parent_checked)
            parent = self._dataset.add_item(parent, cell)
            self._name2treenode[n] = cell
            self._name2treeid[n] = parent
            self._treeid2name[parent] = n

    def on_checked(checked):
        self._3d.scene.show_geometry(name, checked)
        if self._is_tree_name_geometry(name):
            # available attrs could change
            self._update_datasource_combobox()
            self._update_bounding_boxes()
        self._3d.force_redraw()

    cell = gui.CheckableTextTreeCell(names[-1], True, on_checked)
    if is_geometry:
        # Red until the geometry is actually loaded (turned green later).
        cell.label.text_color = gui.Color(1.0, 0.0, 0.0, 1.0)
    node = self._dataset.add_item(parent, cell)
    self._name2treenode[name] = cell
    self._treeid2name[node] = name
    self._slider.set_limits(0, len(self._objects.data_names) - 1)
    if len(self._objects.data_names) == 1:
        self._slider_current.text = name
def _load_geometry(self, name, ui_done_callback):
    """Load one object on a worker thread behind a progress dialog.

    ui_done_callback is posted to the main thread once loading finishes.
    """
    progress_dlg = Visualizer.ProgressDialog("Loading...", self.window, 2)
    progress_dlg.set_text("Loading " + name + "...")

    def load_thread():
        # NOTE(review): the load result is ignored here — failures are not
        # surfaced in the tree, unlike _load_geometries; confirm intended.
        result = self._objects.load(name)
        progress_dlg.post_update("Loading " + name + "...")
        gui.Application.instance.post_to_main_thread(
            self.window, ui_done_callback)
        gui.Application.instance.post_to_main_thread(
            self.window, self.window.close_dialog)

    self.window.show_dialog(progress_dlg.dialog)
    threading.Thread(target=load_thread).start()
def _load_geometries(self, names, ui_done_callback):
    """Load several objects on a worker thread with progress reporting.

    Tree labels of successfully loaded objects turn green; loading stops
    at the first failure. ui_done_callback is posted to the main thread.
    """
    # Progress has: len(names) items + ui_done_callback
    progress_dlg = Visualizer.ProgressDialog("Loading...", self.window,
                                             len(names) + 1)
    progress_dlg.set_text("Loading " + names[0] + "...")

    def load_thread():
        for i in range(0, len(names)):
            result = self._objects.load(names[i], True)
            if i + 1 < len(names):
                text = "Loading " + names[i + 1] + "..."
            else:
                text = "Creating GPU objects..."
            progress_dlg.post_update(text)
            if result:
                # Green label marks a loaded object.
                self._name2treenode[names[i]].label.text_color = gui.Color(
                    0.0, 1.0, 0.0, 1.0)
            else:
                break
        gui.Application.instance.post_to_main_thread(
            self.window, ui_done_callback)
        gui.Application.instance.post_to_main_thread(
            self.window, self.window.close_dialog)

    self.window.show_dialog(progress_dlg.dialog)
    threading.Thread(target=load_thread).start()
def _update_geometry(self, check_unloaded=False):
    """Refresh every tracked point cloud with the current material.

    If check_unloaded, scene geometry for unloaded objects is removed
    first. Labels turn green for loaded non-empty clouds and red (with
    the checkbox cleared) for empty ones.
    """
    if check_unloaded:
        for name in self._objects.data_names:
            if not self._objects.is_loaded(name):
                self._3d.scene.remove_geometry(name)
    material = self._get_material()
    for n, tcloud in self._objects.tclouds.items():
        self._update_point_cloud(n, tcloud, material)
        if not tcloud.is_empty():
            self._name2treenode[n].label.text_color = gui.Color(
                0.0, 1.0, 0.0, 1.0)
            if self._3d.scene.has_geometry(n):
                self._3d.scene.modify_geometry_material(n, material)
        else:
            self._name2treenode[n].label.text_color = gui.Color(
                1.0, 0.0, 0.0, 1.0)
            self._name2treenode[n].checkbox.checked = False
    self._3d.force_redraw()
def _update_point_cloud(self, name, tcloud, material):
    """Update (or add) one cloud's visualization scalar/colors in the scene.

    No-op while geometry updates are suppressed or the cloud is empty.
    """
    if self._dont_update_geometry:
        return
    if tcloud.is_empty():
        return
    attr_name = self._datasource_combobox.selected_text
    attr = None
    flag = 0
    attr = self._objects.get_attr(name, attr_name)
    # Update scalar values
    if attr is not None:
        if len(attr.shape) == 1:
            scalar = attr
        else:
            channel = max(0, self._colormap_channel.selected_index)
            scalar = attr[:, channel]
    else:
        # Attribute missing for this object: fall back to all-zero scalars.
        shape = [len(tcloud.point["positions"].numpy())]
        scalar = np.zeros(shape, dtype='float32')
    tcloud.point["__visualization_scalar"] = Visualizer._make_tcloud_array(
        scalar)
    flag |= rendering.Scene.UPDATE_UV0_FLAG
    # Update RGB values
    if attr is not None and (len(attr.shape) == 2 and attr.shape[1] >= 3):
        max_val = float(self._rgb_combo.selected_text)
        if max_val <= 0:
            max_val = 255.0
        colors = attr[:, [0, 1, 2]] * (1.0 / max_val)
        # NOTE(review): `colors` is computed but the assignment below is
        # commented out, so only the update flag is set — confirm intended.
        # tcloud.point["colors"] = Visualizer._make_tcloud_array(colors)
        flag |= rendering.Scene.UPDATE_COLORS_FLAG
    # Update geometry
    if self._3d.scene.scene.has_geometry(name):
        self._3d.scene.scene.update_geometry(name, tcloud, flag)
    else:
        self._3d.scene.add_geometry(name, tcloud, material)
        node = self._name2treenode[name]
        if node is not None:
            self._3d.scene.show_geometry(name, node.checkbox.checked)
def _get_material(self):
    """Build a rendering.MaterialRecord for the selected shader.

    Refreshes the gradient first so gradient shaders pick up LUT/colormap
    edits.
    """
    self._update_gradient()
    material = rendering.MaterialRecord()
    if self._shader.selected_text == self.SOLID_NAME:
        material.shader = "unlitSolidColor"
        c = self._color.color_value
        material.base_color = [c.red, c.green, c.blue, 1.0]
    elif self._shader.selected_text == self.COLOR_NAME:
        material.shader = "defaultUnlit"
        material.base_color = [1.0, 1.0, 1.0, 1.0]
    else:
        # Labels / rainbow / greyscale all map scalars through a gradient.
        material.shader = "unlitGradient"
        material.gradient = self._gradient
        material.scalar_min = self._scalar_min
        material.scalar_max = self._scalar_max
    return material
def _update_bounding_boxes(self, animation_frame=None):
    """Rebuild bounding-box line geometry (and optional 3D text labels).

    In consolidated mode all boxes of checked objects (or of the current
    animation frame) are merged into one line set; otherwise one geometry
    per BoundingBoxData entry is added once.
    """
    # Pick a label LUT for coloring the box lines, if any is registered.
    if len(self._attrname2lut) == 1:
        # Can't do dict.values()[0], so have to iterate over the 1 element
        for v in self._attrname2lut.values():
            lut = v
    elif "labels" in self._attrname2lut:
        lut = self._attrname2lut["labels"]
    elif "label" in self._attrname2lut:
        lut = self._attrname2lut["label"]
    else:
        lut = None
    mat = rendering.MaterialRecord()
    mat.shader = "unlitLine"
    # 3d box line width
    mat.line_width = 2 * self.window.scaling
    if self._consolidate_bounding_boxes:
        name = Model.bounding_box_prefix.split("/")[0]
        boxes = []
        # When consolidated we assume bbox_data.name is the geometry name.
        if animation_frame is None:
            for bbox_data in self._objects.bounding_box_data:
                if bbox_data.name in self._name2treenode and self._name2treenode[
                        bbox_data.name].checkbox.checked:
                    boxes += bbox_data.boxes
        else:
            geom_name = self._animation_frames[animation_frame]
            for bbox_data in self._objects.bounding_box_data:
                if bbox_data.name == geom_name:
                    boxes = bbox_data.boxes
                    break
        self._3d.scene.remove_geometry(name)
        ################## HANDLE OBJ 3D LABEL SHOW #################
        # Clear any 3D text labels from the previous frame.
        for obj_3d_label in self._obj_3d_labels:
            self._3d.remove_3d_label(obj_3d_label)
        self._obj_3d_labels.clear()
        ################## HANDLE OBJ 3D LABEL SHOW #################
        if len(boxes) > 0:
            lines = BoundingBox3D.create_lines(boxes, lut)
            self._3d.scene.add_geometry(name, lines, mat)
            ################## HANDLE OBJ 3D LABEL SHOW #################
            # Starts with open3d v1.13
            for box in boxes:
                if box.show_meta:
                    # meta_pos = box.center + [0., box.size[2]*0.5, 0.]
                    meta_pos = box.meta_center
                    # meta_pos = box.center
                    # print(box.center, meta_pos, box.size)
                    self._obj_3d_labels.append(self._3d.add_3d_label(meta_pos, box.meta))
                    self._obj_3d_labels[-1].scale = 1
                    label = lut.labels[box.label_class]
                    self._obj_3d_labels[-1].color = gui.Color(label.color[0], label.color[1], label.color[2])
            ################## HANDLE OBJ 3D LABEL SHOW #################
            if name not in self._name2treenode:
                self._add_tree_name(name, is_geometry=False)
        self._3d.force_redraw()
    else:
        # Don't run this more than once if we aren't consolidating,
        # because nothing will change.
        if len(self._objects.bounding_box_data) > 0:
            if self._objects.bounding_box_data[
                    0].name in self._name2treenode:
                return
        for bbox_data in self._objects.bounding_box_data:
            lines = BoundingBox3D.create_lines(bbox_data.boxes, lut)
            self._3d.scene.add_geometry(bbox_data.name, lines, mat)
        for bbox_data in self._objects.bounding_box_data:
            self._add_tree_name(bbox_data.name, is_geometry=False)
        self._3d.force_redraw()
def _update_gradient(self):
    """Rebuild self._gradient from the label LUT or the active colormap."""
    if self._shader.selected_text == self.LABELS_NAME:
        colors = self._label_edit.get_colors()
        n = float(len(colors) - 1)
        if n >= 1:
            # One LUT stop per label color, evenly spaced over [0, 1].
            self._gradient.points = [
                rendering.Gradient.Point(
                    float(i) / n, [
                        colors[i][0], colors[i][1], colors[i][2],
                        colors[i][3]
                    ]) for i in range(0, len(colors))
            ]
        else:
            # Fewer than two labels: single magenta stop as a fallback.
            self._gradient.points = [
                rendering.Gradient.Point(0.0, [1.0, 0.0, 1.0, 1.0])
            ]
        self._gradient.mode = rendering.Gradient.LUT
    else:
        cmap = self._colormaps.get(self._shader.selected_text)
        if cmap is not None:
            self._gradient.points = [
                rendering.Gradient.Point(
                    p.value, [p.color[0], p.color[1], p.color[2], 1.0])
                for p in cmap.points
            ]
            self._gradient.mode = rendering.Gradient.GRADIENT
def _update_geometry_colors(self):
material = self._get_material()
for name, tcloud in self._objects.tclouds.items():
if not tcloud.is_empty() and self._3d.scene.has_geometry(name):
self._3d.scene.modify_geometry_material(name, material)
self._3d.force_redraw()
def _update_datasource_combobox(self):
current = self._datasource_combobox.selected_text
self._datasource_combobox.clear_items()
available_attrs = self._get_available_attrs()
for attr_name in available_attrs:
self._datasource_combobox.add_item(attr_name)
if current in available_attrs:
self._datasource_combobox.selected_text = current
elif len(available_attrs) > 0:
self._datasource_combobox.selected_text = available_attrs[0]
else:
# If no attributes, two possibilities:
# 1) no geometries are selected: don't change anything
# 2) geometries are selected: color solid
has_checked = False
for n, node in self._name2treenode.items():
if node.checkbox.checked and self._is_tree_name_geometry(n):
has_checked = True
break
if has_checked:
self._set_shader(self.SOLID_NAME)
def _update_shaders_combobox(self):
    """Rebuild the shader list for the currently selected attribute.

    Vector attributes add the RGB option; attributes with a LUT add the
    Labels option. Labels stays selected if still valid, otherwise scalar
    attributes default to Rainbow.
    """
    current_attr = self._datasource_combobox.selected_text
    current_shader = self._shader.selected_text
    has_lut = (current_attr in self._attrname2lut)
    is_scalar = True
    selected_names = self._get_selected_names()
    # Shape is probed on the first selected object only; rank > 1 means
    # a multi-channel (vector) attribute.
    if len(selected_names) > 0 and len(
            self._objects.get_attr_shape(selected_names[0],
                                         current_attr)) > 1:
        is_scalar = False
    self._shader.clear_items()
    if not is_scalar:
        self._shader.add_item(self.COLOR_NAME)
    if has_lut:
        self._shader.add_item(self.LABELS_NAME)
        self._label_edit.set_labels(self._attrname2lut[current_attr])
    self._shader.add_item(self.RAINBOW_NAME)
    self._shader.add_item(self.GREYSCALE_NAME)
    self._shader.add_item(self.SOLID_NAME)
    if current_shader == self.LABELS_NAME and has_lut:
        self._set_shader(self.LABELS_NAME)
    elif is_scalar:
        self._set_shader(self.RAINBOW_NAME)
def _update_attr_range(self):
attr_name = self._datasource_combobox.selected_text
current_channel = self._colormap_channel.selected_index
self._scalar_min, self._scalar_max = self._objects.get_attr_minmax(
attr_name, current_channel)
if self._shader.selected_text in self._colormaps:
cmap = self._colormaps[self._shader.selected_text]
self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)
def _set_shader(self, shader_name, force_update=False):
    """Select *shader_name*, switch its options panel and refresh colors."""
    # Disable channel if we are using a vector shader. Always do this to
    # ensure that the UI is consistent.
    if shader_name == Visualizer.COLOR_NAME:
        self._colormap_channel.enabled = False
    else:
        self._colormap_channel.enabled = True
    if shader_name == self._shader.selected_text and not force_update:
        return
    self._shader.selected_text = shader_name
    idx = self._shadername2panelidx[self._shader.selected_text]
    self._shader_panels.selected_index = idx
    if shader_name in self._colormaps:
        cmap = self._colormaps[shader_name]
        self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)
    self._update_geometry_colors()
def _on_layout(self, context=None):
    """Window layout: fixed-width settings panel on the right, 3D view
    fills the remaining area."""
    frame = self.window.content_rect
    em = self.window.theme.font_size
    panel_width = 20 * em  #20 * em
    panel_rect = gui.Rect(frame.get_right() - panel_width, frame.y,
                          panel_width, frame.height - frame.y)
    self._panel.frame = panel_rect
    self._3d.frame = gui.Rect(frame.x, frame.y, panel_rect.x - frame.x,
                              frame.height - frame.y)
    # self._3d.frame = gui.Rect(frame.x, frame.y, frame.width,
    #                           frame.height)
def _on_arcball_mode(self):
    """Switch mouse controls to arcball camera rotation."""
    self._3d.set_view_controls(gui.SceneWidget.ROTATE_CAMERA)
def _on_fly_mode(self):
    """Switch mouse controls to fly-through navigation."""
    self._3d.set_view_controls(gui.SceneWidget.FLY)
def _on_reset_camera(self):
    """Re-fit the camera to the currently selected objects."""
    self.setup_camera()
def _on_dataset_selection_changed(self, item):
    """Tree selection changed: lazily load the clicked geometry."""
    name = self._treeid2name[item]
    # Group/bbox nodes are not loadable geometry.
    if not self._is_tree_name_geometry(name):
        return

    def ui_callback():
        # Runs on the main thread after the background load completes.
        self._update_attr_range()
        self._update_geometry(check_unloaded=True)
        self._update_bounding_boxes()

    if not self._objects.is_loaded(name):
        self._load_geometry(name, ui_callback)
def _on_display_tab_changed(self, index):
    """Tab 0 = animation mode (one frame shown); otherwise list mode."""
    if index == 0:
        self._animation_frames = self._get_selected_names()
        self._slider.set_limits(0, len(self._animation_frames) - 1)
        self._on_animation_slider_changed(self._slider.int_value)
        # _on_animation_slider_changed() calls _update_bounding_boxes()
    else:
        for name, node in self._name2treenode.items():
            self._3d.scene.show_geometry(name, node.checkbox.checked)
        self._update_bounding_boxes()
def _on_animation_slider_changed(self, new_value):
    """Show only animation frame *new_value*; sync images, boxes, caption."""
    idx = int(new_value)
    for i in range(0, len(self._animation_frames)):
        self._3d.scene.show_geometry(self._animation_frames[i], (i == idx))
    if 'use_camera' in self._modality and self._modality['use_camera']:
        # Refresh each camera image widget for the new frame.
        for cam in self._cam_names:
            self._img[cam].update_image(
                self._objects.tcams[self._animation_frames[idx]][cam])
    self._update_bounding_boxes(animation_frame=idx)
    self._3d.force_redraw()
    self._slider_current.text = self._animation_frames[idx]
    # Widen the caption so long frame names are not clipped.
    r = self._slider_current.frame
    self._slider_current.frame = gui.Rect(r.x, r.y,
                                          self._slider.frame.get_right(),
                                          r.height)
def _on_start_animation(self):
    """Begin playback: install the tick handler, turn Play into Stop."""
    def on_tick():
        return self._on_animate()

    self._play.text = "Stop"
    self._play.set_on_clicked(self._on_stop_animation)
    # Force the first tick to advance immediately.
    self._last_animation_time = 0.0
    self.window.set_on_tick_event(on_tick)
def _on_animate(self):
now = time.time()
if now >= self._last_animation_time + self._animation_delay_secs:
idx = (self._slider.int_value + 1) % len(self._animation_frames)
self._slider.int_value = idx
self._on_animation_slider_changed(idx)
self._last_animation_time = now
return True
return False
def _on_stop_animation(self):
    """Stop playback and restore the Play button."""
    self.window.set_on_tick_event(None)
    self._play.text = "Play"
    self._play.set_on_clicked(self._on_start_animation)
def _on_next(self):
self._slider.int_value += 1
self._on_animation_slider_changed(self._slider.int_value)
def _on_prev(self):
    """Step the animation slider back one frame."""
    self._slider.int_value -= 1
    self._on_animation_slider_changed(self._slider.int_value)
def _on_img_mode_changed(self, name, idx):
    """Switch the camera-image overlay mode (0 = raw, 1 = with 3D boxes).

    Rebuilds the cached camera images of every loaded object; no-op when
    the mode is unchanged or no camera modality is configured.
    """
    if idx == self._prev_img_mode:
        return
    if not 'use_camera' in self._modality or not self._modality[
            'use_camera']:
        return
    self._prev_img_mode = idx
    if idx == 0:  # or name == 'raw'
        for n in self._objects.data_names:
            if self._objects.is_loaded(n):
                self._objects.create_cams(n,
                                          self._objects._data[n]['cams'],
                                          update=False)
    elif idx == 1:  # or name == 'bbox_3d'
        for n in self._objects.data_names:
            if self._objects.is_loaded(n):
                self._objects.create_cams(n,
                                          self._objects._data[n]['cams'],
                                          key='bbox_3d',
                                          update=False)
def _on_bgcolor_changed(self, new_color):
bg_color = [
new_color.red, new_color.green, new_color.blue, new_color.alpha
]
self._3d.scene.set_background(bg_color)
self._3d.force_redraw()
def _on_lower_val(self, val):
    """Lower-bound slider changed: clamp it, then sync checkboxes/boxes."""
    # Lower bound may never exceed the upper bound.
    if val > self._upper_val.int_value:
        self._lower_val.int_value = self._upper_val.int_value
    if val < int(self._lower_val.minimum_value):
        self._lower_val.int_value = int(self._lower_val.minimum_value)
    self._uncheck_bw_lims()
    self._check_bw_lims()
    self._prev_lower_val = int(self._lower_val.int_value)
    # self._on_datasource_changed(
    #     self._datasource_combobox.selected_text,
    #     self._datasource_combobox.selected_index)
    self._update_bounding_boxes()
def _on_upper_val(self, val):
    """Upper-bound slider changed: clamp it, then sync checkboxes/boxes."""
    # Upper bound may never fall below the lower bound.
    if val < self._lower_val.int_value:
        self._upper_val.int_value = self._lower_val.int_value
    if val > int(self._upper_val.maximum_value):
        self._upper_val.int_value = int(self._upper_val.maximum_value)
    self._uncheck_bw_lims()
    self._check_bw_lims()
    self._prev_upper_val = int(self._upper_val.int_value)
    # self._on_datasource_changed(
    #     self._datasource_combobox.selected_text,
    #     self._datasource_combobox.selected_index)
    self._update_bounding_boxes()
def _uncheck_bw_lims(self):
    """Uncheck and hide objects that just fell outside the slider range.

    Compares the previous bounds with the current ones and only touches
    the indices that left the [lower, upper] interval.
    """
    if self._prev_lower_val < self._lower_val.int_value:
        for i in range(self._prev_lower_val, self._lower_val.int_value):
            name = self._objects.data_names[i]
            self._name2treenode[name].checkbox.checked = False
            self._3d.scene.show_geometry(name, False)
    if self._prev_upper_val > self._upper_val.int_value:
        for i in range(self._upper_val.int_value + 1,
                       self._prev_upper_val + 1):
            name = self._objects.data_names[i]
            self._name2treenode[name].checkbox.checked = False
            self._3d.scene.show_geometry(name, False)
def _check_bw_lims(self):
    """Check, lazily load and show every object inside the slider range."""
    for i in range(self._lower_val.int_value,
                   self._upper_val.int_value + 1):
        name = self._objects.data_names[i]
        self._name2treenode[name].checkbox.checked = True
        # Reverse-lookup the tree item id so the selection handler can
        # trigger the lazy load for this object.
        item = [j for j, k in self._treeid2name.items() if name == k][0]
        self._on_dataset_selection_changed(item)
        self._3d.scene.show_geometry(name, True)
    self._3d.force_redraw()
def _on_datasource_changed(self, attr_name, idx):
    """Attribute selection changed: rebuild channel list and pick a shader."""
    selected_names = self._get_selected_names()
    n_channels = 1
    if len(selected_names) > 0:
        # Channel count is probed on the first selected object.
        shape = self._objects.get_attr_shape(selected_names[0], attr_name)
        if len(shape) <= 1:
            n_channels = 1
        else:
            n_channels = max(1, shape[1])
    current_channel = max(0, self._colormap_channel.selected_index)
    current_channel = min(n_channels - 1, current_channel)
    self._colormap_channel.clear_items()
    for i in range(0, n_channels):
        self._colormap_channel.add_item(str(i))
    self._colormap_channel.selected_index = current_channel
    self._update_attr_range()
    self._update_shaders_combobox()
    # Try to intelligently pick a shader.
    current_shader = self._shader.selected_text
    if current_shader == Visualizer.SOLID_NAME:
        pass
    elif attr_name in self._attrname2lut:
        self._set_shader(Visualizer.LABELS_NAME)
    elif attr_name == "colors":
        self._set_shader(Visualizer.COLOR_NAME)
    elif n_channels >= 3:
        # NOTE(review): generic multi-channel attrs fall back to solid
        # color here rather than the RGB shader — confirm intended.
        self._set_shader(Visualizer.SOLID_NAME)
    elif current_shader == Visualizer.COLOR_NAME:  # vector -> scalar
        self._set_shader(Visualizer.RAINBOW_NAME)
        # self._set_shader(Visualizer.SOLID_NAME)
    else:  # changing from one scalar to another, don't change
        pass
    self._update_geometry()
def _on_channel_changed(self, name, idx):
    """Colormap channel changed: recompute range and scalar arrays."""
    self._update_attr_range()
    self._update_geometry()  # need to recompute scalars array
def _on_shader_changed(self, name, idx):
    """Shader combobox changed: switch panels and refresh colors."""
    # _shader.current_text is already name, so we need to force an update
    self._set_shader(name, force_update=True)
def _on_shader_color_changed(self, color):
    """Solid color edited: re-apply materials."""
    self._update_geometry_colors()
def _on_labels_changed(self):
    """Label LUT edited: re-apply materials."""
    self._update_geometry_colors()
def _on_colormap_changed(self):
    """Colormap edited: store it for the current shader and re-apply."""
    self._colormaps[
        self._shader.selected_text] = self._colormap_edit.colormap
    self._update_geometry_colors()
def _on_rgb_multiplier(self, text, idx):
    """RGB max-value combo changed: rebuild geometry with the new scale."""
    self._update_geometry()
def _get_selected_names(self):
# Note that things like bounding boxes could be in the tree, and we
# do not want to include them in the list of things selected, even if
# they are checked.
selected_names = []
for n in self._objects.data_names:
if self._name2treenode[n].checkbox.checked:
selected_names.append(n)
return selected_names
def _get_available_attrs(self):
    """Return attribute names available for the currently selected objects."""
    selected_names = self._get_selected_names()
    return self._objects.get_available_attrs(selected_names)
def _is_tree_name_geometry(self, name):
return (name in self._objects.data_names)
@staticmethod
def _make_tcloud_array(np_array, copy=False):
    """Wrap a numpy array as an o3d core Tensor, zero-copy when possible."""
    if copy or not np_array.data.c_contiguous:
        return o3d.core.Tensor(np_array)
    else:
        # Shares memory with np_array; caller must keep it alive/unchanged.
        return o3d.core.Tensor.from_numpy(np_array)
def visualize_dataset(self,
                      dataset,
                      prefix="",
                      lut=None,
                      indices=None,
                      width=1280+320,
                      height=768):
    """Visualize a dataset.

    Example:
        Minimal example for visualizing a dataset::

            import open3d.ml.torch as ml3d  # or open3d.ml.tf as ml3d
            dataset = ml3d.datasets.SemanticKITTI(dataset_path='/path/to/SemanticKITTI/')
            vis = ml3d.vis.Visualizer()
            vis.visualize_dataset(dataset, 'all', indices=range(100))

    Args:
        dataset: The dataset to use for visualization.
        prefix: Name prefix for the loaded items.
        lut: Optional LabelLUT; built from dataset.label_to_names if None.
        indices: An iterable with a subset of the data points to visualize, such as [0,2,3,4].
        width: The width of the visualization window.
        height: The height of the visualization window.
    """
    # Setup the labels
    if lut is None:
        lut = LabelLUT()
        for key, val in dataset.label_to_names.items():
            # NOTE(review): a label is only added when its name is empty,
            # which looks inverted (upstream adds every (val, key) pair) —
            # confirm intended.
            if len(val) == 0:
                lut.add_label(key, key)
    self.set_lut("labels", lut)
    self._consolidate_bounding_boxes = True
    self._init_dataset(dataset, indices, prefix, lut)
    self._visualize("3DTrans", width, height)
def visualize(self,
              data,
              lut=None,
              bounding_boxes=None,
              width=1280,
              height=768):
    """Visualize a custom point cloud data.

    Example:
        Minimal example for visualizing a single point cloud with an
        attribute::

            import numpy as np
            import open3d.ml.torch as ml3d
            # or import open3d.ml.tf as ml3d
            data = [ {
                'name': 'my_point_cloud',
                'points': np.random.rand(100,3).astype(np.float32),
                'point_attr1': np.random.rand(100).astype(np.float32),
            } ]
            vis = ml3d.vis.Visualizer()
            vis.visualize(data)

    Args:
        data: A list of dictionaries. Each dictionary is a point cloud with
            attributes. Each dictionary must have the entries 'name' and
            'points'. Points and point attributes can be passed as numpy
            arrays, PyTorch tensors or TensorFlow tensors.
        lut: Optional lookup table for colors.
        bounding_boxes: Optional bounding boxes.
        width: window width.
        height: window height.
    """
    self._init_data(data)
    if lut is not None:
        self.set_lut("labels", lut)
    if bounding_boxes is not None:
        prefix = Model.bounding_box_prefix
        # Filament crashes if you have too many items, and anyway, hundreds
        # of items is unwieldy in a list. So combine items if we have too
        # many: aim for roughly 100 tree entries.
        group_size = int(math.floor(float(len(bounding_boxes)) / 100.0))
        if group_size < 2:
            box_data = [
                Model.BoundingBoxData(prefix + str(bbox), [bbox])
                for bbox in bounding_boxes
            ]
        else:
            box_data = []
            current_group = []
            n = len(bounding_boxes)
            for i in range(0, n):
                current_group.append(bounding_boxes[i])
                if len(current_group) >= group_size or i == n - 1:
                    if i < n - 1:
                        name = prefix + "Boxes " + str(
                            i + 1 - group_size) + " - " + str(i)
                    else:
                        # Last (possibly short) group gets an exact range.
                        if len(current_group) > 1:
                            name = prefix + "Boxes " + str(
                                i + 1 - len(current_group)) + " - " + str(i)
                        else:
                            name = prefix + "Box " + str(i)
                    data = Model.BoundingBoxData(name, current_group)
                    box_data.append(data)
                    current_group = []
        self._objects.bounding_box_data = box_data
    else:
        self._consolidate_bounding_boxes = True
    self._visualize("3DTrans", width, height)
def _visualize(self, title, width, height):
    """Create the window, load all objects, then run the GUI event loop."""
    gui.Application.instance.initialize()
    self._init_user_interface(title, width, height)
    self._3d.scene.downsample_threshold = 400000
    # Turn all the objects off except the first one
    for name, node in self._name2treenode.items():
        node.checkbox.checked = True
        self._3d.scene.show_geometry(name, False)
    for name in [self._objects.data_names[0]]:
        self._name2treenode[name].checkbox.checked = True
        self._3d.scene.show_geometry(name, True)
    self._on_display_tab_changed(0)
    self._on_start_animation()

    def on_done_ui():
        # Add bounding boxes here: bounding boxes belonging to the dataset
        # will not be loaded until now.
        self._update_bounding_boxes()
        self._update_datasource_combobox()
        self._update_shaders_combobox()
        # Display "colors" by default if available, "points" if not
        available_attrs = self._get_available_attrs()
        self._set_shader(self.SOLID_NAME, force_update=True)
        if "colors" in available_attrs:
            self._datasource_combobox.selected_text = "colors"
        elif "points" in available_attrs:
            self._datasource_combobox.selected_text = "points"
        # Suppress redundant geometry rebuilds while wiring up the UI.
        self._dont_update_geometry = True
        self._on_datasource_changed(
            self._datasource_combobox.selected_text,
            self._datasource_combobox.selected_index)
        self._update_geometry_colors()
        self._dont_update_geometry = False
        # _datasource_combobox was empty, now isn't, re-layout.
        self.window.set_needs_layout()
        self._update_geometry()
        self.setup_camera()

    self._load_geometries(self._objects.data_names, on_done_ui)
    gui.Application.instance.run()
| 73,715 | 38.294243 | 129 | py |
3DTrans | 3DTrans-master/tools/ssl_utils/semi_train_utils.py | import glob
import os
import torch
import tqdm
from torch.nn.utils import clip_grad_norm_
from .sess import sess
from .pseudo_label import pseudo_label
from .iou_match_3d import iou_match_3d
from .se_ssd import se_ssd
# Registry mapping the SSL method name (ssl_cfg.NAME) to its per-iteration
# training function; all entries share the same call signature.
semi_learning_methods = {
    'SESS': sess,
    'Pseudo-Label': pseudo_label,
    '3DIoUMatch': iou_match_3d,
    'SE_SSD': se_ssd,
}
def train_ssl_one_epoch(teacher_model, student_model, optimizer, labeled_loader, unlabeled_loader, epoch_id, lr_scheduler, accumulated_iter, ssl_cfg,
                        rank, tbar, total_it_each_epoch, labeled_loader_iter, unlabeled_loader_iter, tb_log=None, leave_pbar=False, dist=False):
    """Run one semi-supervised (mean-teacher) training epoch.

    Each iteration draws one labeled and one unlabeled batch (re-creating
    the iterators on exhaustion), computes the SSL loss via the configured
    method, steps the student optimizer, and periodically updates the
    teacher by EMA of the student weights. Returns the updated
    accumulated_iter.
    """
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    for cur_it in range(total_it_each_epoch):
        try:
            ud_teacher_batch_dict, ud_student_batch_dict = next(unlabeled_loader_iter)
        except StopIteration:
            # Unlabeled set exhausted: restart it within the same epoch.
            unlabeled_loader_iter = iter(unlabeled_loader)
            ud_teacher_batch_dict, ud_student_batch_dict = next(unlabeled_loader_iter)
        try:
            ld_teacher_batch_dict, ld_student_batch_dict = next(labeled_loader_iter)
        except StopIteration:
            labeled_loader_iter = iter(labeled_loader)
            ld_teacher_batch_dict, ld_student_batch_dict = next(labeled_loader_iter)
        #lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        optimizer.zero_grad()
        loss, tb_dict, disp_dict = semi_learning_methods[ssl_cfg.NAME](
            teacher_model, student_model,
            ld_teacher_batch_dict, ld_student_batch_dict,
            ud_teacher_batch_dict, ud_student_batch_dict,
            ssl_cfg, epoch_id, dist
        )
        loss.backward()
        clip_grad_norm_(student_model.parameters(), ssl_cfg.STUDENT.GRAD_NORM_CLIP)
        optimizer.step()
        lr_scheduler.step(accumulated_iter)
        accumulated_iter += 1
        disp_dict.update({'loss': loss.item(), 'lr': cur_lr})
        # EMA Teacher: before EMA_EPOCH[0] the teacher is frozen; between
        # the two epochs a ramping momentum is used; afterwards a fixed one.
        if ssl_cfg.TEACHER.NUM_ITERS_PER_UPDATE != -1:
            ema_rampup_start, ema_start = ssl_cfg.TEACHER.EMA_EPOCH
            assert ema_rampup_start <= ema_start
            if epoch_id < ema_rampup_start:
                pass
            elif (epoch_id >= ema_rampup_start) and (epoch_id < ema_start):
                if accumulated_iter % ssl_cfg.TEACHER.NUM_ITERS_PER_UPDATE == 0:
                    if dist:
                        #if rank == 0:
                        update_ema_variables(student_model.module.onepass, teacher_model.module.onepass, ssl_cfg.TEACHER.RAMPUP_EMA_MOMENTUM, accumulated_iter)
                    else:
                        update_ema_variables(student_model, teacher_model, ssl_cfg.TEACHER.RAMPUP_EMA_MOMENTUM, accumulated_iter)
            elif epoch_id >= ema_start:
                if accumulated_iter % ssl_cfg.TEACHER.NUM_ITERS_PER_UPDATE == 0:
                    if dist:
                        #if rank == 0:
                        update_ema_variables_with_fixed_momentum(student_model.module.onepass, teacher_model.module.onepass, ssl_cfg.TEACHER.EMA_MOMENTUM)
                    else:
                        update_ema_variables_with_fixed_momentum(student_model, teacher_model, ssl_cfg.TEACHER.EMA_MOMENTUM)
            else:
                raise Exception('Impossible condition for EMA update')
        # log to console and tensorboard
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_ssl_model(teacher_model, student_model, student_optimizer, labeled_loader, unlabeled_loader,
                    lr_scheduler, ssl_cfg,
                    start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                    labeled_sampler, unlabeled_sampler,
                    lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
                    merge_all_iters_to_one_epoch=False, dist=False):
    """Top-level mean-teacher training loop over epochs.

    Runs train_ssl_one_epoch per epoch (epoch length = labeled loader
    length), using the warmup scheduler for the first WARMUP_EPOCH epochs,
    and saves student/teacher checkpoints every ckpt_save_interval epochs
    on rank 0. NOTE(review): max_ckpt_save_num is accepted but unused.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(labeled_loader)  # total iterations set to labeled set
        assert merge_all_iters_to_one_epoch is False
        labeled_loader_iter = iter(labeled_loader)
        unlabeled_loader_iter = iter(unlabeled_loader)
        for cur_epoch in tbar:
            # Re-seed distributed samplers so shuffling differs per epoch.
            if labeled_sampler is not None:
                labeled_sampler.set_epoch(cur_epoch)
            if unlabeled_sampler is not None:
                unlabeled_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < ssl_cfg.STUDENT.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_ssl_one_epoch(
                teacher_model = teacher_model,
                student_model = student_model,
                optimizer = student_optimizer,
                labeled_loader = labeled_loader,
                unlabeled_loader = unlabeled_loader,
                epoch_id = cur_epoch,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, ssl_cfg=ssl_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                labeled_loader_iter=labeled_loader_iter,
                unlabeled_loader_iter=unlabeled_loader_iter,
                dist = dist
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                student_ckpt_name = ckpt_save_dir / 'student' / ('checkpoint_epoch_%d' % trained_epoch)
                if dist:
                    # Unwrap DDP so checkpoints load without the wrapper.
                    save_checkpoint(
                        checkpoint_state(student_model.module.onepass, student_optimizer, trained_epoch, accumulated_iter), filename=student_ckpt_name,
                    )
                else:
                    save_checkpoint(
                        checkpoint_state(student_model, student_optimizer, trained_epoch, accumulated_iter), filename=student_ckpt_name,
                    )
                teacher_ckpt_name = ckpt_save_dir / 'teacher'/ ('checkpoint_epoch_%d' % trained_epoch)
                if dist:
                    save_checkpoint(
                        checkpoint_state(teacher_model.module.onepass, student_optimizer, trained_epoch, accumulated_iter), filename=teacher_ckpt_name,
                    )
                else:
                    save_checkpoint(
                        checkpoint_state(teacher_model, student_optimizer, trained_epoch, accumulated_iter), filename=teacher_ckpt_name,
                    )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type) with tensors on CPU."""
    cpu_state = type(model_state)()  # preserves e.g. OrderedDict ordering
    for key, tensor in model_state.items():
        cpu_state[key] = tensor.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    DDP-wrapped models are unwrapped and their weights moved to CPU so the
    checkpoint loads on any device. Returns a dict with keys 'epoch',
    'it', 'model_state', 'optimizer_state' and 'version'.

    Fix: the bare ``except:`` around the pcdet import also swallowed
    KeyboardInterrupt/SystemExit; narrowed to the exceptions that can
    actually occur.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save.

    Fix: removed the dead ``if False and ...`` branch that would have
    written a separate optimizer file — it was unreachable.
    """
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
def update_ema_variables(model, ema_model, alpha, global_step):
    """In-place EMA update of teacher params with a ramping momentum.

    The effective momentum is min(1 - 1/(global_step + 2), alpha), i.e.
    the true average is used until the exponential average is more
    correct. Fix: replaced the deprecated ``Tensor.add_(scalar, tensor)``
    overload with the modern ``add_(tensor, alpha=...)`` form and removed
    the dead string-literal block.
    """
    alpha = min(1 - 1 / (global_step + 2), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def update_ema_variables_with_fixed_momentum(model, ema_model, alpha):
    """EMA update of ema_model's parameters with a constant momentum *alpha*.

    Unlike update_ema_variables there is no warm-up ramp: the same momentum
    is applied at every call.
    """
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param
        # Modernized: add_(Number, Tensor) is a deprecated/removed overload;
        # use the explicit alpha= keyword instead.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
3DTrans | 3DTrans-master/tools/ssl_utils/semi_utils.py | import torch
import numpy as np
from pcdet.models.model_utils import model_nms_utils
try:
import kornia
except:
pass
def load_data_to_gpu(batch_dict):
    """Move every numpy array in *batch_dict* onto the GPU, in place.

    Non-array entries and bookkeeping keys ('frame_id', 'metadata', 'calib',
    'db_flag') are left untouched.  'images' goes through kornia's
    image-to-tensor conversion, 'image_shape' becomes an int tensor, and
    every other array becomes a float tensor.
    """
    skip_keys = ('frame_id', 'metadata', 'calib', 'db_flag')
    for key in list(batch_dict.keys()):
        val = batch_dict[key]
        if not isinstance(val, np.ndarray) or key in skip_keys:
            continue
        if key == 'images':
            batch_dict[key] = kornia.image_to_tensor(val).float().cuda().contiguous()
        elif key == 'image_shape':
            batch_dict[key] = torch.from_numpy(val).int().cuda()
        else:
            batch_dict[key] = torch.from_numpy(val).float().cuda()
"""
Reverse augmentation transform
"""
def random_world_flip(box_preds, params, reverse=False):
    """Apply (or undo) a world-flip augmentation to boxes [x, y, z, dx, dy, dz, heading].

    *params* is the string of flipped axes recorded by the augmentor
    (may contain 'x' and/or 'y').  With reverse=True the flips are undone
    in the opposite order ('y' before 'x').  Modifies box_preds in place
    and returns it.
    """
    def _flip_about_x(boxes):
        # flip across the x axis: mirror y and negate heading
        boxes[:, 1] = -boxes[:, 1]
        boxes[:, 6] = -boxes[:, 6]

    def _flip_about_y(boxes):
        # flip across the y axis: mirror x and reflect heading about pi
        boxes[:, 0] = -boxes[:, 0]
        boxes[:, 6] = -(boxes[:, 6] + np.pi)

    order = ('y', 'x') if reverse else ('x', 'y')
    for axis in order:
        if axis in params:
            if axis == 'x':
                _flip_about_x(box_preds)
            else:
                _flip_about_y(box_preds)
    return box_preds
def random_world_rotation(box_preds, params, reverse=False):
    """Apply (or undo) a global z-axis rotation to boxes [x, y, z, ..., heading].

    *params* is the rotation angle in radians recorded by the augmentor;
    reverse=True rotates by the negated angle.  Modifies box_preds in place
    and returns it.
    """
    yaw = -params if reverse else params
    angle = torch.tensor([yaw]).to(box_preds.device)
    cosa, sina = torch.cos(angle), torch.sin(angle)
    zeros, ones = angle.new_zeros(1), angle.new_ones(1)
    # Matrix laid out so that `points @ rot` rotates about +z.
    rot = torch.stack((
        cosa, sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones,
    ), dim=1).reshape(3, 3).float()
    box_preds[:, :3] = box_preds[:, :3] @ rot
    box_preds[:, 6] += yaw
    return box_preds
def random_world_scaling(box_preds, params, reverse=False):
    """Apply (or undo) a global scaling augmentation to boxes.

    Scales center and size columns (0..5) by *params*; with reverse=True
    the inverse factor is used.  Heading (col 6) is unchanged.  Modifies
    box_preds in place and returns it.
    """
    factor = 1.0 / params if reverse else params
    box_preds[:, :6] *= factor
    return box_preds
@torch.no_grad()
def reverse_transform(teacher_boxes, teacher_dict, student_dict):
    """Map teacher predictions into the student's augmented frame.

    For each sample, the teacher-side world augmentations are undone in
    reverse order, then the student-side augmentations are re-applied, so
    the teacher's boxes can supervise the student's view directly.
    Modifies teacher_boxes in place and returns it.
    """
    aug_funcs = {
        'random_world_flip': random_world_flip,
        'random_world_rotation': random_world_rotation,
        'random_world_scaling': random_world_scaling,
    }
    for idx, boxes in enumerate(teacher_boxes):
        t_augs = teacher_dict['augmentation_list'][idx]
        s_augs = student_dict['augmentation_list'][idx]
        t_params = teacher_dict['augmentation_params'][idx]
        s_params = student_dict['augmentation_params'][idx]

        preds = boxes['pred_boxes']
        # Undo the teacher's augmentations, most recent first.
        for name in t_augs[::-1]:
            preds = aug_funcs[name](preds, t_params[name], reverse=True)
        # Re-apply the student's augmentations in their original order.
        for name in s_augs:
            preds = aug_funcs[name](preds, s_params[name], reverse=False)
        boxes['pred_boxes'] = preds
    return teacher_boxes
"""
Filter predicted boxes with conditions
"""
def filter_boxes(batch_dict, cfgs):
    """Turn raw network outputs into per-sample prediction dicts.

    Optionally filters boxes by NMS, a score threshold and/or top-k
    according to *cfgs*, and always drops degenerate boxes whose size is
    zero along any dimension.

    Returns:
        list (length batch_size) of dicts with keys 'pred_boxes',
        'pred_cls_preds' and 'pred_labels'.
    """
    pred_dicts = []
    for index in range(batch_dict['batch_size']):
        # Select this sample's rows: either via an explicit batch-index
        # column (flat 2D layout) or by slicing the batch dimension (3D).
        if batch_dict.get('batch_index', None) is not None:
            assert batch_dict['batch_box_preds'].shape.__len__() == 2
            mask = (batch_dict['batch_index'] == index)
        else:
            assert batch_dict['batch_box_preds'].shape.__len__() == 3
            mask = index

        boxes = batch_dict['batch_box_preds'][mask]
        scores = batch_dict['batch_cls_preds'][mask]
        if not batch_dict['cls_preds_normalized']:
            scores = torch.sigmoid(scores)

        top_scores, labels = torch.max(scores, dim=-1)
        if batch_dict.get('has_class_labels', False):
            # Labels were already produced upstream (e.g. by an RoI head).
            label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
            labels = batch_dict[label_key][index]
        else:
            labels = labels + 1  # class ids are 1-based

        if cfgs.get('FILTER_BY_NMS', False):
            keep, _ = model_nms_utils.class_agnostic_nms(
                box_scores=top_scores, box_preds=boxes,
                nms_config=cfgs.NMS.NMS_CONFIG,
                score_thresh=cfgs.NMS.SCORE_THRESH
            )
            labels, boxes, scores, top_scores = labels[keep], boxes[keep], scores[keep], top_scores[keep]

        if cfgs.get('FILTER_BY_SCORE_THRESHOLD', False):
            keep = top_scores > cfgs.SCORE_THRESHOLD
            labels, boxes, scores, top_scores = labels[keep], boxes[keep], scores[keep], top_scores[keep]

        if cfgs.get('FILTER_BY_TOPK', False):
            k = min(top_scores.shape[0], cfgs.TOPK)
            keep = torch.topk(top_scores, k)[1]
            labels, boxes, scores, top_scores = labels[keep], boxes[keep], scores[keep], top_scores[keep]

        # Drop boxes with zero extent along any dimension.
        nonzero = (boxes[:, 3:6] != 0).all(1)
        pred_dicts.append({
            'pred_boxes': boxes[nonzero],
            'pred_cls_preds': scores[nonzero],
            'pred_labels': labels[nonzero],
        })
    return pred_dicts
"""
Generate gt_boxes in data_dict with prediction
"""
@torch.no_grad()
def construct_pseudo_label(boxes):
    """Pack per-sample predicted boxes into a dense (B, N_max, 8) gt tensor.

    Each row is [x, y, z, dx, dy, dz, heading, label]; samples with fewer
    boxes than the batch maximum are zero-padded at the end.
    """
    per_sample = [
        torch.cat([b['pred_boxes'], b['pred_labels'].float().unsqueeze(-1)], dim=1)
        for b in boxes
    ]
    counts = [t.shape[0] for t in per_sample]
    gt_boxes = per_sample[0].new_zeros((len(boxes), max(counts), 8))
    for i, (sample, n) in enumerate(zip(per_sample, counts)):
        gt_boxes[i, :n, :] = sample
    return gt_boxes
3DTrans | 3DTrans-master/tools/ssl_utils/iou_match_3d.py | import torch
from .semi_utils import reverse_transform, load_data_to_gpu, construct_pseudo_label
from pcdet.models.model_utils.model_nms_utils import class_agnostic_nms
@torch.no_grad()
def iou_match_3d_filter(batch_dict, cfgs):
    """Select pseudo-label boxes from RoI-head outputs.

    Boxes are kept when their predicted IoU exceeds a per-class threshold
    (cfgs.IOU_SCORE_THRESH), then class-agnostic NMS with a classification
    score threshold is applied; finally zero-sized boxes are discarded.

    Returns:
        list (length batch_size) of dicts with 'pred_boxes', 'pred_scores'
        and 'pred_labels'.
    """
    pred_dicts = []
    for index in range(batch_dict['batch_size']):
        boxes = batch_dict['rois'][index]
        ious = batch_dict['roi_ious'][index]
        scores = batch_dict['roi_scores'][index]
        labels = batch_dict['roi_labels'][index]
        if not batch_dict['cls_preds_normalized']:
            ious = torch.sigmoid(ious)
            scores = torch.sigmoid(scores)
        ious = ious.squeeze(-1)

        # Per-class IoU gate: labels are 1-based, thresholds 0-based.
        iou_th = ious.new_zeros(ious.shape)
        for cls_idx, th in enumerate(cfgs.IOU_SCORE_THRESH):
            iou_th[labels == (cls_idx + 1)] = th
        keep = ious >= iou_th
        ious, boxes, scores, labels = ious[keep], boxes[keep], scores[keep], labels[keep]

        # NMS on classification scores with a minimum score threshold.
        selected, selected_scores = class_agnostic_nms(
            box_scores=scores, box_preds=boxes,
            nms_config=cfgs.NMS_CONFIG,
            score_thresh=cfgs.CLS_SCORE_THRESH
        )
        final_scores = selected_scores
        final_labels = labels[selected]
        final_boxes = boxes[selected]

        # Drop degenerate boxes with zero extent.
        nonzero = (final_boxes[:, 3:6] != 0).all(1)
        pred_dicts.append({
            'pred_boxes': final_boxes[nonzero],
            'pred_scores': final_scores[nonzero],
            'pred_labels': final_labels[nonzero],
        })
    return pred_dicts
def iou_match_3d(teacher_model, student_model,
                 ld_teacher_batch_dict, ld_student_batch_dict,
                 ud_teacher_batch_dict, ud_student_batch_dict,
                 cfgs, epoch_id, dist
                 ):
    """IoU-match pseudo-labeling step.

    The teacher labels the unlabeled batch (filtered by IoU/NMS), the
    boxes are mapped into the student's augmented frame, and the student
    trains on the labeled batch plus those pseudo-labels.

    Returns:
        (loss, tb_dict, disp_dict) with loss = labeled + unlabeled student loss.
    """
    # Pseudo-labels are generated for unlabeled data only.
    assert ld_teacher_batch_dict is None
    for bd in (ld_student_batch_dict, ud_student_batch_dict, ud_teacher_batch_dict):
        load_data_to_gpu(bd)

    # Teacher forward pass on the unlabeled batch.
    if dist:
        _, ud_teacher_batch_dict = teacher_model(ld_teacher_batch_dict, ud_teacher_batch_dict)
    else:
        ud_teacher_batch_dict = teacher_model(ud_teacher_batch_dict)

    # Filter teacher boxes and map them into the student's augmented frame.
    teacher_boxes = iou_match_3d_filter(ud_teacher_batch_dict, cfgs.TEACHER)
    teacher_boxes = reverse_transform(teacher_boxes, ud_teacher_batch_dict, ud_student_batch_dict)
    ud_student_batch_dict['gt_boxes'] = construct_pseudo_label(teacher_boxes)

    # Student forward passes on the labeled and pseudo-labeled batches.
    if dist:
        (_, ld_ret_dict, _, _), (_, ud_ret_dict, tb_dict, disp_dict) = \
            student_model(ld_student_batch_dict, ud_student_batch_dict)
    else:
        _, ld_ret_dict, _, _ = student_model(ld_student_batch_dict)
        _, ud_ret_dict, tb_dict, disp_dict = student_model(ud_student_batch_dict)

    loss = ld_ret_dict['loss'].mean() + ud_ret_dict['loss'].mean()
    return loss, tb_dict, disp_dict
3DTrans | 3DTrans-master/tools/ssl_utils/sess.py | import torch
import torch.nn.functional as F
import numpy as np
from .semi_utils import reverse_transform, load_data_to_gpu, filter_boxes
def get_consistency_loss(teacher_boxes, student_boxes):
    """SESS-style consistency losses between matched teacher/student boxes.

    For each sample, teacher and student boxes of the same argmax class are
    matched by nearest center distance (must be < 1 m); matched pairs
    contribute center, size and classification consistency terms.

    Args:
        teacher_boxes: per-sample list of dicts with 'pred_boxes' [N, 7]
            and 'pred_cls_preds' [N, C].
        student_boxes: same structure for the student.

    Returns:
        (center_loss, size_loss, cls_loss), each averaged over the batch.
    """
    center_losses, size_losses, cls_losses = [], [], []
    batch_normalizer = 0
    for teacher_box, student_box in zip(teacher_boxes, student_boxes):
        # NOTE(review): detach_() detaches *in place*, i.e. it also mutates
        # the tensors stored in the caller's dicts — confirm this is intended.
        teacher_cls_preds = teacher_box['pred_cls_preds'].detach_()
        teacher_box_preds = teacher_box['pred_boxes'].detach_()
        student_cls_preds = student_box['pred_cls_preds']
        student_box_preds = student_box['pred_boxes']
        num_teacher_boxes = teacher_box_preds.shape[0]
        num_student_boxes = student_box_preds.shape[0]
        # A sample with no boxes on either side contributes zero loss but
        # still counts toward the batch average.
        if num_teacher_boxes == 0 or num_student_boxes == 0:
            batch_normalizer += 1
            continue
        # Box layout is [x, y, z, dx, dy, dz, heading]; the rot columns are
        # split out here but not used by any of the three loss terms.
        teacher_centers, teacher_sizes, teacher_rot = teacher_box_preds[:, :3], teacher_box_preds[:, 3:6], teacher_box_preds[:, [6]]
        student_centers, student_sizes, student_rot = student_box_preds[:, :3], student_box_preds[:, 3:6], student_box_preds[:, [6]]
        # Matching is done without gradients: only the indices/masks are reused.
        with torch.no_grad():
            teacher_class = torch.max(teacher_cls_preds, dim=-1, keepdim=True)[1] # [Nt, 1]
            student_class = torch.max(student_cls_preds, dim=-1, keepdim=True)[1] # [Ns, 1]
            not_same_class = (teacher_class != student_class.T).float() # [Nt, Ns]
            MAX_DISTANCE = 1000000
            dist = teacher_centers[:, None, :] - student_centers[None, :, :] # [Nt, Ns, 3]
            dist = (dist ** 2).sum(-1) # [Nt, Ns]
            dist += not_same_class * MAX_DISTANCE # penalty on different classes
            student_dist_of_teacher, student_index_of_teacher = dist.min(1) # [Nt]
            teacher_dist_of_student, teacher_index_of_student = dist.min(0) # [Ns]
            # different from standard sess, we only consider distance<1m as matching
            # (note: dist holds *squared* distances, so this gates on 1 m^2 = 1 m)
            MATCHED_DISTANCE = 1
            matched_teacher_mask = (teacher_dist_of_student < MATCHED_DISTANCE).float().unsqueeze(-1) # [Ns, 1]
            matched_student_mask = (student_dist_of_teacher < MATCHED_DISTANCE).float().unsqueeze(-1) # [Nt, 1]
        # Gather the nearest counterpart for each box on both sides.
        matched_teacher_centers = teacher_centers[teacher_index_of_student] # [Ns, :]
        matched_student_centers = student_centers[student_index_of_teacher] # [Nt, :]
        matched_student_sizes = student_sizes[student_index_of_teacher] # [Nt, :]
        matched_student_cls_preds = student_cls_preds[student_index_of_teacher] # [Nt, :]
        # Symmetric L1 center consistency over matched pairs, normalized by
        # the total box count of the sample.
        center_loss = (((student_centers - matched_teacher_centers) * matched_teacher_mask).abs().sum()
                       + ((teacher_centers - matched_student_centers) * matched_student_mask).abs().sum()) \
                      / (num_teacher_boxes + num_student_boxes)
        size_loss = F.mse_loss(matched_student_sizes, teacher_sizes, reduction='none')
        size_loss = (size_loss * matched_student_mask).sum() / num_teacher_boxes
        # kl_div is not feasible, since we use sigmoid instead of softmax for class prediction
        # cls_loss = F.kl_div(matched_student_cls_preds.log(), teacher_cls_preds, reduction='none')
        cls_loss = F.mse_loss(matched_student_cls_preds, teacher_cls_preds, reduction='none') # use mse loss instead
        cls_loss = (cls_loss * matched_student_mask).sum() / num_teacher_boxes
        center_losses.append(center_loss)
        size_losses.append(size_loss)
        cls_losses.append(cls_loss)
        batch_normalizer += 1
    return sum(center_losses)/batch_normalizer, sum(size_losses)/batch_normalizer, sum(cls_losses)/batch_normalizer
def sigmoid_rampup(current, rampup_start, rampup_end):
    """Sigmoid-shaped ramp from 0 to 1 between rampup_start and rampup_end.

    Returns 0 before the ramp, exp(-5 * (1 - progress)^2) inside it (the
    schedule from Laine & Aila's temporal ensembling), and 1 afterwards.
    """
    assert rampup_start <= rampup_end
    if current < rampup_start:
        return 0
    if current >= rampup_end:
        return 1
    span = max(rampup_end, 0) - max(rampup_start, 0)
    if span == 0:  # degenerate ramp: jump straight to 1
        return 1
    phase = 1.0 - (current - max(rampup_start, 0)) / span
    return float(np.exp(-5.0 * phase * phase))
def sess(teacher_model, student_model,
         ld_teacher_batch_dict, ld_student_batch_dict,
         ud_teacher_batch_dict, ud_student_batch_dict,
         cfgs, epoch_id, dist
         ):
    """SESS consistency-training step.

    Runs teacher and student on both labeled and unlabeled batches, then
    combines the supervised student loss with a teacher/student box
    consistency loss weighted by a sigmoid ramp-up.

    Returns:
        (loss, tb_dict, disp_dict).
    """
    for bd in (ld_teacher_batch_dict, ld_student_batch_dict,
               ud_teacher_batch_dict, ud_student_batch_dict):
        load_data_to_gpu(bd)

    # Forward passes; the student's labeled pass also yields the supervised loss.
    if dist:
        ld_teacher_batch_dict, ud_teacher_batch_dict = teacher_model(ld_teacher_batch_dict, ud_teacher_batch_dict)
        (ld_student_batch_dict, ret_dict, tb_dict, disp_dict), (ud_student_batch_dict) = student_model(ld_student_batch_dict, ud_student_batch_dict)
    else:
        ld_teacher_batch_dict = teacher_model(ld_teacher_batch_dict)
        ud_teacher_batch_dict = teacher_model(ud_teacher_batch_dict)
        ld_student_batch_dict, ret_dict, tb_dict, disp_dict = student_model(ld_student_batch_dict)
        ud_student_batch_dict = student_model(ud_student_batch_dict)
    sup_loss = ret_dict['loss'].mean()

    ld_teacher_boxes = filter_boxes(ld_teacher_batch_dict, cfgs)
    ud_teacher_boxes = filter_boxes(ud_teacher_batch_dict, cfgs)
    ld_student_boxes = filter_boxes(ld_student_batch_dict, cfgs)
    ud_student_boxes = filter_boxes(ud_student_batch_dict, cfgs)

    # The teacher did not see the point-level augmentations, so its boxes
    # are mapped into the student's augmented frame before comparison.
    ld_teacher_boxes = reverse_transform(ld_teacher_boxes, ld_teacher_batch_dict, ld_student_batch_dict)
    ud_teacher_boxes = reverse_transform(ud_teacher_boxes, ud_teacher_batch_dict, ud_student_batch_dict)

    ld_center, ld_size, ld_cls = get_consistency_loss(ld_teacher_boxes, ld_student_boxes)
    ud_center, ud_size, ud_cls = get_consistency_loss(ud_teacher_boxes, ud_student_boxes)
    consistency_loss = ((ld_center + ud_center) * cfgs.CENTER_WEIGHT
                        + (ld_size + ud_size) * cfgs.SIZE_WEIGHT
                        + (ld_cls + ud_cls) * cfgs.CLASS_WEIGHT)

    # Ramp the consistency term in over cfgs.TEACHER.EMA_EPOCH.
    consistency_weight = cfgs.CONSISTENCY_WEIGHT * sigmoid_rampup(
        epoch_id, cfgs.TEACHER.EMA_EPOCH[0], cfgs.TEACHER.EMA_EPOCH[1])
    loss = sup_loss + consistency_weight * consistency_loss
    return loss, tb_dict, disp_dict
3DTrans | 3DTrans-master/tools/ssl_utils/se_ssd.py | import torch
import torch.nn.functional as F
import numpy as np
from .semi_utils import reverse_transform, load_data_to_gpu, filter_boxes
from pcdet.ops.iou3d_nms.iou3d_nms_utils import boxes_iou3d_gpu
def get_iou_consistency_loss(teacher_boxes, student_boxes):
    """SE-SSD-style consistency losses between IoU-matched teacher/student boxes.

    Each student box is matched to the teacher box of the same argmax class
    with the highest 3D IoU; pairs with IoU >= 0.7 contribute a box
    (smooth-L1 on [x..dz] plus sin-of-heading-difference) and a
    classification consistency term.

    Args:
        teacher_boxes: per-sample list of dicts with 'pred_boxes' [N, 7]
            and 'pred_cls_preds' [N, C].
        student_boxes: same structure for the student.

    Returns:
        (box_loss, cls_loss), each averaged over the batch.
    """
    box_losses, cls_losses = [], []
    batch_normalizer = 0
    for teacher_box, student_box in zip(teacher_boxes, student_boxes):
        # NOTE(review): detach_() detaches *in place*, i.e. it also mutates
        # the tensors stored in the caller's dicts — confirm this is intended.
        teacher_cls_preds = teacher_box['pred_cls_preds'].detach_()
        teacher_box_preds = teacher_box['pred_boxes'].detach_()
        student_cls_preds = student_box['pred_cls_preds']
        student_box_preds = student_box['pred_boxes']
        num_teacher_boxes = teacher_box_preds.shape[0]
        num_student_boxes = student_box_preds.shape[0]
        # A sample with no boxes on either side contributes zero loss but
        # still counts toward the batch average.
        if num_teacher_boxes == 0 or num_student_boxes == 0:
            batch_normalizer += 1
            continue
        # Matching is done without gradients; only indices/masks are reused.
        with torch.no_grad():
            teacher_class = torch.max(teacher_cls_preds, dim=-1, keepdim=True)[1] # [Nt, 1]
            student_class = torch.max(student_cls_preds, dim=-1, keepdim=True)[1] # [Ns, 1]
            not_same_class = (teacher_class != student_class.T).float() # [Nt, Ns]
            iou_3d = boxes_iou3d_gpu(teacher_box_preds, student_box_preds) # [Nt, Ns]
            iou_3d -= not_same_class # iou < 0 if not from the same class
            # Best teacher match per student box (typo 'stduent' kept as-is).
            matched_iou_of_stduent, matched_teacher_index_of_student = iou_3d.max(0) # [Ns]
            MATCHED_IOU_TH = 0.7
            matched_teacher_mask = (matched_iou_of_stduent >= MATCHED_IOU_TH).float().unsqueeze(-1)
            num_matched_boxes = matched_teacher_mask.sum()
            # Avoid division by zero when nothing matched (losses are 0 then).
            if num_matched_boxes == 0: num_matched_boxes = 1
        matched_teacher_preds = teacher_box_preds[matched_teacher_index_of_student]
        matched_teacher_cls = teacher_cls_preds[matched_teacher_index_of_student]
        # Split boxes into [x, y, z, dx, dy, dz] and heading columns.
        student_box_reg, student_box_rot = student_box_preds[:, :6], student_box_preds[:, [6]]
        matched_teacher_reg, matched_teacher_rot = matched_teacher_preds[:, :6], matched_teacher_preds[:, [6]]
        box_loss_reg = F.smooth_l1_loss(student_box_reg, matched_teacher_reg, reduction='none')
        box_loss_reg = (box_loss_reg * matched_teacher_mask).sum() / num_matched_boxes
        # sin(a - b) makes the heading loss insensitive to +/- pi flips.
        box_loss_rot = F.smooth_l1_loss(torch.sin(student_box_rot - matched_teacher_rot), torch.zeros_like(student_box_rot), reduction='none')
        box_loss_rot = (box_loss_rot * matched_teacher_mask).sum() / num_matched_boxes
        consistency_box_loss = box_loss_reg + box_loss_rot
        consistency_cls_loss = F.smooth_l1_loss(student_cls_preds, matched_teacher_cls, reduction='none')
        consistency_cls_loss = (consistency_cls_loss * matched_teacher_mask).sum() / num_matched_boxes
        box_losses.append(consistency_box_loss)
        cls_losses.append(consistency_cls_loss)
        batch_normalizer += 1
    return sum(box_losses)/batch_normalizer, sum(cls_losses)/batch_normalizer
def sigmoid_rampup(current, rampup_start, rampup_end):
    """Sigmoid ramp-up factor in [0, 1] over [rampup_start, rampup_end]."""
    assert rampup_start <= rampup_end
    start = max(rampup_start, 0)
    length = max(rampup_end, 0) - start
    if current < rampup_start:
        return 0
    if current >= rampup_end or length == 0:
        return 1
    phase = 1.0 - (current - start) / length
    return float(np.exp(-5.0 * phase * phase))
def se_ssd(teacher_model, student_model,
           ld_teacher_batch_dict, ld_student_batch_dict,
           ud_teacher_batch_dict, ud_student_batch_dict,
           cfgs, epoch_id, dist
           ):
    """SE-SSD consistency-training step.

    Combines the supervised student loss with an IoU-matched
    teacher/student consistency loss, weighted by a sigmoid ramp-up.

    Returns:
        (loss, tb_dict, disp_dict).
    """
    for bd in (ld_teacher_batch_dict, ld_student_batch_dict,
               ud_teacher_batch_dict, ud_student_batch_dict):
        load_data_to_gpu(bd)

    # Forward passes; the student's labeled pass also yields the supervised loss.
    if dist:
        ld_teacher_batch_dict, ud_teacher_batch_dict = teacher_model(ld_teacher_batch_dict, ud_teacher_batch_dict)
        (ld_student_batch_dict, ret_dict, tb_dict, disp_dict), (ud_student_batch_dict) = student_model(ld_student_batch_dict, ud_student_batch_dict)
    else:
        ld_teacher_batch_dict = teacher_model(ld_teacher_batch_dict)
        ud_teacher_batch_dict = teacher_model(ud_teacher_batch_dict)
        ld_student_batch_dict, ret_dict, tb_dict, disp_dict = student_model(ld_student_batch_dict)
        ud_student_batch_dict = student_model(ud_student_batch_dict)
    sup_loss = ret_dict['loss'].mean()

    ld_teacher_boxes = filter_boxes(ld_teacher_batch_dict, cfgs)
    ud_teacher_boxes = filter_boxes(ud_teacher_batch_dict, cfgs)
    ld_student_boxes = filter_boxes(ld_student_batch_dict, cfgs)
    ud_student_boxes = filter_boxes(ud_student_batch_dict, cfgs)

    # Teacher predictions are mapped into the student's augmented frame.
    ld_teacher_boxes = reverse_transform(ld_teacher_boxes, ld_teacher_batch_dict, ld_student_batch_dict)
    ud_teacher_boxes = reverse_transform(ud_teacher_boxes, ud_teacher_batch_dict, ud_student_batch_dict)

    ld_box, ld_cls = get_iou_consistency_loss(ld_teacher_boxes, ld_student_boxes)
    ud_box, ud_cls = get_iou_consistency_loss(ud_teacher_boxes, ud_student_boxes)
    consistency_loss = ((ld_box + ud_box) * cfgs.CONSIST_BOX_WEIGHT
                        + (ld_cls + ud_cls) * cfgs.CONSIST_CLS_WEIGHT)

    # Ramp the consistency term in over cfgs.TEACHER.EMA_EPOCH.
    consistency_weight = cfgs.CONSISTENCY_WEIGHT * sigmoid_rampup(
        epoch_id, cfgs.TEACHER.EMA_EPOCH[0], cfgs.TEACHER.EMA_EPOCH[1])
    loss = sup_loss + consistency_weight * consistency_loss
    return loss, tb_dict, disp_dict
3DTrans | 3DTrans-master/tools/ssl_utils/pseudo_label.py | import torch
from .semi_utils import reverse_transform, load_data_to_gpu, construct_pseudo_label
def pseudo_label(teacher_model, student_model,
                 ld_teacher_batch_dict, ld_student_batch_dict,
                 ud_teacher_batch_dict, ud_student_batch_dict,
                 cfgs, epoch_id, dist
                 ):
    """Plain pseudo-labeling step.

    Post-processed teacher detections on the unlabeled batch (optionally
    score-thresholded) become gt_boxes for the student, which then trains
    on both the labeled and pseudo-labeled batches.

    Returns:
        (loss, tb_dict, disp_dict) with loss = labeled + unlabeled student loss.
    """
    assert ld_teacher_batch_dict is None  # labels are generated for unlabeled data only
    for bd in (ld_student_batch_dict, ud_student_batch_dict, ud_teacher_batch_dict):
        load_data_to_gpu(bd)

    # Teacher forward + post-processing on the unlabeled batch.
    if dist:
        _, ud_teacher_batch_dict = teacher_model(ld_teacher_batch_dict, ud_teacher_batch_dict)
        teacher_boxes, _ = teacher_model.module.onepass.post_processing(ud_teacher_batch_dict)
    else:
        ud_teacher_batch_dict = teacher_model(ud_teacher_batch_dict)
        teacher_boxes, _ = teacher_model.post_processing(ud_teacher_batch_dict)

    # Map the teacher's boxes into the student's augmented frame.
    teacher_boxes = reverse_transform(teacher_boxes, ud_teacher_batch_dict, ud_student_batch_dict)

    if cfgs.get('FILTER_BY_SCORE_THRESHOLD', False):
        # Keep only confident teacher detections.
        filtered = []
        for idx in range(ud_teacher_batch_dict['batch_size']):
            keep = teacher_boxes[idx]['pred_scores'] > cfgs.SCORE_THRESHOLD
            filtered.append({
                'pred_boxes': teacher_boxes[idx]['pred_boxes'][keep],
                'pred_scores': teacher_boxes[idx]['pred_scores'][keep],
                'pred_labels': teacher_boxes[idx]['pred_labels'][keep],
            })
        gt_boxes = construct_pseudo_label(filtered)
    else:
        gt_boxes = construct_pseudo_label(teacher_boxes)
    ud_student_batch_dict['gt_boxes'] = gt_boxes

    # Student forward passes on the labeled and pseudo-labeled batches.
    if dist:
        (_, ld_ret_dict, _, _), (_, ud_ret_dict, tb_dict, disp_dict) = student_model(ld_student_batch_dict, ud_student_batch_dict)
    else:
        _, ld_ret_dict, _, _ = student_model(ld_student_batch_dict)
        _, ud_ret_dict, tb_dict, disp_dict = student_model(ud_student_batch_dict)

    loss = ld_ret_dict['loss'].mean() + ud_ret_dict['loss'].mean()
    return loss, tb_dict, disp_dict
3DTrans | 3DTrans-master/tools/train_utils/train_active_CLUE.py | import glob
import os
import pickle
from symbol import parameters
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils, self_training_utils
from pcdet.models import load_data_to_gpu
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.utils import active_learning_2D_utils
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector for one pass over actively sampled target data.

    Each iteration draws one batch from the source loader and one from the
    sample (actively annotated target) loader and optimizes
    ``loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam``.  Progress and
    losses are logged on rank 0 only.

    Returns:
        int: the updated accumulated iteration counter.
    """
    model.train()
    # Ensure every parameter is trainable (other phases of the pipeline may freeze parts).
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Re-create exhausted iterators so this loop can outlast either loader.
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:
            # Was a bare `except:`; standard torch optimizers have no `.lr`
            # attribute and expose the learning rate via param_groups.
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        # Note: disp_dict from the source pass overwrites the sample pass's.
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # Average the timings across distributed workers.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_active_model_target(model, optimizer, source_train_loader, target_train_loader, model_func, lr_scheduler, optim_cfg,
                        start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, sample_epoch,
                        annotation_budget, target_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                        source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                        max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-learning training loop (CLUE sampling) over the target domain.

    At epochs listed in *sample_epoch*, the model scores target frames,
    CLUE selects *annotation_budget* frames, the selected frames move from
    the target pool into the sample pool, and fresh dataloaders are built
    for both.  Every epoch then trains the detector on source + sampled
    data and periodically saves checkpoints (rank 0 only).
    """
    target_list = active_learning_2D_utils.get_dataset_list(target_file_path, oss=True)
    sample_list = []
    sample_train_loader = None
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    accumulated_iter_detector = start_iter
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_iters_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            # NOTE(review): cur_scheduler is computed but never passed on;
            # train_detector below receives lr_scheduler directly, so the
            # warmup scheduler is effectively unused here — confirm intended.
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # sample from target_domain
                # Score all target frames with the current model, then pick
                # the CLUE-selected subset within the annotation budget.
                frame_score = active_learning_2D_utils.active_evaluate_dual(model, target_train_loader, rank, domain='target')
                sampled_frame_id, _ = active_learning_2D_utils.active_sample_CLUE(frame_score, budget=annotation_budget)
                # Move the selected frames from the target pool to the sample pool.
                sample_list, info_path = active_learning_2D_utils.update_sample_list_dual(
                    sample_list, target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank, domain='target'
                )
                target_list, target_info_path = active_learning_2D_utils.update_target_list(target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank)
                # Rebuild dataloaders over the updated pools.
                sample_train_set, sample_train_loader, sample_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_SAMPLE,
                    class_names=cfg.DATA_CONFIG_SAMPLE.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                target_train_set, target_train_loader, target_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_TAR,
                    class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=target_info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                dataloader_iter_tar = iter(target_train_loader)
                dataloader_iter_sample = iter(sample_train_loader) if sample_train_loader is not None else None
            # NOTE(review): if the first epoch is not in sample_epoch,
            # dataloader_iter_sample (and sample_train_loader) are still
            # unset here, which would raise at this call — verify that
            # sample_epoch always contains start_epoch.
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer,
                lr_scheduler,
                source_train_loader,
                sample_train_loader,
                dataloader_iter_src,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                len(sample_train_loader),
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                # Evict the oldest checkpoints to respect max_ckpt_save_num.
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Copy a state dict with every tensor moved to CPU, preserving the
    mapping type (e.g. OrderedDict stays ordered)."""
    cpu_copy = type(model_state)()
    for param_name, param_tensor in model_state.items():
        cpu_copy[param_name] = param_tensor.cpu()
    return cpu_copy
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Build a serializable checkpoint dict.

    Args:
        model: network (plain module or DistributedDataParallel) or None.
        optimizer: optimizer whose state_dict should be stored, or None.
        epoch: epoch counter to record.
        it: accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # Unwrap DDP and move weights to CPU so the checkpoint loads anywhere.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None

    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:  # was a bare except; pcdet missing or lacking __version__ must not break checkpointing
        version = 'none'

    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' with torch.save.

    Note: a permanently disabled path (guarded by `if False`) that split
    the optimizer state into a separate '<filename>_optim.pth' file was
    removed as dead code.
    """
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
| 11,006 | 41.334615 | 170 | py |
3DTrans | 3DTrans-master/tools/train_utils/train_active_source_utils.py | from dis import dis
import glob
import os
import pickle
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils, self_training_utils
from pcdet.models import load_data_to_gpu
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.utils import active_learning_utils
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detection branch for one pass over ``source_loader`` with the
    domain discriminator frozen.

    When ``sample_loader`` is given, each step also consumes one batch of
    actively-sampled frames and the total loss is
    ``loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam``.
    Progress bars and TensorBoard scalars are emitted on rank 0 only.

    Returns:
        The updated ``accumulated_iter_detector`` step counter.
    """
    model.train()
    # Enable grads on the whole model, then freeze the discriminator so this
    # phase only updates detector weights.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    # NOTE(review): this silently overrides the caller-provided
    # total_it_each_epoch with the full source-loader length — confirm intended.
    total_it_each_epoch = len(source_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Restart the source iterator transparently when it is exhausted.
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        if sample_loader is not None:
            try:
                batch_sample = next(sample_loader_iter)
            except StopIteration:
                sample_loader_iter = iter(sample_loader)
                batch_sample = next(sample_loader_iter)
                print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        # Custom schedulers expose .lr; standard torch optimizers use
        # param_groups. NOTE(review): bare except — should be narrowed.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        if sample_loader is not None:
            loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        # Combine source loss with the (scaled) active-sample loss.
        if sample_loader is not None:
            loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        else:
            loss = loss_src
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # Average the timing stats across distributed workers.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                if sample_loader is not None:
                    for key, val in tb_dict_sam.items():
                        tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader,
                        source_loader_iter, target_loader_iter, dist_train, total_it_each_epoch,
                        accumulated_iter_discriminator, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Train only the domain discriminator for ``total_it_each_epoch`` steps.

    All non-discriminator parameters are frozen. Each step forwards one source
    batch (label ``source=True``) and one target batch (``source=False``) and
    minimizes the mean of the two discriminator losses.

    Returns:
        The updated ``accumulated_iter_discriminator`` step counter.
    """
    model.train()
    # Mirror image of train_detector: freeze the backbone/detector, train
    # only the discriminator head.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Restart exhausted iterators transparently.
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        # NOTE(review): bare except — narrows poorly; custom schedulers expose
        # .lr while torch optimizers use param_groups.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        # Symmetric adversarial objective: average of source/target losses.
        loss = (loss_src + loss_tar) / 2
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def train_one_epoch(model, optimizer_detector, optimizer_discriminator, source_train_loader, target_train_loader,
                    sample_train_loader, model_func, lr_scheduler_detector, lr_scheduler_discriminator,
                    accumulated_iter_detector, accumulated_iter_discriminator, optim_cfg, rank, tbar,
                    dist_train, total_it_each_epoch, dataloader_iter_src, dataloader_iter_tar, dataloader_iter_sample,
                    tb_log=None, leave_pbar=False, ema_model=None):
    """Run one adversarial training epoch.

    Phase 1 updates the detector on source (and optional actively-sampled)
    batches; phase 2 updates the domain discriminator for a fixed 2 steps.
    Returns the updated (detector, discriminator) iteration counters.
    """
    # assert total_it_each_epoch == len(source_train_loader)
    # Phase 1: detector pass (discriminator frozen inside train_detector).
    accumulated_iter_detector = train_detector(
        model, model_func, optimizer_detector, lr_scheduler_detector,
        source_train_loader, sample_train_loader,
        dataloader_iter_src, dataloader_iter_sample,
        dist_train, optim_cfg, rank, total_it_each_epoch,
        accumulated_iter_detector, tb_log, tbar,
    )
    # Phase 2: short discriminator pass (2 iterations, detector frozen).
    accumulated_iter_discriminator = train_discriminator(
        model, optimizer_discriminator, lr_scheduler_discriminator,
        source_train_loader, target_train_loader,
        dataloader_iter_src, dataloader_iter_tar,
        dist_train, 2, accumulated_iter_discriminator,
        optim_cfg.DISCRIMINATOR, tb_log, rank, tbar,
    )
    return accumulated_iter_detector, accumulated_iter_discriminator
def train_active_model_source(model, optimizer, source_train_loader, target_train_loader, sample_loader, model_func, lr_scheduler,
                              optim_cfg, total_iters_each_epoch, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                              sample_epoch, source_budget, source_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                              source_sampler=None, target_sampler=None, sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                              max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-learning training driver over the SOURCE domain.

    Each epoch: (1) train the domain discriminator; (2) on epochs listed in
    ``sample_epoch``, score source frames via ``active_evaluate_dual``, pick
    ``source_budget`` frames with ``active_sample``, and rebuild a dataloader
    over the selected frames; (3) train the detector on the sampled loader;
    (4) save a rotating set of at most ``max_ckpt_save_num`` checkpoints.

    ``optimizer`` and ``lr_scheduler`` are pairs: index 0 for the detector,
    index 1 for the discriminator.
    """
    source_list = active_learning_utils.get_dataset_list(source_file_path, oss=True)
    sample_list_source = []
    sample_train_loader = None
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    source_name = cfg['DATA_CONFIG']['DATASET']
    accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator = start_iter, start_iter, start_iter
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_iters_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        dataloader_iter_sample = iter(sample_loader)
        for cur_epoch in tbar:
            # Re-shuffle distributed samplers per epoch.
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter_discriminator = train_discriminator(
                model,
                optimizer[1],
                lr_scheduler[1],
                source_train_loader,
                target_train_loader,
                dataloader_iter_src,
                dataloader_iter_tar,
                dist_train,
                len(target_train_loader),
                accumulated_iter_discriminator,
                optim_cfg.DISCRIMINATOR,
                tb_log, rank, tbar
            )
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # sample from source domain
                frame_score = active_learning_utils.active_evaluate_dual(model, source_train_loader, rank, domain='source')
                sampled_frame_id_source, _ = active_learning_utils.active_sample(frame_score, source_budget)
                sample_list_source, info_path_source = active_learning_utils.update_sample_list_dual(sample_list_source, source_list, sampled_frame_id_source,
                                                                                                    cur_epoch, sample_save_path, source_name, rank, domain='source')
                source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG,
                    class_names=cfg.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train,
                    workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path_source,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
            dataloader_iter_tar = iter(target_train_loader)
            # dataloader_iter_sample = iter(sample_train_loader)
            # NOTE(review): source_sample_loader is only assigned inside the
            # sample_epoch branch above — if the first epoch is not in
            # sample_epoch this raises UnboundLocalError; confirm sample_epoch
            # always contains start_epoch.
            dataloader_iter_src_sample = iter(source_sample_loader) if source_sample_loader is not None else None
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer[0],
                lr_scheduler[0],
                source_sample_loader,
                sample_loader,
                dataloader_iter_src_sample,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                total_iters_each_epoch,
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Rotate checkpoints: keep at most max_ckpt_save_num, oldest first.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def train_active_model_source_2(model, optimizer, source_train_loader, target_train_loader, sample_loader, model_func, lr_scheduler,
                                optim_cfg, total_iters_each_epoch, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                                sample_epoch, source_budget, source_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                                source_sampler=None, target_sampler=None, sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Variant of ``train_active_model_source`` that selects frames with
    ``active_sample_source`` instead of ``active_sample``; otherwise the
    per-epoch flow (discriminator pass, active sampling, detector pass,
    rotating checkpoints) is identical.
    """
    source_list = active_learning_utils.get_dataset_list(source_file_path, oss=True)
    sample_list_source = []
    sample_train_loader = None
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    source_name = cfg['DATA_CONFIG']['DATASET']
    accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator = start_iter, start_iter, start_iter
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_iters_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        dataloader_iter_sample = iter(sample_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter_discriminator = train_discriminator(
                model,
                optimizer[1],
                lr_scheduler[1],
                source_train_loader,
                target_train_loader,
                dataloader_iter_src,
                dataloader_iter_tar,
                dist_train,
                len(target_train_loader),
                accumulated_iter_discriminator,
                optim_cfg.DISCRIMINATOR,
                tb_log, rank, tbar
            )
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # sample from source domain
                frame_score = active_learning_utils.active_evaluate_dual(model, source_train_loader, rank, domain='source')
                # Only difference vs. train_active_model_source: the
                # active_sample_source selection strategy.
                sampled_frame_id_source, _ = active_learning_utils.active_sample_source(frame_score, source_budget)
                sample_list_source, info_path_source = active_learning_utils.update_sample_list_dual(sample_list_source, source_list, sampled_frame_id_source,
                                                                                                    cur_epoch, sample_save_path, source_name, rank, domain='source')
                source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG,
                    class_names=cfg.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train,
                    workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path_source,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
            dataloader_iter_tar = iter(target_train_loader)
            # dataloader_iter_sample = iter(sample_train_loader)
            # NOTE(review): source_sample_loader may be unbound if the first
            # epoch is not a sampling epoch — see train_active_model_source.
            dataloader_iter_src_sample = iter(source_sample_loader) if source_sample_loader is not None else None
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer[0],
                lr_scheduler[0],
                source_sample_loader,
                sample_loader,
                dataloader_iter_src_sample,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                total_iters_each_epoch,
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def train_active_model_source_only(model, optimizer, source_train_loader, target_train_loader, sample_loader, model_func, lr_scheduler,
                                   optim_cfg, total_iters_each_epoch, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                                   sample_epoch, source_budget, source_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                                   source_sampler=None, target_sampler=None, sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=5,
                                   max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Source-only active-learning driver.

    Like ``train_active_model_source`` but: (a) handles an
    'ActiveWaymoDataset' source by forwarding its configured SAMPLED_INTERVAL
    to ``get_dataset_list``; (b) runs the discriminator pass only while
    ``cur_epoch <= sample_epoch[-1]``; (c) ``sample_loader`` may be None.
    """
    sample_train_loader = None
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    source_name = cfg['DATA_CONFIG']['DATASET']
    # Waymo lists are subsampled by the dataset's configured frame interval.
    waymo_source = True if source_name == 'ActiveWaymoDataset' else False
    if waymo_source:
        sample_interval = cfg['DATA_CONFIG']['SAMPLED_INTERVAL'].get('train', 1)
    else:
        sample_interval = 1
    source_list = active_learning_utils.get_dataset_list(source_file_path, oss=True, waymo=waymo_source, sample_interval=sample_interval)
    print('source_list % d' % len(source_list))
    sample_list_source = []
    accumulated_iter_detector, accumulated_iter_discriminator = start_iter, start_iter
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_iters_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        dataloader_iter_sample = iter(sample_loader) if sample_loader is not None else None
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # Discriminator is only trained until the last sampling epoch.
            if cur_epoch <= sample_epoch[-1]:
                accumulated_iter_discriminator = train_discriminator(
                    model,
                    optimizer[1],
                    lr_scheduler[1],
                    source_train_loader,
                    target_train_loader,
                    dataloader_iter_src,
                    dataloader_iter_tar,
                    dist_train,
                    len(target_train_loader),
                    accumulated_iter_discriminator,
                    optim_cfg.DISCRIMINATOR,
                    tb_log, rank, tbar
                )
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # sample from source domain
                frame_score = active_learning_utils.active_evaluate_dual(model, source_train_loader, rank, domain='source')
                # sampled_frame_id_source, _ = active_learning_utils.active_sample_source(frame_score, source_budget)
                sampled_frame_id_source, _ = active_learning_utils.active_sample(frame_score, source_budget)
                sample_list_source, info_path_source = active_learning_utils.update_sample_list_dual(sample_list_source, source_list, sampled_frame_id_source,
                                                                                                    cur_epoch, sample_save_path, source_name, rank, domain='source')
                # sample_frame_id_target = active_learning_utils.active_sample_target_2(frame_score_tar)
                # sample_list_target = []
                # target_list = active_learning_utils.get_dataset_list('', oss=True)
                # active_learning_utils.save_sample(frame_score_tar, target_list, sample_save_path)
                # sample_list_target, info_path_target = active_learning_utils.update_sample_list_dual(sample_list_target, target_list, sample_frame_id_target,
                #                                                                cur_epoch, sample_save_path, 'ActiveKittiDataset', rank, domain='target')
                source_sample_set, source_sample_loader, source_sample_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG,
                    class_names=cfg.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train,
                    workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path_source,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
            dataloader_iter_tar = iter(target_train_loader)
            # dataloader_iter_sample = iter(sample_train_loader)
            # NOTE(review): source_sample_loader is only bound inside the
            # sample_epoch branch; confirm sample_epoch covers the first epoch.
            dataloader_iter_src_sample = iter(source_sample_loader) if source_sample_loader is not None else None
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer[0],
                lr_scheduler[0],
                source_sample_loader,
                sample_loader,
                dataloader_iter_src_sample,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                total_iters_each_epoch,
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a CPU-resident copy of *model_state*, keeping its mapping type
    (and therefore key order, e.g. for an OrderedDict state dict)."""
    cpu_state = type(model_state)()
    for key in model_state:
        cpu_state[key] = model_state[key].cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict for ``save_checkpoint``.

    Args:
        model: network to snapshot, or None. A DistributedDataParallel wrapper
            is unwrapped (``model.module``) so keys have no 'module.' prefix,
            and its tensors are moved to CPU for portable loading.
        optimizer: optimizer whose state_dict is stored, or None.
        epoch: epoch counter stored verbatim.
        it: accumulated iteration counter stored verbatim.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state',
        'version' ('pcdet+<version>' if pcdet is importable, else 'none').
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    # Fix: the original bare `except:` also swallowed SystemExit /
    # KeyboardInterrupt; only import/attribute failures are expected here.
    except (ImportError, AttributeError):
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' with ``torch.save``.

    The optimizer state (if present) stays inside the single checkpoint
    file. A legacy code path that split it into a separate
    '<filename>_optim.pth' was permanently disabled with ``if False`` in the
    original; that unreachable branch has been removed.

    Args:
        state: picklable checkpoint dict (see ``checkpoint_state``).
        filename: output path without the '.pth' suffix (str or Path).
    """
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
| 29,688 | 43.31194 | 175 | py |
3DTrans | 3DTrans-master/tools/train_utils/train_st_utils.py | import torch
import os
import glob
import tqdm
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils
from pcdet.utils import self_training_utils
from pcdet.config import cfg
from .train_utils import save_checkpoint, checkpoint_state
def train_one_epoch_st(model, optimizer, source_reader, target_loader, model_func, lr_scheduler,
                       accumulated_iter, optim_cfg, rank, tbar, total_it_each_epoch,
                       dataloader_iter, tb_log=None, leave_pbar=False, ema_model=None):
    """Run one self-training epoch on pseudo-labeled target-domain batches.

    Tracks the self-training loss plus the per-batch counts of positive and
    ignored pseudo boxes (read from ``batch['pos_ps_bbox']`` /
    ``batch['ign_ps_bbox']``), logging them to the progress bars and
    TensorBoard on rank 0. Returns the updated ``accumulated_iter``.
    """
    if total_it_each_epoch == len(target_loader):
        dataloader_iter = iter(target_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    ps_bbox_meter = common_utils.AverageMeter()
    ignore_ps_bbox_meter = common_utils.AverageMeter()
    st_loss_meter = common_utils.AverageMeter()
    disp_dict = {}
    for cur_it in range(total_it_each_epoch):
        lr_scheduler.step(accumulated_iter)
        # NOTE(review): bare except — custom schedulers expose .lr, torch
        # optimizers use param_groups; should be narrowed.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        try:
            target_batch = next(dataloader_iter)
        except StopIteration:
            dataloader_iter = iter(target_loader)
            target_batch = next(dataloader_iter)
            print('new iters')
        # parameters for save pseudo label on the fly
        st_loss, st_tb_dict, st_disp_dict = model_func(model, target_batch)
        st_loss.backward()
        st_loss_meter.update(st_loss.item())
        # count number of used ps bboxes in this batch
        pos_pseudo_bbox = target_batch['pos_ps_bbox'].mean().item()
        ign_pseudo_bbox = target_batch['ign_ps_bbox'].mean().item()
        ps_bbox_meter.update(pos_pseudo_bbox)
        ignore_ps_bbox_meter.update(ign_pseudo_bbox)
        st_tb_dict = common_utils.add_prefix_to_dict(st_tb_dict, 'st_')
        disp_dict.update(common_utils.add_prefix_to_dict(st_disp_dict, 'st_'))
        disp_dict.update({'st_loss': "{:.3f}({:.3f})".format(st_loss_meter.val, st_loss_meter.avg),
                          'pos_ps_box': ps_bbox_meter.avg,
                          'ign_ps_box': ignore_ps_bbox_meter.avg})
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        # log to console and tensorboard
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter, pos_ps_box=ps_bbox_meter.val,
                                  ign_ps_box=ignore_ps_bbox_meter.val))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                tb_log.add_scalar('train/st_loss', st_loss, accumulated_iter)
                tb_log.add_scalar('train/pos_ps_bbox', ps_bbox_meter.val, accumulated_iter)
                tb_log.add_scalar('train/ign_ps_bbox', ignore_ps_bbox_meter.val, accumulated_iter)
                for key, val in st_tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
        # NOTE(review): unlike the per-iteration logging above, this final
        # block uses tb_log without a None check — would raise if rank 0 is
        # ever called with tb_log=None; confirm callers always pass a logger.
        tb_log.add_scalar('train/epoch_ign_ps_box', ignore_ps_bbox_meter.avg, accumulated_iter)
        tb_log.add_scalar('train/epoch_pos_ps_box', ps_bbox_meter.avg, accumulated_iter)
    return accumulated_iter
def train_model_st(model, optimizer, source_loader, target_loader, model_func, lr_scheduler, optim_cfg,
                   start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir,
                   source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                   max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Self-training driver: per epoch, optionally regenerate pseudo labels
    and update curriculum (progressive) data augmentation, then run
    ``train_one_epoch_st`` and save a rotating set of checkpoints.

    Resume support: reloads an existing pseudo-label pickle for
    ``start_epoch`` and replays the PROG_AUG schedule up to ``start_epoch``.
    """
    accumulated_iter = start_iter
    source_reader = common_utils.DataReader(source_loader, source_sampler)
    source_reader.construct_iter()
    # for continue training.
    # if already exist generated pseudo label result
    ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
    if ps_pkl is not None:
        logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
    # for continue training
    if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
            start_epoch > 0:
        # Replay every scheduled augmentation bump that would have happened
        # before start_epoch so the augmentor state matches a fresh run.
        for cur_epoch in range(start_epoch):
            if cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG:
                aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
                print ("***********update AUG times for continue training:**********", aug_times)
                target_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG', None) else None,
                    intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(target_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(target_loader.dataset, 'merge_all_iters_to_one_epoch')
            target_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(target_loader) // max(total_epochs, 1)
        dataloader_iter = iter(target_loader)
        for cur_epoch in tbar:
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            source_reader.set_cur_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # update pseudo label
            if (cur_epoch in cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL) or \
                    ((cur_epoch % cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL_INTERVAL == 0)
                     and cur_epoch != 0):
                # Switch the dataset to eval mode while generating labels,
                # then back to train mode for the training pass.
                target_loader.dataset.eval()
                print ("***********update pseudo label**********")
                self_training_utils.save_pseudo_label_epoch(
                    model, target_loader, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                target_loader.dataset.train()
            # curriculum data augmentation
            if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
                    (cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG):
                aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
                print ("***********update AUG times:**********", aug_times)
                target_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG', None) else None,
                    intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
            accumulated_iter = train_one_epoch_st(
                model, optimizer, source_reader, target_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter=dataloader_iter, ema_model=ema_model
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Rotate checkpoints: keep at most max_ckpt_save_num, oldest removed.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                state = checkpoint_state(model, optimizer, trained_epoch, accumulated_iter)
                save_checkpoint(state, filename=ckpt_name)
3DTrans | 3DTrans-master/tools/train_utils/train_utils.py | import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
    """Standard supervised training loop for one epoch.

    Per step: fetch a batch (restarting the iterator on exhaustion), step the
    LR scheduler, forward/backward via ``model_func``, clip gradients, and
    step the optimizer. Timing and loss statistics are reduced across workers
    and logged on rank 0. Returns the updated ``accumulated_iter``.
    """
    if total_it_each_epoch == len(train_loader):
        dataloader_iter = iter(train_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch = next(dataloader_iter)
        except StopIteration:
            dataloader_iter = iter(train_loader)
            batch = next(dataloader_iter)
            print('new iters')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter)
        # NOTE(review): bare except — custom schedulers expose .lr, torch
        # optimizers use param_groups; should be narrowed.
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        loss, tb_dict, disp_dict = model_func(model, batch)
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                source_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False):
    """Top-level training driver.

    Runs ``train_one_epoch`` for each epoch in ``[start_epoch, total_epochs)``,
    using ``lr_warmup_scheduler`` for the first ``optim_cfg.WARMUP_EPOCH``
    epochs, and (on rank 0) saves a checkpoint every ``ckpt_save_interval``
    epochs, pruning the oldest files so at most ``max_ckpt_save_num``
    checkpoints remain on disk.

    The dataloader iterator is created once and shared across epochs so that
    ``merge_all_iters_to_one_epoch`` mode streams through the merged dataset
    without restarting it at epoch boundaries.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_loader) // max(total_epochs, 1)
        dataloader_iter = iter(train_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                # keep distributed shuffling deterministic per epoch
                source_sampler.set_epoch(cur_epoch)
            # train one epoch (warmup scheduler for the first WARMUP_EPOCH epochs)
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch(
                model, optimizer, train_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter=dataloader_iter
            )
            # save trained model (rank 0 only), pruning oldest checkpoints first
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if len(ckpt_list) >= max_ckpt_save_num:
                    # remove enough of the oldest files that, after saving the
                    # new checkpoint, at most max_ckpt_save_num remain
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, same key order)
    with every tensor value moved to the CPU."""
    cpu_state = type(model_state)()  # preserves e.g. collections.OrderedDict
    for name, value in model_state.items():
        cpu_state[name] = value.cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: optional nn.Module (a DistributedDataParallel wrapper is
            unwrapped and its state moved to CPU via ``model_state_to_cpu``).
        optimizer: optional torch optimizer whose ``state_dict`` is stored.
        epoch: epoch counter to record.
        it: accumulated-iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state',
        'version' ('pcdet+<ver>' when pcdet is importable, else 'none').
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:  # pcdet not installed or lacks __version__ (was a bare except)
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to ``<filename>.pth`` with ``torch.save``.

    Args:
        state: picklable checkpoint dict (see ``checkpoint_state``).
        filename: path prefix; the '.pth' suffix is appended here.
    """
    # The original code contained an `if False and 'optimizer_state' in state:`
    # branch that split the optimizer state into a separate *_optim.pth file;
    # it was permanently disabled dead code and has been removed.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
3DTrans | 3DTrans-master/tools/train_utils/train_active_target_utils.py | import glob
import os
import pickle
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
from pcdet.utils import active_learning_utils
from pcdet.datasets import build_dataloader_ada
from pcdet.models import load_data_to_gpu
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detection branch for one epoch on source + actively-sampled data.

    The domain discriminator is frozen so only detector parameters update.
    Each iteration draws one batch from ``source_loader`` and one from
    ``sample_loader`` (iterators are re-created when exhausted) and minimizes
    ``loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam``.

    Returns:
        The updated ``accumulated_iter_detector`` counter.
    """
    model.train()
    # Unfreeze the detector, freeze the domain discriminator.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:  # plain torch optimizers expose lr only via param_groups
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # average timing stats across distributed workers
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader,
                        source_loader_iter, target_loader_iter, dist_train, total_it_each_epoch,
                        accumulated_iter_discriminator, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Train only the domain discriminator for one epoch.

    All detector parameters are frozen; each iteration runs one source batch
    (``source=True``) and one target batch (``source=False``) through the
    model in 'train_discriminator' mode and minimizes the mean of the two
    losses.

    Returns:
        The updated ``accumulated_iter_discriminator`` counter.
    """
    model.train()
    # Freeze the detector, unfreeze the domain discriminator.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:  # plain torch optimizers expose lr only via param_groups
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        loss = (loss_src + loss_tar) / 2
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        # average timing stats across distributed workers
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def train_active_model_dual_tar(model, optimizer, source_train_loader, target_train_loader, source_sample_loader, model_func,
                                lr_scheduler, optim_cfg, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                                sample_epoch, annotation_budget, target_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                                source_sampler=None, target_sampler=None, source_sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active domain-adaptation driver with a dual detector/discriminator model.

    ``optimizer`` and ``lr_scheduler`` are pairs: index 0 drives the detector,
    index 1 the discriminator. On each epoch listed in ``sample_epoch`` the
    discriminator is trained, unlabelled target frames are scored with it,
    ``annotation_budget`` worth of the most informative frames is moved from
    the target pool to the sampled (annotated) pool, and both dataloaders are
    rebuilt. Every epoch then trains the detector on source + sampled data and
    (rank 0 only) saves a checkpoint, pruning old files beyond
    ``max_ckpt_save_num``.
    """
    accumulated_iter_detector, accumulated_iter_discriminator = start_iter, start_iter
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    # whether the target data lives on OSS object storage
    oss = cfg.DATA_CONFIG_TAR.get('OSS_PATH', None) is not None
    target_list = active_learning_utils.get_target_list(target_file_path, oss=oss)
    sample_list = []
    sample_train_loader = None
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(source_train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # NOTE(review): cur_scheduler is computed but never passed on —
            # train_detector below always receives lr_scheduler[0]; kept as-is.
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            if cur_epoch in sample_epoch:
                # 1) bring the domain discriminator up to date
                accumulated_iter_discriminator = train_discriminator(
                    model,
                    optimizer[1],
                    lr_scheduler[1],
                    source_train_loader,
                    target_train_loader,
                    dataloader_iter_src,
                    dataloader_iter_tar,
                    dist_train,
                    len(target_train_loader),
                    accumulated_iter_discriminator,
                    optim_cfg.DISCRIMINATOR,
                    tb_log, rank, tbar
                )
            if cur_epoch in sample_epoch:
                # 2) score target frames, spend the annotation budget, and
                #    rebuild the sampled/target dataloaders accordingly
                frame_score = active_learning_utils.active_evaluate_dual(model, target_train_loader, rank, domain='target')
                sampled_frame_id, _ = active_learning_utils.active_sample_tar(frame_score, budget=annotation_budget, logger=logger)
                sample_list, info_path = active_learning_utils.update_sample_list_dual(
                    sample_list, target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank, domain='target'
                )
                target_list, target_info_path = active_learning_utils.update_target_list(target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank)
                sample_train_set, sample_train_loader, sample_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_SAMPLE,
                    class_names=cfg.DATA_CONFIG_SAMPLE.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                target_train_set, target_train_loader, target_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_TAR,
                    class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=target_info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                dataloader_iter_tar = iter(target_train_loader)
            # 3) train the detector on source + sampled data
            dataloader_iter_sample = iter(sample_train_loader) if sample_train_loader is not None else None
            dataloader_iter_src_sample = iter(source_sample_loader)
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer[0],
                lr_scheduler[0],
                source_sample_loader,
                sample_train_loader,
                dataloader_iter_src_sample,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                len(source_sample_loader),
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model (rank 0 only), pruning oldest checkpoints first
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if len(ckpt_list) >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Build a new mapping of the same type as *model_state* whose tensor
    values have all been moved to the CPU (key order preserved)."""
    result = type(model_state)()  # typically collections.OrderedDict
    for param_name, param_tensor in model_state.items():
        result[param_name] = param_tensor.cpu()
    return result
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: optional nn.Module (a DistributedDataParallel wrapper is
            unwrapped and its state moved to CPU via ``model_state_to_cpu``).
        optimizer: optional torch optimizer whose ``state_dict`` is stored.
        epoch: epoch counter to record.
        it: accumulated-iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state',
        'version' ('pcdet+<ver>' when pcdet is importable, else 'none').
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:  # pcdet not installed or lacks __version__ (was a bare except)
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to ``<filename>.pth`` with ``torch.save``.

    Args:
        state: picklable checkpoint dict (see ``checkpoint_state``).
        filename: path prefix; the '.pth' suffix is appended here.
    """
    # The original code contained an `if False and 'optimizer_state' in state:`
    # branch that split the optimizer state into a separate *_optim.pth file;
    # it was permanently disabled dead code and has been removed.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
3DTrans | 3DTrans-master/tools/train_utils/active_with_st3d_utils.py | import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
from pcdet.utils import active_learning_utils, self_training_utils
from pcdet.datasets import build_dataloader_ada
from pcdet.models import load_data_to_gpu
def train_detector_st3d(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, target_loader, source_loader_iter, sample_loader_iter,
                        target_loader_iter, dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector for one epoch on source data plus a mix of
    actively-annotated samples and pseudo-labelled target frames (ST3D style).

    The first ``len(sample_loader)`` iterations pair each source batch with an
    annotated sample batch; the remaining iterations pair it with a target
    batch. The discriminator is frozen; the loss is
    ``loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam``.

    Returns:
        The updated ``accumulated_iter_detector`` counter.
    """
    # NOTE(review): the caller-supplied total_it_each_epoch is deliberately
    # overridden so one epoch covers both the sample and target loaders once.
    total_it_each_epoch = len(sample_loader) + len(target_loader)
    model.train()
    # Unfreeze the detector, freeze the domain discriminator.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # annotated samples first, then pseudo-labelled target batches
        if cur_it < len(sample_loader):
            try:
                batch_sample = next(sample_loader_iter)
            except StopIteration:
                sample_loader_iter = iter(sample_loader)
                batch_sample = next(sample_loader_iter)
                print('new sample iter')
        else:
            try:
                batch_sample = next(target_loader_iter)
            except StopIteration:
                target_loader_iter = iter(target_loader)
                batch_sample = next(target_loader_iter)
                print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:  # plain torch optimizers expose lr only via param_groups
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # average timing stats across distributed workers
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_detector_st3d_1(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, target_loader, source_loader_iter, sample_loader_iter,
                          target_loader_iter, dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Variant of ``train_detector_st3d`` that skips the source loader.

    Each of the ``len(target_loader)`` iterations pairs one pseudo-labelled
    target batch with one annotated sample batch and minimizes
    ``loss_tar + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam``. The discriminator
    is frozen.

    Returns:
        The updated ``accumulated_iter_detector`` counter.
    """
    # NOTE(review): the caller-supplied total_it_each_epoch is deliberately
    # overridden so one epoch covers the target loader exactly once.
    total_it_each_epoch = len(target_loader)
    model.train()
    # Unfreeze the detector, freeze the domain discriminator.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:  # plain torch optimizers expose lr only via param_groups
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_tar, tb_dict_tar, disp_dict = model_func(model, batch_tar, **forward_args)
        loss = loss_tar + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # average timing stats across distributed workers
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                # NOTE(review): target-branch metrics are written under the
                # 'train/detector_src' prefix; key kept for dashboard
                # compatibility even though the name is misleading.
                for key, val in tb_dict_tar.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_detector_st3d_2(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, target_loader, source_loader_iter, sample_loader_iter,
                          target_loader_iter, dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Variant of ``train_detector_st3d`` that uses all three data streams
    every iteration: one source batch, one annotated sample batch and one
    pseudo-labelled target batch, minimizing
    ``loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam + loss_tar``.

    Returns:
        The updated ``accumulated_iter_detector`` counter.
    """
    # NOTE(review): the caller-supplied total_it_each_epoch is deliberately
    # overridden so one epoch covers both the sample and target loaders once.
    total_it_each_epoch = len(sample_loader) + len(target_loader)
    model.train()
    # Unfreeze the detector, freeze the domain discriminator.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:  # plain torch optimizers expose lr only via param_groups
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_tar, tb_dict_tar, disp_dict = model_func(model, batch_tar, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam + loss_tar
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # average timing stats across distributed workers
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only); target metrics are not
        # logged separately here, matching the original behavior
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader,
                        source_loader_iter, target_loader_iter, dist_train, total_it_each_epoch,
                        accumulated_iter_discriminator, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Train only the domain discriminator for one epoch.

    All detector parameters are frozen; each iteration runs one source batch
    (``source=True``) and one target batch (``source=False``) through the
    model in 'train_discriminator' mode and minimizes the mean of the two
    losses.

    Returns:
        The updated ``accumulated_iter_discriminator`` counter.
    """
    model.train()
    # Freeze the detector, unfreeze the domain discriminator.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:  # plain torch optimizers expose lr only via param_groups
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        loss = (loss_src + loss_tar) / 2
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        # average timing stats across distributed workers
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detection branch for one epoch on source + actively-sampled data.

    The domain discriminator is frozen so only detector parameters update.
    Each iteration draws one batch from ``source_loader`` and one from
    ``sample_loader`` (iterators are re-created when exhausted) and minimizes
    ``loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam``.

    Returns:
        The updated ``accumulated_iter_detector`` counter.
    """
    model.train()
    # Unfreeze the detector, freeze the domain discriminator.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
    data_time = common_utils.AverageMeter()
    batch_time = common_utils.AverageMeter()
    forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:  # plain torch optimizers expose lr only via param_groups
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # average timing stats across distributed workers
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard (rank 0 only)
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_active_with_st3d(model, optimizer, source_train_loader, target_train_loader, source_sample_loader, model_func,
                           lr_scheduler, optim_cfg, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                           sample_epoch, annotation_budget, target_file_path, sample_save_path, ps_label_dir, cfg, batch_size, workers, dist_train,
                           source_sampler=None, target_sampler=None, source_sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                           max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-domain-adaptation training loop combined with ST3D-style self-training.

    Per epoch this:
      1. refreshes target pseudo labels at the hard-coded epochs 20/30/40;
      2. on epochs listed in `sample_epoch`, trains the domain discriminator,
         actively scores/samples target frames within `annotation_budget`,
         and rebuilds the sample/target dataloaders on the updated splits;
      3. trains the detector (plain supervised training before epoch 20,
         `train_detector_st3d_2` self-training afterwards);
      4. saves a rolling checkpoint (at most `max_ckpt_save_num` kept).

    `optimizer` and `lr_scheduler` are 2-element sequences: index 0 drives the
    detector, index 1 the discriminator. Returns None; progress is recorded
    via `tb_log` and checkpoints under `ckpt_save_dir`.
    """
    # Both training phases resume from the same global iteration counter.
    accumulated_iter_detector, accumulated_iter_discriminator = start_iter, start_iter
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    # Pool of still-unlabeled target frames; entries migrate into `sample_list`
    # as the annotation budget is spent on sampling epochs.
    target_list = active_learning_utils.get_target_list(target_file_path, oss=True)
    sample_list = []
    sample_train_loader = None
    # Resume support: reuse pseudo labels produced by an earlier run, if any.
    ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
    if ps_pkl is not None:
        logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
    # Resume support: replay the curriculum ("progressive") augmentation
    # updates that would have fired during the skipped epochs.
    if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
            start_epoch > 0:
        for cur_epoch in range(start_epoch):
            if cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG:
                aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
                print ("***********update AUG times for continue training:**********", aug_times)
                target_train_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG', None) else None,
                    intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(source_train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # NOTE(review): `cur_scheduler` is selected but never used below —
            # the phase trainers receive lr_scheduler[0]/[1] directly, so the
            # warmup scheduler appears to be dead; verify intent.
            # Hard-coded pseudo-label refresh schedule (epochs 20, 30, 40).
            if cur_epoch in [20, 30, 40]:
                cfg.DATA_CONFIG_TAR.USE_PSEUDO_LABEL = True
                # eval() disables training-time augmentation while labels are generated.
                target_train_loader.dataset.eval()
                print("***********update pseudo label**********")
                self_training_utils.save_pseudo_label_epoch(
                    model, target_train_loader, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                target_train_loader.dataset.train()
            commu_utils.synchronize()
            # On sampling epochs: first refresh the domain discriminator ...
            if cur_epoch in sample_epoch:
                accumulated_iter_discriminator = train_discriminator(
                    model,
                    optimizer[1],
                    lr_scheduler[1],
                    source_train_loader,
                    target_train_loader,
                    dataloader_iter_src,
                    dataloader_iter_tar,
                    dist_train,
                    len(target_train_loader),
                    accumulated_iter_discriminator,
                    optim_cfg.DISCRIMINATOR,
                    tb_log, rank, tbar
                )
            # ... then score target frames, spend the annotation budget, and
            # rebuild the sample/target dataloaders on the updated splits.
            if cur_epoch in sample_epoch:
                frame_score = active_learning_utils.active_evaluate_dual(model, target_train_loader, rank, domain='target')
                sampled_frame_id, _ = active_learning_utils.active_sample_tar(frame_score, budget=annotation_budget, logger=logger)
                sample_list, info_path = active_learning_utils.update_sample_list_dual(
                    sample_list, target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank, domain='target'
                )
                target_list, target_info_path = active_learning_utils.update_target_list(target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank)
                sample_train_set, sample_train_loader, sample_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_SAMPLE,
                    class_names=cfg.DATA_CONFIG_SAMPLE.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                target_train_set, target_train_loader, target_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_TAR,
                    class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=target_info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                # Target loader was replaced — restart its iterator.
                dataloader_iter_tar = iter(target_train_loader)
            # Fresh per-epoch iterators over the (possibly rebuilt) sample set
            # and the source subset. `dataloader_iter_sample` stays None until
            # the first sampling epoch has produced a sample loader.
            dataloader_iter_sample = iter(sample_train_loader) if sample_train_loader is not None else None
            dataloader_iter_src_sample = iter(source_sample_loader)
            # if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
            #     (cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG):
            #     aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
            #     print ("***********update AUG times:**********", aug_times)
            #     target_train_loader.dataset.data_augmentor.re_prepare(
            #         augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG', None) else None,
            #         intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
            # Warm-up phase (< epoch 20): supervised detector training only;
            # afterwards switch to the ST3D self-training variant that also
            # consumes pseudo-labeled target batches.
            if cur_epoch < 20:
                accumulated_iter_detector = train_detector(
                    model,
                    model_func,
                    optimizer[0],
                    lr_scheduler[0],
                    source_sample_loader,
                    sample_train_loader,
                    dataloader_iter_src_sample,
                    dataloader_iter_sample,
                    dist_train,
                    optim_cfg,
                    rank,
                    len(source_sample_loader),
                    accumulated_iter_detector,
                    tb_log, tbar
                )
            else:
                accumulated_iter_detector = train_detector_st3d_2(
                    model,
                    model_func,
                    optimizer[0],
                    lr_scheduler[0],
                    source_sample_loader,
                    sample_train_loader,
                    target_train_loader,
                    dataloader_iter_src_sample,
                    dataloader_iter_sample,
                    dataloader_iter_tar,
                    dist_train,
                    optim_cfg,
                    rank,
                    len(sample_train_loader) + len(target_train_loader),
                    accumulated_iter_detector,
                    tb_log, tbar
                )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Rolling window of checkpoints: drop the oldest (by mtime)
                # so that at most `max_ckpt_save_num` remain after this save.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type) with every tensor moved to CPU."""
    # Instantiate the same mapping class (typically an OrderedDict) so that
    # key order and type survive the copy.
    cpu_state = type(model_state)()
    cpu_state.update((name, tensor.cpu()) for name, tensor in model_state.items())
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dictionary.

    Args:
        model: optional network; a DistributedDataParallel wrapper is unwrapped
            via `.module` and its weights are moved to CPU first.
        optimizer: optional optimizer whose state_dict is embedded.
        epoch: optional epoch counter to record.
        it: optional accumulated iteration counter to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # DDP wraps the real network in .module; move weights to CPU so the
            # checkpoint can be loaded regardless of device availability.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:  # fix: bare except also swallowed SystemExit/KeyboardInterrupt
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save."""
    # Branch kept for parity with the original but deliberately disabled
    # (`if False`): it would split the optimizer state into a separate
    # '<filename>_optim.pth' file instead of bundling it.
    if False and 'optimizer_state' in state:
        optimizer_state = state.pop('optimizer_state', None)
        torch.save({'optimizer_state': optimizer_state}, '{}_optim.pth'.format(filename))
    torch.save(state, '{}.pth'.format(filename))
3DTrans | 3DTrans-master/tools/train_utils/train_semi_utils.py | import glob
import os
import math
import torch
import tqdm
from torch.nn.utils import clip_grad_norm_
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, cur_epoch, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
    """Run `total_it_each_epoch` optimization steps and return the updated
    global iteration counter.

    Each step: advance the LR schedule, optionally attach a decayed
    radius-search temperature to the batch, forward via `model_func`,
    backward, clip gradients, and step the optimizer. Rank 0 additionally
    drives the tqdm bars and TensorBoard logging.
    """
    # When the epoch covers the whole loader, start from a fresh iterator;
    # otherwise keep consuming the shared one (merged-epochs mode).
    if total_it_each_epoch == len(train_loader):
        dataloader_iter = iter(train_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    for cur_it in range(total_it_each_epoch):
        try:
            batch = next(dataloader_iter)
        except StopIteration:
            # Loader exhausted mid-epoch (merged-epochs mode): restart it.
            dataloader_iter = iter(train_loader)
            batch = next(dataloader_iter)
            print('new iters')
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # NOTE(review): bare except — intended to fall back for optimizers
            # without a flat `.lr` attribute (standard torch optimizers).
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        # use temperature for radius search
        if optim_cfg.get('USE_TEMPERATURE', False):
            cur_temperature = calculate_temperature_decay(cur_epoch, optim_cfg)
            batch['temperature'] = cur_temperature
        loss, tb_dict, disp_dict = model_func(model, batch)
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        disp_dict.update({'loss': loss.item(), 'lr': cur_lr})
        # log to console and tensorboard
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, train_sampler=None,
                lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
                merge_all_iters_to_one_epoch=False):
    """Top-level training driver: loop epochs [start_epoch, total_epochs),
    delegating each epoch to `train_one_epoch` and periodically saving a
    rolling window of at most `max_ckpt_save_num` checkpoints (rank 0 only).
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_loader)
        if merge_all_iters_to_one_epoch:
            # Merged mode: the dataset presents all epochs as one long pass,
            # so split the loader length evenly across epochs.
            assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_loader) // max(total_epochs, 1)
        dataloader_iter = iter(train_loader)
        for cur_epoch in tbar:
            if train_sampler is not None:
                # Reshuffle the distributed sampler deterministically per epoch.
                train_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch(
                model, optimizer, train_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter,
                cur_epoch = cur_epoch,
                optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter=dataloader_iter
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Keep only the newest checkpoints (sorted by mtime).
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Copy *model_state* into a new mapping of the same concrete type,
    with every value moved to CPU."""
    cpu_copy = type(model_state)()  # same class, usually an OrderedDict
    for name in model_state:
        cpu_copy[name] = model_state[name].cpu()
    return cpu_copy
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Build a checkpoint dict suitable for torch.save.

    Args:
        model: optional network; DDP wrappers are unwrapped (`.module`) and
            weights moved to CPU for device-independent loading.
        optimizer: optional optimizer whose state_dict is stored.
        epoch: optional epoch counter.
        it: optional accumulated iteration counter.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except Exception:  # fix: bare except also swallowed SystemExit/KeyboardInterrupt
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Write *state* to '<filename>.pth' with torch.save."""
    # Deliberately disabled (`if False`) alternative: store the optimizer
    # state in its own '<filename>_optim.pth' file rather than inline.
    if False and 'optimizer_state' in state:
        optimizer_state = state.pop('optimizer_state', None)
        torch.save({'optimizer_state': optimizer_state}, '{}_optim.pth'.format(filename))
    torch.save(state, '{}.pth'.format(filename))
def calculate_temperature_decay(cur_epoch, optim_cfg):
    """Exponentially interpolate the radius-search temperature for *cur_epoch*.

    The temperature decays from DECAY_TEMPERATURE[0] at DECAY_EPOCH[0] down to
    DECAY_TEMPERATURE[1] at DECAY_EPOCH[1]; outside that window it is clamped
    to the nearer endpoint. Only DECAY_MODE == 'exp' is supported; any other
    mode raises NotImplementedError.
    """
    first_epoch, last_epoch = optim_cfg.DECAY_EPOCH
    temp_hi, temp_lo = optim_cfg.DECAY_TEMPERATURE
    if optim_cfg.DECAY_MODE != 'exp':
        raise NotImplementedError
    if cur_epoch < first_epoch:
        return temp_hi
    if cur_epoch > last_epoch:
        return temp_lo
    # Linear interpolation in log space == exponential decay in temperature space.
    rate = math.log(temp_lo / temp_hi) / (last_epoch - first_epoch)
    return temp_hi * math.exp(rate * (cur_epoch - first_epoch))
| 6,704 | 37.757225 | 117 | py |
3DTrans | 3DTrans-master/tools/train_utils/train_active_utils.py | from dis import dis
import glob
import os
import pickle
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils, self_training_utils
from pcdet.models import load_data_to_gpu
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.utils import active_learning_utils
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector for one epoch on paired source + actively-sampled batches.

    The dual classifier heads (conv_cls1/conv_cls2) and the domain
    discriminator are frozen; every other parameter trains. Each iteration
    draws one source batch and one sample batch and minimizes
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam.
    Returns the updated accumulated iteration counter.
    """
    model.train()
    # Freeze the parts trained in their own dedicated phases
    # (classifier heads and discriminator). DDP hides the net behind `.module`.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.dense_head.conv_cls1.parameters():
            p.requires_grad = False
        for p in model.module.dense_head.conv_cls2.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.dense_head.conv_cls1.parameters():
            p.requires_grad = False
        for p in model.dense_head.conv_cls2.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = False
    # Progress bar and timing meters live on rank 0 only.
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            # Source loader exhausted mid-epoch: restart it.
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # NOTE(review): bare except — fallback for standard torch
            # optimizers that expose lr only via param_groups.
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        # Two forward passes share the 'train_detector' mode; disp_dict from
        # the sample pass is overwritten by the source pass.
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        # Average timings across ranks so rank 0 reports cluster-wide numbers.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader,
                        source_loader_iter, target_loader_iter, dist_train, total_it_each_epoch,
                        accumulated_iter_discriminator, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Train only the domain discriminator for `total_it_each_epoch` steps.

    All detector parameters are frozen; each step draws one source and one
    target batch, runs the model in 'train_discriminator' mode on each, and
    minimizes the mean of the two discriminator losses. Returns the updated
    accumulated iteration counter.
    """
    model.train()
    # Freeze everything except the discriminator itself.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    # Progress bar and timing meters live on rank 0 only.
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # NOTE(review): bare except — fallback for standard torch
            # optimizers that expose lr only via param_groups.
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        # Source pass: discriminator sees 'source' domain labels.
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        # Target pass: discriminator sees 'target' domain labels.
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        loss = (loss_src + loss_tar) / 2
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        # Average timings across ranks for cluster-wide reporting.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def train_multi_classifier(model, optimizer, lr_scheduler, source_loader, sample_loader,
                           source_loader_iter, sample_loader_iter, dist_train, total_it_each_epoch,
                           accumulated_iter_mul_cls, optim_cfg, tb_log, rank,
                           tbar, leave_pbar=False):
    """Train only the two classification heads (conv_cls1/conv_cls2).

    The rest of the network is frozen. Each step draws one source batch and
    one actively-sampled batch, runs both in 'train_mul_cls' mode (with
    distance='max'), and minimizes the mean of the two losses. Returns the
    updated accumulated iteration counter.
    """
    model.train()
    # Freeze everything except the dual classifier heads.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.dense_head.conv_cls1.parameters():
            p.requires_grad = True
        for p in model.module.dense_head.conv_cls2.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.dense_head.conv_cls1.parameters():
            p.requires_grad = True
        for p in model.dense_head.conv_cls2.parameters():
            p.requires_grad = True
    # Progress bar and timing meters live on rank 0 only.
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_mul_cls', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_mul_cls',
        'distance': 'max'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_sam = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sam = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_mul_cls)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # NOTE(review): bare except — fallback for standard torch
            # optimizers that expose lr only via param_groups.
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_mul_cls)
        optimizer.zero_grad()
        # Forward each batch directly through the model (not model_func);
        # disp_dict from the source pass is overwritten by the sample pass.
        load_data_to_gpu(batch_src)
        ret_src, tb_dict_src, disp_dict = model(batch_src, **forward_args)
        load_data_to_gpu(batch_sam)
        ret_sam, tb_dict_sam, disp_dict = model(batch_sam, **forward_args)
        loss_src = ret_src['loss'].mean()
        loss_sam = ret_sam['loss'].mean()
        loss = (loss_src + loss_sam) / 2
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_mul_cls += 1
        cur_batch_time = time.time() - end
        # Average timings across ranks for cluster-wide reporting.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_mul_cls': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_mul_cls))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_mul_cls', loss, accumulated_iter_mul_cls)
                tb_log.add_scalar('meta_data/learning_rate_mul_cls', cur_lr, accumulated_iter_mul_cls)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/mul_cls_src_' + key, val, accumulated_iter_mul_cls)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/mul_cls_sam_' + key, val, accumulated_iter_mul_cls)
    if rank == 0:
        pbar.close()
    return accumulated_iter_mul_cls
def train_one_epoch(model, optimizer_detector, optimizer_mul_cls, optimizer_discriminator,
                    source_train_loader, target_train_loader, sample_train_loader, model_func,
                    lr_scheduler_detector, lr_scheduler_mul_cls, lr_scheduler_discriminator,
                    accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator,
                    optim_cfg, rank, tbar, dist_train, total_it_each_epoch,
                    dataloader_iter_src, dataloader_iter_tar, dataloader_iter_sample,
                    tb_log=None, leave_pbar=False, ema_model=None):
    """Run one epoch of the three-stage schedule and return the three updated
    iteration counters.

    Stage 1 trains the detector over the full epoch; stages 2 and 3 briefly
    (2 iterations each) refresh the dual classifier heads and the domain
    discriminator respectively.
    """
    # Stage 1: detector over the whole epoch.
    accumulated_iter_detector = train_detector(
        model, model_func, optimizer_detector, lr_scheduler_detector,
        source_train_loader, sample_train_loader,
        dataloader_iter_src, dataloader_iter_sample,
        dist_train, optim_cfg, rank, total_it_each_epoch,
        accumulated_iter_detector, tb_log, tbar,
    )
    # Stage 2: dual classifier heads, 2 iterations.
    accumulated_iter_mul_cls = train_multi_classifier(
        model, optimizer_mul_cls, lr_scheduler_mul_cls,
        source_train_loader, sample_train_loader,
        dataloader_iter_src, dataloader_iter_sample,
        dist_train, 2, accumulated_iter_mul_cls,
        optim_cfg.MUL_CLS, tb_log, rank, tbar,
    )
    # Stage 3: domain discriminator, 2 iterations.
    accumulated_iter_discriminator = train_discriminator(
        model, optimizer_discriminator, lr_scheduler_discriminator,
        source_train_loader, target_train_loader,
        dataloader_iter_src, dataloader_iter_tar,
        dist_train, 2, accumulated_iter_discriminator,
        optim_cfg.DISCRIMINATOR, tb_log, rank, tbar,
    )
    return accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator
def train_active_model_ps_label(model, optimizer, source_train_loader, target_train_loader, model_func, lr_scheduler, optim_cfg,
                                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir, sample_epoch,
                                annotation_budget, target_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                                source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-learning training driver with pseudo-label self-training.

    Per epoch: optionally refresh target pseudo labels (per SELF_TRAIN
    schedule), optionally step curriculum augmentation, on `sample_epoch`
    epochs actively score/sample target frames within `annotation_budget`
    and rebuild the sample/target dataloaders, then run the three-stage
    `train_one_epoch` (detector / dual classifiers / discriminator) and save
    a rolling checkpoint (rank 0, at most `max_ckpt_save_num` kept).

    `optimizer` and `lr_scheduler` are 3-element sequences ordered as
    (detector, multi-classifier, discriminator). Returns None.
    """
    oss = True if cfg['DATA_CONFIG_TAR'].get('OSS_PATH', None) is not None else False
    # Pool of still-unlabeled target frames; entries migrate into
    # `sample_list` as the annotation budget is spent.
    target_list = active_learning_utils.get_target_list(target_file_path, oss)
    sample_list = []
    sample_train_loader = None
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    # All three phases resume from the same global iteration counter.
    accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator = start_iter, start_iter, start_iter
    # NOTE(review): `source_reader` is built but not visibly used below —
    # the code iterates `dataloader_iter_src` directly; verify it is needed.
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    # for continue training.
    # if already exist generated pseudo label result
    ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
    if ps_pkl is not None:
        logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
    # for continue training
    if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
            start_epoch > 0:
        # Replay curriculum-augmentation updates for the skipped epochs.
        for cur_epoch in range(start_epoch):
            if cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG:
                aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
                print("***********update AUG times for continue training:**********", aug_times)
                target_train_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG',
                                                                                                   None) else None,
                    intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(source_train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            # update pseudo label
            if (cur_epoch in cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL) or \
                    ((cur_epoch % cfg.SELF_TRAIN.UPDATE_PSEUDO_LABEL_INTERVAL == 0)
                     and cur_epoch != 0):
                # eval() disables training-time augmentation while generating labels.
                target_train_loader.dataset.eval()
                print("***********update pseudo label**********")
                self_training_utils.save_pseudo_label_epoch(
                    model, target_train_loader, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                target_train_loader.dataset.train()
            # curriculum data augmentation
            if cfg.SELF_TRAIN.get('PROG_AUG', None) and cfg.SELF_TRAIN.PROG_AUG.ENABLED and \
                    (cur_epoch in cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG):
                aug_times = cfg.SELF_TRAIN.PROG_AUG.UPDATE_AUG.index(cur_epoch) + 1
                print("***********update AUG times:**********", aug_times)
                target_train_loader.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.SELF_TRAIN.PROG_AUG.D_CFG if cfg.SELF_TRAIN.PROG_AUG.get('D_CFG',
                                                                                                   None) else None,
                    intensity=cfg.SELF_TRAIN.PROG_AUG.SCALE, aug_times=aug_times)
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # Score target frames, spend the budget, update the two frame
                # lists, and rebuild both dataloaders on the new splits.
                frame_score = active_learning_utils.active_evaluate(model, target_train_loader, rank)
                sampled_frame_id, _ = active_learning_utils.active_sample(frame_score, budget=annotation_budget)
                sample_list, info_path = active_learning_utils.update_sample_list(sample_list, target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name)
                target_list, target_info_path = active_learning_utils.update_target_list(target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name)
                sample_train_set, sample_train_loader, sample_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_SAMPLE,
                    class_names=cfg.DATA_CONFIG_SAMPLE.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                target_train_set, target_train_loader, target_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_TAR,
                    class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=target_info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                # Target loader was replaced — restart its iterator.
                dataloader_iter_tar = iter(target_train_loader)
            # None until the first sampling epoch has produced a sample loader.
            dataloader_iter_sample = iter(sample_train_loader) if sample_train_loader is not None else None
            accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator = train_one_epoch(
                model=model,
                optimizer_detector=optimizer[0],
                optimizer_mul_cls=optimizer[1],
                optimizer_discriminator=optimizer[2],
                source_train_loader=source_train_loader,
                target_train_loader=target_train_loader,
                sample_train_loader=sample_train_loader,
                model_func=model_func,
                lr_scheduler_detector=cur_scheduler[0],
                lr_scheduler_mul_cls=cur_scheduler[1],
                lr_scheduler_discriminator=cur_scheduler[2],
                accumulated_iter_detector=accumulated_iter_detector,
                accumulated_iter_mul_cls=accumulated_iter_mul_cls,
                accumulated_iter_discriminator=accumulated_iter_discriminator,
                optim_cfg=optim_cfg,
                rank=rank, tbar=tbar,
                dist_train=dist_train,
                tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_src=dataloader_iter_src,
                dataloader_iter_tar=dataloader_iter_tar,
                dataloader_iter_sample=dataloader_iter_sample,
                ema_model=ema_model
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Rolling window: keep only the newest checkpoints (by mtime).
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def fine_tune_dense_head(model, optimizer, lr_scheduler, source_loader, source_loader_iter, dist_train, total_it_each_epoch,
                         accumulated_iter_dense_head, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Fine-tune only the detection (dense) head for one pass of total_it_each_epoch iterations.

    All model parameters are frozen via requires_grad=False and only the
    dense-head parameters are unfrozen before the loop runs on source data.
    Returns the updated accumulated iteration counter for the dense head.

    NOTE(review): the distributed branch unfreezes only conv_cls1/conv_cls2,
    while the single-GPU branch unfreezes the whole dense head — confirm this
    asymmetry is intentional.
    """
    model.train()
    # Freeze everything, then selectively unfreeze the head parameters.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.dense_head.conv_cls1.parameters():
            p.requires_grad = True
        for p in model.module.dense_head.conv_cls2.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.dense_head.parameters():
            p.requires_grad = True
    if rank == 0:
        # Progress bar and timing meters only exist on rank 0.
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_dense_head', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            # Source loader exhausted: restart it for the remaining iterations.
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_dense_head)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # Fallback for plain torch optimizers that expose the LR via param_groups.
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate_dense_head', cur_lr, accumulated_iter_dense_head)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'finetune'
        }
        ret_dict, tb_dict, disp_dict = model(batch_src, **forward_args)
        loss = ret_dict['loss'].mean()
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.DENSE_HEAD.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_dense_head += 1
        cur_batch_time = time.time() - end
        # Average the timings across ranks so the log reflects the whole job.
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_dense_head': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_dense_head))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_dense_head', loss, accumulated_iter_dense_head)
                tb_log.add_scalar('meta_data/learning_rate_dense_head', cur_lr, accumulated_iter_dense_head)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_dense_head)
    if rank == 0:
        pbar.close()
    return accumulated_iter_dense_head
def fine_tune_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader, source_loader_iter,
                            target_loader_iter, dist_train, total_it_each_epoch, accumulated_iter_discriminator,
                            optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Fine-tune only the domain discriminator for total_it_each_epoch iterations.

    Every parameter is frozen except the discriminator's; each iteration draws
    one source batch and one target batch and averages the two domain losses.
    Returns the updated accumulated iteration counter for the discriminator.
    """
    model.train()
    # Freeze the whole network, then unfreeze just the discriminator.
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Re-create exhausted iterators so both domains keep streaming data.
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # Fallback for plain torch optimizers that expose the LR via param_groups.
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        # Forward the source batch with the 'source' flag set ...
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        # ... then the target batch with the flag cleared.
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        # Average so each domain contributes equally to the update.
        loss = (loss_src + loss_tar) / 2
        forward_timer = time.time()
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.DISCRIMINATOR.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def fine_tune_one_epoch_1(model, optimizer_dense_head, optimizer_discriminator, source_train_loader, target_train_loader,
                          model_func, lr_scheduler_dense_head, lr_scheduler_discriminator, optim_cfg,
                          accumulated_iter_dense_head, accumulated_iter_discriminator, rank, tbar, dist_train, total_it_each_epoch, dataloader_iter_src,
                          dataloader_iter_tar, tb_log=None, leave_pbar=False):
    """Run one fine-tuning epoch in two sequential stages.

    Stage 1 fine-tunes the dense head on the source loader; stage 2 fine-tunes
    the domain discriminator on both source and target loaders. Returns the
    updated (dense-head, discriminator) accumulated iteration counters.
    """
    # Stage 1: detection head on source data only.
    accumulated_iter_dense_head = fine_tune_dense_head(
        model, optimizer_dense_head, lr_scheduler_dense_head,
        source_train_loader, dataloader_iter_src, dist_train,
        total_it_each_epoch, accumulated_iter_dense_head,
        optim_cfg, tb_log, rank, tbar, leave_pbar
    )
    # Stage 2: domain discriminator on source + target data.
    accumulated_iter_discriminator = fine_tune_discriminator(
        model, optimizer_discriminator, lr_scheduler_discriminator,
        source_train_loader, target_train_loader,
        dataloader_iter_src, dataloader_iter_tar, dist_train,
        total_it_each_epoch, accumulated_iter_discriminator,
        optim_cfg, tb_log, rank, tbar, leave_pbar
    )
    return accumulated_iter_dense_head, accumulated_iter_discriminator
def fine_tune_model(model, optimizer, source_train_loader, target_train_loader, model_func, lr_scheduler, optim_cfg,
                    start_epoch, total_finetune_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                    dist_train, source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                    max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Outer fine-tuning loop: each epoch fine-tunes the dense head and the
    discriminator via fine_tune_one_epoch_1, then saves a checkpoint on rank 0.

    `optimizer` and `lr_scheduler` are indexable pairs: index 0 is for the
    dense head, index 1 for the discriminator.
    NOTE(review): model_func, target_sampler and ema_model are accepted but
    never used here — presumably kept for signature compatibility; confirm.
    """
    accumulated_iter_dense_head, accumulated_iter_discriminator = start_iter, start_iter
    with tqdm.trange(start_epoch, total_finetune_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(source_train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_finetune_epochs)
            total_it_each_epoch = len(source_train_loader) // max(total_finetune_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        for cur_epoch in tbar:
            if source_sampler is not None:
                # Reshuffle the distributed sampler deterministically per epoch.
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter_dense_head, accumulated_iter_discriminator = fine_tune_one_epoch_1(
                model,
                optimizer[0],
                optimizer[1],
                source_train_loader,
                target_train_loader,
                model_func,
                cur_scheduler[0],
                cur_scheduler[1],
                optim_cfg,
                accumulated_iter_dense_head=accumulated_iter_dense_head,
                accumulated_iter_discriminator=accumulated_iter_discriminator,
                rank=rank, tbar=tbar, tb_log=tb_log,
                dist_train=dist_train,
                leave_pbar=(cur_epoch + 1 == total_finetune_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_src=dataloader_iter_src,
                dataloader_iter_tar=dataloader_iter_tar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Keep at most max_ckpt_save_num checkpoints: delete the oldest first.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_dense_head), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor value moved to CPU memory."""
    cpu_state = type(model_state)()
    for name in model_state:
        cpu_state[name] = model_state[name].cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: torch module (optionally DDP-wrapped) whose weights are saved, or None.
        optimizer: optimizer whose state_dict is saved, or None.
        epoch: epoch number to record.
        it: accumulated iteration count to record.

    Returns:
        dict with 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # Unwrap DDP and move weights to CPU so the checkpoint loads anywhere.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # Fix: narrowed from a bare `except:` so unrelated errors are not swallowed.
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize a checkpoint dict to '<filename>.pth' with torch.save.

    Args:
        state: checkpoint dict (e.g. produced by checkpoint_state).
        filename: path prefix; the '.pth' suffix is appended here.
    """
    # Fix: removed an unreachable `if False and ...:` branch that would have
    # split the optimizer state into a separate '<filename>_optim.pth' file.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
| 37,649 | 41.067039 | 167 | py |
3DTrans | 3DTrans-master/tools/train_utils/train_pseudo_label_utils.py | import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
from pcdet.utils import self_training_utils
def train_detector(model, model_func, optimizer, lr_scheduler, labeled_loader, unlabeled_loader, labeled_loader_iter,
                   unlabeled_loader_iter, dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector for one epoch on labeled plus (pseudo-labeled)
    unlabeled data; the two losses are summed for each step.

    Returns the updated accumulated iteration counter.
    NOTE(review): the incoming total_it_each_epoch argument is immediately
    overwritten by len(unlabeled_loader) — confirm that is intentional.
    """
    total_it_each_epoch = len(unlabeled_loader)
    model.train()
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Restart either iterator when it runs out of batches.
        try:
            batch_labeled = next(labeled_loader_iter)
        except StopIteration:
            labeled_loader_iter = iter(labeled_loader)
            batch_labeled = next(labeled_loader_iter)
            print('new labeled iter')
        try:
            batch_unlabeled = next(unlabeled_loader_iter)
        except StopIteration:
            unlabeled_loader_iter = iter(unlabeled_loader)
            batch_unlabeled = next(unlabeled_loader_iter)
            print('new unlabeled iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # Fallback for plain torch optimizers that expose the LR via param_groups.
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        # One forward/backward over the sum of the unlabeled and labeled losses.
        loss_unlabeled, tb_dict_unlabeled, disp_dict = model_func(model, batch_unlabeled)
        loss_labeled, tb_dict_labeled, disp_dict = model_func(model, batch_labeled)
        loss = loss_labeled + loss_unlabeled
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_labeled.items():
                    tb_log.add_scalar('train/detector_labeled' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_unlabeled.items():
                    tb_log.add_scalar('train/detector_unlabeled' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_model(model, optimizer, labeled_train_loader, unlabeled_train_loader, model_func,
                lr_scheduler, optim_cfg, start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                ps_label_dir, cfg, dist_train, labeled_sampler=None, unlabeled_sampler=None, lr_warmup_scheduler=None,
                ckpt_save_interval=1, max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Self-training loop over labeled + unlabeled data.

    At epochs 0, 5 and 10 the pseudo labels for the unlabeled set are
    regenerated before training the detector for the epoch; a checkpoint is
    saved on rank 0 every ckpt_save_interval epochs, pruning the oldest files.
    """
    accumulated_iter = start_iter
    # Reuse pseudo labels already on disk for this start epoch, if any.
    ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
    if ps_pkl is not None:
        logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(unlabeled_train_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(labeled_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            labeled_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(labeled_train_loader) // max(total_epochs, 1)
        labeled_loader_iter = iter(labeled_train_loader)
        unlabeled_loader_iter = iter(unlabeled_train_loader)
        for cur_epoch in tbar:
            if labeled_sampler is not None:
                labeled_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            if cur_epoch in [0, 5, 10]:
                # Periodically refresh the pseudo labels with the current model;
                # the dataset is switched to eval mode during generation.
                cfg.DATA_CONFIG.USE_UNLABELED_PSEUDO_LABEL = True
                unlabeled_train_loader.dataset.eval()
                print("***********update pseudo label**********")
                self_training_utils.save_pseudo_label_epoch(
                    model, unlabeled_train_loader, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                unlabeled_train_loader.dataset.train()
                # Make sure all ranks see the new pseudo labels before training.
                commu_utils.synchronize()
            accumulated_iter = train_detector(
                model,
                model_func,
                optimizer,
                cur_scheduler,
                labeled_train_loader,
                unlabeled_train_loader,
                labeled_loader_iter,
                unlabeled_loader_iter,
                dist_train,
                optim_cfg,
                rank,
                total_it_each_epoch,
                accumulated_iter,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Keep at most max_ckpt_save_num checkpoints: delete the oldest first.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer=optimizer, epoch=trained_epoch, it=accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor value moved to CPU memory."""
    cpu_state = type(model_state)()
    for name in model_state:
        cpu_state[name] = model_state[name].cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: torch module (optionally DDP-wrapped) whose weights are saved, or None.
        optimizer: optimizer whose state_dict is saved, or None.
        epoch: epoch number to record.
        it: accumulated iteration count to record.

    Returns:
        dict with 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # Unwrap DDP and move weights to CPU so the checkpoint loads anywhere.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # Fix: narrowed from a bare `except:` so unrelated errors are not swallowed.
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize a checkpoint dict to '<filename>.pth' with torch.save.

    Args:
        state: checkpoint dict (e.g. produced by checkpoint_state).
        filename: path prefix; the '.pth' suffix is appended here.
    """
    # Fix: removed an unreachable `if False and ...:` branch (separate optimizer
    # file) and extraction junk that had been fused onto the final line.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
3DTrans | 3DTrans-master/tools/train_utils/train_multi_db_utils_3cls.py | import glob
import os
import torch
import tqdm
import time
import math
import copy
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
from pcdet.utils import self_training_utils
from pcdet.config import cfg
def train_one_epoch(model, optimizer, train_loader_1, train_loader_2, train_loader_3, model_func,
                    lr_scheduler, accumulated_iter, optim_cfg, rank, tbar, total_it_each_epoch,
                    dataloader_iter_1, dataloader_iter_2, dataloader_iter_3, tb_log=None, leave_pbar=False):
    """Train for one epoch on three datasets jointly: each step merges one
    batch from every loader into a single batch and runs one update.

    Returns the updated accumulated iteration counter.
    """
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        merge_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # Restart any loader that runs out of batches mid-epoch.
        try:
            batch_1 = next(dataloader_iter_1)
        except StopIteration:
            dataloader_iter_1 = iter(train_loader_1)
            batch_1 = next(dataloader_iter_1)
            #print('new iters')
        try:
            batch_2 = next(dataloader_iter_2)
        except StopIteration:
            dataloader_iter_2 = iter(train_loader_2)
            batch_2 = next(dataloader_iter_2)
        try:
            batch_3 = next(dataloader_iter_3)
        except StopIteration:
            dataloader_iter_3 = iter(train_loader_3)
            batch_3 = next(dataloader_iter_3)
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # Fallback for plain torch optimizers that expose the LR via param_groups.
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        # Merge the three batches pairwise into a single big batch.
        batch_pre = common_utils.merge_two_batch_dict(batch_1, batch_2)
        batch = common_utils.merge_two_batch_dict(batch_pre, batch_3)
        merge_timer = time.time()
        cur_merge_time = merge_timer - data_timer
        loss, tb_dict, disp_dict = model_func(model, batch)
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_merge_time = commu_utils.average_reduce_value(cur_merge_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            merge_time.update(avg_merge_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})', 'm_time': f'{merge_time.val:.2f}({merge_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader_1, train_loader_2, train_loader_3, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir,
                source_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None):
    """Multi-dataset training loop over three loaders, with optional
    UDA pseudo-label generation for loader 2 before each epoch.

    NOTE(review): total_it_each_epoch is the max of loaders 1 and 2 only —
    loader 3's length is ignored; confirm this is intended.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        # generate the pseudo-labeling for UDA training
        if cfg.get('SELF_TRAIN', None):
            ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
            if ps_pkl is not None:
                logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
        total_it_each_epoch = len(train_loader_1) if len(train_loader_1) > len(train_loader_2) else len(train_loader_2)
        if merge_all_iters_to_one_epoch:
            raise NotImplementedError
        dataloader_iter_1 = iter(train_loader_1)
        dataloader_iter_2 = iter(train_loader_2)
        dataloader_iter_3 = iter(train_loader_3)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            if optim_cfg.get('GENERATE_PSEUDO_LABEL', None):
                # Refresh loader 2's pseudo labels with the current model;
                # the dataset is switched to eval mode during generation.
                train_loader_2.dataset.eval()
                logger.info('***********update pseudo label**********')
                self_training_utils.save_pseudo_label_epoch(
                    model, train_loader_2, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                train_loader_2.dataset.train()
            accumulated_iter = train_one_epoch(
                model, optimizer, train_loader_1, train_loader_2, train_loader_3, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_1=dataloader_iter_1,
                dataloader_iter_2=dataloader_iter_2,
                dataloader_iter_3=dataloader_iter_3
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Keep at most max_ckpt_save_num checkpoints: delete the oldest first.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor value moved to CPU memory."""
    cpu_state = type(model_state)()
    for name in model_state:
        cpu_state[name] = model_state[name].cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: torch module (optionally DDP-wrapped) whose weights are saved, or None.
        optimizer: optimizer whose state_dict is saved, or None.
        epoch: epoch number to record.
        it: accumulated iteration count to record.

    Returns:
        dict with 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # Unwrap DDP and move weights to CPU so the checkpoint loads anywhere.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # Fix: narrowed from a bare `except:` so unrelated errors are not swallowed.
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize a checkpoint dict to '<filename>.pth' with torch.save.

    Args:
        state: checkpoint dict (e.g. produced by checkpoint_state).
        filename: path prefix; the '.pth' suffix is appended here.
    """
    # Fix: removed an unreachable `if False and ...:` branch (separate optimizer
    # file) and extraction junk that had been fused onto the final line.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
3DTrans | 3DTrans-master/tools/train_utils/train_multi_db_loss_merge.py | import torch
import os
import glob
import tqdm
from torch.nn.utils import clip_grad_norm_
def visualize_boxes_batch(batch):
    """Debug helper: draw every scene of a collated batch with mayavi.

    For each sample, plots its points and the first 7 gt-box columns with
    scores; when a 'debug' entry exists, the raw lidar gt boxes are drawn too.
    Blocks on each figure until it is closed (mlab.show(stop=True)).
    """
    import visualize_utils as vis
    import mayavi.mlab as mlab
    for b_idx in range(batch['batch_size']):
        # Column 0 of 'points' is the batch index; the rest are point features.
        points = batch['points'][batch['points'][:, 0] == b_idx][:, 1:]
        if 'debug' not in batch:
            vis.draw_scenes(points, ref_boxes=batch['gt_boxes'][b_idx, :, :7],
                            scores=batch['scores'][b_idx])
        else:
            vis.draw_scenes(points, ref_boxes=batch['gt_boxes'][b_idx, :, :7],
                            gt_boxes=batch['debug'][b_idx]['gt_boxes_lidar'],
                            scores=batch['scores'][b_idx])
        mlab.show(stop=True)
def merge_two_batch_data(batch_1, batch_2):
    """Merge two collated batch dicts into one with batch_size = 2x.

    'gt_boxes' from both batches are re-padded into a single zero-padded
    array of shape (2*B, max_gt, C); every other key from batch_2 is
    concatenated after batch_1's along the batch axis.

    NOTE(review): the first loop's np.stack results are overwritten by the
    second loop for every key that also exists in batch_2, so they only
    survive for keys missing from batch_2 — confirm this is intended.
    """
    import numpy as np
    ret = {}
    for key, val in batch_1.items():
        if key in ['batch_size']:
            continue
        else:
            ret[key] = np.stack(val, axis=0)
    for key, val in batch_2.items():
        val_cat = []
        if key in ['batch_size']:
            continue
        elif key in ['gt_boxes']:
            assert batch_1[key][0].shape[-1] == val[0].shape[-1]
            # Padded width is the sum of the two per-batch maxima (over-pads
            # relative to the single largest sample).
            max_gt = max([len(x) for x in batch_1[key]]) + max([len(x) for x in val])
            batch_gt_boxes3d = np.zeros((batch_1['batch_size']*2, max_gt, val[0].shape[-1]), dtype=np.float32)
            # batch_1's samples occupy rows [0, B); batch_2's rows [B, 2B).
            for k in range(batch_1['batch_size']):
                batch_gt_boxes3d[k, :batch_1[key][k].__len__(), :] = batch_1[key][k]
            for k in range(batch_2['batch_size']):
                batch_gt_boxes3d[k+batch_1['batch_size'], :val[k].__len__(), :] = val[k]
            ret[key] = batch_gt_boxes3d
        else:
            val_cat.append(batch_1[key])
            val_cat.append(val)
            ret[key] = np.concatenate(val_cat, axis=0)
            #ret[key] = np.stack(val, axis=0)
    ret['batch_size'] = batch_1['batch_size']*2
    return ret
def train_one_epoch_multi_db(model, optimizer, train_loader_1, train_loader_2, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                             rank, tbar, total_it_each_epoch, dataloader_iter_1, dataloader_iter_2, tb_log=None, leave_pbar=False):
    """Train one epoch on two source datasets, backpropagating the weighted
    sum of their losses (loss_1 + DB_2_W * loss_2).

    Returns the updated accumulated iteration counter.
    NOTE(review): forward passes are done one per domain and then summed —
    see the inline comment about distributed backward below.
    """
    if total_it_each_epoch == len(train_loader_1):
        dataloader_iter_1 = iter(train_loader_1)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    for cur_it in range(total_it_each_epoch):
        # Load the source domain ONE:
        try:
            batch_1 = next(dataloader_iter_1)
        except StopIteration:
            dataloader_iter_1 = iter(train_loader_1)
            batch_1 = next(dataloader_iter_1)
        # Load the source domain TWO:
        try:
            batch_2 = next(dataloader_iter_2)
        except StopIteration:
            dataloader_iter_2 = iter(train_loader_2)
            batch_2 = next(dataloader_iter_2)
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # Fallback for plain torch optimizers that expose the LR via param_groups.
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        # The loss_1 + loss_2 will lead to a runtime error of loss.backward()
        # when perofrming the pytorch distributed
        # you should perform the forward and backward one by one.
        # Loss for source domain ONE:
        loss_s1, tb_dict_s1, disp_dict_s1 = model_func(model, batch_1)
        # Loss for source domain TWO:
        loss_s2, tb_dict_s2, _ = model_func(model, batch_2)
        # Merge the two loss
        loss = loss_s1 + optim_cfg.DB_2_W * loss_s2
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        disp_dict_s1.update({'loss': loss_s1.item(), 'lr': cur_lr})
        # log to console and tensorboard
        # save the log of the source domain ONE
        if rank == 0:
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict_s1)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_1', loss_s1, accumulated_iter)
                tb_log.add_scalar('train/loss_2', loss_s2, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict_s1.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
                for key, val in tb_dict_s2.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_multi_db_model(model, optimizer, train_src_loader, train_src_loader_2, model_func, lr_scheduler, optim_cfg,
                         start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir,
                         source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                         max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Epoch loop for joint training on two source datasets via
    train_one_epoch_multi_db, saving pruned checkpoints on rank 0.

    NOTE(review): ps_label_dir, target_sampler, logger and ema_model are
    accepted but unused here — presumably kept for signature compatibility.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        # One "epoch" covers the longer of the two loaders.
        total_it_each_epoch = len(train_src_loader) if len(train_src_loader) > len(train_src_loader_2) else len(train_src_loader_2)
        if merge_all_iters_to_one_epoch:
            raise NotImplementedError
        dataloader_iter_1 = iter(train_src_loader)
        dataloader_iter_2 = iter(train_src_loader_2)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch_multi_db(
                model, optimizer, train_src_loader, train_src_loader_2, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_1=dataloader_iter_1,
                dataloader_iter_2=dataloader_iter_2
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # Keep at most max_ckpt_save_num checkpoints: delete the oldest first.
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* (same mapping type, e.g. OrderedDict)
    with every tensor value moved to CPU memory."""
    cpu_state = type(model_state)()
    for name in model_state:
        cpu_state[name] = model_state[name].cpu()
    return cpu_state
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: torch module (optionally DDP-wrapped) whose weights are saved, or None.
        optimizer: optimizer whose state_dict is saved, or None.
        epoch: epoch number to record.
        it: accumulated iteration count to record.

    Returns:
        dict with 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # Unwrap DDP and move weights to CPU so the checkpoint loads anywhere.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # Fix: narrowed from a bare `except:` so unrelated errors are not swallowed.
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize a checkpoint dict to '<filename>.pth' with torch.save.

    Args:
        state: checkpoint dict (e.g. produced by checkpoint_state).
        filename: path prefix; the '.pth' suffix is appended here.
    """
    # Fix: removed an unreachable `if False and ...:` branch (separate optimizer
    # file) and extraction junk that had been fused onto the final line.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
3DTrans | 3DTrans-master/tools/train_utils/train_multi_db_utils.py | import glob
import os
import torch
import tqdm
import time
import math
import copy
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
from pcdet.utils import self_training_utils
from pcdet.config import cfg
def train_one_epoch(model, optimizer, train_loader_1, train_loader_2, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter_1, dataloader_iter_2, tb_log=None, leave_pbar=False):
    """Train for one epoch on batches merged from two source dataloaders.

    Each iteration draws one batch from each loader (recreating an iterator on
    StopIteration so the shorter dataset cycles), merges the two batch dicts,
    and performs a single optimizer step. Progress bars and TensorBoard
    logging happen on rank 0 only; timing values are averaged across ranks.
    Returns the updated global iteration counter.
    """
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        merge_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # cycle loader 1 when exhausted
        try:
            batch_1 = next(dataloader_iter_1)
        except StopIteration:
            dataloader_iter_1 = iter(train_loader_1)
            batch_1 = next(dataloader_iter_1)
            #print('new iters')
        # cycle loader 2 when exhausted
        try:
            batch_2 = next(dataloader_iter_2)
        except StopIteration:
            dataloader_iter_2 = iter(train_loader_2)
            batch_2 = next(dataloader_iter_2)
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter)
        # some schedulers expose .lr on the optimizer; fall back to param_groups
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        batch = common_utils.merge_two_batch_dict(batch_1, batch_2)
        merge_timer = time.time()
        cur_merge_time = merge_timer - data_timer
        loss, tb_dict, disp_dict = model_func(model, batch)
        forward_timer = time.time()
        # NOTE(review): measured from data_timer, so this also includes the
        # batch-merge time above — confirm whether f_time should instead be
        # forward_timer - merge_timer.
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_merge_time = commu_utils.average_reduce_value(cur_merge_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            merge_time.update(avg_merge_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})', 'm_time': f'{merge_time.val:.2f}({merge_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_loader_1, train_loader_2, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, ps_label_dir,
                source_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None):
    """Multi-epoch training over two merged source datasets.

    Handles: (a) progressive-augmentation fade (disabling GT-sampling at a
    configured epoch, applied both on resume and during training), (b)
    optional pseudo-label generation for dataset 2, (c) per-epoch training via
    ``train_one_epoch``, and (d) rank-0 checkpoint rotation keeping at most
    ``max_ckpt_save_num`` files.
    """
    accumulated_iter = start_iter
    # Change the Data Augmentation:
    # Cancel the GT-Sampling for the last 1/4 epoch for the training process
    # for resuming the ckpt: change the data_augmentator for dataset 1
    if cfg.DATA_CONFIG.get('PROG_AUG', None) and cfg.DATA_CONFIG.PROG_AUG.ENABLED and \
            (start_epoch == cfg.DATA_CONFIG.PROG_AUG.UPDATE_AUG_EPOCH):
        logger.info('**********************Starting the Fade GT-Sampling Operation**********************')
        train_loader_1.dataset.data_augmentor.re_prepare(
            augmentor_configs=cfg.DATA_CONFIG.PROG_AUG.D_CFG if cfg.DATA_CONFIG.PROG_AUG.get('D_CFG', None) else None)
    # for resuming the ckpt: change the data_augmentator for dataset 2
    if cfg.DATA_CONFIG_SRC_2.get('PROG_AUG', None) and cfg.DATA_CONFIG_SRC_2.PROG_AUG.ENABLED and \
            (start_epoch == cfg.DATA_CONFIG_SRC_2.PROG_AUG.UPDATE_AUG_EPOCH):
        logger.info('**********************Starting the Fade GT-Sampling Operation**********************')
        train_loader_2.dataset.data_augmentor.re_prepare(
            augmentor_configs=cfg.DATA_CONFIG_SRC_2.PROG_AUG.D_CFG if cfg.DATA_CONFIG_SRC_2.PROG_AUG.get('D_CFG', None) else None)
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        # generate the pseudo-labeling for merge the labeled A dataset and unlabeled B dataset
        if cfg.get('SELF_TRAIN', None):
            ps_pkl = self_training_utils.check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
            if ps_pkl is not None:
                logger.info('==> Loading pseudo labels from {}'.format(ps_pkl))
        # epoch length is driven by the longer of the two loaders
        total_it_each_epoch = len(train_loader_1) if len(train_loader_1) > len(train_loader_2) else len(train_loader_2)
        if merge_all_iters_to_one_epoch:
            raise NotImplementedError
        dataloader_iter_1 = iter(train_loader_1)
        dataloader_iter_2 = iter(train_loader_2)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            if optim_cfg.get('GENERATE_PSEUDO_LABEL', None):
                # switch dataset 2 to eval mode while generating pseudo labels
                train_loader_2.dataset.eval()
                logger.info('***********update pseudo label**********')
                self_training_utils.save_pseudo_label_epoch(
                    model, train_loader_2, rank,
                    leave_pbar=True, ps_label_dir=ps_label_dir, cur_epoch=cur_epoch
                )
                train_loader_2.dataset.train()
            # for resuming the ckpt: change the data_augmentator for dataset 1
            if cfg.DATA_CONFIG.get('PROG_AUG', None) and cfg.DATA_CONFIG.PROG_AUG.ENABLED and \
                    (cur_epoch == cfg.DATA_CONFIG.PROG_AUG.UPDATE_AUG_EPOCH):
                logger.info('**********************Dataset ONE: Starting the Fade GT-Sampling Operation**********************')
                train_loader_1.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.DATA_CONFIG.PROG_AUG.D_CFG if cfg.DATA_CONFIG.PROG_AUG.get('D_CFG', None) else None)
            # for resuming the ckpt: change the data_augmentator for dataset 2
            if cfg.DATA_CONFIG_SRC_2.get('PROG_AUG', None) and cfg.DATA_CONFIG_SRC_2.PROG_AUG.ENABLED and \
                    (cur_epoch == cfg.DATA_CONFIG_SRC_2.PROG_AUG.UPDATE_AUG_EPOCH):
                logger.info('**********************Dataset TWO: Starting the Fade GT-Sampling Operation**********************')
                train_loader_2.dataset.data_augmentor.re_prepare(
                    augmentor_configs=cfg.DATA_CONFIG_SRC_2.PROG_AUG.D_CFG if cfg.DATA_CONFIG_SRC_2.PROG_AUG.get('D_CFG', None) else None)
            accumulated_iter = train_one_epoch(
                model, optimizer, train_loader_1, train_loader_2, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_1=dataloader_iter_1,
                dataloader_iter_2=dataloader_iter_2
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # rotate checkpoints: delete oldest files beyond max_ckpt_save_num
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Copy *model_state* with all tensors moved to CPU, keeping its mapping
    type (e.g. OrderedDict) and key order."""
    cpu_pairs = ((key, value.cpu()) for key, value in model_state.items())
    return type(model_state)(cpu_pairs)
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Build a serializable checkpoint dict.

    Args:
        model: (optional) model to snapshot; DistributedDataParallel wrappers
            are unwrapped via ``.module`` and weights moved to CPU.
        optimizer: (optional) optimizer whose ``state_dict`` is captured.
        epoch: epoch number to record.
        it: accumulated iteration count to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state',
        'version' (pcdet version string, or 'none' if unavailable).
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # DDP wraps the real model in .module; move weights to CPU so the
            # checkpoint loads without the original GPU placement.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # Narrowed from a bare `except:` which would also swallow
        # KeyboardInterrupt/SystemExit; 'none' marks an unknown version.
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to ``'<filename>.pth'`` via ``torch.save``.

    Args:
        state: picklable checkpoint dict (see ``checkpoint_state``).
        filename: path prefix; the '.pth' suffix is appended here.
    """
    # A permanently disabled (`if False and ...`) branch that split the
    # optimizer state into a separate '<filename>_optim.pth' file has been
    # removed as dead code.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
3DTrans | 3DTrans-master/tools/train_utils/train_active_TQS.py | import glob
import os
import pickle
from symbol import parameters
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils, self_training_utils
from pcdet.models import load_data_to_gpu
from pcdet.datasets import build_dataloader, build_dataloader_ada
from pcdet.utils import active_learning_2D_utils
def train_detector(model, model_func, optimizer, lr_scheduler, source_loader, sample_loader, source_loader_iter, sample_loader_iter,
                   dist_train, optim_cfg, rank, total_it_each_epoch, accumulated_iter_detector, tb_log, tbar, leave_pbar=False):
    """Train the detector branch for ``total_it_each_epoch`` iterations.

    Unfreezes all model parameters except the discriminator (frozen), then for
    each iteration combines the source-domain loss with a scaled sampled-batch
    loss (``optim_cfg.SAMPLE_LOSS_SCALE``) and steps the detector optimizer.
    Rank 0 handles progress bars and TensorBoard logging. Returns the updated
    detector iteration counter.
    """
    model.train()
    # freeze the discriminator; train everything else
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = True
        for p in model.module.discriminator.parameters():
            p.requires_grad = False
    else:
        for p in model.parameters():
            p.requires_grad = True
        for p in model.discriminator.parameters():
            p.requires_grad = False
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_detector', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_detector'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # cycle the source loader when exhausted
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # cycle the sampled-frames loader when exhausted
        try:
            batch_sample = next(sample_loader_iter)
        except StopIteration:
            sample_loader_iter = iter(sample_loader)
            batch_sample = next(sample_loader_iter)
            print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_detector)
        # some schedulers expose .lr on the optimizer; fall back to param_groups
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_detector)
        optimizer.zero_grad()
        loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, **forward_args)
        loss_src, tb_dict_src, disp_dict = model_func(model, batch_src, **forward_args)
        # loss_sam, tb_dict_sam, disp_dict = model_func(model, batch_sample, forward_args)
        loss = loss_src + optim_cfg.SAMPLE_LOSS_SCALE * loss_sam
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_detector += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_detector': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_detector))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_detector', loss, accumulated_iter_detector)
                tb_log.add_scalar('meta_data/learning_rate_detector', cur_lr, accumulated_iter_detector)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/detector_src' + key, val, accumulated_iter_detector)
                for key, val in tb_dict_sam.items():
                    tb_log.add_scalar('train/detector_sam' + key, val, accumulated_iter_detector)
    if rank == 0:
        pbar.close()
    return accumulated_iter_detector
def train_multi_classifier(model, optimizer, lr_scheduler, source_loader, sample_loader,
                           source_loader_iter, sample_loader_iter, dist_train, total_it_each_epoch,
                           accumulated_iter_mul_cls, optim_cfg, tb_log, rank,
                           tbar, leave_pbar=False):
    """Train only the two auxiliary classification heads (conv_cls1/conv_cls2).

    Freezes every other parameter, then trains on source batches and — when a
    sampled-frames loader is available — averages the source and sample losses.
    Rank 0 handles progress bars and TensorBoard logging. Returns the updated
    multi-classifier iteration counter.
    """
    model.train()
    # freeze everything except the two classifier heads
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.dense_head.conv_cls1.parameters():
            p.requires_grad = True
        for p in model.module.dense_head.conv_cls2.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.dense_head.conv_cls1.parameters():
            p.requires_grad = True
        for p in model.dense_head.conv_cls2.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_mul_cls', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    forward_args = {
        'mode': 'train_mul_cls'
    }
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # cycle the source loader when exhausted
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # sampled-frames loader is optional (None before the first sampling round)
        if sample_loader is not None:
            try:
                batch_sam = next(sample_loader_iter)
            except StopIteration:
                sample_loader_iter = iter(sample_loader)
                batch_sam = next(sample_loader_iter)
                print('new sample iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_mul_cls)
        # some schedulers expose .lr on the optimizer; fall back to param_groups
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_mul_cls)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        ret_src, tb_dict_src, disp_dict = model(batch_src, **forward_args)
        loss_src = ret_src['loss'].mean()
        if sample_loader is not None:
            load_data_to_gpu(batch_sam)
            ret_sam, tb_dict_sam, disp_dict = model(batch_sam, **forward_args)
            loss_sam = ret_sam['loss'].mean()
            loss = (loss_src + loss_sam) / 2
        else:
            loss = loss_src
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_mul_cls += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_mul_cls': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_mul_cls))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_mul_cls', loss, accumulated_iter_mul_cls)
                tb_log.add_scalar('meta_data/learning_rate_mul_cls', cur_lr, accumulated_iter_mul_cls)
                for key, val in tb_dict_src.items():
                    tb_log.add_scalar('train/mul_cls_src_' + key, val, accumulated_iter_mul_cls)
                if sample_loader is not None:
                    for key, val in tb_dict_sam.items():
                        tb_log.add_scalar('train/mul_cls_sam_' + key, val, accumulated_iter_mul_cls)
    if rank == 0:
        pbar.close()
    return accumulated_iter_mul_cls
def train_discriminator(model, optimizer, lr_scheduler, source_loader, target_loader,
                        source_loader_iter, target_loader_iter, dist_train, total_it_each_epoch,
                        accumulated_iter_discriminator, optim_cfg, tb_log, rank, tbar, leave_pbar=False):
    """Train only the domain discriminator on source vs. target batches.

    Freezes the whole model except the discriminator, then averages the
    discriminator loss over one source batch (``source=True``) and one target
    batch (``source=False``) per iteration. Rank 0 handles progress bars and
    TensorBoard logging. Returns the updated discriminator iteration counter.
    """
    model.train()
    # freeze everything except the discriminator
    if dist_train:
        for p in model.module.parameters():
            p.requires_grad = False
        for p in model.module.discriminator.parameters():
            p.requires_grad = True
    else:
        for p in model.parameters():
            p.requires_grad = False
        for p in model.discriminator.parameters():
            p.requires_grad = True
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train_discriminator', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # cycle the source loader when exhausted
        try:
            batch_src = next(source_loader_iter)
        except StopIteration:
            source_loader_iter = iter(source_loader)
            batch_src = next(source_loader_iter)
            print('new source iter')
        # cycle the target loader when exhausted
        try:
            batch_tar = next(target_loader_iter)
        except StopIteration:
            target_loader_iter = iter(target_loader)
            batch_tar = next(target_loader_iter)
            print('new target iter')
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter_discriminator)
        # some schedulers expose .lr on the optimizer; fall back to param_groups
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter_discriminator)
        optimizer.zero_grad()
        load_data_to_gpu(batch_src)
        forward_args = {
            'mode': 'train_discriminator',
            'source': True
        }
        loss_src = model(batch_src, **forward_args)
        load_data_to_gpu(batch_tar)
        forward_args = {
            'mode': 'train_discriminator',
            'source': False
        }
        loss_tar = model(batch_tar, **forward_args)
        loss = (loss_src + loss_tar) / 2
        tb_dict = {
            'discriminator_loss': loss.item()
        }
        disp_dict = {}
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter_discriminator += 1
        cur_batch_time = time.time() - end
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr_discriminator': cur_lr,
                'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})',
                'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter_discriminator))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss_discriminator', loss, accumulated_iter_discriminator)
                tb_log.add_scalar('meta_data/learning_rate_discriminator', cur_lr, accumulated_iter_discriminator)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter_discriminator)
    if rank == 0:
        pbar.close()
    return accumulated_iter_discriminator
def train_one_epoch(model, optimizer_detector, optimizer_discriminator, optimizer_mul_cls, source_train_loader, target_train_loader,
                    sample_train_loader, model_func, lr_scheduler_detector, lr_scheduler_discriminator, lr_scheduler_mul_cls,
                    accumulated_iter_detector, accumulated_iter_discriminator, accumulated_iter_mul_cls,optim_cfg, rank, tbar,
                    dist_train, total_it_each_epoch, dataloader_iter_src, dataloader_iter_tar, dataloader_iter_sample,
                    tb_log=None, leave_pbar=False, ema_model=None):
    """Run one TQS epoch: detector, then multi-classifier heads, then the
    domain discriminator, each with its own optimizer/scheduler.

    Returns:
        tuple of the three updated iteration counters
        (detector, multi-classifier, discriminator).
    """
    # assert total_it_each_epoch == len(source_train_loader)
    accumulated_iter_detector = train_detector(
        model,
        model_func,
        optimizer_detector,
        lr_scheduler_detector,
        source_train_loader,
        sample_train_loader,
        dataloader_iter_src,
        dataloader_iter_sample,
        dist_train,
        optim_cfg,
        rank,
        total_it_each_epoch,
        accumulated_iter_detector,
        tb_log, tbar
    )
    # BUG FIX: the result was previously assigned to a misspelled variable
    # (accumulated_iter_nul_cls), so the stale input counter was returned and
    # the multi-classifier iteration count never advanced across epochs.
    accumulated_iter_mul_cls = train_multi_classifier(
        model,
        optimizer_mul_cls,
        lr_scheduler_mul_cls,
        source_train_loader,
        sample_train_loader,
        dataloader_iter_src,
        dataloader_iter_sample,
        dist_train,
        total_it_each_epoch,
        accumulated_iter_mul_cls,
        optim_cfg.MUL_CLS,
        tb_log,
        rank,
        tbar
    )
    accumulated_iter_discriminator = train_discriminator(
        model,
        optimizer_discriminator,
        lr_scheduler_discriminator,
        source_train_loader,
        target_train_loader,
        dataloader_iter_src,
        dataloader_iter_tar,
        dist_train,
        total_it_each_epoch,
        accumulated_iter_discriminator,
        optim_cfg.DISCRIMINATOR,
        tb_log, rank, tbar
    )
    return accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator
def train_active_model_target(model, optimizer, source_train_loader, target_train_loader, sample_loader, model_func, lr_scheduler, optim_cfg,
                              start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, sample_epoch,
                              annotation_budget, target_file_path, sample_save_path, cfg, batch_size, workers, dist_train,
                              source_sampler=None, target_sampler=None, sample_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                              max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False, logger=None, ema_model=None):
    """Active-learning training loop on the target domain (TQS-style).

    At each epoch listed in ``sample_epoch``: train the discriminator and
    multi-classifier heads, score target frames, select ``annotation_budget``
    frames, and rebuild the sample/target dataloaders from the updated lists.
    Every epoch then trains the detector and (on rank 0) rotates checkpoints.

    Note:
        ``optimizer`` and ``lr_scheduler`` are indexed as [0]=detector,
        [1]=discriminator, [2]=multi-classifier — assumes the caller passes
        sequences in that order (TODO confirm against the call site).
    """
    target_list = active_learning_2D_utils.get_dataset_list(target_file_path, oss=True)
    sample_list = []
    # no sampled frames exist before the first sampling round
    sample_train_loader = None
    dataloader_iter_sample = None
    target_name = cfg['DATA_CONFIG_TAR']['DATASET']
    accumulated_iter_detector, accumulated_iter_mul_cls, accumulated_iter_discriminator = start_iter, start_iter, start_iter
    source_reader = common_utils.DataReader(source_train_loader, source_sampler)
    source_reader.construct_iter()
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True,
                     leave=(rank == 0)) as tbar:
        if merge_all_iters_to_one_epoch:
            assert hasattr(source_train_loader.dataset, 'merge_all_iters_to_one_epoch')
            source_train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_iters_each_epoch = len(source_train_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(source_train_loader)
        dataloader_iter_tar = iter(target_train_loader) if target_train_loader is not None else None
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            if cur_epoch in sample_epoch:
                # update the discriminator and classifier heads before scoring frames
                accumulated_iter_discriminator = train_discriminator(
                    model,
                    optimizer[1],
                    lr_scheduler[1],
                    source_train_loader,
                    target_train_loader,
                    dataloader_iter_src,
                    dataloader_iter_tar,
                    dist_train,
                    len(target_train_loader),
                    accumulated_iter_discriminator,
                    optim_cfg.DISCRIMINATOR,
                    tb_log, rank, tbar
                )
                accumulated_iter_mul_cls = train_multi_classifier(
                    model,
                    optimizer[2],
                    lr_scheduler[2],
                    source_train_loader,
                    sample_train_loader,
                    dataloader_iter_src,
                    dataloader_iter_sample,
                    dist_train,
                    len(target_train_loader),
                    accumulated_iter_mul_cls,
                    optim_cfg.MUL_CLS,
                    tb_log,
                    rank,
                    tbar
                )
            # active evaluate and sample
            if cur_epoch in sample_epoch:
                # sample from target_domain
                frame_score = active_learning_2D_utils.active_evaluate_dual(model, target_train_loader, rank, domain='target')
                sampled_frame_id, _ = active_learning_2D_utils.active_sample(frame_score, budget=annotation_budget)
                sample_list, info_path = active_learning_2D_utils.update_sample_list_dual(
                    sample_list, target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank, domain='target'
                )
                # selected frames move out of the unlabeled target pool
                target_list, target_info_path = active_learning_2D_utils.update_target_list(target_list, sampled_frame_id, cur_epoch, sample_save_path, target_name, rank)
                sample_train_set, sample_train_loader, sample_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_SAMPLE,
                    class_names=cfg.DATA_CONFIG_SAMPLE.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                target_train_set, target_train_loader, target_train_sampler = build_dataloader_ada(
                    dataset_cfg=cfg.DATA_CONFIG_TAR,
                    class_names=cfg.DATA_CONFIG_TAR.CLASS_NAMES,
                    batch_size=batch_size,
                    dist=dist_train, workers=workers,
                    logger=logger,
                    training=True,
                    info_path=target_info_path,
                    merge_all_iters_to_one_epoch=merge_all_iters_to_one_epoch,
                    total_epochs=total_epochs-cur_epoch
                )
                dataloader_iter_tar = iter(target_train_loader)
                dataloader_iter_sample = iter(sample_train_loader) if sample_train_loader is not None else None
            dataloader_iter_src_sample = iter(sample_loader)
            accumulated_iter_detector = train_detector(
                model,
                model_func,
                optimizer[0],
                lr_scheduler[0],
                sample_loader,
                sample_train_loader,
                dataloader_iter_src_sample,
                dataloader_iter_sample,
                dist_train,
                optim_cfg,
                rank,
                len(sample_loader),
                accumulated_iter_detector,
                tb_log, tbar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                # rotate checkpoints: delete oldest files beyond max_ckpt_save_num
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, epoch=trained_epoch, it=accumulated_iter_detector), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* whose tensors live on the CPU.

    The copy is built through the input's own constructor, so an OrderedDict
    stays an OrderedDict with the same key ordering.
    """
    return type(model_state)((k, v.cpu()) for k, v in model_state.items())
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Build a serializable checkpoint dict.

    Args:
        model: (optional) model to snapshot; DistributedDataParallel wrappers
            are unwrapped via ``.module`` and weights moved to CPU.
        optimizer: (optional) optimizer whose ``state_dict`` is captured.
        epoch: epoch number to record.
        it: accumulated iteration count to record.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state',
        'version' (pcdet version string, or 'none' if unavailable).
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # DDP wraps the real model in .module; move weights to CPU so the
            # checkpoint loads without the original GPU placement.
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # Narrowed from a bare `except:` which would also swallow
        # KeyboardInterrupt/SystemExit; 'none' marks an unknown version.
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to ``'<filename>.pth'`` via ``torch.save``.

    Args:
        state: picklable checkpoint dict (see ``checkpoint_state``).
        filename: path prefix; the '.pth' suffix is appended here.
    """
    # A permanently disabled (`if False and ...`) branch that split the
    # optimizer state into a separate '<filename>_optim.pth' file has been
    # removed as dead code.
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
| 23,288 | 39.362218 | 170 | py |
3DTrans | 3DTrans-master/tools/train_utils/train_random_utils.py | import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils, active_learning_utils
def train_one_epoch(model, optimizer, train_source_loader, train_target_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                    rank, tbar, total_it_each_epoch, dataloader_iter_src, dataloader_iter_tar, tb_log=None, leave_pbar=False):
    """Train for one epoch, averaging source- and target-batch losses.

    Each iteration draws one batch from each domain (cycling exhausted
    iterators), computes ``(loss_src + loss_tar) / 2`` and performs a single
    optimizer step. Rank 0 handles progress bars and TensorBoard logging.
    Returns the updated global iteration counter.
    """
    # a full-length epoch restarts the source iterator from scratch
    if total_it_each_epoch == len(train_source_loader):
        dataloader_iter_src = iter(train_source_loader)
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        try:
            batch_src = next(dataloader_iter_src)
        except StopIteration:
            dataloader_iter_src = iter(train_source_loader)
            batch_src = next(dataloader_iter_src)
            print('new iters')
        try:
            batch_tar = next(dataloader_iter_tar)
        except StopIteration:
            dataloader_iter_tar = iter(train_target_loader)
            batch_tar = next(dataloader_iter_tar)
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter)
        # some schedulers expose .lr on the optimizer; fall back to param_groups
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        # NOTE(review): tb_dict from the source pass is overwritten by the
        # target pass, so only target stats reach TensorBoard — confirm intended.
        loss_src, tb_dict, disp_dict = model_func(model, batch_src)
        loss_tar, tb_dict, disp_dict = model_func(model, batch_tar)
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss = (loss_src + loss_tar) / 2
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_one_epoch_tar_only(model, optimizer, train_target_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
                             rank, tbar, total_it_each_epoch, dataloader_iter_tar, tb_log=None, leave_pbar=False):
    """Train for one epoch on target-domain batches only.

    Same structure as ``train_one_epoch`` but draws exclusively from the
    target loader and optimizes the plain target loss. Rank 0 handles
    progress bars and TensorBoard logging. Returns the updated global
    iteration counter.
    """
    if rank == 0:
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
        data_time = common_utils.AverageMeter()
        batch_time = common_utils.AverageMeter()
        forward_time = common_utils.AverageMeter()
    for cur_it in range(total_it_each_epoch):
        end = time.time()
        # cycle the target loader when exhausted
        try:
            batch_tar = next(dataloader_iter_tar)
        except StopIteration:
            dataloader_iter_tar = iter(train_target_loader)
            batch_tar = next(dataloader_iter_tar)
        data_timer = time.time()
        cur_data_time = data_timer - end
        lr_scheduler.step(accumulated_iter)
        # some schedulers expose .lr on the optimizer; fall back to param_groups
        try:
            cur_lr = float(optimizer.lr)
        except:
            cur_lr = optimizer.param_groups[0]['lr']
        if tb_log is not None:
            tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        loss_tar, tb_dict, disp_dict = model_func(model, batch_tar)
        forward_timer = time.time()
        cur_forward_time = forward_timer - data_timer
        loss = loss_tar
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        cur_batch_time = time.time() - end
        # average reduce
        avg_data_time = commu_utils.average_reduce_value(cur_data_time)
        avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
        avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
        # log to console and tensorboard
        if rank == 0:
            data_time.update(avg_data_time)
            forward_time.update(avg_forward_time)
            batch_time.update(avg_batch_time)
            disp_dict.update({
                'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
                'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
            })
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            if tb_log is not None:
                tb_log.add_scalar('train/loss', loss, accumulated_iter)
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                for key, val in tb_dict.items():
                    tb_log.add_scalar('train/' + key, val, accumulated_iter)
    if rank == 0:
        pbar.close()
    return accumulated_iter
def train_model(model, optimizer, train_source_loader, train_target_loader, model_func, lr_scheduler, optim_cfg,
                start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False):
    """Joint source+target training loop with per-epoch checkpointing.

    Args:
        model: network to train.
        optimizer: optimizer stepped inside train_one_epoch.
        train_source_loader / train_target_loader: dataloaders for the two domains.
        model_func: callable(model, batch) -> (ret_dict, tb_dict, disp_dict).
        lr_scheduler: per-iteration scheduler used after warmup.
        optim_cfg: optimization config node (WARMUP_EPOCH, GRAD_NORM_CLIP, ...).
        start_epoch / total_epochs / start_iter: resume bookkeeping.
        rank: distributed rank; only rank 0 logs and saves checkpoints.
        tb_log: tensorboard writer or None.
        ckpt_save_dir: pathlib.Path directory that receives checkpoints.
        source_sampler / target_sampler: DistributedSampler instances or None.
        lr_warmup_scheduler: scheduler used for the first WARMUP_EPOCH epochs.
        ckpt_save_interval: save a checkpoint every N epochs.
        max_ckpt_save_num: keep at most this many checkpoints (oldest pruned).
        merge_all_iters_to_one_epoch: treat the whole run as one long epoch.

    Returns:
        None; checkpoints are written as a side effect.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_source_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(train_source_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_source_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_source_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(train_source_loader)
        dataloader_iter_tar = iter(train_target_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # BUGFIX: target_sampler was accepted but its epoch was never
            # advanced, so under DDP the target loader replayed the same
            # shuffling order every epoch.
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch (warmup scheduler for the first WARMUP_EPOCH epochs)
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch(
                model, optimizer, train_source_loader, train_target_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_src=dataloader_iter_src, dataloader_iter_tar=dataloader_iter_tar
            )
            # save trained model; prune oldest checkpoints first so at most
            # max_ckpt_save_num remain after this save
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if len(ckpt_list) >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def train_model_random(model, optimizer, train_source_loader, train_target_loader, model_func, lr_scheduler, optim_cfg,
                       start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                       source_sampler=None, target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                       max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False):
    """Source+target training loop (same structure as train_model); see
    train_model for the parameter contract.

    Returns:
        None; checkpoints are written as a side effect.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_source_loader)
        if merge_all_iters_to_one_epoch:
            assert hasattr(train_source_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_source_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_source_loader) // max(total_epochs, 1)
        dataloader_iter_src = iter(train_source_loader)
        dataloader_iter_tar = iter(train_target_loader)
        for cur_epoch in tbar:
            if source_sampler is not None:
                source_sampler.set_epoch(cur_epoch)
            # BUGFIX: target_sampler was accepted but its epoch was never
            # advanced, so under DDP the target loader replayed the same
            # shuffling order every epoch.
            if target_sampler is not None:
                target_sampler.set_epoch(cur_epoch)
            # train one epoch (warmup scheduler for the first WARMUP_EPOCH epochs)
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch(
                model, optimizer, train_source_loader, train_target_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_src=dataloader_iter_src, dataloader_iter_tar=dataloader_iter_tar
            )
            # save trained model; prune oldest checkpoints first so at most
            # max_ckpt_save_num remain after this save
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                if len(ckpt_list) >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def train_model_target_only(model, optimizer, train_target_loader, model_func, lr_scheduler, optim_cfg,
                            start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir,
                            target_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
                            max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False):
    """Training loop that uses only the target-domain loader.

    Structure mirrors train_model but draws batches solely from
    train_target_loader via train_one_epoch_tar_only. Only rank 0 saves
    checkpoints; at most max_ckpt_save_num checkpoint files are kept in
    ckpt_save_dir (oldest removed first). Returns None.
    """
    accumulated_iter = start_iter
    with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
        total_it_each_epoch = len(train_target_loader)
        if merge_all_iters_to_one_epoch:
            # dataset exposes a merge mode: one "epoch" then covers 1/total_epochs of all iterations
            assert hasattr(train_target_loader.dataset, 'merge_all_iters_to_one_epoch')
            train_target_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
            total_it_each_epoch = len(train_target_loader) // max(total_epochs, 1)
        dataloader_iter_tar = iter(train_target_loader)
        for cur_epoch in tbar:
            if target_sampler is not None:
                # reshuffle the distributed sampler each epoch
                target_sampler.set_epoch(cur_epoch)
            # train one epoch
            if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
                cur_scheduler = lr_warmup_scheduler
            else:
                cur_scheduler = lr_scheduler
            accumulated_iter = train_one_epoch_tar_only(
                model, optimizer, train_target_loader, model_func,
                lr_scheduler=cur_scheduler,
                accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
                rank=rank, tbar=tbar, tb_log=tb_log,
                leave_pbar=(cur_epoch + 1 == total_epochs),
                total_it_each_epoch=total_it_each_epoch,
                dataloader_iter_tar=dataloader_iter_tar
            )
            # save trained model
            trained_epoch = cur_epoch + 1
            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
                ckpt_list.sort(key=os.path.getmtime)
                # prune oldest checkpoints so at most max_ckpt_save_num remain after saving
                if ckpt_list.__len__() >= max_ckpt_save_num:
                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
                        os.remove(ckpt_list[cur_file_idx])
                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
                save_checkpoint(
                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
                )
def model_state_to_cpu(model_state):
    """Return a copy of *model_state* with every tensor moved to the CPU,
    preserving the original mapping type (typically OrderedDict)."""
    return type(model_state)(
        (name, tensor.cpu()) for name, tensor in model_state.items()
    )
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
    """Assemble a serializable checkpoint dict.

    Args:
        model: network to snapshot (DDP-wrapped or plain), or None to skip.
        optimizer: optimizer whose state_dict is stored, or None to skip.
        epoch: epoch counter, stored as-is.
        it: global iteration counter, stored as-is.

    Returns:
        dict with keys 'epoch', 'it', 'model_state', 'optimizer_state', 'version'.
    """
    optim_state = optimizer.state_dict() if optimizer is not None else None
    if model is not None:
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            # unwrap DDP and move tensors to CPU so the checkpoint loads anywhere
            model_state = model_state_to_cpu(model.module.state_dict())
        else:
            model_state = model.state_dict()
    else:
        model_state = None
    try:
        import pcdet
        version = 'pcdet+' + pcdet.__version__
    except (ImportError, AttributeError):
        # was a bare `except:` which also hid unrelated failures; pcdet being
        # absent (or lacking __version__) is the only expected case here
        version = 'none'
    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save."""
    # Dead branch kept from upstream: would split the optimizer state into a
    # separate '<filename>_optim.pth' file, but is disabled by the constant
    # `False` guard and never executes.
    if False and 'optimizer_state' in state:
        optimizer_state = state['optimizer_state']
        state.pop('optimizer_state', None)
        optimizer_filename = '{}_optim.pth'.format(filename)
        torch.save({'optimizer_state': optimizer_state}, optimizer_filename)
    # the '.pth' suffix is appended here, so callers pass a bare stem
    filename = '{}.pth'.format(filename)
    torch.save(state, filename) | 15,415 | 42.548023 | 134 | py |
3DTrans | 3DTrans-master/tools/train_utils/optimization/fastai_optim.py | # This file is modified from https://github.com/traveller59/second.pytorch
try:
from collections.abc import Iterable
except:
from collections import Iterable
import torch
from torch import nn
from torch._utils import _unflatten_dense_tensors
from torch.nn.utils import parameters_to_vector
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
def split_bn_bias(layer_groups):
    "Split the layers in `layer_groups` into batchnorm (`bn_types`) and non-batchnorm groups."
    result = []
    for group in layer_groups:
        plain, batchnorm = [], []
        for child in group.children():
            target = batchnorm if isinstance(child, bn_types) else plain
            target.append(child)
        # each input group yields a (non-bn, bn) pair of Sequentials
        result += [nn.Sequential(*plain), nn.Sequential(*batchnorm)]
    return result
def get_master(layer_groups, flat_master: bool = False):
    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
    groups = split_bn_bias(layer_groups)
    model_params = [[p for p in g.parameters() if p.requires_grad] for g in groups]
    if not flat_master:
        # one detached FP32 clone per model parameter
        master_params = [[p.clone().float().detach() for p in g] for g in model_params]
        for group in master_params:
            for p in group:
                p.requires_grad = True
        return model_params, master_params
    # flat_master: a single flat FP32 parameter per (non-empty) group
    master_params = []
    for g in model_params:
        if len(g) == 0:
            master_params.append([])
            continue
        flat = parameters_to_vector([p.data.float() for p in g])
        flat = torch.nn.Parameter(flat, requires_grad=True)
        if flat.grad is None:
            flat.grad = flat.new(*flat.size())
        master_params.append([flat])
    return model_params, master_params
def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
    "Copy the `model_params` gradients to `master_params` for the optimizer step."
    if flat_master:
        # concatenate every model grad of a group into the single flat master grad
        for m_group, f_group in zip(model_params, master_params):
            if len(f_group) != 0:
                grads = [p.grad.data.float() for p in m_group]
                f_group[0].grad.data.copy_(parameters_to_vector(grads))
        return
    for m_group, f_group in zip(model_params, master_params):
        for src, dst in zip(m_group, f_group):
            if src.grad is None:
                dst.grad = None
            else:
                if dst.grad is None:
                    dst.grad = dst.data.new(*dst.data.size())
                dst.grad.data.copy_(src.grad.data)
def master2model(model_params, master_params, flat_master: bool = False) -> None:
    "Copy `master_params` to `model_params`."
    if flat_master:
        # slice the flat FP32 master back into per-parameter chunks
        for m_group, f_group in zip(model_params, master_params):
            if len(m_group) != 0:
                chunks = _unflatten_dense_tensors(f_group[0].data, m_group)
                for dst, chunk in zip(m_group, chunks):
                    dst.data.copy_(chunk)
        return
    for m_group, f_group in zip(model_params, master_params):
        for dst, src in zip(m_group, f_group):
            dst.data.copy_(src.data)
def listify(p=None, q=None):
    "Make `p` listy and the same length as `q`."
    if p is None:
        p = []
    elif isinstance(p, str) or not isinstance(p, Iterable):
        # strings and scalars both become single-element lists
        p = [p]
    if type(q) == int:
        n = q
    elif q is None:
        n = len(p)
    else:
        n = len(q)
    if len(p) == 1:
        p = p * n  # broadcast a singleton to the target length
    assert len(p) == n, f'List len mismatch ({len(p)} vs {n})'
    return list(p)
def trainable_params(m: nn.Module):
    "Return list of trainable params in `m`."
    # lazily yields only parameters with requires_grad set
    return filter(lambda p: p.requires_grad, m.parameters())
def is_tuple(x) -> bool:
    """Return True iff *x* is a tuple (including tuple subclasses)."""
    return isinstance(x, tuple)
# copy from fastai.
class OptimWrapper():
    "Basic wrapper around `opt` to simplify hyper-parameters changes."
    # The wrapped optimizer's param_groups are laid out as alternating
    # (non-batchnorm, batchnorm) pairs produced by split_bn_bias; the
    # [::2] / [1::2] slicing throughout this class relies on that layout.
    def __init__(self, opt, wd, true_wd: bool = False, bn_wd: bool = True):
        self.opt, self.true_wd, self.bn_wd = opt, true_wd, bn_wd
        self.opt_keys = list(self.opt.param_groups[0].keys())
        self.opt_keys.remove('params')
        self.read_defaults()
        self.wd = wd
    @classmethod
    def create(cls, opt_func, lr,
               layer_groups, **kwargs):
        "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
        split_groups = split_bn_bias(layer_groups)
        opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
        opt = cls(opt, **kwargs)
        opt.lr, opt.opt_func = listify(lr, layer_groups), opt_func
        return opt
    def new(self, layer_groups):
        "Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."
        opt_func = getattr(self, 'opt_func', self.opt.__class__)
        split_groups = split_bn_bias(layer_groups)
        opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
        return self.create(opt_func, self.lr, layer_groups, wd=self.wd, true_wd=self.true_wd, bn_wd=self.bn_wd)
    def __repr__(self) -> str:
        return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
    # Pytorch optimizer methods
    def step(self) -> None:
        "Set weight decay and step optimizer."
        # weight decay outside of optimizer step (AdamW)
        if self.true_wd:
            # decoupled decay: shrink weights directly, then zero the
            # optimizer's own weight_decay so it is not applied twice
            for lr, wd, pg1, pg2 in zip(self._lr, self._wd, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
                for p in pg1['params']:
                    # When some parameters are fixed: Shaoshuai Shi
                    if p.requires_grad is False:
                        continue
                    p.data.mul_(1 - wd * lr)
                if self.bn_wd:
                    for p in pg2['params']:
                        # When some parameters are fixed: Shaoshuai Shi
                        if p.requires_grad is False:
                            continue
                        p.data.mul_(1 - wd * lr)
            self.set_val('weight_decay', listify(0, self._wd))
        self.opt.step()
    def zero_grad(self) -> None:
        "Clear optimizer gradients."
        self.opt.zero_grad()
    # Passthrough to the inner opt.
    # NOTE: returns None (instead of raising AttributeError) for attributes
    # the inner optimizer lacks, which can mask typos.
    def __getattr__(self, k: str):
        return getattr(self.opt, k, None)
    def clear(self):
        "Reset the state of the inner optimizer."
        sd = self.state_dict()
        sd['state'] = {}
        self.load_state_dict(sd)
    # Hyperparameters as properties
    @property
    def lr(self) -> float:
        return self._lr[-1]
    @lr.setter
    def lr(self, val: float) -> None:
        self._lr = self.set_val('lr', listify(val, self._lr))
    @property
    def mom(self) -> float:
        return self._mom[-1]
    @mom.setter
    def mom(self, val: float) -> None:
        # SGD-style optimizers expose 'momentum'; Adam-style use 'betas'
        if 'momentum' in self.opt_keys:
            self.set_val('momentum', listify(val, self._mom))
        elif 'betas' in self.opt_keys:
            self.set_val('betas', (listify(val, self._mom), self._beta))
        self._mom = listify(val, self._mom)
    @property
    def beta(self) -> float:
        return None if self._beta is None else self._beta[-1]
    @beta.setter
    def beta(self, val: float) -> None:
        "Set beta (or alpha as makes sense for given optimizer)."
        if val is None: return
        if 'betas' in self.opt_keys:
            self.set_val('betas', (self._mom, listify(val, self._beta)))
        elif 'alpha' in self.opt_keys:
            self.set_val('alpha', listify(val, self._beta))
        self._beta = listify(val, self._beta)
    @property
    def wd(self) -> float:
        return self._wd[-1]
    @wd.setter
    def wd(self, val: float) -> None:
        "Set weight decay."
        # with true_wd the decay is applied manually in step(), so the inner
        # optimizer's weight_decay is only set in the non-true_wd case
        if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
        self._wd = listify(val, self._wd)
    # Helper functions
    def read_defaults(self) -> None:
        "Read the values inside the optimizer for the hyper-parameters."
        self._beta = None
        if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
        if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
        if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
        if 'betas' in self.opt_keys: self._mom, self._beta = self.read_val('betas')
        if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
    def set_val(self, key: str, val, bn_groups: bool = True):
        "Set `val` inside the optimizer dictionary at `key`."
        # a tuple of two lists (e.g. betas) is zipped into per-group pairs
        if is_tuple(val): val = [(v1, v2) for v1, v2 in zip(*val)]
        for v, pg1, pg2 in zip(val, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
            pg1[key] = v
            if bn_groups: pg2[key] = v
        return val
    def read_val(self, key: str):
        "Read a hyperparameter `key` in the optimizer dictionary."
        # only the non-bn groups ([::2]) are read; bn groups mirror them
        val = [pg[key] for pg in self.opt.param_groups[::2]]
        if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
        return val
class FastAIMixedOptim(OptimWrapper):
    """Mixed-precision variant of OptimWrapper: the model holds (possibly FP16)
    weights while the optimizer steps FP32 master copies."""
    @classmethod
    def create(cls, opt_func, lr,
               layer_groups, model, flat_master=False, loss_scale=512.0, **kwargs):
        "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
        opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
        opt.model_params, opt.master_params = get_master(layer_groups, flat_master)
        opt.flat_master = flat_master
        opt.loss_scale = loss_scale
        opt.model = model
        # Changes the optimizer so that the optimization step is done in FP32.
        # opt = self.learn.opt
        mom, wd, beta = opt.mom, opt.wd, opt.beta
        # duplicate each lr for the (non-bn, bn) group pair created by split_bn_bias
        lrs = [lr for lr in opt._lr for _ in range(2)]
        opt_params = [{'params': mp, 'lr': lr} for mp, lr in zip(opt.master_params, lrs)]
        opt.opt = opt_func(opt_params)
        opt.mom, opt.wd, opt.beta = mom, wd, beta
        return opt
    def step(self):
        # copy model grads to the FP32 masters, un-scale by the static loss
        # scale, step in FP32, then copy the updated masters back to the model
        model_g2master_g(self.model_params, self.master_params, self.flat_master)
        for group in self.master_params:
            for param in group: param.grad.div_(self.loss_scale)
        super(FastAIMixedOptim, self).step()
        self.model.zero_grad()
        # Update the params from master to model.
        master2model(self.model_params, self.master_params, self.flat_master)
| 10,535 | 38.758491 | 117 | py |
3DTrans | 3DTrans-master/tools/train_utils/optimization/learning_schedules_fastai.py | # This file is modified from https://github.com/traveller59/second.pytorch
import math
from functools import partial
import numpy as np
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
class LRSchedulerStep(object):
    """Piecewise per-step LR/momentum scheduler.

    `lr_phases` / `mom_phases` are sequences of `(start_fraction, func)` pairs:
    `start_fraction` in [0, 1) marks where the phase begins relative to
    `total_step`, and `func(pct)` maps phase progress in [0, 1] to a value.
    In `step()` the last phase whose start has been reached wins.
    """
    def __init__(self, fai_optimizer: OptimWrapper, total_step, lr_phases,
                 mom_phases):
        # if not isinstance(fai_optimizer, OptimWrapper):
        #     raise TypeError('{} is not a fastai OptimWrapper'.format(
        #         type(fai_optimizer).__name__))
        self.optimizer = fai_optimizer
        self.total_step = total_step
        # The two phase lists were previously built by duplicated loops; the
        # shared logic now lives in _build_phases. This also fixes a latent
        # bug where the monotonicity assert compared an already-scaled step
        # count against a raw fraction (spurious failure for >2 phases).
        self.lr_phases = self._build_phases(lr_phases, total_step)
        assert self.lr_phases[0][0] == 0
        self.mom_phases = self._build_phases(mom_phases, total_step)
        assert self.mom_phases[0][0] == 0
    @staticmethod
    def _build_phases(phases, total_step):
        """Turn (start_fraction, func) pairs into (start_step, end_step, func) triples."""
        built = []
        prev_start = None
        for i, (start, lambda_func) in enumerate(phases):
            if prev_start is not None:
                assert prev_start < start, 'phase starts must be strictly increasing'
            prev_start = start
            if isinstance(lambda_func, str):
                # SECURITY NOTE: eval() executes arbitrary code -- string phase
                # functions must only come from trusted configuration files.
                lambda_func = eval(lambda_func)
            # each phase ends where the next begins; the last runs to total_step
            end = int(phases[i + 1][0] * total_step) if i < len(phases) - 1 else total_step
            built.append((int(start * total_step), end, lambda_func))
        return built
    def step(self, step):
        """Update optimizer.lr and optimizer.mom for global step index `step`."""
        for start, end, func in self.lr_phases:
            if step >= start:
                self.optimizer.lr = func((step - start) / (end - start))
        for start, end, func in self.mom_phases:
            if step >= start:
                self.optimizer.mom = func((step - start) / (end - start))
def annealing_cos(start, end, pct):
    "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
    weight = np.cos(np.pi * pct) + 1  # 2 at pct=0, 0 at pct=1
    half_span = (start - end) / 2
    return end + half_span * weight
class OneCycle(LRSchedulerStep):
    """One-cycle policy: LR rises cosine-wise from lr_max/div_factor to lr_max
    over the first pct_start of training, then anneals down to
    lr_max/div_factor/1e4; momentum moves in the opposite direction between
    moms[0] and moms[1]."""
    def __init__(self, fai_optimizer, total_step, lr_max, moms, div_factor,
                 pct_start):
        self.lr_max = lr_max
        self.moms = moms
        self.div_factor = div_factor
        self.pct_start = pct_start
        # (removed unused locals a1/a2 that were computed but never read)
        low_lr = self.lr_max / self.div_factor
        lr_phases = ((0, partial(annealing_cos, low_lr, self.lr_max)),
                     (self.pct_start,
                      partial(annealing_cos, self.lr_max, low_lr / 1e4)))
        mom_phases = ((0, partial(annealing_cos, *self.moms)),
                      (self.pct_start, partial(annealing_cos,
                                               *self.moms[::-1])))
        # start the optimizer at the cycle's initial values
        fai_optimizer.lr, fai_optimizer.mom = low_lr, self.moms[0]
        super().__init__(fai_optimizer, total_step, lr_phases, mom_phases)
class CosineWarmupLR(lr_sched._LRScheduler):
    """Warmup scheduler: the LR rises from eta_min to each base LR along a
    cosine half-wave over T_max steps (mirror image of cosine annealing)."""
    def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
        self.T_max = T_max
        self.eta_min = eta_min
        super(CosineWarmupLR, self).__init__(optimizer, last_epoch)
    def get_lr(self):
        lrs = []
        for base_lr in self.base_lrs:
            span = base_lr - self.eta_min
            lrs.append(self.eta_min + span * (1 - math.cos(math.pi * self.last_epoch / self.T_max)) / 2)
        return lrs
class FakeOptim:
    """Minimal optimizer stand-in exposing only `lr` and `mom` attributes,
    used to trace scheduler output without a real optimizer."""
    def __init__(self):
        self.lr, self.mom = 0, 0
if __name__ == "__main__":
    # Smoke test: drive OneCycle with a FakeOptim for 100 steps and plot the
    # resulting LR curve, then the momentum curve.
    import matplotlib.pyplot as plt
    opt = FakeOptim()  # 3e-3, wd=0.4, div_factor=10
    schd = OneCycle(opt, 100, 3e-3, (0.95, 0.85), 10.0, 0.1)
    lrs = []
    moms = []
    for i in range(100):
        schd.step(i)
        lrs.append(opt.lr)
        moms.append(opt.mom)
    plt.plot(lrs)
    # plt.plot(moms)
    plt.show()
    plt.plot(moms)
    plt.show()
| 4,169 | 35.26087 | 118 | py |
3DTrans | 3DTrans-master/tools/train_utils/optimization/__init__.py | from functools import partial
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
from .learning_schedules_fastai import CosineWarmupLR, OneCycle
def build_optimizer(model, optim_cfg):
if optim_cfg.OPTIMIZER == 'adam':
optimizer = optim.Adam(model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY)
elif optim_cfg.OPTIMIZER == 'sgd':
optimizer = optim.SGD(
model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY,
momentum=optim_cfg.MOMENTUM
)
elif optim_cfg.OPTIMIZER == 'adam_onecycle':
def children(m: nn.Module):
return list(m.children())
def num_children(m: nn.Module) -> int:
return len(children(m))
flatten_model = lambda m: sum(map(flatten_model, m.children()), []) if num_children(m) else [m]
get_layer_groups = lambda m: [nn.Sequential(*flatten_model(m))]
optimizer_func = partial(optim.Adam, betas=(0.9, 0.99))
optimizer = OptimWrapper.create(
optimizer_func, 3e-3, get_layer_groups(model), wd=optim_cfg.WEIGHT_DECAY, true_wd=True, bn_wd=True
)
else:
raise NotImplementedError
return optimizer
def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg):
decay_steps = [x * total_iters_each_epoch for x in optim_cfg.DECAY_STEP_LIST]
def lr_lbmd(cur_epoch):
cur_decay = 1
for decay_step in decay_steps:
if cur_epoch >= decay_step:
cur_decay = cur_decay * optim_cfg.LR_DECAY
return max(cur_decay, optim_cfg.LR_CLIP / optim_cfg.LR)
lr_warmup_scheduler = None
total_steps = total_iters_each_epoch * total_epochs
if optim_cfg.OPTIMIZER == 'adam_onecycle':
lr_scheduler = OneCycle(
optimizer, total_steps, optim_cfg.LR, list(optim_cfg.MOMS), optim_cfg.DIV_FACTOR, optim_cfg.PCT_START
)
else:
lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
if optim_cfg.LR_WARMUP:
lr_warmup_scheduler = CosineWarmupLR(
optimizer, T_max=optim_cfg.WARMUP_EPOCH * len(total_iters_each_epoch),
eta_min=optim_cfg.LR / optim_cfg.DIV_FACTOR
)
return lr_scheduler, lr_warmup_scheduler
| 2,401 | 36.53125 | 113 | py |
3DTrans | 3DTrans-master/tools/visual_utils/open3d_vis_utils.py | """
Open3d visualization tool box
Written by Jihan YANG
All rights preserved from 2021 - present.
"""
import open3d
import torch
import matplotlib
import numpy as np
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def get_coor_colors(obj_labels):
    """
    Args:
        obj_labels: 1 is ground, labels > 1 indicates different instance cluster
    Returns:
        rgb: [N, 3]. color for each point.
    """
    palette = list(matplotlib.colors.XKCD_COLORS.values())
    num_labels = obj_labels.max()
    chosen = palette[:num_labels + 1]
    # one RGBA row per label id, indexed directly by the label array
    rgba_table = np.array([matplotlib.colors.to_rgba_array(c) for c in chosen])
    per_point = rgba_table[obj_labels]
    return per_point.squeeze()[:, :3]
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_labels=None, ref_scores=None, point_colors=None, draw_origin=True):
    """Render a point cloud with optional GT boxes (blue) and predicted boxes
    (green, or per-label colormap) in a blocking Open3D window."""
    # accept torch tensors transparently
    if isinstance(points, torch.Tensor):
        points = points.cpu().numpy()
    if isinstance(gt_boxes, torch.Tensor):
        gt_boxes = gt_boxes.cpu().numpy()
    if isinstance(ref_boxes, torch.Tensor):
        ref_boxes = ref_boxes.cpu().numpy()
    vis = open3d.visualization.Visualizer()
    vis.create_window()
    vis.get_render_option().point_size = 1.0
    vis.get_render_option().background_color = np.zeros(3)
    # draw origin
    if draw_origin:
        axis_pcd = open3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])
        vis.add_geometry(axis_pcd)
    pts = open3d.geometry.PointCloud()
    pts.points = open3d.utility.Vector3dVector(points[:, :3])
    vis.add_geometry(pts)
    if point_colors is None:
        # default: all points white
        pts.colors = open3d.utility.Vector3dVector(np.ones((points.shape[0], 3)))
    else:
        pts.colors = open3d.utility.Vector3dVector(point_colors)
    if gt_boxes is not None:
        vis = draw_box(vis, gt_boxes, (0, 0, 1))
    if ref_boxes is not None:
        vis = draw_box(vis, ref_boxes, (0, 1, 0), ref_labels, ref_scores)
    vis.run()  # blocks until the window is closed
    vis.destroy_window()
def translate_boxes_to_open3d_instance(gt_boxes):
    """
             4-------- 6
           /|         /|
          5 -------- 3 .
          | |        | |
          . 7 -------- 1
          |/         |/
          2 -------- 0
    """
    # gt_boxes: one box as [x, y, z, dx, dy, dz, heading] (as used by draw_box)
    center = gt_boxes[0:3]
    lwh = gt_boxes[3:6]
    # +1e-10 avoids a degenerate zero axis-angle vector when heading == 0
    axis_angles = np.array([0, 0, gt_boxes[6] + 1e-10])
    rot = open3d.geometry.get_rotation_matrix_from_axis_angle(axis_angles)
    box3d = open3d.geometry.OrientedBoundingBox(center, rot, lwh)
    line_set = open3d.geometry.LineSet.create_from_oriented_bounding_box(box3d)
    # import ipdb; ipdb.set_trace(context=20)
    # append two extra edges (1-4 and 7-6) on top of the default box wireframe
    lines = np.asarray(line_set.lines)
    lines = np.concatenate([lines, np.array([[1, 4], [7, 6]])], axis=0)
    line_set.lines = open3d.utility.Vector2iVector(lines)
    return line_set, box3d
def draw_box(vis, gt_boxes, color=(0, 1, 0), ref_labels=None, score=None):
    """Add one wireframe LineSet per box to `vis` and return it.

    When `ref_labels` is given, each box is colored from `box_colormap` by its
    label instead of the uniform `color`. `score` is currently unused.
    """
    for idx in range(gt_boxes.shape[0]):
        line_set, box3d = translate_boxes_to_open3d_instance(gt_boxes[idx])
        box_color = color if ref_labels is None else box_colormap[ref_labels[idx]]
        line_set.paint_uniform_color(box_color)
        vis.add_geometry(line_set)
    return vis
| 3,413 | 28.179487 | 126 | py |
3DTrans | 3DTrans-master/tools/visual_utils/visualize_utils.py | import mayavi.mlab as mlab
import numpy as np
import torch
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def check_numpy_to_torch(x):
    """Convert a numpy array to a float32 torch tensor.

    Returns:
        (value, was_numpy): the converted tensor (or `x` unchanged) plus a flag
        so callers can convert results back to numpy afterwards.
    """
    if not isinstance(x, np.ndarray):
        return x, False
    return torch.from_numpy(x).float(), True
def rotate_points_along_z(points, angle):
    """
    Args:
        points: (B, N, 3 + C)
        angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        rotated points (extra C channels untouched), numpy if the input was numpy
    """
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)
    cosa, sina = torch.cos(angle), torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # build one (3, 3) z-rotation matrix per batch element, row by row
    row_x = torch.stack((cosa, sina, zeros), dim=1)
    row_y = torch.stack((-sina, cosa, zeros), dim=1)
    row_z = torch.stack((zeros, zeros, ones), dim=1)
    rot_matrix = torch.stack((row_x, row_y, row_z), dim=1).float()
    xyz_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
    points_rot = torch.cat((xyz_rot, points[:, :, 3:]), dim=-1)
    return points_rot.numpy() if is_numpy else points_rot
def boxes_to_corners_3d(boxes3d):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        corners3d: (N, 8, 3), numpy if the input was numpy
    """
    boxes3d, is_numpy = check_numpy_to_torch(boxes3d)
    # unit-cube corner signs in the canonical (heading=0) frame, in the order
    # of the ASCII diagram above; divided by 2 so scaling by (dx, dy, dz)
    # yields half-extents around the center
    template = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2
    corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
    # rotate corners about z by each box heading, then translate to centers
    corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners3d += boxes3d[:, None, 0:3]
    return corners3d.numpy() if is_numpy else corners3d
def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),
                  show_intensity=False, size=(600, 600), draw_origin=True):
    """Scatter a point cloud into a mayavi figure; optionally color by the
    4th column (intensity) and draw colored XYZ axis segments at the origin."""
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)
    if show_intensity:
        # 4th positional arg (pts[:, 3]) drives the colormap
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    else:
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    if draw_origin:
        # cube marker at origin; axis segments: blue=x, green=y, red=z
        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
        mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), tube_radius=0.1)
    return fig
def draw_sphere_pts(pts, color=(0, 1, 0), fig=None, bgcolor=(0, 0, 0), scale_factor=0.2):
    """Draw points as spheres; `color` may be a single RGB tuple or a per-point
    numpy color array (installed via a custom mayavi lookup table)."""
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(600, 600))
    if isinstance(color, np.ndarray) and color.shape[0] == 1:
        # single-row color array: collapse to a normalized RGB tuple
        color = color[0]
        color = (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)
    if isinstance(color, np.ndarray):
        # per-point colors: build an RGBA LUT and index it by point order
        pts_color = np.zeros((pts.__len__(), 4), dtype=np.uint8)
        pts_color[:, 0:3] = color
        pts_color[:, 3] = 255
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], np.arange(0, pts_color.__len__()), mode='sphere',
                          scale_factor=scale_factor, figure=fig)
        G.glyph.color_mode = 'color_by_scalar'
        G.glyph.scale_mode = 'scale_by_vector'
        G.module_manager.scalar_lut_manager.lut.table = pts_color
    else:
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='sphere', color=color,
                      colormap='gnuplot', scale_factor=scale_factor, figure=fig)
    # origin marker and axis segments: blue=x, green=y, red=z
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
    mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), line_width=3, tube_radius=None, figure=fig)
    return fig
def draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)):
    """Draw the four edges of the axis-aligned cell (x1, y1)-(x2, y2) at z=0."""
    edges = (
        ([x1, x1], [y1, y2]),  # left
        ([x2, x2], [y1, y2]),  # right
        ([x1, x2], [y1, y1]),  # bottom
        ([x1, x2], [y2, y2]),  # top
    )
    for xs, ys in edges:
        mlab.plot3d(xs, ys, [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)
    return fig
def draw_multi_grid_range(fig, grid_size=20, bv_range=(-60, -60, 60, 60)):
    """Tile the bird's-eye-view range with grid_size x grid_size cells."""
    x_min, y_min, x_max, y_max = bv_range
    for gx in range(x_min, x_max, grid_size):
        for gy in range(y_min, y_max, grid_size):
            fig = draw_grid(gx, gy, gx + grid_size, gy + grid_size, fig)
    return fig
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
    """Render a point cloud plus optional GT boxes (blue) and predicted boxes
    (green, or colored per label via box_colormap) into a mayavi figure."""
    # accept torch tensors transparently
    if not isinstance(points, np.ndarray):
        points = points.cpu().numpy()
    if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
        ref_boxes = ref_boxes.cpu().numpy()
    if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
        gt_boxes = gt_boxes.cpu().numpy()
    if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
        ref_scores = ref_scores.cpu().numpy()
    if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
        ref_labels = ref_labels.cpu().numpy()
    fig = visualize_pts(points)
    fig = draw_multi_grid_range(fig, bv_range=(0, -40, 80, 40))
    if gt_boxes is not None:
        corners3d = boxes_to_corners_3d(gt_boxes)
        fig = draw_corners3d(corners3d, fig=fig, color=(0, 0, 1), max_num=100)
    if ref_boxes is not None and len(ref_boxes) > 0:
        ref_corners3d = boxes_to_corners_3d(ref_boxes)
        if ref_labels is None:
            fig = draw_corners3d(ref_corners3d, fig=fig, color=(0, 1, 0), cls=ref_scores, max_num=100)
        else:
            # one draw call per label so each class gets its own color
            for k in range(ref_labels.min(), ref_labels.max() + 1):
                cur_color = tuple(box_colormap[k % len(box_colormap)])
                mask = (ref_labels == k)
                fig = draw_corners3d(ref_corners3d[mask], fig=fig, color=cur_color, cls=ref_scores[mask], max_num=100)
    mlab.view(azimuth=-179, elevation=54.0, distance=104.0, roll=90.0)
    return fig
def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
    """
    Draw up to `max_num` boxes as 12-edge wireframes (plus a diagonal cross).
    :param corners3d: (N, 8, 3)
    :param fig: mayavi figure to draw into
    :param color: RGB tuple applied to all edges and labels
    :param line_width: edge width
    :param cls: optional per-box labels (scores if ndarray, otherwise str-formatted)
    :param tag: unused
    :param max_num: cap on the number of boxes drawn
    :return: fig
    """
    import mayavi.mlab as mlab
    num = min(max_num, len(corners3d))
    for n in range(num):
        b = corners3d[n]  # (8, 3)
        if cls is not None:
            # label text anchored at corner 6
            if isinstance(cls, np.ndarray):
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
            else:
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
        for k in range(0, 4):
            # bottom-face edge (k, k+1 mod 4)
            i, j = k, (k + 1) % 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
            # top-face edge (k+4, (k+1 mod 4)+4)
            i, j = k + 4, (k + 1) % 4 + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
            # vertical edge (k, k+4)
            i, j = k, k + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
        # diagonal cross on the 0-1-4-5 face; NOTE(review): presumably marks
        # the box heading direction -- confirm against the corner convention
        i, j = 0, 5
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
        i, j = 1, 4
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
    return fig
| 8,540 | 38.541667 | 121 | py |
3DTrans | 3DTrans-master/pcdet/models/__init__.py | from collections import namedtuple
import numpy as np
import torch
from .detectors import build_detector, build_detector_multi_db, build_detector_multi_db_3
try:
import kornia
except:
pass
# print('Warning: kornia is not installed. This package is only required by CaDDN')
def build_network(model_cfg, num_class, dataset):
    """Construct a single-dataset detector from its model config."""
    return build_detector(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
# def build_network_multi_db_v2(model_cfg, num_class, dataset):
# model = build_detector_multi_db_v2(
# model_cfg=model_cfg, num_class=num_class, dataset=dataset
# )
# return model
def build_network_multi_db(model_cfg, num_class, num_class_s2, dataset, dataset_s2, source_one_name):
    """Construct a detector trained jointly on two source datasets."""
    return build_detector_multi_db(
        model_cfg=model_cfg,
        num_class=num_class,
        num_class_s2=num_class_s2,
        dataset=dataset,
        dataset_s2=dataset_s2,
        source_one_name=source_one_name,
    )
def build_network_multi_db_3(model_cfg, num_class, num_class_s2, num_class_s3, dataset, dataset_s2, dataset_s3, source_one_name, source_1):
    """Construct a detector trained jointly on three source datasets."""
    return build_detector_multi_db_3(
        model_cfg=model_cfg,
        num_class=num_class,
        num_class_s2=num_class_s2,
        num_class_s3=num_class_s3,
        dataset=dataset,
        dataset_s2=dataset_s2,
        dataset_s3=dataset_s3,
        source_one_name=source_one_name,
        source_1=source_1,
    )
def load_data_to_gpu(batch_dict):
    """Move every numpy-array entry of ``batch_dict`` onto the GPU, in place.

    Non-array values and bookkeeping keys are left untouched. Images are
    converted through kornia, image shapes become int tensors, and every
    other array becomes a float tensor.
    """
    skip_keys = ('frame_id', 'metadata', 'calib', 'db_flag')
    for key, val in batch_dict.items():
        if not isinstance(val, np.ndarray) or key in skip_keys:
            continue
        if key == 'images':
            batch_dict[key] = kornia.image_to_tensor(val).float().cuda().contiguous()
        elif key == 'image_shape':
            batch_dict[key] = torch.from_numpy(val).int().cuda()
        else:
            batch_dict[key] = torch.from_numpy(val).float().cuda()
def model_fn_decorator():
    """Return a closure running one forward/loss step on GPU-loaded data."""
    ModelReturn = namedtuple('ModelReturn', ['loss', 'tb_dict', 'disp_dict'])

    def model_func(model, batch_dict, **forward_args):
        load_data_to_gpu(batch_dict)
        ret_dict, tb_dict, disp_dict = model(batch_dict, **forward_args)
        loss = ret_dict['loss'].mean()
        # DistributedDataParallel wraps the real model behind ``.module``.
        target = model if hasattr(model, 'update_global_step') else model.module
        target.update_global_step()
        return ModelReturn(loss, tb_dict, disp_dict)

    return model_func
| 2,525 | 33.135135 | 139 | py |
3DTrans | 3DTrans-master/pcdet/models/mdf_models/dense_2d_moe_add_wo_SE.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.utils import common_utils
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 3x3 conv whose padding equals the dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 1x1 conv with no padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=0,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block: a single 3x3 conv + BN, skip connection, LeakyReLU.

    ``downsample`` (if given) projects the identity path so both branches
    match in shape before being summed.
    """

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when present) carry the spatial stride.
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.bn1(self.conv1(x))
        out += shortcut
        return self.relu(out)
class BasicBlock_2(nn.Module):
    """Conv-BN-LeakyReLU followed by a 1x1 conv; no residual connection.

    Note the configured stride is applied by BOTH convolutions.
    """

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_2, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, 1, stride=stride, padding=0, bias=False)

    def forward(self, x):
        return self.conv2(self.relu(self.bn1(self.conv1(x))))
class BasicBlock_Rescale(nn.Module):
    """Channel-rescaling block: 1x1 conv + BN + LeakyReLU."""

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_Rescale, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.conv1 = nn.Conv2d(inplanes, planes, 1, stride=stride, padding=0, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn1(self.conv1(x)))
class SEBlock(nn.Module):
    """ Squeeze-and-excitation block """

    def __init__(self, channels, r=16):
        super(SEBlock, self).__init__()
        self.r = r
        # Bottleneck MLP over globally pooled channels -> per-channel gates in (0, 1).
        self.squeeze = nn.Sequential(
            nn.Linear(channels, channels // self.r),
            nn.ReLU(),
            nn.Linear(channels // self.r, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, chans = x.size(0), x.size(1)
        pooled = torch.mean(x, dim=(2, 3))
        gates = self.squeeze(pooled).view(batch, chans, 1, 1)
        return torch.mul(x, gates)
class DENSE_2D_MoE_ADD_wo_SE(nn.Module):
    """MoE-style BEV feature fusion across two datasets (no SE gating).

    Training: concatenates the two datasets' 2D feature maps along channels,
    computes a softmax attention mask over the two expert slots, reduces the
    masked mix back to per-dataset channels, and adds it to each dataset's
    own features. Inference: only one dataset (``db_source``) is present, so
    its features are duplicated into both expert slots.

    Fix: the train/eval branches previously duplicated the entire fusion
    computation (now shared via ``_fuse``), and an unsupported ``db_source``
    crashed with a NameError on the undefined ``features_used`` — it now
    raises an explicit ValueError.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.N = 2  # number of datasets / expert slots
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N * self.model_cfg.INPUT_CONV_CHANNEL)
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear function f: bottleneck residual stack producing mask logits.
        downsample = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.shared_channels // 4, 1, bias=False),
            nn.BatchNorm2d(self.shared_channels // 4))
        self.non_linear = nn.Sequential(
            BasicBlock(self.shared_channels, self.shared_channels // 4, downsample=downsample),
            BasicBlock(self.shared_channels // 4, self.shared_channels // 4),
            nn.Conv2d(self.shared_channels // 4, self.shared_channels, 1))
        # Dimensionality reduction back to the per-dataset channel count.
        downsample = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.per_task_channels, 1, bias=False),
            nn.BatchNorm2d(self.per_task_channels))
        self.dimensionality_reduction = BasicBlock(
            self.shared_channels, self.per_task_channels, downsample=downsample)

    def _fuse(self, feat_a, feat_b):
        """Fuse two expert feature maps into one per-dataset-channel map."""
        concat = torch.cat([feat_a, feat_b], 1)
        B, C, H, W = concat.size()
        shared = self.non_linear(concat)
        # Softmax over the N expert slots, applied per channel group.
        mask = F.softmax(shared.view(B, C // self.N, self.N, H, W), dim=2)
        shared = torch.mul(mask, concat.view(B, C // self.N, self.N, H, W)).view(B, -1, H, W)
        return self.dimensionality_reduction(shared)

    def forward(self, data_dict):
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features_2d = data_dict['spatial_features_2d']
        feat_s1 = spatial_features_2d[split_tag_s1, :, :, :]
        feat_s2 = spatial_features_2d[split_tag_s2, :, :, :]
        if self.training:
            shared = self._fuse(feat_s1, feat_s2)
            out_s1 = shared + feat_s1
            out_s2 = shared + feat_s2
            data_dict['spatial_features_2d'] = torch.cat([out_s1, out_s2], 0)
        else:
            # Single-dataset inference: duplicate the active dataset's features.
            if self.db_source == 1:
                features_used = feat_s1
            elif self.db_source == 2:
                features_used = feat_s2
            else:
                raise ValueError('db_source must be 1 or 2, got %s' % self.db_source)
            shared = self._fuse(features_used, features_used)
            data_dict['spatial_features_2d'] = shared + features_used
        return data_dict
3DTrans | 3DTrans-master/pcdet/models/mdf_models/dense_2d_moe_add_wo_attention.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.utils import common_utils
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 3x3 conv whose padding equals the dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 1x1 conv with no padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=0,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block: a single 3x3 conv + BN, skip connection, LeakyReLU.

    ``downsample`` (if given) projects the identity path so both branches
    match in shape before being summed.
    """

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when present) carry the spatial stride.
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.bn1(self.conv1(x))
        out += shortcut
        return self.relu(out)
class BasicBlock_2(nn.Module):
    """Conv-BN-LeakyReLU followed by a 1x1 conv; no residual connection.

    Note the configured stride is applied by BOTH convolutions.
    """

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_2, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, 1, stride=stride, padding=0, bias=False)

    def forward(self, x):
        return self.conv2(self.relu(self.bn1(self.conv1(x))))
class BasicBlock_Rescale(nn.Module):
    """Channel-rescaling block: 1x1 conv + BN + LeakyReLU."""

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_Rescale, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.conv1 = nn.Conv2d(inplanes, planes, 1, stride=stride, padding=0, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn1(self.conv1(x)))
class SEBlock(nn.Module):
    """ Squeeze-and-excitation block """

    def __init__(self, channels, r=16):
        super(SEBlock, self).__init__()
        self.r = r
        # Bottleneck MLP over globally pooled channels -> per-channel gates in (0, 1).
        self.squeeze = nn.Sequential(
            nn.Linear(channels, channels // self.r),
            nn.ReLU(),
            nn.Linear(channels // self.r, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, chans = x.size(0), x.size(1)
        pooled = torch.mean(x, dim=(2, 3))
        gates = self.squeeze(pooled).view(batch, chans, 1, 1)
        return torch.mul(x, gates)
class DENSE_2D_MoE_ADD_wo_AT(nn.Module):
    """BEV feature fusion across two datasets without the attention mask.

    Training: concatenates the two datasets' 2D feature maps along channels,
    transforms them (``non_linear``), reduces back to per-dataset channels,
    then adds an SE-gated copy of the shared map to each dataset's features.
    Inference: only one dataset (``db_source``) is present, so its features
    are duplicated into both expert slots.

    Fix: the train/eval branches previously duplicated the whole fusion
    computation (now shared via ``_fuse``; the unused B,C,H,W unpack is
    gone), and an unsupported ``db_source`` crashed with a NameError on the
    undefined ``features_used`` — it now raises an explicit ValueError.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.N = 2  # number of datasets / expert slots
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N * self.model_cfg.INPUT_CONV_CHANNEL)
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear function f: bottleneck residual stack.
        downsample = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.shared_channels // 4, 1, bias=False),
            nn.BatchNorm2d(self.shared_channels // 4))
        self.non_linear = nn.Sequential(
            BasicBlock(self.shared_channels, self.shared_channels // 4, downsample=downsample),
            BasicBlock(self.shared_channels // 4, self.shared_channels // 4),
            nn.Conv2d(self.shared_channels // 4, self.shared_channels, 1))
        # Dimensionality reduction back to the per-dataset channel count.
        downsample = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.per_task_channels, 1, bias=False),
            nn.BatchNorm2d(self.per_task_channels))
        self.dimensionality_reduction = BasicBlock(
            self.shared_channels, self.per_task_channels, downsample=downsample)
        # Per-dataset squeeze-and-excitation gates.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def _fuse(self, feat_a, feat_b):
        """Concat two expert feature maps, transform, reduce to per-dataset channels."""
        concat = torch.cat([feat_a, feat_b], 1)
        shared = self.non_linear(concat)
        return self.dimensionality_reduction(shared)

    def forward(self, data_dict):
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features_2d = data_dict['spatial_features_2d']
        feat_s1 = spatial_features_2d[split_tag_s1, :, :, :]
        feat_s2 = spatial_features_2d[split_tag_s2, :, :, :]
        if self.training:
            shared = self._fuse(feat_s1, feat_s2)
            out_s1 = self.se_s1(shared) + feat_s1
            out_s2 = self.se_s2(shared) + feat_s2
            data_dict['spatial_features_2d'] = torch.cat([out_s1, out_s2], 0)
        else:
            # Single-dataset inference: duplicate the active dataset's features.
            if self.db_source == 1:
                shared = self._fuse(feat_s1, feat_s1)
                data_dict['spatial_features_2d'] = self.se_s1(shared) + feat_s1
            elif self.db_source == 2:
                shared = self._fuse(feat_s2, feat_s2)
                data_dict['spatial_features_2d'] = self.se_s2(shared) + feat_s2
            else:
                raise ValueError('db_source must be 1 or 2, got %s' % self.db_source)
        return data_dict
3DTrans | 3DTrans-master/pcdet/models/mdf_models/dense_3d_cr.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.utils import common_utils
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 3x3 conv whose padding equals the dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-conv residual block: conv-BN-ReLU-conv-BN + skip, ReLU.

    ``downsample`` (if given) projects the identity path so both branches
    match in shape before being summed.
    """

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when present) carry the spatial stride.
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, 3, stride=1, padding=1, bias=False)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class SEBlock(nn.Module):
    """ Squeeze-and-excitation block """

    def __init__(self, channels, r=16):
        super(SEBlock, self).__init__()
        self.r = r
        # Bottleneck MLP over globally pooled channels -> per-channel gates in (0, 1).
        self.squeeze = nn.Sequential(
            nn.Linear(channels, channels // self.r),
            nn.ReLU(),
            nn.Linear(channels // self.r, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, chans = x.size(0), x.size(1)
        pooled = torch.mean(x, dim=(2, 3))
        gates = self.squeeze(pooled).view(batch, chans, 1, 1)
        return torch.mul(x, gates)
class DENSE_3D_DT(nn.Module):
    """Per-dataset squeeze-and-excitation over the 3D backbone's BEV features.

    Splits the batch by dataset, recalibrates each split with its own SE
    block, and concatenates the results back along the batch dimension.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        # One SE gate per dataset.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def forward(self, data_dict):
        tag_s1, tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        feats = data_dict['spatial_features']
        recalibrated = [
            self.se_s1(feats[tag_s1, :, :, :]),
            self.se_s2(feats[tag_s2, :, :, :]),
        ]
        data_dict['spatial_features'] = torch.cat(recalibrated, 0)
        return data_dict
class DENSE_3D_CR(nn.Module):
    """Cross-dataset fusion of 3D-backbone BEV features with channel + spatial attention.

    Training: concatenates the two datasets' feature maps along channels,
    computes a softmax expert mask modulated by a channel-max spatial
    attention, reduces the masked mix to per-dataset channels, then adds an
    SE-gated copy to each dataset's own features. Inference: only one
    dataset (``db_source``) is present, so its features are duplicated into
    both expert slots.

    Fix: the train/eval branches previously duplicated the whole fusion
    computation (now shared via ``_fuse``), and an unsupported ``db_source``
    crashed with a NameError on the undefined ``features_used`` — it now
    raises an explicit ValueError.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.N = self.model_cfg.NUM_OF_DB
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N * self.model_cfg.INPUT_CONV_CHANNEL)
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear function f: bottleneck residual stack producing mask logits.
        downsample = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.shared_channels // 4, 1, bias=False),
            nn.BatchNorm2d(self.shared_channels // 4))
        self.non_linear = nn.Sequential(
            BasicBlock(self.shared_channels, self.shared_channels // 4, downsample=downsample),
            nn.Conv2d(self.shared_channels // 4, self.shared_channels, 1))
        # Dimensionality reduction back to the per-dataset channel count.
        downsample = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.per_task_channels, 1, bias=False),
            nn.BatchNorm2d(self.per_task_channels))
        self.dimensionality_reduction = BasicBlock(
            self.shared_channels, self.per_task_channels, downsample=downsample)
        # Per-dataset squeeze-and-excitation gates.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def _fuse(self, feat_a, feat_b):
        """Fuse two expert feature maps into one per-dataset-channel map."""
        concat = torch.cat([feat_a, feat_b], 1)
        B, C, H, W = concat.size()
        shared = self.non_linear(concat)
        # Spatial attention shared across datasets: channel-wise max map.
        spatial_att = torch.max(concat, dim=1).values.view(B, 1, 1, H, W)
        # Softmax over the N expert slots, modulated by the spatial attention.
        mask = F.softmax(shared.view(B, C // self.N, self.N, H, W), dim=2) * spatial_att
        shared = torch.mul(mask, concat.view(B, C // self.N, self.N, H, W)).view(B, -1, H, W)
        return self.dimensionality_reduction(shared)

    def forward(self, data_dict):
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features = data_dict['spatial_features']
        feat_s1 = spatial_features[split_tag_s1, :, :, :]
        feat_s2 = spatial_features[split_tag_s2, :, :, :]
        if self.training:
            shared = self._fuse(feat_s1, feat_s2)
            out_s1 = self.se_s1(shared) + feat_s1
            out_s2 = self.se_s2(shared) + feat_s2
            data_dict['spatial_features'] = torch.cat([out_s1, out_s2], 0)
        else:
            # Single-dataset inference: duplicate the active dataset's features.
            if self.db_source == 1:
                shared = self._fuse(feat_s1, feat_s1)
                data_dict['spatial_features'] = self.se_s1(shared) + feat_s1
            elif self.db_source == 2:
                shared = self._fuse(feat_s2, feat_s2)
                data_dict['spatial_features'] = self.se_s2(shared) + feat_s2
            else:
                raise ValueError('db_source must be 1 or 2, got %s' % self.db_source)
        return data_dict
3DTrans | 3DTrans-master/pcdet/models/mdf_models/dense_cr.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.utils import common_utils
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 3x3 conv whose padding equals the dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a bias-free 1x1 conv with no padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=0,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block: a single 3x3 conv + BN, skip connection, LeakyReLU.

    ``downsample`` (if given) projects the identity path so both branches
    match in shape before being summed.
    """

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when present) carry the spatial stride.
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.bn1(self.conv1(x))
        out += shortcut
        return self.relu(out)
class BasicBlock_2(nn.Module):
    """Conv-BN-LeakyReLU followed by a 1x1 conv; no residual connection.

    Note the configured stride is applied by BOTH convolutions.
    """

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_2, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, 1, stride=stride, padding=0, bias=False)

    def forward(self, x):
        return self.conv2(self.relu(self.bn1(self.conv1(x))))
class BasicBlock_Rescale(nn.Module):
    """Channel-rescaling block: 1x1 conv + BN + LeakyReLU."""

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock_Rescale, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.conv1 = nn.Conv2d(inplanes, planes, 1, stride=stride, padding=0, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn1(self.conv1(x)))
class SEBlock(nn.Module):
    """ Squeeze-and-excitation block """

    def __init__(self, channels, r=16):
        super(SEBlock, self).__init__()
        self.r = r
        # Bottleneck MLP over globally pooled channels -> per-channel gates in (0, 1).
        self.squeeze = nn.Sequential(
            nn.Linear(channels, channels // self.r),
            nn.ReLU(),
            nn.Linear(channels // self.r, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, chans = x.size(0), x.size(1)
        pooled = torch.mean(x, dim=(2, 3))
        gates = self.squeeze(pooled).view(batch, chans, 1, 1)
        return torch.mul(x, gates)
class SA_SEBlock(nn.Module):
    """ Squeeze-and-excitation block """

    def __init__(self, channels, r=16):
        super(SA_SEBlock, self).__init__()
        self.r = r
        # Two strided 8x8 convs aggregate spatial context before the gating MLP.
        self.squeeze_1 = nn.Sequential(
            nn.Conv2d(channels, channels // self.r, 8, 3, 1, bias=True),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
        self.squeeze_2 = nn.Sequential(
            nn.Conv2d(channels // self.r, channels // self.r, 8, 3, 1, bias=True),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
        self.squeeze = nn.Sequential(
            nn.Linear(channels // self.r, channels // self.r),
            nn.ReLU(),
            nn.Linear(channels // self.r, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, chans = x.size(0), x.size(1)
        att = self.squeeze_2(self.squeeze_1(x))
        gates = self.squeeze(torch.mean(att, dim=(2, 3))).view(batch, chans, 1, 1)
        return torch.mul(x, gates)
class DENSE_2D_DT(nn.Module):
    """Per-dataset squeeze-and-excitation over the 2D BEV feature maps.

    Splits the batch by dataset, recalibrates each split with its own SE
    block, and concatenates the results back along the batch dimension.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        # One SE gate per dataset.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def forward(self, data_dict):
        tag_s1, tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        feats = data_dict['spatial_features_2d']
        recalibrated = [
            self.se_s1(feats[tag_s1, :, :, :]),
            self.se_s2(feats[tag_s2, :, :, :]),
        ]
        data_dict['spatial_features_2d'] = torch.cat(recalibrated, 0)
        return data_dict
class DENSE_CR(nn.Module):
    """Cross-dataset 2D BEV feature fusion with channel + spatial attention.

    Training: concatenates the two datasets' 2D feature maps along channels,
    computes a softmax expert mask modulated by a channel-max spatial
    attention, reduces the masked mix to per-dataset channels, then adds an
    SE-gated copy to each dataset's own features. Inference: only one
    dataset (``db_source``) is present, so its features are duplicated into
    both expert slots.

    Fix: the train/eval branches previously duplicated the whole fusion
    computation (now shared via ``_fuse``), and an unsupported ``db_source``
    crashed with a NameError on the undefined ``features_used`` — it now
    raises an explicit ValueError.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.N = self.model_cfg.NUM_OF_DB
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N * self.model_cfg.INPUT_CONV_CHANNEL)
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear function f: bottleneck residual stack producing mask logits.
        downsample = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.shared_channels // 8, 1, bias=False),
            nn.BatchNorm2d(self.shared_channels // 8))
        self.non_linear = nn.Sequential(
            BasicBlock(self.shared_channels, self.shared_channels // 8, downsample=downsample),
            nn.Conv2d(self.shared_channels // 8, self.shared_channels, 1))
        # Dimensionality reduction back to the per-dataset channel count.
        self.dimensionality_reduction = BasicBlock_Rescale(self.shared_channels, self.per_task_channels)
        # Per-dataset squeeze-and-excitation gates.
        self.se_s1 = SEBlock(self.per_task_channels, r=32)
        self.se_s2 = SEBlock(self.per_task_channels, r=32)

    def _fuse(self, feat_a, feat_b):
        """Fuse two expert feature maps into one per-dataset-channel map."""
        concat = torch.cat([feat_a, feat_b], 1)
        B, C, H, W = concat.size()
        shared = self.non_linear(concat)
        # Spatial attention shared across datasets: channel-wise max map.
        spatial_att = torch.max(concat, dim=1).values.view(B, 1, 1, H, W)
        # Softmax over the N expert slots, modulated by the spatial attention.
        mask = F.softmax(shared.view(B, C // self.N, self.N, H, W), dim=2) * spatial_att
        shared = torch.mul(mask, concat.view(B, C // self.N, self.N, H, W)).view(B, -1, H, W)
        return self.dimensionality_reduction(shared)

    def forward(self, data_dict):
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features_2d = data_dict['spatial_features_2d']
        feat_s1 = spatial_features_2d[split_tag_s1, :, :, :]
        feat_s2 = spatial_features_2d[split_tag_s2, :, :, :]
        if self.training:
            shared = self._fuse(feat_s1, feat_s2)
            out_s1 = self.se_s1(shared) + feat_s1
            out_s2 = self.se_s2(shared) + feat_s2
            data_dict['spatial_features_2d'] = torch.cat([out_s1, out_s2], 0)
        else:
            # Single-dataset inference: duplicate the active dataset's features.
            if self.db_source == 1:
                shared = self._fuse(feat_s1, feat_s1)
                data_dict['spatial_features_2d'] = self.se_s1(shared) + feat_s1
            elif self.db_source == 2:
                shared = self._fuse(feat_s2, feat_s2)
                data_dict['spatial_features_2d'] = self.se_s2(shared) + feat_s2
            else:
                raise ValueError('db_source must be 1 or 2, got %s' % self.db_source)
        return data_dict
class DENSE_2D_CR_ADD(nn.Module):
    """Cross-dataset 2D BEV feature fusion with channel attention and SE gating.

    Training: concatenates the two datasets' 2D feature maps along channels,
    computes a softmax expert mask, reduces the masked mix to per-dataset
    channels, then adds an SE-gated copy to each dataset's own features.
    Inference: only one dataset (``db_source``) is present, so its features
    are duplicated into both expert slots.

    Fix: the train/eval branches previously duplicated the whole fusion
    computation (now shared via ``_fuse``), and an unsupported ``db_source``
    crashed with a NameError on the undefined ``features_used`` — it now
    raises an explicit ValueError.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.N = self.model_cfg.NUM_OF_DB
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        self.shared_channels = int(self.N * self.model_cfg.INPUT_CONV_CHANNEL)
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear function f: bottleneck residual stack producing mask logits.
        downsample = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.shared_channels // 4, 1, bias=False),
            nn.BatchNorm2d(self.shared_channels // 4))
        self.non_linear = nn.Sequential(
            BasicBlock(self.shared_channels, self.shared_channels // 4, downsample=downsample),
            BasicBlock(self.shared_channels // 4, self.shared_channels // 4),
            nn.Conv2d(self.shared_channels // 4, self.shared_channels, 1))
        # Dimensionality reduction back to the per-dataset channel count.
        downsample = nn.Sequential(
            nn.Conv2d(self.shared_channels, self.per_task_channels, 1, bias=False),
            nn.BatchNorm2d(self.per_task_channels))
        self.dimensionality_reduction = BasicBlock(
            self.shared_channels, self.per_task_channels, downsample=downsample)
        # Per-dataset squeeze-and-excitation gates.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def _fuse(self, feat_a, feat_b):
        """Fuse two expert feature maps into one per-dataset-channel map."""
        concat = torch.cat([feat_a, feat_b], 1)
        B, C, H, W = concat.size()
        shared = self.non_linear(concat)
        # Softmax over the N expert slots, applied per channel group.
        mask = F.softmax(shared.view(B, C // self.N, self.N, H, W), dim=2)
        shared = torch.mul(mask, concat.view(B, C // self.N, self.N, H, W)).view(B, -1, H, W)
        return self.dimensionality_reduction(shared)

    def forward(self, data_dict):
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features_2d = data_dict['spatial_features_2d']
        feat_s1 = spatial_features_2d[split_tag_s1, :, :, :]
        feat_s2 = spatial_features_2d[split_tag_s2, :, :, :]
        if self.training:
            shared = self._fuse(feat_s1, feat_s2)
            out_s1 = self.se_s1(shared) + feat_s1
            out_s2 = self.se_s2(shared) + feat_s2
            data_dict['spatial_features_2d'] = torch.cat([out_s1, out_s2], 0)
        else:
            # Single-dataset inference: duplicate the active dataset's features.
            if self.db_source == 1:
                shared = self._fuse(feat_s1, feat_s1)
                data_dict['spatial_features_2d'] = self.se_s1(shared) + feat_s1
            elif self.db_source == 2:
                shared = self._fuse(feat_s2, feat_s2)
                data_dict['spatial_features_2d'] = self.se_s2(shared) + feat_s2
            else:
                raise ValueError('db_source must be 1 or 2, got %s' % self.db_source)
        return data_dict
class DENSE_2D_CR_ADD_SIM(nn.Module):
    """Cross-dataset BEV feature fusion, "ADD_SIM" variant.

    Unlike the train/eval-branching sibling module, this variant always fuses
    the two dataset sub-batches: channel-concat -> non-linear mixing -> softmax
    attention over the dataset slots -> dimensionality reduction -> per-dataset
    SE blocks with a residual connection. The fused features are written back
    into ``data_dict['spatial_features_2d']`` in place.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.N = self.model_cfg.NUM_OF_DB
        self.source_one_name = self.model_cfg.SOURCE_ONE_NAME
        self.per_task_channels = self.model_cfg.INPUT_CONV_CHANNEL
        # Shared branch operates on the channel-concatenation of all N datasets.
        self.shared_channels = int(self.N*self.model_cfg.INPUT_CONV_CHANNEL)
        self.db_source = int(self.model_cfg.db_source)
        # Non-linear function f: bottleneck residual blocks + 1x1 expansion.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.shared_channels//4, 1, bias=False),
                                   nn.BatchNorm2d(self.shared_channels//4))
        self.non_linear = nn.Sequential(BasicBlock(self.shared_channels, self.shared_channels//4, downsample=downsample),
                                        BasicBlock(self.shared_channels//4, self.shared_channels//4),
                                        nn.Conv2d(self.shared_channels//4, self.shared_channels, 1))
        # Dimensionality reduction back to the per-task channel count.
        downsample = nn.Sequential(nn.Conv2d(self.shared_channels, self.per_task_channels, 1, bias=False),
                                   nn.BatchNorm2d(self.per_task_channels))
        self.dimensionality_reduction = BasicBlock(self.shared_channels, self.per_task_channels,
                                                   downsample=downsample)
        # One squeeze-and-excitation block per dataset.
        self.se_s1 = SEBlock(self.per_task_channels)
        self.se_s2 = SEBlock(self.per_task_channels)

    def forward(self, data_dict):
        """Fuse both dataset sub-batches and return the mutated ``data_dict``."""
        # Get shared representation: split the batch into the two dataset parts.
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, data_dict)
        spatial_features_2d = data_dict['spatial_features_2d']
        spatial_features_2d_s1 = spatial_features_2d[split_tag_s1,:,:,:]
        spatial_features_2d_s2 = spatial_features_2d[split_tag_s2,:,:,:]
        # Concat the dataset-specific features into the channel-dimension
        concat = torch.cat([spatial_features_2d_s1, spatial_features_2d_s2], 1)
        B, C, H, W = concat.size()
        shared = self.non_linear(concat)
        # dataset attention mask: softmax over the N dataset slots (dim=2)
        mask = F.softmax(shared.view(B, C//self.N, self.N, H, W), dim = 2)
        shared = torch.mul(mask, concat.view(B, C//self.N, self.N, H, W)).view(B,-1, H, W)
        # Perform dimensionality reduction
        shared = self.dimensionality_reduction(shared)
        # dataset-specific squeeze-and-excitation with residual connection
        out_s1 = self.se_s1(shared) + spatial_features_2d_s1
        out_s2 = self.se_s2(shared) + spatial_features_2d_s2
        # Re-stack the two sub-batches along the batch dimension.
        concat_f = torch.cat([out_s1, out_s2], 0)
        data_dict['spatial_features_2d'] = concat_f
        # Fixed: the original line had extraction junk ("| 16,634 | ... | py |")
        # fused after the return statement, which was a syntax error.
        return data_dict
3DTrans | 3DTrans-master/pcdet/models/detectors/detector3d_template_IASSD.py | import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate_IASSD(nn.Module):
    """Base template for IA-SSD-style 3D detectors.

    Instantiates the sub-modules listed in ``self.module_topology`` from
    ``model_cfg`` (each ``build_*`` step is a no-op when its config key is
    absent) and provides shared post-processing, recall bookkeeping and
    checkpoint loading. Concrete detectors subclass this and implement
    ``forward``.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.dataset = dataset
        self.class_names = dataset.class_names
        # Buffer so the step counter is saved/restored with the checkpoint.
        self.register_buffer('global_step', torch.LongTensor(1).zero_())
        # Build order matters: later modules consume feature dims recorded by
        # earlier ones in model_info_dict.
        self.module_topology = [
            'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe',
            'backbone_2d', 'dense_head', 'point_head', 'roi_head'
        ]

    @property
    def mode(self):
        return 'TRAIN' if self.training else 'TEST'

    def update_global_step(self):
        self.global_step += 1

    def build_networks(self):
        """Instantiate every module in ``self.module_topology`` order; return the module list."""
        model_info_dict = {
            'module_list': [],
            'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
            'num_point_features': self.dataset.point_feature_encoder.num_point_features,
            'grid_size': self.dataset.grid_size,
            'point_cloud_range': self.dataset.point_cloud_range,
            'voxel_size': self.dataset.voxel_size,
            'depth_downsample_factor': self.dataset.depth_downsample_factor
        }
        for module_name in self.module_topology:
            module, model_info_dict = getattr(self, 'build_%s' % module_name)(
                model_info_dict=model_info_dict
            )
            self.add_module(module_name, module)
        return model_info_dict['module_list']

    def build_vfe(self, model_info_dict):
        """Build the voxel feature encoder; updates ``num_point_features``."""
        if self.model_cfg.get('VFE', None) is None:
            return None, model_info_dict
        vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
            model_cfg=self.model_cfg.VFE,
            num_point_features=model_info_dict['num_rawpoint_features'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            grid_size=model_info_dict['grid_size'],
            depth_downsample_factor=model_info_dict['depth_downsample_factor']
        )
        model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
        model_info_dict['module_list'].append(vfe_module)
        return vfe_module, model_info_dict

    def build_backbone_3d(self, model_info_dict):
        """Build the 3D backbone; records its output feature dims and channels."""
        if self.model_cfg.get('BACKBONE_3D', None) is None:
            return None, model_info_dict
        backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
            model_cfg=self.model_cfg.BACKBONE_3D,
            # IA-SSD backbones take num_class (class-aware point sampling head);
            # this is the only signature difference vs. Detector3DTemplate.
            num_class=self.num_class,
            input_channels=model_info_dict['num_point_features'],
            grid_size=model_info_dict['grid_size'],
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range']
        )
        model_info_dict['module_list'].append(backbone_3d_module)
        model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
        model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
            if hasattr(backbone_3d_module, 'backbone_channels') else None
        return backbone_3d_module, model_info_dict

    def build_map_to_bev_module(self, model_info_dict):
        """Build the 3D-to-BEV projection; records ``num_bev_features``."""
        if self.model_cfg.get('MAP_TO_BEV', None) is None:
            return None, model_info_dict
        map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
            model_cfg=self.model_cfg.MAP_TO_BEV,
            grid_size=model_info_dict['grid_size']
        )
        model_info_dict['module_list'].append(map_to_bev_module)
        model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
        return map_to_bev_module, model_info_dict

    def build_backbone_2d(self, model_info_dict):
        """Build the BEV 2D backbone; updates ``num_bev_features``."""
        if self.model_cfg.get('BACKBONE_2D', None) is None:
            return None, model_info_dict
        backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
            model_cfg=self.model_cfg.BACKBONE_2D,
            input_channels=model_info_dict['num_bev_features']
        )
        model_info_dict['module_list'].append(backbone_2d_module)
        model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
        return backbone_2d_module, model_info_dict

    def build_pfe(self, model_info_dict):
        """Build the point feature extractor; records point-feature dims (pre/post fusion)."""
        if self.model_cfg.get('PFE', None) is None:
            return None, model_info_dict
        pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
            model_cfg=self.model_cfg.PFE,
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            num_bev_features=model_info_dict['num_bev_features'],
            num_rawpoint_features=model_info_dict['num_rawpoint_features']
        )
        model_info_dict['module_list'].append(pfe_module)
        model_info_dict['num_point_features'] = pfe_module.num_point_features
        model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
        return pfe_module, model_info_dict

    def build_dense_head(self, model_info_dict):
        """Build the dense (first-stage) head."""
        if self.model_cfg.get('DENSE_HEAD', None) is None:
            return None, model_info_dict
        dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
            model_cfg=self.model_cfg.DENSE_HEAD,
            input_channels=model_info_dict['num_bev_features'],
            num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
            class_names=self.class_names,
            grid_size=model_info_dict['grid_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            # The presence of a second-stage (ROI_HEAD) config doubles as the flag.
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False),
            voxel_size=model_info_dict.get('voxel_size', False)
        )
        model_info_dict['module_list'].append(dense_head_module)
        return dense_head_module, model_info_dict

    def build_point_head(self, model_info_dict):
        """Build the auxiliary point head."""
        if self.model_cfg.get('POINT_HEAD', None) is None:
            return None, model_info_dict
        if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            num_point_features = model_info_dict['num_point_features_before_fusion']
        else:
            num_point_features = model_info_dict['num_point_features']
        point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
            model_cfg=self.model_cfg.POINT_HEAD,
            input_channels=num_point_features,
            num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
        )
        model_info_dict['module_list'].append(point_head_module)
        return point_head_module, model_info_dict

    def build_roi_head(self, model_info_dict):
        """Build the second-stage RoI head."""
        if self.model_cfg.get('ROI_HEAD', None) is None:
            return None, model_info_dict
        point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
            model_cfg=self.model_cfg.ROI_HEAD,
            input_channels=model_info_dict['num_point_features'],
            backbone_channels=model_info_dict['backbone_channels'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
        )
        model_info_dict['module_list'].append(point_head_module)
        return point_head_module, model_info_dict

    def forward(self, **kwargs):
        raise NotImplementedError

    def post_processing(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois)  1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                # Flattened (N1+N2+...) predictions: select this sample's rows.
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                if not isinstance(cls_preds, list):
                    cls_preds = [cls_preds]
                    multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
                else:
                    multihead_label_mapping = batch_dict['multihead_label_mapping']
                cur_start_idx = 0
                pred_scores, pred_labels, pred_boxes = [], [], []
                for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
                    assert cur_cls_preds.shape[1] == len(cur_label_mapping)
                    cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
                    cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
                        cls_scores=cur_cls_preds, box_preds=cur_box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    cur_pred_labels = cur_label_mapping[cur_pred_labels]
                    pred_scores.append(cur_pred_scores)
                    pred_labels.append(cur_pred_labels)
                    pred_boxes.append(cur_pred_boxes)
                    cur_start_idx += cur_cls_preds.shape[0]
                final_scores = torch.cat(pred_scores, dim=0)
                final_labels = torch.cat(pred_labels, dim=0)
                final_boxes = torch.cat(pred_boxes, dim=0)
            else:
                cls_preds, label_preds = torch.max(cls_preds, dim=-1)
                if batch_dict.get('has_class_labels', False):
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    label_preds = label_preds + 1
                selected, selected_scores = model_nms_utils.class_agnostic_nms(
                    box_scores=cls_preds, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            # Recall bookkeeping only in 'normal' mode (configurable via RECALL_MODE).
            if post_process_cfg.get('RECALL_MODE', 'normal') == 'normal':
                recall_dict = self.generate_recall_record(
                    box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                    recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                    thresh_list=post_process_cfg.RECALL_THRESH_LIST
                )
            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict

    @staticmethod
    def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
        """Accumulate per-threshold roi/rcnn recall counts against this sample's gt boxes."""
        if 'gt_boxes' not in data_dict:
            return recall_dict
        rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
        gt_boxes = data_dict['gt_boxes'][batch_index]
        if recall_dict.__len__() == 0:
            recall_dict = {'gt': 0}
            for cur_thresh in thresh_list:
                recall_dict['roi_%s' % (str(cur_thresh))] = 0
                recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
        cur_gt = gt_boxes
        k = cur_gt.__len__() - 1
        # Strip zero-padded gt rows from the tail. Fixed `k > 0` -> `k >= 0`:
        # with `>`, a sample whose gt boxes are all padding kept one all-zero
        # row, inflating the 'gt' count and feeding a degenerate box to the IoU
        # kernel. `>=` matches the sibling Detector3DTemplate in this repo.
        while k >= 0 and cur_gt[k].sum() == 0:
            k -= 1
        cur_gt = cur_gt[:k + 1]
        if cur_gt.shape[0] > 0:
            if box_preds.shape[0] > 0:
                iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
            else:
                iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
            if rois is not None:
                iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
            for cur_thresh in thresh_list:
                if iou3d_rcnn.shape[0] == 0:
                    recall_dict['rcnn_%s' % str(cur_thresh)] += 0
                else:
                    rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
                if rois is not None:
                    roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
            recall_dict['gt'] += cur_gt.shape[0]
        else:
            gt_iou = box_preds.new_zeros(box_preds.shape[0])
        return recall_dict

    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Load a checkpoint state dict, adapting spconv 1.x weight layouts when shapes differ.

        Returns (local state_dict, dict of keys actually updated).
        """
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)
        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x
                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()
            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))
        if strict:
            self.load_state_dict(update_model_state)
        else:
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state

    def load_params_from_file(self, filename, logger, to_cpu=False):
        """Load model weights (not optimizer state) from a checkpoint file, non-strict."""
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        model_state_disk = checkpoint['model_state']
        version = checkpoint.get("version", None)
        if version is not None:
            logger.info('==> Checkpoint trained from version: %s' % version)
        state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
        for key in state_dict:
            if key not in update_model_state:
                logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
        logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))

    def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
        """Resume training: load model weights strictly plus optimizer state; return (it, epoch)."""
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        epoch = checkpoint.get('epoch', -1)
        it = checkpoint.get('it', 0.0)
        self._load_state_dict(checkpoint['model_state'], strict=True)
        if optimizer is not None:
            if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
                logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
                            % (filename, 'CPU' if to_cpu else 'GPU'))
                optimizer.load_state_dict(checkpoint['optimizer_state'])
            else:
                # Fall back to a sibling "<name>_optim.<ext>" file next to the checkpoint.
                assert filename[-4] == '.', filename
                src_file, ext = filename[:-4], filename[-3:]
                optimizer_filename = '%s_optim.%s' % (src_file, ext)
                if os.path.exists(optimizer_filename):
                    optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
                    optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
        if 'version' in checkpoint:
            print('==> Checkpoint trained from version: %s' % checkpoint['version'])
        logger.info('==> Done')
        return it, epoch
| 19,155 | 45.382567 | 119 | py |
3DTrans | 3DTrans-master/pcdet/models/detectors/detector3d_template.py | import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate(nn.Module):
    """Base template for two-stage/one-stage 3D detectors.
    Instantiates the sub-modules listed in ``self.module_topology`` from
    ``model_cfg`` (each ``build_*`` step is a no-op when its config key is
    absent) and provides shared post-processing, recall bookkeeping and
    checkpoint loading. Concrete detectors subclass this and implement
    ``forward``.
    """
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.dataset = dataset
        self.class_names = dataset.class_names
        # Buffer so the step counter is saved/restored with the checkpoint.
        self.register_buffer('global_step', torch.LongTensor(1).zero_())
        # Build order matters: later modules consume feature dims recorded by
        # earlier ones in model_info_dict. Includes 'unetscn', unlike the
        # IA-SSD template.
        self.module_topology = [
            'vfe', 'unetscn', 'backbone_3d', 'map_to_bev_module', 'pfe',
            'backbone_2d', 'dense_head', 'point_head', 'roi_head'
        ]
    @property
    def mode(self):
        return 'TRAIN' if self.training else 'TEST'
    def update_global_step(self):
        self.global_step += 1
    def build_networks(self):
        """Instantiate every module in ``self.module_topology`` order; return the module list."""
        model_info_dict = {
            'module_list': [],
            'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
            'num_point_features': self.dataset.point_feature_encoder.num_point_features,
            'grid_size': self.dataset.grid_size,
            'point_cloud_range': self.dataset.point_cloud_range,
            'voxel_size': self.dataset.voxel_size,
            'depth_downsample_factor': self.dataset.depth_downsample_factor
        }
        for module_name in self.module_topology:
            module, model_info_dict = getattr(self, 'build_%s' % module_name)(
                model_info_dict=model_info_dict
            )
            self.add_module(module_name, module)
        return model_info_dict['module_list']
    def build_unetscn(self, model_info_dict):
        """Build the optional UNet-style sparse-conv module (registered under pfe.__all__)."""
        if self.model_cfg.get('UNETSCN', None) is None:
            return None, model_info_dict
        unetscn_module = pfe.__all__[self.model_cfg.UNETSCN.NAME](
            model_cfg=self.model_cfg.UNETSCN,
            input_channels=model_info_dict['num_point_features'],
            grid_size=model_info_dict['grid_size']
        )
        model_info_dict['module_list'].append(unetscn_module)
        return unetscn_module, model_info_dict
    def build_vfe(self, model_info_dict):
        """Build the voxel feature encoder; updates ``num_point_features``."""
        if self.model_cfg.get('VFE', None) is None:
            return None, model_info_dict
        vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
            model_cfg=self.model_cfg.VFE,
            num_point_features=model_info_dict['num_rawpoint_features'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            grid_size=model_info_dict['grid_size'],
            depth_downsample_factor=model_info_dict['depth_downsample_factor']
        )
        model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
        model_info_dict['module_list'].append(vfe_module)
        return vfe_module, model_info_dict
    def build_backbone_3d(self, model_info_dict):
        """Build the 3D backbone; records its output feature dims and channels."""
        if self.model_cfg.get('BACKBONE_3D', None) is None:
            return None, model_info_dict
        backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
            model_cfg=self.model_cfg.BACKBONE_3D,
            input_channels=model_info_dict['num_point_features'],
            grid_size=model_info_dict['grid_size'],
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range']
        )
        model_info_dict['module_list'].append(backbone_3d_module)
        model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
        model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
            if hasattr(backbone_3d_module, 'backbone_channels') else None
        return backbone_3d_module, model_info_dict
    def build_map_to_bev_module(self, model_info_dict):
        """Build the 3D-to-BEV projection; records ``num_bev_features``."""
        if self.model_cfg.get('MAP_TO_BEV', None) is None:
            return None, model_info_dict
        map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
            model_cfg=self.model_cfg.MAP_TO_BEV,
            grid_size=model_info_dict['grid_size']
        )
        model_info_dict['module_list'].append(map_to_bev_module)
        model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
        return map_to_bev_module, model_info_dict
    def build_backbone_2d(self, model_info_dict):
        """Build the BEV 2D backbone; updates ``num_bev_features``."""
        if self.model_cfg.get('BACKBONE_2D', None) is None:
            return None, model_info_dict
        backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
            model_cfg=self.model_cfg.BACKBONE_2D,
            input_channels=model_info_dict['num_bev_features']
        )
        model_info_dict['module_list'].append(backbone_2d_module)
        model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
        return backbone_2d_module, model_info_dict
    def build_pfe(self, model_info_dict):
        """Build the point feature extractor; records point-feature dims (pre/post fusion)."""
        if self.model_cfg.get('PFE', None) is None:
            return None, model_info_dict
        pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
            model_cfg=self.model_cfg.PFE,
            voxel_size=model_info_dict['voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            num_bev_features=model_info_dict['num_bev_features'],
            num_rawpoint_features=model_info_dict['num_rawpoint_features']
        )
        model_info_dict['module_list'].append(pfe_module)
        model_info_dict['num_point_features'] = pfe_module.num_point_features
        model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
        return pfe_module, model_info_dict
    def build_dense_head(self, model_info_dict):
        """Build the dense (first-stage) head; the presence of an ROI_HEAD config
        doubles as the predict_boxes_when_training flag."""
        if self.model_cfg.get('DENSE_HEAD', None) is None:
            return None, model_info_dict
        dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
            model_cfg=self.model_cfg.DENSE_HEAD,
            input_channels=model_info_dict['num_bev_features'],
            num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
            class_names=self.class_names,
            grid_size=model_info_dict['grid_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False),
            voxel_size=model_info_dict.get('voxel_size', False)
        )
        model_info_dict['module_list'].append(dense_head_module)
        return dense_head_module, model_info_dict
    def build_point_head(self, model_info_dict):
        """Build the auxiliary point head."""
        if self.model_cfg.get('POINT_HEAD', None) is None:
            return None, model_info_dict
        if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
            num_point_features = model_info_dict['num_point_features_before_fusion']
        else:
            num_point_features = model_info_dict['num_point_features']
        point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
            model_cfg=self.model_cfg.POINT_HEAD,
            input_channels=num_point_features,
            num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
            predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
        )
        model_info_dict['module_list'].append(point_head_module)
        return point_head_module, model_info_dict
    def build_roi_head(self, model_info_dict):
        """Build the second-stage RoI head."""
        if self.model_cfg.get('ROI_HEAD', None) is None:
            return None, model_info_dict
        point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
            model_cfg=self.model_cfg.ROI_HEAD,
            input_channels=model_info_dict['num_point_features'],
            backbone_channels=model_info_dict['backbone_channels'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            voxel_size=model_info_dict['voxel_size'],
            num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
        )
        model_info_dict['module_list'].append(point_head_module)
        return point_head_module, model_info_dict
    def forward(self, **kwargs):
        # Subclasses implement the actual detection forward pass.
        raise NotImplementedError
    def post_processing(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois)  1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                # Flattened (N1+N2+...) predictions: select this sample's rows.
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                if not isinstance(cls_preds, list):
                    cls_preds = [cls_preds]
                    multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
                else:
                    multihead_label_mapping = batch_dict['multihead_label_mapping']
                cur_start_idx = 0
                pred_scores, pred_labels, pred_boxes = [], [], []
                # Per-head NMS: each head's class scores map back to global labels
                # through its cur_label_mapping.
                for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
                    assert cur_cls_preds.shape[1] == len(cur_label_mapping)
                    cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
                    cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
                        cls_scores=cur_cls_preds, box_preds=cur_box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    cur_pred_labels = cur_label_mapping[cur_pred_labels]
                    pred_scores.append(cur_pred_scores)
                    pred_labels.append(cur_pred_labels)
                    pred_boxes.append(cur_pred_boxes)
                    cur_start_idx += cur_cls_preds.shape[0]
                final_scores = torch.cat(pred_scores, dim=0)
                final_labels = torch.cat(pred_labels, dim=0)
                final_boxes = torch.cat(pred_boxes, dim=0)
            else:
                cls_preds, label_preds = torch.max(cls_preds, dim=-1)
                if batch_dict.get('has_class_labels', False):
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    # Shift argmax indices to 1-based class labels.
                    label_preds = label_preds + 1
                selected, selected_scores = model_nms_utils.class_agnostic_nms(
                    box_scores=cls_preds, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )
            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
    @staticmethod
    def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
        """Accumulate per-threshold roi/rcnn recall counts against this sample's gt boxes."""
        if 'gt_boxes' not in data_dict:
            return recall_dict
        rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
        gt_boxes = data_dict['gt_boxes'][batch_index]
        if recall_dict.__len__() == 0:
            recall_dict = {'gt': 0}
            for cur_thresh in thresh_list:
                recall_dict['roi_%s' % (str(cur_thresh))] = 0
                recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
        cur_gt = gt_boxes
        k = cur_gt.__len__() - 1
        # Strip zero-padded gt rows from the tail; `>=` lets an all-padding
        # sample reduce to an empty tensor.
        while k >= 0 and cur_gt[k].sum() == 0:
            k -= 1
        cur_gt = cur_gt[:k + 1]
        if cur_gt.shape[0] > 0:
            if box_preds.shape[0] > 0:
                iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
            else:
                iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
            if rois is not None:
                iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
            for cur_thresh in thresh_list:
                if iou3d_rcnn.shape[0] == 0:
                    recall_dict['rcnn_%s' % str(cur_thresh)] += 0
                else:
                    rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
                if rois is not None:
                    roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
            recall_dict['gt'] += cur_gt.shape[0]
        else:
            # NOTE(review): gt_iou is assigned but never used or returned here.
            gt_iou = box_preds.new_zeros(box_preds.shape[0])
        return recall_dict
    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Load a checkpoint state dict, adapting spconv 1.x weight layouts when shapes differ.
        Returns (local state_dict, dict of keys actually updated)."""
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)
        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x
                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()
            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))
        if strict:
            self.load_state_dict(update_model_state)
        else:
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state
    def load_params_from_file(self, filename, logger, to_cpu=False):
        """Load model weights (not optimizer state) from a checkpoint file, non-strict."""
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        model_state_disk = checkpoint['model_state']
        version = checkpoint.get("version", None)
        if version is not None:
            logger.info('==> Checkpoint trained from version: %s' % version)
        state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
        for key in state_dict:
            if key not in update_model_state:
                logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
        logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))
    def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
        """Resume training: load model weights strictly plus optimizer state; return (it, epoch)."""
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
        loc_type = torch.device('cpu') if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        epoch = checkpoint.get('epoch', -1)
        it = checkpoint.get('it', 0.0)
        self._load_state_dict(checkpoint['model_state'], strict=True)
        if optimizer is not None:
            if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
                logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
                            % (filename, 'CPU' if to_cpu else 'GPU'))
                optimizer.load_state_dict(checkpoint['optimizer_state'])
            else:
                # Fall back to a sibling "<name>_optim.<ext>" file next to the checkpoint.
                assert filename[-4] == '.', filename
                src_file, ext = filename[:-4], filename[-3:]
                optimizer_filename = '%s_optim.%s' % (src_file, ext)
                if os.path.exists(optimizer_filename):
                    optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
                    optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
        if 'version' in checkpoint:
            print('==> Checkpoint trained from version: %s' % checkpoint['version'])
        logger.info('==> Done')
        return it, epoch
| 19,470 | 45.030733 | 119 | py |
3DTrans | 3DTrans-master/pcdet/models/detectors/detector3d_template_ada.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads, active_models
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class ActiveDetector3DTemplate(nn.Module):
    """Base template for active-learning 3D detectors.

    Holds config/dataset handles and builds the detection pipeline stages
    listed in `module_topology` via the matching `build_*` methods.
    """
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.dataset = dataset
        self.class_names = dataset.class_names
        # Non-trainable iteration counter, saved/restored with checkpoints.
        self.register_buffer('global_step', torch.LongTensor(1).zero_())
        # Build order: later stages consume metadata produced by earlier ones.
        self.module_topology = [
            'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe',
            'backbone_2d', 'dense_head', 'point_head', 'discriminator', 'roi_head'
        ]
@property
def mode(self):
return 'TRAIN' if self.training else 'TEST'
    def update_global_step(self):
        # In-place increment keeps `global_step` registered as the same buffer tensor.
        self.global_step += 1
    def build_networks(self):
        """Instantiate every configured stage in `module_topology` order.

        Each `build_*` helper reads and updates `model_info_dict` (feature
        widths, grid metadata) so later stages see what earlier ones produced,
        and the built module (possibly None) is registered on `self`.

        Returns:
            list of the built (non-None) modules, in topology order.
        """
        model_info_dict = {
            'module_list': [],
            'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
            'num_point_features': self.dataset.point_feature_encoder.num_point_features,
            'grid_size': self.dataset.grid_size,
            'point_cloud_range': self.dataset.point_cloud_range,
            'voxel_size': self.dataset.voxel_size,
            'depth_downsample_factor': self.dataset.depth_downsample_factor
        }
        for module_name in self.module_topology:
            # Dispatch to the stage-specific builder, e.g. 'vfe' -> build_vfe.
            module, model_info_dict = getattr(self, 'build_%s' % module_name)(
                model_info_dict=model_info_dict
            )
            self.add_module(module_name, module)
        return model_info_dict['module_list']
def build_vfe(self, model_info_dict):
if self.model_cfg.get('VFE', None) is None:
return None, model_info_dict
vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
model_cfg=self.model_cfg.VFE,
num_point_features=model_info_dict['num_rawpoint_features'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
grid_size=model_info_dict['grid_size'],
depth_downsample_factor=model_info_dict['depth_downsample_factor']
)
model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
model_info_dict['module_list'].append(vfe_module)
return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_3D', None) is None:
return None, model_info_dict
backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
model_cfg=self.model_cfg.BACKBONE_3D,
input_channels=model_info_dict['num_point_features'],
grid_size=model_info_dict['grid_size'],
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range']
)
model_info_dict['module_list'].append(backbone_3d_module)
model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
if hasattr(backbone_3d_module, 'backbone_channels') else None
return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
if self.model_cfg.get('MAP_TO_BEV', None) is None:
return None, model_info_dict
map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
model_cfg=self.model_cfg.MAP_TO_BEV,
grid_size=model_info_dict['grid_size']
)
model_info_dict['module_list'].append(map_to_bev_module)
model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
return map_to_bev_module, model_info_dict
def build_backbone_2d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_2D', None) is None:
return None, model_info_dict
backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
model_cfg=self.model_cfg.BACKBONE_2D,
input_channels=model_info_dict['num_bev_features']
)
model_info_dict['module_list'].append(backbone_2d_module)
model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
return backbone_2d_module, model_info_dict
def build_pfe(self, model_info_dict):
if self.model_cfg.get('PFE', None) is None:
return None, model_info_dict
pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
model_cfg=self.model_cfg.PFE,
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
num_bev_features=model_info_dict['num_bev_features'],
num_rawpoint_features=model_info_dict['num_rawpoint_features']
)
model_info_dict['module_list'].append(pfe_module)
model_info_dict['num_point_features'] = pfe_module.num_point_features
model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
return pfe_module, model_info_dict
def build_dense_head(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD', None) is None:
return None, model_info_dict
dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
model_cfg=self.model_cfg.DENSE_HEAD,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
class_names=self.class_names,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False),
voxel_size=model_info_dict.get('voxel_size', False)
)
model_info_dict['module_list'].append(dense_head_module)
return dense_head_module, model_info_dict
def build_point_head(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
model_cfg=self.model_cfg.POINT_HEAD,
input_channels=num_point_features,
num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def build_roi_head(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD', None) is None:
return None, model_info_dict
point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
model_cfg=self.model_cfg.ROI_HEAD,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def build_discriminator(self, model_info_dict):
if self.model_cfg.get('DISCRIMINATOR', None) is None:
return None, model_info_dict
discriminator_module = active_models.__all__[self.model_cfg.DISCRIMINATOR.NAME](
model_cfg=self.model_cfg.DISCRIMINATOR
)
model_info_dict['module_list'].append(discriminator_module)
return discriminator_module, model_info_dict
    def forward(self, **kwargs):
        # Abstract: concrete detectors implement the per-batch forward pass.
        raise NotImplementedError
def post_processing(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
multihead_label_mapping: [(num_class1), (num_class2), ...]
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
has_class_labels: True/False
roi_labels: (B, num_rois) 1 .. num_classes
batch_pred_labels: (B, num_boxes, 1)
Returns:
"""
post_process_cfg = self.model_cfg.POST_PROCESSING
batch_size = batch_dict['batch_size']
recall_dict = {}
pred_dicts = []
for index in range(batch_size):
if batch_dict.get('batch_index', None) is not None:
assert batch_dict['batch_box_preds'].shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
assert batch_dict['batch_box_preds'].shape.__len__() == 3
batch_mask = index
box_preds = batch_dict['batch_box_preds'][batch_mask]
src_box_preds = box_preds
if batch_dict['mode'] == 'active_evaluate':
roi_feature = batch_dict['roi_shared_feature'][batch_mask]
if not isinstance(batch_dict['batch_cls_preds'], list):
cls_preds = batch_dict['batch_cls_preds'][batch_mask]
src_cls_preds = cls_preds
assert cls_preds.shape[1] in [1, self.num_class]
if not batch_dict['cls_preds_normalized']:
cls_preds = torch.sigmoid(cls_preds)
else:
cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
src_cls_preds = cls_preds
if not batch_dict['cls_preds_normalized']:
cls_preds = [torch.sigmoid(x) for x in cls_preds]
if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
if not isinstance(cls_preds, list):
cls_preds = [cls_preds]
multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
else:
multihead_label_mapping = batch_dict['multihead_label_mapping']
cur_start_idx = 0
pred_scores, pred_labels, pred_boxes = [], [], []
for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
assert cur_cls_preds.shape[1] == len(cur_label_mapping)
cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
cls_scores=cur_cls_preds, box_preds=cur_box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
cur_pred_labels = cur_label_mapping[cur_pred_labels]
pred_scores.append(cur_pred_scores)
pred_labels.append(cur_pred_labels)
pred_boxes.append(cur_pred_boxes)
cur_start_idx += cur_cls_preds.shape[0]
final_scores = torch.cat(pred_scores, dim=0)
final_labels = torch.cat(pred_labels, dim=0)
final_boxes = torch.cat(pred_boxes, dim=0)
else:
cls_preds, label_preds = torch.max(cls_preds, dim=-1)
if batch_dict.get('has_class_labels', False):
label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
label_preds = batch_dict[label_key][index]
else:
label_preds = label_preds + 1
# selected, selected_scores = model_nms_utils.class_agnostic_nms(
# box_scores=cls_preds, box_preds=box_preds,
# nms_config=post_process_cfg.NMS_CONFIG,
# score_thresh=post_process_cfg.SCORE_THRESH
# )
if batch_dict['mode'] == 'active_evaluate':
if batch_dict.get('reweight_roi', None) is None:
batch_dict['post_roi_num'] = []
batch_dict['reweight_roi'] = []
batch_dict['reweight_roi_entropy'] = []
batch_dict['reweight_roi_entropy_score'] = []
batch_dict['reweight_roi_entropy_score_2'] = []
batch_dict['uncertainty_score'] = []
batch_dict['cls_preds'] = []
batch_dict['roi_feature'] = []
selected, selected_scores, selected_roi = model_nms_utils.class_agnostic_nms_with_roi(
box_scores=cls_preds, box_preds=box_preds, roi_feature=roi_feature,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
batch_dict['cls_preds'].append(cls_preds.mean().view(1))
batch_dict['roi_feature'].append(roi_feature.mean(dim=0).view(1, -1))
# reweight roi feature based on score
reweight = F.softmax(selected_scores)
reweight_roi = reweight.view(1, -1) @ selected_roi
batch_dict['reweight_roi'].append(reweight_roi)
# reweight roi feature base on entropy
entropy = -cls_preds * torch.log(cls_preds)
reweight_roi_entropy = entropy.view(-1,1) * roi_feature
reweight_roi_entropy = reweight_roi_entropy.mean(dim=0)
batch_dict['reweight_roi_entropy'].append(reweight_roi_entropy)
entropy_score = -selected_scores * torch.log(selected_scores)
reweight_roi_entropy_score = entropy_score.view(-1, 1) * selected_roi
reweight_roi_entropy_score = reweight_roi_entropy_score.mean(dim=0)
batch_dict['reweight_roi_entropy_score'].append(reweight_roi_entropy_score)
entropy_score_2 = entropy_score.view(-1, 1) + reweight.view(-1, 1)
reweight_roi_entropy_score_2 = entropy_score_2 * selected_roi
reweight_roi_entropy_score_2 = reweight_roi_entropy_score_2.mean(dim=0)
batch_dict['reweight_roi_entropy_score_2'].append(reweight_roi_entropy_score_2)
# use entropy to evaluate uncertainty
uncertainty = -selected_scores * torch.log2(selected_scores) - (1 - selected_scores) * torch.log2(1 - selected_scores)
uncertainty_score = uncertainty.view(1, -1).mean(dim=-1)
batch_dict['uncertainty_score'].append(uncertainty_score)
batch_dict['post_roi_num'].append(selected_roi.shape[0])
if index == (batch_size-1):
return batch_dict
else:
continue
else:
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
if post_process_cfg.OUTPUT_RAW_SCORE:
max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
selected_scores = max_cls_preds[selected]
final_scores = selected_scores
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
recall_dict = self.generate_recall_record(
box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
thresh_list=post_process_cfg.RECALL_THRESH_LIST
)
record_dict = {
'pred_boxes': final_boxes,
'pred_scores': final_scores,
'pred_labels': final_labels
}
pred_dicts.append(record_dict)
return pred_dicts, recall_dict
@staticmethod
def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
k = cur_gt.__len__() - 1
while k >= 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt'] += cur_gt.shape[0]
else:
gt_iou = box_preds.new_zeros(box_preds.shape[0])
return recall_dict
    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Merge a checkpoint state dict into the model, adapting spconv layouts.

        spconv 1.x stores sparse-conv kernels as (k1, k2, k3, c_in, c_out);
        spconv 2.x expects either (k1, k2, k3, c_out, c_in) (native) or
        (c_out, k1, k2, k3, c_in) (implicit GEMM). Mismatched spconv weights
        are permuted until one of those layouts matches the local shape.

        Args:
            model_state_disk: state dict loaded from the checkpoint file.
            strict: if True, load only the matched subset strictly; otherwise
                merge the matched weights into the current full state dict.

        Returns:
            (state_dict, update_model_state): the model's local state dict and
            the subset of checkpoint weights that actually matched and were applied.
        """
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)
        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x
                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()
            # Only shape-compatible weights are taken from the checkpoint.
            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))
        if strict:
            self.load_state_dict(update_model_state)
        else:
            # Non-strict: keep current values for keys missing from the checkpoint.
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
version = checkpoint.get("version", None)
if version is not None:
logger.info('==> Checkpoint trained from version: %s' % version)
state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self._load_state_dict(checkpoint['model_state'], strict=True)
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch
| 23,013 | 47.348739 | 138 | py |
3DTrans | 3DTrans-master/pcdet/models/detectors/detector3d_template_multi_db.py | import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads, mdf_models
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate_M_DB(nn.Module):
    """Template detector jointly trained on two source databases (multi-DB fusion)."""
    def __init__(self, model_cfg, num_class, num_class_s2, dataset, dataset_s2, source_one_name):
        super().__init__()
        self.model_cfg = model_cfg
        # `*_s2` attributes mirror the second source database's class setup.
        self.num_class = num_class
        self.num_class_s2 = num_class_s2
        self.dataset = dataset
        self.dataset_s2 = dataset_s2
        self.class_names = dataset.class_names
        self.class_names_s2 = dataset_s2.class_names
        self.source_one_name = source_one_name
        # Non-trainable iteration counter, saved/restored with checkpoints.
        self.register_buffer('global_step', torch.LongTensor(1).zero_())
        # Shared trunk + MoE adapters, then one head stack per source.
        self.module_topology = [
            'point_t', 'vfe', 'backbone_3d', 'map_to_bev_module', 'dense_3d_moe', 'pfe',
            'backbone_2d', 'dense_2d_moe', 'dense_head_s1', 'point_head_s1', 'roi_head_s1',
            'dense_head_s2', 'point_head_s2', 'roi_head_s2',
        ]
@property
def mode(self):
return 'TRAIN' if self.training else 'TEST'
    def update_global_step(self):
        # In-place increment keeps `global_step` registered as the same buffer tensor.
        self.global_step += 1
    def build_networks(self):
        """Instantiate every configured stage in `module_topology` order.

        Each `build_*` helper reads and updates `model_info_dict` (feature
        widths, grid metadata) so later stages see what earlier ones produced,
        and the built module (possibly None) is registered on `self`.

        Returns:
            list of the built (non-None) modules, in topology order.
        """
        model_info_dict = {
            'module_list': [],
            'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
            'num_point_features': self.dataset.point_feature_encoder.num_point_features,
            'grid_size': self.dataset.grid_size,
            'point_cloud_range': self.dataset.point_cloud_range,
            'voxel_size': self.dataset.voxel_size,
            'depth_downsample_factor': self.dataset.depth_downsample_factor
        }
        for module_name in self.module_topology:
            # Dispatch to the stage-specific builder, e.g. 'vfe' -> build_vfe.
            module, model_info_dict = getattr(self, 'build_%s' % module_name)(
                model_info_dict=model_info_dict
            )
            self.add_module(module_name, module)
        return model_info_dict['module_list']
def build_point_t(self, model_info_dict):
if self.model_cfg.get('POINT_T', None) is None:
return None, model_info_dict
point_t_module = pfe.__all__[self.model_cfg.POINT_T.NAME](
model_cfg=self.model_cfg.POINT_T
)
model_info_dict['module_list'].append(point_t_module)
return point_t_module, model_info_dict
def build_vfe(self, model_info_dict):
if self.model_cfg.get('VFE', None) is None:
return None, model_info_dict
vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
model_cfg=self.model_cfg.VFE,
num_point_features=model_info_dict['num_rawpoint_features'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
grid_size=model_info_dict['grid_size'],
depth_downsample_factor=model_info_dict['depth_downsample_factor']
)
model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
model_info_dict['module_list'].append(vfe_module)
return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_3D', None) is None:
return None, model_info_dict
backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
model_cfg=self.model_cfg.BACKBONE_3D,
input_channels=model_info_dict['num_point_features'],
grid_size=model_info_dict['grid_size'],
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range']
)
model_info_dict['module_list'].append(backbone_3d_module)
model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
if hasattr(backbone_3d_module, 'backbone_channels') else None
return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
if self.model_cfg.get('MAP_TO_BEV', None) is None:
return None, model_info_dict
map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
model_cfg=self.model_cfg.MAP_TO_BEV,
grid_size=model_info_dict['grid_size']
)
model_info_dict['module_list'].append(map_to_bev_module)
model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
return map_to_bev_module, model_info_dict
def build_dense_3d_moe(self, model_info_dict):
if self.model_cfg.get('DENSE_3D_MoE', None) is None:
return None, model_info_dict
dense_moe_module = mdf_models.__all__[self.model_cfg.DENSE_3D_MoE.NAME](
model_cfg=self.model_cfg.DENSE_3D_MoE
)
model_info_dict['module_list'].append(dense_moe_module)
return dense_moe_module, model_info_dict
def build_backbone_2d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_2D', None) is None:
return None, model_info_dict
backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
model_cfg=self.model_cfg.BACKBONE_2D,
input_channels=model_info_dict['num_bev_features']
)
model_info_dict['module_list'].append(backbone_2d_module)
model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
return backbone_2d_module, model_info_dict
def build_pfe(self, model_info_dict):
if self.model_cfg.get('PFE', None) is None:
return None, model_info_dict
pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
model_cfg=self.model_cfg.PFE,
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
num_bev_features=model_info_dict['num_bev_features'],
num_rawpoint_features=model_info_dict['num_rawpoint_features']
)
model_info_dict['module_list'].append(pfe_module)
model_info_dict['num_point_features'] = pfe_module.num_point_features
model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
return pfe_module, model_info_dict
def build_dense_2d_moe(self, model_info_dict):
if self.model_cfg.get('DENSE_2D_MoE', None) is None:
return None, model_info_dict
dense_moe_module = mdf_models.__all__[self.model_cfg.DENSE_2D_MoE.NAME](
model_cfg=self.model_cfg.DENSE_2D_MoE
)
model_info_dict['module_list'].append(dense_moe_module)
return dense_moe_module, model_info_dict
def build_dense_head_s1(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD_S1', None) is None:
return None, model_info_dict
dense_head_module_s1 = dense_heads.__all__[self.model_cfg.DENSE_HEAD_S1.NAME](
model_cfg=self.model_cfg.DENSE_HEAD_S1,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class if not self.model_cfg.DENSE_HEAD_S1.CLASS_AGNOSTIC else 1,
class_names=self.class_names,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S1', False),
voxel_size=model_info_dict.get('voxel_size', False)
)
model_info_dict['module_list'].append(dense_head_module_s1)
return dense_head_module_s1, model_info_dict
def build_dense_head_s2(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD_S2', None) is None:
return None, model_info_dict
dense_head_module_s2 = dense_heads.__all__[self.model_cfg.DENSE_HEAD_S2.NAME](
model_cfg=self.model_cfg.DENSE_HEAD_S2,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class_s2 if not self.model_cfg.DENSE_HEAD_S2.CLASS_AGNOSTIC else 1,
class_names=self.class_names_s2,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S2', False),
voxel_size=model_info_dict.get('voxel_size', False)
)
model_info_dict['module_list'].append(dense_head_module_s2)
return dense_head_module_s2, model_info_dict
def build_point_head_s1(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD_S1', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD_S1.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module_s1 = dense_heads.__all__[self.model_cfg.POINT_HEAD_S1.NAME](
model_cfg=self.model_cfg.POINT_HEAD_S1,
input_channels=num_point_features,
num_class=self.num_class if not self.model_cfg.POINT_HEAD_S1.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S1', False)
)
model_info_dict['module_list'].append(point_head_module_s1)
return point_head_module_s1, model_info_dict
def build_point_head_s2(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD_S2', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD_S2.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module_s2 = dense_heads.__all__[self.model_cfg.POINT_HEAD_S2.NAME](
model_cfg=self.model_cfg.POINT_HEAD_S2,
input_channels=num_point_features,
num_class=self.num_class_s2 if not self.model_cfg.POINT_HEAD_S2.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S2', False)
)
model_info_dict['module_list'].append(point_head_module_s2)
return point_head_module_s2, model_info_dict
def build_roi_head_s1(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD_S1', None) is None:
return None, model_info_dict
point_head_module_s1 = roi_heads.__all__[self.model_cfg.ROI_HEAD_S1.NAME](
model_cfg=self.model_cfg.ROI_HEAD_S1,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class if not self.model_cfg.ROI_HEAD_S1.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module_s1)
return point_head_module_s1, model_info_dict
def build_roi_head_s2(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD_S2', None) is None:
return None, model_info_dict
point_head_module_s2 = roi_heads.__all__[self.model_cfg.ROI_HEAD_S2.NAME](
model_cfg=self.model_cfg.ROI_HEAD_S2,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class_s2 if not self.model_cfg.ROI_HEAD_S2.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module_s2)
return point_head_module_s2, model_info_dict
    def forward(self, **kwargs):
        # Abstract: concrete multi-DB detectors implement the forward pass.
        raise NotImplementedError
    # Multi-head post-processing shared by both per-source head stacks.
    def post_processing(self, batch_dict):
        """
        Select final boxes for each frame: sigmoid-normalize raw class scores,
        run (multi-class or class-agnostic) NMS, and accumulate recall counters.

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois)  1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            (pred_dicts, recall_dict): per-frame final boxes/scores/labels and
            the accumulated recall counters.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                # Flattened predictions: select this frame's rows via an index mask.
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                if not isinstance(cls_preds, list):
                    cls_preds = [cls_preds]
                    multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
                else:
                    multihead_label_mapping = batch_dict['multihead_label_mapping']
                cur_start_idx = 0
                pred_scores, pred_labels, pred_boxes = [], [], []
                # Each head owns a contiguous slice of the box predictions.
                for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
                    assert cur_cls_preds.shape[1] == len(cur_label_mapping)
                    cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
                    cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
                        cls_scores=cur_cls_preds, box_preds=cur_box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    # Map head-local label indices back to global class ids.
                    cur_pred_labels = cur_label_mapping[cur_pred_labels]
                    pred_scores.append(cur_pred_scores)
                    pred_labels.append(cur_pred_labels)
                    pred_boxes.append(cur_pred_boxes)
                    cur_start_idx += cur_cls_preds.shape[0]
                final_scores = torch.cat(pred_scores, dim=0)
                final_labels = torch.cat(pred_labels, dim=0)
                final_boxes = torch.cat(pred_boxes, dim=0)
            else:
                cls_preds, label_preds = torch.max(cls_preds, dim=-1)
                if batch_dict.get('has_class_labels', False):
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    # Shift from 0-based argmax index to 1-based class id.
                    label_preds = label_preds + 1
                selected, selected_scores = model_nms_utils.class_agnostic_nms(
                    box_scores=cls_preds, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    # Report pre-sigmoid scores instead of normalized ones.
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )
            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
@staticmethod
def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
k = cur_gt.__len__() - 1
while k >= 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt'] += cur_gt.shape[0]
else:
gt_iou = box_preds.new_zeros(box_preds.shape[0])
return recall_dict
    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Merge checkpoint weights into this model, adapting spconv layouts.

        For sparse-conv weights whose on-disk shape disagrees with the
        in-memory shape, the two known spconv 1.x <-> 2.x layouts are tried
        before the key is skipped.

        Args:
            model_state_disk: state dict loaded from a checkpoint.
            strict: if True, load only the shape-matching subset strictly;
                if False, merge the subset into the current state dict and
                load the result (missing keys keep their current values).

        Returns:
            Tuple ``(state_dict, update_model_state)``: the model's local
            state dict and the subset of keys actually taken from disk.
        """
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)

        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x

                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    # implicit-GEMM layout used by spconv 2.x
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()

            # Only shape-compatible tensors are copied; mismatches are skipped
            # silently and reported by the caller via update_model_state.
            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))

        if strict:
            self.load_state_dict(update_model_state)
        else:
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state
def frozen_model(self, model):
for p in model.vfe.parameters():
p.requires_grad = False
for p in model.backbone_3d.parameters():
p.requires_grad = False
for p in model.map_to_bev_module.parameters():
p.requires_grad = False
for p in model.backbone_2d.parameters():
p.requires_grad = False
return model
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
version = checkpoint.get("version", None)
if version is not None:
logger.info('==> Checkpoint trained from version: %s' % version)
state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self._load_state_dict(checkpoint['model_state'], strict=True)
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch | 23,645 | 45.455796 | 119 | py |
3DTrans | 3DTrans-master/pcdet/models/detectors/pv_rcnn.py | import torch
from .detector3d_template import Detector3DTemplate
from .detector3d_template_multi_db import Detector3DTemplate_M_DB
from .detector3d_template_multi_db_3 import Detector3DTemplate_M_DB_3
from .detector3d_template_ada import ActiveDetector3DTemplate
from pcdet.utils import common_utils
class PVRCNN(Detector3DTemplate):
    """Plain PV-RCNN detector: runs the configured module pipeline end to end."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        # Feed the batch through every configured sub-module in order.
        for module in self.module_list:
            batch_dict = module(batch_dict)

        if not self.training:
            # Inference: return (pred_dicts, recall_dicts).
            return self.post_processing(batch_dict)

        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum RPN, point-head and RCNN losses (plus optional 3D-backbone loss)."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        total = loss_rpn + loss_point + loss_rcnn
        if hasattr(self.backbone_3d, 'get_loss'):
            loss_backbone3d, tb_dict = self.backbone_3d.get_loss(tb_dict)
            total = total + loss_backbone3d
        return total, tb_dict, disp_dict
class SemiPVRCNN(Detector3DTemplate):
    """PV-RCNN variant for semi-supervised training.

    ``forward`` behaviour depends on the configured model type:
      * 'origin'  -- plain detector (loss when training, boxes when testing);
      * 'teacher' -- always returns the raw batch dict (raw predictions);
      * 'student' -- loss for (pseudo-)labelled data, raw batch dict for
        unlabelled data, final boxes at test time.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
        self.model_type = None

    def set_model_type(self, model_type):
        """Propagate the semi-supervised role to all head modules."""
        assert model_type in ['origin', 'teacher', 'student']
        self.model_type = model_type
        self.dense_head.model_type = model_type
        self.point_head.model_type = model_type
        self.roi_head.model_type = model_type

    def forward(self, batch_dict):
        if self.model_type not in ('origin', 'teacher', 'student'):
            raise Exception('Unsupprted model type')

        # Every role starts by running the full module pipeline.
        for module in self.module_list:
            batch_dict = module(batch_dict)

        if self.model_type == 'teacher':
            # Teacher: hand back raw predictions (pseudo-label generation).
            return batch_dict

        if self.model_type == 'origin':
            if self.training:
                loss, tb_dict, disp_dict = self.get_training_loss()
                return {'loss': loss}, tb_dict, disp_dict
            return self.post_processing(batch_dict)

        # student
        if not self.training:
            return self.post_processing(batch_dict)
        if 'gt_boxes' not in batch_dict:
            # Unlabelled data: raw predictions only, no loss.
            return batch_dict
        loss, tb_dict, disp_dict = self.get_training_loss()
        return batch_dict, {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum RPN, point-head and RCNN losses (plus optional 3D-backbone loss)."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        total = loss_rpn + loss_point + loss_rcnn
        if hasattr(self.backbone_3d, 'get_loss'):
            loss_backbone3d, tb_dict = self.backbone_3d.get_loss(tb_dict)
            total = total + loss_backbone3d
        return total, tb_dict, disp_dict
class PVRCNN_M_DB(Detector3DTemplate_M_DB):
    """PV-RCNN trained jointly on two datasets (multi-DB).

    The shared backbone processes the merged batch; the last six modules of
    ``module_list`` are dataset-specific heads (three for source 1 followed
    by three for source 2), so the merged batch is split by origin before
    reaching them.
    """
    def __init__(self, model_cfg, num_class, num_class_s2, dataset, dataset_s2, source_one_name):
        super().__init__(model_cfg=model_cfg, num_class=num_class, num_class_s2=num_class_s2, dataset=dataset,
                         dataset_s2=dataset_s2, source_one_name=source_one_name)
        self.module_list = self.build_networks()
        self.source_one_name = source_one_name

    def forward(self, batch_dict):
        # Split the Concat dataset batch into batch_1 and batch_2
        split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, batch_dict)
        batch_s1 = {}
        batch_s2 = {}
        len_of_module = len(self.module_list)
        for k, cur_module in enumerate(self.module_list):
            # Modules [0, len-6): shared backbone, run on the merged batch.
            if k < len_of_module-6:
                batch_dict = cur_module(batch_dict)
            # Modules [len-6, len-3): source-1 heads.
            if k == len_of_module-6 or k == len_of_module-5 or k == len_of_module-4:
                if len(split_tag_s1) == batch_dict['batch_size']:
                    # Whole batch comes from source 1: no split needed.
                    batch_dict = cur_module(batch_dict)
                elif len(split_tag_s2) == batch_dict['batch_size']:
                    # Whole batch comes from source 2: skip the source-1 heads.
                    continue
                else:
                    # Mixed batch: split once at the first head, then run
                    # the source-1 heads on their share only.
                    if k == len_of_module-6:
                        batch_s1, batch_s2 = common_utils.split_two_batch_dict_gpu(split_tag_s1, split_tag_s2, batch_dict)
                    batch_s1 = cur_module(batch_s1)
            # Modules [len-3, len): source-2 heads.
            if k == len_of_module-3 or k == len_of_module-2 or k == len_of_module-1:
                if len(split_tag_s2) == batch_dict['batch_size']:
                    batch_dict = cur_module(batch_dict)
                elif len(split_tag_s1) == batch_dict['batch_size']:
                    continue
                else:
                    batch_s2 = cur_module(batch_s2)

        if self.training:
            split_tag_s1, split_tag_s2 = common_utils.split_batch_dict(self.source_one_name, batch_dict)
            if len(split_tag_s1) == batch_dict['batch_size']:
                loss, tb_dict, disp_dict = self.get_training_loss_s1()

                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            elif len(split_tag_s2) == batch_dict['batch_size']:
                loss, tb_dict, disp_dict = self.get_training_loss_s2()

                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                # Mixed batch: sum both sources' losses; only source-1
                # tb/disp dicts are reported upstream.
                loss_1, tb_dict_1, disp_dict_1 = self.get_training_loss_s1()
                loss_2, tb_dict_2, disp_dict_2 = self.get_training_loss_s2()
                ret_dict = {
                    'loss': loss_1 + loss_2
                }
                return ret_dict, tb_dict_1, disp_dict_1
        else:
            # NOTE: When peform the inference, only one dataset can be accessed.
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

    def get_training_loss_s1(self):
        """RPN + point + RCNN loss for the source-1 heads."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s1.get_loss()
        loss_point, tb_dict = self.point_head_s1.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head_s1.get_loss(tb_dict)

        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_training_loss_s2(self):
        """RPN + point + RCNN loss for the source-2 heads."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s2.get_loss()
        loss_point, tb_dict = self.point_head_s2.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head_s2.get_loss(tb_dict)

        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict
class PVRCNN_M_DB_3(Detector3DTemplate_M_DB_3):
    """PV-RCNN trained jointly on three datasets.

    The last nine modules of ``module_list`` are three per-source head
    triplets (dense/point/roi for sources 1, 2, 3); the shared backbone
    handles the merged batch before it is split by origin.
    """
    def __init__(self, model_cfg, num_class, num_class_s2, num_class_s3, dataset, dataset_s2, dataset_s3, source_one_name, source_1):
        super().__init__(model_cfg=model_cfg, num_class=num_class, num_class_s2=num_class_s2, num_class_s3=num_class_s3,
                         dataset=dataset, dataset_s2=dataset_s2, dataset_s3=dataset_s3, source_one_name=source_one_name, source_1=source_1)
        self.module_list = self.build_networks()
        self.source_one_name = source_one_name
        self.source_1 = source_1

    def forward(self, batch_dict):
        batch_s1 = {}
        batch_s2 = {}
        batch_s3 = {}
        if self.training:
            len_of_module = len(self.module_list)
            for k, cur_module in enumerate(self.module_list):
                # Modules [0, len-9): shared backbone on the merged batch.
                if k < len_of_module-9:
                    batch_dict = cur_module(batch_dict)
                # Modules [len-9, len-6): source-1 ('waymo') heads.
                if k == len_of_module-9 or k == len_of_module-8 or k == len_of_module-7:
                    if k == len_of_module-9:
                        # Split the Concat dataset batch into batch_1, batch_2, and batch_3
                        split_tag_s1, split_tag_s2_pre = common_utils.split_batch_dict('waymo', batch_dict)
                        batch_s1, batch_s2_pre = common_utils.split_two_batch_dict_gpu(split_tag_s1, split_tag_s2_pre, batch_dict)
                        split_tag_s2, split_tag_s3 = common_utils.split_batch_dict(self.source_one_name, batch_s2_pre)
                        batch_s2, batch_s3 = common_utils.split_two_batch_dict_gpu(split_tag_s2, split_tag_s3, batch_s2_pre)
                    batch_s1 = cur_module(batch_s1)
                # Modules [len-6, len-3): source-2 heads.
                if k == len_of_module-6 or k == len_of_module-5 or k == len_of_module-4:
                    batch_s2 = cur_module(batch_s2)
                # Modules [len-3, len): source-3 heads.
                if k == len_of_module-3 or k == len_of_module-2 or k == len_of_module-1:
                    batch_s3 = cur_module(batch_s3)
        else:
            # Inference: the batch comes from a single source; run only the
            # head triplet selected by ``self.source_1`` (1, 2 or 3).
            len_of_module = len(self.module_list)
            for k, cur_module in enumerate(self.module_list):
                if k < len_of_module-9:
                    batch_dict = cur_module(batch_dict)
                if k == len_of_module-9 or k == len_of_module-8 or k == len_of_module-7:
                    if self.source_1 == 1:
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue
                if k == len_of_module-6 or k == len_of_module-5 or k == len_of_module-4:
                    if self.source_1 == 2:
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue
                if k == len_of_module-3 or k == len_of_module-2 or k == len_of_module-1:
                    if self.source_1 == 3:
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue

        if self.training:
            # Sum all three sources' losses; only source-1 tb/disp dicts
            # are reported upstream.
            loss_1, tb_dict_1, disp_dict_1 = self.get_training_loss_s1()
            loss_2, tb_dict_2, disp_dict_2 = self.get_training_loss_s2()
            loss_3, tb_dict_3, disp_dict_3 = self.get_training_loss_s3()
            ret_dict = {
                'loss': loss_1 + loss_2 + loss_3
            }
            return ret_dict, tb_dict_1, disp_dict_1
        else:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

    def get_training_loss_s1(self):
        """RPN + point + RCNN loss for the source-1 heads."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s1.get_loss()
        loss_point, tb_dict = self.point_head_s1.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head_s1.get_loss(tb_dict)

        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_training_loss_s2(self):
        """RPN + point + RCNN loss for the source-2 heads."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s2.get_loss()
        loss_point, tb_dict = self.point_head_s2.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head_s2.get_loss(tb_dict)

        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_training_loss_s3(self):
        """RPN + point + RCNN loss for the source-3 heads."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head_s3.get_loss()
        loss_point, tb_dict = self.point_head_s3.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head_s3.get_loss(tb_dict)

        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict
class ActivePVRCNN_DUAL(ActiveDetector3DTemplate):
    """PV-RCNN with a domain discriminator for dual-domain active learning.

    ``forward`` dispatches on ``forward_args['mode']``:
      * 'train_discriminator' -- backbone only, returns discriminator loss;
      * 'train_detector'      -- full pipeline, returns detector losses;
      * 'active_evaluate'     -- per-frame scores for active sample selection;
      * None (eval)           -- standard post-processed predictions.
    """
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        batch_dict['mode'] = forward_args.get('mode', None) if forward_args is not None else None

        if self.training and forward_args.get('mode', None) == 'train_discriminator':
            # Run only the feature-extraction stages, then the discriminator.
            batch_dict = self.module_list[0](batch_dict)  # MeanVFE
            batch_dict = self.module_list[1](batch_dict)  # VoxelBackBone8x
            batch_dict = self.module_list[2](batch_dict)  # HeightCompression
            batch_dict = self.module_list[3](batch_dict)  # VoxelSetAbstraction
            batch_dict = self.module_list[4](batch_dict)
            batch_dict = self.module_list[5](batch_dict)
            batch_dict = self.discriminator(batch_dict)
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss

        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if self.training and forward_args.get('mode', None) == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif not self.training and forward_args.get('mode', None) == 'active_evaluate':
            batch_dict = self.post_processing(batch_dict)
            sample_score = self.get_evaluate_score(batch_dict, forward_args['domain'])
            return sample_score
        elif not self.training and forward_args.get('mode', None) is None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts
        # NOTE(review): if called in training mode with an unrecognized mode,
        # `loss` below would be unbound — callers appear to always pass a
        # known mode; confirm before relying on other values.
        ret_dict={
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_detector_loss(self):
        """RPN + point + RCNN loss of the detector branch."""
        disp_dict= {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)

        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_mul_classifier_loss(self, mode=None):
        """Multi-classifier loss from the dense head (active-learning branch)."""
        disp_dict = {}
        loss, tb_dict = self.dense_head.get_active_loss(mode)
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict, domain):
        """Build per-frame score dicts from the discriminator's domainness."""
        batch_dict = self.discriminator.domainness_evaluate(batch_dict)
        batch_size = batch_dict['batch_size']
        frame_id = [str(id) for id in batch_dict['frame_id']]
        domainness_evaluate = batch_dict['domainness_evaluate'].cpu()
        reweight_roi = batch_dict['reweight_roi']
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                'domainness_evaluate': domainness_evaluate[i].cpu(),
                'roi_feature': reweight_roi[i],
                # total_score currently equals the domainness score alone.
                'total_score': domainness_evaluate[i].cpu(),
            }
            sample_score.append(frame_score)
        return sample_score

    def get_discriminator_result(self, batch_dict):
        """Return the discriminator's accuracy on this batch."""
        acc = self.discriminator.get_accuracy(batch_dict)
        return acc
class PVRCNN_TQS(ActiveDetector3DTemplate):
    """PV-RCNN with TQS-style active-learning hooks.

    ``forward`` dispatches on ``forward_args['mode']``:
      * 'train_discriminator' -- backbone only, returns discriminator loss;
      * 'finetune' / 'train_detector' / 'train_mul_cls' -- training losses;
      * 'active_evaluate' -- per-frame scores for active sample selection;
      * None (eval) -- standard post-processed predictions.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        # BUGFIX: the original compared strings with `is` (object identity,
        # which only works by CPython interning accident and emits a
        # SyntaxWarning on 3.8+); use `==` for value comparison. The dead
        # `if forward_args is not None` guard was dropped (**kwargs is
        # always a dict).
        mode = forward_args.get('mode', None)
        batch_dict['mode'] = mode

        if self.training and mode == 'train_discriminator':
            # Run only the feature-extraction stages, then the discriminator.
            batch_dict = self.module_list[0](batch_dict)  # MeanVFE
            batch_dict = self.module_list[1](batch_dict)  # VoxelBackBone8x
            batch_dict = self.module_list[2](batch_dict)  # HeightCompression
            batch_dict = self.module_list[3](batch_dict)  # VoxelSetAbstraction
            batch_dict = self.module_list[4](batch_dict)
            batch_dict = self.module_list[5](batch_dict)
            batch_dict = self.point_head.get_point_score(batch_dict)
            batch_dict = self.discriminator(batch_dict)
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss

        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if self.training and mode == 'finetune':
            loss, tb_dict, disp_dict = self.get_finetune_loss()
        elif self.training and mode == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif self.training and mode == 'train_mul_cls':
            loss, tb_dict, disp_dict = self.get_mul_classifier_loss()
        elif not self.training and mode == 'active_evaluate':
            sample_score = self.get_evaluate_score(batch_dict)
            return sample_score
        elif not self.training and mode is None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            # discriminator_acc = self.get_discriminator_result(batch_dict, forward_args['source'])
            return pred_dicts, recall_dicts
        # NOTE(review): training with an unrecognized mode leaves `loss`
        # unbound below (pre-existing behaviour, intentionally kept).
        ret_dict = {
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_mul_cls_loss(self, mode='train_mul_cls'):
        """Multi-classifier (committee) loss from the dense head."""
        disp_dict = {}
        loss, tb_dict = self.dense_head.get_active_loss(mode)
        return loss, tb_dict, disp_dict

    def get_detector_loss(self):
        """Detector loss: dense head (detector branch) + point head + RCNN head."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_active_loss(mode='train_detector')
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_mul_classifier_loss(self, mode=None):
        """Committee-classifier loss; ``mode`` is accepted but unused (kept for API)."""
        disp_dict = {}
        loss, tb_dict = self.dense_head.get_active_loss('train_mul_cls')
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict):
        """Score each frame for active selection (committee + uncertainty + domainness)."""
        batch_dict = self.dense_head.committee_evaluate(batch_dict)
        batch_dict = self.dense_head.uncertainty_evaluate(batch_dict)
        batch_dict = self.discriminator.domainness_evaluate(batch_dict)
        batch_size = batch_dict['batch_size']
        frame_id = batch_dict['frame_id']
        committee_evaluate = batch_dict['committee_evaluate']
        uncertainty_evaluate = batch_dict['uncertainty_evaluate']
        domainness_evaluate = batch_dict['domainness_evaluate'].cpu()
        # (the dead `sample_score = {}` init that was immediately shadowed
        # has been removed)
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                'committee_evaluate': committee_evaluate[i],
                'uncertainty_evaluate': uncertainty_evaluate[i],
                'domainness_evaluate': domainness_evaluate[i],
                'total_score': committee_evaluate[i] + uncertainty_evaluate[i] + domainness_evaluate[i]
            }
            sample_score.append(frame_score)
        return sample_score
class PVRCNN_CLUE(ActiveDetector3DTemplate):
    """PV-RCNN with CLUE-style active-learning hooks.

    ``forward`` dispatches on ``forward_args['mode']``:
      * 'train_discriminator' -- backbone only, returns discriminator loss;
      * 'finetune' / 'train_detector' / 'train_mul_cls' -- training losses;
      * 'active_evaluate' -- per-frame ROI scores/features for selection;
      * None (eval) -- standard post-processed predictions.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        # BUGFIX: the original compared strings with `is` (object identity,
        # which only works by CPython interning accident and emits a
        # SyntaxWarning on 3.8+); use `==` for value comparison. The dead
        # `if forward_args is not None` guard was dropped (**kwargs is
        # always a dict).
        mode = forward_args.get('mode', None)
        batch_dict['mode'] = mode

        if self.training and mode == 'train_discriminator':
            # Run only the feature-extraction stages, then the discriminator.
            batch_dict = self.module_list[0](batch_dict)  # MeanVFE
            batch_dict = self.module_list[1](batch_dict)  # VoxelBackBone8x
            batch_dict = self.module_list[2](batch_dict)  # HeightCompression
            batch_dict = self.module_list[3](batch_dict)  # VoxelSetAbstraction
            batch_dict = self.module_list[4](batch_dict)
            batch_dict = self.module_list[5](batch_dict)
            batch_dict = self.point_head.get_point_score(batch_dict)
            batch_dict = self.discriminator(batch_dict)
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss

        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if self.training and mode == 'finetune':
            loss, tb_dict, disp_dict = self.get_finetune_loss()
        elif self.training and mode == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif self.training and mode == 'train_mul_cls':
            loss, tb_dict, disp_dict = self.get_mul_cls_loss()
        elif not self.training and mode == 'active_evaluate':
            batch_dict = self.post_processing(batch_dict)
            sample_score = self.get_evaluate_score(batch_dict)
            return sample_score
        elif not self.training and mode is None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            # discriminator_acc = self.get_discriminator_result(batch_dict, forward_args['source'])
            return pred_dicts, recall_dicts
        # NOTE(review): training with an unrecognized mode leaves `loss`
        # unbound below (pre-existing behaviour, intentionally kept).
        ret_dict = {
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_mul_cls_loss(self, mode='train_mul_cls'):
        """Multi-classifier (committee) loss from the dense head."""
        disp_dict = {}
        loss, tb_dict = self.dense_head.get_active_loss(mode)
        return loss, tb_dict, disp_dict

    def get_detector_loss(self):
        """Standard detector loss: RPN + point head + RCNN head."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_mul_classifier_loss(self, mode=None):
        """Committee-classifier loss; ``mode`` is accepted but unused (kept for API)."""
        disp_dict = {}
        loss, tb_dict = self.dense_head.get_active_loss('train_mul_cls')
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict):
        """Collect per-frame ROI scores and features for CLUE selection."""
        batch_size = batch_dict['batch_size']
        frame_id = batch_dict['frame_id']
        roi_score = batch_dict['cls_preds']
        roi_feature = batch_dict['roi_feature']
        # (the dead `sample_score = {}` init that was immediately shadowed
        # has been removed)
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                'roi_score': roi_score[i],
                'roi_feature': roi_feature[i]
            }
            sample_score.append(frame_score)
        return sample_score
3DTrans | 3DTrans-master/pcdet/models/detectors/detector3d_template_multi_db_3.py | import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils.spconv_utils import find_all_spconv_keys
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads, mdf_models
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate_M_DB_3(nn.Module):
    def __init__(self, model_cfg, num_class, num_class_s2, num_class_s3, dataset, dataset_s2, dataset_s3, source_one_name, source_1):
        """Template detector trained jointly on three source datasets.

        Args:
            model_cfg: full model configuration (per-source head configs included).
            num_class / num_class_s2 / num_class_s3: class counts per source.
            dataset / dataset_s2 / dataset_s3: dataset objects per source.
            source_one_name: tag used to split merged batches by origin.
            source_1: which source's heads to run at inference time.
        """
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.num_class_s2 = num_class_s2
        self.num_class_s3 = num_class_s3
        self.dataset = dataset
        self.dataset_s2 = dataset_s2
        self.dataset_s3 = dataset_s3
        self.class_names = dataset.class_names
        self.class_names_s2 = dataset_s2.class_names
        self.class_names_s3 = dataset_s3.class_names
        self.source_one_name = source_one_name
        self.source_1 = source_1
        # Persistent step counter, saved/restored with the model checkpoint.
        self.register_buffer('global_step', torch.LongTensor(1).zero_())

        # Build order: shared backbone stages first, then per-source head
        # triplets (dense / point / roi) for sources 1, 2 and 3.
        self.module_topology = [
            'point_t', 'vfe', 'backbone_3d', 'map_to_bev_module', 'dense_3d_moe', 'pfe',
            'backbone_2d', 'dense_2d_moe', 'dense_head_s1', 'point_head_s1', 'roi_head_s1',
            'dense_head_s2', 'point_head_s2', 'roi_head_s2', 'dense_head_s3', 'point_head_s3', 'roi_head_s3',
        ]
@property
def mode(self):
return 'TRAIN' if self.training else 'TEST'
    def update_global_step(self):
        # Incremented once per optimization step; stored as a registered
        # buffer so it is saved/restored with model checkpoints.
        self.global_step += 1
def build_networks(self):
model_info_dict = {
'module_list': [],
'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
'num_point_features': self.dataset.point_feature_encoder.num_point_features,
'grid_size': self.dataset.grid_size,
'point_cloud_range': self.dataset.point_cloud_range,
'voxel_size': self.dataset.voxel_size,
'depth_downsample_factor': self.dataset.depth_downsample_factor
}
for module_name in self.module_topology:
module, model_info_dict = getattr(self, 'build_%s' % module_name)(
model_info_dict=model_info_dict
)
self.add_module(module_name, module)
return model_info_dict['module_list']
def build_point_t(self, model_info_dict):
if self.model_cfg.get('POINT_T', None) is None:
return None, model_info_dict
point_t_module = pfe.__all__[self.model_cfg.POINT_T.NAME](
model_cfg=self.model_cfg.POINT_T
)
model_info_dict['module_list'].append(point_t_module)
return point_t_module, model_info_dict
def build_vfe(self, model_info_dict):
if self.model_cfg.get('VFE', None) is None:
return None, model_info_dict
vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
model_cfg=self.model_cfg.VFE,
num_point_features=model_info_dict['num_rawpoint_features'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
grid_size=model_info_dict['grid_size'],
depth_downsample_factor=model_info_dict['depth_downsample_factor']
)
model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
model_info_dict['module_list'].append(vfe_module)
return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_3D', None) is None:
return None, model_info_dict
backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
model_cfg=self.model_cfg.BACKBONE_3D,
input_channels=model_info_dict['num_point_features'],
grid_size=model_info_dict['grid_size'],
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range']
)
model_info_dict['module_list'].append(backbone_3d_module)
model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
if hasattr(backbone_3d_module, 'backbone_channels') else None
return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
if self.model_cfg.get('MAP_TO_BEV', None) is None:
return None, model_info_dict
map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
model_cfg=self.model_cfg.MAP_TO_BEV,
grid_size=model_info_dict['grid_size']
)
model_info_dict['module_list'].append(map_to_bev_module)
model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
return map_to_bev_module, model_info_dict
def build_dense_3d_moe(self, model_info_dict):
if self.model_cfg.get('DENSE_3D_MoE', None) is None:
return None, model_info_dict
dense_moe_module = mdf_models.__all__[self.model_cfg.DENSE_3D_MoE.NAME](
model_cfg=self.model_cfg.DENSE_3D_MoE
)
model_info_dict['module_list'].append(dense_moe_module)
return dense_moe_module, model_info_dict
def build_backbone_2d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_2D', None) is None:
return None, model_info_dict
backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
model_cfg=self.model_cfg.BACKBONE_2D,
input_channels=model_info_dict['num_bev_features']
)
model_info_dict['module_list'].append(backbone_2d_module)
model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
return backbone_2d_module, model_info_dict
def build_pfe(self, model_info_dict):
if self.model_cfg.get('PFE', None) is None:
return None, model_info_dict
pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
model_cfg=self.model_cfg.PFE,
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
num_bev_features=model_info_dict['num_bev_features'],
num_rawpoint_features=model_info_dict['num_rawpoint_features']
)
model_info_dict['module_list'].append(pfe_module)
model_info_dict['num_point_features'] = pfe_module.num_point_features
model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
return pfe_module, model_info_dict
def build_dense_2d_moe(self, model_info_dict):
if self.model_cfg.get('DENSE_2D_MoE', None) is None:
return None, model_info_dict
dense_moe_module = mdf_models.__all__[self.model_cfg.DENSE_2D_MoE.NAME](
model_cfg=self.model_cfg.DENSE_2D_MoE
)
model_info_dict['module_list'].append(dense_moe_module)
return dense_moe_module, model_info_dict
def build_dense_head_s1(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD_S1', None) is None:
return None, model_info_dict
dense_head_module_s1 = dense_heads.__all__[self.model_cfg.DENSE_HEAD_S1.NAME](
model_cfg=self.model_cfg.DENSE_HEAD_S1,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class if not self.model_cfg.DENSE_HEAD_S1.CLASS_AGNOSTIC else 1,
class_names=self.class_names,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S1', False),
voxel_size=model_info_dict.get('voxel_size', False)
)
model_info_dict['module_list'].append(dense_head_module_s1)
return dense_head_module_s1, model_info_dict
def build_dense_head_s2(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD_S2', None) is None:
return None, model_info_dict
dense_head_module_s2 = dense_heads.__all__[self.model_cfg.DENSE_HEAD_S2.NAME](
model_cfg=self.model_cfg.DENSE_HEAD_S2,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class_s2 if not self.model_cfg.DENSE_HEAD_S2.CLASS_AGNOSTIC else 1,
class_names=self.class_names_s2,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S2', False),
voxel_size=model_info_dict.get('voxel_size', False)
)
model_info_dict['module_list'].append(dense_head_module_s2)
return dense_head_module_s2, model_info_dict
def build_dense_head_s3(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD_S3', None) is None:
return None, model_info_dict
dense_head_module_s3 = dense_heads.__all__[self.model_cfg.DENSE_HEAD_S3.NAME](
model_cfg=self.model_cfg.DENSE_HEAD_S3,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class_s3 if not self.model_cfg.DENSE_HEAD_S3.CLASS_AGNOSTIC else 1,
class_names=self.class_names_s3,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S3', False),
voxel_size=model_info_dict.get('voxel_size', False)
)
model_info_dict['module_list'].append(dense_head_module_s3)
return dense_head_module_s3, model_info_dict
def build_point_head_s1(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD_S1', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD_S1.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module_s1 = dense_heads.__all__[self.model_cfg.POINT_HEAD_S1.NAME](
model_cfg=self.model_cfg.POINT_HEAD_S1,
input_channels=num_point_features,
num_class=self.num_class if not self.model_cfg.POINT_HEAD_S1.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S1', False)
)
model_info_dict['module_list'].append(point_head_module_s1)
return point_head_module_s1, model_info_dict
def build_point_head_s2(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD_S2', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD_S2.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module_s2 = dense_heads.__all__[self.model_cfg.POINT_HEAD_S2.NAME](
model_cfg=self.model_cfg.POINT_HEAD_S2,
input_channels=num_point_features,
num_class=self.num_class_s2 if not self.model_cfg.POINT_HEAD_S2.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S2', False)
)
model_info_dict['module_list'].append(point_head_module_s2)
return point_head_module_s2, model_info_dict
def build_point_head_s3(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD_S3', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD_S3.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module_s3 = dense_heads.__all__[self.model_cfg.POINT_HEAD_S3.NAME](
model_cfg=self.model_cfg.POINT_HEAD_S3,
input_channels=num_point_features,
num_class=self.num_class_s3 if not self.model_cfg.POINT_HEAD_S3.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD_S3', False)
)
model_info_dict['module_list'].append(point_head_module_s3)
return point_head_module_s3, model_info_dict
def build_roi_head_s1(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD_S1', None) is None:
return None, model_info_dict
point_head_module_s1 = roi_heads.__all__[self.model_cfg.ROI_HEAD_S1.NAME](
model_cfg=self.model_cfg.ROI_HEAD_S1,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class if not self.model_cfg.ROI_HEAD_S1.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module_s1)
return point_head_module_s1, model_info_dict
def build_roi_head_s2(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD_S2', None) is None:
return None, model_info_dict
point_head_module_s2 = roi_heads.__all__[self.model_cfg.ROI_HEAD_S2.NAME](
model_cfg=self.model_cfg.ROI_HEAD_S2,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class_s2 if not self.model_cfg.ROI_HEAD_S2.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module_s2)
return point_head_module_s2, model_info_dict
def build_roi_head_s3(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD_S3', None) is None:
return None, model_info_dict
point_head_module_s3 = roi_heads.__all__[self.model_cfg.ROI_HEAD_S3.NAME](
model_cfg=self.model_cfg.ROI_HEAD_S3,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class_s3 if not self.model_cfg.ROI_HEAD_S3.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module_s3)
return point_head_module_s3, model_info_dict
def forward(self, **kwargs):
raise NotImplementedError
# add some multi-head operation
def post_processing(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
multihead_label_mapping: [(num_class1), (num_class2), ...]
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
has_class_labels: True/False
roi_labels: (B, num_rois) 1 .. num_classes
batch_pred_labels: (B, num_boxes, 1)
Returns:
"""
post_process_cfg = self.model_cfg.POST_PROCESSING
batch_size = batch_dict['batch_size']
recall_dict = {}
pred_dicts = []
for index in range(batch_size):
if batch_dict.get('batch_index', None) is not None:
assert batch_dict['batch_box_preds'].shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
assert batch_dict['batch_box_preds'].shape.__len__() == 3
batch_mask = index
box_preds = batch_dict['batch_box_preds'][batch_mask]
src_box_preds = box_preds
if not isinstance(batch_dict['batch_cls_preds'], list):
cls_preds = batch_dict['batch_cls_preds'][batch_mask]
src_cls_preds = cls_preds
assert cls_preds.shape[1] in [1, self.num_class]
if not batch_dict['cls_preds_normalized']:
cls_preds = torch.sigmoid(cls_preds)
else:
cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
src_cls_preds = cls_preds
if not batch_dict['cls_preds_normalized']:
cls_preds = [torch.sigmoid(x) for x in cls_preds]
if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
if not isinstance(cls_preds, list):
cls_preds = [cls_preds]
multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
else:
multihead_label_mapping = batch_dict['multihead_label_mapping']
cur_start_idx = 0
pred_scores, pred_labels, pred_boxes = [], [], []
for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
assert cur_cls_preds.shape[1] == len(cur_label_mapping)
cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
cls_scores=cur_cls_preds, box_preds=cur_box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
cur_pred_labels = cur_label_mapping[cur_pred_labels]
pred_scores.append(cur_pred_scores)
pred_labels.append(cur_pred_labels)
pred_boxes.append(cur_pred_boxes)
cur_start_idx += cur_cls_preds.shape[0]
final_scores = torch.cat(pred_scores, dim=0)
final_labels = torch.cat(pred_labels, dim=0)
final_boxes = torch.cat(pred_boxes, dim=0)
else:
cls_preds, label_preds = torch.max(cls_preds, dim=-1)
if batch_dict.get('has_class_labels', False):
label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
label_preds = batch_dict[label_key][index]
else:
label_preds = label_preds + 1
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
if post_process_cfg.OUTPUT_RAW_SCORE:
max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
selected_scores = max_cls_preds[selected]
final_scores = selected_scores
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
recall_dict = self.generate_recall_record(
box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
thresh_list=post_process_cfg.RECALL_THRESH_LIST
)
record_dict = {
'pred_boxes': final_boxes,
'pred_scores': final_scores,
'pred_labels': final_labels
}
pred_dicts.append(record_dict)
return pred_dicts, recall_dict
@staticmethod
def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
k = cur_gt.__len__() - 1
while k >= 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt'] += cur_gt.shape[0]
else:
gt_iou = box_preds.new_zeros(box_preds.shape[0])
return recall_dict
    def _load_state_dict(self, model_state_disk, *, strict=True):
        """Copy checkpoint weights into this model, adapting spconv layouts.

        Checkpoint entries whose key matches a local spconv weight but whose
        shape disagrees are transposed/permuted to bridge the spconv 1.x -> 2.x
        weight-layout change; keys whose shape still disagrees are skipped.

        Args:
            model_state_disk: state_dict loaded from a checkpoint file.
            strict: if True, load only the matched keys; if False, merge the
                matched keys into the current local state first so unmatched
                local weights keep their present values.

        Returns:
            (state_dict, update_model_state): snapshot of the local state_dict
            and the subset of checkpoint entries that were actually applied.
        """
        state_dict = self.state_dict()  # local cache of state_dict
        spconv_keys = find_all_spconv_keys(self)

        update_model_state = {}
        for key, val in model_state_disk.items():
            if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape:
                # with different spconv versions, we need to adapt weight shapes for spconv blocks
                # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x

                val_native = val.transpose(-1, -2)  # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in)
                if val_native.shape == state_dict[key].shape:
                    val = val_native.contiguous()
                else:
                    assert val.shape.__len__() == 5, 'currently only spconv 3D is supported'
                    val_implicit = val.permute(4, 0, 1, 2, 3)  # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in)
                    if val_implicit.shape == state_dict[key].shape:
                        val = val_implicit.contiguous()

            # only keep entries whose (possibly adapted) shape matches the local model
            if key in state_dict and state_dict[key].shape == val.shape:
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))

        if strict:
            self.load_state_dict(update_model_state)
        else:
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        return state_dict, update_model_state
def frozen_model(self, model):
for p in model.vfe.parameters():
p.requires_grad = False
for p in model.backbone_3d.parameters():
p.requires_grad = False
for p in model.map_to_bev_module.parameters():
p.requires_grad = False
for p in model.backbone_2d.parameters():
p.requires_grad = False
return model
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
version = checkpoint.get("version", None)
if version is not None:
logger.info('==> Checkpoint trained from version: %s' % version)
state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict)))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self._load_state_dict(checkpoint['model_state'], strict=True)
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch | 26,507 | 45.916814 | 133 | py |
3DTrans | 3DTrans-master/pcdet/models/detectors/second_net_iou.py | import torch
from .detector3d_template import Detector3DTemplate
from .detector3d_template_ada import ActiveDetector3DTemplate
from ..model_utils.model_nms_utils import class_agnostic_nms, class_agnostic_nms_with_roi
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
import torch.nn.functional as F
class SECONDNetIoU(Detector3DTemplate):
    """SECOND detector with an IoU-predicting RoI head (SECOND-IoU)."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run all modules; return losses while training, boxes at inference."""
        batch_dict['dataset_cfg'] = self.dataset.dataset_cfg
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss()

            ret_dict = {
                'loss': loss
            }
            return ret_dict, tb_dict, disp_dict
        else:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

    def get_training_loss(self):
        """Sum the RPN (dense head) and RCNN (RoI head) losses."""
        disp_dict = {}

        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)

        loss = loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict

    @staticmethod
    def cal_scores_by_npoints(cls_scores, iou_scores, num_points_in_gt, cls_thresh=10, iou_thresh=100):
        """Blend cls and iou scores per box, weighted by the box's point count.

        Boxes with <= cls_thresh points keep the cls score, boxes with
        >= iou_thresh points use the iou score, and boxes in between
        interpolate linearly between the two.

        Args:
            cls_scores: (N)
            iou_scores: (N)
            num_points_in_gt: (N, 7+c)
            cls_thresh: scalar
            iou_thresh: scalar
        """
        assert iou_thresh >= cls_thresh
        alpha = torch.zeros(cls_scores.shape, dtype=torch.float32).cuda()
        alpha[num_points_in_gt <= cls_thresh] = 0
        alpha[num_points_in_gt >= iou_thresh] = 1

        mask = ((num_points_in_gt > cls_thresh) & (num_points_in_gt < iou_thresh))
        # BUGFIX: interpolate from cls_thresh — the previous hard-coded `- 10`
        # only matched the default cls_thresh and broke other threshold settings.
        alpha[mask] = (num_points_in_gt[mask] - cls_thresh) / (iou_thresh - cls_thresh)

        scores = (1 - alpha) * cls_scores + alpha * iou_scores
        return scores

    def set_nms_score_by_class(self, iou_preds, cls_preds, label_preds, score_by_class):
        """Pick per-box NMS scores ('iou' or 'cls') according to each box's class."""
        # NOTE(review): this assumes the labels present are exactly 1..n for
        # n = number of unique labels — verify for batches with sparse classes.
        n_classes = torch.unique(label_preds).shape[0]
        nms_scores = torch.zeros(iou_preds.shape, dtype=torch.float32).cuda()
        for i in range(n_classes):
            mask = label_preds == (i + 1)
            class_name = self.class_names[i]
            score_type = score_by_class[class_name]
            if score_type == 'iou':
                nms_scores[mask] = iou_preds[mask]
            elif score_type == 'cls':
                nms_scores[mask] = cls_preds[mask]
            else:
                raise NotImplementedError

        return nms_scores

    def post_processing(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                roi_labels: (B, num_rois)  1 .. num_classes
        Returns:
            pred_dicts, recall_dict
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_cls_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_cls_preds'].shape.__len__() == 3
                batch_mask = index

            box_preds = batch_dict['batch_box_preds'][batch_mask]
            # for SECOND-IoU, batch_cls_preds carries the predicted IoU scores
            iou_preds = batch_dict['batch_cls_preds'][batch_mask]
            cls_preds = batch_dict['roi_scores'][batch_mask]

            src_iou_preds = iou_preds
            src_box_preds = box_preds
            src_cls_preds = cls_preds
            assert iou_preds.shape[1] in [1, self.num_class]

            if not batch_dict['cls_preds_normalized']:
                iou_preds = torch.sigmoid(iou_preds)
                cls_preds = torch.sigmoid(cls_preds)

            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                raise NotImplementedError
            else:
                iou_preds, label_preds = torch.max(iou_preds, dim=-1)
                label_preds = batch_dict['roi_labels'][index] if batch_dict.get('has_class_labels', False) else label_preds + 1

                # select the NMS ranking score according to SCORE_TYPE
                if post_process_cfg.NMS_CONFIG.get('SCORE_BY_CLASS', None) and \
                        post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'score_by_class':
                    nms_scores = self.set_nms_score_by_class(
                        iou_preds, cls_preds, label_preds, post_process_cfg.NMS_CONFIG.SCORE_BY_CLASS
                    )
                elif post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) == 'iou' or \
                        post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) is None:
                    nms_scores = iou_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'cls':
                    nms_scores = cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'weighted_iou_cls':
                    nms_scores = post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.iou * iou_preds + \
                                 post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.cls * cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'num_pts_iou_cls':
                    # blend cls/iou per box based on how many raw points fall inside it
                    point_mask = (batch_dict['points'][:, 0] == batch_mask)
                    batch_points = batch_dict['points'][point_mask][:, 1:4]

                    num_pts_in_gt = roiaware_pool3d_utils.points_in_boxes_cpu(
                        batch_points.cpu(), box_preds[:, 0:7].cpu()
                    ).sum(dim=1).float().cuda()

                    score_thresh_cfg = post_process_cfg.NMS_CONFIG.SCORE_THRESH
                    nms_scores = self.cal_scores_by_npoints(
                        cls_preds, iou_preds, num_pts_in_gt,
                        score_thresh_cfg.cls, score_thresh_cfg.iou
                    )
                else:
                    raise NotImplementedError

                selected, selected_scores = class_agnostic_nms(
                    box_scores=nms_scores, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )

                if post_process_cfg.OUTPUT_RAW_SCORE:
                    raise NotImplementedError

                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]

            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )

            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels,
                'pred_cls_scores': cls_preds[selected],
                'pred_iou_scores': iou_preds[selected]
            }
            pred_dicts.append(record_dict)

        return pred_dicts, recall_dict
class ActiveSECONDNetIoU(ActiveDetector3DTemplate):
    """SECOND-IoU variant for active domain adaptation.

    forward() dispatches on forward_args['mode']:
      - 'train_discriminator': returns the discriminator loss
      - 'train_detector':      returns (ret_dict, tb_dict, disp_dict)
      - 'active_evaluate':     returns per-frame sample scores (eval only)
      - None (eval):           returns (pred_dicts, recall_dicts)
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict, **forward_args):
        batch_dict['dataset_cfg'] = self.dataset.dataset_cfg
        batch_dict['mode'] = forward_args.get('mode', None) if forward_args is not None else None
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        # BUGFIX: mode strings were compared with `is`, which relies on string
        # interning and emits a SyntaxWarning on modern CPython; use `==`.
        if self.training and forward_args.get('mode', None) == 'train_discriminator':
            loss = self.discriminator.get_discriminator_loss(batch_dict, source=forward_args['source'])
            return loss

        if self.training and forward_args.get('mode', None) == 'train_detector':
            loss, tb_dict, disp_dict = self.get_detector_loss()
        elif not self.training and forward_args.get('mode', None) == 'active_evaluate':
            batch_dict = self.post_processing(batch_dict)
            sample_score = self.get_evaluate_score(batch_dict, forward_args['domain'])
            return sample_score
        elif not self.training and forward_args.get('mode', None) is None:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts

        ret_dict = {
            'loss': loss
        }
        return ret_dict, tb_dict, disp_dict

    def get_detector_loss(self):
        """Sum the RPN (dense head) and RCNN (RoI head) losses."""
        disp_dict = {}

        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)

        loss = loss_rpn + loss_rcnn
        return loss, tb_dict, disp_dict

    def get_evaluate_score(self, batch_dict, domain):
        """Build per-frame score dicts (domainness + reweighted RoI feature)."""
        batch_dict = self.discriminator.domainness_evaluate(batch_dict)
        batch_size = batch_dict['batch_size']
        frame_id = [str(id) for id in batch_dict['frame_id']]
        reweight_roi = batch_dict['reweight_roi']
        domainness_evaluate = batch_dict['domainness_evaluate'].cpu()
        sample_score = []
        for i in range(batch_size):
            frame_score = {
                'frame_id': frame_id[i],
                'domainness_evaluate': domainness_evaluate[i],
                'roi_feature': reweight_roi[i],
                'total_score': domainness_evaluate[i]
            }
            sample_score.append(frame_score)
        return sample_score

    @staticmethod
    def cal_scores_by_npoints(cls_scores, iou_scores, num_points_in_gt, cls_thresh=10, iou_thresh=100):
        """Blend cls and iou scores per box, weighted by the box's point count.

        Args:
            cls_scores: (N)
            iou_scores: (N)
            num_points_in_gt: (N, 7+c)
            cls_thresh: scalar
            iou_thresh: scalar
        """
        assert iou_thresh >= cls_thresh
        alpha = torch.zeros(cls_scores.shape, dtype=torch.float32).cuda()
        alpha[num_points_in_gt <= cls_thresh] = 0
        alpha[num_points_in_gt >= iou_thresh] = 1

        mask = ((num_points_in_gt > cls_thresh) & (num_points_in_gt < iou_thresh))
        # BUGFIX: interpolate from cls_thresh — the previous hard-coded `- 10`
        # only matched the default cls_thresh and broke other threshold settings.
        alpha[mask] = (num_points_in_gt[mask] - cls_thresh) / (iou_thresh - cls_thresh)

        scores = (1 - alpha) * cls_scores + alpha * iou_scores
        return scores

    def set_nms_score_by_class(self, iou_preds, cls_preds, label_preds, score_by_class):
        """Pick per-box NMS scores ('iou' or 'cls') according to each box's class."""
        # NOTE(review): this assumes the labels present are exactly 1..n for
        # n = number of unique labels — verify for batches with sparse classes.
        n_classes = torch.unique(label_preds).shape[0]
        nms_scores = torch.zeros(iou_preds.shape, dtype=torch.float32).cuda()
        for i in range(n_classes):
            mask = label_preds == (i + 1)
            class_name = self.class_names[i]
            score_type = score_by_class[class_name]
            if score_type == 'iou':
                nms_scores[mask] = iou_preds[mask]
            elif score_type == 'cls':
                nms_scores[mask] = cls_preds[mask]
            else:
                raise NotImplementedError

        return nms_scores

    def post_processing(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                roi_labels: (B, num_rois)  1 .. num_classes
        Returns:
            In 'active_evaluate' mode: batch_dict with 'reweight_roi' filled.
            Otherwise: (pred_dicts, recall_dict).
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_cls_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_cls_preds'].shape.__len__() == 3
                batch_mask = index

            box_preds = batch_dict['batch_box_preds'][batch_mask]
            # for SECOND-IoU, batch_cls_preds carries the predicted IoU scores
            iou_preds = batch_dict['batch_cls_preds'][batch_mask]
            cls_preds = batch_dict['roi_scores'][batch_mask]

            src_iou_preds = iou_preds
            src_box_preds = box_preds
            src_cls_preds = cls_preds
            assert iou_preds.shape[1] in [1, self.num_class]

            if batch_dict['mode'] == 'active_evaluate':
                roi_feature = batch_dict['roi_shared_feature'][batch_mask]

            if not batch_dict['cls_preds_normalized']:
                iou_preds = torch.sigmoid(iou_preds)
                cls_preds = torch.sigmoid(cls_preds)

            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                raise NotImplementedError
            else:
                iou_preds, label_preds = torch.max(iou_preds, dim=-1)
                label_preds = batch_dict['roi_labels'][index] if batch_dict.get('has_class_labels', False) else label_preds + 1

                # select the NMS ranking score according to SCORE_TYPE
                if post_process_cfg.NMS_CONFIG.get('SCORE_BY_CLASS', None) and \
                        post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'score_by_class':
                    nms_scores = self.set_nms_score_by_class(
                        iou_preds, cls_preds, label_preds, post_process_cfg.NMS_CONFIG.SCORE_BY_CLASS
                    )
                elif post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) == 'iou' or \
                        post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) is None:
                    nms_scores = iou_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'cls':
                    nms_scores = cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'weighted_iou_cls':
                    nms_scores = post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.iou * iou_preds + \
                                 post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.cls * cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'num_pts_iou_cls':
                    point_mask = (batch_dict['points'][:, 0] == batch_mask)
                    batch_points = batch_dict['points'][point_mask][:, 1:4]

                    num_pts_in_gt = roiaware_pool3d_utils.points_in_boxes_cpu(
                        batch_points.cpu(), box_preds[:, 0:7].cpu()
                    ).sum(dim=1).float().cuda()

                    score_thresh_cfg = post_process_cfg.NMS_CONFIG.SCORE_THRESH
                    nms_scores = self.cal_scores_by_npoints(
                        cls_preds, iou_preds, num_pts_in_gt,
                        score_thresh_cfg.cls, score_thresh_cfg.iou
                    )
                else:
                    raise NotImplementedError

                if batch_dict['mode'] == 'active_evaluate':
                    # aggregate a score-weighted RoI feature per frame instead of emitting boxes
                    if batch_dict.get('reweight_roi', None) is None:
                        batch_dict['reweight_roi'] = []
                    selected, selected_scores, selected_roi = class_agnostic_nms_with_roi(
                        box_scores=nms_scores, box_preds=box_preds, roi_feature=roi_feature,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH
                    )
                    # explicit dim=0 (1-D input): identical result, avoids the
                    # implicit-dim deprecation warning
                    reweight = F.softmax(selected_scores, dim=0)
                    reweight_roi = reweight.view(1, -1) @ selected_roi
                    batch_dict['reweight_roi'].append(reweight_roi)
                    if index == (batch_size-1):
                        return batch_dict
                    else:
                        continue

                selected, selected_scores = class_agnostic_nms(
                    box_scores=nms_scores, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )

                if post_process_cfg.OUTPUT_RAW_SCORE:
                    raise NotImplementedError

                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]

            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )

            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels,
                'pred_cls_scores': cls_preds[selected],
                'pred_iou_scores': iou_preds[selected]
            }
            pred_dicts.append(record_dict)

        return pred_dicts, recall_dict
| 17,487 | 43.161616 | 127 | py |
3DTrans | 3DTrans-master/pcdet/models/detectors/semi_second.py | import torch
from .detector3d_template import Detector3DTemplate
from ..model_utils.model_nms_utils import class_agnostic_nms
class SemiSECOND(Detector3DTemplate):
    """SECOND detector for semi-supervised training.

    The network plays one of three roles, selected via set_model_type():
      - 'origin':  plain supervised training / inference
      - 'teacher': always returns raw predictions (batch_dict)
      - 'student': returns loss plus raw boxes on labeled data, raw boxes
                   only on unlabeled data, final boxes at inference
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
        self.model_type = None

    def set_model_type(self, model_type):
        """Select the role this network plays and propagate it to the dense head."""
        assert model_type in ['origin', 'teacher', 'student']
        self.model_type = model_type
        self.dense_head.model_type = model_type

    def forward(self, batch_dict):
        if self.model_type not in ('origin', 'teacher', 'student'):
            raise Exception('Unsupprted model type')

        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if self.model_type == 'teacher':
            # teacher always hands back raw predictions
            return batch_dict

        if not self.training:
            # origin/student at test time: final boxes after NMS
            return self.post_processing(batch_dict)

        if self.model_type == 'student' and 'gt_boxes' not in batch_dict:
            # unlabeled student batch: raw boxes for the consistency loss
            return batch_dict

        loss, tb_dict, disp_dict = self.get_training_loss()
        ret_dict = {
            'loss': loss
        }
        if self.model_type == 'student':
            # student additionally exposes its raw predictions
            return batch_dict, ret_dict, tb_dict, disp_dict
        return ret_dict, tb_dict, disp_dict

    def get_training_loss(self):
        """Return the RPN loss together with its tensorboard entries."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            **tb_dict
        }
        return loss_rpn, tb_dict, disp_dict
class SemiSECONDIoU(Detector3DTemplate):
    """SECOND-IoU detector for semi-supervised training.

    Like SemiSECOND but with an additional IoU-predicting RoI head; the
    'origin'/'teacher'/'student' role (set via set_model_type) controls both
    the forward return contract and which losses are computed.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()
        self.model_type = None

    def set_model_type(self, model_type):
        """Select the role this network plays; propagated to dense and RoI heads."""
        assert model_type in ['origin', 'teacher', 'student']
        self.model_type = model_type
        self.dense_head.model_type = model_type
        self.roi_head.model_type = model_type
        """
        if model_type in ['teacher', 'student']:
            for param in self.roi_head.parameters():
                param.requires_grad = False
        """

    def forward(self, batch_dict):
        # origin: (training, return loss) (testing, return final boxes)
        if self.model_type == 'origin':
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            if self.training:
                loss, tb_dict, disp_dict = self.get_training_loss()

                ret_dict = {
                    'loss': loss
                }
                return ret_dict, tb_dict, disp_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts

        # teacher: (testing, return initial filtered boxes and iou_scores)
        elif self.model_type == 'teacher':
            #assert not self.training
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            return batch_dict

        # student: (training, return (loss & raw boxes w/ gt_boxes) or raw boxes (w/o gt_boxes) for consistency)
        #          (testing, return final_boxes)
        elif self.model_type == 'student':
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            if self.training:
                if 'gt_boxes' in batch_dict:  # for (pseudo-)labeled data
                    loss, tb_dict, disp_dict = self.get_training_loss()
                    ret_dict = {
                        'loss': loss
                    }
                    return batch_dict, ret_dict, tb_dict, disp_dict
                else:
                    return batch_dict
            else:
                pred_dicts, recall_dicts = self.post_processing(batch_dict)
                return pred_dicts, recall_dicts
        else:
            raise Exception('Unsupprted model type')

    def get_training_loss(self):
        """RPN loss always; RCNN loss added for 'origin' and 'student' roles."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            **tb_dict
        }
        #if self.model_type == 'origin':
        if self.model_type in ['origin', 'student']:
            loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
            loss = loss_rpn + loss_rcnn
        #elif self.model_type in ['teacher', 'student']:
        elif self.model_type in ['teacher']:
            loss = loss_rpn
        else:
            raise Exception('Unsupprted model type')
        return loss, tb_dict, disp_dict

    def post_processing(self, batch_dict):
        """
        we found NMS with IoU-guided filtering is bad, probablely bugs in the head
        thus we only use original RPN score for NMS
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            box_preds = batch_dict['rois'][index]
            iou_preds = batch_dict['roi_ious'][index]
            cls_preds = batch_dict['roi_scores'][index]
            label_preds = batch_dict['roi_labels'][index]
            assert iou_preds.shape[1] in [1, self.num_class]

            if not batch_dict['cls_preds_normalized']:
                iou_preds = torch.sigmoid(iou_preds)
                cls_preds = torch.sigmoid(cls_preds)

            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                raise NotImplementedError
            else:
                # rank by RPN cls score (see docstring) rather than predicted IoU
                nms_scores = cls_preds  # iou_preds
                nms_scores = nms_scores.squeeze(-1)
                selected, selected_scores = class_agnostic_nms(
                    box_scores=nms_scores, box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH
                )
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]

            # added filtering boxes with size 0
            zero_mask = (final_boxes[:, 3:6] != 0).all(1)
            final_boxes = final_boxes[zero_mask]
            final_labels = final_labels[zero_mask]
            final_scores = final_scores[zero_mask]

            recall_dict = self.generate_recall_record(
                box_preds=final_boxes,
                recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST
            )

            record_dict = {
                'pred_boxes': final_boxes,
                'pred_scores': final_scores,
                'pred_labels': final_labels,
            }
            pred_dicts.append(record_dict)

        return pred_dicts, recall_dict
| 8,060 | 37.385714 | 112 | py |
3DTrans | 3DTrans-master/pcdet/models/detectors/unsupervised_model/pvrcnn_plus_backbone.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..detector3d_template import Detector3DTemplate
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
# copy from voxel set abstraction module
def bilinear_interpolate_torch(im, x, y):
    """Bilinearly sample ``im`` at fractional pixel coordinates.

    Args:
        im: (H, W, C) feature map indexed as [y, x]
        x: (N) fractional x coordinates
        y: (N) fractional y coordinates

    Returns:
        (N, C) interpolated features
    """
    x_lo = torch.floor(x).long()
    y_lo = torch.floor(y).long()
    x_hi = x_lo + 1
    y_hi = y_lo + 1

    # clamp to the valid grid before gathering the four corners
    x_lo = torch.clamp(x_lo, 0, im.shape[1] - 1)
    x_hi = torch.clamp(x_hi, 0, im.shape[1] - 1)
    y_lo = torch.clamp(y_lo, 0, im.shape[0] - 1)
    y_hi = torch.clamp(y_hi, 0, im.shape[0] - 1)

    corners = (im[y_lo, x_lo], im[y_hi, x_lo], im[y_lo, x_hi], im[y_hi, x_hi])

    dx_hi = x_hi.type_as(x) - x
    dx_lo = x - x_lo.type_as(x)
    dy_hi = y_hi.type_as(y) - y
    dy_lo = y - y_lo.type_as(y)
    weights = (dx_hi * dy_hi, dx_hi * dy_lo, dx_lo * dy_hi, dx_lo * dy_lo)

    # weight each (N, C) corner value along the N axis and accumulate
    return sum(torch.t(torch.t(vals) * w) for vals, w in zip(corners, weights))
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
    """Keep only the points that lie near at least one ROI box.

    Args:
        rois: (M, 7 + C) boxes [x, y, z, dx, dy, dz, heading, ...]
        points: (N, 3)
        sample_radius_with_roi: extra margin added around each ROI's half-diagonal
        num_max_points_of_part: chunk size bounding the (chunk, M) distance matrix

    Returns:
        sampled_points: (N_out, 3) points near the ROIs; falls back to the
            first input point when no point qualifies
        point_mask: (N,) bool mask over the input points
    """
    def _near_roi_mask(pts):
        # Distance from each point to every ROI center, reduced to the nearest ROI.
        center_dist = (pts[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
        nearest_dist, nearest_idx = center_dist.min(dim=-1)
        # Half-diagonal of the nearest box approximates its spatial extent.
        half_diag = (rois[nearest_idx, 3:6] / 2).norm(dim=-1)
        return nearest_dist < half_diag + sample_radius_with_roi

    if points.shape[0] < num_max_points_of_part:
        point_mask = _near_roi_mask(points)
    else:
        # Process in fixed-size chunks to cap peak memory of the distance matrix.
        chunks = [_near_roi_mask(points[s:s + num_max_points_of_part])
                  for s in range(0, points.shape[0], num_max_points_of_part)]
        point_mask = torch.cat(chunks, dim=0)

    sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
    return sampled_points, point_mask
# TODO add LOSS_CFG TO MODEL_CFG
class PVRCNN_PLUS_BACKBONE(Detector3DTemplate):
    """Encoder-only PV-RCNN++ network used for unsupervised pre-training.

    Runs only the feature-extraction stages (VFE, sparse 3D backbone, BEV
    projection) and returns the enriched batch dict; no detection heads and
    no 2D BEV backbone are applied.
    """

    def __init__(self, model_cfg, dataset, num_class=None):
        # num_class is forced to None: no classification head exists here.
        super().__init__(model_cfg=model_cfg, num_class=None, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        # Encoder stages only; backbone_2d is intentionally skipped.
        for stage in (self.vfe, self.backbone_3d, self.map_to_bev_module):
            batch_dict = stage(batch_dict)
        return batch_dict
# TODO add POS_THRESH, NEG_THRESH
class HardestContrastiveLoss():
    """Hardest-negative contrastive loss over aggregated point features.

    Given two augmented views of the same point clouds (batch_dict_1 /
    batch_dict_2), it samples matched positive keypoints and random negative
    keypoints, aggregates per-keypoint features from the BEV map and the
    multi-scale sparse-conv features, and computes a FCGF-style hardest
    contrastive loss (positives pulled within POS_THRESH, hardest negatives
    pushed beyond NEG_THRESH).

    NOTE(review): this class is not an nn.Module, so any parameters inside
    ``self.SA_layers`` are not registered with a parent module/optimizer.
    """

    def __init__(self, loss_cfg, voxel_size, point_cloud_range, num_bev_features=None,
                 num_rawpoint_features=None, **kwargs):
        super().__init__()
        self.loss_cfg = loss_cfg
        self.pos_thresh = loss_cfg.POS_THRESH
        self.neg_thresh = loss_cfg.NEG_THRESH
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range

        SA_cfg = self.loss_cfg.SA_LAYER
        self.point_feature_names = []
        self.downsample_times_map = {}
        self.SA_layers = nn.ModuleList()
        # One aggregation layer per sparse-conv feature source; 'bev' and
        # 'raw_points' are handled separately (interpolation / direct use).
        for src_name in self.loss_cfg.FEATURES_SOURCE:
            if src_name in ['bev', 'raw_points']:
                continue
            self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
            self.point_feature_names.append(src_name)
            self.SA_layers.append(build_feature_aggregation_module(config=SA_cfg[src_name]))

    def pdist(self, point_features_1, point_features_2):
        """Pairwise Euclidean distance matrix between two feature sets: (N1, N2)."""
        D = torch.sum((point_features_1.unsqueeze(1) - point_features_2.unsqueeze(0)).pow(2), 2)
        return torch.sqrt(D + 1e-7)  # epsilon keeps sqrt differentiable at 0

    def get_hardest_contrastive_loss(self, batch_dict_1, batch_dict_2):
        """Compute the hardest contrastive loss between the two views.

        Returns:
            pos_loss_all: scalar penalizing distant matched (positive) pairs
            neg_loss_all: scalar penalizing too-close hardest negatives
        """
        batch_size = batch_dict_1['batch_size']
        batch_dict_1, batch_dict_2, keypoints_inds = self.get_point_features(
            batch_dict_1, batch_dict_2, tag='positive')
        batch_dict_1, batch_dict_2, (keypoints_inds_1, keypoints_inds_2) = self.get_point_features(
            batch_dict_1, batch_dict_2, tag='negative')
        pos_features_1, pos_features_2 = batch_dict_1['point_features_positive'], batch_dict_2['point_features_positive']
        neg_features_1, neg_features_2 = batch_dict_1['point_features_negative'], batch_dict_2['point_features_negative']

        pos_loss_all = None
        neg_loss_all = None
        for bs_idx in range(batch_size):
            mask_pos = batch_dict_1['points_coords_positive'][:, 0] == bs_idx
            cur_pos_features_1, cur_pos_features_2 = pos_features_1[mask_pos], pos_features_2[mask_pos]
            # Positives: matched keypoints should lie within pos_thresh.
            pos_loss = torch.relu((cur_pos_features_1 - cur_pos_features_2).pow(2).sum(1) - self.pos_thresh)

            mask_neg = batch_dict_1['points_coords_negative'][:, 0] == bs_idx
            cur_neg_features_1, cur_neg_features_2 = neg_features_1[mask_neg], neg_features_2[mask_neg]
            # Hardest negative mining: for each positive, the closest negative
            # feature taken from the *other* view.
            distance_1 = self.pdist(cur_pos_features_1, cur_neg_features_2)
            distance_2 = self.pdist(cur_pos_features_2, cur_neg_features_1)
            distance_1_min, distance_1_ind = distance_1.min(1)
            distance_2_min, distance_2_ind = distance_2.min(1)
            # Drop "negatives" that are actually the same sampled point.
            mask_1 = keypoints_inds[mask_pos] != keypoints_inds_2[distance_1_ind].to(keypoints_inds.device)
            mask_2 = keypoints_inds[mask_pos] != keypoints_inds_1[distance_2_ind].to(keypoints_inds.device)
            neg_loss_1 = torch.relu(self.neg_thresh - distance_1_min[mask_1]).pow(2)
            neg_loss_2 = torch.relu(self.neg_thresh - distance_2_min[mask_2]).pow(2)

            pos_loss = pos_loss.mean()
            neg_loss = (neg_loss_1.mean() + neg_loss_2.mean()) / 2
            if pos_loss_all is None and neg_loss_all is None:
                pos_loss_all = pos_loss
                neg_loss_all = neg_loss
            else:
                pos_loss_all += pos_loss
                neg_loss_all += neg_loss
        pos_loss_all = pos_loss_all / batch_size
        neg_loss_all = neg_loss_all / batch_size
        return pos_loss_all, neg_loss_all

    def get_point_features(self, batch_dict_1, batch_dict_2, tag='positive'):
        """Sample keypoints in both views and aggregate multi-source features.

        Stores 'point_features_<tag>' and 'points_coords_<tag>' in each batch
        dict and returns the keypoint indices used for sampling.
        """
        if tag == 'positive':
            keypoints_1, keypoints_2, keypoints_inds = self.get_positive_sampled_points(batch_dict_1, batch_dict_2)
        else:
            keypoints_1, keypoints_2, keypoints_inds_1, keypoints_inds_2 = self.get_negative_sampled_points(
                batch_dict_1, batch_dict_2, method='random')
            keypoints_inds = (keypoints_inds_1, keypoints_inds_2)

        point_feature_list_1 = []
        point_feature_list_2 = []
        if 'bev' in self.loss_cfg.FEATURES_SOURCE:
            point_bev_features_1 = self.interpolate_from_bev_features(
                keypoints_1, batch_dict_1['spatial_features'], batch_dict_1['batch_size'],
                bev_stride=batch_dict_1['spatial_features_stride']
            )
            point_bev_features_2 = self.interpolate_from_bev_features(
                keypoints_2, batch_dict_2['spatial_features'], batch_dict_2['batch_size'],
                bev_stride=batch_dict_2['spatial_features_stride']
            )
            point_feature_list_1.append(point_bev_features_1)
            point_feature_list_2.append(point_bev_features_2)

        batch_size = batch_dict_1['batch_size']
        new_xyz_1 = keypoints_1[:, 1:4].contiguous()
        new_xyz_2 = keypoints_2[:, 1:4].contiguous()
        new_xyz_batch_cnt_1 = new_xyz_1.new_zeros(batch_size).int()
        new_xyz_batch_cnt_2 = new_xyz_2.new_zeros(batch_size).int()
        for k in range(batch_size):
            new_xyz_batch_cnt_1[k] = (keypoints_1[:, 0] == k).sum()
            new_xyz_batch_cnt_2[k] = (keypoints_2[:, 0] == k).sum()

        # FIX: the original referenced self.model_cfg.COVER_FEAT, which does
        # not exist on this class (AttributeError when COVER_FEAT is set).
        cover_feat_4 = self.loss_cfg.get('COVER_FEAT', False)
        for k, src_name in enumerate(self.point_feature_names):
            cur_coords_1 = batch_dict_1['multi_scale_3d_features'][src_name].indices
            cur_features_1 = batch_dict_1['multi_scale_3d_features'][src_name].features.contiguous()
            xyz_1 = common_utils.get_voxel_centers(
                cur_coords_1[:, 1:4], downsample_times=self.downsample_times_map[src_name],
                voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
            )
            cur_coords_2 = batch_dict_2['multi_scale_3d_features'][src_name].indices
            cur_features_2 = batch_dict_2['multi_scale_3d_features'][src_name].features.contiguous()
            xyz_2 = common_utils.get_voxel_centers(
                cur_coords_2[:, 1:4], downsample_times=self.downsample_times_map[src_name],
                voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
            )
            pooled_features_1 = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size, aggregate_func=self.SA_layers[k],
                xyz=xyz_1.contiguous(), xyz_features=cur_features_1, xyz_bs_idxs=cur_coords_1[:, 0],
                new_xyz=new_xyz_1, new_xyz_batch_cnt=new_xyz_batch_cnt_1,
                filter_neighbors_with_roi=self.loss_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
                radius_of_neighbor=self.loss_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
                rois=batch_dict_1.get('rois', None),
                cover_feat_4=cover_feat_4
            )
            pooled_features_2 = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size, aggregate_func=self.SA_layers[k],
                xyz=xyz_2.contiguous(), xyz_features=cur_features_2, xyz_bs_idxs=cur_coords_2[:, 0],
                new_xyz=new_xyz_2, new_xyz_batch_cnt=new_xyz_batch_cnt_2,
                filter_neighbors_with_roi=self.loss_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
                radius_of_neighbor=self.loss_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
                rois=batch_dict_2.get('rois', None),
                cover_feat_4=cover_feat_4
            )
            point_feature_list_1.append(pooled_features_1)
            point_feature_list_2.append(pooled_features_2)

        point_features_1 = torch.cat(point_feature_list_1, dim=-1)
        point_features_2 = torch.cat(point_feature_list_2, dim=-1)
        save_name = 'point_features_' + tag
        save_name_coords = 'points_coords_' + tag
        batch_dict_1[save_name] = point_features_1.view(-1, point_features_1.shape[-1])
        batch_dict_2[save_name] = point_features_2.view(-1, point_features_2.shape[-1])
        batch_dict_1[save_name_coords] = keypoints_1
        batch_dict_2[save_name_coords] = keypoints_2
        return batch_dict_1, batch_dict_2, keypoints_inds

    @staticmethod
    def aggregate_keypoint_features_from_one_source(
            batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,
            filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000,
            rois=None, cover_feat_4=False
    ):
        """Aggregate features for the keypoints from one feature source.

        Args:
            aggregate_func: local aggregation layer (see StackPointFeature)
            xyz: (N, 3)
            xyz_features: (N, C) or None
            xyz_bs_idxs: (N) batch index of each source point
            new_xyz: (M, 3) keypoint coordinates
            new_xyz_batch_cnt: (batch_size) [M1, M2, ...]
            filter_neighbors_with_roi: restrict source points to ROI neighborhoods
            radius_of_neighbor: float, margin used by the ROI filtering
            num_max_points_of_part: chunk size for the ROI distance computation
            rois: (batch_size, num_rois, 7 + C)
            cover_feat_4: if True and no features exist, use z as a 1-D feature

        Returns:
            pooled_features: (M, C_out)
        """
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        if filter_neighbors_with_roi:
            point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz
            point_features_list = []
            for bs_idx in range(batch_size):
                bs_mask = (xyz_bs_idxs == bs_idx)
                _, valid_mask = sample_points_with_roi(
                    rois=rois[bs_idx], points=xyz[bs_mask],
                    sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,
                )
                point_features_list.append(point_features[bs_mask][valid_mask])
                xyz_batch_cnt[bs_idx] = valid_mask.sum()
            valid_point_features = torch.cat(point_features_list, dim=0)
            xyz = valid_point_features[:, 0:3]
            xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None
        else:
            for bs_idx in range(batch_size):
                xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()

        # Optionally use the z-coordinate as a 1-D surrogate feature when the
        # source carries no features (z-axis as "4th dimension").
        if xyz_features is None and cover_feat_4:
            xyz_features = xyz[:, 2].view(-1, 1)
        # Single call replaces three duplicated branches of the original.
        pooled_points, pooled_features = aggregate_func(
            xyz=xyz.contiguous(),
            xyz_batch_cnt=xyz_batch_cnt,
            new_xyz=new_xyz,
            new_xyz_batch_cnt=new_xyz_batch_cnt,
            features=xyz_features.contiguous() if xyz_features is not None else None,
        )
        return pooled_features

    def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
        """Bilinearly sample BEV features at each keypoint's (x, y) location.

        Args:
            keypoints: (N1 + N2 + ..., 4) [bs_idx, x, y, z]
            bev_features: (B, C, H, W)
            batch_size:
            bev_stride: downsample factor of the BEV map w.r.t. the voxel grid

        Returns:
            point_bev_features: (N1 + N2 + ..., C)
        """
        # World coordinates -> BEV pixel coordinates.
        x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]
        y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]
        x_idxs = x_idxs / bev_stride
        y_idxs = y_idxs / bev_stride

        point_bev_features_list = []
        for k in range(batch_size):
            bs_mask = (keypoints[:, 0] == k)
            cur_x_idxs = x_idxs[bs_mask]
            cur_y_idxs = y_idxs[bs_mask]
            cur_bev_features = bev_features[k].permute(1, 2, 0)  # (H, W, C)
            point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
            point_bev_features_list.append(point_bev_features)

        point_bev_features = torch.cat(point_bev_features_list, dim=0)  # (N1 + N2 + ..., C)
        return point_bev_features

    def get_positive_sampled_points(self, batch_dict_1, batch_dict_2):
        """FPS-sample matched keypoints from both views.

        Both views are indexed with the same sample indices, so returned
        keypoints are positive (corresponding) pairs.

        Returns:
            keypoints_1, keypoints_2: (N1 + N2 + ..., 4) [bs_idx, x, y, z]
            keypoints_inds: (N1 + N2 + ...,) per-batch sample indices
        """
        batch_size = batch_dict_1['batch_size']
        if self.loss_cfg.POINT_SOURCE == 'raw_points':
            src_points_1 = batch_dict_1['points'][:, 1:4]
            src_points_2 = batch_dict_2['points'][:, 1:4]
            batch_indices = batch_dict_1['points'][:, 0].long()
        elif self.loss_cfg.POINT_SOURCE == 'voxel_centers':
            src_points_1 = common_utils.get_voxel_centers(
                batch_dict_1['voxel_coords'][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            # FIX: the batch column must be dropped here as well; the original
            # passed the full (N, 4) voxel_coords for view 2.
            src_points_2 = common_utils.get_voxel_centers(
                batch_dict_2['voxel_coords'][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            batch_indices = batch_dict_1['voxel_coords'][:, 0].long()
        else:
            raise NotImplementedError

        keypoints_list_1 = []
        keypoints_list_2 = []
        keypoints_inds_list = []
        for bs_idx in range(batch_size):
            bs_mask = (batch_indices == bs_idx)
            sampled_points_1 = src_points_1[bs_mask].unsqueeze(dim=0)  # (1, N, 3)
            sampled_points_2 = src_points_2[bs_mask].unsqueeze(dim=0)
            # Farthest point sampling on view 1; reuse the indices for view 2.
            cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(
                sampled_points_1[:, :, 0:3].contiguous(), self.loss_cfg.NUM_KEYPOINTS
            ).long()
            if sampled_points_1.shape[1] < self.loss_cfg.NUM_KEYPOINTS:
                # Too few points: tile the valid indices to reach NUM_KEYPOINTS.
                times = int(self.loss_cfg.NUM_KEYPOINTS / sampled_points_1.shape[1]) + 1
                non_empty = cur_pt_idxs[0, :sampled_points_1.shape[1]]
                # FIX: was self.model_cfg.NUM_KEYPOINTS (nonexistent attribute).
                cur_pt_idxs[0] = non_empty.repeat(times)[:self.loss_cfg.NUM_KEYPOINTS]
            keypoints_1 = sampled_points_1[0][cur_pt_idxs[0]].unsqueeze(dim=0)
            keypoints_2 = sampled_points_2[0][cur_pt_idxs[0]].unsqueeze(dim=0)
            keypoints_list_1.append(keypoints_1)
            keypoints_list_2.append(keypoints_2)
            keypoints_inds_list.append(cur_pt_idxs[0])

        keypoints_1 = torch.cat(keypoints_list_1, dim=0)  # (B, M, 3)
        keypoints_2 = torch.cat(keypoints_list_2, dim=0)
        keypoints_inds = torch.cat(keypoints_inds_list, dim=0)
        if len(keypoints_1.shape) == 3:
            batch_idx = torch.arange(
                batch_size, device=keypoints_1.device).view(-1, 1).repeat(1, keypoints_1.shape[1]).view(-1, 1)
            keypoints_1 = torch.cat((batch_idx.float(), keypoints_1.view(-1, 3)), dim=1)
            keypoints_2 = torch.cat((batch_idx.float(), keypoints_2.view(-1, 3)), dim=1)
        return keypoints_1, keypoints_2, keypoints_inds

    def get_negative_sampled_points(self, batch_dict_1, batch_dict_2, method='random'):
        """Sample (independent) negative keypoints from both views.

        Args:
            method: 'fps' or 'random'

        Returns:
            keypoints_1, keypoints_2: (N1 + N2 + ..., 4) [bs_idx, x, y, z]
            keypoints_inds_1, keypoints_inds_2: per-view sample indices
        """
        batch_size = batch_dict_1['batch_size']
        if self.loss_cfg.POINT_SOURCE == 'raw_points':
            src_points_1 = batch_dict_1['points'][:, 1:4]
            src_points_2 = batch_dict_2['points'][:, 1:4]
            batch_indices = batch_dict_1['points'][:, 0].long()
        elif self.loss_cfg.POINT_SOURCE == 'voxel_centers':
            src_points_1 = common_utils.get_voxel_centers(
                batch_dict_1['voxel_coords'][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            # FIX: drop the batch column, mirroring view 1 (original passed (N, 4)).
            src_points_2 = common_utils.get_voxel_centers(
                batch_dict_2['voxel_coords'][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            batch_indices = batch_dict_1['voxel_coords'][:, 0].long()
        else:
            raise NotImplementedError

        keypoints_list_1 = []
        keypoints_list_2 = []
        keypoints_inds_list_1 = []
        keypoints_inds_list_2 = []
        for bs_idx in range(batch_size):
            bs_mask = (batch_indices == bs_idx)
            sampled_points_1 = src_points_1[bs_mask].unsqueeze(dim=0)  # (1, N, 3)
            sampled_points_2 = src_points_2[bs_mask].unsqueeze(dim=0)
            if method == 'fps':
                cur_pt_idxs_1 = pointnet2_stack_utils.farthest_point_sample(
                    sampled_points_1[:, :, 0:3].contiguous(), self.loss_cfg.NUM_NEGATIVE_KEYPOINTS
                ).long()
                cur_pt_idxs_2 = pointnet2_stack_utils.farthest_point_sample(
                    sampled_points_2[:, :, 0:3].contiguous(), self.loss_cfg.NUM_NEGATIVE_KEYPOINTS
                ).long()
            elif method == 'random':
                num_points = sampled_points_1.shape[1]
                # FIX: replace=False raises when num_points < NUM_NEGATIVE_KEYPOINTS;
                # sample with replacement in that (rare) case instead of crashing.
                need_replace = num_points < self.loss_cfg.NUM_NEGATIVE_KEYPOINTS
                cur_pt_idxs_1 = torch.from_numpy(np.random.choice(
                    num_points, self.loss_cfg.NUM_NEGATIVE_KEYPOINTS, replace=need_replace)).long()
                cur_pt_idxs_2 = torch.from_numpy(np.random.choice(
                    num_points, self.loss_cfg.NUM_NEGATIVE_KEYPOINTS, replace=need_replace)).long()
                cur_pt_idxs_1 = cur_pt_idxs_1.view(1, -1)
                cur_pt_idxs_2 = cur_pt_idxs_2.view(1, -1)
            if sampled_points_1.shape[1] < self.loss_cfg.NUM_NEGATIVE_KEYPOINTS:
                times = int(self.loss_cfg.NUM_NEGATIVE_KEYPOINTS / sampled_points_1.shape[1]) + 1
                non_empty = cur_pt_idxs_1[0, :sampled_points_1.shape[1]]
                # FIX: was self.model_cfg.NUM_NEGATIVE_KEYPOINTS (nonexistent attribute).
                cur_pt_idxs_1[0] = non_empty.repeat(times)[:self.loss_cfg.NUM_NEGATIVE_KEYPOINTS]
            if sampled_points_2.shape[1] < self.loss_cfg.NUM_NEGATIVE_KEYPOINTS:
                times = int(self.loss_cfg.NUM_NEGATIVE_KEYPOINTS / sampled_points_2.shape[1]) + 1
                non_empty = cur_pt_idxs_2[0, :sampled_points_2.shape[1]]
                # FIX: was self.model_cfg.NUM_NEGATIVE_KEYPOINTS (nonexistent attribute).
                cur_pt_idxs_2[0] = non_empty.repeat(times)[:self.loss_cfg.NUM_NEGATIVE_KEYPOINTS]
            keypoints_1 = sampled_points_1[0][cur_pt_idxs_1[0]].unsqueeze(dim=0)
            keypoints_2 = sampled_points_2[0][cur_pt_idxs_2[0]].unsqueeze(dim=0)
            keypoints_inds_list_1.append(cur_pt_idxs_1[0])
            keypoints_inds_list_2.append(cur_pt_idxs_2[0])
            keypoints_list_1.append(keypoints_1)
            keypoints_list_2.append(keypoints_2)

        keypoints_1 = torch.cat(keypoints_list_1, dim=0)  # (B, M, 3)
        keypoints_2 = torch.cat(keypoints_list_2, dim=0)
        keypoints_inds_1 = torch.cat(keypoints_inds_list_1, dim=0)
        keypoints_inds_2 = torch.cat(keypoints_inds_list_2, dim=0)
        if len(keypoints_1.shape) == 3:
            batch_idx = torch.arange(
                batch_size, device=keypoints_1.device).view(-1, 1).repeat(1, keypoints_1.shape[1]).view(-1, 1)
            keypoints_1 = torch.cat((batch_idx.float(), keypoints_1.view(-1, 3)), dim=1)
            keypoints_2 = torch.cat((batch_idx.float(), keypoints_2.view(-1, 3)), dim=1)
        return keypoints_1, keypoints_2, keypoints_inds_1, keypoints_inds_2
def build_feature_aggregation_module(config):
    """Instantiate the local feature-aggregation layer described by ``config``.

    Only 'StackSAModuleMSG' (the default NAME) is supported; it is realized by
    the simplified StackPointFeature layer with max pooling.
    """
    module_name = config.get('NAME', 'StackSAModuleMSG')
    if module_name != 'StackSAModuleMSG':
        raise NotImplementedError
    return StackPointFeature(
        radii=config.POOL_RADIUS, nsamples=config.NSAMPLE, pool_method='max_pool',
    )
class StackPointFeature(nn.Module):
    """Multi-radius ball-query grouping + pooling over stacked point sets.

    A stripped-down StackSAModuleMSG: neighbor features are gathered with
    QueryAndGroup (use_xyz=False, so coordinates are not appended to the
    features) and pooled per query point, with no per-scale MLP. Outputs of
    all radii are concatenated along the channel dimension.
    """

    def __init__(self, *, radii, nsamples, pool_method='max_pool'):
        """
        Args:
            radii: list of float, ball-query radius for each scale
            nsamples: list of int, number of neighbors sampled at each scale
            pool_method: 'max_pool' or 'avg_pool'
        """
        super().__init__()
        assert len(radii) == len(nsamples)
        self.groupers = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            # use_xyz=False: only the input feature channels are grouped.
            self.groupers.append(pointnet2_stack_utils.QueryAndGroup(radius, nsample, use_xyz=False))
        self.pool_method = pool_method

    def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True):
        """
        :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
        :param new_xyz: (M1 + M2 ..., 3) query (keypoint) coordinates
        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
        :return:
            new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
            new_features: (M1 + M2 ..., C * num_scales) pooled descriptors
        """
        new_features_list = []
        for k in range(len(self.groupers)):
            new_features, ball_idxs = self.groupers[k](
                xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
            )  # (M1 + M2, C, nsample)
            # Add a fake batch dim so max/avg_pool2d can reduce the neighbor axis.
            new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0)  # (1, C, M1 + M2 ..., nsample)
            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            else:
                raise NotImplementedError
            new_features = new_features.squeeze(dim=0).permute(1, 0)  # (M1 + M2 ..., C)
            new_features_list.append(new_features)

        new_features = torch.cat(new_features_list, dim=1)  # (M1 + M2 ..., C)
        return new_xyz, new_features
3DTrans | 3DTrans-master/pcdet/models/backbones_3d/IASSD_backbone.py | import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
import os
class IASSD_Backbone(nn.Module):
    """ Backbone for IA-SSD.

    Builds a stack of instance-aware set-abstraction ('SA_Layer') and vote
    ('Vote_Layer') modules from MODEL.SA_CONFIG and runs them sequentially,
    recording per-layer sampled coordinates, features and classification
    predictions into the batch dict for the downstream head/loss.
    """

    def __init__(self, model_cfg, num_class, input_channels, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class

        self.SA_modules = nn.ModuleList()
        # First 3 input channels are xyz, consumed separately from features.
        channel_in = input_channels - 3
        channel_out_list = [channel_in]

        self.num_points_each_layer = []

        sa_config = self.model_cfg.SA_CONFIG
        self.layer_types = sa_config.LAYER_TYPE
        self.ctr_idx_list = sa_config.CTR_INDEX      # which earlier layer provides centers (-1: none)
        self.layer_inputs = sa_config.LAYER_INPUT    # which earlier layer feeds each layer
        self.aggregation_mlps = sa_config.get('AGGREGATION_MLPS', None)
        self.confidence_mlps = sa_config.get('CONFIDENCE_MLPS', None)
        self.max_translate_range = sa_config.get('MAX_TRANSLATE_RANGE', None)

        for k in range(sa_config.NSAMPLE_LIST.__len__()):
            # LAYER_INPUT may be a list; the last entry selects the source layer.
            if isinstance(self.layer_inputs[k], list):  ###
                channel_in = channel_out_list[self.layer_inputs[k][-1]]
            else:
                channel_in = channel_out_list[self.layer_inputs[k]]

            if self.layer_types[k] == 'SA_Layer':
                mlps = sa_config.MLPS[k].copy()
                channel_out = 0
                # Prepend the input channel count to every per-scale MLP spec.
                for idx in range(mlps.__len__()):
                    mlps[idx] = [channel_in] + mlps[idx]
                    channel_out += mlps[idx][-1]

                # Optional aggregation MLP after multi-scale concat; empty list
                # means "disabled", otherwise it defines the layer's output width.
                if self.aggregation_mlps and self.aggregation_mlps[k]:
                    aggregation_mlp = self.aggregation_mlps[k].copy()
                    if aggregation_mlp.__len__() == 0:
                        aggregation_mlp = None
                    else:
                        channel_out = aggregation_mlp[-1]
                else:
                    aggregation_mlp = None

                # Optional per-point classification MLP (instance-aware sampling).
                if self.confidence_mlps and self.confidence_mlps[k]:
                    confidence_mlp = self.confidence_mlps[k].copy()
                    if confidence_mlp.__len__() == 0:
                        confidence_mlp = None
                else:
                    confidence_mlp = None

                self.SA_modules.append(
                    pointnet2_modules.PointnetSAModuleMSG_WithSampling(
                        npoint_list=sa_config.NPOINT_LIST[k],
                        sample_range_list=sa_config.SAMPLE_RANGE_LIST[k],
                        sample_type_list=sa_config.SAMPLE_METHOD_LIST[k],
                        radii=sa_config.RADIUS_LIST[k],
                        nsamples=sa_config.NSAMPLE_LIST[k],
                        mlps=mlps,
                        use_xyz=True,
                        dilated_group=sa_config.DILATED_GROUP[k],
                        aggregation_mlp=aggregation_mlp,
                        confidence_mlp=confidence_mlp,
                        num_class = self.num_class
                    )
                )

            elif self.layer_types[k] == 'Vote_Layer':
                self.SA_modules.append(pointnet2_modules.Vote_layer(mlp_list=sa_config.MLPS[k],
                                                                    pre_channel=channel_out_list[self.layer_inputs[k]],
                                                                    max_translate_range=self.max_translate_range
                                                                    )
                                       )

            channel_out_list.append(channel_out)
        # Width of the final layer's features, consumed by the detection head.
        self.num_point_features = channel_out

    def break_up_pc(self, pc):
        """Split stacked points (N, 1+3+C) into batch indices, xyz and features."""
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)

        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()

        # Every sample must contribute the same point count so the stacked
        # cloud can be reshaped into a dense (B, N, 3) tensor.
        assert xyz_batch_cnt.min() == xyz_batch_cnt.max()
        xyz = xyz.view(batch_size, -1, 3)
        features = features.view(batch_size, -1, features.shape[-1]).permute(0, 2, 1).contiguous() if features is not None else None ###

        encoder_xyz, encoder_features, sa_ins_preds = [xyz], [features], []
        encoder_coords = [torch.cat([batch_idx.view(batch_size, -1, 1), xyz], dim=-1)]

        li_cls_pred = None
        for i in range(len(self.SA_modules)):
            xyz_input = encoder_xyz[self.layer_inputs[i]]
            feature_input = encoder_features[self.layer_inputs[i]]

            if self.layer_types[i] == 'SA_Layer':
                ctr_xyz = encoder_xyz[self.ctr_idx_list[i]] if self.ctr_idx_list[i] != -1 else None
                li_xyz, li_features, li_cls_pred = self.SA_modules[i](xyz_input, feature_input, li_cls_pred, ctr_xyz=ctr_xyz)

            elif self.layer_types[i] == 'Vote_Layer': #i=4
                # Vote layer shifts selected points toward instance centers.
                li_xyz, li_features, xyz_select, ctr_offsets = self.SA_modules[i](xyz_input, feature_input)
                centers = li_xyz
                centers_origin = xyz_select
                center_origin_batch_idx = batch_idx.view(batch_size, -1)[:, :centers_origin.shape[1]]
                encoder_coords.append(torch.cat([center_origin_batch_idx[..., None].float(),centers_origin.view(batch_size, -1, 3)],dim =-1))

            encoder_xyz.append(li_xyz)
            li_batch_idx = batch_idx.view(batch_size, -1)[:, :li_xyz.shape[1]]
            encoder_coords.append(torch.cat([li_batch_idx[..., None].float(),li_xyz.view(batch_size, -1, 3)],dim =-1))
            encoder_features.append(li_features)

            if li_cls_pred is not None:
                li_cls_batch_idx = batch_idx.view(batch_size, -1)[:, :li_cls_pred.shape[1]]
                sa_ins_preds.append(torch.cat([li_cls_batch_idx[..., None].float(),li_cls_pred.view(batch_size, -1, li_cls_pred.shape[-1])],dim =-1))
            else:
                sa_ins_preds.append([])

        ctr_batch_idx = batch_idx.view(batch_size, -1)[:, :li_xyz.shape[1]]
        ctr_batch_idx = ctr_batch_idx.contiguous().view(-1)
        # Flatten voting results back to stacked (N, 4) [bs_idx, x, y, z] layout.
        batch_dict['ctr_offsets'] = torch.cat((ctr_batch_idx[:, None].float(), ctr_offsets.contiguous().view(-1, 3)), dim=1)
        batch_dict['centers'] = torch.cat((ctr_batch_idx[:, None].float(), centers.contiguous().view(-1, 3)), dim=1)
        batch_dict['centers_origin'] = torch.cat((ctr_batch_idx[:, None].float(), centers_origin.contiguous().view(-1, 3)), dim=1)

        center_features = encoder_features[-1].permute(0, 2, 1).contiguous().view(-1, encoder_features[-1].shape[1])
        batch_dict['centers_features'] = center_features
        batch_dict['ctr_batch_idx'] = ctr_batch_idx
        batch_dict['encoder_xyz'] = encoder_xyz
        batch_dict['encoder_coords'] = encoder_coords
        batch_dict['sa_ins_preds'] = sa_ins_preds
        batch_dict['encoder_features'] = encoder_features

        ###save per frame
        # NOTE(review): debug-only dump of per-layer sampled coordinates,
        # gated by SA_CONFIG.SAVE_SAMPLE_LIST at eval time. It loads a
        # hardcoded user-specific path ('/home/yifan/tmp.npy') and relies on
        # '/' working on the loaded object — this cannot work outside the
        # original author's machine and should be removed or made configurable.
        if self.model_cfg.SA_CONFIG.get('SAVE_SAMPLE_LIST',False) and not self.training:
            import numpy as np
            result_dir = np.load('/home/yifan/tmp.npy', allow_pickle=True)
            for i in range(batch_size) :
                # i=0
                # point_saved_path = '/home/yifan/tmp'
                point_saved_path = result_dir / 'sample_list_save'
                os.makedirs(point_saved_path, exist_ok=True)
                idx = batch_dict['frame_id'][i]
                xyz_list = []
                for sa_xyz in encoder_xyz:
                    xyz_list.append(sa_xyz[i].cpu().numpy())
                if '/' in idx: # Kitti_tracking
                    sample_xyz = point_saved_path / idx.split('/')[0] / ('sample_list_' + ('%s' % idx.split('/')[1]))
                    os.makedirs(point_saved_path / idx.split('/')[0], exist_ok=True)
                else:
                    sample_xyz = point_saved_path / ('sample_list_' + ('%s' % idx))
                np.save(str(sample_xyz), xyz_list)
                # np.save(str(new_file), point_new.detach().cpu().numpy())

        return batch_dict
3DTrans | 3DTrans-master/pcdet/models/backbones_3d/spconv_unet.py | from functools import partial
import torch
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
from ...utils import common_utils
from .spconv_backbone import post_act_block
class SparseBasicBlock(spconv.SparseModule):
    """ResNet-style basic residual block built from submanifold sparse convs.

    Two 3x3x3 SubMConv3d + BN(+ReLU) stages with an identity (or downsampled)
    skip connection, operating on spconv SparseConvTensors. Sharing an
    ``indice_key`` lets spconv reuse the precomputed index pairs.
    """
    expansion = 1  # channel expansion factor, kept for ResNet API parity

    def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None):
        super(SparseBasicBlock, self).__init__()
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample  # optional module adapting the skip path
        self.stride = stride

    def forward(self, x):
        # Keep the raw feature tensor for the residual addition below.
        identity = x.features

        assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim()

        out = self.conv1(x)
        # replace_feature swaps the dense feature matrix inside the sparse tensor
        # while keeping its indices/spatial metadata intact.
        out = replace_feature(out, self.bn1(out.features))
        out = replace_feature(out, self.relu(out.features))

        out = self.conv2(out)
        out = replace_feature(out, self.bn2(out.features))

        if self.downsample is not None:
            identity = self.downsample(x)

        out = replace_feature(out, out.features + identity)
        out = replace_feature(out, self.relu(out.features))

        return out
class UNetV2(nn.Module):
"""
Sparse Convolution based UNet for point-wise feature learning.
Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi, et. al)
From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
"""
def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.sparse_shape = grid_size[::-1] + [1, 0, 0]
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
norm_fn(16),
nn.ReLU(),
)
block = post_act_block
self.conv1 = spconv.SparseSequential(
block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
)
self.conv2 = spconv.SparseSequential(
# [1600, 1408, 41] <- [800, 704, 21]
block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
)
self.conv3 = spconv.SparseSequential(
# [800, 704, 21] <- [400, 352, 11]
block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
)
self.conv4 = spconv.SparseSequential(
# [400, 352, 11] <- [200, 176, 5]
block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
)
if self.model_cfg.get('RETURN_ENCODED_TENSOR', True):
last_pad = self.model_cfg.get('last_pad', 0)
self.conv_out = spconv.SparseSequential(
# [200, 150, 5] -> [200, 150, 2]
spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_down2'),
norm_fn(128),
nn.ReLU(),
)
else:
self.conv_out = None
# decoder
# [400, 352, 11] <- [200, 176, 5]
self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn)
self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')
self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')
# [800, 704, 21] <- [400, 352, 11]
self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn)
self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3')
self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv')
# [1600, 1408, 41] <- [800, 704, 21]
self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn)
self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2')
self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv')
# [1600, 1408, 41] <- [1600, 1408, 41]
self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn)
self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1')
self.conv5 = spconv.SparseSequential(
block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1')
)
self.num_point_features = 16
def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
x_trans = conv_t(x_lateral)
x = x_trans
x = replace_feature(x, torch.cat((x_bottom.features, x_trans.features), dim=1))
x_m = conv_m(x)
x = self.channel_reduction(x, x_m.features.shape[1])
x = replace_feature(x, x_m.features + x.features)
x = conv_inv(x)
return x
@staticmethod
def channel_reduction(x, out_channels):
"""
Args:
x: x.features (N, C1)
out_channels: C2
Returns:
"""
features = x.features
n, in_channels = features.shape
assert (in_channels % out_channels == 0) and (in_channels >= out_channels)
x = replace_feature(x, features.view(n, out_channels, -1).sum(dim=2))
return x
    def forward(self, batch_dict):
        """
        Encoder-decoder forward pass: an 8x-downsampling sparse encoder,
        then a UR-block decoder back up to full voxel resolution to produce
        per-voxel point features (UNet-style).

        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        # Wrap flat voxel features/coords into a sparse tensor over the grid.
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        # Encoder: progressively downsampled sparse feature maps.
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        if self.conv_out is not None:
            # for detection head
            # [200, 176, 5] -> [200, 176, 2]
            out = self.conv_out(x_conv4)
            batch_dict['encoded_spconv_tensor'] = out
            batch_dict['encoded_spconv_tensor_stride'] = 8
        # for segmentation head
        # Decoder: each UR block fuses a lateral encoder feature with the
        # upsampled feature from the previous (deeper) stage. The deepest
        # stage uses x_conv4 as both lateral and bottom input.
        # [400, 352, 11] <- [200, 176, 5]
        x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4)
        # [800, 704, 21] <- [400, 352, 11]
        x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3)
        # [1600, 1408, 41] <- [800, 704, 21]
        x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2)
        # [1600, 1408, 41] <- [1600, 1408, 41]
        x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5)
        batch_dict['point_features'] = x_up1.features
        # Convert voxel indices (z, y, x) back to metric voxel centers; keep
        # the batch index as the first output column.
        point_coords = common_utils.get_voxel_centers(
            x_up1.indices[:, 1:], downsample_times=1, voxel_size=self.voxel_size,
            point_cloud_range=self.point_cloud_range
        )
        batch_dict['point_coords'] = torch.cat((x_up1.indices[:, 0:1].float(), point_coords), dim=1)
        return batch_dict
| 8,602 | 39.389671 | 117 | py |
3DTrans | 3DTrans-master/pcdet/models/backbones_3d/spconv_backbone_unibn.py | from functools import partial
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
from ...utils import uni3d_norm_2_in
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
                   conv_type='subm', norm_fn=None):
    """Build a sparse convolution wrapped in a SparseSequential.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of output feature channels.
        kernel_size: convolution kernel size.
        indice_key: spconv indice key shared between layers that operate on
            the same sparse structure.
        stride: stride (used by the 'spconv' type only).
        padding: padding (used by the 'spconv' type only).
        conv_type: one of 'subm', 'spconv', 'inverseconv'.
        norm_fn: optional normalization-layer constructor. When given, a norm
            layer and ReLU are appended after the convolution; when None the
            block contains only the convolution, matching
            VoxelBackBone8x_UniBN, which applies its norms separately so they
            can receive voxel indices.

    Returns:
        spconv.SparseSequential containing the conv (and optional norm+ReLU).

    Raises:
        NotImplementedError: if conv_type is not one of the supported kinds.
    """
    if conv_type == 'subm':
        conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key)
    elif conv_type == 'spconv':
        conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                   bias=False, indice_key=indice_key)
    elif conv_type == 'inverseconv':
        conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False)
    else:
        raise NotImplementedError
    layers = [conv]
    if norm_fn is not None:
        # Bug fix: the previous signature had no norm_fn parameter, so the
        # norm_fn keyword passed by VoxelResBackBone8x_UniBN in this module
        # raised a TypeError. Accepting it (default None) is backward
        # compatible with all existing keyword-free call sites.
        layers.append(norm_fn(out_channels))
        layers.append(nn.ReLU())
    m = spconv.SparseSequential(*layers)
    return m
class SparseBasicBlock(spconv.SparseModule):
    """ResNet-style sparse basic block: two 3x3x3 submanifold convolutions,
    each followed by normalization and ReLU, plus an identity (or
    downsample) shortcut connection."""
    expansion = 1  # ResNet convention: output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None):
        super(SparseBasicBlock, self).__init__()
        assert norm_fn is not None
        # NOTE(review): norm_fn is asserted non-None above, so bias is always
        # True even though a norm layer follows each conv -- confirm this is
        # intentional.
        bias = norm_fn is not None
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        # NOTE(review): the norms here receive features only, while the
        # UniNorm1d layers used elsewhere in this module are called with
        # (features, indices) -- verify the norm_fn passed in supports a
        # features-only call.
        out = replace_feature(out, self.bn1(out.features))
        out = replace_feature(out, self.relu(out.features))
        out = self.conv2(out)
        out = replace_feature(out, self.bn2(out.features))
        if self.downsample is not None:
            identity = self.downsample(x)
        # Residual addition on the sparse features, then final activation.
        out = replace_feature(out, out.features + identity.features)
        out = replace_feature(out, self.relu(out.features))
        return out
class VoxelBackBone8x_UniBN(nn.Module):
    """Sparse 3D voxel backbone with 8x downsampling using dataset-aware
    UniNorm1d normalization (Uni3D cross-dataset training).

    Unlike VoxelBackBone8x, every norm/ReLU pair is registered as a separate
    module rather than inside a SparseSequential, because in forward() the
    UniNorm1d layers are called with both features and voxel indices.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # db_source tells UniNorm1d which dataset's statistics to use.
        norm_fn = partial(uni3d_norm_2_in.UniNorm1d, dataset_from_flag=int(self.model_cfg.db_source), eps=1e-3, momentum=0.01, voxel_coord=True)
        # Reverse grid_size to (z, y, x) order and pad one voxel along z.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
        )
        self.bn_input = norm_fn(16)
        self.relu_input = nn.ReLU()
        block = post_act_block
        #----------Block_1---------#
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, padding=1, indice_key='subm1'),
        )
        self.conv1_bn_1 = norm_fn(16)
        self.conv1_relu_1 = nn.ReLU()
        #----------Block_2---------#
        self.conv2_1 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
        )
        self.conv2_bn_1 = norm_fn(32)
        self.conv2_relu_1 = nn.ReLU()
        self.conv2_2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(32, 32, 3, padding=1, indice_key='subm2'),
        )
        self.conv2_bn_2 = norm_fn(32)
        self.conv2_relu_2 = nn.ReLU()
        self.conv2_3 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(32, 32, 3, padding=1, indice_key='subm2'),
        )
        self.conv2_bn_3 = norm_fn(32)
        self.conv2_relu_3 = nn.ReLU()
        #----------Block_3---------#
        self.conv3_1 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
        )
        self.conv3_bn_1 = norm_fn(64)
        self.conv3_relu_1 = nn.ReLU()
        self.conv3_2 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(64, 64, 3, padding=1, indice_key='subm3'),
        )
        self.conv3_bn_2 = norm_fn(64)
        self.conv3_relu_2 = nn.ReLU()
        self.conv3_3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(64, 64, 3, padding=1, indice_key='subm3'),
        )
        self.conv3_bn_3 = norm_fn(64)
        self.conv3_relu_3 = nn.ReLU()
        #----------Block_4---------#
        self.conv4_1 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
        )
        self.conv4_bn_1 = norm_fn(64)
        self.conv4_relu_1 = nn.ReLU()
        self.conv4_2 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, padding=1, indice_key='subm4'),
        )
        self.conv4_bn_2 = norm_fn(64)
        self.conv4_relu_2 = nn.ReLU()
        self.conv4_3 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, padding=1, indice_key='subm4'),
        )
        self.conv4_bn_3 = norm_fn(64)
        self.conv4_relu_3 = nn.ReLU()
        #----------Last Block---------#
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
        )
        self.conv_out_bn = norm_fn(128)
        self.conv_out_relu = nn.ReLU()
        # Output channel count of conv_out, consumed by downstream modules.
        self.num_point_features = 128
        # Per-stage channel widths exposed for multi-scale consumers.
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 64
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        # Each stage below follows the same pattern: sparse conv, then the
        # UniNorm1d layer applied to (features, indices), then ReLU.
        #----------Input Block---------#
        t_input = self.conv_input(input_sp_tensor)
        t_input = replace_feature(t_input, self.bn_input(t_input.features, t_input.indices))
        t_input = replace_feature(t_input, self.relu_input(t_input.features))
        #----------Block_1---------#
        t_conv1 = self.conv1(t_input)
        t_conv1 = replace_feature(t_conv1, self.conv1_bn_1(t_conv1.features, t_conv1.indices))
        t_conv1 = replace_feature(t_conv1, self.conv1_relu_1(t_conv1.features))
        #----------Block_2---------#
        t_conv2_1 = self.conv2_1(t_conv1)
        t_conv2_1 = replace_feature(t_conv2_1, self.conv2_bn_1(t_conv2_1.features, t_conv2_1.indices))
        t_conv2_1 = replace_feature(t_conv2_1, self.conv2_relu_1(t_conv2_1.features))
        t_conv2_2 = self.conv2_2(t_conv2_1)
        t_conv2_2 = replace_feature(t_conv2_2, self.conv2_bn_2(t_conv2_2.features, t_conv2_2.indices))
        t_conv2_2 = replace_feature(t_conv2_2, self.conv2_relu_2(t_conv2_2.features))
        t_conv2_3 = self.conv2_3(t_conv2_2)
        t_conv2_3 = replace_feature(t_conv2_3, self.conv2_bn_3(t_conv2_3.features, t_conv2_3.indices))
        t_conv2_3 = replace_feature(t_conv2_3, self.conv2_relu_3(t_conv2_3.features))
        #----------Block_3---------#
        t_conv3_1 = self.conv3_1(t_conv2_3)
        t_conv3_1 = replace_feature(t_conv3_1, self.conv3_bn_1(t_conv3_1.features, t_conv3_1.indices))
        t_conv3_1 = replace_feature(t_conv3_1, self.conv3_relu_1(t_conv3_1.features))
        t_conv3_2 = self.conv3_2(t_conv3_1)
        t_conv3_2 = replace_feature(t_conv3_2, self.conv3_bn_2(t_conv3_2.features, t_conv3_2.indices))
        t_conv3_2 = replace_feature(t_conv3_2, self.conv3_relu_2(t_conv3_2.features))
        t_conv3_3 = self.conv3_3(t_conv3_2)
        t_conv3_3 = replace_feature(t_conv3_3, self.conv3_bn_3(t_conv3_3.features, t_conv3_3.indices))
        t_conv3_3 = replace_feature(t_conv3_3, self.conv3_relu_3(t_conv3_3.features))
        #----------Block_4---------#
        t_conv4_1 = self.conv4_1(t_conv3_3)
        t_conv4_1 = replace_feature(t_conv4_1, self.conv4_bn_1(t_conv4_1.features, t_conv4_1.indices))
        t_conv4_1 = replace_feature(t_conv4_1, self.conv4_relu_1(t_conv4_1.features))
        t_conv4_2 = self.conv4_2(t_conv4_1)
        t_conv4_2 = replace_feature(t_conv4_2, self.conv4_bn_2(t_conv4_2.features, t_conv4_2.indices))
        t_conv4_2 = replace_feature(t_conv4_2, self.conv4_relu_2(t_conv4_2.features))
        t_conv4_3 = self.conv4_3(t_conv4_2)
        t_conv4_3 = replace_feature(t_conv4_3, self.conv4_bn_3(t_conv4_3.features, t_conv4_3.indices))
        t_conv4_3 = replace_feature(t_conv4_3, self.conv4_relu_3(t_conv4_3.features))
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(t_conv4_3)
        out = replace_feature(out, self.conv_out_bn(out.features, out.indices))
        out = replace_feature(out, self.conv_out_relu(out.features))
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': t_conv1,
                'x_conv2': t_conv2_3,
                'x_conv3': t_conv3_3,
                'x_conv4': t_conv4_3,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
class VoxelResBackBone8x_UniBN(nn.Module):
    """Residual sparse voxel backbone with 8x downsampling, using UniNorm1d
    normalization; mirrors VoxelResBackBone8x but with dataset-aware norms.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # db_source tells UniNorm1d which dataset's statistics to use.
        norm_fn = partial(uni3d_norm_2_in.UniNorm1d, dataset_from_flag=int(self.model_cfg.db_source), eps=1e-3, momentum=0.01, voxel_coord=True)
        # Reverse grid_size to (z, y, x) order and pad one voxel along z.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        # NOTE(review): the post_act_block defined in this module is passed a
        # norm_fn keyword below -- confirm its signature accepts norm_fn.
        # Also confirm UniNorm1d works when invoked inside SparseSequential
        # with features only (elsewhere it receives indices as well).
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128),
            nn.ReLU(),
        )
        # Output channel count of conv_out, consumed by downstream modules.
        self.num_point_features = 128
        # Per-stage channel widths exposed for multi-scale consumers.
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 128
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        # Encoder stages at strides 1, 2, 4, 8.
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
3DTrans | 3DTrans-master/pcdet/models/backbones_3d/spconv_backbone.py | from functools import partial
import torch.nn as nn
from ...utils.spconv_utils import replace_feature, spconv
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
                   conv_type='subm', norm_fn=None):
    """Create a sparse convolution followed by normalization and ReLU.

    Args:
        in_channels: input feature channels.
        out_channels: output feature channels.
        kernel_size: convolution kernel size.
        indice_key: spconv indice key shared across layers on the same
            sparse structure.
        stride: stride (only used by the 'spconv' type).
        padding: padding (only used by the 'spconv' type).
        conv_type: 'subm', 'spconv' or 'inverseconv'.
        norm_fn: normalization-layer constructor applied to out_channels.

    Returns:
        spconv.SparseSequential of conv -> norm -> ReLU.

    Raises:
        NotImplementedError: for an unrecognized conv_type.
    """
    if conv_type == 'subm':
        conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size,
                                 bias=False, indice_key=indice_key)
    elif conv_type == 'spconv':
        conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size,
                                   stride=stride, padding=padding,
                                   bias=False, indice_key=indice_key)
    elif conv_type == 'inverseconv':
        conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size,
                                          indice_key=indice_key, bias=False)
    else:
        raise NotImplementedError
    return spconv.SparseSequential(conv, norm_fn(out_channels), nn.ReLU())
class SparseBasicBlock(spconv.SparseModule):
    """ResNet-style sparse basic block: two 3x3x3 submanifold convolutions,
    each followed by normalization and ReLU, plus an identity (or
    downsample) shortcut connection."""
    expansion = 1  # ResNet convention: output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None):
        super(SparseBasicBlock, self).__init__()
        assert norm_fn is not None
        # NOTE(review): norm_fn is asserted non-None above, so bias is always
        # True even though a norm layer follows each conv -- confirm this is
        # intentional.
        bias = norm_fn is not None
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        # conv -> norm -> ReLU -> conv -> norm, operating on sparse features.
        out = self.conv1(x)
        out = replace_feature(out, self.bn1(out.features))
        out = replace_feature(out, self.relu(out.features))
        out = self.conv2(out)
        out = replace_feature(out, self.bn2(out.features))
        if self.downsample is not None:
            identity = self.downsample(x)
        # Residual addition on the sparse features, then final activation.
        out = replace_feature(out, out.features + identity.features)
        out = replace_feature(out, self.relu(out.features))
        return out
class VoxelBackBone8x(nn.Module):
    """Standard sparse 3D voxel backbone with 8x spatial downsampling
    (SECOND-style): four conv stages at strides 1/2/4/8 plus a final
    z-compressing conv for the detection head."""
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Reverse grid_size to (z, y, x) order and pad one voxel along z.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128),
            nn.ReLU(),
        )
        # Output channel count of conv_out, consumed by downstream modules.
        self.num_point_features = 128
        # Per-stage channel widths exposed for multi-scale consumers.
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 64
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        # Encoder stages at strides 1, 2, 4, 8.
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
class VoxelResBackBone8x(nn.Module):
    """Residual sparse 3D voxel backbone with 8x downsampling: like
    VoxelBackBone8x but each stage uses SparseBasicBlock residual blocks
    and the deepest stage widens to 128 channels."""
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Reverse grid_size to (z, y, x) order and pad one voxel along z.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128),
            nn.ReLU(),
        )
        # Output channel count of conv_out, consumed by downstream modules.
        self.num_point_features = 128
        # Per-stage channel widths exposed for multi-scale consumers.
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 128
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        # Encoder stages at strides 1, 2, 4, 8.
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
# ------------------------------------------------------- #
# ------------------New 3D Backbone---------------------- #
# ------------------------------------------------------- #
class VoxelWideResBackBone8x(nn.Module):
    """Width-scaled residual sparse voxel backbone with 8x downsampling.

    Topologically identical to VoxelResBackBone8x, but every stage's channel
    count is multiplied by model_cfg.WIDE_FACTOR.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # NOTE(review): the input_channels argument is ignored in favour of
        # model_cfg.IN_CHANNELS -- confirm this is intentional.
        self.input_channels = model_cfg.IN_CHANNELS
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Reverse grid_size to (z, y, x) order and pad one voxel along z.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.wide_factor = model_cfg.WIDE_FACTOR
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(self.input_channels, 16*self.wide_factor, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16*self.wide_factor),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(16*self.wide_factor, 16*self.wide_factor, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16*self.wide_factor, 16*self.wide_factor, norm_fn=norm_fn, indice_key='res1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16*self.wide_factor, 32*self.wide_factor, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            SparseBasicBlock(32*self.wide_factor, 32*self.wide_factor, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32*self.wide_factor, 32*self.wide_factor, norm_fn=norm_fn, indice_key='res2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32*self.wide_factor, 64*self.wide_factor, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            SparseBasicBlock(64*self.wide_factor, 64*self.wide_factor, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64*self.wide_factor, 64*self.wide_factor, norm_fn=norm_fn, indice_key='res3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64*self.wide_factor, 128*self.wide_factor, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            SparseBasicBlock(128*self.wide_factor, 128*self.wide_factor, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128*self.wide_factor, 128*self.wide_factor, norm_fn=norm_fn, indice_key='res4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(128*self.wide_factor, 128*self.wide_factor, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128*self.wide_factor),
            nn.ReLU(),
        )
        # Bug fix: conv_out produces 128 * wide_factor channels, so advertise
        # the true output width (previously hard-coded to 128, which was
        # inconsistent with backbone_channels and wrong for WIDE_FACTOR != 1).
        self.num_point_features = 128 * self.wide_factor
        # Per-stage channel widths exposed for multi-scale consumers.
        self.backbone_channels = {
            'x_conv1': 16*self.wide_factor,
            'x_conv2': 32*self.wide_factor,
            'x_conv3': 64*self.wide_factor,
            'x_conv4': 128*self.wide_factor
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        # NOTE(review): this variant consumes 'voxel_features_after_scn'
        # rather than 'voxel_features' -- confirm the upstream module
        # provides that key.
        voxel_features, voxel_coords = batch_dict['voxel_features_after_scn'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        # Encoder stages at strides 1, 2, 4, 8.
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
class VoxelWideResBackBone_L8x(nn.Module):
    """Wider and deeper residual sparse voxel backbone with 8x downsampling:
    like VoxelWideResBackBone8x but with four residual blocks per stage.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Reverse grid_size to (z, y, x) order and pad one voxel along z.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.wide_factor = model_cfg.WIDE_FACTOR
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16*self.wide_factor, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16*self.wide_factor),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(16*self.wide_factor, 16*self.wide_factor, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16*self.wide_factor, 16*self.wide_factor, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16*self.wide_factor, 16*self.wide_factor, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(16*self.wide_factor, 16*self.wide_factor, norm_fn=norm_fn, indice_key='res1'),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16*self.wide_factor, 32*self.wide_factor, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            SparseBasicBlock(32*self.wide_factor, 32*self.wide_factor, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32*self.wide_factor, 32*self.wide_factor, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32*self.wide_factor, 32*self.wide_factor, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(32*self.wide_factor, 32*self.wide_factor, norm_fn=norm_fn, indice_key='res2'),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32*self.wide_factor, 64*self.wide_factor, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            SparseBasicBlock(64*self.wide_factor, 64*self.wide_factor, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64*self.wide_factor, 64*self.wide_factor, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64*self.wide_factor, 64*self.wide_factor, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(64*self.wide_factor, 64*self.wide_factor, norm_fn=norm_fn, indice_key='res3'),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64*self.wide_factor, 128*self.wide_factor, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            SparseBasicBlock(128*self.wide_factor, 128*self.wide_factor, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128*self.wide_factor, 128*self.wide_factor, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128*self.wide_factor, 128*self.wide_factor, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(128*self.wide_factor, 128*self.wide_factor, norm_fn=norm_fn, indice_key='res4'),
        )
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(128*self.wide_factor, 128*self.wide_factor, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
                                bias=False, indice_key='spconv_down2'),
            norm_fn(128*self.wide_factor),
            nn.ReLU(),
        )
        # Bug fix: conv_out produces 128 * wide_factor channels, so advertise
        # the true output width (previously hard-coded to 128, which was
        # inconsistent with backbone_channels and wrong for WIDE_FACTOR != 1).
        self.num_point_features = 128 * self.wide_factor
        # Per-stage channel widths exposed for multi-scale consumers.
        self.backbone_channels = {
            'x_conv1': 16*self.wide_factor,
            'x_conv2': 32*self.wide_factor,
            'x_conv3': 64*self.wide_factor,
            'x_conv4': 128*self.wide_factor
        }
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)
        # Encoder stages at strides 1, 2, 4, 8.
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update({
            'encoded_spconv_tensor': out,
            'encoded_spconv_tensor_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
3DTrans | 3DTrans-master/pcdet/models/backbones_3d/pointnet2_backbone.py | import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_modules_stack
from ...ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_utils_stack
class PointNet2MSG(nn.Module):
    """PointNet++ backbone with multi-scale grouping (MSG).

    Builds a stack of set-abstraction (SA) modules from SA_CONFIG and the
    matching feature-propagation (FP) modules from FP_MLPS, producing one
    feature vector per input point.
    """
    def __init__(self, model_cfg, input_channels, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        # The first 3 input channels are xyz and are handled separately.
        channel_in = input_channels - 3
        self.num_points_each_layer = []
        skip_channel_list = [input_channels - 3]
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            for idx in range(mlps.__len__()):
                # Prepend the incoming channel count to each per-scale MLP
                # spec; the SA output concatenates the last channel of every
                # scale, hence the running channel_out sum.
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            self.SA_modules.append(
                pointnet2_modules.PointnetSAModuleMSG(
                    npoint=self.model_cfg.SA_CONFIG.NPOINTS[k],
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=self.model_cfg.SA_CONFIG.get('USE_XYZ', True),
                )
            )
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        self.FP_modules = nn.ModuleList()
        for k in range(self.model_cfg.FP_MLPS.__len__()):
            # FP level k consumes the deeper FP output (or the last SA output
            # for the deepest level) plus the skip features at its resolution.
            pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out
            self.FP_modules.append(
                pointnet2_modules.PointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k]
                )
            )
        self.num_point_features = self.model_cfg.FP_MLPS[0][-1]
    def break_up_pc(self, pc):
        # Split a stacked point tensor (batch_idx, x, y, z, feats...) into
        # its batch indices, xyz coordinates, and optional extra features.
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        # All batch entries must have the same point count so the stacked
        # points can be reshaped to (B, N, ...).
        assert xyz_batch_cnt.min() == xyz_batch_cnt.max()
        xyz = xyz.view(batch_size, -1, 3)
        features = features.view(batch_size, -1, features.shape[-1]).permute(0, 2, 1).contiguous() if features is not None else None
        l_xyz, l_features = [xyz], [features]
        # Encoder: successive set-abstraction levels.
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        # Decoder: propagate features back from coarse to fine levels.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
            ) # (B, C, N)
        point_features = l_features[0].permute(0, 2, 1).contiguous() # (B, N, C)
        # Flatten back to the stacked (N, C) layout with batch-idx coords.
        batch_dict['point_features'] = point_features.view(-1, point_features.shape[-1])
        batch_dict['point_coords'] = torch.cat((batch_idx[:, None].float(), l_xyz[0].view(-1, 3)), dim=1)
        return batch_dict
class PointNet2Backbone(nn.Module):
    """
    DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723

    Stack-based PointNet++ backbone: set-abstraction levels use the
    "stacked" (variable points per sample) API instead of fixed (B, N, 3)
    batches, so samples within a batch may have different point counts.
    """

    def __init__(self, model_cfg, input_channels, **kwargs):
        # Intentionally disabled by the original authors; kept so the
        # config-driven registry still resolves the class name.
        assert False, 'DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723'
        super().__init__()
        self.model_cfg = model_cfg

        self.SA_modules = nn.ModuleList()
        channel_in = input_channels - 3  # xyz is consumed separately

        self.num_points_each_layer = []
        skip_channel_list = [input_channels]
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            self.num_points_each_layer.append(self.model_cfg.SA_CONFIG.NPOINTS[k])
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]

            self.SA_modules.append(
                pointnet2_modules_stack.StackSAModuleMSG(
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=self.model_cfg.SA_CONFIG.get('USE_XYZ', True),
                )
            )
            skip_channel_list.append(channel_out)
            channel_in = channel_out

        self.FP_modules = nn.ModuleList()
        for k in range(self.model_cfg.FP_MLPS.__len__()):
            pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out
            self.FP_modules.append(
                pointnet2_modules_stack.StackPointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k]
                )
            )
        self.num_point_features = self.model_cfg.FP_MLPS[0][-1]

    def break_up_pc(self, pc):
        """Split a stacked (N, 4 + C) point tensor into batch_idx / xyz / features."""
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None)
        return batch_idx, xyz, features

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        batch_size = batch_dict['batch_size']
        points = batch_dict['points']
        batch_idx, xyz, features = self.break_up_pc(points)

        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()

        l_xyz, l_features, l_batch_cnt = [xyz], [features], [xyz_batch_cnt]
        for i in range(len(self.SA_modules)):
            # Build the per-sample FPS keypoint set for this SA level.
            new_xyz_list = []
            for k in range(batch_size):
                if len(l_xyz) == 1:
                    cur_xyz = l_xyz[0][batch_idx == k]
                else:
                    last_num_points = self.num_points_each_layer[i - 1]
                    cur_xyz = l_xyz[-1][k * last_num_points: (k + 1) * last_num_points]
                cur_pt_idxs = pointnet2_utils_stack.farthest_point_sample(
                    cur_xyz[None, :, :].contiguous(), self.num_points_each_layer[i]
                ).long()[0]
                if cur_xyz.shape[0] < self.num_points_each_layer[i]:
                    # BUGFIX: pad with the point count (shape[0]); the original
                    # read shape[1] (always 3, the coordinate dim) and indexed
                    # cur_pt_idxs -- already squeezed to 1-D by [0] above -- as
                    # 2-D, which raised IndexError at runtime.
                    empty_num = self.num_points_each_layer[i] - cur_xyz.shape[0]
                    cur_pt_idxs[-empty_num:] = cur_pt_idxs[:empty_num]
                new_xyz_list.append(cur_xyz[cur_pt_idxs])
            new_xyz = torch.cat(new_xyz_list, dim=0)

            new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(self.num_points_each_layer[i])
            li_xyz, li_features = self.SA_modules[i](
                xyz=l_xyz[i], features=l_features[i], xyz_batch_cnt=l_batch_cnt[i],
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt
            )

            l_xyz.append(li_xyz)
            l_features.append(li_features)
            l_batch_cnt.append(new_xyz_batch_cnt)

        # Level-0 decoder input: raw [x, y, z, ...features] columns.
        l_features[0] = points[:, 1:]
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                unknown=l_xyz[i - 1], unknown_batch_cnt=l_batch_cnt[i - 1],
                known=l_xyz[i], known_batch_cnt=l_batch_cnt[i],
                unknown_feats=l_features[i - 1], known_feats=l_features[i]
            )

        batch_dict['point_features'] = l_features[0]
        batch_dict['point_coords'] = torch.cat((batch_idx[:, None].float(), l_xyz[0]), dim=1)
        return batch_dict
| 8,540 | 40.26087 | 132 | py |
3DTrans | 3DTrans-master/pcdet/models/backbones_3d/pfe/unet_scn.py | from functools import partial
import torch
import torch.nn as nn
from pcdet.utils.spconv_utils import replace_feature, spconv
from pcdet.utils import common_utils
from pcdet.models.backbones_3d.spconv_backbone import post_act_block
class SparseBasicBlock(spconv.SparseModule):
    """ResNet-style basic block on spconv sparse tensors.

    Two 3x3x3 submanifold convolutions with BN + ReLU, plus a residual
    connection applied on the per-voxel feature matrix (``x.features``).
    """
    expansion = 1  # ResNet convention: output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None):
        super(SparseBasicBlock, self).__init__()
        self.conv1 = spconv.SubMConv3d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x.features  # (num_voxels, C) dense feature matrix for the residual branch

        assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim()

        out = self.conv1(x)
        out = replace_feature(out, self.bn1(out.features))
        out = replace_feature(out, self.relu(out.features))

        out = self.conv2(out)
        out = replace_feature(out, self.bn2(out.features))

        if self.downsample is not None:
            # NOTE(review): `identity` becomes the raw downsample output here;
            # the addition below expects a (num_voxels, C) tensor -- confirm the
            # supplied `downsample` returns features, not a SparseConvTensor.
            identity = self.downsample(x)

        out = replace_feature(out, out.features + identity)
        out = replace_feature(out, self.relu(out.features))

        return out
class UNetSCN(nn.Module):
    """
    Sparse Convolution based UNet for point-wise feature learning.
    Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi, et. al)
    From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
    """

    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # spconv spatial shape is (z, y, x); the extra [1, 0, 0] adds one
        # plane along z.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]

        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)

        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block

        # ---- encoder: 4 stages, downsampling by stride-2 sparse convs ----
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
        )

        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
        )

        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
        )

        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
        )

        # decoder
        # [400, 352, 11] <- [200, 176, 5]
        self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn)
        self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')
        self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')

        # [800, 704, 21] <- [400, 352, 11]
        self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn)
        self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3')
        self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv')

        # [1600, 1408, 41] <- [800, 704, 21]
        self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn)
        self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2')
        self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv')

        # [1600, 1408, 41] <- [1600, 1408, 41]
        self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn)
        self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1')

        self.conv5 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1')
        )
        self.num_point_features = 16

    def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
        """One upsample-and-refine decoder step.

        Transforms the lateral (encoder) tensor, concatenates its features
        with the bottom (decoder) tensor's, fuses with conv_m, adds a
        channel-reduced residual, then inverse-convolves to the next
        (finer) resolution.
        """
        x_trans = conv_t(x_lateral)
        x = x_trans
        x = replace_feature(x, torch.cat((x_bottom.features, x_trans.features), dim=1))
        x_m = conv_m(x)
        # Residual path: fold the concatenated channels down to conv_m's width.
        x = self.channel_reduction(x, x_m.features.shape[1])
        x = replace_feature(x, x_m.features + x.features)
        x = conv_inv(x)
        return x

    @staticmethod
    def channel_reduction(x, out_channels):
        """
        Args:
            x: x.features (N, C1)
            out_channels: C2

        Returns:
            x with features reduced to (N, C2) by summing groups of C1/C2
            consecutive channels (requires C1 to be a multiple of C2).
        """
        features = x.features
        n, in_channels = features.shape
        assert (in_channels % out_channels == 0) and (in_channels >= out_channels)

        x = replace_feature(x, features.view(n, out_channels, -1).sum(dim=2))
        return x

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        x = self.conv_input(input_sp_tensor)

        # Encoder path.
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)

        # for segmentation head
        # [400, 352, 11] <- [200, 176, 5]
        x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4)
        # [800, 704, 21] <- [400, 352, 11]
        x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3)
        # [1600, 1408, 41] <- [800, 704, 21]
        x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2)
        # [1600, 1408, 41] <- [1600, 1408, 41]
        x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5)

        batch_dict['voxel_features_after_scn'] = x_up1.features
        return batch_dict
# class UNetSCN(nn.Module):
# def __init__(self,
# model_cfg,
# m=16, # number of unet features (multiplied in each layer)
# block_reps=1, # depth
# residual_blocks=False, # ResNet style basic blocks
# full_scale=4096,
# num_planes=7
# ):
# super(UNetSCN, self).__init__()
# self.model_cfg = model_cfg
# self.in_channels = self.model_cfg.IN_CHANNELS #3
# self.out_channels = m
# n_planes = [(n + 1) * m for n in range(num_planes)]
# self.sparseModel = scn.Sequential().add(
# scn.InputLayer(DIMENSION, full_scale, mode=4)).add(
# scn.SubmanifoldConvolution(DIMENSION, self.in_channels, m, 3, False)).add(
# scn.UNet(DIMENSION, block_reps, n_planes, residual_blocks)).add(
# scn.BatchNormReLU(m)).add(
# scn.OutputLayer(DIMENSION))
# def forward(self, batch_dict):
# voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
# x = [voxel_coords, voxel_features]
# x = self.sparseModel(x)
# batch_dict['voxel_features_after_scn'] = x
# return batch_dict | 8,709 | 39.138249 | 117 | py |
# === 3DTrans-master/pcdet/models/backbones_3d/pfe/point_t_trans.py ===
import copy
import numpy as np
import torch
import torch.nn as nn
from ....utils import uni3d_norm_2_in
class POINT_T(nn.Module):
    """Point-coordinate transform.

    Re-scales only the z coordinate of the raw point cloud with a
    domain-specific normalization layer; [batch_idx, x, y] pass through
    unchanged. Any columns after z are dropped from the output.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # Domain-specific norm: keeps separate statistics per source dataset.
        self.scale_bn = uni3d_norm_2_in.UniNorm1d(
            self.model_cfg.SHARED_CONV_CHANNEL,
            dataset_from_flag=int(self.model_cfg.db_source),
            eps=1e-3, momentum=0.01, voxel_coord=True,
        )
        # Alternative (single-domain statistics), kept for reference:
        # self.scale_bn = nn.BatchNorm1d(self.model_cfg.SHARED_CONV_CHANNEL)

    def forward(self, data_dict):
        """Normalize the z column of data_dict['points'] in place.

        Args:
            data_dict: must contain 'points' of shape (N, >=4) laid out as
                [batch_idx, x, y, z, ...].

        Returns:
            data_dict with 'points' replaced by (N, 4) [batch_idx, x, y, z'].
        """
        raw = data_dict['points']
        head = raw[:, 0:3]                   # [batch_idx, x, y]
        z_col = raw[:, 3].unsqueeze(1)       # (N, 1)
        z_scaled = self.scale_bn(z_col, raw[:, 0].unsqueeze(1))
        data_dict['points'] = torch.cat([head, z_scaled], dim=1)
        return data_dict
# === 3DTrans-master/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py ===
import math
import numpy as np
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
from ....utils import uni3d_norm
def bilinear_interpolate_torch(im, x, y):
"""
Args:
im: (H, W, C) [y, x]
x: (N)
y: (N)
Returns:
"""
x0 = torch.floor(x).long()
x1 = x0 + 1
y0 = torch.floor(y).long()
y1 = y0 + 1
x0 = torch.clamp(x0, 0, im.shape[1] - 1)
x1 = torch.clamp(x1, 0, im.shape[1] - 1)
y0 = torch.clamp(y0, 0, im.shape[0] - 1)
y1 = torch.clamp(y1, 0, im.shape[0] - 1)
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)
wb = (x1.type_as(x) - x) * (y - y0.type_as(y))
wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)
wd = (x - x0.type_as(x)) * (y - y0.type_as(y))
ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)
return ans
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
    """Keep points lying near any ROI box centre.

    A point is kept when its distance to the *nearest* ROI centre is below
    that ROI's half-diagonal plus ``sample_radius_with_roi``. Large clouds
    are processed in chunks of ``num_max_points_of_part`` points to bound
    the (N, M) distance matrix.

    Args:
        rois: (M, 7 + C) boxes, [cx, cy, cz, dx, dy, dz, ...]
        points: (N, 3)
        sample_radius_with_roi: float margin added around each box
        num_max_points_of_part: chunk size for the distance computation

    Returns:
        sampled_points: (N_out, 3); falls back to the first point when no
            point survives the mask.
        point_mask: (N,) boolean keep-mask.
    """
    def _mask_for(chunk):
        # (n, M) distances from each chunk point to every ROI centre.
        dist = torch.norm(chunk[:, None, :] - rois[None, :, 0:3], dim=-1)
        nearest_dist, nearest_roi = dist.min(dim=-1)
        half_diag = torch.norm(rois[nearest_roi, 3:6] / 2, dim=-1)
        return nearest_dist < half_diag + sample_radius_with_roi

    num_points = points.shape[0]
    if num_points < num_max_points_of_part:
        point_mask = _mask_for(points)
    else:
        chunk_masks = [
            _mask_for(points[start:start + num_max_points_of_part])
            for start in range(0, num_points, num_max_points_of_part)
        ]
        point_mask = torch.cat(chunk_masks, dim=0)

    if point_mask.sum() == 0:
        # Never return an empty tensor -- downstream code expects >= 1 point.
        sampled_points = points[:1]
    else:
        sampled_points = points[point_mask, :]
    return sampled_points, point_mask
def sector_fps(points, num_sampled_points, num_sectors):
    """Sector-based farthest point sampling.

    Points are binned into ``num_sectors`` angular sectors around the
    origin (in the xy-plane), then FPS runs within each sector with a
    budget proportional to the sector's share of the points.

    Args:
        points: (N, 3)
        num_sampled_points: int, total sampling budget
        num_sectors: int, number of angular bins

    Returns:
        sampled_points: (N_out, 3)
    """
    sector_size = np.pi * 2 / num_sectors
    # atan2 is in (-pi, pi]; shifting by +pi maps the angles into (0, 2*pi].
    point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi
    # BUGFIX: clamp to num_sectors - 1 (was num_sectors). Points with y == 0
    # and x < 0 have angle exactly 2*pi, so floor() yields num_sectors, which
    # fell outside every bin of the loop below and silently dropped them.
    sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors - 1)

    xyz_points_list = []
    xyz_batch_cnt = []
    num_sampled_points_list = []
    for k in range(num_sectors):
        mask = (sector_idx == k)
        cur_num_points = mask.sum().item()
        if cur_num_points > 0:
            xyz_points_list.append(points[mask])
            xyz_batch_cnt.append(cur_num_points)
            ratio = cur_num_points / points.shape[0]
            # Proportional budget; ceil keeps small sectors represented, the
            # min() caps it at the sector's population.
            num_sampled_points_list.append(
                min(cur_num_points, math.ceil(ratio * num_sampled_points))
            )

    if len(xyz_batch_cnt) == 0:
        # Degenerate input: no sector received any point -- fall back to a
        # single FPS over the whole set.
        xyz_points_list.append(points)
        xyz_batch_cnt.append(len(points))
        num_sampled_points_list.append(num_sampled_points)
        print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')

    xyz = torch.cat(xyz_points_list, dim=0)
    xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int()
    sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()
    # Batched FPS: each sector is treated as one "batch" entry.
    sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(
        xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt
    ).long()

    sampled_points = xyz[sampled_pt_idxs]

    return sampled_points
class VoxelSetAbstraction(nn.Module):
    """Voxel Set Abstraction module.

    Samples a fixed set of keypoints per scene and aggregates features for
    them from several configurable sources: the BEV feature map, the raw
    point cloud, and multi-scale sparse-conv voxel features, then fuses the
    concatenation into a single per-keypoint feature vector.
    """

    def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
                 num_rawpoint_features=None, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range

        SA_cfg = self.model_cfg.SA_LAYER

        self.SA_layers = nn.ModuleList()
        self.SA_layer_names = []
        self.downsample_times_map = {}
        c_in = 0  # running total of per-keypoint channels over all sources
        for src_name in self.model_cfg.FEATURES_SOURCE:
            if src_name in ['bev', 'raw_points']:
                continue  # these two sources are handled separately below
            self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR

            if SA_cfg[src_name].get('INPUT_CHANNELS', None) is None:
                # Fall back to inferring the input width from the MLP spec.
                input_channels = SA_cfg[src_name].MLPS[0][0] \
                    if isinstance(SA_cfg[src_name].MLPS[0], list) else SA_cfg[src_name].MLPS[0]
            else:
                input_channels = SA_cfg[src_name]['INPUT_CHANNELS']

            cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
                input_channels=input_channels, config=SA_cfg[src_name]
            )
            self.SA_layers.append(cur_layer)
            self.SA_layer_names.append(src_name)

            c_in += cur_num_c_out

        if 'bev' in self.model_cfg.FEATURES_SOURCE:
            c_bev = num_bev_features
            c_in += c_bev

        if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
            # The first 3 raw columns (xyz) are consumed as coordinates, not features.
            self.SA_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
                input_channels=num_rawpoint_features - 3, config=SA_cfg['raw_points']
            )
            c_in += cur_num_c_out

        if self.model_cfg.get('DUAL_NORM', None):
            # Domain-specific normalization: separate statistics per dataset.
            self.vsa_point_feature_fusion = nn.Sequential(
                nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
                uni3d_norm.UniNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES, dataset_from_flag=int(self.model_cfg.db_source)),
                nn.ReLU(),
            )
        else:
            self.vsa_point_feature_fusion = nn.Sequential(
                nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
                nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
                nn.ReLU(),
            )
        self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
        self.num_point_features_before_fusion = c_in

    def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
        """
        Args:
            keypoints: (N1 + N2 + ..., 4)
            bev_features: (B, C, H, W)
            batch_size:
            bev_stride:

        Returns:
            point_bev_features: (N1 + N2 + ..., C)
        """
        # World coordinates -> fractional BEV grid indices.
        x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]
        y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]

        x_idxs = x_idxs / bev_stride
        y_idxs = y_idxs / bev_stride

        point_bev_features_list = []
        for k in range(batch_size):
            bs_mask = (keypoints[:, 0] == k)

            cur_x_idxs = x_idxs[bs_mask]
            cur_y_idxs = y_idxs[bs_mask]
            cur_bev_features = bev_features[k].permute(1, 2, 0)  # (H, W, C)
            point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
            point_bev_features_list.append(point_bev_features)

        point_bev_features = torch.cat(point_bev_features_list, dim=0)  # (N1 + N2 + ..., C)
        return point_bev_features

    def sectorized_proposal_centric_sampling(self, roi_boxes, points):
        """
        Args:
            roi_boxes: (M, 7 + C)
            points: (N, 3)

        Returns:
            sampled_points: (N_out, 3)
        """
        # First restrict to points near any ROI, then spread the keypoint
        # budget over angular sectors via sector FPS.
        sampled_points, _ = sample_points_with_roi(
            rois=roi_boxes, points=points,
            sample_radius_with_roi=self.model_cfg.SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI,
            num_max_points_of_part=self.model_cfg.SPC_SAMPLING.get('NUM_POINTS_OF_EACH_SAMPLE_PART', 200000)
        )
        sampled_points = sector_fps(
            points=sampled_points, num_sampled_points=self.model_cfg.NUM_KEYPOINTS,
            num_sectors=self.model_cfg.SPC_SAMPLING.NUM_SECTORS
        )
        return sampled_points

    def get_sampled_points(self, batch_dict):
        """
        Args:
            batch_dict:

        Returns:
            keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z]
        """
        batch_size = batch_dict['batch_size']
        if self.model_cfg.POINT_SOURCE == 'raw_points':
            src_points = batch_dict['points'][:, 1:4]
            batch_indices = batch_dict['points'][:, 0].long()
        elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
            src_points = common_utils.get_voxel_centers(
                batch_dict['voxel_coords'][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            batch_indices = batch_dict['voxel_coords'][:, 0].long()
        else:
            raise NotImplementedError
        keypoints_list = []
        for bs_idx in range(batch_size):
            bs_mask = (batch_indices == bs_idx)
            sampled_points = src_points[bs_mask].unsqueeze(dim=0)  # (1, N, 3)
            if self.model_cfg.SAMPLE_METHOD == 'FPS':
                cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(
                    sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
                ).long()

                if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
                    # Too few points for the budget: tile the valid indices
                    # until NUM_KEYPOINTS are filled.
                    times = int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1
                    non_empty = cur_pt_idxs[0, :sampled_points.shape[1]]
                    cur_pt_idxs[0] = non_empty.repeat(times)[:self.model_cfg.NUM_KEYPOINTS]

                keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)

            elif self.model_cfg.SAMPLE_METHOD == 'SPC':
                cur_keypoints = self.sectorized_proposal_centric_sampling(
                    roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0]
                )
                bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx
                keypoints = torch.cat((bs_idxs[:, None], cur_keypoints), dim=1)
            else:
                raise NotImplementedError

            keypoints_list.append(keypoints)
        keypoints = torch.cat(keypoints_list, dim=0)  # (B, M, 3) or (N1 + N2 + ..., 4)
        if len(keypoints.shape) == 3:
            # FPS path yields fixed-size (B, M, 3); flatten to stacked (B*M, 4).
            batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1, 1)
            keypoints = torch.cat((batch_idx.float(), keypoints.view(-1, 3)), dim=1)

        return keypoints

    @staticmethod
    def aggregate_keypoint_features_from_one_source(
            batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,
            filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None, cover_feat_4=False
    ):
        """
        Args:

            aggregate_func:
            xyz: (N, 3)
            xyz_features: (N, C)
            xyz_bs_idxs: (N)
            new_xyz: (M, 3)
            new_xyz_batch_cnt: (batch_size), [N1, N2, ...]

            filter_neighbors_with_roi: True/False
            radius_of_neighbor: float
            num_max_points_of_part: int
            rois: (batch_size, num_rois, 7 + C)
            cover_feat_4: if cover the xyz_features using the values in z-dimension
        Returns:

        """
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        if filter_neighbors_with_roi:
            # Pre-filter source points to those near any ROI, per sample.
            point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz
            point_features_list = []
            for bs_idx in range(batch_size):
                bs_mask = (xyz_bs_idxs == bs_idx)
                _, valid_mask = sample_points_with_roi(
                    rois=rois[bs_idx], points=xyz[bs_mask],
                    sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,
                )
                point_features_list.append(point_features[bs_mask][valid_mask])
                xyz_batch_cnt[bs_idx] = valid_mask.sum()

            valid_point_features = torch.cat(point_features_list, dim=0)
            xyz = valid_point_features[:, 0:3]
            xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None
        else:
            for bs_idx in range(batch_size):
                xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()

        # for using the z-axes as the fourth dimension feature of point-cloud representations
        if xyz_features is None:
            if cover_feat_4:
                # Substitute the z coordinate as a 1-channel feature.
                xyz_features = xyz[:, 2].view(-1, 1)
                pooled_points, pooled_features = aggregate_func(
                    xyz=xyz.contiguous(),
                    xyz_batch_cnt=xyz_batch_cnt,
                    new_xyz=new_xyz,
                    new_xyz_batch_cnt=new_xyz_batch_cnt,
                    features=xyz_features.contiguous(),
                )
            else:
                pooled_points, pooled_features = aggregate_func(
                    xyz=xyz.contiguous(),
                    xyz_batch_cnt=xyz_batch_cnt,
                    new_xyz=new_xyz,
                    new_xyz_batch_cnt=new_xyz_batch_cnt,
                    features=xyz_features,
                )
        else:
            pooled_points, pooled_features = aggregate_func(
                xyz=xyz.contiguous(),
                xyz_batch_cnt=xyz_batch_cnt,
                new_xyz=new_xyz,
                new_xyz_batch_cnt=new_xyz_batch_cnt,
                features=xyz_features.contiguous(),
            )
        return pooled_features

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                keypoints: (B, num_keypoints, 3)
                multi_scale_3d_features: {
                        'x_conv4': ...
                    }
                points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
                spatial_features: optional
                spatial_features_stride: optional

        Returns:
            point_features: (N, C)
            point_coords: (N, 4)

        """
        keypoints = self.get_sampled_points(batch_dict)

        point_features_list = []
        if 'bev' in self.model_cfg.FEATURES_SOURCE:
            point_bev_features = self.interpolate_from_bev_features(
                keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
                bev_stride=batch_dict['spatial_features_stride']
            )
            point_features_list.append(point_bev_features)

        batch_size = batch_dict['batch_size']

        new_xyz = keypoints[:, 1:4].contiguous()
        new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()
        for k in range(batch_size):
            new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum()

        if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
            raw_points = batch_dict['points']

            pooled_features = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size, aggregate_func=self.SA_rawpoints,
                xyz=raw_points[:, 1:4],
                xyz_features=raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None,
                xyz_bs_idxs=raw_points[:, 0],
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
                filter_neighbors_with_roi=self.model_cfg.SA_LAYER['raw_points'].get('FILTER_NEIGHBOR_WITH_ROI', False),
                radius_of_neighbor=self.model_cfg.SA_LAYER['raw_points'].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
                rois=batch_dict.get('rois', None),
                cover_feat_4=self.model_cfg.COVER_FEAT if self.model_cfg.get('COVER_FEAT', None) else None
            )
            point_features_list.append(pooled_features)

        for k, src_name in enumerate(self.SA_layer_names):
            cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
            cur_features = batch_dict['multi_scale_3d_features'][src_name].features.contiguous()

            # Voxel indices -> metric centre coordinates at this scale.
            xyz = common_utils.get_voxel_centers(
                cur_coords[:, 1:4], downsample_times=self.downsample_times_map[src_name],
                voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
            )

            pooled_features = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size, aggregate_func=self.SA_layers[k],
                xyz=xyz.contiguous(), xyz_features=cur_features, xyz_bs_idxs=cur_coords[:, 0],
                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
                filter_neighbors_with_roi=self.model_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
                radius_of_neighbor=self.model_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
                rois=batch_dict.get('rois', None),
                cover_feat_4=self.model_cfg.COVER_FEAT if self.model_cfg.get('COVER_FEAT', None) else None
            )

            point_features_list.append(pooled_features)

        point_features = torch.cat(point_features_list, dim=-1)

        batch_dict['point_features_before_fusion'] = point_features.view(-1, point_features.shape[-1])
        point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1]))

        batch_dict['point_features'] = point_features  # (BxN, C)
        batch_dict['point_coords'] = keypoints  # (BxN, 4)
        return batch_dict
# (dataset-site boilerplate, commented out to keep the file importable)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.