def apply_Dropout(rng, dropoutRate, inputShape, inputData, task):
''' Task:
# 0: Training
# 1: Validation
# 2: Testing '''
outputData = inputData
if (dropoutRate > 0.001):
activationRate = (1 - dropoutRate)
srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
dropoutMask = srng.binomial(n=1, size=inputShape, p=activationRate, dtype=theano.config.floatX)
if (task == 0):
outputData = (inputData * dropoutMask)
else:
outputData = (inputData * activationRate)
return outputData
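# Example (hedged): a minimal sketch of calling apply_Dropout on a symbolic 5D
# tensor in training mode (task=0). The seed, rate and shape below are
# illustrative assumptions, not values from the original code.
import numpy as np
import theano
import theano.tensor as T
rng_demo = np.random.RandomState(12345)
x_demo = T.TensorType(theano.config.floatX, (False,) * 5)('x_demo')
y_demo = apply_Dropout(rng_demo, 0.5, (2, 4, 8, 8, 8), x_demo, 0)
f_demo = theano.function([x_demo], y_demo)
print(f_demo(np.ones((2, 4, 8, 8, 8), dtype=theano.config.floatX)).mean())  # roughly 0.5: about half the units survive, unscaled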
|
def convolveWithKernel(W, filter_shape, inputSample, inputSampleShape):
wReshapedForConv = W.dimshuffle(0, 4, 1, 2, 3)
wReshapedForConvShape = (filter_shape[0], filter_shape[4], filter_shape[1], filter_shape[2], filter_shape[3])
inputSampleReshaped = inputSample.dimshuffle(0, 4, 1, 2, 3)
inputSampleReshapedShape = (inputSampleShape[0], inputSampleShape[4], inputSampleShape[1], inputSampleShape[2], inputSampleShape[3])
convolved_Output = T.nnet.conv3d2d.conv3d(inputSampleReshaped, wReshapedForConv, inputSampleReshapedShape, wReshapedForConvShape, border_mode='valid')
output = convolved_Output.dimshuffle(0, 2, 3, 4, 1)
outputShape = [inputSampleShape[0], filter_shape[0], ((inputSampleShape[2] - filter_shape[2]) + 1), ((inputSampleShape[3] - filter_shape[3]) + 1), ((inputSampleShape[4] - filter_shape[4]) + 1)]
return (output, outputShape)
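# Example (hedged): the 'valid' 3D convolution output shape computed at the end
# of convolveWithKernel, checked by hand for illustrative (assumed) sizes.
inputSampleShape_demo = [1, 4, 25, 25, 25]  # (batch, channels, x, y, z)
filter_shape_demo = [30, 4, 3, 3, 3]        # (kernels, in-channels, kx, ky, kz)
outputShape_demo = [inputSampleShape_demo[0], filter_shape_demo[0]] + [(inputSampleShape_demo[i] - filter_shape_demo[i]) + 1 for i in (2, 3, 4)]
assert outputShape_demo == [1, 30, 23, 23, 23]  # each spatial dim shrinks by kernel size minus 1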
|
def applyBn(numberEpochApplyRolling, inputTrain, inputTest, inputShapeTrain):
numberOfChannels = inputShapeTrain[1]
gBn_values = np.ones(numberOfChannels, dtype='float32')
gBn = theano.shared(value=gBn_values, borrow=True)
bBn_values = np.zeros(numberOfChannels, dtype='float32')
bBn = theano.shared(value=bBn_values, borrow=True)
muArray = theano.shared(np.zeros((numberEpochApplyRolling, numberOfChannels), dtype='float32'), borrow=True)
varArray = theano.shared(np.ones((numberEpochApplyRolling, numberOfChannels), dtype='float32'), borrow=True)
sharedNewMu_B = theano.shared(np.zeros(numberOfChannels, dtype='float32'), borrow=True)
sharedNewVar_B = theano.shared(np.ones(numberOfChannels, dtype='float32'), borrow=True)
e1 = np.finfo(np.float32).tiny
mu_B = inputTrain.mean(axis=[0, 2, 3, 4])
mu_B = T.unbroadcast(mu_B, 0)
var_B = inputTrain.var(axis=[0, 2, 3, 4])
var_B = T.unbroadcast(var_B, 0)
var_B_plusE = (var_B + e1)
mu_RollingAverage = muArray.mean(axis=0)
effectiveSize = (((inputShapeTrain[0] * inputShapeTrain[2]) * inputShapeTrain[3]) * inputShapeTrain[4])
var_RollingAverage = ((effectiveSize / (effectiveSize - 1)) * varArray.mean(axis=0))
var_RollingAverage_plusE = (var_RollingAverage + e1)
normXi_train = ((inputTrain - mu_B.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_B_plusE.dimshuffle('x', 0, 'x', 'x', 'x')))
normYi_train = ((gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_train) + bBn.dimshuffle('x', 0, 'x', 'x', 'x'))
normXi_test = ((inputTest - mu_RollingAverage.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_RollingAverage_plusE.dimshuffle('x', 0, 'x', 'x', 'x')))
normYi_test = ((gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_test) + bBn.dimshuffle('x', 0, 'x', 'x', 'x'))
return (normYi_train, normYi_test, gBn, bBn, muArray, varArray, sharedNewMu_B, sharedNewVar_B, mu_B, var_B)
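# Example (hedged): a plain-numpy restatement of the train-time branch of
# applyBn, on a tiny assumed batch; gamma and beta start at 1 and 0 as above.
import numpy as np
x_bn = np.random.randn(2, 3, 4, 4, 4).astype('float32')  # (batch, channels, x, y, z)
mu_demo = x_bn.mean(axis=(0, 2, 3, 4), keepdims=True)
var_demo = x_bn.var(axis=(0, 2, 3, 4), keepdims=True)
x_hat_demo = (x_bn - mu_demo) / np.sqrt(var_demo + np.finfo(np.float32).tiny)
# normYi_train corresponds to gamma * x_hat + beta, broadcast over the channel axis.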
|
def applySoftMax(inputSample, inputSampleShape, numClasses, softmaxTemperature):
inputSampleReshaped = inputSample.dimshuffle(0, 2, 3, 4, 1)
inputSampleFlattened = inputSampleReshaped.flatten(1)
numClassifiedVoxels = ((inputSampleShape[2] * inputSampleShape[3]) * inputSampleShape[4])
firstDimOfinputSample2d = (inputSampleShape[0] * numClassifiedVoxels)
inputSample2d = inputSampleFlattened.reshape((firstDimOfinputSample2d, numClasses))
p_y_given_x_2d = T.nnet.softmax((inputSample2d / softmaxTemperature))
p_y_given_x_class = p_y_given_x_2d.reshape((inputSampleShape[0], inputSampleShape[2], inputSampleShape[3], inputSampleShape[4], inputSampleShape[1]))
p_y_given_x = p_y_given_x_class.dimshuffle(0, 4, 1, 2, 3)
y_pred = T.argmax(p_y_given_x, axis=1)
return (p_y_given_x, y_pred)
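# Example (hedged): effect of softmaxTemperature, shown with plain numpy on an
# assumed 1x3 score row; a higher temperature gives a softer distribution.
import numpy as np
scores_demo = np.array([[2.0, 1.0, 0.1]])
for temp_demo in (1.0, 2.0):
    e_demo = np.exp(scores_demo / temp_demo)
    print(temp_demo, e_demo / e_demo.sum(axis=1, keepdims=True))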
|
def applyBiasToFeatureMaps(bias, featMaps):
featMaps = (featMaps + bias.dimshuffle('x', 0, 'x', 'x', 'x'))
return featMaps
|
class parserConfigIni(object):
def __init__(_self):
_self.networkName = []
def readConfigIniFile(_self, fileName, task):
def createModel():
print(' --- Creating model (Reading parameters...)')
_self.readModelCreation_params(fileName)
def trainModel():
print(' --- Training model (Reading parameters...)')
_self.readModelTraining_params(fileName)
def testModel():
print(' --- Testing model (Reading parameters...)')
_self.readModelTesting_params(fileName)
optionsParser = {0: createModel, 1: trainModel, 2: testModel}
optionsParser[task]()
def readModelCreation_params(_self, fileName):
ConfigIni = ConfigParser.ConfigParser()
ConfigIni.read(fileName)
_self.networkName = ConfigIni.get('General', 'networkName')
_self.folderName = ConfigIni.get('General', 'folderName')
_self.n_classes = json.loads(ConfigIni.get('CNN_Architecture', 'n_classes'))
_self.layers = json.loads(ConfigIni.get('CNN_Architecture', 'numkernelsperlayer'))
_self.kernels = json.loads(ConfigIni.get('CNN_Architecture', 'kernelshapes'))
_self.intermediate_ConnectedLayers = json.loads(ConfigIni.get('CNN_Architecture', 'intermediateConnectedLayers'))
_self.pooling_scales = json.loads(ConfigIni.get('CNN_Architecture', 'pooling_scales'))
_self.dropout_Rates = json.loads(ConfigIni.get('CNN_Architecture', 'dropout_Rates'))
_self.activationType = json.loads(ConfigIni.get('CNN_Architecture', 'activationType'))
_self.weight_Initialization_CNN = json.loads(ConfigIni.get('CNN_Architecture', 'weight_Initialization_CNN'))
_self.weight_Initialization_FCN = json.loads(ConfigIni.get('CNN_Architecture', 'weight_Initialization_FCN'))
_self.weightsFolderName = ConfigIni.get('CNN_Architecture', 'weights folderName')
_self.weightsTrainedIdx = json.loads(ConfigIni.get('CNN_Architecture', 'weights trained indexes'))
_self.batch_size = json.loads(ConfigIni.get('Training Parameters', 'batch_size'))
_self.sampleSize_Train = json.loads(ConfigIni.get('Training Parameters', 'sampleSize_Train'))
_self.sampleSize_Test = json.loads(ConfigIni.get('Training Parameters', 'sampleSize_Test'))
_self.costFunction = json.loads(ConfigIni.get('Training Parameters', 'costFunction'))
_self.L1_reg_C = json.loads(ConfigIni.get('Training Parameters', 'L1 Regularization Constant'))
_self.L2_reg_C = json.loads(ConfigIni.get('Training Parameters', 'L2 Regularization Constant'))
_self.learning_rate = json.loads(ConfigIni.get('Training Parameters', 'Leraning Rate'))  # sic: the key spelling must match the .ini file
_self.momentumType = json.loads(ConfigIni.get('Training Parameters', 'Momentum Type'))
_self.momentumValue = json.loads(ConfigIni.get('Training Parameters', 'Momentum Value'))
_self.momentumNormalized = json.loads(ConfigIni.get('Training Parameters', 'momentumNormalized'))
_self.optimizerType = json.loads(ConfigIni.get('Training Parameters', 'Optimizer Type'))
_self.rho_RMSProp = json.loads(ConfigIni.get('Training Parameters', 'Rho RMSProp'))
_self.epsilon_RMSProp = json.loads(ConfigIni.get('Training Parameters', 'Epsilon RMSProp'))
applyBatchNorm = json.loads(ConfigIni.get('Training Parameters', 'applyBatchNormalization'))
if (applyBatchNorm == 1):
_self.applyBatchNorm = True
else:
_self.applyBatchNorm = False
_self.BatchNormEpochs = json.loads(ConfigIni.get('Training Parameters', 'BatchNormEpochs'))
_self.tempSoftMax = json.loads(ConfigIni.get('Training Parameters', 'SoftMax temperature'))
def readModelTraining_params(_self, fileName):
ConfigIni = ConfigParser.ConfigParser()
ConfigIni.read(fileName)
_self.imagesFolder = ConfigIni.get('Training Images', 'imagesFolder')
_self.imagesFolder_Bottom = ConfigIni.get('Training Images', 'imagesFolder_Bottom')
_self.GroundTruthFolder = ConfigIni.get('Training Images', 'GroundTruthFolder')
_self.ROIFolder = ConfigIni.get('Training Images', 'ROIFolder')
_self.indexesForTraining = json.loads(ConfigIni.get('Training Images', 'indexesForTraining'))
_self.indexesForValidation = json.loads(ConfigIni.get('Training Images', 'indexesForValidation'))
_self.imageTypesTrain = json.loads(ConfigIni.get('Training Images', 'imageTypes'))
_self.numberOfEpochs = json.loads(ConfigIni.get('Training Parameters', 'number of Epochs'))
_self.numberOfSubEpochs = json.loads(ConfigIni.get('Training Parameters', 'number of SubEpochs'))
_self.numberOfSamplesSupEpoch = json.loads(ConfigIni.get('Training Parameters', 'number of samples at each SubEpoch Train'))
_self.firstEpochChangeLR = json.loads(ConfigIni.get('Training Parameters', 'First Epoch Change LR'))
_self.frequencyChangeLR = json.loads(ConfigIni.get('Training Parameters', 'Frequency Change LR'))
_self.applyPadding = json.loads(ConfigIni.get('Training Parameters', 'applyPadding'))
def readModelTesting_params(_self, fileName):
ConfigIni = ConfigParser.ConfigParser()
ConfigIni.read(fileName)
_self.imagesFolder = ConfigIni.get('Segmentation Images', 'imagesFolder')
_self.imagesFolder_Bottom = ConfigIni.get('Segmentation Images', 'imagesFolder_Bottom')
_self.GroundTruthFolder = ConfigIni.get('Segmentation Images', 'GroundTruthFolder')
_self.ROIFolder = ConfigIni.get('Segmentation Images', 'ROIFolder')
_self.imageTypes = json.loads(ConfigIni.get('Segmentation Images', 'imageTypes'))
_self.indexesToSegment = json.loads(ConfigIni.get('Segmentation Images', 'indexesToSegment'))
_self.applyPadding = json.loads(ConfigIni.get('Segmentation Images', 'applyPadding'))
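# Example (hedged): typical use of the parser; 'myConfig.ini' is a hypothetical
# file that contains the sections and keys read by the methods above.
parser_demo = parserConfigIni()
parser_demo.readConfigIniFile('myConfig.ini', 1)  # 0: create, 1: train, 2: test
print(parser_demo.imagesFolder, parser_demo.numberOfEpochs)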
|
def printUsage(error_type):
if (error_type == 1):
print(' ** ERROR!!: Too few arguments provided.')
else:
print(' ** ERROR!!: Asked to start with an already created network but its name is not specified.')
print(' ******** USAGE ******** ')
print(' --- argv 1: Name of the configIni file.')
print(' --- argv 2: Network model name')
|
def networkSegmentation(argv):
if (len(argv) < 2):
printUsage(1)
sys.exit()
configIniName = argv[0]
networkModelName = argv[1]
startTesting(networkModelName, configIniName)
print(' ***************** SEGMENTATION DONE!!! ***************** ')
|
class BatchGenerator():
'''
Iterates over a video dataset, returning filenames of frames to load.
preprocessing.py can be used in combination with BatchGenerator to read and preprocess frames.

(num_labels) number of labels in the dataset.
(filename) part of the filename before _train.pkl or _test.pkl in the annotations folder (e.g. Annotations/D1).
(temporal_window) number of frames to be sampled in a single video segment.
(flow_data_path) path to the folder containing flow frames.
(rgb_data_path) path to the folder containing rgb frames.
(random_sync) half the batch with corresponding examples, half the batch with non-corresponding examples.
(synchronised) flow and rgb frames are temporally synchronised for corresponding examples.

nextBatch - returns rgb and flow filenames of frames to load in a training batch, labels, and correspondence labels
nextBatchEval - returns rgb and flow filenames of frames to load in a testing batch, labels, and correspondence labels
'''
def __init__(self, num_labels, filename, temporal_window=16, flow_data_path='flow_frames_parent/flow', rgb_data_path='rgb_frames_parent/frames', synchronised=False, random_sync=False):
self.random_sync = random_sync
self.synchronised = synchronised
self.temporal_window = temporal_window
self.flow_data_path = (flow_data_path + '/')
self.rgb_data_path = (rgb_data_path + '/')
self.filename = filename
self.num_labels = num_labels
(dataset_data, dataset_labels) = self._parse_inputs_df((filename + '_train.pkl'))
(dataset_data_test, dataset_labels_test) = self._parse_inputs_df((filename + '_test.pkl'))
dataset_data_total = np.arange(dataset_data.shape[0])
dataset_test_total = np.arange(dataset_data_test.shape[0])
dataset_data_train = (dataset_data, dataset_labels)
dataset_data_test = (dataset_data_test, dataset_labels_test)
self.dataset_data = {False: dataset_data_train, True: dataset_data_test}
dataset_data_train_total = dataset_data_total
dataset_data_test_total = dataset_test_total
self.dataset_total = {False: dataset_data_train_total, True: dataset_data_test_total}
def reset_dataset(self, test=False):
' Reset dataset iterator to include all data'
self.dataset_total[test] = np.arange(self.dataset_data[test][0].shape[0])
def _parse_inputs_df(self, filename):
' Read annotation file '
df = pd.read_pickle(filename)
data = []
for (_, line) in df.iterrows():
image = [((line['participant_id'] + '/') + line['video_id']), line['start_frame'], line['stop_frame']]
labels = line['verb_class']
one_hot = np.zeros(self.num_labels)
one_hot[labels] = 1.0
data.append((image[0], image[1], image[2], one_hot))
(segment, start, end, softmaxlabels) = list(zip(*data))
labels = list(softmaxlabels)
train = list(zip(segment, start, end))
train = np.array(train)
labels = np.array(labels)
return (train, labels)
def nextBatch(self, batch_size, test=False):
''' Get the next training samples.
Loops through the dataset, randomly sampling data without replacement to add to the batch. '''
batch_size = int(batch_size)
(dataset_data, dataset_labels) = self.dataset_data[test]
dataset_total = self.dataset_total[test]
file = self.filename
if (len(dataset_total) > batch_size):
sample_idx_t = np.random.choice(range(dataset_total.shape[0]), size=batch_size, replace=False)
sample_idx = dataset_total[sample_idx_t]
self.dataset_total[test] = np.delete(dataset_total, sample_idx_t, axis=0)
else:
sample_idx = dataset_total
remaining = (batch_size - sample_idx.shape[0])
dataset_total_temp = np.arange(dataset_data.shape[0])
dataset_total_temp = np.delete(dataset_total_temp, sample_idx, axis=0)
sample_idx_t = np.random.choice(range(dataset_total_temp.shape[0]), size=remaining, replace=False)
sample_idx = np.concatenate((sample_idx, dataset_total_temp[sample_idx_t]))
self.dataset_total[test] = np.delete(dataset_total_temp, sample_idx_t, axis=0)
print('Done Epoch')
sample = dataset_data[sample_idx]
if self.synchronised:
sychron = ([True] * len(sample))
else:
sychron = ([False] * len(sample))
sample = [self.sample_segment(filen, synchronise=to_sync) for (filen, to_sync) in zip(sample, sychron)]
(sample_rgb, sample_flow) = zip(*sample)
if self.random_sync:
half = int((len(sample) / 2))
fixed_sample_rgb = sample_rgb[:half]
fixed_sample_flow = sample_flow[:half]
variate_sample_rgb = sample_rgb[half:]
variate_sample_flow = sample_flow[half:]
variate_sample_flow = (variate_sample_flow[1:] + variate_sample_flow[:1])
sample_flow = (fixed_sample_flow + variate_sample_flow)
sample_rgb = (fixed_sample_rgb + variate_sample_rgb)
sychron = (([True] * len(fixed_sample_rgb)) + ([False] * len(variate_sample_rgb)))
else:
sychron = ([True] * len(sample))
sample_labels = dataset_labels[sample_idx]
batch_labels = sample_labels
batch_rgb = list(sample_rgb)
batch_flow = list(sample_flow)
combined = list(zip(batch_rgb, batch_flow, batch_labels, sychron))
shuffle(combined)
(batch_rgb, batch_flow, batch_labels, sychron) = list(zip(*combined))
batch_rgb = np.array(batch_rgb)
batch_flow = np.array(batch_flow)
return (batch_rgb, batch_flow, batch_labels, sychron)
def nextBatchEval(self, batch_size, test=True):
''' Get the next testing samples, returning 5 equidistant frames along an action segment.
Loops through the dataset, randomly sampling data without replacement to add to the batch. '''
(dataset_data, dataset_labels) = self.dataset_data[test]
dataset_total = self.dataset_total[test]
batch_rgb = []
batch_flow = []
batch_labels = np.empty(shape=[0, self.num_labels])
done = True
if (len(dataset_total) != 0):
done = False
if (len(dataset_total) > batch_size):
sample_idx = np.random.choice(range(dataset_total.shape[0]), size=batch_size, replace=False)
else:
sample_idx = range(dataset_total.shape[0])
done = True
sample = dataset_data[dataset_total[sample_idx]]
sample = [self.sample_segment_test(filen) for filen in sample]
(sample_rgb, sample_flow) = zip(*sample)
sample_labels = dataset_labels[dataset_total[sample_idx]]
self.dataset_total[test] = np.delete(dataset_total, sample_idx, axis=0)
batch_labels = np.concatenate((sample_labels, batch_labels))
batch_rgb = (list(sample_rgb) + batch_rgb)
batch_flow = (list(sample_flow) + batch_flow)
if done:
self.dataset_total[test] = np.arange(dataset_data.shape[0])
batch_rgb = np.array(batch_rgb)
batch_flow = np.array(batch_flow)
return (done, batch_rgb, batch_flow, batch_labels)
def sample_segment_test(self, s):
''' Samples rgb and flow frame windows from a video segment s.
Sampling 5 windows, equidistant along a video segment.
s = ["filename", start_frame, end_frame] '''
def _path_to_dataset(flow):
if flow:
left = self.flow_data_path
else:
left = self.rgb_data_path
right = '/frame_'
numframe = 10
return (left, right, numframe)
def flow_filename(frameno, num_stack=1):
(left, right, fill_frame) = _path_to_dataset(True)
left_frame = (frameno - int(((num_stack - 1) / 2)))
right_frame = (frameno + int((num_stack / 2)))
filename = []
for no in range(left_frame, (right_frame + 1)):
filename.append((((((left + str(s[0])) + '/u') + right) + str(no).zfill(fill_frame)) + '.jpg'))
filename.append((((((left + str(s[0])) + '/v') + right) + str(no).zfill(fill_frame)) + '.jpg'))
return filename
def rgb_filename(frameno):
(left, right, fill_frame) = _path_to_dataset(False)
filename = ((((left + str(s[0])) + right) + str(frameno).zfill(fill_frame)) + '.jpg')
return filename
def c3d_sampling():
num_sample_frame = self.temporal_window
half_sample_frame = int((self.temporal_window / 2))
segment_images = []
segment_flow = []
step = 2
segment_start = (int(s[1]) + (step * half_sample_frame))
segment_end = ((int(s[2]) + 1) - (step * half_sample_frame))
if (segment_start >= segment_end):
segment_start = int(s[1])
segment_end = int(s[2])
if (segment_start <= ((half_sample_frame * step) + 1)):
segment_start = ((half_sample_frame * step) + 2)
for center_frame in np.linspace(segment_start, segment_end, 7, dtype=np.int32)[1:(- 1)]:
seg_f = []
seg_i = []
for no in range((center_frame - (step * half_sample_frame)), (center_frame + (step * half_sample_frame)), step):
seg_f.append(flow_filename(int((no / 2))))
seg_i.append(rgb_filename(no))
segment_flow.append(seg_f)
segment_images.append(seg_i)
return (segment_images, segment_flow)
return c3d_sampling()
def sample_segment(self, s, synchronise=False):
''' Samples rgb and flow frame windows from a video segment s.
Sampling temporal windows randomly in the video segment.
s = ["filename", start_frame, end_frame] '''
def _path_to_dataset(flow):
if flow:
left = self.flow_data_path
else:
left = self.rgb_data_path
right = '/frame_'
numframe = 10
return (left, right, numframe)
def flow_filename(frameno, num_stack=1):
(left, right, fill_frame) = _path_to_dataset(True)
left_frame = (frameno - int(((num_stack - 1) / 2)))
right_frame = (frameno + int((num_stack / 2)))
filename = []
for no in range(left_frame, (right_frame + 1)):
filename.append((((((left + str(s[0])) + '/u') + right) + str(no).zfill(fill_frame)) + '.jpg'))
filename.append((((((left + str(s[0])) + '/v') + right) + str(no).zfill(fill_frame)) + '.jpg'))
return filename
def rgb_filename(frameno):
(left, right, fill_frame) = _path_to_dataset(False)
filename = ((((left + str(s[0])) + right) + str(frameno).zfill(fill_frame)) + '.jpg')
return filename
def c3d_sampling():
num_sample_frame = self.temporal_window
half_sample_frame = int((self.temporal_window / 2))
segment_images = []
segment_flow = []
step = 2
segment_start = (int(s[1]) + (step * half_sample_frame))
segment_end = ((int(s[2]) + 1) - (step * half_sample_frame))
if (segment_start >= segment_end):
segment_start = int(s[1])
segment_end = int(s[2])
if (segment_start <= ((half_sample_frame * step) + 1)):
segment_start = ((half_sample_frame * step) + 2)
if synchronise:
center_frame_rgb = center_frame_flow = randint(segment_start, segment_end)
else:
center_frame_rgb = randint(segment_start, segment_end)
center_frame_flow = randint(segment_start, segment_end)
for no in range((center_frame_rgb - (step * half_sample_frame)), (center_frame_rgb + (step * half_sample_frame)), step):
segment_images.append(rgb_filename(no))
for no in range((center_frame_flow - (step * half_sample_frame)), (center_frame_flow + (step * half_sample_frame)), step):
segment_flow.append(flow_filename(int((no / 2))))
return (segment_images, segment_flow)
return c3d_sampling()
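# Example (hedged): drawing one training batch of frame filenames; the
# annotation prefix comes from the class docstring, while the batch size and
# window length are illustrative assumptions.
gen_demo = BatchGenerator(num_labels=8, filename='Annotations/D1', temporal_window=16)
(rgb_demo, flow_demo, labels_demo, synch_demo) = gen_demo.nextBatch(batch_size=4)
print(rgb_demo.shape, flow_demo.shape, len(labels_demo))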
|
def i3d_model(input_images, is_training, num_labels, dropout, flip_classifier_gradient=False, flip_weight=1.0, aux_classifier=False, feat_level='features'):
rgb_model = i3d.InceptionI3d((num_labels + num_labels), spatial_squeeze=True, final_endpoint='Logits', aux_classifier=aux_classifier)
(logits, endpoints) = rgb_model(input_images, is_training=is_training, dropout_keep_prob=dropout, flip_classifier_gradient=flip_classifier_gradient, flip_weight=flip_weight)
if aux_classifier:
aux_classifier_logits = endpoints['aux_classifier']
else:
aux_classifier_logits = None
features = endpoints[feat_level]
return (logits, aux_classifier_logits, features)
|
def build_i3d(reuse_variables, input_images, is_training, num_labels, flow, temporal_window, dropout, flip_classifier_gradient, flip_weight=1.0, aux_classifier=False, feat_level='features'):
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
return i3d_model(input_images, is_training, num_labels, dropout, flip_classifier_gradient=flip_classifier_gradient, flip_weight=flip_weight, aux_classifier=aux_classifier, feat_level=feat_level)
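# Example (hedged): building one RGB I3D tower on a placeholder clip; the input
# layout (frames x 224 x 224 x 3), window and label count are illustrative
# assumptions, and the i3d module is assumed importable as referenced above.
import tensorflow as tf
clips_demo = tf.placeholder(tf.float32, shape=[None, 16, 224, 224, 3])
(logits_demo, aux_demo, feats_demo) = build_i3d(None, clips_demo, is_training=True, num_labels=8, flow=False, temporal_window=16, dropout=0.5, flip_classifier_gradient=False)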
|
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
|
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
|
def domain_classifier(feat):
shape = feat.get_shape().as_list()
dim = np.prod(shape[1:])
feat = tf.reshape(feat, [(- 1), dim])
with tf.variable_scope('Domain_Classifier'):
d_h_fc0 = tf.layers.dense(feat, 100, kernel_initializer=tf.initializers.truncated_normal(stddev=0.1), name='first')
d_h_fc0 = tf.nn.relu(d_h_fc0)
d_logits = tf.layers.dense(d_h_fc0, 2, kernel_initializer=tf.initializers.truncated_normal(stddev=0.1), name='second')
return d_logits
|
def predict_synch(feat):
shape = feat.get_shape().as_list()
dim = np.prod(shape[1:])
feat = tf.reshape(feat, [(- 1), dim])
d_h_fc0 = tf.layers.dense(feat, 100, kernel_initializer=tf.initializers.truncated_normal(stddev=0.1), name='first')
d_h_fc0 = tf.nn.relu(d_h_fc0)
d_logits = tf.layers.dense(d_h_fc0, 2, kernel_initializer=tf.initializers.truncated_normal(stddev=0.1), name='second')
return (d_logits, d_h_fc0)
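# Example (hedged): both binary heads above consume a flattened feature batch;
# here domain_classifier is applied to an assumed 1024-d feature placeholder.
import tensorflow as tf
feat_demo = tf.placeholder(tf.float32, shape=[None, 1024])
domain_logits_demo = domain_classifier(feat_demo)  # shape [None, 2]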
|
class FlipGradientBuilder(object):
def __init__(self):
self.num_calls = 0
def __call__(self, x, l=1.0):
grad_name = ('FlipGradient%d' % self.num_calls)
@ops.RegisterGradient(grad_name)
def _flip_gradients(op, grad):
return [(tf.negative(grad) * l)]
g = tf.get_default_graph()
with g.gradient_override_map({'Identity': grad_name}):
y = tf.identity(x)
self.num_calls += 1
return y
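# Example (hedged): the builder instance is called like a function; the
# module-level flip_gradient used inside InceptionI3d._build below is
# presumably constructed this way.
import tensorflow as tf
flip_gradient = FlipGradientBuilder()
x_grl = tf.constant([1.0, 2.0])
y_grl = flip_gradient(x_grl, l=0.5)  # forward pass: y == x; backward pass: gradients scaled by -0.5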
|
class Unit3D(snt.AbstractModule):
'Basic unit containing Conv3D + BatchNorm + non-linearity.'
def __init__(self, output_channels, kernel_shape=(1, 1, 1), stride=(1, 1, 1), activation_fn=tf.nn.relu, use_batch_norm=True, use_bias=False, name='unit_3d'):
'Initializes Unit3D module.'
super(Unit3D, self).__init__(name=name)
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
def _build(self, inputs, is_training):
'''Connects the module to inputs.

Args:
  inputs: Inputs to the Unit3D component.
  is_training: whether to use training mode for snt.BatchNorm (boolean).

Returns:
  Outputs from the module.
'''
net = snt.Conv3D(output_channels=self._output_channels, kernel_shape=self._kernel_shape, stride=self._stride, padding=snt.SAME, use_bias=self._use_bias)(inputs)
if self._use_batch_norm:
bn = snt.BatchNorm()
net = bn(net, is_training=is_training, test_local_stats=False)
if (self._activation_fn is not None):
net = self._activation_fn(net)
return net
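# Example (hedged): connecting a single Unit3D block to an assumed 5D input of
# shape (batch, frames, height, width, channels), as done throughout
# InceptionI3d below.
import tensorflow as tf
unit_in_demo = tf.placeholder(tf.float32, shape=[None, 16, 224, 224, 3])
unit_out_demo = Unit3D(output_channels=64, kernel_shape=[7, 7, 7], stride=[2, 2, 2])(unit_in_demo, is_training=True)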
|
class InceptionI3d(snt.AbstractModule):
'''Inception-v1 I3D architecture.

The model is introduced in:

  Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
  Joao Carreira, Andrew Zisserman
  https://arxiv.org/pdf/1705.07750v1.pdf.

See also the Inception architecture, introduced in:

  Going deeper with convolutions
  Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
  Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
  http://arxiv.org/pdf/1409.4842v1.pdf.
'''
VALID_ENDPOINTS = ('Conv3d_1a_7x7', 'MaxPool3d_2a_3x3', 'Conv3d_2b_1x1', 'Conv3d_2c_3x3', 'MaxPool3d_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool3d_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool3d_5a_2x2', 'Mixed_5b', 'Mixed_5c', 'Logits', 'Predictions')
def __init__(self, num_classes=400, spatial_squeeze=True, final_endpoint='Logits', name='inception_i3d', aux_classifier=False):
"Initializes I3D model instance.\n\n Args:\n num_classes: The number of outputs in the logit layer (default 400, which\n matches the Kinetics dataset).\n spatial_squeeze: Whether to squeeze the spatial dimensions for the logits\n before returning (default True).\n final_endpoint: The model contains many possible endpoints.\n `final_endpoint` specifies the last endpoint for the model to be built\n up to. In addition to the output at `final_endpoint`, all the outputs\n at endpoints up to `final_endpoint` will also be returned, in a\n dictionary. `final_endpoint` must be one of\n InceptionI3d.VALID_ENDPOINTS (default 'Logits').\n name: A string (optional). The name of this module.\n\n Raises:\n ValueError: if `final_endpoint` is not recognized.\n "
if (final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % final_endpoint))
super(InceptionI3d, self).__init__(name=name)
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self._final_endpoint = final_endpoint
self._aux_classifier = aux_classifier
def _build(self, inputs, is_training, dropout_keep_prob=1.0, flip_classifier_gradient=False, flip_weight=1.0):
'''Connects the model to inputs.

Args:
  inputs: Inputs to the model, which should have dimensions
    `batch_size` x `num_frames` x 224 x 224 x `num_channels`.
  is_training: whether to use training mode for snt.BatchNorm (boolean).
  dropout_keep_prob: Probability for the tf.nn.dropout layer (float in
    [0, 1)).

Returns:
  A tuple consisting of:
    1. Network output at location `self._final_endpoint`.
    2. Dictionary containing all endpoints up to `self._final_endpoint`,
       indexed by endpoint name.

Raises:
  ValueError: if `self._final_endpoint` is not recognized.
'''
if (self._final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % self._final_endpoint))
net = inputs
end_points = {}
end_point = 'Conv3d_1a_7x7'
net = Unit3D(output_channels=64, kernel_shape=[7, 7, 7], stride=[2, 2, 2], name=end_point)(net, is_training=is_training)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'MaxPool3d_2a_3x3'
net = tf.nn.max_pool3d(net, ksize=[1, 1, 3, 3, 1], strides=[1, 1, 2, 2, 1], padding=snt.SAME, name=end_point)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Conv3d_2b_1x1'
net = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name=end_point)(net, is_training=is_training)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Conv3d_2c_3x3'
net = Unit3D(output_channels=192, kernel_shape=[3, 3, 3], name=end_point)(net, is_training=is_training)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'MaxPool3d_3a_3x3'
net = tf.nn.max_pool3d(net, ksize=[1, 1, 3, 3, 1], strides=[1, 1, 2, 2, 1], padding=snt.SAME, name=end_point)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=96, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=16, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=32, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=192, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=96, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'MaxPool3d_4a_3x3'
net = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 2, 2, 2, 1], padding=snt.SAME, name=end_point)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=192, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=96, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=208, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=16, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=48, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=112, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=224, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=24, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=256, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=24, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=112, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=144, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=288, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=256, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=320, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'MaxPool3d_5a_2x2'
net = tf.nn.max_pool3d(net, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding=snt.SAME, name=end_point)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=256, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=320, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0a_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=384, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=192, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=384, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=48, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Logits'
with tf.variable_scope(end_point):
net = tf.nn.avg_pool3d(net, ksize=[1, 2, 7, 7, 1], strides=[1, 1, 1, 1, 1], padding=snt.VALID)
end_points['features'] = net
net = tf.nn.dropout(net, dropout_keep_prob)
if flip_classifier_gradient:
net = flip_gradient(net, flip_weight)
if self._aux_classifier:
with tf.variable_scope('AuxLogits'):
logits_aux = Unit3D(output_channels=self._num_classes, kernel_shape=[1, 1, 1], activation_fn=None, use_batch_norm=False, use_bias=True, name='Conv3d_0c_1x1')(net, is_training=is_training)
if self._spatial_squeeze:
logits_aux = tf.squeeze(logits_aux, [2, 3], name='SpatialSqueezeAux')
end_points['aux_classifier'] = tf.reduce_mean(logits_aux, axis=1)
logits = Unit3D(output_channels=self._num_classes, kernel_shape=[1, 1, 1], activation_fn=None, use_batch_norm=False, use_bias=True, name='Conv3d_0c_1x1')(net, is_training=is_training)
if self._spatial_squeeze:
logits = tf.squeeze(logits, [2, 3], name='SpatialSqueeze')
averaged_logits = tf.reduce_mean(logits, axis=1)
end_points[end_point] = averaged_logits
if (self._final_endpoint == end_point):
return (averaged_logits, end_points)
end_point = 'Predictions'
predictions = tf.nn.softmax(averaged_logits)
end_points[end_point] = predictions
return (predictions, end_points)
|
def _mix_rbf_kernel(X, Y, gammas, wts=None):
if (wts is None):
wts = ([1] * len(gammas))
XX = tf.matmul(X, X, transpose_b=True)
XY = tf.matmul(X, Y, transpose_b=True)
YY = tf.matmul(Y, Y, transpose_b=True)
X_sqnorms = tf.diag_part(XX)
Y_sqnorms = tf.diag_part(YY)
r = (lambda x: tf.expand_dims(x, 0))
c = (lambda x: tf.expand_dims(x, 1))
(K_XX, K_XY, K_YY) = (0, 0, 0)
for (gamma_t, wt) in zip(gammas, wts):
gamma = (1 / gamma_t)
K_XX += (wt * tf.exp(((- gamma) * ((((- 2) * XX) + c(X_sqnorms)) + r(X_sqnorms)))))
K_XY += (wt * tf.exp(((- gamma) * ((((- 2) * XY) + c(X_sqnorms)) + r(Y_sqnorms)))))
K_YY += (wt * tf.exp(((- gamma) * ((((- 2) * YY) + c(Y_sqnorms)) + r(Y_sqnorms)))))
return (K_XX, K_XY, K_YY, tf.reduce_sum(wts))
|
def rbf_mmd2(X, Y, gammas=1, biased=True):
return mix_rbf_mmd2(X, Y, gammas=[gammas], biased=biased)
|
def mix_rbf_mmd2(X, Y, gammas=(1,), wts=None, biased=True):
(K_XX, K_XY, K_YY, d) = _mix_rbf_kernel(X, Y, gammas, wts)
return _mmd2(K_XX, K_XY, K_YY, const_diagonal=d, biased=biased)
|
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
m = tf.cast(tf.shape(K_XX)[0], tf.float32)
n = tf.cast(tf.shape(K_YY)[0], tf.float32)
if biased:
mmd2 = (((tf.reduce_sum(K_XX) / (m * m)) + (tf.reduce_sum(K_YY) / (n * n))) - ((2 * tf.reduce_sum(K_XY)) / (m * n)))
else:
if (const_diagonal is not False):
trace_X = (m * const_diagonal)
trace_Y = (n * const_diagonal)
else:
trace_X = tf.trace(K_XX)
trace_Y = tf.trace(K_YY)
mmd2 = ((((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))) + ((tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1)))) - ((2 * tf.reduce_sum(K_XY)) / (m * n)))
return (mmd2, n)
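# Example (hedged): biased multi-kernel MMD^2 between two small assumed feature
# batches, exercising the helpers above end to end.
import numpy as np
import tensorflow as tf
X_demo = tf.constant(np.random.randn(8, 1024).astype('float32'))
Y_demo = tf.constant(np.random.randn(6, 1024).astype('float32'))
(mmd2_demo, n_demo) = mix_rbf_mmd2(X_demo, Y_demo, gammas=[10.0, 1.0, 0.1], biased=True)
with tf.Session() as sess:
    print(sess.run(mmd2_demo))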
|
def main():
seen = tf.placeholder(tf.float32, shape=[None, 1024])
unseen = tf.placeholder(tf.float32, shape=[None, 1024])
(mmd, n) = mix_rbf_mmd2(seen, unseen, gammas=[10.0, 1.0, 0.1, 0.01, 0.001])  # supersedes the earlier single-gamma rbf_mmd2 call, whose result was discarded
source_numpy = np.load(sys.argv[1])
target_numpy = np.load(sys.argv[2])
source_numpy_labels = np.load(sys.argv[3])
target_numpy_labels = np.load(sys.argv[4])
with tf.Session() as sess:
print('Total', sess.run(mmd, feed_dict={seen: source_numpy, unseen: target_numpy}))
for i in np.unique(source_numpy_labels):
print(i, sess.run(mmd, feed_dict={seen: source_numpy[(source_numpy_labels == i)], unseen: target_numpy[(target_numpy_labels == i)]}))
|
def _get_variables_to_restore_load(to_ignore, flow):
to_ignore.append('domain_accumulators')
to_ignore.append('accum_accumulators')
to_ignore.append('accumulators')
scope_to_ignore = to_ignore
if flow:
scope_to_ignore.append('RGB')
scope_to_ignore.append('Joint')
else:
scope_to_ignore.append('Flow')
scope_to_ignore.append('Joint')
variables = slim.get_variables_to_restore(exclude=scope_to_ignore)
keyword_filter = to_ignore
return [x for x in variables if (not any(((word in x.name) for word in keyword_filter)))]
|
def read_joint(mode=''):
if (mode == 'restore'):
to_ignore = ['Adam', 'adam', 'Momentum', 'beta1_power', 'beta2_power', 'global_step', 'Domain_Classifier']
elif (mode == 'continue'):
to_ignore = []
else:
raise Exception('Unknown mode for read_joint')
to_ignore.append('domain_accumulators')
to_ignore.append('accum_accumulators')
to_ignore.append('accumulators')
to_ignore.append('Flow')
to_ignore.append('RGB')
variables = slim.get_variables_to_restore(exclude=to_ignore)
variables_restore = [x for x in variables if (not any(((word in x.name) for word in to_ignore)))]
rgb_variable_map = {}
for variable in variables_restore:
name = variable.name.replace(':0', '')
rgb_variable_map[name] = variable
variable_loader = tf.train.Saver(var_list=rgb_variable_map, reshape=True, max_to_keep=20)
return variable_loader
|
def read_i3d_checkpoint(mode='', flow=False, aux_logits=False):
if (mode == 'pretrain'):
to_ignore = ['inception_i3d/Logits', 'Adam', 'adam', 'Momentum', 'beta1_power', 'beta2_power', 'global_step', 'Domain_Classifier', 'arrow_test']
elif (mode == 'restore'):
to_ignore = ['Adam', 'adam', 'Momentum', 'beta1_power', 'beta2_power', 'global_step', 'Domain_Classifier']
elif (mode == 'continue'):
to_ignore = []
else:
raise Exception('Unknown mode for read_i3d_checkpoint')
variables_restore = _get_variables_to_restore_load(to_ignore, flow)
rgb_variable_map = {}
for variable in variables_restore:
name = variable.name.replace(':0', '')
rgb_variable_map[name] = variable
variable_loader = tf.train.Saver(var_list=rgb_variable_map, reshape=True, max_to_keep=20)
return variable_loader
|
def restore_base(sess, saver, checkpoint_path, model_to_restore, restore_mode='model'):
if (restore_mode == 'continue'):
ckpt = tf.train.get_checkpoint_state(checkpoint_path)
if (ckpt and ckpt.model_checkpoint_path):
saver['continue'].restore(sess, ckpt.model_checkpoint_path)
else:
raise Exception('Cannot find a model to continue training')
start_step = ckpt.model_checkpoint_path.split('/')[(- 1)].split('-')[(- 1)]
elif (restore_mode == 'model'):
saver['model'].restore(sess, model_to_restore)
start_step = 0
elif ((restore_mode == 'pretrain') or (restore_mode == 'pretrain_from_synch')):
saver['pretrain'].restore(sess, model_to_restore)
start_step = 0
else:
raise Exception('A valid restore mode must be set: --restore_mode=[continue,model,pretrain]')
return start_step
|
def restore_joint(sess, saver, checkpoint_path, model_to_restore, restore_mode='model'):
if (restore_mode == 'continue'):
ckpt = tf.train.get_checkpoint_state(checkpoint_path)
if (ckpt and ckpt.model_checkpoint_path):
saver['continue'].restore(sess, ckpt.model_checkpoint_path)
else:
raise Exception('Cannot find a model to continue training')
start_step = ckpt.model_checkpoint_path.split('/')[(- 1)].split('-')[(- 1)]
elif ((restore_mode == 'model') or (restore_mode == 'pretrain_from_synch')):
saver['model'].restore(sess, model_to_restore)
start_step = 0
elif (restore_mode == 'pretrain'):
start_step = 0
else:
raise Exception('A valid restore mode must be set: --restore_mode=[continue,model,pretrain]')
return start_step
|
def init_savers_base(flow=False):
pretrain_loader = read_i3d_checkpoint(mode='pretrain', flow=flow)
model_loader = read_i3d_checkpoint(mode='restore', flow=flow)
savesave = read_i3d_checkpoint(mode='continue', flow=flow)
return (pretrain_loader, model_loader, savesave)
|
class TrainTestScript():
''' Creates a framework to train/test an MM-SADA model.
(FLAGS) TensorFlow flags
(results_dir) Directory of tensorboard files and other testing logs
(train_dir) Directory of the saved model

Methods:
train - train MM-SADA
test - evaluate an MM-SADA saved model
'''
def __init__(self, FLAGS, results_dir, train_dir):
self.FLAGS = FLAGS
self.train_dir = train_dir
self.datasets = FLAGS.datasets
self.unseen_dataset = FLAGS.unseen_dataset
self.num_gpus = FLAGS.num_gpus
self.num_labels = FLAGS.num_labels
self.target_data = bool(FLAGS.domain_mode)
if self.target_data:
if ((FLAGS.domain_mode == 'None') or (FLAGS.domain_mode == 'Pretrain')):
self.target_data = False
print('No adaptation')
if FLAGS.domain_mode:
self.domain_mode = FLAGS.domain_mode
else:
self.domain_mode = 'None'
self.lr = FLAGS.lr
if (not FLAGS.modality):
raise Exception('Need to Specify modality')
if ((FLAGS.modality != 'rgb') and (FLAGS.modality != 'flow') and (FLAGS.modality != 'joint')):
raise Exception('Invalid Modality')
self.results_dir = ((results_dir + '_') + FLAGS.modality)
self.modality = FLAGS.modality
self.model = Model(num_gpus=self.num_gpus, num_labels=self.num_labels, modality=self.modality, temporal_window=self.FLAGS.temporal_window, batch_norm_update=self.FLAGS.batch_norm_update, domain_mode=self.domain_mode, steps_per_update=FLAGS.steps_before_update, aux_classifier=self.FLAGS.aux_classifier, synchronised=self.FLAGS.synchronised, predict_synch=self.FLAGS.pred_synch, selfsupervised_lambda=self.FLAGS.self_lambda)
def training_batch_gen(self):
batch_gen = BatchGenerator(self.num_labels, self.datasets, temporal_window=self.FLAGS.temporal_window, rgb_data_path=self.FLAGS.rgb_data_path, flow_data_path=self.FLAGS.flow_data_path, synchronised=self.FLAGS.synchronised, random_sync=self.FLAGS.pred_synch)
batch_gen_unseen = BatchGenerator(self.num_labels, self.unseen_dataset, temporal_window=self.FLAGS.temporal_window, rgb_data_path=self.FLAGS.rgb_data_path, flow_data_path=self.FLAGS.flow_data_path, synchronised=self.FLAGS.synchronised, random_sync=self.FLAGS.pred_synch)
return (batch_gen, batch_gen_unseen)
def testing_batch_gen(self):
batch_gen = BatchGenerator(self.num_labels, self.datasets, temporal_window=self.FLAGS.temporal_window, rgb_data_path=self.FLAGS.rgb_data_path, flow_data_path=self.FLAGS.flow_data_path, synchronised=self.FLAGS.synchronised, random_sync=self.FLAGS.pred_synch)
batch_gen_unseen = BatchGenerator(self.num_labels, self.unseen_dataset, temporal_window=self.FLAGS.temporal_window, rgb_data_path=self.FLAGS.rgb_data_path, flow_data_path=self.FLAGS.flow_data_path, synchronised=self.FLAGS.synchronised, random_sync=self.FLAGS.pred_synch)
return (batch_gen, batch_gen_unseen)
def train(self):
' Train MM-SADA model'
g1 = tf.Graph()
with g1.as_default(), tf.device('/cpu:0'):
self.model.init_savers()
train_writer = tf.summary.FileWriter((self.results_dir + '/train'))
seen_writer = tf.summary.FileWriter((self.results_dir + '/seen'))
unseen_writer = tf.summary.FileWriter((self.results_dir + '/unseen'))
(batch_gen, batch_gen_unseen) = self.training_batch_gen()
with tf.Session(graph=g1, config=tf.ConfigProto(allow_soft_placement=True)) as sess:
print('init variables')
sess.run(tf.global_variables_initializer())
start_step = self.model.restore_model_train(sess, self.train_dir, self.FLAGS.restore_model_flow, self.FLAGS.restore_model_rgb, self.FLAGS.restore_model_joint, self.FLAGS.restore_mode)
for step in range(int(start_step), (self.FLAGS.max_steps + 1)):
p = (float(step) / self.FLAGS.max_steps)
lin = (((2 / (1.0 + np.exp(((- 10.0) * p)))) - 1) * self.FLAGS.lambda_in)  # DANN-style schedule: adversarial weight ramps from 0 to lambda_in over training
start_time = time.time()
(training_loss, training_accuracy, summary) = train_step(sess, self.model, self.FLAGS, batch_gen, batch_gen_unseen, lin, self.target_data)
for s in summary:
train_writer.add_summary(s, step)
duration = (time.time() - start_time)
if ((step % 50) == 0):
num_examples_per_step = self.FLAGS.batch_size
examples_per_sec = (num_examples_per_step / duration)
sec_per_batch = duration
format_str = '(Train) %s: step %d, loss %.3f, acc %.3f (%.1f examples/sec; %.3f sec/batch)'
print((format_str % (datetime.now(), step, training_loss, training_accuracy, examples_per_sec, sec_per_batch)))
(valaccuracy, domainaccuracy, average_class) = evaluate(sess, self.model, self.FLAGS, batch_gen, lin)
domainaccuracy = (1.0 - domainaccuracy)
val_summary = tf.Summary()
val_summary.value.add(tag='acc/Accuracy', simple_value=valaccuracy)
domain_summary = tf.Summary()
domain_summary.value.add(tag='acc/Domain', simple_value=domainaccuracy)
seen_writer.add_summary(val_summary, step)
seen_writer.add_summary(domain_summary, step)
format_str = '(Val) %s: domain:%s step:%d accuracy:%f avg_class %f domain_accuracy %f'
print((format_str % (datetime.now(), 'Source', step, valaccuracy, average_class, domainaccuracy)))
if self.FLAGS.pred_synch:
synch_accuracy = evaluate_self_supervised(sess, self.model, self.FLAGS, batch_gen, lin, mode='synch')
val_summary = tf.Summary()
val_summary.value.add(tag='acc/Synch_Accuracy', simple_value=synch_accuracy)
seen_writer.add_summary(val_summary, step)
format_str = '(Val) %s: domain:%s step:%d synch_accuracy:%f'
print((format_str % (datetime.now(), 'Source', step, synch_accuracy)))
(valaccuracy, domainaccuracy, average_class) = evaluate(sess, self.model, self.FLAGS, batch_gen_unseen, lin)
val_summary = tf.Summary()
val_summary.value.add(tag='acc/Accuracy', simple_value=valaccuracy)
domain_summary = tf.Summary()
domain_summary.value.add(tag='acc/Domain', simple_value=domainaccuracy)
unseen_writer.add_summary(val_summary, step)
unseen_writer.add_summary(domain_summary, step)
format_str = '(Val) %s: domain:%s step:%d accuracy:%f avg_class %f domain_accuracy %f'
print((format_str % (datetime.now(), 'Target', step, valaccuracy, average_class, domainaccuracy)))
if self.FLAGS.pred_synch:
synch_accuracy = evaluate_self_supervised(sess, self.model, self.FLAGS, batch_gen_unseen, lin, mode='synch')
val_summary = tf.Summary()
val_summary.value.add(tag='acc/Synch_Accuracy', simple_value=synch_accuracy)
unseen_writer.add_summary(val_summary, step)
format_str = '(Val) %s: domain:%s step:%d synch_accuracy:%f'
print((format_str % (datetime.now(), 'Target', step, synch_accuracy)))
if (((step % 50) == 0) or (step == self.FLAGS.max_steps)):
self.model.save_model(sess, self.train_dir, step)
def test(self):
' Evaluate MM-SADA model'
def _save_results(FLAGS, feature_list, label_list, predict_list, img_path_list, ident, test=True):
' Save statistics and extracted features to feature_path folder'
if test:
stringtest = 'test'
else:
stringtest = 'train'
source_domain = os.path.basename(FLAGS.datasets)
np.save(((((((((FLAGS.feature_path + '/') + stringtest) + '_feat_') + source_domain) + '_') + str(FLAGS.modelnum)) + '_') + str(ident)), feature_list)
np.save(((((((((FLAGS.feature_path + '/') + stringtest) + '_label') + source_domain) + '_') + str(FLAGS.modelnum)) + '_') + str(ident)), label_list)
np.save(((((((((FLAGS.feature_path + '/') + stringtest) + '_pred') + source_domain) + '_') + str(FLAGS.modelnum)) + '_') + str(ident)), predict_list)
np.save(((((((((FLAGS.feature_path + '/') + stringtest) + '_filenames') + source_domain) + '_') + str(FLAGS.modelnum)) + '_') + str(ident)), img_path_list)
with tf.Graph().as_default(), tf.device('/cpu:0'):
(batch_gen, batch_gen_unseen) = self.testing_batch_gen()
self.model.init_savers()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
self.model.restore_model_test(sess, self.train_dir, self.FLAGS.modelnum)
lin = 0.0
step = 0
seen_filenames = ''
seen_accuracy = ''
(valaccuracy, domainaccuracy, valperclass, valfeat, valfile, vallabel, valpredict) = evaluate(sess, self.model, self.FLAGS, batch_gen, lin, test=(not self.FLAGS.eval_train), out_features=self.FLAGS.features, extra_info=True)
if self.FLAGS.features:
_save_results(self.FLAGS, valfeat, vallabel, valpredict, valfile, 'Source', test=(not self.FLAGS.eval_train))
seen_accuracy = ((seen_accuracy + str(valaccuracy)) + ',')
seen_filenames = ((seen_filenames + 'Source') + ',')
format_str = '(Val) %s: domain:%s step:%d accuracy:%f domain_accuracy %f'
print((format_str % (datetime.now(), 'Source', step, valaccuracy, domainaccuracy)))
(valaccuracy, domainaccuracy, valperclass, valfeat, valfile, vallabel, valpredict) = evaluate(sess, self.model, self.FLAGS, batch_gen_unseen, lin, test=(not self.FLAGS.eval_train), out_features=self.FLAGS.features, extra_info=True)
domainaccuracy = (1.0 - domainaccuracy)
if self.FLAGS.features:
_save_results(self.FLAGS, valfeat, vallabel, valpredict, valfile, 'Target', test=(not self.FLAGS.eval_train))
format_str = '(Val) %s: domain:%s step:%d accuracy:%f domain_accuracy %f'
print((format_str % (datetime.now(), 'Target', step, valaccuracy, domainaccuracy)))
results_log_file = '/logs/results.list'
if (not os.path.exists((self.results_dir + '/logs'))):
os.makedirs((self.results_dir + '/logs'))
if (not os.path.isfile((self.results_dir + results_log_file))):
f = open((self.results_dir + results_log_file), 'w')
f.write(((seen_filenames + 'target,step,target_directory') + '\n'))
f.close()
f = open((self.results_dir + results_log_file), 'a')
f.write(((((((seen_accuracy + str(valaccuracy)) + ',') + str(self.FLAGS.modelnum)) + ',') + self.FLAGS.unseen_dataset) + '\n'))
f.close()
|
def parse_args(FLAGS):
error = False
if (FLAGS.train is None):
print('Specify whether to train (True) or test (False) --train')
error = True
if (FLAGS.results_path is None):
print('Specify path to save logs and models --results_path')
error = True
if (FLAGS.datasets is None):
print('Specify the Source domain dataset --datasets')
error = True
if (FLAGS.unseen_dataset is None):
print('Specify the Target domain dataset --unseen_dataset')
error = True
if (FLAGS.rgb_data_path is None):
print('Specify the path to rgb frames --rgb_data_path')
error = True
if (FLAGS.flow_data_path is None):
print('Specify the path to flow frames --flow_data_path')
error = True
if error:
return True
if FLAGS.train:
if (FLAGS.restore_mode is None):
print("Specify the restore mode --restore_mode ('pretrain', 'model', 'continue')")
error = True
if ((FLAGS.restore_model_flow is None) and FLAGS.flow):
    print('Specify pretrained flow model to use --restore_model_flow')
    error = True
if ((FLAGS.restore_model_rgb is None) and (not FLAGS.flow)):
    print('Specify pretrained rgb model to use --restore_model_rgb')
    error = True
else:
if (FLAGS.modelnum is None):
print('Specify model number to restore for testing --modelnum')
error = True
if FLAGS.features:
if (FLAGS.feature_path is None):
print('Specify path to store features --feature_path')
error = True
if error:
return True
return False
|
def input_parser():
flags = tf.app.flags
flags.DEFINE_boolean('train', None, 'Whether to train (True) or evaluate (False)')
flags.DEFINE_string('results_path', None, 'Where to store the log files and saved models')
flags.DEFINE_float('lr', 0.001, 'Initial Learning Rate')
flags.DEFINE_float('batch_norm_update', 0.9, 'Update rate of batch norm statistics')
flags.DEFINE_integer('num_gpus', 8, 'number of gpus to run')
flags.DEFINE_integer('max_steps', 6000, 'Number of batches to run.')
flags.DEFINE_integer('steps_before_update', 1, 'number of steps to run before updating weights')
flags.DEFINE_string('domain_mode', None, 'background only for dataset2')
flags.DEFINE_float('lambda_in', 1.0, 'grl hyperparameter')
flags.DEFINE_float('self_lambda', 5.0, 'weighting of self-supervised loss')
flags.DEFINE_string('datasets', None, 'Comma-separated list of datasets')
flags.DEFINE_string('unseen_dataset', None, 'Specify file path to unseen dataset folder')
flags.DEFINE_integer('num_labels', 8, 'Total number of combined labels')
flags.DEFINE_integer('batch_size', 128, 'Size of a batch')
flags.DEFINE_boolean('synchronised', None, 'Whether to synchronise flow and rgb')
flags.DEFINE_string('modality', 'joint', 'rgb, flow or joint (default: joint)')
flags.DEFINE_integer('temporal_window', 16, 'i3d temporal window')
flags.DEFINE_boolean('aux_classifier', None, '2 classifiers')
flags.DEFINE_boolean('pred_synch', None, 'Predict if modalities are synchronised')
flags.DEFINE_boolean('features', None, 'Whether to produce features or evaluate')
flags.DEFINE_string('feature_path', None, 'path to store features')
flags.DEFINE_boolean('eval_train', None, 'Whether to evaluate training examples rather than test examples')
flags.DEFINE_integer('modelnum', None, 'model number to restore for testing')
flags.DEFINE_string('restore_model_rgb', None, 'Load these weights excluding Logits')
flags.DEFINE_string('restore_model_flow', None, 'Load these weights excluding Logits')
flags.DEFINE_string('restore_model_joint', None, 'Load these weights excluding Logits')
flags.DEFINE_string('rgb_data_path', None, 'path to rgb data')
flags.DEFINE_string('flow_data_path', None, 'path to flow data')
flags.DEFINE_string('restore_mode', None, 'pretrain (for base network without logits), model (restore base model with classification logits) or continue (restore everything)')
FLAGS = flags.FLAGS
source_domain = os.path.basename(FLAGS.datasets)
target_domain = os.path.basename(FLAGS.unseen_dataset)
train_dir = '{}/saved_model_{}_{}_{}_{}'.format(FLAGS.results_path, source_domain, target_domain, FLAGS.lr, FLAGS.batch_norm_update)
if (not os.path.exists(train_dir)):
os.makedirs(train_dir)
results_dir = '{}/results_{}_{}_{}_{}'.format(FLAGS.results_path, source_domain, target_domain, FLAGS.lr, FLAGS.batch_norm_update)
return (FLAGS, train_dir, results_dir)
|
def main():
(flags, train_dir, results_dir) = input_parser()
if parse_args(flags):
return
train_test = TrainTestScript(flags, results_dir, train_dir)
if flags.train:
train_test.train()
else:
train_test.test()
|
def conv_block(in_dim, out_dim, act_fn, kernel_size=3, stride=1, padding=1, dilation=1):
model = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation), nn.BatchNorm2d(out_dim), act_fn)
return model
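# Usage sketch (hypothetical values, not from the original script): with the default
# kernel_size=3, stride=1, padding=1, conv_block keeps spatial size and only remaps channels.
import torch
import torch.nn as nn
demo_block = conv_block(in_dim=3, out_dim=64, act_fn=nn.ReLU())
assert demo_block(torch.randn(1, 3, 32, 32)).shape == (1, 64, 32, 32)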
|
def conv_block_Asym_Inception(in_dim, out_dim, act_fn, kernel_size=3, stride=1, padding=1, dilation=1):
model = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=[kernel_size, 1], padding=tuple([padding, 0]), dilation=(dilation, 1)), nn.BatchNorm2d(out_dim), nn.ReLU(), nn.Conv2d(out_dim, out_dim, kernel_size=[1, kernel_size], padding=tuple([0, padding]), dilation=(1, dilation)), nn.BatchNorm2d(out_dim), nn.ReLU())
return model
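# Usage sketch (hypothetical values): the block factorises a kxk convolution into kx1
# followed by 1xk, trading k*k weights per channel pair for 2*k while keeping the same
# receptive field. Note that act_fn is accepted but the block hard-codes nn.ReLU().
import torch
import torch.nn as nn
asym = conv_block_Asym_Inception(in_dim=8, out_dim=16, act_fn=nn.ReLU())
assert asym(torch.randn(1, 8, 32, 32)).shape == (1, 16, 32, 32)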
|
def conv_decod_block(in_dim, out_dim, act_fn):
model = nn.Sequential(nn.ConvTranspose2d(in_dim, out_dim, kernel_size=3, stride=2, padding=1, output_padding=1), nn.BatchNorm2d(out_dim), act_fn)
return model
|
def maxpool():
pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
return pool
|
class bottleNeck(nn.Module):
    # Pre-activation residual bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4).
    def __init__(self, nin, nmid):
        super(bottleNeck, self).__init__()
        self.resBlock = nn.Sequential(nn.BatchNorm2d(nin), nn.ReLU(), nn.Conv2d(nin, nmid, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(nmid), nn.ReLU(), nn.Conv2d(nmid, nmid, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(nmid), nn.ReLU(), nn.Conv2d(nmid, (nmid * 4), kernel_size=1, stride=1, padding=0))
    def forward(self, input):
        # The skip connection requires nin == nmid * 4 for the addition to be valid.
        out = self.resBlock(input)
        return (out + input)
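# Usage sketch (hypothetical values): the skip connection only works when the expanded
# output (nmid * 4 channels) matches the input channel count, e.g. nin=64 with nmid=16.
import torch
bneck = bottleNeck(nin=64, nmid=16)
assert bneck(torch.randn(1, 64, 16, 16)).shape == (1, 64, 16, 16)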
|
def convBatch(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d, dilation=1):
return nn.Sequential(layer(nin, nout, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias, dilation=dilation), nn.BatchNorm2d(nout), nn.PReLU())
|
def downSampleConv(nin, nout, kernel_size=3, stride=2, padding=1, bias=False):
return nn.Sequential(convBatch(nin, nout, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
|
def upSampleConv(nin, nout, kernel_size=3, upscale=2, padding=1, bias=False):
return nn.Sequential(nn.Upsample(scale_factor=upscale), convBatch(nin, nout, kernel_size=kernel_size, stride=1, padding=padding, bias=bias), convBatch(nout, nout, kernel_size=3, stride=1, padding=1, bias=bias))
|
def conv(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d, BN=False, ws=False, activ=nn.LeakyReLU(0.2), gainWS=2):
convlayer = layer(nin, nout, kernel_size, stride=stride, padding=padding, bias=bias)
layers = []
if ws:
layers.append(WScaleLayer(convlayer, gain=gainWS))
if BN:
layers.append(nn.BatchNorm2d(nout))
if (activ is not None):
if (activ == nn.PReLU):
layers.append(activ(num_parameters=1))
else:
layers.append(activ)
# bool(ws) doubles as an index: the conv lands at position 0 normally, or at
# position 1 (after the WScaleLayer) when weight scaling is enabled.
layers.insert(ws, convlayer)
return nn.Sequential(*layers)
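# Usage sketch (hypothetical values): conv() assembles [conv, BatchNorm?, activation?];
# WScaleLayer is an external class assumed to be in scope only when ws=True.
import torch
import torch.nn as nn
c = conv(nin=3, nout=8, BN=True, activ=nn.LeakyReLU(0.2))
assert c(torch.randn(1, 3, 16, 16)).shape == (1, 8, 16, 16)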
|
class ResidualConv(nn.Module):
def __init__(self, nin, nout, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
super(ResidualConv, self).__init__()
convs = [conv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ), conv(nout, nout, bias=bias, BN=BN, ws=ws, activ=None)]
self.convs = nn.Sequential(*convs)
res = []
if (nin != nout):
res.append(conv(nin, nout, kernel_size=1, padding=0, bias=False, BN=BN, ws=ws, activ=None))
self.res = nn.Sequential(*res)
activation = []
if (activ is not None):
if (activ == nn.PReLU):
activation.append(activ(num_parameters=1))
else:
activation.append(activ)
self.activation = nn.Sequential(*activation)
def forward(self, input):
out = self.convs(input)
return self.activation((out + self.res(input)))
|
class residualConv(nn.Module):
def __init__(self, nin, nout):
super(residualConv, self).__init__()
self.convs = nn.Sequential(convBatch(nin, nout), nn.Conv2d(nout, nout, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(nout))
self.res = nn.Sequential()
if (nin != nout):
self.res = nn.Sequential(nn.Conv2d(nin, nout, kernel_size=1, bias=False), nn.BatchNorm2d(nout))
def forward(self, input):
out = self.convs(input)
return F.leaky_relu((out + self.res(input)), 0.2)
|
def weights_init(m):
if ((type(m) == nn.Conv2d) or (type(m) == nn.ConvTranspose2d)):
nn.init.xavier_normal_(m.weight.data)
elif (type(m) == nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
|
def runTraining():
print(('-' * 40))
print('~~~~~~~~ Starting the training... ~~~~~~')
print(('-' * 40))
batch_size = 4
batch_size_val = 1
batch_size_val_save = 1
lr = 0.0001
epoch = 200
num_classes = 2
initial_kernels = 32
modelName = 'IVD_Net'
img_names_ALL = []
print(('.' * 40))
print(' ....Model name: {} ........'.format(modelName))
print(' - Num. classes: {}'.format(num_classes))
print(' - Num. initial kernels: {}'.format(initial_kernels))
print(' - Batch size: {}'.format(batch_size))
print(' - Learning rate: {}'.format(lr))
print(' - Num. epochs: {}'.format(epoch))
print(('.' * 40))
root_dir = '../Data/Training_PngITK'
model_dir = 'IVD_Net'
transform = transforms.Compose([transforms.ToTensor()])
mask_transform = transforms.Compose([transforms.ToTensor()])
train_set = medicalDataLoader.MedicalImageDataset('train', root_dir, transform=transform, mask_transform=mask_transform, augment=False, equalize=False)
train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=5, shuffle=True)
val_set = medicalDataLoader.MedicalImageDataset('val', root_dir, transform=transform, mask_transform=mask_transform, equalize=False)
val_loader = DataLoader(val_set, batch_size=batch_size_val, num_workers=5, shuffle=False)
val_loader_save_images = DataLoader(val_set, batch_size=batch_size_val_save, num_workers=5, shuffle=False)
print('~~~~~~~~~~~ Creating the model ~~~~~~~~~~')
net = IVD_Net_asym(1, num_classes, initial_kernels)
net.apply(weights_init)
softMax = nn.Softmax(dim=1)
CE_loss = nn.CrossEntropyLoss()
Dice_ = computeDiceOneHotBinary()
if torch.cuda.is_available():
net.cuda()
softMax.cuda()
CE_loss.cuda()
Dice_.cuda()
# try:
#     net = torch.load('modelName')
#     print("--------model restored--------")
# except:
#     print("--------model not restored--------")
#     pass
optimizer = Adam(net.parameters(), lr=lr, betas=(0.9, 0.99), amsgrad=False)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', patience=4, verbose=True, factor=(10 ** (- 0.5)))
(BestDice, BestEpoch) = (0, 0)
d1Train = []
d1Val = []
Losses = []
print('~~~~~~~~~~~ Starting the training ~~~~~~~~~~')
for i in range(epoch):
net.train()
lossTrain = []
d1TrainTemp = []
totalImages = len(train_loader)
for (j, data) in enumerate(train_loader):
(image_f, image_i, image_o, image_w, labels, img_names) = data
image_f = image_f.type(torch.FloatTensor)
image_i = image_i.type(torch.FloatTensor)
image_o = image_o.type(torch.FloatTensor)
image_w = image_w.type(torch.FloatTensor)
labels = labels.numpy()
idx = np.where((labels > 0.0))
labels[idx] = 1.0
labels = torch.from_numpy(labels)
labels = labels.type(torch.FloatTensor)
optimizer.zero_grad()
MRI = to_var(torch.cat((image_f, image_i, image_o, image_w), dim=1))
Segmentation = to_var(labels)
target_dice = to_var(torch.ones(1))
net.zero_grad()
segmentation_prediction = net(MRI)
predClass_y = softMax(segmentation_prediction)
Segmentation_planes = getOneHotSegmentation(Segmentation)
segmentation_prediction_ones = predToSegmentation(predClass_y)
Segmentation_class = getTargetSegmentation(Segmentation)
CE_loss_ = CE_loss(segmentation_prediction, Segmentation_class)
(DicesB, DicesF) = Dice_(segmentation_prediction_ones, Segmentation_planes)
DiceB = DicesToDice(DicesB)
DiceF = DicesToDice(DicesF)
loss = CE_loss_
loss.backward()
optimizer.step()
lossTrain.append(loss.item())
printProgressBar((j + 1), totalImages, prefix='[Training] Epoch: {} '.format(i), length=15, suffix=' Mean Dice: {:.4f},'.format(DiceF.item()))
printProgressBar(totalImages, totalImages, done='[Training] Epoch: {}, LossG: {:.4f}'.format(i, np.mean(lossTrain)))
Losses.append(np.mean(lossTrain))
d1 = inference(net, val_loader, batch_size, i)
d1Val.append(d1)
d1Train.append(np.mean(d1TrainTemp) if d1TrainTemp else 0.0)  # d1TrainTemp is never filled above; guard against np.mean([])
mainPath = ('../Results/Statistics/' + modelName)
directory = mainPath
if (not os.path.exists(directory)):
os.makedirs(directory)
np.save(os.path.join(directory, 'Losses.npy'), Losses)
np.save(os.path.join(directory, 'd1Val.npy'), d1Val)
np.save(os.path.join(directory, 'd1Train.npy'), d1Train)
currentDice = d1[0].numpy()
print('[val] DSC: {:.4f} '.format(d1[0]))
if (currentDice > BestDice):
BestDice = currentDice
BestEpoch = i
if (currentDice > 0.75):
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Saving best model..... ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
if (not os.path.exists(model_dir)):
os.makedirs(model_dir)
torch.save(net, os.path.join(model_dir, (('Best_' + modelName) + '.pkl')))
saveImages(net, val_loader_save_images, batch_size_val_save, i, modelName)
if (i % (BestEpoch + 10)):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
|
def make_dataset(root, mode):
assert (mode in ['train', 'val', 'test'])
items = []
# All three splits share an identical directory layout, so build the list generically.
fat_path = os.path.join(root, mode, 'Fat')
inn_path = os.path.join(root, mode, 'Inn')
opp_path = os.path.join(root, mode, 'Opp')
wat_path = os.path.join(root, mode, 'Wat')
mask_path = os.path.join(root, mode, 'GT')
images_fat = sorted(os.listdir(fat_path))
images_inn = sorted(os.listdir(inn_path))
images_opp = sorted(os.listdir(opp_path))
images_wat = sorted(os.listdir(wat_path))
labels = sorted(os.listdir(mask_path))
for (it_f, it_i, it_o, it_w, it_gt) in zip(images_fat, images_inn, images_opp, images_wat, labels):
    item = (os.path.join(fat_path, it_f), os.path.join(inn_path, it_i), os.path.join(opp_path, it_o), os.path.join(wat_path, it_w), os.path.join(mask_path, it_gt))
    items.append(item)
return items
|
class MedicalImageDataset(Dataset):
'Face Landmarks dataset.'
def __init__(self, mode, root_dir, transform=None, mask_transform=None, augment=False, equalize=False):
'\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n '
self.root_dir = root_dir
self.transform = transform
self.mask_transform = mask_transform
self.imgs = make_dataset(root_dir, mode)
self.augmentation = augment
self.equalize = equalize
def __len__(self):
return len(self.imgs)
def augment(self, img_f, img_i, img_o, img_w, mask):
    # Rotate all modalities and the mask by the same random angle; the original
    # range was asymmetric, a symmetric [-10, 10] degree range is assumed here.
    angle = ((random() * 20) - 10)
    img_f = img_f.rotate(angle)
    img_i = img_i.rotate(angle)
    img_o = img_o.rotate(angle)
    img_w = img_w.rotate(angle)
    mask = mask.rotate(angle)
    return (img_f, img_i, img_o, img_w, mask)
def __getitem__(self, index):
(fat_path, inn_path, opp_path, wat_path, mask_path) = self.imgs[index]
img_f = Image.open(fat_path)
img_i = Image.open(inn_path)
img_o = Image.open(opp_path)
img_w = Image.open(wat_path)
mask = Image.open(mask_path).convert('L')
if self.equalize:
    img_f = ImageOps.equalize(img_f)
    img_i = ImageOps.equalize(img_i)
    img_o = ImageOps.equalize(img_o)
    img_w = ImageOps.equalize(img_w)
if self.augmentation:
    (img_f, img_i, img_o, img_w, mask) = self.augment(img_f, img_i, img_o, img_w, mask)
if self.transform:
img_f = self.transform(img_f)
img_i = self.transform(img_i)
img_o = self.transform(img_o)
img_w = self.transform(img_w)
mask = self.mask_transform(mask)
return [img_f, img_i, img_o, img_w, mask, fat_path]
|
class Adam(Optimizer):
'Implements Adam algorithm.\n It has been proposed in `Adam: A Method for Stochastic Optimization`_.\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n '
def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
if (not (0.0 <= betas[0] < 1.0)):
raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
if (not (0.0 <= betas[1] < 1.0)):
raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def step(self, closure=None):
'Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
(exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
(beta1, beta2) = group['betas']
state['step'] += 1
if (group['weight_decay'] != 0):
grad = grad.add(group['weight_decay'], p.data)
exp_avg.mul_(beta1).add_((1 - beta1), grad)
exp_avg_sq.mul_(beta2).addcmul_((1 - beta2), grad, grad)
if amsgrad:
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = (1 - (beta1 ** state['step']))
bias_correction2 = (1 - (beta2 ** state['step']))
step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
p.data.addcdiv_((- step_size), exp_avg, denom)
return loss
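# Usage sketch (hypothetical toy problem, assuming the PyTorch era this code targets):
# minimise w^2 with the AMSGrad variant; w should end up near 0.
import torch
w = torch.nn.Parameter(torch.tensor([5.0]))
opt = Adam([w], lr=0.1, amsgrad=True)
for _ in range(200):
    opt.zero_grad()
    loss = (w ** 2).sum()
    loss.backward()
    opt.step()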
|
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='=', empty=' ', tip='>', begin='[', end=']', done='[DONE]', clear=True):
'\n Print iterations progress.\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration [int]\n total - Required : total iterations [int]\n prefix - Optional : prefix string [str]\n suffix - Optional : suffix string [str]\n decimals - Optional : positive number of decimals in percent [int]\n length - Optional : character length of bar [int]\n fill - Optional : bar fill character [str] (ex: \'â– \', \'â–ˆ\', \'#\', \'=\')\n empty - Optional : not filled bar character [str] (ex: \'-\', \' \', \'•\')\n tip - Optional : character at the end of the fill bar [str] (ex: \'>\', \'\')\n begin - Optional : starting bar character [str] (ex: \'|\', \'â–•\', \'[\')\n end - Optional : ending bar character [str] (ex: \'|\', \'â–\x8f\', \']\')\n done - Optional : display message when 100% is reached [str] (ex: "[DONE]")\n clear - Optional : display completion message or leave as is [str]\n '
percent = (('{0:.' + str(decimals)) + 'f}').format((100 * (iteration / float(total))))
filledLength = int(((length * iteration) // total))
bar = (fill * filledLength)
if (iteration != total):
bar = (bar + tip)
bar = (bar + (empty * ((length - filledLength) - len(tip))))
display = '\r{prefix}{begin}{bar}{end} {percent}%{suffix}'.format(prefix=prefix, begin=begin, bar=bar, end=end, percent=percent, suffix=suffix)
(print(display, end=''),)
if (iteration == total):
if clear:
finish = '\r{prefix}{done}'.format(prefix=prefix, done=done)
if hasattr(str, 'decode'):
finish = finish.decode('utf-8')
display = display.decode('utf-8')
clear = (' ' * max((len(display) - len(finish)), 0))
print((finish + clear))
else:
print('')
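# Usage sketch (hypothetical loop): renders a 30-character bar over 50 steps and
# prints the done message once the final iteration is reached.
import time
for k in range(50):
    time.sleep(0.01)
    printProgressBar((k + 1), 50, prefix='[Demo] ', length=30)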
|
def verbose(verboseLevel, requiredLevel, printFunc=print, *printArgs, **kwPrintArgs):
'\n Calls `printFunc` passing it `printArgs` and `kwPrintArgs`\n only if `verboseLevel` meets the `requiredLevel` of verbosity.\n\n Following forms are supported:\n\n > verbose(1, 0, "message")\n\n >> message\n\n > verbose(1, 0, "message1", "message2")\n\n >> message1 message2\n\n > verbose(1, 2, "message")\n\n >> <nothing since verbosity level not high enough>\n\n > verbose(1, 1, lambda x: print(\'MSG: \' + x), \'message\')\n\n >> MSG: message\n\n > def myprint(x, y="msg_y", z=True): print(\'MSG_Y: \' + y) if z else print(\'MSG_X: \' + x)\n > verbose(1, 1, myprint, "msg_x", "msg_y")\n\n >> MSG_Y: msg_y\n\n > verbose(1, 1, myprint, "msg_x", "msg_Y!", z=True)\n\n >> MSG_Y: msg_Y!\n\n > verbose(1, 1, myprint, "msg_x", z=False)\n\n >> MSG_X: msg_x\n\n > verbose(1, 1, myprint, "msg_x", z=True)\n\n >> MSG_Y: msg_y\n '
if (verboseLevel >= requiredLevel):
printArgs = (printArgs if (printArgs is not None) else tuple(['']))
if (not hasattr(printFunc, '__call__')):
printArgs = (tuple([printFunc]) + printArgs)
printFunc = print
printFunc(*printArgs, **kwPrintArgs)
|
def print_flush(txt=''):
print(txt)
sys.stdout.flush()
|
def hide_cursor():
if (os.name == 'nt'):
ci = _CursorInfo()
handle = ctypes.windll.kernel32.GetStdHandle((- 11))
ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
ci.visible = False
ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
elif (os.name == 'posix'):
sys.stdout.write('\x1b[?25l')
sys.stdout.flush()
|
def show_cursor():
if (os.name == 'nt'):
ci = _CursorInfo()
handle = ctypes.windll.kernel32.GetStdHandle((- 11))
ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
ci.visible = True
ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
elif (os.name == 'posix'):
sys.stdout.write('\x1b[?25h')
sys.stdout.flush()
|
def to_var(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
|
class computeDiceOneHotBinary(nn.Module):
def __init__(self):
super(computeDiceOneHotBinary, self).__init__()
def dice(self, input, target):
inter = (input * target).float().sum()
sum = (input.sum() + target.sum())
if (sum == 0).all():
return (((2 * inter) + 1e-08) / (sum + 1e-08))
return ((2 * (input * target).float().sum()) / (input.sum() + target.sum()))
def inter(self, input, target):
return (input * target).float().sum()
def sum(self, input, target):
return (input.sum() + target.sum())
def forward(self, pred, GT):
batchsize = GT.size(0)
DiceB = to_var(torch.zeros(batchsize, 2))
DiceF = to_var(torch.zeros(batchsize, 2))
for i in range(batchsize):
DiceB[(i, 0)] = self.inter(pred[(i, 0)], GT[(i, 0)])
DiceF[(i, 0)] = self.inter(pred[(i, 1)], GT[(i, 1)])
DiceB[(i, 1)] = self.sum(pred[(i, 0)], GT[(i, 0)])
DiceF[(i, 1)] = self.sum(pred[(i, 1)], GT[(i, 1)])
return (DiceB, DiceF)
|
def DicesToDice(Dices):
sums = Dices.sum(dim=0)
return (((2 * sums[0]) + 1e-08) / (sums[1] + 1e-08))
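# Worked example (hypothetical numbers): each row of Dices holds [intersection, |pred| + |GT|],
# so the aggregate Dice is (2 * sum of intersections + eps) / (sum of sizes + eps).
import torch
dices = torch.tensor([[3.0, 10.0], [2.0, 6.0]])
assert abs(float(DicesToDice(dices)) - 0.625) < 1e-06  # 2 * (3 + 2) / (10 + 6)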
|
def getSingleImageBin(pred):
n_channels = 2
Val = to_var(torch.zeros(2))
Val[1] = 1.0
x = predToSegmentation(pred)
out = (x * Val.view(1, n_channels, 1, 1))
return out.sum(dim=1, keepdim=True)
|
def predToSegmentation(pred):
Max = pred.max(dim=1, keepdim=True)[0]
x = (pred / Max)
return (x == 1).float()
|
def getOneHotSegmentation(batch):
backgroundVal = 0
label1 = 1.0
oneHotLabels = torch.cat(((batch == backgroundVal), (batch == label1)), dim=1)
return oneHotLabels.float()
|
def getTargetSegmentation(batch):
spineLabel = 1.0
return (batch / spineLabel).round().long().squeeze()
|
def saveImages(net, img_batch, batch_size, epoch, modelName):
path = ((('../Results/Images_PNG/' + modelName) + '_') + str(epoch))
if (not os.path.exists(path)):
os.makedirs(path)
total = len(img_batch)
net.eval()
softMax = nn.Softmax(dim=1)
for (i, data) in enumerate(img_batch):
printProgressBar(i, total, prefix='Saving images.....', length=30)
(image_f, image_i, image_o, image_w, labels, img_names) = data
image_f = image_f.type(torch.FloatTensor)
image_i = image_i.type(torch.FloatTensor)
image_o = image_o.type(torch.FloatTensor)
image_w = image_w.type(torch.FloatTensor)
images = torch.cat((image_f, image_i, image_o, image_w), dim=1)
MRI = to_var(images)
image_f_var = to_var(image_f)
Segmentation = to_var(labels)
segmentation_prediction = net(MRI)
pred_y = softMax(segmentation_prediction)
segmentation = getSingleImageBin(pred_y)
imgname = img_names[0].split('/Fat/')
imgname = imgname[1].split('_fat.png')
out = torch.cat((image_f_var, segmentation, (Segmentation * 255)))
torchvision.utils.save_image(out.data, os.path.join(path, (imgname[0] + '.png')), nrow=batch_size, padding=2, normalize=False, range=None, scale_each=False)
printProgressBar(total, total, done='Images saved !')
|
def inference(net, img_batch, batch_size, epoch):
total = len(img_batch)
Dice1 = torch.zeros(total, 2)
net.eval()
dice = computeDiceOneHotBinary().cuda()
softMax = nn.Softmax(dim=1).cuda()
img_names_ALL = []
for (i, data) in enumerate(img_batch):
printProgressBar(i, total, prefix='[Inference] Getting segmentations...', length=30)
(image_f, image_i, image_o, image_w, labels, img_names) = data
image_f = (image_f.type(torch.FloatTensor) / 65535)
image_i = (image_i.type(torch.FloatTensor) / 65535)
image_o = (image_o.type(torch.FloatTensor) / 65535)
image_w = (image_w.type(torch.FloatTensor) / 65535)
images = torch.cat((image_f, image_i, image_o, image_w), dim=1)
img_names_ALL.append(img_names[0].split('/')[(- 1)].split('.')[0])
MRI = to_var(images)
labels = labels.numpy()
idx = np.where((labels > 0.0))
labels[idx] = 1.0
labels = torch.from_numpy(labels)
labels = labels.type(torch.FloatTensor)
Segmentation = to_var(labels)
segmentation_prediction = net(MRI)
pred_y = softMax(segmentation_prediction)
Segmentation_planes = getOneHotSegmentation(Segmentation)
segmentation_prediction_ones = predToSegmentation(pred_y)
(DicesN, Dices1) = dice(segmentation_prediction_ones, Segmentation_planes)
Dice1[i] = Dices1.data
printProgressBar(total, total, done='[Inference] Segmentation Done !')
ValDice1 = DicesToDice(Dice1)
return [ValDice1]
|
def append(x, start, freq, precision):
'Encodes a symbol with range [start, start + freq). All frequencies are\n assumed to sum to "1 << precision", and the resulting bits get written to\n x.'
if (x[0] >= (((rans_l >> precision) << 32) * freq)):
x = ((x[0] >> 32), ((x[0] & tail_bits), x[1]))
return (((((x[0] // freq) << precision) + (x[0] % freq)) + start), x[1])
|
def pop(x_, precision):
'Advances in the bit stream by "popping" a single symbol with range start\n "start" and frequency "freq".'
cf = (x_[0] & ((1 << precision) - 1))
def pop(start, freq):
x = ((((freq * (x_[0] >> precision)) + cf) - start), x_[1])
return ((((x[0] << 32) | x[1][0]), x[1][1]) if (x[0] < rans_l) else x)
return (cf, pop)
|
def append_symbol(statfun, precision):
def append_(x, symbol):
(start, freq) = statfun(symbol)
return append(x, start, freq, precision)
return append_
|
def pop_symbol(statfun, precision):
def pop_(x):
(cf, pop_fun) = pop(x, precision)
(symbol, (start, freq)) = statfun(cf)
return (pop_fun(start, freq), symbol)
return pop_
|
def flatten(x):
'Flatten a rans state x into a 1d numpy array.'
(out, x) = ([(x[0] >> 32), x[0]], x[1])
while x:
(x_head, x) = x
out.append(x_head)
return np.asarray(out, dtype=np.uint32)
|
def unflatten(arr):
'Unflatten a 1d numpy array into a rans state.'
return (((int(arr[0]) << 32) | int(arr[1])), reduce((lambda tl, hd: (int(hd), tl)), reversed(arr[2:]), ()))
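# Round-trip sketch: rans_l and tail_bits are module constants assumed from the usual
# rANS setup (rans_l = 1 << 31, tail_bits = (1 << 32) - 1); the statistics below give
# each of 4 symbols a uniform range of width 64 under precision 8 (4 * 64 = 1 << 8).
rans_l = 1 << 31
tail_bits = (1 << 32) - 1
precision = 8
enc = append_symbol((lambda s: ((s * 64), 64)), precision)
dec = pop_symbol((lambda cf: ((cf // 64), (((cf // 64) * 64), 64))), precision)
state = (rans_l, ())
for s in [3, 1, 2, 0]:
    state = enc(state, s)
decoded = []
for _ in range(4):
    (state, s) = dec(state)
    decoded.append(s)
assert decoded == [0, 2, 1, 3]  # stack (LIFO) order: symbols pop in reverse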
|
def run(args, kwargs):
args.snap_dir = snap_dir = 'snapshots/discrete_logisticcifar10_flows_2_levels_3__2019-09-27_13_08_49/'
(train_loader, val_loader, test_loader, args) = load_dataset(args, **kwargs)
final_model = torch.load((snap_dir + 'a.model'))
if hasattr(final_model, 'module'):
final_model = final_model.module
from models.backround import SmoothRound
for module in final_model.modules():
if isinstance(module, SmoothRound):
module._round_decay = 1.0
exp_dir = (snap_dir + 'partials/')
os.makedirs(exp_dir, exist_ok=True)
images = []
with torch.no_grad():
for (data, _) in test_loader:
if args.cuda:
data = data.cuda()
for i in range(len(data)):
(_, _, _, pz, z, pys, ys, ldj) = final_model.forward(data[i:(i + 1)])
for j in range((len(ys) + 1)):
x_recon = final_model.inverse(z, ys[(len(ys) - j):])
images.append(x_recon.float())
if (i == 10):
break
break
for j in range((len(ys) + 1)):
grid = make_grid(torch.stack(images[j::(len(ys) + 1)], dim=0).squeeze(), nrow=11, padding=0, normalize=True, range=None, scale_each=False, pad_value=0)
imageio.imwrite((exp_dir + 'loaded{j}.png'.format(j=j)), grid.cpu().numpy().transpose(1, 2, 0))
|
def run(args, kwargs):
print('\nMODEL SETTINGS: \n', args, '\n')
print('Random Seed: ', args.manual_seed)
if (('imagenet' in args.dataset) and (args.evaluate_interval_epochs > 5)):
args.evaluate_interval_epochs = 5
args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_')
args.model_signature = args.model_signature.replace(':', '_')
snapshots_path = os.path.join(args.out_dir, (((args.variable_type + '_') + args.distribution_type) + args.dataset))
snap_dir = snapshots_path
snap_dir += (((('_' + 'flows_') + str(args.n_flows)) + '_levels_') + str(args.n_levels))
snap_dir = (((snap_dir + '__') + args.model_signature) + '/')
args.snap_dir = snap_dir
if (not os.path.exists(snap_dir)):
os.makedirs(snap_dir)
with open((snap_dir + 'log.txt'), 'a') as ff:
print('\nMODEL SETTINGS: \n', args, '\n', file=ff)
torch.save(args, (snap_dir + '.config'))
(train_loader, val_loader, test_loader, args) = load_dataset(args, **kwargs)
print(args.input_size)
import models.Model as Model
model = Model.Model(args)
args.device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
model.set_temperature(args.temperature)
model.enable_hard_round(args.hard_round)
model_sample = model
for (batch_idx, (data, _)) in enumerate(train_loader):
model(data)
break
if (torch.cuda.device_count() > 1):
print("Let's use", torch.cuda.device_count(), 'GPUs!')
model = torch.nn.DataParallel(model, dim=0)
model.to(args.device)
def lr_lambda(epoch):
return (min(1.0, ((epoch + 1) / args.warmup)) * np.power(args.lr_decay, epoch))
optimizer = optim.Adamax(model.parameters(), lr=args.learning_rate, eps=1e-07)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=(- 1))
train_bpd = []
val_bpd = []
best_val_bpd = np.inf
best_train_bpd = np.inf
epoch = 0
train_times = []
model.eval()
model.train()
for epoch in range(1, (args.epochs + 1)):
t_start = time.time()
scheduler.step()
(tr_loss, tr_bpd) = train(epoch, train_loader, model, optimizer, args)
train_bpd.append(tr_bpd)
train_times.append((time.time() - t_start))
print(('One training epoch took %.2f seconds' % (time.time() - t_start)))
if ((epoch < 25) or ((epoch % args.evaluate_interval_epochs) == 0)):
(v_loss, v_bpd) = evaluate(train_loader, val_loader, model, model_sample, args, epoch=epoch, file=(snap_dir + 'log.txt'))
val_bpd.append(v_bpd)
if (np.mean(tr_bpd) < best_train_bpd):
best_train_bpd = np.mean(tr_bpd)
best_val_bpd = v_bpd
torch.save(model.module, (snap_dir + 'a.model'))
torch.save(optimizer, (snap_dir + 'a.optimizer'))
print('->model saved<-')
print('(BEST: train bpd {:.4f}, test bpd {:.4f})\n'.format(best_train_bpd, best_val_bpd))
if math.isnan(v_loss):
raise ValueError('NaN encountered!')
train_bpd = np.hstack(train_bpd)
val_bpd = np.array(val_bpd)
train_times = np.array(train_times)
mean_train_time = np.mean(train_times)
std_train_time = np.std(train_times, ddof=1)
print(('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time)))
final_model = torch.load((snap_dir + 'a.model'))
(test_loss, test_bpd) = evaluate(train_loader, test_loader, final_model, final_model, args, epoch=epoch, file=(snap_dir + 'test_log.txt'))
print(('Test loss / bpd: %.2f / %.2f' % (test_loss, test_bpd)))
|
class RoundStraightThrough(torch.autograd.Function):
def __init__(self):
super().__init__()
@staticmethod
def forward(ctx, input):
rounded = torch.round(input, out=None)
return rounded
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input
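# Usage sketch: the forward pass rounds, while backward passes gradients through
# unchanged, replacing the almost-everywhere-zero derivative of round() with 1.
import torch
x = torch.tensor([0.4, 1.7], requires_grad=True)
y = RoundStraightThrough.apply(x)
y.sum().backward()
assert torch.equal(y, torch.tensor([0.0, 2.0])) and torch.equal(x.grad, torch.ones(2))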
|
def _stacked_sigmoid(x, temperature, n_approx=3):
x_ = (x - 0.5)
rounded = torch.round(x_)
x_remainder = (x_ - rounded)
size = x_.size()
x_remainder = x_remainder.view((size + (1,)))
translation = (torch.arange(n_approx) - (n_approx // 2))
translation = translation.to(device=x.device, dtype=x.dtype)
translation = translation.view((([1] * len(size)) + [len(translation)]))
out = torch.sigmoid(((x_remainder - translation) / temperature)).sum(dim=(- 1))
return ((out + rounded) - (n_approx // 2))
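# Numeric sketch (hypothetical inputs): away from the half-integer steps, the stacked
# sigmoids with a low temperature closely match torch.round.
import torch
x = torch.tensor([0.2, 0.8, 1.2])
assert torch.allclose(_stacked_sigmoid(x, 0.05, n_approx=3), torch.round(x), atol=0.01)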
|
class SmoothRound(Base):
def __init__(self):
self._temperature = None
self._n_approx = None
super().__init__()
self.hard_round = None
@property
def temperature(self):
return self._temperature
@temperature.setter
def temperature(self, value):
self._temperature = value
if (self._temperature <= 0.05):
self._n_approx = 1
elif (0.05 < self._temperature < 0.13):
self._n_approx = 3
else:
self._n_approx = 5
def forward(self, x):
assert (self._temperature is not None)
assert (self._n_approx is not None)
assert (self.hard_round is not None)
if (self.temperature <= 0.25):
h = _stacked_sigmoid(x, self.temperature, n_approx=self._n_approx)
else:
h = x
if self.hard_round:
h = _round_straightthrough(h)
return h
|
class StochasticRound(Base):
def __init__(self):
super().__init__()
self.hard_round = None
def forward(self, x):
u = torch.rand_like(x)
h = ((x + u) - 0.5)
if self.hard_round:
h = _round_straightthrough(h)
return h
|
class BackRound(Base):
def __init__(self, args, inverse_bin_width):
'\n        BackRound is an approximation to Round that allows for backpropagation.\n\n        Approximate the round function using a sum of translated sigmoids.\n        The temperature determines how well the round function is approximated,\n        i.e., a lower temperature corresponds to a better approximation, at\n        the cost of more vanishing gradients.\n\n        BackRound supports the following settings:\n        * By setting hard to True and temperature > 0.25, BackRound\n          reduces to a round function with a straight-through gradient\n          estimator.\n        * When using 0 < temperature <= 0.25 and hard = True, the\n          output in the forward pass is equivalent to a round function, but the\n          gradient is approximated by the gradient of a sum of sigmoids.\n        * When using hard = False, the output is not constrained to integers.\n        * When temperature > 0.25 and hard = False, BackRound reduces to\n          the identity function.\n\n        Arguments\n        ---------\n        temperature: float\n            Temperature used for the stacked sigmoid approximation. If temperature\n            is greater than 0.25, the approximation reduces to the identity\n            function.\n        hard: bool\n            If hard is True, a (hard) round is applied before returning. The\n            gradient for this is approximated using the straight-through\n            estimator.\n        '
super().__init__()
self.inverse_bin_width = inverse_bin_width
self.round_approx = args.round_approx
if (args.round_approx == 'smooth'):
self.round = SmoothRound()
elif (args.round_approx == 'stochastic'):
self.round = StochasticRound()
else:
raise ValueError
def forward(self, x):
if ((self.round_approx == 'smooth') or (self.round_approx == 'stochastic')):
h = (x * self.inverse_bin_width)
h = self.round(h)
return (h / self.inverse_bin_width)
else:
raise ValueError
|
class Conv2dReLU(Base):
def __init__(self, n_inputs, n_outputs, kernel_size=3, stride=1, padding=0, bias=True):
super().__init__()
self.nn = nn.Conv2d(n_inputs, n_outputs, kernel_size, stride=stride, padding=padding, bias=bias)
def forward(self, x):
h = self.nn(x)
y = F.relu(h)
return y
|
class ResidualBlock(Base):
def __init__(self, n_channels, kernel, Conv2dAct):
super().__init__()
self.nn = torch.nn.Sequential(Conv2dAct(n_channels, n_channels, kernel, padding=1), torch.nn.Conv2d(n_channels, n_channels, kernel, padding=1))
def forward(self, x):
h = self.nn(x)
h = F.relu((h + x))
return h
|
class DenseLayer(Base):
def __init__(self, args, n_inputs, growth, Conv2dAct):
super().__init__()
conv1x1 = Conv2dAct(n_inputs, n_inputs, kernel_size=1, stride=1, padding=0, bias=True)
self.nn = torch.nn.Sequential(conv1x1, Conv2dAct(n_inputs, growth, kernel_size=3, stride=1, padding=1, bias=True))
def forward(self, x):
h = self.nn(x)
h = torch.cat([x, h], dim=1)
return h
|
class DenseBlock(Base):
def __init__(self, args, n_inputs, n_outputs, kernel, Conv2dAct):
super().__init__()
depth = args.densenet_depth
future_growth = (n_outputs - n_inputs)
layers = []
for d in range(depth):
growth = (future_growth // (depth - d))
layers.append(DenseLayer(args, n_inputs, growth, Conv2dAct))
n_inputs += growth
future_growth -= growth
self.nn = torch.nn.Sequential(*layers)
def forward(self, x):
return self.nn(x)
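# Usage sketch (hypothetical args namespace, not the real argument parser): a DenseBlock
# grows from n_inputs to n_outputs channels over densenet_depth concatenating layers.
import torch
from types import SimpleNamespace
dense_args = SimpleNamespace(densenet_depth=4)
block = DenseBlock(dense_args, n_inputs=8, n_outputs=24, kernel=3, Conv2dAct=Conv2dReLU)
assert block(torch.randn(1, 8, 16, 16)).shape == (1, 24, 16, 16)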
|
class Identity(Base):
def __init__(self):
super().__init__()
def forward(self, x):
return x
|
class NN(Base):
def __init__(self, args, c_in, c_out, height, width, nn_type, kernel=3):
super().__init__()
Conv2dAct = Conv2dReLU
n_channels = args.n_channels
if (nn_type == 'shallow'):
    if (args.network1x1 == 'standard'):
        conv1x1 = Conv2dAct(n_channels, n_channels, kernel_size=1, stride=1, padding=0, bias=False)
    else:
        raise ValueError('unknown network1x1 setting: {}'.format(args.network1x1))
    layers = [Conv2dAct(c_in, n_channels, kernel, padding=1), conv1x1]
    layers += [torch.nn.Conv2d(n_channels, c_out, kernel, padding=1)]
elif (nn_type == 'resnet'):
layers = [Conv2dAct(c_in, n_channels, kernel, padding=1), ResidualBlock(n_channels, kernel, Conv2dAct), ResidualBlock(n_channels, kernel, Conv2dAct)]
layers += [torch.nn.Conv2d(n_channels, c_out, kernel, padding=1)]
elif (nn_type == 'densenet'):
layers = [DenseBlock(args=args, n_inputs=c_in, n_outputs=(n_channels + c_in), kernel=kernel, Conv2dAct=Conv2dAct)]
layers += [torch.nn.Conv2d((n_channels + c_in), c_out, kernel, padding=1)]
else:
raise ValueError
self.nn = torch.nn.Sequential(*layers)
if (not UNIT_TESTING):
self.nn[(- 1)].weight.data.zero_()
self.nn[(- 1)].bias.data.zero_()
def forward(self, x):
return self.nn(x)
|
class Base(torch.nn.Module):
'\n    The base class for modules; it provides helpers that toggle rounding-related\n    attributes (temperature, hard_round) on itself and all child modules.\n    '
def __init__(self):
super().__init__()
def _set_child_attribute(self, attr, value):
'Sets the module in rounding mode.\n\n This has any effect only on certain modules if variable type is\n discrete.\n\n Returns:\n Module: self\n '
if hasattr(self, attr):
setattr(self, attr, value)
for module in self.modules():
if hasattr(module, attr):
setattr(module, attr, value)
return self
def set_temperature(self, value):
self._set_child_attribute('temperature', value)
def enable_hard_round(self, mode=True):
self._set_child_attribute('hard_round', mode)
def disable_hard_round(self, mode=True):
self.enable_hard_round((not mode))
|
def compute_log_ps(pxs, xs, args):
inverse_bin_width = (2.0 ** args.n_bits)
log_pxs = []
for (px, x) in zip(pxs, xs):
if (args.variable_type == 'discrete'):
if (args.distribution_type == 'logistic'):
log_px = log_discretized_logistic(x, *px, inverse_bin_width=inverse_bin_width)
elif (args.distribution_type == 'normal'):
log_px = log_discretized_normal(x, *px, inverse_bin_width=inverse_bin_width)
elif (args.variable_type == 'continuous'):
if (args.distribution_type == 'logistic'):
log_px = log_logistic(x, *px)
elif (args.distribution_type == 'normal'):
log_px = log_normal(x, *px)
elif (args.distribution_type == 'steplogistic'):
x = (_round_straightthrough((x * inverse_bin_width)) / inverse_bin_width)
log_px = log_discretized_logistic(x, *px, inverse_bin_width=inverse_bin_width)
log_pxs.append(torch.sum(log_px, dim=[1, 2, 3]))
return log_pxs
|
def compute_log_pz(pz, z, args):
inverse_bin_width = (2.0 ** args.n_bits)
if (args.variable_type == 'discrete'):
if (args.distribution_type == 'logistic'):
if (args.n_mixtures == 1):
log_pz = log_discretized_logistic(z, pz[0], pz[1], inverse_bin_width=inverse_bin_width)
else:
log_pz = log_mixture_discretized_logistic(z, pz[0], pz[1], pz[2], inverse_bin_width=inverse_bin_width)
elif (args.distribution_type == 'normal'):
log_pz = log_discretized_normal(z, *pz, inverse_bin_width=inverse_bin_width)
elif (args.variable_type == 'continuous'):
if (args.distribution_type == 'logistic'):
log_pz = log_logistic(z, *pz)
elif (args.distribution_type == 'normal'):
if (args.n_mixtures == 1):
log_pz = log_normal(z, *pz)
else:
log_pz = log_mixture_normal(z, *pz)
elif (args.distribution_type == 'steplogistic'):
    z = (_round_straightthrough((z * inverse_bin_width)) / inverse_bin_width)
    log_pz = log_discretized_logistic(z, *pz, inverse_bin_width=inverse_bin_width)
log_pz = torch.sum(log_pz, dim=[1, 2, 3])
return log_pz
|
def compute_loss_function(pz, z, pys, ys, ldj, args):
'\n    Computes the negative log-likelihood loss, averaged over the batch dimension.\n    :param pz: parameters of the top-level prior p(z)\n    :param z: top-level latent variable\n    :param pys: parameters of the intermediate priors p(y_i)\n    :param ys: intermediate latent variables\n    :param ldj: log det jacobian of the flow\n    :param args: global parameter settings\n    :return: loss, bpd, bpd_per_prior\n    '
batch_size = z.size(0)
(loss_array, bpd_array, bpd_per_prior_array) = compute_loss_array(pz, z, pys, ys, ldj, args)
loss = torch.mean(loss_array)
bpd = torch.mean(bpd_array).item()
bpd_per_prior = [torch.mean(x) for x in bpd_per_prior_array]
return (loss, bpd, bpd_per_prior)
|
def convert_bpd(log_p, input_size):
return ((- log_p) / (np.prod(input_size) * np.log(2.0)))
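# Worked example: a 3x32x32 image has D = 3072 dimensions, so a log-likelihood of
# -D * ln(2) nats corresponds to exactly 1 bit per dimension.
import numpy as np
assert abs(convert_bpd((- (3072 * np.log(2.0))), (3, 32, 32)) - 1.0) < 1e-12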
|
def compute_loss_array(pz, z, pys, ys, ldj, args):
'\n    Computes per-example negative log-likelihood and bits-per-dimension, with no\n    reduction over the batch dimension.\n    :param pz: parameters of the top-level prior p(z)\n    :param z: top-level latent variable\n    :param pys: parameters of the intermediate priors p(y_i)\n    :param ys: intermediate latent variables\n    :param ldj: log det jacobian of the flow\n    :param args: global parameter settings\n    :return: loss_array, bpd_array, bpd_per_prior\n    '
bpd_per_prior = []
log_pz = compute_log_pz(pz, z, args)
bpd_per_prior.append(convert_bpd(log_pz.detach(), args.input_size))
log_p = log_pz
if ys:
log_pys = compute_log_ps(pys, ys, args)
for log_py in log_pys:
log_p += log_py
bpd_per_prior.append(convert_bpd(log_py.detach(), args.input_size))
log_p += ldj
loss = (- log_p)
bpd = convert_bpd(log_p.detach(), args.input_size)
return (loss, bpd, bpd_per_prior)
|
def calculate_loss(pz, z, pys, ys, ldj, loss_aux, args):
    # loss_aux is accepted for interface compatibility but unused here;
    # compute_loss_function takes (pz, z, pys, ys, ldj, args).
    return compute_loss_function(pz, z, pys, ys, ldj, args)
|
def train(epoch, train_loader, model, opt, args):
model.train()
train_loss = np.zeros(len(train_loader))
train_bpd = np.zeros(len(train_loader))
num_data = 0
for (batch_idx, (data, _)) in enumerate(train_loader):
data = data.view((- 1), *args.input_size)
data = data.to(args.device)
opt.zero_grad()
(loss, bpd, bpd_per_prior, pz, z, pys, py, ldj) = model(data)
loss = torch.mean(loss)
bpd = torch.mean(bpd)
bpd_per_prior = [torch.mean(i) for i in bpd_per_prior]
loss.backward()
loss = loss.item()
train_loss[batch_idx] = loss
train_bpd[batch_idx] = bpd
ldj = ((torch.mean(ldj).item() / np.prod(args.input_size)) / np.log(2))
opt.step()
num_data += len(data)
if ((batch_idx % args.log_interval) == 0):
perc = ((100.0 * batch_idx) / len(train_loader))
tmp = 'Epoch: {:3d} [{:5d}/{:5d} ({:2.0f}%)] \tLoss: {:11.6f}\tbpd: {:8.6f}\tbits ldj: {:8.6f}'
print(tmp.format(epoch, num_data, len(train_loader.sampler), perc, loss, bpd, ldj))
print('z min: {:8.3f}, max: {:8.3f}'.format((torch.min(z).item() * 256), (torch.max(z).item() * 256)))
print('z bpd: {:.3f}'.format(bpd_per_prior[0]))
for i in range(1, len(bpd_per_prior)):
print('y{} bpd: {:.3f}'.format((i - 1), bpd_per_prior[i]))
print('pz mu', np.mean(pz[0].data.cpu().numpy(), axis=(0, 1, 2, 3)))
print('pz logs ', np.mean(pz[1].data.cpu().numpy(), axis=(0, 1, 2, 3)))
if (len(pz) == 3):
print('pz pi ', np.mean(pz[2].data.cpu().numpy(), axis=(0, 1, 2, 3)))
for (i, py) in enumerate(pys):
print('py{} mu '.format(i), np.mean(py[0].data.cpu().numpy(), axis=(0, 1, 2, 3)))
print('py{} logs '.format(i), np.mean(py[1].data.cpu().numpy(), axis=(0, 1, 2, 3)))
from utils.visual_evaluation import plot_images
import os
if (not os.path.exists((args.snap_dir + 'training/'))):
os.makedirs((args.snap_dir + 'training/'))
print('====> Epoch: {:3d} Average train loss: {:.4f}, average bpd: {:.4f}'.format(epoch, (train_loss.sum() / len(train_loader)), (train_bpd.sum() / len(train_loader))))
return (train_loss, train_bpd)
|
def evaluate(train_loader, val_loader, model, model_sample, args, testing=False, file=None, epoch=0):
model.eval()
loss_type = 'bpd'
def analyse(data_loader, plot=False):
bpds = []
batch_idx = 0
with torch.no_grad():
for (data, _) in data_loader:
batch_idx += 1
if args.cuda:
data = data.cuda()
data = data.view((- 1), *args.input_size)
(loss, batch_bpd, bpd_per_prior, pz, z, pys, ys, ldj) = model(data)
loss = torch.mean(loss).item()
batch_bpd = torch.mean(batch_bpd).item()
bpds.append(batch_bpd)
bpd = np.mean(bpds)
with torch.no_grad():
if ((not testing) and plot):
x_sample = model_sample.sample(n=100)
try:
plot_reconstructions(x_sample, bpd, loss_type, epoch, args)
except Exception:
    print('Not plotting')
return bpd
bpd_train = analyse(train_loader)
bpd_val = analyse(val_loader, plot=True)
with open(file, 'a') as ff:
msg = 'epoch {}\ttrain bpd {:.3f}\tval bpd {:.3f}\t'.format(epoch, bpd_train, bpd_val)
print(msg, file=ff)
loss = ((bpd_val * np.prod(args.input_size)) * np.log(2.0))
bpd = bpd_val
file = None
with torch.no_grad():
if testing:
test_data = val_loader.dataset.data_tensor
if args.cuda:
test_data = test_data.cuda()
print('Computing log-likelihood on test set')
model.eval()
log_likelihood = analyse(test_data)
else:
log_likelihood = None
nll_bpd = None
if (file is None):
if testing:
print('====> Test set loss: {:.4f}'.format(loss))
print('====> Test set log-likelihood: {:.4f}'.format(log_likelihood))
print('====> Test set bpd (elbo): {:.4f}'.format(bpd))
print('====> Test set bpd (log-likelihood): {:.4f}'.format((log_likelihood / (np.prod(args.input_size) * np.log(2.0)))))
else:
print('====> Validation set loss: {:.4f}'.format(loss))
print('====> Validation set bpd: {:.4f}'.format(bpd))
else:
with open(file, 'a') as ff:
if testing:
print('====> Test set loss: {:.4f}'.format(loss), file=ff)
print('====> Test set log-likelihood: {:.4f}'.format(log_likelihood), file=ff)
print('====> Test set bpd: {:.4f}'.format(bpd), file=ff)
print('====> Test set bpd (log-likelihood): {:.4f}'.format((log_likelihood / (np.prod(args.input_size) * np.log(2.0)))), file=ff)
else:
print('====> Validation set loss: {:.4f}'.format(loss), file=ff)
print('====> Validation set bpd: {:.4f}'.format((loss / (np.prod(args.input_size) * np.log(2.0)))), file=ff)
if (not testing):
return (loss, bpd)
else:
return (log_likelihood, nll_bpd)
|
def log_min_exp(a, b, epsilon=1e-08):
'\n Computes the log of exp(a) - exp(b) in a (more) numerically stable fashion.\n Using:\n log(exp(a) - exp(b))\n c + log(exp(a-c) - exp(b-c))\n a + log(1 - exp(b-a))\n And note that we assume b < a always.\n '
y = (a + torch.log(((1 - torch.exp((b - a))) + epsilon)))
return y
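# Numeric check (hypothetical values): log(exp(2) - exp(1)) computed directly agrees
# with the stabilised form a + log(1 - exp(b - a)).
import numpy as np
import torch
direct = float(np.log((np.exp(2.0) - np.exp(1.0))))
assert abs(float(log_min_exp(torch.tensor(2.0), torch.tensor(1.0))) - direct) < 0.0001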
|
def log_normal(x, mean, logvar):
logp = ((- 0.5) * logvar)
logp += ((- 0.5) * np.log((2 * PI)))
logp += ((((- 0.5) * (x - mean)) * (x - mean)) / torch.exp(logvar))
return logp
|
def log_mixture_normal(x, mean, logvar, pi):
x = x.view(x.size(0), x.size(1), x.size(2), x.size(3), 1)
logp_mixtures = log_normal(x, mean, logvar)
logp = torch.log((torch.sum((pi * torch.exp(logp_mixtures)), dim=(- 1)) + 1e-08))
return logp
|
def sample_normal(mean, logvar):
y = torch.randn_like(mean)
x = ((torch.exp((0.5 * logvar)) * y) + mean)
return x
|
def sample_mixture_normal(mean, logvar, pi):
(b, c, h, w, n_mixtures) = tuple(map(int, pi.size()))
pi = pi.view((((b * c) * h) * w), n_mixtures)
sampled_pi = torch.multinomial(pi, num_samples=1).view((- 1))
mean = mean.view((((b * c) * h) * w), n_mixtures)
mean = mean[(torch.arange((((b * c) * h) * w)), sampled_pi)].view(b, c, h, w)
logvar = logvar.view((((b * c) * h) * w), n_mixtures)
logvar = logvar[(torch.arange((((b * c) * h) * w)), sampled_pi)].view(b, c, h, w)
y = sample_normal(mean, logvar)
return y
|
def log_logistic(x, mean, logscale):
'\n pdf = sigma([x - mean] / scale) * [1 - sigma(...)] * 1/scale\n '
scale = torch.exp(logscale)
u = ((x - mean) / scale)
logp = ((F.logsigmoid(u) + F.logsigmoid((- u))) - logscale)
return logp
|
def sample_logistic(mean, logscale):
y = torch.rand_like(mean)
x = ((torch.exp(logscale) * torch.log((y / (1 - y)))) + mean)
return x
|
def log_discretized_logistic(x, mean, logscale, inverse_bin_width):
scale = torch.exp(logscale)
logp = log_min_exp(F.logsigmoid((((x + (0.5 / inverse_bin_width)) - mean) / scale)), F.logsigmoid((((x - (0.5 / inverse_bin_width)) - mean) / scale)))
return logp
|
def discretized_logistic_cdf(x, mean, logscale, inverse_bin_width):
scale = torch.exp(logscale)
cdf = torch.sigmoid((((x + (0.5 / inverse_bin_width)) - mean) / scale))
return cdf
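# Consistency sketch (hypothetical values): the discretized log-probability of a bin
# should equal the log of the CDF difference across that bin (up to the stabilising epsilon).
import torch
(x, mean, logscale) = (torch.tensor(0.25), torch.tensor(0.0), torch.tensor(-2.0))
inv_bw = 4.0
upper = discretized_logistic_cdf(x, mean, logscale, inv_bw)
lower = discretized_logistic_cdf((x - (1.0 / inv_bw)), mean, logscale, inv_bw)
assert abs(float(log_discretized_logistic(x, mean, logscale, inv_bw)) - float(torch.log((upper - lower)))) < 0.0001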
|