repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
nuts-ml | nuts-ml-master/nutsml/examples/keras_/autoencoder/conv_autoencoder.py | """
A simple convolutional autoencoder adapted from
https://blog.keras.io/building-autoencoders-in-keras.html
"""
from nutsml import KerasNetwork
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.models import Model
from runner import INPUT_SHAPE
def create_network():
    """Build the convolutional autoencoder and wrap it as a KerasNetwork.

    Encoder: three conv+maxpool stages down to a (4, 4, 8) code.
    Decoder: mirrored conv+upsampling stages back to a 28x28x1 image.
    """
    inp = Input(shape=INPUT_SHAPE)

    # --- encoder ---
    h = Conv2D(16, (3, 3), activation='relu', padding='same')(inp)
    h = MaxPooling2D((2, 2), padding='same')(h)
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(h)
    h = MaxPooling2D((2, 2), padding='same')(h)
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(h)
    code = MaxPooling2D((2, 2), padding='same')(h)
    # at this point the representation is (4, 4, 8) i.e. 128-dimensional

    # --- decoder ---
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(code)
    h = UpSampling2D((2, 2))(h)
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(h)
    h = UpSampling2D((2, 2))(h)
    # NOTE: no 'same' padding here on purpose, so the final upsampling
    # produces 28x28 again.
    h = Conv2D(16, (3, 3), activation='relu')(h)
    h = UpSampling2D((2, 2))(h)
    out = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(h)

    model = Model(inp, out)
    model.compile(optimizer='adadelta', loss='binary_crossentropy')
    return KerasNetwork(model, 'weights_conv_autoencoder.hd5')
| 1,312 | 36.514286 | 77 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/autoencoder/runner.py | """
Runs training and prediction.
Trains an autoencoder on MNIST and in the prediction phase shows
the original image, the decoded images and the difference.
"""
from __future__ import print_function
import numpy as np
from six.moves import zip, range
from nutsflow import *
from nutsml import *
NUM_EPOCHS = 10 # need more epochs for good results!
BATCH_SIZE = 128
INPUT_SHAPE = (28, 28, 1)
def create_network():
    """Build the autoencoder network defined in conv_autoencoder."""
    from conv_autoencoder import create_network as build
    return build()
def load_samples():
    """Return (train, test) lists of (image, image) pairs for autoencoding.

    Each sample pairs an MNIST image with itself, since the autoencoder's
    target is its own input.
    """
    from tensorflow.keras.datasets import mnist
    (x_train, _), (x_test, _) = mnist.load_data()
    h, w, c = INPUT_SHAPE
    x_train = x_train.reshape((len(x_train), h, w, c))
    x_test = x_test.reshape((len(x_test), h, w, c))
    return [(x, x) for x in x_train], [(x, x) for x in x_test]
@nut_function
def Diff(sample):
    """Append the absolute difference image to an (input, output) sample."""
    original, reconstruction = sample
    return original, reconstruction, abs(original - reconstruction)
def train():
    """Train the autoencoder for NUM_EPOCHS, keeping the best weights."""
    print('\n\nTRAIN...')
    to_unit = TransformImage((0, 1)).by('rerange', 0, 255, 0, 1, 'float32')
    batcher = (BuildBatch(BATCH_SIZE)
               .input(0, 'image', 'float32')
               .output(1, 'image', 'float32'))
    print('creating network and loading data...')
    network = create_network()
    train_samples, test_samples = load_samples()
    print('training...', len(train_samples), len(test_samples))
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)
        mean_loss = (train_samples >> PrintProgress(train_samples) >> to_unit >>
                     Shuffle(1000) >> batcher >> network.train() >> Mean())
        print("train loss : {:.6f}".format(mean_loss))
        # lower loss is better, hence isloss=True
        network.save_best(mean_loss, isloss=True)
def predict():
    """Reconstruct test images and show input, output and their difference."""
    print('\n\nPREDICT...')
    to_unit = TransformImage((0, 1)).by('rerange', 0, 255, 0, 1, 'float32')
    batcher = BuildBatch(BATCH_SIZE).input(0, 'image', 'float32')
    viewer = ViewImage((0, 1, 2), pause=0.5,
                       titles=['Input', 'Output', 'Difference'])
    print('creating network ...')
    network = create_network()
    network.load_weights()
    print('loading data...')
    _, test_samples = load_samples()
    print('predicting...')
    reconstructions = test_samples >> to_unit >> batcher >> network.predict()
    # prediction is lazy, so Take(100) below limits the whole pipeline
    (test_samples >> Take(100) >> to_unit >> Get(0) >> Zip(reconstructions) >>
     Diff() >> viewer >> Consume())
def view():
    """Show a few raw training samples together with their column types."""
    print('\n\nVIEW...')
    samples, _ = load_samples()
    (samples >> Take(10) >> PrintColType() >> ViewImage(0, pause=1) >>
     Consume())
# Demo entry point: inspect a few samples, then train, then show predictions.
if __name__ == "__main__":
view()
train()
predict()
| 2,640 | 26.8 | 78 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/cnn_train.py | """
.. module:: cnn_train
:synopsis: Example nuts-ml pipeline for training a CNN on MNIST
This is code is based on a Keras example (see here)
https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
to train a Multi-layer perceptron on the MNIST data and modified to
use nuts for the data-preprocessing.
"""
from __future__ import print_function
from six.moves import zip, range
from nutsflow import PrintProgress, Collect, Unzip, Shuffle, Pick, Mean, NOP
from nutsml import (KerasNetwork, TransformImage, AugmentImage, BuildBatch,
Boost, PrintColType, PlotLines)
PICK = 0.1 # Pick 10% of the data for a quick trial
NUM_EPOCHS = 10
INPUT_SHAPE = (28, 28, 1)
BATCH_SIZE = 128
NUM_CLASSES = 10
def load_samples():
    """Load MNIST and return (train, test) lists of (image, label) pairs."""
    from tensorflow.python.keras.datasets import mnist
    h, w, c = INPUT_SHAPE
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # NOTE(review): images are reshaped to (h, w) without the channel axis
    # from INPUT_SHAPE; presumably nutsml's 'image' batch builder adds it —
    # confirm against BuildBatch.
    x_train = x_train.reshape(len(x_train), h, w)
    x_test = x_test.reshape(len(x_test), h, w)
    return list(zip(x_train, y_train)), list(zip(x_test, y_test))
def create_network():
    """Build a small CNN classifier for MNIST wrapped as a KerasNetwork."""
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, Flatten
    from tensorflow.keras.layers import Conv2D, MaxPooling2D

    net = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=INPUT_SHAPE),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(NUM_CLASSES, activation='softmax'),
    ])
    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    return KerasNetwork(net, 'cnn_weights.hd5')
def train():
    """Train the CNN on a fraction (PICK) of MNIST with augmentation and
    save the best weights by test accuracy."""
    from tensorflow.keras.metrics import categorical_accuracy
    print('creating network ...')
    network = create_network()
    print('loading data...')
    train_samples, test_samples = load_samples()

    augment = (AugmentImage(0)
               .by('identical', 1)
               .by('translate', 0.5, [-3, +3], [-3, +3])
               .by('rotate', 0.5, [-5, +5])
               .by('shear', 0.5, [0, 0.2])
               .by('elastic', 0.5, [5, 5], [100, 100], [0, 100]))
    to_unit = (TransformImage(0)
               .by('rerange', 0, 255, 0, 1, 'float32'))
    batcher = (BuildBatch(BATCH_SIZE, prefetch=0)
               .input(0, 'image', 'float32')
               .output(1, 'one_hot', 'uint8', NUM_CLASSES))
    plot = PlotLines((0, 1), layout=(2, 1), every_sec=1)

    print('training...', NUM_EPOCHS)
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)
        losses, accuracies = (train_samples >> PrintProgress(train_samples) >>
                              Pick(PICK) >> augment >> to_unit >> Shuffle(100) >>
                              batcher >> network.train() >> plot >> Unzip())
        print('train loss : {:.6f}'.format(losses >> Mean()))
        print('train acc : {:.1f}'.format(100 * (accuracies >> Mean())))
        test_acc = (test_samples >> to_unit >> batcher >>
                    network.evaluate([categorical_accuracy]))
        print('test acc : {:.1f}'.format(100 * test_acc))
        network.save_best(test_acc, isloss=False)
# Script entry point: run the full training loop.
if __name__ == "__main__":
train()
| 3,378 | 34.568421 | 78 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/write_images.py | """
.. module:: write_images
:synopsis: Example for writing of image data
"""
from six.moves import zip
from nutsflow import Take, Consume, Enumerate, Zip, Format, Get, Print
from nutsml import WriteImage
def load_samples():
    """Load MNIST via Keras and return (train, test) (image, label) lists."""
    from keras.datasets import mnist
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    train = list(zip(X_train, y_train))
    test = list(zip(X_test, y_test))
    return train, test
if __name__ == '__main__':
    train_samples, _ = load_samples()
    imagepath = 'images/*.png'
    # name each file '<label>/img<index>', e.g. '7/img0'
    labels = train_samples >> Get(1)
    names = Enumerate() >> Zip(labels) >> Format('{1}/img{0}')
    names = names >> Print()
    train_samples >> Take(30) >> WriteImage(0, imagepath, names) >> Consume()
| 681 | 28.652174 | 80 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/mnist/mlp_train.py | """
.. module:: mlp_train
:synopsis: Example nuts-ml pipeline for training and evaluation
This is code is based on a Keras example (see here)
https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
to train a Multi-layer perceptron on the MNIST data and modified to
use nuts for the data-preprocessing.
"""
from __future__ import print_function
from six.moves import zip, range
from nutsflow import PrintProgress, Collect, Unzip, Mean
from nutsml import (KerasNetwork, TransformImage, BuildBatch, PlotLines,
PrintType)
NUM_EPOCHS = 5
BATCH_SIZE = 128
NUM_CLASSES = 10
def load_samples():
    """Load MNIST and return (train, test) lists of (image, int label)."""
    from tensorflow.python.keras.datasets import mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    return (list(zip(x_train, map(int, y_train))),
            list(zip(x_test, map(int, y_test))))
def create_network():
    """Build a two-hidden-layer MLP (784 -> 512 -> 512 -> 10)."""
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, Activation

    net = Sequential([
        Dense(512, input_shape=(784,)),
        Activation('relu'),
        Dropout(0.2),
        Dense(512),
        Activation('relu'),
        Dropout(0.2),
        Dense(10),
        Activation('softmax'),
    ])
    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    return KerasNetwork(net, 'mlp_weights.hd5')
def train():
    """Train the MLP on flattened MNIST digits, saving the best weights."""
    from tensorflow.keras.metrics import categorical_accuracy

    # register a custom transformation that flattens 28x28 images to 784
    TransformImage.register('flatten', lambda img: img.flatten())
    preprocess = (TransformImage(0)
                  .by('rerange', 0, 255, 0, 1, 'float32')
                  .by('flatten'))
    batcher = (BuildBatch(BATCH_SIZE)
               .input(0, 'vector', 'float32')
               .output(1, 'one_hot', 'uint8', NUM_CLASSES))
    plot = PlotLines((0, 1), layout=(2, 1), every_sec=1)

    print('loading data...')
    train_samples, test_samples = load_samples()
    print('creating network ...')
    network = create_network()
    print('training...', NUM_EPOCHS)
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)
        losses, accuracies = (train_samples >> PrintProgress(train_samples) >>
                              preprocess >> batcher >>
                              network.train() >> plot >> Unzip())
        print('train loss : {:.6f}'.format(losses >> Mean()))
        print('train acc : {:.1f}'.format(100 * (accuracies >> Mean())))
        test_acc = (test_samples >> preprocess >> batcher >>
                    network.evaluate([categorical_accuracy]))
        print('test acc : {:.1f}'.format(100 * test_acc))
        network.save_best(test_acc, isloss=False)
# Script entry point: run the full training loop.
if __name__ == "__main__":
train()
| 2,820 | 31.056818 | 73 | py |
nuts-ml | nuts-ml-master/nutsml/examples/keras_/cifar/cnn_train.py | """
.. module:: mlp_view_misclassified
:synopsis: Example for showing misclassified examples
"""
from __future__ import print_function
import pickle
import os.path as osp
from six.moves import zip, map, range
from nutsflow import PrintProgress, Zip, Unzip, Pick, Shuffle, Mean
from nutsml import (KerasNetwork, TransformImage, AugmentImage, BuildBatch,
SplitRandom, PlotLines, PrintType)
PICK = 0.1 # Pick 10% of the data for a quick trial
NUM_EPOCHS = 10
BATCH_SIZE = 128
NUM_CLASSES = 10
INPUT_SHAPE = (32, 32, 3)
def load_samples():
    """Load CIFAR-10 and return (train, test) lists of (image, int label)."""
    from tensorflow.python.keras.datasets import cifar10
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    return (list(zip(x_train, map(int, y_train))),
            list(zip(x_test, map(int, y_test))))
def load_names():
    """Download the CIFAR-10 metadata and return the list of class names."""
    from tensorflow.python.keras.utils.data_utils import get_file
    origin = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    path = get_file('cifar-10-batches-py', origin=origin, untar=True)
    with open(osp.join(path, 'batches.meta'), 'rb') as f:
        meta = pickle.load(f)
    return meta['label_names']
def create_network():
    """Build the CIFAR-10 CNN (two conv blocks + dense head)."""
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
    from tensorflow.keras.layers import Convolution2D, MaxPooling2D

    net = Sequential([
        Convolution2D(32, (3, 3), padding='same', input_shape=INPUT_SHAPE),
        Activation('relu'),
        Convolution2D(32, (3, 3)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.5),
        Convolution2D(64, (3, 3), padding='same'),
        Activation('relu'),
        Convolution2D(64, (3, 3)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.5),
        Flatten(),
        Dense(512),
        Activation('relu'),
        Dropout(0.5),
        Dense(NUM_CLASSES),
        Activation('softmax'),
    ])
    net.compile(loss='categorical_crossentropy',
                optimizer='adam', metrics=['accuracy'])
    return KerasNetwork(net, 'weights_cifar10.hd5')
def train():
    """Train the CIFAR-10 CNN with augmentation, validate every epoch and
    evaluate on the held-out test set at the end.

    Saves the weights with the best validation accuracy.
    """
    from tensorflow.keras.metrics import categorical_accuracy
    rerange = TransformImage(0).by('rerange', 0, 255, 0, 1, 'float32')
    build_batch = (BuildBatch(BATCH_SIZE)
                   .input(0, 'image', 'float32')
                   .output(1, 'one_hot', 'uint8', NUM_CLASSES))
    p = 0.1  # probability of applying each augmentation
    augment = (AugmentImage(0)
               .by('identical', 1.0)
               .by('elastic', p, [5, 5], [100, 100], [0, 100])
               .by('brightness', p, [0.7, 1.3])
               .by('color', p, [0.7, 1.3])
               .by('shear', p, [0, 0.1])
               .by('fliplr', p)
               .by('rotate', p, [-10, 10]))
    plot_eval = PlotLines((0, 1), layout=(2, 1), titles=['train', 'val'])
    print('creating network...')
    network = create_network()
    print('loading data...')
    train_samples, test_samples = load_samples()
    train_samples, val_samples = train_samples >> SplitRandom(0.8)
    print('training...', len(train_samples), len(val_samples))
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)
        t_loss, t_acc = (train_samples >> PrintProgress(train_samples) >>
                         Pick(PICK) >> augment >> rerange >> Shuffle(100) >>
                         build_batch >> network.train() >> Unzip())
        t_loss, t_acc = t_loss >> Mean(), t_acc >> Mean()
        print("train loss : {:.6f}".format(t_loss))
        print("train acc : {:.1f}".format(100 * t_acc))
        v_loss, v_acc = (val_samples >> rerange >>
                         build_batch >> network.validate() >> Unzip())
        # BUG FIX: was `v_acc >> Mean(), v_acc >> Mean()`, which reported the
        # mean validation *accuracy* as the validation loss.
        v_loss, v_acc = v_loss >> Mean(), v_acc >> Mean()
        print('val loss : {:.6f}'.format(v_loss))
        print('val acc : {:.1f}'.format(100 * v_acc))
        network.save_best(v_acc, isloss=False)
        plot_eval((t_acc, v_acc))
    print('testing...', len(test_samples))
    e_acc = (test_samples >> rerange >> build_batch >>
             network.evaluate([categorical_accuracy]))
    print('test acc : {:.1f}'.format(100 * e_acc))
# Script entry point: run the full training loop.
if __name__ == "__main__":
train()
| 4,395 | 33.614173 | 76 | py |
Excessive-Invariance | Excessive-Invariance-master/l0/invariant_l0_attack.py | import tensorflow as tf
import random
import time
import numpy as np
from keras.datasets import mnist
import sys
import os
import itertools
import sklearn.cluster
import scipy.misc
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
DTYPE = tf.float32
def make_model(filters=64, s1=5, s2=5, s3=3,
               d1=0, d2=0, fc=256,
               lr=1e-3, decay=1e-3):
    """Build the MNIST CNN.

    Returns (logits_model, softmax_model): the second wraps the first with a
    softmax activation and shares its weights; both are compiled with Adam
    and categorical cross-entropy.
    """
    base = Sequential()
    base.add(Conv2D(filters, kernel_size=(s1, s1),
                    activation='relu',
                    input_shape=(28, 28, 1)))
    base.add(MaxPooling2D(pool_size=(2, 2)))
    base.add(Conv2D(filters * 2, (s2, s2), activation='relu'))
    base.add(BatchNormalization())
    base.add(Conv2D(filters * 2, (s3, s3), activation='relu'))
    base.add(BatchNormalization())
    base.add(MaxPooling2D(pool_size=(2, 2)))
    base.add(Dropout(d1))
    base.add(Flatten())
    base.add(Dense(fc, activation='relu'))
    base.add(Dropout(d2))
    base.add(Dense(10))

    opt = keras.optimizers.Adam(lr, decay=decay)
    base.compile(loss=keras.losses.categorical_crossentropy,
                 optimizer=opt,
                 metrics=['accuracy'])

    softmaxed = Sequential()
    softmaxed.add(base)
    softmaxed.add(Activation('softmax'))
    softmaxed.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=opt,
                      metrics=['accuracy'])
    return base, softmaxed
def train_model(model, x_train, y_train, batch_size=256,
                epochs=20):
    """Fit `model` on one-hot encoded labels and return it."""
    targets = keras.utils.to_categorical(y_train, 10)
    model.fit(x_train, targets,
              batch_size=batch_size,
              epochs=epochs,
              shuffle=True,
              verbose=2,
              )
    return model
def show(img):
    """Render a 28x28 grayscale image (values in [0, 1]) as ASCII art."""
    palette = " .*#" + "#" * 100
    flat = img.flatten() * 3
    print("START")
    for row in range(28):
        cells = flat[row * 28:row * 28 + 28]
        print("".join(palette[int(round(v))] for v in cells))
def compute_mat(angle, sx, sy, ax, ay, tx, ty, da, db):
    """Compose a 3x3 affine transform: shear, scale, translate, rotate.

    Parameters da/db are darkening parameters carried along with transform
    records elsewhere; they do not affect the matrix.
    Returns (mat, inv) where inv is the matrix inverse of mat.
    """
    mat = np.eye(3)
    mat = np.dot(mat, [[1, ax, 0],
                       [ay, 1, 0],
                       [0, 0, 1]])       # shear
    mat = np.dot(mat, [[sx, 0, 0],
                       [0, sy, 0],
                       [0, 0, 1]])       # scale
    mat = np.dot(mat, [[1, 0, tx],
                       [0, 1, ty],
                       [0, 0, 1]])       # translate
    # BUG FIX: the original used +sin in BOTH off-diagonal entries, which is
    # not a rotation (its determinant is cos^2 - sin^2, not 1). A proper
    # rotation matrix has -sin in the upper-right entry.
    mat = np.dot(mat, [[np.cos(angle), -np.sin(angle), 0],
                       [np.sin(angle), np.cos(angle), 0],
                       [0, 0, 1]])       # rotate
    inv = np.linalg.inv(mat)
    return mat, inv
def cluster(mask):
    """Group the on-pixels of a (1, 28, 28, 1) mask into spatial clusters.

    Uses DBSCAN (eps=2, min_samples=5). Returns a 28x28 array with 0 for
    background, -1 for noise pixels and k >= 1 as cluster ids.
    """
    coords = np.array([(r, c) for r in range(28) for c in range(28)
                       if mask[0, r, c, 0]])
    model = sklearn.cluster.DBSCAN(2, min_samples=5)
    model.fit(coords)
    flat_idx = coords[:, 0] * 28 + coords[:, 1]
    labels = model.labels_
    out = np.zeros(28 * 28)
    out[flat_idx] = -1  # default every on-pixel to "noise"
    for lbl in range(max(labels) + 1):
        out[flat_idx[labels == lbl]] = lbl + 1
    return out.reshape((28, 28))
# Refine precomputed L0 attack candidates: score masked blends with an ACGAN
# generator and write an HTML report plus candidate pickle to /tmp.
# NOTE: relies on module globals x_train/y_train/x_test/y_test/use_idx set in
# the __main__ block, and on "best/*.npy" files produced by find_transform().
def improve_transform():
sys.path.append("gan/")
from gan.acgan_mnist import Generator
zin = tf.placeholder(tf.float32, [None, 74])
x_target = tf.placeholder(tf.float32, [None, 28, 28, 1])
generated_images, _ = Generator(None, zin)
generated_images = tf.reshape(generated_images, [-1, 28, 28, 1])
similarity_loss = tf.reduce_sum(np.abs(generated_images - x_target),axis=(1,2,3))
z_loss = 0.01*tf.reduce_sum(zin[:,10:]**2, axis=1)
total_loss = similarity_loss + z_loss
grads = tf.gradients(similarity_loss, [zin])[0]
sess = tf.Session()
touse = [x for x in tf.trainable_variables() if 'Generator' in x.name]
saver = tf.train.Saver(touse)
saver.restore(sess, 'gan/model/mnist-acgan-2')
keras.backend.set_learning_phase(False)
# Score how plausibly `image` looks like class `label` by fitting the GAN's
# latent code with 30 gradient steps and returning the best (lowest) loss.
def score(image, label):
#show(image)
zs = np.random.normal(0, 1, size=(128, 74))
zs[:,:10] = 0
zs[:,label] = 1
for _ in range(30):
#print("generate")
ell, l_sim, l_z, nimg, delta = sess.run((total_loss, similarity_loss,
z_loss, generated_images,grads),
{zin: zs,
x_target: image[np.newaxis,:,:,:]})
#print(l_sim)
#show(nimg)
zs[:,10:] -= delta[:,10:]*.01
return np.min(ell)
transformation_matrix = tf.placeholder(tf.float32, [8])
xs = tf.placeholder(DTYPE, [None, 28, 28, 1])
transformed = tf.contrib.image.transform(xs, transformation_matrix,
'BILINEAR')
# Collect the search runs saved under best/ by find_transform().
# NOTE(review): `range(4)` hard-codes four runs — presumably should be
# range(len(uids)); confirm before reuse.
uids = list(set([int(x.split("_")[1]) for x in os.listdir("best") if 'best_' in x and "_10000" in x]))
num = [max([int(x.split("_")[2][:-4]) for x in os.listdir("best") if str(uids[i]) in x and 'idx' not in x and 'tran' not in x]) for i in range(4)]
arr = []
for fileid, filecount in zip(uids, num):
best = np.load("best/best_%d_%d.npy"%(fileid,filecount))
best_idx = np.array(np.load("best/best_%d_%d_idx.npy"%(fileid,filecount)), dtype=np.int32)
best_transforms = np.load("best/best_%d_transforms_%d.npy"%(fileid,filecount))
mask = (abs(best-x_test[use_idx]) > .5)
delta = np.sum(mask,axis=(1,2,3))
arr.append(delta)
print(delta)
print(np.median(delta))
arr = np.min(arr,axis=0)
fout = open("/tmp/out.html","w")
# Save `img` to /tmp, append a thumbnail cell to the HTML report and return
# its GAN score (0 when do=False skips scoring).
def write(txt, img, lab, delta, doinv=False, do=True):
if do:
if len(img.shape) == 4:
img = img[0]
if doinv:
timg = sess.run(transformed, {xs: img[np.newaxis,:,:,:],
transformation_matrix: inv.flatten()[:-1]})[0]
else:
timg = img
s = score(timg, lab)
else:
s = 0
print(lab, type(lab))
print(delta, type(delta))
fout.write('<div style="float: left; padding: 3px">%d[%d]@%d<br/><img style="width:50px; height:50px;" src="%s"/></div>'%(int(s),lab,delta,txt))
scipy.misc.imsave("/tmp/"+txt, img.reshape((28,28)))
print("score of being", lab, "is:", s)
show(img)
fout.flush()
return s
candidates = []
# For each of the 100 attacked test images: re-apply the best transform,
# cluster the changed pixels, and try every subset of clusters as a mask.
for IDX in range(100):
fout.write("<br/><div style='clear: both'></div><br/>")
mat, inv = compute_mat(*best_transforms[IDX])
img = sess.run(transformed, {xs: x_train[best_idx[IDX:IDX+1]],
transformation_matrix: mat.flatten()[:-1]})
print("Source image")
write("img_%d_0.png"%IDX, x_test[use_idx[IDX]], y_test[use_idx[IDX]],0)
print("Target image")
write("img_%d_2.png"%IDX, x_train[best_idx[IDX]], y_train[best_idx[IDX]],0)
mask = (abs(x_test[use_idx[IDX]]-img) > .5)
#origs.append(np.sum(mask))
print("Transformed target image")
write("img_%d_1.png"%IDX, img, y_train[best_idx[IDX]],np.sum(mask), True)
write("img_%d_1.5.png"%IDX, np.array(mask,dtype=np.int32), y_train[best_idx[IDX]], np.sum(mask), True, do=False)
print("Mask delta", np.sum(mask))
show(mask)
clusters = cluster(mask)
print("\n".join(["".join([str(int(x)) for x in y]) for y in clusters]).replace("0"," ").replace("-1","*"))
write("img_%d_1.6.png"%IDX, np.array(mask,dtype=np.int32), y_train[best_idx[IDX]], np.sum(mask), True, do=False)
import matplotlib
colored = np.zeros((28,28,3))
for i in range(28):
for j in range(28):
if mask[0,i,j,0] != 0:
colored[i,j,:] = matplotlib.colors.to_rgb("C"+str(int(clusters[i,j]+1)))
scipy.misc.imsave("/tmp/img_%d_1.6.png"%IDX, colored)
possible = []
for nid,subset in enumerate(itertools.product([False,True], repeat=int(np.max(clusters)))):
if np.sum(subset) == 0: continue
mask = np.any([clusters==(i+1) for i,x in enumerate(subset) if x], axis=0)+0.0
mask = mask.reshape(img.shape)
print("Mask weight", np.sum(mask))
out = ((mask)*img) + ((1-mask)*x_test[use_idx[IDX]])
print("New Image")
s = write("img_%d_%d.png"%(IDX,3+nid), out, y_train[best_idx[IDX]], np.sum(mask), True)
possible.append((out,s))
candidates.append(possible)
print("-"*80)
import pickle
pickle.dump(candidates, open("/tmp/candidates.p","wb"))
# Random search over affine transforms + darkening: for each of the 100
# attacked test digits, find the differently-labelled training digit whose
# transformed version has the smallest L0 distance. Periodically checkpoints
# results to best/*.npy. Binarizes the global datasets in place.
def find_transform():
global x_train, x_test
x_train = (x_train>.5) + 0
x_test = (x_test>.5) + 0
UID = random.randint(0,1000000)
transformation_matrix = tf.placeholder(tf.float32, [8])
inverse_matrix = tf.placeholder(tf.float32, [8])
darkena = tf.placeholder(DTYPE, [])
darkenb = tf.placeholder(DTYPE, [])
print('shape',x_train.shape)
dataset = tf.constant(x_train, dtype=DTYPE)
labels = tf.constant(y_train, dtype=tf.int32)
print('a1')
transformed_dataset = tf.contrib.image.transform(dataset, transformation_matrix,
'BILINEAR')
inverted_dataset = tf.contrib.image.transform(transformed_dataset, inverse_matrix,
'BILINEAR')
# A transform is "ok" when inverting it preserves at least 85% of the mass,
# i.e. the image was not pushed off the canvas.
ok_transform = tf.reduce_sum(inverted_dataset,axis=(1,2,3)) > tf.reduce_sum(dataset,axis=(1,2,3))*.85
transformed_dataset = (1-(1-transformed_dataset)**darkenb)**(1./darkenb)
print('a2')
flat_transformed = tf.cast(tf.reshape(transformed_dataset, [-1, 28*28]), dtype=DTYPE)
query = tf.placeholder(DTYPE, (None, 28, 28, 1))
query_y = tf.placeholder(tf.int32, [None])
query_t = tf.transpose(tf.reshape(query, [-1, 28*28]))
query_t = (1-(1-query_t)**darkena)**(1./darkena)
print('a3')
# Squared-distance matrix (up to the constant ||query||^2 term, which does
# not affect the argmin over the dataset axis).
norms = tf.reduce_sum(tf.square(flat_transformed), axis=1)[:, tf.newaxis] \
- 2*tf.matmul(flat_transformed, query_t)
# Penalize invalid transforms and same-label pairs out of the top-k.
badness1 = 1000*tf.reshape((1-tf.cast(ok_transform,dtype=DTYPE)),[-1,1])
badness2 = 1000*tf.cast(tf.equal(tf.reshape(query_y, [1, -1]), tf.reshape(labels, [-1, 1])), dtype=DTYPE)
print(norms, badness1, badness2, query_y, labels)
norms = norms + badness1 + badness2
_, topk_indices = tf.nn.top_k(-tf.transpose(norms), k=1, sorted=False)
print('done')
def rand(low,high):
return random.random()*(high-low)+low
sess = tf.Session()
best = np.zeros((100,28,28,1))
l0 = np.zeros(100)+10000
best_idx = np.zeros(100)
best_transforms = [None]*100
for tick in range(10000000):
angle = rand(-.25,.25)
sx, sy = rand(.8,1.2), rand(.8,1.2)
ax, ay = rand(-.2,.2), rand(-.2,.2)
tx, ty = rand(-8,8), rand(-8,8)
da, db = rand(-.25,4), rand(-.25,4)
mat, inv = compute_mat(angle, sx, sy, ax, ay, tx, ty, da, db)
now = time.time()
ns, topk, dat, is_ok = sess.run((norms, topk_indices, transformed_dataset, ok_transform),
{transformation_matrix: mat.flatten()[:-1],
inverse_matrix: inv.flatten()[:-1],
query: x_test[use_idx],
query_y: y_test[use_idx],
darkena: db,
darkenb: db})
#print(time.time()-now)
# Keep the best (lowest-L0) match found so far for each attacked image.
for i in range(100):
e = topk[i][0]
v = ns[e, i]
dd = np.sum((x_test[use_idx[i]]>.5)^(dat[e]>.5))
#print('check', 'idx',i, 'to',e, 'val',v, 'was',best[i])
if dd < l0[i]:
#print("new better", 'idx',i, 'map to',e, 'was', best[i], 'now', v)
#print('l0 diff',np.sum((x_train[i]>.5)^(dat[e]>.5)))
l0[i] = min(l0[i], dd)
best[i] = dat[e]
best_idx[i] = e
best_transforms[i] = [angle, sx, sy, ax ,ay, tx, ty, da, db]
if tick%1000 == 0:
print('mean',np.mean(l0),'median',np.median(l0))
print(sorted(l0))
np.save("best/best_%d_%d.npy"%(UID,tick),best)
np.save("best/best_%d_%d_idx.npy"%(UID,tick),best_idx)
np.save("best/best_%d_transforms_%d.npy"%(UID,tick),best_transforms)
if tick%10000 == 0:
for i in range(100):
print("is",l0[i])
show(x_test[use_idx[i]])
show(best[i])
show((x_test[use_idx[i]]>.5)^(best[i]>.5))
# Module-level globals read by find_transform()/improve_transform().
x_train = y_train = None
# Entry point: load and normalize MNIST, then run the transform search over
# the fixed set of 100 test indices below.
if __name__ == "__main__":
(x_train, y_train), (x_test, y_test) = mnist.load_data()
img_rows = img_cols = 28
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
#data_scores = np.load("mnist_scores.npy")
#x_train = x_train[data_scores>1.0] # only keep the best 80% of the data
#y_train = y_train[data_scores>1.0] # only keep the best 80% of the data
# The 100 test-set indices that are attacked.
use_idx = [159, 235, 247, 452, 651, 828, 937, 1018, 1021, 1543, 1567, 1692, 1899, 1904, 1930, 1944, 2027, 2082, 2084,
2232, 2273, 2306, 2635, 2733, 2805, 2822, 3169, 3290, 3335, 3364, 3394, 3469, 3471, 3540, 3628, 3735, 3999,
4014, 4086, 4329, 4456, 4471, 4482, 4496, 4503, 4504, 4611, 4630, 4649, 4726, 4840, 4974, 4980, 5089, 5209,
5281, 5447, 5522, 5700, 5820, 5909, 5926, 5946, 5988, 6054, 6130, 6408, 6506, 6558, 6693, 6759, 6762, 6779,
6881, 6947, 6997, 7031, 7063, 7154, 7377, 7547, 7625, 7759, 7790, 7796, 7826, 8334, 8535, 9073, 9181, 9195,
9231, 9375, 9458, 9563, 9639, 9695, 9720, 9811, 9825]
#model, final = make_model()
#train_model(final, x_train, y_train)
#model.save("baseline.model")
find_transform()
#improve_transform()
| 14,333 | 36.03876 | 152 | py |
irbl | irbl-master/src/main.py | import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn import datasets
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import os
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
import torch
from sklearn.isotonic import IsotonicRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression as SKLearnLogisticRegression
from sklearn.base import clone
from scipy import stats
import math
import Orange
from skorch import NeuralNetClassifier,NeuralNetBinaryClassifier
import itertools
import random
class LogisticRegressionSoftmax(torch.nn.Module):
    """Multinomial logistic regression that outputs class probabilities."""

    def __init__(self, input_size, output_size):
        super(LogisticRegressionSoftmax, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.fc = torch.nn.Linear(self.input_size, self.output_size)

    def forward(self, x):
        logits = self.fc(x)
        return torch.nn.functional.softmax(logits, dim=1)
class BinaryLogisticRegression(torch.nn.Module):
    """Binary logistic regression returning a single raw logit per sample."""

    def __init__(self, input_size):
        super(BinaryLogisticRegression, self).__init__()
        self.input_size = input_size
        self.fc = torch.nn.Linear(self.input_size, 1)

    def forward(self, x):
        return self.fc(x)
class LogisticRegression(torch.nn.Module):
    """Multiclass logistic regression returning raw (unnormalized) logits."""

    def __init__(self, input_size, output_size):
        super(LogisticRegression, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.fc = torch.nn.Linear(self.input_size, self.output_size)

    def forward(self, x):
        return self.fc(x)
class MLP(torch.nn.Module):
    """One-hidden-layer perceptron with ReLU, returning raw logits."""

    def __init__(self, input_size, hidden_size, output_size):
        super(MLP, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # attribute names kept (fc1/ReLU/fc2) so state_dict keys are stable
        self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)
        self.ReLU = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(self.hidden_size, self.output_size)

    def forward(self, x):
        return self.fc2(self.ReLU(self.fc1(x)))
def friedman_test(*args, reverse=False):
    """Friedman test with the Iman-Davenport correction.

    Each positional argument is the score list of one method over the same
    n datasets. Ties receive mid-ranks.

    Returns (iman_davenport, p_value, rankings_avg, rankings_cmp).
    Raises ValueError for fewer than two methods or unequal sample sizes.
    """
    k = len(args)
    if k < 2:
        raise ValueError('Less than 2 levels')
    n = len(args[0])
    if len(set(len(v) for v in args)) != 1:
        raise ValueError('Unequal number of samples')
    rankings = []
    for i in range(n):
        row = [col[i] for col in args]
        row_sort = sorted(row, reverse=reverse)
        # mid-rank for ties: first occurrence rank + half the duplicates
        rankings.append([row_sort.index(v) + 1 + (row_sort.count(v) - 1) / 2.
                         for v in row])
    rankings_avg = [np.mean([case[j] for case in rankings]) for j in range(k)]
    rankings_cmp = [r / np.sqrt(k * (k + 1) / (6. * n)) for r in rankings_avg]
    # FIX: np.sum over a generator is deprecated and relies on a fallback;
    # the builtin sum is the correct tool here.
    chi2 = ((12 * n) / float(k * (k + 1))) * (
        sum(r ** 2 for r in rankings_avg) - (k * (k + 1) ** 2) / float(4))
    iman_davenport = ((n - 1) * chi2) / float(n * (k - 1) - chi2)
    p_value = 1 - stats.f.cdf(iman_davenport, k - 1, (k - 1) * (n - 1))
    return iman_davenport, p_value, rankings_avg, rankings_cmp
def wilcoxon_test(score_A, score_B):
    """Two-sided Wilcoxon signed-rank z-test between two score lists.

    Returns (z, rejected) where rejected is True when |z| > 1.96, i.e. the
    null hypothesis is rejected at alpha = 0.05.
    Note: tied absolute deltas get their positional rank (not mid-ranks),
    and zero deltas are kept in the count, matching the original behavior.
    """
    deltas = [b - a for a, b in zip(score_A, score_B)]
    n_r = float(len(deltas))
    # rank the absolute deltas
    table = pd.DataFrame({'abs_delta_score': list(np.abs(deltas)),
                          'sign_delta_score': list(np.sign(deltas))})
    table.sort_values(by='abs_delta_score', inplace=True)
    table.index = range(1, len(table) + 1)
    table['Ranks'] = table.index
    table['Ranks'] = table['Ranks'].astype('float64')
    table.dropna(inplace=True)
    # signed-rank statistic, normalized to a z-score
    w_stat = sum(table['sign_delta_score'] * table['Ranks'])
    z = w_stat / (math.sqrt(n_r * (n_r + 1) * (2 * n_r + 1) / 6.0))
    rejected = bool(z < -1.96 or z > 1.96)
    return z, rejected
def noisy_completly_at_random(y, ratio):
    """Corrupt labels completely at random.

    Each label is selected with probability `ratio`; selected labels are
    replaced by a fair Bernoulli coin. Returns a corrupted copy of y.
    """
    n = y.shape
    is_missing = np.random.binomial(1, ratio, n)
    noise = np.random.binomial(1, 0.5, len(y[is_missing == 1]))
    corrupted = np.copy(y)
    corrupted[is_missing == 1] = noise
    return corrupted
def noisy_not_at_random(proba, y, ratio):
    """Corrupt labels with probability that grows with model uncertainty.

    `proba` is the predicted P(y=1) per sample; confident predictions
    (proba near 0 or 1) are corrupted less often. At ratio == 1 every label
    is replaced by a fair coin. Returns a corrupted copy of y.
    """
    n = y.shape
    if ratio == 1:
        flip_p = np.full_like(proba, 1)
    else:
        flip_p = 1 - (1 - ratio) * np.power(np.abs(1 - 2 * proba),
                                            1 / (1 - ratio))
    is_missing = np.random.binomial(1, flip_p, n)
    noise = np.random.binomial(1, 0.5, len(y[is_missing == 1]))
    corrupted = np.copy(y)
    corrupted[is_missing == 1] = noise
    return corrupted
def split_dataset(dataset, split):
    """Stratified split of an XY dataset into (train, val) XYDatasets."""
    train_idx, val_idx = train_test_split(list(range(len(dataset))),
                                          test_size=split,
                                          stratify=dataset[:][1])
    train_part = torch.utils.data.Subset(dataset, train_idx)
    val_part = torch.utils.data.Subset(dataset, val_idx)
    return (XYDataset(train_part[:][0], train_part[:][1]),
            XYDataset(val_part[:][0], val_part[:][1]))
def split_scale_dataset(dataset, split):
    """Stratified split into (train, val), standardizing features with a
    scaler fitted on the training part only (no leakage)."""
    train_idx, val_idx = train_test_split(list(range(len(dataset))),
                                          test_size=split,
                                          stratify=dataset[:][1])
    train_part = torch.utils.data.Subset(dataset, train_idx)
    val_part = torch.utils.data.Subset(dataset, val_idx)
    scaler = StandardScaler().fit(train_part[:][0])
    scaled_train = XYDataset(scaler.transform(train_part[:][0]),
                             train_part[:][1])
    if val_part[:][0].shape[0] == 0:
        # nothing to scale; return the (empty) subset unchanged
        scaled_val = val_part
    else:
        scaled_val = XYDataset(scaler.transform(val_part[:][0]),
                               val_part[:][1])
    return (scaled_train, scaled_val)
def corrupt_dataset(dataset, corrupt_fn, cr):
    """Return a copy of `dataset` with labels corrupted by corrupt_fn(y, cr)."""
    features, labels = dataset[:][0], dataset[:][1]
    return XYDataset(features, corrupt_fn(labels, cr))
def split_corrupt_dataset(dataset, corrupt_fn, split, cr):
    """Split into a clean 'trusted' part and a label-corrupted 'untrusted'
    part (stratified). Returns (trusted, corrupted_untrusted)."""
    trusted_idx, untrusted_idx = train_test_split(list(range(len(dataset))),
                                                  test_size=split,
                                                  stratify=dataset[:][1])
    trusted = torch.utils.data.Subset(dataset, trusted_idx)
    untrusted = torch.utils.data.Subset(dataset, untrusted_idx)
    corrupted = XYDataset(untrusted[:][0], corrupt_fn(untrusted[:][1], cr))
    return (XYDataset(trusted[:][0], trusted[:][1]), corrupted)
class UnhingedLoss(torch.nn.Module):
    """Unhinged loss 1 - (2y - 1) * s for binary labels y in {0, 1}, where
    s is the score in column 1 of X. Returns the per-sample loss vector."""

    def __init__(self):
        super(UnhingedLoss, self).__init__()

    def forward(self, X, y):
        signed_labels = 2 * y - 1
        return 1 - signed_labels * X[:, 1]
class ad(torch.utils.data.Dataset):
    """Internet Advertisements dataset loaded from data/ad/train.csv.

    The last CSV column is the class ('ad.' vs. not); all other columns are
    features. X is float32; y holds int64 class indices.
    """

    def __init__(self):
        X, y = (pd.read_csv("data/ad/train.csv").iloc[:, :-1].to_numpy(),
                (pd.read_csv("data/ad/train.csv").iloc[:, -1].to_numpy() == 'ad.').astype(int))
        # FIX: np.long was removed in NumPy 1.24; int64 is the dtype torch
        # expects for class labels.
        self.X, self.y = (np.squeeze(np.asarray(X)).astype(np.float32),
                          LabelEncoder().fit_transform(y).astype(np.int64))

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class web(torch.utils.data.Dataset):
    """Binary dataset loaded from data/web/train (svmlight format)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/web/train")  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class fourclass(torch.utils.data.Dataset):
    """Binary dataset loaded from data/fourclass/train (svmlight format)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/fourclass/train")  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class svmguide3(torch.utils.data.Dataset):
    """Binary dataset loaded from data/svmguide3/train (svmlight format)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/svmguide3/train")  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class svmguide1(torch.utils.data.Dataset):
    """Binary dataset loaded from data/svmguide1/train (svmlight format)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/svmguide1/train")  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class ionosphere(torch.utils.data.Dataset):
    """Ionosphere dataset from data/ionosphere/train.csv; label 1 marks 'b' rows."""

    def __init__(self):
        df = pd.read_csv("data/ionosphere/train.csv")  # read once (was read twice)
        X = df.iloc[:, :-1].to_numpy()
        y = (df.iloc[:, -1].to_numpy() == "b").astype(int)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class banknote(torch.utils.data.Dataset):
    """Banknote-authentication dataset from data/banknote/train.csv (no header)."""

    def __init__(self):
        df = pd.read_csv("data/banknote/train.csv", header=None)  # read once (was read twice)
        X = df.iloc[:, :-1].to_numpy()
        y = (df.iloc[:, -1].to_numpy()).astype(int)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class musk(torch.utils.data.Dataset):
    """Musk dataset from data/musk/train.csv (no header); the first two columns
    are skipped, the last column is the integer label."""

    def __init__(self):
        df = pd.read_csv("data/musk/train.csv", header=None)  # read once (was read twice)
        X = df.iloc[:, 2:-1].to_numpy()
        y = (df.iloc[:, -1].to_numpy()).astype(int)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class ijcnn1(torch.utils.data.Dataset):
    """Binary dataset loaded from data/ijcnn1/train (svmlight format)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/ijcnn1/train")  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class eeg(torch.utils.data.Dataset):
    """EEG dataset from data/eeg/train.csv (no header); last column is the label."""

    def __init__(self):
        df = pd.read_csv("data/eeg/train.csv", header=None)  # read once (was read twice)
        X = df.iloc[:, :-1].to_numpy()
        y = df.iloc[:, -1].to_numpy()
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class hiva(torch.utils.data.Dataset):
    """HIVA dataset: features from data/hiva/train.csv, labels from
    data/hiva/label.csv (space-separated, no header)."""

    def __init__(self):
        X = pd.read_csv("data/hiva/train.csv", sep=" ", header=None).iloc[:, :-1].to_numpy()
        y = pd.read_csv("data/hiva/label.csv", sep=" ", header=None).iloc[:, 0].to_numpy()
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class ibn_sina(torch.utils.data.Dataset):
    """Ibn-Sina dataset: features from data/ibn-sina/train.csv, labels from
    data/ibn-sina/label.csv (space-separated, no header)."""

    def __init__(self):
        X = pd.read_csv("data/ibn-sina/train.csv", sep=" ", header=None).iloc[:, :-1].to_numpy()
        y = pd.read_csv("data/ibn-sina/label.csv", sep=" ", header=None).iloc[:, 0].to_numpy()
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class zebra(torch.utils.data.Dataset):
    """Zebra dataset: features from data/zebra/train.csv, labels from
    data/zebra/label.csv; non-finite feature values are replaced by 0."""

    def __init__(self):
        X = pd.read_csv("data/zebra/train.csv", sep=" ", header=None).iloc[:, :-1].to_numpy()
        y = pd.read_csv("data/zebra/label.csv", sep=" ", header=None).iloc[:, 0].to_numpy()
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.nan_to_num(np.squeeze(np.asarray(X)).astype(np.float32),
                               nan=0.0, posinf=0.0, neginf=0.0)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class sylva(torch.utils.data.Dataset):
    """Sylva dataset: features from data/sylva/train.csv, labels from
    data/sylva/label.csv; non-finite feature values are replaced by 0."""

    def __init__(self):
        X = pd.read_csv("data/sylva/train.csv", sep=" ", header=None).iloc[:, :-1].to_numpy()
        y = pd.read_csv("data/sylva/label.csv", sep=" ", header=None).iloc[:, 0].to_numpy()
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.nan_to_num(np.squeeze(np.asarray(X)).astype(np.float32),
                               nan=0.0, posinf=0.0, neginf=0.0)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class australian(torch.utils.data.Dataset):
    """Binary dataset loaded from data/australian/train (svmlight format)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/australian/train")  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class diabetes(torch.utils.data.Dataset):
    """Binary dataset loaded from data/diabetes/train (svmlight format)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/diabetes/train")  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class breast(torch.utils.data.Dataset):
    """Binary dataset loaded from data/breast-cancer/train (svmlight format)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/breast-cancer/train")  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class adult(torch.utils.data.Dataset):
    """Adult dataset loaded from data/adult/train (svmlight format, padded to
    123 features)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/adult/train", n_features=123)  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class german(torch.utils.data.Dataset):
    """Binary dataset loaded from data/german/train (svmlight format)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/german/train")  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class phishing(torch.utils.data.Dataset):
    """Binary dataset loaded from data/phishing/train (svmlight format)."""

    def __init__(self):
        data = datasets.load_svmlight_file("data/phishing/train")  # load once (was loaded twice)
        X, y = data[0].todense(), data[1]
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class spam(torch.utils.data.Dataset):
    """Spam dataset from data/spam/train.csv (no header); last column is the label."""

    def __init__(self):
        df = pd.read_csv("data/spam/train.csv", header=None)  # read once (was read twice)
        X = df.iloc[:, :-1].to_numpy()
        y = df.iloc[:, -1].to_numpy()
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class XYDataset(torch.utils.data.Dataset):
    """Minimal in-memory dataset wrapping parallel feature/label sequences."""

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __getitem__(self, index):
        return (self.X[index], self.y[index])

    def __len__(self):
        return len(self.X)
class MergedDataset(torch.utils.data.Dataset):
    """Concatenation of a trusted and an untrusted dataset.

    Items are (x, (label, flag)) where flag is 0 for trusted samples and 1
    for untrusted ones; trusted samples come first in the index order.
    """

    def __init__(self, trusted, untrusted):
        self.trusted = trusted
        self.untrusted = untrusted

    def __getitem__(self, index):
        n_trusted = len(self.trusted)
        if index >= n_trusted:
            x, label = self.untrusted.__getitem__(index - n_trusted)
            return x, (label, 1)
        x, label = self.trusted.__getitem__(index)
        return x, (label, 0)

    def __len__(self):
        return len(self.untrusted) + len(self.trusted)
class WeightedDataset(torch.utils.data.Dataset):
    """Wraps a dataset so each item becomes ((x, weight), label)."""

    def __init__(self, dataset, weights):
        self.dataset = dataset
        self.weights = weights

    def __getitem__(self, index):
        # Fetch the underlying item once (the original fetched it twice).
        x, label = self.dataset.__getitem__(index)
        return (x, self.weights.__getitem__(index)), label

    def __len__(self):
        return self.dataset.__len__()
def normal(train, test, optimizer, batch_size, epochs, lr, weight_decay, hidden_size, loss="cross_entropy"):
    """Train a classifier on `train` and evaluate it on `test` after every epoch.

    Uses LogisticRegression when hidden_size == 0, otherwise an MLP.
    optimizer: "adam" or "sgd" (the string parameter is rebound to the torch
    optimizer object). loss: "cross_entropy" or "unhinged" selects the
    training criterion; validation always uses cross-entropy.
    Returns (model, DataFrame of per-epoch train/valid loss and accuracy).
    """
    input_size = len(train[0][0])
    num_classes = 2  # binary classification throughout this file
    if hidden_size == 0:
        model = LogisticRegression(input_size, num_classes)
    else:
        model = MLP(input_size, hidden_size, num_classes)
    if optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif optimizer == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)
    # validation loss is always per-sample cross-entropy, averaged per batch
    cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction="none")
    if loss == "cross_entropy":
        train_loss = torch.nn.CrossEntropyLoss(reduction="none")
    elif loss == "unhinged":
        train_loss = UnhingedLoss()
    train_loader = torch.utils.data.DataLoader(dataset=train,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    valid_loader = torch.utils.data.DataLoader(dataset=test,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    mean_train_losses = []
    mean_valid_losses = []
    accs = []
    for epoch in range(epochs):
        model.train()
        train_losses = []
        valid_losses = []
        valid_preds = []
        valid_labels = []
        for i, (data, labels) in enumerate(train_loader):
            optimizer.zero_grad()
            outputs = model(data)
            # NOTE: rebinds the `loss` parameter; harmless since the criterion
            # was already selected above.
            loss = train_loss(outputs, labels).mean()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        model.eval()
        with torch.no_grad():
            for i, (data, labels) in enumerate(valid_loader):
                outputs = model(data)
                loss = cross_entropy_loss(outputs, labels).mean()
                valid_losses.append(loss.item())
                # keep the positive-class probability for thresholded accuracy
                valid_preds.append(torch.nn.functional.softmax(outputs, dim=1).numpy()[:, 1])
                valid_labels.append(labels.numpy())
        mean_train_losses.append(np.mean(train_losses))
        mean_valid_losses.append(np.mean(valid_losses))
        # accuracy at the 0.5 probability threshold
        acc = accuracy_score(np.concatenate(valid_labels), np.concatenate(valid_preds) > 0.5)
        accs.append(acc)
        print('epoch : {}, train loss : {:.4f}, valid loss : {:.4f}, valid acc : {:.2f}'
              .format(epoch + 1, np.mean(train_losses), np.mean(valid_losses), acc))
    return model, pd.DataFrame(list(zip(mean_train_losses, mean_valid_losses, accs)),
                               columns=["mean_train_losses", "mean_valid_losses", "accs"])
def irbl(trusted, untrusted, test, ft, fu, optimizer, batch_size, epochs, lr, weight_decay, hidden_size):
    """Importance reweighting: train on trusted + untrusted data, weighting
    each untrusted sample by beta = p_ft(y|x) / p_fu(y|x).

    ft / fu: classifiers fitted on the trusted / untrusted sets (sklearn-style
    with predict_proba, or torch modules returning logits).
    Returns (model, per-epoch metrics DataFrame, per-sample beta Series).
    """
    # BUG FIX: the original read `len(train[0][0])`, but this function has no
    # `train` parameter (NameError); the trusted set defines the input size.
    input_size = len(trusted[0][0])
    num_classes = 2
    if hidden_size == 0:
        model = LogisticRegression(input_size, num_classes)
    else:
        model = MLP(input_size, hidden_size, num_classes)
    if optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif optimizer == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)
    cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction="none")
    valid_loader = torch.utils.data.DataLoader(dataset=test,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    mean_train_losses = []
    mean_valid_losses = []
    accs = []
    # predict the beta as you go in the last training loop if your data can't fit in memory
    total_data = torch.from_numpy(untrusted[:][0])
    total_labels = torch.from_numpy(untrusted[:][1])
    if hasattr(ft, "predict_proba") and hasattr(fu, "predict_proba"):
        # sklearn-style models: probability of the observed label under each model
        ft_proba = np.take_along_axis(ft.predict_proba(total_data.numpy()),
                                      total_labels.numpy().reshape(-1, 1), axis=1).flatten()
        fu_proba = np.take_along_axis(fu.predict_proba(total_data.numpy()),
                                      total_labels.numpy().reshape(-1, 1), axis=1).flatten()
        # zero weight where fu assigns zero probability (avoids division by 0)
        beta = np.divide(ft_proba,
                         fu_proba,
                         out=np.zeros_like((total_labels.numpy()), dtype=float),
                         where=fu_proba != 0)
        beta = torch.from_numpy(beta).float()
    else:
        # torch modules: softmax the logits and gather the observed-label column
        ft_proba = torch.flatten(torch.gather(torch.nn.functional.softmax(
            ft(total_data), dim=1), 1, total_labels.view(-1, 1)))
        fu_proba = torch.flatten(torch.gather(torch.nn.functional.softmax(
            fu(total_data), dim=1), 1, total_labels.view(-1, 1)))
        beta = torch.div(ft_proba, fu_proba)
        beta[torch.isnan(beta)] = 0.0
        beta[torch.isinf(beta)] = 0.0
    # trusted samples keep weight 1; untrusted samples get their beta
    total_beta = torch.cat([torch.ones(len(trusted)), beta]).detach()
    total_loader = torch.utils.data.DataLoader(dataset=WeightedDataset(MergedDataset(trusted, untrusted), total_beta),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    for epoch in range(epochs):
        model.train()
        train_losses = []
        valid_losses = []
        valid_preds = []
        valid_labels = []
        for i, ((data, weights), (labels, is_corrupteds)) in enumerate(total_loader):
            optimizer.zero_grad()
            outputs = model(data)
            # per-sample cross-entropy scaled by the importance weights
            loss = (cross_entropy_loss(outputs, labels) * weights).mean()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        model.eval()
        with torch.no_grad():
            for i, (data, labels) in enumerate(valid_loader):
                outputs = model(data)
                loss = cross_entropy_loss(outputs, labels).mean()
                valid_losses.append(loss.item())
                valid_preds.append(torch.nn.functional.softmax(outputs, dim=1).float().numpy()[:, 1])
                valid_labels.append(labels.numpy())
        mean_train_losses.append(np.mean(train_losses))
        mean_valid_losses.append(np.mean(valid_losses))
        acc = accuracy_score(np.concatenate(valid_labels), np.concatenate(valid_preds) > 0.5)
        accs.append(acc)
        print('epoch : {}, train loss : {:.4f}, valid loss : {:.4f}, valid acc : {:.2f}'
              .format(epoch + 1, np.mean(train_losses), np.mean(valid_losses), acc))
    return model, pd.DataFrame(list(zip(mean_train_losses, mean_valid_losses, accs)),
                               columns=["mean_train_losses", "mean_valid_losses", "accs"]), pd.Series(total_beta.detach().numpy())
def normal_sklearn(train, test, estimator, calibration_method="isotonic", sample_weight=None):
    """Fit `estimator` (optionally probability-calibrated) and report test accuracy.

    calibration_method: "nothing" fits a clone of the raw estimator; any other
    value ("isotonic", "sigmoid") is forwarded to CalibratedClassifierCV.
    Returns (fitted model, one-row DataFrame with the accuracy).
    """
    X_train, y_train = train[:][0], train[:][1]
    X_test, y_test = test[:][0], test[:][1]
    if calibration_method == "nothing":
        model = clone(estimator).fit(X_train, y_train)
    else:
        # Generalized: previously only "isotonic" was handled, and any other
        # value left `model` unbound (UnboundLocalError at predict time).
        model = CalibratedClassifierCV(estimator, method=calibration_method).fit(
            X_train, y_train, sample_weight=sample_weight)
    acc = accuracy_score(y_test, model.predict(X_test))
    print('valid acc : {:.2f}'
          .format(acc))
    return model, pd.DataFrame([[acc]],
                               columns=["acc"])
def kdr(trusted, untrusted, test, optimizer, batch_size, epochs, lr, weight_decay, hidden_size):
    """Per-class density-ratio reweighting: for each class, a binary logistic
    model separates trusted from untrusted samples and its output is turned
    into an importance weight (times a class-prior ratio) for the untrusted
    samples; a classifier is then trained on the reweighted union.

    Returns (model, per-epoch metrics DataFrame, per-sample beta Series).
    """
    input_size = len(trusted[0][0])
    num_classes = 2
    if hidden_size == 0:
        model = LogisticRegression(input_size, num_classes)
    else:
        model = MLP(input_size, hidden_size, num_classes)
    if optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif optimizer == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)
    cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction="none")
    valid_loader = torch.utils.data.DataLoader(dataset=test,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    mean_train_losses = []
    mean_valid_losses = []
    accs = []
    # predict the beta as you go in the last training loop if your data can't fit in memory
    X_trusted = torch.from_numpy(trusted[:][0])
    y_trusted = torch.from_numpy(trusted[:][1])
    X_untrusted = torch.from_numpy(untrusted[:][0])
    y_untrusted = torch.from_numpy(untrusted[:][1])
    n_samples_trusted, _ = X_trusted.shape
    n_samples_untrusted, _ = X_untrusted.shape
    beta = torch.zeros(n_samples_untrusted)
    # ratio of per-class frequencies between the trusted and untrusted sets
    prior = torch.true_divide(torch.true_divide(torch.bincount(y_trusted), n_samples_trusted),
                              torch.true_divide(torch.bincount(y_untrusted), n_samples_untrusted))
    for i in range(num_classes):
        X_trusted_i = X_trusted[y_trusted == i]
        X_untrusted_i = X_untrusted[y_untrusted == i]
        n_trusted_i, _ = X_trusted_i.shape
        n_untrusted_i, _ = X_untrusted_i.shape
        s_trusted_i = torch.ones(n_trusted_i).float()
        s_untrusted_i = torch.zeros(n_untrusted_i).float()
        ratio = n_untrusted_i / n_trusted_i
        # BUG FIX: the fitted model used to be bound to `lr`, shadowing the
        # learning-rate parameter, and the classifier was built with
        # `lr=learning_rate` — an undefined name (NameError). The model now
        # has its own name and the learning rate comes from the `lr` parameter.
        ratio_model, _ = normal_sklearn(
            XYDataset(np.vstack((X_trusted_i, X_untrusted_i)), np.hstack((s_trusted_i, s_untrusted_i))),
            test,
            NeuralNetBinaryClassifier(
                module=BinaryLogisticRegression,
                module__input_size=input_size,
                max_epochs=epochs,
                train_split=None,
                lr=lr,
                batch_size=batch_size,
                optimizer__weight_decay=weight_decay,
                iterator_train__shuffle=True),
            calibration_method="nothing")
        ratio = ratio * torch.exp(ratio_model.forward(X_untrusted_i))
        beta[y_untrusted == i] = ratio * prior[i]
    # trusted samples keep weight 1; untrusted samples get their beta
    total_beta = torch.cat([torch.ones(len(trusted)), beta]).detach()
    total_loader = torch.utils.data.DataLoader(dataset=WeightedDataset(MergedDataset(trusted, untrusted), total_beta),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    for epoch in range(epochs):
        model.train()
        train_losses = []
        valid_losses = []
        valid_preds = []
        valid_labels = []
        for i, ((data, weights), (labels, is_corrupteds)) in enumerate(total_loader):
            optimizer.zero_grad()
            outputs = model(data)
            loss = (cross_entropy_loss(outputs, labels) * weights).mean()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        model.eval()
        with torch.no_grad():
            for i, (data, labels) in enumerate(valid_loader):
                outputs = model(data)
                loss = cross_entropy_loss(outputs, labels).mean()
                valid_losses.append(loss.item())
                valid_preds.append(torch.nn.functional.softmax(outputs, dim=1).float().numpy()[:, 1])
                valid_labels.append(labels.numpy())
        mean_train_losses.append(np.mean(train_losses))
        mean_valid_losses.append(np.mean(valid_losses))
        acc = accuracy_score(np.concatenate(valid_labels), np.concatenate(valid_preds) > 0.5)
        accs.append(acc)
        print('epoch : {}, train loss : {:.4f}, valid loss : {:.4f}, valid acc : {:.2f}'
              .format(epoch + 1, np.mean(train_losses), np.mean(valid_losses), acc))
    return model, pd.DataFrame(list(zip(mean_train_losses, mean_valid_losses, accs)),
                               columns=["mean_train_losses", "mean_valid_losses", "accs"]), pd.Series(total_beta.detach().numpy())
def glc(trusted, untrusted, test, fu, optimizer, batch_size, epochs, lr, weight_decay, hidden_size):
    """Loss correction via an estimated corruption matrix C.

    C[k, j] is fu's mean predicted probability of class j over trusted samples
    of class k. Untrusted samples are trained through log(softmax(out) @ C)
    with NLL, trusted samples with plain cross-entropy.
    Returns (model, per-epoch metrics DataFrame, C as a DataFrame).
    """
    input_size = len(trusted[0][0])
    # infer the number of classes from the largest test label
    num_classes = int(max(test[:][1]) + 1)
    cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction="none")
    nll_loss = torch.nn.NLLLoss(reduction="none")
    # BUG FIX: the original used `beta_batch_size[0]`, a name that does not
    # exist in this function (NameError); use the `batch_size` parameter.
    trusted_loader = torch.utils.data.DataLoader(dataset=trusted,
                                                 batch_size=batch_size,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 drop_last=False)
    total_loader = torch.utils.data.DataLoader(dataset=MergedDataset(trusted, untrusted),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    valid_loader = torch.utils.data.DataLoader(dataset=test,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    if hidden_size == 0:
        model = LogisticRegression(input_size, num_classes)
    else:
        model = MLP(input_size, hidden_size, num_classes)
    if optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif optimizer == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)
    mean_train_losses = []
    mean_valid_losses = []
    accs = []
    # estimate C row by row from fu's predictions on the trusted set
    C = torch.zeros((num_classes, num_classes))
    for k in range(num_classes):
        num_examples = 0
        for i, (data, labels) in enumerate(trusted_loader):
            data_k = data[labels.numpy() == k]
            num_examples += len(data_k)
            if hasattr(fu, "predict_proba"):
                if not len(data_k.numpy()) == 0:
                    C[k] += np.sum(fu.predict_proba(data_k.numpy()), axis=0)
            else:
                C[k] += torch.sum(torch.nn.functional.softmax(fu(data_k), dim=1), axis=0)
        if num_examples == 0:
            # no trusted example of class k: fall back to a uniform row
            C[k] = torch.ones(num_classes) / num_classes
        else:
            C[k] = C[k] / num_examples
    C = C.detach()
    print(C)
    print(C.t())
    for epoch in range(epochs):
        model.train()
        train_losses = []
        valid_losses = []
        valid_preds = []
        valid_labels = []
        for i, (data, (labels, is_corrupteds)) in enumerate(total_loader):
            optimizer.zero_grad()
            outputs = model(data)
            # trusted samples (is_corrupteds == 0): plain cross-entropy
            loss_trusted = (cross_entropy_loss(outputs, labels) * (1 - is_corrupteds)).sum()
            # untrusted samples: NLL through the corruption matrix C
            loss_untrusted = (nll_loss(torch.log(torch.matmul(torch.nn.functional.softmax(outputs, dim=1), C)),
                                       labels) * is_corrupteds).sum()
            loss = (loss_trusted + loss_untrusted) / len(data)
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        model.eval()
        with torch.no_grad():
            for i, (data, labels) in enumerate(valid_loader):
                outputs = model(data)
                loss = cross_entropy_loss(outputs, labels).mean()
                valid_losses.append(loss.item())
                valid_preds.append(torch.nn.functional.softmax(outputs, dim=1).numpy()[:, 1])
                valid_labels.append(labels.numpy())
        mean_train_losses.append(np.mean(train_losses))
        mean_valid_losses.append(np.mean(valid_losses))
        acc = accuracy_score(np.concatenate(valid_labels), np.concatenate(valid_preds) > 0.5)
        accs.append(acc)
        print('c : epoch : {}, train loss : {:.4f}, valid loss : {:.4f}, valid acc : {:.2f}'
              .format(epoch + 1, np.mean(train_losses), np.mean(valid_losses), acc))
    return model, pd.DataFrame(list(zip(mean_train_losses, mean_valid_losses, accs)),
                               columns=["mean_train_losses", "mean_valid_losses", "accs"]), pd.DataFrame(C)
def loop(dir, trusted, untrusted, test, optimizer, beta_batch_size, batch_size, beta_epochs, epochs,
         beta_learning_rate, learning_rate, beta_weight_decay, weight_decay, beta_hidden_size, hidden_size, calibration_method):
    """Run every method on one experimental setup and dump per-method metrics
    (plus learned betas / correction matrix where applicable) as CSVs in `dir`.

    Index [0] / [1] of the beta_* hyperparameter pairs applies to the
    trusted / untrusted auxiliary models respectively.
    NOTE(review): the `calibration_method` parameter is never used in this
    body (normal_sklearn is called with its default) — confirm intent.
    """
    # auxiliary model trained on the trusted set only
    print("ft-torched")
    ft_torched, ft_torched_data = normal(
        trusted, test, optimizer, beta_batch_size[0], beta_epochs[0], beta_learning_rate[0], beta_weight_decay[0], beta_hidden_size[0])
    ft_torched_data.to_csv("{}/ft-torched-perfs.csv".format(dir), index=False)
    # auxiliary model trained on the (noisy) untrusted set only
    print("fu-torched")
    fu_torched, fu_torched_data = normal(
        untrusted, test, optimizer, beta_batch_size[1], beta_epochs[1], beta_learning_rate[1], beta_weight_decay[1], beta_hidden_size[1])
    fu_torched_data.to_csv("{}/fu-torched-perfs.csv".format(dir), index=False)
    # reweighted training using betas derived from ft/fu
    print("full-torched")
    full_torched, full_torched_data, full_torched_beta = irbl(
        trusted, untrusted, test, ft_torched, fu_torched, optimizer, batch_size, epochs, learning_rate, weight_decay, hidden_size)
    full_torched_data.to_csv("{}/full-torched-perfs.csv".format(dir), index=False)
    full_torched_beta.to_csv("{}/full-torched-beta.csv".format(dir), index=False, header=False)
    # same auxiliary models fitted through skorch + sklearn calibration
    print("ft-calibrated")
    ft_torched_calibrated, ft_torched_calibrated_data = normal_sklearn(
        trusted, test, NeuralNetClassifier(
            module=LogisticRegressionSoftmax,
            module__input_size=len(trusted[0][0]),
            module__output_size=2,
            max_epochs=beta_epochs[0],
            train_split=None,
            lr=beta_learning_rate[0],
            batch_size=beta_batch_size[0],
            optimizer__weight_decay=beta_weight_decay[0],
            iterator_train__shuffle=True,
            verbose=0))
    ft_torched_calibrated_data.to_csv("{}/ft-torched-calibrated-perfs.csv".format(dir), index=False)
    print("fu-calibrated")
    fu_torched_calibrated, fu_torched_calibrated_data = normal_sklearn(
        untrusted, test, NeuralNetClassifier(
            module=LogisticRegressionSoftmax,
            module__input_size=len(trusted[0][0]),
            module__output_size=2,
            max_epochs=beta_epochs[1],
            train_split=None,
            lr=beta_learning_rate[1],
            batch_size=beta_batch_size[1],
            optimizer__weight_decay=beta_weight_decay[1],
            iterator_train__shuffle=True,
            verbose=0)
        )
    fu_torched_calibrated_data.to_csv("{}/fu-torched-calibrated-perfs.csv".format(dir), index=False)
    # reweighted training again, this time with the calibrated auxiliaries
    print("full-calibrated")
    full_torched_calibrated, full_torched_data_calibrated, full_torched_beta_calibrated = irbl(
        trusted, untrusted, test, ft_torched_calibrated, fu_torched_calibrated, optimizer, batch_size, epochs, learning_rate, weight_decay, hidden_size)
    full_torched_data_calibrated.to_csv("{}/full-torched-calibrated-perfs.csv".format(dir), index=False)
    full_torched_beta_calibrated.to_csv("{}/full-torched-calibrated-beta.csv".format(dir), index=False, header=False)
    # loss-correction baseline built from fu's predictions
    print("glc")
    _, glc_data, C = glc(
        trusted, untrusted, test, fu_torched, optimizer, batch_size, epochs, learning_rate, weight_decay, hidden_size)
    glc_data.to_csv("{}/glc-perfs.csv".format(dir), index=False)
    C.to_csv("{}/glc-beta.csv".format(dir), index=False, header=False)
    # naive baseline: concatenate trusted and untrusted, standard CE loss
    print("mixed")
    _, mixed_data = normal(torch.utils.data.ConcatDataset([trusted, untrusted]), test, optimizer, batch_size, epochs,
                           learning_rate, weight_decay, hidden_size)
    mixed_data.to_csv("{}/mixed-perfs.csv".format(dir), index=False)
    # noise-robust baseline trained with the unhinged loss
    print("symetric")
    _, symetric_data = normal(torch.utils.data.ConcatDataset([trusted, untrusted]), test, optimizer, batch_size, epochs,
                              learning_rate, weight_decay, hidden_size, loss="unhinged")
    symetric_data.to_csv("{}/symetric-perfs.csv".format(dir), index=False)
    # per-class density-ratio reweighting baseline
    print("kdr")
    _, kdrnc_data, kdrnc_beta = kdr(
        trusted, untrusted, test, optimizer, batch_size, epochs, learning_rate, weight_decay, hidden_size)
    kdrnc_data.to_csv("{}/kdr-perfs.csv".format(dir), index=False)
    kdrnc_beta.to_csv("{}/kdr-beta.csv".format(dir), index=False, header=False)
    return
def learning_curve_plot(figdir, resdir, name, p, q, criteria):
    """Plot per-epoch learning curves of all methods for one (name, p, q) run.

    criteria selects the CSV column (criteria + "s"); accuracies are plotted
    as error (1 - acc), validation losses are plotted as-is.
    """
    total = pd.read_csv("{}/{}/total-perfs.csv".format(resdir, name))
    figures_directory = "{}/{}-{}-{}".format(figdir, name, p, q)
    if not os.path.exists(figures_directory):
        os.makedirs(figures_directory)
    results_directory = "{}/{}-{}-{}".format(resdir, name, p, q)
    ftt = pd.read_csv("{}/ft-torched-perfs.csv".format(results_directory))
    fut = pd.read_csv("{}/fu-torched-perfs.csv".format(results_directory))
    bt = pd.read_csv("{}/full-torched-perfs.csv".format(results_directory))
    btc = pd.read_csv("{}/full-torched-calibrated-perfs.csv".format(results_directory))
    mixed = pd.read_csv("{}/mixed-perfs.csv".format(results_directory))
    glc = pd.read_csv("{}/glc-perfs.csv".format(results_directory))
    symetric = pd.read_csv("{}/symetric-perfs.csv".format(results_directory))
    if criteria == "mean_valid_losse":
        # losses are already "lower is better": plot them directly
        ftt_error = ftt[criteria + "s"]
        fut_error = fut[criteria + "s"]
        bt_error = bt[criteria + "s"]
        btc_error = btc[criteria + "s"]
        mixed_error = mixed[criteria + "s"]
        glc_error = glc[criteria + "s"]
        symetric_error = symetric[criteria + "s"]
        total_error = total[criteria + "s"]
    else:
        # accuracies are converted to error rates
        ftt_error = 1 - ftt[criteria + "s"]
        fut_error = 1 - fut[criteria + "s"]
        bt_error = 1 - bt[criteria + "s"]
        btc_error = 1 - btc[criteria + "s"]
        mixed_error = 1 - mixed[criteria + "s"]
        glc_error = 1 - glc[criteria + "s"]
        symetric_error = 1 - symetric[criteria + "s"]
        total_error = 1 - total[criteria + "s"]
    fig, ax = plt.subplots()
    ax.set_xlabel("epochs")
    ax.set_xticks(range(len(ftt_error)))
    ax.set_xticklabels(range(1, len(ftt_error) + 1))
    ax.set_ylabel("error")
    ax.plot(ftt_error, label='trusted')
    # BUG FIX: label typo 'untrtusted' -> 'untrusted'
    ax.plot(fut_error, label='untrusted')
    ax.plot(bt_error, label='irbl')
    ax.plot(btc_error, label='irblc')
    ax.plot(mixed_error, label='mixed')
    ax.plot(glc_error, label='glc')
    ax.plot(symetric_error, label='symmetric')
    ax.plot(total_error, label='total')
    ax.legend()
    # BUG FIX (all savefig calls): dropped the invalid `bbox='tight'` kwarg,
    # rejected by matplotlib >= 3.3; bbox_inches="tight" is the real option.
    fig.savefig("{}/learning-curve-{}.pdf".format(figures_directory, criteria), bbox_inches="tight", format="pdf")
    plt.close(fig)
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.set_xlabel("epochs")
    ax.set_xticks(range(len(ftt_error)))
    ax.set_xticklabels(range(1, len(ftt_error) + 1))
    ax.set_ylabel("loss")
    ax.plot(btc_error, label='irbl', color="black")
    ax.plot(total_error, label='total', color="black", linestyle="--")
    ax.plot(mixed_error, label='mixed', color="black", linestyle="-.")
    ax.plot(ftt_error, label='trusted', color="black", linestyle=":")
    ax.plot(fut_error, label='untrusted', color="black", linestyle="--", marker=".")
    ax.legend(loc='upper right')
    fig.savefig("{}/learning-curve-simple-{}.pdf".format(figures_directory, criteria), bbox_inches="tight", format="pdf")
    plt.close(fig)
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.set_xlabel("epochs")
    ax.set_xticks(range(len(ftt_error)))
    ax.set_xticklabels(range(1, len(ftt_error) + 1))
    ax.set_ylabel("loss")
    ax.plot(btc_error, label='irbl', color="black")
    ax.plot(total_error, label='total', color="black", linestyle="--")
    ax.plot(glc_error, label='glc', color="black", linestyle="-.")
    ax.plot(symetric_error, label='rll', color="black", linestyle=":")
    ax.legend(loc='upper right')
    fig.savefig("{}/learning-curve-competitors-{}.pdf".format(figures_directory, criteria), bbox_inches="tight", format="pdf")
    plt.close(fig)
def hist_plot(figdir, resdir, name, p, q):
    """Histogram the learned beta weights of each reweighting method, split by
    clean vs corrupted samples, for one (name, p, q) run."""
    figures_directory = "{}/{}-{}-{}".format(figdir, name, p, q)
    if not os.path.exists(figures_directory):
        os.makedirs(figures_directory)
    results_directory = "{}/{}-{}-{}".format(resdir, name, p, q)
    flipped = pd.read_csv("{}/flipped.csv".format(results_directory)).to_numpy().flatten()
    bt = pd.read_csv("{}/full-torched-beta.csv".format(results_directory)).to_numpy().flatten()
    btc = pd.read_csv("{}/full-torched-calibrated-beta.csv".format(results_directory)).to_numpy().flatten()
    kdr = pd.read_csv("{}/kdr-beta.csv".format(results_directory)).to_numpy().flatten()
    # one figure per method; flipped == 0 marks clean, 1 marks corrupted labels
    for betas, stem in ((bt, "full-torched"), (btc, "full-torched-calibrated"), (kdr, "kdr")):
        fig, ax = plt.subplots(figsize=(8, 4))
        ax.hist([betas[flipped == 0], betas[flipped == 1]],
                label=["cleaned", "corrupted"], bins=20, color=["lightgray", "dimgray"])
        ax.legend(loc='upper right')
        # BUG FIX: dropped the invalid `bbox='tight'` savefig kwarg (rejected
        # by matplotlib >= 3.3); bbox_inches="tight" is the supported option.
        fig.savefig("{}/{}-hist.pdf".format(figures_directory, stem),
                    bbox_inches="tight", format="pdf")
        plt.close(fig)
    return
def box_plot2(figdir, resdir, name, p, qs):
    """Box-plot the beta distributions (cleaned vs corrupted samples) across
    corruption levels qs, one figure per reweighting method."""
    figures_directory = "{}/{}-{}".format(figdir, name, p)
    if not os.path.exists(figures_directory):
        os.makedirs(figures_directory)
    bt_list = []
    bt_f_list = []
    bt_t_list = []
    btc_list = []
    btc_f_list = []
    btc_t_list = []
    kdr_list = []
    kdr_f_list = []
    kdr_t_list = []
    for q_idx, q in enumerate(qs):
        results_directory = "{}/{}-{}-{}".format(resdir, name, p, q)
        flipped = pd.read_csv("{}/flipped.csv".format(results_directory)).to_numpy().flatten()
        bt = pd.read_csv("{}/full-torched-beta.csv".format(results_directory)).to_numpy().flatten()
        btc = pd.read_csv("{}/full-torched-calibrated-beta.csv".format(results_directory)).to_numpy().flatten()
        kdr = pd.read_csv("{}/kdr-beta.csv".format(results_directory)).to_numpy().flatten()
        # NOTE(review): flipped == 2 entries are collected but never plotted
        # below — confirm whether that third category is still needed.
        bt_list.append(bt[flipped == 0])
        bt_f_list.append(bt[flipped == 1])
        bt_t_list.append(bt[flipped == 2])
        btc_list.append(btc[flipped == 0])
        btc_f_list.append(btc[flipped == 1])
        btc_t_list.append(btc[flipped == 2])
        kdr_list.append(kdr[flipped == 0])
        kdr_f_list.append(kdr[flipped == 1])
        kdr_t_list.append(kdr[flipped == 2])
    c = 'lightgray'
    c0_dict = {
        'patch_artist': True,
        'boxprops': dict(facecolor=c, color="black"),
        'capprops': dict(color="black"),
        'flierprops': dict(color="black"),
        'medianprops': dict(color="black"),
        'whiskerprops': dict(color="black")}
    c = 'dimgray'
    c1_dict = {
        'patch_artist': True,
        'boxprops': dict(facecolor=c, color="black"),
        'capprops': dict(color="black"),
        'flierprops': dict(color="black"),
        'medianprops': dict(color="black"),
        'whiskerprops': dict(color="black")}
    # BUG FIX (all savefig calls): dropped the invalid `bbox='tight'` kwarg,
    # rejected by matplotlib >= 3.3; bbox_inches="tight" is the real option.
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.set_xlabel("q = 1-r")
    bp1 = ax.boxplot(bt_list[::-1], showfliers=False, labels=sorted(qs), **c0_dict)
    bp2 = ax.boxplot(bt_f_list[::-1], showfliers=False, labels=sorted(qs), **c1_dict)
    ax.legend([bp1["boxes"][0], bp2["boxes"][0]], ['cleaned', 'corrupted'], loc='upper right')
    fig.savefig("{}/full-torched-box-plot-2.pdf".format(figures_directory), bbox_inches="tight", format="pdf")
    plt.close(fig)
    fig2, ax2 = plt.subplots(figsize=(8, 4))
    ax2.set_xlabel("q = 1-r")
    bp1 = ax2.boxplot(btc_list[::-1], showfliers=False, labels=sorted(qs), **c0_dict)
    bp2 = ax2.boxplot(btc_f_list[::-1], showfliers=False, labels=sorted(qs), **c1_dict)
    ax2.legend([bp1["boxes"][0], bp2["boxes"][0]], ['cleaned', 'corrupted'], loc='upper right')
    fig2.savefig("{}/full-torched-calibrated-box-plot-2.pdf".format(figures_directory), bbox_inches="tight", format="pdf")
    plt.close(fig2)
    fig3, ax3 = plt.subplots(figsize=(8, 4))
    ax3.set_xlabel("q = 1-r")
    bp1 = ax3.boxplot(kdr_list[::-1], showfliers=False, labels=sorted(qs), **c0_dict)
    bp2 = ax3.boxplot(kdr_f_list[::-1], showfliers=False, labels=sorted(qs), **c1_dict)
    ax3.legend([bp1["boxes"][0], bp2["boxes"][0]], ['cleaned', 'corrupted'], loc='upper right')
    fig3.savefig("{}/kdr-box-plot-2.pdf".format(figures_directory), bbox_inches="tight", format="pdf")
    plt.close(fig3)
    return
def mean_area_under_error_curve(resdir, criteria):
    """Compute, per p, the mean (over datasets) area under the error-vs-q curve
    of every strategy and write it to area-<criteria>.csv.

    :param resdir: directory containing results-<criteria>.csv
    :param criteria: metric suffix used in the results file name (e.g. "acc")
    """
    # np.trapz was removed in NumPy 2.0 in favour of np.trapezoid; support both
    trapezoid = getattr(np, "trapezoid", None) or getattr(np, "trapz")
    results = pd.read_csv("{}/results-{}.csv".format(resdir, criteria))
    # one row per (p, dataset); each method column becomes the list of its
    # errors over all q values
    agg = results.groupby(["p", "name"]).agg(list).reset_index()
    methods = ["trusted", "untrusted", "irbl", "irblc", "glc", "mixed", "symetric", "total"]
    area_cols = []
    for method in methods:
        col = "area_under_error_curve_{}".format(method)
        agg[col] = agg[method].apply(trapezoid)
        area_cols.append(col)
    # restrict the mean to the numeric area columns: averaging the list-valued
    # raw columns raises a TypeError on pandas >= 2.0 (older pandas silently
    # dropped them, so the output columns are unchanged)
    final = agg.groupby("p")[area_cols].mean().reset_index()
    final.to_csv("{}/area-{}.csv".format(resdir, criteria), index=False)
def wilcoxon_area_under_error_curve(resdir, criteria):
    """Run Wilcoxon tests on the per-dataset areas under the error-vs-q curves,
    comparing irblc against every other strategy, and store the scores and
    hypothesis decisions in wilcoxon-area-<criteria>.csv (one row per p).

    :param resdir: directory containing results-<criteria>.csv
    :param criteria: metric suffix used in the results file name (e.g. "acc")
    """
    results = pd.read_csv("{}/results-{}.csv".format(resdir, criteria))
    agg = results.groupby(["p", "name"]).agg(list).reset_index()
    methods = ["trusted", "untrusted", "irbl", "irblc", "glc", "mixed", "symetric", "total"]
    # area under the error curve of each strategy, per (p, dataset)
    for method in methods:
        agg["area_under_error_curve_{}".format(method)] = agg[method].apply(np.trapz)
    agg = agg.drop(
        ["q", "trusted", "untrusted", "irbl", "irblc", "mixed", "glc", "symetric", "total"], axis=1)
    final = agg.groupby("p").agg(list).reset_index()
    # Wilcoxon test of irblc against every rival, per p
    for rival in ["glc", "trusted", "untrusted", "mixed", "symetric", "total"]:
        score_col = "area_under_error_curve_irblc_{}_score".format(rival)
        hyp_col = "area_under_error_curve_irblc_{}_hypothesis".format(rival)
        rival_col = "area_under_error_curve_{}".format(rival)
        final[[score_col, hyp_col]] = pd.DataFrame(
            final.apply(lambda row, rc=rival_col: wilcoxon_test(
                row["area_under_error_curve_irblc"], row[rc]), axis=1).values.tolist())
    # keep only p and the test columns
    final = final.drop(
        ["name"] + ["area_under_error_curve_{}".format(m) for m in methods], axis=1)
    final.to_csv("{}/wilcoxon-area-{}.csv".format(resdir, criteria), index=False)
def error_curve_plot(figdir, resdir, name, p, qs, criteria):
    """Plot the error of each strategy as a function of q for one dataset/p pair.

    Reads results-<criteria>.csv from *resdir*, filters it to dataset *name*,
    trusted proportion *p* and noise levels *qs*, then saves three pdf figures
    (all methods / simplified baselines / competitor methods) into
    <figdir>/<name>-<p>/.

    :param figdir: root directory for the generated figures
    :param resdir: directory containing the results csv
    :param name: dataset name
    :param p: trusted-data proportion to plot
    :param qs: clean-label proportions shown on the x-axis
    :param criteria: metric suffix of the results file (e.g. "acc")
    """
    figures_directory = "{}/{}-{}".format(figdir, name, p)
    if not os.path.exists(figures_directory):
        os.makedirs(figures_directory)
    res = pd.read_csv("{}/results-{}.csv".format(resdir,criteria))
    # keep only the rows of this dataset/p combination and the requested qs
    res = res[(res["name"] == name) & (res["p"]==p) & (res["q"].isin(qs))]
    # figure 1: every strategy, colored lines, rows in csv order
    fig, ax = plt.subplots()
    ax.set_xlabel("q = 1-r")
    ax.set_xticks(range(len(qs)))
    ax.set_xticklabels(qs)
    ax.set_ylabel("error")
    ax.plot(res["trusted"], label='trusted')
    ax.plot(res["untrusted"], label='untrusted')
    ax.plot(res["irbl"], label='irbl')
    ax.plot(res["irblc"], label='irblc')
    ax.plot(res["mixed"], label='mixed')
    ax.plot(res["glc"], label='glc')
    ax.plot(res["symetric"], label='symmetric')
    ax.plot(res["total"], label='total')
    ax.legend()
    fig.savefig("{}/error-curve-{}.pdf".format(figures_directory, criteria), bbox = 'tight', bbox_inches="tight", format="pdf")
    plt.close(fig)
    # figure 2: black-and-white version with the main baselines; values are
    # reversed ([::-1]) so the x-axis runs over sorted(qs)
    fig, ax = plt.subplots(figsize=(8,4))
    ax.set_xlabel("q = 1-r")
    ax.set_xticks(range(len(qs)))
    ax.set_xticklabels(sorted(qs))
    ax.set_ylabel("error")
    ax.plot(res["irblc"].values[::-1], label='irbl', color="black")
    ax.plot(res["total"].values[::-1], label='total', color="black", linestyle="--")
    ax.plot(res["mixed"].values[::-1], label='mixed', color="black", linestyle="-.")
    ax.plot(res["trusted"].values[::-1], label='trusted', color="black", linestyle=":")
    ax.plot(res["untrusted"].values[::-1], label='untrusted', color="black", linestyle="--",marker=".")
    ax.legend(loc = 'upper right')
    fig.savefig("{}/error-curve-simple-{}.pdf".format(figures_directory, criteria), bbox = 'tight', bbox_inches="tight", format="pdf")
    plt.close(fig)
    # figure 3: comparison against the competitor methods (glc, rll/symetric)
    fig, ax = plt.subplots(figsize=(8,4))
    ax.set_xlabel("q = 1-r")
    ax.set_xticks(range(len(qs)))
    ax.set_xticklabels(sorted(qs))
    ax.set_ylabel("error")
    ax.plot(res["irblc"].values[::-1], label='irbl', color="black")
    ax.plot(res["total"].values[::-1], label='total', color="black", linestyle="--")
    ax.plot(res["glc"].values[::-1], label='glc', color="black", linestyle="-.")
    ax.plot(res["symetric"].values[::-1], label='rll', color="black", linestyle=":")
    ax.legend(loc = 'upper right')
    fig.savefig("{}/error-curve-competitors-{}.pdf".format(figures_directory, criteria), bbox = 'tight', bbox_inches="tight", format="pdf")
    plt.close(fig)
def generate_results(resdir, names, ps, qs, criteria):
    """Collect, for every (dataset, p, q) combination, the best error reached
    by each training strategy and write everything to results-<criteria>.csv.

    The error is the minimum validation loss when *criteria* is
    'mean_valid_losse', otherwise the minimum of (1 - metric), e.g. for 'acc'.

    BUGFIX: the previous version computed all nine error series but only wrote
    the 'kdr' column, although the downstream consumers (generate_wilcoxon,
    error_curve_plot, box_plot2, ...) read the trusted/untrusted/irbl/irblc/
    mixed/glc/symetric/total columns as well. All columns are now written.

    :param resdir: directory containing the per-run performance csv files
    :param names: dataset names
    :param ps: trusted-data proportions
    :param qs: clean-label proportions
    :param criteria: metric column prefix ('acc' or 'mean_valid_losse')
    """
    records = []
    for name in names:
        for p in ps:
            for q in qs:
                complete_resdir = "{}/{}-{}-{}".format(resdir, name, p, q)
                # one performance file per strategy; 'total' is the reference
                # model trained on the full clean training set
                sources = {
                    "trusted": pd.read_csv("{}/ft-torched-perfs.csv".format(complete_resdir)),
                    "untrusted": pd.read_csv("{}/fu-torched-perfs.csv".format(complete_resdir)),
                    "irbl": pd.read_csv("{}/full-torched-perfs.csv".format(complete_resdir)),
                    "irblc": pd.read_csv("{}/full-torched-calibrated-perfs.csv".format(complete_resdir)),
                    "mixed": pd.read_csv("{}/mixed-perfs.csv".format(complete_resdir)),
                    "glc": pd.read_csv("{}/glc-perfs.csv".format(complete_resdir)),
                    "symetric": pd.read_csv("{}/symetric-perfs.csv".format(complete_resdir)),
                    "total": pd.read_csv("{}/{}/total-perfs.csv".format(resdir, name)),
                    "kdr": pd.read_csv("{}/kdr-perfs.csv".format(complete_resdir)),
                }
                record = {"name": name, "p": p, "q": q}
                for method, perfs in sources.items():
                    if criteria == "mean_valid_losse":
                        # losses: lower is better, take the best epoch directly
                        record[method] = np.min(perfs[criteria + "s"])
                    else:
                        # metrics like accuracy: convert to error first
                        record[method] = np.min(1 - perfs[criteria + "s"])
                records.append(record)
    res = pd.DataFrame(records, columns=["name", "p", "q", "trusted", "untrusted", "irbl",
                                         "irblc", "mixed", "glc", "symetric", "total", "kdr"])
    res.to_csv("{}/results-{}.csv".format(resdir, criteria), index=False)
def generate_wilcoxon(resdir, criteria):
    """Run pairwise Wilcoxon tests on the per-(p, q) error lists and store the
    scores in wilcoxon-<criteria>.csv, sorted by |irblc vs glc| score.

    :param resdir: directory containing results-<criteria>.csv
    :param criteria: metric suffix used in the results file name (e.g. "acc")
    :return: tuple (wins, losses) counting (p, q) cells where the irblc-vs-glc
        score exceeds +1.96 resp. falls below -1.96
    """
    results = pd.read_csv("{}/results-{}.csv".format(resdir, criteria))
    agg = results.groupby(["p", "q"]).agg(list).reset_index()
    # every comparison pair, in the original column order
    comparisons = [("irblc", "glc"), ("irblc", "mixed"), ("irblc", "trusted"),
                   ("irblc", "untrusted"), ("irblc", "symetric"), ("irblc", "total"),
                   ("mixed", "total"), ("irblc", "kdr")]
    for left, right in comparisons:
        cols = ["{}_{}_score".format(left, right), "{}_{}_hypothesis".format(left, right)]
        # bind left/right as defaults so the lambda does not late-bind the loop vars
        agg[cols] = pd.DataFrame(
            agg.apply(lambda row, l=left, r=right: wilcoxon_test(row[l], row[r]),
                      axis=1).values.tolist())
    # order by strength of the irblc-vs-glc signal and drop the raw lists
    agg = agg.reindex(agg.irblc_glc_score.abs().sort_values(ascending=False).index).drop(
        ["name", "trusted", "untrusted", "irbl", "irblc", "mixed", "glc", "symetric", "total", "kdr"], axis=1)
    agg.to_csv("{}/wilcoxon-{}.csv".format(resdir, criteria), index=False)
    return ((agg["irblc_glc_score"] > 1.96).sum(), (agg["irblc_glc_score"] < -1.96).sum())
def wilcoxon_plot(figdir, resdir, criteria, comp1, comp2):
    """Draw a win/tie/loss scatter of the Wilcoxon scores of *comp1* vs *comp2*
    over the (q, p) grid and save it as a pdf.

    :param figdir: directory that receives the figure
    :param resdir: directory containing wilcoxon-<criteria>.csv
    :param criteria: metric suffix used in the csv file name (e.g. "acc")
    :param comp1: first method of the comparison (e.g. "irblc")
    :param comp2: second method of the comparison (e.g. "glc")
    """
    scores = pd.read_csv("{}/wilcoxon-{}.csv".format(resdir, criteria))
    # spread the p values evenly on the y-axis instead of using their raw scale
    p_to_y = {0.02: 0, 0.05: 0.5, 0.1: 1, 0.25: 1.5}
    p_values = scores["p"].sort_values().unique()
    q_values = scores["q"].sort_values().unique()
    column = "{}_{}_score".format(comp1, comp2)
    # |score| > 1.96 corresponds to significance at the 5% level
    wins = scores[scores[column] > 1.96]
    losses = scores[scores[column] < -1.96]
    ties = scores[(scores[column] < 1.96) & (scores[column] > -1.96)]
    fig, ax = plt.subplots(figsize=(5, 2.5))
    ax.scatter(wins["q"], np.array([p_to_y[x] for x in wins["p"].values]),
               color="black", facecolor="white", label="win")
    ax.scatter(ties["q"], np.array([p_to_y[x] for x in ties["p"].values]),
               color="black", marker=".", s=1, label="tie")
    ax.scatter(losses["q"], np.array([p_to_y[x] for x in losses["p"].values]),
               color="black", label="loss")
    ax.set_xlabel("q = 1-r")
    ax.set_ylabel("p")
    ax.set_xticks(q_values)
    ax.set_yticks(np.array([p_to_y[x] for x in p_values]))
    ax.set_xticklabels(q_values)
    ax.set_yticklabels(p_values)
    plt.tight_layout()
    target = "{}/wilcoxon-{}-{}-{}.pdf".format(figdir, criteria, comp1, comp2)
    fig.savefig(target, bbox='tight', bbox_inches="tight", format="pdf")
    plt.close(fig)
# ---- shared training hyper-parameters ---------------------------------------
optimizer = "sgd"
# '(lo, hi)' tuples appear to be search ranges passed to loop() for the beta
# model; NOTE(review): every range is degenerate (lo == hi), so effectively a
# single fixed value is used -- confirm this is intended
beta_batch_size = (24, 24)
batch_size = 24
beta_epochs = (20, 20)
epochs = 20
beta_learning_rate = (0.005, 0.005)
learning_rate = 0.005
beta_weight_decay = (1e-6, 1e-6)
weight_decay = 1e-6
beta_hidden_size = (0, 0)
hidden_size = 0
# probability-calibration method used for the calibrated ("irblc") variant
calibration_method = "isotonic"
# lazy dataset constructors (defined elsewhere in this file); index-aligned
# with 'names' below
dss = [
    ad,
    banknote,
    ibn_sina,
    eeg,
    ijcnn1,
    adult,
    phishing,
    spam,
    musk,
    australian,
    diabetes,
    breast,
    german,
    fourclass,
    svmguide3,
    svmguide1,
    web,
    hiva,
    sylva,
    zebra,
    ]
# human-readable dataset names, same order as 'dss'
names = [
    "ad",
    "banknote",
    "ibn_sina",
    "eeg",
    "ijcnn1",
    "adult",
    "phishing",
    "spam",
    "musk",
    "australian",
    "diabetes",
    "breast",
    "german",
    "fourclass",
    "svmguide3",
    "svmguide1",
    "web",
    "hiva",
    "sylva",
    "zebra",
    ]
# label-corruption models and their output-directory names.
# NOTE(review): cr_kinds holds noisy_not_at_random while cr_names[0] is "ncar"
# (and the "nnar" name is commented out) -- the pairing looks swapped; confirm
cr_kinds = [#noisy_completly_at_random,
            noisy_not_at_random
            ]
cr_names = ["ncar",
            #"nnar"
            ]
# trusted-data proportions (p) and clean-label ratios (q = 1 - noise rate)
ps = [0.02,0.05,0.1,0.25]
qs = [1.0, 0.9, 0.8, 0.7,0.6, 0.5, 0.4,0.3,0.2,0.1, 0.0]
# ---- main experiment loop ---------------------------------------------------
# For every corruption model x dataset x (p, q): corrupt the untrusted labels,
# train all competing strategies, and generate per-combination plots/summaries.
for cr_idx, cr_kind in enumerate(cr_kinds):
    base_dir = cr_names[cr_idx]  # one output directory per corruption model
    for ds_idx, ds_lazy in enumerate(dss):
        name = names[ds_idx]
        print(name)
        ds_dir = "{}/{}".format(base_dir, name)
        if not os.path.exists(ds_dir):
            os.makedirs(ds_dir)
        dataset = ds_lazy()  # constructors are lazy; materialize only here
        train, test = split_scale_dataset(dataset, 0.2)
        print("total")
        # reference model trained on the full, uncorrupted training set
        total_model, total = normal(train, test, optimizer, batch_size, epochs, learning_rate, weight_decay, hidden_size)
        total.to_csv("{}/total-perfs.csv".format(ds_dir), index=False)
        for _, p in enumerate(ps):
            # p = share of the training data whose labels stay trusted
            trusted, untrusted = split_dataset(train, (1 - p))
            for _, q in enumerate(qs):
                print(name, p, q)
                dir = "{}-{}-{}".format(ds_dir, p, q)
                if not os.path.exists(dir):
                    os.makedirs(dir)
                #corrupted = corrupt_dataset(untrusted, cr_kind, 1 - q)
                #Use with NNAR
                # flip a (1 - q) fraction of the untrusted labels; the flip
                # decision uses the reference model's class-1 probability
                # (noise-not-at-random variant)
                corrupted = corrupt_dataset(untrusted, lambda y,ratio: cr_kind(torch.nn.functional.softmax(total_model(torch.from_numpy(untrusted[:][0])),dim=1)[:,1].detach().numpy(),y,ratio), 1 - q)
                # observed flip rate, a sanity check against 1 - q
                print(np.sum(corrupted[:][1] != untrusted[:][1]) / len(corrupted[:][1] != untrusted[:][1]))
                # flipped.csv: 2.0 marks trusted rows, 1/0 marks flipped/clean untrusted rows
                pd.Series(np.full(len(trusted), 2.0)).append(pd.Series(corrupted[:][1] != untrusted[:][1]).astype(int)).to_csv(
                    "{}/flipped.csv".format(dir), index=False, header=False)
                # train and evaluate every competing strategy on (trusted, corrupted)
                loop(dir, trusted, corrupted, test, optimizer, beta_batch_size,
                     batch_size, beta_epochs, epochs, beta_learning_rate,
                     learning_rate, beta_weight_decay, weight_decay, beta_hidden_size, hidden_size, calibration_method)
                hist_plot("{}-figures".format(base_dir), base_dir, name, p, q)
                learning_curve_plot("{}-figures".format(base_dir), base_dir, name, p, q, "mean_valid_losse")
                learning_curve_plot("{}-figures".format(base_dir), base_dir, name, p, q, "acc")
            # per-(dataset, p) summaries over all qs
            generate_results(base_dir, [name], [p], qs, "acc")
            error_curve_plot("{}-figures".format(base_dir), base_dir, name, p, qs, "acc")
            box_plot2("{}-figures".format(base_dir), base_dir, name, p, qs)
# ---- global summaries -------------------------------------------------------
# NOTE(review): base_dir still holds the value from the last loop iteration, so
# the global summary covers only one corruption model -- confirm intended
generate_results(base_dir, names, ps, qs, "acc")
generate_wilcoxon(base_dir, "acc")
wilcoxon_plot("{}-figures".format(base_dir),base_dir,"acc","irblc","glc")
wilcoxon_plot("{}-figures".format(base_dir),base_dir,"acc","irblc","mixed")
wilcoxon_plot("{}-figures".format(base_dir),base_dir,"acc","irblc","trusted")
wilcoxon_plot("{}-figures".format(base_dir),base_dir,"acc","irblc","untrusted")
wilcoxon_plot("{}-figures".format(base_dir),base_dir,"acc","irblc","total")
wilcoxon_plot("{}-figures".format(base_dir),base_dir,"acc","irblc","symetric")
wilcoxon_plot("{}-figures".format(base_dir),base_dir,"acc","irblc","kdr")
# critical-difference diagram over the summed errors of all datasets
# (method_names is index-aligned with the friedman_test arguments: rll = symetric)
results = pd.read_csv("{}/results-{}.csv".format(base_dir,"acc"))
method_names = ["trusted","rll","irbl","glc","mixed"]
final = results.groupby(["name"]).sum().reset_index()
avranks = friedman_test(final["trusted"].values,final["symetric"].values,final["irblc"].values,final["glc"].values,
                        final["mixed"].values,reverse=False)[2]
cd = Orange.evaluation.compute_CD(avranks, 20)  # 20 = number of datasets
Orange.evaluation.graph_ranks(avranks, method_names, cd=cd, width=6, textspace=1)
# NOTE(review): the second format() argument ("acc") is ignored by the single-{} template
plt.savefig("{}/cd.pdf".format("{}-figures".format(base_dir), "acc"), bbox = 'tight', bbox_inches="tight", format="pdf")
print(wilcoxon_test(final["irblc"].values,final["glc"].values))
print(wilcoxon_test(final["irblc"].values,final["mixed"].values))
# convert errors to accuracy percentages (100 * (1 - error)) and aggregate per p
generate_results(base_dir, names, ps, qs, "acc")
results = pd.read_csv("{}/results-{}.csv".format(base_dir,"acc"))
results["irblc"] = 100*(1 - results["irblc"])
results["trusted"] = 100*(1 - results["trusted"])
results["symetric"] = 100*(1 - results["symetric"])
results["glc"] = 100*(1 - results["glc"])
results["mixed"] = 100*(1 - results["mixed"])
results["total"] = 100*(1 - results["total"])
results["kdr"] = 100*(1 - results["kdr"])
results = results.drop(["untrusted","irbl"],axis=1)
results.groupby(["p","name"]).agg(["mean","std"]).drop("q",axis=1,level=0).reset_index().groupby("p").mean().to_csv("{}/aggregated-results-{}.csv".format(base_dir,"acc"), index=False)
| 67,075 | 35.673592 | 199 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/embedding.py | from sentence_transformers import SentenceTransformer, util
from numpy import add
from torch import Tensor
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def get_model(name):
    """Load (downloading if necessary) the SentenceTransformer called *name*.

    :param name: the name of the model
    :return: the loaded SentenceTransformer instance
    """
    # 'roberta-large-nli-stsb-mean-tokens' gave the best performance
    transformer = SentenceTransformer(name)
    return transformer
def get_embeddings(sentences, model):
    """Encode *sentences* with *model* and return the embeddings as a list.

    :param sentences: the sentences as array to calculate the embeddings from
    :param model: the SentenceTransformer model
    :return: list of embeddings (tensor objects)
    """
    encoded = model.encode(sentences, convert_to_tensor=True)
    return [vector for vector in encoded]
def calculate_divisor(n):
    """Return the number of unordered sentence pairs that get compared, used
    as the divisor when averaging pairwise similarities.

    :param n: the amount of sentences
    :return: the amount of comparisons, C(n, 2), with a minimum of 1
    """
    # 1 + 2 + ... + (n - 1) == n * (n - 1) / 2 pairs; 'or 1' keeps the
    # original minimum of 1 (for n <= 1) so it is always a safe divisor
    pair_count = sum(range(1, n))
    return pair_count or 1
def get_similarity(embeddings):
    """Return the average pairwise cosine similarity of the given embeddings.

    :param embeddings: array of embeddings (tensor objects)
    :return: 0.0 for an empty list, 1.0 for a single embedding, otherwise the
        mean cosine similarity over all unordered pairs, rounded to 2 decimals
    """
    length = len(embeddings)
    if length == 0:  # when array is empty there is no similarity
        return 0.0
    if length == 1:  # a single sentence is trivially similar to itself
        return 1.0
    similarity = 0
    # iterate over all unordered pairs (i, z) with i < z.
    # BUGFIX: dropped the 'other_embeddings' list that the original built on
    # every outer iteration and never used.
    for i in range(length - 1):
        for z in range(i + 1, length):
            # cosine similarity between the two sentence embeddings
            score = util.pytorch_cos_sim(embeddings[i], embeddings[z])[0]
            similarity += score.item()
    return round(similarity / calculate_divisor(length), 2)  # rounding for better representation
def calculate_avg_embedding(embeddings):
    """Compute the component-wise average of all sentence embeddings.

    :param embeddings: array of embedding tensors (1024-dimensional)
    :return: the average embedding as a list of 1024 floats; all zeros when
        the input is empty
    """
    if not len(embeddings):
        # no embeddings: return the default zero vector (avoids dividing by 0)
        return [0] * 1024
    total = [0] * 1024
    for vector in embeddings:
        # move the tensor to the cpu and accumulate it component-wise
        total = add(total, vector.cpu().numpy())
    return [component / len(embeddings) for component in total]
def reduce_dimension(embeddings):
    """Project the high-dimensional embeddings down to a few principal
    components so they can be used in classification without drowning out the
    other features.

    :param embeddings: the embeddings with high dimension
    :return: the reduced embeddings (list of lists of rounded floats)
    """
    target_dim = 16  # dimension used for the experiments
    print("Reduce Embeddings to dimension " + str(target_dim))
    pca = PCA(n_components=target_dim)
    standardized = StandardScaler().fit_transform(embeddings)
    transformed = pca.fit_transform(standardized)
    # fraction of the total variance retained by the kept components
    information = sum(pca.explained_variance_ratio_)
    print("Reduced embeddings. Embeddings contain about " + str(round(information, 4) * 100) + " % information")
    # round the components to avoid noisy floating-point representations
    return [[round(value, 6) for value in reduced] for reduced in transformed.tolist()]
def process_video_embeddings(slides, transcript, model):
    """Compute embedding-based similarity features for one video.

    :param slides: array with all lines of the slides (array of strings)
    :param transcript: array with all sentences of the transcript (array of strings)
    :param model: the sentence-embedding model
    :return: tuple (similarity features, average slide embedding,
        average transcript embedding)
    """
    slide_vectors = get_embeddings(slides, model)
    transcript_vectors = get_embeddings(transcript, model)
    # average pairwise similarity inside each source and their difference
    sim_slides = round(get_similarity(slide_vectors), 6)
    sim_transcript = round(get_similarity(transcript_vectors), 6)
    sim_gap = round(abs(sim_slides - sim_transcript), 6)
    # mean embedding of each source, rounded component-wise
    mean_slides = [round(value, 6) for value in calculate_avg_embedding(slide_vectors)]
    mean_transcript = [round(value, 6) for value in calculate_avg_embedding(transcript_vectors)]
    # similarity between the two mean embeddings (slides vs transcript)
    sim_means = round(get_similarity([Tensor(mean_slides), Tensor(mean_transcript)]), 6)
    return [sim_slides, sim_transcript, sim_gap, sim_means], mean_slides, mean_transcript
| 5,370 | 42.314516 | 125 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/files.py | import os
import sys
import csv
import fitz
import processor
import client
import embedding
import re
import scipy
import numpy
import torch
from operator import itemgetter
from SortedCollection import SortedCollection
def load_stop_words(path):
    """Load the stopwords (one per line) from the file at *path*.

    Exits the program when the file cannot be read.

    :param path: the path where the file exists
    :return: array of stopwords without trailing newlines
    """
    try:
        # BUGFIX: use a context manager; the original opened the file and
        # never closed it (leaked file handle)
        with open(path, 'r') as stops:
            return [line.replace("\n", "") for line in stops.readlines()]
    except IOError:
        print("Couldn't load stopwords.txt")
        sys.exit(1)
def load_embd_model(name):
    """Load the sentence-embedding model *name*; exits the program if the
    name cannot be resolved.

    :param name: name of the model
    :return: the loaded model
    """
    try:
        model = embedding.get_model(name)
    except IOError:
        print("Couldn't load sentence-embedding model")
        sys.exit(1)
    return model
def get_embedding_array(name, length):
    """Create one feature-column name per embedding component.

    :param name: base name used as prefix for every column
    :param length: length (number of components) of the embedding
    :return: array like ['name_1', ..., 'name_<length>']
    """
    # a comprehension replaces the original manual while-loop counter
    return [name + "_" + str(i) for i in range(1, length + 1)]
def store_embeddings_to_features(features, embeddings_sli, embeddings_tra):
    """Merge the average slide/transcript embeddings into the per-video
    feature rows, keeping the two knowledge-gain entries at the end.

    :param features: nested feature rows of the videos; every row ends with
        [knowledge_gain_value, knowledge_gain_label]
    :param embeddings_sli: average slide embeddings, one per video/row
    :param embeddings_tra: average transcript embeddings, one per video/row
    :return: the features with the embedding components spliced in
    """
    position = 0  # global row counter indexing into the embedding lists
    for group in features:
        for row_idx, row in enumerate(group):
            knowledge_gain_label = row[-1]
            knowledge_gain_value = row[-2]
            # copy everything except the two trailing knowledge-gain entries
            merged = row[:-2]
            # splice in every component of both average embeddings
            merged.extend(embeddings_sli[position])
            merged.extend(embeddings_tra[position])
            position += 1
            # re-append the knowledge-gain entries at the end
            merged.append(knowledge_gain_value)
            merged.append(knowledge_gain_label)
            group[row_idx] = merged
    return features
def load_csv(path):
    """Open the csv-file at *path* for reading; exits the program on failure.

    :param path: the path of the file
    :return: the open file object (the caller is responsible for closing it)
    """
    try:
        return open(path, 'r', newline='', encoding="ISO-8859-1")
    except IOError:
        print("Couldn't load csv-file")
        sys.exit(1)
def get_z_values(test):
    """Read the knowledge-gain column (last column) of the test csv-file and
    convert the values into z-scores, which are later used to classify the
    knowledge gain into one of three classes.

    :param test: open file object of the knowledge-gain csv-file
    :return: z-scores of all knowledge-gain values (numpy array)
    """
    # NOTE(review): seek(1) skips the first byte of the file; the header row
    # is additionally skipped via the 'first' flag below -- confirm intended
    test.seek(1)
    reader = csv.reader(test, delimiter=',')
    values = []
    first = True  # ignores header
    for row in reader:
        if first:
            first = False
        else:
            values.append(float(row[len(row) - 1]))  # last entry contains the knowledge gain
    print("Convert Knowledge Gain values into nominal ones:")
    # BUGFIX: scipy.mean (a deprecated alias of numpy.mean) was removed from
    # SciPy; use numpy.mean directly
    print("Mean:" + str(numpy.mean(values)))
    print("Standard Deviation:" + str(numpy.std(values)))
    converted = scipy.stats.zscore(values)
    print("Converted.")
    return converted
def process_test(test, name, z_scores, slides_result, transcript_result, embedding_features):
    """Generate all result rows of a video: one row per person who watched it,
    carrying the extracted features plus that person's knowledge gain.

    :param test: open csv-file with the knowledge gain of the persons
    :param name: name of the video
    :param z_scores: z-scores of all knowledge gains (index-aligned with the
        data rows of the file)
    :param slides_result: features of the slides
    :param transcript_result: features of the transcript
    :param embedding_features: embedding features of the video
    :return: rows that represent all information of the video
    """
    name = name.replace("video", "")  # the test file stores names without the 'video' prefix
    rows = []
    found = False  # True once the video's (contiguous) rows have been reached
    pos = -1  # starts at -1 so the first data row (after the header) maps to z_scores[0]
    test.seek(1)  # rewind so every video can be processed with the same file object
    reader = csv.reader(test, delimiter=',')
    for row in reader:
        if name in row:  # checks if the row belongs to the requested video
            # BUGFIX: the flag was never set, so the early exit below was dead
            # code; assumes the rows of one video are contiguous in the file
            found = True
            person_id = [row[1]]
            # visual features sit between the person id and the last three columns
            visual_features = row[2: len(row) - 3]
            knowledge_gain = [row[len(row) - 1]]
            # convert the z-score into one of three nominal knowledge-gain levels
            if -0.5 <= z_scores[pos] <= 0.5:
                knowledge_gain_level = ["Moderate"]
            elif z_scores[pos] < -0.5:
                knowledge_gain_level = ["Low"]
            else:
                knowledge_gain_level = ["High"]
            rows.append([name] + person_id + slides_result + transcript_result + embedding_features
                        + visual_features + knowledge_gain + knowledge_gain_level)
        elif found:
            break  # past the contiguous block of this video's rows: stop early
        pos += 1
    return rows
def create_csv(rows):
    """ Stores all the extracted features into a csv file.
    Writes five files into ./Features: the combined feature set, the text-only
    features, the multimedia-only features and the two reduced sentence
    embeddings (slides and transcript). Every inner row of ``rows`` holds the
    values for one person/video combination, laid out as:
    [video_id, person_id, slide features, transcript features, embedding
    features, visual features, slide embedding dims, transcript embedding
    dims, knowledge gain, knowledge gain level].
    :param rows: rows that contains the information of the videos (every row has information about one video)
    """
    # one output file per feature group
    features = open('./Features/all_features.csv', 'w', newline='')
    text_features = open('./Features/text_features.csv', 'w', newline='')
    multimedia_features = open('./Features/multimedia_features.csv', 'w', newline='')
    avg_slide_embedding = open('./Features/slide_embedding.csv', 'w', newline='')
    avg_transcript_embedding = open('./Features/transcript_embedding.csv', 'w', newline='')
    text_features_writer = csv.writer(text_features, delimiter=',')
    multimedia_features_writer = csv.writer(multimedia_features, delimiter=',')
    features_writer = csv.writer(features, delimiter=',')
    slide_writer = csv.writer(avg_slide_embedding, delimiter=',')
    transcript_writer = csv.writer(avg_transcript_embedding, delimiter=',')
    # column names of the features extracted from the slides ("_sli" suffix)
    slides_features = ["amount_tok_sli",
                       "amount_uni_tok_sli", "ratio_uni_tok_sli", "amount_uni_lemma_sli", "ratio_uni_lemma_sli",
                       "sum_tok_len_sli", "min_tok_len_sli",
                       "avg_tok_len_sli", "max_tok_len_sli", "avg_freq_tok_sli",
                       "avg_trigram_sli", "avg_tetragram_sli", "min_line_len", "avg_line_len",
                       "max_line_len", "min_line_chars", "avg_line_chars", "max_line_chars",
                       "amount_syl_sli", "amount_one_syl_sli", "amount_two_syl_sli",
                       "amount_psyl_sli", "amount_hard_sli", "avg_syl_sli", "ratio_one_syl_sli",
                       "ratio_two_syl_sli", "ratio_psyl_sli", "ratio_hard_sli",
                       "min_age_sli", "avg_age_sli", "max_age_sli",
                       "amount_slides", "sum_lines", "min_lines", "avg_lines", "max_lines",
                       "min_words_slide", "avg_words_slide", "max_words_slide", "flesch_ease_sli", "flesch_kin_sli",
                       "gunning_fog_sli", "smog_sli", "ari_sli", "coleman_sli",
                       "read_time_sli", "amount_adj_sli", "avg_adj_sli",
                       "ratio_adj_sli", "amount_adpos_sli", "avg_adpos_sli", "ratio_adpos_sli",
                       "amount_noun_sli", "avg_noun_sli",
                       "ratio_noun_sli", "amount_pronoun_sli", "avg_pronoun_sli",
                       "ratio_pronoun_sli", "ratio_pronoun_noun_sli", "amount_verb_sli", "avg_verb_sli",
                       "ratio_verb_sli",
                       "amount_main_verb_sli", "avg_main_verb_sli", "ratio_main_verb_sli", "amount_aux_sli",
                       "avg_aux_sli", "ratio_aux_sli",
                       "amount_adverb_sli", "avg_adverb_sli", "ratio_adverb_sli", "amount_coord_conj_sli",
                       "avg_coord_conj_sli", "ratio_coord_conj_sli", "amount_determiner_sli",
                       "avg_determiner_sli", "ratio_determiner_sli",
                       "amount_interj_sli", "avg_interj_sli", "ratio_interj_sli", "amount_num_sli",
                       "avg_num_sli", "ratio_num_sli",
                       "amount_particle_sli", "avg_particle_sli", "ratio_particle_sli", "amount_subord_conj_sli",
                       "avg_subord_conj_sli", "ratio_subord_conj_sli", "amount_foreign_sli",
                       "avg_foreign_sli", "ratio_foreign_sli",
                       "amount_content_word_sli", "avg_content_word_sli", "ratio_content_word_sli",
                       "amount_function_word_sli", "avg_function_word_sli", "ratio_function_word_sli",
                       "amount_filtered_sli", "avg_filtered_sli", "ratio_filtered_sli",
                       "amount_statement_sli", "ratio_statement_sli",
                       "amount_question_sli", "ratio_question_sli", "ADJP_sli", "ratio_ADJP_sli", "avg_ADJP_sli",
                       "ADVP_sli",
                       "ratio_ADVP_sli", "avg_ADVP_sli",
                       "NP_sli", "ratio_NP_sli", "avg_NP_sli", "PP_sli", "ratio_PP_sli", "avg_PP_sli",
                       "S_sli", "ratio_S_sli", "avg_S_sli", "FRAG_sli", "ratio_FRAG_sli", "avg_FRAG_sli",
                       "SBAR_sli", "ratio_SBAR_sli", "avg_SBAR_sli", "SBARQ_sli", "ratio_SBARQ_sli", "avg_SBARQ_sli",
                       "SINV_sli", "ratio_SINV_sli", "avg_SINV_sli", "SQ_sli", "ratio_SQ_sli", "avg_SQ_sli",
                       "VP_sli", "ratio_VP_sli", "avg_VP_sli", "WHADVP_sli", "ratio_WHADVP_sli", "avg_WHADVP_sli",
                       "WHNP_sli", "ratio_WHNP_sli", "avg_WHNP_sli", "WHPP_sli", "ratio_WHPP_sli", "avg_WHPP_sli",
                       "avg_phrases_sli",
                       "sim_pres_sli", "ratio_sim_pres_sli", "pres_prog_sli", "ratio_pres_prog_sli",
                       "pres_perf_sli", "ratio_pres_perf_sli", "pres_perf_prog_sli", "ratio_pres_perf_prog_sli",
                       "sim_pas_sli", "ratio_sim_pas_sli", "pas_prog_sli", "ratio_pas_prog_sli",
                       "pas_perf_sli", "ratio_pas_perf_sli", "pas_perf_prog_sli", "ratio_pas_perf_prog_sli",
                       "will_sli", "ratio_will_sli", "fu_prog_sli", "ratio_fu_prog_sli", "fu_perf_sli",
                       "ratio_fu_perf_sli",
                       "fu_perf_prog_sli", "ratio_fu_perf_prog_sli", "cond_sim_sli", "ratio_cond_sim_sli",
                       "cond_prog_sli", "ratio_cond_prog_sli", "cond_perf_sli", "ratio_cond_perf_sli",
                       "cond_perf_prog_sli", "ratio_cond_perf_prog_sli",
                       "gerund_sli", "ratio_gerund_sli", "perf_part_sli", "ratio_perf_part_sli",
                       "inf_sli", "ratio_inf_sli", "perf_inf_sli", "ratio_perf_inf_sli",
                       "active_sli", "ratio_active_sli", "passive_sli", "ratio_passive_sli"]
    # column names of the features extracted from the transcript ("_tra" suffix)
    transcript_features = ["amount_sentences", "amount_tok_tra",
                           "amount_uni_tok_tra", "ratio_uni_tok_tra", "amount_uni_lemma_tra", "ratio_uni_lemma_tra",
                           "sum_tok_len_tra", "min_tok_len_tra",
                           "avg_tok_len_tra", "max_tok_len_tra", "avg_freq_tok_tra", "avg_trigram_tra",
                           "avg_tetragram_tra", "min_sen_len", "avg_sen_len",
                           "max_sen_len", "min_sen_chars", "avg_sen_chars", "max_sen_chars",
                           "amount_syl_tra", "amount_one_syl_tra", "amount_two_syl_tra",
                           "amount_psyl_tra", "amount_hard_tra", "avg_syl_tra", "ratio_one_syl_tra",
                           "ratio_two_syl_tra", "ratio_psyl_tra", "ratio_hard_tra",
                           "min_age_tra", "avg_age_tra", "max_age_tra", "flesch_ease_tra", "flesch_kin_tra",
                           "gunning_fog_tra", "smog_tra", "ari_tra", "coleman_tra", "read_time_tra",
                           "speak_time",
                           "speak_difference", "amount_subtitles", "amount_adj_tra", "avg_adj_tra", "ratio_adj_tra",
                           "amount_adpos_tra", "avg_adpos_tra", "ratio_adpos_tra", "amount_noun_tra",
                           "avg_noun_tra", "ratio_noun_tra",
                           "amount_pronoun_tra", "avg_pronoun_tra", "ratio_pronoun_tra", "ratio_pronoun_noun_tra",
                           "amount_verb_tra", "avg_verb_tra", "ratio_verb_tra",
                           "amount_main_verb_tra", "avg_main_verb_tra", "ratio_main_verb_tra", "amount_aux_tra",
                           "avg_aux_tra",
                           "ratio_aux_tra", "amount_adverb_tra", "avg_adverb_tra", "ratio_adverb_tra",
                           "amount_coord_conj_tra", "avg_coord_conj_tra", "ratio_coord_conj_tra",
                           "amount_determiner_tra", "avg_determiner_tra",
                           "ratio_determiner_tra", "amount_interj_tra", "avg_interj_tra",
                           "ratio_interj_tra",
                           "amount_num_tra", "avg_num_tra", "ratio_num_tra", "amount_particle_tra",
                           "avg_particle_tra", "ratio_particle_tra",
                           "amount_subord_conj_tra", "avg_subord_conj_tra", "ratio_subord_conj_tra",
                           "amount_foreign_tra", "avg_foreign_tra", "ratio_foreign_tra",
                           "amount_content_word_tra", "avg_content_word_tra", "ratio_content_word_tra",
                           "amount_function_word_tra", "avg_function_word_tra", "ratio_function_word_tra",
                           "amount_filtered_tra", "avg_filtered_tra", "ratio_filtered_tra",
                           "amount_statement_tra", "ratio_statement_tra", "amount_question_tra", "ratio_question_tra",
                           "ADJP_tra", "ratio_ADJP_tra", "avg_ADJP_tra", "ADVP_tra", "ratio_ADVP_tra", "avg_ADVP_tra",
                           "NP_tra", "ratio_NP_tra", "avg_NP_tra", "PP_tra", "ratio_PP_tra", "avg_PP_tra",
                           "S_tra", "ratio_S_tra", "avg_S_tra", "FRAG_tra", "ratio_FRAG_tra", "avg_FRAG_tra",
                           "SBAR_tra", "ratio_SBAR_tra", "avg_SBAR_tra", "SBARQ_tra", "ratio_SBARQ_tra",
                           "avg_SBARQ_tra", "SINV_tra", "ratio_SINV_tra", "avg_SINV_tra",
                           "SQ_tra", "ratio_SQ_tra", "avg_SQ_tra", "VP_tra", "ratio_VP_tra", "avg_VP_tra",
                           "WHADVP_tra", "ratio_WHADVP_tra", "avg_WHADVP_tra", "WHNP_tra", "ratio_WHNP_tra",
                           "avg_WHNP_tra", "WHPP_tra", "ratio_WHPP_tra", "avg_WHPP_tra", "avg_phrases_tra",
                           "sim_pres_tra", "ratio_sim_pres_tra", "pres_prog_tra", "ratio_pres_prog_tra",
                           "pres_perf_tra", "ratio_pres_perf_tra", "pres_perf_prog_tra", "ratio_pres_perf_prog_tra",
                           "sim_pas_tra", "ratio_sim_pas_tra", "pas_prog_tra", "ratio_pas_prog_tra",
                           "pas_perf_tra", "ratio_pas_perf_tra", "pas_perf_prog_tra", "ratio_pas_perf_prog_tra",
                           "will_tra", "ratio_will_tra", "fu_prog_tra", "ratio_fu_prog_tra", "fu_perf_tra",
                           "ratio_fu_perf_tra",
                           "fu_perf_prog_tra", "ratio_fu_perf_prog_tra", "cond_sim_tra", "ratio_cond_sim_tra",
                           "cond_prog_tra", "ratio_cond_prog_tra", "cond_perf_tra", "ratio_cond_perf_tra",
                           "cond_perf_prog_tra", "ratio_cond_perf_prog_tra", "gerund_tra", "ratio_gerund_tra",
                           "perf_part_tra", "ratio_perf_part_tra", "inf_tra", "ratio_inf_tra",
                           "perf_inf_tra", "ratio_perf_inf_tra", "active_tra",
                           "ratio_active_tra", "passive_tra", "ratio_passive_tra"]
    # column names of the embedding similarity features
    embedding_features = ["similarity_sli", "similarity_tra", "diff_similarity", "similarity_vectors"]
    # column names of the multimedia/visual features (ratings and audio/video statistics)
    visual_features = ["Clear_Language", "Vocal_Diversity", "Filler_Words", "Speed_of_Presentation",
                       "Coverage_of_the_Content",
                       "Level_of_Detail", "Highlight", "Summary", "Text_Design", "Image_Design", "Formula_Design",
                       "Table_Design",
                       "Structure", "Entry_Level", "Overall_Rating", "loudness_avg", "mod_loudness_avg",
                       "rms_energy_avg",
                       "f0_avg", "jitter_avg", "delta_jitter_avg", "shimmer_avg", "harmonicity_avg", "log_HNR_avg",
                       "PVQ_avg", "speech_rate", "articulation_rate", "average_syllable_duration", "txt_ratio_avg",
                       "txt_ratio_var", "img_ratio_avg", "img_ratio_var", "highlight", "level_of_detailing_avg",
                       "level_of_detailing_var",
                       "coverage_of_slide_content_avg", "coverage_of_slide_content_var"]
    # column names for the 16 reduced embedding dimensions
    # (get_embedding_array is defined elsewhere in this module)
    avg_embedding_slides = get_embedding_array("avg_embd_slides_dim", 16)
    avg_embedding_transcript = get_embedding_array("avg_embd_transcript_dim", 16)
    # write the header row of every output file
    features_writer.writerow(["Video_ID"] + ["Person_ID"] + slides_features + transcript_features + embedding_features
                             + visual_features + ["Knowledge_Gain", "Knowledge_Gain_Level"])
    text_features_writer.writerow(
        ["Video_ID"] + ["Person_ID"] + slides_features + transcript_features + embedding_features
        + ["Knowledge_Gain", "Knowledge_Gain_Level"])
    multimedia_features_writer.writerow(["Video_ID"] + ["Person_ID"] + visual_features + ["Knowledge_Gain",
                                                                                          "Knowledge_Gain_Level"])
    slide_writer.writerow(
        ["Video_ID"] + ["Person_ID"] + avg_embedding_slides + ["Knowledge_Gain", "Knowledge_Gain_Level"])
    transcript_writer.writerow(
        ["Video_ID"] + ["Person_ID"] + avg_embedding_transcript + ["Knowledge_Gain", "Knowledge_Gain_Level"])
    # write values inside csv-files; each row is split into the five feature
    # groups by walking the fixed column layout with a moving stop index
    for line in rows:
        for row in line:
            i = 0
            stop = 2 + len(slides_features) + len(transcript_features) + len(embedding_features)
            feature_values = []
            text_feature_values = []
            multimedia_feature_values = []
            slides_embd = []
            transcript_embd = []
            # store video_id and user_id (go into every output file)
            while i < 2:
                feature_values.append(row[i])
                text_feature_values.append(row[i])
                multimedia_feature_values.append(row[i])
                slides_embd.append(row[i])
                transcript_embd.append(row[i])
                i += 1
            # store text-features
            while i < stop:
                feature_values.append(row[i])
                text_feature_values.append(row[i])
                i += 1
            # store multimedia-features
            stop += len(visual_features)
            while i < stop:
                feature_values.append(row[i])
                multimedia_feature_values.append(row[i])
                i += 1
            stop += len(avg_embedding_slides)
            # store slides-embedding
            while i < stop:
                slides_embd.append(row[i])
                i += 1
            stop += len(avg_embedding_transcript)
            # store transcript-embedding
            while i < stop:
                transcript_embd.append(row[i])
                i += 1
            # store knowledge-gain (last two columns go into every output file)
            feature_values.append(row[-2])
            feature_values.append(row[-1])
            text_feature_values.append(row[-2])
            text_feature_values.append(row[-1])
            multimedia_feature_values.append(row[-2])
            multimedia_feature_values.append(row[-1])
            slides_embd.append(row[-2])
            slides_embd.append(row[-1])
            transcript_embd.append(row[-2])
            transcript_embd.append(row[-1])
            #store to files
            features_writer.writerow(feature_values)
            text_features_writer.writerow(text_feature_values)
            multimedia_features_writer.writerow(multimedia_feature_values)
            slide_writer.writerow(slides_embd)
            transcript_writer.writerow(transcript_embd)
    # close files
    features.close()
    text_features.close()
    multimedia_features.close()
    avg_slide_embedding.close()
    avg_transcript_embedding.close()
def remove_files(files):
    """Delete the given files from disk.

    Used to clear out the results of a previous run before the user
    starts a new calculation.

    :param files: iterable of paths of the files to delete
    """
    for path in files:
        os.remove(path)
def load_files(path):
    """Collect the paths of all files below a directory (recursively).

    :param path: directory to scan
    :return: list of file paths, normalized to ``/`` separators so the
        representation is identical on Windows, Linux and Mac
    """
    collected = []
    for root, _dirs, names in os.walk(path):
        for name in names:
            full_path = root + '/' + name
            # normalize backslashes for a platform-independent representation
            collected.append(full_path.replace('\\', '/'))
    return collected
def process_files(files, sta, cli):
    """ Process all files of the videos to generate the features. The pdf files and the srt files must have the same
    name to know that they belong to the same video. Otherwise the program can't recognize it and stops the
    calculation.
    :param files: tuple of files for the videos (pairs of slides pdf and transcript srt paths)
    :param sta: stanza object to get word specific features
    :param cli: client to generate sentence trees
    """
    rows = []
    embeddings_sli = []  # contains average embeddings for slides
    embeddings_tra = []  # contains average embeddings for transcripts
    # lookup tables and test data shared by all videos
    properties = load_csv('./wordlists/freq_syll_words.csv')
    age = load_csv('./wordlists/AoA_51715_words.csv')
    test = load_csv('./Data/Test/test.csv')
    stopwords = load_stop_words('./wordlists/stopwords.txt')
    z_scores = get_z_values(test)
    model = load_embd_model('roberta-large-nli-stsb-mean-tokens')
    for slides, transcript in files:
        try:
            s = open(slides, 'rb')
            t = open(transcript, 'rb')
            s_name = os.path.basename(s.name[:-4])
            t_name = os.path.basename(t.name[:-4])
            if s_name != t_name:  # Check if slides and transcript have the same name
                print("Names of slides and transcript must be the same.")
                # release shared resources before aborting the whole run
                client.stop_client(cli)
                properties.close()
                test.close()
                sys.exit(1)
            # get features and stores them
            features, embd_sli, embd_tra = (process_video(s, t, s_name, sta, cli, properties, age,
                                                          test, z_scores, model, stopwords))
            rows.append(features)
            # the same average embedding is appended once per person row of this
            # video so the embedding lists stay aligned with the feature rows
            for i in range(len(features)):
                embeddings_sli.append(embd_sli)
                embeddings_tra.append(embd_tra)
            s.close()
            t.close()
            # clean gpu cache
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        except IOError:
            # best-effort: a video with unreadable files is skipped, the run continues
            # NOTE(review): s/t are not closed when the error occurs after opening
            # them -- potential file-handle leak; verify and consider try/finally.
            print("Can't open slides or transcript for a video. Video will be ignored.")
            pass
    properties.close()
    test.close()
    client.stop_client(cli)
    # reduce dimension of embeddings to have a better representation
    embeddings_sli = embedding.reduce_dimension(embeddings_sli)
    embeddings_tra = embedding.reduce_dimension(embeddings_tra)
    create_csv(store_embeddings_to_features(rows, embeddings_sli, embeddings_tra))
    print("Stored features as csv-files in ./Features")
def process_video(slides, transcript, name, sta, cli, properties, age, test, z_scores, model, stopwords):
    """Extract every feature group for one specific video.

    The slide and transcript files of the video are analysed, the sentence
    embeddings are computed and finally one result row per person that
    watched the video is produced.

    :param slides: files for slides
    :param transcript: files for transcript
    :param name: name of the video
    :param sta: stanza to calculate word specific features
    :param cli: client to calculate sentence trees
    :param properties: csv-table with amount of syllables and frequency for words
    :param age: csv-table that includes the age of acquisition for words
    :param test: csv-file with the knowledge gains
    :param z_scores: values of the knowledge gains as z-score to calculate the nominal classes
    :param model: the sentence embedding model to calculate the embeddings
    :param stopwords: stopwords that has to be filtered for the frequency
    :return: rows of features for the specific video
    """
    print(f"Process slide: {name}.pdf")
    slide_features, slide_lines = process_slides(slides, sta, cli, properties, age, stopwords)
    print(f"Finished process of slide: {name}.pdf")
    print(f"Process transcript: {name}.srt")
    transcript_features, sentences = process_transcript(transcript, sta, cli, properties, age, stopwords)
    embd_features, embd_sli, embd_tra = embedding.process_video_embeddings(slide_lines, sentences, model)
    print(f"Finished process of transcript: {name}.srt")
    result_rows = process_test(test, name, z_scores, slide_features, transcript_features, embd_features)
    return result_rows, embd_sli, embd_tra
def process_slides(slides, sta, cli, properties, age, stopwords):
    """Calculate the features for the slides of one video.

    :param slides: the slides of a video
    :param sta: stanza to calculate word specific features
    :param cli: client to calculate sentence trees
    :param properties: csv-table with amount of syllables and frequency for words
    :param age: csv-table that includes the age of acquisition for words
    :param stopwords: stopwords that has to be filtered for the frequency
    :return: tuple of (features, sentences) of the slides
    """
    raw_lines = slides.readlines()
    counter = processor.Counter()
    calc = processor.Calculator()
    return calc.process_lines(raw_lines, cli, sta, counter, properties, age, stopwords)
def process_transcript(transcript, sta, cli, properties, age, stopwords):
    """Calculate the features for the transcript of one video.

    :param transcript: the transcript of a video
    :param sta: stanza to calculate word specific features
    :param cli: client to calculate sentence trees
    :param properties: csv-table with amount of syllables and frequency for words
    :param age: csv-table that includes the age of acquisition for words
    :param stopwords: stopwords that has to be filtered for the frequency
    :return: tuple of (features, sentences) of the transcript
    """
    raw_text = transcript.read()
    counter = processor.Counter()
    calc = processor.Calculator()
    return calc.process_sentences(raw_text, cli, sta, counter, properties, age, stopwords)
def write_pdf(location, pages):
    """Store the textual content of a pdf into a txt-file.

    Every page is introduced by a marker line ``Starting next page:<n>``
    so that the page boundaries can be recovered when reading the file
    back in.

    :param location: target path of the txt-file
    :param pages: array of pages that contains an array of line tuples
        (the text sits at index 4 of each line)
    """
    with open(location, "w", encoding="utf-8") as out:
        for number, page in enumerate(pages):
            out.write(f"Starting next page:{number}\n")
            for line in page:
                # Normalize special characters so the written text matches the
                # pdf: drop non-breaking spaces and carriage returns, turn tabs
                # into single spaces and collapse the "-\xad\u2010" soft-hyphen
                # sequence back into a plain hyphen.
                cleaned = (line[4].replace("\xa0", "").replace("\r", "")
                           .replace("\t ", " ").replace("\t", " ")
                           .replace("-\xad‐", "‐"))
                out.write(cleaned + "\n")
def write_embeddings(location, embeddings):
    """Store embeddings in a txt-file (helper, currently unused).

    The plain sentence (first entry of an embedding pair) is written in
    double quotes so it is clear what the embedding represents; every
    vector component follows, separated by a single space.

    :param location: the location to store them
    :param embeddings: the embeddings to store (pairs of sentence and vector)
    """
    handle = open(location, "w")
    for entry in embeddings:
        line = '"' + entry[0] + '"'
        for value in entry[1]:
            line += " " + str(value.item())
        handle.write(line + '\n')
    handle.close()
def positions(lines):
    """ Stores every line in a new array and checks if the order is correct or the line should be placed earlier.
    Lines that share a similar vertical position (y difference of at most 5)
    are re-ordered by their x coordinate so that the left-most line comes
    first. Assumed line layout (per merge_line / fitz words): index 0 = x0,
    index 3 = y1 -- verify against process_pdf.
    :param lines: lines to check
    :return: array of lines with new order
    """
    result = []
    for line in lines:
        if len(result) > 1:  # array has more then 1 element
            # check backwards if previous lines are more on the right side then the current one
            for i in range(len(result) - 1, -1, -1):
                current = result[i]
                diff = current[3] - line[3]
                """ Checks if previous line has similar y coordinate and a higher x coordinate. If this is true the
                order will be replaced to the position where this is no more true
                """
                if (0 >= diff >= -5) and (current[0] > line[0]):
                    if i == 0:  # reached end. Replaces it to the beginning
                        result.insert(0, line)
                        break
                    # keep scanning backwards: an earlier line may also be to the right
                    continue
                else:
                    if i < (len(result) - 1):  # Replaces it to the correct position
                        result.insert(i + 1, line)
                        break
                    # the immediately preceding line is already left of this one
                    result.append(line)
                    break
        elif len(result) == 1:  # 1 element in array
            current = result[0]
            diff = current[3] - line[3]
            if (0 >= diff >= -5) and (current[0] > line[0]):
                result.insert(0, line)
            else:
                result.append(line)
        else:  # empty array
            result.append(line)
    return result
def contains_letters(text):
    """ Check if text is bigger then 1 and check if text contains letters. This is a helpfunction to check if a line is
    useful or useless. A character or only special characters / numbers have no meaning.
    :param text: text to check the criteria
    :return: True if text is longer than 1 and contains a letter, otherwise False
    """
    # bool(...) so the function really returns True/False as documented,
    # instead of leaking a re.Match object (truthy) or None/False.
    return bool(len(text) > 1 and re.search("[a-zA-Z]", text))
def process_pdf(file):
    """ Converts a PDF file to a txt file to get the text.
    The words of every page are sorted by block/line/word number, merged into
    lines, filtered for meaningful content and re-ordered by coordinates
    before being written to ./Data/Slides-Processed.
    :param file: PDF file which should be converted
    """
    doc = fitz.open(file)
    pages = []
    for page in doc:
        block_page = page.getTextWords()
        # sort words by block number, line number and word number
        sorted_blocks = SortedCollection(key=itemgetter(5, 6, 7))
        for block in block_page:
            sorted_blocks.insert(block)
        lines = merge_line(sorted_blocks)
        # sort the merged lines by y1 and x0 coordinate
        sorted_lines = SortedCollection(key=itemgetter(3, 0))
        for line in lines:
            # Checks if line is bigger then 1 and has a letter.
            if contains_letters(line[4]):
                sorted_lines.insert(line)
        # fix the reading order of lines that share a similar y coordinate
        sorted_lines = positions(sorted_lines)
        pages.append(sorted_lines)
    write_pdf("./Data/Slides-Processed/" + os.path.basename(file).replace(".pdf", ".txt"), pages)
def change_values(word1, word2, text, string):
    """Merge the text of two word entries.

    The text of ``word2`` is appended to the text of ``word1``,
    separated by ``string``; ``word1`` is mutated in place.

    :param word1: first word entry (modified and returned)
    :param word2: second word entry
    :param text: index of the text field inside a word entry
    :param string: separator placed between the two texts
    :return: changed first word
    """
    combined = word1[text] + string + word2[text]
    word1[text] = combined
    return word1
def merge_line(words):
    """Merge word objects into line objects.

    Two consecutive entries are merged when they share block_no and
    line_no, or when they share the block_no, have different line_no and
    their y0/y1 coordinates differ by at most 2. The entry that starts
    further to the left becomes the beginning of the merged line.

    :param words: words-object (lines) to manipulate
    :return: merged objects list
    """
    merged = []
    for word in words:
        if not merged:
            merged.append(word)
            continue
        last = list(merged[-1])
        same_block = last[5] == word[5]
        same_line = last[6] == word[6]
        if same_block and same_line:
            # same block and line: simply append the word's text
            merged[-1] = change_values(last, word, 4, " ")
        elif same_block:
            # same block but different line: merge only when the vertical
            # coordinates are close enough (merge criteria)
            if abs(last[1] - word[1]) <= 2 and abs(last[3] - word[3]) <= 2:
                if last[0] - word[0] > 0:
                    # previous entry starts further right: prepend the word
                    merged[-1] = change_values(list(word), last, 4, "\t")
                else:
                    # previous entry starts further left: append the word
                    merged[-1] = change_values(last, word, 4, "\t")
            else:  # no merge
                merged.append(word)
        else:  # no merge
            merged.append(word)
    return merged
| 34,651 | 50.642325 | 119 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/parsing-new-version.py | # -*- coding: utf-8 -*-
import sys
import os
import re
import lxml.html
from lxml.html.clean import Cleaner
from urllib.parse import unquote
import pprint
import logging
from cssselect import HTMLTranslator
logger = logging.getLogger(__name__)
class InvalidSearchTypeException(Exception):
    """Raised when a parser does not support the requested search type."""
    pass
class UnknowUrlException(Exception):
    """Raised for an url that cannot be handled (raised elsewhere in the project)."""
    pass
class NoParserForSearchEngineException(Exception):
    """Raised when no parser implementation exists for a search engine."""
    pass
class Parser():
    """Parses SERP pages.

    Each search engine results page (SERP) has a similar layout:
    The main search results are usually in a html container element (#main, .results, #leftSide).
    There might be separate columns for other search results (like ads for example). Then each
    result contains basically a link, a snippet and a description (usually some text on the
    target site). It's really astonishing how similar other search engines are to Google.

    Each child class (that can actually parse a concrete search engine results page) needs
    to specify css selectors for the different search types (Like normal search, news search, video search, ...).

    Attributes:
        search_results: The results after parsing.
    """

    # this selector specifies the element that notifies the user whether the search
    # had any results.
    no_results_selector = []

    # if subclasses specify a value for this attribute and the attribute
    # targets an element in the serp page, then there weren't any results
    # for the original query.
    effective_query_selector = []

    # the selector that gets the number of results (guessed) as shown by the search engine.
    num_results_search_selectors = []

    # some search engines show on which page we currently are. If supported, this selector will get this value.
    page_number_selectors = []

    # The supported search types. For instance, Google supports Video Search, Image Search, News search
    search_types = []

    # Each subclass of Parser may declare an arbitrary amount of attributes that
    # follow a naming convention like this:
    # *_search_selectors
    # where the asterix may be replaced with arbitrary identifier names.
    # Any of these attributes represent css selectors for a specific search type.
    # If you didn't specify the search type in the search_types list, this attribute
    # will not be evaluated and no data will be parsed.

    def __init__(self, search_type='normal', html='', query=''):
        """Create new Parser instance and parse all information.

        Args:
            html: The raw html from the search engine search. If not provided, you can parse
                the data later by calling parse(html) directly.
            searchtype: The search type. By default "normal"

        Raises:
            Assertion error if the subclassed
            specific parser cannot handle the settings.
        """
        self.searchtype = search_type
        assert self.searchtype in self.search_types, 'search type "{}" is not supported in {}'.format(
            self.searchtype,
            self.__class__.__name__
        )
        self.query = query
        self.html = html
        self.dom = None
        self.search_results = {}
        self.num_results_for_query = ''
        self.num_results = 0
        self.effective_query = ''
        self.page_number = -1
        self.no_results = False

        # to be set by the implementing sub classes
        self.search_engine = ''

        # short alias because we use it so extensively
        self.css_to_xpath = HTMLTranslator().css_to_xpath

        if self.html:
            self.parse()

    def parse(self, html=None):
        """Public function to start parsing the search engine results.

        Args:
            html: The raw html data to extract the SERP entries from.
        """
        if html:
            self.html = html

        # lets do the actual parsing
        self._parse()

        # Apply subclass specific behaviour after parsing has happened
        # This is needed because different parsers need to clean/modify
        # the parsed data uniquely.
        self.after_parsing()

    def _parse_lxml(self, cleaner=None):
        """Build self.dom from self.html, optionally applying a lxml Cleaner.

        Bugfix: the dom is now built from the html BEFORE the cleaner is
        applied. Previously the cleaner was run on the stale (often None)
        self.dom and its result was immediately overwritten by the fresh
        parse, which made the cleaner useless or crashed on first use.
        """
        try:
            parser = lxml.html.HTMLParser(encoding='utf-8')
            self.dom = lxml.html.document_fromstring(self.html, parser=parser)
            if cleaner:
                self.dom = cleaner.clean_html(self.dom)
            self.dom.resolve_base_href()
        except Exception as e:
            # maybe wrong encoding
            logger.error(e)

    def _parse(self, cleaner=None):
        """Internal parse the dom according to the provided css selectors.

        Raises: InvalidSearchTypeException if no css selectors for the searchtype could be found.
        """
        self.num_results = 0
        self._parse_lxml(cleaner)

        # try to parse the number of results.
        attr_name = self.searchtype + '_search_selectors'
        selector_dict = getattr(self, attr_name, None)

        # get the appropriate css selectors for the num_results for the keyword
        num_results_selector = getattr(self, 'num_results_search_selectors', None)
        self.num_results_for_query = self.first_match(num_results_selector, self.dom)
        if not self.num_results_for_query:
            logger.debug('{}: Cannot parse num_results from serp page with selectors {}'.format(self.__class__.__name__,
                                                                                                num_results_selector))

        # get the current page we are at. Some search engines don't show this.
        try:
            self.page_number = int(self.first_match(self.page_number_selectors, self.dom))
        except ValueError:
            self.page_number = -1

        # let's see if the search query was shitty (no results for that query)
        self.effective_query = self.first_match(self.effective_query_selector, self.dom)
        if self.effective_query:
            logger.debug('{}: There was no search hit for the search query. Search engine used {} instead.'.format(
                self.__class__.__name__, self.effective_query))
        else:
            self.effective_query = ''

        # the element that notifies the user about no results.
        self.no_results_text = self.first_match(self.no_results_selector, self.dom)

        # get the stuff that is of interest in SERP pages.
        # NOTE(review): this only raises when the attribute is missing (None);
        # a present but truthy non-dict value would slip through and fail on
        # .items() below -- confirm whether that case can occur.
        if not selector_dict and not isinstance(selector_dict, dict):
            raise InvalidSearchTypeException('There is no such attribute: {}. No selectors found'.format(attr_name))

        for result_type, selector_class in selector_dict.items():
            self.search_results[result_type] = []

            for selector_specific, selectors in selector_class.items():

                if 'result_container' in selectors and selectors['result_container']:
                    css = '{container} {result_container}'.format(**selectors)
                else:
                    css = selectors['container']

                results = self.dom.xpath(
                    self.css_to_xpath(css)
                )

                to_extract = set(selectors.keys()) - {'container', 'result_container'}
                selectors_to_use = {key: selectors[key] for key in to_extract if key in selectors.keys()}

                for index, result in enumerate(results):
                    # Let's add primitive support for CSS3 pseudo selectors
                    # We just need two of them
                    # ::text
                    # ::attr(attribute)

                    # You say we should use xpath expressions instead?
                    # Maybe you're right, but they are complicated when it comes to classes,
                    # have a look here: http://doc.scrapy.org/en/latest/topics/selectors.html
                    serp_result = {}
                    # key are for example 'link', 'snippet', 'visible-url', ...
                    # selector is the selector to grab these items
                    for key, selector in selectors_to_use.items():
                        serp_result[key] = self.advanced_css(selector, result)

                    serp_result['rank'] = index + 1

                    # Avoid duplicates. Duplicates are serp_result elemnts where the 'link' and 'title' are identical
                    # If statement below: Lazy evaluation. The more probable case first.
                    if not [e for e in self.search_results[result_type] if (e['link'] == serp_result['link'] and e['title'] == serp_result['title'])]:
                        self.search_results[result_type].append(serp_result)
                        self.num_results += 1

    def advanced_css(self, selector, element):
        """Evaluate the :text and ::attr(attr-name) additionally.

        Args:
            selector: A css selector.
            element: The element on which to apply the selector.

        Returns:
            The targeted element.
        """
        value = None

        if selector.endswith('::text'):
            try:
                value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].text_content()
            except IndexError:
                pass
        else:
            match = re.search(r'::attr\((?P<attr>.*)\)$', selector)

            if match:
                attr = match.group('attr')
                try:
                    value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].get(attr)
                except IndexError:
                    pass
            else:
                try:
                    value = element.xpath(self.css_to_xpath(selector))[0].text_content()
                except IndexError:
                    pass

        return value

    def first_match(self, selectors, element):
        """Get the first match.

        Args:
            selectors: The selectors to test for a match.
            element: The element on which to apply the selectors.

        Returns:
            The very first match or False if all selectors didn't match anything.
        """
        assert isinstance(selectors, list), 'selectors must be of type list!'

        for selector in selectors:
            if selector:
                try:
                    match = self.advanced_css(selector, element=element)
                    if match:
                        return match
                except IndexError:
                    pass

        return False

    def after_parsing(self):
        """Subclass specific behaviour after parsing happened.

        Override in subclass to add search engine specific behaviour.
        Commonly used to clean the results.
        """

    def __str__(self):
        """Return a nicely formatted overview of the results."""
        return pprint.pformat(self.search_results)

    @property
    def cleaned_html(self):
        """Return the html of the parsed dom with scripts/styles/comments stripped."""
        # Try to parse the provided HTML string using lxml
        # strip all unnecessary information to save space
        cleaner = Cleaner()
        cleaner.scripts = True
        cleaner.javascript = True
        cleaner.comments = True
        cleaner.style = True
        self.dom = cleaner.clean_html(self.dom)
        assert len(self.dom), 'The html needs to be parsed to get the cleaned html'
        return lxml.html.tostring(self.dom)

    def iter_serp_items(self):
        """Yields the key and index of any item in the serp results that has a link value"""

        for key, value in self.search_results.items():
            if isinstance(value, list):
                for i, item in enumerate(value):
                    if isinstance(item, dict) and item['link']:
                        yield (key, i)
"""
Here follow the different classes that provide CSS selectors
for different types of SERP pages of several common search engines.
Just look at them and add your own selectors in a new class if you
want the Scraper to support them.
You can easily just add new selectors to a search engine. Just follow
the attribute naming convention and the parser will recognize them:
If you provide a dict with a name like finance_search_selectors,
then you're adding a new search type with the name finance.
Each class needs an attribute called num_results_search_selectors that
extracts the number of search results that were found for the keyword.
Please note:
The actual selectors are wrapped in a dictionary to clarify with which IP
they were requested. The key to the wrapper div allows to specify distinct
criteria to whatever settings you used when you requested the page. So you
might add your own selectors for different User-Agents, distinct HTTP headers, what-
ever you may imagine. This allows the most dynamic parsing behaviour and makes
it very easy to grab all data the site has to offer.
"""
class GoogleParser(Parser):
    """Parses SERP pages of the Google search engine.

    Mostly declarative: the ``*_search_selectors`` dicts are consumed by
    the base Parser; only after_parsing() holds Google-specific cleanup.
    """
    search_engine = 'google'
    search_types = ['normal', 'image']
    effective_query_selector = ['#topstuff .med > b::text', '.med > a > b::text']
    no_results_selector = []
    num_results_search_selectors = ['#resultStats']
    page_number_selectors = ['#navcnt td.cur::text']
    # Second-level keys (us_ip, de_ip, ...) name the request context under
    # which these selectors were observed to work (see module docstring).
    normal_search_selectors = {
        'results': {
            'us_ip': {
                'container': '#center_col',
                'result_container': 'div.g ',
                'link': 'h3.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'h3.r > a:first-child::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#center_col',
                'result_container': 'li.g ',
                'link': 'h3.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'h3.r > a:first-child::text',
                'visible_link': 'cite::text'
            },
            'de_ip_news_items': {
                'container': 'li.card-section',
                'link': 'a._Dk::attr(href)',
                'snippet': 'span._dwd::text',
                'title': 'a._Dk::text',
                'visible_link': 'cite::text'
            },
        },
        'ads_main': {
            'us_ip': {
                'container': '#center_col',
                'result_container': 'li.ads-ad',
                'link': 'h3.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'h3.r > a:first-child::text',
                'visible_link': '.ads-visurl cite::text',
            },
            'de_ip': {
                'container': '#center_col',
                'result_container': '.ads-ad',
                'link': 'h3 > a:first-child::attr(href)',
                'snippet': '.ads-creative::text',
                'title': 'h3 > a:first-child::text',
                'visible_link': '.ads-visurl cite::text',
            }
        },
        # those css selectors are probably not worth much
        'maps_local': {
            'de_ip': {
                'container': '#center_col',
                'result_container': '.ccBEnf > div',
                'link': 'link::attr(href)',
                'snippet': 'div.rl-qs-crs-t::text',
                'title': 'div[role="heading"] span::text',
                'rating': 'span.BTtC6e::text',
                'num_reviews': '.rllt__details::text',
            }
        },
        'ads_aside': {
        }
    }
    image_search_selectors = {
        'results': {
            'de_ip': {
                'container': 'li#isr_mc',
                'result_container': 'div.rg_di',
                'link': 'a.rg_l::attr(href)'
            },
            'de_ip_raw': {
                'container': '.images_table',
                'result_container': 'tr td',
                'link': 'a::attr(href)',
                'visible_link': 'cite::text',
            }
        }
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Clean the urls.
        A typical scraped results looks like the following:
        '/url?q=http://www.youtube.com/user/Apple&sa=U&ei=\
        lntiVN7JDsTfPZCMgKAO&ved=0CFQQFjAO&usg=AFQjCNGkX65O-hKLmyq1FX9HQqbb9iYn9A'
        Clean with a short regex.
        """
        super().after_parsing()
        if self.searchtype == 'normal':
            # Decide no_results from the parsed result count first ...
            if self.num_results > 0:
                self.no_results = False
            elif self.num_results <= 0:
                self.no_results = True
            # ... then from Google's explicit "no results" messages ...
            if 'No results found for' in self.html or 'did not match any documents' in self.html:
                self.no_results = True
            # finally try in the snippets: if the query text itself shows
            # up in a snippet, we did get results after all.
            if self.no_results is True:
                for key, i in self.iter_serp_items():
                    if 'snippet' in self.search_results[key][i] and self.query:
                        if self.query.replace('"', '') in self.search_results[key][i]['snippet']:
                            self.no_results = False
        # Unwrap Google's redirect urls (/url?q=... / imgres?imgurl=...)
        # into the real target url.
        clean_regexes = {
            'normal': r'/url\?q=(?P<url>.*?)&sa=U&ei=',
            'image': r'imgres\?imgurl=(?P<url>.*?)&'
        }
        for key, i in self.iter_serp_items():
            result = re.search(
                clean_regexes[self.searchtype],
                self.search_results[key][i]['link']
            )
            if result:
                self.search_results[key][i]['link'] = unquote(result.group('url'))
class YandexParser(Parser):
    """Parses SERP pages of the Yandex search engine."""
    search_engine = 'yandex'
    search_types = ['normal', 'image']
    no_results_selector = ['.message .misspell__message::text']
    effective_query_selector = ['.misspell__message .misspell__link']
    # @TODO: In december 2015, I saw that yandex only shows the number of search results in the search input field
    # with javascript. One can scrape it in plain http mode, but the values are hidden in some javascript and not
    # accessible with normal xpath/css selectors. A normal text search is done.
    num_results_search_selectors = ['.serp-list .serp-adv__found::text', '.input__found_visibility_visible font font::text']
    page_number_selectors = ['.pager__group .button_checked_yes span::text']
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '.serp-list',
                'result_container': '.serp-item',
                'link': 'a.link::attr(href)',
                'snippet': 'div.text-container::text',
                'title': 'div.organic__url-text::text',
                'visible_link': '.typo_type_greenurl::text'
            }
        }
    }
    image_search_selectors = {
        'results': {
            'de_ip': {
                'container': '.page-layout__content-wrapper',
                'result_container': '.serp-item__preview',
                'link': '.serp-item__preview .serp-item__link::attr(onmousedown)'
            },
            'de_ip_raw': {
                'container': '.page-layout__content-wrapper',
                'result_container': '.serp-item__preview',
                'link': '.serp-item__preview .serp-item__link::attr(href)'
            }
        }
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Clean the urls.
        Normally Yandex image search store the image url in the onmousedown attribute in a json object. Its
        pretty messsy. This method grabs the link with a quick regex.
        c.hit({"dtype":"iweb","path":"8.228.471.241.184.141","pos":69,"reqid":\
        "1418919408668565-676535248248925882431999-ws35-986-IMG-p2"}, \
        {"href":"http://www.thewallpapers.org/wallpapers/3/382/thumb/600_winter-snow-nature002.jpg"});
        Sometimes the img url is also stored in the href attribute (when requesting with raw http packets).
        href="/images/search?text=snow&img_url=\
        http%3A%2F%2Fwww.proza.ru%2Fpics%2F2009%2F12%2F07%2F1290.jpg&pos=2&rpt=simage&pin=1">
        """
        super().after_parsing()
        if self.searchtype == 'normal':
            self.no_results = False
            if self.no_results_text:
                # Russian for "Nothing was found for your query".
                self.no_results = 'По вашему запросу ничего не нашлось' in self.no_results_text
            if self.num_results == 0:
                self.no_results = True
            # very hackish, probably prone to all kinds of errors.
            if not self.num_results_for_query:
                substr = 'function() {{ var title = "%s —' % self.query if False else 'function() { var title = "%s —' % self.query
                try:
                    i = self.html.index(substr)
                    if i:
                        self.num_results_for_query = re.search(r'— (.)*?"', self.html[i:i+len(self.query) + 150]).group()
                except Exception as e:
                    logger.debug(str(e))
        if self.searchtype == 'image':
            # Try the onmousedown-json form first, then the raw &img_url= form.
            for key, i in self.iter_serp_items():
                for regex in (
                    r'\{"href"\s*:\s*"(?P<url>.*?)"\}',
                    r'img_url=(?P<url>.*?)&'
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = result.group('url')
                        break
class BingParser(Parser):
    """Parses SERP pages of the Bing search engine."""
    search_engine = 'bing'
    search_types = ['normal', 'image']
    no_results_selector = ['#b_results > .b_ans::text']
    num_results_search_selectors = ['.sb_count']
    effective_query_selector = ['#sp_requery a > strong', '#sp_requery + #sp_recourse a::attr(href)']
    page_number_selectors = ['.sb_pagS::text']
    normal_search_selectors = {
        'results': {
            'us_ip': {
                'container': '#b_results',
                'result_container': '.b_algo',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#b_results',
                'result_container': '.b_algo',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2::text',
                'visible_link': 'cite::text'
            },
            'de_ip_news_items': {
                'container': 'ul.b_vList li',
                'link': ' h5 a::attr(href)',
                'snippet': 'p::text',
                'title': ' h5 a::text',
                'visible_link': 'cite::text'
            },
        },
        'ads_main': {
            'us_ip': {
                'container': '#b_results .b_ad',
                'result_container': '.sb_add',
                'link': 'h2 > a::attr(href)',
                'snippet': '.sb_addesc::text',
                'title': 'h2 > a::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#b_results .b_ad',
                'result_container': '.sb_add',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2 > a::text',
                'visible_link': 'cite::text'
            }
        }
    }
    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#dg_c .imgres',
                'result_container': '.dg_u',
                # the real image url lives in the "m" attribute as json-ish
                # text; it is unwrapped in after_parsing() below.
                'link': 'a.dv_i::attr(m)'
            },
        }
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Clean the urls.
        The image url data is in the m attribute.
        m={ns:"images.1_4",k:"5018",mid:"46CE8A1D71B04B408784F0219B488A5AE91F972E",
        surl:"http://berlin-germany.ca/",imgurl:"http://berlin-germany.ca/images/berlin250.jpg",
        oh:"184",tft:"45",oi:"http://berlin-germany.ca/images/berlin250.jpg"}
        """
        super().after_parsing()
        if self.searchtype == 'normal':
            self.no_results = False
            if self.no_results_text:
                self.no_results = self.query in self.no_results_text \
                    or 'Do you want results only for' in self.no_results_text
        if self.searchtype == 'image':
            # Extract the real image url from the m-attribute blob.
            for key, i in self.iter_serp_items():
                for regex in (
                    r'imgurl:"(?P<url>.*?)"',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = result.group('url')
                        break
class YahooParser(Parser):
    """Parses SERP pages of the Yahoo search engine."""
    search_engine = 'yahoo'
    search_types = ['normal', 'image']
    no_results_selector = []
    effective_query_selector = ['.msg #cquery a::attr(href)']
    num_results_search_selectors = ['#pg > span:last-child', '.compPagination span::text']
    page_number_selectors = ['#pg > strong::text']
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#main',
                'result_container': '.res',
                'link': 'div > h3 > a::attr(href)',
                'snippet': 'div.abstr::text',
                'title': 'div > h3 > a::text',
                'visible_link': 'span.url::text'
            },
            'de_ip_december_2015': {
                'container': '#main',
                'result_container': '.searchCenterMiddle li',
                'link': 'h3.title a::attr(href)',
                'snippet': '.compText p::text',
                'title': 'h3.title a::text',
                'visible_link': 'span::text'
            },
        },
    }
    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#results',
                'result_container': '#sres > li',
                'link': 'a::attr(href)'
            },
        }
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Clean the urls.
        For image search, the target url is in the href's &imgurl= parameter,
        e.g.:
        href=".../images/view;...&imgurl=www.summitpost.org%2Fimages%2Foriginal%2F699696.JPG&rurl=..."
        For normal search, entries without a parsed visible_link are dropped.
        """
        super().after_parsing()
        if self.searchtype == 'normal':
            self.no_results = False
            if self.num_results == 0:
                self.no_results = True
            if len(self.dom.xpath(self.css_to_xpath('#cquery'))) >= 1:
                self.no_results = True
            # BUGFIX: the previous code did `del self.search_results[key][i]`
            # while iter_serp_items() was still enumerating the same list,
            # which shifts the remaining indices and skips/deletes the wrong
            # entries. Rebuild each result list instead.
            for result_type, entries in self.search_results.items():
                if isinstance(entries, list):
                    self.search_results[result_type] = [
                        entry for entry in entries
                        if not (isinstance(entry, dict) and entry.get('link')
                                and entry.get('visible_link') is None)
                    ]
        if self.searchtype == 'image':
            for key, i in self.iter_serp_items():
                for regex in (
                    r'&imgurl=(?P<url>.*?)&',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        # TODO: Fix this manual protocol adding by parsing "rurl"
                        self.search_results[key][i]['link'] = 'http://' + unquote(result.group('url'))
                        break
class BaiduParser(Parser):
    """Parses SERP pages of the Baidu search engine."""
    search_engine = 'baidu'
    search_types = ['normal', 'image']
    num_results_search_selectors = ['#container .nums']
    no_results_selector = []
    # no such thing for baidu
    effective_query_selector = ['']
    page_number_selectors = ['.fk_cur + .pc::text']
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#content_left',
                'result_container': '.result-op',
                'link': 'h3 > a.t::attr(href)',
                'snippet': '.c-abstract::text',
                'title': 'h3 > a.t::text',
                'visible_link': 'span.c-showurl::text'
            },
            'nojs': {
                'container': '#content_left',
                'result_container': '.result',
                'link': 'h3 > a::attr(href)',
                'snippet': '.c-abstract::text',
                'title': 'h3 > a::text',
                'visible_link': 'span.g::text'
            }
        },
    }
    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#imgContainer',
                'result_container': '.pageCon > li',
                'link': '.imgShow a::attr(href)'
            },
        }
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Clean the urls.
        For image search the target image url is in the &objurl= parameter:
        href="/i?ct=503316480&z=&tn=baiduimagedetail&...&
        objurl=http%3A%2F%2Fa669.phobos.apple.com%2F...%2Fmzl.otpvmwuj.1024x1024-65.jpg&adpicid=0"
        """
        super().after_parsing()
        # BUGFIX: this previously tested `self.search_engine == 'normal'`.
        # self.search_engine is always 'baidu', so the no_results detection
        # was dead code; the intended comparison is against the search type.
        if self.searchtype == 'normal':
            if len(self.dom.xpath(self.css_to_xpath('.hit_top_new'))) >= 1:
                self.no_results = True
        if self.searchtype == 'image':
            for key, i in self.iter_serp_items():
                for regex in (
                    r'&objurl=(?P<url>.*?)&',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = unquote(result.group('url'))
                        break
class DuckduckgoParser(Parser):
    """Parses SERP pages of the Duckduckgo search engine."""
    search_engine = 'duckduckgo'
    search_types = ['normal']
    num_results_search_selectors = []
    no_results_selector = []
    effective_query_selector = ['']
    # duckduckgo loads next pages with ajax, so there is no page indicator
    page_number_selectors = ['']
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#links',
                'result_container': '.result',
                'link': '.result__title > a::attr(href)',
                'snippet': 'result__snippet::text',
                'title': '.result__title > a::text',
                'visible_link': '.result__url__domain::text'
            },
            'non_javascript_mode': {
                'container': '#content',
                'result_container': '.results_links',
                'link': '.links_main > a::attr(href)',
                'snippet': '.snippet::text',
                'title': '.links_main > a::text',
                'visible_link': '.url::text'
            },
        },
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Determine whether the page reported any results."""
        super().after_parsing()
        if self.searchtype == 'normal':
            try:
                if 'No more results.' in self.dom.xpath(self.css_to_xpath('.no-results'))[0].text_content():
                    self.no_results = True
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. A missing .no-results node
                # (IndexError) simply means results are present.
                pass
            if self.num_results > 0:
                self.no_results = False
            elif self.num_results <= 0:
                self.no_results = True
class AskParser(Parser):
    """Parses SERP pages of the Ask search engine.

    Purely declarative -- all parsing/cleanup is inherited from Parser.
    """
    search_engine = 'ask'
    search_types = ['normal']
    num_results_search_selectors = []
    no_results_selector = []
    effective_query_selector = ['#spell-check-result > a']
    page_number_selectors = ['.pgcsel .pg::text']
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#midblock',
                'result_container': '.ptbs.ur',
                'link': '.abstract > a::attr(href)',
                'snippet': '.abstract::text',
                'title': '.txt_lg.b::text',
                'visible_link': '.durl span::text'
            },
            'de_ip_december_2015': {
                'container': '.l-mid-content',
                'result_container': '.web-result',
                'link': '.web-result-title > a::attr(href)',
                'snippet': '.web-result-description::text',
                'title': '.web-result-title > a::text',
                'visible_link': '.web-result-url::text'
            },
            # as requested by http mode
            'de_ip_december_2015_raw_http': {
                'container': '#midblock',
                'result_container': '#teoma-results .wresult',
                'link': 'a.title::attr(href)',
                'snippet': '.abstract::text',
                'title': 'a.title::text',
                'visible_link': '.durl span::text'
            }
        },
    }
class BlekkoParser(Parser):
    """Parses SERP pages of the Blekko search engine.

    Purely declarative -- all parsing/cleanup is inherited from Parser.
    """
    search_engine = 'blekko'
    search_types = ['normal']
    effective_query_selector = ['']
    no_results_selector = []
    num_results_search_selectors = []
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#links',
                'result_container': '.result',
                'link': '.result__title > a::attr(href)',
                'snippet': 'result__snippet::text',
                'title': '.result__title > a::text',
                'visible_link': '.result__url__domain::text'
            }
        },
    }
def get_parser_by_url(url):
    """Get the appropriate parser by an search engine url.

    Args:
        url: The url that was used to issue the search

    Returns:
        The correct parser class that can parse results for this url.

    Raises:
        UnknowUrlException if no parser could be found for the url.
    """
    # CONSISTENCY FIX: the ask/blekko branches used plain `if` while the
    # rest of the chain used `elif`; a later match could silently override
    # an earlier one. All schemes now accept http and https uniformly
    # (previously only google/ask/blekko did).
    parser = None
    if re.search(r'^http[s]?://www\.google', url):
        parser = GoogleParser
    elif re.search(r'^http[s]?://yandex\.ru', url):
        parser = YandexParser
    elif re.search(r'^http[s]?://www\.bing\.', url):
        parser = BingParser
    elif re.search(r'^http[s]?://search\.yahoo.', url):
        parser = YahooParser
    elif re.search(r'^http[s]?://www\.baidu\.com', url):
        parser = BaiduParser
    elif re.search(r'^http[s]?://duckduckgo\.com', url):
        parser = DuckduckgoParser
    elif re.search(r'^http[s]?://[a-z]{2}?\.ask', url):
        parser = AskParser
    elif re.search(r'^http[s]?://blekko', url):
        parser = BlekkoParser
    if not parser:
        raise UnknowUrlException('No parser for {}.'.format(url))
    return parser
def get_parser_by_search_engine(search_engine):
    """Get the appropriate parser for the search_engine

    Args:
        search_engine: The name of a search_engine.

    Returns:
        A parser for the search_engine

    Raises:
        NoParserForSearchEngineException if no parser could be found for the name.
    """
    # Dispatch table instead of an if/elif chain; the img variants map to
    # the same parser as their plain counterparts.
    engine_to_parser = {
        'google': GoogleParser,
        'googleimg': GoogleParser,
        'yandex': YandexParser,
        'bing': BingParser,
        'yahoo': YahooParser,
        'baidu': BaiduParser,
        'baiduimg': BaiduParser,
        'duckduckgo': DuckduckgoParser,
        'ask': AskParser,
        'blekko': BlekkoParser,
    }
    parser = engine_to_parser.get(search_engine)
    if parser is None:
        raise NoParserForSearchEngineException('No such parser for "{}"'.format(search_engine))
    return parser
def parse_serp(html='', query='', search_engine='google'):
    """Parse a search engine results page and return the extracted results.

    This function may be called from scraping and caching.
    When called from caching, some info is lost (like current page number).

    Args:
        html: The raw SERP html to parse.
        query: The query the page was requested for.
        search_engine: Name of the engine that produced the page.

    Returns:
        The dict of parsed SERP results.
    """
    parser_class = get_parser_by_search_engine(search_engine)
    serp_parser = parser_class(html=html, query=query)
    serp_parser.parse(html)
    return serp_parser.search_results
if __name__ == '__main__':
    # Originally part of https://github.com/NikolaiT/GoogleScraper.
    #
    # Only for testing purposes: may be called directly with a search engine
    # url or a local html file, e.g.:
    #   python3 parsing.py 'http://yandex.ru/yandsearch?text=GoogleScraper&lr=178&...'
    #
    # Note: requesting such urls without imitating a real browser usually
    # makes the engines return crippled html, so direct use is of limited
    # value (it nevertheless works for yandex, google, ...).
    import requests

    assert len(sys.argv) >= 2, 'Usage: {} url/file'.format(sys.argv[0])
    target = sys.argv[1]

    if os.path.exists(target):
        # Local file: the engine name must be given as the second argument.
        raw_html = open(target, 'r').read()
        parser_class = get_parser_by_search_engine(sys.argv[2])
    else:
        raw_html = requests.get(target).text
        parser_class = get_parser_by_url(target)

    serp_parser = parser_class(html=raw_html)
    serp_parser.parse()
    print(serp_parser)

    # Keep a copy of the fetched page for inspection.
    with open('/tmp/testhtml.html', 'w') as of:
        of.write(raw_html)
| 38,678 | 34.355576 | 151 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/parsing.py | # -*- coding: utf-8 -*-
import sys
import os
import re
import lxml.html
from lxml.html.clean import Cleaner
from urllib.parse import unquote
import pprint
from GoogleScraper.database import SearchEngineResultsPage
import logging
from cssselect import HTMLTranslator
logger = logging.getLogger(__name__)
class InvalidSearchTypeException(Exception):
    """Raised by Parser._parse() when no ``*_search_selectors`` attribute
    exists for the configured search type."""
    pass
class UnknowUrlException(Exception):
    """Raised by get_parser_by_url() when no parser matches the url."""
    pass
class NoParserForSearchEngineException(Exception):
    """Raised by get_parser_by_search_engine() for an unknown engine name."""
    pass
class Parser():
    """Parses SERP pages.

    Each search engine results page (SERP) has a similar layout:
    The main search results are usually in a html container element (#main, .results, #leftSide).
    There might be separate columns for other search results (like ads for example). Then each
    result contains basically a link, a snippet and a description (usually some text on the
    target site). It's really astonishing how similar other search engines are to Google.

    Each child class (that can actually parse a concrete search engine results page) needs
    to specify css selectors for the different search types (like normal search, news search,
    video search, ...).

    Attributes:
        search_results: The results after parsing.
    """
    # this selector specified the element that notifies the user whether the search
    # had any results.
    no_results_selector = []
    # if subclasses specify an value for this attribute and the attribute
    # targets an element in the serp page, then there weren't any results
    # for the original query.
    effective_query_selector = []
    # the selector that gets the number of results (guessed) as shown by the search engine.
    num_results_search_selectors = []
    # some search engine show on which page we currently are. If supportd, this selector will get this value.
    page_number_selectors = []
    # The supported search types. For instance, Google supports Video Search, Image Search, News search
    search_types = []

    # Each subclass of Parser may declare an arbitrary amount of attributes that
    # follow a naming convention like this:
    # *_search_selectors
    # where the asterix may be replaced with arbitrary identifier names.
    # Any of these attributes represent css selectors for a specific search type.
    # If you didn't specify the search type in the search_types list, this attribute
    # will not be evaluated and no data will be parsed.

    def __init__(self, config={}, html='', query=''):
        """Create new Parser instance and parse all information.

        Args:
            config: Configuration dict; only 'search_type' is read here.
                NOTE(review): mutable default, but config is only read
                (via .get), never mutated, so it is safe.
            html: The raw html from the search engine search. If not provided, you can parse
                the data later by calling parse(html) directly.
            query: The query string this page was requested for.

        Raises:
            Assertion error if the subclassed
            specific parser cannot handle the the settings.
        """
        self.config = config
        self.searchtype = self.config.get('search_type', 'normal')
        assert self.searchtype in self.search_types, 'search type "{}" is not supported in {}'.format(
            self.searchtype,
            self.__class__.__name__
        )
        self.query = query
        self.html = html
        self.dom = None
        self.search_results = {}
        self.num_results_for_query = ''
        self.num_results = 0
        self.effective_query = ''
        self.page_number = -1
        self.no_results = False

        # to be set by the implementing sub classes
        self.search_engine = ''

        # short alias because we use it so extensively
        self.css_to_xpath = HTMLTranslator().css_to_xpath

        # Parse eagerly when html was supplied.
        if self.html:
            self.parse()

    def parse(self, html=None):
        """Public function to start parsing the search engine results.

        Args:
            html: The raw html data to extract the SERP entries from.
        """
        if html:
            self.html = html

        # lets do the actual parsing
        self._parse()

        # Apply subclass specific behaviour after parsing has happened
        # This is needed because different parsers need to clean/modify
        # the parsed data uniquely.
        self.after_parsing()

    def _parse_lxml(self, cleaner=None):
        """Parse self.html into self.dom, optionally cleaning it first.

        Errors (e.g. wrong encoding) are logged, not raised; self.dom may
        remain None in that case.
        """
        try:
            parser = lxml.html.HTMLParser(encoding='utf-8')
            if cleaner:
                self.dom = cleaner.clean_html(self.dom)
            self.dom = lxml.html.document_fromstring(self.html, parser=parser)
            self.dom.resolve_base_href()
        except Exception as e:
            # maybe wrong encoding
            logger.error(e)

    def _parse(self, cleaner=None):
        """Internal parse the dom according to the provided css selectors.

        Raises: InvalidSearchTypeException if no css selectors for the searchtype could be found.
        """
        self.num_results = 0
        self._parse_lxml(cleaner)

        # try to parse the number of results.
        attr_name = self.searchtype + '_search_selectors'
        selector_dict = getattr(self, attr_name, None)

        # get the appropriate css selectors for the num_results for the keyword
        num_results_selector = getattr(self, 'num_results_search_selectors', None)

        self.num_results_for_query = self.first_match(num_results_selector, self.dom)
        if not self.num_results_for_query:
            logger.debug('{}: Cannot parse num_results from serp page with selectors {}'.format(self.__class__.__name__,
                                                                                                num_results_selector))

        # get the current page we are at. Sometimes we search engines don't show this.
        try:
            self.page_number = int(self.first_match(self.page_number_selectors, self.dom))
        except ValueError:
            # first_match returned False or a non-numeric string.
            self.page_number = -1

        # let's see if the search query was shitty (no results for that query)
        self.effective_query = self.first_match(self.effective_query_selector, self.dom)
        if self.effective_query:
            logger.debug('{}: There was no search hit for the search query. Search engine used {} instead.'.format(
                self.__class__.__name__, self.effective_query))
        else:
            self.effective_query = ''

        # the element that notifies the user about no results.
        self.no_results_text = self.first_match(self.no_results_selector, self.dom)

        # get the stuff that is of interest in SERP pages.
        if not selector_dict and not isinstance(selector_dict, dict):
            raise InvalidSearchTypeException('There is no such attribute: {}. No selectors found'.format(attr_name))

        for result_type, selector_class in selector_dict.items():

            self.search_results[result_type] = []

            for selector_specific, selectors in selector_class.items():

                # Build the container selector; result_container is optional.
                if 'result_container' in selectors and selectors['result_container']:
                    css = '{container} {result_container}'.format(**selectors)
                else:
                    css = selectors['container']

                results = self.dom.xpath(
                    self.css_to_xpath(css)
                )

                # Everything except the container keys is a field to extract.
                to_extract = set(selectors.keys()) - {'container', 'result_container'}
                selectors_to_use = {key: selectors[key] for key in to_extract if key in selectors.keys()}

                for index, result in enumerate(results):
                    # Let's add primitive support for CSS3 pseudo selectors
                    # We just need two of them
                    # ::text
                    # ::attr(attribute)

                    # You say we should use xpath expressions instead?
                    # Maybe you're right, but they are complicated when it comes to classes,
                    # have a look here: http://doc.scrapy.org/en/latest/topics/selectors.html
                    serp_result = {}
                    # key are for example 'link', 'snippet', 'visible-url', ...
                    # selector is the selector to grab these items
                    for key, selector in selectors_to_use.items():
                        serp_result[key] = self.advanced_css(selector, result)

                    serp_result['rank'] = index + 1

                    # Avoid duplicates. Duplicates are serp_result elemnts where the 'link' and 'title' are identical
                    # If statement below: Lazy evaluation. The more probable case first.
                    # NOTE(review): the duplicate check is disabled here (the
                    # guard below is commented out), so every match is
                    # appended -- confirm this is intentional.
                    # if not [e for e in self.search_results[result_type] if (e['link'] == serp_result['link'] and e['title'] == serp_result['title']) ]:
                    self.search_results[result_type].append(serp_result)
                    self.num_results += 1

    def advanced_css(self, selector, element):
        """Evaluate the :text and ::attr(attr-name) additionally.

        Args:
            selector: A css selector.
            element: The element on which to apply the selector.

        Returns:
            The targeted element, or None when nothing matched.
        """
        value = None

        if selector.endswith('::text'):
            try:
                value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].text_content()
            except IndexError:
                pass
        else:
            match = re.search(r'::attr\((?P<attr>.*)\)$', selector)

            if match:
                attr = match.group('attr')
                try:
                    value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].get(attr)
                except IndexError:
                    pass
            else:
                try:
                    value = element.xpath(self.css_to_xpath(selector))[0].text_content()
                except IndexError:
                    pass

        return value

    def first_match(self, selectors, element):
        """Get the first match.

        Args:
            selectors: The selectors to test for a match.
            element: The element on which to apply the selectors.

        Returns:
            The very first match or False if all selectors didn't match anything.
        """
        assert isinstance(selectors, list), 'selectors must be of type list!'

        for selector in selectors:
            if selector:
                try:
                    match = self.advanced_css(selector, element=element)
                    if match:
                        return match
                except IndexError as e:
                    pass

        return False

    def after_parsing(self):
        """Subclass specific behaviour after parsing happened.

        Override in subclass to add search engine specific behaviour.
        Commonly used to clean the results.
        """

    def __str__(self):
        """Return a nicely formatted overview of the results."""
        return pprint.pformat(self.search_results)

    @property
    def cleaned_html(self):
        """Serialize the DOM back to html, stripped of scripts, javascript,
        comments and styles. NOTE(review): mutates self.dom in place."""
        # Try to parse the provided HTML string using lxml
        # strip all unnecessary information to save space
        cleaner = Cleaner()
        cleaner.scripts = True
        cleaner.javascript = True
        cleaner.comments = True
        cleaner.style = True
        self.dom = cleaner.clean_html(self.dom)
        assert len(self.dom), 'The html needs to be parsed to get the cleaned html'
        return lxml.html.tostring(self.dom)

    def iter_serp_items(self):
        """Yields the key and index of any item in the serp results that has a link value"""
        for key, value in self.search_results.items():
            if isinstance(value, list):
                for i, item in enumerate(value):
                    if isinstance(item, dict) and item['link']:
                        yield (key, i)
"""
Here follow the different classes that provide CSS selectors
for different types of SERP pages of several common search engines.
Just look at them and add your own selectors in a new class if you
want the Scraper to support them.
You can easily just add new selectors to a search engine. Just follow
the attribute naming convention and the parser will recognize them:
If you provide a dict with a name like finance_search_selectors,
then you're adding a new search type with the name finance.
Each class needs an attribute called num_results_search_selectors that
extracts the number of search results that were found for the keyword.
Please note:
The actual selectors are wrapped in a dictionary to clarify with which IP
they were requested. The key to the wrapper div allows to specify distinct
criteria to whatever settings you used when you requested the page. So you
might add your own selectors for different User-Agents, distinct HTTP headers, what-
ever you may imagine. This allows the most dynamic parsing behaviour and makes
it very easy to grab all data the site has to offer.
"""
class GoogleParser(Parser):
    """Parses SERP pages of the Google search engine."""
    search_engine = 'google'
    search_types = ['normal', 'image']
    # Selector for the corrected query Google suggests ("showing results for ...").
    effective_query_selector = ['#topstuff .med > b::text', '.med > a > b::text']
    no_results_selector = []
    num_results_search_selectors = ['#resultStats']
    page_number_selectors = ['#navcnt td.cur::text']
    # Selector sets keyed by the request context they were captured with
    # (see module docstring above the parser classes).
    normal_search_selectors = {
        'results': {
            'us_ip': {
                'container': '#center_col',
                'result_container': 'div.g ',
                'link': 'div.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'div.r > a > h3::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#center_col',
                'result_container': 'li.g ',
                'link': 'h3.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'h3.r > a:first-child::text',
                'visible_link': 'cite::text'
            },
            'de_ip_news_items': {
                'container': 'li.card-section',
                'link': 'a._Dk::attr(href)',
                'snippet': 'span._dwd::text',
                'title': 'a._Dk::text',
                'visible_link': 'cite::text'
            },
        },
        # Paid results shown in the main column.
        'ads_main': {
            'us_ip': {
                'container': '#center_col',
                'result_container': 'li.ads-ad',
                'link': 'div.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'div.r > a > h3::text',
                'visible_link': '.ads-visurl cite::text',
            },
            'de_ip': {
                'container': '#center_col',
                'result_container': '.ads-ad',
                'link': 'h3 > a:first-child::attr(href)',
                'snippet': '.ads-creative::text',
                'title': 'h3 > a:first-child::text',
                'visible_link': '.ads-visurl cite::text',
            }
        },
        # those css selectors are probably not worth much
        'maps_local': {
            'de_ip': {
                'container': '#center_col',
                'result_container': '.ccBEnf > div',
                'link': 'link::attr(href)',
                'snippet': 'div.rl-qs-crs-t::text',
                'title': 'div[role="heading"] span::text',
                'rating': 'span.BTtC6e::text',
                'num_reviews': '.rllt__details::text',
            }
        },
        # Intentionally empty: side-column ads are not scraped.
        'ads_aside': {
        }
    }
    image_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#res',
                'result_container': '.rg_bx',
                'link': 'a.rg_l::attr(href)',
                'snippet': '.a-no-hover-decoration::text',
            },
            'de_ip_http_mode': {
                'container': '#search',
                'result_container': '.rg_bx',
                'link': 'a.rg_l::attr(href)',
                'snippet': '.a-no-hover-decoration::text',
            }
        }
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Clean the urls.
        A typical scraped results looks like the following:
        '/url?q=http://www.youtube.com/user/Apple&sa=U&ei=\
        lntiVN7JDsTfPZCMgKAO&ved=0CFQQFjAO&usg=AFQjCNGkX65O-hKLmyq1FX9HQqbb9iYn9A'
        Clean with a short regex.
        """
        super().after_parsing()
        if self.searchtype == 'normal':
            # First guess no_results from the parsed result count ...
            if self.num_results > 0:
                self.no_results = False
            elif self.num_results <= 0:
                self.no_results = True
            # ... then from well known phrases in the raw HTML ...
            if 'No results found for' in self.html or 'did not match any documents' in self.html:
                self.no_results = True
            # finally try in the snippets
            if self.no_results is True:
                for key, i in self.iter_serp_items():
                    if 'snippet' in self.search_results[key][i] and self.query:
                        if self.query.replace('"', '') in self.search_results[key][i]['snippet']:
                            self.no_results = False
        # Unwrap Google's redirect links into the real target urls.
        clean_regexes = {
            'normal': r'/url\?q=(?P<url>.*?)&sa=U&ei=',
            'image': r'imgres\?imgurl=(?P<url>.*?)&'
        }
        for key, i in self.iter_serp_items():
            result = re.search(
                clean_regexes[self.searchtype],
                self.search_results[key][i]['link']
            )
            if result:
                self.search_results[key][i]['link'] = unquote(result.group('url'))
class YandexParser(Parser):
    """Parses SERP pages of the Yandex search engine."""
    search_engine = 'yandex'
    search_types = ['normal', 'image']
    no_results_selector = ['.message .misspell__message::text']
    effective_query_selector = ['.misspell__message .misspell__link']
    # @TODO: In december 2015, I saw that yandex only shows the number of search results in the search input field
    # with javascript. One can scrape it in plain http mode, but the values are hidden in some javascript and not
    # accessible with normal xpath/css selectors. A normal text search is done.
    num_results_search_selectors = ['.serp-list .serp-adv__found::text', '.input__found_visibility_visible font font::text']
    page_number_selectors = ['.pager__group .button_checked_yes span::text']
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '.serp-list',
                'result_container': '.serp-item',
                'link': 'a.link::attr(href)',
                'snippet': 'div.text-container::text',
                'title': 'div.organic__url-text::text',
                'visible_link': '.typo_type_greenurl::text'
            }
        }
    }
    image_search_selectors = {
        'results': {
            # Browser mode: the image url hides in the onmousedown JS handler.
            'de_ip': {
                'container': '.page-layout__content-wrapper',
                'result_container': '.serp-item__preview',
                'link': '.serp-item__preview .serp-item__link::attr(onmousedown)'
            },
            # Raw http mode: the image url sits in the href query string.
            'de_ip_raw': {
                'container': '.page-layout__content-wrapper',
                'result_container': '.serp-item__preview',
                'link': '.serp-item__preview .serp-item__link::attr(href)'
            }
        }
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Clean the urls.
        Normally Yandex image search store the image url in the onmousedown attribute in a json object. Its
        pretty messsy. This method grabs the link with a quick regex.
        c.hit({"dtype":"iweb","path":"8.228.471.241.184.141","pos":69,"reqid":\
        "1418919408668565-676535248248925882431999-ws35-986-IMG-p2"}, \
        {"href":"http://www.thewallpapers.org/wallpapers/3/382/thumb/600_winter-snow-nature002.jpg"});
        Sometimes the img url is also stored in the href attribute (when requesting with raw http packets).
        href="/images/search?text=snow&img_url=\
        http%3A%2F%2Fwww.proza.ru%2Fpics%2F2009%2F12%2F07%2F1290.jpg&pos=2&rpt=simage&pin=1">
        """
        super().after_parsing()
        if self.searchtype == 'normal':
            self.no_results = False
            if self.no_results_text:
                # The Russian phrase is Yandex's "nothing was found" message.
                self.no_results = 'По вашему запросу ничего не нашлось' in self.no_results_text
            if self.num_results == 0:
                self.no_results = True
            # very hackish, probably prone to all kinds of errors.
            if not self.num_results_for_query:
                substr = 'function() { var title = "%s —' % self.query
                try:
                    i = self.html.index(substr)
                    if i:
                        self.num_results_for_query = re.search(r'— (.)*?"', self.html[i:i+len(self.query) + 150]).group()
                except Exception as e:
                    # Best effort only: a missing substring or failed match
                    # leaves num_results_for_query untouched.
                    logger.debug(str(e))
        if self.searchtype == 'image':
            # Try the JSON-in-onmousedown form first, then the href form.
            for key, i in self.iter_serp_items():
                for regex in (
                        r'\{"href"\s*:\s*"(?P<url>.*?)"\}',
                        r'img_url=(?P<url>.*?)&'
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = result.group('url')
                        break
class BingParser(Parser):
    """Parses SERP pages of the Bing search engine."""
    search_engine = 'bing'
    search_types = ['normal', 'image']
    no_results_selector = ['#b_results > .b_ans::text']
    num_results_search_selectors = ['.sb_count']
    effective_query_selector = ['#sp_requery a > strong', '#sp_requery + #sp_recourse a::attr(href)']
    page_number_selectors = ['.sb_pagS::text']
    normal_search_selectors = {
        'results': {
            'us_ip': {
                'container': '#b_results',
                'result_container': '.b_algo',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#b_results',
                'result_container': '.b_algo',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2::text',
                'visible_link': 'cite::text'
            },
            'de_ip_news_items': {
                'container': 'ul.b_vList li',
                'link': ' h5 a::attr(href)',
                'snippet': 'p::text',
                'title': ' h5 a::text',
                'visible_link': 'cite::text'
            },
        },
        # Paid results shown in the main column.
        'ads_main': {
            'us_ip': {
                'container': '#b_results .b_ad',
                'result_container': '.sb_add',
                'link': 'h2 > a::attr(href)',
                'snippet': '.sb_addesc::text',
                'title': 'h2 > a::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#b_results .b_ad',
                'result_container': '.sb_add',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2 > a::text',
                'visible_link': 'cite::text'
            }
        }
    }
    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#dg_c .imgres',
                'result_container': '.dg_u',
                # The real image url hides in the 'm' attribute (see
                # after_parsing below).
                'link': 'a.dv_i::attr(m)'
            },
        }
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Clean the urls.
        The image url data is in the m attribute.
        m={ns:"images.1_4",k:"5018",mid:"46CE8A1D71B04B408784F0219B488A5AE91F972E",
        surl:"http://berlin-germany.ca/",imgurl:"http://berlin-germany.ca/images/berlin250.jpg",
        oh:"184",tft:"45",oi:"http://berlin-germany.ca/images/berlin250.jpg"}
        """
        super().after_parsing()
        if self.searchtype == 'normal':
            self.no_results = False
            if self.no_results_text:
                # Bing either repeats the query or asks "Do you want
                # results only for ..." when nothing matched.
                self.no_results = self.query in self.no_results_text \
                    or 'Do you want results only for' in self.no_results_text
        if self.searchtype == 'image':
            for key, i in self.iter_serp_items():
                for regex in (
                        r'imgurl:"(?P<url>.*?)"',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = result.group('url')
                        break
class YahooParser(Parser):
    """Parses SERP pages of the Yahoo search engine."""
    search_engine = 'yahoo'
    search_types = ['normal', 'image']
    no_results_selector = []
    effective_query_selector = ['.msg #cquery a::attr(href)']
    num_results_search_selectors = ['#pg > span:last-child', '.compPagination span::text']
    page_number_selectors = ['#pg > strong::text']
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#main',
                'result_container': '.res',
                'link': 'div > h3 > a::attr(href)',
                'snippet': 'div.abstr::text',
                'title': 'div > h3 > a::text',
                'visible_link': 'span.url::text'
            },
            'de_ip_december_2015': {
                'container': '#main',
                'result_container': '.searchCenterMiddle li',
                'link': 'h3.title a::attr(href)',
                'snippet': '.compText p::text',
                'title': 'h3.title a::text',
                'visible_link': 'span::text'
            },
        },
    }
    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#results',
                'result_container': '#sres > li',
                'link': 'a::attr(href)'
            },
        }
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Clean the urls.
        The url is in the href attribute and the &imgurl= parameter.
        <a id="yui_3_5_1_1_1419284335995_1635" aria-label="<b>Matterhorn</b> sunrise"
        href="/images/view;_ylt=AwrB8phvj5hU7moAFzOJzbkF;_ylu=\
        X3oDMTIyc3ZrZ3RwBHNlYwNzcgRzbGsDaW1nBG9pZANmNTgyY2MyYTY4ZmVjYTI5YmYwNWZlM2E3ZTc1YzkyMARncG9zAzEEaXQDYmluZw--?
        .origin=&back=https%3A%2F%2Fimages.search.yahoo.com%2Fsearch%2Fimages%3F\
        p%3Dmatterhorn%26fr%3Dyfp-t-901%26fr2%3Dpiv-web%26tab%3Dorganic%26ri%3D1&w=4592&h=3056&
        imgurl=www.summitpost.org%2Fimages%2Foriginal%2F699696.JPG&rurl=http%3A%2F%2Fwww.summitpost.org\
        %2Fmatterhorn-sunrise%2F699696&size=5088.0KB&
        name=%3Cb%3EMatterhorn%3C%2Fb%3E+sunrise&p=matterhorn&oid=f582cc2a68feca29bf05fe3a7e75c920&fr2=piv-web&
        fr=yfp-t-901&tt=%3Cb%3EMatterhorn%3C%2Fb%3E+sunrise&b=0&ni=21&no=1&ts=&tab=organic&
        sigr=11j056ue0&sigb=134sbn4gc&sigi=11df3qlvm&sigt=10pd8j49h&sign=10pd8j49h&.crumb=qAIpMoHvtm1&\
        fr=yfp-t-901&fr2=piv-web">
        """
        super().after_parsing()
        if self.searchtype == 'normal':
            self.no_results = False
            if self.num_results == 0:
                self.no_results = True
            if len(self.dom.xpath(self.css_to_xpath('#cquery'))) >= 1:
                self.no_results = True
            # BUGFIX: the original deleted entries from the result lists
            # while iter_serp_items() was still enumerating those very
            # lists. Each deletion shifts the remaining indices, so items
            # after a deleted one were skipped or the wrong item removed.
            # Collect the positions first and delete highest index first
            # within each list so earlier positions stay valid.
            doomed = [(key, i) for key, i in self.iter_serp_items()
                      if self.search_results[key][i]['visible_link'] is None]
            for key, i in sorted(doomed, key=lambda pos: pos[1], reverse=True):
                del self.search_results[key][i]
        if self.searchtype == 'image':
            for key, i in self.iter_serp_items():
                for regex in (
                        r'&imgurl=(?P<url>.*?)&',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        # TODO: Fix this manual protocol adding by parsing "rurl"
                        self.search_results[key][i]['link'] = 'http://' + unquote(result.group('url'))
                        break
class BaiduParser(Parser):
    """Parses SERP pages of the Baidu search engine."""
    search_engine = 'baidu'
    search_types = ['normal', 'image']
    num_results_search_selectors = ['#container .nums']
    no_results_selector = []
    # no such thing for baidu
    effective_query_selector = ['']
    page_number_selectors = ['.fk_cur + .pc::text']
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#content_left',
                'result_container': '.result-op',
                'link': 'h3 > a.t::attr(href)',
                'snippet': '.c-abstract::text',
                'title': 'h3 > a.t::text',
                'visible_link': 'span.c-showurl::text'
            },
            'nojs': {
                'container': '#content_left',
                'result_container': '.result',
                'link': 'h3 > a::attr(href)',
                'snippet': '.c-abstract::text',
                'title': 'h3 > a::text',
                'visible_link': 'span.g::text'
            }
        },
    }
    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#imgContainer',
                'result_container': '.pageCon > li',
                'link': '.imgShow a::attr(href)'
            },
        }
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Clean the urls.
        href="/i?ct=503316480&z=&tn=baiduimagedetail&ipn=d&word=matterhorn&step_word=&ie=utf-8&in=9250&
        cl=2&lm=-1&st=&cs=3326243323,1574167845&os=1495729451,4260959385&pn=0&rn=1&di=69455168860&ln=1285&
        fr=&&fmq=1419285032955_R&ic=&s=&se=&sme=0&tab=&width=&height=&face=&is=&istype=&ist=&jit=&
        objurl=http%3A%2F%2Fa669.phobos.apple.com%2Fus%2Fr1000%2F077%2FPurple%2F\
        v4%2F2a%2Fc6%2F15%2F2ac6156c-e23e-62fd-86ee-7a25c29a6c72%2Fmzl.otpvmwuj.1024x1024-65.jpg&adpicid=0"
        """
        super().after_parsing()
        # BUGFIX: the original tested `self.search_engine == 'normal'`.
        # search_engine is always 'baidu', so the no-results detection
        # below could never run. Every sibling parser branches on
        # self.searchtype here.
        if self.searchtype == 'normal':
            if len(self.dom.xpath(self.css_to_xpath('.hit_top_new'))) >= 1:
                self.no_results = True
        if self.searchtype == 'image':
            for key, i in self.iter_serp_items():
                for regex in (
                        r'&objurl=(?P<url>.*?)&',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = unquote(result.group('url'))
                        break
class DuckduckgoParser(Parser):
    """Parses SERP pages of the Duckduckgo search engine."""
    search_engine = 'duckduckgo'
    search_types = ['normal']
    num_results_search_selectors = []
    no_results_selector = []
    effective_query_selector = ['']
    # duckduckgo is loads next pages with ajax
    page_number_selectors = ['']
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#links',
                'result_container': '.result',
                'link': '.result__title > a::attr(href)',
                'snippet': 'result__snippet::text',
                'title': '.result__title > a::text',
                'visible_link': '.result__url__domain::text'
            },
            'non_javascript_mode': {
                'container': '#content',
                'result_container': '.results_links',
                'link': '.links_main > a::attr(href)',
                'snippet': '.snippet::text',
                'title': '.links_main > a::text',
                'visible_link': '.url::text'
            },
        },
    }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def after_parsing(self):
        """Derive the no_results flag for duckduckgo result pages."""
        super().after_parsing()
        if self.searchtype == 'normal':
            try:
                if 'No more results.' in self.dom.xpath(self.css_to_xpath('.no-results'))[0].text_content():
                    self.no_results = True
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Only the absence of a
            # '.no-results' element (IndexError) or a non-element node
            # (AttributeError) are expected and safe to ignore here.
            except (IndexError, AttributeError):
                pass
            if self.num_results > 0:
                self.no_results = False
            elif self.num_results <= 0:
                self.no_results = True
class AskParser(Parser):
    """Parses SERP pages of the Ask search engine."""
    search_engine = 'ask'
    search_types = ['normal']
    num_results_search_selectors = []
    no_results_selector = []
    effective_query_selector = ['#spell-check-result > a']
    page_number_selectors = ['.pgcsel .pg::text']
    # Pure selector definitions; Ask needs no custom after_parsing().
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#midblock',
                'result_container': '.ptbs.ur',
                'link': '.abstract > a::attr(href)',
                'snippet': '.abstract::text',
                'title': '.txt_lg.b::text',
                'visible_link': '.durl span::text'
            },
            'de_ip_december_2015': {
                'container': '.l-mid-content',
                'result_container': '.web-result',
                'link': '.web-result-title > a::attr(href)',
                'snippet': '.web-result-description::text',
                'title': '.web-result-title > a::text',
                'visible_link': '.web-result-url::text'
            },
            # as requested by httm mode
            'de_ip_december_2015_raw_http': {
                'container': '#midblock',
                'result_container': '#teoma-results .wresult',
                'link': 'a.title::attr(href)',
                'snippet': '.abstract::text',
                'title': 'a.title::text',
                'visible_link': '.durl span::text'
            }
        },
    }
class BlekkoParser(Parser):
    """Parses SERP pages of the Blekko search engine."""
    search_engine = 'blekko'
    search_types = ['normal']
    effective_query_selector = ['']
    no_results_selector = []
    num_results_search_selectors = []
    # NOTE(review): these selectors are identical to the duckduckgo
    # 'de_ip' set -- presumably copied as a placeholder; verify against
    # real blekko markup.
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#links',
                'result_container': '.result',
                'link': '.result__title > a::attr(href)',
                'snippet': 'result__snippet::text',
                'title': '.result__title > a::text',
                'visible_link': '.result__url__domain::text'
            }
        },
    }
def get_parser_by_url(url):
    """Get the appropriate parser by an search engine url.

    Args:
        url: The url that was used to issue the search

    Returns:
        The correct parser that can parse results for this url.

    Raises:
        UnknowUrlException if no parser could be found for the url.
    """
    parser = None

    if re.search(r'^http[s]?://www\.google', url):
        parser = GoogleParser
    # NOTE(review): the yandex/bing/baidu patterns only match plain http
    # urls -- confirm whether https variants should match too.
    elif re.search(r'^http://yandex\.ru', url):
        parser = YandexParser
    elif re.search(r'^http://www\.bing\.', url):
        parser = BingParser
    elif re.search(r'^http[s]?://search\.yahoo.', url):
        parser = YahooParser
    elif re.search(r'^http://www\.baidu\.com', url):
        parser = BaiduParser
    elif re.search(r'^https://duckduckgo\.com', url):
        parser = DuckduckgoParser
    # CONSISTENCY: the last two branches used plain `if` statements, so a
    # later pattern could silently override an earlier match; they are now
    # part of the elif chain like all other engines.
    elif re.search(r'^http[s]?://[a-z]{2}?\.ask', url):
        parser = AskParser
    elif re.search(r'^http[s]?://blekko', url):
        parser = BlekkoParser

    if not parser:
        raise UnknowUrlException('No parser for {}.'.format(url))

    return parser
def get_parser_by_search_engine(search_engine):
    """Get the appropriate parser for the search_engine

    Args:
        search_engine: The name of a search_engine.

    Returns:
        A parser for the search_engine

    Raises:
        NoParserForSearchEngineException if no parser could be found for the name.
    """
    # Dispatch table instead of a long if/elif cascade; the image variants
    # share the parser of their parent engine.
    engine_to_parser = {
        'google': GoogleParser,
        'googleimg': GoogleParser,
        'yandex': YandexParser,
        'bing': BingParser,
        'yahoo': YahooParser,
        'baidu': BaiduParser,
        'baiduimg': BaiduParser,
        'duckduckgo': DuckduckgoParser,
        'ask': AskParser,
        'blekko': BlekkoParser,
    }
    if search_engine in engine_to_parser:
        return engine_to_parser[search_engine]
    raise NoParserForSearchEngineException('No such parser for "{}"'.format(search_engine))
def parse_serp(config, html=None, parser=None, scraper=None, search_engine=None, query=''):
    """Store the parsed data in the sqlalchemy session.

    If no parser is supplied then we are expected to parse again with
    the provided html.

    This function may be called from scraping and caching.
    When called from caching, some info is lost (like current page number).

    Args:
        TODO: A whole lot

    Returns:
        The parsed SERP object.
    """
    # When only raw html is available, build and run a fresh parser first.
    if not parser and html:
        parser_class = get_parser_by_search_engine(search_engine)
        parser = parser_class(config, query=query)
        parser.parse(html)

    serp = SearchEngineResultsPage()

    if query:
        serp.query = query
    if parser:
        serp.set_values_from_parser(parser)
    if scraper:
        serp.set_values_from_scraper(scraper)

    return serp
if __name__ == '__main__':
    """Originally part of https://github.com/NikolaiT/GoogleScraper.
    Only for testing purposes: May be called directly with an search engine
    search url. For example:
    python3 parsing.py 'http://yandex.ru/yandsearch?text=GoogleScraper&lr=178&csg=82%2C4317%2C20%2C20%2C0%2C0%2C0'
    Please note: Using this module directly makes little sense, because requesting such urls
    directly without imitating a real browser (which is done in my GoogleScraper module) makes
    the search engines return crippled html, which makes it impossible to parse.
    But for some engines it nevertheless works (for example: yandex, google, ...).
    """
    import requests

    assert len(sys.argv) >= 2, 'Usage: {} url/file'.format(sys.argv[0])
    url = sys.argv[1]
    # Either read previously saved html from a file (second argv names the
    # engine), or fetch the url live and infer the engine from it.
    if os.path.exists(url):
        raw_html = open(url, 'r').read()
        parser = get_parser_by_search_engine(sys.argv[2])
    else:
        raw_html = requests.get(url).text
        parser = get_parser_by_url(url)
    # NOTE(review): parse_serp() instantiates parsers as
    # parser(config, query=query) and calls parse(html); passing raw_html
    # as the first positional argument and calling parse() with no html
    # here looks inconsistent -- confirm the Parser constructor still
    # accepts raw html directly.
    parser = parser(raw_html)
    parser.parse()
    print(parser)
    # keep a copy of the fetched page for manual inspection
    with open('/tmp/testhtml.html', 'w') as of:
        of.write(raw_html)
| 39,004 | 34.203069 | 153 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/selenium_mode.py | # -*- coding: utf-8 -*-
import tempfile
import threading
from urllib.parse import quote
import json
import datetime
import time
import math
import random
import re
import sys
import os
try:
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
except ImportError as ie:
print(ie)
sys.exit('You can install missing modules with `pip3 install [modulename]`')
from GoogleScraper.scraping import SearchEngineScrape, SeleniumSearchError, get_base_search_url_by_search_engine, MaliciousRequestDetected
from GoogleScraper.user_agents import random_user_agent
import logging
logger = logging.getLogger(__name__)
class NotSupportedException(Exception):
    """Raised when a requested selenium feature is not supported."""
def check_detection(config, search_engine_name):
    """
    Checks whether the search engine specified by search_engine_name
    blocked us.

    Args:
        config: Scraper configuration; read for 'chromedriver_path'.
        search_engine_name: Name of the engine to probe. Only 'google'
            is implemented.

    Returns:
        A (code, status) tuple where code is 'DETECTED' or 'UNDETECTED'
        and status is a human readable trace of the probe.

    Raises:
        NotImplementedError: For any engine other than google.
    """
    status = ''
    chromedriver = config.get('chromedriver_path', '/usr/bin/chromedriver')

    options = webdriver.ChromeOptions()
    options.add_argument('headless')
    options.add_argument('window-size=1200x600')
    browser = webdriver.Chrome(chrome_options=options, executable_path=chromedriver)

    # BUGFIX: the original placed browser.quit() after code paths that all
    # returned or raised, so it was unreachable and every probe leaked a
    # headless Chrome process. try/finally guarantees the cleanup now.
    try:
        if search_engine_name != 'google':
            raise NotImplementedError('Detection check only implemented for Google Right now.')

        url = get_base_search_url_by_search_engine(config, 'google', 'selenium')
        browser.get(url)

        def check(browser, status):
            # A captcha page is recognized by well known needles in both
            # the current url and the page source.
            needles = SearchEngineScrape.malicious_request_needles['google']
            if needles['inurl'] in browser.current_url and needles['inhtml'] in browser.page_source:
                status += 'Google is asking for a captcha! '
                code = 'DETECTED'
            else:
                status += 'No captcha prompt detected. '
                code = 'UNDETECTED'
            return (code, status)

        try:
            search_input = WebDriverWait(browser, 5).until(
                EC.visibility_of_element_located((By.NAME, 'q')))
            status += 'Got a search input field. '
        except TimeoutException:
            status += 'No search input field located after 5 seconds. '
            return check(browser, status)

        try:
            # random query
            search_input.send_keys('President of Finland' + Keys.ENTER)
            status += 'Google Search successful! '
        except WebDriverException:
            status += 'Cannot make a google search! '
            return check(browser, status)

        return check(browser, status)
    finally:
        browser.quit()
def get_selenium_scraper_by_search_engine_name(config, search_engine_name, *args, **kwargs):
    """Get the appropriate selenium scraper for the given search engine name.

    Args:
        search_engine_name: The search engine name.
        args: The arguments for the target search engine instance creation.
        kwargs: The keyword arguments for the target search engine instance creation.

    Returns;
        Either a concrete SelScrape instance specific for the given search engine or the abstract SelScrape object.
    """
    # Engine specific subclasses follow the '<Name>SelScrape' naming
    # convention; fall back to the generic SelScrape when none exists.
    specific_name = search_engine_name[0].upper() + search_engine_name[1:].lower() + 'SelScrape'
    scraper_class = globals().get(specific_name, SelScrape)
    return scraper_class(config, *args, **kwargs)
class SelScrape(SearchEngineScrape, threading.Thread):
"""Instances of this class make use of selenium browser
objects to query the search engines on a high level.
"""
next_page_selectors = {
'google': '#pnnext',
'yandex': '.pager__item_kind_next',
'bing': '.sb_pagN',
'yahoo': '#pg-next',
'baidu': '.n',
'ask': '#paging div a.txt3.l_nu',
'blekko': '',
'duckduckgo': '',
'googleimg': '#pnnext',
'baiduimg': '.n',
}
input_field_selectors = {
'google': (By.NAME, 'q'),
'yandex': (By.NAME, 'text'),
'bing': (By.NAME, 'q'),
'yahoo': (By.NAME, 'p'),
'baidu': (By.NAME, 'wd'),
'duckduckgo': (By.NAME, 'q'),
'ask': (By.NAME, 'q'),
'blekko': (By.NAME, 'q'),
'google': (By.NAME, 'q'),
'googleimg': (By.NAME, 'as_q'),
'baiduimg': (By.NAME, 'word'),
}
param_field_selectors = {
'googleimg': {
'image_type': (By.ID, 'imgtype_input'),
'image_size': (By.ID, 'imgsz_input'),
},
}
search_params = {
'googleimg': {
'image_type': None,
'image_size': None,
},
}
normal_search_locations = {
'google': 'https://www.google.com/',
'yandex': 'http://www.yandex.ru/',
'bing': 'http://www.bing.com/',
'yahoo': 'https://yahoo.com/',
'baidu': 'http://baidu.com/',
'duckduckgo': 'https://duckduckgo.com/',
'ask': 'http://ask.com/',
'blekko': 'http://blekko.com/',
}
image_search_locations = {
'google': 'https://www.google.com/imghp?tbm=isch',
'yandex': 'http://yandex.ru/images/',
'bing': 'https://www.bing.com/?scope=images',
'yahoo': 'http://images.yahoo.com/',
'baidu': 'http://image.baidu.com/',
'duckduckgo': None, # duckduckgo doesnt't support direct image search
'ask': 'http://www.ask.com/pictures/',
'blekko': None,
'googleimg':'https://www.google.com/advanced_image_search',
'baiduimg': 'http://image.baidu.com/',
}
    def __init__(self, config, *args, captcha_lock=None, browser_num=1, **kwargs):
        """Create a new SelScraper thread Instance.

        Args:
            captcha_lock: To sync captcha solving (stdin)
            proxy: Optional, if set, use the proxy to route all scraping through it.
            browser_num: A unique, semantic number for each thread.
        """
        self.search_input = None

        threading.Thread.__init__(self)
        SearchEngineScrape.__init__(self, config, *args, **kwargs)

        # Browser defaults: chrome running headless.
        self.browser_type = self.config.get('sel_browser', 'chrome').lower()
        self.browser_mode = self.config.get('browser_mode', 'headless').lower()
        self.browser_num = browser_num
        self.captcha_lock = captcha_lock
        self.scrape_method = 'selenium'

        # number of tabs per instance
        self.number_of_tabs = self.config.get('num_tabs', 1)

        self.xvfb_display = self.config.get('xvfb_display', None)

        self.search_param_values = self._get_search_param_values()

        self.user_agent = random_user_agent()

        # get the base search url based on the search engine.
        self.base_search_url = get_base_search_url_by_search_engine(self.config, self.search_engine_name, self.scrape_method)
        super().instance_creation_info(self.__class__.__name__)
def switch_to_tab(self, tab_number):
"""Switch to tab identified by tab_number
https://stackoverflow.com/questions/46425797/opening-link-in-the-new-tab-and-switching-between-tabs-selenium-webdriver-pyt
https://gist.github.com/lrhache/7686903
"""
assert tab_number < self.number_of_tabs
first_link = first_result.find_element_by_tag_name('a')
# Save the window opener (current window, do not mistaken with tab... not the same)
main_window = browser.current_window_handle
# Open the link in a new tab by sending key strokes on the element
# Use: Keys.CONTROL + Keys.SHIFT + Keys.RETURN to open tab on top of the stack
first_link.send_keys(Keys.CONTROL + Keys.RETURN)
# Switch tab to the new tab, which we will assume is the next one on the right
browser.find_element_by_tag_name('body').send_keys(Keys.CONTROL + Keys.TAB)
# Put focus on current window which will, in fact, put focus on the current visible tab
browser.switch_to_window(main_window)
# do whatever you have to do on this page, we will just got to sleep for now
sleep(2)
# Close current tab
browser.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 'w')
# Put focus on current window which will be the window opener
browser.switch_to_window(main_window)
    def set_proxy(self):
        """Install a proxy on the communication channel.

        Intentionally a no-op here: proxies are configured while the
        webdriver is created (see _get_Chrome / _get_Firefox).
        """

    def switch_proxy(self, proxy):
        """Switch the proxy on the communication channel.

        Intentionally a no-op in this implementation.
        """
def proxy_check(self, proxy):
assert self.proxy and self.webdriver, 'Scraper instance needs valid webdriver and proxy instance to make the proxy check'
online = False
status = 'Proxy check failed: {host}:{port} is not used while requesting'.format(**self.proxy.__dict__)
ipinfo = {}
try:
self.webdriver.get(self.config.get('proxy_info_url'))
try:
text = re.search(r'(\{.*?\})', self.webdriver.page_source, flags=re.DOTALL).group(0)
ipinfo = json.loads(text)
except ValueError as v:
logger.critical(v)
except Exception as e:
status = str(e)
if 'ip' in ipinfo and ipinfo['ip']:
online = True
status = 'Proxy is working.'
else:
logger.warning(status)
super().update_proxy_status(status, ipinfo, online)
return online
def _save_debug_screenshot(self):
"""
Saves a debug screenshot of the browser window to figure
out what went wrong.
"""
tempdir = tempfile.gettempdir()
location = os.path.join(tempdir, '{}_{}_debug_screenshot.png'.format(self.search_engine_name, self.browser_type))
self.webdriver.get_screenshot_as_file(location)
def _set_xvfb_display(self):
# TODO: should we check the format of the config?
if self.xvfb_display:
os.environ['DISPLAY'] = self.xvfb_display
def _get_webdriver(self):
"""Return a webdriver instance and set it up with the according profile/ proxies.
https://stackoverflow.com/questions/49162667/unknown-error-call-function-result-missing-value-for-selenium-send-keys-even
Get Chrome Drivers here: https://chromedriver.storage.googleapis.com/index.html?path=2.41/
Returns:
The appropriate webdriver mode according to self.browser_type. If no webdriver mode
could be found, return False.
"""
if self.browser_type == 'chrome':
return self._get_Chrome()
elif self.browser_type == 'firefox':
return self._get_Firefox()
return False
def _get_Chrome(self):
try:
chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = ""
# save resouces, options are experimental
# See here:
# https://news.ycombinator.com/item?id=14103503
# https://stackoverflow.com/questions/49008008/chrome-headless-puppeteer-too-much-cpu
# https://engineering.21buttons.com/crawling-thousands-of-products-using-aws-lambda-80332e259de1
chrome_options.add_argument("test-type")
chrome_options.add_argument('--js-flags="--expose-gc --max-old-space-size=500"')
chrome_options.add_argument(
'user-agent={}'.format(self.user_agent))
chrome_options.add_argument('--enable-precise-memory-info')
chrome_options.add_argument('--disable-default-apps')
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--incognito')
chrome_options.add_argument('--disable-application-cache')
if self.browser_mode == 'headless':
chrome_options.add_argument('headless')
#chrome_options.add_argument('window-size=1200x600') # optional
if self.proxy:
chrome_options.add_argument(
'--proxy-server={}://{}:{}'.format(self.proxy.proto, self.proxy.host, self.proxy.port))
chromedriver_path = self.config.get('chromedriver_path')
self.webdriver = webdriver.Chrome(executable_path=chromedriver_path,
chrome_options=chrome_options)
return True
except WebDriverException as e:
# we don't have a chrome executable or a chrome webdriver installed
raise
return False
    def _get_Firefox(self):
        """Create a Firefox webdriver in self.webdriver.

        Configures headless mode and, when a proxy is configured, the
        matching network.proxy preferences (socks4/socks5 or plain http).

        Returns:
            True on success, False when the webdriver could not be created.
        """
        try:
            bin_path = self.config.get('firefox_binary_path')
            binary = FirefoxBinary(bin_path)
            geckodriver_path = self.config.get('geckodriver_path')
            options = FirefoxOptions()
            profile = webdriver.FirefoxProfile()
            options.add_argument(
                'user-agent={}'.format(self.user_agent))
            if self.browser_mode == 'headless':
                options.set_headless(headless=True)
                #options.add_argument('window-size=1200x600') # optional
            if self.proxy:
                # this means that the proxy is user set, regardless of the type
                profile.set_preference("network.proxy.type", 1)
                if self.proxy.proto.lower().startswith('socks'):
                    profile.set_preference("network.proxy.socks", self.proxy.host)
                    profile.set_preference("network.proxy.socks_port", self.proxy.port)
                    # socks5 when the proto string ends in '5', socks4 otherwise
                    profile.set_preference("network.proxy.socks_version", 5 if self.proxy.proto[-1] == '5' else 4)
                    # NOTE(review): update_preferences() is called again
                    # after this if/elif block for all branches, so this
                    # first call looks redundant -- confirm before removing.
                    profile.update_preferences()
                elif self.proxy.proto == 'http':
                    profile.set_preference("network.proxy.http", self.proxy.host)
                    profile.set_preference("network.proxy.http_port", self.proxy.port)
                else:
                    raise ValueError('Invalid protocol given in proxyfile.')
                profile.update_preferences()
            self.webdriver = webdriver.Firefox(firefox_binary=binary, firefox_options=options,
                                               executable_path=geckodriver_path, firefox_profile=profile)
            return True
        except WebDriverException as e:
            # reaching here is bad, since we have no available webdriver instance.
            logger.error(e)
        return False
def malicious_request_detected(self):
"""Checks whether a malicious request was detected.
"""
needles = self.malicious_request_needles[self.search_engine_name]
return needles and needles['inurl'] in self.webdriver.current_url \
and needles['inhtml'] in self.webdriver.page_source
    def handle_request_denied(self):
        """Check whether Google detected a potentially harmful request.

        Whenever such potential abuse is detected, Google shows a captcha.
        This method blocks until someone has entered the captcha in the
        browser window (or a solving service has been invoked).

        Returns:
            The search input field (only on the blocking-wait path).

        Raises:
            MaliciousRequestDetected when there was no way to stop Google
            from denying our requests.
        """
        # selenium webdriver objects have no status code :/
        if self.malicious_request_detected():
            super().handle_request_denied('400')
            # only solve when in non headless mode
            if self.config.get('manual_captcha_solving', False) and self.config.get('browser_mode') != 'headless':
                with self.captcha_lock:
                    solution = input('Please solve the captcha in the browser! Enter any key when done...')
                    try:
                        # After solving, the search input should become visible again.
                        self.search_input = WebDriverWait(self.webdriver, 7).until(
                            EC.visibility_of_element_located(self._get_search_input_field()))
                    except TimeoutException:
                        raise MaliciousRequestDetected('Requesting with this IP address or cookies is not possible at the moment.')
            elif self.config.get('captcha_solving_service', False):
                # implement request to manual captcha solving service such
                # as https://2captcha.com/
                pass
            else:
                # Just wait until the user solves the captcha in the browser window
                # 10 hours if needed :D
                logger.info('Waiting for user to solve captcha')
                return self._wait_until_search_input_field_appears(10 * 60 * 60)
def _get_search_param_values(self):
search_param_values = {}
if self.search_engine_name in self.search_params:
for param_key in self.search_params[self.search_engine_name]:
cfg = self.config.get(param_key, None)
if cfg:
search_param_values[param_key] = cfg
return search_param_values
    def _get_search_input_field(self):
        """Get the search input field locator for the current search engine.

        Returns:
            A (By, selector) tuple as used by selenium's
            presence_of_element_located() / find_element().
        """
        return self.input_field_selectors[self.search_engine_name]
def _get_search_param_fields(self):
if self.search_engine_name in self.param_field_selectors:
return self.param_field_selectors[self.search_engine_name]
else:
return {}
def _wait_until_search_input_field_appears(self, max_wait=5):
"""Waits until the search input field can be located for the current search engine
Args:
max_wait: How long to wait maximally before returning False.
Returns: False if the search input field could not be located within the time
or the handle to the search input field.
"""
def find_visible_search_input(driver):
input_field = driver.find_element(*self._get_search_input_field())
return input_field
try:
search_input = WebDriverWait(self.webdriver, max_wait).until(find_visible_search_input)
return search_input
except TimeoutException as e:
logger.error('{}: TimeoutException waiting for search input field: {}'.format(self.name, e))
return False
    def _wait_until_search_param_fields_appears(self, max_wait=5):
        """Wait until all extra search parameter fields can be located.

        Args:
            max_wait: How long to wait maximally before returning False.

        Returns:
            True when every parameter field was found, False on timeout.
        """
        def find_visible_search_param(driver):
            for param, field in self._get_search_param_fields().items():
                # find_element raises when the element is absent; WebDriverWait
                # treats that as "not ready yet" and keeps polling.
                input_field = driver.find_element(*field)
                if not input_field:
                    return False
            return True
        try:
            fields = WebDriverWait(self.webdriver, max_wait).until(find_visible_search_param)
            return fields
        except TimeoutException as e:
            logger.error('{}: TimeoutException waiting for search param field: {}'.format(self.name, e))
            return False
    def _goto_next_page(self):
        """
        Click the next page element.

        Returns:
            The url of the next page or False if there is no such url
            (end of available pages for instance).
        """
        next_url = ''
        element = self._find_next_page_element()
        if element and hasattr(element, 'click'):
            # remember the href before clicking; the DOM may go stale afterwards
            next_url = element.get_attribute('href')
            try:
                element.click()
            except WebDriverException:
                # See http://stackoverflow.com/questions/11908249/debugging-element-is-not-clickable-at-point-error
                # first move mouse to the next element, some times the element is not visibility, like blekko.com
                selector = self.next_page_selectors[self.search_engine_name]
                if selector:
                    try:
                        next_element = WebDriverWait(self.webdriver, 5).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, selector)))
                        webdriver.ActionChains(self.webdriver).move_to_element(next_element).perform()
                        # wait until the next page link emerges
                        WebDriverWait(self.webdriver, 8).until(
                            EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
                        element = self.webdriver.find_element_by_css_selector(selector)
                        next_url = element.get_attribute('href')
                        element.click()
                    except WebDriverException:
                        # best effort: give up on this page if it is still not clickable
                        pass
        # wait until the next page was loaded
        if not next_url:
            return False
        else:
            return next_url
    def _find_next_page_element(self):
        """Finds the element that locates the next page for any search engine.

        Returns:
            The element that needs to be clicked to get to the next page, or a
            boolean value: False when no next page exists, True for image
            search where "next page" just means scrolling down.
        """
        if self.search_type == 'normal':
            selector = self.next_page_selectors[self.search_engine_name]
            try:
                # wait until the next page link is clickable
                WebDriverWait(self.webdriver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, selector)))
            except (WebDriverException, TimeoutException) as e:
                # If we can't detect the next page element because there is no
                # next page (for example because the search query is to unique)
                # we need to return false
                self._save_debug_screenshot()
                logger.warning('{}: Cannot locate next page element: {}'.format(self.name, str(e)))
                return False
            return self.webdriver.find_element_by_css_selector(selector)
        elif self.search_type == 'image':
            # image SERPs paginate by scrolling, not by clicking a link
            self.page_down()
            return True
def wait_until_serp_loaded(self):
"""
This method tries to wait until the page requested is loaded.
We know that the correct page is loaded when self.page_number appears
in the navigation of the page.
"""
if self.search_type == 'normal':
if self.search_engine_name == 'google':
selector = '#navcnt td.cur'
elif self.search_engine_name == 'yandex':
selector = '.pager__item_current_yes'
elif self.search_engine_name == 'bing':
selector = 'nav li a.sb_pagS'
elif self.search_engine_name == 'yahoo':
selector = '.compPagination strong'
elif self.search_engine_name == 'baidu':
selector = '#page .fk_cur + .pc'
elif self.search_engine_name == 'duckduckgo':
# no pagination in duckduckgo
pass
elif self.search_engine_name == 'ask':
selector = '#paging .pgcsel .pg'
if self.search_engine_name == 'duckduckgo':
time.sleep(1.5)
else:
try:
WebDriverWait(self.webdriver, 5).\
until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, selector), str(self.page_number)))
except TimeoutException as e:
self._save_debug_screenshot()
logger.warning('Pagenumber={} did not appear in serp. Maybe there is only one result for this query?'.format(self.page_number))
elif self.search_type == 'image':
self.wait_until_title_contains_keyword()
else:
self.wait_until_title_contains_keyword()
def wait_until_title_contains_keyword(self):
try:
WebDriverWait(self.webdriver, 5).until(EC.title_contains(self.query))
except TimeoutException:
logger.debug(SeleniumSearchError(
'{}: Keyword "{}" not found in title: {}'.format(self.name, self.query, self.webdriver.title)))
    def build_search(self):
        """Build the search for SelScrapers.

        Navigates the webdriver to the engine's start page, appending the
        per-engine "results per page" query parameter when supported.

        Raises:
            Exception: when num_results_per_page is outside the range the
                engine accepts.
        """
        assert self.webdriver, 'Webdriver needs to be ready to build the search'
        if self.config.get('search_type', 'normal') == 'image':
            starting_url = self.image_search_locations[self.search_engine_name]
        else:
            starting_url = self.base_search_url
        num_results = self.config.get('num_results_per_page', 10)
        if self.search_engine_name == 'google':
            if num_results not in (10, 20, 30, 50, 100):
                raise Exception('num_results_per_page for selenium mode and search engine Google must be in (10, 20, 30, 50, 100)')
            starting_url += 'num={}'.format(num_results)
        elif self.search_engine_name == 'bing':
            if num_results not in range(1, 100):
                raise Exception('num_results_per_page for selenium mode and search engine Bing must be in range(1, 100)')
            starting_url += 'count={}'.format(num_results)
        elif self.search_engine_name == 'yahoo':
            if num_results not in range(1, 100):
                raise Exception('num_results_per_page for selenium mode and search engine Yahoo must be in range(1, 100)')
            starting_url += 'n={}'.format(num_results)
        self.webdriver.get(starting_url)
    def search(self):
        """Search with webdriver.

        Fills out the search form of the search engine for each keyword.
        Clicks the next link while pages_per_keyword is not reached.
        """
        for self.query, self.pages_per_keyword in self.jobs.items():
            self.search_input = self._wait_until_search_input_field_appears()
            if self.search_input is False and self.config.get('stop_on_detection'):
                self.status = 'Malicious request detected'
                return
            # check if request denied
            self.handle_request_denied()
            if self.search_input:
                self.search_input.clear()
                time.sleep(.25)
                self.search_param_fields = self._get_search_param_fields()
                if self.search_param_fields:
                    wait_res = self._wait_until_search_param_fields_appears()
                    if wait_res is False:
                        raise Exception('Waiting search param input fields time exceeds')
                    # fill extra parameter fields via javascript (they may be hidden)
                    for param, field in self.search_param_fields.items():
                        if field[0] == By.ID:
                            js_tpl = '''
                            var field = document.getElementById("%s");
                            field.setAttribute("value", "%s");
                            '''
                        elif field[0] == By.NAME:
                            js_tpl = '''
                            var fields = document.getElementsByName("%s");
                            for (var f in fields) {
                                f.setAttribute("value", "%s");
                            }
                            '''
                        js_str = js_tpl % (field[1], self.search_param_values[param])
                        self.webdriver.execute_script(js_str)
                try:
                    self.search_input.send_keys(self.query + Keys.ENTER)
                except ElementNotVisibleException:
                    # the input can be briefly obscured; retry once after a pause
                    time.sleep(2)
                    self.search_input.send_keys(self.query + Keys.ENTER)
                self.requested_at = datetime.datetime.utcnow()
            else:
                logger.debug('{}: Cannot get handle to the input form for keyword {}.'.format(self.name, self.query))
                continue
            super().detection_prevention_sleep()
            super().keyword_info()
            for self.page_number in self.pages_per_keyword:
                self.wait_until_serp_loaded()
                try:
                    self.html = self.webdriver.execute_script('return document.body.innerHTML;')
                except WebDriverException as e:
                    self.html = self.webdriver.page_source
                super().after_search()
                # Click the next page link not when leaving the loop
                # in the next iteration.
                if self.page_number in self.pages_per_keyword:
                    next_url = self._goto_next_page()
                    self.requested_at = datetime.datetime.utcnow()
                    if not next_url:
                        break
    def page_down(self):
        """Scrolls down a page with javascript.

        Used for next page in image search mode or when the
        next results are obtained by scrolling down a page.
        """
        # scroll by one viewport height, whichever of the three metrics is available
        js = '''
        var w = window,
            d = document,
            e = d.documentElement,
            g = d.getElementsByTagName('body')[0],
            y = w.innerHeight|| e.clientHeight|| g.clientHeight;
        window.scrollBy(0,y);
        return y;
        '''
        self.webdriver.execute_script(js)
    def run(self):
        """Run the SelScraper.

        Sets up the (optional) xvfb display and the webdriver, tiles the
        browser window in a 4-column grid by browser number, performs the
        search and finally quits the driver.
        """
        self._set_xvfb_display()
        if not self._get_webdriver():
            raise Exception('{}: Aborting: No available selenium webdriver.'.format(self.name))
        try:
            self.webdriver.set_window_size(400, 400)
            # tile windows: 4 per row, 400px apart
            self.webdriver.set_window_position(400 * (self.browser_num % 4), 400 * (math.floor(self.browser_num // 4)))
        except WebDriverException as e:
            logger.debug('Cannot set window size: {}'.format(e))
        super().before_search()
        if self.startable:
            self.build_search()
            self.search()
        if self.webdriver:
            self.webdriver.quit()
"""
For most search engines, the normal SelScrape works perfectly, but sometimes
the scraping logic is different for other search engines.
Duckduckgo loads new results on the fly (via ajax) and doesn't support any "next page"
link. Other search engines like blekko.com have a completely different SERP page format.
That's why we need to inherit from SelScrape for specific logic that only applies for the given
search engine.
The following functionality may differ in particular:
- _goto_next_page()
- _get_search_input()
- _wait_until_search_input_field_appears()
- _handle_request_denied()
- wait_until_serp_loaded()
"""
class GoogleSelScrape(SelScrape):
    """
    Add Google Settings via this subclass.
    """
    def __init__(self, *args, **kwargs):
        SelScrape.__init__(self, *args, **kwargs)
        self.largest_id = 0
    def build_search(self):
        """
        Specify google page settings according to config.

        Doing this automatically often provocates a captcha question.
        This is highly sensitive.
        """
        super().build_search()
        if self.config.get('google_selenium_search_settings', False):
            # assume we are on the normal google search page right now
            self.webdriver.get('https://www.google.com/preferences?hl=en')
            time.sleep(random.randint(1,4))
            if self.config.get('google_selenium_manual_settings', False):
                return input('Press any Key after search settings completed...')
            oldsize = self.webdriver.get_window_size()
            self.webdriver.maximize_window()
            # wait until we see the settings
            element = WebDriverWait(self.webdriver, 7).until(EC.presence_of_element_located((By.NAME, 'safeui')))
            try:
                if self.config.get('google_selenium_safe_search', False):
                    if self.webdriver.find_element_by_name('safeui').get_attribute('value') != 'on':
                        self.webdriver.find_element_by_name('safeui').click()
                try:
                    if self.config.get('google_selenium_personalization', False):
                        self.webdriver.find_element_by_css_selector('#pson-radio > div:first-child').click()
                    else:
                        self.webdriver.find_element_by_css_selector('#pson-radio > div:nth-child(2)').click()
                except WebDriverException as e:
                    logger.warning('Cannot set personalization settings.')
                time.sleep(random.randint(1,4))
                # set the region
                try:
                    self.webdriver.find_element_by_id('regionanchormore').click()
                except WebDriverException as e:
                    logger.warning('Regions probably already expanded.')
                try:
                    region = self.config.get('google_selenium_region', 'US')
                    self.webdriver.find_element_by_css_selector('div[data-value="{}"]'.format(region)).click()
                except WebDriverException as e:
                    logger.warning('Cannot set region settings.')
                # set the number of results
                try:
                    num_results = self.config.get('google_selenium_num_results', 10)
                    self.webdriver.find_element_by_id('result_slider').click()
                    # reset the slider to the leftmost position
                    for i in range(5):
                        self.webdriver.find_element_by_id('result_slider').send_keys(Keys.LEFT)
                    # move to desired result count (one step per 10 results)
                    for i in range((num_results//10)-1):
                        time.sleep(.25)
                        self.webdriver.find_element_by_id('result_slider').send_keys(Keys.RIGHT)
                except WebDriverException as e:
                    logger.warning('Cannot set number of results settings.')
                time.sleep(random.randint(1,4))
                # save settings
                self.webdriver.find_element_by_css_selector('#form-buttons div:first-child').click()
                time.sleep(1)
                # accept alert
                self.webdriver.switch_to.alert.accept()
                time.sleep(random.randint(1,4))
                self.handle_request_denied()
            except WebDriverException as e:
                logger.error('Unable to set google page settings')
                # fix: removed the leftover debug `input('waiting...')` which
                # blocked forever on the error path before re-raising.
                raise e
            # fix: restore the original window size via self.webdriver; the old
            # code referenced the undefined name `driver` (NameError at runtime).
            self.webdriver.set_window_size(oldsize['width'], oldsize['height'])
class DuckduckgoSelScrape(SelScrape):
    """
    Duckduckgo is a little special since new results are obtained by ajax.
    next page thus is then to scroll down.

    It cannot be the User-Agent, because I already tried this.
    """
    def __init__(self, *args, **kwargs):
        SelScrape.__init__(self, *args, **kwargs)
        # NOTE(review): largest_id appears unused in this class — confirm no
        # base-class consumer relies on it before removing.
        self.largest_id = 0
    def _goto_next_page(self):
        # New results are loaded via ajax when scrolling; there is no link to click.
        super().page_down()
        # The literal 'No more results' in the captured html marks the end.
        return 'No more results' not in self.html
    def wait_until_serp_loaded(self):
        # No pager to observe; the reappearing search box is the best
        # available "page settled" signal.
        super()._wait_until_search_input_field_appears()
class BlekkoSelScrape(SelScrape):
    """Selenium scraper variant for blekko.com.

    Blekko's SERP offers no usable "next page" mechanism, so paging is a no-op.
    """
    def __init__(self, *args, **kwargs):
        SelScrape.__init__(self, *args, **kwargs)
    def _goto_next_page(self):
        # No next page exists; implicitly signals "no next url" to the caller.
        return None
class AskSelScrape(SelScrape):
    """Selenium scraper variant for ask.com."""
    def __init__(self, *args, **kwargs):
        SelScrape.__init__(self, *args, **kwargs)
    def wait_until_serp_loaded(self):
        # ask.com has no usable pager element; instead wait until the query
        # shows up (url-quoted or '+'-joined) in the current URL.
        def wait_until_keyword_in_url(driver):
            try:
                return quote(self.query) in driver.current_url or \
                    self.query.replace(' ', '+') in driver.current_url
            except WebDriverException:
                # returning None keeps WebDriverWait polling
                pass
        WebDriverWait(self.webdriver, 5).until(wait_until_keyword_in_url)
| 36,591 | 37.845011 | 147 | py |
GoogleScraper | GoogleScraper-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'GoogleScraper'
copyright = '2018, Nikolai Tschacher'
author = 'Nikolai Tschacher'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
# NOTE: this project identifies releases by date rather than a version number.
release = 'October 2018'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'GoogleScraperdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'GoogleScraper.tex', 'GoogleScraper Documentation',
     'Nikolai Tschacher', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'googlescraper', 'GoogleScraper Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'GoogleScraper', 'GoogleScraper Documentation',
     author, 'GoogleScraper', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
crnn-pytorch | crnn-pytorch-master/utils.py | #!/usr/bin/python
# encoding: utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import collections
class strLabelConverter(object):
    """Convert between str and label.

    NOTE:
        Insert `blank` to the alphabet for CTC.

    Args:
        alphabet (str): set of the possible characters.
        ignore_case (bool, default=False): whether or not to ignore all of the case.
    """
    def __init__(self, alphabet, ignore_case=False):
        self._ignore_case = ignore_case
        if self._ignore_case:
            alphabet = alphabet.lower()
        self.alphabet = alphabet + '-'  # the dummy '-' represents the CTC blank (index 0)
        # char -> label index; 0 is reserved for 'blank' required by CTC
        self.dict = {}
        for i, char in enumerate(alphabet):
            self.dict[char] = i + 1
    def encode(self, text):
        """Encode a batch of texts into zero-padded label tensors.

        Args:
            text (iterable of str or bytes): texts to convert. bytes items are
                decoded as UTF-8. (fix: plain str items previously crashed on
                `.decode`, contradicting the class docstring.)

        Returns:
            torch.LongTensor [batch, max_len]: encoded texts, padded with the
                blank label (0) up to the longest item.
            torch.LongTensor [batch]: true (unpadded) length of each text.
        """
        lengths = []
        encoded = []
        for item in text:
            if isinstance(item, bytes):
                item = item.decode('utf-8', 'strict')
            lengths.append(len(item))
            # fix: honor ignore_case on input chars (the dict keys are lowered)
            encoded.append([self.dict[char.lower() if self._ignore_case else char]
                            for char in item])
        max_len = max((len(row) for row in encoded), default=0)
        padded = [row + [0] * (max_len - len(row)) for row in encoded]
        return (torch.LongTensor(padded), torch.LongTensor(lengths))
    def decode(self, t, length, raw=False):
        """Decode encoded texts back into strs.

        Args:
            t (torch.LongTensor): flattened encoded labels,
                [length_0 + length_1 + ... + length_{n-1}].
            length (torch.LongTensor [n]): length of each text.
            raw (bool): when True, keep blanks ('-') and repeats.

        Raises:
            AssertionError: when the texts and its length does not match.

        Returns:
            text (str or list of str): decoded texts.
        """
        if length.numel() == 1:
            length = length[0]
            assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(), length)
            if raw:
                return ''.join([self.alphabet[i - 1] for i in t])
            else:
                # CTC collapse: drop blanks (0) and merge adjacent repeats
                char_list = []
                for i in range(length):
                    if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
                        char_list.append(self.alphabet[t[i] - 1])
                return ''.join(char_list)
        else:
            # batch mode: slice the flat tensor per item and recurse
            assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(t.numel(), length.sum())
            texts = []
            index = 0
            for i in range(length.numel()):
                l = length[i]
                texts.append(
                    self.decode(
                        t[index:index + l], torch.LongTensor([l]), raw=raw))
                index += l
            return texts
class averager(object):
    """Compute average for `torch.Variable` and `torch.Tensor`. """
    def __init__(self):
        self.reset()
    def add(self, v):
        """Fold every element of *v* into the running sum and count."""
        # Reduce the container to (element count, scalar sum) first.
        if isinstance(v, Variable):
            count, total = v.data.numel(), v.data.sum()
        elif isinstance(v, torch.Tensor):
            count, total = v.numel(), v.sum()
        self.n_count += count
        self.sum += total
    def reset(self):
        """Forget everything accumulated so far."""
        self.n_count = 0
        self.sum = 0
    def val(self):
        """Return the running mean, or 0 when nothing was added yet."""
        if self.n_count == 0:
            return 0
        return self.sum / float(self.n_count)
def oneHot(v, v_length, nc):
    """Expand flat labels into a (batch, max_len, nc) one-hot float tensor.

    Args:
        v (torch.LongTensor): concatenation of all label sequences.
        v_length (torch.LongTensor [batch]): length of each sequence.
        nc (int): number of classes.

    Returns:
        torch.FloatTensor [batch, max(v_length), nc]: one-hot rows; positions
            beyond a sequence's length stay all-zero.
    """
    batchSize = v_length.size(0)
    # fix: .max() yields a 0-dim tensor on modern torch; cast explicitly so the
    # legacy FloatTensor(size...) constructor and slicing get plain ints.
    maxLength = int(v_length.max())
    v_onehot = torch.FloatTensor(batchSize, maxLength, nc).fill_(0)
    acc = 0
    for i in range(batchSize):
        length = int(v_length[i])
        label = v[acc:acc + length].view(-1, 1).long()
        v_onehot[i, :length].scatter_(1, label, 1.0)
        acc += length
    return v_onehot
def loadData(v, data):
    """Copy *data* into *v* in place, resizing *v* to match first."""
    with torch.no_grad():
        v.resize_(data.size())
        v.copy_(data)
def prettyPrint(v):
    """Print a tensor's size/type and its max/min/mean values.

    fix: extract scalars with ``.item()``; the old ``.data[0]`` idiom raises
    IndexError on 0-dim tensors with PyTorch >= 0.5.
    """
    print('Size {0}, Type: {1}'.format(str(v.size()), v.data.type()))
    print('| Max: %f | Min: %f | Mean: %f' % (v.max().item(), v.min().item(),
                                              v.mean().item()))
def assureRatio(img):
    """Ensure imgH <= imgW by upsampling portrait images to a square."""
    _, _, height, width = img.size()
    if height <= width:
        return img
    upsample = nn.UpsamplingBilinear2d(size=(height, height), scale_factor=None)
    return upsample(img)
| 4,860 | 28.107784 | 136 | py |
crnn-pytorch | crnn-pytorch-master/dataset.py | #!/usr/bin/python
# encoding: utf-8
import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import sys
from PIL import Image
import numpy as np
class lmdbDataset(Dataset):
    """Dataset backed by an lmdb database of images and labels.

    The database is expected to contain a 'num-samples' entry plus
    1-based 'image-%09d' / 'label-%09d' key pairs.
    """
    def __init__(self, root=None, transform=None, target_transform=None):
        self.env = lmdb.open(
            root,
            max_readers=1,
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False)
        if not self.env:
            print('cannot creat lmdb from %s' % (root))
            sys.exit(0)
        with self.env.begin(write=False) as txn:
            nSamples = int(txn.get('num-samples'.encode('utf-8')))
            self.nSamples = nSamples
        self.transform = transform
        self.target_transform = target_transform
    def __len__(self):
        return self.nSamples
    def __getitem__(self, index):
        assert index <= len(self), 'index range error'
        # lmdb keys are 1-based while DataLoader indices are 0-based
        index += 1
        with self.env.begin(write=False) as txn:
            img_key = 'image-%09d' % index
            imgbuf = txn.get(img_key.encode('utf-8'))
            buf = six.BytesIO()
            buf.write(imgbuf)
            buf.seek(0)
            try:
                img = Image.open(buf).convert('L')  # grayscale
            except IOError:
                # NOTE(review): `index` was already incremented above, so this
                # recursion skips one extra sample — confirm this is intended.
                print('Corrupted image for %d' % index)
                return self[index + 1]
            if self.transform is not None:
                img = self.transform(img)
            label_key = 'label-%09d' % index
            label = txn.get(label_key.encode('utf-8'))
            if self.target_transform is not None:
                label = self.target_transform(label)
        return (img, label)
class resizeNormalize(object):
    """Resize a PIL image to *size* and normalize its tensor to [-1, 1]."""
    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size            # (width, height) as expected by PIL
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()
    def __call__(self, img):
        img = img.resize(self.size, self.interpolation)
        img = self.toTensor(img)
        # map ToTensor's [0, 1] range to [-1, 1]
        img.sub_(0.5).div_(0.5)
        return img
class randomSequentialSampler(sampler.Sampler):
    """Sampler yielding random *contiguous* runs of indices.

    Each batch is a run of ``batch_size`` consecutive indices starting at a
    random offset, which keeps lmdb reads mostly sequential while still
    randomizing batch content across epochs.
    """
    def __init__(self, data_source, batch_size):
        self.num_samples = len(data_source)
        self.batch_size = batch_size
    def __iter__(self):
        n_batch = len(self) // self.batch_size
        tail = len(self) % self.batch_size
        index = torch.LongTensor(len(self)).fill_(0)
        for i in range(n_batch):
            random_start = random.randint(0, len(self) - self.batch_size)
            # fix: torch.arange keeps everything integral; the deprecated
            # torch.range produced float tensors (and an inclusive endpoint).
            batch_index = random_start + torch.arange(self.batch_size)
            index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
        # deal with tail
        if tail:
            random_start = random.randint(0, len(self) - self.batch_size)
            tail_index = random_start + torch.arange(tail)
            # fix: compute the tail offset explicitly instead of reusing the
            # loop variable `i`, which is unbound when n_batch == 0
            index[n_batch * self.batch_size:] = tail_index
        return iter(index)
    def __len__(self):
        return self.num_samples
class alignCollate(object):
    """Collate function resizing a batch of PIL images to one common size.

    With keep_ratio=True the target width follows the widest aspect ratio in
    the batch; otherwise the fixed (imgW, imgH) is used.
    """
    def __init__(self, imgH=32, imgW=100, keep_ratio=False, min_ratio=1):
        self.imgH = imgH
        self.imgW = imgW
        self.keep_ratio = keep_ratio
        self.min_ratio = min_ratio
    def __call__(self, batch):
        images, labels = zip(*batch)
        imgH = self.imgH
        imgW = self.imgW
        if self.keep_ratio:
            ratios = []
            for image in images:
                w, h = image.size
                ratios.append(w / float(h))
            ratios.sort()
            max_ratio = ratios[-1]
            imgW = int(np.floor(max_ratio * imgH))
            imgW = max(imgH * self.min_ratio, imgW)  # assure imgW >= imgH * min_ratio
        transform = resizeNormalize((imgW, imgH))
        images = [transform(image) for image in images]
        # stack the per-image tensors into one (batch, C, H, W) tensor
        images = torch.cat([t.unsqueeze(0) for t in images], 0)
        return images, labels
| 4,008 | 28.262774 | 78 | py |
crnn-pytorch | crnn-pytorch-master/demo.py | import torch
from torch.autograd import Variable
import utils
import dataset
from PIL import Image
import models.crnn as crnn
import params
import argparse
# CLI: a trained CRNN checkpoint and one image to recognize
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model_path', type = str, required = True, help = 'crnn model path')
parser.add_argument('-i', '--image_path', type = str, required = True, help = 'demo image path')
args = parser.parse_args()
model_path = args.model_path
image_path = args.image_path
# net init
nclass = len(params.alphabet) + 1  # +1 for the CTC blank
model = crnn.CRNN(params.imgH, params.nc, nclass, params.nh)
if torch.cuda.is_available():
    model = model.cuda()
# load model
print('loading pretrained model from %s' % model_path)
if params.multi_gpu:
    # checkpoints saved from DataParallel carry a 'module.' key prefix
    model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load(model_path))
converter = utils.strLabelConverter(params.alphabet)
# resize + normalize to the network's expected 100x32 input
transformer = dataset.resizeNormalize((100, 32))
image = Image.open(image_path).convert('L')
image = transformer(image)
if torch.cuda.is_available():
    image = image.cuda()
image = image.view(1, *image.size())  # add batch dimension
image = Variable(image)
model.eval()
preds = model(image)
# greedy decoding: take the argmax class per time step
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
preds_size = Variable(torch.LongTensor([preds.size(0)]))
raw_pred = converter.decode(preds.data, preds_size.data, raw=True)
sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
print('%-20s => %-20s' % (raw_pred, sim_pred))
| 1,455 | 27.54902 | 96 | py |
crnn-pytorch | crnn-pytorch-master/train.py | from __future__ import print_function
from __future__ import division
import argparse
import random
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import numpy as np
# from warpctc_pytorch import CTCLoss
from torch.nn import CTCLoss
import os
import utils
import dataset
import models.crnn as net
import params
# CLI: lmdb roots for the train and validation sets
parser = argparse.ArgumentParser()
parser.add_argument('-train', '--trainroot', required=True, help='path to train dataset')
parser.add_argument('-val', '--valroot', required=True, help='path to val dataset')
args = parser.parse_args()
if not os.path.exists(params.expr_dir):
    os.makedirs(params.expr_dir)
# ensure everytime the random is the same
random.seed(params.manualSeed)
np.random.seed(params.manualSeed)
torch.manual_seed(params.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not params.cuda:
    print("WARNING: You have a CUDA device, so you should probably set cuda in params.py to True")
# -----------------------------------------------
"""
In this block
Get train and val data_loader
"""
def data_loader():
    """Build the train and val DataLoaders from the CLI-supplied lmdb roots.

    Returns:
        (train_loader, val_loader) tuple.
    """
    # train
    train_dataset = dataset.lmdbDataset(root=args.trainroot)
    assert train_dataset
    if not params.random_sample:
        # NOTE(review): the flag name suggests this condition should read
        # `if params.random_sample:` — confirm against params.py before changing.
        sampler = dataset.randomSequentialSampler(train_dataset, params.batchSize)
    else:
        sampler = None
    # fix: DataLoader raises ValueError when both shuffle=True and a sampler
    # are supplied; shuffle only when no explicit sampler is used.
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=params.batchSize,
        shuffle=(sampler is None), sampler=sampler, num_workers=int(params.workers),
        collate_fn=dataset.alignCollate(imgH=params.imgH, imgW=params.imgW, keep_ratio=params.keep_ratio))
    # val
    val_dataset = dataset.lmdbDataset(root=args.valroot, transform=dataset.resizeNormalize((params.imgW, params.imgH)))
    assert val_dataset
    val_loader = torch.utils.data.DataLoader(val_dataset, shuffle=True, batch_size=params.batchSize, num_workers=int(params.workers))
    return train_loader, val_loader
train_loader, val_loader = data_loader()
# -----------------------------------------------
"""
In this block
Net init
Weight init
Load pretrained model
"""
def weights_init(m):
    """DCGAN-style initialization, applied via ``model.apply``.

    Conv* weights ~ N(0, 0.02); BatchNorm* weights ~ N(1, 0.02) with zero
    bias. Every other module type is left untouched.
    """
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def net_init():
    """Build the CRNN, initialize its weights, optionally load a checkpoint.

    Returns:
        The (possibly DataParallel-wrapped) CRNN module.
    """
    n_classes = len(params.alphabet) + 1  # +1 for the CTC blank symbol
    model = net.CRNN(params.imgH, params.nc, n_classes, params.nh)
    model.apply(weights_init)
    if params.pretrained != '':
        print('loading pretrained model from %s' % params.pretrained)
        # the checkpoint was saved from a DataParallel model when multi_gpu
        # is on, so the wrapper must be applied before load_state_dict
        if params.multi_gpu:
            model = torch.nn.DataParallel(model)
        model.load_state_dict(torch.load(params.pretrained))
    return model
crnn = net_init()
print(crnn)
# -----------------------------------------------
"""
In this block
    Init some utils defined in utils.py
"""
# Running average helper for loss values (`torch.Variable` / `torch.Tensor`).
loss_avg = utils.averager()
# Encodes label strings to int sequences and decodes CTC output back to text.
converter = utils.strLabelConverter(params.alphabet)
# -----------------------------------------------
"""
In this block
    criterion define
"""
criterion = CTCLoss()
# -----------------------------------------------
"""
In this block
    Init some tensor
    Put tensor and net on cuda
    NOTE:
        image, text, length are shared by both val and train
        because train and val never run at the same time.
"""
# NOTE(review): the width dimension below reuses params.imgH and the channel
# count is hard-coded to 3 (vs params.nc); presumably utils.loadData resizes
# the buffer to each incoming batch, so this shape is only a placeholder --
# confirm against utils.loadData.
image = torch.FloatTensor(params.batchSize, 3, params.imgH, params.imgH)
text = torch.LongTensor(params.batchSize * 5)
length = torch.LongTensor(params.batchSize)
if params.cuda and torch.cuda.is_available():
    criterion = criterion.cuda()
    image = image.cuda()
    text = text.cuda()
    crnn = crnn.cuda()
    if params.multi_gpu:
        crnn = torch.nn.DataParallel(crnn, device_ids=range(params.ngpu))
# Variable() is a no-op wrapper on modern torch (kept for old-version compat).
image = Variable(image)
text = Variable(text)
length = Variable(length)
# -----------------------------------------------
"""
In this block
    Setup optimizer
"""
if params.adam:
    optimizer = optim.Adam(crnn.parameters(), lr=params.lr, betas=(params.beta1, 0.999))
elif params.adadelta:
    optimizer = optim.Adadelta(crnn.parameters())
else:
    optimizer = optim.RMSprop(crnn.parameters(), lr=params.lr)
# -----------------------------------------------
"""
In this block
    Deal with NaN losses
    NOTE:
        The strategy depends on the torch version.
"""
if params.dealwith_lossnan:
    # NOTE(review): lexicographic string comparison of version numbers is
    # fragile (e.g. '1.10.0' < '1.2.0'); consider parsing the version instead.
    if torch.__version__ >= '1.1.0':
        """
        zero_infinity (bool, optional):
            Whether to zero infinite losses and the associated gradients.
            Default: ``False``
            Infinite losses mainly occur when the inputs are too short
            to be aligned to the targets.
            PyTorch added this param in v1.1.0
        """
        criterion = CTCLoss(zero_infinity = True)
    else:
        """
        Only when torch.__version__ < '1.1.0':
        zero bad gradients via a backward hook on the model.
        """
        crnn.register_backward_hook(crnn.backward_hook)
def val(net, criterion):
    """Evaluate `net` on the global `val_loader`.

    Prints the average validation loss, the sequence-level accuracy, and a
    few raw/decoded predictions from the last batch (up to params.n_val_disp).

    Args:
        net: the CRNN model (the module-level `crnn` is passed in).
        criterion: CTC loss instance.
    """
    print('Start val')
    # freeze parameters while evaluating
    for p in net.parameters():
        p.requires_grad = False
    net.eval()
    n_correct = 0
    n_samples = 0
    loss_avg = utils.averager()  # local averager; the global one is used by train
    sim_preds, cpu_texts_decode, last_raw = [], [], None
    for cpu_images, cpu_texts in val_loader:
        batch_size = cpu_images.size(0)
        n_samples += batch_size
        utils.loadData(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)
        preds = net(image)
        preds_size = Variable(torch.LongTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length) / batch_size
        loss_avg.add(cost)
        _, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        # ground-truth labels come out of lmdb as bytes; decode for comparison
        cpu_texts_decode = [txt.decode('utf-8', 'strict') for txt in cpu_texts]
        for pred, target in zip(sim_preds, cpu_texts_decode):
            if pred == target:
                n_correct += 1
        last_raw = (preds.data, preds_size.data)
    # show a few raw vs decoded predictions from the last batch
    if last_raw is not None:
        raw_preds = converter.decode(*last_raw, raw=True)[:params.n_val_disp]
        for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts_decode):
            print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))
    # divide by the actual number of samples: the last batch may be smaller
    # than params.batchSize, which the old max_iter*batchSize denominator
    # over-counted
    accuracy = n_correct / float(n_samples) if n_samples else 0.0
    print('Val loss: %f, accuracy: %f' % (loss_avg.val(), accuracy))
def train(net, criterion, optimizer, train_iter):
    """Run a single optimization step on the next training batch.

    Args:
        net: the CRNN model (the module-level `crnn` is passed in).
        criterion: CTC loss instance.
        optimizer: optimizer stepping `net`'s parameters.
        train_iter: iterator over the training DataLoader.

    Returns:
        The (batch-averaged) CTC loss tensor for this step.
    """
    # re-enable gradients (val() turns them off)
    for p in net.parameters():
        p.requires_grad = True
    net.train()
    # Python 3: use the next() builtin; iterators no longer have a .next() method
    cpu_images, cpu_texts = next(train_iter)
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    optimizer.zero_grad()
    preds = net(image)
    preds_size = Variable(torch.LongTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    cost.backward()
    optimizer.step()
    return cost
if __name__ == "__main__":
    for epoch in range(params.nepoch):
        train_iter = iter(train_loader)
        i = 0
        while i < len(train_loader):
            # one optimizer step on the next batch
            cost = train(crnn, criterion, optimizer, train_iter)
            loss_avg.add(cost)
            i += 1
            # periodic console logging; the averager is reset after each report
            if i % params.displayInterval == 0:
                print('[%d/%d][%d/%d] Loss: %f' %
                        (epoch, params.nepoch, i, len(train_loader), loss_avg.val()))
                loss_avg.reset()
            # periodic validation (shares the global image/text/length buffers)
            if i % params.valInterval == 0:
                val(crnn, criterion)
            # do checkpointing
            if i % params.saveInterval == 0:
                torch.save(crnn.state_dict(), '{0}/netCRNN_{1}_{2}.pth'.format(params.expr_dir, epoch, i))
| 8,227 | 29.587361 | 133 | py |
crnn-pytorch | crnn-pytorch-master/models/crnn.py | import torch.nn as nn
import params
import torch.nn.functional as F
class BidirectionalLSTM(nn.Module):
    """A bidirectional LSTM followed by a linear projection per timestep.

    Input/output layout is (seq_len, batch, features); the projection maps
    the concatenated forward+backward hidden states (2 * nHidden) to nOut.
    """

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        # attribute names are state_dict keys -- keep them stable
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        rnn_out, _ = self.rnn(input)
        seq_len, batch, feat = rnn_out.size()
        # flatten time and batch so one Linear handles every timestep at once
        flat = rnn_out.reshape(seq_len * batch, feat)
        projected = self.embedding(flat)  # [seq_len * batch, nOut]
        return projected.view(seq_len, batch, -1)
class CRNN(nn.Module):
    """CRNN text recognizer: a 7-conv CNN feature extractor followed by a
    2-layer bidirectional LSTM, emitting per-timestep log-probabilities
    suitable for CTC decoding/loss.

    Args:
        imgH: input image height; must be a multiple of 16 (the poolings
            reduce the height axis by 16x, down to 1).
        nc: number of input channels.
        nclass: number of output classes (alphabet size + 1 for the CTC blank).
        nh: hidden size of the LSTMs.
        n_rnn: unused; kept for signature backward compatibility.
        leakyRelu: use LeakyReLU(0.2) instead of ReLU after each conv.
    """
    def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):
        super(CRNN, self).__init__()
        assert imgH % 16 == 0, 'imgH has to be a multiple of 16'
        # per-conv-layer kernel sizes, paddings, strides and channel counts
        ks = [3, 3, 3, 3, 3, 3, 2]
        ps = [1, 1, 1, 1, 1, 1, 0]
        ss = [1, 1, 1, 1, 1, 1, 1]
        nm = [64, 128, 256, 256, 512, 512, 512]
        cnn = nn.Sequential()
        def convRelu(i, batchNormalization=False):
            # Registers conv{i} (+ optional batchnorm{i}) and relu{i}.
            # NOTE: these module names are state_dict keys -- pretrained
            # checkpoints depend on them; do not rename.
            nIn = nc if i == 0 else nm[i - 1]
            nOut = nm[i]
            cnn.add_module('conv{0}'.format(i),
                           nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))
            if batchNormalization:
                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
            if leakyRelu:
                cnn.add_module('relu{0}'.format(i),
                               nn.LeakyReLU(0.2, inplace=True))
            else:
                cnn.add_module('relu{0}'.format(i), nn.ReLU(True))
        # Shape comments are C x H x W after each stage (heights for imgH=32).
        convRelu(0)
        cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))  # 64x16x64
        convRelu(1)
        cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))  # 128x8x32
        convRelu(2, True)
        convRelu(3)
        # asymmetric pooling: halves the height but keeps width resolution,
        # preserving horizontal detail for the sequence model
        cnn.add_module('pooling{0}'.format(2),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 256x4x16
        convRelu(4, True)
        convRelu(5)
        cnn.add_module('pooling{0}'.format(3),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 512x2x16
        convRelu(6, True)  # 512x1x16
        self.cnn = cnn
        self.rnn = nn.Sequential(
            BidirectionalLSTM(512, nh, nh),
            BidirectionalLSTM(nh, nh, nclass))
    def forward(self, input):
        """Return log-probabilities of shape [w, b, nclass] for CTC."""
        # conv features
        conv = self.cnn(input)
        b, c, h, w = conv.size()
        assert h == 1, "the height of conv must be 1"
        conv = conv.squeeze(2)
        conv = conv.permute(2, 0, 1)  # [w, b, c]: width becomes the sequence axis
        # rnn features
        output = self.rnn(conv)
        # log_softmax so the output can be fed directly to nn.CTCLoss
        output = F.log_softmax(output, dim=2)
        return output
    def backward_hook(self, module, grad_input, grad_output):
        """Zero out NaN gradients (registered only on torch < 1.1.0)."""
        for g in grad_input:
            # NaN != NaN, so this zeroes NaN entries; note that inf values
            # are NOT caught by this test.
            g[g != g] = 0
| 2,865 | 30.494505 | 78 | py |
crnn-pytorch | crnn-pytorch-master/tool/convert_t7.py | import torchfile
import argparse
import torch
from torch.nn.parameter import Parameter
import numpy as np
import models.crnn as crnn
# Mapping from Torch7 (t7) layer class names to their PyTorch counterparts.
# A value of None marks layers with no direct module to copy weights into
# (containers, shape/copy ops, ...); they are matched but carry no params.
layer_map = {
    'SpatialConvolution': 'Conv2d',
    'SpatialBatchNormalization': 'BatchNorm2d',
    'ReLU': 'ReLU',
    'SpatialMaxPooling': 'MaxPool2d',
    'SpatialAveragePooling': 'AvgPool2d',
    'SpatialUpSamplingNearest': 'UpsamplingNearest2d',
    'View': None,
    'Linear': 'linear',
    'Dropout': 'Dropout',
    'SoftMax': 'Softmax',
    'Identity': None,
    'SpatialFullConvolution': 'ConvTranspose2d',
    'SpatialReplicationPadding': None,
    'SpatialReflectionPadding': None,
    'Copy': None,
    'Narrow': None,
    'SpatialCrossMapLRN': None,
    'Sequential': None,
    'ConcatTable': None,  # output is list
    'CAddTable': None,  # input is list
    'Concat': None,
    'TorchObject': None,
    'LstmLayer': 'LSTM',
    'BiRnnJoin': 'Linear'
}
def torch_layer_serial(layer, layers):
    """Flatten a (possibly nested) torchfile layer into `layers`.

    `layer` is a (name, children/params) pair; container layers
    (nn.Sequential / nn.ConcatTable) are expanded recursively in order,
    everything else is appended as a leaf.
    """
    if layer[0] in ('nn.Sequential', 'nn.ConcatTable'):
        for sub in layer[1]:
            torch_layer_serial(sub, layers)
    else:
        layers.append(layer)
def py_layer_serial(layer, layers):
    """Flatten a torch Module tree into its leaf modules, in registration order.

    Assumes modules are defined in the order they execute.
    """
    if layer._modules:
        # container: recurse into each registered child
        for child in layer.children():
            py_layer_serial(child, layers)
    else:
        # leaf module (no registered submodules)
        layers.append(layer)
def trans_pos(param, part_indexes, dim=0):
    """Reorder equal-sized chunks of `param` along axis `dim`.

    The array is split into len(part_indexes) pieces; the pieces are then
    concatenated back in the order given by `part_indexes`.
    """
    chunks = np.split(param, len(part_indexes), dim)
    reordered = [chunks[idx] for idx in part_indexes]
    return np.concatenate(reordered, dim)
def load_params(py_layer, t7_layer):
    """Copy the weights of one t7 layer (or LSTM layer group) into `py_layer`.

    For LSTMs, `t7_layer` is a list of t7 layers covering every
    layer/direction; for all other types it is a single (name, params) pair.
    Copies are done in-place via Tensor.copy_; a size mismatch is reported
    but not raised.
    """
    if type(py_layer).__name__ == 'LSTM':
        # LSTM: build the torch parameter-name list in the exact order the
        # t7 dump provides the tensors (per layer, then per direction)
        all_weights = []
        num_directions = 2 if py_layer.bidirectional else 1
        for i in range(py_layer.num_layers):
            for j in range(num_directions):
                suffix = '_reverse' if j == 1 else ''
                weights = ['weight_ih_l{}{}', 'bias_ih_l{}{}',
                           'weight_hh_l{}{}', 'bias_hh_l{}{}']
                weights = [x.format(i, suffix) for x in weights]
                all_weights += weights
        params = []
        for i in range(len(t7_layer)):
            params.extend(t7_layer[i][1])
        # reorder the 4 stacked gate blocks to match PyTorch's gate layout
        # (presumably t7 stores i,f,o,g vs torch's i,f,g,o -- confirm)
        params = [trans_pos(p, [0, 1, 3, 2], dim=0) for p in params]
    else:
        all_weights = []
        name = t7_layer[0].split('.')[-1]
        if name == 'BiRnnJoin':
            # two half-projections are fused into one Linear: weights are
            # concatenated along the input axis, biases are summed
            weight_0, bias_0, weight_1, bias_1 = t7_layer[1]
            weight = np.concatenate((weight_0, weight_1), axis=1)
            bias = bias_0 + bias_1
            t7_layer[1] = [weight, bias]
            all_weights += ['weight', 'bias']
        elif name == 'SpatialConvolution' or name == 'Linear':
            all_weights += ['weight', 'bias']
        elif name == 'SpatialBatchNormalization':
            all_weights += ['weight', 'bias', 'running_mean', 'running_var']
        params = t7_layer[1]
    params = [torch.from_numpy(item) for item in params]
    assert len(all_weights) == len(params), "params' number not match"
    for py_param_name, t7_param in zip(all_weights, params):
        item = getattr(py_layer, py_param_name)
        if isinstance(item, Parameter):
            item = item.data
        try:
            item.copy_(t7_param)
        except RuntimeError:
            # shape mismatch: report and continue with the next tensor
            print('Size not match between %s and %s' %
                  (item.size(), t7_param.size()))
def torch_to_pytorch(model, t7_file, output):
    """Transfer weights from a Torch7 .t7 dump into `model` and save them.

    Both the torch model and the t7 dump are flattened to a linear list of
    leaf layers and matched positionally (via `layer_map`); an LSTM consumes
    num_layers * num_directions consecutive t7 entries. The resulting
    state_dict is written to `output`.

    Raises:
        RuntimeError: when a t7 layer type does not map to the paired
            PyTorch layer type.
    """
    py_layers = []
    for layer in list(model.children()):
        py_layer_serial(layer, py_layers)
    t7_data = torchfile.load(t7_file)
    t7_layers = []
    for layer in t7_data:
        torch_layer_serial(layer, t7_layers)
    j = 0  # cursor into the flattened t7 layer list
    for i, py_layer in enumerate(py_layers):
        py_name = type(py_layer).__name__
        t7_layer = t7_layers[j]
        t7_name = t7_layer[0].split('.')[-1]
        if layer_map[t7_name] != py_name:
            raise RuntimeError('%s does not match %s' % (py_name, t7_name))
        if py_name == 'LSTM':
            # one torch LSTM corresponds to several consecutive t7 entries
            n_layer = 2 if py_layer.bidirectional else 1
            n_layer *= py_layer.num_layers
            t7_layer = t7_layers[j:j + n_layer]
            j += n_layer
        else:
            j += 1
        load_params(py_layer, t7_layer)
    torch.save(model.state_dict(), output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Convert torch t7 model to pytorch'
    )
    parser.add_argument(
        '--model_file',
        '-m',
        type=str,
        required=True,
        help='torch model file in t7 format'
    )
    parser.add_argument(
        '--output',
        '-o',
        type=str,
        default=None,
        help='output file name prefix, xxx.py xxx.pth'
    )
    args = parser.parse_args()
    # imgH=32, nc=1, nclass=37 (presumably 36 alphanumerics + CTC blank --
    # confirm), nh=256, n_rnn=1
    py_model = crnn.CRNN(32, 1, 37, 256, 1)
    torch_to_pytorch(py_model, args.model_file, args.output)
| 5,075 | 29.214286 | 76 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/parser.py |
import os
import torch
import argparse
def parse_arguments():
    """Parse and validate the benchmark's command-line arguments.

    Returns:
        argparse.Namespace with all training/model/data/path parameters.

    Raises:
        Exception: if --datasets_folder is unset and the DATASETS_FOLDER
            environment variable is missing.
        ValueError: for invalid parameter combinations (see checks below).
        NotImplementedError: for SARE losses on multiple GPUs.
    """
    parser = argparse.ArgumentParser(description="Benchmarking Visual Geolocalization",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Training parameters
    parser.add_argument("--train_batch_size", type=int, default=4,
                        help="Number of triplets (query, pos, negs) in a batch. Each triplet consists of 12 images")
    parser.add_argument("--infer_batch_size", type=int, default=16,
                        help="Batch size for inference (caching and testing)")
    parser.add_argument("--criterion", type=str, default='triplet', help='loss to be used',
                        choices=["triplet", "sare_ind", "sare_joint"])
    parser.add_argument("--margin", type=float, default=0.1,
                        help="margin for the triplet loss")
    parser.add_argument("--epochs_num", type=int, default=1000,
                        help="number of epochs to train for")
    parser.add_argument("--patience", type=int, default=3)
    parser.add_argument("--lr", type=float, default=0.00001, help="_")
    parser.add_argument("--lr_crn_layer", type=float, default=5e-3, help="Learning rate for the CRN layer")
    parser.add_argument("--lr_crn_net", type=float, default=5e-4, help="Learning rate to finetune pretrained network when using CRN")
    parser.add_argument("--optim", type=str, default="adam", help="_", choices=["adam", "sgd"])
    parser.add_argument("--cache_refresh_rate", type=int, default=1000,
                        help="How often to refresh cache, in number of queries")
    parser.add_argument("--queries_per_epoch", type=int, default=5000,
                        help="How many queries to consider for one epoch. Must be multiple of cache_refresh_rate")
    parser.add_argument("--negs_num_per_query", type=int, default=10,
                        help="How many negatives to consider per each query in the loss")
    parser.add_argument("--neg_samples_num", type=int, default=1000,
                        help="How many negatives to use to compute the hardest ones")
    parser.add_argument("--mining", type=str, default="partial", choices=["partial", "full", "random", "msls_weighted"])
    # Model parameters
    parser.add_argument("--backbone", type=str, default="resnet18conv4",
                        choices=["alexnet", "vgg16", "resnet18conv4", "resnet18conv5",
                                 "resnet50conv4", "resnet50conv5", "resnet101conv4", "resnet101conv5",
                                 "cct384", "vit"], help="_")
    parser.add_argument("--l2", type=str, default="before_pool", choices=["before_pool", "after_pool", "none"],
                        help="When (and if) to apply the l2 norm with shallow aggregation layers")
    parser.add_argument("--aggregation", type=str, default="netvlad", choices=["netvlad", "gem", "spoc", "mac", "rmac", "crn", "rrm",
                                                                               "cls", "seqpool"])
    parser.add_argument('--netvlad_clusters', type=int, default=64, help="Number of clusters for NetVLAD layer.")
    parser.add_argument('--pca_dim', type=int, default=None, help="PCA dimension (number of principal components). If None, PCA is not used.")
    parser.add_argument('--fc_output_dim', type=int, default=None,
                        help="Output dimension of fully connected layer. If None, don't use a fully connected layer.")
    parser.add_argument('--pretrain', type=str, default="imagenet", choices=['imagenet', 'gldv2', 'places'],
                        help="Select the pretrained weights for the starting network")
    parser.add_argument("--off_the_shelf", type=str, default="imagenet", choices=["imagenet", "radenovic_sfm", "radenovic_gldv1", "naver"],
                        help="Off-the-shelf networks from popular GitHub repos. Only with ResNet-50/101 + GeM + FC 2048")
    parser.add_argument("--trunc_te", type=int, default=None, choices=list(range(0, 14)))
    parser.add_argument("--freeze_te", type=int, default=None, choices=list(range(-1, 14)))
    # Initialization parameters
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--resume", type=str, default=None,
                        help="Path to load checkpoint from, for resuming training or testing.")
    # Other parameters
    parser.add_argument("--device", type=str, default="cuda", choices=["cuda", "cpu"])
    parser.add_argument("--num_workers", type=int, default=8, help="num_workers for all dataloaders")
    parser.add_argument('--resize', type=int, default=[480, 640], nargs=2, help="Resizing shape for images (HxW).")
    parser.add_argument('--test_method', type=str, default="hard_resize",
                        choices=["hard_resize", "single_query", "central_crop", "five_crops", "nearest_crop", "maj_voting"],
                        help="This includes pre/post-processing methods and prediction refinement")
    parser.add_argument("--majority_weight", type=float, default=0.01,
                        help="only for majority voting, scale factor, the higher it is the more importance is given to agreement")
    parser.add_argument("--efficient_ram_testing", action='store_true', help="_")
    parser.add_argument("--val_positive_dist_threshold", type=int, default=25, help="_")
    parser.add_argument("--train_positives_dist_threshold", type=int, default=10, help="_")
    parser.add_argument('--recall_values', type=int, default=[1, 5, 10, 20], nargs="+",
                        help="Recalls to be computed, such as R@5.")
    # Data augmentation parameters
    parser.add_argument("--brightness", type=float, default=0, help="_")
    parser.add_argument("--contrast", type=float, default=0, help="_")
    parser.add_argument("--saturation", type=float, default=0, help="_")
    parser.add_argument("--hue", type=float, default=0, help="_")
    parser.add_argument("--rand_perspective", type=float, default=0, help="_")
    parser.add_argument("--horizontal_flip", action='store_true', help="_")
    parser.add_argument("--random_resized_crop", type=float, default=0, help="_")
    parser.add_argument("--random_rotation", type=float, default=0, help="_")
    # Paths parameters
    parser.add_argument("--datasets_folder", type=str, default=None, help="Path with all datasets")
    parser.add_argument("--dataset_name", type=str, default="pitts30k", help="Relative path of the dataset")
    parser.add_argument("--pca_dataset_folder", type=str, default=None,
                        help="Path with images to be used to compute PCA (ie: pitts30k/images/train")
    parser.add_argument("--save_dir", type=str, default="default",
                        help="Folder name of the current run (saved in ./logs/)")
    args = parser.parse_args()

    # --- cross-parameter validation ---
    if args.datasets_folder is None:
        try:
            args.datasets_folder = os.environ['DATASETS_FOLDER']
        except KeyError:
            raise Exception("You should set the parameter --datasets_folder or export " +
                            "the DATASETS_FOLDER environment variable as such \n" +
                            "export DATASETS_FOLDER=../datasets_vg/datasets")
    if args.aggregation == "crn" and args.resume is None:
        raise ValueError("CRN must be resumed from a trained NetVLAD checkpoint, but you set resume=None.")
    if args.queries_per_epoch % args.cache_refresh_rate != 0:
        raise ValueError("Ensure that queries_per_epoch is divisible by cache_refresh_rate, " +
                         f"because {args.queries_per_epoch} is not divisible by {args.cache_refresh_rate}")
    if torch.cuda.device_count() >= 2 and args.criterion in ['sare_joint', "sare_ind"]:
        raise NotImplementedError("SARE losses are not implemented for multiple GPUs, " +
                                  f"but you're using {torch.cuda.device_count()} GPUs and {args.criterion} loss.")
    # BUGFIX: the string below was missing the f prefix, so the placeholder
    # was printed literally instead of the dataset name.
    if args.mining == "msls_weighted" and args.dataset_name != "msls":
        raise ValueError(f"msls_weighted mining can only be applied to msls dataset, but you're using it on {args.dataset_name}")
    if args.off_the_shelf in ["radenovic_sfm", "radenovic_gldv1", "naver"]:
        if args.backbone not in ["resnet50conv5", "resnet101conv5"] or args.aggregation != "gem" or args.fc_output_dim != 2048:
            raise ValueError("Off-the-shelf models are trained only with ResNet-50/101 + GeM + FC 2048")
    if args.pca_dim is not None and args.pca_dataset_folder is None:
        raise ValueError("Please specify --pca_dataset_folder when using pca")
    if args.backbone == "vit":
        if args.resize != [224, 224] and args.resize != [384, 384]:
            raise ValueError(f'Image size for ViT must be either 224 or 384 {args.resize}')
    if args.backbone == "cct384":
        if args.resize != [384, 384]:
            raise ValueError(f'Image size for CCT384 must be 384, but it is {args.resize}')
    # CNN backbones cannot use transformer-only aggregations and vice versa
    if args.backbone in ["alexnet", "vgg16", "resnet18conv4", "resnet18conv5",
                         "resnet50conv4", "resnet50conv5", "resnet101conv4", "resnet101conv5"]:
        if args.aggregation in ["cls", "seqpool"]:
            raise ValueError(f"CNNs like {args.backbone} can't work with aggregation {args.aggregation}")
    if args.backbone in ["cct384"]:
        if args.aggregation in ["spoc", "mac", "rmac", "crn", "rrm"]:
            raise ValueError(f"CCT can't work with aggregation {args.aggregation}. Please use one among [netvlad, gem, cls, seqpool]")
    if args.backbone == "vit":
        if args.aggregation not in ["cls", "gem", "netvlad"]:
            raise ValueError(f"ViT can't work with aggregation {args.aggregation}. Please use one among [netvlad, gem, cls]")
    return args
| 9,823 | 70.188406 | 142 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/test.py |
import faiss
import torch
import logging
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
def test_efficient_ram_usage(args, eval_ds, model, test_method="hard_resize"):
    """This function gives the same output as test(), but uses much less RAM.
    This can be useful when testing with large descriptors (e.g. NetVLAD) on large datasets (e.g. San Francisco).
    Obviously it is slower than test(), and can't be used with PCA.

    Distances are accumulated one database feature at a time instead of
    building the full database descriptor matrix, which keeps peak memory low.

    Returns:
        (recalls, recalls_str): recall values (one per args.recall_values)
        and a human-readable summary string.
    """
    model = model.eval()
    # crop-based methods produce 5 feature vectors (crops) per query
    if test_method == 'nearest_crop' or test_method == "maj_voting":
        distances = np.empty([eval_ds.queries_num * 5, eval_ds.database_num], dtype=np.float32)
    else:
        distances = np.empty([eval_ds.queries_num, eval_ds.database_num], dtype=np.float32)
    with torch.no_grad():
        if test_method == 'nearest_crop' or test_method == 'maj_voting':
            queries_features = np.ones((eval_ds.queries_num * 5, args.features_dim), dtype="float32")
        else:
            queries_features = np.ones((eval_ds.queries_num, args.features_dim), dtype="float32")
        logging.debug("Extracting queries features for evaluation/testing")
        queries_infer_batch_size = 1 if test_method == "single_query" else args.infer_batch_size
        eval_ds.test_method = test_method
        queries_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num, eval_ds.database_num+eval_ds.queries_num)))
        queries_dataloader = DataLoader(dataset=queries_subset_ds, num_workers=args.num_workers,
                                        batch_size=queries_infer_batch_size, pin_memory=(args.device == "cuda"))
        for inputs, indices in tqdm(queries_dataloader, ncols=100):
            if test_method == "five_crops" or test_method == "nearest_crop" or test_method == 'maj_voting':
                inputs = torch.cat(tuple(inputs))  # shape = 5*bs x 3 x 480 x 480
            features = model(inputs.to(args.device))
            if test_method == "five_crops":  # Compute mean along the 5 crops
                features = torch.stack(torch.split(features, 5)).mean(1)
            if test_method == "nearest_crop" or test_method == 'maj_voting':
                # each query occupies 5 consecutive rows (one per crop)
                start_idx = (indices[0] - eval_ds.database_num) * 5
                end_idx = start_idx + indices.shape[0] * 5
                indices = np.arange(start_idx, end_idx)
                queries_features[indices, :] = features.cpu().numpy()
            else:
                queries_features[indices.numpy()-eval_ds.database_num, :] = features.cpu().numpy()
        # NOTE(review): .cuda() is hard-coded here; presumably args.device is
        # always "cuda" for this path -- confirm before running on CPU.
        queries_features = torch.tensor(queries_features).type(torch.float32).cuda()
        logging.debug("Extracting database features for evaluation/testing")
        # For database use "hard_resize", although it usually has no effect because database images have same resolution
        eval_ds.test_method = "hard_resize"
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        database_dataloader = DataLoader(dataset=database_subset_ds, num_workers=args.num_workers,
                                         batch_size=args.infer_batch_size, pin_memory=(args.device == "cuda"))
        for inputs, indices in tqdm(database_dataloader, ncols=100):
            inputs = inputs.to(args.device)
            features = model(inputs)
            # fill one distance column per database image (squared L2)
            for pn, (index, pred_feature) in enumerate(zip(indices, features)):
                distances[:, index] = ((queries_features-pred_feature)**2).sum(1).cpu().numpy()
        del features, queries_features, pred_feature
    predictions = distances.argsort(axis=1)[:, :max(args.recall_values)]
    # NOTE(review): the crop-merging code below hard-codes 20 candidates,
    # i.e. it assumes max(args.recall_values) == 20 -- confirm.
    if test_method == 'nearest_crop':
        distances = np.array([distances[row, index] for row, index in enumerate(predictions)])
        distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))
        for q in range(eval_ds.queries_num):
            # sort predictions by distance
            sort_idx = np.argsort(distances[q])
            predictions[q] = predictions[q, sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(predictions[q], return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]
        predictions = predictions[:, :20]  # keep only the closer 20 predictions for each
    elif test_method == 'maj_voting':
        distances = np.array([distances[row, index] for row, index in enumerate(predictions)])
        distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))
        for q in range(eval_ds.queries_num):
            # votings, modify distances in-place
            top_n_voting('top1', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top5', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top10', predictions[q], distances[q], args.majority_weight)
            # flatten dist and preds from 5, 20 -> 20*5
            # and then proceed as usual to keep only first 20
            dists = distances[q].flatten()
            preds = predictions[q].flatten()
            # sort predictions by distance
            sort_idx = np.argsort(dists)
            preds = preds[sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(preds, return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            # here the row corresponding to the first crop is used as a
            # 'buffer' for each query, and in the end the dimension
            # relative to crops is eliminated
            predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]
        predictions = predictions[:, 0, :20]  # keep only the closer 20 predictions for each query
    del distances
    #### For each query, check if the predictions are correct
    positives_per_query = eval_ds.get_positives()
    # args.recall_values by default is [1, 5, 10, 20]
    recalls = np.zeros(len(args.recall_values))
    for query_index, pred in enumerate(predictions):
        for i, n in enumerate(args.recall_values):
            if np.any(np.in1d(pred[:n], positives_per_query[query_index])):
                recalls[i:] += 1
                break
    recalls = recalls / eval_ds.queries_num * 100
    recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
    return recalls, recalls_str
def test(args, eval_ds, model, test_method="hard_resize", pca=None):
    """Compute features of the given dataset and compute the recalls.

    Extracts descriptors for the whole database and all queries, ranks the
    database per query with a faiss L2 index, and computes recall@N for each
    N in args.recall_values.

    Returns:
        (recalls, recalls_str): recall values and a human-readable summary.
    """
    assert test_method in ["hard_resize", "single_query", "central_crop", "five_crops",
                            "nearest_crop", "maj_voting"], f"test_method can't be {test_method}"
    if args.efficient_ram_testing:
        return test_efficient_ram_usage(args, eval_ds, model, test_method)
    model = model.eval()
    with torch.no_grad():
        logging.debug("Extracting database features for evaluation/testing")
        # For database use "hard_resize", although it usually has no effect because database images have same resolution
        eval_ds.test_method = "hard_resize"
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        database_dataloader = DataLoader(dataset=database_subset_ds, num_workers=args.num_workers,
                                         batch_size=args.infer_batch_size, pin_memory=(args.device == "cuda"))
        # crop-based methods store 5 feature rows (one per crop) per query
        if test_method == "nearest_crop" or test_method == 'maj_voting':
            all_features = np.empty((5 * eval_ds.queries_num + eval_ds.database_num, args.features_dim), dtype="float32")
        else:
            all_features = np.empty((len(eval_ds), args.features_dim), dtype="float32")
        for inputs, indices in tqdm(database_dataloader, ncols=100):
            features = model(inputs.to(args.device))
            features = features.cpu().numpy()
            if pca is not None:
                features = pca.transform(features)
            all_features[indices.numpy(), :] = features
        logging.debug("Extracting queries features for evaluation/testing")
        queries_infer_batch_size = 1 if test_method == "single_query" else args.infer_batch_size
        eval_ds.test_method = test_method
        queries_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num, eval_ds.database_num+eval_ds.queries_num)))
        queries_dataloader = DataLoader(dataset=queries_subset_ds, num_workers=args.num_workers,
                                        batch_size=queries_infer_batch_size, pin_memory=(args.device == "cuda"))
        for inputs, indices in tqdm(queries_dataloader, ncols=100):
            if test_method == "five_crops" or test_method == "nearest_crop" or test_method == 'maj_voting':
                inputs = torch.cat(tuple(inputs))  # shape = 5*bs x 3 x 480 x 480
            features = model(inputs.to(args.device))
            if test_method == "five_crops":  # Compute mean along the 5 crops
                features = torch.stack(torch.split(features, 5)).mean(1)
            features = features.cpu().numpy()
            if pca is not None:
                features = pca.transform(features)
            if test_method == "nearest_crop" or test_method == 'maj_voting':  # store the features of all 5 crops
                start_idx = eval_ds.database_num + (indices[0] - eval_ds.database_num) * 5
                end_idx = start_idx + indices.shape[0] * 5
                indices = np.arange(start_idx, end_idx)
                all_features[indices, :] = features
            else:
                all_features[indices.numpy(), :] = features
    queries_features = all_features[eval_ds.database_num:]
    database_features = all_features[:eval_ds.database_num]
    faiss_index = faiss.IndexFlatL2(args.features_dim)
    faiss_index.add(database_features)
    del database_features, all_features
    logging.debug("Calculating recalls")
    distances, predictions = faiss_index.search(queries_features, max(args.recall_values))
    # NOTE(review): the crop-merging code below hard-codes 20 candidates,
    # i.e. it assumes max(args.recall_values) == 20 -- confirm.
    if test_method == 'nearest_crop':
        distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))
        for q in range(eval_ds.queries_num):
            # sort predictions by distance
            sort_idx = np.argsort(distances[q])
            predictions[q] = predictions[q, sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(predictions[q], return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]
        predictions = predictions[:, :20]  # keep only the closer 20 predictions for each query
    elif test_method == 'maj_voting':
        distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))
        for q in range(eval_ds.queries_num):
            # votings, modify distances in-place
            top_n_voting('top1', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top5', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top10', predictions[q], distances[q], args.majority_weight)
            # flatten dist and preds from 5, 20 -> 20*5
            # and then proceed as usual to keep only first 20
            dists = distances[q].flatten()
            preds = predictions[q].flatten()
            # sort predictions by distance
            sort_idx = np.argsort(dists)
            preds = preds[sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(preds, return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            # here the row corresponding to the first crop is used as a
            # 'buffer' for each query, and in the end the dimension
            # relative to crops is eliminated
            predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]
        predictions = predictions[:, 0, :20]  # keep only the closer 20 predictions for each query
    #### For each query, check if the predictions are correct
    positives_per_query = eval_ds.get_positives()
    # args.recall_values by default is [1, 5, 10, 20]
    recalls = np.zeros(len(args.recall_values))
    for query_index, pred in enumerate(predictions):
        for i, n in enumerate(args.recall_values):
            if np.any(np.in1d(pred[:n], positives_per_query[query_index])):
                recalls[i:] += 1
                break
    # Divide by the number of queries*100, so the recalls are in percentages
    recalls = recalls / eval_ds.queries_num * 100
    recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
    return recalls, recalls_str
def top_n_voting(topn, predictions, distances, maj_weight):
    """Majority-voting refinement over the 5 crops of one query.

    For database indices that appear in the top-``n`` columns of more than
    one crop, lowers (improves) their distances in-place by
    ``maj_weight * count / n`` so that agreed-upon predictions rank earlier.

    Args:
        topn (str): one of 'top1', 'top5', 'top10'.
        predictions (np.ndarray): (num_crops, k) predicted database indices.
        distances (np.ndarray): (num_crops, k) distances, modified in-place.
        maj_weight (float): scale factor; higher gives more weight to agreement.

    Raises:
        ValueError: if ``topn`` is not one of the recognized values.
    """
    if topn == 'top1':
        n = 1
        selected = 0
    elif topn == 'top5':
        n = 5
        selected = slice(0, 5)
    elif topn == 'top10':
        n = 10
        selected = slice(0, 10)
    else:
        # previously an unknown value left `n`/`selected` unbound, causing a
        # confusing NameError below; fail fast with a clear message instead
        raise ValueError(f"topn must be 'top1', 'top5' or 'top10', not {topn!r}")
    # find predictions that repeat in the first, first five,
    # or fist ten columns for each crop
    vals, counts = np.unique(predictions[:, selected], return_counts=True)
    # for each prediction that repeats more than once,
    # subtract from its score
    for val, count in zip(vals[counts > 1], counts[counts > 1]):
        mask = (predictions[:, selected] == val)
        distances[:, selected][mask] -= maj_weight * count/n
| 14,018 | 53.761719 | 121 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/commons.py |
"""
This file contains some functions and classes which can be useful in very diverse projects.
"""
import os
import sys
import torch
import random
import logging
import traceback
import numpy as np
from os.path import join
def make_deterministic(seed=0):
    """Seed every RNG (python, numpy, torch CPU and CUDA) for reproducibility.

    Passing seed == -1 disables seeding entirely. Note that a fully
    deterministic run may be slower, since cudnn benchmarking is disabled.
    """
    if seed == -1:  # -1 is the sentinel for "do not make deterministic"
        return
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.cuda.manual_seed):
        seed_fn(seed)
    # Force cudnn into its deterministic, non-benchmarking mode.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def setup_logging(save_dir, console="debug",
                  info_filename="info.log", debug_filename="debug.log"):
    """Set up logging files and console output.
    Creates one file for INFO logs and one for DEBUG logs.
    Args:
        save_dir (str): creates the folder where to save the files.
        console (str):
            if == "debug" prints on console debug messages and higher
            if == "info" prints on console info messages and higher
            if == None does not use console (useful when a logger has already been set)
        info_filename (str): the name of the info file. if None, don't create info file
        debug_filename (str): the name of the debug file. if None, don't create debug file
    Raises:
        FileExistsError: if save_dir already exists, to avoid silently
            mixing the logs of two different runs.
    """
    if os.path.exists(save_dir):
        raise FileExistsError(f"{save_dir} already exists!")
    os.makedirs(save_dir, exist_ok=True)
    # logging.Logger.manager.loggerDict.keys() to check which loggers are in use
    base_formatter = logging.Formatter('%(asctime)s %(message)s', "%Y-%m-%d %H:%M:%S")
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG)
    if info_filename is not None:
        info_file_handler = logging.FileHandler(join(save_dir, info_filename))
        info_file_handler.setLevel(logging.INFO)
        info_file_handler.setFormatter(base_formatter)
        logger.addHandler(info_file_handler)
    if debug_filename is not None:
        debug_file_handler = logging.FileHandler(join(save_dir, debug_filename))
        debug_file_handler.setLevel(logging.DEBUG)
        debug_file_handler.setFormatter(base_formatter)
        logger.addHandler(debug_file_handler)
    if console is not None:
        console_handler = logging.StreamHandler()
        if console == "debug":
            console_handler.setLevel(logging.DEBUG)
        if console == "info":
            console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(base_formatter)
        logger.addHandler(console_handler)
    def exception_handler(type_, value, tb):
        # Route uncaught exceptions (with full traceback) into the log files
        # instead of only printing to stderr.
        # BUGFIX: the original passed the builtin `type` instead of the
        # `type_` argument, so the logged exception class was always "type".
        logger.info("\n" + "".join(traceback.format_exception(type_, value, tb)))
    sys.excepthook = exception_handler
| 2,811 | 36.493333 | 91 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/util.py |
import re
import torch
import shutil
import logging
import torchscan
import numpy as np
from collections import OrderedDict
from os.path import join
from sklearn.decomposition import PCA
import datasets_ws
def get_flops(model, input_shape=(480, 640)):
    """Return the model's forward-pass FLOPs as a human-readable string,
    such as '22.33 GFLOPs', computed by torchscan on a 3-channel input
    of the given (height, width)."""
    assert len(input_shape) == 2, f"input_shape should have len==2, but it's {input_shape}"
    height, width = input_shape
    # torchscan inspects the model with a dummy (3, H, W) input.
    scan_info = torchscan.crawl_module(model, (3, height, width))
    report = torchscan.utils.format_info(scan_info)
    # Extract the FLOPs figure from torchscan's textual report.
    return re.findall("Floating Point Operations on forward: (.*)\n", report)[0]
def save_checkpoint(args, state, is_best, filename):
    """Serialize a training checkpoint under args.save_dir, and duplicate
    it as 'best_model.pth' when it is the best model so far."""
    checkpoint_path = join(args.save_dir, filename)
    torch.save(state, checkpoint_path)
    if is_best:
        best_path = join(args.save_dir, "best_model.pth")
        shutil.copyfile(checkpoint_path, best_path)
def resume_model(args, model):
    """Load the weights stored at args.resume into ``model`` and return it.

    Handles both training checkpoints (weights nested under
    'model_state_dict') and released pre-trained models (the file is the
    bare state dict), and strips the 'module.' prefix left by DataParallel.
    """
    checkpoint = torch.load(args.resume, map_location=args.device)
    if 'model_state_dict' in checkpoint:
        state_dict = checkpoint['model_state_dict']
    else:
        # The pre-trained models that we provide in the README do not have 'state_dict' in the keys as
        # the checkpoint is directly the state dict
        state_dict = checkpoint
    # DataParallel prepends "module." to every key; strip it so the plain
    # model can load the weights without key mismatches.
    first_key = list(state_dict.keys())[0]
    if first_key.startswith('module'):
        state_dict = OrderedDict(
            (key.replace('module.', ''), value) for key, value in state_dict.items())
    model.load_state_dict(state_dict)
    return model
def resume_train(args, model, optimizer=None, strict=False):
    """Load model, optimizer, and other training parameters.

    Returns:
        (model, optimizer, best_r5, start_epoch_num, not_improved_num)
    """
    logging.debug(f"Loading checkpoint: {args.resume}")
    checkpoint = torch.load(args.resume)
    # Restore the network weights (and the optimizer state, if one is given).
    model.load_state_dict(checkpoint["model_state_dict"], strict=strict)
    if optimizer:
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    # Restore the bookkeeping needed to continue training where it stopped.
    start_epoch_num = checkpoint["epoch_num"]
    best_r5 = checkpoint["best_r5"]
    not_improved_num = checkpoint["not_improved_num"]
    logging.debug(f"Loaded checkpoint: start_epoch_num = {start_epoch_num}, "
                  f"current_best_R@5 = {best_r5:.1f}")
    if args.resume.endswith("last_model.pth"):  # Copy best model to current save_dir
        shutil.copy(args.resume.replace("last_model.pth", "best_model.pth"), args.save_dir)
    return model, optimizer, best_r5, start_epoch_num, not_improved_num
def compute_pca(args, model, pca_dataset_folder, full_features_dim):
    """Fit a PCA (with args.pca_dim components) on features extracted by
    ``model`` from the images in ``pca_dataset_folder``.

    At most 2**14 samples are used to keep memory bounded.
    """
    model = model.eval()
    pca_ds = datasets_ws.PCADataset(args, args.datasets_folder, pca_dataset_folder)
    dl = torch.utils.data.DataLoader(pca_ds, args.infer_batch_size, shuffle=True)
    num_samples = min(len(pca_ds), 2**14)
    pca_features = np.empty([num_samples, full_features_dim])
    with torch.no_grad():
        for batch_num, images in enumerate(dl):
            start = batch_num * args.infer_batch_size
            if start >= len(pca_features):
                break  # collected enough samples
            features = model(images).cpu().numpy()
            pca_features[start : start + len(features)] = features
    pca = PCA(args.pca_dim)
    pca.fit(pca_features)
    return pca
| 3,201 | 40.584416 | 102 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/datasets_ws.py |
import os
import torch
import faiss
import logging
import numpy as np
from glob import glob
from tqdm import tqdm
from PIL import Image
from os.path import join
import torch.utils.data as data
import torchvision.transforms as T
from torch.utils.data.dataset import Subset
from sklearn.neighbors import NearestNeighbors
from torch.utils.data.dataloader import DataLoader
# Shared preprocessing pipeline: PIL image -> tensor, normalized with the
# standard ImageNet statistics (the mean/std values used by torchvision's
# pretrained backbones).
base_transform = T.Compose([
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def path_to_pil_img(path):
    """Open the image file at ``path`` and return it as an RGB PIL image."""
    image = Image.open(path)
    return image.convert("RGB")
def collate_fn(batch):
    """Creates mini-batch tensors from the list of tuples (images,
    triplets_local_indexes, triplets_global_indexes).
    triplets_local_indexes are the indexes referring to each triplet within images.
    triplets_global_indexes are the global indexes of each image.
    Args:
        batch: list of tuple (images, triplets_local_indexes, triplets_global_indexes).
            considering each query to have 10 negatives (negs_num_per_query=10):
            - images: torch tensor of shape (12, 3, h, w).
            - triplets_local_indexes: torch tensor of shape (10, 3).
            - triplets_global_indexes: torch tensor of shape (12).
    Returns:
        images: torch tensor of shape (batch_size*12, 3, h, w).
        triplets_local_indexes: torch tensor of shape (batch_size*10, 3).
        triplets_global_indexes: torch tensor of shape (batch_size, 12).
    """
    images = torch.cat([sample[0] for sample in batch])
    local_indexes = torch.cat([sample[1][None] for sample in batch])
    global_indexes = torch.cat([sample[2][None] for sample in batch])
    # Shift each sample's local indexes so that they address that sample's
    # own slice of the concatenated images tensor (each sample contributes
    # len(global_idx) images, e.g. 12 = query + positive + 10 negatives).
    for sample_num, (local_idx, global_idx) in enumerate(zip(local_indexes, global_indexes)):
        local_idx += len(global_idx) * sample_num
    return images, torch.cat(tuple(local_indexes)), global_indexes
class PCADataset(data.Dataset):
    """Flat dataset of all .jpg images under a folder, used only to
    extract features for fitting PCA (no labels, no splits)."""
    def __init__(self, args, datasets_folder="dataset", dataset_folder="pitts30k/images/train"):
        folder = join(datasets_folder, dataset_folder)
        if not os.path.exists(folder):
            raise FileNotFoundError(f"Folder {folder} does not exist")
        # Recursively collect every jpg, sorted for a deterministic order.
        self.images_paths = sorted(glob(join(folder, "**", "*.jpg"), recursive=True))
    def __getitem__(self, index):
        """Return the index-th image as a normalized tensor."""
        return base_transform(path_to_pil_img(self.images_paths[index]))
    def __len__(self):
        return len(self.images_paths)
class BaseDataset(data.Dataset):
    """Dataset with images from database and queries, used for inference (testing and building cache).

    Expects the on-disk layout
    ``datasets_folder/dataset_name/images/split/{database,queries}/**/*.jpg``
    where each filename embeds UTM coordinates as
    ``@utm_easting@utm_northing@...@.jpg``.
    Items are indexed database-first: indexes [0, database_num) are database
    images, [database_num, database_num+queries_num) are queries.
    """
    def __init__(self, args, datasets_folder="datasets", dataset_name="pitts30k", split="train"):
        super().__init__()
        self.args = args
        self.dataset_name = dataset_name
        self.dataset_folder = join(datasets_folder, dataset_name, "images", split)
        if not os.path.exists(self.dataset_folder):
            raise FileNotFoundError(f"Folder {self.dataset_folder} does not exist")
        self.resize = args.resize
        self.test_method = args.test_method
        #### Read paths and UTM coordinates for all images.
        database_folder = join(self.dataset_folder, "database")
        queries_folder = join(self.dataset_folder, "queries")
        if not os.path.exists(database_folder):
            raise FileNotFoundError(f"Folder {database_folder} does not exist")
        if not os.path.exists(queries_folder):
            raise FileNotFoundError(f"Folder {queries_folder} does not exist")
        self.database_paths = sorted(glob(join(database_folder, "**", "*.jpg"), recursive=True))
        self.queries_paths = sorted(glob(join(queries_folder, "**", "*.jpg"), recursive=True))
        # The format must be path/to/file/@utm_easting@utm_northing@...@.jpg
        self.database_utms = np.array([(path.split("@")[1], path.split("@")[2]) for path in self.database_paths]).astype(float)
        self.queries_utms = np.array([(path.split("@")[1], path.split("@")[2]) for path in self.queries_paths]).astype(float)
        # Find soft_positives_per_query, which are within val_positive_dist_threshold (default 25 meters)
        knn = NearestNeighbors(n_jobs=-1)
        knn.fit(self.database_utms)
        self.soft_positives_per_query = knn.radius_neighbors(self.queries_utms,
                                                             radius=args.val_positive_dist_threshold,
                                                             return_distance=False)
        # Database images come first, then queries (see class docstring).
        self.images_paths = list(self.database_paths) + list(self.queries_paths)
        self.database_num = len(self.database_paths)
        self.queries_num = len(self.queries_paths)
    def __getitem__(self, index):
        """Return (image_tensor, index); the image is resized/cropped
        according to self.test_method (queries only)."""
        img = path_to_pil_img(self.images_paths[index])
        img = base_transform(img)
        # With database images self.test_method should always be "hard_resize"
        if self.test_method == "hard_resize":
            # self.test_method=="hard_resize" is the default, resizes all images to the same size.
            img = T.functional.resize(img, self.resize)
        else:
            img = self._test_query_transform(img)
        return img, index
    def _test_query_transform(self, img):
        """Transform query image according to self.test_method.

        NOTE(review): an unrecognized test_method falls through every branch
        and raises NameError on processed_img — presumably the CLI parser
        restricts the allowed values; verify against parser.py.
        """
        C, H, W = img.shape
        if self.test_method == "single_query":
            # self.test_method=="single_query" is used when queries have varying sizes, and can't be stacked in a batch.
            processed_img = T.functional.resize(img, min(self.resize))
        elif self.test_method == "central_crop":
            # Take the biggest central crop of size self.resize. Preserves ratio.
            scale = max(self.resize[0]/H, self.resize[1]/W)
            processed_img = torch.nn.functional.interpolate(img.unsqueeze(0), scale_factor=scale).squeeze(0)
            processed_img = T.functional.center_crop(processed_img, self.resize)
            assert processed_img.shape[1:] == torch.Size(self.resize), f"{processed_img.shape[1:]} {self.resize}"
        elif self.test_method == "five_crops" or self.test_method == 'nearest_crop' or self.test_method == 'maj_voting':
            # Get 5 square crops with size==shorter_side (usually 480). Preserves ratio and allows batches.
            # Note: unlike the other branches, this returns a (5, 3, s, s) stack.
            shorter_side = min(self.resize)
            processed_img = T.functional.resize(img, shorter_side)
            processed_img = torch.stack(T.functional.five_crop(processed_img, shorter_side))
            assert processed_img.shape == torch.Size([5, 3, shorter_side, shorter_side]), \
                f"{processed_img.shape} {torch.Size([5, 3, shorter_side, shorter_side])}"
        return processed_img
    def __len__(self):
        """Total number of images: database images plus queries."""
        return len(self.images_paths)
    def __repr__(self):
        return f"< {self.__class__.__name__}, {self.dataset_name} - #database: {self.database_num}; #queries: {self.queries_num} >"
    def get_positives(self):
        """Return, for each query, the database indexes within the soft
        positive radius (val_positive_dist_threshold)."""
        return self.soft_positives_per_query
class TripletsDataset(BaseDataset):
    """Dataset used for training, it is used to compute the triplets
    with TripletsDataset.compute_triplets() with various mining methods.
    If is_inference == True, uses methods of the parent class BaseDataset,
    this is used for example when computing the cache, because we compute features
    of each image, not triplets.
    """
    def __init__(self, args, datasets_folder="datasets", dataset_name="pitts30k", split="train", negs_num_per_query=10):
        super().__init__(args, datasets_folder, dataset_name, split)
        self.mining = args.mining
        self.neg_samples_num = args.neg_samples_num  # Number of negatives to randomly sample
        self.negs_num_per_query = negs_num_per_query  # Number of negatives per query in each batch
        if self.mining == "full":  # "Full database mining" keeps a cache with last used negatives
            self.neg_cache = [np.empty((0,), dtype=np.int32) for _ in range(self.queries_num)]
        self.is_inference = False
        identity_transform = T.Lambda(lambda x: x)
        # Database images are only resized + normalized.
        self.resized_transform = T.Compose([
            T.Resize(self.resize) if self.resize is not None else identity_transform,
            base_transform
        ])
        # Queries additionally get the data-augmentation configured via args.
        self.query_transform = T.Compose([
            T.ColorJitter(args.brightness, args.contrast, args.saturation, args.hue),
            T.RandomPerspective(args.rand_perspective),
            T.RandomResizedCrop(size=self.resize, scale=(1-args.random_resized_crop, 1)),
            T.RandomRotation(degrees=args.random_rotation),
            self.resized_transform,
        ])
        # Find hard_positives_per_query, which are within train_positives_dist_threshold (10 meters)
        knn = NearestNeighbors(n_jobs=-1)
        knn.fit(self.database_utms)
        self.hard_positives_per_query = list(knn.radius_neighbors(self.queries_utms,
                                             radius=args.train_positives_dist_threshold,  # 10 meters
                                             return_distance=False))
        #### Some queries might have no positive, we should remove those queries.
        queries_without_any_hard_positive = np.where(np.array([len(p) for p in self.hard_positives_per_query], dtype=object) == 0)[0]
        if len(queries_without_any_hard_positive) != 0:
            logging.info(f"There are {len(queries_without_any_hard_positive)} queries without any positives " +
                         "within the training set. They won't be considered as they're useless for training.")
            # Remove queries without positives
            self.hard_positives_per_query = np.delete(self.hard_positives_per_query, queries_without_any_hard_positive)
            self.queries_paths = np.delete(self.queries_paths, queries_without_any_hard_positive)
            # Recompute images_paths and queries_num because some queries might have been removed
            self.images_paths = list(self.database_paths) + list(self.queries_paths)
            self.queries_num = len(self.queries_paths)
        # msls_weighted refers to the mining presented in MSLS paper's supplementary.
        # Basically, images from uncommon domains are sampled more often. Works only with MSLS dataset.
        if self.mining == "msls_weighted":
            notes = [p.split("@")[-2] for p in self.queries_paths]
            try:
                night_indexes = np.where(np.array([n.split("_")[0] == "night" for n in notes]))[0]
                sideways_indexes = np.where(np.array([n.split("_")[1] == "sideways" for n in notes]))[0]
            except IndexError:
                raise RuntimeError("You're using msls_weighted mining but this dataset " +
                                   "does not have night/sideways information. Are you using Mapillary SLS?")
            self.weights = np.ones(self.queries_num)
            assert len(night_indexes) != 0 and len(sideways_indexes) != 0, \
                "There should be night and sideways images for msls_weighted mining, but there are none. Are you using Mapillary SLS?"
            self.weights[night_indexes] += self.queries_num / len(night_indexes)
            self.weights[sideways_indexes] += self.queries_num / len(sideways_indexes)
            self.weights /= self.weights.sum()
            # BUGFIX: the second string literal was missing the f-prefix, so
            # the night counts were logged as the literal text
            # "{len(night_indexes)}/{self.queries_num}".
            logging.info(f"#sideways_indexes [{len(sideways_indexes)}/{self.queries_num}]; " +
                         f"#night_indexes [{len(night_indexes)}/{self.queries_num}]")
    def __getitem__(self, index):
        """Return (images, triplets_local_indexes, triplets_global_indexes)
        for the index-th mined triplet; at inference time fall back to the
        parent's single-image behavior."""
        if self.is_inference:
            # At inference time return the single image. This is used for caching or computing NetVLAD's clusters
            return super().__getitem__(index)
        query_index, best_positive_index, neg_indexes = torch.split(self.triplets_global_indexes[index], (1, 1, self.negs_num_per_query))
        query = self.query_transform(path_to_pil_img(self.queries_paths[query_index]))
        positive = self.resized_transform(path_to_pil_img(self.database_paths[best_positive_index]))
        negatives = [self.resized_transform(path_to_pil_img(self.database_paths[i])) for i in neg_indexes]
        images = torch.stack((query, positive, *negatives), 0)
        # Each triplet is (query=0, positive=1, negative=2+i) within `images`.
        triplets_local_indexes = torch.empty((0, 3), dtype=torch.int)
        for neg_num in range(len(neg_indexes)):
            triplets_local_indexes = torch.cat((triplets_local_indexes, torch.tensor([0, 1, 2 + neg_num]).reshape(1, 3)))
        return images, triplets_local_indexes, self.triplets_global_indexes[index]
    def __len__(self):
        if self.is_inference:
            # At inference time return the number of images. This is used for caching or computing NetVLAD's clusters
            return super().__len__()
        else:
            return len(self.triplets_global_indexes)
    def compute_triplets(self, args, model):
        """Mine a fresh set of (query, positive, negatives) triplets using
        the configured mining strategy; fills self.triplets_global_indexes."""
        self.is_inference = True
        if self.mining == "full":
            self.compute_triplets_full(args, model)
        elif self.mining == "partial" or self.mining == "msls_weighted":
            self.compute_triplets_partial(args, model)
        elif self.mining == "random":
            self.compute_triplets_random(args, model)
        else:
            # Previously an unknown mining mode silently left stale triplets.
            raise ValueError(f"Unknown mining method: {self.mining}")
    @staticmethod
    def compute_cache(args, model, subset_ds, cache_shape):
        """Compute the cache containing features of images, which is used to
        find best positive and hardest negatives."""
        subset_dl = DataLoader(dataset=subset_ds, num_workers=args.num_workers,
                               batch_size=args.infer_batch_size, shuffle=False,
                               pin_memory=(args.device == "cuda"))
        model = model.eval()
        # RAMEfficient2DMatrix can be replaced by np.zeros, but using
        # RAMEfficient2DMatrix is RAM efficient for full database mining.
        cache = RAMEfficient2DMatrix(cache_shape, dtype=np.float32)
        with torch.no_grad():
            for images, indexes in tqdm(subset_dl, ncols=100):
                images = images.to(args.device)
                features = model(images)
                cache[indexes.numpy()] = features.cpu().numpy()
        return cache
    def get_query_features(self, query_index, cache):
        """Fetch the cached features of a query (queries are stored after
        the database_num database rows)."""
        query_features = cache[query_index + self.database_num]
        if query_features is None:
            raise RuntimeError(f"For query {self.queries_paths[query_index]} " +
                               f"with index {query_index} features have not been computed!\n" +
                               "There might be some bug with caching")
        return query_features
    def get_best_positive_index(self, args, query_index, cache, query_features):
        """Among the query's hard positives (within 10 m), return the one
        nearest to the query in feature space."""
        positives_features = cache[self.hard_positives_per_query[query_index]]
        faiss_index = faiss.IndexFlatL2(args.features_dim)
        faiss_index.add(positives_features)
        # Search the best positive (within 10 meters AND nearest in features space)
        _, best_positive_num = faiss_index.search(query_features.reshape(1, -1), 1)
        best_positive_index = self.hard_positives_per_query[query_index][best_positive_num[0]].item()
        return best_positive_index
    def get_hardest_negatives_indexes(self, args, cache, query_features, neg_samples):
        """Return the negs_num_per_query negatives (from neg_samples)
        nearest to the query in feature space."""
        neg_features = cache[neg_samples]
        faiss_index = faiss.IndexFlatL2(args.features_dim)
        faiss_index.add(neg_features)
        # Search the 10 nearest negatives (further than 25 meters and nearest in features space)
        _, neg_nums = faiss_index.search(query_features.reshape(1, -1), self.negs_num_per_query)
        neg_nums = neg_nums.reshape(-1)
        neg_indexes = neg_samples[neg_nums].astype(np.int32)
        return neg_indexes
    def compute_triplets_random(self, args, model):
        """Mining: best positive per query, negatives sampled at random."""
        self.triplets_global_indexes = []
        # Take 1000 random queries
        sampled_queries_indexes = np.random.choice(self.queries_num, args.cache_refresh_rate, replace=False)
        # Take all the positives
        positives_indexes = [self.hard_positives_per_query[i] for i in sampled_queries_indexes]
        positives_indexes = [p for pos in positives_indexes for p in pos]  # Flatten list of lists to a list
        positives_indexes = list(np.unique(positives_indexes))
        # Compute the cache only for queries and their positives, in order to find the best positive
        subset_ds = Subset(self, positives_indexes + list(sampled_queries_indexes + self.database_num))
        cache = self.compute_cache(args, model, subset_ds, (len(self), args.features_dim))
        # This loop's iterations could be done individually in the __getitem__(). This way is slower but clearer (and yields same results)
        for query_index in tqdm(sampled_queries_indexes, ncols=100):
            query_features = self.get_query_features(query_index, cache)
            best_positive_index = self.get_best_positive_index(args, query_index, cache, query_features)
            # Choose some random database images, from those remove the soft_positives, and then take the first 10 images as neg_indexes
            soft_positives = self.soft_positives_per_query[query_index]
            neg_indexes = np.random.choice(self.database_num, size=self.negs_num_per_query+len(soft_positives), replace=False)
            neg_indexes = np.setdiff1d(neg_indexes, soft_positives, assume_unique=True)[:self.negs_num_per_query]
            self.triplets_global_indexes.append((query_index, best_positive_index, *neg_indexes))
        # self.triplets_global_indexes is a tensor of shape [1000, 12]
        self.triplets_global_indexes = torch.tensor(self.triplets_global_indexes)
    def compute_triplets_full(self, args, model):
        """Mining: hardest negatives over the full database, with a per-query
        cache of previously used negatives."""
        self.triplets_global_indexes = []
        # Take 1000 random queries
        sampled_queries_indexes = np.random.choice(self.queries_num, args.cache_refresh_rate, replace=False)
        # Take all database indexes
        database_indexes = list(range(self.database_num))
        # Compute features for all images and store them in cache
        subset_ds = Subset(self, database_indexes + list(sampled_queries_indexes + self.database_num))
        cache = self.compute_cache(args, model, subset_ds, (len(self), args.features_dim))
        # This loop's iterations could be done individually in the __getitem__(). This way is slower but clearer (and yields same results)
        for query_index in tqdm(sampled_queries_indexes, ncols=100):
            query_features = self.get_query_features(query_index, cache)
            best_positive_index = self.get_best_positive_index(args, query_index, cache, query_features)
            # Choose 1000 random database images (neg_indexes)
            neg_indexes = np.random.choice(self.database_num, self.neg_samples_num, replace=False)
            # Remove the eventual soft_positives from neg_indexes
            soft_positives = self.soft_positives_per_query[query_index]
            neg_indexes = np.setdiff1d(neg_indexes, soft_positives, assume_unique=True)
            # Concatenate neg_indexes with the previous top 10 negatives (neg_cache)
            neg_indexes = np.unique(np.concatenate([self.neg_cache[query_index], neg_indexes]))
            # Search the hardest negatives
            neg_indexes = self.get_hardest_negatives_indexes(args, cache, query_features, neg_indexes)
            # Update nearest negatives in neg_cache
            self.neg_cache[query_index] = neg_indexes
            self.triplets_global_indexes.append((query_index, best_positive_index, *neg_indexes))
        # self.triplets_global_indexes is a tensor of shape [1000, 12]
        self.triplets_global_indexes = torch.tensor(self.triplets_global_indexes)
    def compute_triplets_partial(self, args, model):
        """Mining: hardest negatives within a random subset of the database
        ("partial"), optionally with MSLS domain-weighted query sampling."""
        self.triplets_global_indexes = []
        # Take 1000 random queries
        if self.mining == "partial":
            sampled_queries_indexes = np.random.choice(self.queries_num, args.cache_refresh_rate, replace=False)
        elif self.mining == "msls_weighted":  # Pick night and sideways queries with higher probability
            sampled_queries_indexes = np.random.choice(self.queries_num, args.cache_refresh_rate, replace=False, p=self.weights)
        # Sample 1000 random database images for the negatives
        sampled_database_indexes = np.random.choice(self.database_num, self.neg_samples_num, replace=False)
        # Take all the positives
        positives_indexes = [self.hard_positives_per_query[i] for i in sampled_queries_indexes]
        positives_indexes = [p for pos in positives_indexes for p in pos]
        # Merge them into database_indexes and remove duplicates
        database_indexes = list(sampled_database_indexes) + positives_indexes
        database_indexes = list(np.unique(database_indexes))
        subset_ds = Subset(self, database_indexes + list(sampled_queries_indexes + self.database_num))
        cache = self.compute_cache(args, model, subset_ds, cache_shape=(len(self), args.features_dim))
        # This loop's iterations could be done individually in the __getitem__(). This way is slower but clearer (and yields same results)
        for query_index in tqdm(sampled_queries_indexes, ncols=100):
            query_features = self.get_query_features(query_index, cache)
            best_positive_index = self.get_best_positive_index(args, query_index, cache, query_features)
            # Choose the hardest negatives within sampled_database_indexes, ensuring that there are no positives
            soft_positives = self.soft_positives_per_query[query_index]
            neg_indexes = np.setdiff1d(sampled_database_indexes, soft_positives, assume_unique=True)
            # Take all database images that are negatives and are within the sampled database images (aka database_indexes)
            neg_indexes = self.get_hardest_negatives_indexes(args, cache, query_features, neg_indexes)
            self.triplets_global_indexes.append((query_index, best_positive_index, *neg_indexes))
        # self.triplets_global_indexes is a tensor of shape [1000, 12]
        self.triplets_global_indexes = torch.tensor(self.triplets_global_indexes)
class RAMEfficient2DMatrix:
    """Sparse-row stand-in for a 2D array initialized with np.zeros().

    Rows are stored individually and allocated only when written, which
    saves RAM when features are computed for just a subset of images
    (as happens during full database mining). Unwritten rows read as None.
    """
    def __init__(self, shape, dtype=np.float32):
        self.shape = shape
        self.dtype = dtype
        # One slot per row; None marks a row that was never written.
        self.matrix = [None] * shape[0]
    def __setitem__(self, indexes, vals):
        assert vals.shape[1] == self.shape[1], f"{vals.shape[1]} {self.shape[1]}"
        for row, value in zip(indexes, vals):
            # copy=False avoids duplicating the row when it already has
            # the target dtype.
            self.matrix[row] = value.astype(self.dtype, copy=False)
    def __getitem__(self, index):
        # A sequence of indexes returns a stacked 2D array; a scalar
        # index returns the single row (or None if never written).
        if hasattr(index, "__len__"):
            return np.array([self.matrix[i] for i in index])
        return self.matrix[index]
| 23,388 | 56.750617 | 138 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/eval.py |
"""
With this script you can evaluate checkpoints or test models from two popular
landmark retrieval github repos.
The first is https://github.com/naver/deep-image-retrieval from Naver labs,
provides ResNet-50 and ResNet-101 trained with AP on Google Landmarks 18 clean.
$ python eval.py --off_the_shelf=naver --l2=none --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048
The second is https://github.com/filipradenovic/cnnimageretrieval-pytorch from
Radenovic, provides ResNet-50 and ResNet-101 trained with a triplet loss
on Google Landmarks 18 and sfm120k.
$ python eval.py --off_the_shelf=radenovic_gldv1 --l2=after_pool --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048
$ python eval.py --off_the_shelf=radenovic_sfm --l2=after_pool --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048
Note that although the architectures are almost the same, Naver's
implementation does not use a l2 normalization before/after the GeM aggregation,
while Radenovic's uses it after (and we use it before, which shows better
results in VG)
"""
import os
import sys
import torch
import parser
import logging
import sklearn
from os.path import join
from datetime import datetime
from torch.utils.model_zoo import load_url
from google_drive_downloader import GoogleDriveDownloader as gdd
import test
import util
import commons
import datasets_ws
from model import network
# Download URLs for the GeM models released by Radenovic's
# cnnimageretrieval-pytorch, keyed as "<backbone>_<pretraining dataset>".
OFF_THE_SHELF_RADENOVIC = {
    'resnet50conv5_sfm' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/rSfM120k-tl-resnet50-gem-w-97bf910.pth',
    'resnet101conv5_sfm' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/rSfM120k-tl-resnet101-gem-w-a155e54.pth',
    'resnet50conv5_gldv1' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/gl18/gl18-tl-resnet50-gem-w-83fdc30.pth',
    'resnet101conv5_gldv1' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/gl18/gl18-tl-resnet101-gem-w-a4d43db.pth',
}
# Google Drive file ids of the AP-GeM models released by Naver's
# deep-image-retrieval, keyed by backbone name.
OFF_THE_SHELF_NAVER = {
    "resnet50conv5" : "1oPtE_go9tnsiDLkWjN4NMpKjh-_md1G5",
    'resnet101conv5' : "1UWJGDuHtzaQdFhSMojoYVQjmCXhIwVvy"
}
######################################### SETUP #########################################
args = parser.parse_arguments()
start_time = datetime.now()
# Each evaluation run logs to its own timestamped folder under test/.
args.save_dir = join("test", args.save_dir, start_time.strftime('%Y-%m-%d_%H-%M-%S'))
commons.setup_logging(args.save_dir)
commons.make_deterministic(args.seed)
logging.info(f"Arguments: {args}")
logging.info(f"The outputs are being saved in {args.save_dir}")
######################################### MODEL #########################################
model = network.GeoLocalizationNet(args)
model = model.to(args.device)
if args.aggregation in ["netvlad", "crn"]:
    # NetVLAD/CRN output dimensionality is features_dim * number of clusters.
    args.features_dim *= args.netvlad_clusters
if args.off_the_shelf.startswith("radenovic") or args.off_the_shelf.startswith("naver"):
    if args.off_the_shelf.startswith("radenovic"):
        pretrain_dataset_name = args.off_the_shelf.split("_")[1] # sfm or gldv1 datasets
        url = OFF_THE_SHELF_RADENOVIC[f"{args.backbone}_{pretrain_dataset_name}"]
        state_dict = load_url(url, model_dir=join("data", "off_the_shelf_nets"))
    else:
        # This is a hacky workaround to maintain compatibility
        sys.modules['sklearn.decomposition.pca'] = sklearn.decomposition._pca
        zip_file_path = join("data", "off_the_shelf_nets", args.backbone + "_naver.zip")
        if not os.path.exists(zip_file_path):
            gdd.download_file_from_google_drive(file_id=OFF_THE_SHELF_NAVER[args.backbone],
                                                dest_path=zip_file_path, unzip=True)
        if args.backbone == "resnet50conv5":
            state_dict_filename = "Resnet50-AP-GeM.pt"
        elif args.backbone == "resnet101conv5":
            state_dict_filename = "Resnet-101-AP-GeM.pt"
        state_dict = torch.load(join("data", "off_the_shelf_nets", state_dict_filename))
        state_dict = state_dict["state_dict"]
    # NOTE(review): keys are matched purely by iteration order between the
    # two state dicts, not by name — this only works while both models have
    # exactly the same layer ordering.
    model_keys = model.state_dict().keys()
    renamed_state_dict = {k: v for k, v in zip(model_keys, state_dict.values())}
    model.load_state_dict(renamed_state_dict)
elif args.resume is not None:
    logging.info(f"Resuming model from {args.resume}")
    model = util.resume_model(args, model)
# Enable DataParallel after loading checkpoint, otherwise doing it before
# would append "module." in front of the keys of the state dict triggering errors
model = torch.nn.DataParallel(model)
if args.pca_dim is None:
    pca = None
else:
    # Fit PCA on features extracted at the full dimensionality, then
    # evaluate with the reduced pca_dim.
    full_features_dim = args.features_dim
    args.features_dim = args.pca_dim
    pca = util.compute_pca(args, model, args.pca_dataset_folder, full_features_dim)
######################################### DATASETS #########################################
test_ds = datasets_ws.BaseDataset(args, args.datasets_folder, args.dataset_name, "test")
logging.info(f"Test set: {test_ds}")
######################################### TEST on TEST SET #########################################
recalls, recalls_str = test.test(args, test_ds, model, args.test_method, pca)
logging.info(f"Recalls on {test_ds}: {recalls_str}")
logging.info(f"Finished in {str(datetime.now() - start_time)[:-7]}")
| 5,209 | 46.363636 | 146 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/train.py |
import math
import torch
import logging
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import multiprocessing
from os.path import join
from datetime import datetime
import torchvision.transforms as transforms
from torch.utils.data.dataloader import DataLoader
import util
import test
import parser
import commons
import datasets_ws
from model import network
from model.sync_batchnorm import convert_model
from model.functional import sare_ind, sare_joint
torch.backends.cudnn.benchmark = True  # Provides a speedup
#### Initial setup: parser, logging...
args = parser.parse_arguments()
start_time = datetime.now()
# Each run gets its own timestamped folder under logs/<save_dir>/.
args.save_dir = join("logs", args.save_dir, start_time.strftime('%Y-%m-%d_%H-%M-%S'))
commons.setup_logging(args.save_dir)
commons.make_deterministic(args.seed)
logging.info(f"Arguments: {args}")
logging.info(f"The outputs are being saved in {args.save_dir}")
logging.info(f"Using {torch.cuda.device_count()} GPUs and {multiprocessing.cpu_count()} CPUs")
#### Creation of Datasets
logging.debug(f"Loading dataset {args.dataset_name} from folder {args.datasets_folder}")
triplets_ds = datasets_ws.TripletsDataset(args, args.datasets_folder, args.dataset_name, "train", args.negs_num_per_query)
logging.info(f"Train query set: {triplets_ds}")
val_ds = datasets_ws.BaseDataset(args, args.datasets_folder, args.dataset_name, "val")
logging.info(f"Val set: {val_ds}")
test_ds = datasets_ws.BaseDataset(args, args.datasets_folder, args.dataset_name, "test")
logging.info(f"Test set: {test_ds}")
#### Initialize model
model = network.GeoLocalizationNet(args)
model = model.to(args.device)
if args.aggregation in ["netvlad", "crn"]:  # If using NetVLAD layer, initialize it
    if not args.resume:
        triplets_ds.is_inference = True
        model.aggregation.initialize_netvlad_layer(args, triplets_ds, model.backbone)
    # NetVLAD concatenates one dim-sized residual vector per cluster.
    args.features_dim *= args.netvlad_clusters
model = torch.nn.DataParallel(model)
#### Setup Optimizer and Loss
if args.aggregation == "crn":
    # CRN gets its own learning rate, separate from the rest of the network.
    crn_params = list(model.module.aggregation.crn.parameters())
    net_params = list(model.module.backbone.parameters()) + \
                 list([m[1] for m in model.module.aggregation.named_parameters() if not m[0].startswith('crn')])
    if args.optim == "adam":
        optimizer = torch.optim.Adam([{'params': crn_params, 'lr': args.lr_crn_layer},
                                      {'params': net_params, 'lr': args.lr_crn_net}])
        logging.info("You're using CRN with Adam, it is advised to use SGD")
    elif args.optim == "sgd":
        optimizer = torch.optim.SGD([{'params': crn_params, 'lr': args.lr_crn_layer, 'momentum': 0.9, 'weight_decay': 0.001},
                                     {'params': net_params, 'lr': args.lr_crn_net, 'momentum': 0.9, 'weight_decay': 0.001}])
else:
    if args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    elif args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=0.001)
if args.criterion == "triplet":
    criterion_triplet = nn.TripletMarginLoss(margin=args.margin, p=2, reduction="sum")
elif args.criterion == "sare_ind":
    criterion_triplet = sare_ind
elif args.criterion == "sare_joint":
    criterion_triplet = sare_joint
#### Resume model, optimizer, and other training parameters
if args.resume:
    if args.aggregation != 'crn':
        model, optimizer, best_r5, start_epoch_num, not_improved_num = util.resume_train(args, model, optimizer)
    else:
        # CRN uses pretrained NetVLAD, then requires loading with strict=False and
        # does not load the optimizer from the checkpoint file.
        model, _, best_r5, start_epoch_num, not_improved_num = util.resume_train(args, model, strict=False)
    logging.info(f"Resuming from epoch {start_epoch_num} with best recall@5 {best_r5:.1f}")
else:
    best_r5 = start_epoch_num = not_improved_num = 0
if args.backbone.startswith('vit'):
    # FLOPs computation is skipped for ViT backbones.
    logging.info(f"Output dimension of the model is {args.features_dim}")
else:
    logging.info(f"Output dimension of the model is {args.features_dim}, with {util.get_flops(model, args.resize)}")
if torch.cuda.device_count() >= 2:
    # When using more than 1GPU, use sync_batchnorm for torch.nn.DataParallel
    model = convert_model(model)
    model = model.cuda()
#### Training loop
for epoch_num in range(start_epoch_num, args.epochs_num):
    logging.info(f"Start training epoch: {epoch_num:02d}")
    epoch_start_time = datetime.now()
    epoch_losses = np.zeros((0, 1), dtype=np.float32)
    # How many loops should an epoch last (default is 5000/1000=5)
    loops_num = math.ceil(args.queries_per_epoch / args.cache_refresh_rate)
    for loop_num in range(loops_num):
        logging.debug(f"Cache: {loop_num} / {loops_num}")
        # Compute triplets to use in the triplet loss
        # (mining uses the current model, so the cache is refreshed every loop).
        triplets_ds.is_inference = True
        triplets_ds.compute_triplets(args, model)
        triplets_ds.is_inference = False
        triplets_dl = DataLoader(dataset=triplets_ds, num_workers=args.num_workers,
                                 batch_size=args.train_batch_size,
                                 collate_fn=datasets_ws.collate_fn,
                                 pin_memory=(args.device == "cuda"),
                                 drop_last=True)
        model = model.train()
        # images shape: (train_batch_size*12)*3*H*W ; by default train_batch_size=4, H=480, W=640
        # triplets_local_indexes shape: (train_batch_size*10)*3 ; because 10 triplets per query
        for images, triplets_local_indexes, _ in tqdm(triplets_dl, ncols=100):
            # Flip all triplets or none
            if args.horizontal_flip:
                images = transforms.RandomHorizontalFlip()(images)
            # Compute features of all images (images contains queries, positives and negatives)
            features = model(images.to(args.device))
            loss_triplet = 0
            if args.criterion == "triplet":
                triplets_local_indexes = torch.transpose(
                    triplets_local_indexes.view(args.train_batch_size, args.negs_num_per_query, 3), 1, 0)
                for triplets in triplets_local_indexes:
                    queries_indexes, positives_indexes, negatives_indexes = triplets.T
                    loss_triplet += criterion_triplet(features[queries_indexes],
                                                      features[positives_indexes],
                                                      features[negatives_indexes])
            elif args.criterion == 'sare_joint':
                # sare_joint needs to receive all the negatives at once
                triplet_index_batch = triplets_local_indexes.view(args.train_batch_size, 10, 3)
                for batch_triplet_index in triplet_index_batch:
                    q = features[batch_triplet_index[0, 0]].unsqueeze(0)  # obtain query as tensor of shape 1xn_features
                    p = features[batch_triplet_index[0, 1]].unsqueeze(0)  # obtain positive as tensor of shape 1xn_features
                    n = features[batch_triplet_index[:, 2]]  # obtain negatives as tensor of shape 10xn_features
                    loss_triplet += criterion_triplet(q, p, n)
            elif args.criterion == "sare_ind":
                for triplet in triplets_local_indexes:
                    # triplet is a 1-D tensor with the 3 scalars indexes of the triplet
                    q_i, p_i, n_i = triplet
                    loss_triplet += criterion_triplet(features[q_i:q_i+1], features[p_i:p_i+1], features[n_i:n_i+1])
            del features
            loss_triplet /= (args.train_batch_size * args.negs_num_per_query)
            optimizer.zero_grad()
            loss_triplet.backward()
            optimizer.step()
            # Keep track of all losses by appending them to epoch_losses
            batch_loss = loss_triplet.item()
            epoch_losses = np.append(epoch_losses, batch_loss)
            del loss_triplet
        logging.debug(f"Epoch[{epoch_num:02d}]({loop_num}/{loops_num}): " +
                      f"current batch triplet loss = {batch_loss:.4f}, " +
                      f"average epoch triplet loss = {epoch_losses.mean():.4f}")
    logging.info(f"Finished epoch {epoch_num:02d} in {str(datetime.now() - epoch_start_time)[:-7]}, "
                 f"average epoch triplet loss = {epoch_losses.mean():.4f}")
    # Compute recalls on validation set
    recalls, recalls_str = test.test(args, val_ds, model)
    logging.info(f"Recalls on val set {val_ds}: {recalls_str}")
    # NOTE(review): recalls[1] is presumably recall@5 (second recall value) — confirm in test.test.
    is_best = recalls[1] > best_r5
    # Save checkpoint, which contains all training parameters
    util.save_checkpoint(args, {
        "epoch_num": epoch_num, "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(), "recalls": recalls, "best_r5": best_r5,
        "not_improved_num": not_improved_num
    }, is_best, filename="last_model.pth")
    # If recall@5 did not improve for "many" epochs, stop training
    if is_best:
        logging.info(f"Improved: previous best R@5 = {best_r5:.1f}, current R@5 = {recalls[1]:.1f}")
        best_r5 = recalls[1]
        not_improved_num = 0
    else:
        not_improved_num += 1
        logging.info(f"Not improved: {not_improved_num} / {args.patience}: best R@5 = {best_r5:.1f}, current R@5 = {recalls[1]:.1f}")
        if not_improved_num >= args.patience:
            logging.info(f"Performance did not improve for {not_improved_num} epochs. Stop training.")
            break
logging.info(f"Best R@5: {best_r5:.1f}")
logging.info(f"Trained for {epoch_num+1:02d} epochs, in total in {str(datetime.now() - start_time)[:-7]}")
#### Test best model on test set
# Reload the checkpoint with the best validation R@5 before evaluating on test.
best_model_state_dict = torch.load(join(args.save_dir, "best_model.pth"))["model_state_dict"]
model.load_state_dict(best_model_state_dict)
recalls, recalls_str = test.test(args, test_ds, model, test_method=args.test_method)
logging.info(f"Recalls on {test_ds}: {recalls_str}")
| 10,186 | 45.729358 | 133 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/aggregation.py |
import math
import torch
import faiss
import logging
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader, SubsetRandomSampler
import model.functional as LF
import model.normalization as normalization
class MAC(nn.Module):
    """Global max-pooling aggregation (Maximum Activations of Convolutions)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Delegates to the functional implementation in model.functional.
        return LF.mac(x)

    def __repr__(self):
        return f"{type(self).__name__}()"
class SPoC(nn.Module):
    """Global average-pooling aggregation (Sum-Pooling of Convolutions)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Delegates to the functional implementation in model.functional.
        return LF.spoc(x)

    def __repr__(self):
        return f"{type(self).__name__}()"
class GeM(nn.Module):
    """Generalized-mean pooling with a learnable exponent p."""

    def __init__(self, p=3, eps=1e-6, work_with_tokens=False):
        super().__init__()
        # p is a learnable scalar, optimized jointly with the network.
        self.p = Parameter(torch.ones(1) * p)
        self.eps = eps
        self.work_with_tokens = work_with_tokens

    def forward(self, x):
        return LF.gem(x, p=self.p, eps=self.eps, work_with_tokens=self.work_with_tokens)

    def __repr__(self):
        # Keep the exact historical repr format.
        return (self.__class__.__name__ + '(' + 'p=' +
                '{:.4f}'.format(self.p.data.tolist()[0]) +
                ', ' + 'eps=' + str(self.eps) + ')')
class RMAC(nn.Module):
    """Regional MAC pooling over L pyramid levels."""

    def __init__(self, L=3, eps=1e-6):
        super().__init__()
        self.L = L
        self.eps = eps

    def forward(self, x):
        # Delegates to the functional implementation in model.functional.
        return LF.rmac(x, L=self.L, eps=self.eps)

    def __repr__(self):
        return self.__class__.__name__ + '(' + 'L=' + '{}'.format(self.L) + ')'
class Flatten(torch.nn.Module):
    """Squeeze a (N, C, 1, 1) tensor down to (N, C)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Only valid after pooling to a 1x1 spatial map.
        assert x.shape[2] == x.shape[3] == 1
        return x[:, :, 0, 0]
class RRM(nn.Module):
    """Residual Retrieval Module as described in the paper
    `Leveraging EfficientNet and Contrastive Learning for Accurate Global-scale
    Location Estimation <https://arxiv.org/pdf/2105.07645.pdf>`
    """

    def __init__(self, dim):
        super().__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
        self.flatten = Flatten()
        self.ln1 = nn.LayerNorm(normalized_shape=dim)
        self.fc1 = nn.Linear(in_features=dim, out_features=dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(in_features=dim, out_features=dim)
        self.ln2 = nn.LayerNorm(normalized_shape=dim)
        self.l2 = normalization.L2Norm()

    def forward(self, x):
        # Global average pooling, then a pre-normalized residual MLP,
        # and finally LayerNorm + L2 normalization.
        pooled = self.ln1(self.flatten(self.avgpool(x)))
        out = self.fc2(self.relu(self.fc1(pooled)))
        out += pooled
        return self.l2(self.ln2(out))
# based on https://github.com/lyakaap/NetVLAD-pytorch/blob/master/netvlad.py
class NetVLAD(nn.Module):
    """NetVLAD layer implementation"""

    def __init__(self, clusters_num=64, dim=128, normalize_input=True, work_with_tokens=False):
        """
        Args:
            clusters_num : int
                The number of clusters
            dim : int
                Dimension of descriptors
            normalize_input : bool
                If true, descriptor-wise L2 normalization is applied to input.
            work_with_tokens : bool
                If true, inputs are token sequences (N x tokens x dim), e.g.
                from a transformer backbone, instead of CNN maps (N x dim x H x W).
        """
        super().__init__()
        self.clusters_num = clusters_num
        self.dim = dim
        # alpha controls the softness of the cluster assignment; it is
        # computed from data statistics in init_params().
        self.alpha = 0
        self.normalize_input = normalize_input
        self.work_with_tokens = work_with_tokens
        if work_with_tokens:
            self.conv = nn.Conv1d(dim, clusters_num, kernel_size=1, bias=False)
        else:
            self.conv = nn.Conv2d(dim, clusters_num, kernel_size=(1, 1), bias=False)
        self.centroids = nn.Parameter(torch.rand(clusters_num, dim))

    def init_params(self, centroids, descriptors):
        """Set centroids and the soft-assignment conv from k-means output."""
        centroids_assign = centroids / np.linalg.norm(centroids, axis=1, keepdims=True)
        dots = np.dot(centroids_assign, descriptors.T)
        dots.sort(0)
        dots = dots[::-1, :]  # sort, descending
        # Choose alpha so that the mean best/second-best assignment ratio is ~100.
        self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item()
        self.centroids = nn.Parameter(torch.from_numpy(centroids))
        if self.work_with_tokens:
            self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha * centroids_assign).unsqueeze(2))
        else:
            self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha*centroids_assign).unsqueeze(2).unsqueeze(3))
        self.conv.bias = None

    def forward(self, x):
        """Return the (N, clusters_num * dim) L2-normalized VLAD descriptor."""
        if self.work_with_tokens:
            x = x.permute(0, 2, 1)
            N, D, _ = x.shape[:]
        else:
            N, D, H, W = x.shape[:]
        if self.normalize_input:
            x = F.normalize(x, p=2, dim=1)  # Across descriptor dim
        x_flatten = x.view(N, D, -1)
        soft_assign = self.conv(x).view(N, self.clusters_num, -1)
        soft_assign = F.softmax(soft_assign, dim=1)
        vlad = torch.zeros([N, self.clusters_num, D], dtype=x_flatten.dtype, device=x_flatten.device)
        # FIX: the loop index used to be named D, shadowing the descriptor
        # dimension D read above; use a dedicated cluster index instead.
        for cl in range(self.clusters_num):  # Slower than non-looped, but lower memory usage
            residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3) - \
                    self.centroids[cl:cl+1, :].expand(x_flatten.size(-1), -1, -1).permute(1, 2, 0).unsqueeze(0)
            residual = residual * soft_assign[:, cl:cl+1, :].unsqueeze(2)
            vlad[:, cl:cl+1, :] = residual.sum(dim=-1)
        vlad = F.normalize(vlad, p=2, dim=2)  # intra-normalization
        vlad = vlad.view(N, -1)  # Flatten
        vlad = F.normalize(vlad, p=2, dim=1)  # L2 normalize
        return vlad

    def initialize_netvlad_layer(self, args, cluster_ds, backbone):
        """Initialize centroids with k-means over descriptors sampled from cluster_ds."""
        descriptors_num = 50000
        descs_num_per_image = 100
        images_num = math.ceil(descriptors_num / descs_num_per_image)
        random_sampler = SubsetRandomSampler(np.random.choice(len(cluster_ds), images_num, replace=False))
        random_dl = DataLoader(dataset=cluster_ds, num_workers=args.num_workers,
                               batch_size=args.infer_batch_size, sampler=random_sampler)
        with torch.no_grad():
            backbone = backbone.eval()
            logging.debug("Extracting features to initialize NetVLAD layer")
            descriptors = np.zeros(shape=(descriptors_num, args.features_dim), dtype=np.float32)
            for iteration, (inputs, _) in enumerate(tqdm(random_dl, ncols=100)):
                inputs = inputs.to(args.device)
                outputs = backbone(inputs)
                norm_outputs = F.normalize(outputs, p=2, dim=1)
                image_descriptors = norm_outputs.view(norm_outputs.shape[0], args.features_dim, -1).permute(0, 2, 1)
                image_descriptors = image_descriptors.cpu().numpy()
                batchix = iteration * args.infer_batch_size * descs_num_per_image
                for ix in range(image_descriptors.shape[0]):
                    # Randomly subsample descs_num_per_image local descriptors per image.
                    sample = np.random.choice(image_descriptors.shape[1], descs_num_per_image, replace=False)
                    startix = batchix + ix * descs_num_per_image
                    descriptors[startix:startix + descs_num_per_image, :] = image_descriptors[ix, sample, :]
        kmeans = faiss.Kmeans(args.features_dim, self.clusters_num, niter=100, verbose=False)
        kmeans.train(descriptors)
        logging.debug(f"NetVLAD centroids shape: {kmeans.centroids.shape}")
        self.init_params(kmeans.centroids, descriptors)
        self = self.to(args.device)
class CRNModule(nn.Module):
    """Contextual Reweighting Network: predicts a per-location importance
    mask from multiscale context filters applied to a downsampled map."""

    def __init__(self, dim):
        super().__init__()
        # Downsample pooling
        self.downsample_pool = nn.AvgPool2d(kernel_size=3, stride=(2, 2),
                                            padding=0, ceil_mode=True)
        # Multiscale Context Filters
        self.filter_3_3 = nn.Conv2d(in_channels=dim, out_channels=32,
                                    kernel_size=(3, 3), padding=1)
        self.filter_5_5 = nn.Conv2d(in_channels=dim, out_channels=32,
                                    kernel_size=(5, 5), padding=2)
        self.filter_7_7 = nn.Conv2d(in_channels=dim, out_channels=20,
                                    kernel_size=(7, 7), padding=3)
        # Accumulation weight
        self.acc_w = nn.Conv2d(in_channels=84, out_channels=1, kernel_size=(1, 1))
        # Upsampling
        self.upsample = F.interpolate
        self._initialize_weights()

    def _initialize_weights(self):
        # Xavier init (zero bias) for the context filters; the accumulation
        # layer is fixed to a constant sum and frozen.
        for conv in (self.filter_3_3, self.filter_5_5, self.filter_7_7):
            torch.nn.init.xavier_normal_(conv.weight)
            torch.nn.init.constant_(conv.bias, 0.0)
        torch.nn.init.constant_(self.acc_w.weight, 1.0)
        torch.nn.init.constant_(self.acc_w.bias, 0.0)
        self.acc_w.weight.requires_grad = False
        self.acc_w.bias.requires_grad = False

    def forward(self, x):
        # Compute context at half resolution, then upsample the mask back.
        context = self.downsample_pool(x)
        multiscale = torch.cat((self.filter_3_3(context),
                                self.filter_5_5(context),
                                self.filter_7_7(context)), dim=1)
        multiscale = F.relu(multiscale)
        weights = F.relu(self.acc_w(multiscale))  # Accumulation weight
        mask = self.upsample(weights, scale_factor=2, mode='bilinear')  # Reweighting Mask
        return mask
class CRN(NetVLAD):
    """NetVLAD with a Contextual Reweighting Network: the CRN produces a
    spatial mask that reweights the soft assignments before aggregation."""

    def __init__(self, clusters_num=64, dim=128, normalize_input=True):
        super().__init__(clusters_num, dim, normalize_input)
        self.crn = CRNModule(dim)

    def forward(self, x):
        """Return the (N, clusters_num * dim) CRN-reweighted VLAD descriptor."""
        N, D, H, W = x.shape[:]
        if self.normalize_input:
            x = F.normalize(x, p=2, dim=1)  # Across descriptor dim
        mask = self.crn(x)
        x_flatten = x.view(N, D, -1)
        soft_assign = self.conv(x).view(N, self.clusters_num, -1)
        soft_assign = F.softmax(soft_assign, dim=1)
        # Weight soft_assign using CRN's mask
        soft_assign = soft_assign * mask.view(N, 1, H * W)
        vlad = torch.zeros([N, self.clusters_num, D], dtype=x_flatten.dtype, device=x_flatten.device)
        # FIX: the loop index used to be named D, shadowing the descriptor
        # dimension D read above; use a dedicated cluster index instead.
        for cl in range(self.clusters_num):  # Slower than non-looped, but lower memory usage
            residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3) - \
                       self.centroids[cl:cl + 1, :].expand(x_flatten.size(-1), -1, -1).permute(1, 2, 0).unsqueeze(0)
            residual = residual * soft_assign[:, cl:cl + 1, :].unsqueeze(2)
            vlad[:, cl:cl + 1, :] = residual.sum(dim=-1)
        vlad = F.normalize(vlad, p=2, dim=2)  # intra-normalization
        vlad = vlad.view(N, -1)  # Flatten
        vlad = F.normalize(vlad, p=2, dim=1)  # L2 normalize
        return vlad
| 10,963 | 41.007663 | 132 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/network.py |
import os
import torch
import logging
import torchvision
from torch import nn
from os.path import join
from transformers import ViTModel
from google_drive_downloader import GoogleDriveDownloader as gdd
from model.cct import cct_14_7x2_384
from model.aggregation import Flatten
from model.normalization import L2Norm
import model.aggregation as aggregation
# Pretrained models on Google Landmarks v2 and Places 365
# Maps "<backbone>_<pretrain>" to the Google Drive file id of the weights
# (downloaded on demand by get_pretrained_model).
PRETRAINED_MODELS = {
    'resnet18_places'  : '1DnEQXhmPxtBUrRc81nAvT8z17bk-GBj5',
    'resnet50_places'  : '1zsY4mN4jJ-AsmV3h4hjbT72CBfJsgSGC',
    'resnet101_places' : '1E1ibXQcg7qkmmmyYgmwMTh7Xf1cDNQXa',
    'vgg16_places'     : '1UWl1uz6rZ6Nqmp1K5z3GHAIZJmDh4bDu',
    'resnet18_gldv2'   : '1wkUeUXFXuPHuEvGTXVpuP5BMB-JJ1xke',
    'resnet50_gldv2'   : '1UDUv6mszlXNC1lv6McLdeBNMq9-kaA70',
    'resnet101_gldv2'  : '1apiRxMJpDlV0XmKlC5Na_Drg2jtGL-uE',
    'vgg16_gldv2'      : '10Ov9JdO7gbyz6mB5x0v_VSAUMj91Ta4o'
}
class GeoLocalizationNet(nn.Module):
    """The used networks are composed of a backbone and an aggregation layer.
    """
    def __init__(self, args):
        super().__init__()
        self.backbone = get_backbone(args)
        self.arch_name = args.backbone
        self.aggregation = get_aggregation(args)
        if args.aggregation in ["gem", "spoc", "mac", "rmac"]:
            # Wrap the pooling with optional L2 normalization plus flattening.
            if args.l2 == "before_pool":
                self.aggregation = nn.Sequential(L2Norm(), self.aggregation, Flatten())
            elif args.l2 == "after_pool":
                self.aggregation = nn.Sequential(self.aggregation, L2Norm(), Flatten())
            elif args.l2 == "none":
                self.aggregation = nn.Sequential(self.aggregation, Flatten())
        # FIX idiom: compare against None with "is not", not "!=".
        if args.fc_output_dim is not None:
            # Concatenate fully connected layer to the aggregation layer
            self.aggregation = nn.Sequential(self.aggregation,
                                             nn.Linear(args.features_dim, args.fc_output_dim),
                                             L2Norm())
            args.features_dim = args.fc_output_dim

    def forward(self, x):
        x = self.backbone(x)
        x = self.aggregation(x)
        return x
def get_aggregation(args):
    """Build the aggregation module selected by args.aggregation.

    Raises:
        ValueError: if args.aggregation is not a known aggregation name.
    """
    if args.aggregation == "gem":
        return aggregation.GeM(work_with_tokens=args.work_with_tokens)
    elif args.aggregation == "spoc":
        return aggregation.SPoC()
    elif args.aggregation == "mac":
        return aggregation.MAC()
    elif args.aggregation == "rmac":
        return aggregation.RMAC()
    elif args.aggregation == "netvlad":
        return aggregation.NetVLAD(clusters_num=args.netvlad_clusters, dim=args.features_dim,
                                   work_with_tokens=args.work_with_tokens)
    elif args.aggregation == 'crn':
        return aggregation.CRN(clusters_num=args.netvlad_clusters, dim=args.features_dim)
    elif args.aggregation == "rrm":
        return aggregation.RRM(args.features_dim)
    elif args.aggregation in ['cls', 'seqpool']:
        # CCT/ViT backbones aggregate internally (class token / seqpool).
        return nn.Identity()
    # FIX: previously fell through and silently returned None on unknown names.
    raise ValueError(f"Unknown aggregation: {args.aggregation}")
def get_pretrained_model(args):
    """Load a backbone pretrained on Places 365 or Google Landmarks v2.

    The weights file is downloaded from Google Drive (ids in PRETRAINED_MODELS)
    the first time and cached under data/pretrained_nets/.
    """
    # NOTE(review): assumes args.pretrain is 'places' or 'gldv2' (callers
    # check this); for any other value num_classes would be unbound.
    if args.pretrain == 'places':  num_classes = 365
    elif args.pretrain == 'gldv2':  num_classes = 512
    
    if args.backbone.startswith("resnet18"):
        model = torchvision.models.resnet18(num_classes=num_classes)
    elif args.backbone.startswith("resnet50"):
        model = torchvision.models.resnet50(num_classes=num_classes)
    elif args.backbone.startswith("resnet101"):
        model = torchvision.models.resnet101(num_classes=num_classes)
    elif args.backbone.startswith("vgg16"):
        model = torchvision.models.vgg16(num_classes=num_classes)
    
    if args.backbone.startswith('resnet'):
        # Strip the "conv4"/"conv5" suffix to build the weights file name.
        model_name = args.backbone.split('conv')[0] + "_" + args.pretrain
    else:
        model_name = args.backbone + "_" + args.pretrain
    file_path = join("data", "pretrained_nets", model_name +".pth")
    
    if not os.path.exists(file_path):
        gdd.download_file_from_google_drive(file_id=PRETRAINED_MODELS[model_name],
                                            dest_path=file_path)
    state_dict = torch.load(file_path, map_location=torch.device('cpu'))
    model.load_state_dict(state_dict)
    return model
def get_backbone(args):
    """Build the feature-extraction backbone selected by args.backbone.

    Side effects: sets args.work_with_tokens and args.features_dim so the
    aggregation layer can be sized accordingly.
    """
    # The aggregation layer works differently based on the type of architecture
    args.work_with_tokens = args.backbone.startswith('cct') or args.backbone.startswith('vit')
    if args.backbone.startswith("resnet"):
        if args.pretrain in ['places', 'gldv2']:
            backbone = get_pretrained_model(args)
        elif args.backbone.startswith("resnet18"):
            backbone = torchvision.models.resnet18(pretrained=True)
        elif args.backbone.startswith("resnet50"):
            backbone = torchvision.models.resnet50(pretrained=True)
        elif args.backbone.startswith("resnet101"):
            backbone = torchvision.models.resnet101(pretrained=True)
        for name, child in backbone.named_children():
            # Freeze layers before conv_3
            if name == "layer3":
                break
            for params in child.parameters():
                params.requires_grad = False
        if args.backbone.endswith("conv4"):
            logging.debug(f"Train only conv4_x of the resnet{args.backbone.split('conv')[0]} (remove conv5_x), freeze the previous ones")
            layers = list(backbone.children())[:-3]
        elif args.backbone.endswith("conv5"):
            logging.debug(f"Train only conv4_x and conv5_x of the resnet{args.backbone.split('conv')[0]}, freeze the previous ones")
            layers = list(backbone.children())[:-2]
    elif args.backbone == "vgg16":
        if args.pretrain in ['places', 'gldv2']:
            backbone = get_pretrained_model(args)
        else:
            backbone = torchvision.models.vgg16(pretrained=True)
        layers = list(backbone.features.children())[:-2]
        for l in layers[:-5]:
            for p in l.parameters(): p.requires_grad = False
        logging.debug("Train last layers of the vgg16, freeze the previous ones")
    elif args.backbone == "alexnet":
        backbone = torchvision.models.alexnet(pretrained=True)
        layers = list(backbone.features.children())[:-2]
        for l in layers[:5]:
            for p in l.parameters(): p.requires_grad = False
        logging.debug("Train last layers of the alexnet, freeze the previous ones")
    elif args.backbone.startswith("cct"):
        if args.backbone.startswith("cct384"):
            backbone = cct_14_7x2_384(pretrained=True, progress=True, aggregation=args.aggregation)
        if args.trunc_te:
            logging.debug(f"Truncate CCT at transformers encoder {args.trunc_te}")
            backbone.classifier.blocks = torch.nn.ModuleList(backbone.classifier.blocks[:args.trunc_te].children())
        if args.freeze_te:
            # Freeze everything, then unfreeze encoders after index freeze_te.
            logging.debug(f"Freeze all the layers up to tranformer encoder {args.freeze_te}")
            for p in backbone.parameters():
                p.requires_grad = False
            for name, child in backbone.classifier.blocks.named_children():
                if int(name) > args.freeze_te:
                    for params in child.parameters():
                        params.requires_grad = True
        args.features_dim = 384
        return backbone
    elif args.backbone.startswith("vit"):
        assert args.resize[0] in [224, 384], f'Image size for ViT must be either 224 or 384, but it\'s {args.resize[0]}'
        if args.resize[0] == 224:
            backbone = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
        elif args.resize[0] == 384:
            backbone = ViTModel.from_pretrained('google/vit-base-patch16-384')
        if args.trunc_te:
            logging.debug(f"Truncate ViT at transformers encoder {args.trunc_te}")
            backbone.encoder.layer = backbone.encoder.layer[:args.trunc_te]
        if args.freeze_te:
            # Freeze everything, then unfreeze encoders after index freeze_te.
            logging.debug(f"Freeze all the layers up to tranformer encoder {args.freeze_te+1}")
            for p in backbone.parameters():
                p.requires_grad = False
            for name, child in backbone.encoder.layer.named_children():
                if int(name) > args.freeze_te:
                    for params in child.parameters():
                        params.requires_grad = True
        backbone = VitWrapper(backbone, args.aggregation)
        args.features_dim = 768
        return backbone
    backbone = torch.nn.Sequential(*layers)
    args.features_dim = get_output_channels_dim(backbone)  # Dynamically obtain number of channels in output
    return backbone
class VitWrapper(nn.Module):
    """Wrap a HuggingFace ViT so it returns plain tensors.

    Token-based aggregations (netvlad/gem) receive every patch token;
    all other aggregations receive only the CLS token.
    """

    def __init__(self, vit_model, aggregation):
        super().__init__()
        self.vit_model = vit_model
        self.aggregation = aggregation

    def forward(self, x):
        hidden = self.vit_model(x).last_hidden_state
        if self.aggregation in ["netvlad", "gem"]:
            return hidden[:, 1:, :]  # drop CLS, keep patch tokens
        return hidden[:, 0, :]  # CLS token only
def get_output_channels_dim(model):
    """Return the number of channels in the output of a model."""
    # Probe with a dummy ImageNet-sized batch of ones.
    dummy_input = torch.ones([1, 3, 224, 224])
    return model(dummy_input).shape[1]
| 9,160 | 43.687805 | 137 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/functional.py |
import math
import torch
import torch.nn.functional as F
def sare_ind(query, positive, negative):
    """SARE loss with an independent negative.

    All three inputs are expected to have shape 1 x n_features.
    """
    pos_sqdist = ((query - positive) ** 2).sum(1)
    neg_sqdist = ((query - negative) ** 2).sum(1)
    logits = -torch.cat((pos_sqdist, neg_sqdist))
    log_probs = F.log_softmax(logits, 0)
    # With a batch this would be (-log_probs[:, 0]).mean(); batch size is 1 here.
    return -log_probs[0]


def sare_joint(query, positive, negatives):
    """SARE loss with joint negatives.

    query and positive have to be 1 x n_features; negatives has to be
    n_negative x n_features (n_negative is usually 10).

    The computation is identical to sare_ind when batch_size=1 because every
    operation is vectorial; a separate function is kept for clarity, since
    the two are meant to be called in conceptually different situations.
    """
    return sare_ind(query, positive, negatives)
def mac(x):
    """Maximum Activations of Convolutions: global max-pool to a 1x1 map."""
    return F.adaptive_max_pool2d(x, output_size=(1, 1))
def spoc(x):
    """Sum-Pooling of Convolutions: global average-pool to a 1x1 map."""
    return F.adaptive_avg_pool2d(x, output_size=(1, 1))
def gem(x, p=3, eps=1e-6, work_with_tokens=False):
    """Generalized-mean pooling: mean of x**p over space/tokens, then 1/p root."""
    powered = x.clamp(min=eps).pow(p)
    if work_with_tokens:
        # Token input (N, tokens, dim): pool over the token axis. The final
        # unsqueeze keeps the output compatible with Flatten.
        powered = powered.permute(0, 2, 1)
        return F.avg_pool1d(powered, powered.size(-1)).pow(1./p).unsqueeze(3)
    return F.avg_pool2d(powered, (powered.size(-2), powered.size(-1))).pow(1./p)
def rmac(x, L=3, eps=1e-6):
    """Regional Maximum Activations of Convolutions (R-MAC).

    Max-pools L2-normalized descriptors over a pyramid of L levels of square
    regions (plus the whole map) and sums them. Returns a (N, C, 1, 1) tensor.
    """
    ovr = 0.4 # desired overlap of neighboring regions
    steps = torch.Tensor([2, 3, 4, 5, 6, 7]) # possible regions for the long dimension
    W = x.size(3)
    H = x.size(2)
    w = min(W, H)
    # w2 = math.floor(w/2.0 - 1)
    b = (max(H, W)-w)/(steps-1)
    (tmp, idx) = torch.min(torch.abs(((w**2 - w*b)/w**2)-ovr), 0) # steps(idx) regions for long dimension
    # region overplus per dimension
    Wd = 0;
    Hd = 0;
    if H < W:
        Wd = idx.item() + 1
    elif H > W:
        Hd = idx.item() + 1
    # Start with the whole-image region, L2-normalized per channel vector.
    v = F.max_pool2d(x, (x.size(-2), x.size(-1)))
    v = v / (torch.norm(v, p=2, dim=1, keepdim=True) + eps).expand_as(v)
    for l in range(1, L+1):
        # wl: side length of the square regions at this pyramid level.
        wl = math.floor(2*w/(l+1))
        wl2 = math.floor(wl/2 - 1)
        if l+Wd == 1:
            b = 0
        else:
            b = (W-wl)/(l+Wd-1)
        cenW = torch.floor(wl2 + torch.Tensor(range(l-1+Wd+1))*b) - wl2 # center coordinates
        if l+Hd == 1:
            b = 0
        else:
            b = (H-wl)/(l+Hd-1)
        cenH = torch.floor(wl2 + torch.Tensor(range(l-1+Hd+1))*b) - wl2 # center coordinates
        for i_ in cenH.tolist():
            for j_ in cenW.tolist():
                if wl == 0:
                    continue
                # Crop the wl x wl region, max-pool it, normalize, accumulate.
                R = x[:,:,(int(i_)+torch.Tensor(range(wl)).long()).tolist(),:]
                R = R[:,:,:,(int(j_)+torch.Tensor(range(wl)).long()).tolist()]
                vt = F.max_pool2d(R, (R.size(-2), R.size(-1)))
                vt = vt / (torch.norm(vt, p=2, dim=1, keepdim=True) + eps).expand_as(vt)
                v += vt
    return v
| 3,170 | 36.305882 | 105 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/normalization.py |
import torch.nn as nn
import torch.nn.functional as F
class L2Norm(nn.Module):
    """L2-normalize the input along a given dimension (channels by default)."""

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        normalized = F.normalize(x, p=2, dim=self.dim)
        return normalized
| 238 | 18.916667 | 48 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/sync_batchnorm/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
    """Plain namespace shared between replicas of the same sub-module."""
    pass


def execute_replication_callbacks(modules):
    """
    Invoke the replication callback `__data_parallel_replicate__(ctx, copy_id)`
    on every sub-module of every replica created by the original replication.

    All replicas are isomorphic, so the j-th sub-module of each copy shares a
    single context object, through which the copies can exchange information.
    The master copy (modules[0]) is guaranteed to have its callbacks run
    before any of the slave copies.
    """
    master = modules[0]
    num_submodules = len(list(master.modules()))
    contexts = [CallbackContext() for _ in range(num_submodules)]
    for copy_id, replica in enumerate(modules):
        for j, sub_module in enumerate(replica.modules()):
            if hasattr(sub_module, '__data_parallel_replicate__'):
                sub_module.__data_parallel_replicate__(contexts[j], copy_id)
class DataParallelWithCallback(DataParallel):
    """
    `DataParallel` variant that runs each sub-module's
    `__data_parallel_replicate__(ctx, copy_id)` callback right after the
    replicas are created by the original `replicate` function.
    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        # sync_bn.__data_parallel_replicate__ will be invoked.
    """

    def replicate(self, module, device_ids):
        replicas = super(DataParallelWithCallback, self).replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas
def patch_replication_callback(data_parallel):
    """
    Monkey-patch an existing `DataParallel` object so that replication
    callbacks run. Useful when you have a customized `DataParallel`
    implementation and cannot swap it for `DataParallelWithCallback`.
    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
        > patch_replication_callback(sync_bn)
        # this is equivalent to
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
    """
    assert isinstance(data_parallel, DataParallel)
    original_replicate = data_parallel.replicate

    @functools.wraps(original_replicate)
    def replicate_with_callbacks(module, device_ids):
        replicas = original_replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas

    data_parallel.replicate = replicate_with_callbacks
| 3,226 | 32.968421 | 115 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/sync_batchnorm/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import torch
class TorchTestCase(unittest.TestCase):
    """TestCase with an approximate tensor-equality assertion."""

    def assertTensorClose(self, x, y):
        """Assert that x and y are element-wise close (atol=1e-5, rtol=1e-3)."""
        max_abs_diff = float((x - y).abs().max())
        if (y == 0).all():
            # The relative diff is undefined against an all-zero target.
            max_rel_diff = 'NaN'
        else:
            max_rel_diff = float((max_abs_diff / y).abs().max())
        message = (
            'Tensor close check failed\n'
            'adiff={}\n'
            'rdiff={}\n'
        ).format(max_abs_diff, max_rel_diff)
        self.assertTrue(torch.allclose(x, y, atol=1e-5, rtol=1e-3), message)
| 768 | 24.633333 | 76 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/sync_batchnorm/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import contextlib
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
try:
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
except ImportError:
ReduceAddCoalesced = Broadcast = None
try:
from jactorch.parallel.comm import SyncMaster
from jactorch.parallel.data_parallel import JacDataParallel as DataParallelWithCallback
except ImportError:
from .comm import SyncMaster
from .replicate import DataParallelWithCallback
# Public API of this module.
__all__ = [
    'set_sbn_eps_mode',
    'SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d',
    'patch_sync_batchnorm', 'convert_model'
]
# Global mode controlling how eps enters the inverse-std computation in
# _SynchronizedBatchNorm._compute_mean_std: 'clamp' or 'plus'.
# Change it via set_sbn_eps_mode().
SBN_EPS_MODE = 'clamp'
def set_sbn_eps_mode(mode):
    """Select how eps is applied when inverting the batch variance.

    'clamp' uses ``var.clamp(eps) ** -0.5``; 'plus' uses ``(var + eps) ** -0.5``.
    Any other value raises AssertionError.
    """
    assert mode in ('clamp', 'plus')
    global SBN_EPS_MODE
    SBN_EPS_MODE = mode
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dimensions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
# Message sent from each replica to the master: per-device partial sum,
# sum of squares, and element count.
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
# Message broadcast back to each replica; despite the field name 'sum',
# it carries the global mean and the inverse standard deviation.
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
    """Base class implementing batch normalization synchronized across devices.

    The replica on device 0 acts as the master: each replica sends its
    per-device (sum, sum-of-squares, count) to the master, which computes the
    global mean / inverse std, updates the running statistics, and broadcasts
    the result back so all replicas normalize with identical statistics.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
        assert ReduceAddCoalesced is not None, 'Can not use Synchronized Batch Normalization without CUDA support.'
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine,
                                                     track_running_stats=track_running_stats)
        if not self.track_running_stats:
            import warnings
            warnings.warn('track_running_stats=False is not supported by the SynchronizedBatchNorm.')
        # Master/slave plumbing; populated by __data_parallel_replicate__
        # when the module is replicated across devices.
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None

    def forward(self, input):
        # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
        if not (self._is_parallel and self.training):
            return F.batch_norm(
                input, self.running_mean, self.running_var, self.weight, self.bias,
                self.training, self.momentum, self.eps)
        # Resize the input to (B, C, -1).
        input_shape = input.size()
        assert input.size(1) == self.num_features, 'Channel size mismatch: got {}, expect {}.'.format(input.size(1), self.num_features)
        input = input.view(input.size(0), self.num_features, -1)
        # Compute the sum and square-sum.
        sum_size = input.size(0) * input.size(2)
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft(input ** 2)
        # Reduce-and-broadcast the statistics. Replica 0 is the master; all
        # other replicas hand their partial sums to it through a slave pipe.
        if self._parallel_id == 0:
            mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
        # Compute the output.
        if self.affine:
            # MJY:: Fuse the multiplication for speed.
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
        else:
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
        # Reshape it.
        return output.view(input_shape)

    def __data_parallel_replicate__(self, ctx, copy_id):
        """Callback invoked by DataParallelWithCallback after replication."""
        self._is_parallel = True
        self._parallel_id = copy_id
        # parallel_id == 0 means master device.
        if self._parallel_id == 0:
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)

    def _data_parallel_master(self, intermediates):
        """Reduce the sum and square-sum, compute the statistics, and broadcast it."""
        # Always using same "device order" makes the ReduceAdd operation faster.
        # Thanks to:: Tete Xiao (http://tetexiao.com/)
        intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]  # flatten
        target_gpus = [i[1].sum.get_device() for i in intermediates]
        sum_size = sum([i[1].sum_size for i in intermediates])
        sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
        outputs = []
        for i, rec in enumerate(intermediates):
            # Each replica receives its own (mean, inv_std) pair of the broadcast.
            outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
        return outputs

    def _compute_mean_std(self, sum_, ssum, size):
        """Compute the mean and standard-deviation with sum and square-sum. This method
        also maintains the moving average on the master device."""
        assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = sum_ / size
        sumvar = ssum - sum_ * mean
        unbias_var = sumvar / (size - 1)
        bias_var = sumvar / size
        # The hasattr guard keeps compatibility with very old PyTorch versions
        # that predate torch.no_grad().
        if hasattr(torch, 'no_grad'):
            with torch.no_grad():
                self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
                self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
        else:
            self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
            self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
        # Normalization itself uses the biased variance, as in standard BN.
        if SBN_EPS_MODE == 'clamp':
            return mean, bias_var.clamp(self.eps) ** -0.5
        elif SBN_EPS_MODE == 'plus':
            return mean, (bias_var + self.eps) ** -0.5
        else:
            raise ValueError('Unknown EPS mode: {}.'.format(SBN_EPS_MODE))
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over ``(N, C)`` or ``(N, C, L)`` inputs.

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in :class:`torch.nn.BatchNorm1d`, the mean and
    standard-deviation are reduced across all devices during multi-GPU
    training, so statistics are computed over the whole mini-batch instead of
    each device's shard. For one-GPU or CPU-only use it behaves exactly like
    the built-in implementation.

    Args:
        num_features: C from an expected input of size
            ``batch_size x num_features [x width]``
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: momentum for the running_mean / running_var computation.
            Default: 0.1
        affine: when ``True``, adds learnable affine parameters.
            Default: ``True``

    Shape::
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: same shape as input

    Examples:
        >>> m = SynchronizedBatchNorm1d(100)
        >>> m = SynchronizedBatchNorm1d(100, affine=False)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100)))
    """

    def _check_input_dim(self, input):
        # Only (N, C) and (N, C, L) layouts are supported.
        if input.dim() not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over ``(N, C, H, W)`` inputs.

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in :class:`torch.nn.BatchNorm2d`, the mean and
    standard-deviation are reduced across all devices during multi-GPU
    training, so statistics are computed over the whole mini-batch instead of
    each device's shard. For one-GPU or CPU-only use it behaves exactly like
    the built-in implementation.

    Args:
        num_features: C from an expected input of size
            ``batch_size x num_features x height x width``
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: momentum for the running_mean / running_var computation.
            Default: 0.1
        affine: when ``True``, adds learnable affine parameters.
            Default: ``True``

    Shape::
        - Input: :math:`(N, C, H, W)`
        - Output: same shape as input

    Examples:
        >>> m = SynchronizedBatchNorm2d(100)
        >>> m = SynchronizedBatchNorm2d(100, affine=False)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100, 35, 45)))
    """

    def _check_input_dim(self, input):
        ndim = input.dim()
        if ndim != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(ndim))
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over ``(N, C, D, H, W)`` inputs.

    .. math::
        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in :class:`torch.nn.BatchNorm3d`, the mean and
    standard-deviation are reduced across all devices during multi-GPU
    training, so statistics are computed over the whole mini-batch instead of
    each device's shard. For one-GPU or CPU-only use it behaves exactly like
    the built-in implementation.

    Args:
        num_features: C from an expected input of size
            ``batch_size x num_features x depth x height x width``
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: momentum for the running_mean / running_var computation.
            Default: 0.1
        affine: when ``True``, adds learnable affine parameters.
            Default: ``True``

    Shape::
        - Input: :math:`(N, C, D, H, W)`
        - Output: same shape as input

    Examples:
        >>> m = SynchronizedBatchNorm3d(100)
        >>> m = SynchronizedBatchNorm3d(100, affine=False)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10)))
    """

    def _check_input_dim(self, input):
        ndim = input.dim()
        if ndim != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(ndim))
@contextlib.contextmanager
def patch_sync_batchnorm():
    """Context manager that temporarily replaces ``torch.nn.BatchNorm1d/2d/3d``
    with their synchronized counterparts.

    Fix: the restoration now runs in a ``finally`` block, so the patch is
    undone even when the code inside the ``with`` body raises; previously an
    exception left ``torch.nn`` permanently monkey-patched.
    """
    import torch.nn as nn

    backup = nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d

    nn.BatchNorm1d = SynchronizedBatchNorm1d
    nn.BatchNorm2d = SynchronizedBatchNorm2d
    nn.BatchNorm3d = SynchronizedBatchNorm3d

    try:
        yield
    finally:
        nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d = backup
def convert_model(module):
    """Recursively replace every ``torch.nn.BatchNorm*N*d`` in *module* with
    the corresponding ``SynchronizedBatchNorm*N*d``, copying running
    statistics and affine parameters.

    Args:
        module: the input module to convert to a SyncBN model

    Examples:
        >>> import torch.nn as nn
        >>> import torchvision
        >>> # m is a standard pytorch model
        >>> m = torchvision.models.resnet18(True)
        >>> m = nn.DataParallel(m)
        >>> # after convert, m is using SyncBN
        >>> m = convert_model(m)
    """
    # A DataParallel wrapper is rebuilt around the converted inner module.
    if isinstance(module, torch.nn.DataParallel):
        inner = convert_model(module.module)
        return DataParallelWithCallback(inner, device_ids=module.device_ids)

    converted = module
    bn_pairs = (
        (torch.nn.modules.batchnorm.BatchNorm1d, SynchronizedBatchNorm1d),
        (torch.nn.modules.batchnorm.BatchNorm2d, SynchronizedBatchNorm2d),
        (torch.nn.modules.batchnorm.BatchNorm3d, SynchronizedBatchNorm3d),
    )
    for torch_bn, sync_bn in bn_pairs:
        if isinstance(module, torch_bn):
            converted = sync_bn(module.num_features, module.eps,
                                module.momentum, module.affine)
            # Share the running-statistic buffers and clone the affine weights.
            converted.running_mean = module.running_mean
            converted.running_var = module.running_var
            if module.affine:
                converted.weight.data = module.weight.data.clone().detach()
                converted.bias.data = module.bias.data.clone().detach()

    for name, child in module.named_children():
        converted.add_module(name, convert_model(child))

    return converted
| 16,465 | 38.869249 | 135 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/sync_batchnorm/batchnorm_reimpl.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : batchnorm_reimpl.py
# Author : acgtyrant
# Date : 11/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
__all__ = ['BatchNorm2dReimpl']
class BatchNorm2dReimpl(nn.Module):
    """
    Pure-Python re-implementation of BatchNorm2d, used to validate the
    numerical stability of the synchronized version.

    Note: it always normalizes with the current batch statistics (no
    train/eval switch) — that is intentional for testing.

    Author: acgtyrant
    See also:
    https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.weight = nn.Parameter(torch.empty(num_features))
        self.bias = nn.Parameter(torch.empty(num_features))
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_running_stats(self):
        """Reset running mean/var buffers to their initial values."""
        self.running_mean.zero_()
        self.running_var.fill_(1)

    def reset_parameters(self):
        """Re-initialize affine parameters and the running statistics."""
        self.reset_running_stats()
        init.uniform_(self.weight)
        init.zeros_(self.bias)

    def forward(self, input_):
        n, c, h, w = input_.size()
        count = n * h * w
        # Flatten to (C, N*H*W) so all statistics are per-channel reductions.
        flat = input_.permute(1, 0, 2, 3).contiguous().view(c, count)

        total = flat.sum(1)
        total_sq = flat.pow(2).sum(1)
        mean = total / count
        sumvar = total_sq - total * mean

        self.running_mean = (
            (1 - self.momentum) * self.running_mean
            + self.momentum * mean.detach()
        )
        unbias_var = sumvar / (count - 1)
        self.running_var = (
            (1 - self.momentum) * self.running_var
            + self.momentum * unbias_var.detach()
        )

        # Normalize with the biased variance, as standard BN does.
        inv_std = 1 / (sumvar / count + self.eps).pow(0.5)
        normalized = (
            (flat - mean.unsqueeze(1)) * inv_std.unsqueeze(1) *
            self.weight.unsqueeze(1) + self.bias.unsqueeze(1))
        return normalized.view(c, n, h, w).permute(1, 0, 2, 3).contiguous()
| 2,385 | 30.813333 | 95 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/transformers.py | import torch
from torch.nn import Module, ModuleList, Linear, Dropout, LayerNorm, Identity, Parameter, init
import torch.nn.functional as F
from .stochastic_depth import DropPath
class Attention(Module):
    """
    Multi-head self-attention (timm-style): a single bias-free QKV projection,
    scaled dot-product attention, and an output projection.

    Obtained from timm: github.com:rwightman/pytorch-image-models
    """

    def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1):
        super().__init__()
        self.num_heads = num_heads
        self.scale = (dim // num_heads) ** -0.5
        self.qkv = Linear(dim, dim * 3, bias=False)
        self.attn_drop = Dropout(attention_dropout)
        self.proj = Linear(dim, dim)
        self.proj_drop = Dropout(projection_dropout)

    def forward(self, x):
        batch, tokens, dim = x.shape
        head_dim = dim // self.num_heads
        # (3, B, heads, tokens, head_dim)
        qkv = (self.qkv(x)
               .reshape(batch, tokens, 3, self.num_heads, head_dim)
               .permute(2, 0, 3, 1, 4))
        query, key, value = qkv.unbind(0)

        scores = query @ key.transpose(-2, -1) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))

        out = (weights @ value).transpose(1, 2).reshape(batch, tokens, dim)
        return self.proj_drop(self.proj(out))
class MaskedAttention(Module):
    """Multi-head self-attention with an optional boolean token mask.

    Masked positions (False in ``mask``) are excluded from attention by
    setting their scores to the most negative representable value before the
    softmax.
    """

    def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1):
        super().__init__()
        self.num_heads = num_heads
        self.scale = (dim // num_heads) ** -0.5
        self.qkv = Linear(dim, dim * 3, bias=False)
        self.attn_drop = Dropout(attention_dropout)
        self.proj = Linear(dim, dim)
        self.proj_drop = Dropout(projection_dropout)

    def forward(self, x, mask=None):
        batch, tokens, dim = x.shape
        head_dim = dim // self.num_heads
        qkv = (self.qkv(x)
               .reshape(batch, tokens, 3, self.num_heads, head_dim)
               .permute(2, 0, 3, 1, 4))
        query, key, value = qkv.unbind(0)

        scores = query @ key.transpose(-2, -1) * self.scale
        if mask is not None:
            assert mask.shape[-1] == scores.shape[-1], 'mask has incorrect dimensions'
            # Outer product turns the per-token mask into a pairwise mask,
            # then it is repeated for every attention head.
            pair_mask = (mask[:, None, :] * mask[:, :, None]).unsqueeze(1)
            pair_mask = pair_mask.repeat(1, self.num_heads, 1, 1)
            scores.masked_fill_(~pair_mask, -torch.finfo(scores.dtype).max)

        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ value).transpose(1, 2).reshape(batch, tokens, dim)
        return self.proj_drop(self.proj(out))
class TransformerEncoderLayer(Module):
    """
    Pre-norm transformer encoder block: self-attention followed by a GELU MLP,
    each wrapped in a residual connection with stochastic depth.

    Inspired by torch.nn.TransformerEncoderLayer and timm.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 attention_dropout=0.1, drop_path_rate=0.1):
        super(TransformerEncoderLayer, self).__init__()
        self.pre_norm = LayerNorm(d_model)
        self.self_attn = Attention(dim=d_model, num_heads=nhead,
                                   attention_dropout=attention_dropout, projection_dropout=dropout)
        self.linear1 = Linear(d_model, dim_feedforward)
        self.dropout1 = Dropout(dropout)
        self.norm1 = LayerNorm(d_model)
        self.linear2 = Linear(dim_feedforward, d_model)
        self.dropout2 = Dropout(dropout)
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else Identity()
        self.activation = F.gelu

    def forward(self, src: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        # Attention sub-block (pre-norm residual), then normalize.
        attended = self.norm1(src + self.drop_path(self.self_attn(self.pre_norm(src))))
        # Feed-forward sub-block with its own residual connection.
        hidden = self.dropout1(self.activation(self.linear1(attended)))
        return attended + self.drop_path(self.dropout2(self.linear2(hidden)))
class MaskedTransformerEncoderLayer(Module):
    """
    Pre-norm transformer encoder block with token-mask support: masked
    self-attention followed by a GELU MLP, each in a residual connection with
    stochastic depth.

    Inspired by torch.nn.TransformerEncoderLayer and timm.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 attention_dropout=0.1, drop_path_rate=0.1):
        super(MaskedTransformerEncoderLayer, self).__init__()
        self.pre_norm = LayerNorm(d_model)
        self.self_attn = MaskedAttention(dim=d_model, num_heads=nhead,
                                         attention_dropout=attention_dropout, projection_dropout=dropout)
        self.linear1 = Linear(d_model, dim_feedforward)
        self.dropout1 = Dropout(dropout)
        self.norm1 = LayerNorm(d_model)
        self.linear2 = Linear(dim_feedforward, d_model)
        self.dropout2 = Dropout(dropout)
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else Identity()
        self.activation = F.gelu

    def forward(self, src: torch.Tensor, mask=None, *args, **kwargs) -> torch.Tensor:
        # Attention sub-block (pre-norm residual), then normalize.
        attended = self.norm1(src + self.drop_path(self.self_attn(self.pre_norm(src), mask)))
        # Feed-forward sub-block with its own residual connection.
        hidden = self.dropout1(self.activation(self.linear1(attended)))
        return attended + self.drop_path(self.dropout2(self.linear2(hidden)))
class TransformerClassifier(Module):
    """Stack of pre-norm transformer encoder layers used as a feature extractor.

    Note: in this variant the CCT classification head (sequence pooling /
    ``fc``) is intentionally disabled — ``forward`` returns the full
    LayerNorm-ed token sequence of shape ``(B, sequence_length, embedding_dim)``.

    Args:
        seq_pool: when ``False`` a learnable class token is prepended.
        embedding_dim: token embedding size.
        num_layers: number of encoder layers.
        num_heads: attention heads per layer.
        mlp_ratio: feed-forward width as a multiple of ``embedding_dim``.
        num_classes: unused here (head disabled); kept for compatibility.
        dropout: dropout applied to embeddings and inside the layers.
        attention_dropout: dropout on the attention weights.
        stochastic_depth: maximum drop-path rate, scaled linearly per layer.
        positional_embedding: 'learnable', 'sine' or 'none' (unknown values
            fall back to 'sine').
        sequence_length: number of input tokens; required unless
            ``positional_embedding == 'none'``.
    """

    def __init__(self,
                 seq_pool=True,
                 embedding_dim=768,
                 num_layers=12,
                 num_heads=12,
                 mlp_ratio=4.0,
                 num_classes=1000,
                 dropout=0.1,
                 attention_dropout=0.1,
                 stochastic_depth=0.1,
                 positional_embedding='learnable',
                 sequence_length=None):
        super().__init__()
        # Unknown positional-embedding types fall back to 'sine'.
        positional_embedding = positional_embedding if \
            positional_embedding in ['sine', 'learnable', 'none'] else 'sine'
        dim_feedforward = int(embedding_dim * mlp_ratio)
        self.embedding_dim = embedding_dim
        self.sequence_length = sequence_length
        self.seq_pool = seq_pool

        assert sequence_length is not None or positional_embedding == 'none', \
            f"Positional embedding is set to {positional_embedding} and" \
            f" the sequence length was not specified."

        if not seq_pool:
            sequence_length += 1
            self.class_emb = Parameter(torch.zeros(1, 1, self.embedding_dim),
                                       requires_grad=True)
        else:
            self.attention_pool = Linear(self.embedding_dim, 1)

        if positional_embedding != 'none':
            if positional_embedding == 'learnable':
                self.positional_emb = Parameter(torch.zeros(1, sequence_length, embedding_dim),
                                                requires_grad=True)
                init.trunc_normal_(self.positional_emb, std=0.2)
            else:
                self.positional_emb = Parameter(self.sinusoidal_embedding(sequence_length, embedding_dim),
                                                requires_grad=False)
        else:
            self.positional_emb = None

        self.dropout = Dropout(p=dropout)
        # Drop-path rate increases linearly across the layer stack.
        dpr = [x.item() for x in torch.linspace(0, stochastic_depth, num_layers)]
        self.blocks = ModuleList([
            TransformerEncoderLayer(d_model=embedding_dim, nhead=num_heads,
                                    dim_feedforward=dim_feedforward, dropout=dropout,
                                    attention_dropout=attention_dropout, drop_path_rate=dpr[i])
            for i in range(num_layers)])
        self.norm = LayerNorm(embedding_dim)
        self.apply(self.init_weight)

    def forward(self, x):
        if self.positional_emb is None and x.size(1) < self.sequence_length:
            # Pad short token sequences up to the expected length.
            # Bugfix: the original referenced the non-existent attribute
            # ``self.n_channels`` here, raising AttributeError.
            x = F.pad(x, (0, 0, 0, self.sequence_length - x.size(1)), mode='constant', value=0)

        if not self.seq_pool:
            cls_token = self.class_emb.expand(x.shape[0], -1, -1)
            x = torch.cat((cls_token, x), dim=1)

        if self.positional_emb is not None:
            x += self.positional_emb

        x = self.dropout(x)

        for blk in self.blocks:
            x = blk(x)

        # Return the full normalized token sequence (no pooling / fc head).
        return self.norm(x)

    @staticmethod
    def init_weight(m):
        """Truncated-normal init for Linear layers, standard init for LayerNorm."""
        if isinstance(m, Linear):
            init.trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, LayerNorm):
            init.constant_(m.bias, 0)
            init.constant_(m.weight, 1.0)

    @staticmethod
    def sinusoidal_embedding(n_channels, dim):
        """Fixed sine/cosine positional embedding of shape (1, n_channels, dim)."""
        pe = torch.FloatTensor([[p / (10000 ** (2 * (i // 2) / dim)) for i in range(dim)]
                                for p in range(n_channels)])
        pe[:, 0::2] = torch.sin(pe[:, 0::2])
        pe[:, 1::2] = torch.cos(pe[:, 1::2])
        return pe.unsqueeze(0)
class MaskedTransformerClassifier(Module):
    """Transformer encoder stack with padding-mask support and a
    sequence-pool (or class-token) + linear classification head.

    Args:
        seq_pool: when ``True`` use learned attention pooling; otherwise a
            class token is prepended and used for classification.
        embedding_dim: token embedding size.
        num_layers: number of encoder layers.
        num_heads: attention heads per layer.
        mlp_ratio: feed-forward width as a multiple of ``embedding_dim``.
        num_classes: output size of the final linear head.
        dropout: dropout applied to embeddings and inside the layers.
        attention_dropout: dropout on the attention weights.
        stochastic_depth: maximum drop-path rate, scaled linearly per layer.
        positional_embedding: 'sine', 'learnable' or 'none' (unknown values
            fall back to 'sine').
        seq_len: number of input tokens; required unless
            ``positional_embedding == 'none'``.
    """

    def __init__(self,
                 seq_pool=True,
                 embedding_dim=768,
                 num_layers=12,
                 num_heads=12,
                 mlp_ratio=4.0,
                 num_classes=1000,
                 dropout=0.1,
                 attention_dropout=0.1,
                 stochastic_depth=0.1,
                 positional_embedding='sine',
                 seq_len=None,
                 *args, **kwargs):
        super().__init__()
        # Unknown positional-embedding types fall back to 'sine'.
        positional_embedding = positional_embedding if \
            positional_embedding in ['sine', 'learnable', 'none'] else 'sine'
        dim_feedforward = int(embedding_dim * mlp_ratio)
        self.embedding_dim = embedding_dim
        self.seq_len = seq_len
        self.seq_pool = seq_pool

        assert seq_len is not None or positional_embedding == 'none', \
            f"Positional embedding is set to {positional_embedding} and" \
            f" the sequence length was not specified."

        if not seq_pool:
            seq_len += 1
            self.class_emb = Parameter(torch.zeros(1, 1, self.embedding_dim),
                                       requires_grad=True)
        else:
            self.attention_pool = Linear(self.embedding_dim, 1)

        if positional_embedding != 'none':
            if positional_embedding == 'learnable':
                seq_len += 1  # padding idx
                self.positional_emb = Parameter(torch.zeros(1, seq_len, embedding_dim),
                                                requires_grad=True)
                init.trunc_normal_(self.positional_emb, std=0.2)
            else:
                self.positional_emb = Parameter(self.sinusoidal_embedding(seq_len,
                                                                          embedding_dim,
                                                                          padding_idx=True),
                                                requires_grad=False)
        else:
            self.positional_emb = None

        self.dropout = Dropout(p=dropout)
        # Drop-path rate increases linearly across the layer stack.
        dpr = [x.item() for x in torch.linspace(0, stochastic_depth, num_layers)]
        self.blocks = ModuleList([
            MaskedTransformerEncoderLayer(d_model=embedding_dim, nhead=num_heads,
                                          dim_feedforward=dim_feedforward, dropout=dropout,
                                          attention_dropout=attention_dropout, drop_path_rate=dpr[i])
            for i in range(num_layers)])
        self.norm = LayerNorm(embedding_dim)
        self.fc = Linear(embedding_dim, num_classes)
        self.apply(self.init_weight)

    def forward(self, x, mask=None):
        if self.positional_emb is None and x.size(1) < self.seq_len:
            # Pad short token sequences up to the expected length.
            # Bugfix: the original referenced the non-existent attribute
            # ``self.n_channels`` here, raising AttributeError.
            x = F.pad(x, (0, 0, 0, self.seq_len - x.size(1)), mode='constant', value=0)

        if not self.seq_pool:
            cls_token = self.class_emb.expand(x.shape[0], -1, -1)
            x = torch.cat((cls_token, x), dim=1)
            if mask is not None:
                # The class token is always visible to attention.
                mask = torch.cat([torch.ones(size=(mask.shape[0], 1), device=mask.device), mask.float()], dim=1)
                mask = (mask > 0)

        if self.positional_emb is not None:
            x += self.positional_emb

        x = self.dropout(x)

        for blk in self.blocks:
            x = blk(x, mask=mask)

        x = self.norm(x)

        if self.seq_pool:
            # Learned attention pooling over the sequence dimension.
            x = torch.matmul(F.softmax(self.attention_pool(x), dim=1).transpose(-1, -2), x).squeeze(-2)
        else:
            x = x[:, 0]

        return self.fc(x)

    @staticmethod
    def init_weight(m):
        """Truncated-normal init for Linear layers, standard init for LayerNorm."""
        if isinstance(m, Linear):
            init.trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, LayerNorm):
            init.constant_(m.bias, 0)
            init.constant_(m.weight, 1.0)

    @staticmethod
    def sinusoidal_embedding(n_channels, dim, padding_idx=False):
        """Fixed sine/cosine embedding; optionally prepend an all-zero padding row."""
        pe = torch.FloatTensor([[p / (10000 ** (2 * (i // 2) / dim)) for i in range(dim)]
                                for p in range(n_channels)])
        pe[:, 0::2] = torch.sin(pe[:, 0::2])
        pe[:, 1::2] = torch.cos(pe[:, 1::2])
        pe = pe.unsqueeze(0)
        if padding_idx:
            return torch.cat([torch.zeros((1, 1, dim)), pe], dim=1)
        return pe
| 13,211 | 38.088757 | 112 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/embedder.py | import torch.nn as nn
class Embedder(nn.Module):
    """Token-embedding front end with optional pretrained weights and masking.

    ``forward`` returns the embedded tokens (masked positions zeroed when a
    mask is supplied) together with the unchanged mask.
    """

    def __init__(self,
                 word_embedding_dim=300,
                 vocab_size=100000,
                 padding_idx=1,
                 pretrained_weight=None,
                 embed_freeze=False,
                 *args, **kwargs):
        super(Embedder, self).__init__()
        if pretrained_weight is not None:
            self.embeddings = nn.Embedding.from_pretrained(pretrained_weight, freeze=embed_freeze)
        else:
            self.embeddings = nn.Embedding(vocab_size, word_embedding_dim, padding_idx=padding_idx)
        self.embeddings.weight.requires_grad = not embed_freeze

    def forward_mask(self, mask):
        """Collapse *mask* to boolean keep-flags of shape (batch, seq_len)."""
        bsz, seq_len = mask.shape
        flags = mask.view(bsz, seq_len, 1).sum(-1)
        return flags > 0

    def forward(self, x, mask=None):
        embed = self.embeddings(x)
        if mask is not None:
            embed = embed * self.forward_mask(mask).unsqueeze(-1).float()
        return embed, mask

    @staticmethod
    def init_weight(m):
        is_linear = isinstance(m, nn.Linear)
        if is_linear:
            nn.init.trunc_normal_(m.weight, std=.02)
        # NOTE: mirrors the original control flow exactly -- a Linear without
        # a bias (and any non-Linear module) falls through to a plain normal
        # initialization of its weight.
        if is_linear and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        else:
            nn.init.normal_(m.weight)
| 1,332 | 34.078947 | 96 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/stochastic_depth.py | # Thanks to rwightman's timm package
# github.com:rwightman/pytorch-image-models
import torch
import torch.nn as nn
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """
    Per-sample stochastic depth (when applied in the main path of residual
    blocks). During training, each sample's branch output is zeroed with
    probability ``drop_prob`` and the survivors are rescaled by
    ``1 / (1 - drop_prob)`` so the expectation is unchanged.

    Obtained from: github.com:rwightman/pytorch-image-models
    """
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over the remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    random_mask.floor_()  # binarize
    return x.div(keep_prob) * random_mask
class DropPath(nn.Module):
    """
    Stochastic-depth module: in training mode, zeroes a whole sample's
    residual branch with probability ``drop_prob`` and rescales survivors by
    ``1 / (1 - drop_prob)``; in eval mode it is the identity.

    Obtained from: github.com:rwightman/pytorch-image-models
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        prob = self.drop_prob
        if not self.training or prob == 0.:
            return x
        keep_prob = 1 - prob
        # One Bernoulli draw per sample, broadcast over the remaining dims.
        mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        random_mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
        random_mask.floor_()  # binarize
        return x.div(keep_prob) * random_mask
| 1,586 | 38.675 | 108 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/cct.py | from torch.hub import load_state_dict_from_url
import torch.nn as nn
import torch
import torch.nn.functional as F
from .transformers import TransformerClassifier
from .tokenizer import Tokenizer
from .helpers import pe_check
from timm.models.registry import register_model
# Download URLs for the officially released pretrained CCT checkpoints,
# keyed by architecture variant name (used by _cct when pretrained=True).
model_urls = {
    'cct_7_3x1_32':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_7_3x1_32_cifar10_300epochs.pth',
    'cct_7_3x1_32_sine':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_7_3x1_32_sine_cifar10_5000epochs.pth',
    'cct_7_3x1_32_c100':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_7_3x1_32_cifar100_300epochs.pth',
    'cct_7_3x1_32_sine_c100':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_7_3x1_32_sine_cifar100_5000epochs.pth',
    'cct_7_7x2_224_sine':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_7_7x2_224_flowers102.pth',
    'cct_14_7x2_224':
        'https://shi-labs.com/projects/cct/checkpoints/pretrained/cct_14_7x2_224_imagenet.pth',
    'cct_14_7x2_384':
        'https://shi-labs.com/projects/cct/checkpoints/finetuned/cct_14_7x2_384_imagenet.pth',
    'cct_14_7x2_384_fl':
        'https://shi-labs.com/projects/cct/checkpoints/finetuned/cct_14_7x2_384_flowers102.pth',
}
class CCT(nn.Module):
    """Compact Convolutional Transformer: a convolutional tokenizer followed
    by a transformer encoder.

    ``aggregation`` selects how the token sequence is reduced in ``forward``:
    'cls' returns the first token, 'seqpool' applies the classifier's learned
    attention pooling, anything else returns the full token sequence.
    """
    def __init__(self,
                 img_size=224,
                 embedding_dim=768,
                 n_input_channels=3,
                 n_conv_layers=1,
                 kernel_size=7,
                 stride=2,
                 padding=3,
                 pooling_kernel_size=3,
                 pooling_stride=2,
                 pooling_padding=1,
                 dropout=0.,
                 attention_dropout=0.1,
                 stochastic_depth=0.1,
                 num_layers=14,
                 num_heads=6,
                 mlp_ratio=4.0,
                 num_classes=1000,
                 positional_embedding='learnable',
                 aggregation=None,
                 *args, **kwargs):
        super(CCT, self).__init__()

        # Convolutional front end that turns the image into a token sequence.
        self.tokenizer = Tokenizer(n_input_channels=n_input_channels,
                                   n_output_channels=embedding_dim,
                                   kernel_size=kernel_size,
                                   stride=stride,
                                   padding=padding,
                                   pooling_kernel_size=pooling_kernel_size,
                                   pooling_stride=pooling_stride,
                                   pooling_padding=pooling_padding,
                                   max_pool=True,
                                   activation=nn.ReLU,
                                   n_conv_layers=n_conv_layers,
                                   conv_bias=False)

        # The sequence length is derived from the tokenizer's output geometry.
        self.classifier = TransformerClassifier(
            sequence_length=self.tokenizer.sequence_length(n_channels=n_input_channels,
                                                           height=img_size,
                                                           width=img_size),
            embedding_dim=embedding_dim,
            seq_pool=True,
            dropout=dropout,
            attention_dropout=attention_dropout,
            stochastic_depth=stochastic_depth,
            num_layers=num_layers,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            num_classes=num_classes,
            positional_embedding=positional_embedding
        )

        # Unrecognized aggregation modes fall back to returning all tokens.
        if aggregation in ['cls', 'seqpool']:
            self.aggregation = aggregation
        else:
            self.aggregation = None

    def forward(self, x):
        x = self.tokenizer(x)
        x = self.classifier(x)
        if self.aggregation == 'cls':
            # First token only.
            return x[:, 0]
        elif self.aggregation == 'seqpool':
            # Learned attention pooling over the sequence dimension.
            x = torch.matmul(F.softmax(self.classifier.attention_pool(x), dim=1).transpose(-1, -2), x).squeeze(-2)
            return x
        else:
            # x = x.permute(0, 2, 1)
            return x
def _cct(arch, pretrained, progress,
         num_layers, num_heads, mlp_ratio, embedding_dim,
         kernel_size=3, stride=None, padding=None,
         aggregation=None, *args, **kwargs):
    """Build a CCT model, optionally loading pretrained weights for `arch`.

    `stride`/`padding` default to values derived from `kernel_size` when not
    supplied. Remaining positional/keyword arguments are forwarded to `CCT`.
    Raises RuntimeError when `pretrained` is requested for a variant without
    an entry in `model_urls`.
    """
    if stride is None:
        stride = max(1, (kernel_size // 2) - 1)
    if padding is None:
        padding = max(1, kernel_size // 2)
    model = CCT(*args,
                num_layers=num_layers,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                embedding_dim=embedding_dim,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                aggregation=aggregation,
                **kwargs)
    if pretrained:
        if arch not in model_urls:
            raise RuntimeError(f'Variant {arch} does not yet have pretrained weights.')
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        # Resize the positional embedding if the checkpoint resolution differs,
        # then load non-strictly so head-size mismatches do not abort.
        model.load_state_dict(pe_check(model, state_dict), strict=False)
    return model
def cct_2(arch, pretrained, progress, aggregation=None, *args, **kwargs):
    """CCT backbone with 2 layers, 2 heads, MLP ratio 1, embedding dim 128."""
    return _cct(arch, pretrained, progress, *args,
                num_layers=2, num_heads=2, mlp_ratio=1,
                embedding_dim=128, aggregation=aggregation, **kwargs)
def cct_4(arch, pretrained, progress, aggregation=None, *args, **kwargs):
    """CCT backbone with 4 layers, 2 heads, MLP ratio 1, embedding dim 128."""
    return _cct(arch, pretrained, progress, *args,
                num_layers=4, num_heads=2, mlp_ratio=1,
                embedding_dim=128, aggregation=aggregation, **kwargs)
def cct_6(arch, pretrained, progress, aggregation=None, *args, **kwargs):
    """CCT backbone with 6 layers, 4 heads, MLP ratio 2, embedding dim 256."""
    return _cct(arch, pretrained, progress, *args,
                num_layers=6, num_heads=4, mlp_ratio=2,
                embedding_dim=256, aggregation=aggregation, **kwargs)
def cct_7(arch, pretrained, progress, aggregation=None, *args, **kwargs):
    """CCT backbone with 7 layers, 4 heads, MLP ratio 2, embedding dim 256."""
    return _cct(arch, pretrained, progress, *args,
                num_layers=7, num_heads=4, mlp_ratio=2,
                embedding_dim=256, aggregation=aggregation, **kwargs)
def cct_14(arch, pretrained, progress, aggregation=None, *args, **kwargs):
    """CCT backbone with 14 layers, 6 heads, MLP ratio 3, embedding dim 384."""
    return _cct(arch, pretrained, progress, *args,
                num_layers=14, num_heads=6, mlp_ratio=3,
                embedding_dim=384, aggregation=aggregation, **kwargs)
@register_model
def cct_2_3x2_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-2: 3x3 conv tokenizer, 2 conv layers, 32x32 input, learnable pos-emb."""
    return cct_2('cct_2_3x2_32', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=2, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_2_3x2_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-2: 3x3 conv tokenizer, 2 conv layers, 32x32 input, sine pos-emb."""
    return cct_2('cct_2_3x2_32_sine', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=2, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_4_3x2_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-4: 3x3 conv tokenizer, 2 conv layers, 32x32 input, learnable pos-emb."""
    return cct_4('cct_4_3x2_32', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=2, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_4_3x2_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-4: 3x3 conv tokenizer, 2 conv layers, 32x32 input, sine pos-emb."""
    return cct_4('cct_4_3x2_32_sine', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=2, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)
@register_model
def cct_6_3x1_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-6: 3x3 conv tokenizer, 1 conv layer, 32x32 input, learnable pos-emb."""
    return cct_6('cct_6_3x1_32', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=1, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_6_3x1_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-6: 3x3 conv tokenizer, 1 conv layer, 32x32 input, sine pos-emb."""
    return cct_6('cct_6_3x1_32_sine', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=1, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_6_3x2_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-6: 3x3 conv tokenizer, 2 conv layers, 32x32 input, learnable pos-emb."""
    return cct_6('cct_6_3x2_32', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=2, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_6_3x2_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-6: 3x3 conv tokenizer, 2 conv layers, 32x32 input, sine pos-emb."""
    return cct_6('cct_6_3x2_32_sine', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=2, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)
@register_model
def cct_7_3x1_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-7: 3x3 conv tokenizer, 1 conv layer, 32x32 input, 10 classes."""
    return cct_7('cct_7_3x1_32', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=1, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_7_3x1_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-7: 3x3 conv tokenizer, 1 conv layer, 32x32 input, sine pos-emb."""
    return cct_7('cct_7_3x1_32_sine', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=1, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_7_3x1_32_c100(pretrained=False, progress=False,
                      img_size=32, positional_embedding='learnable', num_classes=100,
                      aggregation=None, *args, **kwargs):
    """CCT-7: 3x3 conv tokenizer, 1 conv layer, 32x32 input, 100 classes."""
    return cct_7('cct_7_3x1_32_c100', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=1, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_7_3x1_32_sine_c100(pretrained=False, progress=False,
                           img_size=32, positional_embedding='sine', num_classes=100,
                           aggregation=None, *args, **kwargs):
    """CCT-7: 3x3 conv tokenizer, 1 conv layer, 32x32 input, sine pos-emb, 100 classes."""
    return cct_7('cct_7_3x1_32_sine_c100', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=1, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)
@register_model
def cct_7_3x2_32(pretrained=False, progress=False,
                 img_size=32, positional_embedding='learnable', num_classes=10,
                 aggregation=None, *args, **kwargs):
    """CCT-7: 3x3 conv tokenizer, 2 conv layers, 32x32 input."""
    return cct_7('cct_7_3x2_32', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=2, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_7_3x2_32_sine(pretrained=False, progress=False,
                      img_size=32, positional_embedding='sine', num_classes=10,
                      aggregation=None, *args, **kwargs):
    """CCT-7: 3x3 conv tokenizer, 2 conv layers, 32x32 input, sine pos-emb."""
    return cct_7('cct_7_3x2_32_sine', pretrained, progress, *args,
                 kernel_size=3, n_conv_layers=2, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_7_7x2_224(pretrained=False, progress=False,
                  img_size=224, positional_embedding='learnable', num_classes=102,
                  aggregation=None, *args, **kwargs):
    """CCT-7: 7x7 conv tokenizer, 2 conv layers, 224x224 input, 102 classes."""
    return cct_7('cct_7_7x2_224', pretrained, progress, *args,
                 kernel_size=7, n_conv_layers=2, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_7_7x2_224_sine(pretrained=False, progress=False,
                       img_size=224, positional_embedding='sine', num_classes=102,
                       aggregation=None, *args, **kwargs):
    """CCT-7: 7x7 conv tokenizer, 2 conv layers, 224x224 input, sine pos-emb."""
    return cct_7('cct_7_7x2_224_sine', pretrained, progress, *args,
                 kernel_size=7, n_conv_layers=2, img_size=img_size,
                 positional_embedding=positional_embedding,
                 num_classes=num_classes, aggregation=aggregation, **kwargs)
@register_model
def cct_14_7x2_224(pretrained=False, progress=False,
                   img_size=224, positional_embedding='learnable', num_classes=1000,
                   aggregation=None, *args, **kwargs):
    """CCT-14: 7x7 conv tokenizer, 2 conv layers, 224x224 input, 1000 classes."""
    return cct_14('cct_14_7x2_224', pretrained, progress, *args,
                  kernel_size=7, n_conv_layers=2, img_size=img_size,
                  positional_embedding=positional_embedding,
                  num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_14_7x2_384(pretrained=False, progress=False,
                   img_size=384, positional_embedding='learnable', num_classes=1000,
                   aggregation=None, *args, **kwargs):
    """CCT-14: 7x7 conv tokenizer, 2 conv layers, 384x384 input, 1000 classes."""
    return cct_14('cct_14_7x2_384', pretrained, progress, *args,
                  kernel_size=7, n_conv_layers=2, img_size=img_size,
                  positional_embedding=positional_embedding,
                  num_classes=num_classes, aggregation=aggregation, **kwargs)


@register_model
def cct_14_7x2_384_fl(pretrained=False, progress=False,
                      img_size=384, positional_embedding='learnable', num_classes=102,
                      aggregation=None, *args, **kwargs):
    """CCT-14: 7x7 conv tokenizer, 2 conv layers, 384x384 input, 102 classes."""
    return cct_14('cct_14_7x2_384_fl', pretrained, progress, *args,
                  kernel_size=7, n_conv_layers=2, img_size=img_size,
                  positional_embedding=positional_embedding,
                  num_classes=num_classes, aggregation=aggregation, **kwargs)
| 15,794 | 42.753463 | 114 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/tokenizer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Tokenizer(nn.Module):
    """Convolutional image tokenizer.

    Applies `n_conv_layers` stages of Conv2d (+optional activation, +optional
    max-pool) and flattens the spatial dims, producing a
    (batch, seq_len, n_output_channels) token sequence.
    """

    def __init__(self,
                 kernel_size, stride, padding,
                 pooling_kernel_size=3, pooling_stride=2, pooling_padding=1,
                 n_conv_layers=1,
                 n_input_channels=3,
                 n_output_channels=64,
                 in_planes=64,
                 activation=None,
                 max_pool=True,
                 conv_bias=False):
        super().__init__()

        # Channel progression: input -> (n_conv_layers-1) x in_planes -> output.
        channels = ([n_input_channels]
                    + [in_planes] * (n_conv_layers - 1)
                    + [n_output_channels])
        stages = []
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            stages.append(nn.Sequential(
                nn.Conv2d(c_in, c_out,
                          kernel_size=(kernel_size, kernel_size),
                          stride=(stride, stride),
                          padding=(padding, padding), bias=conv_bias),
                nn.Identity() if activation is None else activation(),
                nn.MaxPool2d(kernel_size=pooling_kernel_size,
                             stride=pooling_stride,
                             padding=pooling_padding) if max_pool else nn.Identity()))
        self.conv_layers = nn.Sequential(*stages)

        # Collapses (H, W) into a single token axis.
        self.flattener = nn.Flatten(2, 3)
        self.apply(self.init_weight)

    def sequence_length(self, n_channels=3, height=224, width=224):
        """Number of tokens produced for an input of the given size."""
        return self.forward(torch.zeros((1, n_channels, height, width))).shape[1]

    def forward(self, x):
        # (B, C, H, W) -> (B, C', H'*W') -> (B, H'*W', C')
        return self.flattener(self.conv_layers(x)).transpose(-2, -1)

    @staticmethod
    def init_weight(m):
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight)
class TextTokenizer(nn.Module):
    """Convolutional tokenizer for text.

    Treats a (batch, seq_len, embedding_dim) word-embedding sequence as a
    1-channel 2D image and applies a Conv2d whose kernel spans the full
    embedding dimension (so it acts like a 1D conv over time), followed by an
    optional activation and max-pool over the time axis. An optional boolean
    padding mask is propagated through the same conv/pool geometry.
    """
    def __init__(self,
                 kernel_size, stride, padding,
                 pooling_kernel_size=3, pooling_stride=2, pooling_padding=1,
                 embedding_dim=300,
                 n_output_channels=128,
                 activation=None,
                 max_pool=True,
                 *args, **kwargs):
        super(TextTokenizer, self).__init__()
        # Kept so forward_mask knows whether a pooling stage exists at index 2.
        self.max_pool = max_pool
        self.conv_layers = nn.Sequential(
            nn.Conv2d(1, n_output_channels,
                      kernel_size=(kernel_size, embedding_dim),
                      stride=(stride, 1),
                      padding=(padding, 0), bias=False),
            nn.Identity() if activation is None else activation(),
            nn.MaxPool2d(
                kernel_size=(pooling_kernel_size, 1),
                stride=(pooling_stride, 1),
                padding=(pooling_padding, 0)
            ) if max_pool else nn.Identity()
        )
        self.apply(self.init_weight)

    def seq_len(self, seq_len=32, embed_dim=300):
        """Output sequence length for a (seq_len, embed_dim) input (dry run)."""
        return self.forward(torch.zeros((1, seq_len, embed_dim)))[0].shape[1]

    def forward_mask(self, mask):
        """Propagate a boolean padding mask through the conv/pool geometry.

        A 1D convolution with an all-ones kernel mirrors the receptive field of
        conv_layers[0]; a position stays valid (>0) if any input position in its
        receptive field was valid. The indices [0]/[2] rely on the fixed layout
        of `self.conv_layers` built in __init__.
        """
        new_mask = mask.unsqueeze(1).float()
        cnn_weight = torch.ones(
            (1, 1, self.conv_layers[0].kernel_size[0]),
            device=mask.device,
            dtype=torch.float)
        new_mask = F.conv1d(
            new_mask, cnn_weight, None,
            self.conv_layers[0].stride[0], self.conv_layers[0].padding[0], 1, 1)
        if self.max_pool:
            # Only valid when max_pool=True: conv_layers[2] is the MaxPool2d.
            new_mask = F.max_pool1d(
                new_mask, self.conv_layers[2].kernel_size[0],
                self.conv_layers[2].stride[0], self.conv_layers[2].padding[0], 1, False, False)
        new_mask = new_mask.squeeze(1)
        new_mask = (new_mask > 0)
        return new_mask

    def forward(self, x, mask=None):
        """Tokenize `x`; if `mask` is given, zero out masked-out positions.

        Returns (tokens, mask) where tokens is (batch, seq', n_output_channels)
        and `mask` is returned unchanged.
        """
        x = x.unsqueeze(1)
        x = self.conv_layers(x)
        x = x.transpose(1, 3).squeeze(1)
        x = x if mask is None else x * self.forward_mask(mask).unsqueeze(-1).float()
        return x, mask

    @staticmethod
    def init_weight(m):
        # Kaiming init for the conv weights (bias is disabled in __init__).
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight)
| 4,035 | 35.690909 | 95 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/cct/helpers.py | import math
import torch
import torch.nn.functional as F
def resize_pos_embed(posemb, posemb_new, num_tokens=1):
    """Rescale a grid of position embeddings to match a new sequence length.

    Adapted from Ross Wightman's `timm` (github.com/rwightman/pytorch-image-models)
    and the google-research vision_transformer checkpoint loader. The first
    `num_tokens` embeddings (e.g. the class token) are kept as-is; the rest are
    treated as a square grid and bilinearly interpolated to the new grid size.
    """
    if num_tokens:
        tok_emb = posemb[:, :num_tokens]
        grid_emb = posemb[0, num_tokens:]
        ntok_new = posemb_new.shape[1] - num_tokens
    else:
        tok_emb = posemb[:, :0]
        grid_emb = posemb[0]
        ntok_new = posemb_new.shape[1]
    gs_old = int(math.sqrt(len(grid_emb)))
    gs_new = int(math.sqrt(ntok_new))
    # (N, D) grid -> (1, D, gs, gs) image, interpolate, back to (1, N', D).
    grid_emb = grid_emb.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    grid_emb = F.interpolate(grid_emb, size=(gs_new, gs_new), mode='bilinear')
    grid_emb = grid_emb.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
    return torch.cat([tok_emb, grid_emb], dim=1)
def pe_check(model, state_dict, pe_key='classifier.positional_emb'):
    """Resize the positional embedding in `state_dict` if it mismatches `model`.

    No-op (returns `state_dict` unchanged) when `pe_key` is None, missing from
    either state dict, or already the right shape.
    """
    if pe_key is None:
        return state_dict
    model_sd = model.state_dict()
    if pe_key in state_dict and pe_key in model_sd:
        if model_sd[pe_key].shape != state_dict[pe_key].shape:
            state_dict[pe_key] = resize_pos_embed(
                state_dict[pe_key], model_sd[pe_key],
                num_tokens=model.classifier.num_tokens)
    return state_dict
| 1,573 | 46.69697 | 132 | py |
anomaly-seg | anomaly-seg-master/dataset.py | import os
import json
import torch
from torchvision import transforms
import numpy as np
from PIL import Image
def imresize(im, size, interp='bilinear'):
    """Resize a PIL image to `size` using the named resampling method.

    Raises Exception for an unknown `interp` value.
    """
    resample_modes = {
        'nearest': Image.NEAREST,
        'bilinear': Image.BILINEAR,
        'bicubic': Image.BICUBIC,
    }
    if interp not in resample_modes:
        raise Exception('resample method undefined!')
    return im.resize(size, resample_modes[interp])
class BaseDataset(torch.utils.data.Dataset):
    """Common base for the segmentation datasets.

    Parses the sample list (`odgt`), stores resize/padding options from `opt`,
    and provides the shared image/label transforms.
    """
    def __init__(self, odgt, opt, **kwargs):
        # parse options
        self.imgSizes = opt.imgSizes
        self.imgMaxSize = opt.imgMaxSize
        # max down sampling rate of network to avoid rounding during conv or pooling
        self.padding_constant = opt.padding_constant

        # parse the input list
        self.parse_input_list(odgt, **kwargs)

        # mean and std used by img_transform
        self.normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])

    def parse_input_list(self, odgt, max_sample=-1, start_idx=-1, end_idx=-1):
        """Load the sample list from a list or an odgt file, with optional slicing.

        `max_sample` truncates the list; `start_idx`/`end_idx` select a slice
        (both must be >= 0 to take effect).
        """
        if isinstance(odgt, list):
            self.list_sample = odgt
        elif isinstance(odgt, str):
            # The odgt file carries the whole sample list as JSON on its first
            # line, hence the trailing [0]. Use a context manager so the file
            # handle is closed (the original version leaked it).
            with open(odgt, 'r') as f:
                self.list_sample = [json.loads(x.rstrip()) for x in f][0]

        if max_sample > 0:
            self.list_sample = self.list_sample[0:max_sample]
        if start_idx >= 0 and end_idx >= 0:  # divide file list
            self.list_sample = self.list_sample[start_idx:end_idx]

        self.num_sample = len(self.list_sample)
        assert self.num_sample > 0
        print('# samples: {}'.format(self.num_sample))

    def img_transform(self, img):
        """HWC uint8 image -> normalized CHW float tensor."""
        # 0-255 to 0-1
        img = np.float32(np.array(img)) / 255.
        img = img.transpose((2, 0, 1))
        img = self.normalize(torch.from_numpy(img.copy()))
        return img

    def segm_transform(self, segm):
        """Label image -> long tensor, shifting labels down by 1 (-1 to 149)."""
        segm = torch.from_numpy(np.array(segm)).long() - 1
        return segm

    def round2nearest_multiple(self, x, p):
        """Round x up to the nearest multiple of p (result >= x)."""
        return ((x - 1) // p + 1) * p
class TrainDataset(BaseDataset):
    """Training dataset that assembles whole padded mini-batches per item.

    Samples are bucketed by aspect ratio (h > w vs h <= w) so each mini-batch
    shares an orientation. Images in a batch are rescaled to a randomly-chosen
    short edge and zero-padded to a shared size that is a multiple of
    `padding_constant`; labels are additionally downsampled by
    `segm_downsampling_rate`.
    """
    def __init__(self, root_dataset, odgt, opt, batch_per_gpu=1, **kwargs):
        super(TrainDataset, self).__init__(odgt, opt, **kwargs)
        self.root_dataset = root_dataset
        # down sampling rate of the segmentation label
        self.segm_downsampling_rate = opt.segm_downsampling_rate
        self.batch_per_gpu = batch_per_gpu
        # classify images into two classes: 1. h > w and 2. h <= w
        self.batch_record_list = [[], []]
        # override dataset length when training with batch_per_gpu > 1
        self.cur_idx = 0
        self.if_shuffled = False
    def _get_sub_batch(self):
        """Collect `batch_per_gpu` records that share an aspect-ratio bucket."""
        while True:
            # get a sample record
            this_sample = self.list_sample[self.cur_idx]
            if this_sample['height'] > this_sample['width']:
                self.batch_record_list[0].append(this_sample) # h > w, go to 1st class
            else:
                self.batch_record_list[1].append(this_sample) # h <= w, go to 2nd class
            # update current sample pointer
            self.cur_idx += 1
            if self.cur_idx >= self.num_sample:
                # wrap around and reshuffle for the next pass over the list
                self.cur_idx = 0
                np.random.shuffle(self.list_sample)
            if len(self.batch_record_list[0]) == self.batch_per_gpu:
                batch_records = self.batch_record_list[0]
                self.batch_record_list[0] = []
                break
            elif len(self.batch_record_list[1]) == self.batch_per_gpu:
                batch_records = self.batch_record_list[1]
                self.batch_record_list[1] = []
                break
        return batch_records
    def __getitem__(self, index):
        """Build one padded batch dict with 'img_data' and 'seg_label' tensors."""
        # NOTE: random shuffle for the first time. shuffle in __init__ is useless
        # because DataLoader workers each get a copy of this object.
        if not self.if_shuffled:
            np.random.seed(index)
            np.random.shuffle(self.list_sample)
            self.if_shuffled = True
        # get sub-batch candidates
        batch_records = self._get_sub_batch()
        # resize all images' short edges to the chosen size
        if isinstance(self.imgSizes, list) or isinstance(self.imgSizes, tuple):
            this_short_size = np.random.choice(self.imgSizes)
        else:
            this_short_size = self.imgSizes
        # calculate the BATCH's height and width
        # since we concat more than one samples, the batch's h and w shall be larger than EACH sample
        batch_widths = np.zeros(self.batch_per_gpu, np.int32)
        batch_heights = np.zeros(self.batch_per_gpu, np.int32)
        for i in range(self.batch_per_gpu):
            img_height, img_width = batch_records[i]['height'], batch_records[i]['width']
            # scale so the short edge matches this_short_size, capped by imgMaxSize
            this_scale = min(
                this_short_size / min(img_height, img_width), \
                self.imgMaxSize / max(img_height, img_width))
            batch_widths[i] = img_width * this_scale
            batch_heights[i] = img_height * this_scale
        # Here we must pad both input image and segmentation map to size h' and w' so that p | h' and p | w'
        batch_width = np.max(batch_widths)
        batch_height = np.max(batch_heights)
        batch_width = int(self.round2nearest_multiple(batch_width, self.padding_constant))
        batch_height = int(self.round2nearest_multiple(batch_height, self.padding_constant))
        assert self.padding_constant >= self.segm_downsampling_rate, \
            'padding constant must be equal or large than segm downsamping rate'
        batch_images = torch.zeros(
            self.batch_per_gpu, 3, batch_height, batch_width)
        batch_segms = torch.zeros(
            self.batch_per_gpu,
            batch_height // self.segm_downsampling_rate,
            batch_width // self.segm_downsampling_rate).long()
        for i in range(self.batch_per_gpu):
            this_record = batch_records[i]
            # load image and label
            image_path = os.path.join(self.root_dataset, this_record['fpath_img'])
            segm_path = os.path.join(self.root_dataset, this_record['fpath_segm'])
            img = Image.open(image_path).convert('RGB')
            segm = Image.open(segm_path)
            assert(segm.mode == "L")
            assert(img.size[0] == segm.size[0])
            assert(img.size[1] == segm.size[1])
            # random horizontal flip (applied identically to image and label)
            if np.random.choice([0, 1]):
                img = img.transpose(Image.FLIP_LEFT_RIGHT)
                segm = segm.transpose(Image.FLIP_LEFT_RIGHT)
            # note that each sample within a mini batch has different scale param
            img = imresize(img, (batch_widths[i], batch_heights[i]), interp='bilinear')
            segm = imresize(segm, (batch_widths[i], batch_heights[i]), interp='nearest')
            # further downsample seg label, need to avoid seg label misalignment:
            # first pad the label to a multiple of the downsampling rate.
            segm_rounded_width = self.round2nearest_multiple(segm.size[0], self.segm_downsampling_rate)
            segm_rounded_height = self.round2nearest_multiple(segm.size[1], self.segm_downsampling_rate)
            segm_rounded = Image.new('L', (segm_rounded_width, segm_rounded_height), 0)
            segm_rounded.paste(segm, (0, 0))
            segm = imresize(
                segm_rounded,
                (segm_rounded.size[0] // self.segm_downsampling_rate, \
                segm_rounded.size[1] // self.segm_downsampling_rate), \
                interp='nearest')
            # image transform, to torch float tensor 3xHxW
            img = self.img_transform(img)
            # segm transform, to torch long tensor HxW
            segm = self.segm_transform(segm)
            # put into batch arrays (zero/-padded to the batch's shared size)
            batch_images[i][:, :img.shape[1], :img.shape[2]] = img
            batch_segms[i][:segm.shape[0], :segm.shape[1]] = segm
        output = dict()
        output['img_data'] = batch_images
        output['seg_label'] = batch_segms
        return output
    def __len__(self):
        return int(1e10) # It's a fake length due to the trick that every loader maintains its own list
        #return self.num_sampleclass
class ValDataset(BaseDataset):
    """Validation dataset: yields multi-scale images plus the ground-truth label."""

    def __init__(self, root_dataset, odgt, opt, **kwargs):
        super(ValDataset, self).__init__(odgt, opt, **kwargs)
        self.root_dataset = root_dataset

    def __getitem__(self, index):
        record = self.list_sample[index]
        # load image and label
        img = Image.open(
            os.path.join(self.root_dataset, record['fpath_img'])).convert('RGB')
        segm = Image.open(os.path.join(self.root_dataset, record['fpath_segm']))
        assert segm.mode == "L"
        assert img.size[0] == segm.size[0]
        assert img.size[1] == segm.size[1]

        ori_width, ori_height = img.size
        img_resized_list = []
        for short_size in self.imgSizes:
            # scale so the short edge matches `short_size`, capped by imgMaxSize
            scale = min(short_size / float(min(ori_height, ori_width)),
                        self.imgMaxSize / float(max(ori_height, ori_width)))
            # round up to a multiple of padding_constant to avoid rounding in the net
            target_width = self.round2nearest_multiple(
                int(ori_width * scale), self.padding_constant)
            target_height = self.round2nearest_multiple(
                int(ori_height * scale), self.padding_constant)
            # resize + normalize, then add a leading batch axis
            resized = self.img_transform(
                imresize(img, (target_width, target_height), interp='bilinear'))
            img_resized_list.append(torch.unsqueeze(resized, 0))

        batch_segms = torch.unsqueeze(self.segm_transform(segm), 0)
        return {
            'img_ori': np.array(img),
            'img_data': [x.contiguous() for x in img_resized_list],
            'seg_label': batch_segms.contiguous(),
            'info': record['fpath_img'],
        }

    def __len__(self):
        return self.num_sample
class TestDataset(BaseDataset):
    """Inference-time dataset: multi-scale images only, no ground-truth labels."""

    def __init__(self, odgt, opt, **kwargs):
        super(TestDataset, self).__init__(odgt, opt, **kwargs)

    def __getitem__(self, index):
        record = self.list_sample[index]
        # fpath_img is used as-is (no dataset root is joined here)
        img = Image.open(record['fpath_img']).convert('RGB')
        ori_width, ori_height = img.size

        img_resized_list = []
        for short_size in self.imgSizes:
            # scale so the short edge matches `short_size`, capped by imgMaxSize
            scale = min(short_size / float(min(ori_height, ori_width)),
                        self.imgMaxSize / float(max(ori_height, ori_width)))
            # round up to a multiple of padding_constant to avoid rounding in the net
            target_width = self.round2nearest_multiple(
                int(ori_width * scale), self.padding_constant)
            target_height = self.round2nearest_multiple(
                int(ori_height * scale), self.padding_constant)
            resized = self.img_transform(
                imresize(img, (target_width, target_height), interp='bilinear'))
            img_resized_list.append(torch.unsqueeze(resized, 0))

        return {
            'img_ori': np.array(img),
            'img_data': [x.contiguous() for x in img_resized_list],
            'info': record['fpath_img'],
        }

    def __len__(self):
        return self.num_sample
anomaly-seg | anomaly-seg-master/eval_ood.py | # System libs
import os
import time
import argparse
from distutils.version import LooseVersion
# Numerical libs
import numpy as np
import torch
import torch.nn as nn
from scipy.io import loadmat
# Our libs
from config import cfg
from dataset import ValDataset
from models import ModelBuilder, SegmentationModule
from utils import AverageMeter, colorEncode, accuracy, intersectionAndUnion, setup_logger
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy
from PIL import Image
from tqdm import tqdm
import anom_utils
# 150-entry color palette ('data/color150.mat') used by colorEncode to
# colorize segmentation label maps for visualization.
colors = loadmat('data/color150.mat')['colors']
def visualize_result(data, pred, dir_result):
    """Save an image | ground-truth | prediction side-by-side PNG to `dir_result`.

    `data` is a (image, segmentation label, image path) tuple; the output file
    reuses the input image's name with a .png extension.
    """
    img, seg, info = data

    # color-encode ground truth and prediction with the shared palette
    seg_color = colorEncode(seg, colors)
    pred_color = colorEncode(pred, colors)

    # stitch the three panels horizontally and save
    im_vis = np.concatenate((img, seg_color, pred_color), axis=1).astype(np.uint8)
    img_name = info.split('/')[-1]
    out_path = os.path.join(dir_result, img_name.replace('.jpg', '.png'))
    Image.fromarray(im_vis).save(out_path)
def eval_ood_measure(conf, seg_label, cfg, mask=None):
    """Compute OOD metrics for one image, treating cfg.OOD.out_labels as OOD.

    Scores are negated so larger values mean "more anomalous". Returns an
    (auroc, aupr, fpr) tuple, or None when the image lacks either OOD or
    in-distribution pixels.
    """
    out_labels = cfg.OOD.out_labels
    if mask is not None:
        seg_label = seg_label[mask]

    # Pixel-wise OOD mask: true where the label matches any OOD label.
    out_label = seg_label == out_labels[0]
    for label in out_labels:
        out_label = np.logical_or(out_label, seg_label == label)

    in_scores = -conf[np.logical_not(out_label)]
    out_scores = -conf[out_label]

    if len(in_scores) == 0 or len(out_scores) == 0:
        print("This image does not contain any OOD pixels or is only OOD.")
        return None
    auroc, aupr, fpr = anom_utils.get_and_print_results(out_scores, in_scores)
    return auroc, aupr, fpr
def evaluate(segmentation_module, loader, cfg, gpu):
    """Multi-scale evaluation with per-image OOD scoring.

    For each image: averages class scores over cfg.DATASET.imgSizes, derives a
    per-pixel confidence according to cfg.OOD.ood (msp / maxlogit / background /
    crf / crf-gauss), accumulates OOD metrics (AUROC/AUPR/FPR) and standard
    segmentation metrics (pixel accuracy, per-class IoU), then prints a summary.
    """
    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    time_meter = AverageMeter()
    segmentation_module.eval()
    aurocs, auprs, fprs = [], [], []
    pbar = tqdm(total=len(loader))
    for batch_data in loader:
        # process data (batch_size is 1; user_scattered_collate wraps in a list)
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']
        torch.cuda.synchronize()
        tic = time.perf_counter()
        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])
            scores = async_copy_to(scores, gpu)
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, gpu)
                # forward pass; average scores over the multi-scale inputs
                scores_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + scores_tmp / len(cfg.DATASET.imgSizes)
            tmp_scores = scores
            if cfg.OOD.exclude_back:
                # drop the background channel before computing confidences
                tmp_scores = tmp_scores[:,1:]
            mask = None
            # prediction is always taken from the full (unexcluded) scores
            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())
            # for evaluating MSP (maximum softmax probability)
            if cfg.OOD.ood == "msp":
                conf, _ = torch.max(nn.functional.softmax(tmp_scores, dim=1),dim=1)
                conf = as_numpy(conf.squeeze(0).cpu())
            elif cfg.OOD.ood == "maxlogit":
                conf, _ = torch.max(tmp_scores,dim=1)
                conf = as_numpy(conf.squeeze(0).cpu())
            elif cfg.OOD.ood == "background":
                # NOTE(review): if OOD.exclude_back is also set, channel 0 here
                # is no longer the background class — confirm these options are
                # not meant to be combined.
                conf = tmp_scores[:, 0]
                conf = as_numpy(conf.squeeze(0).cpu())
            elif cfg.OOD.ood == "crf":
                import pydensecrf.densecrf as dcrf
                from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral, create_pairwise_gaussian
                ch,h,w = scores.squeeze(0).size()
                # NOTE(review): DenseCRF2D takes (width, height, nlabels);
                # h is passed as width here — verify for non-square inputs.
                d = dcrf.DenseCRF2D(h, w, ch) # width, height, nlabels
                tmp_scores = as_numpy(nn.functional.softmax(tmp_scores, dim=1).squeeze(0))
                tmp_scores = as_numpy(tmp_scores)
                U = unary_from_softmax(tmp_scores)
                d.setUnaryEnergy(U)
                pairwise_energy = create_pairwise_bilateral(sdims=(10,10), schan=13, img=tmp_scores, chdim=0)
                d.addPairwiseEnergy(pairwise_energy, compat=10)
                # Run inference for 100 iterations
                Q_unary = d.inference(100)
                # The Q is now the approximate posterior, we can get a MAP estimate using argmax.
                map_soln_unary = np.argmax(Q_unary, axis=0)
                # Unfortunately, the DenseCRF flattens everything, so get it back into picture form.
                map_soln_unary = map_soln_unary.reshape((h,w))
                # confidence = max posterior probability per pixel
                conf = np.max(Q_unary, axis=0).reshape((h,w))
            elif cfg.OOD.ood == "crf-gauss":
                import pydensecrf.densecrf as dcrf
                from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral, create_pairwise_gaussian
                ch,h,w = scores.squeeze(0).size()
                d = dcrf.DenseCRF2D(h, w, ch) # width, height, nlabels
                tmp_scores = as_numpy(nn.functional.softmax(tmp_scores, dim=1).squeeze(0))
                tmp_scores = as_numpy(tmp_scores)
                U = unary_from_softmax(tmp_scores)
                d.setUnaryEnergy(U)
                d.addPairwiseGaussian(sxy=3, compat=3) # `compat` is the "strength" of this potential.
                # Run inference for 100 iterations
                Q_unary = d.inference(100)
                # The Q is now the approximate posterior, we can get a MAP estimate using argmax.
                map_soln_unary = np.argmax(Q_unary, axis=0)
                # Unfortunately, the DenseCRF flattens everything, so get it back into picture form.
                map_soln_unary = map_soln_unary.reshape((h,w))
                conf = np.max(Q_unary, axis=0).reshape((h,w))
            # per-image OOD metrics; None when the image is all-ID or all-OOD
            res = eval_ood_measure(conf, seg_label, cfg, mask=mask)
            if res is not None:
                auroc, aupr, fpr = res
                aurocs.append(auroc); auprs.append(aupr), fprs.append(fpr)
            else:
                pass
        torch.cuda.synchronize()
        time_meter.update(time.perf_counter() - tic)
        # calculate accuracy
        acc, pix = accuracy(pred, seg_label)
        intersection, union = intersectionAndUnion(pred, seg_label, cfg.DATASET.num_class)
        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)
        # visualization
        if cfg.VAL.visualize:
            visualize_result(
                (batch_data['img_ori'], seg_label, batch_data['info']),
                pred,
                os.path.join(cfg.DIR, 'result')
            )
        pbar.update(1)
    # summary
    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {:.4f}'.format(i, _iou))
    print('[Eval Summary]:')
    print('Mean IoU: {:.4f}, Accuracy: {:.2f}%, Inference Time: {:.4f}s'
          .format(iou.mean(), acc_meter.average()*100, time_meter.average()))
    print("mean auroc = ", np.mean(aurocs), "mean aupr = ", np.mean(auprs), " mean fpr = ", np.mean(fprs))
def main(cfg, gpu):
    """Build the segmentation model and validation loader, then run evaluation."""
    torch.cuda.set_device(gpu)

    # Network builders (decoder in softmax/inference mode).
    encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder,
        use_softmax=True)
    segmentation_module = SegmentationModule(
        encoder, decoder, nn.NLLLoss(ignore_index=-1))

    # Dataset and loader (batch elements are pre-collated lists).
    dataset_val = ValDataset(
        cfg.DATASET.root_dataset,
        cfg.DATASET.list_val,
        cfg.DATASET)
    loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=cfg.VAL.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()

    # Main loop
    evaluate(segmentation_module, loader_val, cfg, gpu)
    print('Evaluation Done!')
if __name__ == '__main__':
    assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
        'PyTorch>=0.4.0 is required'
    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Validation"
    )
    parser.add_argument(
        "--cfg",
        default="config/ade20k-resnet50dilated-ppm_deepsup.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--gpu",
        default=0,
        help="gpu to use"
    )
    parser.add_argument(
        "--ood",
        help="Choices are [msp, crf-gauss, crf, maxlogit, background]",
        default="msp",
    )
    parser.add_argument(
        "--exclude_back",
        help="Whether to exclude the background class.",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    # Merge order: YAML file first, then the OOD CLI flags, then free-form
    # "opts" overrides (last merge wins).
    ood = ["OOD.exclude_back", args.exclude_back, "OOD.ood", args.ood]
    cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(ood)
    cfg.merge_from_list(args.opts)
    # cfg.freeze()
    logger = setup_logger(distributed_rank=0)  # TODO: wire up the real rank
    logger.info("Loaded configuration file {}".format(args.cfg))
    logger.info("Running with config:\n{}".format(cfg))
    # absolute paths of model weights (checkpoints must already exist)
    cfg.MODEL.weights_encoder = os.path.join(
        cfg.DIR, 'encoder_' + cfg.VAL.checkpoint)
    cfg.MODEL.weights_decoder = os.path.join(
        cfg.DIR, 'decoder_' + cfg.VAL.checkpoint)
    assert os.path.exists(cfg.MODEL.weights_encoder) and \
        os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exitst!"
    # output directory for visualizations
    if not os.path.isdir(os.path.join(cfg.DIR, "result")):
        os.makedirs(os.path.join(cfg.DIR, "result"))
    main(cfg, args.gpu)
| 10,392 | 34.35034 | 116 | py |
fmriprep | fmriprep-master/docs/conf.py | # fmriprep documentation build configuration file, created by
# sphinx-quickstart on Mon May 9 09:04:25 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime
from sphinx import __version__ as sphinxversion
from packaging import version as pver # Avoid distutils.LooseVersion which is deprecated
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath("sphinxext"))
sys.path.insert(0, os.path.abspath("../wrapper"))
from github_link import make_linkcode_resolve # noqa: E402
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.linkcode",
    "sphinx.ext.napoleon",
    "sphinxarg.ext",  # argparse extension
    "nipype.sphinxext.plot_workflow",
]
# Mock modules in autodoc so the docs build without the heavy runtime deps:
autodoc_mock_imports = [
    "numpy",
    "nitime",
    "matplotlib",
]
# Sphinx >= 1.7 can mock these additional packages during autodoc import.
if pver.parse(sphinxversion) >= pver.parse("1.7.0"):
    autodoc_mock_imports += [
        "pandas",
        "nilearn",
        "seaborn",
    ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Accept custom section names to be parsed for numpy-style docstrings
# of parameters.
# Requires pinning sphinxcontrib-napoleon to a specific commit while
# https://github.com/sphinx-contrib/napoleon/pull/10 is merged.
napoleon_use_param = False
napoleon_custom_sections = [
    ("Inputs", "Parameters"),
    ("Outputs", "Parameters"),
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "fmriprep"
author = "The fMRIPrep developers"
copyright = f"2016-{datetime.now().year}, {author}"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): "version" looks like a placeholder substituted at release
# time — confirm against the release tooling before changing.
# The short X.Y version.
version = "version"
# The full version, including alpha/beta/rc tags.
release = "version"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# html_title = u'fmriprep vversion'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g., ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "fmriprepdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "fmriprep.tex", "fMRIprep Documentation",
     author,
     "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, "fmriprep", "fmriprep Documentation",
     [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, "fmriprep", "fMRIprep Documentation",
     author, "fmriprep", "One line description of project.",
     "Miscellaneous"),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve("fmriprep",
                                         "https://github.com/nipreps/"
                                         "fmriprep/blob/{revision}/"
                                         "{package}/{path}#L{lineno}")
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3/", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/", None),
    "matplotlib": ("https://matplotlib.org/stable/", None),
    "bids": ("https://bids-standard.github.io/pybids/", None),
    "nibabel": ("https://nipy.org/nibabel/", None),
    "nipype": ("https://nipype.readthedocs.io/en/latest/", None),
    "niworkflows": ("https://www.nipreps.org/niworkflows/", None),
    "sdcflows": ("https://www.nipreps.org/sdcflows/", None),
    "smriprep": ("https://www.nipreps.org/smriprep/", None),
    "templateflow": ("https://www.templateflow.org/python-client", None),
}
# Silence warnings about remote (non-local) image URIs.
suppress_warnings = ["image.nonlocal_uri"]
def setup(app):
app.add_css_file("theme_overrides.css")
# We need this for the boilerplate script
app.add_js_file("https://cdn.rawgit.com/chrisfilo/zenodo.js/v0.1/zenodo.js")
| 11,942 | 32.642254 | 89 | py |
PIBConv | PIBConv-main/cnn/complexity.py | import torch
from model import *
from genotypes import *
from ptflops import get_model_complexity_info
def print_complexity(network):
    """Print the MAC count and parameter count of ``network``.

    Complexity is measured by ptflops for a CIFAR-style (3, 32, 32) input,
    including a per-layer breakdown on stdout.
    """
    row = '{:<30} {:<8}'
    macs, params = get_model_complexity_info(
        network,
        (3, 32, 32),
        as_strings=True,
        print_per_layer_stat=True,
        verbose=True,
    )
    print(row.format('Computational complexity: ', macs))
    print(row.format('Number of parameters: ', params))
if __name__ == "__main__":
    # Build the evaluated CIFAR-10 network (20 cells, 36 initial channels)
    # with the searched genotype and report its MACs / parameter count.
    network = NetworkCIFAR(
        C=36,
        num_classes=10,
        layers=20,
        auxiliary=False,
        genotype=DARTS_newconv_epoch50)
    network.drop_path_prob = 0.2  # Placeholder - value is only for functionality and should not change complexity at all
    print_complexity(network)
| 788 | 31.875 | 127 | py |
PIBConv | PIBConv-main/cnn/train_cpath.py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import genotypes
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkADP as Network
# for ADP dataset only
from ADP_utils.classesADP import classesADP
parser = argparse.ArgumentParser("cpath")
####################
# Model details
parser.add_argument('--arch', type=str, default='DARTS_ADP_N4', help='choose network architecture')
parser.add_argument('--layers', type=int, default=4, help='total number of layers')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
####################
# Training details
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--epochs', type=int, default=600, help='num of training epochs')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
####################
# Datasets
parser.add_argument('--data', type=str, default='./data', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='ADP', help='choose dataset: ADP, BCSS, BACH, OS')
parser.add_argument('--image_size', type=int, default=272, help='CPATH image size')
# color augmentation
parser.add_argument('--color_aug', action='store_true', default=False, help='use color augmentation')
parser.add_argument('--color_distortion', type=float, default=0.3, help='color distortion param')
# ADP only
parser.add_argument('--adp_level', type=str, default='L3', help='ADP label level')
####################
# Others
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
args = parser.parse_args()
# Create a time-stamped experiment directory and snapshot all *.py scripts
# into it for reproducibility.
args.save = 'Eval-{}-data-{}-arch-{}-{}'.format(args.save, args.dataset, args.arch, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Log both to stdout and to <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Resolve the number of output classes for the chosen dataset; abort on an
# unknown dataset name.
if args.dataset == 'ADP':
    n_classes = classesADP[args.adp_level]['numClasses']
elif args.dataset == 'BCSS':
    n_classes = 10
elif args.dataset == 'BACH' or args.dataset == 'OS':
    n_classes = 4
else:
    logging.info('Unknown dataset!')
    sys.exit(1)
def main():
    """End-to-end evaluation run for the searched architecture.

    Builds train/valid/test splits for the selected CPath dataset, trains
    the network with SGD + cosine annealing, checkpoints both the most
    recent weights and the best-on-validation weights, then reports test
    accuracy with each checkpoint.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Reproducibility and CUDA setup.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # Look up the cell genotype by its name (e.g. DARTS_ADP_N4).
    genotype = eval("genotypes.%s" % args.arch)
    logging.info('genotype = %s', genotype)

    # Dataset: each branch builds train/valid/test splits with the matching
    # augmentation pipeline.
    if args.dataset == 'ADP':
        train_transform, valid_transform = utils._data_transforms_adp(args)
        train_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='train')
        valid_data = utils.ADP_dataset(level=args.adp_level, transform=valid_transform, root=args.data, split='valid')
        test_data = utils.ADP_dataset(level=args.adp_level, transform=valid_transform, root=args.data, split='test')
    elif args.dataset == 'BCSS':
        train_transform, valid_transform = utils._data_transforms_bcss(args)
        train_data = utils.BCSSDataset(root=args.data, split='train', transform=train_transform)
        valid_data = utils.BCSSDataset(root=args.data, split='valid', transform=valid_transform)
        test_data = utils.BCSSDataset(root=args.data, split='test', transform=valid_transform)
    elif args.dataset == 'BACH':
        train_transform, valid_transform = utils._data_transforms_bach(args)
        train_data = utils.BACH_transformed(root=args.data, split='train', transform=train_transform)
        valid_data = utils.BACH_transformed(root=args.data, split='valid', transform=valid_transform)
        test_data = utils.BACH_transformed(root=args.data, split='test', transform=valid_transform)
    elif args.dataset == 'OS':
        train_transform, valid_transform = utils._data_transforms_os(args)
        train_data = utils.OS_transformed(root=args.data, split='train', transform=train_transform)
        valid_data = utils.OS_transformed(root=args.data, split='valid', transform=valid_transform)
        test_data = utils.OS_transformed(root=args.data, split='test', transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

    dataset_size = len(train_queue.dataset)
    print('train dataset size:', len(train_queue.dataset))
    print('valid dataset size:', len(valid_queue.dataset))
    print('test dataset size:', len(test_queue.dataset))

    # Criterion:
    # ADP and BCSS are multi-label datasets -> MultiLabelSoftMarginLoss with
    # inverse-frequency class weights.
    if args.dataset == 'ADP' or args.dataset == 'BCSS':
        train_class_counts = np.sum(train_queue.dataset.class_labels, axis=0)
        weightsBCE = dataset_size / train_class_counts
        weightsBCE = torch.as_tensor(weightsBCE, dtype=torch.float32).to(int(args.gpu))
        criterion = torch.nn.MultiLabelSoftMarginLoss(weight=weightsBCE).cuda()
    # BACH and OS are single-label datasets -> CrossEntropyLoss.
    elif args.dataset == 'BACH' or args.dataset == 'OS':
        criterion = nn.CrossEntropyLoss()
        criterion = criterion.cuda()

    # Model.
    model = Network(args.init_channels, n_classes, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    logging.info("param size = %fM", utils.count_parameters_in_MB(model))

    # Optimizer and cosine learning-rate schedule over all epochs.
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))

    # Train.
    best_acc = 0
    for epoch in range(args.epochs):
        # Log the LR actually used this epoch. scheduler.step() is called
        # *after* the epoch (PyTorch >= 1.1 contract); the previous
        # step-before-train ordering skipped the initial learning rate.
        logging.info('epoch %d lr %e', epoch, optimizer.param_groups[0]['lr'])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc_1, train_acc_5, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc_1 %f, train_acc_5 %f', train_acc_1, train_acc_5)

        valid_acc_1, valid_acc_5, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc_1 %f, valid_acc_5 %f', valid_acc_1, valid_acc_5)

        scheduler.step()

        # Keep the best validation checkpoint plus the most recent weights.
        if valid_acc_1 > best_acc:
            best_acc = valid_acc_1
            utils.save(model, os.path.join(args.save, 'best_weights.pt'))
        utils.save(model, os.path.join(args.save, 'last_weights.pt'))

    # Test using the last weights.
    logging.info("Test using last weights ...")
    model_test = Network(args.init_channels, n_classes, args.layers, args.auxiliary, genotype)
    model_test = model_test.cuda()
    utils.load(model_test, os.path.join(args.save, 'last_weights.pt'))
    model_test.drop_path_prob = args.drop_path_prob
    test_acc1, test_acc5, test_obj = infer(test_queue, model_test, criterion)
    logging.info('test_acc_1 %f, test_acc_5 %f', test_acc1, test_acc5)

    # Test using the best weights on the validation set.
    logging.info("Test using best weights ...")
    model_test = Network(args.init_channels, n_classes, args.layers, args.auxiliary, genotype)
    model_test = model_test.cuda()
    utils.load(model_test, os.path.join(args.save, 'best_weights.pt'))
    model_test.drop_path_prob = args.drop_path_prob
    test_acc1, test_acc5, test_obj = infer(test_queue, model_test, criterion)
    logging.info('test_acc_1 %f, test_acc_5 %f', test_acc1, test_acc5)
def train(train_queue, model, criterion, optimizer):
    """Run one training epoch and return (top1, top5, avg_loss).

    For the multi-label datasets (ADP, BCSS) "top1" is the mean per-class
    binary accuracy and "top5" the exact-match accuracy; for the
    single-label datasets (BACH, OS) they are the usual top-k accuracies.
    """
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.train()

    trained_data_size = 0
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda()
        target = target.cuda(non_blocking=True)

        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            # Deep-supervision head: add the down-weighted auxiliary loss.
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight * loss_aux
        loss.backward()
        # clip_grad_norm was deprecated in favor of the in-place variant.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()

        n = input.size(0)
        trained_data_size += n
        if args.dataset == 'ADP' or args.dataset == 'BCSS':
            # Multi-label: threshold the sigmoid probabilities at 0.5.
            preds = (torch.sigmoid(logits) > 0.5).int()
            prec1, prec5 = utils.accuracyADP(preds, target)
            objs.update(loss.item(), n)
            top1.update(prec1.double(), n)
            top5.update(prec5.double(), n)
        elif args.dataset == 'BACH' or args.dataset == 'OS':
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, min(5, n_classes)))
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

        # report training loss
        if step % args.report_freq == 0:
            if args.dataset == 'ADP' or args.dataset == 'BCSS':
                top1_avg = (top1.sum_accuracy.cpu().item() / (trained_data_size * n_classes))
                top5_avg = (top5.sum_accuracy.cpu().item() / trained_data_size)
            elif args.dataset == 'BACH' or args.dataset == 'OS':
                top1_avg = top1.avg
                top5_avg = top5.avg
            logging.info('train %03d %e %f %f', step, objs.avg, top1_avg, top5_avg)

    # Final epoch-level averages over the whole training set.
    if args.dataset == 'ADP' or args.dataset == 'BCSS':
        top1_avg = (top1.sum_accuracy.cpu().item() / (len(train_queue.dataset) * n_classes))
        top5_avg = (top5.sum_accuracy.cpu().item() / len(train_queue.dataset))
    elif args.dataset == 'BACH' or args.dataset == 'OS':
        top1_avg = top1.avg
        top5_avg = top5.avg
    return top1_avg, top5_avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate `model` on `valid_queue`; return (top1, top5, avg_loss).

    Accuracy semantics mirror train(): mean per-class binary accuracy /
    exact-match for the multi-label datasets (ADP, BCSS), standard top-k
    for the single-label datasets (BACH, OS). Runs under torch.no_grad().
    """
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.eval()

    infered_data_size = 0
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)
            loss = criterion(logits, target)

            n = input.size(0)
            infered_data_size += n
            if args.dataset == 'ADP' or args.dataset == 'BCSS':
                # Multi-label: threshold the sigmoid probabilities at 0.5.
                preds = (torch.sigmoid(logits) > 0.5).int()
                prec1, prec5 = utils.accuracyADP(preds, target)
                objs.update(loss.item(), n)
                top1.update(prec1.double(), n)
                top5.update(prec5.double(), n)
            elif args.dataset == 'BACH' or args.dataset == 'OS':
                prec1, prec5 = utils.accuracy(logits, target, topk=(1, min(5, n_classes)))
                objs.update(loss.item(), n)
                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)

            # report validation loss
            if step % args.report_freq == 0:
                if args.dataset == 'ADP' or args.dataset == 'BCSS':
                    top1_avg = (top1.sum_accuracy.cpu().item() / (infered_data_size * n_classes))
                    top5_avg = (top5.sum_accuracy.cpu().item() / infered_data_size)
                elif args.dataset == 'BACH' or args.dataset == 'OS':
                    top1_avg = top1.avg
                    top5_avg = top5.avg
                logging.info('valid %03d %e %f %f', step, objs.avg, top1_avg, top5_avg)

    # Final averages over the whole evaluation set.
    if args.dataset == 'ADP' or args.dataset == 'BCSS':
        top1_avg = (top1.sum_accuracy.cpu().item() / (len(valid_queue.dataset) * n_classes))
        top5_avg = (top5.sum_accuracy.cpu().item() / len(valid_queue.dataset))
    elif args.dataset == 'BACH' or args.dataset == 'OS':
        top1_avg = top1.avg
        top5_avg = top5.avg
    return top1_avg, top5_avg, objs.avg
if __name__ == '__main__':
    # Script entry point: train, validate and test per the CLI args above.
    main()
| 13,664 | 43.366883 | 118 | py |
PIBConv | PIBConv-main/cnn/apply_gradcam.py | import argparse
import cv2
import numpy as np
import torch
from torchvision import models
from pytorch_grad_cam import GradCAM, \
HiResCAM, \
ScoreCAM, \
GradCAMPlusPlus, \
AblationCAM, \
XGradCAM, \
EigenCAM, \
EigenGradCAM, \
LayerCAM, \
FullGrad, \
GradCAMElementWise
from model import *
from genotypes import *
import extract_cifar10
import matplotlib.pyplot as plt
from pytorch_grad_cam import GuidedBackpropReLUModel
from pytorch_grad_cam.utils.image import show_cam_on_image, \
deprocess_image, \
preprocess_image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
def get_args():
    """Parse command-line arguments for the Grad-CAM visualization script.

    Returns:
        argparse.Namespace with fields ``use_cuda``, ``image_path``,
        ``aug_smooth``, ``eigen_smooth`` and ``method``. ``use_cuda`` is
        forced to False when no CUDA device is available.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--use-cuda', action='store_true', default=False,
                        help='Use NVIDIA GPU acceleration')
    parser.add_argument(
        '--image-path',
        type=str,
        default='./examples/both.png',
        help='Input image path')
    parser.add_argument('--aug_smooth', action='store_true',
                        help='Apply test time augmentation to smooth the CAM')
    parser.add_argument(
        '--eigen_smooth',
        action='store_true',
        help='Reduce noise by taking the first principle componenet'
        'of cam_weights*activations')
    # Choices kept in sync with the ``methods`` dict in __main__; previously
    # 'gradcamelementwise' was implemented there but rejected here.
    parser.add_argument('--method', type=str, default='gradcam',
                        choices=['gradcam', 'hirescam', 'gradcam++',
                                 'scorecam', 'xgradcam',
                                 'ablationcam', 'eigencam',
                                 'eigengradcam', 'layercam', 'fullgrad',
                                 'gradcamelementwise'],
                        help='Can be gradcam/gradcam++/scorecam/xgradcam'
                             '/ablationcam/eigencam/eigengradcam/layercam')
    args = parser.parse_args()

    # Fall back to CPU when CUDA was requested but is unavailable.
    args.use_cuda = args.use_cuda and torch.cuda.is_available()
    if args.use_cuda:
        print('Using GPU for acceleration')
    else:
        print('Using CPU for computation')
    return args
if __name__ == '__main__':
    """ python cam.py -image-path <path_to_image>
    Example usage of loading an image, and computing:
        1. CAM
        2. Guided Back Propagation
        3. Combining both
    """
    args = get_args()
    # Map CLI method names to pytorch_grad_cam CAM implementations.
    methods = \
        {"gradcam": GradCAM,
         "hirescam": HiResCAM,
         "scorecam": ScoreCAM,
         "gradcam++": GradCAMPlusPlus,
         "ablationcam": AblationCAM,
         "xgradcam": XGradCAM,
         "eigencam": EigenCAM,
         "eigengradcam": EigenGradCAM,
         "layercam": LayerCAM,
         "fullgrad": FullGrad,
         "gradcamelementwise": GradCAMElementWise}
    # Evaluated DARTS-style CIFAR-10 network under inspection (20 cells,
    # 36 initial channels, searched genotype).
    model = NetworkCIFAR(
        C=36,
        num_classes=10,
        layers=20,
        auxiliary=True,
        genotype=DARTS_newconv_epoch50)
    model.drop_path_prob = 0.2
    # Choose the target layer you want to compute the visualization for.
    # Usually this will be the last convolutional layer in the model.
    # Some common choices can be:
    # Resnet18 and 50: model._layers[-1]
    # VGG, densenet161: model.features[-1]
    # mnasnet1_0: model.layers[-1]
    # You can print the model to help chose the layer
    # You can pass a list with several target layers,
    # in that case the CAMs will be computed per layer and then aggregated.
    # You can also try selecting all layers of a certain type, with e.g:
    # from pytorch_grad_cam.utils.find_layers import find_layer_types_recursive
    # find_layer_types_recursive(model, [torch.nn.ReLU])
    # Here: last op of the last searched cell.
    target_layers = [model.cells[-1]._ops[-1]]
    NUM_CLASSES = 10
    # Onehot encode labels
    # imgUpsampled = extract_cifar10.upSampleCIFAR10()
    # image = imgUpsampled.transpose(1,2,0)
    # fig = plt.figure()
    # plt.plot(image)
    # fig.savefig('temp.png', dpi=fig.dpi)
    # Load the input image as float RGB in [0, 1] and normalize with
    # ImageNet statistics.
    rgb_img = cv2.imread(args.image_path, 1)[:, :, ::-1]
    rgb_img = np.float32(rgb_img) / 255
    input_tensor = preprocess_image(rgb_img,
                                    mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])
    # We have to specify the target we want to generate
    # the Class Activation Maps for.
    # If targets is None, the highest scoring category (for every member in the batch) will be used.
    # You can target specific categories by
    # targets = [e.g ClassifierOutputTarget(281)]
    targets = None
    # Using the with statement ensures the context is freed, and you can
    # recreate different CAM objects in a loop.
    cam_algorithm = methods[args.method]
    with cam_algorithm(model=model,
                       target_layers=target_layers,
                       use_cuda=args.use_cuda) as cam:
        # AblationCAM and ScoreCAM have batched implementations.
        # You can override the internal batch size for faster computation.
        cam.batch_size = 32
        grayscale_cam = cam(input_tensor=input_tensor,
                            targets=targets,
                            aug_smooth=args.aug_smooth,
                            eigen_smooth=args.eigen_smooth)
        # Here grayscale_cam has only one image in the batch
        grayscale_cam = grayscale_cam[0, :]
        cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
    # cam_image is RGB encoded whereas "cv2.imwrite" requires BGR encoding.
    cam_image = cv2.cvtColor(cam_image, cv2.COLOR_RGB2BGR)
    # Guided backprop saliency and its combination with the CAM mask.
    gb_model = GuidedBackpropReLUModel(model=model, use_cuda=args.use_cuda)
    gb = gb_model(input_tensor, target_category=None)
    cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
    cam_gb = deprocess_image(cam_mask * gb)
    gb = deprocess_image(gb)
    # Write the three visualizations next to the script.
    cv2.imwrite(f'{args.method}_cam.jpg', cam_image)
    cv2.imwrite(f'{args.method}_gb.jpg', gb)
    cv2.imwrite(f'{args.method}_cam_gb.jpg', cam_gb)
| 5,853 | 34.26506 | 100 | py |
PIBConv | PIBConv-main/cnn/test_cpath.py | import os
import sys
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkADP as Network
# for ADP dataset
from ADP_utils.classesADP import classesADP
parser = argparse.ArgumentParser("cpath")
parser.add_argument('--data', type=str, default='./data', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='ADP', help='choose dataset: ADP, BCSS, BACH, OS')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--model_path', type=str, default='EXP/weights.pt', help='path of trained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS_ADP_N4', help='choose network architecture: DARTS_ADP_N2, DARTS_ADP_N3, DARTS_ADP_N4')
parser.add_argument('--image_size', type=int, default=272, help='ADP image size')
# ADP only
parser.add_argument('--adp_level', type=str, default='L3', help='ADP level')
args = parser.parse_args()
# Console-only logging (evaluation runs do not add a file handler).
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
# Resolve the number of output classes for the chosen dataset; abort on an
# unknown dataset name.
if args.dataset == 'ADP':
    n_classes = classesADP[args.adp_level]['numClasses']
elif args.dataset == 'BCSS':
    n_classes = 10
elif args.dataset == 'BACH' or args.dataset == 'OS':
    n_classes = 4
else:
    logging.info('Unknown dataset!')
    sys.exit(1)
def main():
    """Evaluate a trained network on the held-out test split of the chosen dataset."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed every RNG source for a reproducible evaluation run.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Look up the searched cell genotype by name (e.g. genotypes.DARTS_ADP_N4).
    genotype = eval("genotypes.%s" % args.arch)
    logging.info('genotype = %s', genotype)
    # dataset: build train/test transforms and datasets for the chosen corpus
    if args.dataset == 'ADP':
        train_transform, test_transform = utils._data_transforms_adp(args)
        train_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='train')
        test_data = utils.ADP_dataset(level=args.adp_level, transform=test_transform, root=args.data, split='test')
    elif args.dataset == 'BCSS':
        train_transform, test_transform = utils._data_transforms_bcss(args)
        train_data = utils.BCSSDataset(root=args.data, split='train', transform=train_transform)
        test_data = utils.BCSSDataset(root=args.data, split='test', transform=test_transform)
    elif args.dataset == 'BACH':
        train_transform, test_transform = utils._data_transforms_bach(args)
        train_data = utils.BACH_transformed(root=args.data, split='train', transform=train_transform)
        test_data = utils.BACH_transformed(root=args.data, split='test', transform=test_transform)
    elif args.dataset == 'OS':
        train_transform, test_transform = utils._data_transforms_os(args)
        train_data = utils.OS_transformed(root=args.data, split='train', transform=train_transform)
        test_data = utils.OS_transformed(root=args.data, split='test', transform=test_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    # train_queue is only used here to derive per-class loss weights below.
    dataset_size = len(train_queue.dataset)
    # criterion
    # ADP and BCSS are multi-label datasets
    # Use MultiLabelSoftMarginLoss, weighting each class inversely to its frequency
    if args.dataset == 'ADP' or args.dataset == 'BCSS':
        train_class_counts = np.sum(train_queue.dataset.class_labels, axis=0)
        weightsBCE = dataset_size / train_class_counts
        weightsBCE = torch.as_tensor(weightsBCE, dtype=torch.float32).to(int(args.gpu))
        criterion = torch.nn.MultiLabelSoftMarginLoss(weight=weightsBCE).cuda()
    # BACH and OS are single-label datasets
    # Use CrossEntropyLoss
    elif args.dataset == 'BACH' or args.dataset == 'OS':
        criterion = nn.CrossEntropyLoss()
        criterion = criterion.cuda()
    # model
    # NOTE(review): args.init_channels and args.layers are not defined by the
    # argument parser in this file -- confirm they are added, otherwise this
    # line fails with an AttributeError before the weights are even loaded.
    model = Network(args.init_channels, n_classes, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)
    model.drop_path_prob = args.drop_path_prob
    logging.info("param size = %fM", utils.count_parameters_in_MB(model))
    test_acc1, test_acc5, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc_1 %f, test_acc_5 %f', test_acc1, test_acc5)
def infer(valid_queue, model, criterion):
    """Run one evaluation pass over *valid_queue*.

    Returns (top1, top5, mean_loss). For the multi-label datasets (ADP, BCSS)
    the meters accumulate raw per-label hit counts, so the averages are
    normalised explicitly; for BACH/OS the AverageMeter averages are used.
    """
    loss_meter = utils.AverageMeter()
    acc1_meter = utils.AverageMeter()
    acc5_meter = utils.AverageMeter()
    multilabel = args.dataset in ('ADP', 'BCSS')

    def _averages(sample_count):
        # Normalise the multi-label hit counts by samples (and labels for top-1).
        if multilabel:
            return (acc1_meter.sum_accuracy.cpu().item() / (sample_count * n_classes),
                    acc5_meter.sum_accuracy.cpu().item() / sample_count)
        return acc1_meter.avg, acc5_meter.avg

    model.eval()
    seen = 0
    with torch.no_grad():
        for step, (images, labels) in enumerate(valid_queue):
            images = images.cuda()
            labels = labels.cuda(non_blocking=True)
            logits, _ = model(images)
            batch_loss = criterion(logits, labels)
            batch = images.size(0)
            seen += batch
            if multilabel:
                # Threshold the sigmoid outputs at 0.5 for hard predictions.
                hard_preds = (nn.Sigmoid()(logits) > 0.5).int()
                acc1, acc5 = utils.accuracyADP(hard_preds, labels)
                loss_meter.update(batch_loss.item(), batch)
                acc1_meter.update(acc1.double(), batch)
                acc5_meter.update(acc5.double(), batch)
            elif args.dataset in ('BACH', 'OS'):
                acc1, acc5 = utils.accuracy(logits, labels, topk=(1, min(5, n_classes)))
                loss_meter.update(batch_loss.item(), batch)
                acc1_meter.update(acc1.item(), batch)
                acc5_meter.update(acc5.item(), batch)
            # report validation loss
            if step % args.report_freq == 0:
                run1, run5 = _averages(seen)
                logging.info('valid %03d %e %f %f', step, loss_meter.avg, run1, run5)
    print('infered_data_size:', seen)
    print('valid_data_size:', len(valid_queue.dataset))
    final1, final5 = _averages(len(valid_queue.dataset))
    return final1, final5, loss_meter.avg
# Run evaluation when this file is executed as a script.
if __name__ == '__main__':
    main()
| 7,551 | 42.154286 | 141 | py |
PIBConv | PIBConv-main/cnn/train_search_rmsgd.py | from operator import index
import os
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3"
print("bruv")
import sys
import time
import glob
import utils
import logging
import argparse
import numpy as np
import pandas as pd
import pickle
import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from copy import deepcopy
from numpy import linalg as LA
from torch.autograd import Variable
from model_search import Network
from architect import Architect
from adas import Adas
from adas.metrics import Metrics
from rmsgd import RMSGD
# for ADP dataset
from ADP_utils.classesADP import classesADP
# Command-line interface and module-level configuration for architecture search.
parser = argparse.ArgumentParser("adaptive_darts")
####################
# Dataset
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='ADP-Release1', help='valid datasets: cifar10, cifar100, ADP-Release1')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--image_size', type=int, default=64, help='CPATH image size')
# color augmentation
parser.add_argument('--color_aug', action='store_true', default=False, help='use color augmentation')
parser.add_argument('--color_distortion', type=float, default=0.3, help='color distortion param')
# For ADP dataset only
parser.add_argument('--adp_level', type=str, default='L3', help='ADP level')
####################
# Training details
parser.add_argument('--gpu', type=str, default='0', help='gpu device id')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--learnable_bn', action='store_true', default=False, help='learnable parameters in batch normalization')
# Gumbel-softmax
parser.add_argument('--gumbel', action='store_true', default=False, help='use or not Gumbel-softmax trick')
parser.add_argument('--tau_max', type=float, default=10.0, help='initial tau')
parser.add_argument('--tau_min', type=float, default=1.0, help='minimum tau')
# Adas optimizer
parser.add_argument('--adas', action='store_true', default=False, help='whether or not to use adas optimizer')
parser.add_argument('--scheduler_beta', type=float, default=0.98, help='beta for lr scheduler')
parser.add_argument('--scheduler_p', type=int, default=1, help='p for lr scheduler')
parser.add_argument('--step_size', type=int, default=50, help='step_size for dropping lr')
parser.add_argument('--gamma', type=float, default=1.0, help='gamma for dropping lr')
# RM-SGD optimizer
parser.add_argument('--rmsgd', action='store_true', default=False, help='whether or not to use RM-SGD optimizer')
####################
# Model details
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=4, help='total number of layers')
parser.add_argument('--node', type=int, default=4, help='number of nodes in a cell')
####################
# Others
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--file_name', type=str, default='_', help='metrics and weights data file name')
args = parser.parse_args()

# Each run gets a timestamped experiment directory with a copy of the sources.
args.save = 'Search-{}-data-{}-{}'.format(args.save, args.dataset, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

# Log to stdout and mirror everything into <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)

# Number of output classes (and CIFAR folder name) per dataset.
if args.dataset == 'cifar100':
    n_classes = 100
    data_folder = 'cifar-100-python'
elif args.dataset == 'cifar10':
    n_classes = 10
    data_folder = 'cifar-10-batches-py'
elif args.dataset == 'ADP-Release1':
    n_classes = classesADP[args.adp_level]['numClasses']
else:
    logging.info('dataset not supported')
    sys.exit(1)

# Flipped to True in main() when more than one GPU id is given.
is_multi_gpu = False
def main():
    """Run differentiable architecture search (DARTS) with an optional Adas or
    RM-SGD weight optimizer, logging per-epoch genotypes, probing metrics and
    softmaxed architecture weights to Excel files under ../save_data<N>/.
    """
    global is_multi_gpu
    gpus = [int(i) for i in args.gpu.split(',')]
    logging.info('gpus = %s' % gpus)
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    if len(gpus) == 1:
        torch.cuda.set_device(int(args.gpu))
        # with torch.cuda.device('cuda:0'):
        # torch.cuda.empty_cache()
    else:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        is_multi_gpu = True
    if args.layers <= 2:
        logging.info('Minimmum number of layers is 2')
        sys.exit(1)
    # Seed all RNG sources for reproducibility.
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %s' % args.gpu)
    logging.info("args = %s", args)
    # load dataset
    if args.dataset == 'cifar100':
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
        train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    elif args.dataset == 'cifar10':
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
        train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    elif args.dataset == 'ADP-Release1':
        train_transform, valid_transform = utils._data_transforms_adp(args)
        train_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='train_search', portion=args.train_portion)
        valid_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='valid_search', portion=args.train_portion)
    # CIFAR: split the single training set into search-train / search-valid
    # halves; ADP ships with dedicated search splits.
    if args.dataset in ['cifar100', 'cifar10']:
        num_train = len(train_data)
        indices = list(range(num_train))
        split = int(np.floor(args.train_portion * num_train))
        train_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
            pin_memory=True, num_workers=0)
        valid_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
            pin_memory=True, num_workers=0)
    elif args.dataset == 'ADP-Release1':
        train_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.RandomSampler(train_data),
            pin_memory=True, num_workers=0)
        valid_queue = torch.utils.data.DataLoader(
            valid_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.RandomSampler(valid_data),
            pin_memory=True, num_workers=0)
    # build network
    if args.dataset in ['cifar100', 'cifar10']:
        criterion = nn.CrossEntropyLoss()
        criterion = criterion.cuda()
    elif args.dataset == 'ADP-Release1':
        # Multi-label loss, each class weighted inversely to its frequency.
        dataset_size = len(train_queue.dataset)
        print('train dataset size:', len(train_queue.dataset))
        print('valid dataset size:', len(valid_queue.dataset))
        train_class_counts = np.sum(train_queue.dataset.class_labels, axis=0)
        weightsBCE = dataset_size / train_class_counts
        weightsBCE = torch.as_tensor(weightsBCE, dtype=torch.float32).to(int(args.gpu))
        criterion = torch.nn.MultiLabelSoftMarginLoss(weight=weightsBCE).cuda()
    model = Network(args.init_channels, n_classes, args.layers, criterion, learnable_bn=args.learnable_bn, steps=args.node, multiplier=args.node)
    if is_multi_gpu:
        model = nn.DataParallel(model)
    model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # Separate the architecture (alpha) parameters from the network weights so
    # the weight optimizer never touches the alphas (those belong to Architect).
    arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
    arch_params = list(map(id, arch_parameters))
    model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
    model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
    # Optimizer for model weights update
    # Use Adas: optimizer and scheduler
    if args.adas:
        optimizer = Adas(params=list(model_params),
                         lr=args.learning_rate,
                         beta=args.scheduler_beta,
                         step_size=args.step_size,
                         gamma=args.gamma,
                         momentum=args.momentum,
                         weight_decay=args.weight_decay)
    # Use RM-SGD
    elif args.rmsgd:
        optimizer = RMSGD(
            params=list(model_params),
            lr=args.learning_rate,
            beta=args.scheduler_beta,
            step_size=args.step_size,
            linear=False,
            gamma=args.gamma,
            momentum=args.momentum,
            dampening=0,
            weight_decay=args.weight_decay,
            nesterov=False
        )
    # Use SGD: default in DARTS paper
    else:
        optimizer = torch.optim.SGD(
            model_params,
            args.learning_rate,
            momentum=args.momentum,
            weight_decay=args.weight_decay)
    # Cosine schedule is only actually stepped in the plain-SGD branch below.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)
    architect = Architect(model, criterion, args)
    if not args.adas and not args.rmsgd:
        # record probing metrics (Adas/RMSGD compute these internally)
        arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        metrics = Metrics(params=list(model_params))
    # files to record searching results
    performance_statistics = {}
    arch_statistics = {}
    genotype_statistics = {}
    index = 0
    dir_path = r'../save_data'
    # Probe ../save_data0, ../save_data1, ... for the first unused suffix.
    while (os.path.isdir(dir_path + str(index))):
        index += 1
    print(dir_path + str(index))
    dir_path = dir_path + str(index)
    # make the new dir without overwriting previous data
    os.mkdir(dir_path)
    metrics_path = dir_path + '/metrics_stat_{}.xlsx'.format(args.file_name)
    weights_path = dir_path + '/weights_stat_{}.xlsx'.format(args.file_name)
    genotypes_path = dir_path + '/genotypes_stat_{}.xlsx'.format(args.file_name)
    print(genotypes_path)
    errors_dict = {'train_acc_1': [], 'train_loss': [], 'valid_acc_1': [], 'valid_loss': [],
                   'train_acc_5': [], 'valid_acc_5': []}
    for epoch in range(args.epochs):
        # Adas/RMSGD expose a per-parameter lr vector; plain SGD uses the
        # scalar cosine-annealed lr.
        if args.adas:
            lr = optimizer.lr_vector
        elif args.rmsgd:
            lr = optimizer.lr_vector
            # logging.info('epoch %d lr %e', epoch, lr)
        else:
            # NOTE(review): scheduler.step() before the epoch's training follows
            # the pre-1.1 PyTorch ordering; confirm against the installed
            # version (get_lr() is also deprecated in favour of get_last_lr()).
            scheduler.step()
            lr = scheduler.get_lr()[0]
            logging.info('epoch %d lr %e', epoch, lr)
        genotype = model.module.genotype() if is_multi_gpu else model.genotype()
        logging.info('epoch: %d', epoch)
        logging.info('genotype = %s', genotype)
        # training
        train_acc_1, train_acc_5, train_obj = train(epoch, train_queue, valid_queue,
                                                    model, architect, criterion,
                                                    optimizer, lr)
        print('\n')
        logging.info('train_acc_1 %f, train_acc_5 %f', train_acc_1, train_acc_5)
        # validation
        valid_acc_1, valid_acc_5, valid_obj = infer(valid_queue, model, criterion)
        print('\n')
        logging.info('valid_acc_1 %f, valid_acc_5 %f', valid_acc_1, valid_acc_5)
        # update the errors dictionary
        errors_dict['train_acc_1'].append(train_acc_1)
        errors_dict['train_loss'].append(train_obj)
        errors_dict['valid_acc_1'].append(valid_acc_1)
        errors_dict['valid_loss'].append(valid_obj)
        errors_dict['valid_acc_5'].append(valid_acc_5)
        errors_dict['train_acc_5'].append(train_acc_5)
        # update network metrics (knowledge gain, condition mapping, etc)
        if args.adas:
            # AdaS: update learning rates
            optimizer.epoch_step(epoch)
            io_metrics = optimizer.KG
            lr_metrics = optimizer.velocity
        # added RM-SGD optimizer
        elif args.rmsgd:
            optimizer.epoch_step()
            io_metrics = optimizer.KG
            lr_metrics = optimizer.velocity
        else:
            metrics()
            io_metrics = metrics.KG(epoch)
            lr_metrics = None
        # weights: softmaxed alphas for both cell types, moved to numpy
        weights_normal = F.softmax(model.module.alphas_normal if is_multi_gpu else model.alphas_normal, dim=-1).detach().cpu().numpy()
        weights_reduce = F.softmax(model.module.alphas_reduce if is_multi_gpu else model.alphas_reduce, dim=-1).detach().cpu().numpy()
        # write data to excel files
        write_data(epoch, io_metrics, lr_metrics, weights_normal, weights_reduce, genotype,
                   performance_statistics, arch_statistics, genotype_statistics,
                   metrics_path, weights_path, genotypes_path)
        # save model parameters
        save_model = model.module if is_multi_gpu else model
        utils.save(save_model, os.path.join(args.save, 'weights.pt'))
        """
        ADDED BY LOUIS:
        """
        # save errors_dict to pickle file (rewritten every epoch so a crash
        # keeps the curves recorded so far)
        with open(dir_path + '/errors_dict.pkl', 'wb') as f:
            pickle.dump(errors_dict, f)
def train(epoch, train_queue, valid_queue, model, architect, criterion, optimizer, lr):
    """One search epoch: for every training batch, take an architecture (alpha)
    step on a validation batch, then a weight step on the training batch.

    Returns (top1, top5, mean_loss) over the epoch; for ADP the top-k figures
    are normalised from raw hit counts rather than taken from AverageMeter.
    """
    global is_multi_gpu
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    trained_data_size = 0
    for step, (input, target) in enumerate(train_queue):
        # one mini-batch
        print('\rtrain mini batch {:03d}'.format(step), end=' ')
        model.train()
        n = input.size(0)
        trained_data_size += n
        if args.gumbel:
            # Anneal the Gumbel-softmax temperature linearly from tau_max
            # down to tau_min across the training epochs.
            model.module.set_tau(args.tau_max - epoch * 1.0 / args.epochs * (args.tau_max - args.tau_min)) if is_multi_gpu \
                else model.set_tau(args.tau_max - epoch * 1.0 / args.epochs * (args.tau_max - args.tau_min))
        input = input.cuda()
        target = target.cuda()
        # get a random minibatch from the search queue with replacement
        # NOTE(review): iter(valid_queue) builds a fresh iterator each step, so
        # this always draws the first batch of a new pass -- confirm intended.
        input_search, target_search = next(iter(valid_queue))
        input_search = input_search.cuda()
        target_search = target_search.cuda()
        # Outer (bi-level) step: update the alphas on the validation batch.
        architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)
        # Inner step: update the network weights on the training batch.
        optimizer.zero_grad()
        logits = model(input, gumbel=args.gumbel)
        loss = criterion(logits, target)
        loss.backward()
        # Clip only the weight gradients; alphas are handled by Architect.
        arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        nn.utils.clip_grad_norm_(model_params, args.grad_clip)
        optimizer.step()
        if args.dataset in ['cifar100', 'cifar10']:
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
        elif args.dataset == 'ADP-Release1':
            # Multi-label: threshold the sigmoid outputs at 0.5.
            m = nn.Sigmoid()
            preds = (m(logits) > 0.5).int()
            prec1, prec5 = utils.accuracyADP(preds, target)
            objs.update(loss.item(), n)
            top1.update(prec1.double(), n)
            top5.update(prec5.double(), n)
        if step % args.report_freq == 0:
            print('\n')
            if args.dataset in ['cifar100', 'cifar10']:
                objs_avg = objs.avg
                top1_avg = top1.avg
                top5_avg = top5.avg
            elif args.dataset == 'ADP-Release1':
                # Normalise raw hit counts by the samples seen so far.
                objs_avg = objs.avg
                top1_avg = (top1.sum_accuracy.cpu().item() / (trained_data_size * n_classes))
                top5_avg = (top5.sum_accuracy.cpu().item() / trained_data_size)
            logging.info('train %03d %e %f %f', step, objs_avg, top1_avg, top5_avg)
    # Epoch-level averages over the whole training split.
    if args.dataset in ['cifar100', 'cifar10']:
        objs_avg = objs.avg
        top1_avg = top1.avg
        top5_avg = top5.avg
    elif args.dataset == 'ADP-Release1':
        objs_avg = objs.avg
        top1_avg = (top1.sum_accuracy.cpu().item() / (len(train_queue.dataset) * n_classes))
        top5_avg = (top5.sum_accuracy.cpu().item() / len(train_queue.dataset))
    return top1_avg, top5_avg, objs_avg
def infer(valid_queue, model, criterion):
    """Evaluate the shared-weight search model on the validation queue.

    Returns (top1, top5, mean_loss). CIFAR figures come straight from the
    AverageMeters; ADP meters hold raw hit counts and are normalised here.
    """
    global is_multi_gpu
    loss_meter = utils.AverageMeter()
    acc1_meter = utils.AverageMeter()
    acc5_meter = utils.AverageMeter()
    model.eval()
    # for ADP dataset
    preds = 0
    seen = 0

    def _averages(denominator):
        # CIFAR uses the meter averages; ADP normalises raw hit counts.
        if args.dataset in ['cifar100', 'cifar10']:
            return acc1_meter.avg, acc5_meter.avg
        return (acc1_meter.sum_accuracy.cpu().item() / (denominator * n_classes),
                acc5_meter.sum_accuracy.cpu().item() / denominator)

    with torch.no_grad():
        for step, (images, labels) in enumerate(valid_queue):
            print('\rinfer mini batch {:03d}'.format(step), end=' ')
            images = images.cuda()
            labels = labels.cuda()
            logits = model(images)
            batch_loss = criterion(logits, labels)
            batch = images.size(0)
            seen += batch
            if args.dataset in ['cifar100', 'cifar10']:
                acc1, acc5 = utils.accuracy(logits, labels, topk=(1, 5))
                loss_meter.update(batch_loss.item(), batch)
                acc1_meter.update(acc1.item(), batch)
                acc5_meter.update(acc5.item(), batch)
            elif args.dataset == 'ADP-Release1':
                # Threshold the sigmoid outputs at 0.5 for hard predictions.
                preds = (nn.Sigmoid()(logits) > 0.5).int()
                acc1, acc5 = utils.accuracyADP(preds, labels)
                loss_meter.update(batch_loss.item(), batch)
                acc1_meter.update(acc1.double(), batch)
                acc5_meter.update(acc5.double(), batch)
            if step % args.report_freq == 0:
                print('\n')
                run1, run5 = _averages(seen)
                logging.info('valid %03d %e %f %f', step, loss_meter.avg, run1, run5)
    final1, final5 = _averages(len(valid_queue.dataset))
    return final1, final5, loss_meter.avg
def write_data(epoch, net_metrics, lr_metrics, weights_normal, weights_reduce, genotype,
               perform_stat, arch_stat, genotype_stat, metrics_path, weights_path, genotypes_path):
    """Append this epoch's genotype, probing metrics and softmaxed alpha weights
    to the running statistics dicts, rewriting each Excel file in full."""
    # genotype
    genotype_stat['epoch_{}'.format(epoch)] = [genotype]
    pd.DataFrame(data=genotype_stat).to_excel(genotypes_path)

    # probing (knowledge-gain) metrics; Adas additionally exposes per-layer lr
    perform_stat['S_epoch_{}'.format(epoch)] = net_metrics
    if args.adas:
        perform_stat['learning_rate_epoch_{}'.format(epoch)] = lr_metrics
    pd.DataFrame(data=perform_stat).to_excel(metrics_path)

    # alpha weights: one column per candidate operation, normal cells first,
    # then reduce cells, matching the op order of the alpha matrices
    op_names = ['none', 'skip_connect']
    for conv_idx in (1, 2, 3, 4):
        for kernel in ('3x3', '5x5', '7x7'):
            op_names.append('sep_conv{}_{}'.format(conv_idx, kernel))
    for cell_type, weights in (('normal', weights_normal), ('reduce', weights_reduce)):
        for col, op in enumerate(op_names):
            arch_stat['{}_{}_epoch{}'.format(cell_type, op, epoch)] = weights[:, col]
    pd.DataFrame(data=arch_stat).to_excel(weights_path)
# Kick off architecture search when this file is executed as a script.
if __name__ == '__main__':
    main()
| 24,566 | 42.713523 | 153 | py |
PIBConv | PIBConv-main/cnn/architect.py | import torch
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from collections import OrderedDict
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
class Architect(object):
    def __init__(self, model, criterion, args):
        """Wrap *model* with an Adam optimizer over its architecture (alpha) parameters.

        Args:
            model: search network exposing arch_parameters(); may later be
                DataParallel-wrapped, hence the is_multi_gpu dispatch.
            criterion: loss used for the architecture gradient.
            args: parsed command-line namespace (gpu, momentum, weight_decay,
                arch_learning_rate, arch_weight_decay, adas, gumbel, grad_clip).
        """
        gpus = [int(i) for i in args.gpu.split(',')]
        self.is_multi_gpu = True if len(gpus) > 1 else False
        # Momentum/weight decay of the *weight* optimizer; needed to emulate
        # its update when building the unrolled model.
        self.network_momentum = args.momentum
        self.network_weight_decay = args.weight_decay
        self.model = model
        self.criterion = criterion
        self.adas = args.adas
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        # Alphas get their own Adam optimizer, independent of the weights.
        self.optimizer = torch.optim.Adam(arch_parameters,
                                          lr=args.arch_learning_rate, betas=(0.5, 0.999),
                                          weight_decay=args.arch_weight_decay)
        self.gumbel = args.gumbel
        self.grad_clip = args.grad_clip
    def _compute_unrolled_model(self, input, target, lr_vector, network_optimizer):
        """Build a copy of the model whose weights have taken one virtual step
        w' = w - lr * (dwLtrain(w, alpha) + weight_decay * w + momentum buffer),
        leaving the real model untouched (second-order DARTS, eqn. w').
        """
        logits = self.model(input, self.gumbel)
        loss = self.criterion(logits, target)
        # Flattened alpha values.
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_theta = _concat(arch_parameters).data
        # Flattened weight values: every parameter that is not an alpha.
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        model_theta = _concat(model_params).data
        # Momentum buffers of the weight optimizer; before the first optimizer
        # step no buffers exist and the (deliberately broad) except falls back
        # to zeros.
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        try:
            moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in model_params).mul_(
                self.network_momentum)
        except:
            moment = torch.zeros_like(model_theta)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = list(filter(lambda p: id(p) not in arch_params, model_parameters))
        # using gumbel-softmax:
        # for unused ops there will be no gradient and this needs to be handled
        if self.gumbel:
            dtheta = _concat([grad_i + self.network_weight_decay * theta_i if grad_i is not None
                              else self.network_weight_decay * theta_i
                              for grad_i, theta_i in
                              zip(torch.autograd.grad(loss, model_params, allow_unused=True), model_params)])
        # not using gumbel-softmax
        else:
            dtheta = _concat([grad_i + self.network_weight_decay * theta_i
                              for grad_i, theta_i in
                              zip(torch.autograd.grad(loss, model_params), model_params)])
        # Adas: apply each parameter's own learning rate from lr_vector.
        if self.adas:
            iteration_p = 0
            offset_p = 0
            offset_dp = 0
            arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
            arch_params = list(map(id, arch_parameters))
            model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
            model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
            for p in model_params:
                p_length = np.prod(p.size())
                lr = lr_vector[iteration_p]
                d_p = moment[offset_p: offset_p + p_length] + \
                    dtheta[offset_dp: offset_dp + p_length]
                model_theta[offset_p: offset_p + p_length].sub_(d_p, alpha=lr)
                offset_p += p_length
                offset_dp += p_length
                iteration_p += 1
        # original DARTS: scalar lr for every weight.
        # NOTE(review): this is the deprecated two-positional-arg form of
        # Tensor.sub_, i.e. model_theta -= lr_vector * (moment + dtheta);
        # confirm it still behaves that way on the installed PyTorch.
        else:
            model_theta.sub_(lr_vector, moment + dtheta)
        # Rebuild a full model from [alphas, stepped weights].
        theta = torch.cat([arch_theta, model_theta])
        unrolled_model = self._construct_model_from_theta(theta)
        return unrolled_model
def step(self, input_train, target_train, input_valid, target_valid, lr, network_optimizer, unrolled):
self.optimizer.zero_grad()
if unrolled:
self._backward_step_unrolled(input_train, target_train, input_valid, target_valid, lr, network_optimizer)
else:
self._backward_step(input_valid, target_valid)
# Add gradient clipping for gumbel-softmax because it leads to gradients with high magnitude
if self.gumbel:
arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
torch.nn.utils.clip_grad_norm_(arch_parameters, self.grad_clip)
self.optimizer.step()
def _backward_step(self, input_valid, target_valid):
logits = self.model(input_valid, self.gumbel)
loss = self.criterion(logits, target_valid)
loss.backward()
    def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid, lr, network_optimizer):
        """Second-order DARTS alpha gradient (paper eqns. 6-8)."""
        # eqn(6):dαLval(w',α) ,where w' = w − ξ*dwLtrain(w, α)
        # compute w'
        unrolled_model = self._compute_unrolled_model(input_train, target_train, lr,
                                                      network_optimizer)  # unrolled_model: w -> w'
        # compute Lval: validation loss
        logits = unrolled_model(input_valid, self.gumbel)
        unrolled_loss = self.criterion(logits, target_valid)
        unrolled_loss.backward()
        # compute dαLval(w',α)
        unrolled_arch_parameters = unrolled_model.module.arch_parameters() if self.is_multi_gpu else unrolled_model.arch_parameters()
        dalpha = [v.grad for v in unrolled_arch_parameters]  # grad wrt alpha
        # compute dw'Lval(w',α)
        # gumbel-softmax
        unrolled_arch_parameters = unrolled_model.module.arch_parameters() if self.is_multi_gpu else unrolled_model.arch_parameters()
        unrolled_arch_params = list(map(id, unrolled_arch_parameters))
        unrolled_model_parameters = unrolled_model.module.parameters() if self.is_multi_gpu else unrolled_model.parameters()
        unrolled_model_params = filter(lambda p: id(p) not in unrolled_arch_params, unrolled_model_parameters)
        if self.gumbel:
            vector = []
            for v in unrolled_model_params:
                if v.grad is not None:
                    # used operation by Gumbel-softmax
                    vector.append(v.grad.data)
                else:
                    # unused operation by Gumbel-softmax: no grad, use zeros
                    vector.append(torch.zeros_like(v))
        else:
            vector = [v.grad.data for v in unrolled_model_params]
        # Adas: use different etas for different w's
        if self.adas:
            for i, p in enumerate(vector):
                p.mul_(lr[i])
        # eqn(8): (dαLtrain(w+,α)-dαLtrain(w-,α))/(2*epsilon)
        # where w+=w+dw'Lval(w',α)*epsilon   w- = w-dw'Lval(w',α)*epsilon
        implicit_grads = self._hessian_vector_product(vector, input_train, target_train)
        # eqn(6)-eqn(8): dαLval(w',α)-(dαLtrain(w+,α)-dαLtrain(w-,α))/(2*epsilon)
        for g, ig in zip(dalpha, implicit_grads):
            # g.data.sub_(ig.data, alpha=eta)
            g.data.sub_(ig.data)
        # update α: copy the finished gradient onto the real model's alphas so
        # the Adam step in step() applies it.
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        for v, g in zip(arch_parameters, dalpha):
            if v.grad is None:
                v.grad = Variable(g.data)
            else:
                v.grad.data.copy_(g.data)
    def _construct_model_from_theta(self, theta):
        """Build a fresh copy of the search network whose weights are taken
        from the flat vector ``theta``.

        Used by the unrolled (second-order) DARTS update: ``theta`` is the
        concatenation of all model weights (typically w - xi*dwLtrain), and
        the returned network is the "virtual step" model w'.

        Args:
            theta: 1-D tensor containing every weight of ``self.model``,
                concatenated in ``named_parameters()`` order.

        Returns:
            A new CUDA model (wrapped in ``nn.DataParallel`` when running
            multi-GPU) loaded with the unflattened weights.
        """
        model_new = self.model.module.new() if self.is_multi_gpu else self.model.new()
        model_dict = self.model.module.state_dict() if self.is_multi_gpu else self.model.state_dict()
        # Slice theta back into per-parameter tensors, walking parameters in
        # the same order they were flattened.
        params, offset = {}, 0
        named_parameters = self.model.module.named_parameters() if self.is_multi_gpu else self.model.named_parameters()
        for k, v in named_parameters:
            v_length = np.prod(v.size())
            params[k] = theta[offset: offset + v_length].view(v.size())
            offset += v_length
        # Sanity check: theta must account for every parameter exactly once.
        assert offset == len(theta)
        model_dict.update(params)
        if self.is_multi_gpu:
            # DataParallel prefixes keys with 'module.'; rewrite keys so the
            # state dict matches the wrapped model's naming scheme.
            new_state_dict = OrderedDict()
            for k, v in model_dict.items():
                if 'module' not in k:
                    k = 'module.' + k
                else:
                    k = k.replace('features.module.', 'module.features.')
                new_state_dict[k] = v
        else:
            new_state_dict = model_dict
        if self.is_multi_gpu:
            model_new = nn.DataParallel(model_new)
            cudnn.benchmark = True
        model_new.load_state_dict(new_state_dict)
        return model_new.cuda()
    def _hessian_vector_product(self, vector, input, target, r=1e-2):
        """Finite-difference approximation of the Hessian-vector product used
        by the second-order DARTS architecture gradient (eqn (8) of the DARTS
        paper).

        The model's weights are perturbed in place by +R*vector and
        -R*vector, the architecture gradient of the training loss is taken at
        both points, and the weights are restored afterwards.

        Args:
            vector: per-weight direction dw'Lval(w', alpha), same ordering as
                the model's (non-architecture) parameters.
            input, target: a training mini-batch.
            r: scale factor for the perturbation step R = r / ||vector||.

        Returns:
            List of tensors (dalpha_train(w+) - dalpha_train(w-)) / (2R),
            one per architecture parameter.
        """
        R = r / _concat(vector).norm()
        # eqn(8): dαLtrain(w+,α)
        # Architecture params are excluded by id() so only weights move.
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        # compute w+ in eqn(8): w+ = w + dw'Lval(w',α) * epsilon
        for p, v in zip(model_params, vector):
            p.data.add_(v, alpha=R)
        logits = self.model(input, self.gumbel)
        loss = self.criterion(logits, target)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        grads_p = torch.autograd.grad(loss, arch_parameters)
        # eqn(8): dαLtrain(w-,α)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        # compute w- in eqn(8): w- = w - dw'Lval(w',α) * epsilon
        # (subtract 2R: from w+R down to w-R in a single in-place step)
        for p, v in zip(model_params, vector):
            p.data.sub_(v, alpha=2 * R)
        logits = self.model(input, self.gumbel)
        loss = self.criterion(logits, target)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        grads_n = torch.autograd.grad(loss, arch_parameters)
        # recover w back
        # (add R: from w-R back to the original w)
        arch_parameters = self.model.module.arch_parameters() if self.is_multi_gpu else self.model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = self.model.module.parameters() if self.is_multi_gpu else self.model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        for p, v in zip(model_params, vector):
            p.data.add_(v, alpha=R)
        return [(x - y).div_(2 * R) for x, y in zip(grads_p, grads_n)]
| 11,819 | 48.456067 | 133 | py |
PIBConv | PIBConv-main/cnn/train_imagenet.py | import os
import sys
import numpy as np
import time
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkImageNet as Network
# Command-line interface for ImageNet evaluation training of a searched cell.
parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.1, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-5, help='weight decay')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=250, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5., help='gradient clipping')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('--gamma', type=float, default=0.97, help='learning rate decay')
parser.add_argument('--decay_period', type=int, default=1, help='epochs between two learning rate decays')
parser.add_argument('--parallel', action='store_true',default=False, help='data parallelism')
args = parser.parse_args()
# Each run gets a fresh timestamped experiment directory with a code snapshot.
args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Log to both stdout and <save>/log.txt with the same format.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Number of ImageNet classes.
CLASSES = 1000
class CrossEntropyLabelSmooth(nn.Module):
    """Cross-entropy loss with uniform label smoothing.

    The hard one-hot target is blended with a uniform distribution:
    (1 - epsilon) * one_hot + epsilon / num_classes, and the loss is the
    class-wise batch mean of the negative log-likelihood, summed over classes.
    """

    def __init__(self, num_classes, epsilon):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        # Log-probabilities over classes, shape (batch, num_classes).
        log_probs = self.logsoftmax(inputs)
        # One-hot encode the integer class labels.
        one_hot = torch.zeros_like(log_probs)
        one_hot.scatter_(1, targets.unsqueeze(1), 1)
        # Smooth: shrink the true class toward a uniform floor.
        smoothed = one_hot * (1 - self.epsilon) + self.epsilon / self.num_classes
        return (-smoothed * log_probs).mean(0).sum()
def main():
    """Train the searched architecture on ImageNet and track the best top-1.

    Side effects: creates CUDA context, reads the dataset from args.data,
    writes checkpoints to args.save via utils.save_checkpoint.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed NumPy and both CPU/GPU torch RNGs for (partial) reproducibility;
    # cudnn.benchmark=True still introduces nondeterminism.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Look up the genotype by name from genotypes.py (args.arch must be an
    # attribute defined there).
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES,
                    args.layers, args.auxiliary, genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # Plain CE for evaluation, label-smoothed CE for training.
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)
    # Exponential-style decay: multiply lr by gamma every decay_period epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, args.decay_period, gamma=args.gamma)
    best_acc_top1 = 0
    for epoch in range(args.epochs):
        # NOTE(review): scheduler.step() before training follows the pre-1.1
        # PyTorch scheduler convention; newer versions expect it after the
        # optimizer steps — confirm against the pinned torch version.
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Drop-path probability is ramped linearly over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(
            train_queue, model, criterion_smooth, optimizer)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        # Checkpoint every epoch; utils.save_checkpoint also keeps a copy of
        # the best-so-far model when is_best is set.
        utils.save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc_top1': best_acc_top1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.save)
def train(train_queue, model, criterion, optimizer):
    """Run one ImageNet training epoch.

    Args:
        train_queue: DataLoader yielding (image, label) batches.
        model: network returning (logits, aux_logits).
        criterion: loss applied to logits (and aux logits if enabled).
        optimizer: weight optimizer to step per batch.

    Returns:
        (top-1 accuracy, average loss) over the epoch.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        # `async=True` became a SyntaxError in Python 3.7; the replacement
        # keyword is non_blocking (overlaps H2D copy when pin_memory is on).
        target = target.cuda(non_blocking=True)
        input = input.cuda()
        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            # Auxiliary tower regularization, weighted as in the DARTS paper.
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight*loss_aux
        loss.backward()
        # clip_grad_norm was renamed to the in-place clip_grad_norm_ in
        # PyTorch 0.4 and later removed.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        # .item() replaces the removed 0-dim indexing `tensor.data[0]`.
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step,
                         objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate on the ImageNet validation set.

    Returns:
        (top-1 accuracy, top-5 accuracy, average loss).
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    # torch.no_grad() replaces the removed `Variable(..., volatile=True)`
    # API and disables autograd bookkeeping during evaluation.
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            # `async=True` is a SyntaxError on Python >= 3.7; use non_blocking.
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            # .item() replaces the removed 0-dim indexing `tensor.data[0]`.
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step,
                             objs.avg, top1.avg, top5.avg)
    return top1.avg, top5.avg, objs.avg
# Script entry point: run ImageNet training when invoked directly.
if __name__ == '__main__':
    main()
| 8,636 | 34.9875 | 106 | py |
PIBConv | PIBConv-main/cnn/train_search_adas.py | import os
import sys
import time
import glob
import utils
import logging
import argparse
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import pickle
import gc
from copy import deepcopy
from numpy import linalg as LA
from torch.autograd import Variable
from model_search import Network
from architect import Architect
from adas import Adas
from adas.metrics import Metrics
# for ADP dataset
from ADP_utils.classesADP import classesADP
# Command-line interface and module-level configuration for architecture
# search (DARTS with optional Gumbel-softmax sampling and the Adas optimizer).
parser = argparse.ArgumentParser("adaptive_darts")
####################
# Dataset
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='ADP-Release1', help='valid datasets: cifar10, cifar100, ADP-Release1')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--image_size', type=int, default=64, help='CPATH image size')
# color augmentation
parser.add_argument('--color_aug', action='store_true', default=False, help='use color augmentation')
parser.add_argument('--color_distortion', type=float, default=0.3, help='color distortion param')
# For ADP dataset only
parser.add_argument('--adp_level', type=str, default='L3', help='ADP level')
####################
# Training details
parser.add_argument('--gpu', type=str, default='0', help='gpu device id')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--learning_rate', type=float, default=0.175, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--learnable_bn', action='store_true', default=False, help='learnable parameters in batch normalization')
# Gumbel-softmax
parser.add_argument('--gumbel', action='store_true', default=False, help='use or not Gumbel-softmax trick')
parser.add_argument('--tau_max', type=float, default=10.0, help='initial tau')
parser.add_argument('--tau_min', type=float, default=1.0, help='minimum tau')
# Adas optimizer
parser.add_argument('--adas', action='store_true', default=False, help='whether or not to use adas optimizer')
parser.add_argument('--scheduler_beta', type=float, default=0.98, help='beta for lr scheduler')
parser.add_argument('--scheduler_p', type=int, default=1, help='p for lr scheduler')
parser.add_argument('--step_size', type=int, default=50, help='step_size for dropping lr')
parser.add_argument('--gamma', type=float, default=1.0, help='gamma for dropping lr')
####################
# Model details
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=4, help='total number of layers')
parser.add_argument('--node', type=int, default=4, help='number of nodes in a cell')
####################
# Others
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--file_name', type=str, default='_', help='metrics and weights data file name')
args = parser.parse_args()
# Timestamped experiment directory with a snapshot of all *.py scripts.
args.save = 'Search-{}-data-{}-{}'.format(args.save, args.dataset, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Log to stdout and to <save>/log.txt with identical formatting.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Dataset-dependent class count (data_folder is only set for CIFAR).
if args.dataset == 'cifar100':
    n_classes = 100
    data_folder = 'cifar-100-python'
elif args.dataset == 'cifar10':
    n_classes = 10
    data_folder = 'cifar-10-batches-py'
elif args.dataset == 'ADP-Release1':
    n_classes = classesADP[args.adp_level]['numClasses']
else:
    logging.info('dataset not supported')
    sys.exit(1)
# Set True in main() when more than one GPU id is given.
is_multi_gpu = False
def main():
    """Run the full architecture search: setup, bi-level training loop,
    and per-epoch export of metrics/weights/genotypes to Excel files.

    Side effects: allocates CUDA devices, reads the dataset from args.data,
    creates ../save_data_<file_name>/ (fails if it already exists), and
    writes checkpoints under args.save.
    """
    gc.collect()
    torch.cuda.empty_cache()
    global is_multi_gpu
    # args.gpu may be a comma-separated list, e.g. '0,1'.
    gpus = [int(i) for i in args.gpu.split(',')]
    logging.info('gpus = %s' % gpus)
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    if len(gpus) == 1:
        torch.cuda.set_device(int(args.gpu))
    else:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        is_multi_gpu = True
    # Seed all RNGs; cudnn.benchmark still makes runs nondeterministic.
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %s' % args.gpu)
    logging.info("args = %s", args)
    # load dataset
    if args.dataset == 'cifar100':
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
        train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    elif args.dataset == 'cifar10':
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
        train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    elif args.dataset == 'ADP-Release1':
        # NOTE(review): the validation split also uses train_transform here
        # (augmented) — confirm this is intended for the search phase.
        train_transform, valid_transform = utils._data_transforms_adp(args)
        train_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='train_search', portion=args.train_portion)
        valid_data = utils.ADP_dataset(level=args.adp_level, transform=train_transform, root=args.data, split='valid_search', portion=args.train_portion)
    # CIFAR: carve one training set into train/valid halves by index;
    # ADP ships pre-split train_search/valid_search subsets.
    if args.dataset in ['cifar100', 'cifar10']:
        num_train = len(train_data)
        indices = list(range(num_train))
        split = int(np.floor(args.train_portion * num_train))
        train_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
            pin_memory=True, num_workers=0)
        valid_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
            pin_memory=True, num_workers=0)
    elif args.dataset == 'ADP-Release1':
        train_queue = torch.utils.data.DataLoader(
            train_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.RandomSampler(train_data),
            pin_memory=True, num_workers=0)
        valid_queue = torch.utils.data.DataLoader(
            valid_data, batch_size=args.batch_size,
            sampler=torch.utils.data.sampler.RandomSampler(valid_data),
            pin_memory=True, num_workers=0)
    # build network
    if args.dataset in ['cifar100', 'cifar10']:
        criterion = nn.CrossEntropyLoss()
        criterion = criterion.cuda()
    elif args.dataset == 'ADP-Release1':
        # Multi-label loss with per-class weights inversely proportional to
        # class frequency in the training split.
        dataset_size = len(train_queue.dataset)
        print('train dataset size:', len(train_queue.dataset))
        print('valid dataset size:', len(valid_queue.dataset))
        train_class_counts = np.sum(train_queue.dataset.class_labels, axis=0)
        weightsBCE = dataset_size / train_class_counts
        # NOTE(review): int(args.gpu) assumes a single gpu id; this would
        # raise for comma-separated lists — verify the multi-GPU + ADP path.
        weightsBCE = torch.as_tensor(weightsBCE, dtype=torch.float32).to(int(args.gpu))
        criterion = torch.nn.MultiLabelSoftMarginLoss(weight=weightsBCE).cuda()
    model = Network(args.init_channels, n_classes, args.layers, criterion, learnable_bn=args.learnable_bn, steps=args.node, multiplier=args.node)
    if is_multi_gpu:
        model = nn.DataParallel(model)
    model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # Separate architecture parameters (alphas) from network weights by id().
    arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
    arch_params = list(map(id, arch_parameters))
    model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
    model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
    # Optimizer for model weights update
    # Use Adas: optimizer and scheduler
    if args.adas:
        optimizer = Adas(params=list(model_params),
                         lr=args.learning_rate,
                         beta=args.scheduler_beta,
                         step_size=args.step_size,
                         gamma=args.gamma,
                         momentum=args.momentum,
                         weight_decay=args.weight_decay)
    # Use SGD: default in DARTS paper
    else:
        optimizer = torch.optim.SGD(
            model_params,
            args.learning_rate,
            momentum=args.momentum,
            weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)
    # Architect owns the architecture-parameter (alpha) updates.
    architect = Architect(model, criterion, args)
    if not args.adas:
        # record probing metrics
        arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        metrics = Metrics(params=list(model_params))
    # files to record searching results
    performance_statistics = {}
    arch_statistics = {}
    genotype_statistics = {}
    # make new save dir (will raise error if already exists - so make sure to specify args.file_name)
    dir_path = f'../save_data_{args.file_name}'
    os.mkdir(dir_path)
    metrics_path = dir_path + '/metrics_stat_{}.xlsx'.format(args.file_name)
    weights_path = dir_path +'/weights_stat_{}.xlsx'.format(args.file_name)
    genotypes_path = dir_path +'/genotypes_stat_{}.xlsx'.format(args.file_name)
    errors_dict = {'train_acc_1': [], 'train_loss': [], 'valid_acc_1': [], 'valid_loss': [], 'train_acc_5': [], 'valid_acc_5':[]}
    # training
    for epoch in range(args.epochs):
        # Adas maintains per-layer learning rates; SGD uses the cosine schedule.
        if args.adas:
            lr = optimizer.lr_vector
        else:
            scheduler.step()
            lr = scheduler.get_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)
        genotype = model.module.genotype() if is_multi_gpu else model.genotype()
        logging.info('epoch: %d', epoch)
        logging.info('genotype = %s', genotype)
        # training
        train_acc_1, train_acc_5, train_obj = train(epoch, train_queue, valid_queue,
                                                    model, architect, criterion,
                                                    optimizer, lr)
        print('\n')
        logging.info('train_acc_1 %f, train_acc_5 %f', train_acc_1, train_acc_5)
        # validation
        valid_acc_1, valid_acc_5, valid_obj = infer(valid_queue, model, criterion)
        print('\n')
        logging.info('valid_acc_1 %f, valid_acc_5 %f', valid_acc_1, valid_acc_5)
        # update the errors dictionary
        errors_dict['train_acc_1'].append(train_acc_1)
        errors_dict['train_loss'].append(train_obj)
        errors_dict['valid_acc_1'].append(valid_acc_1)
        errors_dict['valid_loss'].append(valid_obj)
        errors_dict['valid_acc_5'].append(valid_acc_5)
        errors_dict['train_acc_5'].append(train_acc_5)
        # update network metrics (knowledge gain, condition mapping, etc)
        if args.adas:
            # AdaS: update learning rates
            optimizer.epoch_step(epoch)
            io_metrics = optimizer.KG
            lr_metrics = optimizer.velocity
        else:
            metrics()
            io_metrics = metrics.KG(epoch)
            lr_metrics = None
        # weights
        # Softmaxed alphas are the current per-op mixing weights.
        weights_normal = F.softmax(model.module.alphas_normal if is_multi_gpu else model.alphas_normal, dim=-1).detach().cpu().numpy()
        weights_reduce = F.softmax(model.module.alphas_reduce if is_multi_gpu else model.alphas_reduce, dim=-1).detach().cpu().numpy()
        # write data to excel files
        write_data(epoch, io_metrics, lr_metrics, weights_normal, weights_reduce, genotype,
                   performance_statistics, arch_statistics, genotype_statistics,
                   metrics_path, weights_path, genotypes_path)
        # save model parameters
        save_model = model.module if is_multi_gpu else model
        utils.save(save_model, os.path.join(args.save, 'weights.pt'))
        # save errors_dict to pickle file
        with open(dir_path + '/errors_dict.pkl', 'wb') as f:
            pickle.dump(errors_dict, f)
def train(epoch, train_queue, valid_queue, model, architect, criterion, optimizer, lr):
    """One epoch of bi-level search training.

    Per mini-batch: the architect updates the architecture parameters using
    a validation batch, then the weight optimizer updates the network
    weights on the training batch.

    Returns:
        (top-1 accuracy, top-5/Jaccard accuracy, average loss) for the epoch.
        For ADP the two accuracy values are the dataset-level per-label
        accuracy and mean Jaccard index, respectively.
    """
    global is_multi_gpu
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    trained_data_size = 0
    for step, (input, target) in enumerate(train_queue):
        # one mini-batch
        print('\rtrain mini batch {:03d}'.format(step), end=' ')
        model.train()
        n = input.size(0)
        trained_data_size += n
        if args.gumbel:
            # Anneal the Gumbel-softmax temperature linearly from tau_max to
            # tau_min over the course of training.
            model.module.set_tau(args.tau_max - epoch * 1.0 / args.epochs * (args.tau_max - args.tau_min)) if is_multi_gpu \
                else model.set_tau(args.tau_max - epoch * 1.0 / args.epochs * (args.tau_max - args.tau_min))
        input = input.cuda()
        target = target.cuda()
        # get a random minibatch from the search queue with replacement
        # (a fresh iterator each step; with RandomSampler this re-samples).
        input_search, target_search = next(iter(valid_queue))
        input_search = input_search.cuda()
        target_search = target_search.cuda()
        # logging.info('update arch...')
        architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)
        # logging.info('update weights...')
        optimizer.zero_grad()
        logits = model(input, gumbel=args.gumbel)
        loss = criterion(logits, target)
        loss.backward()
        # Clip only the network weights, never the architecture parameters.
        arch_parameters = model.module.arch_parameters() if is_multi_gpu else model.arch_parameters()
        arch_params = list(map(id, arch_parameters))
        model_parameters = model.module.parameters() if is_multi_gpu else model.parameters()
        model_params = filter(lambda p: id(p) not in arch_params, model_parameters)
        nn.utils.clip_grad_norm_(model_params, args.grad_clip)
        optimizer.step()
        if args.dataset in ['cifar100', 'cifar10']:
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
        elif args.dataset == 'ADP-Release1':
            # Multi-label: threshold sigmoid outputs at 0.5.
            m = nn.Sigmoid()
            preds = (m(logits) > 0.5).int()
            prec1, prec5 = utils.accuracyADP(preds, target)
            objs.update(loss.item(), n)
            top1.update(prec1.double(), n)
            top5.update(prec5.double(), n)
        if step % args.report_freq == 0:
            print('\n')
            if args.dataset in ['cifar100', 'cifar10']:
                objs_avg = objs.avg
                top1_avg = top1.avg
                top5_avg = top5.avg
            elif args.dataset == 'ADP-Release1':
                # ADP meters accumulate raw counts; normalize by samples seen.
                objs_avg = objs.avg
                top1_avg = (top1.sum_accuracy.cpu().item() / (trained_data_size * n_classes))
                top5_avg = (top5.sum_accuracy.cpu().item() / trained_data_size)
            logging.info('train %03d %e %f %f', step, objs_avg, top1_avg, top5_avg)
    if args.dataset in ['cifar100', 'cifar10']:
        objs_avg = objs.avg
        top1_avg = top1.avg
        top5_avg = top5.avg
    elif args.dataset == 'ADP-Release1':
        objs_avg = objs.avg
        top1_avg = (top1.sum_accuracy.cpu().item() / (len(train_queue.dataset) * n_classes))
        top5_avg = (top5.sum_accuracy.cpu().item() / len(train_queue.dataset))
    return top1_avg, top5_avg, objs_avg
def infer(valid_queue, model, criterion):
    """Evaluate the current search network on the validation queue.

    Returns:
        (top-1 accuracy, top-5/Jaccard accuracy, average loss). For ADP the
        values are per-label accuracy and mean Jaccard index.
    """
    global is_multi_gpu
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.eval()
    # for ADP dataset
    preds = 0
    valided_data_size = 0
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            print('\rinfer mini batch {:03d}'.format(step), end=' ')
            input = input.cuda()
            target = target.cuda()
            logits = model(input)
            loss = criterion(logits, target)
            n = input.size(0)
            valided_data_size += n
            if args.dataset in ['cifar100', 'cifar10']:
                prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
                objs.update(loss.item(), n)
                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)
            elif args.dataset == 'ADP-Release1':
                # Multi-label: threshold sigmoid outputs at 0.5.
                m = nn.Sigmoid()
                preds = (m(logits) > 0.5).int()
                prec1, prec5 = utils.accuracyADP(preds, target)
                objs.update(loss.item(), n)
                top1.update(prec1.double(), n)
                top5.update(prec5.double(), n)
            if step % args.report_freq == 0:
                print('\n')
                if args.dataset in ['cifar100', 'cifar10']:
                    objs_avg = objs.avg
                    top1_avg = top1.avg
                    top5_avg = top5.avg
                elif args.dataset == 'ADP-Release1':
                    # ADP meters accumulate raw counts; normalize by samples.
                    objs_avg = objs.avg
                    top1_avg = (top1.sum_accuracy.cpu().item() / (valided_data_size * n_classes))
                    top5_avg = (top5.sum_accuracy.cpu().item() / valided_data_size)
                logging.info('valid %03d %e %f %f', step, objs_avg, top1_avg, top5_avg)
    if args.dataset in ['cifar100', 'cifar10']:
        objs_avg = objs.avg
        top1_avg = top1.avg
        top5_avg = top5.avg
    elif args.dataset == 'ADP-Release1':
        objs_avg = objs.avg
        top1_avg = (top1.sum_accuracy.cpu().item() / (len(valid_queue.dataset) * n_classes))
        top5_avg = (top5.sum_accuracy.cpu().item() / len(valid_queue.dataset))
    return top1_avg, top5_avg, objs_avg
def write_data(epoch, net_metrics, lr_metrics, weights_normal, weights_reduce, genotype,
               perform_stat, arch_stat, genotype_stat, metrics_path, weights_path, genotypes_path):
    """Append this epoch's search statistics to the accumulator dicts and
    rewrite the three Excel reports (metrics, alpha weights, genotypes).

    The caller passes the same dicts every epoch, so each file grows one
    column group per epoch. Column layout assumes the 8-operation DARTS
    search space in the order: none, max_pool, avg_pool, skip, sep_3, sep_5,
    dil_3, dil_5.
    """
    # genotype
    #if epoch % 5 == 0 or epoch == args.epochs - 1:
    genotype_stat['epoch_{}'.format(epoch)] = [genotype]
    genotypes_df = pd.DataFrame(data=genotype_stat)
    genotypes_df.to_excel(genotypes_path)
    # io metrics
    # (net_metrics is the per-layer knowledge-gain vector; the commented
    # fields below are alternative probes kept for reference.)
    perform_stat['S_epoch_{}'.format(epoch)] = net_metrics
    # perform_stat['out_S_epoch_{}'.format(epoch)] = net_metrics.output_channel_S
    # perform_stat['fc_S_epoch_{}'.format(epoch)] = net_metrics.fc_S
    # perform_stat['in_rank_epoch_{}'.format(epoch)] = net_metrics.input_channel_rank
    # perform_stat['out_rank_epoch_{}'.format(epoch)] = net_metrics.output_channel_rank
    # perform_stat['fc_rank_epoch_{}'.format(epoch)] = net_metrics.fc_rank
    # perform_stat['in_condition_epoch_{}'.format(epoch)] = net_metrics.input_channel_condition
    # perform_stat['out_condition_epoch_{}'.format(epoch)] = net_metrics.output_channel_condition
    if args.adas:
        # lr metrics
        # perform_stat['rank_velocity_epoch_{}'.format(epoch)] = lr_metrics.rank_velocity
        perform_stat['learning_rate_epoch_{}'.format(epoch)] = lr_metrics
    # write metrics data to xls file
    metrics_df = pd.DataFrame(data=perform_stat)
    metrics_df.to_excel(metrics_path)
    # weights
    # normal
    arch_stat['normal_none_epoch{}'.format(epoch)] = weights_normal[:, 0]
    arch_stat['normal_max_epoch{}'.format(epoch)] = weights_normal[:, 1]
    arch_stat['normal_avg_epoch{}'.format(epoch)] = weights_normal[:, 2]
    arch_stat['normal_skip_epoch{}'.format(epoch)] = weights_normal[:, 3]
    arch_stat['normal_sep_3_epoch{}'.format(epoch)] = weights_normal[:, 4]
    arch_stat['normal_sep_5_epoch{}'.format(epoch)] = weights_normal[:, 5]
    arch_stat['normal_dil_3_epoch{}'.format(epoch)] = weights_normal[:, 6]
    arch_stat['normal_dil_5_epoch{}'.format(epoch)] = weights_normal[:, 7]
    # reduce
    arch_stat['reduce_none_epoch{}'.format(epoch)] = weights_reduce[:, 0]
    arch_stat['reduce_max_epoch{}'.format(epoch)] = weights_reduce[:, 1]
    arch_stat['reduce_avg_epoch{}'.format(epoch)] = weights_reduce[:, 2]
    arch_stat['reduce_skip_epoch{}'.format(epoch)] = weights_reduce[:, 3]
    arch_stat['reduce_sep_3_epoch{}'.format(epoch)] = weights_reduce[:, 4]
    arch_stat['reduce_sep_5_epoch{}'.format(epoch)] = weights_reduce[:, 5]
    arch_stat['reduce_dil_3_epoch{}'.format(epoch)] = weights_reduce[:, 6]
    arch_stat['reduce_dil_5_epoch{}'.format(epoch)] = weights_reduce[:, 7]
    # write weights data to xls file
    weights_df = pd.DataFrame(data=arch_stat)
    weights_df.to_excel(weights_path)
# Script entry point: run architecture search when invoked directly.
if __name__ == '__main__':
    main()
| 22,044 | 43.445565 | 153 | py |
PIBConv | PIBConv-main/cnn/utils.py | import os
import numpy as np
import pandas as pd
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
from torchvision.datasets.utils import check_integrity,\
extract_archive, verify_str_arg, download_and_extract_archive
from torchvision.datasets.folder import default_loader
from torch.utils.data import Dataset
from ADP_utils.classesADP import classesADP
from typing import Any
import pickle
import re
class AverageMeter(object):
    """Track a running average of a scalar (or tensor) quantity.

    `avg` is the n-weighted mean of everything recorded since the last
    reset, while `sum_accuracy` accumulates the raw (unweighted) values —
    the ADP accuracy bookkeeping reads that field directly.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.sum_accuracy = 0
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum_accuracy = self.sum_accuracy + val
        self.cnt = self.cnt + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
    """Top-k classification accuracy, in percent, for each k in `topk`.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) integer class labels.
        topk: tuple of k values to evaluate.

    Returns:
        List of 0-dim tensors, one accuracy value per requested k.
    """
    batch_size = target.size(0)
    largest_k = max(topk)
    # Row i of `ranked` holds the i-th best predicted class per sample.
    _, ranked = output.topk(largest_k, 1, True, True)
    ranked = ranked.t()
    hits = ranked.eq(target.view(1, -1).expand_as(ranked))
    return [
        hits[:k].contiguous().view(-1).float().sum(0).mul_(100.0 / batch_size)
        for k in topk
    ]
# for ADP dataset (also used for BCSS dataset)
def accuracyADP(preds, targets):
    """Multi-label accuracy counts for the ADP (and BCSS) datasets.

    Args:
        preds: (batch, num_classes) int tensor of 0/1 predictions.
        targets: (batch, num_classes) tensor of 0/1 ground-truth labels
            (cast to int internally).

    Returns:
        acc1: 0-dim tensor — number of matching (prediction, label) entries
            over the whole batch.
        acc5: 0-dim double tensor — sum over samples of the Jaccard index
            |pred AND label| / |pred OR label|.
    """
    targets_all = targets.data.int()
    # Element-wise exact matches across the whole batch.
    acc1 = torch.sum(preds == targets_all)
    preds_cpu = preds.cpu()
    targets_all_cpu = targets_all.cpu()
    acc5 = 0
    for pred_sample, labelv in zip(preds_cpu, targets_all_cpu):
        # Use torch's native bitwise operators: np.bitwise_and/or on torch
        # tensors depends on fragile NumPy interop and can hand torch.sum an
        # ndarray it rejects.
        numerator = torch.sum(pred_sample & labelv)
        denominator = torch.sum(pred_sample | labelv)
        # NOTE: when pred and label are both all-zero this is 0/0 -> NaN,
        # matching the previous behavior.
        acc5 += numerator.double() / denominator.double()
    return acc1, acc5
class Cutout(object):
    """Zero out one random square patch of a (C, H, W) image tensor.

    The patch is `length` pixels on a side, centred at a uniformly random
    pixel and clipped at the image borders. The tensor is modified in place
    and also returned.
    """

    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        height, width = img.size(1), img.size(2)
        half = self.length // 2
        # Draw the centre row first, then the column — keeping this order
        # preserves reproducibility under a fixed NumPy seed.
        center_y = np.random.randint(height)
        center_x = np.random.randint(width)
        top = np.clip(center_y - half, 0, height)
        bottom = np.clip(center_y + half, 0, height)
        left = np.clip(center_x - half, 0, width)
        right = np.clip(center_x + half, 0, width)
        mask = np.ones((height, width), np.float32)
        mask[top:bottom, left:right] = 0.
        # Broadcast the single-channel mask across all channels, in place.
        img *= torch.from_numpy(mask).expand_as(img)
        return img
class ColorDistortion:
    """Colour-jitter augmentation parameterised by a single strength value.

    Brightness/contrast/saturation jitter at 0.8*distortion and hue jitter
    at 0.2*distortion are always applied (p=1.0).
    """

    def __init__(self, distortion):
        self.distortion = distortion

    def __call__(self, image):
        s = self.distortion
        jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
        apply_jitter = transforms.RandomApply([jitter], p=1.0)
        # Random grayscale is currently disabled in the pipeline below.
        rnd_gray = transforms.RandomGrayscale(p=0.2)
        pipeline = transforms.Compose([
            apply_jitter,
            # rnd_gray
        ])
        return pipeline(image)
def _data_transforms_cifar10(args):
    """Build the (train, valid) torchvision pipelines for CIFAR-10."""
    mean = [0.49139968, 0.48215827, 0.44653124]
    std = [0.24703233, 0.24348505, 0.26158768]
    train_ops = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    if args.cutout:
        # Cutout operates on the tensor, so it must come after ToTensor
        train_ops.append(Cutout(args.cutout_length))
    valid_ops = [
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    return transforms.Compose(train_ops), transforms.Compose(valid_ops)
"""From https://github.com/chenxin061/pdarts/"""
def _data_transforms_cifar100(args):
    """Build the (train, valid) torchvision pipelines for CIFAR-100."""
    mean = [0.5071, 0.4867, 0.4408]
    std = [0.2675, 0.2565, 0.2761]
    train_ops = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    if args.cutout:
        # Cutout operates on the tensor, so it must come after ToTensor
        train_ops.append(Cutout(args.cutout_length))
    valid_ops = [
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    return transforms.Compose(train_ops), transforms.Compose(valid_ops)
# for ADP dataset
def _data_transforms_adp(args):
ADP_MEAN = [0.81233799, 0.64032477, 0.81902153]
ADP_STD = [0.18129702, 0.25731668, 0.16800649]
degrees = 45
horizontal_shift, vertical_shift = 0.1, 0.1
# train transform
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomAffine(degrees=degrees, translate=(
horizontal_shift, vertical_shift)),
transforms.ToTensor(),
transforms.Normalize(ADP_MEAN, ADP_STD)
])
if args.color_aug:
ColorAugmentation = ColorDistortion(args.color_distortion)
train_transform.transforms.insert(3, ColorAugmentation)
if args.image_size != 272:
train_transform.transforms.insert(0, transforms.Resize(
(args.image_size, args.image_size), interpolation=transforms.functional.InterpolationMode.BICUBIC))
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length))
# valid transform
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(ADP_MEAN, ADP_STD)
])
if args.image_size != 272:
valid_transform.transforms.insert(0, transforms.Resize(
(args.image_size, args.image_size), interpolation=transforms.functional.InterpolationMode.BICUBIC))
return train_transform, valid_transform
# for BCSS dataset
def _data_transforms_bcss(args):
BCSS_MEAN = [0.7107, 0.4878, 0.6726]
BCSS_STD = [0.1788, 0.2152, 0.1615]
degrees = 45
horizontal_shift, vertical_shift = 0.1, 0.1
# train transform
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomAffine(degrees=degrees, translate=(
horizontal_shift, vertical_shift)),
transforms.ToTensor(),
transforms.Normalize(BCSS_MEAN, BCSS_STD)
])
if args.color_aug:
ColorAugmentation = ColorDistortion(args.color_distortion)
train_transform.transforms.insert(3, ColorAugmentation)
if args.image_size != 272:
train_transform.transforms.insert(0, transforms.Resize(
(args.image_size, args.image_size), interpolation=transforms.functional.InterpolationMode.BICUBIC))
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length))
# valid transform
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(BCSS_MEAN, BCSS_STD)
])
if args.image_size != 272:
valid_transform.transforms.insert(0, transforms.Resize(
(args.image_size, args.image_size), interpolation=transforms.functional.InterpolationMode.BICUBIC))
return train_transform, valid_transform
# for CRC dataset
def _data_transforms_crc(args):
CRC_MEAN = [0.6976, 0.5340, 0.6687]
CRC_STD = [0.2272, 0.2697, 0.2247]
degrees = 45
horizontal_shift, vertical_shift = 0.1, 0.1
# train transform
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomAffine(degrees=degrees, translate=(
horizontal_shift, vertical_shift)),
transforms.ToTensor(),
transforms.Normalize(CRC_MEAN, CRC_STD)
])
if args.color_aug:
ColorAugmentation = ColorDistortion(args.color_distortion)
train_transform.transforms.insert(3, ColorAugmentation)
if args.image_size != 272:
train_transform.transforms.insert(0, transforms.Resize(
(args.image_size, args.image_size), interpolation=transforms.functional.InterpolationMode.BICUBIC))
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length))
# valid transform
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CRC_MEAN, CRC_STD)
])
if args.image_size != 272:
valid_transform.transforms.insert(0, transforms.Resize(
(args.image_size, args.image_size), interpolation=transforms.functional.InterpolationMode.BICUBIC))
return train_transform, valid_transform
# for BACH dataset
def _data_transforms_bach(args):
BACH_MEAN = [0.6880, 0.5881, 0.8209]
BACH_STD = [0.1632, 0.1841, 0.1175]
degrees = 45
horizontal_shift, vertical_shift = 0.1, 0.1
# train transform
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomAffine(degrees=degrees, translate=(
horizontal_shift, vertical_shift)),
transforms.ToTensor(),
transforms.Normalize(BACH_MEAN, BACH_STD)
])
if args.color_aug:
ColorAugmentation = ColorDistortion(args.color_distortion)
train_transform.transforms.insert(3, ColorAugmentation)
if args.image_size != 272:
train_transform.transforms.insert(0, transforms.Resize(
(args.image_size, args.image_size), interpolation=transforms.functional.InterpolationMode.BICUBIC))
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length))
# valid transform
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(BACH_MEAN, BACH_STD)
])
if args.image_size != 272:
valid_transform.transforms.insert(0, transforms.Resize(
(args.image_size, args.image_size), interpolation=transforms.functional.InterpolationMode.BICUBIC))
return train_transform, valid_transform
# for OS dataset
def _data_transforms_os(args):
OS_MEAN = [0.8414, 0.6492, 0.7377]
OS_STD = [0.1379, 0.2508, 0.1979]
degrees = 45
horizontal_shift, vertical_shift = 0.1, 0.1
# train transform
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomAffine(degrees=degrees, translate=(
horizontal_shift, vertical_shift)),
transforms.ToTensor(),
transforms.Normalize(OS_MEAN, OS_STD)
])
if args.color_aug:
ColorAugmentation = ColorDistortion(args.color_distortion)
train_transform.transforms.insert(3, ColorAugmentation)
if args.image_size != 272:
train_transform.transforms.insert(0, transforms.Resize(
(args.image_size, args.image_size), interpolation=transforms.functional.InterpolationMode.BICUBIC))
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length))
# valid transform
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(OS_MEAN, OS_STD)
])
if args.image_size != 272:
valid_transform.transforms.insert(0, transforms.Resize(
(args.image_size, args.image_size), interpolation=transforms.functional.InterpolationMode.BICUBIC))
return train_transform, valid_transform
# for ADP dataset
class ADP_dataset(Dataset):
db_name = 'ADP V1.0 Release'
ROI = 'img_res_1um_bicubic'
csv_file = 'ADP_EncodedLabels_Release1_Flat.csv'
def __init__(self,
level,
transform,
root,
split='train',
portion=0.5,
loader=default_loader):
'''
Args:
level (str): a string corresponding to a dict
defined in "ADP_scripts\classes\classesADP.py"
defines the hierarchy to be trained on
transform (callable, optional): A function/transform that takes in an
PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
root (string): Root directory of the ImageNet Dataset.
split (string, optional): The dataset split, supports ``train``,
``valid``, or ``test``.
loader (callable, optional): A function to load an image given its
path. Defaults to default_loader defined in torchvision
Attributes:
self.full_image_paths (list) : a list of image paths
self.class_labels (np.ndarray) : a numpy array of class labels
(num_samples, num_classes)
'''
self.root = root
self.split = verify_str_arg(
split, "split", ("train", "valid", "test", "train_search", "valid_search"))
self.transform = transform
self.loader = loader
self.portion = portion
# getting paths:
csv_file_path = os.path.join(self.root, self.db_name, self.csv_file)
# reads data and returns a pd.dataframe
ADP_data = pd.read_csv(filepath_or_buffer=csv_file_path, header=0)
# rows are integers starting from 0, columns are strings: e.g. "Patch Names", "E", ...
split_folder = os.path.join(self.root, self.db_name, 'splits')
if self.split == "train":
train_inds = np.load(os.path.join(split_folder, 'train.npy'))
out_df = ADP_data.loc[train_inds, :]
elif self.split == "valid":
valid_inds = np.load(os.path.join(split_folder, 'valid.npy'))
out_df = ADP_data.loc[valid_inds, :]
elif self.split == "test":
test_inds = np.load(os.path.join(split_folder, 'test.npy'))
out_df = ADP_data.loc[test_inds, :]
# for darts search
elif self.split == "train_search":
train_inds = np.load(os.path.join(split_folder, 'train.npy'))
train_search_inds = train_inds[: int(
np.floor(self.portion * len(train_inds)))]
out_df = ADP_data.loc[train_search_inds, :]
elif self.split == "valid_search":
train_inds = np.load(os.path.join(split_folder, 'train.npy'))
valid_search_inds = train_inds[int(
np.floor(self.portion * len(train_inds))):]
out_df = ADP_data.loc[valid_search_inds, :]
self.full_image_paths = [os.path.join(
self.root, self.db_name, self.ROI, image_name) for image_name in out_df['Patch Names']]
self.class_labels = out_df[classesADP[level]
['classesNames']].to_numpy(dtype=np.float32)
def __getitem__(self, idx) -> torch.Tensor:
path = self.full_image_paths[idx]
label = self.class_labels[idx]
sample = self.loader(path) # Loading image
if self.transform is not None: # PyTorch implementation
sample = self.transform(sample)
return sample, torch.tensor(label)
def __len__(self) -> int:
return(len(self.full_image_paths))
# for BCSS dataset
class BCSSDataset(Dataset):
db_name = 'BCSS_transformed'
def __init__(self,
root,
split="train",
transform=None,
loader=default_loader,
multi_labelled=True) -> None:
"""
Retrieved from: https://bcsegmentation.grand-challenge.org/
Args:
root (string):
Directory of the transformed dataset, e.g. "/home/BCSS_transformed"
split (string, optional): The dataset split, supports ``train``,
``valid``, or ``test``.
transform (callable, optional): A function/transform that takes in an
PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
loader (callable, optional): A function to load an image given its
path. Defaults to default_loader defined in torchvision
multi_labelled (bool): a boolean controlling whether the output labels are a multilabelled array
or an index corresponding to the single label
"""
self.root = root
self.split = verify_str_arg(split, "split", ("train", "valid", "test"))
self.transform = transform
self.loader = loader
# getting samples from preprocessed pickle file
if multi_labelled:
df = pd.read_csv(os.path.join(
self.root, self.db_name, self.split + ".csv"), index_col="image")
else:
df = pd.read_csv(os.path.join(
self.root, self.db_name, self.split + "_with_norm_mass.csv"), index_col="image")
self.samples = [(image.replace('\\', '/'), label)
for image, label in zip(df.index, df.to_records(index=False))]
if multi_labelled:
self.samples = [(os.path.join(self.root, self.db_name, path), list(
label)) for path, label in self.samples]
else:
self.samples = [(os.path.join(self.root, self.db_name, path), np.argmax(
list(label))) for path, label in self.samples]
self.class_to_idx = {cls: idx for idx, cls in enumerate(df.columns)}
self.class_labels = df.to_numpy(dtype=np.float32)
def __getitem__(self, idx) -> [Any, torch.Tensor]:
path, label = self.samples[idx]
sample = self.loader(path) # Loading image
if self.transform is not None: # PyTorch implementation
sample = self.transform(sample)
return sample, torch.tensor(label, dtype=torch.int64)
def __len__(self) -> int:
return len(self.samples)
# for CRC dataset
class CRC_transformed(Dataset):
db_name = 'CRC_transformed'
def __init__(self, root, split="train", transform=None, loader=default_loader) -> None:
"""
Args:
root (string):
Directory of the transformed dataset, e.g. /home/CRC_transformed
split (string, optional): The dataset split, supports ``train``,
``valid``, or ``test``.
transform (callable, optional): A function/transform that takes in an
PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
loader (callable, optional): A function to load an image given its
path. Defaults to default_loader defined in torchvision
"""
self.root = root
self.split = verify_str_arg(split, "split", ("train", "valid", "test"))
self.transform = transform
self.loader = loader
# getting samples from preprocessed pickle file
self.samples = pickle.load(
open(os.path.join(self.root, self.db_name, self.split+".pickle"), "rb"))
self.samples = [(os.path.join(self.root, self.db_name, path), label)
for path, label in self.samples]
self.class_to_idx = pickle.load(
open(os.path.join(self.root, self.db_name, "class_to_idx.pickle"), "rb"))
def __getitem__(self, idx) -> [Any, torch.Tensor]:
path, label = self.samples[idx]
sample = self.loader(path) # Loading image
if self.transform is not None: # PyTorch implementation
sample = self.transform(sample)
return sample, torch.tensor(label)
def __len__(self) -> int:
return len(self.samples)
# for BACH dataset
class BACH_transformed(Dataset):
db_name = 'BACH_transformed'
def __init__(self, root, split="train", transform=None, loader=default_loader) -> None:
"""
Args:
root (string):
Directory of the transformed dataset, e.g. /home/BACH_transformed
split (string, optional): The dataset split, supports ``train``,
``valid``, or ``test``.
transform (callable, optional): A function/transform that takes in an
PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
loader (callable, optional): A function to load an image given its
path. Defaults to default_loader defined in torchvision
"""
self.root = root
self.split = verify_str_arg(split, "split", ("train", "valid", "test"))
self.transform = transform
self.loader = loader
# getting samples from preprocessed pickle file
self.samples = pickle.load(
open(os.path.join(self.root, self.db_name, self.split+".pickle"), "rb"))
self.samples = [(os.path.join(self.root, self.db_name, path), label)
for path, label in self.samples]
self.class_to_idx = pickle.load(
open(os.path.join(self.root, self.db_name, "class_to_idx.pickle"), "rb"))
def __getitem__(self, idx) -> [Any, torch.Tensor]:
path, label = self.samples[idx]
sample = self.loader(path) # Loading image
if self.transform is not None: # PyTorch implementation
sample = self.transform(sample)
return sample, torch.tensor(label)
def __len__(self) -> int:
return len(self.samples)
# for OS dataset
class OS_transformed(Dataset):
db_name = 'OS_transformed'
def __init__(self, root, split="train", transform=None, loader=default_loader) -> None:
"""
Args:
root (string):
Directory of the transformed dataset, e.g. /home/OS_transformed
split (string, optional): The dataset split, supports ``train``,
``valid``, or ``test``.
transform (callable, optional): A function/transform that takes in an
PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
loader (callable, optional): A function to load an image given its
path. Defaults to default_loader defined in torchvision
"""
self.root = root
self.split = verify_str_arg(split, "split", ("train", "valid", "test"))
self.transform = transform
self.loader = loader
# getting samples from preprocessed pickle file
self.samples = pickle.load(
open(os.path.join(self.root, self.db_name, self.split+".pickle"), "rb"))
self.samples = [(os.path.join(self.root, self.db_name, path), label)
for path, label in self.samples]
self.class_to_idx = pickle.load(
open(os.path.join(self.root, self.db_name, "class_to_idx.pickle"), "rb"))
def __getitem__(self, idx) -> [Any, torch.Tensor]:
path, label = self.samples[idx]
sample = self.loader(path) # Loading image
if self.transform is not None: # PyTorch implementation
sample = self.transform(sample)
return sample, torch.tensor(label)
def __len__(self) -> int:
return len(self.samples)
def count_parameters_in_MB(model):
    """Return the parameter count of `model` in millions (elements / 1e6),
    excluding any parameter whose name contains "auxiliary".

    Uses the builtin sum: np.sum over a generator is deprecated in NumPy and
    silently falls back to the Python implementation anyway.
    """
    return sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name) / 1e6
def save_checkpoint(state, is_best, save):
    """Write `state` to <save>/checkpoint.pth.tar; if `is_best`, also mirror
    it to <save>/model_best.pth.tar."""
    ckpt_path = os.path.join(save, 'checkpoint.pth.tar')
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(save, 'model_best.pth.tar'))
def save(model, model_path):
    # Persist only the learnable parameters (state_dict), not the full module.
    torch.save(model.state_dict(), model_path)
def load(model, model_path):
    # Restore parameters saved by `save` above.
    # NOTE(review): no map_location is passed, so a checkpoint saved on GPU
    # presumably requires the same device to be available when loading — confirm.
    model.load_state_dict(torch.load(model_path))
def drop_path(x, drop_prob):
    """Stochastic depth: zero whole samples of `x` (in place) with probability
    `drop_prob`, scaling survivors by 1/keep_prob so the expectation is unchanged.

    Args:
        x: tensor of shape (batch, ...); modified in place and returned.
        drop_prob: probability of dropping each sample; 0 is a no-op.

    The original built the mask with `Variable(torch.cuda.FloatTensor(...))`,
    which required CUDA and the long-deprecated Variable wrapper; creating the
    mask on x's device/dtype is equivalent and also works on CPU.
    """
    if drop_prob > 0.:
        keep_prob = 1. - drop_prob
        mask = torch.empty(
            x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device).bernoulli_(keep_prob)
        x.div_(keep_prob)
        x.mul_(mask)
    return x
def create_exp_dir(path, scripts_to_save=None):
    """Create the experiment directory `path` and optionally snapshot the
    given script files into <path>/scripts.

    Uses os.makedirs(..., exist_ok=True) so missing parent directories are
    created and re-running against an existing directory no longer raises
    (the original os.mkdir failed on missing parents and on an existing
    scripts/ folder).
    """
    os.makedirs(path, exist_ok=True)
    print('Experiment dir : {}'.format(path))
    if scripts_to_save is not None:
        scripts_dir = os.path.join(path, 'scripts')
        os.makedirs(scripts_dir, exist_ok=True)
        for script in scripts_to_save:
            # copy each script by basename for a reproducibility snapshot
            dst_file = os.path.join(scripts_dir, os.path.basename(script))
            shutil.copyfile(script, dst_file)
def get_channel_size(path, model):
    # Debug/report helper: appends a description of every cell to
    # path/cell_info.txt and the per-cell parameter memory (in millions of
    # elements) to path/layer_info.txt. Assumes `model` exposes `.cells`
    # (an iterable of modules) and torch-style named_parameters().
    # NOTE(review): the print() calls below look like leftover debugging,
    # and "Intermidery" is a typo in the emitted report text.
    f_cell = os.path.join(path, 'cell_info.txt')
    with open(f_cell, 'a') as fh:
        for i, cell in enumerate(model.cells):
            fh.write(f"{i} Cell Info: {cell}")
            fh.write(f"----------------------\n Intermidery Tensors ----------------------")
            #for index,op in enumerate(cell.ops):
    f_layer = os.path.join(path, 'layer_info.txt')
    cell_mem = np.zeros(len(model.cells))
    # parameter names look like "cells.<idx>...."; group sizes by cell index
    cell_name_pat = r"cells\.([0-9]+)\..*"
    for name, v in model.named_parameters():
        m = re.match(cell_name_pat, name)
        print(f"match {m}")
        if m is not None:
            cell_id = int(m.group(1))
            print(f"cell_id {cell_id}")
            cell_mem[cell_id] += np.prod(v.size())/1e6
    with open(f_layer, 'a') as fh:
        for i in range(0, len(model.cells)):
            fh.write(f"Cell{i} mem_size: {cell_mem[i]} \n")
    #
    # fh.write(f"param name:{name} shape:{v.size()} mem:{np.prod(v.size())/1e6}")
| 25,313 | 33.161943 | 111 | py |
PIBConv | PIBConv-main/cnn/model.py | import torch
import torch.nn as nn
from operations import *
from torch.autograd import Variable
from utils import drop_path
class Cell(nn.Module):
    """One fixed (post-search) DARTS cell built from a discovered `genotype`.

    A cell takes the outputs of the two previous cells (s0, s1), preprocesses
    them to `C` channels, then applies the genotype's operations pairwise to
    build intermediate states; the concatenation of the states listed in the
    genotype's `*_concat` field is the cell output.
    """
    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
        super(Cell, self).__init__()
        print(C_prev_prev, C_prev, C)
        # if the previous cell reduced the spatial size, s0 must be downsampled
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
        # pick the op list for a reduction or a normal cell from the genotype
        if reduction:
            op_names, indices = zip(*genotype.reduce)
            concat = genotype.reduce_concat
        else:
            op_names, indices = zip(*genotype.normal)
            concat = genotype.normal_concat
        self._compile(C, op_names, indices, concat, reduction)
    def _compile(self, C, op_names, indices, concat, reduction):
        # Instantiate the concrete ops; every intermediate node consumes
        # exactly two (op, input-index) pairs, hence len(op_names) // 2 steps.
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2
        self._concat = concat
        self.multiplier = len(concat)
        self._ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            # in a reduction cell, ops reading the two cell inputs use stride 2
            stride = 2 if reduction and index < 2 else 1
            op = OPS[name](C, stride, True)
            self._ops += [op]
        self._indices = indices
    def forward(self, s0, s1, drop_prob):
        """Run the cell; `drop_prob` enables drop-path on non-identity ops
        during training."""
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        for i in range(self._steps):
            # each new state is the sum of two op outputs on earlier states
            h1 = states[self._indices[2*i]]
            h2 = states[self._indices[2*i+1]]
            op1 = self._ops[2*i]
            op2 = self._ops[2*i+1]
            h1 = op1(h1)
            h2 = op2(h2)
            if self.training and drop_prob > 0.:
                # identity (skip) connections are exempt from drop-path
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_prob)
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_prob)
            s = h1 + h2
            states += [s]
        return torch.cat([states[i] for i in self._concat], dim=1)
class AuxiliaryHeadCIFAR(nn.Module):
    """Auxiliary classifier head used during CIFAR training; assumes the
    incoming feature map is spatially 8x8. The exact nn.Sequential layout is
    load-bearing: changing it would change state_dict keys."""
    def __init__(self, C, num_classes):
        """assuming input size 8x8"""
        super(AuxiliaryHeadCIFAR, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            # image size = 2 x 2
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True)
        )
        self.classifier = nn.Linear(768, num_classes)
    def forward(self, x):
        # print('before',x.shape)
        x = self.features(x)
        # print('after',x.shape)
        # flatten (batch, 768, 1, 1) -> (batch, 768) before the linear layer
        x = self.classifier(x.view(x.size(0), -1))
        return x
class AuxiliaryHeadImageNet(nn.Module):
    """Auxiliary classifier head for ImageNet training; assumes the incoming
    feature map is spatially 14x14."""
    def __init__(self, C, num_classes):
        """assuming input size 14x14"""
        super(AuxiliaryHeadImageNet, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.
            # Commenting it out for consistency with the experiments in the paper.
            # nn.BatchNorm2d(768),
            nn.ReLU(inplace=True)
        )
        self.classifier = nn.Linear(768, num_classes)
    def forward(self, x):
        x = self.features(x)
        # flatten to (batch, 768) before the linear classifier
        x = self.classifier(x.view(x.size(0), -1))
        return x
class AuxiliaryHeadADP(nn.Module):
    """Auxiliary classifier head for ADP training; AdaptiveAvgPool2d(2) makes
    it tolerant of the incoming spatial size (nominally 17x17)."""
    def __init__(self, C, num_classes):
        """assuming input size 17x17"""
        super(AuxiliaryHeadADP, self).__init__()
        self.features = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d(2),  # image size = 2 x 2
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True)
        )
        self.classifier = nn.Linear(768, num_classes)
    def forward(self, x):
        # print('before',x.shape)
        x = self.features(x)
        # print('after',x.shape)
        # flatten to (batch, 768) before the linear classifier
        x = self.classifier(x.view(x.size(0), -1))
        return x
class NetworkCIFAR(nn.Module):
    """Full evaluation network for CIFAR built by stacking `layers` cells of
    the given genotype, with reduction cells at 1/3 and 2/3 depth.

    NOTE(review): `self.drop_path_prob` is read in forward() but never set
    here — the training script is expected to assign it before the first
    forward pass; confirm against the caller.
    """
    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkCIFAR, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        # the stem widens the 3-channel input to stem_multiplier * C channels
        stem_multiplier = 3
        C_curr = stem_multiplier*C
        self.stem = nn.Sequential(
            nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr)
        )
        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            # double the channels and halve the resolution at 1/3 and 2/3 depth
            if i in [layers//3, 2*layers//3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev,
                        C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr
            if i == 2*layers//3:
                # remember the width feeding the auxiliary head
                C_to_auxiliary = C_prev
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadCIFAR(
                C_to_auxiliary, num_classes)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
    def forward(self, input):
        """Return (logits, logits_aux); logits_aux is None unless the
        auxiliary head is enabled and the module is in training mode."""
        logits_aux = None
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i == 2*self._layers//3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits, logits_aux
class NetworkImageNet(nn.Module):
    """Full evaluation network for ImageNet; a two-part strided stem reduces
    the input resolution 8x before the cell stack.

    NOTE(review): `self.drop_path_prob` is read in forward() but never set
    here — the training script must assign it; confirm against the caller.
    """
    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkImageNet, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        self.stem0 = nn.Sequential(
            nn.Conv2d(3, C // 2, kernel_size=3,
                      stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        C_prev_prev, C_prev, C_curr = C, C, C
        self.cells = nn.ModuleList()
        # stem1 already halves the resolution relative to stem0's output
        reduction_prev = True
        for i in range(layers):
            # double the channels and halve the resolution at 1/3 and 2/3 depth
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev,
                        C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if i == 2 * layers // 3:
                # remember the width feeding the auxiliary head
                C_to_auxiliary = C_prev
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(
                C_to_auxiliary, num_classes)
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, num_classes)
    def forward(self, input):
        """Return (logits, logits_aux); logits_aux is None unless the
        auxiliary head is enabled and the module is in training mode."""
        logits_aux = None
        s0 = self.stem0(input)
        s1 = self.stem1(s0)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i == 2 * self._layers // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits, logits_aux
class NetworkADP(nn.Module):
    """Full evaluation network for the ADP dataset; a single strided stem
    reduces the input resolution 4x before the cell stack.

    NOTE(review): `self.drop_path_prob` is read in forward() but never set
    here — the training script must assign it; confirm against the caller.
    """
    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkADP, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        self.stem = nn.Sequential(
            nn.Conv2d(3, C // 2, kernel_size=3,
                      stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )
        C_prev_prev, C_prev, C_curr = C, C, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            # double the channels and halve the resolution at 1/3 and 2/3 depth
            if i in [layers//3, 2*layers//3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev,
                        C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr
            if i == 2*layers//3:
                # remember the width feeding the auxiliary head
                C_to_auxiliary = C_prev
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadADP(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
    def forward(self, input):
        """Return (logits, logits_aux); logits_aux is None unless the
        auxiliary head is enabled and the module is in training mode."""
        logits_aux = None
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            if i == 2*self._layers//3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits, logits_aux
| 10,318 | 33.627517 | 90 | py |
PIBConv | PIBConv-main/cnn/model_search.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import *
from torch.autograd import Variable
from genotypes import PRIMITIVES
from genotypes import Genotype
class MixedOp(nn.Module):
    """One search edge: every candidate operation from PRIMITIVES in parallel."""

    def __init__(self, C, stride, learnable_bn):
        super(MixedOp, self).__init__()
        # one instance of each candidate op, in PRIMITIVES order
        self._ops = nn.ModuleList(
            OPS[name](C, stride, learnable_bn) for name in PRIMITIVES)

    def forward(self, x, weights, index=None, gumbel=False):
        if gumbel:
            # hard (one-hot) Gumbel selection: evaluate only the sampled op
            return self._ops[index](x) * weights[index]
        # soft relaxation: architecture-weighted sum over every candidate op
        return sum(w * op(x) for w, op in zip(weights, self._ops))
class Cell(nn.Module):
    """Search-phase cell: every edge is a MixedOp over all candidate
    operations, weighted by the architecture parameters."""
    def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev, learnable_bn):
        super(Cell, self).__init__()
        # print(C_prev_prev, C_prev, C)
        self.reduction = reduction
        # if the previous cell reduced spatial size, s0 must be downsampled;
        # affine=False keeps the preprocessing BN unlearnable during search
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
        self._steps = steps
        self._multiplier = multiplier
        self._ops = nn.ModuleList()
        self._bns = nn.ModuleList()
        # one MixedOp per edge: node i has incoming edges from the 2 cell
        # inputs plus every earlier intermediate node (2 + i edges)
        for i in range(self._steps):
            for j in range(2 + i):
                stride = 2 if reduction and j < 2 else 1
                op = MixedOp(C, stride, learnable_bn)
                self._ops.append(op)
    def forward(self, s0, s1, weights, index=None, gumbel=False):
        """Run the cell with per-edge architecture `weights` (and sampled op
        `index` per edge when `gumbel` is True)."""
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        offset = 0
        for i in range(self._steps):
            # each new state sums the weighted edge outputs from all
            # previously-computed states
            if gumbel:
                s = sum(self._ops[offset + j](h, weights[offset + j], index[offset + j], gumbel) for j, h in enumerate(states))
            else:
                s = sum(self._ops[offset + j](h, weights[offset + j]) for j, h in enumerate(states))
            offset += len(states)
            states.append(s)
        # output = channel concat of the last `multiplier` intermediate states
        return torch.cat(states[-self._multiplier:], dim=1)
class Network(nn.Module):
    """Search-phase super-network: a stack of search Cells whose edges are
    weighted by learnable architecture parameters (alphas), with optional
    hard Gumbel-softmax sampling controlled by temperature `tau`."""
    def __init__(self, C, num_classes, layers, criterion, learnable_bn=False, steps=4, multiplier=4, stem_multiplier=3):
        super(Network, self).__init__()
        self._C = C
        self._num_classes = num_classes
        self._layers = layers
        self._criterion = criterion
        self._steps = steps
        self._multiplier = multiplier
        self._learnable_bn = learnable_bn
        C_curr = stem_multiplier * C
        # patchify-style stem (4x4 conv, stride 4) with a channels-first
        # LayerNorm; replaces the earlier 3x3 conv + BatchNorm stem below
        self.stem = nn.Sequential(
            # nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
            # nn.BatchNorm2d(C_curr)
            nn.Conv2d(3, C_curr, 4, stride=4,padding=1, bias=False),
            LayerNorm(C_curr, data_format="channels_first")
        )
        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            # double the channels and reduce at 1/3 and 2/3 depth
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, learnable_bn)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, multiplier * C_curr
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        self._initialize_alphas()
        # initial Gumbel-softmax temperature (annealed externally via set_tau)
        self.tau = 5
    def new(self):
        """Return a fresh CUDA copy of this network sharing the current
        architecture-parameter values (used by second-order DARTS)."""
        model_new = Network(self._C, self._num_classes, self._layers, self._criterion, self._learnable_bn, self._steps, self._multiplier).cuda()
        for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
            x.data.copy_(y.data)
        return model_new
    def set_tau(self, tau):
        # update the Gumbel-softmax temperature
        self.tau = tau
    def get_tau(self):
        return self.tau
    def forward(self, input, gumbel=False):
        def get_gumbel_prob(xins):
            # Straight-through Gumbel-softmax: hard one-hot forward weights
            # with soft gradients; resample on inf/nan numerical failures.
            while True:
                gumbels = -torch.empty_like(xins).exponential_().log()
                logits = (xins.log_softmax(dim=1) + gumbels) / self.tau
                probs = nn.functional.softmax(logits, dim=1)
                index = probs.max(-1, keepdim=True)[1]
                one_h = torch.zeros_like(logits).scatter_(-1, index, 1.0)
                # straight-through estimator: hard value, soft gradient
                hardwts = one_h - probs.detach() + probs
                if (torch.isinf(gumbels).any()) or (torch.isinf(probs).any()) or (torch.isnan(probs).any()):
                    continue
                else:
                    break
            return hardwts, index
        normal_hardwts, normal_index = get_gumbel_prob(self.alphas_normal)
        reduce_hardwts, reduce_index = get_gumbel_prob(self.alphas_reduce)
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            index = None
            # pick the weight set matching the cell type and sampling mode
            if cell.reduction:
                if gumbel:
                    weights, index = reduce_hardwts, reduce_index
                else:
                    weights = F.softmax(self.alphas_reduce, dim=-1)
            else:
                if gumbel:
                    weights, index = normal_hardwts, normal_index
                else:
                    weights = F.softmax(self.alphas_normal, dim=-1)
            s0, s1 = s1, cell(s0, s1, weights, index, gumbel)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits
    def _loss(self, input, target, gumbel=False):
        # training loss on a batch under the current architecture weights
        logits = self(input, gumbel)
        return self._criterion(logits, target)
    def _initialize_alphas(self):
        # one (num_edges, num_ops) parameter matrix per cell type,
        # initialised near zero so the softmax starts almost uniform
        k = sum(1 for i in range(self._steps) for n in range(2 + i))
        num_ops = len(PRIMITIVES)
        self.alphas_normal = nn.Parameter(1e-3 * torch.randn(k, num_ops))
        self.alphas_reduce = nn.Parameter(1e-3 * torch.randn(k, num_ops))
        self._arch_parameters = [
            self.alphas_normal,
            self.alphas_reduce,
        ]
    def arch_parameters(self):
        return self._arch_parameters
    def genotype(self):
        """Discretize the current alphas into a Genotype: for each node keep
        the 2 strongest incoming edges and each edge's best non-'none' op."""
        def _parse(weights):
            gene = []
            n = 2
            start = 0
            for i in range(self._steps):
                end = start + n
                W = weights[start:end].copy()
                # rank candidate input edges by their best non-'none' weight
                edges = sorted(range(i + 2),
                               key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[
                        :2]
                for j in edges:
                    # pick the strongest op on the edge, excluding 'none'
                    k_best = None
                    for k in range(len(W[j])):
                        if k != PRIMITIVES.index('none'):
                            if k_best is None or W[j][k] > W[j][k_best]:
                                k_best = k
                    gene.append((PRIMITIVES[k_best], j))
                start = end
                n += 1
            return gene
        gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy())
        gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy())
        concat = range(2 + self._steps - self._multiplier, self._steps + 2)
        genotype = Genotype(
            normal=gene_normal, normal_concat=concat,
            reduce=gene_reduce, reduce_concat=concat
        )
        return genotype
def test():
    """Smoke test: build a small search network and print its module tree."""
    config = dict(C=16, num_classes=10, layers=3,
                  criterion=nn.CrossEntropyLoss(), learnable_bn=False,
                  steps=2, multiplier=2, stem_multiplier=3)
    print(Network(**config))
test()
| 7,707 | 36.057692 | 144 | py |
PIBConv | PIBConv-main/cnn/test_cifar.py | import os
import sys
import glob
import numpy as np
import pandas as pd
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkCIFAR as Network
# Command-line interface for evaluating a pretrained model on CIFAR-10/100.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
parser.add_argument('--cifar100', action='store_true', default=False, help='search with cifar100 dataset')
args = parser.parse_args()
# Log to stdout with timestamps.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
# Dataset-dependent constants: class count and on-disk folder name.
if args.cifar100:
    CIFAR_CLASSES = 100
    data_folder = 'cifar-100-python'
else:
    CIFAR_CLASSES = 10
    data_folder = 'cifar-10-batches-py'
def main():
    """Load a trained CIFAR model from --model_path and report test accuracy."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed NumPy and torch (CPU + CUDA) for reproducible evaluation.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Architecture is looked up by name from genotypes.py.
    genotype = eval("genotypes.%s" % args.arch)
    logging.info('genotype = %s', genotype)
    model = Network(args.init_channels, CIFAR_CLASSES,
                    args.layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # Only the eval-time transform is needed here.
    if args.cifar100:
        _, test_transform = utils._data_transforms_cifar100(args)
    else:
        _, test_transform = utils._data_transforms_cifar10(args)
    if args.cifar100:
        test_data = dset.CIFAR100(
            root=args.data, train=False, download=True, transform=test_transform)
    else:
        test_data = dset.CIFAR10(
            root=args.data, train=False, download=True, transform=test_transform)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    model.drop_path_prob = args.drop_path_prob
    test_acc1, test_acc5, test_obj = infer(test_queue, model, criterion)
    logging.info('test_top1_acc %f, test_top5_acc %f', test_acc1, test_acc5)
def infer(test_queue, model, criterion):
    """Evaluate `model` on `test_queue`; return (top1 acc, top5 acc, avg loss)."""
    loss_meter = utils.AverageMeter()
    acc1_meter = utils.AverageMeter()
    acc5_meter = utils.AverageMeter()
    model.eval()
    with torch.no_grad():
        for step, (input, target) in enumerate(test_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            batch = input.size(0)
            loss_meter.update(loss.item(), batch)
            acc1_meter.update(prec1.item(), batch)
            acc5_meter.update(prec5.item(), batch)
            if step % args.report_freq == 0:
                logging.info('test %03d %e %f %f', step,
                             loss_meter.avg, acc1_meter.avg, acc5_meter.avg)
    return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
# Script entry point.
if __name__ == '__main__':
    main()
| 4,431 | 34.456 | 106 | py |
PIBConv | PIBConv-main/cnn/test_imagenet.py | import os
import sys
import numpy as np
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkImageNet as Network
# Command-line interface for evaluating a pretrained model on ImageNet.
parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()
# Log to stdout with timestamps.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
# ImageNet class count.
CLASSES = 1000
def main():
    """Load a trained ImageNet model from --model_path and report validation accuracy."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed NumPy and torch (CPU + CUDA) for reproducible evaluation.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Architecture is looked up by name from genotypes.py.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES,
                    args.layers, args.auxiliary, genotype)
    model = model.cuda()
    # Checkpoint stores the weights under the 'state_dict' key.
    model.load_state_dict(torch.load(args.model_path)['state_dict'])
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # Standard ImageNet eval pipeline: resize, center-crop, normalize.
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)
    model.drop_path_prob = args.drop_path_prob
    valid_acc_top1, valid_acc_top5, valid_obj = infer(
        valid_queue, model, criterion)
    logging.info('valid_acc_top1 %f', valid_acc_top1)
    logging.info('valid_acc_top5 %f', valid_acc_top5)
def infer(valid_queue, model, criterion):
    """Evaluate `model` on the ImageNet validation queue.

    Returns (top1 accuracy, top5 accuracy, average loss).

    Fixes over the legacy version: `target.cuda(async=True)` is a
    SyntaxError since Python 3.7 (`async` is a keyword); the deprecated
    Variable/volatile API is replaced by `torch.no_grad()`; scalars are
    read with `.item()` instead of `loss.data[0]` (which fails on
    0-dim tensors in modern PyTorch). The meters use `AverageMeter`,
    matching the sibling CIFAR scripts' use of the shared `utils` module.
    """
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.eval()
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step,
                             objs.avg, top1.avg, top5.avg)
    return top1.avg, top5.avg, objs.avg
# Script entry point.
if __name__ == '__main__':
    main()
| 4,020 | 33.663793 | 104 | py |
PIBConv | PIBConv-main/cnn/train_cifar.py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkCIFAR as Network
# Command-line interface for training a discovered architecture on CIFAR.
parser = argparse.ArgumentParser("cifar")
####################
# Model details
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
#be careful with this.
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
####################
# Training details
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--epochs', type=int, default=600, help='num of training epochs')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
####################
# Datasets
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--cifar100', action='store_true', default=False, help='whether to search with cifar100 dataset')
####################
# Others
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
args = parser.parse_args()
## LOUIS CHANGED##
# Experiment directory name embeds the architecture name and a timestamp.
args.save = 'Eval-{}-arch-{}-{}'.format(args.save, args.arch, time.strftime("%Y%m%d-%H%M%S"))
#args.save = 'Eval-{}-data-{}-arch-{}-{}'.format(args.save, args.dataset, args.arch, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Log both to stdout and to <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Dataset-dependent constants: class count and on-disk folder name.
if args.cifar100:
    CIFAR_CLASSES = 100
    data_folder = 'cifar-100-python'
else:
    CIFAR_CLASSES = 10
    data_folder = 'cifar-10-batches-py'
def main():
    """Train a discovered architecture from scratch on CIFAR-10/100."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed NumPy and torch (CPU + CUDA) for reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Architecture is looked up by name from genotypes.py.
    genotype = eval("genotypes.%s" % args.arch)
    logging.info('genotype = %s', genotype)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # utils.get_channel_size(args.save, model)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    if args.cifar100:
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
    else:
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
    if args.cifar100:
        train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
    else:
        train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    valid_acc_max = -1000
    for epoch in range(args.epochs):
        # NOTE(review): scheduler.get_lr() is deprecated in newer PyTorch in
        # favor of get_last_lr(); kept as-is here (used for logging only).
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Drop-path probability is ramped up linearly over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        scheduler.step()
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        # Checkpoint only when validation accuracy improves.
        if (valid_acc > valid_acc_max):
            utils.save(model, os.path.join(args.save, 'weights.pt'))
            valid_acc_max = valid_acc
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
def train(train_queue, model, criterion, optimizer):
    """Run one training epoch; return (top1 accuracy, average loss).

    Fixes over the legacy version: `nn.utils.clip_grad_norm` (deprecated
    since PyTorch 0.4 and later removed) is replaced by the in-place
    `clip_grad_norm_`, and the no-op `Variable` wrappers are dropped
    (tensors and Variables merged in PyTorch 0.4).
    """
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            # Auxiliary head (deep supervision) regularizes training.
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight * loss_aux
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate `model` on the validation queue; return (top1 acc, avg loss).

    Inference already runs under `torch.no_grad()`, so the legacy
    `Variable(..., volatile=True)` wrappers (removed in PyTorch 0.4+)
    are dropped in favor of plain `.cuda()` transfers.
    """
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.eval()
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
# Script entry point.
if __name__ == '__main__':
    main()
| 7,720 | 37.412935 | 117 | py |
PIBConv | PIBConv-main/cnn/operations.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# NAS search-space primitives. Each entry maps an op name to a factory
# taking (channels C, stride, affine) and returning an nn.Module. The
# 'pib_conv_*' entries are the PseudoInvBn (pseudo-inverted-bottleneck)
# blocks defined below in this file.
OPS = {
    'none': lambda C, stride, affine: Zero(stride),
    'avg_pool_3x3': lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
    'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
    'skip_connect': lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
    'pib_conv_3x3': lambda C, stride, affine: PseudoInvBn(C, C, 3, stride, 1, affine=affine),
    'pib_conv_5x5': lambda C, stride, affine: PseudoInvBn(C, C, 5, stride, 2, affine=affine),
    'pib_conv_7x7': lambda C, stride, affine: PseudoInvBn(C, C, 7, stride, 3, affine=affine),
    'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
    'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),
    'conv_7x1_1x7': lambda C, stride, affine: nn.Sequential(
        nn.ReLU(inplace=False),
        nn.Conv2d(C, C, (1, 7), stride=(1, stride),
                  padding=(0, 3), bias=False),
        nn.Conv2d(C, C, (7, 1), stride=(stride, 1),
                  padding=(3, 0), bias=False),
        nn.BatchNorm2d(C, affine=affine)
    )
}
class LayerNorm(nn.Module):
    """ LayerNorm that supports two data formats: channels_last (default) or channels_first.

    channels_last expects inputs of shape (batch, height, width, channels)
    and defers to F.layer_norm; channels_first expects (batch, channels,
    height, width) and normalizes over the channel dimension manually.
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        # Fixed: the original membership list contained "channels_first" twice.
        if self.data_format not in ("channels_last", "channels_first"):
            raise NotImplementedError
        self.normalized_shape = (normalized_shape, )

    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == "channels_first":
            # Normalize over the channel dim (dim=1), then apply the affine transform.
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
            return x
class ReLUConvBN(nn.Module):
    """Pre-activation conv unit: ReLU -> Conv2d -> BatchNorm."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(ReLUConvBN, self).__init__()
        layers = [
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_out, kernel_size, stride=stride,
                      padding=padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*layers)

    def forward(self, x):
        return self.op(x)
class DilConv(nn.Module):
    """Dilated depthwise-separable conv: ReLU -> dilated depthwise -> 1x1 pointwise -> BN."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
        super(DilConv, self).__init__()
        depthwise = nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, groups=C_in,
                              bias=False)
        pointwise = nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False)
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            depthwise,
            pointwise,
            nn.BatchNorm2d(C_out, affine=affine),
        )

    def forward(self, x):
        return self.op(x)
class SepConv(nn.Module):
    """Separable conv applied twice: (ReLU -> depthwise -> pointwise -> BN) x 2.

    Only the first depthwise conv carries the stride; only the final
    pointwise conv changes the channel count.
    """

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(SepConv, self).__init__()

        def _stage(cin, cout, s):
            return [
                nn.ReLU(inplace=False),
                nn.Conv2d(cin, cin, kernel_size=kernel_size, stride=s,
                          padding=padding, groups=cin, bias=False),
                nn.Conv2d(cin, cout, kernel_size=1, padding=0, bias=False),
                nn.BatchNorm2d(cout, affine=affine),
            ]

        self.op = nn.Sequential(*_stage(C_in, C_in, stride),
                                *_stage(C_in, C_out, 1))

    def forward(self, x):
        return self.op(x)
class LNormReduce(nn.Module):
    """LayerNorm (channels_first) followed by a stride-2 depthwise 2x2 conv (spatial downsample)."""

    def __init__(self, C_in):
        super(LNormReduce, self).__init__()
        self.op = nn.Sequential(
            LayerNorm(C_in, eps=1e-5, data_format="channels_first"),
            nn.Conv2d(C_in, C_in, kernel_size=2, stride=2, groups=C_in,
                      bias=False),
        )

    def forward(self, x):
        return self.op(x)
class Identity(nn.Module):
    """No-op module: returns its input unchanged."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
class Zero(nn.Module):
    """Produces an all-zero tensor, spatially subsampled when stride > 1."""

    def __init__(self, stride):
        super(Zero, self).__init__()
        self.stride = stride

    def forward(self, x):
        # Strided slicing mimics the spatial shape a strided conv would give.
        out = x if self.stride == 1 else x[:, :, ::self.stride, ::self.stride]
        return out.mul(0.)
class FactorizedReduce(nn.Module):
    """Halve spatial resolution with two offset stride-2 1x1 convs.

    One conv sees the input as-is, the other sees it shifted by one pixel;
    their outputs are concatenated channel-wise and batch-normalized.
    Odd-sized inputs have the shifted branch zero-padded so both branches
    produce matching spatial sizes.
    """

    def __init__(self, C_in, C_out, affine=True):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        self.relu = nn.ReLU(inplace=False)
        self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1,
                                stride=2, padding=0, bias=False)
        self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1,
                                stride=2, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(C_out, affine=affine)

    def forward(self, x):
        x = self.relu(x)
        shifted = x[:, :, 1:, 1:]
        if x.shape[2] % 2 != 0:
            # Odd spatial size: pad the shifted branch back to full size so
            # both stride-2 convs produce the same output resolution.
            shifted = F.pad(shifted, (0, 1, 0, 1), mode='constant', value=0)
        out = torch.cat([self.conv_1(x), self.conv_2(shifted)], dim=1)
        return self.bn(out)
class PseudoInvBn(nn.Module):
    """Pseudo-inverted-bottleneck conv block (PIBConv).

    Pipeline: depthwise conv (carries the stride) -> BN -> 1x1 expand (2x
    channels) -> depthwise reduce back to C_in -> GELU -> 1x1 project to
    C_out.
    """

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(PseudoInvBn, self).__init__()
        stages = [
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size,
                      stride=stride, padding=padding, groups=C_in, bias=False),
            nn.BatchNorm2d(C_in, affine=affine),
            nn.Conv2d(C_in, C_in * 2, kernel_size=1, padding=0, bias=False),
            nn.Conv2d(C_in * 2, C_in, kernel_size=kernel_size, stride=1,
                      padding=padding, groups=C_in, bias=False),
            nn.GELU(),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
        ]
        self.op = nn.Sequential(*stages)

    def forward(self, x):
        return self.op(x)
PIBConv | PIBConv-main/cnn/adas/Adas.py | """
"""
from torch.optim.optimizer import Optimizer, required
import sys
import numpy as np
import torch
mod_name = vars(sys.modules[__name__])['__name__']
if 'adas.' in mod_name:
from .metrics import Metrics
else:
from .optim.metrics import Metrics
class Adas(Optimizer):
    """
    Vectorized SGD from torch.optim.SGD
    """
    # Adas (Adaptive Scheduling) keeps one learning rate per parameter
    # tensor in `lr_vector`, updated once per epoch from the layers'
    # "knowledge gain" (KG) metric; step() is otherwise standard SGD with
    # momentum/dampening/nesterov.

    def __init__(self,
                 params,
                 lr: float = required,
                 beta: float = 0.8,
                 step_size: int = None,
                 linear: bool = False,
                 gamma: float = 1,
                 momentum: float = 0,
                 dampening: float = 0,
                 weight_decay: float = 0,
                 nesterov: bool = False):
        # Standard SGD hyper-parameter validation (as in torch.optim.SGD).
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError(
                "Nesterov momentum requires a momentum and zero dampening")
        super(Adas, self).__init__(params, defaults)
        # Adas Specific stuff (not SGD)
        # beta: EMA factor for the per-layer velocity; gamma: decay applied
        # every `step_size` epochs via `zeta`.
        if np.less(beta, 0) or np.greater_equal(beta, 1):
            raise ValueError(f'Invalid beta: {beta}')
        if np.less(gamma, 0):
            raise ValueError(f'Invalid gamma: {gamma}')
        if step_size is not None:
            if np.less_equal(step_size, 0):
                raise ValueError(f'Invalid step_size: {step_size}')
        self.step_size = step_size
        self.gamma = gamma
        self.beta = beta
        self.metrics = metrics = Metrics(params=params, linear=linear)
        # One learning rate per parameter tensor; masked (non-tracked)
        # params inherit their predecessor's rate in epoch_step().
        self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params))
        self.velocity = np.zeros(
            len(self.metrics.params) - len(self.metrics.mask))
        # Layers whose KG is still ~0 are "not ready" and keep a warm-up rate.
        self.not_ready = list(range(len(self.velocity)))
        self.init_lr = lr
        self.zeta = 1.
        self.KG = 0.

    def __setstate__(self, state):
        # Restore optimizer state and make sure 'nesterov' exists in every
        # param group (older checkpoints may lack it).
        super(Adas, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def epoch_step(self, epoch: int) -> None:
        # Recompute per-layer metrics, derive the per-layer velocity from
        # the change in knowledge gain, and refresh lr_vector.
        self.metrics()
        if epoch == 0:
            # First epoch: no KG delta yet, start from the initial lr.
            velocity = self.init_lr * np.ones(len(self.velocity))
            self.KG = self.metrics.KG(epoch)
        else:
            KG = self.metrics.KG(epoch)
            velocity = KG - self.KG
            self.KG = KG
            for idx in self.not_ready:
                if np.isclose(KG[idx], 0.):
                    # Layer has not started learning: keep a warm-up velocity.
                    velocity[idx] = self.init_lr - \
                        self.beta * self.velocity[idx]
                else:
                    self.not_ready.remove(idx)
        if self.step_size is not None:
            if epoch % self.step_size == 0 and epoch > 0:
                # self.lr_vector *= self.gamma
                self.zeta *= self.gamma
                # Add here:
                # self.velocity *= self.gamma
        # EMA update of velocity, clamped at zero (learning rates never negative).
        self.velocity = np.maximum(
            self.beta * self.velocity + self.zeta * velocity, 0.)
        count = 0
        for i in range(len(self.metrics.params)):
            if i in self.metrics.mask:
                # Masked params (e.g. biases/BN) reuse the previous layer's rate.
                self.lr_vector[i] = self.lr_vector[i - (1 if i > 0 else 0)]
            else:
                self.lr_vector[i] = self.velocity[count]
                count += 1

    def step(self, closure: callable = None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        iteration_group = 0
        for group in self.param_groups:
            iteration_group += 1
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p_index, p in enumerate(group['params']):
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(p.data, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(
                            d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf
                # p.data.add_(-group['lr'], d_p)
                # Per-parameter learning rate instead of the group-wide lr.
                p.data.add_(d_p, alpha=-self.lr_vector[p_index])
        return loss
| 5,240 | 34.174497 | 78 | py |
PIBConv | PIBConv-main/cnn/adas/metrics.py | """
"""
from typing import List, Union, Tuple
import sys
import numpy as np
import torch
mod_name = vars(sys.modules[__name__])['__name__']
if 'adas.' in mod_name:
from .components import LayerMetrics, ConvLayerMetrics
from .matrix_factorization import EVBMF
else:
from optim.components import LayerMetrics, ConvLayerMetrics, LayerType
from optim.matrix_factorization import EVBMF
class Metrics:
    """Per-layer low-rank metrics (rank, knowledge gain, condition) via EVBMF."""

    def __init__(self, params, linear: bool = False) -> None:
        '''
        parameters: list of torch.nn.Module.parameters()
        '''
        self.params = params
        self.history = list()
        mask = list()
        # Mask out parameters that are not tracked: everything that is not a
        # 4-D conv weight (and, when `linear` is True, also not a 2-D weight).
        for param_idx, param in enumerate(params):
            param_shape = param.shape
            if not linear:
                if len(param_shape) != 4:
                    mask.append(param_idx)
            else:
                if len(param_shape) != 4 and len(param_shape) != 2:
                    mask.append(param_idx)
        self.mask = set(mask)

    def compute_low_rank(self,
                         tensor: torch.Tensor,
                         normalizer: float) -> torch.Tensor:
        # Returns (rank, KG, condition) for `tensor` via EVBMF low-rank
        # factorization, or (None, None, None) when the SVD/EVBMF fails.
        if tensor.requires_grad:
            tensor = tensor.detach()
        try:
            tensor_size = tensor.shape
            # EVBMF expects the smaller dimension first.
            if tensor_size[0] > tensor_size[1]:
                tensor = tensor.T
            U_approx, S_approx, V_approx = EVBMF(tensor)
        except RuntimeError:
            return None, None, None
        rank = S_approx.shape[0] / tensor_size[0] # normalizer
        low_rank_eigen = torch.diag(S_approx).data.cpu().numpy()
        if len(low_rank_eigen) != 0:
            # Condition number of the retained spectrum (largest / smallest).
            condition = low_rank_eigen[0] / low_rank_eigen[-1]
            sum_low_rank_eigen = low_rank_eigen / \
                max(low_rank_eigen)
            sum_low_rank_eigen = np.sum(sum_low_rank_eigen)
        else:
            condition = 0
            sum_low_rank_eigen = 0
        KG = sum_low_rank_eigen / tensor_size[0] # normalizer
        return rank, KG, condition

    def KG(self, epoch: int) -> np.ndarray:
        # Vector of knowledge-gain values recorded at `epoch`; conv layers
        # use the mean of their input- and output-channel KG.
        KG_list = list()
        for i, (index, metric) in enumerate(self.history[epoch]):
            if isinstance(metric, ConvLayerMetrics):
                KG_list.append((metric.input_channel.KG +
                                metric.output_channel.KG) / 2)
            elif isinstance(metric, LayerMetrics):
                KG_list.append(metric.KG)
        return np.array(KG_list)

    def __call__(self) -> List[Tuple[int, Union[LayerMetrics,
                                                ConvLayerMetrics]]]:
        '''
        Computes the knowledge gain (S) and mapping condition (condition)
        '''
        metrics: List[Tuple[int, Union[LayerMetrics,
                                       ConvLayerMetrics]]] = list()
        # for separable convolution
        separable_conv = False
        for layer_index, layer in enumerate(self.params):
            if layer_index in self.mask:
                metrics.append((layer_index, None))
                continue
            # if np.less(np.prod(layer.shape), 10_000):
            # metrics.append((layer_index, None))
            if len(layer.shape) == 4:
                if layer.shape[1] == 1:
                    # depth-wise conv, don't compute metrics
                    # use the following point-wise conv's metrics instead
                    metrics.append((layer_index, None))
                    separable_conv = True
                else:
                    # other conv types
                    # Mode-3/mode-4 unfoldings expose the input- and
                    # output-channel structure of the 4-D conv weight.
                    layer_tensor = layer.data
                    tensor_size = layer_tensor.shape
                    mode_3_unfold = layer_tensor.permute(1, 0, 2, 3)
                    mode_3_unfold = torch.reshape(
                        mode_3_unfold, [tensor_size[1], tensor_size[0] *
                                        tensor_size[2] * tensor_size[3]])
                    mode_4_unfold = layer_tensor
                    mode_4_unfold = torch.reshape(
                        mode_4_unfold, [tensor_size[0], tensor_size[1] *
                                        tensor_size[2] * tensor_size[3]])
                    in_rank, in_KG, in_condition = self.compute_low_rank(
                        mode_3_unfold, tensor_size[1])
                    if in_rank is None and in_KG is None and in_condition is None:
                        # EVBMF failed: fall back to the previous epoch's
                        # values (or zeros on the first epoch).
                        if len(self.history) > 0:
                            in_rank = self.history[-1][
                                layer_index][1].input_channel.rank
                            in_KG = self.history[-1][
                                layer_index][1].input_channel.KG
                            in_condition = self.history[-1][
                                layer_index][1].input_channel.condition
                        else:
                            in_rank = in_KG = in_condition = 0.
                    out_rank, out_KG, out_condition = self.compute_low_rank(
                        mode_4_unfold, tensor_size[0])
                    if out_rank is None and out_KG is None and out_condition is None:
                        if len(self.history) > 0:
                            out_rank = self.history[-1][
                                layer_index][1].output_channel.rank
                            out_KG = self.history[-1][
                                layer_index][1].output_channel.KG
                            out_condition = self.history[-1][
                                layer_index][1].output_channel.condition
                        else:
                            out_rank = out_KG = out_condition = 0.
                    metrics.append((layer_index, ConvLayerMetrics(
                        input_channel=LayerMetrics(
                            rank=in_rank,
                            KG=in_KG,
                            condition=in_condition),
                        output_channel=LayerMetrics(
                            rank=out_rank,
                            KG=out_KG,
                            condition=out_condition))))
                    if separable_conv:
                        # copy current metrics to preceding depth-wise conv
                        metrics[layer_index-1] = (layer_index-1, metrics[-1][1])
                        separable_conv = False
            elif len(layer.shape) == 2:
                # Fully-connected weight: a single 2-D factorization.
                rank, KG, condition = self.compute_low_rank(
                    layer, layer.shape[0])
                if rank is None and KG is None and condition is None:
                    if len(self.history) > 0:
                        rank = self.history[-1][layer_index][1].rank
                        KG = self.history[-1][layer_index][1].KG
                        condition = self.history[-1][layer_index][1].condition
                    else:
                        rank = KG = condition = 0.
                metrics.append((layer_index, LayerMetrics(
                    rank=rank,
                    KG=KG,
                    condition=condition)))
            else:
                metrics.append((layer_index, None))
        self.history.append(metrics)
        return metrics
| 7,146 | 43.391304 | 85 | py |
PIBConv | PIBConv-main/cnn/adas/matrix_factorization.py | from __future__ import division
import numpy as np
# from scipy.sparse.linalg import svds
from scipy.optimize import minimize_scalar
import torch
def EVBMF(Y, sigma2=None, H=None):
    """Implementation of the analytical solution to Empirical Variational
    Bayes Matrix Factorization.
    This function can be used to calculate the analytical solution to
    empirical VBMF.
    This is based on the paper and MatLab code by Nakajima et al.:
    "Global analytic solution of fully-observed variational Bayesian matrix
    factorization."
    Notes
    -----
    If sigma2 is unspecified, it is estimated by minimizing the free
    energy.
    If H is unspecified, it is set to the smallest of the sides of the
    input Y.
    Attributes
    ----------
    Y : numpy-array
        Input matrix that is to be factorized. Y has shape (L,M), where L<=M.
    sigma2 : int or None (default=None)
        Variance of the noise on Y.
    H : int or None (default = None)
        Maximum rank of the factorized matrices.
    Returns
    -------
    U : numpy-array
        Left-singular vectors.
    S : numpy-array
        Diagonal matrix of singular values.
    V : numpy-array
        Right-singular vectors.
    post : dictionary
        Dictionary containing the computed posterior values.
    References
    ----------
    .. [1] Nakajima, Shinichi, et al. "Global analytic solution of
    fully-observed variational Bayesian matrix factorization." Journal of
    Machine Learning Research 14.Jan (2013): 1-37.
    .. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by
    variational Bayesian PCA." Advances in Neural Information Processing
    Systems. 2012.
    """
    # NOTE(review): despite the docstring, this version operates on torch
    # tensors (torch.svd below); posterior computation is commented out and
    # only (U, S, V) truncated at the estimated rank are returned.
    L, M = Y.shape  # has to be L<=M
    if H is None:
        H = L
    alpha = L/M
    tauubar = 2.5129*np.sqrt(alpha)
    # SVD of the input matrix, max rank of H
    # U, s, V = np.linalg.svd(Y)
    U, s, V = torch.svd(Y)
    U = U[:, :H]
    s = s[:H]
    V = V[:H].T
    # Calculate residual
    residual = 0.
    if H < L:
        # residual = np.sum(np.sum(Y**2)-np.sum(s**2))
        residual = torch.sum(np.sum(Y**2)-np.sum(s**2))
    # Estimation of the variance when sigma2 is unspecified
    if sigma2 is None:
        xubar = (1+tauubar)*(1+alpha/tauubar)
        eH_ub = int(np.min([np.ceil(L/(1+alpha))-1, H]))-1
        # upper_bound = (np.sum(s**2)+residual)/(L*M)
        # lower_bound = np.max(
        #     [s[eH_ub+1]**2/(M*xubar), np.mean(s[eH_ub+1:]**2)/M])
        upper_bound = (torch.sum(s**2)+residual)/(L*M)
        lower_bound = torch.max(torch.stack(
            [s[eH_ub+1]**2/(M*xubar), torch.mean(s[eH_ub+1:]**2)/M], dim=0))
        scale = 1.  # /lower_bound
        s = s*np.sqrt(scale)
        residual = residual*scale
        lower_bound = lower_bound*scale
        upper_bound = upper_bound*scale
        # Minimize the free-energy objective EVBsigma2 over [lower, upper].
        sigma2_opt = minimize_scalar(
            EVBsigma2, args=(L, M, s.cpu().numpy(), residual, xubar),
            bounds=[lower_bound.cpu().numpy(), upper_bound.cpu().numpy()],
            method='Bounded')
        sigma2 = sigma2_opt.x
    # Threshold gamma term
    threshold = np.sqrt(M*sigma2*(1+tauubar)*(1+alpha/tauubar))
    # pos = number of singular values kept after thresholding.
    # pos = np.sum(s > threshold)
    pos = torch.sum(s > threshold)
    # Formula (15) from [2]
    # d = torch.multiply(s[:pos]/2,
    #                    1-torch.divide(
    #                        torch.tensor((L+M)*sigma2, device=s.device),
    #                        s[:pos]**2) + torch.sqrt((1-torch.divide(
    #                            torch.tensor(
    #                                (L+M)*sigma2, device=s.device),
    #                            s[:pos]**2))**2 -
    #                        4*L*M*sigma2**2/s[:pos]**4))
    # d = np.multiply(s[:pos]/2, 1-np.divide((L+M)*sigma2, s[:pos]**2) + np.sqrt(
    #     (1-np.divide((L+M)*sigma2, s[:pos]**2))**2 - 4*L*M*sigma2**2/s[:pos]**4))
    d = (s[:pos]/2)*(1-(L+M)*sigma2/s[:pos]**2 +
                     torch.sqrt((1 -
                                 (L+M)*sigma2/s[:pos]**2)**2 - 4*L*M*sigma2**2/s[:pos]**4))
    # Computation of the posterior
    # post = {}
    # post['ma'] = np.zeros(H)
    # post['mb'] = np.zeros(H)
    # post['sa2'] = np.zeros(H)
    # post['sb2'] = np.zeros(H)
    # post['cacb'] = np.zeros(H)
    # tau = np.multiply(d, s[:pos])/(M*sigma2)
    # delta = np.multiply(np.sqrt(np.divide(M*d, L*s[:pos])), 1+alpha/tau)
    # post['ma'][:pos] = np.sqrt(np.multiply(d, delta))
    # post['mb'][:pos] = np.sqrt(np.divide(d, delta))
    # post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos])
    # post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
    # post['cacb'][:pos] = np.sqrt(np.multiply(d, s[:pos])/(L*M))
    # post['sigma2'] = sigma2
    # post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) +
    #                  (residual+np.sum(s**2))/sigma2 + np.sum(
    #     M*np.log(tau+1) + L*np.log(tau/alpha + 1) - M*tau))
    return U[:, :pos], torch.diag(d), V[:, :pos]  # , post
def EVBsigma2(sigma2, L, M, s, residual, xubar):
    """Evidence objective minimized over sigma2 in the EVBMF search.

    Evaluated by scipy's bounded scalar minimizer; smaller is better.
    `s` are singular values (host numpy array), `residual` the discarded
    spectral energy, and `xubar` the threshold splitting the spectrum.
    """
    rank = len(s)
    alpha = L / M
    scaled = s**2 / (M * sigma2)
    above = scaled[scaled > xubar]
    below = scaled[scaled <= xubar]
    tau_above = tau(above, alpha)
    objective = (
        np.sum(below - np.log(below))
        + np.sum(above - tau_above)
        + np.sum(np.log((tau_above + 1) / above))
        + alpha * np.sum(np.log(tau_above / alpha + 1))
        + residual / (M * sigma2)
        + (L - rank) * np.log(sigma2)
    )
    return objective
def phi0(x):
    """Elementwise x - log(x) (per-component evidence term)."""
    return x - np.log(x)
def phi1(x, alpha):
    """Evidence term for components above threshold, in terms of tau(x, alpha)."""
    # Evaluate tau once; the original recomputed it three times.
    t = tau(x, alpha)
    return np.log(t + 1) + alpha * np.log(t / alpha + 1) - t
def tau(x, alpha):
    """Larger root of t**2 - (x - 1 - alpha)*t + alpha = 0 (elementwise)."""
    shifted = x - (1 + alpha)
    return 0.5 * (shifted + np.sqrt(shifted**2 - 4 * alpha))
| 5,693 | 30.458564 | 91 | py |
PIBConv | PIBConv-main/cnn/ADP_utils/thresholded_metrics.py | import os
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .classesADP import classesADP
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
class Thresholded_Metrics:
    """Per-class thresholded evaluation for the multi-label ADP classifier.

    Construction runs the full pipeline immediately: pick a per-class
    operating threshold from each ROC curve, compute thresholded confusion
    metrics, save a ROC plot, and write all metrics to an Excel file under
    ./eval.
    """

    def __init__(self, targets, predictions, level, network, epoch):
        # targets/predictions are torch tensors of shape
        # (n_samples, n_classes): 0/1 multi-label targets and raw scores.
        # NOTE(review): .numpy() requires the tensors to already be on CPU
        # -- confirm against the caller.
        self.target = targets.numpy()
        self.predictions = predictions.numpy()
        # class names for this taxonomy level
        self.class_names = classesADP[level]['classesNames']
        # output directory ./eval relative to the current working directory
        cur_path = os.path.abspath(os.path.curdir)
        self.eval_dir = os.path.join(cur_path, 'eval')
        if not os.path.exists(self.eval_dir):
            os.makedirs(self.eval_dir)
        # session id embedded in every output filename
        self.sess_id = 'adp_' + str(network) + '_' + str(level) + '_Epoch_' + str(epoch + 1) + '_Release1_1um_bicubic'
        # Get optimal class thresholds (plus ROC curves and AUCs)
        self.class_thresholds, self.class_fprs, self.class_tprs, self.auc_measures = self.get_optimal_thresholds()
        # Get thresholded per-class and overall metrics
        self.metric_tprs, self.metric_fprs, self.metric_tnrs, self.metric_fnrs, self.metric_accs, self.metric_f1s = self.get_thresholded_metrics()
        # Plot ROC curves
        self.plot_rocs()
        # Write metrics to excel
        self.write_to_excel()

    def get_optimal_thresholds(self):
        """Pick a per-class score threshold from each ROC curve.

        The threshold is the one closest to the TPR == 1 - FPR point
        (equal-error-rate style), clipped into ``thresh_rng``.

        Returns (thresholds, fprs, tprs, aucs); ``aucs`` carries one extra
        trailing entry -- the positive-count-weighted mean AUC.
        """
        def get_opt_thresh(tprs, fprs, thresholds):
            # threshold whose (TPR, FPR) lies nearest the TPR = 1 - FPR line
            return thresholds[np.argmin(abs(tprs - (1 - fprs)))]

        class_fprs = []
        class_tprs = []
        class_thresholds = []
        auc_measures = []
        thresh_rng = [1/3,1]  # clamp chosen thresholds into this range
        for iter_class in range(self.predictions.shape[1]):
            fprs, tprs, thresholds = \
                roc_curve(self.target[:, iter_class], self.predictions[:, iter_class])
            auc_measure = auc(fprs, tprs)
            opt_thresh = min(max(get_opt_thresh(tprs, fprs, thresholds), thresh_rng[0]), thresh_rng[1])
            class_thresholds.append(opt_thresh)
            class_fprs.append(fprs)
            class_tprs.append(tprs)
            auc_measures.append(auc_measure)
        # Append a weighted-average AUC (weights = per-class positive counts).
        auc_measures.append(sum(np.sum(self.target, 0) * auc_measures)/np.sum(self.target))
        return class_thresholds, class_fprs, class_tprs, auc_measures

    def get_thresholded_metrics(self):
        """Confusion-matrix rates at the chosen per-class thresholds.

        Returns six arrays (TPR, FPR, TNR, FNR, ACC, F1), each of length
        n_classes + 1; the trailing entry pools counts over all classes.
        Divide-by-zero for classes with no positives/negatives yields
        NaN/inf silently (warnings suppressed below).
        """
        predictions_thresholded = self.predictions >= self.class_thresholds
        with np.errstate(divide = 'ignore', invalid = 'ignore'):
            # Per-class counts (reduced over the sample axis)
            cond_positive = np.sum(self.target == 1, 0)
            cond_negative = np.sum(self.target == 0, 0)
            true_positive = np.sum((self.target == 1) & (predictions_thresholded == 1), 0)
            false_positive = np.sum((self.target == 0) & (predictions_thresholded == 1), 0)
            true_negative = np.sum((self.target == 0) & (predictions_thresholded == 0), 0)
            false_negative = np.sum((self.target == 1) & (predictions_thresholded == 0), 0)
            class_tprs = true_positive / cond_positive
            class_fprs = false_positive / cond_negative
            class_tnrs = true_negative / cond_negative
            class_fnrs = false_negative / cond_positive
            class_accs = np.sum(self.target == predictions_thresholded, 0) / predictions_thresholded.shape[0]
            class_f1s = (2 * true_positive) / (2 * true_positive + false_positive + false_negative)
            # Overall ("_T") metrics pooled over every (sample, class) cell
            cond_positive_T = np.sum(self.target == 1)
            cond_negative_T = np.sum(self.target == 0)
            true_positive_T = np.sum((self.target == 1) & (predictions_thresholded == 1))
            false_positive_T = np.sum((self.target == 0) & (predictions_thresholded == 1))
            true_negative_T = np.sum((self.target == 0) & (predictions_thresholded == 0))
            false_negative_T = np.sum((self.target == 1) & (predictions_thresholded == 0))
            tpr_T = true_positive_T / cond_positive_T
            fpr_T = false_positive_T / cond_negative_T
            tnr_T = true_negative_T / cond_negative_T
            fnr_T = false_negative_T / cond_positive_T
            acc_T = np.sum(self.target == predictions_thresholded) / np.prod(predictions_thresholded.shape)
            f1_T = (2 * true_positive_T) / (2 * true_positive_T + false_positive_T + false_negative_T)
            # Append the pooled metric as the final entry of each array
            class_tprs = np.append(class_tprs, tpr_T)
            class_fprs = np.append(class_fprs, fpr_T)
            class_tnrs = np.append(class_tnrs, tnr_T)
            class_fnrs = np.append(class_fnrs, fnr_T)
            class_accs = np.append(class_accs, acc_T)
            class_f1s = np.append(class_f1s, f1_T)
        return class_tprs, class_fprs, class_tnrs, class_fnrs, class_accs, class_f1s

    def plot_rocs(self):
        """Save a single ROC figure with one curve per class to ``eval_dir``."""
        plt.figure(1)
        plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
        for iter_class in range(len(self.class_names)):
            plt.plot(self.class_fprs[iter_class], self.class_tprs[iter_class], label=self.class_names[iter_class])
        plt.xlabel('False positive rate')
        plt.ylabel('True positive rate')
        plt.title('ROC curve')
        plt.legend(loc='best')
        plt.savefig(os.path.join(self.eval_dir, 'ROC_' + self.sess_id + '.png'), bbox_inches='tight')
        plt.close()

    def write_to_excel(self):
        """Write per-class rows plus an 'Average' row of all metrics to xlsx."""
        sess_xlsx_path = os.path.join(self.eval_dir, 'metrics_' + self.sess_id + '.xlsx')
        # Each metric array already carries the pooled value as its last
        # entry, matching the trailing 'Average' label.
        df = pd.DataFrame({'HTT': self.class_names + ['Average'],
                           'TPR': list(self.metric_tprs),
                           'FPR': list(self.metric_fprs),
                           'TNR': list(self.metric_tnrs),
                           'FNR': list(self.metric_fnrs),
                           'ACC': list(self.metric_accs),
                           'F1': list(self.metric_f1s),
                           'AUC': self.auc_measures}, columns=['HTT', 'TPR', 'FPR', 'TNR', 'FNR', 'ACC', 'F1', 'AUC'])
        df.to_excel(sess_xlsx_path)
| 6,205 | 46.738462 | 146 | py |
clx-branch-23.04 | clx-branch-23.04/examples/run_dga_training.py | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example Usage: python run_dga_training.py \
--training-data benign_and_dga_domains.csv \
--output-dir trained_models \
--batch-size 10000 \
--epochs 2
"""
import os
import cudf
import torch
import argparse
from datetime import datetime
from clx.analytics.dga_detector import DGADetector
# Hyperparameters for the DGA RNN classifier (passed to DGADetector below).
LR = 0.001          # learning rate given to DGADetector(lr=...)
N_LAYERS = 4        # n_layers argument of init_model (RNN depth)
CHAR_VOCAB = 128    # character vocabulary size -- presumably ASCII; confirm
HIDDEN_SIZE = 100   # hidden state size of the RNN
N_DOMAIN_TYPE = 2   # number of output classes (benign vs. DGA)
def main():
    """Train a DGA detector and save a timestamped model file.

    Reads the module-level ``args`` dict (populated by ``parse_cmd_args``
    in the ``__main__`` guard), trains the RNN classifier via clx on the
    CSV named by --training-data, and writes the model under --output-dir.
    """
    epochs = int(args["epochs"])
    input_filepath = args["training_data"]
    batch_size = int(args["batch_size"])
    output_dir = args["output_dir"]
    # load input data to gpu memory; CSV must have 'domain' and 'type' columns
    input_df = cudf.read_csv(input_filepath)
    train_data = input_df['domain']
    labels = input_df['type']
    del input_df  # release the full frame before training
    dd = DGADetector(lr=LR)
    dd.init_model(
        n_layers=N_LAYERS,
        char_vocab=CHAR_VOCAB,
        hidden_size=HIDDEN_SIZE,
        n_domain_type=N_DOMAIN_TYPE,
    )
    # train_size=0.7: 70/30 train/validation split inside train_model
    dd.train_model(train_data, labels, batch_size=batch_size, epochs=epochs, train_size=0.7)
    if not os.path.exists(output_dir):
        print("Creating directory '{}'".format(output_dir))
        os.makedirs(output_dir)
    now = datetime.now()
    model_filename = "rnn_classifier_{}.bin".format(now.strftime("%Y-%m-%d_%H_%M_%S"))
    model_filepath = os.path.join(output_dir, model_filename)
    print("Saving trained model to location '{}'".format(model_filepath))
    dd.save_model(model_filepath)
def parse_cmd_args():
    """Parse the command line for the DGA training script.

    Returns:
        dict: argument names (dashes converted to underscores) mapped to
        their string values; all four options are required.
    """
    parser = argparse.ArgumentParser(description="DGA detection model training script")
    parser.add_argument(
        "--training-data", required=True, help="CSV with domain and type fields"
    )
    parser.add_argument(
        "--output-dir", required=True, help="output directory to save new model files"
    )
    parser.add_argument(
        "--batch-size",
        required=True,
        help="Dividing dataset into number of batches or sets or parts",
    )
    parser.add_argument(
        "--epochs",
        required=True,
        help="One epoch is when an entire dataset is passed forward and backward through the neural network only once",
    )
    return vars(parser.parse_args())
# execution starts here
if __name__ == "__main__":
    # parse CLI options into the module-level dict that main() reads
    args = parse_cmd_args()
    main()
| 2,926 | 31.522222 | 119 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/tests/test_dga_detector.py | # Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
from clx.utils.data.dataloader import DataLoader
from clx.analytics.dga_detector import DGADetector
from clx.analytics.dga_dataset import DGADataset
from clx.analytics.model.rnn_classifier import RNNClassifier
import torch
from os import path
from faker import Faker
import random
# Module-level detector shared by every test in this file; init_model
# builds the underlying RNN classifier with default hyperparameters.
dd = DGADetector()
dd.init_model()
def test_train_model():
    """Train on 200 Faker-generated DGA domains with random labels (GPU only)."""
    if torch.cuda.is_available():
        fake = Faker()
        Faker.seed(0)  # deterministic synthetic domains
        domain_col = [fake.dga() for _ in range(200)]
        label_col = [random.randint(0, 1) for _ in range(200)]
        train_gdf = cudf.DataFrame(list(zip(domain_col, label_col)), columns=["domain", "label"])
        # train model
        dd.train_model(train_gdf["domain"], train_gdf["label"], batch_size=2)
        # multi-GPU wraps the model in DataParallel, hence .module
        gpu_count = torch.cuda.device_count()
        if gpu_count > 1:
            assert isinstance(dd.model.module, RNNClassifier)
        else:
            assert isinstance(dd.model, RNNClassifier)
def test_evaluate_model():
    """Evaluate the shared detector on a two-row dataset (GPU only)."""
    if torch.cuda.is_available():
        test_df = cudf.DataFrame({"domain": ["cnn.com", "bakercityherald.com"], "type": [1, 0]})
        truncate = 100  # max domain length kept by DGADataset
        dataset = DGADataset(test_df, truncate)
        dataloader = DataLoader(dataset, batchsize=2)
        # evaluate model; only checks the accuracy is numeric
        accuracy = dd.evaluate_model(dataloader)
        assert isinstance(accuracy, (int, float))
def test_predict():
    """predict() returns one integer class label per input domain (GPU only)."""
    if torch.cuda.is_available():
        test_domains = cudf.Series(["nvidia.com", "dfsdfsdf"])
        # predict
        preds = dd.predict(test_domains)
        assert len(preds) == 2
        assert preds.dtype == int
        assert isinstance(preds, cudf.core.series.Series)
def test2_predict():
    """predict(probability=True) returns float probabilities (GPU only)."""
    if torch.cuda.is_available():
        test_domains = cudf.Series(["nvidia.com", "dfsdfsdf"])
        # predict
        preds = dd.predict(test_domains, probability=True)
        assert len(preds) == 2
        assert preds.dtype == float
        assert isinstance(preds, cudf.core.series.Series)
def test_save_model(tmpdir):
    """save_model() writes a model file to disk (GPU only)."""
    if torch.cuda.is_available():
        # save model
        dd.save_model(str(tmpdir.join("clx_dga.mdl")))
        assert path.exists(str(tmpdir.join("clx_dga.mdl")))
def test_load_model(tmpdir):
    """Round-trip save_model/load_model into a fresh detector (GPU only)."""
    if torch.cuda.is_available():
        # save model
        dd.save_model(str(tmpdir.join("clx_dga.mdl")))
        assert path.exists(str(tmpdir.join("clx_dga.mdl")))
        # load model into a brand-new detector instance
        dd2 = DGADetector()
        dd2.init_model()
        dd2.load_model(str(tmpdir.join("clx_dga.mdl")))
        # multi-GPU wraps the model in DataParallel, hence .module
        gpu_count = torch.cuda.device_count()
        if gpu_count > 1:
            assert isinstance(dd2.model.module, RNNClassifier)
        else:
            assert isinstance(dd2.model, RNNClassifier)
def test_save_checkpoint(tmpdir):
    """save_checkpoint() writes a checkpoint file to disk (GPU only)."""
    if torch.cuda.is_available():
        # save checkpoint
        dd.save_checkpoint(str(tmpdir.join("clx_dga.mdl")))
        assert path.exists(str(tmpdir.join("clx_dga.mdl")))
def test_load_checkpoint(tmpdir):
    """Round-trip save_checkpoint/load_checkpoint into a fresh detector.

    The original body called save_model/load_model, byte-for-byte
    duplicating test_load_model and leaving load_checkpoint untested.
    """
    if torch.cuda.is_available():
        # save checkpoint
        dd.save_checkpoint(str(tmpdir.join("clx_dga.mdl")))
        assert path.exists(str(tmpdir.join("clx_dga.mdl")))
        # load checkpoint into a brand-new detector instance
        dd2 = DGADetector()
        dd2.init_model()
        dd2.load_checkpoint(str(tmpdir.join("clx_dga.mdl")))
        # multi-GPU wraps the model in DataParallel, hence .module
        gpu_count = torch.cuda.device_count()
        if gpu_count > 1:
            assert isinstance(dd2.model.module, RNNClassifier)
        else:
            assert isinstance(dd2.model, RNNClassifier)
| 4,063 | 32.586777 | 97 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/tests/test_asset_classification.py | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cudf
import clx
from clx.analytics.asset_classification import AssetClassification
import torch
from os import path
import random
import pandas as pd
# Synthetic tabular fixture: 9000 rows, ten integer feature columns with
# differing cardinalities and a 7-class label. No random seed -- data is
# regenerated per test session.
column1 = [random.randint(1, 24) for _ in range(9000)]
column2 = [random.randint(1, 4) for _ in range(9000)]
column3 = [random.randint(1, 9) for _ in range(9000)]
column4 = [random.randint(1, 26) for _ in range(9000)]
column5 = [random.randint(1, 3) for _ in range(9000)]
column6 = [random.randint(1, 9) for _ in range(9000)]
column7 = [random.randint(1, 37) for _ in range(9000)]
column8 = [random.randint(1, 8) for _ in range(9000)]
column9 = [random.randint(1, 4) for _ in range(9000)]
column10 = [random.randint(1, 11) for _ in range(9000)]
label = [random.randint(0, 6) for _ in range(9000)]
# Build on host with pandas first, then move to GPU memory via cudf.
train_pd = pd.DataFrame(list(zip(column1, column2, column3, column4, column5, column6, column7, column8, column9, column10, label)), columns=["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "label"])
train_gdf = cudf.from_pandas(train_pd)
batch_size = 6
epochs = 15
@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_train_model_mixed_cat_cont(tmpdir, train_gdf):
    """Train with eight categorical and two standardized continuous columns."""
    train_gdf = train_gdf.copy()  # avoid mutating the shared fixture
    cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8"]
    cont_cols = ["9", "10"]
    train_gdf[cont_cols] = normalize_conts(train_gdf[cont_cols])
    ac = AssetClassification()
    ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
    if torch.cuda.is_available():
        assert isinstance(ac._model, clx.analytics.model.tabular_model.TabularModel)
@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_train_model_all_cat(tmpdir, train_gdf):
    """Train treating all ten feature columns as categorical."""
    train_gdf = train_gdf.copy()  # avoid mutating the shared fixture
    cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
    cont_cols = []
    ac = AssetClassification()
    ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
    if torch.cuda.is_available():
        assert isinstance(ac._model, clx.analytics.model.tabular_model.TabularModel)
@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_train_model_all_cont(tmpdir, train_gdf):
    """Train treating all ten feature columns as standardized continuous."""
    train_gdf = train_gdf.copy()  # avoid mutating the shared fixture
    cont_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
    cat_cols = []
    train_gdf[cont_cols] = normalize_conts(train_gdf[cont_cols])
    ac = AssetClassification()
    ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
    if torch.cuda.is_available():
        assert isinstance(ac._model, clx.analytics.model.tabular_model.TabularModel)
@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_predict(tmpdir, train_gdf):
    """Train on all-categorical features, then predict on a head slice.

    Fixes a silent no-op: cudf's ``drop`` is not in-place, so the original
    ``test_gdf.drop("label", axis=1)`` discarded its result and the label
    column was never actually removed from the prediction input.
    """
    if torch.cuda.is_available():
        cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
        cont_cols = []
        ac = AssetClassification()
        ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
        # predict on the first rows, with the label column removed
        test_gdf = train_gdf.head()
        test_gdf = test_gdf.drop("label", axis=1)
        preds = ac.predict(test_gdf, cat_cols, cont_cols)
        assert isinstance(preds, cudf.core.series.Series)
        assert len(preds) == len(test_gdf)
        assert preds.dtype == int
def test_save_model(tmpdir):
    """Train briefly, then check save_model writes a file (GPU only)."""
    if torch.cuda.is_available():
        cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
        cont_cols = []
        ac = AssetClassification()
        ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
        # save model
        ac.save_model(str(tmpdir.join("clx_ac.mdl")))
        assert path.exists(str(tmpdir.join("clx_ac.mdl")))
def test_load_model(tmpdir):
    """Round-trip save_model/load_model into a fresh classifier (GPU only)."""
    if torch.cuda.is_available():
        cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
        cont_cols = []
        ac = AssetClassification()
        ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
        # save model
        ac.save_model(str(tmpdir.join("clx_ac.mdl")))
        assert path.exists(str(tmpdir.join("clx_ac.mdl")))
        # load model into a new instance
        ac2 = AssetClassification()
        ac2.load_model(str(tmpdir.join("clx_ac.mdl")))
        assert isinstance(ac2._model, clx.analytics.model.tabular_model.TabularModel)
def normalize_conts(gdf):
    """Standardize every column to zero mean and unit population variance
    (std with ddof=0). Returns a new frame; the input is not modified."""
    col_means = gdf.mean(0)
    col_stds = gdf.std(ddof=0)
    return (gdf - col_means) / col_stds
| 4,887 | 38.104 | 202 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/tests/test_binary_sequence_classifier.py | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from os import path
import cudf
import torch
import transformers
from cuml.model_selection import train_test_split
from faker import Faker
from clx.analytics.binary_sequence_classifier import BinarySequenceClassifier
# Module-level classifier shared by every test; the BERT base model is
# only loaded when a GPU is present.
sc = BinarySequenceClassifier()
if torch.cuda.is_available():
    sc.init_model("bert-base-uncased")
def test_train_model():
    """Fine-tune for one epoch on 200 Faker emails with binary labels (GPU only)."""
    if torch.cuda.is_available():
        fake = Faker()
        email_col = [fake.text() for _ in range(200)]
        label_col = [random.randint(0, 1) for _ in range(200)]
        emails_gdf = cudf.DataFrame(list(zip(email_col, label_col)), columns=["email", "label"])
        X_train, X_test, y_train, y_test = train_test_split(
            emails_gdf, "label", train_size=0.8, random_state=10
        )
        sc.train_model(
            X_train["email"],
            y_train,
            learning_rate=3e-5,
            max_seq_len=128,
            batch_size=6,
            epochs=1,
        )
        # model is wrapped (e.g. DataParallel), hence .module
        assert isinstance(
            sc._model.module,
            transformers.models.bert.modeling_bert.BertForSequenceClassification,
        )
def test_evaluate_model():
    """evaluate_model returns an accuracy in [0, 1] (GPU only)."""
    if torch.cuda.is_available():
        X_test = cudf.Series(["email 1", "email 2"])
        y_test = cudf.Series([0, 0])
        accuracy = sc.evaluate_model(
            X_test, y_test, max_seq_len=128, batch_size=32
        )
        assert accuracy >= 0.0 and accuracy <= 1.0
def test_predict():
    """predict returns boolean labels for each input string (GPU only)."""
    if torch.cuda.is_available():
        X_test = cudf.Series(["email 1", "email 2"])
        preds = sc.predict(X_test, max_seq_len=128)
        # first element of the returned tuple holds the boolean predictions
        assert preds[0].isin([False, True]).equals(cudf.Series([True, True]))
def test_save_model(tmpdir):
    """save_model writes HuggingFace config + weight files (GPU only)."""
    if torch.cuda.is_available():
        sc.save_model(tmpdir)
        assert path.exists(str(tmpdir.join("config.json")))
        assert path.exists(str(tmpdir.join("pytorch_model.bin")))
def test_save_checkpoint(tmpdir):
    """save_checkpoint writes a .tar checkpoint file (GPU only)."""
    if torch.cuda.is_available():
        fname = str(tmpdir.mkdir("tmp_test_sequence_classifier").join("sc_checkpoint.tar"))
        sc.save_checkpoint(fname)
        assert path.exists(fname)
def test_load_checkpoint(tmpdir):
    """Round-trip save_checkpoint/load_checkpoint on the shared model (GPU only)."""
    if torch.cuda.is_available():
        fname = str(tmpdir.mkdir("tmp_test_sequence_classifier").join("sc_checkpoint.tar"))
        sc.save_checkpoint(fname)
        assert path.exists(fname)
        sc.load_checkpoint(fname)
        assert isinstance(
            sc._model.module,
            transformers.models.bert.modeling_bert.BertForSequenceClassification,
        )
| 3,113 | 32.12766 | 96 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/tests/test_multiclass_sequence_classifier.py | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from os import path
import cudf
import torch
import transformers
from cuml.model_selection import train_test_split
from faker import Faker
from clx.analytics.multiclass_sequence_classifier import MulticlassSequenceClassifier
# Module-level 3-class classifier shared by every test; the BERT base
# model is only loaded when a GPU is present.
sc = MulticlassSequenceClassifier()
if torch.cuda.is_available():
    sc.init_model("bert-base-uncased", num_labels=3)
def test_train_model():
    """Fine-tune for one epoch on 200 Faker emails with 3-class labels (GPU only)."""
    if torch.cuda.is_available():
        fake = Faker()
        email_col = [fake.text() for _ in range(200)]
        label_col = [random.randint(0, 2) for _ in range(200)]
        emails_gdf = cudf.DataFrame(list(zip(email_col, label_col)), columns=["email", "label"])
        X_train, X_test, y_train, y_test = train_test_split(
            emails_gdf, "label", train_size=0.8, random_state=10
        )
        sc.train_model(
            X_train["email"],
            y_train,
            learning_rate=3e-5,
            max_seq_len=128,
            batch_size=6,
            epochs=1,
        )
        # model is wrapped (e.g. DataParallel), hence .module
        assert isinstance(
            sc._model.module,
            transformers.models.bert.modeling_bert.BertForSequenceClassification,
        )
def test_evaluate_model():
    """evaluate_model returns an accuracy in [0, 1] (GPU only)."""
    if torch.cuda.is_available():
        X_test = cudf.Series(["email 1", "email 2"])
        y_test = cudf.Series([0, 0])
        accuracy = sc.evaluate_model(
            X_test, y_test, max_seq_len=128, batch_size=32
        )
        assert accuracy >= 0.0 and accuracy <= 1.0
def test_predict():
    """predict returns integer class labels in {0, 1, 2} (GPU only)."""
    if torch.cuda.is_available():
        X_test = cudf.Series(["email 1", "email 2"])
        preds = sc.predict(X_test, max_seq_len=128)
        assert preds.isin([0, 1, 2]).equals(cudf.Series([True, True]))
def test_save_model(tmpdir):
    """save_model writes HuggingFace config + weight files (GPU only)."""
    if torch.cuda.is_available():
        sc.save_model(tmpdir)
        assert path.exists(str(tmpdir.join("config.json")))
        assert path.exists(str(tmpdir.join("pytorch_model.bin")))
def test_save_checkpoint(tmpdir):
    """save_checkpoint writes a .tar checkpoint file (GPU only)."""
    if torch.cuda.is_available():
        fname = str(tmpdir.mkdir("tmp_test_sequence_classifier").join("sc_checkpoint.tar"))
        sc.save_checkpoint(fname)
        assert path.exists(fname)
def test_load_checkpoint(tmpdir):
    """Round-trip save_checkpoint/load_checkpoint on the shared model (GPU only)."""
    if torch.cuda.is_available():
        fname = str(tmpdir.mkdir("tmp_test_sequence_classifier").join("sc_checkpoint.tar"))
        sc.save_checkpoint(fname)
        assert path.exists(fname)
        sc.load_checkpoint(fname)
        assert isinstance(
            sc._model.module,
            transformers.models.bert.modeling_bert.BertForSequenceClassification,
        )
| 3,132 | 32.329787 | 96 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/tests/test_cybert.py | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
import cupy
import pandas as pd
import numpy as np
import torch
import s3fs
import transformers
from clx.analytics.cybert import Cybert
# Pretrained cybert Apache-log parser hosted on public S3.
S3_BASE_PATH = "models.huggingface.co/bert/raykallen/cybert_apache_parser"
CONFIG_FILENAME = "config.json"
MODEL_FILENAME = "pytorch_model.bin"
# Download model + config at import time with anonymous S3 access.
fs = s3fs.S3FileSystem(anon=True)
fs.get(S3_BASE_PATH + "/" + MODEL_FILENAME, MODEL_FILENAME)
fs.get(S3_BASE_PATH + "/" + CONFIG_FILENAME, CONFIG_FILENAME)
# Shared parser instance plus two sample Apache access-log fragments
# used by every test below.
cyparse = Cybert()
input_logs = cudf.Series(['109.169.248.247 - -',
                          'POST /administrator/index.php HTTP/1.1 200 4494'])
def get_expected_preprocess():
    """Golden fixtures for Cybert.preprocess on ``input_logs``.

    Returns (tokens, masks, metadata): zero-padded wordpiece token ids and
    the matching attention masks (both 2 x 128 CUDA tensors), plus a cupy
    uint32 array of [row index, start token, stop token] per log line.
    """
    tokens = torch.tensor(
        [[11523, 119, 20065, 119, 27672, 119, 26049, 118, 118, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [153, 9025, 1942, 120, 11065, 120, 7448, 119, 185, 16194, 145, 20174,
          2101, 120, 122, 119, 122, 2363, 3140, 1580, 1527, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0]], device='cuda:0'
    )
    masks = torch.tensor(
        [[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0],
         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0]], device='cuda:0'
    )
    # [row index, start token, stop token] per input log line
    metadata = cupy.array([[0, 0, 8], [1, 0, 20]], dtype='uint32')
    return tokens, masks, metadata
def get_expected_inference():
    """Golden fixtures for Cybert.inference on ``input_logs``.

    Returns (parsed_df, confidence_df): the expected parsed Apache-log
    fields and the per-field confidence scores for the two sample lines.
    Uses ``np.nan`` -- the capitalized ``np.NaN`` alias was removed in
    NumPy 2.0 and would raise AttributeError there.
    """
    expected_parsed_df = pd.DataFrame({
        'remote_host': ['109.169.248.247', np.nan],
        'other': ['-', np.nan],
        'request_method': [np.nan, 'POST'],
        'request_url': [np.nan, "/administrator/index.php"],
        'request_http_ver': [np.nan, 'HTTP/1.1'],
        'status': [np.nan, '200'],
        'response_bytes_clf': [np.nan, '449']
    })
    expected_confidence_df = pd.DataFrame({
        'remote_host': [0.999628, np.nan], 'other': [0.999579, np.nan],
        'request_method': [np.nan, 0.99822], 'request_url': [np.nan, 0.999629],
        'request_http_ver': [np.nan, 0.999936], 'status': [np.nan, 0.999866],
        'response_bytes_clf': [np.nan, 0.999751]
    })
    return expected_parsed_df, expected_confidence_df
def test_load_model():
    """Load the downloaded cybert model/config into the shared parser."""
    cyparse.load_model(MODEL_FILENAME, CONFIG_FILENAME)
    assert isinstance(cyparse._label_map, dict)
    # model is wrapped (e.g. DataParallel), hence .module
    assert isinstance(cyparse._model.module,
                      transformers.models.bert.modeling_bert.BertForTokenClassification)
def test_preprocess():
    """preprocess output must match the golden tokens/masks/metadata exactly."""
    expected_tokens, expected_masks, expected_metadata = get_expected_preprocess()
    actual_tokens, actual_masks, actual_metadata = cyparse.preprocess(input_logs)
    assert actual_tokens.equal(expected_tokens)
    assert actual_masks.equal(expected_masks)
    assert cupy.equal(actual_metadata, expected_metadata).all()
def test_inference():
    """inference output must match the golden parsed/confidence frames (GPU only)."""
    if torch.cuda.is_available():
        expected_parsed_df, expected_confidence_df = get_expected_inference()
        actual_parsed_df, actual_confidence_df = cyparse.inference(input_logs)
        pd._testing.assert_frame_equal(actual_parsed_df, expected_parsed_df)
        pd._testing.assert_frame_equal(actual_confidence_df, expected_confidence_df)
| 5,066 | 43.447368 | 88 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/analytics/asset_classification.py | import cudf
from cuml.model_selection import train_test_split
import torch
import torch.optim as torch_optim
import torch.nn.functional as F
import logging
from torch.utils.dlpack import from_dlpack
from clx.analytics.model.tabular_model import TabularModel
log = logging.getLogger(__name__)
class AssetClassification:
    """
    Supervised asset classification on tabular data containing categorical and/or continuous features.

    :param layers: sizes of the hidden linear layers following the input layer (default [200, 100])
    :param drops: dropout probability per hidden layer (default [0.001, 0.01])
    :param emb_drop: dropout probability at the embedding layers
    :param is_reg: is regression
    :param is_multi: is classification
    :param use_bn: use batch normalization
    """

    def __init__(self, layers=None, drops=None, emb_drop=0.04, is_reg=False, is_multi=True, use_bn=True):
        # `layers`/`drops` previously used mutable list defaults, so one
        # shared list object leaked mutations between every instance.
        self._layers = [200, 100] if layers is None else layers
        self._drops = [0.001, 0.01] if drops is None else drops
        self._emb_drop = emb_drop
        self._is_reg = is_reg
        self._is_multi = is_multi
        self._use_bn = use_bn
        # Model/optimizer and feature-column lists are populated by train_model.
        self._model = None
        self._optimizer = None
        self._cat_cols = None
        self._cont_cols = None
        # Training/inference always runs on GPU. (The original assigned
        # self._device twice; only this value was ever effective.)
        self._device = torch.device('cuda')
def train_model(self, train_gdf, cat_cols, cont_cols, label_col, batch_size, epochs, lr=0.01, wd=0.0):
"""
This function is used for training fastai tabular model with a given training dataset.
:param train_gdf: training dataset with categorized and/or continuous feature columns
:type train_gdf: cudf.DataFrame
:param cat_cols: array of categorical column names in train_gdf
:type label_col: array
:param cont_col: array of continuous column names in train_gdf
:type label_col: array
:param label_col: column name of label column in train_gdf
:type label_col: str
:param batch_size: train_gdf will be partitioned into multiple dataframes of this size
:type batch_size: int
:param epochs: number of epochs to be adjusted depending on convergence for a specific dataset
:type epochs: int
:param lr: learning rate
:type lr: float
:param wd: wd
:type wd: float
Examples
--------
>>> from clx.analytics.asset_classification import AssetClassification
>>> ac = AssetClassification()
>>> cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
>>> cont_cols = ["10"]
>>> ac.train_model(X_train, cat_cols, cont_cols, "label", batch_size, epochs, lr=0.01, wd=0.0)
"""
self._cat_cols = cat_cols
self._cont_cols = cont_cols
# train/test split
X, val_X, Y, val_Y = train_test_split(train_gdf, label_col, train_size=0.9)
val_X.index = val_Y.index
X.index = Y.index
embedded_cols = {}
for col in cat_cols:
if col != label_col:
categories_cnt = X[col].max() + 2
if categories_cnt > 1:
embedded_cols[col] = categories_cnt
X[label_col] = Y
val_X[label_col] = val_Y
# Embedding
embedding_sizes = [(n_categories, min(100, (n_categories + 1) // 2)) for _, n_categories in embedded_cols.items()]
n_cont = len(cont_cols)
out_sz = train_gdf[label_col].nunique()
# Partition dataframes
train_part_dfs = self._get_partitioned_dfs(X, batch_size)
val_part_dfs = self._get_partitioned_dfs(val_X, batch_size)
self._model = TabularModel(embedding_sizes, n_cont, out_sz, self._layers, self._drops, self._emb_drop, self._is_reg, self._is_multi, self._use_bn)
self._to_device(self._model, self._device)
self._config_optimizer()
for i in range(epochs):
loss = self._train(self._model, self._optimizer, train_part_dfs, cat_cols, cont_cols, label_col)
print("training loss: ", loss)
self._val_loss(self._model, val_part_dfs, cat_cols, cont_cols, label_col)
def predict(self, gdf, cat_cols, cont_cols):
"""
Predict the class with the trained model
:param gdf: prediction input dataset with categorized int16 feature columns
:type gdf: cudf.DataFrame
:param cat_cols: array of categorical column names in gdf
:type label_col: array
:param cont_col: array of continuous column names in gdf
:type label_col: array
Examples
--------
>>> cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
>>> cont_cols = ["10"]
>>> ac.predict(X_test, cat_cols, cont_cols).values_host
0 0
1 0
2 0
3 0
4 2
..
8204 0
8205 4
8206 0
8207 3
8208 0
Length: 8209, dtype: int64
"""
cat_set = torch.zeros(0, 0)
xb_cont_tensor = torch.zeros(0, 0)
if cat_cols:
cat_set = gdf[self._cat_cols].to_dlpack()
cat_set = from_dlpack(cat_set).long()
if cont_cols:
xb_cont_tensor = gdf[self._cont_cols].to_dlpack()
xb_cont_tensor = from_dlpack(xb_cont_tensor).float()
out = self._model(cat_set, xb_cont_tensor)
preds = torch.max(out, 1)[1].view(-1).tolist()
return cudf.Series(preds)
def save_model(self, fname):
"""
Save trained model
:param save_to_path: directory path to save model
:type save_to_path: str
Examples
--------
>>> from clx.analytics.asset_classification import AssetClassification
>>> ac = AssetClassification()
>>> cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
>>> cont_cols = ["10"]
>>> ac.train_model(X_train, cat_cols, cont_cols, "label", batch_size, epochs, lr=0.01, wd=0.0)
>>> ac.save_model("ac.mdl")
"""
torch.save(self._model, fname)
    def load_model(self, fname):
        """
        Load a saved model.

        :param fname: directory path to model
        :type fname: str

        .. NOTE(review): this unpickles a full nn.Module via torch.load;
           on torch >= 2.6 the weights_only default changed to True, which
           rejects full-module pickles -- confirm against the supported
           torch version.

        Examples
        --------
        >>> from clx.analytics.asset_classification import AssetClassification
        >>> ac = AssetClassification()
        >>> ac.load_model("ac.mdl")
        """
        self._model = torch.load(fname)
def _config_optimizer(self, lr=0.001, wd=0.0):
parameters = filter(lambda p: p.requires_grad, self._model.parameters())
self._optimizer = torch_optim.Adam(parameters, lr=lr, weight_decay=wd)
def _get_partitioned_dfs(self, df, batch_size):
dataset_len = df.shape[0]
prev_chunk_offset = 0
partitioned_dfs = []
while prev_chunk_offset < dataset_len:
curr_chunk_offset = prev_chunk_offset + batch_size
chunk = df.iloc[prev_chunk_offset:curr_chunk_offset:1]
partitioned_dfs.append(chunk)
prev_chunk_offset = curr_chunk_offset
return partitioned_dfs
def _train(self, model, optim, dfs, cat_cols, cont_cols, label_col):
    """
    Run one training epoch over the pre-partitioned batch dataframes.

    :param model: unused -- the forward pass below goes through self._model.
        NOTE(review): confirm whether this parameter should be used instead.
    :param optim: optimizer stepped once per batch
    :param dfs: list of batch dataframes (see _get_partitioned_dfs)
    :param cat_cols: categorical feature column names (may be empty/falsy)
    :param cont_cols: continuous feature column names (may be empty/falsy)
    :param label_col: name of the label column
    :return: batch-size-weighted mean cross-entropy loss for the epoch
    """
    self._model.train()
    total = 0
    sum_loss = 0
    # Placeholders used when a feature group is absent.
    cat_set = torch.zeros(0, 0)
    xb_cont_tensor = torch.zeros(0, 0)
    for df in dfs:
        batch = df.shape[0]
        if cat_cols:
            # cudf -> DLPack -> torch keeps data on-GPU without a copy.
            cat_set = df[cat_cols].to_dlpack()
            cat_set = from_dlpack(cat_set).long()
        if cont_cols:
            xb_cont_tensor = df[cont_cols].to_dlpack()
            xb_cont_tensor = from_dlpack(xb_cont_tensor).float()
        output = self._model(cat_set, xb_cont_tensor)
        train_label = df[label_col].to_dlpack()
        train_label = from_dlpack(train_label).long()
        loss = F.cross_entropy(output, train_label)
        # Standard zero-grad / backward / step cycle per batch.
        optim.zero_grad()
        loss.backward()
        optim.step()
        total += batch
        # Weight each batch's mean loss by its row count so the epoch
        # average is per-example, not per-batch.
        sum_loss += batch * (loss.item())
    return sum_loss / total
def _val_loss(self, model, dfs, cat_cols, cont_cols, label_col):
    """
    Compute validation loss and accuracy over the pre-partitioned batches.

    :param model: unused -- evaluation goes through self._model (same
        asymmetry as _train).
    :param dfs: list of validation batch dataframes
    :param cat_cols: categorical feature column names (may be empty/falsy)
    :param cont_cols: continuous feature column names (may be empty/falsy)
    :param label_col: name of the label column
    :return: (weighted mean loss, accuracy) tuple

    NOTE(review): the loop runs without ``torch.no_grad()``, so gradients
    are still tracked during validation -- confirm this is intentional.
    """
    self._model.eval()
    total = 0
    sum_loss = 0
    correct = 0
    # Placeholders used when a feature group is absent.
    val_set = torch.zeros(0, 0)
    xb_cont_tensor = torch.zeros(0, 0)
    for df in dfs:
        current_batch_size = df.shape[0]
        if cat_cols:
            val_set = df[cat_cols].to_dlpack()
            val_set = from_dlpack(val_set).long()
        if cont_cols:
            xb_cont_tensor = df[cont_cols].to_dlpack()
            xb_cont_tensor = from_dlpack(xb_cont_tensor).float()
        out = self._model(val_set, xb_cont_tensor)
        val_label = df[label_col].to_dlpack()
        val_label = from_dlpack(val_label).long()
        loss = F.cross_entropy(out, val_label)
        # Weight by batch size so totals are per-example.
        sum_loss += current_batch_size * (loss.item())
        total += current_batch_size
        # argmax over class logits for accuracy counting
        pred = torch.max(out, 1)[1]
        correct += (pred == val_label).float().sum().item()
    print("valid loss %.3f and accuracy %.3f" % (sum_loss / total, correct / total))
    return sum_loss / total, correct / total
def _to_device(self, data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list, tuple)):
return [self._to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
| 9,402 | 34.217228 | 154 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/analytics/binary_sequence_classifier.py | import logging
import cudf
from cudf.core.subword_tokenizer import SubwordTokenizer
import torch
import torch.nn as nn
from torch.utils.dlpack import to_dlpack
from clx.analytics.sequence_classifier import SequenceClassifier
from clx.utils.data.dataloader import DataLoader
from clx.utils.data.dataset import Dataset
from transformers import AutoModelForSequenceClassification
log = logging.getLogger(__name__)
class BinarySequenceClassifier(SequenceClassifier):
    """
    Binary sequence classifier using BERT. This class provides methods for
    loading pretrained BERT models and running thresholded binary prediction;
    training/evaluation come from the SequenceClassifier base class.
    """

    def init_model(self, model_or_path):
        """
        Load model from huggingface or locally saved model.

        :param model_or_path: huggingface pretrained model name or directory path to model
        :type model_or_path: str

        Examples
        --------
        >>> from clx.analytics.binary_sequence_classifier import BinarySequenceClassifier
        >>> sc = BinarySequenceClassifier()
        >>> sc.init_model("bert-base-uncased")  # huggingface pre-trained model
        >>> sc.init_model(model_path)  # locally saved model
        """
        self._model = AutoModelForSequenceClassification.from_pretrained(model_or_path)
        if torch.cuda.is_available():
            self._device = torch.device("cuda")
            self._model.cuda()
            # Wrapping in DataParallel means the underlying model lives at
            # self._model.module (the base class relies on this when saving).
            self._model = nn.DataParallel(self._model)
        else:
            self._device = torch.device("cpu")
        # GPU subword tokenizer built from the precomputed BERT vocab hash table.
        self._tokenizer = SubwordTokenizer(self._hashpath, do_lower_case=True)

    def predict(self, input_data, max_seq_len=128, batch_size=32, threshold=0.5):
        """
        Predict the class with the trained model

        :param input_data: input text data for prediction
        :type input_data: cudf.Series
        :param max_seq_len: Limits the length of the sequence returned by tokenizer. If tokenized sentence is shorter than max_seq_len, output will be padded with 0s. If the tokenized sentence is longer than max_seq_len it will be truncated to max_seq_len.
        :type max_seq_len: int
        :param batch_size: batch size
        :type batch_size: int
        :param threshold: results with probabilities higher than this will be labeled as positive
        :type threshold: float
        :return: predictions, probabilities: predictions are labels (0 or 1) based on minimum threshold
        :rtype: cudf.Series, cudf.Series

        Examples
        --------
        >>> from cuml.preprocessing.model_selection import train_test_split
        >>> emails_train, emails_test, labels_train, labels_test = train_test_split(train_emails_df, 'label', train_size=0.8)
        >>> sc.train_model(emails_train, labels_train)
        >>> predictions = sc.predict(emails_test, threshold=0.8)
        """
        predict_gdf = cudf.DataFrame()
        predict_gdf["text"] = input_data
        predict_dataset = Dataset(predict_gdf)
        predict_dataloader = DataLoader(predict_dataset, batchsize=batch_size)
        preds_l = []
        probs_l = []
        self._model.eval()  # disable dropout etc. for deterministic inference
        for df in predict_dataloader.get_chunks():
            b_input_ids, b_input_mask = self._bert_uncased_tokenize(df["text"], max_seq_len)
            with torch.no_grad():
                logits = self._model(
                    b_input_ids, token_type_ids=None, attention_mask=b_input_mask
                )[0]
                # NOTE(review): probability is sigmoid of the positive-class
                # logit only; the class-0 logit is ignored (softmax over both
                # logits would generally differ) -- confirm this matches how
                # the model was trained.
                b_probs = torch.sigmoid(logits[:, 1])
                b_preds = b_probs.ge(threshold).type(torch.int8)
            # torch -> DLPack -> cudf keeps results on-GPU without a copy.
            b_probs = cudf.io.from_dlpack(to_dlpack(b_probs))
            b_preds = cudf.io.from_dlpack(to_dlpack(b_preds)).astype("boolean")
            preds_l.append(b_preds)
            probs_l.append(b_probs)
        preds = cudf.concat(preds_l)
        probs = cudf.concat(probs_l)
        return preds, probs
| 3,848 | 37.878788 | 256 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/analytics/sequence_classifier.py | import logging
import os
import cudf
from cudf.core.subword_tokenizer import SubwordTokenizer
import cupy
import torch
from clx.utils.data.dataloader import DataLoader
from clx.utils.data.dataset import Dataset
from torch.utils.dlpack import to_dlpack
from tqdm import trange
from torch.optim import AdamW
from abc import ABC, abstractmethod
log = logging.getLogger(__name__)
class SequenceClassifier(ABC):
    """
    Abstract base for sequence classifiers using BERT. Provides shared
    training/evaluation/checkpointing; concrete subclasses implement
    ``predict`` (and model initialization).
    """

    def __init__(self):
        # Populated by subclass init_model(); None until then.
        self._device = None
        self._model = None
        self._optimizer = None
        # Path to the bundled bert-base-uncased vocabulary hash table used by
        # the GPU subword tokenizer.
        self._hashpath = self._get_hash_table_path()

    @abstractmethod
    def predict(self, input_data, max_seq_len=128, batch_size=32, threshold=0.5):
        pass

    def train_model(
        self,
        train_data,
        labels,
        learning_rate=3e-5,
        max_seq_len=128,
        batch_size=32,
        epochs=5,
    ):
        """
        Train the classifier

        :param train_data: text data for training
        :type train_data: cudf.Series
        :param labels: labels for each element in train_data
        :type labels: cudf.Series
        :param learning_rate: learning rate
        :type learning_rate: float
        :param max_seq_len: Limits the length of the sequence returned by tokenizer. If tokenized sentence is shorter than max_seq_len, output will be padded with 0s. If the tokenized sentence is longer than max_seq_len it will be truncated to max_seq_len.
        :type max_seq_len: int
        :param batch_size: batch size
        :type batch_size: int
        :param epochs: number of training epochs, default is 5
        :type epochs: int

        Examples
        --------
        >>> from cuml.preprocessing.model_selection import train_test_split
        >>> emails_train, emails_test, labels_train, labels_test = train_test_split(train_emails_df, 'label', train_size=0.8)
        >>> sc.train_model(emails_train, labels_train)
        """
        train_gdf = cudf.DataFrame()
        train_gdf["text"] = train_data
        train_gdf["label"] = labels

        train_dataset = Dataset(train_gdf)
        train_dataloader = DataLoader(train_dataset, batchsize=batch_size)

        self._config_optimizer(learning_rate)
        self._model.train()  # Enable training mode
        self._tokenizer = SubwordTokenizer(self._hashpath, do_lower_case=True)

        for _ in trange(epochs, desc="Epoch"):
            tr_loss = 0  # Tracking variables
            nb_tr_examples, nb_tr_steps = 0, 0
            for df in train_dataloader.get_chunks():
                b_input_ids, b_input_mask = self._bert_uncased_tokenize(df["text"], max_seq_len)
                b_labels = torch.tensor(df["label"].to_numpy())
                self._optimizer.zero_grad()  # Clear out the gradients
                # Passing labels makes the HF model return the loss as the
                # first output; .sum() reduces per-GPU losses under DataParallel.
                loss = self._model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)[0]  # forwardpass
                loss.sum().backward()
                self._optimizer.step()  # update parameters
                tr_loss += loss.sum().item()  # get a numeric value
                nb_tr_examples += b_input_ids.size(0)
                nb_tr_steps += 1
            print("Train loss: {}".format(tr_loss / nb_tr_steps))

    def evaluate_model(self, test_data, labels, max_seq_len=128, batch_size=32):
        """
        Evaluate trained model

        :param test_data: test data to evaluate model
        :type test_data: cudf.Series
        :param labels: labels for each element in test_data
        :type labels: cudf.Series
        :param max_seq_len: Limits the length of the sequence returned by tokenizer. If tokenized sentence is shorter than max_seq_len, output will be padded with 0s. If the tokenized sentence is longer than max_seq_len it will be truncated to max_seq_len.
        :type max_seq_len: int
        :param batch_size: batch size
        :type batch_size: int
        :return: mean per-batch flat accuracy over the test set
        :rtype: float

        Examples
        --------
        >>> from cuml.preprocessing.model_selection import train_test_split
        >>> emails_train, emails_test, labels_train, labels_test = train_test_split(train_emails_df, 'label', train_size=0.8)
        >>> sc.evaluate_model(emails_test, labels_test)
        """
        self._model.eval()
        test_gdf = cudf.DataFrame()
        test_gdf["text"] = test_data
        test_gdf["label"] = labels

        test_dataset = Dataset(test_gdf)
        test_dataloader = DataLoader(test_dataset, batchsize=batch_size)

        eval_accuracy = 0
        nb_eval_steps = 0
        for df in test_dataloader.get_chunks():
            b_input_ids, b_input_mask = self._bert_uncased_tokenize(df["text"], max_seq_len)
            b_labels = torch.tensor(df["label"].to_numpy())
            with torch.no_grad():
                logits = self._model(
                    b_input_ids, token_type_ids=None, attention_mask=b_input_mask
                )[0]
            # torch -> DLPack -> cupy so accuracy is computed on-GPU.
            logits = logits.type(torch.DoubleTensor).to(self._device)
            logits = cupy.fromDlpack(to_dlpack(logits))
            label_ids = b_labels.type(torch.IntTensor).to(self._device)
            label_ids = cupy.fromDlpack(to_dlpack(label_ids))
            temp_eval_accuracy = self._flatten_accuracy(logits, label_ids)

            eval_accuracy += temp_eval_accuracy
            nb_eval_steps += 1

        accuracy = eval_accuracy / nb_eval_steps
        return float(accuracy)

    def save_model(self, save_to_path="."):
        """
        Save trained model

        :param save_to_path: directory path to save model, default is current directory
        :type save_to_path: str

        Examples
        --------
        >>> from cuml.preprocessing.model_selection import train_test_split
        >>> emails_train, emails_test, labels_train, labels_test = train_test_split(train_emails_df, 'label', train_size=0.8)
        >>> sc.train_model(emails_train, labels_train)
        >>> sc.save_model()
        """
        # .module unwraps the nn.DataParallel wrapper added in init_model.
        self._model.module.save_pretrained(save_to_path)

    def save_checkpoint(self, file_path):
        """
        Save model checkpoint

        :param file_path: file path to save checkpoint
        :type file_path: str

        Examples
        --------
        >>> sc.init_model("bert-base-uncased")  # huggingface pre-trained model
        >>> sc.train_model(train_data, train_labels)
        >>> sc.save_checkpoint(PATH)
        """
        checkpoint = {
            "state_dict": self._model.module.state_dict()
        }
        torch.save(checkpoint, file_path)

    def load_checkpoint(self, file_path):
        """
        Load model checkpoint

        :param file_path: file path to load checkpoint
        :type file_path: str

        Examples
        --------
        >>> sc.init_model("bert-base-uncased")  # huggingface pre-trained model
        >>> sc.load_checkpoint(PATH)
        """
        model_dict = torch.load(file_path)
        self._model.module.load_state_dict(model_dict["state_dict"])

    def _get_hash_table_path(self):
        # Hash table ships alongside this module in the resources directory.
        hash_table_path = "%s/resources/bert-base-uncased-hash.txt" % os.path.dirname(
            os.path.realpath(__file__)
        )
        return hash_table_path

    def _config_optimizer(self, learning_rate):
        """Build an AdamW optimizer with weight decay disabled for bias and
        LayerNorm (gamma/beta) parameters, per the standard BERT recipe."""
        param_optimizer = list(self._model.named_parameters())
        no_decay = ["bias", "gamma", "beta"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
                ],
                "weight_decay_rate": 0.01,
            },
            {
                "params": [
                    p for n, p in param_optimizer if any(nd in n for nd in no_decay)
                ],
                "weight_decay_rate": 0.0,
            },
        ]
        self._optimizer = AdamW(optimizer_grouped_parameters, learning_rate)

    def _flatten_accuracy(self, preds, labels):
        # Fraction of argmax predictions matching the flattened labels.
        pred_flat = cupy.argmax(preds, axis=1).flatten()
        labels_flat = labels.flatten()
        return cupy.sum(pred_flat == labels_flat) / len(labels_flat)

    def _bert_uncased_tokenize(self, strings, max_length):
        """
        converts cudf.Series of strings to two torch tensors- token ids and attention mask with padding
        """
        output = self._tokenizer(strings,
                                 max_length=max_length,
                                 max_num_rows=len(strings),
                                 truncation=True,
                                 add_special_tokens=True,
                                 return_tensors="pt")
        return output['input_ids'].type(torch.long), output['attention_mask'].type(torch.long)
| 8,757 | 35.953586 | 256 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/analytics/cybert.py | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, TensorDataset
from transformers import (
BertForTokenClassification,
DistilBertForTokenClassification,
ElectraForTokenClassification,
)
from cudf.core.subword_tokenizer import SubwordTokenizer
log = logging.getLogger(__name__)
# Maps a config-file "architectures" entry to the transformers class used to
# instantiate it (see Cybert.load_model).
ARCH_MAPPING = {
    "BertForTokenClassification": BertForTokenClassification,
    "DistilBertForTokenClassification": DistilBertForTokenClassification,
    "ElectraForTokenClassification": ElectraForTokenClassification,
}

# Default pretrained checkpoint name associated with each supported
# architecture.
MODEL_MAPPING = {
    "BertForTokenClassification": "bert-base-cased",
    "DistilBertForTokenClassification": "distilbert-base-cased",
    "ElectraForTokenClassification": "rapids/electra-small-discriminator",
}
class Cybert:
    """
    Cyber log parsing using BERT, DistilBERT, or ELECTRA. This class provides methods
    for loading models, prediction, and postprocessing.
    """

    def __init__(self):
        self._model = None
        # label id -> field-name mapping, filled in by load_model().
        self._label_map = {}
        resources_dir = "%s/resources" % os.path.dirname(os.path.realpath(__file__))
        vocabpath = "%s/bert-base-cased-vocab.txt" % resources_dir
        # token id -> token string, used to decode model output back to text.
        self._vocab_lookup = {}
        with open(vocabpath) as f:
            for index, line in enumerate(f):
                self._vocab_lookup[index] = line.split()[0]
        self._hashpath = "%s/bert-base-cased-hash.txt" % resources_dir
        # Cased tokenizer: log fields are case-sensitive.
        self.tokenizer = SubwordTokenizer(self._hashpath, do_lower_case=False)

    def load_model(self, model_filepath, config_filepath):
        """
        Load cybert model.

        :param model_filepath: Filepath of the model (.pth or .bin) to be loaded
        :type model_filepath: str
        :param config_filepath: Config file (.json) to be used
        :type config_filepath: str

        Examples
        --------
        >>> from clx.analytics.cybert import Cybert
        >>> cyparse = Cybert()
        >>> cyparse.load_model('/path/to/model.bin', '/path/to/config.json')
        """
        with open(config_filepath) as f:
            config = json.load(f)
        # The config's "architectures" entry selects the transformers class
        # via the module-level ARCH_MAPPING.
        model_arch = config["architectures"][0]
        self._label_map = {int(k): v for k, v in config["id2label"].items()}
        self._model = ARCH_MAPPING[model_arch].from_pretrained(
            model_filepath, config=config_filepath,
        )
        self._model.cuda()
        self._model.eval()
        self._model = nn.DataParallel(self._model)

    def preprocess(self, raw_data_col, stride_len=116, max_seq_len=128):
        """
        Preprocess and tokenize data for cybert model inference.

        :param raw_data_col: logs to be processed
        :type raw_data_col: cudf.Series
        :param stride_len: Max stride length for processing, default is 116
        :type stride_len: int
        :param max_seq_len: Max sequence length for processing, default is 128
        :type max_seq_len: int

        NOTE(review): stride_len and max_seq_len are currently ignored -- the
        tokenizer call below hardcodes max_length=128 and stride=12. Confirm
        whether the parameters should be wired through.

        Examples
        --------
        >>> import cudf
        >>> from clx.analytics.cybert import Cybert
        >>> cyparse = Cybert()
        >>> cyparse.load_model('/path/to/model.pth', '/path/to/config.json')
        >>> raw_df = cudf.Series(['Log event 1', 'Log event 2'])
        >>> input_ids, attention_masks, meta_data = cyparse.preprocess(raw_df)
        """
        # Normalize characters that confuse tokenization; "= " makes '='
        # a separate token so key=value pairs split cleanly.
        raw_data_col = raw_data_col.str.replace('"', "")
        raw_data_col = raw_data_col.str.replace("\\r", " ")
        raw_data_col = raw_data_col.str.replace("\\t", " ")
        raw_data_col = raw_data_col.str.replace("=", "= ")
        raw_data_col = raw_data_col.str.replace("\\n", " ")
        output = self.tokenizer(
            raw_data_col,
            max_length=128,
            stride=12,
            max_num_rows=len(raw_data_col),
            truncation=False,
            add_special_tokens=False,
            return_tensors="pt",
        )
        input_ids = output["input_ids"].type(torch.long)
        attention_masks = output["attention_mask"].type(torch.long)
        # metadata rows are (doc index, start, stop) per tokenized chunk,
        # used later to stitch overlapping chunks back into whole logs.
        meta_data = output["metadata"]
        return input_ids, attention_masks, meta_data

    def inference(self, raw_data_col, batch_size=160):
        """
        Cybert inference and postprocessing on dataset

        :param raw_data_col: logs to be processed
        :type raw_data_col: cudf.Series
        :param batch_size: Log data is processed in batches using a Pytorch dataloader.
        The batch size parameter refers to the batch size indicated in torch.utils.data.DataLoader.
        :type batch_size: int
        :return: parsed_df
        :rtype: pandas.DataFrame
        :return: confidence_df
        :rtype: pandas.DataFrame

        Examples
        --------
        >>> import cudf
        >>> from clx.analytics.cybert import Cybert
        >>> cyparse = Cybert()
        >>> cyparse.load_model('/path/to/model.pth', '/path/to/config.json')
        >>> raw_data_col = cudf.Series(['Log event 1', 'Log event 2'])
        >>> processed_df, confidence_df = cy.inference(raw_data_col)
        """
        input_ids, attention_masks, meta_data = self.preprocess(raw_data_col)
        dataset = TensorDataset(input_ids, attention_masks)
        dataloader = DataLoader(dataset=dataset, shuffle=False, batch_size=batch_size)
        confidences_list = []
        labels_list = []
        for step, batch in enumerate(dataloader):
            in_ids, att_masks = batch
            with torch.no_grad():
                logits = self._model(in_ids, att_masks)[0]
            # Per-token class distribution; keep the winning class and its
            # probability for each token.
            logits = F.softmax(logits, dim=2)
            confidences, labels = torch.max(logits, 2)
            confidences_list.extend(confidences.detach().cpu().numpy().tolist())
            labels_list.extend(labels.detach().cpu().numpy().tolist())
        infer_pdf = pd.DataFrame(meta_data.cpu()).astype(int)
        infer_pdf.columns = ["doc", "start", "stop"]
        infer_pdf["confidences"] = confidences_list
        infer_pdf["labels"] = labels_list
        infer_pdf["token_ids"] = input_ids.detach().cpu().numpy().tolist()
        # Free the large intermediates before postprocessing.
        del dataset
        del dataloader
        del logits
        del confidences
        del labels
        del confidences_list
        del labels_list
        parsed_df, confidence_df = self.__postprocess(infer_pdf)
        return parsed_df, confidence_df

    def __postprocess(self, infer_pdf):
        # cut overlapping edges
        infer_pdf["confidences"] = infer_pdf.apply(
            lambda row: row["confidences"][row["start"]:row["stop"]], axis=1
        )
        infer_pdf["labels"] = infer_pdf.apply(
            lambda row: row["labels"][row["start"]:row["stop"]], axis=1
        )
        infer_pdf["token_ids"] = infer_pdf.apply(
            lambda row: row["token_ids"][row["start"]:row["stop"]], axis=1
        )

        # aggregated logs: concatenate chunk lists back into one list per doc
        infer_pdf = infer_pdf.groupby("doc").agg(
            {"token_ids": "sum", "confidences": "sum", "labels": "sum"}
        )

        # parse_by_label
        parsed_dfs = infer_pdf.apply(
            lambda row: self.__get_label_dicts(row), axis=1, result_type="expand"
        )
        parsed_df = pd.DataFrame(parsed_dfs[0].tolist())
        confidence_df = pd.DataFrame(parsed_dfs[1].tolist())

        # "X" is dropped from confidences before averaging.
        if "X" in confidence_df.columns:
            confidence_df = confidence_df.drop(["X"], axis=1)
        confidence_df = confidence_df.applymap(np.mean)

        # decode cleanup
        parsed_df = self.__decode_cleanup(parsed_df)
        return parsed_df, confidence_df

    def __get_label_dicts(self, row):
        # field name -> concatenated token text / list of confidences
        token_dict = defaultdict(str)
        confidence_dict = defaultdict(list)
        for label, confidence, token_id in zip(
            row["labels"], row["confidences"], row["token_ids"]
        ):
            text_token = self._vocab_lookup[token_id]
            if text_token[:2] != "##":
                # if not a subword use the current label, else use previous
                new_label = label
                new_confidence = confidence
            if self._label_map[new_label] in token_dict:
                token_dict[self._label_map[new_label]] = (
                    token_dict[self._label_map[new_label]] + " " + text_token
                )
            else:
                token_dict[self._label_map[new_label]] = text_token
            confidence_dict[self._label_map[label]].append(new_confidence)
        return token_dict, confidence_dict

    def __decode_cleanup(self, df):
        # Undo wordpiece artifacts ("##") and re-join punctuation that the
        # tokenizer split apart.
        return (
            df.replace(" ##", "", regex=True)
            .replace(" : ", ":", regex=True)
            .replace("\[ ", "[", regex=True)
            .replace(" ]", "]", regex=True)
            .replace(" /", "/", regex=True)
            .replace("/ ", "/", regex=True)
            .replace(" - ", "-", regex=True)
            .replace(" \( ", " (", regex=True)
            .replace(" \) ", ") ", regex=True)
            .replace("\+ ", "+", regex=True)
            .replace(" . ", ".", regex=True)
        )
| 9,636 | 36.644531 | 99 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/analytics/detector.py | import logging
import torch
import torch.nn as nn
from abc import ABC, abstractmethod
log = logging.getLogger(__name__)
GPU_COUNT = torch.cuda.device_count()
class Detector(ABC):
    """Abstract base for PyTorch-backed detectors.

    Holds the model, a cross-entropy criterion, and an RMSprop optimizer,
    and provides save/load plus CUDA/DataParallel wiring. Subclasses supply
    model construction, training, and prediction.
    """

    def __init__(self, lr=0.001):
        # Learning rate used whenever the optimizer is (re)built.
        self.lr = lr
        self._model = None
        self._optimizer = None
        self._criterion = nn.CrossEntropyLoss()

    @property
    def model(self):
        """The wrapped PyTorch model (possibly an nn.DataParallel)."""
        return self._model

    @property
    def optimizer(self):
        """The RMSprop optimizer bound to the current model."""
        return self._optimizer

    @property
    def criterion(self):
        """Cross-entropy loss used by subclasses during training."""
        return self._criterion

    @abstractmethod
    def init_model(self, char_vocab, hidden_size, n_domain_type, n_layers):
        pass

    @abstractmethod
    def train_model(self, training_data, labels, batch_size=1000, epochs=1, train_size=0.7):
        pass

    @abstractmethod
    def predict(self, epoch, train_dataset):
        pass

    def load_model(self, file_path):
        """ This function load already saved model and sets cuda parameters.

        :param file_path: File path of a model to be loaded.
        :type file_path: string
        """
        loaded = torch.load(file_path)
        loaded.eval()
        self._model = loaded
        self._set_model2cuda()
        self._set_optimizer()

    def save_model(self, file_path):
        """ This function saves model to a given location.

        :param file_path: File path of a model to be saved.
        :type file_path: string
        """
        torch.save(self.model, file_path)

    def _save_checkpoint(self, checkpoint, file_path):
        # Persist an arbitrary checkpoint dict and log where it went.
        torch.save(checkpoint, file_path)
        log.info("Pretrained model checkpoint saved to location: '{}'".format(file_path))

    def _set_parallelism(self):
        # Wrap in DataParallel only when multiple GPUs are present; either
        # way the model is moved to CUDA when available.
        if GPU_COUNT > 1:
            log.info("CUDA device count: {}".format(GPU_COUNT))
            self._model = nn.DataParallel(self.model)
        self._set_model2cuda()

    def _set_optimizer(self):
        self._optimizer = torch.optim.RMSprop(
            self.model.parameters(), self.lr, weight_decay=0.0
        )

    def _set_model2cuda(self):
        if torch.cuda.is_available():
            log.info("Found GPU's now setting up cuda for the model")
            self.model.cuda()

    def leverage_model(self, model):
        """This function leverages model by setting parallelism parameters.

        :param model: Model instance.
        :type model: RNNClassifier
        """
        model.eval()
        self._model = model
        self._set_parallelism()
        self._set_optimizer()

    def _get_unwrapped_model(self):
        # DataParallel hides the real model behind .module.
        return self.model.module if GPU_COUNT > 1 else self.model
| 2,728 | 25.495146 | 92 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/analytics/dga_detector.py | import cudf
import torch
import logging
from tqdm import trange
from torch.utils.dlpack import from_dlpack
from clx.utils.data import utils
from clx.analytics.detector import Detector
from clx.utils.data.dataloader import DataLoader
from clx.analytics.dga_dataset import DGADataset
from clx.analytics.model.rnn_classifier import RNNClassifier
from cuml.model_selection import train_test_split
log = logging.getLogger(__name__)
class DGADetector(Detector):
    """
    This class provides multiple functionalities such as build, train and evaluate the RNNClassifier model
    to distinguish legitimate and DGA domain names.
    """

    def init_model(self, char_vocab=128, hidden_size=100, n_domain_type=2, n_layers=3):
        """This function instantiates RNNClassifier model to train. And also optimizes to scale it and keep running on parallelism.

        :param char_vocab: Vocabulary size is set to 128 ASCII characters.
        :type char_vocab: int
        :param hidden_size: Hidden size of the network.
        :type hidden_size: int
        :param n_domain_type: Number of domain types.
        :type n_domain_type: int
        :param n_layers: Number of network layers.
        :type n_layers: int
        """
        # No-op if a model is already attached (e.g. via load_model).
        if self.model is None:
            model = RNNClassifier(char_vocab, hidden_size, n_domain_type, n_layers)
            self.leverage_model(model)

    def load_checkpoint(self, file_path):
        """ This function load already saved model checkpoint and sets cuda parameters.

        :param file_path: File path of a model checkpoint to be loaded.
        :type file_path: string
        """
        checkpoint = torch.load(file_path)
        # Rebuild the network from the saved hyperparameters, then restore
        # the weights (counterpart of save_checkpoint below).
        model = RNNClassifier(
            checkpoint["input_size"],
            checkpoint["hidden_size"],
            checkpoint["output_size"],
            checkpoint["n_layers"],
        )
        model.load_state_dict(checkpoint["state_dict"])
        super().leverage_model(model)

    def save_checkpoint(self, file_path):
        """ This function saves model checkpoint to given location.

        :param file_path: File path to save model checkpoint.
        :type file_path: string
        """
        # Unwrap DataParallel (if any) so the checkpoint is wrapper-free.
        model = self._get_unwrapped_model()
        checkpoint = {
            "state_dict": model.state_dict(),
            "input_size": model.input_size,
            "hidden_size": model.hidden_size,
            "n_layers": model.n_layers,
            "output_size": model.output_size,
        }
        super()._save_checkpoint(checkpoint, file_path)

    def train_model(
        self, train_data, labels, batch_size=1000, epochs=5, train_size=0.7, truncate=100
    ):
        """This function is used for training RNNClassifier model with a given training dataset. It returns total loss to determine model prediction accuracy.

        :param train_data: Training data
        :type train_data: cudf.Series
        :param labels: labels data
        :type labels: cudf.Series
        :param batch_size: batch size
        :type batch_size: int
        :param epochs: Number of epochs for training
        :type epochs: int
        :param train_size: Training size for splitting training and test data
        :type train_size: int
        :param truncate: Truncate string to n number of characters.
        :type truncate: int

        Examples
        --------
        >>> from clx.analytics.dga_detector import DGADetector
        >>> dd = DGADetector()
        >>> dd.init_model()
        >>> dd.train_model(train_data, labels)
        1.5728906989097595
        """
        log.info("Initiating model training ...")
        log.info('Truncate domains to width: {}'.format(truncate))
        self.model.train()
        train_dataloader, test_dataloader = self._preprocess_data(
            train_data, labels, batch_size, train_size, truncate
        )
        for _ in trange(epochs, desc="Epoch"):
            total_loss = 0
            i = 0
            for df in train_dataloader.get_chunks():
                domains_len = df.shape[0]
                if domains_len > 0:
                    types_tensor = self._create_types_tensor(df["type"])
                    df = df.drop(["type", "domain"], axis=1)
                    input, seq_lengths = self._create_variables(df)
                    model_result = self.model(input, seq_lengths)
                    # _get_loss also performs backward() and optimizer.step().
                    loss = self._get_loss(model_result, types_tensor)
                    total_loss += loss
                    i = i + 1
                    if i % 10 == 0:
                        # NOTE(review): due to precedence this logs
                        # (total_loss / i) * domains_len, i.e. the running
                        # mean loss scaled by the batch size -- confirm
                        # whether total_loss / (i * domains_len) was intended.
                        log.info(
                            "[{}/{} ({:.0f}%)]\tLoss: {:.2f}".format(
                                i * domains_len,
                                train_dataloader.dataset_len,
                                100.0 * i * domains_len / train_dataloader.dataset_len,
                                total_loss / i * domains_len,
                            )
                        )
            self.evaluate_model(test_dataloader)

    def predict(self, domains, probability=False, truncate=100):
        """This function accepts cudf series of domains as an argument to classify domain names as benign/malicious and returns the learned label for each object in the form of cudf series.

        :param domains: List of domains.
        :type domains: cudf.Series
        :param probability: if True, return the sigmoid of the first output
            logit instead of the argmax class label.
        :type probability: bool
        :param truncate: Truncate string to n number of characters.
        :type truncate: int
        :return: Predicted results with respect to given domains.
        :rtype: cudf.Series

        Examples
        --------
        >>> dd.predict(['nvidia.com', 'dgadomain'])
        0    0.010
        1    0.924
        Name: dga_probability, dtype: decimal
        """
        log.debug("Initiating model inference ...")
        self.model.eval()
        df = cudf.DataFrame({"domain": domains})
        log.debug('Truncate domains to width: {}'.format(truncate))
        # Drop everything past `truncate` characters.
        df['domain'] = df['domain'].str.slice_replace(truncate, repl='')
        temp_df = utils.str2ascii(df, 'domain')
        # Assigning sorted domains index to return learned labels as per the given input order.
        df.index = temp_df.index
        df["domain"] = temp_df["domain"]
        temp_df = temp_df.drop("domain", axis=1)
        input, seq_lengths = self._create_variables(temp_df)
        del temp_df
        model_result = self.model(input, seq_lengths)
        if probability:
            model_result = model_result[:, 0]
            preds = torch.sigmoid(model_result)
            preds = preds.view(-1).tolist()
            df["preds"] = preds
        else:
            # argmax over class logits -> hard label per domain
            preds = model_result.data.max(1, keepdim=True)[1]
            preds = preds.view(-1).tolist()
            df["preds"] = preds
        # Restore the caller's original row order before returning.
        df = df.sort_index()
        return df["preds"]

    def _create_types_tensor(self, type_series):
        """Create types tensor variable in the same order of sequence tensor"""
        types = type_series.values_host
        types_tensor = torch.LongTensor(types)
        if torch.cuda.is_available():
            types_tensor = self._set_var2cuda(types_tensor)
        return types_tensor

    def _create_variables(self, df):
        """
        Creates vectorized sequence for given domains and wraps around cuda for parallel processing.
        """
        seq_len_arr = df["len"].values_host
        df = df.drop("len", axis=1)
        seq_len_tensor = torch.LongTensor(seq_len_arr)
        seq_tensor = self._df2tensor(df)
        # Return variables
        # DataParallel requires everything to be a Variable
        if torch.cuda.is_available():
            seq_tensor = self._set_var2cuda(seq_tensor)
            seq_len_tensor = self._set_var2cuda(seq_len_tensor)
        return seq_tensor, seq_len_tensor

    def _df2tensor(self, ascii_df):
        """
        Converts gdf -> dlpack tensor -> torch tensor
        """
        dlpack_ascii_tensor = ascii_df.to_dlpack()
        seq_tensor = from_dlpack(dlpack_ascii_tensor).long()
        return seq_tensor

    def evaluate_model(self, dataloader):
        """This function evaluates the trained model to verify it's accuracy.

        :param dataloader: Instance holds preprocessed data.
        :type dataloader: DataLoader
        :return: Model accuracy
        :rtype: decimal

        Examples
        --------
        >>> dd = DGADetector()
        >>> dd.init_model()
        >>> dd.evaluate_model(dataloader)
        Evaluating trained model ...
        Test set accuracy: 3/4 (0.75)
        """
        log.info("Evaluating trained model ...")
        correct = 0
        for df in dataloader.get_chunks():
            target = self._create_types_tensor(df["type"])
            df = df.drop(["type", "domain"], axis=1)
            input, seq_lengths = self._create_variables(df)
            output = self.model(input, seq_lengths)
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
        accuracy = float(correct) / dataloader.dataset_len
        log.info(
            "Test set accuracy: {}/{} ({})\n".format(
                correct, dataloader.dataset_len, accuracy
            )
        )
        return accuracy

    def _get_loss(self, model_result, types_tensor):
        # Despite the name, this also runs the backward pass and an
        # optimizer step; it returns the scalar loss value.
        loss = self.criterion(model_result, types_tensor)
        self.model.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def _set_var2cuda(self, tensor):
        """
        Set variable to cuda.
        """
        return tensor.cuda()

    def _preprocess_data(self, train_data, labels, batch_size, train_size, truncate):
        # Split into train/test, wrap each side in a DGADataset (which
        # handles truncation/vectorization) and a chunked DataLoader.
        train_gdf = cudf.DataFrame()
        train_gdf["domain"] = train_data
        train_gdf["type"] = labels
        domain_train, domain_test, type_train, type_test = train_test_split(
            train_gdf, "type", train_size=train_size
        )
        test_df = self._create_df(domain_test, type_test)
        train_df = self._create_df(domain_train, type_train)
        test_dataset = DGADataset(test_df, truncate)
        train_dataset = DGADataset(train_df, truncate)
        test_dataloader = DataLoader(test_dataset, batchsize=batch_size)
        train_dataloader = DataLoader(train_dataset, batchsize=batch_size)
        return train_dataloader, test_dataloader

    def _create_df(self, domain_df, type_series):
        # Reset indices so domain and type rows align positionally.
        df = cudf.DataFrame()
        df["domain"] = domain_df["domain"].reset_index(drop=True)
        df["type"] = type_series.reset_index(drop=True)
        return df
| 10,504 | 38.197761 | 189 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/analytics/multiclass_sequence_classifier.py | import logging
import cudf
from cudf.core.subword_tokenizer import SubwordTokenizer
import cupy
import torch
import torch.nn as nn
from torch.utils.dlpack import to_dlpack
from clx.analytics.sequence_classifier import SequenceClassifier
from clx.utils.data.dataloader import DataLoader
from clx.utils.data.dataset import Dataset
from transformers import AutoModelForSequenceClassification
log = logging.getLogger(__name__)
class MulticlassSequenceClassifier(SequenceClassifier):
    """
    Sequence Classifier using BERT. This class provides methods for training/loading BERT models, evaluation and prediction.
    """

    def init_model(self, model_or_path, num_labels):
        """
        Load model from huggingface or locally saved model.

        :param model_or_path: huggingface pretrained model name or directory path to model
        :type model_or_path: str
        :param num_labels: number of labels used only for multiclass classification
        :type num_labels: int

        Examples
        --------
        >>> from clx.analytics.multiclass_sequence_classifier import MulticlassSequenceClassifier
        >>> sc = MulticlassSequenceClassifier()
        >>> sc.init_model("bert-base-uncased", num_labels=4) # huggingface pre-trained model
        >>> sc.init_model(model_path, num_labels=4) # locally saved model
        """
        self._model = AutoModelForSequenceClassification.from_pretrained(model_or_path, num_labels=num_labels)
        # Run on GPU when available; wrap in DataParallel so multiple GPUs split batches.
        if torch.cuda.is_available():
            self._device = torch.device("cuda")
            self._model.cuda()
            self._model = nn.DataParallel(self._model)
        else:
            self._device = torch.device("cpu")
        # GPU subword tokenizer; lower-cases input to match uncased BERT vocabularies.
        self._tokenizer = SubwordTokenizer(self._hashpath, do_lower_case=True)

    def predict(self, input_data, max_seq_len=128, batch_size=32):
        """
        Predict the class with the trained model

        :param input_data: input text data for prediction
        :type input_data: cudf.Series
        :param max_seq_len: Limits the length of the sequence returned by tokenizer. If tokenized sentence is shorter than max_seq_len, output will be padded with 0s. If the tokenized sentence is longer than max_seq_len it will be truncated to max_seq_len.
        :type max_seq_len: int
        :param batch_size: batch size
        :type batch_size: int
        :return: predictions: the argmax class index over the label logits for each input row
        :rtype: cudf.Series

        Examples
        --------
        >>> from cuml.preprocessing.model_selection import train_test_split
        >>> emails_train, emails_test, labels_train, labels_test = train_test_split(train_emails_df, 'label', train_size=0.8)
        >>> sc.train_model(emails_train, labels_train)
        >>> predictions = sc.predict(emails_test)
        """
        predict_gdf = cudf.DataFrame()
        predict_gdf["text"] = input_data

        predict_dataset = Dataset(predict_gdf)
        predict_dataloader = DataLoader(predict_dataset, batchsize=batch_size)

        preds = cudf.Series()

        self._model.eval()
        for df in predict_dataloader.get_chunks():
            b_input_ids, b_input_mask = self._bert_uncased_tokenize(df["text"], max_seq_len)
            with torch.no_grad():
                # [0] selects the logits from the model output tuple.
                logits = self._model(
                    b_input_ids, token_type_ids=None, attention_mask=b_input_mask
                )[0]
                logits = logits.type(torch.DoubleTensor).to(self._device)
                # Hand the logits to CuPy via DLPack and take the per-row argmax.
                logits = cupy.fromDlpack(to_dlpack(logits))
                b_preds = cupy.argmax(logits, axis=1).flatten()
                b_preds = cudf.Series(b_preds)
            preds = preds.append(b_preds)

        return preds
| 3,867 | 38.876289 | 256 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/analytics/model/rnn_classifier.py | # Original code at https://github.com/spro/practical-pytorch
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
DROPOUT = 0.0
class RNNClassifier(nn.Module):
    """Sequence classifier: embedding -> (optionally bidirectional) GRU -> linear head."""

    def __init__(
        self, input_size, hidden_size, output_size, n_layers, bidirectional=True
    ):
        super(RNNClassifier, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        # 2 directions when bidirectional, otherwise 1.
        self.n_directions = int(bidirectional) + 1

        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(
            hidden_size,
            hidden_size,
            n_layers,
            dropout=DROPOUT,
            bidirectional=bidirectional,
        )
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, input, seq_lengths):
        # Note: we run this all at once (over the whole input sequence)
        # input shape: B x S (input size)
        # transpose to make S(sequence) x B (batch)
        input = input.t()
        batch_size = input.size(1)

        # Make a hidden
        hidden = self._init_hidden(batch_size)

        # Embedding S x B -> S x B x I (embedding size)
        embedded = self.embedding(input)

        # Pack them up nicely (assumes sequences are pre-sorted by descending
        # length — pack_padded_sequence's default; TODO confirm at call site).
        gru_input = pack_padded_sequence(embedded, seq_lengths.data.cpu().numpy())

        # To compact weights again call flatten_parameters().
        self.gru.flatten_parameters()
        output, hidden = self.gru(gru_input, hidden)
        # output = self.dropout(output)

        # Use the last layer output as FC's input
        # No need to unpack, since we are going to use hidden
        # hidden[-1] is the final state of the last layer/direction.
        fc_output = self.fc(hidden[-1])
        return fc_output

    def _init_hidden(self, batch_size):
        """Zero hidden state of shape (n_layers * n_directions, batch, hidden)."""
        hidden = torch.zeros(
            self.n_layers * self.n_directions, batch_size, self.hidden_size
        )
        # creating variable
        if torch.cuda.is_available():
            return hidden.cuda()
        else:
            return hidden
| 2,067 | 31.3125 | 82 | py |
clx-branch-23.04 | clx-branch-23.04/python/clx/analytics/model/tabular_model.py | # Original code at https://github.com/spro/practical-pytorch
import torch
import torch.nn as nn
class TabularModel(nn.Module):
    "Basic model for tabular data"

    def __init__(self, emb_szs, n_cont, out_sz, layers, drops,
                 emb_drop, use_bn, is_reg, is_multi):
        # emb_szs: (cardinality, embedding_dim) pairs, one per categorical column.
        # n_cont: number of continuous columns; out_sz: output width.
        # layers/drops: hidden-layer sizes and their dropout probabilities.
        # NOTE(review): is_reg and is_multi are accepted but never referenced
        # in this class — confirm whether callers rely on them.
        super().__init__()
        self.embeds = nn.ModuleList([nn.Embedding(ni, nf) for ni, nf in emb_szs])
        self.emb_drop = nn.Dropout(emb_drop)
        self.bn_cont = nn.BatchNorm1d(n_cont)
        n_emb = sum(e.embedding_dim for e in self.embeds)
        self.n_emb, self.n_cont = n_emb, n_cont
        sizes = [n_emb + n_cont] + layers + [out_sz]
        # ReLU between hidden layers; no activation on the output layer.
        actns = [nn.ReLU(inplace=True)] * (len(sizes) - 2) + [None]
        layers = []
        for i, (n_in, n_out, dp, act) in enumerate(zip(sizes[:-1], sizes[1:], [0.] + drops, actns)):
            layers += self._bn_drop_lin(n_in, n_out, bn=use_bn and i != 0, p=dp, actn=act)
        self.layers = nn.Sequential(*layers)

    def forward(self, x_cat, x_cont):
        # Embed each categorical column and concatenate along the feature dim.
        if self.n_emb != 0:
            x = [e(x_cat[:, i]) for i, e in enumerate(self.embeds)]
            x = torch.cat(x, 1)
            x = self.emb_drop(x)
        if self.n_cont != 0:
            if self.n_cont == 1:
                # BatchNorm1d needs a (batch, features) tensor.
                x_cont = x_cont.unsqueeze(1)
            x_cont = self.bn_cont(x_cont)
            x = torch.cat([x, x_cont], 1) if self.n_emb != 0 else x_cont
        x = self.layers(x)
        return x.squeeze()

    def _bn_drop_lin(self, n_in, n_out, bn, p, actn):
        "Sequence of batchnorm (if `bn`), dropout (with `p`) and linear (`n_in`,`n_out`) layers followed by `actn`."
        layers = [nn.BatchNorm1d(n_in)] if bn else []
        if p != 0:
            layers.append(nn.Dropout(p))
        layers.append(nn.Linear(n_in, n_out))
        if actn is not None:
            layers.append(actn)
        return layers
| 1,858 | 38.553191 | 116 | py |
pdarts | pdarts-master/test.py | import os
import sys
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from model import NetworkCIFAR as Network
# Command-line configuration for evaluating a trained PDARTS model on CIFAR-10.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='CIFAR10.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--arch', type=str, default='PDARTS', help='which architecture to use')
args = parser.parse_args()

# Timestamped INFO logging to stdout.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')

CIFAR_CLASSES = 10
def main():
    """Load the pretrained network named by --arch/--model_path and report CIFAR-10 test accuracy."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    torch.cuda.set_device(args.gpu)
    cudnn.enabled=True
    logging.info("args = %s", args)

    # Resolve the genotype by name from genotypes.py.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    # Fallback when the checkpoint keys don't load directly.
    # NOTE(review): the fallback accesses model.module, which only exists on a
    # wrapped (e.g. DataParallel) model — confirm this path is reachable.
    try:
        utils.load(model, args.model_path)
    except:
        model = model.module
        utils.load(model, args.model_path)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False, pin_memory=False, num_workers=2)

    # Drop-path is disabled at evaluation time.
    model.drop_path_prob = 0.0
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('Test_acc %f', test_acc)
def infer(test_queue, model, criterion):
    """Evaluate `model` on `test_queue`; return (top-1 accuracy, average loss)."""
    loss_meter = utils.AvgrageMeter()
    acc1_meter = utils.AvgrageMeter()
    acc5_meter = utils.AvgrageMeter()
    model.eval()

    for step, (batch, labels) in enumerate(test_queue):
        batch = batch.cuda()
        labels = labels.cuda()
        with torch.no_grad():
            logits, _ = model(batch)
            loss = criterion(logits, labels)

        acc1, acc5 = utils.accuracy(logits, labels, topk=(1, 5))
        count = batch.size(0)
        loss_meter.update(loss.data.item(), count)
        acc1_meter.update(acc1.data.item(), count)
        acc5_meter.update(acc5.data.item(), count)

        # Periodic progress report.
        if step % args.report_freq == 0:
            logging.info('test %03d %e %f %f', step, loss_meter.avg, acc1_meter.avg, acc5_meter.avg)

    return acc1_meter.avg, loss_meter.avg
if __name__ == '__main__':
    # Script entry point.
    main()
| 3,279 | 31.475248 | 100 | py |
pdarts | pdarts-master/train_imagenet.py | import os
import sys
import numpy as np
import time
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkImageNet as Network
# Command-line configuration for training the searched architecture on ImageNet.
parser = argparse.ArgumentParser("training imagenet")
parser.add_argument('--workers', type=int, default=32, help='number of workers to load dataset')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.1, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-5, help='weight decay')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--epochs', type=int, default=250, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--save', type=str, default='/tmp/checkpoints/', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='PDARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5., help='gradient clipping')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('--lr_scheduler', type=str, default='linear', help='lr scheduler, linear or cosine')
parser.add_argument('--tmp_data_dir', type=str, default='/tmp/cache/', help='temp data dir')
parser.add_argument('--note', type=str, default='try', help='note for this run')
args, unparsed = parser.parse_known_args()

# Unique experiment directory stamped with the run note and start time;
# every *.py file in the cwd is snapshot into it for reproducibility.
args.save = '{}eval-{}-{}'.format(args.save, args.note, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

# Log to stdout and to <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)

CLASSES = 1000
class CrossEntropyLabelSmooth(nn.Module):
    """Cross-entropy with uniform label smoothing.

    The one-hot target is blended with a uniform distribution:
    (1 - epsilon) * one_hot + epsilon / num_classes, and the loss is the
    mean (over the batch) of the smoothed negative log-likelihood.
    """

    def __init__(self, num_classes, epsilon):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        smoothed = one_hot * (1 - self.epsilon) + self.epsilon / self.num_classes
        return (-smoothed * log_probs).mean(0).sum()
def main():
    """Train the searched genotype on ImageNet and checkpoint the best top-1 model."""
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    # Seed all RNGs; cudnn.benchmark lets cuDNN pick the fastest conv algorithms.
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed_args = %s", unparsed)
    num_gpus = torch.cuda.device_count()
    # Resolve the genotype by name from genotypes.py.
    genotype = eval("genotypes.%s" % args.arch)
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if num_gpus > 1:
        model = nn.DataParallel(model)
        model = model.cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # Plain cross-entropy for validation; label-smoothed variant for training.
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    data_dir = os.path.join(args.tmp_data_dir, 'imagenet')
    traindir = os.path.join(data_dir, 'train')
    validdir = os.path.join(data_dir, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)

    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    best_acc_top1 = 0
    best_acc_top5 = 0
    for epoch in range(args.epochs):
        # Either cosine annealing or the hand-rolled linear decay (adjust_lr).
        if args.lr_scheduler == 'cosine':
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)
        else:
            print('Wrong lr type, exit')
            sys.exit(1)
        logging.info('Epoch: %d lr %e', epoch, current_lr)
        # Gradual LR warm-up over the first 5 epochs for large batch sizes.
        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch, current_lr * (epoch + 1) / 5.0)
        # Drop-path probability is ramped linearly over training.
        if num_gpus > 1:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        epoch_start = time.time()
        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
        logging.info('Train_acc: %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
        logging.info('Valid_acc_top1: %f', valid_acc_top1)
        logging.info('Valid_acc_top5: %f', valid_acc_top5)
        epoch_duration = time.time() - epoch_start
        logging.info('Epoch time: %ds.', epoch_duration)
        # Checkpoint every epoch; a new top-1 record is also copied to model_best.
        is_best = False
        if valid_acc_top5 > best_acc_top5:
            best_acc_top5 = valid_acc_top5
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc_top1': best_acc_top1,
            'optimizer' : optimizer.state_dict(),
        }, is_best, args.save)
def adjust_lr(optimizer, epoch):
    """Linearly decay the learning rate; use a gentler slope over the last 5 epochs.

    Returns the LR that was written into every param group.
    """
    remaining = args.epochs - epoch
    if remaining > 5:
        lr = args.learning_rate * (remaining - 5) / (args.epochs - 5)
    else:
        # Final 5 epochs: smaller slope because lr * 1/epochs is relatively large.
        lr = args.learning_rate * remaining / ((args.epochs - 5) * 5)
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def train(train_queue, model, criterion, optimizer):
    """Run one training epoch; return (top-1 accuracy, average loss)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    batch_time = utils.AvgrageMeter()
    model.train()

    for step, (input, target) in enumerate(train_queue):
        target = target.cuda(non_blocking=True)
        input = input.cuda(non_blocking=True)
        b_start = time.time()
        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        # The auxiliary tower's loss is mixed in with a fixed weight.
        if args.auxiliary:
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight*loss_aux

        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        batch_time.update(time.time() - b_start)
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)

        if step % args.report_freq == 0:
            end_time = time.time()
            if step == 0:
                duration = 0
                start_time = time.time()
            else:
                # Wall-clock time since the previous report.
                duration = end_time - start_time
                start_time = time.time()
            logging.info('TRAIN Step: %03d Objs: %e R1: %f R5: %f Duration: %ds BTime: %.3fs',
                         step, objs.avg, top1.avg, top5.avg, duration, batch_time.avg)

    return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Run a validation pass; return (top-1 accuracy, top-5 accuracy, average loss)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()

    for step, (input, target) in enumerate(valid_queue):
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        with torch.no_grad():
            logits, _ = model(input)
            loss = criterion(logits, target)

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)

        if step % args.report_freq == 0:
            end_time = time.time()
            if step == 0:
                duration = 0
                start_time = time.time()
            else:
                # Wall-clock time since the previous report.
                duration = end_time - start_time
                start_time = time.time()
            logging.info('VALID Step: %03d Objs: %e R1: %f R5: %f Duration: %ds', step, objs.avg, top1.avg, top5.avg, duration)

    return top1.avg, top5.avg, objs.avg
if __name__ == '__main__':
    # Script entry point.
    main()
| 10,818 | 38.922509 | 127 | py |
pdarts | pdarts-master/utils.py | import os
import numpy as np
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
class AvgrageMeter(object):
    """Running sum/count/average of scalar values.

    (Name is an upstream misspelling of "AverageMeter"; kept because callers use it.)
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        """Fold in `val` observed `n` times and refresh the average."""
        self.sum = self.sum + val * n
        self.cnt = self.cnt + n
        self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy (in percent) for each k in `topk`.

    Args:
        output: (batch, num_classes) score/logit tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values.

    Returns:
        List of 0-dim tensors, one per k, each the percentage of samples whose
        target is among the k highest-scoring classes.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # `pred` is transposed, so eq() can yield a non-contiguous tensor;
        # view(-1) raises on non-contiguous input in recent PyTorch, so use
        # reshape(-1) (copies only when needed).
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0/batch_size))
    return res
class Cutout(object):
    """Zero out one randomly-placed square patch of side `length` in a CHW tensor (in place)."""

    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        h, w = img.size(1), img.size(2)
        # Pick a random center; the square is clipped at the image borders.
        cy = np.random.randint(h)
        cx = np.random.randint(w)
        half = self.length // 2
        y1, y2 = np.clip(cy - half, 0, h), np.clip(cy + half, 0, h)
        x1, x2 = np.clip(cx - half, 0, w), np.clip(cx + half, 0, w)
        mask = np.ones((h, w), np.float32)
        mask[y1:y2, x1:x2] = 0.
        # Broadcast the HxW mask over all channels and zero in place.
        img *= torch.from_numpy(mask).expand_as(img)
        return img
def _data_transforms_cifar10(args):
    """Build (train_transform, valid_transform) pipelines for CIFAR-10."""
    mean = [0.49139968, 0.48215827, 0.44653124]
    std = [0.24703233, 0.24348505, 0.26158768]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    if args.cutout:
        # Appended after Normalize, so the patch zeroes normalized pixel values.
        train_transform.transforms.append(Cutout(args.cutout_length))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    return train_transform, valid_transform
def _data_transforms_cifar100(args):
    """Build (train_transform, valid_transform) pipelines for CIFAR-100."""
    mean = [0.5071, 0.4867, 0.4408]
    std = [0.2675, 0.2565, 0.2761]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    if args.cutout:
        # Appended after Normalize, so the patch zeroes normalized pixel values.
        train_transform.transforms.append(Cutout(args.cutout_length))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    return train_transform, valid_transform
def count_parameters_in_MB(model):
    """Return the model's parameter count in millions, skipping auxiliary-head params.

    Uses the builtin sum over Tensor.numel() — np.sum over a generator is
    deprecated in NumPy and needlessly indirect here.
    """
    return sum(v.numel() for name, v in model.named_parameters() if "auxiliary" not in name) / 1e6
def save_checkpoint(state, is_best, save):
    """Write `state` to <save>/checkpoint.pth.tar; duplicate as model_best.pth.tar when `is_best`."""
    ckpt_path = os.path.join(save, 'checkpoint.pth.tar')
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(save, 'model_best.pth.tar'))
def save(model, model_path):
    """Serialize only the model's state_dict (not the module object) to `model_path`."""
    state = model.state_dict()
    torch.save(state, model_path)
def load(model, model_path):
    """Load a state_dict from `model_path` into `model` (strict key matching)."""
    state = torch.load(model_path)
    model.load_state_dict(state)
def drop_path(x, drop_prob):
    """Apply drop-path (stochastic depth) to `x` in place and return it.

    With probability `drop_prob`, an entire sample in the batch is zeroed;
    survivors are rescaled by 1/keep_prob so the expectation is preserved.

    The original used `Variable(torch.cuda.FloatTensor(...))`, which is
    deprecated and crashes on CPU-only machines; building the mask on
    x's own device/dtype is equivalent and device-agnostic.
    """
    if drop_prob > 0.:
        keep_prob = 1.-drop_prob
        # One Bernoulli draw per sample, broadcast over C/H/W.
        mask = torch.empty(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device).bernoulli_(keep_prob)
        x.div_(keep_prob)
        x.mul_(mask)
    return x
def create_exp_dir(path, scripts_to_save=None):
    """Create an experiment directory (parents included) and optionally snapshot scripts.

    Args:
        path: directory to create; nested parents are created as needed.
        scripts_to_save: optional iterable of file paths copied into <path>/scripts.
    """
    # makedirs with exist_ok avoids both the missing-parent failure of os.mkdir
    # and the TOCTOU race between an exists() check and the creation; it also
    # makes re-running (e.g. re-snapshotting scripts) safe.
    os.makedirs(path, exist_ok=True)
    print('Experiment dir : {}'.format(path))

    if scripts_to_save is not None:
        script_dir = os.path.join(path, 'scripts')
        os.makedirs(script_dir, exist_ok=True)
        for script in scripts_to_save:
            dst_file = os.path.join(script_dir, os.path.basename(script))
            shutil.copyfile(script, dst_file)
| 3,652 | 24.907801 | 105 | py |
pdarts | pdarts-master/model.py | import torch
import torch.nn as nn
from operations import *
from torch.autograd import Variable
from utils import drop_path
class Cell(nn.Module):
    """One fixed (post-search) DARTS cell built from a genotype.

    Each of the `_steps` intermediate nodes sums the outputs of two ops
    applied to earlier states; the cell output concatenates the states
    listed in the genotype's concat indices.
    """

    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
        super(Cell, self).__init__()
        # If the previous cell reduced, s0 has larger stride than s1; match them.
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)

        if reduction:
            op_names, indices = zip(*genotype.reduce)
            concat = genotype.reduce_concat
        else:
            op_names, indices = zip(*genotype.normal)
            concat = genotype.normal_concat
        self._compile(C, op_names, indices, concat, reduction)

    def _compile(self, C, op_names, indices, concat, reduction):
        """Instantiate the genotype's ops; two (op, input-index) pairs per node."""
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2
        self._concat = concat
        self.multiplier = len(concat)

        self._ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            # In a reduction cell only edges from the two input states are strided.
            stride = 2 if reduction and index < 2 else 1
            op = OPS[name](C, stride, True)
            self._ops += [op]
        self._indices = indices

    def forward(self, s0, s1, drop_prob):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)

        states = [s0, s1]
        for i in range(self._steps):
            h1 = states[self._indices[2*i]]
            h2 = states[self._indices[2*i+1]]
            op1 = self._ops[2*i]
            op2 = self._ops[2*i+1]
            h1 = op1(h1)
            h2 = op2(h2)
            # Drop-path regularization during training; identity (skip) edges
            # are never dropped.
            if self.training and drop_prob > 0.:
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_prob)
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_prob)
            s = h1 + h2
            states += [s]
        return torch.cat([states[i] for i in self._concat], dim=1)
class AuxiliaryHeadCIFAR(nn.Module):
    """Auxiliary classifier head; expects an 8x8 feature map (8x8 -> 2x2 -> 1x1)."""

    def __init__(self, C, num_classes):
        super(AuxiliaryHeadCIFAR, self).__init__()
        trunk = [
            nn.ReLU(inplace=True),
            # 8x8 -> 2x2
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            # 2x2 -> 1x1, leaving 768 features for the linear classifier
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        ]
        self.features = nn.Sequential(*trunk)
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
class AuxiliaryHeadImageNet(nn.Module):
    """Auxiliary classifier head for ImageNet.

    NOTE(review): the upstream comment claimed a 14x14 input, but the
    768-unit classifier only matches a 1x1 spatial output, which this
    pool/conv stack produces from a 7x7 feature map (7x7 -> 2x2 -> 1x1).
    Confirm the feeding layer's size before reusing elsewhere.
    """

    def __init__(self, C, num_classes):
        super(AuxiliaryHeadImageNet, self).__init__()
        trunk = [
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        ]
        self.features = nn.Sequential(*trunk)
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
class NetworkCIFAR(nn.Module):
    """Stacked-cell evaluation network for CIFAR built from a fixed genotype.

    Reduction cells (channel doubling, stride 2) sit at 1/3 and 2/3 depth;
    an optional auxiliary classifier hangs off the 2/3 point. Callers must
    set `drop_path_prob` on the instance before calling forward().
    """

    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkCIFAR, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary

        stem_multiplier = 3
        C_curr = stem_multiplier*C
        self.stem = nn.Sequential(
            nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr)
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            if i in [layers//3, 2*layers//3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            # Each cell outputs multiplier * C_curr channels.
            C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr
            if i == 2*layers//3:
                C_to_auxiliary = C_prev
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, input):
        logits_aux = None
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            # Auxiliary logits are produced only during training, at 2/3 depth.
            if i == 2*self._layers//3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0),-1))
        return logits, logits_aux
class NetworkImageNet(nn.Module):
    """Stacked-cell evaluation network for ImageNet built from a fixed genotype.

    Three stride-2 convs (stem0 has two, stem1 one) reduce the input 8x
    before the cells; reduction cells sit at 1/3 and 2/3 depth. Callers must
    set `drop_path_prob` on the instance before calling forward().
    """

    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkImageNet, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary

        self.stem0 = nn.Sequential(
            nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )

        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )

        C_prev_prev, C_prev, C_curr = C, C, C

        self.cells = nn.ModuleList()
        # stem1 strides relative to stem0's output, so the first cell sees
        # inputs at different resolutions — treated like a preceding reduction.
        reduction_prev = True
        for i in range(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if i == 2 * layers // 3:
                C_to_auxiliary = C_prev

        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, input):
        logits_aux = None
        s0 = self.stem0(input)
        s1 = self.stem1(s0)
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            # Auxiliary logits are produced only during training, at 2/3 depth.
            if i == 2 * self._layers // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits, logits_aux
| 7,284 | 34.710784 | 95 | py |
pdarts | pdarts-master/model_search.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import *
from torch.autograd import Variable
from genotypes import PRIMITIVES
from genotypes import Genotype
class MixedOp(nn.Module):
    """Weighted mixture of the candidate ops that are still switched on.

    `switch` is a boolean mask over PRIMITIVES; pruned candidates are simply
    not instantiated. A Dropout with rate `p` is chained after the Identity op.
    """

    def __init__(self, C, stride, switch, p):
        super(MixedOp, self).__init__()
        self.m_ops = nn.ModuleList()
        self.p = p
        for i in range(len(switch)):
            if switch[i]:
                primitive = PRIMITIVES[i]
                op = OPS[primitive](C, stride, False)
                # Pooling ops get a BatchNorm so their outputs are comparable in scale.
                if 'pool' in primitive:
                    op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))
                if isinstance(op, Identity) and p > 0:
                    op = nn.Sequential(op, nn.Dropout(self.p))
                self.m_ops.append(op)

    def update_p(self):
        # Propagate the current dropout rate into the Identity's Dropout, if present.
        for op in self.m_ops:
            if isinstance(op, nn.Sequential):
                if isinstance(op[0], Identity):
                    op[1].p = self.p

    def forward(self, x, weights):
        # Architecture-weighted sum over all active candidate ops.
        return sum(w * op(x) for w, op in zip(weights, self.m_ops))
class Cell(nn.Module):
    """Search-phase cell: every edge between states is a MixedOp over active candidates."""

    def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev, switches, p):
        super(Cell, self).__init__()
        self.reduction = reduction
        self.p = p
        # If the previous cell reduced, s0 must be downsampled to match s1.
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
        self._steps = steps
        self._multiplier = multiplier

        self.cell_ops = nn.ModuleList()
        switch_count = 0
        # Node i receives edges from the 2 cell inputs plus all i earlier nodes.
        for i in range(self._steps):
            for j in range(2+i):
                # Only edges from the two input states stride in a reduction cell.
                stride = 2 if reduction and j < 2 else 1
                op = MixedOp(C, stride, switch=switches[switch_count], p=self.p)
                self.cell_ops.append(op)
                switch_count = switch_count + 1

    def update_p(self):
        # Push the new dropout rate down into every MixedOp.
        for op in self.cell_ops:
            op.p = self.p
            op.update_p()

    def forward(self, s0, s1, weights):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)

        states = [s0, s1]
        offset = 0
        for i in range(self._steps):
            # Node i sums the weighted mixed ops over all existing states.
            s = sum(self.cell_ops[offset+j](h, weights[offset+j]) for j, h in enumerate(states))
            offset += len(states)
            states.append(s)

        # Concatenate the last `multiplier` intermediate states as the output.
        return torch.cat(states[-self._multiplier:], dim=1)
class Network(nn.Module):
    """Search-phase super-network with learnable architecture parameters (alphas).

    `switches_normal`/`switches_reduce` mask which candidate ops remain active
    on each edge; the alpha matrices only cover the active ones.

    NOTE(review): the list defaults `switches_normal=[]`/`switches_reduce=[]`
    are mutable default arguments; they are only read here, but the default
    (empty) value would make `switch_ons[0]` raise — callers must pass real
    switch tables.
    """

    def __init__(self, C, num_classes, layers, criterion, steps=4, multiplier=4, stem_multiplier=3, switches_normal=[], switches_reduce=[], p=0.0):
        super(Network, self).__init__()
        self._C = C
        self._num_classes = num_classes
        self._layers = layers
        self._criterion = criterion
        self._steps = steps
        self._multiplier = multiplier
        self.p = p
        self.switches_normal = switches_normal
        # Count active candidates per edge; only the first edge's count is used
        # (assumes every edge keeps the same number of active ops — TODO confirm).
        switch_ons = []
        for i in range(len(switches_normal)):
            ons = 0
            for j in range(len(switches_normal[i])):
                if switches_normal[i][j]:
                    ons = ons + 1
            switch_ons.append(ons)
            ons = 0
        self.switch_on = switch_ons[0]

        C_curr = stem_multiplier*C
        self.stem = nn.Sequential(
            nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
            nn.BatchNorm2d(C_curr)
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            # Reduction cells (with their own switch table) at 1/3 and 2/3 depth.
            if i in [layers//3, 2*layers//3]:
                C_curr *= 2
                reduction = True
                cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, switches_reduce, self.p)
            else:
                reduction = False
                cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, switches_normal, self.p)
            # cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, switches)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, multiplier*C_curr

        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

        self._initialize_alphas()

    def forward(self, input):
        s0 = s1 = self.stem(input)
        for i, cell in enumerate(self.cells):
            # Softmax the architecture weights for the matching cell type;
            # a single-column alpha matrix is normalized along dim 0 instead.
            if cell.reduction:
                if self.alphas_reduce.size(1) == 1:
                    weights = F.softmax(self.alphas_reduce, dim=0)
                else:
                    weights = F.softmax(self.alphas_reduce, dim=-1)
            else:
                if self.alphas_normal.size(1) == 1:
                    weights = F.softmax(self.alphas_normal, dim=0)
                else:
                    weights = F.softmax(self.alphas_normal, dim=-1)
            s0, s1 = s1, cell(s0, s1, weights)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0),-1))
        return logits

    def update_p(self):
        # Propagate the current dropout rate into every cell (and its MixedOps).
        for cell in self.cells:
            cell.p = self.p
            cell.update_p()

    def _loss(self, input, target):
        """Criterion applied to the network's own forward pass."""
        logits = self(input)
        return self._criterion(logits, target)

    def _initialize_alphas(self):
        # One alpha row per edge (14 when steps=4), one column per active op;
        # initialized with small Gaussian noise.
        k = sum(1 for i in range(self._steps) for n in range(2+i))
        num_ops = self.switch_on
        self.alphas_normal = nn.Parameter(torch.FloatTensor(1e-3*np.random.randn(k, num_ops)))
        self.alphas_reduce = nn.Parameter(torch.FloatTensor(1e-3*np.random.randn(k, num_ops)))
        self._arch_parameters = [
            self.alphas_normal,
            self.alphas_reduce,
        ]

    def arch_parameters(self):
        """Return the list of architecture parameters for the arch optimizer."""
        return self._arch_parameters
| 6,003 | 34.738095 | 147 | py |
pdarts | pdarts-master/train_search.py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import copy
from model_search import Network
from genotypes import PRIMITIVES
from genotypes import Genotype
# Command-line options for the architecture search run.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--workers', type=int, default=2, help='number of workers to load dataset')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.0, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--epochs', type=int, default=25, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=5, help='total number of layers')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='/tmp/checkpoints/', help='experiment path')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--arch_learning_rate', type=float, default=6e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--tmp_data_dir', type=str, default='/tmp/cache/', help='temp data dir')
parser.add_argument('--note', type=str, default='try', help='note for this run')
# The three append-style options supply one value per search stage.
parser.add_argument('--dropout_rate', action='append', default=[], help='dropout rate of skip connect')
parser.add_argument('--add_width', action='append', default=['0'], help='add channels')
parser.add_argument('--add_layers', action='append', default=['0'], help='add layers')
parser.add_argument('--cifar100', action='store_true', default=False, help='search with cifar100 dataset')
args = parser.parse_args()
# Time-stamped experiment directory; current *.py sources are snapshotted into it.
args.save = '{}search-{}-{}'.format(args.save, args.note, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Mirror log output to stdout and to <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
if args.cifar100:
    CIFAR_CLASSES = 100
    data_folder = 'cifar-100-python'
else:
    CIFAR_CLASSES = 10
    data_folder = 'cifar-10-batches-py'
def main():
    """Run the progressive (multi-stage) architecture search.

    Each stage trains an over-parameterized network, then prunes the
    lowest-scoring candidate operations (`num_to_drop`) from every edge,
    optionally deepening/widening the network between stages.  The final
    stage derives and logs the resulting genotypes.
    """
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    # prepare dataset
    if args.cifar100:
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
    else:
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
    if args.cifar100:
        train_data = dset.CIFAR100(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
    else:
        train_data = dset.CIFAR10(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
    # Split the training set into weight-training and architecture-validation halves.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=args.workers)
    valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=args.workers)
    # build Network
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # 14 edges per cell; every candidate op starts switched on.
    switches = []
    for i in range(14):
        switches.append([True for j in range(len(PRIMITIVES))])
    switches_normal = copy.deepcopy(switches)
    switches_reduce = copy.deepcopy(switches)
    # To be moved to args
    num_to_keep = [5, 3, 1]
    num_to_drop = [3, 2, 2]
    if len(args.add_width) == 3:
        add_width = args.add_width
    else:
        add_width = [0, 0, 0]
    if len(args.add_layers) == 3:
        add_layers = args.add_layers
    else:
        add_layers = [0, 6, 12]
    if len(args.dropout_rate) ==3:
        drop_rate = args.dropout_rate
    else:
        drop_rate = [0.0, 0.0, 0.0]
    # Epochs at the start of each stage during which only weights are trained.
    eps_no_archs = [10, 10, 10]
    for sp in range(len(num_to_keep)):
        model = Network(args.init_channels + int(add_width[sp]), CIFAR_CLASSES, args.layers + int(add_layers[sp]), criterion, switches_normal=switches_normal, switches_reduce=switches_reduce, p=float(drop_rate[sp]))
        model = nn.DataParallel(model)
        model = model.cuda()
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
        # Separate the network weights from the architecture (alpha) parameters.
        network_params = []
        for k, v in model.named_parameters():
            if not (k.endswith('alphas_normal') or k.endswith('alphas_reduce')):
                network_params.append(v)
        optimizer = torch.optim.SGD(
                network_params,
                args.learning_rate,
                momentum=args.momentum,
                weight_decay=args.weight_decay)
        optimizer_a = torch.optim.Adam(model.module.arch_parameters(),
                    lr=args.arch_learning_rate, betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                optimizer, float(args.epochs), eta_min=args.learning_rate_min)
        sm_dim = -1
        epochs = args.epochs
        eps_no_arch = eps_no_archs[sp]
        scale_factor = 0.2
        for epoch in range(epochs):
            scheduler.step()
            lr = scheduler.get_lr()[0]
            logging.info('Epoch: %d lr: %e', epoch, lr)
            epoch_start = time.time()
            # training
            if epoch < eps_no_arch:
                # Warm-up: train weights only, with decaying skip-connect dropout.
                model.module.p = float(drop_rate[sp]) * (epochs - epoch - 1) / epochs
                model.module.update_p()
                train_acc, train_obj = train(train_queue, valid_queue, model, network_params, criterion, optimizer, optimizer_a, lr, train_arch=False)
            else:
                # Joint phase: also update architecture parameters.
                model.module.p = float(drop_rate[sp]) * np.exp(-(epoch - eps_no_arch) * scale_factor)
                model.module.update_p()
                train_acc, train_obj = train(train_queue, valid_queue, model, network_params, criterion, optimizer, optimizer_a, lr, train_arch=True)
            logging.info('Train_acc %f', train_acc)
            epoch_duration = time.time() - epoch_start
            logging.info('Epoch time: %ds', epoch_duration)
            # validation
            if epochs - epoch < 5:
                valid_acc, valid_obj = infer(valid_queue, model, criterion)
                logging.info('Valid_acc %f', valid_acc)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
        print('------Dropping %d paths------' % num_to_drop[sp])
        # Save switches info for s-c refinement. 
        if sp == len(num_to_keep) - 1:
            switches_normal_2 = copy.deepcopy(switches_normal)
            switches_reduce_2 = copy.deepcopy(switches_reduce)
        # drop operations with low architecture weights
        arch_param = model.module.arch_parameters()
        normal_prob = F.softmax(arch_param[0], dim=sm_dim).data.cpu().numpy()
        for i in range(14):
            idxs = []
            for j in range(len(PRIMITIVES)):
                if switches_normal[i][j]:
                    idxs.append(j)
            if sp == len(num_to_keep) - 1:
                # for the last stage, drop all Zero operations
                drop = get_min_k_no_zero(normal_prob[i, :], idxs, num_to_drop[sp])
            else:
                drop = get_min_k(normal_prob[i, :], num_to_drop[sp])
            for idx in drop:
                switches_normal[i][idxs[idx]] = False
        reduce_prob = F.softmax(arch_param[1], dim=-1).data.cpu().numpy()
        for i in range(14):
            idxs = []
            for j in range(len(PRIMITIVES)):
                if switches_reduce[i][j]:
                    idxs.append(j)
            if sp == len(num_to_keep) - 1:
                drop = get_min_k_no_zero(reduce_prob[i, :], idxs, num_to_drop[sp])
            else:
                drop = get_min_k(reduce_prob[i, :], num_to_drop[sp])
            for idx in drop:
                switches_reduce[i][idxs[idx]] = False
        logging.info('switches_normal = %s', switches_normal)
        logging_switches(switches_normal)
        logging.info('switches_reduce = %s', switches_reduce)
        logging_switches(switches_reduce)
        if sp == len(num_to_keep) - 1:
            # Final stage: derive the discrete architecture.
            arch_param = model.module.arch_parameters()
            normal_prob = F.softmax(arch_param[0], dim=sm_dim).data.cpu().numpy()
            reduce_prob = F.softmax(arch_param[1], dim=sm_dim).data.cpu().numpy()
            normal_final = [0 for idx in range(14)]
            reduce_final = [0 for idx in range(14)]
            # remove all Zero operations
            for i in range(14):
                if switches_normal_2[i][0] == True:
                    normal_prob[i][0] = 0
                normal_final[i] = max(normal_prob[i])
                if switches_reduce_2[i][0] == True:
                    reduce_prob[i][0] = 0
                reduce_final[i] = max(reduce_prob[i])
            # Generate Architecture, similar to DARTS
            keep_normal = [0, 1]
            keep_reduce = [0, 1]
            n = 3
            start = 2
            for i in range(3):
                end = start + n
                tbsn = normal_final[start:end]
                tbsr = reduce_final[start:end]
                edge_n = sorted(range(n), key=lambda x: tbsn[x])
                keep_normal.append(edge_n[-1] + start)
                keep_normal.append(edge_n[-2] + start)
                edge_r = sorted(range(n), key=lambda x: tbsr[x])
                keep_reduce.append(edge_r[-1] + start)
                keep_reduce.append(edge_r[-2] + start)
                start = end
                n = n + 1
            # set switches according the ranking of arch parameters
            for i in range(14):
                if not i in keep_normal:
                    for j in range(len(PRIMITIVES)):
                        switches_normal[i][j] = False
                if not i in keep_reduce:
                    for j in range(len(PRIMITIVES)):
                        switches_reduce[i][j] = False
            # translate switches into genotype
            genotype = parse_network(switches_normal, switches_reduce)
            logging.info(genotype)
            ## restrict skipconnect (normal cell only)
            logging.info('Restricting skipconnect...')
            # generating genotypes with different numbers of skip-connect operations
            for sks in range(0, 9):
                max_sk = 8 - sks
                num_sk = check_sk_number(switches_normal)
                if not num_sk > max_sk:
                    continue
                while num_sk > max_sk:
                    normal_prob = delete_min_sk_prob(switches_normal, switches_normal_2, normal_prob)
                    switches_normal = keep_1_on(switches_normal_2, normal_prob)
                    switches_normal = keep_2_branches(switches_normal, normal_prob)
                    num_sk = check_sk_number(switches_normal)
                logging.info('Number of skip-connect: %d', max_sk)
                genotype = parse_network(switches_normal, switches_reduce)
                logging.info(genotype)
def train(train_queue, valid_queue, model, network_params, criterion, optimizer, optimizer_a, lr, train_arch=True):
    """Train for one epoch over `train_queue`.

    When `train_arch` is True, each step also updates the architecture
    parameters on a batch drawn from `valid_queue` (bilevel optimization).
    Returns (top-1 accuracy, average loss) over the training batches.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    for step, (input, target) in enumerate(train_queue):
        model.train()
        n = input.size(0)
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        if train_arch:
            # In the original implementation of DARTS, it is input_search, target_search = next(iter(valid_queue), which slows down
            # the training when using PyTorch 0.4 and above. 
            # Bare except covers both the missing iterator on the first step
            # (NameError) and an exhausted iterator (StopIteration).
            try:
                input_search, target_search = next(valid_queue_iter)
            except:
                valid_queue_iter = iter(valid_queue)
                input_search, target_search = next(valid_queue_iter)
            input_search = input_search.cuda()
            target_search = target_search.cuda(non_blocking=True)
            # Architecture step on validation data.
            optimizer_a.zero_grad()
            logits = model(input_search)
            loss_a = criterion(logits, target_search)
            loss_a.backward()
            nn.utils.clip_grad_norm_(model.module.arch_parameters(), args.grad_clip)
            optimizer_a.step()
        # Weight step on training data.
        optimizer.zero_grad()
        logits = model(input)
        loss = criterion(logits, target)
        loss.backward()
        nn.utils.clip_grad_norm_(network_params, args.grad_clip)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)
        if step % args.report_freq == 0:
            logging.info('TRAIN Step: %03d Objs: %e R1: %f R5: %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate `model` on `valid_queue`; returns (top-1 accuracy, avg loss)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    for step, (input, target) in enumerate(valid_queue):
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        # No gradients needed at evaluation time.
        with torch.no_grad():
            logits = model(input)
            loss = criterion(logits, target)
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)
        if step % args.report_freq == 0:
            logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
def parse_network(switches_normal, switches_reduce):
    """Translate final per-edge switch masks into a Genotype description."""

    def _parse_switches(switches):
        gene = []
        start, n = 0, 2
        # Four intermediate nodes; node i has n = 2 + i incoming edges.
        for _ in range(4):
            end = start + n
            for edge in range(start, end):
                for op, active in enumerate(switches[edge]):
                    if active:
                        gene.append((PRIMITIVES[op], edge - start))
            start, n = end, n + 1
        return gene

    concat = range(2, 6)
    return Genotype(
        normal=_parse_switches(switches_normal), normal_concat=concat,
        reduce=_parse_switches(switches_reduce), reduce_concat=concat,
    )
def get_min_k(input_in, k):
    """Return the indices of the k smallest entries of `input_in`, ascending.

    Ties resolve to the lowest index (np.argmin's first occurrence).
    `input_in` is not modified.
    """
    scores = copy.deepcopy(input_in)
    picked = []
    for _ in range(k):
        smallest = np.argmin(scores)
        picked.append(smallest)
        # Mask the chosen slot so the next argmin finds the runner-up
        # (entries are softmax probabilities < 1, so 1 acts as +inf).
        scores[smallest] = 1
    return picked
def get_min_k_no_zero(w_in, idxs, k):
    """Pick k columns of `w_in` to drop, always dropping the 'none' op first.

    If op index 0 is still active (0 in `idxs`), its column is pre-selected
    and excluded from the argmin scan over the remaining columns.
    `w_in` is not modified.
    """
    weights = copy.deepcopy(w_in)
    index = []
    zero_active = 0 in idxs
    if zero_active:
        # Remove the 'none' column and record it as already dropped.
        weights = weights[1:]
        index.append(0)
        k = k - 1
    for _ in range(k):
        pos = np.argmin(weights)
        weights[pos] = 1
        # Shift back by one if the 'none' column was removed up front.
        index.append(pos + 1 if zero_active else pos)
    return index
def logging_switches(switches):
    """Log, per edge, the names of the candidate operations still active."""
    for row in switches:
        active_ops = [PRIMITIVES[j] for j, on in enumerate(row) if on]
        logging.info(active_ops)
def check_sk_number(switches):
    """Count edges whose op index 3 (skip_connect in PRIMITIVES) is active."""
    return sum(1 for row in switches if row[3])
def delete_min_sk_prob(switches_in, switches_bk, probs_in):
    """Zero out the probability of the weakest remaining skip-connect edge.

    Returns a copy of `probs_in`; inputs are not modified.
    """

    def _sk_column(row):
        # Column of skip_connect within this row of `probs_in`, i.e. how many
        # of ops 0..2 are kept in switches_bk; -1 if skip-connect was dropped.
        if not switches_in[row][3]:
            return -1
        return sum(1 for op in range(3) if switches_bk[row][op])

    probs_out = copy.deepcopy(probs_in)
    sk_prob = [1.0 for _ in range(len(switches_bk))]
    for row in range(len(switches_in)):
        col = _sk_column(row)
        if col != -1:
            sk_prob[row] = probs_out[row][col]
    weakest = np.argmin(sk_prob)
    probs_out[weakest][_sk_column(weakest)] = 0.0
    return probs_out
def keep_1_on(switches_in, probs):
    """Per edge, drop two of the active ops (never 'none'), keeping the best.

    Returns a copy of `switches_in`.
    """
    switches = copy.deepcopy(switches_in)
    for row in range(len(switches)):
        kept = [op for op in range(len(PRIMITIVES)) if switches[row][op]]
        for victim in get_min_k_no_zero(probs[row, :], kept, 2):
            switches[row][kept[victim]] = False
    return switches
def keep_2_branches(switches_in, probs):
    """Keep only the two strongest input edges per intermediate node.

    Edges 0-1 feed node 0 and are always kept; nodes 1-3 have 3/4/5 input
    edges respectively, of which the two with the highest per-edge maximum
    probability survive.  Returns a copy of `switches_in`.
    """
    switches = copy.deepcopy(switches_in)
    best = [max(probs[i]) for i in range(len(switches))]
    keep = [0, 1]
    start, n = 2, 3
    for _ in range(3):
        end = start + n
        window = best[start:end]
        order = sorted(range(n), key=lambda j: window[j])
        keep.append(order[-1] + start)
        keep.append(order[-2] + start)
        start, n = end, n + 1
    for i in range(len(switches)):
        if i not in keep:
            for j in range(len(PRIMITIVES)):
                switches[i][j] = False
    return switches
# Script entry point: run the full search and report the wall-clock time.
if __name__ == '__main__':
    start_time = time.time()
    main()
    end_time = time.time()
    duration = end_time - start_time
    logging.info('Total searching time: %ds', duration)
| 19,015 | 39.545842 | 215 | py |
pdarts | pdarts-master/test_imagenet.py | import os
import sys
import numpy as np
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from model import NetworkImageNet as Network
# Command-line options for ImageNet evaluation of a trained model.
parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--model_path', type=str, default='../models/imagenet.pth.tar', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--arch', type=str, default='PDARTS', help='which architecture to use')
args = parser.parse_args()
# Log to stdout only (no experiment directory for pure evaluation).
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
    format=log_format, datefmt='%m/%d %I:%M:%S %p')
CLASSES = 1000
def main():
    """Evaluate a pretrained P-DARTS model on the ImageNet validation set."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    cudnn.enabled=True
    logging.info("args = %s", args)
    # Look up the genotype (cell description) by name from genotypes.py.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    model = nn.DataParallel(model)
    model = model.cuda()
    model.load_state_dict(torch.load(args.model_path)['state_dict'])
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    validdir = os.path.join(args.data, 'val')
    # Standard ImageNet evaluation preprocessing: resize, center-crop, normalize.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=False, num_workers=4)
    # No stochastic depth at evaluation time.
    model.module.drop_path_prob = 0.0
    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
    logging.info('Valid_acc_top1 %f', valid_acc_top1)
    logging.info('Valid_acc_top5 %f', valid_acc_top5)
def infer(valid_queue, model, criterion):
    """Evaluate on `valid_queue`; returns (top-1 acc, top-5 acc, avg loss)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    for step, (input, target) in enumerate(valid_queue):
        input = input.cuda()
        target = target.cuda()
        # No gradients needed at evaluation time; model returns (logits, aux).
        with torch.no_grad():
            logits, _ = model(input)
            loss = criterion(logits, target)
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)
        if step % args.report_freq == 0:
            logging.info('Valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, top5.avg, objs.avg
# Script entry point.
if __name__ == '__main__':
    main()
| 3,334 | 31.378641 | 116 | py |
pdarts | pdarts-master/train_cifar.py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkCIFAR as Network
# Command-line options for training a fixed (post-search) architecture on CIFAR.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--workers', type=int, default=4, help='number of workers')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--epochs', type=int, default=600, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='/tmp/checkpoints/', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='PDARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--tmp_data_dir', type=str, default='/tmp/cache/', help='temp data dir')
parser.add_argument('--note', type=str, default='try', help='note for this run')
parser.add_argument('--cifar100', action='store_true', default=False, help='if use cifar100')
# parse_known_args tolerates extra flags; they are logged but ignored.
args, unparsed = parser.parse_known_args()
# Time-stamped experiment directory; current *.py sources are snapshotted into it.
args.save = '{}eval-{}-{}'.format(args.save, args.note, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Mirror log output to stdout and to <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
if args.cifar100:
    CIFAR_CLASSES = 100
    data_folder = 'cifar-100-python'
else:
    CIFAR_CLASSES = 10
    data_folder = 'cifar-10-batches-py'
def main():
    """Train the selected genotype from scratch on CIFAR-10/100."""
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed args = %s", unparsed)
    num_gpus = torch.cuda.device_count()
    # Look up the genotype (cell description) by name from genotypes.py.
    genotype = eval("genotypes.%s" % args.arch)
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = torch.nn.DataParallel(model)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
        )
    if args.cifar100:
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
    else:
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
    if args.cifar100:
        train_data = dset.CIFAR100(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.tmp_data_dir, train=False, download=True, transform=valid_transform)
    else:
        train_data = dset.CIFAR10(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.tmp_data_dir, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    best_acc = 0.0
    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('Epoch: %d lr %e', epoch, scheduler.get_lr()[0])
        # Drop-path probability ramps up linearly over training.  It is set on
        # both the wrapped module and the DataParallel wrapper.
        model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        start_time = time.time()
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('Train_acc: %f', train_acc)
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        if valid_acc > best_acc:
            best_acc = valid_acc
        logging.info('Valid_acc: %f', valid_acc)
        end_time = time.time()
        duration = end_time - start_time
        print('Epoch time: %ds.' % duration )
        # Checkpoint the latest weights every epoch.
        utils.save(model.module, os.path.join(args.save, 'weights.pt'))
def train(train_queue, model, criterion, optimizer):
    """One training epoch; returns (top-1 accuracy, average loss)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        optimizer.zero_grad()
        # Model returns main logits plus the auxiliary-tower logits.
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight*loss_aux
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        prec1, _ = utils.accuracy(logits, target, topk=(1,5))
        n = input.size(0)
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        if step % args.report_freq == 0:
            logging.info('Train Step: %03d Objs: %e Acc: %f', step, objs.avg, top1.avg)
    return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate `model` on `valid_queue`; returns (top-1 accuracy, avg loss)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    model.eval()
    for step, (input, target) in enumerate(valid_queue):
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # No gradients needed at evaluation time; auxiliary logits are ignored.
        with torch.no_grad():
            logits, _ = model(input)
            loss = criterion(logits, target)
        prec1, _ = utils.accuracy(logits, target, topk=(1,5))
        n = input.size(0)
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        if step % args.report_freq == 0:
            logging.info('Valid Step: %03d Objs: %e Acc: %f', step, objs.avg, top1.avg)
    return top1.avg, objs.avg
# Script entry point: run training and report the wall-clock time.
if __name__ == '__main__':
    start_time = time.time()
    main()
    end_time = time.time()
    duration = end_time - start_time
    logging.info('Eval time: %ds.', duration)
| 7,688 | 39.68254 | 113 | py |
pdarts | pdarts-master/operations.py | import torch
import torch.nn as nn
# Registry of candidate operations: each entry maps a primitive name to a
# factory taking (channels, stride, affine) and returning an nn.Module.
OPS = {
  'none' : lambda C, stride, affine: Zero(stride),
  'avg_pool_3x3' : lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
  'max_pool_3x3' : lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
  'skip_connect' : lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
  'sep_conv_3x3' : lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
  'sep_conv_5x5' : lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
  'sep_conv_7x7' : lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
  'dil_conv_3x3' : lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
  'dil_conv_5x5' : lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),
  'conv_7x1_1x7' : lambda C, stride, affine: nn.Sequential(
    nn.ReLU(inplace=False),
    nn.Conv2d(C, C, (1,7), stride=(1, stride), padding=(0, 3), bias=False),
    nn.Conv2d(C, C, (7,1), stride=(stride, 1), padding=(3, 0), bias=False),
    nn.BatchNorm2d(C, affine=affine)
    ),
}
class ReLUConvBN(nn.Module):
  """ReLU -> Conv2d -> BatchNorm block."""

  def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
    super(ReLUConvBN, self).__init__()
    stages = [
      nn.ReLU(inplace=False),
      nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
      nn.BatchNorm2d(C_out, affine=affine),
    ]
    self.op = nn.Sequential(*stages)

  def forward(self, x):
    return self.op(x)
class DilConv(nn.Module):
  """Dilated depthwise-separable conv: ReLU -> depthwise (dilated) -> pointwise -> BN."""

  def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
    super(DilConv, self).__init__()
    depthwise = nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                          padding=padding, dilation=dilation, groups=C_in, bias=False)
    pointwise = nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False)
    self.op = nn.Sequential(
      nn.ReLU(inplace=False),
      depthwise,
      pointwise,
      nn.BatchNorm2d(C_out, affine=affine),
    )

  def forward(self, x):
    return self.op(x)
class SepConv(nn.Module):
  """Depthwise-separable conv applied twice; only the first stage carries the stride."""

  def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
    super(SepConv, self).__init__()

    def _stage(cin, cout, s):
      # ReLU -> depthwise conv -> pointwise conv -> BN
      return [
        nn.ReLU(inplace=False),
        nn.Conv2d(cin, cin, kernel_size=kernel_size, stride=s, padding=padding, groups=cin, bias=False),
        nn.Conv2d(cin, cout, kernel_size=1, padding=0, bias=False),
        nn.BatchNorm2d(cout, affine=affine),
      ]

    self.op = nn.Sequential(*(_stage(C_in, C_in, stride) + _stage(C_in, C_out, 1)))

  def forward(self, x):
    return self.op(x)
class Identity(nn.Module):
  """No-op module: returns its input unchanged."""

  def __init__(self):
    super(Identity, self).__init__()

  def forward(self, x):
    return x
'''
class Zero(nn.Module):
def __init__(self, stride):
super(Zero, self).__init__()
self.stride = stride
def forward(self, x):
if self.stride == 1:
return x.mul(0.)
return x[:,:,::self.stride,::self.stride].mul(0.)
'''
class Zero(nn.Module):
  """The 'none' operation: emits an all-zero tensor, downsampled by `stride`."""

  def __init__(self, stride):
    super(Zero, self).__init__()
    self.stride = stride

  def forward(self, x):
    n, c, h, w = x.size()
    out_h, out_w = h // self.stride, w // self.stride
    # Allocate the zero tensor on the same device as the input.
    if x.is_cuda:
      with torch.cuda.device(x.get_device()):
        zeros = torch.cuda.FloatTensor(n, c, out_h, out_w).fill_(0)
    else:
      zeros = torch.FloatTensor(n, c, out_h, out_w).fill_(0)
    return zeros
class FactorizedReduce(nn.Module):
  """Halve spatial resolution with two offset stride-2 1x1 convs, concatenated.

  Submodule names (conv_1, conv_2, bn) are preserved for state_dict
  compatibility with the original implementation.
  """

  def __init__(self, C_in, C_out, affine=True):
    super(FactorizedReduce, self).__init__()
    assert C_out % 2 == 0
    self.relu = nn.ReLU(inplace=False)
    self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
    self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
    self.bn = nn.BatchNorm2d(C_out, affine=affine)

  def forward(self, x):
    activated = self.relu(x)
    # The second path is shifted by one pixel so the two stride-2 convs
    # sample complementary spatial grids.
    halves = [self.conv_1(activated), self.conv_2(activated[:, :, 1:, 1:])]
    return self.bn(torch.cat(halves, dim=1))
| 4,144 | 32.97541 | 129 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/run_exp.py | ##########################################################
# pytorch-kaldi-gan
# Walter Heymans
# North West University
# 2020
# Adapted from:
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
from __future__ import print_function
import os
import sys
import glob
import configparser
import numpy as np
from utils import (
check_cfg,
create_lists,
create_configs,
compute_avg_performance,
read_args_command_line,
run_shell,
compute_n_chunks,
get_all_archs,
cfg_item2sec,
dump_epoch_results,
create_curves,
change_lr_cfg,
expand_str_ep,
do_validation_after_chunk,
get_val_info_file_path,
get_val_cfg_file_path,
get_chunks_after_which_to_validate,
)
from data_io import read_lab_fea_refac01 as read_lab_fea
from shutil import copyfile
from core import read_next_chunk_into_shared_list_with_subprocess, extract_data_from_shared_list, convert_numpy_to_torch
import re
from distutils.util import strtobool
import importlib
import math
import multiprocessing
import weights_and_biases as wandb
import torch
skip_decode = False
def _run_forwarding_in_subprocesses(config):
use_cuda = strtobool(config["exp"]["use_cuda"])
if use_cuda:
return False
else:
return True
def _is_first_validation(ep, ck, N_ck_tr, config):
    """True iff chunk `ck` of epoch `ep` is the run's very first validation point."""

    def _nr_of_valid_per_epoch(cfg):
        # Default to a single validation per epoch when the key is absent.
        if "nr_of_valid_per_epoch" in cfg["exp"]:
            return int(cfg["exp"]["nr_of_valid_per_epoch"])
        return 1

    if ep > 0:
        return False
    val_chunks = get_chunks_after_which_to_validate(N_ck_tr, _nr_of_valid_per_epoch(config))
    return ck == val_chunks[0]
def _max_nr_of_parallel_forwarding_processes(config):
if "max_nr_of_parallel_forwarding_processes" in config["forward"]:
return int(config["forward"]["max_nr_of_parallel_forwarding_processes"])
return -1
def print_version_info():
    """Print the start-up banner identifying the project and its origins.

    Output is a 40-column '#'-framed box; each text row is centered in a
    38-character field and then framed. Fixes the banner typo
    "Adapted form:" -> "Adapted from:" (the file header comment shows the
    intended wording).
    """
    width = 40
    inner_width = 38
    rows = [
        " Pytorch-Kaldi-GAN ",
        " Walter Heymans ",
        " North West University ",
        " 2020 ",
        "",
        " Adapted from: ",
        " Pytorch-Kaldi v.0.1 ",
        " Mirco Ravanelli, Titouan Parcollet ",
        " Mila, University of Montreal ",
        " October 2018 ",
    ]
    print("")
    print("".center(width, "#"))
    for row in rows:
        print(row.center(inner_width).center(width, "#"))
    # Closing border, followed by an extra blank line.
    print("".center(width, "#"), end="\n\n")
# START OF EXECUTION #
print_version_info()
# Reading global cfg file (first argument-mandatory file)
cfg_file = sys.argv[1]
if not (os.path.exists(cfg_file)):
    sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
    sys.exit(0)
else:
    config = configparser.ConfigParser()
    config.read(cfg_file)
    # Base name of the config file (without extension), used later to name result files.
    config_file_name = str(os.path.basename(cfg_file)).replace(".cfg", "")
# Reading and parsing optional arguments from command line (e.g.,--optimization,lr=0.002)
[section_args, field_args, value_args] = read_args_command_line(sys.argv, config)
# Output folder creation
out_folder = config["exp"]["out_folder"]
if not os.path.exists(out_folder):
    os.makedirs(out_folder + "/exp_files")
# Log file path
log_file = config["exp"]["out_folder"] + "/log.log"
# Read, parse, and check the config file
cfg_file_proto = config["cfg_proto"]["cfg_proto"]
[config, name_data, name_arch] = check_cfg(cfg_file, config, cfg_file_proto)
# Read cfg file options
is_production = strtobool(config["exp"]["production"])
cfg_file_proto_chunk = config["cfg_proto"]["cfg_proto_chunk"]
cmd = config["exp"]["cmd"]
N_ep = int(config["exp"]["N_epochs_tr"])
# Zero-padded format string wide enough for the epoch count (e.g. "02d").
N_ep_str_format = "0" + str(max(math.ceil(np.log10(N_ep)), 1)) + "d"
tr_data_lst = config["data_use"]["train_with"].split(",")
valid_data_lst = config["data_use"]["valid_with"].split(",")
forward_data_lst = config["data_use"]["forward_with"].split(",")
max_seq_length_train = config["batches"]["max_seq_length_train"]
forward_save_files = list(map(strtobool, config["forward"]["save_out_file"].split(",")))
print("- Reading config file......OK!")
# Copy the global cfg file into the output folder
cfg_file = out_folder + "/conf.cfg"
with open(cfg_file, "w") as configfile:
    config.write(configfile)
# Load the run_nn function from core libriary
# The run_nn is a function that process a single chunk of data
run_nn_script = config["exp"]["run_nn_script"].split(".py")[0]
module = importlib.import_module("core")
run_nn = getattr(module, run_nn_script)
# Splitting data into chunks (see out_folder/additional_files)
create_lists(config)
# Writing the config files
create_configs(config)
print("- Chunk creation......OK!\n")
# create res_file (truncated/created empty; results are appended later)
res_file_path = out_folder + "/res.res"
res_file = open(res_file_path, "w")
res_file.close()
# Learning rates and architecture-specific optimization parameters
arch_lst = get_all_archs(config)
lr = {}
auto_lr_annealing = {}
improvement_threshold = {}
halving_factor = {}
pt_files = {}
for arch in arch_lst:
    # Per-epoch learning-rate list; a "|"-separated spec disables auto annealing.
    lr[arch] = expand_str_ep(config[arch]["arch_lr"], "float", N_ep, "|", "*")
    if len(config[arch]["arch_lr"].split("|")) > 1:
        auto_lr_annealing[arch] = False
    else:
        auto_lr_annealing[arch] = True
    improvement_threshold[arch] = float(config[arch]["arch_improvement_threshold"])
    halving_factor[arch] = float(config[arch]["arch_halving_factor"])
    pt_files[arch] = config[arch]["arch_pretrain_file"]
# If production, skip training and forward directly from last saved models
if is_production:
    ep = N_ep - 1
    N_ep = 0
    model_files = {}
    for arch in pt_files.keys():
        model_files[arch] = out_folder + "/exp_files/final_" + arch + ".pkl"
op_counter = 1  # used to detect the next configuration file from the list_chunks.txt
# Reading the ordered list of config file to process
cfg_file_list = [line.rstrip("\n") for line in open(out_folder + "/exp_files/list_chunks.txt")]
cfg_file_list.append(cfg_file_list[-1])
# A variable that tells if the current chunk is the first one that is being processed:
processed_first = True
data_name = []
data_set = []
data_end_index = []
fea_dict = []
lab_dict = []
arch_dict = []
# GAN-specific setup: create output dirs and optionally copy pretrained G/D weights.
if config["gan"]["arch_gan"] == "True":
    gan_on = True
    # Checking directories
    directory_g = os.path.join(out_folder, config["gan"]["output_path_g"])
    directory_d = os.path.join(out_folder, config["gan"]["output_path_d"])
    gan_dir = os.path.dirname(directory_g)
    if not os.path.exists(gan_dir):
        os.mkdir(gan_dir)
    if not os.path.exists(gan_dir + "/images"):
        os.mkdir(gan_dir + "/images")
    # Optional pretrained generator/discriminator; the keys may be absent.
    try:
        if str(config["generator"]["pretrained_file"]) != "none":
            if os.path.exists(str(config["generator"]["pretrained_file"])):
                copyfile(str(config["generator"]["pretrained_file"]), directory_g)
                print("Loaded pretrained G.")
    except KeyError:
        pass
    try:
        if str(config["discriminator"]["pretrained_file"]) != "none":
            if os.path.exists(str(config["discriminator"]["pretrained_file"])):
                copyfile(str(config["discriminator"]["pretrained_file"]), directory_d)
                print("Loaded pretrained D.")
    except KeyError:
        pass
else:
    gan_on = False
def print_settings():
    """Print a formatted summary of the experiment configuration to stdout.

    Reads the module-level ``config``, ``N_ep`` and ``gan_on`` globals.
    Optional config keys are probed with try/except because they may be
    absent from older config files.
    """
    print_width = 72
    print(" SETTINGS ".center(print_width, "="))
    print("# Epochs:\t\t", N_ep)
    print("# Batch size:\t\t", int(config["batches"]["batch_size_train"]))
    print("# Seed:\t\t\t", int(config["exp"]["seed"]))
    print("# Weights and Biases:\t", str(config["wandb"]["wandb"]))
    print("# GAN training:\t\t", str(config["gan"]["arch_gan"]))
    print("")
    print(" Acoustic Model settings ".center(print_width, "-"))
    print("# Name:\t\t\t", str(config["architecture1"]["arch_name"]))
    print("# Learning rate:\t", float(config["architecture1"]["arch_lr"]))
    print("# Halving factor:\t", float(config["architecture1"]["arch_halving_factor"]))
    print("# Improvement threshold:", float(config["architecture1"]["arch_improvement_threshold"]))
    print("# Optimizer:\t\t", str(config["architecture1"]["arch_opt"]))
    # "double_features" is optional; only printed when present and enabled.
    try:
        if config["gan"]["double_features"] == "True":
            print("# Double features:\t", config["gan"]["double_features"])
    except KeyError:
        pass
    if gan_on:
        print("")
        print(" Generator Architecture ".center(print_width, "-"))
        print("# Name:\t\t\t", str(config["generator"]["arch_name"]))
    print("=".center(print_width, "="), end = "\n\n")
print_settings()
# Weights & Biases setup: initialize a new run, or resume the run whose
# id/name were stored in wandb_details.txt on a previous invocation.
if str(config["wandb"]["wandb"]) == "True":
    wandb_cfg = wandb.load_cfg_dict_from_yaml(str(config["wandb"]["config"]))
    # UPDATE config file if Weights and Biases file is different
    wandb_cfg["max_epochs"] = int(config["exp"]["N_epochs_tr"])
    wandb_cfg["seed"] = int(config["exp"]["seed"])
    wandb_cfg["batch_size"] = int(config["batches"]["batch_size_train"])
    wandb_cfg["lr"] = float(config["architecture1"]["arch_lr"])
    wandb_cfg["gan_on"] = config["gan"]["arch_gan"]
    wandb_details = os.path.join(out_folder, "wandb_details.txt")
    if not os.path.exists(wandb_details):
        # First run: start fresh and persist the run id/name for later resumes.
        wandb_details_file = open(wandb_details, "w")
        wandb.initialize_wandb(project = str(config["wandb"]["project"]),
                               config = wandb_cfg,
                               directory = out_folder,
                               resume = False)
        try:
            wandb_details_file.write(wandb.get_run_id() + '\n')
            wandb_details_file.write(wandb.get_run_name())
        except TypeError:
            pass
        wandb_details_file.close()
    else:
        # Resume: recover the stored run id/name (may be missing/partial).
        wandb_details_file = open(wandb_details, "r")
        file_content = wandb_details_file.read().splitlines()
        try:
            wandb_run_id = file_content[0]
            wandb_run_name = file_content[1]
        except IndexError:
            wandb_run_id = ""
            wandb_run_name = ""
            pass
        wandb_details_file.close()
        if not wandb_run_id == "":
            wandb.initialize_wandb(project = str(config["wandb"]["project"]),
                                   config = wandb_cfg,
                                   directory = out_folder,
                                   resume = True,
                                   identity = wandb_run_id,
                                   name = wandb_run_name)
        else:
            wandb.initialize_wandb(project = str(config["wandb"]["project"]),
                                   config = wandb_cfg,
                                   directory = out_folder,
                                   resume = True)
    # decode_only: log only decoding results, not per-chunk training metrics.
    if str(config["wandb"]["decode_only"]) == "True":
        wandb_decode_only = True
        wandb_on = False
    else:
        wandb_on = True
        wandb_decode_only = False
    wandb.quick_log("status", "training", commit = False)
else:
    wandb_on = False
    wandb_decode_only = False
# Optional [ganset] section: when enabled, this run only generates a dataset.
create_gan_dataset = False
try:
    if config["ganset"]["create_set"] == "True":
        create_gan_dataset = True
        print("\nGAN dataset will be created.\n")
        # Output folder creation
        gan_out_folder = config["ganset"]["out_folder"]
        if not os.path.exists(gan_out_folder):
            os.makedirs(gan_out_folder)
except KeyError:
    pass
# fine_tuning=False limits training to the first two chunks per epoch (see loop).
fine_tuning = True
try:
    if config["exp"]["fine_tuning"] == "False":
        fine_tuning = False
except KeyError:
    pass
# --------TRAINING LOOP--------#
# One iteration per epoch; within an epoch, each training dataset is split
# into chunks that are trained (and optionally validated) one at a time.
for ep in range(N_ep):
    if wandb_on:
        wandb.quick_log("epoch", ep + 1, commit = False)
    processed_first = True
    tr_loss_tot = 0
    tr_error_tot = 0
    tr_time_tot = 0
    val_time_tot = 0
    print(
        "------------------------------ Epoch %s / %s ------------------------------"
        % (format(ep + 1, N_ep_str_format), format(N_ep, N_ep_str_format))
    )
    for tr_data in tr_data_lst:
        # Compute the total number of chunks for each training epoch
        N_ck_tr = compute_n_chunks(out_folder, tr_data, ep, N_ep_str_format, "train")
        N_ck_str_format = "0" + str(max(math.ceil(np.log10(N_ck_tr)), 1)) + "d"
        # ***Epoch training***
        for ck in range(N_ck_tr):
            # Without fine-tuning, only the first two chunks are trained.
            if not fine_tuning and ck > 1:
                break
            # Get training time per chunk
            import time
            starting_time = time.time()
            print_chunk_time = False
            if wandb_on:
                wandb.quick_log("chunk", ck + 1, commit = True)
            # paths of the output files (info,model,chunk_specific cfg file)
            info_file = (
                out_folder
                + "/exp_files/train_"
                + tr_data
                + "_ep"
                + format(ep, N_ep_str_format)
                + "_ck"
                + format(ck, N_ck_str_format)
                + ".info"
            )
            # Remember the previous chunk's model files so they can be deleted.
            if ep + ck == 0:
                model_files_past = {}
            else:
                model_files_past = model_files
            model_files = {}
            for arch in pt_files.keys():
                model_files[arch] = info_file.replace(".info", "_" + arch + ".pkl")
            config_chunk_file = (
                out_folder
                + "/exp_files/train_"
                + tr_data
                + "_ep"
                + format(ep, N_ep_str_format)
                + "_ck"
                + format(ck, N_ck_str_format)
                + ".cfg"
            )
            # update learning rate in the cfg file (if needed)
            change_lr_cfg(config_chunk_file, lr, ep)
            # if this chunk has not already been processed, do training...
            # (the .info file doubles as a "chunk done" marker for resuming)
            if not (os.path.exists(info_file)):
                print_chunk_time = True
                print("Training %s chunk = %i / %i" % (tr_data, ck + 1, N_ck_tr))
                # getting the next chunk
                next_config_file = cfg_file_list[op_counter]
                [data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict] = run_nn(
                    data_name,
                    data_set,
                    data_end_index,
                    fea_dict,
                    lab_dict,
                    arch_dict,
                    config_chunk_file,
                    processed_first,
                    next_config_file,
                    wandb_on = wandb_on,
                    epoch = ep,
                    chunk = ck + 1
                )
                # update the first_processed variable
                processed_first = False
                if not (os.path.exists(info_file)):
                    sys.stderr.write(
                        "ERROR: training epoch %i, chunk %i not done! File %s does not exist.\nSee %s \n"
                        % (ep, ck, info_file, log_file)
                    )
                    sys.exit(0)
                # update the operation counter
                op_counter += 1
            # update pt_file (used to initialized the DNN for the next chunk)
            for pt_arch in pt_files.keys():
                pt_files[pt_arch] = (
                    out_folder
                    + "/exp_files/train_"
                    + tr_data
                    + "_ep"
                    + format(ep, N_ep_str_format)
                    + "_ck"
                    + format(ck, N_ck_str_format)
                    + "_"
                    + pt_arch
                    + ".pkl"
                )
            # remove previous pkl files
            if len(model_files_past.keys()) > 0:
                for pt_arch in pt_files.keys():
                    if os.path.exists(model_files_past[pt_arch]):
                        os.remove(model_files_past[pt_arch])
            # Mid-epoch validation: only after scheduled chunks, only on the last
            # training dataset, and never while building a GAN dataset.
            if do_validation_after_chunk(ck, N_ck_tr, config) and (tr_data == tr_data_lst[-1]) and not(create_gan_dataset):
                if not _is_first_validation(ep,ck, N_ck_tr, config):
                    valid_peformance_dict_prev = valid_peformance_dict
                valid_peformance_dict = {}
                for valid_data in valid_data_lst:
                    N_ck_valid = compute_n_chunks(out_folder, valid_data, ep, N_ep_str_format, "valid")
                    N_ck_str_format_val = "0" + str(max(math.ceil(np.log10(N_ck_valid)), 1)) + "d"
                    for ck_val in range(N_ck_valid):
                        info_file = get_val_info_file_path(
                            out_folder,
                            valid_data,
                            ep,
                            ck,
                            ck_val,
                            N_ep_str_format,
                            N_ck_str_format,
                            N_ck_str_format_val,
                        )
                        config_chunk_file = get_val_cfg_file_path(
                            out_folder,
                            valid_data,
                            ep,
                            ck,
                            ck_val,
                            N_ep_str_format,
                            N_ck_str_format,
                            N_ck_str_format_val,
                        )
                        if not (os.path.exists(info_file)):
                            print("Validating %s chunk = %i / %i" % (valid_data, ck_val + 1, N_ck_valid))
                            next_config_file = cfg_file_list[op_counter]
                            data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict = run_nn(
                                data_name,
                                data_set,
                                data_end_index,
                                fea_dict,
                                lab_dict,
                                arch_dict,
                                config_chunk_file,
                                processed_first,
                                next_config_file,
                                wandb_on = wandb_on,
                            )
                            processed_first = False
                            if not (os.path.exists(info_file)):
                                sys.stderr.write(
                                    "ERROR: validation on epoch %i, chunk %i, valid chunk %i of dataset %s not done! File %s does not exist.\nSee %s \n"
                                    % (ep, ck, ck_val, valid_data, info_file, log_file)
                                )
                                sys.exit(0)
                            op_counter += 1
                    # Average loss/error/time over all validation chunks of this dataset.
                    valid_info_lst = sorted(
                        glob.glob(
                            get_val_info_file_path(
                                out_folder,
                                valid_data,
                                ep,
                                ck,
                                None,
                                N_ep_str_format,
                                N_ck_str_format,
                                N_ck_str_format_val,
                            )
                        )
                    )
                    valid_loss, valid_error, valid_time = compute_avg_performance(valid_info_lst)
                    valid_peformance_dict[valid_data] = [valid_loss, valid_error, valid_time]
                    val_time_tot += valid_time
                # Learning-rate annealing: halve the remaining epochs' lr when the
                # relative error improvement falls below the threshold.
                if not _is_first_validation(ep,ck, N_ck_tr, config):
                    err_valid_mean = np.mean(np.asarray(list(valid_peformance_dict.values()))[:, 1])
                    err_valid_mean_prev = np.mean(np.asarray(list(valid_peformance_dict_prev.values()))[:, 1])
                    for lr_arch in lr.keys():
                        if ep < N_ep - 1 and auto_lr_annealing[lr_arch]:
                            if ((err_valid_mean_prev - err_valid_mean) / err_valid_mean) < improvement_threshold[
                                lr_arch
                            ]:
                                new_lr_value = float(lr[lr_arch][ep]) * halving_factor[lr_arch]
                                for i in range(ep + 1, N_ep):
                                    lr[lr_arch][i] = str(new_lr_value)
            ending_time = time.time()
            if print_chunk_time:
                chunk_time = ending_time - starting_time
                print("Chunk time:", round(chunk_time), "s\n")
                if wandb_on:
                    wandb.quick_log("chunk_time", chunk_time, commit=False)
        # Training Loss and Error
        tr_info_lst = sorted(
            glob.glob(out_folder + "/exp_files/train_" + tr_data + "_ep" + format(ep, N_ep_str_format) + "*.info")
        )
        [tr_loss, tr_error, tr_time] = compute_avg_performance(tr_info_lst)
        tr_loss_tot = tr_loss_tot + tr_loss
        tr_error_tot = tr_error_tot + tr_error
        tr_time_tot = tr_time_tot + tr_time
        tot_time = tr_time + val_time_tot
        if not create_gan_dataset:
            if fine_tuning:
                # Print results in both res_file and stdout
                dump_epoch_results(
                    res_file_path,
                    ep,
                    tr_data_lst,
                    tr_loss_tot,
                    tr_error_tot,
                    tot_time,
                    valid_data_lst,
                    valid_peformance_dict,
                    lr,
                    N_ep,
                )
            if wandb_on:
                for lr_arch in lr.keys():
                    wandb.quick_log("learning_rate", float(lr[lr_arch][ep]), commit = False)
                for valid_data in valid_data_lst:
                    wandb.quick_log("valid_loss_" + str(valid_data), float(valid_peformance_dict[valid_data][0]), commit = False)
                    wandb.quick_log("valid_error_" + str(valid_data), float(valid_peformance_dict[valid_data][1]), commit = False)
# Training has ended, copy the last .pkl to final_arch.pkl for production
for pt_arch in pt_files.keys():
    if os.path.exists(model_files[pt_arch]) and not os.path.exists(out_folder + "/exp_files/final_" + pt_arch + ".pkl"):
        copyfile(model_files[pt_arch], out_folder + "/exp_files/final_" + pt_arch + ".pkl")
# Terminate application if GAN dataset creation is set
try:
    if config["ganset"]["create_set"] == "True":
        print("\nGAN dataset created!")
        exit()
except KeyError:
    pass
# --------FORWARD--------#
# Run the trained network(s) over the forward datasets, producing the .ark
# posterior files that the decoding stage consumes. On CPU, chunks are
# forwarded in parallel subprocesses; on GPU, sequentially in-process.
if wandb_on or wandb_decode_only:
    wandb.quick_log("status", "forwarding", commit = True)
for forward_data in forward_data_lst:
    # Compute the number of chunks
    N_ck_forward = compute_n_chunks(out_folder, forward_data, ep, N_ep_str_format, "forward")
    N_ck_str_format = "0" + str(max(math.ceil(np.log10(N_ck_forward)), 1)) + "d"
    processes = list()
    info_files = list()
    for ck in range(N_ck_forward):
        if not is_production:
            print("Testing %s chunk = %i / %i" % (forward_data, ck + 1, N_ck_forward))
        else:
            print("Forwarding %s chunk = %i / %i" % (forward_data, ck + 1, N_ck_forward))
        # output file
        info_file = (
            out_folder
            + "/exp_files/forward_"
            + forward_data
            + "_ep"
            + format(ep, N_ep_str_format)
            + "_ck"
            + format(ck, N_ck_str_format)
            + ".info"
        )
        config_chunk_file = (
            out_folder
            + "/exp_files/forward_"
            + forward_data
            + "_ep"
            + format(ep, N_ep_str_format)
            + "_ck"
            + format(ck, N_ck_str_format)
            + ".cfg"
        )
        # Do forward if the chunk was not already processed
        if not (os.path.exists(info_file)):
            # Doing forward
            # getting the next chunk
            next_config_file = cfg_file_list[op_counter]
            # run chunk processing
            if _run_forwarding_in_subprocesses(config):
                # CPU path: load the chunk in a helper process, then forward it
                # in a dedicated subprocess (bounded by the configured cap).
                shared_list = list()
                output_folder = config["exp"]["out_folder"]
                save_gpumem = strtobool(config["exp"]["save_gpumem"])
                use_cuda = strtobool(config["exp"]["use_cuda"])
                p = read_next_chunk_into_shared_list_with_subprocess(
                    read_lab_fea, shared_list, config_chunk_file, is_production, output_folder, wait_for_process=True
                )
                data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set_dict = extract_data_from_shared_list(
                    shared_list
                )
                data_set_inp, data_set_ref = convert_numpy_to_torch(data_set_dict, save_gpumem, use_cuda)
                data_set = {"input": data_set_inp, "ref": data_set_ref}
                data_end_index = {"fea": data_end_index_fea, "lab": data_end_index_lab}
                p = multiprocessing.Process(
                    target=run_nn,
                    kwargs={
                        "data_name": data_name,
                        "data_set": data_set,
                        "data_end_index": data_end_index,
                        "fea_dict": fea_dict,
                        "lab_dict": lab_dict,
                        "arch_dict": arch_dict,
                        "cfg_file": config_chunk_file,
                        "processed_first": False,
                        "next_config_file": None,
                    },
                )
                processes.append(p)
                # Throttle: wait for the oldest process when the cap is reached.
                if _max_nr_of_parallel_forwarding_processes(config) != -1 and len(
                    processes
                ) > _max_nr_of_parallel_forwarding_processes(config):
                    processes[0].join()
                    del processes[0]
                p.start()
            else:
                # GPU path: forward the chunk synchronously in this process.
                [data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict] = run_nn(
                    data_name,
                    data_set,
                    data_end_index,
                    fea_dict,
                    lab_dict,
                    arch_dict,
                    config_chunk_file,
                    processed_first,
                    next_config_file,
                    wandb_on = wandb_on,
                )
                processed_first = False
                if not (os.path.exists(info_file)):
                    sys.stderr.write(
                        "ERROR: forward chunk %i of dataset %s not done! File %s does not exist.\nSee %s \n"
                        % (ck, forward_data, info_file, log_file)
                    )
                    sys.exit(0)
            info_files.append(info_file)
            # update the operation counter
            op_counter += 1
    # Wait for all subprocesses and verify each chunk produced its .info file.
    if _run_forwarding_in_subprocesses(config):
        for process in processes:
            process.join()
        for info_file in info_files:
            if not (os.path.exists(info_file)):
                sys.stderr.write(
                    "ERROR: File %s does not exist. Forwarding did not suceed.\nSee %s \n" % (info_file, log_file)
                )
                sys.exit(0)
# --------DECODING--------#
if wandb_on or wandb_decode_only:
    wandb.quick_log("status", "decoding", commit = True)
dec_lst = glob.glob(out_folder + "/exp_files/*_to_decode.ark")
forward_data_lst = config["data_use"]["forward_with"].split(",")
forward_outs = config["forward"]["forward_out"].split(",")
forward_dec_outs = list(map(strtobool, config["forward"]["require_decoding"].split(",")))
def get_wer_stats(word_error_rate_string):
    """Parse a Kaldi WER summary line into numeric statistics.

    Expected token layout (space-separated):
    ``%WER <wer> [ <err> / <tot>, <ins> ins, <del> del, <sub> sub ...``

    Returns a 5-tuple: (wer %, total words, insertions, deletions,
    substitutions).
    """
    tokens = word_error_rate_string.split(" ")
    wer = float(tokens[1])
    # The total-word token carries a trailing comma (e.g. "52576,").
    total_words = int(tokens[5].replace(",", ""))
    insertions = int(tokens[6])
    deletions = int(tokens[8])
    substitutions = int(tokens[10])
    return wer, total_words, insertions, deletions, substitutions
def get_unique_filename(results_file_name):
    """Return a results-file path that does not collide with an existing file.

    If ``results_file_name`` is free it is returned unchanged; otherwise a
    ``__<n>`` suffix (n = 1, 2, ...) is inserted before the ``.txt``
    extension until an unused name is found.
    """
    if not os.path.exists(results_file_name):
        return results_file_name
    # Strip the extension, then probe numbered candidates in order.
    base_name = results_file_name.replace(".txt", "")
    suffix_number = 1
    while True:
        candidate = base_name + "__" + str(suffix_number) + ".txt"
        if not os.path.exists(candidate):
            return candidate
        suffix_number += 1
def store_wer_stats(run_name, dataset, word_error_rate_string):
    """Persist one WER summary line under the shared ``results/`` directory.

    The file is named ``results/<dataset_name>__<dataset>__<run_name>.txt``
    (with ``dataset_name`` taken from the global ``config``); an existing
    file is never overwritten thanks to ``get_unique_filename``.
    """
    if not os.path.exists("results"):
        os.makedirs("results")
    file_name = "results/" + config["exp"]["dataset_name"] + "__" + dataset + "__" + run_name + ".txt"
    file_name = get_unique_filename(file_name)
    with open(file_name, "w") as handle:
        handle.write(word_error_rate_string)
# Optional early exit before Kaldi decoding (controlled by the global flag).
if skip_decode:
    exit(0)
# For every forward dataset and every output requiring decoding: build a
# chunk-specific decoding config, run the Kaldi decoding script, then score
# and record the resulting WER.
for data in forward_data_lst:
    for k in range(len(forward_outs)):
        if forward_dec_outs[k]:
            print("Decoding %s output %s" % (data, forward_outs[k]))
            info_file = out_folder + "/exp_files/decoding_" + data + "_" + forward_outs[k] + ".info"
            # create decode config file
            config_dec_file = out_folder + "/decoding_" + data + "_" + forward_outs[k] + ".conf"
            config_dec = configparser.ConfigParser()
            config_dec.add_section("decoding")
            for dec_key in config["decoding"].keys():
                config_dec.set("decoding", dec_key, config["decoding"][dec_key])
            # add graph_dir, datadir, alidir
            lab_field = config[cfg_item2sec(config, "data_name", data)]["lab"]
            # Production case, we don't have labels
            if not is_production:
                pattern = "lab_folder=(.*)\nlab_opts=(.*)\nlab_count_file=(.*)\nlab_data_folder=(.*)\nlab_graph=(.*)"
                alidir = re.findall(pattern, lab_field)[0][0]
                config_dec.set("decoding", "alidir", os.path.abspath(alidir))
                datadir = re.findall(pattern, lab_field)[0][3]
                config_dec.set("decoding", "data", os.path.abspath(datadir))
                graphdir = re.findall(pattern, lab_field)[0][4]
                config_dec.set("decoding", "graphdir", os.path.abspath(graphdir))
            else:
                pattern = "lab_data_folder=(.*)\nlab_graph=(.*)"
                datadir = re.findall(pattern, lab_field)[0][0]
                config_dec.set("decoding", "data", os.path.abspath(datadir))
                graphdir = re.findall(pattern, lab_field)[0][1]
                config_dec.set("decoding", "graphdir", os.path.abspath(graphdir))
                # The ali dir is supposed to be in exp/model/ which is one level ahead of graphdir
                alidir = graphdir.split("/")[0 : len(graphdir.split("/")) - 1]
                alidir = "/".join(alidir)
                config_dec.set("decoding", "alidir", os.path.abspath(alidir))
            with open(config_dec_file, "w") as configfile:
                config_dec.write(configfile)
            out_folder = os.path.abspath(out_folder)
            files_dec = out_folder + "/exp_files/forward_" + data + "_ep*_ck*_" + forward_outs[k] + "_to_decode.ark"
            out_dec_folder = out_folder + "/decode_" + data + "_" + forward_outs[k]
            # The .info file marks decoding as already done (resume support).
            if not (os.path.exists(info_file)):
                # Run the decoder
                cmd_decode = (
                    cmd
                    + config["decoding"]["decoding_script_folder"]
                    + "/"
                    + config["decoding"]["decoding_script"]
                    + " "
                    + os.path.abspath(config_dec_file)
                    + " "
                    + out_dec_folder
                    + ' "'
                    + files_dec
                    + '"'
                )
                run_shell(cmd_decode, log_file)
                # remove ark files if needed
                if not forward_save_files[k]:
                    list_rem = glob.glob(files_dec)
                    for rem_ark in list_rem:
                        os.remove(rem_ark)
            # Print WER results and write info file
            cmd_res = "./check_res_dec.sh " + out_dec_folder
            wers = run_shell(cmd_res, log_file).decode("utf-8")
            res_file = open(res_file_path, "a")
            res_file.write("%s\n" % wers)
            print(wers)
            try:
                if len(wers) > 0:
                    w_error_rate, w_tot, w_ins, w_del, w_sub = get_wer_stats(wers)
                    store_wer_stats(config_file_name, data, wers)
                    if wandb_on or wandb_decode_only:
                        wandb.quick_log("WER_" + data, w_error_rate, commit=True)
            except IOError:
                pass
if wandb_on or wandb_decode_only:
    wandb.quick_log("status", "complete", commit = True)
| 33,246 | 35.216776 | 152 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/quaternion_neural_networks.py | ##########################################################
# Quaternion Neural Networks
# Titouan Parcollet, Xinchi Qiu, Mirco Ravanelli
# University of Oxford and Mila, University of Montreal
# May 2020
##########################################################
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from torch.nn.utils.rnn import PackedSequence
from torch.nn import Module
import numpy as np
from scipy.stats import chi
from numpy.random import RandomState
from distutils.util import strtobool
import math
class QLSTM(nn.Module):
    """
    This class implements a straightforward QLSTM as described
    in "Quaternion Recurrent Neural Networks", Titouan P., ICLR 2019

    Please note that the autograd parameter is useful if you run out of
    VRAM. Set it to False, and the model will use a custom QuaternionLinear
    function that follows a custom backpropagation. The training will
    be even slower but will consume 4 times less VRAM.
    """

    def __init__(self, options,inp_dim):
        super(QLSTM, self).__init__()

        # Reading parameters
        self.input_dim=inp_dim
        # Per-layer hidden sizes, dropout rates and activation names.
        self.lstm_lay=list(map(int, options['lstm_lay'].split(',')))
        self.lstm_drop=list(map(float, options['lstm_drop'].split(',')))
        self.lstm_act=options['lstm_act'].split(',')
        self.bidir=strtobool(options['lstm_bidir'])
        self.use_cuda=strtobool(options['use_cuda'])
        self.autograd=strtobool(options['autograd'])
        self.to_do=options['to_do']

        # test_flag disables dropout outside of training.
        if self.to_do=='train':
            self.test_flag=False
        else:
            self.test_flag=True

        # List initialization
        self.wfx = nn.ModuleList([]) # Forget
        self.ufh = nn.ModuleList([]) # Forget

        self.wix = nn.ModuleList([]) # Input
        self.uih = nn.ModuleList([]) # Input

        self.wox = nn.ModuleList([]) # Output
        self.uoh = nn.ModuleList([]) # Output

        self.wcx = nn.ModuleList([]) # Cell state
        self.uch = nn.ModuleList([])  # Cell state

        self.act = nn.ModuleList([]) # Activations

        self.N_lstm_lay=len(self.lstm_lay)

        # Initialization of hidden layers
        current_input=self.input_dim
        for i in range(self.N_lstm_lay):

            # Activations
            self.act.append(act_fun(self.lstm_act[i]))

            add_bias=True

            # QuaternionLinearAutograd = Autograd (High VRAM consumption but faster)
            # QuaternionLinear = Custom Backward (Low VRAM consumption but slower)
            if(self.autograd):
                # Feed-forward connections
                self.wfx.append(QuaternionLinearAutograd(current_input, self.lstm_lay[i],bias=add_bias))
                self.wix.append(QuaternionLinearAutograd(current_input, self.lstm_lay[i],bias=add_bias))
                self.wox.append(QuaternionLinearAutograd(current_input, self.lstm_lay[i],bias=add_bias))
                self.wcx.append(QuaternionLinearAutograd(current_input, self.lstm_lay[i],bias=add_bias))

                # Recurrent connections
                self.ufh.append(QuaternionLinearAutograd(self.lstm_lay[i], self.lstm_lay[i],bias=False))
                self.uih.append(QuaternionLinearAutograd(self.lstm_lay[i], self.lstm_lay[i],bias=False))
                self.uoh.append(QuaternionLinearAutograd(self.lstm_lay[i], self.lstm_lay[i],bias=False))
                self.uch.append(QuaternionLinearAutograd(self.lstm_lay[i], self.lstm_lay[i],bias=False))
            else:
                # Feed-forward connections
                self.wfx.append(QuaternionLinear(current_input, self.lstm_lay[i],bias=add_bias))
                self.wix.append(QuaternionLinear(current_input, self.lstm_lay[i],bias=add_bias))
                self.wox.append(QuaternionLinear(current_input, self.lstm_lay[i],bias=add_bias))
                self.wcx.append(QuaternionLinear(current_input, self.lstm_lay[i],bias=add_bias))

                # Recurrent connections
                self.ufh.append(QuaternionLinear(self.lstm_lay[i], self.lstm_lay[i],bias=False))
                self.uih.append(QuaternionLinear(self.lstm_lay[i], self.lstm_lay[i],bias=False))
                self.uoh.append(QuaternionLinear(self.lstm_lay[i], self.lstm_lay[i],bias=False))
                self.uch.append(QuaternionLinear(self.lstm_lay[i], self.lstm_lay[i],bias=False))

            # A bidirectional layer doubles the input width of the next layer.
            if self.bidir:
                current_input=2*self.lstm_lay[i]
            else:
                current_input=self.lstm_lay[i]

        self.out_dim=self.lstm_lay[i]+self.bidir*self.lstm_lay[i]

    def forward(self, x):
        """Run the stacked QLSTM over a (time, batch, feature) input tensor.

        NOTE(review): the time-major layout is inferred from the per-step
        loop over ``x.shape[0]`` below — confirm against the caller.
        """

        for i in range(self.N_lstm_lay):

            # Initial state and concatenation
            if self.bidir:
                h_init = torch.zeros(2*x.shape[1], self.lstm_lay[i])
                # Backward direction is realized by appending a time-reversed
                # copy of the sequence along the batch dimension.
                x=torch.cat([x,flip(x,0)],1)
            else:
                h_init = torch.zeros(x.shape[1],self.lstm_lay[i])

            # Drop mask initilization (same mask for all time steps)
            if self.test_flag==False:
                drop_mask=torch.bernoulli(torch.Tensor(h_init.shape[0],h_init.shape[1]).fill_(1-self.lstm_drop[i]))
            else:
                drop_mask=torch.FloatTensor([1-self.lstm_drop[i]])

            if self.use_cuda:
                h_init=h_init.cuda()
                drop_mask=drop_mask.cuda()

            # Feed-forward affine transformations (all steps in parallel)
            wfx_out=self.wfx[i](x)
            wix_out=self.wix[i](x)
            wox_out=self.wox[i](x)
            wcx_out=self.wcx[i](x)

            # Processing time steps
            hiddens = []
            ct=h_init
            ht=h_init

            for k in range(x.shape[0]):

                # LSTM equations
                ft=torch.sigmoid(wfx_out[k]+self.ufh[i](ht))
                it=torch.sigmoid(wix_out[k]+self.uih[i](ht))
                ot=torch.sigmoid(wox_out[k]+self.uoh[i](ht))
                ct=it*self.act[i](wcx_out[k]+self.uch[i](ht))*drop_mask+ft*ct
                ht=ot*self.act[i](ct)

                hiddens.append(ht)

            # Stacking hidden states
            h=torch.stack(hiddens)

            # Bidirectional concatenations
            if self.bidir:
                h_f=h[:,0:int(x.shape[1]/2)]
                h_b=flip(h[:,int(x.shape[1]/2):x.shape[1]].contiguous(),0)
                h=torch.cat([h_f,h_b],2)

            # Setup x for the next hidden layer
            x=h

        return x
#
# From this point, the defined functions are PyTorch modules extending
# linear layers to the quaternion domain.
#
class QuaternionLinearAutograd(Module):
    r"""Applies a quaternion linear transformation to the incoming data.
    The backward process follows the Autograd scheme.

    ``in_features``/``out_features`` are the full (4x) widths; internally
    the four quaternion components (r, i, j, k) each get a quarter-size
    weight matrix.
    """

    def __init__(self, in_features, out_features, bias=True,
                 init_criterion='glorot', weight_init='quaternion',
                 seed=None):

        super(QuaternionLinearAutograd, self).__init__()
        self.in_features = in_features//4
        self.out_features = out_features//4
        self.r_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
        self.i_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
        self.j_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
        self.k_weight = Parameter(torch.Tensor(self.in_features, self.out_features))

        if bias:
            self.bias = Parameter(torch.Tensor(self.out_features*4))
        else:
            # NOTE(review): a plain tensor is assigned here instead of
            # register_parameter/register_buffer (unlike QuaternionLinear
            # below), so this zero bias is not part of the state_dict and
            # never moves with .to()/.cuda() — verify intent.
            self.bias = torch.zeros(self.out_features*4)
        self.init_criterion = init_criterion
        self.weight_init = weight_init
        self.seed = seed if seed is not None else np.random.randint(0,1234)
        self.rng = RandomState(self.seed)
        self.reset_parameters()

    def reset_parameters(self):
        # Weight-initialization scheme is selected by name; the init
        # functions are defined elsewhere in this module.
        winit = {'quaternion': quaternion_init, 'unitary': unitary_init, 'random': random_init}[self.weight_init]
        if self.bias is not None:
            self.bias.data.fill_(0)
        affect_init(self.r_weight, self.i_weight, self.j_weight, self.k_weight, winit,
                    self.rng, self.init_criterion)

    def forward(self, input):
        return quaternion_linear(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias)

    def __repr__(self):
        return self.__class__.__name__ + '(' \
            + 'in_features=' + str(self.in_features) \
            + ', out_features=' + str(self.out_features) \
            + ', bias=' + str(self.bias is not None) \
            + ', init_criterion=' + str(self.init_criterion) \
            + ', weight_init=' + str(self.weight_init) \
            + ', seed=' + str(self.seed) + ')'
class QuaternionLinear(Module):
    r"""A custom Autograd function is call to drastically reduce the VRAM consumption.
    Nonetheless, computing time is increased compared to QuaternionLinearAutograd().

    Same interface as QuaternionLinearAutograd, but forward dispatches to
    QuaternionLinearFunction (a custom torch.autograd.Function defined
    elsewhere in this module).
    """

    def __init__(self, in_features, out_features, bias=True,
                 init_criterion='glorot', weight_init='quaternion',
                 seed=None):

        super(QuaternionLinear, self).__init__()
        self.in_features = in_features//4
        self.out_features = out_features//4
        self.r_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
        self.i_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
        self.j_weight = Parameter(torch.Tensor(self.in_features, self.out_features))
        self.k_weight = Parameter(torch.Tensor(self.in_features, self.out_features))

        if bias:
            self.bias = Parameter(torch.Tensor(self.out_features*4))
        else:
            self.register_parameter('bias', None)

        self.init_criterion = init_criterion
        self.weight_init = weight_init
        self.seed = seed if seed is not None else np.random.randint(0,1234)
        self.rng = RandomState(self.seed)
        self.reset_parameters()

    def reset_parameters(self):
        winit = {'quaternion': quaternion_init,
                 'unitary': unitary_init}[self.weight_init]
        if self.bias is not None:
            self.bias.data.fill_(0)
        affect_init(self.r_weight, self.i_weight, self.j_weight, self.k_weight, winit,
                    self.rng, self.init_criterion)

    def forward(self, input):
        # See the autograd section for explanation of what happens here.
        # 3-D (time, batch, feat) inputs are flattened to 2-D for the custom
        # function, then reshaped back afterwards.
        if input.dim() == 3:
            T, N, C = input.size()
            input = input.view(T * N, C)
            output = QuaternionLinearFunction.apply(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias)
            output = output.view(T, N, output.size(1))
        elif input.dim() == 2:
            output = QuaternionLinearFunction.apply(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias)
        else:
            raise NotImplementedError

        return output

    def __repr__(self):
        return self.__class__.__name__ + '(' \
            + 'in_features=' + str(self.in_features) \
            + ', out_features=' + str(self.out_features) \
            + ', bias=' + str(self.bias is not None) \
            + ', init_criterion=' + str(self.init_criterion) \
            + ', weight_init=' + str(self.weight_init) \
            + ', seed=' + str(self.seed) + ')'
#
# Thereafter are utility functions needed by the above classes
#
def flip(x, dim):
    """Return a copy of ``x`` with the elements along dimension ``dim`` reversed.

    Negative ``dim`` indices are accepted (handled natively by ``torch.flip``),
    matching the original hand-rolled implementation.  Delegating to
    ``torch.flip`` replaces the original view/fancy-index dance (including the
    ``getattr(..., ('cpu','cuda')[x.is_cuda])`` trick) with one native call
    that works on CPU and CUDA tensors alike.
    """
    return torch.flip(x, [dim])
def act_fun(act_type):
    """Map an activation name from the config file to a fresh ``nn.Module``.

    Supported names: relu, prelu, tanh, sigmoid, hardtanh, leaky_relu, elu,
    softmax (LogSoftmax over dim 1) and linear.

    Raises:
        ValueError: for an unknown name.  The original if-chain silently fell
        through and returned ``None``, which later surfaced as a cryptic
        "'NoneType' object is not callable" at the first forward pass.
    """
    factories = {
        "relu": nn.ReLU,
        "prelu": nn.PReLU,
        "tanh": nn.Tanh,
        "sigmoid": nn.Sigmoid,
        "hardtanh": nn.Hardtanh,
        "leaky_relu": lambda: nn.LeakyReLU(0.2),
        "elu": nn.ELU,
        "softmax": lambda: nn.LogSoftmax(dim=1),
        # initializzed like this, but not used in forward!
        "linear": lambda: nn.LeakyReLU(1),
    }
    try:
        return factories[act_type]()
    except KeyError:
        raise ValueError("Unknown activation type: " + str(act_type)) from None
def check_input(input):
    """Validate that ``input`` is a well-formed quaternion batch.

    Accepts only 2-D or 3-D tensors whose trailing dimension is a multiple
    of 4 (one slot per quaternion component); raises RuntimeError otherwise.
    """
    ndim = input.dim()
    if ndim != 2 and ndim != 3:
        raise RuntimeError(
            "quaternion linear accepts only input of dimension 2 or 3."
            " input.dim = " + str(ndim)
        )
    nb_hidden = input.size()[-1]
    if nb_hidden % 4 != 0:
        raise RuntimeError(
            "Quaternion Tensors must be divisible by 4."
            " input.size()[1] = " + str(nb_hidden)
        )
#
# Quaternion getters!
#
def get_r(input):
    """Return the real component: the first quarter of the last axis."""
    check_input(input)
    quarter = input.size()[-1] // 4
    axis = input.dim() - 1  # last axis: 1 for 2-D input, 2 for 3-D input
    return input.narrow(axis, 0, quarter)
def get_i(input):
    """Return the i component: the second quarter of the last axis."""
    check_input(input)
    quarter = input.size()[-1] // 4
    axis = input.dim() - 1  # last axis for both 2-D and 3-D inputs
    return input.narrow(axis, quarter, quarter)
def get_j(input):
    """Return the j component: the third quarter of the last axis."""
    check_input(input)
    quarter = input.size()[-1] // 4
    axis = input.dim() - 1  # last axis for both 2-D and 3-D inputs
    return input.narrow(axis, 2 * quarter, quarter)
def get_k(input):
    """Return the k component: the last quarter of the last axis."""
    check_input(input)
    quarter = input.size()[-1] // 4
    axis = input.dim() - 1  # last axis for both 2-D and 3-D inputs
    return input.narrow(axis, 3 * quarter, quarter)
def quaternion_linear(input, r_weight, i_weight, j_weight, k_weight, bias):
"""
Applies a quaternion linear transformation to the incoming data:
It is important to notice that the forward phase of a QNN is defined
as W * Inputs (with * equal to the Hamilton product). The constructed
cat_kernels_4_quaternion is a modified version of the quaternion representation
so when we do torch.mm(Input,W) it's equivalent to W * Inputs.
"""
cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
cat_kernels_4_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=0)
cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=0)
cat_kernels_4_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=0)
cat_kernels_4_quaternion = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=1)
if input.dim() == 2 :
if bias is not None:
return torch.addmm(bias, input, cat_kernels_4_quaternion)
else:
return torch.mm(input, cat_kernels_4_quaternion)
else:
output = torch.matmul(input, cat_kernels_4_quaternion)
if bias is not None:
return output+bias
else:
return output
# Custom AUTOGRAD for lower VRAM consumption
class QuaternionLinearFunction(torch.autograd.Function):
    """Quaternion linear layer with a hand-written backward pass.

    The forward builds the real block ("Hamilton") matrix on the fly instead
    of storing it, trading compute for memory; the backward rebuilds the
    matrices it needs from the saved component tensors.
    """
    @staticmethod
    def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias=None):
        # Only the small per-component tensors are saved, not the 4x-larger
        # concatenated kernel — that is the VRAM saving.
        ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight, bias)
        check_input(input)
        # Same block-matrix construction as quaternion_linear().
        cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
        cat_kernels_4_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=0)
        cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=0)
        cat_kernels_4_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=0)
        cat_kernels_4_quaternion = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=1)
        if input.dim() == 2 :
            if bias is not None:
                return torch.addmm(bias, input, cat_kernels_4_quaternion)
            else:
                return torch.mm(input, cat_kernels_4_quaternion)
        else:
            output = torch.matmul(input, cat_kernels_4_quaternion)
            if bias is not None:
                return output+bias
            else:
                return output
    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        input, r_weight, i_weight, j_weight, k_weight, bias = ctx.saved_tensors
        grad_input = grad_weight_r = grad_weight_i = grad_weight_j = grad_weight_k = grad_bias = None
        # (1) Transposed Hamilton matrix built from the weights, used to
        # back-propagate the gradient to the layer input.
        input_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
        input_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=0)
        input_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=0)
        input_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=0)
        cat_kernels_4_quaternion_T = Variable(torch.cat([input_r, input_i, input_j, input_k], dim=1).permute(1,0), requires_grad=False)
        # (2) Hamilton-style block matrix built from the *input* components,
        # used for the weight gradients.
        r = get_r(input)
        i = get_i(input)
        j = get_j(input)
        k = get_k(input)
        input_r = torch.cat([r, -i, -j, -k], dim=0)
        input_i = torch.cat([i, r, -k, j], dim=0)
        input_j = torch.cat([j, k, r, -i], dim=0)
        input_k = torch.cat([k, -j, i, r], dim=0)
        input_mat = Variable(torch.cat([input_r, input_i, input_j, input_k], dim=1), requires_grad=False)
        # (3) Rearranged grad_output blocks (note the sign pattern differs
        # from the forward construction — this is the conjugate arrangement).
        r = get_r(grad_output)
        i = get_i(grad_output)
        j = get_j(grad_output)
        k = get_k(grad_output)
        input_r = torch.cat([r, i, j, k], dim=1)
        input_i = torch.cat([-i, r, k, -j], dim=1)
        input_j = torch.cat([-j, -k, r, i], dim=1)
        input_k = torch.cat([-k, j, -i, r], dim=1)
        grad_mat = torch.cat([input_r, input_i, input_j, input_k], dim=0)
        # Compute only the gradients autograd actually asked for.
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.mm(cat_kernels_4_quaternion_T)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_mat.permute(1,0).mm(input_mat).permute(1,0)
            # Slice the big gradient matrix back into the four components.
            unit_size_x = r_weight.size(0)
            unit_size_y = r_weight.size(1)
            grad_weight_r = grad_weight.narrow(0,0,unit_size_x).narrow(1,0,unit_size_y)
            grad_weight_i = grad_weight.narrow(0,0,unit_size_x).narrow(1,unit_size_y,unit_size_y)
            grad_weight_j = grad_weight.narrow(0,0,unit_size_x).narrow(1,unit_size_y*2,unit_size_y)
            grad_weight_k = grad_weight.narrow(0,0,unit_size_x).narrow(1,unit_size_y*3,unit_size_y)
        if ctx.needs_input_grad[5]:
            grad_bias = grad_output.sum(0).squeeze(0)
        return grad_input, grad_weight_r, grad_weight_i, grad_weight_j, grad_weight_k, grad_bias
#
# PARAMETERS INITIALIZATION
#
def unitary_init(in_features, out_features, rng, kernel_size=None, criterion='he'):
    """Draw four weight components forming (near-)unit quaternions.

    Components are sampled uniformly in [-s, s] (s from the 'glorot' or 'he'
    fan rule, scaled by sqrt(3)) and each quaternion is then normalized to
    approximately unit length.

    Returns a tuple (v_r, v_i, v_j, v_k) of numpy arrays shaped
    ``(in_features, out_features)`` (or the conv kernel shape when
    ``kernel_size`` is given).

    NOTE(review): the ``rng`` argument is accepted but unused — sampling goes
    through the global ``np.random`` state; confirm before relying on ``rng``
    for reproducibility.
    """
    if kernel_size is not None:
        receptive_field = np.prod(kernel_size)
        fan_in = in_features * receptive_field
        fan_out = out_features * receptive_field
    else:
        fan_in = in_features
        fan_out = out_features
    if criterion == 'glorot':
        s = 1. / np.sqrt(2*(fan_in + fan_out))
    elif criterion == 'he':
        s = 1. / np.sqrt(2*fan_in)
    else:
        raise ValueError('Invalid criterion: ' + criterion)
    if kernel_size is None:
        kernel_shape = (in_features, out_features)
    else:
        if type(kernel_size) is int:
            kernel_shape = (out_features, in_features) + tuple((kernel_size,))
        else:
            kernel_shape = (out_features, in_features) + (*kernel_size,)
    s = np.sqrt(3.0) * s
    number_of_weights = np.prod(kernel_shape)
    v_r = np.random.uniform(-s, s, number_of_weights)
    v_i = np.random.uniform(-s, s, number_of_weights)
    v_j = np.random.uniform(-s, s, number_of_weights)
    v_k = np.random.uniform(-s, s, number_of_weights)
    # Unitary quaternion: vectorized replacement for the original per-element
    # Python loop — identical arithmetic (the +0.0001 guard against division
    # by zero is added *after* the sqrt, exactly as before), but a handful of
    # numpy calls instead of O(N) interpreter iterations.
    norm = np.sqrt(v_r ** 2 + v_i ** 2 + v_j ** 2 + v_k ** 2) + 0.0001
    v_r /= norm
    v_i /= norm
    v_j /= norm
    v_k /= norm
    v_r = v_r.reshape(kernel_shape)
    v_i = v_i.reshape(kernel_shape)
    v_j = v_j.reshape(kernel_shape)
    v_k = v_k.reshape(kernel_shape)
    return (v_r, v_i, v_j, v_k)
def random_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'):
    """Uniform [0, s] initialization of the four quaternion components.

    ``s`` follows the 'glorot' or 'he' fan rule; returns a tuple of four
    numpy arrays (r, i, j, k) with the dense or convolutional kernel shape.
    The ``rng`` argument is part of the initializer signature shared with
    the other init functions.
    """
    if kernel_size is None:
        fan_in, fan_out = in_features, out_features
    else:
        receptive_field = np.prod(kernel_size)
        fan_in = in_features * receptive_field
        fan_out = out_features * receptive_field
    if criterion == 'glorot':
        s = 1. / np.sqrt(2 * (fan_in + fan_out))
    elif criterion == 'he':
        s = 1. / np.sqrt(2 * fan_in)
    else:
        raise ValueError('Invalid criterion: ' + criterion)
    if kernel_size is None:
        kernel_shape = (in_features, out_features)
    elif type(kernel_size) is int:
        kernel_shape = (out_features, in_features) + (kernel_size,)
    else:
        kernel_shape = (out_features, in_features) + tuple(kernel_size)
    n_weights = np.prod(kernel_shape)
    # Draw the components in r, i, j, k order (preserves the stream of the
    # global numpy RNG), reshape, then scale by s.
    components = [
        np.random.uniform(0.0, 1.0, n_weights).reshape(kernel_shape) * s
        for _ in range(4)
    ]
    return tuple(components)
def quaternion_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'):
    """Polar quaternion initialization (Parcollet-style).

    Magnitudes are drawn from a chi distribution (4 dof, scale from the
    'glorot'/'he' fan rule), directions from normalized purely-imaginary
    quaternions, and phases uniformly in [-pi, pi); the four returned numpy
    arrays are r = |q| cos(phase) and (i, j, k) = |q| v sin(phase).

    NOTE(review): the ``rng`` parameter is immediately shadowed by a fresh
    RandomState seeded from the global numpy state, so reproducibility is
    controlled by ``np.random.seed``, not by the caller's ``rng``.
    """
    if kernel_size is not None:
        receptive_field = np.prod(kernel_size)
        fan_in = in_features * receptive_field
        fan_out = out_features * receptive_field
    else:
        fan_in = in_features
        fan_out = out_features
    if criterion == 'glorot':
        s = 1. / np.sqrt(2*(fan_in + fan_out))
    elif criterion == 'he':
        s = 1. / np.sqrt(2*fan_in)
    else:
        raise ValueError('Invalid criterion: ' + criterion)
    rng = RandomState(np.random.randint(1,1234))
    # Generating randoms and purely imaginary quaternions :
    if kernel_size is None:
        kernel_shape = (in_features, out_features)
    else:
        if type(kernel_size) is int:
            kernel_shape = (out_features, in_features) + tuple((kernel_size,))
        else:
            kernel_shape = (out_features, in_features) + (*kernel_size,)
    modulus = chi.rvs(4,loc=0,scale=s,size=kernel_shape)
    number_of_weights = np.prod(kernel_shape)
    v_i = np.random.normal(0,1.0,number_of_weights)
    v_j = np.random.normal(0,1.0,number_of_weights)
    v_k = np.random.normal(0,1.0,number_of_weights)
    # Purely imaginary quaternions unitary: vectorized replacement for the
    # original per-element Python loop — identical arithmetic (the +0.0001
    # guard sits *inside* the sqrt, exactly as before).
    norm = np.sqrt(v_i ** 2 + v_j ** 2 + v_k ** 2 + 0.0001)
    v_i = (v_i / norm).reshape(kernel_shape)
    v_j = (v_j / norm).reshape(kernel_shape)
    v_k = (v_k / norm).reshape(kernel_shape)
    phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)
    weight_r = modulus * np.cos(phase)
    weight_i = modulus * v_i*np.sin(phase)
    weight_j = modulus * v_j*np.sin(phase)
    weight_k = modulus * v_k*np.sin(phase)
    return (weight_r, weight_i, weight_j, weight_k)
def affect_init(r_weight, i_weight, j_weight, k_weight, init_func, rng, init_criterion):
    """Fill the four quaternion weight matrices in place via ``init_func``.

    ``init_func`` must return four numpy arrays of shape
    (r_weight.size(0), r_weight.size(1)); they are converted to tensors of
    the destination dtype and assigned to the parameters' ``.data``.
    """
    same_size = (r_weight.size() == i_weight.size()
                 and r_weight.size() == j_weight.size()
                 and r_weight.size() == k_weight.size())
    if not same_size:
        raise ValueError('The real and imaginary weights '
                         'should have the same size . Found: r:'
                         + str(r_weight.size()) + ' i:'
                         + str(i_weight.size()) + ' j:'
                         + str(j_weight.size()) + ' k:'
                         + str(k_weight.size()))
    if r_weight.dim() != 2:
        raise Exception('affect_init accepts only matrices. Found dimension = '
                        + str(r_weight.dim()))
    # Dense init only: no convolution kernel, hence kernel_size=None.
    r, i, j, k = init_func(r_weight.size(0), r_weight.size(1), rng, None, init_criterion)
    for dst, src in zip((r_weight, i_weight, j_weight, k_weight), (r, i, j, k)):
        dst.data = torch.from_numpy(src).type_as(dst.data)
| 24,754 | 37.20216 | 135 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/resample_files.py | import torch
import torchaudio
import numpy as np
import matplotlib.pyplot as plt
import configparser
import os
import sys
import random
import shutil
# Reading global cfg file (first argument-mandatory file)
cfg_file = sys.argv[1]
if not (os.path.exists(cfg_file)):
    # NOTE(review): the process exits with status 0 even on this error, so a
    # calling script cannot detect the failure from the exit code.
    sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
    sys.exit(0)
else:
    config = configparser.ConfigParser()
    config.read(cfg_file)
# Output folder creation: resampled files are written under [resample]/out_folder.
out_folder = config["resample"]["out_folder"]
if not os.path.exists(out_folder):
    os.makedirs(out_folder)
# Source directory containing the audio files to process.
data_folder = config["resample"]["data_folder"]
print("- Reading config file......OK!")
# Preparing speakers
def normalize_tensor(tensor):
    ''' Normalize tensor between -1 and 1 '''
    # (x + |min|) / (|max| + |min|) maps the value range onto [0, 1] when the
    # signal straddles zero (min < 0 < max); subtracting 0.5 and doubling then
    # yields [-1, 1].
    # NOTE(review): for tensors that are entirely positive (or entirely
    # negative) the abs() calls make this mapping inexact — confirm inputs
    # are zero-centered audio signals.
    max_val = torch.abs(torch.max(tensor))
    min_val = torch.abs(torch.min(tensor))
    return torch.mul(torch.sub(torch.div(torch.add(tensor, min_val), torch.add(max_val, min_val)), 0.5), 2)
# NOTE(review): audio_files is only assigned for the "qutnoise" dataset; any
# other [resample]/dataset value raises NameError at the loop below — confirm
# whether more datasets were meant to be supported here.
if config["resample"]["dataset"] == "qutnoise":
    audio_files = os.listdir(data_folder)
# Create parallel dataset
print("\n- Starting resampling.\n")
sample_rate = int(config["resample"]["sample_rate"])
for sound in audio_files:
    sound_dir = os.path.join(data_folder, sound)
    # o_sample_rate (the original rate) is never used.
    recording, o_sample_rate = torchaudio.load(sound_dir)
    recording = normalize_tensor(recording)
    save_dir = os.path.join(out_folder, sound)
    # NOTE(review): torchaudio.save with a new sample_rate relabels the file's
    # header rather than resampling the waveform — verify this is the intended
    # "resampling" (otherwise a torchaudio.transforms.Resample step is missing).
    torchaudio.save(save_dir, recording, sample_rate = sample_rate)
print("Saved:", sound) | 1,528 | 26.303571 | 107 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/core.py | ##########################################################
# pytorch-kaldi-gan
# Walter Heymans
# North West University
# 2020
# Adapted from:
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
import sys
import configparser
import os
from utils import is_sequential_dict, model_init, optimizer_init, forward_model, progress
from data_io import load_counts
import numpy as np
import random
import torch
from distutils.util import strtobool
import time
import threading
import itertools
import torch.nn.functional as functional
from data_io import read_lab_fea, open_or_fd, write_mat
from utils import shift
import gan_networks
import weights_and_biases as wandb
def save_tensor_list_to_png(array, titles=None, fig_name="tensor.png"):
    """Plot each 1-D tensor in ``array`` as a stacked subplot and save a PNG.

    Colour convention (unchanged from the original): with exactly four
    panels the first two curves are blue and the last two red; otherwise
    panel 2 is red and the rest blue.  Titles are applied only when one is
    supplied per panel.

    Args:
        array: sequence of tensors; each is detached and plotted via numpy.
        titles: optional list of per-panel titles (default: none).  Replaces
            the original mutable-default ``titles=[]`` anti-pattern; passing
            nothing behaves exactly as before.
        fig_name: output path for the PNG.
    """
    import matplotlib.pyplot as plt

    titles = [] if titles is None else titles
    n = len(array)
    plt.figure()
    for idx, tensor in enumerate(array, start=1):
        plt.subplot(n, 1, idx)
        if n == 4:
            graph_colour = "b" if idx <= 2 else "r"
        else:
            graph_colour = "r" if idx == 2 else "b"
        plt.plot(tensor.detach().numpy(), graph_colour)
        if len(titles) == n:
            plt.title(titles[idx - 1])
    # Fixed: the original called plt.tight_layout(True), passing True as the
    # positional `pad` argument by accident.
    plt.tight_layout()
    plt.savefig(fig_name)
    plt.close()
def compute_gradient_penalty(D, real_samples, fake_samples):
    # CUDA-only: this helper hard-requires a GPU via torch.cuda.FloatTensor.
    Tensor = torch.cuda.FloatTensor
    from torch.autograd import Variable
    """Calculates the gradient penalty loss for WGAN GP"""
    # Random weight term for interpolation between real and fake samples
    # NOTE(review): the feature dimension 440 is hard-coded here — presumably
    # the fMLLR/context feature size used elsewhere; confirm it matches the
    # discriminator's input width before reusing with other feature sets.
    alpha = Tensor(np.random.random((real_samples.size(0), 440)))
    # Get random interpolation between real and fake samples
    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
    d_interpolates = D(interpolates)
    # Target gradient for autograd.grad: a tensor of ones, one per sample.
    fake = Variable(Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)
    # Get gradient w.r.t. interpolates
    gradients = torch.autograd.grad(
        outputs=d_interpolates,
        inputs=interpolates,
        grad_outputs=fake,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    gradients = gradients.view(gradients.size(0), -1)
    # WGAN-GP penalty: mean squared deviation of the gradient norm from 1.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
def get_pearson_correlation(tensor1, tensor2):
    """Mean Pearson correlation between two equally-shaped tensors.

    For inputs with more than one dimension the coefficient is computed per
    row (first axis) and averaged; rows where scipy cannot compute a
    coefficient contribute 0.  Returns 0 when the shapes differ.

    Fix: the original caught failures with a bare ``except:``, which also
    swallowed KeyboardInterrupt/SystemExit — narrowed to ``Exception``.
    """
    from scipy.stats import pearsonr

    output1 = tensor1.detach().cpu().numpy()
    output2 = tensor2.detach().cpu().numpy()
    if output1.shape != output2.shape:
        return 0
    if output1.ndim == 1:
        # calculate Pearson's correlation
        correlation, _ = pearsonr(output1, output2)
        return correlation
    correlation = 0
    for row1, row2 in zip(output1, output2):
        try:
            row_corr, _ = pearsonr(row1, row2)
        except Exception:
            row_corr = 0
        correlation += row_corr
    if output1.shape[0] > 0:
        correlation = correlation / output1.shape[0]
    return correlation
def get_mean_squared_error(tensor1, tensor2):
    """Mean absolute difference between the element magnitudes of two tensors.

    NOTE(review): despite the name this computes mean(| |a| - |b| |) — a
    magnitude-based MAE, not a squared error.  The name is kept so existing
    callers keep working.  Returns 0 when the shapes differ.

    Fixes two defects of the original:
    * the 2-D branch accumulated the mean of the *whole* tensor once per row
      and then divided by the row count — an O(rows) loop that mathematically
      reduced to the plain mean computed once here;
    * with a 0-row 2-D input the original crashed (``int`` accumulator had no
      ``.numpy()``); now the mean is computed uniformly for any matching shape.
    """
    output1 = tensor1.detach().cpu()
    output2 = tensor2.detach().cpu()
    if output1.shape != output2.shape:
        return 0
    error = torch.mean(torch.abs(torch.abs(output1) - torch.abs(output2)))
    return error.numpy()
def read_next_chunk_into_shared_list_with_subprocess(
    read_lab_fea, shared_list, cfg_file, is_production, output_folder, wait_for_process
):
    """Run ``read_lab_fea`` on a worker thread that fills ``shared_list``.

    When ``wait_for_process`` is True the thread is joined before returning
    and None is returned; otherwise the still-running thread object is
    returned so the caller can join it later.
    """
    worker = threading.Thread(
        target=read_lab_fea,
        args=(cfg_file, is_production, shared_list, output_folder),
    )
    worker.start()
    if not wait_for_process:
        return worker
    worker.join()
    return None
def extract_data_from_shared_list(shared_list):
    """Unpack the seven items a loader thread deposited into ``shared_list``.

    Order: data_name, feature end-indices, label end-indices, feature dict,
    label dict, architecture dict, data set.
    """
    return (
        shared_list[0],  # data_name
        shared_list[1],  # data_end_index_fea
        shared_list[2],  # data_end_index_lab
        shared_list[3],  # fea_dict
        shared_list[4],  # lab_dict
        shared_list[5],  # arch_dict
        shared_list[6],  # data_set
    )
def convert_numpy_to_torch(data_set_dict, save_gpumem, use_cuda):
    """Convert the loader's numpy arrays into float tensors.

    Tensors are moved to the GPU only when CUDA is requested *and* GPU-memory
    saving is disabled.  The reference labels are reshaped into a column
    vector of shape (N, 1).
    """
    inp = torch.from_numpy(data_set_dict["input"]).float()
    ref = torch.from_numpy(data_set_dict["ref"]).float()
    if use_cuda and not save_gpumem:
        inp = inp.cuda()
        ref = ref.cuda()
    ref = ref.view((ref.shape[0], 1))
    return inp, ref
def get_labels(batch_size, label):
    """Return a (batch_size, 1) float32 tensor filled with ``label``
    (e.g. 1 for real and 0 for fake GAN targets)."""
    return torch.full((batch_size, 1), float(label))
def wgan_loss_d(dx, dz):
    """Wasserstein critic loss: mean(D(fake)) - mean(D(real))."""
    return torch.mean(dz) - torch.mean(dx)
def run_nn(
    data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict, cfg_file, processed_first, next_config_file,
    epoch=1, wandb_on=False, chunk=0
):
    """Train, validate, or forward the models on one data chunk.

    The mode ([exp]/to_do = train/valid/forward) and all architecture,
    batching, GAN and forward settings come from the chunk-specific
    ``cfg_file``.  While this chunk is processed, the next chunk described by
    ``next_config_file`` is loaded on a background thread; its tensors are
    returned so the caller can pass them back in on the next call.

    NOTE(review): the ``epoch`` parameter is never referenced in this body —
    confirm whether callers still rely on passing it.
    """
    # This function processes the current chunk using the information in cfg_file. In parallel, the next chunk is load into the CPU memory
    # Reading chunk-specific cfg file (first argument-mandatory file)
    if not (os.path.exists(cfg_file)):
        # NOTE(review): exits with status 0 even on this error path.
        sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
        sys.exit(0)
    else:
        config = configparser.ConfigParser()
        config.read(cfg_file)
    # Setting torch seed
    seed = int(config["exp"]["seed"])
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    # Reading config parameters
    output_folder = config["exp"]["out_folder"]
    use_cuda = strtobool(config["exp"]["use_cuda"])
    multi_gpu = strtobool(config["exp"]["multi_gpu"])
    # Optional explicit GPU selection; default to device 0.
    try:
        torch.cuda.set_device(int(config["exp"]["cuda_device"]))
    except KeyError:
        torch.cuda.set_device(0)
    to_do = config["exp"]["to_do"]
    info_file = config["exp"]["out_info"]
    model = config["model"]["model"].split("\n")
    forward_outs = config["forward"]["forward_out"].split(",")
    forward_normalize_post = list(map(strtobool, config["forward"]["normalize_posteriors"].split(",")))
    forward_count_files = config["forward"]["normalize_with_counts_from"].split(",")
    require_decodings = list(map(strtobool, config["forward"]["require_decoding"].split(",")))
    use_cuda = strtobool(config["exp"]["use_cuda"])
    save_gpumem = strtobool(config["exp"]["save_gpumem"])
    is_production = strtobool(config["exp"]["production"])
    # Batch size depends on the phase; forwarding always runs one sentence
    # at a time.
    if to_do == "train":
        batch_size = int(config["batches"]["batch_size_train"])
        try:
            gan_batch_size = int(config["gan"]["batch_size"])
        except KeyError:
            pass
    if to_do == "valid":
        batch_size = int(config["batches"]["batch_size_valid"])
    if to_do == "forward":
        batch_size = 1
    # The generator is only trained when GAN mode is on *and* we are training.
    if config["gan"]["arch_gan"] == "True" and to_do == "train":
        gan_on = True
    else:
        gan_on = False
    # ***** Reading the Data ********
    if processed_first:
        # Reading all the features and labels for this chunk
        shared_list = []
        p = threading.Thread(target=read_lab_fea, args=(cfg_file, is_production, shared_list, output_folder))
        p.start()
        p.join()
        data_name = shared_list[0]
        data_end_index = shared_list[1]
        fea_dict = shared_list[2]
        lab_dict = shared_list[3]
        arch_dict = shared_list[4]
        data_set = shared_list[5]
        # converting numpy tensors into pytorch tensors and put them on GPUs if specified
        if not (save_gpumem) and use_cuda:
            data_set = torch.from_numpy(data_set).float().cuda()
        else:
            data_set = torch.from_numpy(data_set).float()
        # Optionally dump a reduced copy of this chunk for later GAN training.
        # NOTE(review): keeps the first 40 columns plus the last (label)
        # column — presumably the 40-dim static features; confirm.
        try:
            if config["ganset"]["create_set"] == "True":
                gan_out_folder = config["ganset"]["out_folder"]
                smallset = data_set[:,:40].clone()
                smallset = torch.cat((smallset, torch.unsqueeze(data_set[:,-1], dim = 1)), dim = 1)
                torch.save(smallset, os.path.join(gan_out_folder, "chunk_" + str(chunk) + ".pt"))
        except KeyError:
            pass
    else:
        # Same optional GAN-set dump for chunks that were pre-loaded by the
        # previous call.
        try:
            if config["ganset"]["create_set"] == "True":
                gan_out_folder = config["ganset"]["out_folder"]
                smallset = data_set[:,:40].clone()
                smallset = torch.cat((smallset, torch.unsqueeze(data_set[:,-1], dim = 1)), dim = 1)
                torch.save(smallset, os.path.join(gan_out_folder, "chunk_" + str(chunk) + ".pt"))
        except KeyError:
            pass
        # Reading all the features and labels for the next chunk
        shared_list = []
        p = threading.Thread(target=read_lab_fea, args=(next_config_file, is_production, shared_list, output_folder))
        p.start()
    # Reading model and initialize networks
    inp_out_dict = fea_dict
    [nns, costs] = model_init(inp_out_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do)
    if config["gan"]["arch_gan"] == "True":
        # Create Generator
        # The generator class is looked up by name in gan_networks and sized
        # from architecture1's input/output dimensions.
        generator_class = getattr(gan_networks, config["generator"]["arch_name"])
        generator = generator_class(nns[str(config["architecture1"]["arch_name"])].get_input_dim(),
                                    nns[str(config["architecture1"]["arch_name"])].get_output_dim(),
                                    config["generator"])
        if use_cuda:
            generator = generator.cuda()
        directory_g = os.path.join(config["exp"]["out_folder"], config["gan"]["output_path_g"])
        if os.path.exists(directory_g):
            # Restore generator weights, pinned to the configured GPU.
            try:
                if int(config["exp"]["cuda_device"]) == 0:
                    generator.load_state_dict(torch.load(directory_g, map_location="cuda:0"))
                elif int(config["exp"]["cuda_device"]) == 1:
                    generator.load_state_dict(torch.load(directory_g, map_location="cuda:1"))
            except RuntimeError:
                print("Load error loading G, network will be recreated.")
    # optimizers initialization
    optimizers = optimizer_init(nns, config, arch_dict)
    # pre-training and multi-gpu init
    for net in nns.keys():
        pt_file_arch = config[arch_dict[net][0]]["arch_pretrain_file"]
        if pt_file_arch != "none":
            if use_cuda:
                try:
                    if int(config["exp"]["cuda_device"]) == 0:
                        checkpoint_load = torch.load(pt_file_arch, map_location="cuda:0")
                    elif int(config["exp"]["cuda_device"]) == 1:
                        checkpoint_load = torch.load(pt_file_arch, map_location="cuda:1")
                except FileNotFoundError:
                    # File does not exist, load most recent model
                    # Prefer a "final" .pkl; otherwise fall back to the last
                    # .pkl found in the directory listing.
                    exp_file_names = os.path.dirname(pt_file_arch)
                    if os.path.exists(exp_file_names):
                        exp_file_list = os.listdir(exp_file_names)
                        new_pt_file_arch = ''
                        for exp_file in exp_file_list:
                            if exp_file.__contains__('final') and exp_file.__contains__('.pkl'):
                                new_pt_file_arch = os.path.join(exp_file_names, exp_file)
                                break
                            elif exp_file.__contains__('.pkl'):
                                new_pt_file_arch = os.path.join(exp_file_names, exp_file)
                    if int(config["exp"]["cuda_device"]) == 0:
                        checkpoint_load = torch.load(new_pt_file_arch, map_location="cuda:0")
                    elif int(config["exp"]["cuda_device"]) == 1:
                        checkpoint_load = torch.load(new_pt_file_arch, map_location="cuda:1")
                except EOFError:
                    # Truncated checkpoint: fall back to the backup written at
                    # the start of the previous chunk.
                    if int(config["exp"]["cuda_device"]) == 0:
                        checkpoint_load = torch.load(os.path.join(output_folder, "exp_files/backup.pkl"), map_location="cuda:0")
                    elif int(config["exp"]["cuda_device"]) == 1:
                        checkpoint_load = torch.load(os.path.join(output_folder, "exp_files/backup.pkl"), map_location="cuda:1")
            else:
                checkpoint_load = torch.load(pt_file_arch, map_location="cpu")
            nns[net].load_state_dict(checkpoint_load["model_par"])
            optimizers[net].load_state_dict(checkpoint_load["optimizer_par"])
            optimizers[net].param_groups[0]["lr"] = float(
                config[arch_dict[net][0]]["arch_lr"]
            )  # loading lr of the cfg file for pt
        if multi_gpu:
            nns[net] = torch.nn.DataParallel(nns[net])
    if to_do == "forward":
        # One .ark output file per requested forward output.
        post_file = {}
        for out_id in range(len(forward_outs)):
            if require_decodings[out_id]:
                out_file = info_file.replace(".info", "_" + forward_outs[out_id] + "_to_decode.ark")
            else:
                out_file = info_file.replace(".info", "_" + forward_outs[out_id] + ".ark")
            post_file[forward_outs[out_id]] = open_or_fd(out_file, output_folder, "wb")
    # Save the model
    # (a pre-training backup, written before any updates for this chunk)
    if to_do == "train":
        for net in nns.keys():
            checkpoint = {}
            if multi_gpu:
                checkpoint["model_par"] = nns[net].module.state_dict()
            else:
                checkpoint["model_par"] = nns[net].state_dict()
            checkpoint["optimizer_par"] = optimizers[net].state_dict()
            torch.save(checkpoint, os.path.join(output_folder, "exp_files/backup.pkl"))
    # check automatically if the model is sequential
    seq_model = is_sequential_dict(config, arch_dict)
    # ***** Minibatch Processing loop********
    # Sequential models and forwarding batch whole sentences; feed-forward
    # training batches individual frames.
    if seq_model or to_do == "forward":
        N_snt = len(data_name)
        N_batches = int(N_snt / batch_size)
    else:
        N_ex_tr = data_set.shape[0]
        N_batches = int(N_ex_tr / batch_size)
    beg_batch = 0
    end_batch = batch_size
    snt_index = 0
    beg_snt = 0
    start_time = time.time()
    # array of sentence lengths
    arr_snt_len = shift(shift(data_end_index, -1, 0) - data_end_index, 1, 0)
    arr_snt_len[0] = data_end_index[0]
    loss_sum = 0
    err_sum = 0
    inp_dim = data_set.shape[1]
    double_features = False
    try:
        if config["gan"]["double_features"] == "True":
            double_features = True
    except KeyError:
        pass
    for i in range(N_batches):
        max_len = 0
        if seq_model:
            # Zero-pad every sentence in the batch up to the longest one.
            max_len = int(max(arr_snt_len[snt_index : snt_index + batch_size]))
            inp = torch.zeros(max_len, batch_size, inp_dim).contiguous()
            for k in range(batch_size):
                snt_len = data_end_index[snt_index] - beg_snt
                N_zeros = max_len - snt_len
                # Appending a random number of initial zeros, tge others are at the end.
                N_zeros_left = random.randint(0, N_zeros)
                # randomizing could have a regularization effect
                inp[N_zeros_left : N_zeros_left + snt_len, k, :] = data_set[beg_snt : beg_snt + snt_len, :]
                beg_snt = data_end_index[snt_index]
                snt_index = snt_index + 1
        else:
            # features and labels for batch i
            if to_do != "forward":
                inp = data_set[beg_batch:end_batch, :].contiguous()
            else:
                # Forwarding consumes exactly one sentence per iteration.
                snt_len = data_end_index[snt_index] - beg_snt
                inp = data_set[beg_snt : beg_snt + snt_len, :].contiguous()
                beg_snt = data_end_index[snt_index]
                snt_index = snt_index + 1
        # use cuda
        if use_cuda:
            inp = inp.cuda()
        if to_do == "train":
            # Forward input, with autograd graph active
            if gan_on:
                outs_dict = forward_model(
                    fea_dict,
                    lab_dict,
                    arch_dict,
                    model,
                    nns,
                    costs,
                    inp,
                    inp_out_dict,
                    max_len,
                    batch_size,
                    to_do,
                    forward_outs,
                    generator=generator,
                    gan_on=True,
                    double_features=double_features,
                )
            else:
                outs_dict = forward_model(
                    fea_dict,
                    lab_dict,
                    arch_dict,
                    model,
                    nns,
                    costs,
                    inp,
                    inp_out_dict,
                    max_len,
                    batch_size,
                    to_do,
                    forward_outs,
                    double_features = double_features,
                )
            # Standard zero-grad / backward / step over every optimizer;
            # frozen architectures skip the step.
            for opt in optimizers.keys():
                optimizers[opt].zero_grad()
            outs_dict["loss_final"].backward()
            for opt in optimizers.keys():
                if not (strtobool(config[arch_dict[opt][0]]["arch_freeze"])):
                    optimizers[opt].step()
        else:
            with torch.no_grad():  # Forward input without autograd graph (save memory)
                if config["gan"]["arch_gan"] == "True":  # Validation and forward
                    outs_dict = forward_model(
                        fea_dict,
                        lab_dict,
                        arch_dict,
                        model,
                        nns,
                        costs,
                        inp,
                        inp_out_dict,
                        max_len,
                        batch_size,
                        to_do,
                        forward_outs,
                        generator=generator,
                        gan_on=True,
                        double_features=double_features,
                    )
                else:
                    outs_dict = forward_model(
                        fea_dict,
                        lab_dict,
                        arch_dict,
                        model,
                        nns,
                        costs,
                        inp,
                        inp_out_dict,
                        max_len,
                        batch_size,
                        to_do,
                        forward_outs,
                        double_features = double_features,
                    )
        if to_do == "forward":
            for out_id in range(len(forward_outs)):
                out_save = outs_dict[forward_outs[out_id]].data.cpu().numpy()
                if forward_normalize_post[out_id]:
                    # read the config file
                    # Convert posteriors to scaled likelihoods by subtracting
                    # the log class priors estimated from the count file.
                    counts = load_counts(forward_count_files[out_id])
                    out_save = out_save - np.log(counts / np.sum(counts))
                # save the output
                write_mat(output_folder, post_file[forward_outs[out_id]], out_save, data_name[i])
        else:
            loss_sum = loss_sum + outs_dict["loss_final"].detach()
            err_sum = err_sum + outs_dict["err_final"].detach()
        # update it to the next batch
        beg_batch = end_batch
        end_batch = beg_batch + batch_size
        # Progress bar
        if to_do == "train":
            status_string = (
                "Training | (Batch "
                + str(i + 1)
                + "/"
                + str(N_batches)
                + ")"
                + " | L:"
                + str(round(loss_sum.cpu().item() / (i + 1), 3))
            )
            if i == N_batches - 1:
                status_string = "Training | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
        if to_do == "valid":
            status_string = "Validating | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
        if to_do == "forward":
            status_string = "Forwarding | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
        progress(i, N_batches, status=status_string)
    elapsed_time_chunk = time.time() - start_time
    loss_tot = loss_sum / N_batches
    err_tot = err_sum / N_batches
    if wandb_on and to_do == "train" :
        # Log chunk-level averages; commit=False defers the wandb step.
        wandb.quick_log("train_loss", loss_tot.cpu().numpy(), commit = False)
        wandb.quick_log("train_error", err_tot.cpu().numpy(), commit = False)
    # clearing memory
    del inp, outs_dict, data_set
    # save the model
    if to_do == "train":
        for net in nns.keys():
            checkpoint = {}
            if multi_gpu:
                checkpoint["model_par"] = nns[net].module.state_dict()
            else:
                checkpoint["model_par"] = nns[net].state_dict()
            checkpoint["optimizer_par"] = optimizers[net].state_dict()
            out_file = info_file.replace(".info", "_" + arch_dict[net][0] + ".pkl")
            torch.save(checkpoint, out_file)
    if to_do == "forward":
        for out_name in forward_outs:
            post_file[out_name].close()
    # Write info file
    with open(info_file, "w") as text_file:
        text_file.write("[results]\n")
        if to_do != "forward":
            text_file.write("loss=%s\n" % loss_tot.cpu().numpy())
            text_file.write("err=%s\n" % err_tot.cpu().numpy())
        text_file.write("elapsed_time_chunk=%f\n" % elapsed_time_chunk)
    text_file.close()
    # Getting the data for the next chunk (read in parallel)
    p.join()
    data_name = shared_list[0]
    data_end_index = shared_list[1]
    fea_dict = shared_list[2]
    lab_dict = shared_list[3]
    arch_dict = shared_list[4]
    data_set = shared_list[5]
    # converting numpy tensors into pytorch tensors and put them on GPUs if specified
    if not (save_gpumem) and use_cuda:
        data_set = torch.from_numpy(data_set).float().cuda()
    else:
        data_set = torch.from_numpy(data_set).float()
    return [data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict]
| 22,371 | 33.793157 | 138 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/neural_networks.py | ##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from distutils.util import strtobool
import math
import json
# uncomment below if you want to use SRU
# and you need to install SRU: pip install sru[cuda].
# or you can install it from source code: https://github.com/taolei87/sru.
# import sru
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable
    per-feature scale (``gamma``) and shift (``beta``)."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(features))
        self.beta = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return normalized * self.gamma + self.beta
def act_fun(act_type):
    """Translate an activation-name string from the config into a fresh
    torch module.  Unknown names fall through and yield ``None`` (matching
    the original if-chain behaviour)."""
    factories = {
        "relu": nn.ReLU,
        "tanh": nn.Tanh,
        "sigmoid": nn.Sigmoid,
        "leaky_relu": lambda: nn.LeakyReLU(0.2),
        "elu": nn.ELU,
        "softmax": lambda: nn.LogSoftmax(dim=1),
        # initializzed like this, but not used in forward!
        "linear": lambda: nn.LeakyReLU(1),
    }
    factory = factories.get(act_type)
    if factory is not None:
        return factory()
class MLP(nn.Module):
    """Multi-layer perceptron with per-layer dropout, activation and optional
    layer/batch normalization.

    Hyper-parameters come from the `options` dict as comma-separated
    per-layer strings (e.g. dnn_lay="1024,1024"); `inp_dim` is the input
    feature size. `self.out_dim` exposes the size of the last layer.
    """

    def __init__(self, options, inp_dim):
        super(MLP, self).__init__()
        self.input_dim = inp_dim
        # One entry per hidden layer.
        self.dnn_lay = list(map(int, options["dnn_lay"].split(",")))
        self.dnn_drop = list(map(float, options["dnn_drop"].split(",")))
        self.dnn_use_batchnorm = list(map(strtobool, options["dnn_use_batchnorm"].split(",")))
        self.dnn_use_laynorm = list(map(strtobool, options["dnn_use_laynorm"].split(",")))
        self.dnn_use_laynorm_inp = strtobool(options["dnn_use_laynorm_inp"])
        self.dnn_use_batchnorm_inp = strtobool(options["dnn_use_batchnorm_inp"])
        self.dnn_act = options["dnn_act"].split(",")
        self.wx = nn.ModuleList([])
        self.bn = nn.ModuleList([])
        self.ln = nn.ModuleList([])
        self.act = nn.ModuleList([])
        self.drop = nn.ModuleList([])
        # input layer normalization
        if self.dnn_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)
        # input batch normalization
        if self.dnn_use_batchnorm_inp:
            self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
        self.N_dnn_lay = len(self.dnn_lay)
        current_input = self.input_dim
        # Initialization of hidden layers
        for i in range(self.N_dnn_lay):
            # dropout
            self.drop.append(nn.Dropout(p=self.dnn_drop[i]))
            # activation
            self.act.append(act_fun(self.dnn_act[i]))
            add_bias = True
            # layer norm initialization (created for every layer; only used
            # in forward when the corresponding flag is set)
            self.ln.append(LayerNorm(self.dnn_lay[i]))
            self.bn.append(nn.BatchNorm1d(self.dnn_lay[i], momentum=0.05))
            # Normalization layers carry their own shift, so skip the linear bias.
            if self.dnn_use_laynorm[i] or self.dnn_use_batchnorm[i]:
                add_bias = False
            # Linear operations
            self.wx.append(nn.Linear(current_input, self.dnn_lay[i], bias=add_bias))
            # weight initialization: small uniform range scaled by fan-in + fan-out
            self.wx[i].weight = torch.nn.Parameter(
                torch.Tensor(self.dnn_lay[i], current_input).uniform_(
                    -np.sqrt(0.01 / (current_input + self.dnn_lay[i])),
                    np.sqrt(0.01 / (current_input + self.dnn_lay[i])),
                )
            )
            # NOTE(review): this re-attaches a zero bias even when add_bias is
            # False above — confirm whether that is intended.
            self.wx[i].bias = torch.nn.Parameter(torch.zeros(self.dnn_lay[i]))
            current_input = self.dnn_lay[i]
        self.out_dim = current_input

    def forward(self, x):
        # Applying Layer/Batch Norm on the input if configured
        if bool(self.dnn_use_laynorm_inp):
            x = self.ln0((x))
        if bool(self.dnn_use_batchnorm_inp):
            x = self.bn0((x))
        # Per layer: linear -> (optional norm) -> activation -> dropout.
        # The four branches below are mutually exclusive per layer.
        for i in range(self.N_dnn_lay):
            if self.dnn_use_laynorm[i] and not (self.dnn_use_batchnorm[i]):
                x = self.drop[i](self.act[i](self.ln[i](self.wx[i](x))))
            if self.dnn_use_batchnorm[i] and not (self.dnn_use_laynorm[i]):
                x = self.drop[i](self.act[i](self.bn[i](self.wx[i](x))))
            if self.dnn_use_batchnorm[i] == True and self.dnn_use_laynorm[i] == True:
                x = self.drop[i](self.act[i](self.bn[i](self.ln[i](self.wx[i](x)))))
            if self.dnn_use_batchnorm[i] == False and self.dnn_use_laynorm[i] == False:
                x = self.drop[i](self.act[i](self.wx[i](x)))
        return x
class LSTM_cudnn(nn.Module):
    """Wrapper around the cuDNN-backed torch.nn.LSTM.

    Input layout is (seq_len, batch, feature). NOTE(review): `batch_first`
    is parsed from the options but NOT forwarded to nn.LSTM, so it only
    affects the weight-initialization branch below — confirm intent.
    """

    def __init__(self, options, inp_dim):
        super(LSTM_cudnn, self).__init__()
        self.input_dim = inp_dim
        self.hidden_size = int(options["hidden_size"])
        self.num_layers = int(options["num_layers"])
        self.bias = bool(strtobool(options["bias"]))
        self.batch_first = bool(strtobool(options["batch_first"]))
        self.dropout = float(options["dropout"])
        self.bidirectional = bool(strtobool(options["bidirectional"]))
        # Single LSTM held in a ModuleList (presumably to keep the original
        # checkpoint parameter naming lstm.0.* — verify before changing).
        self.lstm = nn.ModuleList(
            [
                nn.LSTM(
                    self.input_dim,
                    self.hidden_size,
                    self.num_layers,
                    bias=self.bias,
                    dropout=self.dropout,
                    bidirectional=self.bidirectional,
                )
            ]
        )
        # NOTE(review): orthogonal init of recurrent weights is applied only
        # when batch_first is set — this gating looks accidental; confirm.
        for name,param in self.lstm[0].named_parameters():
            if 'weight_hh' in name:
                if self.batch_first:
                    nn.init.orthogonal_(param)
            elif 'bias' in name:
                nn.init.zeros_(param)
        # Bidirectional doubles the output feature size.
        self.out_dim = self.hidden_size + self.bidirectional * self.hidden_size

    def forward(self, x):
        # Zero initial hidden/cell state: one slice per layer (and direction).
        if self.bidirectional:
            h0 = torch.zeros(self.num_layers * 2, x.shape[1], self.hidden_size)
            c0 = torch.zeros(self.num_layers * 2, x.shape[1], self.hidden_size)
        else:
            h0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size)
            c0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size)
        if x.is_cuda:
            h0 = h0.cuda()
            c0 = c0.cuda()
        # Return the full output sequence; final states are discarded.
        output, (hn, cn) = self.lstm[0](x, (h0, c0))
        return output
class GRU_cudnn(nn.Module):
    """Wrapper around the cuDNN-backed torch.nn.GRU.

    Input layout is (seq_len, batch, feature). NOTE(review): `batch_first`
    is parsed from the options but NOT forwarded to nn.GRU — confirm intent.
    """

    def __init__(self, options, inp_dim):
        super(GRU_cudnn, self).__init__()
        self.input_dim = inp_dim
        self.hidden_size = int(options["hidden_size"])
        self.num_layers = int(options["num_layers"])
        self.bias = bool(strtobool(options["bias"]))
        self.batch_first = bool(strtobool(options["batch_first"]))
        self.dropout = float(options["dropout"])
        self.bidirectional = bool(strtobool(options["bidirectional"]))
        # Single GRU held in a ModuleList (presumably to keep the original
        # checkpoint parameter naming gru.0.*).
        self.gru = nn.ModuleList(
            [
                nn.GRU(
                    self.input_dim,
                    self.hidden_size,
                    self.num_layers,
                    bias=self.bias,
                    dropout=self.dropout,
                    bidirectional=self.bidirectional,
                )
            ]
        )
        # Orthogonal init for recurrent weights, Xavier for input weights,
        # zeros for biases.
        for name,param in self.gru[0].named_parameters():
            if 'weight_hh' in name:
                nn.init.orthogonal_(param)
            elif 'weight_ih' in name:
                nn.init.xavier_uniform_(param)
            elif 'bias' in name:
                nn.init.zeros_(param)
        # Bidirectional doubles the output feature size.
        self.out_dim = self.hidden_size + self.bidirectional * self.hidden_size

    def forward(self, x):
        # Zero initial hidden state: one slice per layer (and direction).
        if self.bidirectional:
            h0 = torch.zeros(self.num_layers * 2, x.shape[1], self.hidden_size)
        else:
            h0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size)
        if x.is_cuda:
            h0 = h0.cuda()
        # Return the full output sequence; the final state is discarded.
        output, hn = self.gru[0](x, h0)
        return output
class RNN_cudnn(nn.Module):
    """Vanilla RNN wrapper around the cuDNN-backed torch.nn.RNN.

    Expects input of shape (seq_len, batch, feature) and returns the full
    output sequence. Hyper-parameters are parsed from the `options` dict;
    `batch_first` is parsed but (as in the sibling wrappers) not forwarded
    to the underlying module.
    """

    def __init__(self, options, inp_dim):
        super(RNN_cudnn, self).__init__()
        self.input_dim = inp_dim
        self.hidden_size = int(options["hidden_size"])
        self.num_layers = int(options["num_layers"])
        self.nonlinearity = options["nonlinearity"]
        self.bias = bool(strtobool(options["bias"]))
        self.batch_first = bool(strtobool(options["batch_first"]))
        self.dropout = float(options["dropout"])
        self.bidirectional = bool(strtobool(options["bidirectional"]))
        rnn = nn.RNN(
            self.input_dim,
            self.hidden_size,
            self.num_layers,
            nonlinearity=self.nonlinearity,
            bias=self.bias,
            dropout=self.dropout,
            bidirectional=self.bidirectional,
        )
        # Wrapped in a ModuleList to keep the original parameter naming (rnn.0.*).
        self.rnn = nn.ModuleList([rnn])
        # A bidirectional RNN doubles the feature dimension of the output.
        self.out_dim = self.hidden_size + self.bidirectional * self.hidden_size

    def forward(self, x):
        # Zero initial hidden state: one slice per layer and direction.
        num_directions = 2 if self.bidirectional else 1
        h0 = torch.zeros(self.num_layers * num_directions, x.shape[1], self.hidden_size)
        if x.is_cuda:
            h0 = h0.cuda()
        output, hn = self.rnn[0](x, h0)
        return output
class LSTM(nn.Module):
    """Custom (non-cuDNN) LSTM stack with optional per-layer batch/layer norm
    and recurrent dropout.

    Input/output layout is (time, batch, feature). Bidirectionality is
    implemented by concatenating the time-flipped sequence along the batch
    axis (via the module-level `flip` helper) and splitting it back after
    the recurrence; `out_dim` reflects the doubled feature size in that case.
    """

    def __init__(self, options, inp_dim):
        super(LSTM, self).__init__()
        # Reading parameters (comma-separated, one entry per layer)
        self.input_dim = inp_dim
        self.lstm_lay = list(map(int, options["lstm_lay"].split(",")))
        self.lstm_drop = list(map(float, options["lstm_drop"].split(",")))
        self.lstm_use_batchnorm = list(map(strtobool, options["lstm_use_batchnorm"].split(",")))
        self.lstm_use_laynorm = list(map(strtobool, options["lstm_use_laynorm"].split(",")))
        self.lstm_use_laynorm_inp = strtobool(options["lstm_use_laynorm_inp"])
        self.lstm_use_batchnorm_inp = strtobool(options["lstm_use_batchnorm_inp"])
        self.lstm_act = options["lstm_act"].split(",")
        self.lstm_orthinit = strtobool(options["lstm_orthinit"])
        self.bidir = strtobool(options["lstm_bidir"])
        self.use_cuda = strtobool(options["use_cuda"])
        self.to_do = options["to_do"]
        # test_flag disables recurrent dropout sampling at eval time
        if self.to_do == "train":
            self.test_flag = False
        else:
            self.test_flag = True
        # List initialization
        self.wfx = nn.ModuleList([])  # Forget
        self.ufh = nn.ModuleList([])  # Forget
        self.wix = nn.ModuleList([])  # Input
        self.uih = nn.ModuleList([])  # Input
        self.wox = nn.ModuleList([])  # Output
        self.uoh = nn.ModuleList([])  # Output
        self.wcx = nn.ModuleList([])  # Cell state
        self.uch = nn.ModuleList([])  # Cell state
        self.ln = nn.ModuleList([])  # Layer Norm
        self.bn_wfx = nn.ModuleList([])  # Batch Norm
        self.bn_wix = nn.ModuleList([])  # Batch Norm
        self.bn_wox = nn.ModuleList([])  # Batch Norm
        self.bn_wcx = nn.ModuleList([])  # Batch Norm
        self.act = nn.ModuleList([])  # Activations
        # Input layer normalization
        if self.lstm_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)
        # Input batch normalization
        if self.lstm_use_batchnorm_inp:
            self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
        self.N_lstm_lay = len(self.lstm_lay)
        current_input = self.input_dim
        # Initialization of hidden layers
        for i in range(self.N_lstm_lay):
            # Activations
            self.act.append(act_fun(self.lstm_act[i]))
            add_bias = True
            # Normalization layers carry their own shift, so skip the linear bias.
            if self.lstm_use_laynorm[i] or self.lstm_use_batchnorm[i]:
                add_bias = False
            # Feed-forward connections
            self.wfx.append(nn.Linear(current_input, self.lstm_lay[i], bias=add_bias))
            self.wix.append(nn.Linear(current_input, self.lstm_lay[i], bias=add_bias))
            self.wox.append(nn.Linear(current_input, self.lstm_lay[i], bias=add_bias))
            self.wcx.append(nn.Linear(current_input, self.lstm_lay[i], bias=add_bias))
            # Recurrent connections
            self.ufh.append(nn.Linear(self.lstm_lay[i], self.lstm_lay[i], bias=False))
            self.uih.append(nn.Linear(self.lstm_lay[i], self.lstm_lay[i], bias=False))
            self.uoh.append(nn.Linear(self.lstm_lay[i], self.lstm_lay[i], bias=False))
            self.uch.append(nn.Linear(self.lstm_lay[i], self.lstm_lay[i], bias=False))
            if self.lstm_orthinit:
                nn.init.orthogonal_(self.ufh[i].weight)
                nn.init.orthogonal_(self.uih[i].weight)
                nn.init.orthogonal_(self.uoh[i].weight)
                nn.init.orthogonal_(self.uch[i].weight)
            # batch norm initialization
            self.bn_wfx.append(nn.BatchNorm1d(self.lstm_lay[i], momentum=0.05))
            self.bn_wix.append(nn.BatchNorm1d(self.lstm_lay[i], momentum=0.05))
            self.bn_wox.append(nn.BatchNorm1d(self.lstm_lay[i], momentum=0.05))
            self.bn_wcx.append(nn.BatchNorm1d(self.lstm_lay[i], momentum=0.05))
            self.ln.append(LayerNorm(self.lstm_lay[i]))
            if self.bidir:
                current_input = 2 * self.lstm_lay[i]
            else:
                current_input = self.lstm_lay[i]
        self.out_dim = self.lstm_lay[i] + self.bidir * self.lstm_lay[i]

    def forward(self, x):
        # Applying Layer/Batch Norm on the input if configured
        if bool(self.lstm_use_laynorm_inp):
            x = self.ln0((x))
        if bool(self.lstm_use_batchnorm_inp):
            # BatchNorm1d needs a 2D view: fold (time, batch) together.
            x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
            x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])
        for i in range(self.N_lstm_lay):
            # Initial state and concatenation: for the bidirectional case the
            # time-reversed sequence is appended along the batch dimension.
            if self.bidir:
                h_init = torch.zeros(2 * x.shape[1], self.lstm_lay[i])
                x = torch.cat([x, flip(x, 0)], 1)
            else:
                h_init = torch.zeros(x.shape[1], self.lstm_lay[i])
            # Drop mask initialization (same mask for all time steps);
            # at test time a scalar expectation is used instead of sampling.
            if self.test_flag == False:
                drop_mask = torch.bernoulli(torch.Tensor(h_init.shape[0], h_init.shape[1]).fill_(1 - self.lstm_drop[i]))
            else:
                drop_mask = torch.FloatTensor([1 - self.lstm_drop[i]])
            if self.use_cuda:
                h_init = h_init.cuda()
                drop_mask = drop_mask.cuda()
            # Feed-forward affine transformations (all steps in parallel)
            wfx_out = self.wfx[i](x)
            wix_out = self.wix[i](x)
            wox_out = self.wox[i](x)
            wcx_out = self.wcx[i](x)
            # Apply batch norm if needed (all steps in parallel)
            if self.lstm_use_batchnorm[i]:
                wfx_out_bn = self.bn_wfx[i](wfx_out.view(wfx_out.shape[0] * wfx_out.shape[1], wfx_out.shape[2]))
                wfx_out = wfx_out_bn.view(wfx_out.shape[0], wfx_out.shape[1], wfx_out.shape[2])
                wix_out_bn = self.bn_wix[i](wix_out.view(wix_out.shape[0] * wix_out.shape[1], wix_out.shape[2]))
                wix_out = wix_out_bn.view(wix_out.shape[0], wix_out.shape[1], wix_out.shape[2])
                wox_out_bn = self.bn_wox[i](wox_out.view(wox_out.shape[0] * wox_out.shape[1], wox_out.shape[2]))
                wox_out = wox_out_bn.view(wox_out.shape[0], wox_out.shape[1], wox_out.shape[2])
                wcx_out_bn = self.bn_wcx[i](wcx_out.view(wcx_out.shape[0] * wcx_out.shape[1], wcx_out.shape[2]))
                wcx_out = wcx_out_bn.view(wcx_out.shape[0], wcx_out.shape[1], wcx_out.shape[2])
            # Processing time steps
            hiddens = []
            ct = h_init
            ht = h_init
            for k in range(x.shape[0]):
                # LSTM equations (dropout is applied to the candidate cell input)
                ft = torch.sigmoid(wfx_out[k] + self.ufh[i](ht))
                it = torch.sigmoid(wix_out[k] + self.uih[i](ht))
                ot = torch.sigmoid(wox_out[k] + self.uoh[i](ht))
                ct = it * self.act[i](wcx_out[k] + self.uch[i](ht)) * drop_mask + ft * ct
                ht = ot * self.act[i](ct)
                if self.lstm_use_laynorm[i]:
                    ht = self.ln[i](ht)
                hiddens.append(ht)
            # Stacking hidden states
            h = torch.stack(hiddens)
            # Bidirectional concatenations: split the doubled batch back into
            # forward/backward streams and concatenate along features.
            if self.bidir:
                h_f = h[:, 0 : int(x.shape[1] / 2)]
                h_b = flip(h[:, int(x.shape[1] / 2) : x.shape[1]].contiguous(), 0)
                h = torch.cat([h_f, h_b], 2)
            # Setup x for the next hidden layer
            x = h
        return x
class GRU(nn.Module):
    """Custom (non-cuDNN) GRU stack with optional per-layer batch/layer norm
    and recurrent dropout.

    Input/output layout is (time, batch, feature). Bidirectionality is
    implemented by concatenating the time-flipped sequence along the batch
    axis (via the module-level `flip` helper); `out_dim` reflects the
    doubled feature size in that case.
    """

    def __init__(self, options, inp_dim):
        super(GRU, self).__init__()
        # Reading parameters (comma-separated, one entry per layer)
        self.input_dim = inp_dim
        self.gru_lay = list(map(int, options["gru_lay"].split(",")))
        self.gru_drop = list(map(float, options["gru_drop"].split(",")))
        self.gru_use_batchnorm = list(map(strtobool, options["gru_use_batchnorm"].split(",")))
        self.gru_use_laynorm = list(map(strtobool, options["gru_use_laynorm"].split(",")))
        self.gru_use_laynorm_inp = strtobool(options["gru_use_laynorm_inp"])
        self.gru_use_batchnorm_inp = strtobool(options["gru_use_batchnorm_inp"])
        self.gru_orthinit = strtobool(options["gru_orthinit"])
        self.gru_act = options["gru_act"].split(",")
        self.bidir = strtobool(options["gru_bidir"])
        self.use_cuda = strtobool(options["use_cuda"])
        self.to_do = options["to_do"]
        # test_flag disables recurrent dropout sampling at eval time
        if self.to_do == "train":
            self.test_flag = False
        else:
            self.test_flag = True
        # List initialization
        self.wh = nn.ModuleList([])
        self.uh = nn.ModuleList([])
        self.wz = nn.ModuleList([])  # Update Gate
        self.uz = nn.ModuleList([])  # Update Gate
        self.wr = nn.ModuleList([])  # Reset Gate
        self.ur = nn.ModuleList([])  # Reset Gate
        self.ln = nn.ModuleList([])  # Layer Norm
        self.bn_wh = nn.ModuleList([])  # Batch Norm
        self.bn_wz = nn.ModuleList([])  # Batch Norm
        self.bn_wr = nn.ModuleList([])  # Batch Norm
        self.act = nn.ModuleList([])  # Activations
        # Input layer normalization
        if self.gru_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)
        # Input batch normalization
        if self.gru_use_batchnorm_inp:
            self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
        self.N_gru_lay = len(self.gru_lay)
        current_input = self.input_dim
        # Initialization of hidden layers
        for i in range(self.N_gru_lay):
            # Activations
            self.act.append(act_fun(self.gru_act[i]))
            add_bias = True
            # Normalization layers carry their own shift, so skip the linear bias.
            if self.gru_use_laynorm[i] or self.gru_use_batchnorm[i]:
                add_bias = False
            # Feed-forward connections
            self.wh.append(nn.Linear(current_input, self.gru_lay[i], bias=add_bias))
            self.wz.append(nn.Linear(current_input, self.gru_lay[i], bias=add_bias))
            self.wr.append(nn.Linear(current_input, self.gru_lay[i], bias=add_bias))
            # Recurrent connections
            self.uh.append(nn.Linear(self.gru_lay[i], self.gru_lay[i], bias=False))
            self.uz.append(nn.Linear(self.gru_lay[i], self.gru_lay[i], bias=False))
            self.ur.append(nn.Linear(self.gru_lay[i], self.gru_lay[i], bias=False))
            if self.gru_orthinit:
                nn.init.orthogonal_(self.uh[i].weight)
                nn.init.orthogonal_(self.uz[i].weight)
                nn.init.orthogonal_(self.ur[i].weight)
            # batch norm initialization
            self.bn_wh.append(nn.BatchNorm1d(self.gru_lay[i], momentum=0.05))
            self.bn_wz.append(nn.BatchNorm1d(self.gru_lay[i], momentum=0.05))
            self.bn_wr.append(nn.BatchNorm1d(self.gru_lay[i], momentum=0.05))
            self.ln.append(LayerNorm(self.gru_lay[i]))
            if self.bidir:
                current_input = 2 * self.gru_lay[i]
            else:
                current_input = self.gru_lay[i]
        self.out_dim = self.gru_lay[i] + self.bidir * self.gru_lay[i]

    def forward(self, x):
        # Applying Layer/Batch Norm on the input if configured
        if bool(self.gru_use_laynorm_inp):
            x = self.ln0((x))
        if bool(self.gru_use_batchnorm_inp):
            # BatchNorm1d needs a 2D view: fold (time, batch) together.
            x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
            x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])
        for i in range(self.N_gru_lay):
            # Initial state and concatenation: for the bidirectional case the
            # time-reversed sequence is appended along the batch dimension.
            if self.bidir:
                h_init = torch.zeros(2 * x.shape[1], self.gru_lay[i])
                x = torch.cat([x, flip(x, 0)], 1)
            else:
                h_init = torch.zeros(x.shape[1], self.gru_lay[i])
            # Drop mask initialization (same mask for all time steps);
            # at test time a scalar expectation is used instead of sampling.
            if self.test_flag == False:
                drop_mask = torch.bernoulli(torch.Tensor(h_init.shape[0], h_init.shape[1]).fill_(1 - self.gru_drop[i]))
            else:
                drop_mask = torch.FloatTensor([1 - self.gru_drop[i]])
            if self.use_cuda:
                h_init = h_init.cuda()
                drop_mask = drop_mask.cuda()
            # Feed-forward affine transformations (all steps in parallel)
            wh_out = self.wh[i](x)
            wz_out = self.wz[i](x)
            wr_out = self.wr[i](x)
            # Apply batch norm if needed (all steps in parallel)
            if self.gru_use_batchnorm[i]:
                wh_out_bn = self.bn_wh[i](wh_out.view(wh_out.shape[0] * wh_out.shape[1], wh_out.shape[2]))
                wh_out = wh_out_bn.view(wh_out.shape[0], wh_out.shape[1], wh_out.shape[2])
                wz_out_bn = self.bn_wz[i](wz_out.view(wz_out.shape[0] * wz_out.shape[1], wz_out.shape[2]))
                wz_out = wz_out_bn.view(wz_out.shape[0], wz_out.shape[1], wz_out.shape[2])
                wr_out_bn = self.bn_wr[i](wr_out.view(wr_out.shape[0] * wr_out.shape[1], wr_out.shape[2]))
                wr_out = wr_out_bn.view(wr_out.shape[0], wr_out.shape[1], wr_out.shape[2])
            # Processing time steps
            hiddens = []
            ht = h_init
            for k in range(x.shape[0]):
                # gru equations: the reset gate rt modulates the recurrent
                # contribution to the candidate state.
                zt = torch.sigmoid(wz_out[k] + self.uz[i](ht))
                rt = torch.sigmoid(wr_out[k] + self.ur[i](ht))
                at = wh_out[k] + self.uh[i](rt * ht)
                hcand = self.act[i](at) * drop_mask
                ht = zt * ht + (1 - zt) * hcand
                if self.gru_use_laynorm[i]:
                    ht = self.ln[i](ht)
                hiddens.append(ht)
            # Stacking hidden states
            h = torch.stack(hiddens)
            # Bidirectional concatenations: split the doubled batch back into
            # forward/backward streams and concatenate along features.
            if self.bidir:
                h_f = h[:, 0 : int(x.shape[1] / 2)]
                h_b = flip(h[:, int(x.shape[1] / 2) : x.shape[1]].contiguous(), 0)
                h = torch.cat([h_f, h_b], 2)
            # Setup x for the next hidden layer
            x = h
        return x
class logMelFb(nn.Module):
    """Compute log mel-filterbank features from a raw waveform via torchaudio.

    NOTE(review): this relies on an old torchaudio API (MelSpectrogram kwargs
    sr/ws/hop and the `.window` / `.fm` attributes) — verify against the
    installed torchaudio version before use.
    """

    def __init__(self, options, inp_dim):
        super(logMelFb, self).__init__()
        # Imported lazily so the rest of the module works without torchaudio.
        import torchaudio
        self._sample_rate = int(options["logmelfb_nr_sample_rate"])
        self._nr_of_filters = int(options["logmelfb_nr_filt"])
        self._stft_window_size = int(options["logmelfb_stft_window_size"])
        self._stft_window_shift = int(options["logmelfb_stft_window_shift"])
        self._use_cuda = strtobool(options["use_cuda"])
        self.out_dim = self._nr_of_filters
        self._mspec = torchaudio.transforms.MelSpectrogram(
            sr=self._sample_rate,
            n_fft=self._stft_window_size,
            ws=self._stft_window_size,
            hop=self._stft_window_shift,
            n_mels=self._nr_of_filters,
        )

    def forward(self, x):
        def _safe_log(inp, epsilon=1e-20):
            # Clamp to epsilon before log10 to avoid -inf on silent frames.
            eps = torch.FloatTensor([epsilon])
            if self._use_cuda:
                eps = eps.cuda()
            log_inp = torch.log10(torch.max(inp, eps.expand_as(inp)))
            return log_inp

        assert x.shape[-1] == 1, "Multi channel time signal processing not suppored yet"
        # (time, batch, 1) -> (batch, time) for torch.stft
        x_reshape_for_stft = torch.squeeze(x, -1).transpose(0, 1)
        if self._use_cuda:
            window = self._mspec.window(self._stft_window_size).cuda()
        else:
            window = self._mspec.window(self._stft_window_size)
        x_stft = torch.stft(
            x_reshape_for_stft, self._stft_window_size, hop_length=self._stft_window_shift, center=False, window=window
        )
        # Power spectrum: |STFT|^2 summed over the (real, imag) axis.
        x_power_stft = x_stft.pow(2).sum(-1)
        x_power_stft_reshape_for_filterbank_mult = x_power_stft.transpose(1, 2)
        # Apply the mel filterbank, then restore (time, batch, filt) layout.
        mel_spec = self._mspec.fm(x_power_stft_reshape_for_filterbank_mult).transpose(0, 1)
        log_mel_spec = _safe_log(mel_spec)
        out = log_mel_spec
        return out
class channel_averaging(nn.Module):
    """Collapse a multi-channel signal of shape (time, batch, channel) to a
    single channel via a fixed, normalized weighted average over channels."""

    def __init__(self, options, inp_dim):
        super(channel_averaging, self).__init__()
        self._use_cuda = strtobool(options["use_cuda"])
        raw_weights = [float(w) for w in options["chAvg_channelWeights"].split(",")]
        self._nr_of_channels = len(raw_weights)
        # Normalize so the weights sum to 1 (float32 to match the features).
        normalized = np.asarray(raw_weights, dtype=np.float32) * 1.0 / np.sum(raw_weights)
        self._weights = torch.from_numpy(normalized)
        if self._use_cuda:
            self._weights = self._weights.cuda()
        self.out_dim = 1

    def forward(self, x):
        # The configured weight vector must match the channel dimension.
        assert self._nr_of_channels == x.shape[-1]
        # Weighted sum over channels, keeping a trailing singleton channel dim.
        averaged = torch.einsum("tbc,c->tb", x, self._weights)
        return averaged.unsqueeze(-1)
class fusionRNN_jit(torch.jit.ScriptModule):
    """Stack of (optionally fusion-based) liGRU layers, JIT-scripted.

    Fusion (FusionLinearConv input projections) is only ever applied to the
    first layer; subsequent layers are plain liGRU layers. Input/output
    layout is (time, batch, feature); `out_dim` is 2 * hidden_size because
    the layers are always bidirectional.
    """

    def __init__(self, options, inp_dim):
        super(fusionRNN_jit, self).__init__()
        # Reading parameters
        input_size = inp_dim
        hidden_size = list(map(int, options["fusionRNN_lay"].split(",")))[0]
        dropout = list(map(float, options["fusionRNN_drop"].split(",")))[0]
        num_layers = len(list(map(int, options["fusionRNN_lay"].split(","))))
        batch_size = int(options["batches"])
        # BUGFIX: the original stored the raw `map` object here, which is
        # always truthy, so `if self.do_fusion:` ignored the configured value
        # and fusion could never be disabled. Parse the first flag into a
        # real boolean instead.
        self.do_fusion = bool(strtobool(options["fusionRNN_do_fusion"].split(",")[0]))
        self.act = str(options["fusionRNN_fusion_act"])
        self.reduce = str(options["fusionRNN_fusion_reduce"])
        self.fusion_layer_size = int(options["fusionRNN_fusion_layer_size"])
        self.to_do = options["to_do"]
        self.number_of_mic = int(options["fusionRNN_number_of_mic"])
        self.save_mic = self.number_of_mic
        bidirectional = True
        self.out_dim = 2 * hidden_size
        current_dim = int(input_size)
        self.model = torch.nn.ModuleList([])
        if self.to_do == "train":
            self.training = True
        else:
            self.training = False
        for i in range(num_layers):
            rnn_lay = liGRU_layer(
                current_dim,
                hidden_size,
                num_layers,
                batch_size,
                dropout=dropout,
                bidirectional=bidirectional,
                device="cuda",
                do_fusion=self.do_fusion,
                fusion_layer_size=self.fusion_layer_size,
                number_of_mic=self.number_of_mic,
                act=self.act,
                reduce=self.reduce,
            )
            if i == 0:
                if self.do_fusion:
                    # The fusion layer emits fusion_layer_size // nr_mics
                    # features per direction.
                    if bidirectional:
                        current_dim = (self.fusion_layer_size // self.save_mic) * 2
                    else:
                        current_dim = self.fusion_layer_size // self.save_mic
                    # We need to reset the number of mics for the next layers,
                    # which see a single (already fused) stream.
                    self.number_of_mic = 1
                else:
                    if bidirectional:
                        current_dim = hidden_size * 2
                    else:
                        current_dim = hidden_size
                self.do_fusion = False  # DO NOT APPLY FUSION ON THE NEXT LAYERS
            else:
                if bidirectional:
                    current_dim = hidden_size * 2
                else:
                    # BUGFIX: was `current_dim == hidden_size` — a no-op
                    # comparison instead of an assignment.
                    current_dim = hidden_size
            self.model.append(rnn_lay)

    @torch.jit.script_method
    def forward(self, x):
        # type: (Tensor) -> Tensor
        for ligru_lay in self.model:
            x = ligru_lay(x)
        return x
class liGRU_layer(torch.jit.ScriptModule):
    """Single (optionally bidirectional) light-GRU layer, JIT-scripted.

    Bidirectionality is implemented by stacking the time-flipped sequence
    along the batch axis before the recurrence and splitting it back after.
    NOTE(review): `device` is stored but several tensors below are created
    with device="cuda" hard-coded, so this layer effectively requires CUDA.
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        num_layers,
        batch_size,
        dropout=0.0,
        nonlinearity="relu",
        bidirectional=True,
        device="cuda",
        do_fusion=False,
        fusion_layer_size=64,
        number_of_mic=1,
        act="relu",
        reduce="mean",
    ):
        super(liGRU_layer, self).__init__()
        self.hidden_size = int(hidden_size)
        self.input_size = int(input_size)
        self.batch_size = batch_size
        self.bidirectional = bidirectional
        self.dropout = dropout
        self.device = device
        self.do_fusion = do_fusion
        self.fusion_layer_size = fusion_layer_size
        self.number_of_mic = number_of_mic
        self.act = act
        self.reduce = reduce
        # In fusion mode the effective hidden size is split across microphones.
        if self.do_fusion:
            self.hidden_size = self.fusion_layer_size // self.number_of_mic
        # Input projections for the update gate (wz) and candidate (wh):
        # FusionLinearConv when fusing multiple mics, plain Linear otherwise.
        if self.do_fusion:
            self.wz = FusionLinearConv(
                self.input_size, self.hidden_size, bias=True, number_of_mic = self.number_of_mic, act=self.act, reduce=self.reduce
            ).to(device)
            self.wh = FusionLinearConv(
                self.input_size, self.hidden_size, bias=True, number_of_mic = self.number_of_mic, act=self.act, reduce=self.reduce
            ).to(device)
        else:
            self.wz = nn.Linear(
                self.input_size, self.hidden_size, bias=True
            ).to(device)
            self.wh = nn.Linear(
                self.input_size, self.hidden_size, bias=True
            ).to(device)
            self.wz.bias.data.fill_(0)
            torch.nn.init.xavier_normal_(self.wz.weight.data)
            self.wh.bias.data.fill_(0)
            torch.nn.init.xavier_normal_(self.wh.weight.data)
        # Recurrent projection computes both gate and candidate terms at once
        # (chunked into uz/uh inside ligru_cell).
        self.u = nn.Linear(
            self.hidden_size, 2 * self.hidden_size, bias=False
        ).to(device)
        # Adding orthogonal initialization for recurrent connection
        nn.init.orthogonal_(self.u.weight)
        self.bn_wh = nn.BatchNorm1d(self.hidden_size, momentum=0.05).to(
            device
        )
        self.bn_wz = nn.BatchNorm1d(self.hidden_size, momentum=0.05).to(
            device
        )
        # Recurrent dropout: a pool of pre-sampled masks is drawn in advance
        # and cycled through; at eval time a constant 1.0 mask is used.
        self.drop = torch.nn.Dropout(p=self.dropout, inplace=False).to(device)
        self.drop_mask_te = torch.tensor([1.0], device=device).float()
        self.N_drop_masks = 100
        self.drop_mask_cnt = 0
        # Setting the activation function.
        # NOTE(review): this overwrites the `act` string stored above with a
        # fixed ReLU, so the configured activation name is unused here — confirm.
        self.act = torch.nn.ReLU().to(device)

    @torch.jit.script_method
    def forward(self, x):
        # type: (Tensor) -> Tensor
        # For the bidirectional case, stack the time-reversed sequence along
        # the batch axis so both directions share one recurrence.
        if self.bidirectional:
            x_flip = x.flip(0)
            x = torch.cat([x, x_flip], dim=1)
        # Feed-forward affine transformations (all steps in parallel)
        wz = self.wz(x)
        wh = self.wh(x)
        # Apply batch normalization (BatchNorm1d needs a 2D view).
        wz_bn = self.bn_wz(wz.view(wz.shape[0] * wz.shape[1], wz.shape[2]))
        wh_bn = self.bn_wh(wh.view(wh.shape[0] * wh.shape[1], wh.shape[2]))
        wz = wz_bn.view(wz.shape[0], wz.shape[1], wz.shape[2])
        wh = wh_bn.view(wh.shape[0], wh.shape[1], wh.shape[2])
        # Processing time steps
        h = self.ligru_cell(wz, wh)
        # Split the doubled batch back into forward/backward streams and
        # concatenate along the feature axis.
        if self.bidirectional:
            h_f, h_b = h.chunk(2, dim=1)
            h_b = h_b.flip(0)
            h = torch.cat([h_f, h_b], dim=2)
        return h

    @torch.jit.script_method
    def ligru_cell(self, wz, wh):
        # type: (Tensor, Tensor) -> Tensor
        # Zero initial state and a fresh pool of recurrent dropout masks.
        if self.bidirectional:
            h_init = torch.zeros(
                2 * self.batch_size,
                self.hidden_size,
                device="cuda",
            )
            drop_masks_i = self.drop(
                torch.ones(
                    self.N_drop_masks,
                    2 * self.batch_size,
                    self.hidden_size,
                    device="cuda",
                )
            ).data
        else:
            h_init = torch.zeros(
                self.batch_size,
                self.hidden_size,
                device="cuda",
            )
            drop_masks_i = self.drop(
                torch.ones(
                    self.N_drop_masks,
                    self.batch_size,
                    self.hidden_size,
                    device="cuda",
                )
            ).data
        hiddens = []
        ht = h_init
        if self.training:
            # Cycle through the pre-sampled masks; re-sample the pool when
            # exhausted.
            drop_mask = drop_masks_i[self.drop_mask_cnt]
            self.drop_mask_cnt = self.drop_mask_cnt + 1
            if self.drop_mask_cnt >= self.N_drop_masks:
                self.drop_mask_cnt = 0
                if self.bidirectional:
                    drop_masks_i = (
                        self.drop(
                            torch.ones(
                                self.N_drop_masks,
                                2 * self.batch_size,
                                self.hidden_size,
                            )
                        )
                        .to(self.device)
                        .data
                    )
                else:
                    drop_masks_i = (
                        self.drop(
                            torch.ones(
                                self.N_drop_masks,
                                self.batch_size,
                                self.hidden_size,
                            )
                        )
                        .to(self.device)
                        .data
                    )
        else:
            drop_mask = self.drop_mask_te
        for k in range(wh.shape[0]):
            # Recurrent contribution for gate and candidate in one matmul.
            uz, uh = self.u(ht).chunk(2, 1)
            at = wh[k] + uh
            zt = wz[k] + uz
            # ligru equation: interpolate previous state and ReLU candidate.
            zt = torch.sigmoid(zt)
            hcand = self.act(at) * drop_mask
            ht = zt * ht + (1 - zt) * hcand
            hiddens.append(ht)
        # Stacking hidden states
        h = torch.stack(hiddens)
        return h
class liGRU(nn.Module):
    """Light-GRU stack (single gate, no reset gate) with optional per-layer
    batch/layer norm and recurrent dropout.

    Input/output layout is (time, batch, feature). Bidirectionality is
    implemented by concatenating the time-flipped sequence along the batch
    axis (via the module-level `flip` helper); `out_dim` reflects the
    doubled feature size in that case.
    """

    def __init__(self, options, inp_dim):
        super(liGRU, self).__init__()
        # Reading parameters (comma-separated, one entry per layer)
        self.input_dim = inp_dim
        self.ligru_lay = list(map(int, options["ligru_lay"].split(",")))
        self.ligru_drop = list(map(float, options["ligru_drop"].split(",")))
        self.ligru_use_batchnorm = list(map(strtobool, options["ligru_use_batchnorm"].split(",")))
        self.ligru_use_laynorm = list(map(strtobool, options["ligru_use_laynorm"].split(",")))
        self.ligru_use_laynorm_inp = strtobool(options["ligru_use_laynorm_inp"])
        self.ligru_use_batchnorm_inp = strtobool(options["ligru_use_batchnorm_inp"])
        self.ligru_orthinit = strtobool(options["ligru_orthinit"])
        self.ligru_act = options["ligru_act"].split(",")
        self.bidir = strtobool(options["ligru_bidir"])
        self.use_cuda = strtobool(options["use_cuda"])
        self.to_do = options["to_do"]
        # test_flag disables recurrent dropout sampling at eval time
        if self.to_do == "train":
            self.test_flag = False
        else:
            self.test_flag = True
        # List initialization
        self.wh = nn.ModuleList([])
        self.uh = nn.ModuleList([])
        self.wz = nn.ModuleList([])  # Update Gate
        self.uz = nn.ModuleList([])  # Update Gate
        self.ln = nn.ModuleList([])  # Layer Norm
        self.bn_wh = nn.ModuleList([])  # Batch Norm
        self.bn_wz = nn.ModuleList([])  # Batch Norm
        self.act = nn.ModuleList([])  # Activations
        # Input layer normalization
        if self.ligru_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)
        # Input batch normalization
        if self.ligru_use_batchnorm_inp:
            self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
        self.N_ligru_lay = len(self.ligru_lay)
        current_input = self.input_dim
        # Initialization of hidden layers
        for i in range(self.N_ligru_lay):
            # Activations
            self.act.append(act_fun(self.ligru_act[i]))
            add_bias = True
            # Normalization layers carry their own shift, so skip the linear bias.
            if self.ligru_use_laynorm[i] or self.ligru_use_batchnorm[i]:
                add_bias = False
            # Feed-forward connections
            self.wh.append(nn.Linear(current_input, self.ligru_lay[i], bias=add_bias))
            self.wz.append(nn.Linear(current_input, self.ligru_lay[i], bias=add_bias))
            # Recurrent connections
            self.uh.append(nn.Linear(self.ligru_lay[i], self.ligru_lay[i], bias=False))
            self.uz.append(nn.Linear(self.ligru_lay[i], self.ligru_lay[i], bias=False))
            if self.ligru_orthinit:
                nn.init.orthogonal_(self.uh[i].weight)
                nn.init.orthogonal_(self.uz[i].weight)
            # batch norm initialization
            self.bn_wh.append(nn.BatchNorm1d(self.ligru_lay[i], momentum=0.05))
            self.bn_wz.append(nn.BatchNorm1d(self.ligru_lay[i], momentum=0.05))
            self.ln.append(LayerNorm(self.ligru_lay[i]))
            if self.bidir:
                current_input = 2 * self.ligru_lay[i]
            else:
                current_input = self.ligru_lay[i]
        self.out_dim = self.ligru_lay[i] + self.bidir * self.ligru_lay[i]

    def forward(self, x):
        # Applying Layer/Batch Norm on the input if configured
        if bool(self.ligru_use_laynorm_inp):
            x = self.ln0((x))
        if bool(self.ligru_use_batchnorm_inp):
            # BatchNorm1d needs a 2D view: fold (time, batch) together.
            x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
            x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])
        for i in range(self.N_ligru_lay):
            # Initial state and concatenation: for the bidirectional case the
            # time-reversed sequence is appended along the batch dimension.
            if self.bidir:
                h_init = torch.zeros(2 * x.shape[1], self.ligru_lay[i])
                x = torch.cat([x, flip(x, 0)], 1)
            else:
                h_init = torch.zeros(x.shape[1], self.ligru_lay[i])
            # Drop mask initialization (same mask for all time steps);
            # at test time a scalar expectation is used instead of sampling.
            if self.test_flag == False:
                drop_mask = torch.bernoulli(
                    torch.Tensor(h_init.shape[0], h_init.shape[1]).fill_(1 - self.ligru_drop[i])
                )
            else:
                drop_mask = torch.FloatTensor([1 - self.ligru_drop[i]])
            if self.use_cuda:
                h_init = h_init.cuda()
                drop_mask = drop_mask.cuda()
            # Feed-forward affine transformations (all steps in parallel)
            wh_out = self.wh[i](x)
            wz_out = self.wz[i](x)
            # Apply batch norm if needed (all steps in parallel)
            if self.ligru_use_batchnorm[i]:
                wh_out_bn = self.bn_wh[i](wh_out.view(wh_out.shape[0] * wh_out.shape[1], wh_out.shape[2]))
                wh_out = wh_out_bn.view(wh_out.shape[0], wh_out.shape[1], wh_out.shape[2])
                wz_out_bn = self.bn_wz[i](wz_out.view(wz_out.shape[0] * wz_out.shape[1], wz_out.shape[2]))
                wz_out = wz_out_bn.view(wz_out.shape[0], wz_out.shape[1], wz_out.shape[2])
            # Processing time steps
            hiddens = []
            ht = h_init
            for k in range(x.shape[0]):
                # ligru equation: single update gate, no reset gate.
                zt = torch.sigmoid(wz_out[k] + self.uz[i](ht))
                at = wh_out[k] + self.uh[i](ht)
                hcand = self.act[i](at) * drop_mask
                ht = zt * ht + (1 - zt) * hcand
                if self.ligru_use_laynorm[i]:
                    ht = self.ln[i](ht)
                hiddens.append(ht)
            # Stacking hidden states
            h = torch.stack(hiddens)
            # Bidirectional concatenations: split the doubled batch back into
            # forward/backward streams and concatenate along features.
            if self.bidir:
                h_f = h[:, 0 : int(x.shape[1] / 2)]
                h_b = flip(h[:, int(x.shape[1] / 2) : x.shape[1]].contiguous(), 0)
                h = torch.cat([h_f, h_b], 2)
            # Setup x for the next hidden layer
            x = h
        return x
class minimalGRU(nn.Module):
def __init__(self, options, inp_dim):
super(minimalGRU, self).__init__()
# Reading parameters
self.input_dim = inp_dim
self.minimalgru_lay = list(map(int, options["minimalgru_lay"].split(",")))
self.minimalgru_drop = list(map(float, options["minimalgru_drop"].split(",")))
self.minimalgru_use_batchnorm = list(map(strtobool, options["minimalgru_use_batchnorm"].split(",")))
self.minimalgru_use_laynorm = list(map(strtobool, options["minimalgru_use_laynorm"].split(",")))
self.minimalgru_use_laynorm_inp = strtobool(options["minimalgru_use_laynorm_inp"])
self.minimalgru_use_batchnorm_inp = strtobool(options["minimalgru_use_batchnorm_inp"])
self.minimalgru_orthinit = strtobool(options["minimalgru_orthinit"])
self.minimalgru_act = options["minimalgru_act"].split(",")
self.bidir = strtobool(options["minimalgru_bidir"])
self.use_cuda = strtobool(options["use_cuda"])
self.to_do = options["to_do"]
if self.to_do == "train":
self.test_flag = False
else:
self.test_flag = True
# List initialization
self.wh = nn.ModuleList([])
self.uh = nn.ModuleList([])
self.wz = nn.ModuleList([]) # Update Gate
self.uz = nn.ModuleList([]) # Update Gate
self.ln = nn.ModuleList([]) # Layer Norm
self.bn_wh = nn.ModuleList([]) # Batch Norm
self.bn_wz = nn.ModuleList([]) # Batch Norm
self.act = nn.ModuleList([]) # Activations
# Input layer normalization
if self.minimalgru_use_laynorm_inp:
self.ln0 = LayerNorm(self.input_dim)
# Input batch normalization
if self.minimalgru_use_batchnorm_inp:
self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
self.N_minimalgru_lay = len(self.minimalgru_lay)
current_input = self.input_dim
# Initialization of hidden layers
for i in range(self.N_minimalgru_lay):
# Activations
self.act.append(act_fun(self.minimalgru_act[i]))
add_bias = True
if self.minimalgru_use_laynorm[i] or self.minimalgru_use_batchnorm[i]:
add_bias = False
# Feed-forward connections
self.wh.append(nn.Linear(current_input, self.minimalgru_lay[i], bias=add_bias))
self.wz.append(nn.Linear(current_input, self.minimalgru_lay[i], bias=add_bias))
# Recurrent connections
self.uh.append(nn.Linear(self.minimalgru_lay[i], self.minimalgru_lay[i], bias=False))
self.uz.append(nn.Linear(self.minimalgru_lay[i], self.minimalgru_lay[i], bias=False))
if self.minimalgru_orthinit:
nn.init.orthogonal_(self.uh[i].weight)
nn.init.orthogonal_(self.uz[i].weight)
# batch norm initialization
self.bn_wh.append(nn.BatchNorm1d(self.minimalgru_lay[i], momentum=0.05))
self.bn_wz.append(nn.BatchNorm1d(self.minimalgru_lay[i], momentum=0.05))
self.ln.append(LayerNorm(self.minimalgru_lay[i]))
if self.bidir:
current_input = 2 * self.minimalgru_lay[i]
else:
current_input = self.minimalgru_lay[i]
self.out_dim = self.minimalgru_lay[i] + self.bidir * self.minimalgru_lay[i]
    def forward(self, x):
        """Apply the stacked minimal-GRU layers to ``x``.

        ``x`` is laid out as (time_steps, batch, features): the recurrence
        below iterates over ``x.shape[0]`` and sizes the hidden state by
        ``x.shape[1]``.  Returns the last layer's hidden states with the
        same (time, batch, features) layout.
        """
        # Applying Layer/Batch Norm
        if bool(self.minimalgru_use_laynorm_inp):
            x = self.ln0((x))
        if bool(self.minimalgru_use_batchnorm_inp):
            # BatchNorm1d expects 2-D input: fold (time, batch), then restore.
            x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
            x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])
        for i in range(self.N_minimalgru_lay):
            # Initial state and concatenation.  For the bidirectional case the
            # time-reversed sequence is stacked along the batch dimension so
            # both directions are processed by one pass of the same recurrence.
            if self.bidir:
                h_init = torch.zeros(2 * x.shape[1], self.minimalgru_lay[i])
                x = torch.cat([x, flip(x, 0)], 1)
            else:
                h_init = torch.zeros(x.shape[1], self.minimalgru_lay[i])
            # Drop mask initilization (same mask for all time steps)
            if self.test_flag == False:
                drop_mask = torch.bernoulli(
                    torch.Tensor(h_init.shape[0], h_init.shape[1]).fill_(1 - self.minimalgru_drop[i])
                )
            else:
                # At test time dropout becomes a deterministic rescaling.
                drop_mask = torch.FloatTensor([1 - self.minimalgru_drop[i]])
            if self.use_cuda:
                h_init = h_init.cuda()
                drop_mask = drop_mask.cuda()
            # Feed-forward affine transformations (all steps in parallel)
            wh_out = self.wh[i](x)
            wz_out = self.wz[i](x)
            # Apply batch norm if needed (all steps in parallel)
            if self.minimalgru_use_batchnorm[i]:
                wh_out_bn = self.bn_wh[i](wh_out.view(wh_out.shape[0] * wh_out.shape[1], wh_out.shape[2]))
                wh_out = wh_out_bn.view(wh_out.shape[0], wh_out.shape[1], wh_out.shape[2])
                wz_out_bn = self.bn_wz[i](wz_out.view(wz_out.shape[0] * wz_out.shape[1], wz_out.shape[2]))
                wz_out = wz_out_bn.view(wz_out.shape[0], wz_out.shape[1], wz_out.shape[2])
            # Processing time steps
            hiddens = []
            ht = h_init
            for k in range(x.shape[0]):
                # minimalgru equation: z gates the blend between the previous
                # state and the dropout-masked candidate activation.
                zt = torch.sigmoid(wz_out[k] + self.uz[i](ht))
                at = wh_out[k] + self.uh[i](zt * ht)
                hcand = self.act[i](at) * drop_mask
                ht = zt * ht + (1 - zt) * hcand
                if self.minimalgru_use_laynorm[i]:
                    ht = self.ln[i](ht)
                hiddens.append(ht)
            # Stacking hidden states
            h = torch.stack(hiddens)
            # Bidirectional concatenations: split the doubled batch back into
            # forward/backward halves, re-reverse the backward half in time and
            # concatenate along the feature dimension.
            if self.bidir:
                h_f = h[:, 0 : int(x.shape[1] / 2)]
                h_b = flip(h[:, int(x.shape[1] / 2) : x.shape[1]].contiguous(), 0)
                h = torch.cat([h_f, h_b], 2)
            # Setup x for the next hidden layer
            x = h
        return x
class RNN(nn.Module):
    """Stack of vanilla (Elman) recurrent layers with optional
    bidirectionality, per-layer dropout and layer/batch normalisation.

    All hyper-parameters come from the string-valued ``options`` dict
    (comma-separated per-layer settings); ``inp_dim`` is the input feature
    size.  ``self.out_dim`` exposes the output feature size (doubled when
    bidirectional).
    """

    def __init__(self, options, inp_dim):
        super(RNN, self).__init__()
        # Reading parameters
        self.input_dim = inp_dim
        self.rnn_lay = list(map(int, options["rnn_lay"].split(",")))
        self.rnn_drop = list(map(float, options["rnn_drop"].split(",")))
        self.rnn_use_batchnorm = list(map(strtobool, options["rnn_use_batchnorm"].split(",")))
        self.rnn_use_laynorm = list(map(strtobool, options["rnn_use_laynorm"].split(",")))
        self.rnn_use_laynorm_inp = strtobool(options["rnn_use_laynorm_inp"])
        self.rnn_use_batchnorm_inp = strtobool(options["rnn_use_batchnorm_inp"])
        self.rnn_orthinit = strtobool(options["rnn_orthinit"])
        self.rnn_act = options["rnn_act"].split(",")
        self.bidir = strtobool(options["rnn_bidir"])
        self.use_cuda = strtobool(options["use_cuda"])
        self.to_do = options["to_do"]
        # Dropout is only sampled during training; at test time a fixed
        # rescaling is applied instead (see forward()).
        if self.to_do == "train":
            self.test_flag = False
        else:
            self.test_flag = True
        # List initialization
        self.wh = nn.ModuleList([])  # feed-forward weights, one per layer
        self.uh = nn.ModuleList([])  # recurrent weights, one per layer
        self.ln = nn.ModuleList([])  # Layer Norm
        self.bn_wh = nn.ModuleList([])  # Batch Norm
        self.act = nn.ModuleList([])  # Activations
        # Input layer normalization
        if self.rnn_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)
        # Input batch normalization
        if self.rnn_use_batchnorm_inp:
            self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)
        self.N_rnn_lay = len(self.rnn_lay)
        current_input = self.input_dim
        # Initialization of hidden layers
        for i in range(self.N_rnn_lay):
            # Activations
            self.act.append(act_fun(self.rnn_act[i]))
            # The affine bias is redundant when a normalisation layer follows.
            add_bias = True
            if self.rnn_use_laynorm[i] or self.rnn_use_batchnorm[i]:
                add_bias = False
            # Feed-forward connections
            self.wh.append(nn.Linear(current_input, self.rnn_lay[i], bias=add_bias))
            # Recurrent connections
            self.uh.append(nn.Linear(self.rnn_lay[i], self.rnn_lay[i], bias=False))
            if self.rnn_orthinit:
                nn.init.orthogonal_(self.uh[i].weight)
            # batch norm initialization
            self.bn_wh.append(nn.BatchNorm1d(self.rnn_lay[i], momentum=0.05))
            self.ln.append(LayerNorm(self.rnn_lay[i]))
            if self.bidir:
                current_input = 2 * self.rnn_lay[i]
            else:
                current_input = self.rnn_lay[i]
        self.out_dim = self.rnn_lay[i] + self.bidir * self.rnn_lay[i]

    def forward(self, x):
        """Apply the recurrent stack to ``x`` laid out as
        (time_steps, batch, features); returns the last layer's hidden
        states with the same layout."""
        # Applying Layer/Batch Norm
        if bool(self.rnn_use_laynorm_inp):
            x = self.ln0((x))
        if bool(self.rnn_use_batchnorm_inp):
            # BatchNorm1d expects 2-D input: fold (time, batch), then restore.
            x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
            x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])
        for i in range(self.N_rnn_lay):
            # Initial state and concatenation: the time-reversed sequence is
            # stacked along the batch axis so both directions share one pass.
            if self.bidir:
                h_init = torch.zeros(2 * x.shape[1], self.rnn_lay[i])
                x = torch.cat([x, flip(x, 0)], 1)
            else:
                h_init = torch.zeros(x.shape[1], self.rnn_lay[i])
            # Drop mask initilization (same mask for all time steps)
            if self.test_flag == False:
                drop_mask = torch.bernoulli(torch.Tensor(h_init.shape[0], h_init.shape[1]).fill_(1 - self.rnn_drop[i]))
            else:
                # Deterministic rescaling at test time.
                drop_mask = torch.FloatTensor([1 - self.rnn_drop[i]])
            if self.use_cuda:
                h_init = h_init.cuda()
                drop_mask = drop_mask.cuda()
            # Feed-forward affine transformations (all steps in parallel)
            wh_out = self.wh[i](x)
            # Apply batch norm if needed (all steps in parallel)
            if self.rnn_use_batchnorm[i]:
                wh_out_bn = self.bn_wh[i](wh_out.view(wh_out.shape[0] * wh_out.shape[1], wh_out.shape[2]))
                wh_out = wh_out_bn.view(wh_out.shape[0], wh_out.shape[1], wh_out.shape[2])
            # Processing time steps
            hiddens = []
            ht = h_init
            for k in range(x.shape[0]):
                # rnn equation: h_t = act(W x_t + U h_{t-1}), with dropout mask.
                at = wh_out[k] + self.uh[i](ht)
                ht = self.act[i](at) * drop_mask
                if self.rnn_use_laynorm[i]:
                    ht = self.ln[i](ht)
                hiddens.append(ht)
            # Stacking hidden states
            h = torch.stack(hiddens)
            # Bidirectional concatenations: split the doubled batch back into
            # forward/backward halves, re-reverse the backward half in time and
            # concatenate along the feature dimension.
            if self.bidir:
                h_f = h[:, 0 : int(x.shape[1] / 2)]
                h_b = flip(h[:, int(x.shape[1] / 2) : x.shape[1]].contiguous(), 0)
                h = torch.cat([h_f, h_b], 2)
            # Setup x for the next hidden layer
            x = h
        return x
class CNN(nn.Module):
    """1-D convolutional front-end operating directly on waveform chunks.

    Each layer applies Conv1d -> max-pool -> (layer|batch) norm ->
    activation -> dropout.  ``self.out_dim`` is the flattened output size
    (filters x pooled length of the last layer).
    """

    def __init__(self, options, inp_dim):
        super(CNN, self).__init__()
        # Reading parameters
        self.input_dim = inp_dim
        self.cnn_N_filt = list(map(int, options["cnn_N_filt"].split(",")))
        self.cnn_len_filt = list(map(int, options["cnn_len_filt"].split(",")))
        self.cnn_max_pool_len = list(map(int, options["cnn_max_pool_len"].split(",")))
        self.cnn_act = options["cnn_act"].split(",")
        self.cnn_drop = list(map(float, options["cnn_drop"].split(",")))
        self.cnn_use_laynorm = list(map(strtobool, options["cnn_use_laynorm"].split(",")))
        self.cnn_use_batchnorm = list(map(strtobool, options["cnn_use_batchnorm"].split(",")))
        self.cnn_use_laynorm_inp = strtobool(options["cnn_use_laynorm_inp"])
        self.cnn_use_batchnorm_inp = strtobool(options["cnn_use_batchnorm_inp"])
        self.N_cnn_lay = len(self.cnn_N_filt)
        self.conv = nn.ModuleList([])
        self.bn = nn.ModuleList([])
        self.ln = nn.ModuleList([])
        self.act = nn.ModuleList([])
        self.drop = nn.ModuleList([])
        if self.cnn_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)
        if self.cnn_use_batchnorm_inp:
            # NOTE(review): num_features is passed as a one-element list here;
            # nn.BatchNorm1d expects an int, so this branch looks broken if
            # ever enabled -- confirm against the configs actually in use.
            self.bn0 = nn.BatchNorm1d([self.input_dim], momentum=0.05)
        current_input = self.input_dim
        for i in range(self.N_cnn_lay):
            N_filt = int(self.cnn_N_filt[i])
            len_filt = int(self.cnn_len_filt[i])
            # dropout
            self.drop.append(nn.Dropout(p=self.cnn_drop[i]))
            # activation
            self.act.append(act_fun(self.cnn_act[i]))
            # layer norm initialization (normalises over filters x pooled length)
            self.ln.append(
                LayerNorm([N_filt, int((current_input - self.cnn_len_filt[i] + 1) / self.cnn_max_pool_len[i])])
            )
            # NOTE(review): the second positional argument of nn.BatchNorm1d is
            # ``eps``; passing the pooled sequence length here (inherited from
            # the upstream SincNet code) makes eps huge and looks unintended --
            # but changing it would alter trained-model numerics, so it is only
            # flagged here.
            self.bn.append(
                nn.BatchNorm1d(
                    N_filt, int((current_input - self.cnn_len_filt[i] + 1) / self.cnn_max_pool_len[i]), momentum=0.05
                )
            )
            if i == 0:
                self.conv.append(nn.Conv1d(1, N_filt, len_filt))
            else:
                self.conv.append(nn.Conv1d(self.cnn_N_filt[i - 1], self.cnn_N_filt[i], self.cnn_len_filt[i]))
            # Track the (valid-convolution, then pooled) sequence length.
            current_input = int((current_input - self.cnn_len_filt[i] + 1) / self.cnn_max_pool_len[i])
        self.out_dim = current_input * N_filt

    def forward(self, x):
        """x: (batch, samples) waveform chunks -> (batch, out_dim) features."""
        batch = x.shape[0]
        seq_len = x.shape[1]
        if bool(self.cnn_use_laynorm_inp):
            x = self.ln0((x))
        if bool(self.cnn_use_batchnorm_inp):
            x = self.bn0((x))
        # Add the single input channel expected by Conv1d.
        x = x.view(batch, 1, seq_len)
        for i in range(self.N_cnn_lay):
            if self.cnn_use_laynorm[i]:
                x = self.drop[i](self.act[i](self.ln[i](F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i]))))
            if self.cnn_use_batchnorm[i]:
                x = self.drop[i](self.act[i](self.bn[i](F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i]))))
            if self.cnn_use_batchnorm[i] == False and self.cnn_use_laynorm[i] == False:
                x = self.drop[i](self.act[i](F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i])))
        # Flatten (channels, time) into one feature vector per example.
        x = x.view(batch, -1)
        return x
class SincNet(nn.Module):
    """Convolutional front-end whose first layer is a learnable sinc-based
    band-pass filterbank (SincConv); subsequent layers are standard Conv1d.

    Each layer applies conv -> max-pool -> (layer|batch) norm -> activation
    -> dropout; ``self.out_dim`` is the flattened output size.
    """

    def __init__(self, options, inp_dim):
        super(SincNet, self).__init__()
        # Reading parameters
        self.input_dim = inp_dim
        self.sinc_N_filt = list(map(int, options["sinc_N_filt"].split(",")))
        self.sinc_len_filt = list(map(int, options["sinc_len_filt"].split(",")))
        self.sinc_max_pool_len = list(map(int, options["sinc_max_pool_len"].split(",")))
        self.sinc_act = options["sinc_act"].split(",")
        self.sinc_drop = list(map(float, options["sinc_drop"].split(",")))
        self.sinc_use_laynorm = list(map(strtobool, options["sinc_use_laynorm"].split(",")))
        self.sinc_use_batchnorm = list(map(strtobool, options["sinc_use_batchnorm"].split(",")))
        self.sinc_use_laynorm_inp = strtobool(options["sinc_use_laynorm_inp"])
        self.sinc_use_batchnorm_inp = strtobool(options["sinc_use_batchnorm_inp"])
        self.N_sinc_lay = len(self.sinc_N_filt)
        self.sinc_sample_rate = int(options["sinc_sample_rate"])
        self.sinc_min_low_hz = int(options["sinc_min_low_hz"])
        self.sinc_min_band_hz = int(options["sinc_min_band_hz"])
        self.conv = nn.ModuleList([])
        self.bn = nn.ModuleList([])
        self.ln = nn.ModuleList([])
        self.act = nn.ModuleList([])
        self.drop = nn.ModuleList([])
        if self.sinc_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)
        if self.sinc_use_batchnorm_inp:
            # NOTE(review): num_features is passed as a one-element list here;
            # nn.BatchNorm1d expects an int, so this branch looks broken if
            # ever enabled -- confirm against the configs actually in use.
            self.bn0 = nn.BatchNorm1d([self.input_dim], momentum=0.05)
        current_input = self.input_dim
        for i in range(self.N_sinc_lay):
            N_filt = int(self.sinc_N_filt[i])
            len_filt = int(self.sinc_len_filt[i])
            # dropout
            self.drop.append(nn.Dropout(p=self.sinc_drop[i]))
            # activation
            self.act.append(act_fun(self.sinc_act[i]))
            # layer norm initialization (normalises over filters x pooled length)
            self.ln.append(
                LayerNorm([N_filt, int((current_input - self.sinc_len_filt[i] + 1) / self.sinc_max_pool_len[i])])
            )
            # NOTE(review): the second positional argument of nn.BatchNorm1d is
            # ``eps``; passing the pooled sequence length here (inherited from
            # upstream SincNet) makes eps huge and looks unintended -- flagged
            # only, since changing it would alter trained-model numerics.
            self.bn.append(
                nn.BatchNorm1d(
                    N_filt, int((current_input - self.sinc_len_filt[i] + 1) / self.sinc_max_pool_len[i]), momentum=0.05
                )
            )
            # First layer: learnable sinc filterbank; later layers: plain Conv1d.
            if i == 0:
                self.conv.append(
                    SincConv(
                        1,
                        N_filt,
                        len_filt,
                        sample_rate=self.sinc_sample_rate,
                        min_low_hz=self.sinc_min_low_hz,
                        min_band_hz=self.sinc_min_band_hz,
                    )
                )
            else:
                self.conv.append(nn.Conv1d(self.sinc_N_filt[i - 1], self.sinc_N_filt[i], self.sinc_len_filt[i]))
            # Track the (valid-convolution, then pooled) sequence length.
            current_input = int((current_input - self.sinc_len_filt[i] + 1) / self.sinc_max_pool_len[i])
        self.out_dim = current_input * N_filt

    def forward(self, x):
        """x: (batch, samples) waveform chunks -> (batch, out_dim) features."""
        batch = x.shape[0]
        seq_len = x.shape[1]
        if bool(self.sinc_use_laynorm_inp):
            x = self.ln0(x)
        if bool(self.sinc_use_batchnorm_inp):
            x = self.bn0(x)
        # Add the single input channel expected by the convolutions.
        x = x.view(batch, 1, seq_len)
        for i in range(self.N_sinc_lay):
            if self.sinc_use_laynorm[i]:
                x = self.drop[i](self.act[i](self.ln[i](F.max_pool1d(self.conv[i](x), self.sinc_max_pool_len[i]))))
            if self.sinc_use_batchnorm[i]:
                x = self.drop[i](self.act[i](self.bn[i](F.max_pool1d(self.conv[i](x), self.sinc_max_pool_len[i]))))
            if self.sinc_use_batchnorm[i] == False and self.sinc_use_laynorm[i] == False:
                x = self.drop[i](self.act[i](F.max_pool1d(self.conv[i](x), self.sinc_max_pool_len[i])))
        # Flatten (channels, time) into one feature vector per example.
        x = x.view(batch, -1)
        return x
class SincConv(nn.Module):
    """Sinc-based convolution
    Parameters
    ----------
    in_channels : `int`
        Number of input channels. Must be 1.
    out_channels : `int`
        Number of filters.
    kernel_size : `int`
        Filter length.
    sample_rate : `int`, optional
        Sample rate. Defaults to 16000.
    Usage
    -----
    See `torch.nn.Conv1d`
    Reference
    ---------
    Mirco Ravanelli, Yoshua Bengio,
    "Speaker Recognition from raw waveform with SincNet".
    https://arxiv.org/abs/1808.00158
    """

    @staticmethod
    def to_mel(hz):
        # Convert a frequency in Hz to the mel scale.
        return 2595 * np.log10(1 + hz / 700)

    @staticmethod
    def to_hz(mel):
        # Convert a mel value back to Hz (inverse of to_mel).
        return 700 * (10 ** (mel / 2595) - 1)

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        bias=False,
        groups=1,
        sample_rate=16000,
        min_low_hz=50,
        min_band_hz=50,
    ):
        super(SincConv, self).__init__()
        if in_channels != 1:
            # msg = (f'SincConv only support one input channel '
            #        f'(here, in_channels = {in_channels:d}).')
            msg = "SincConv only support one input channel (here, in_channels = {%i})" % (in_channels)
            raise ValueError(msg)
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # Forcing the filters to be odd (i.e, perfectly symmetrics)
        if kernel_size % 2 == 0:
            self.kernel_size = self.kernel_size + 1
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        if bias:
            raise ValueError("SincConv does not support bias.")
        if groups > 1:
            raise ValueError("SincConv does not support groups.")
        self.sample_rate = sample_rate
        self.min_low_hz = min_low_hz
        self.min_band_hz = min_band_hz
        # initialize filterbanks such that they are equally spaced in Mel scale
        low_hz = 30
        high_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)
        mel = np.linspace(self.to_mel(low_hz), self.to_mel(high_hz), self.out_channels + 1)
        # Cutoffs are stored normalised by the sample rate here (unlike
        # SincConv_fast, which keeps them in Hz).
        hz = self.to_hz(mel) / self.sample_rate
        # filter lower frequency (out_channels, 1) -- learnable parameter
        self.low_hz_ = nn.Parameter(torch.Tensor(hz[:-1]).view(-1, 1))
        # filter frequency band (out_channels, 1) -- learnable parameter
        self.band_hz_ = nn.Parameter(torch.Tensor(np.diff(hz)).view(-1, 1))
        # Hamming window
        # self.window_ = torch.hamming_window(self.kernel_size)
        n_lin = torch.linspace(0, self.kernel_size, steps=self.kernel_size)
        self.window_ = 0.54 - 0.46 * torch.cos(2 * math.pi * n_lin / self.kernel_size)
        # (kernel_size, 1): symmetric time axis -n..n in seconds.
        n = (self.kernel_size - 1) / 2
        self.n_ = torch.arange(-n, n + 1).view(1, -1) / self.sample_rate

    def sinc(self, x):
        # Numerically stable definition: compute sin(x)/x on the left half
        # only, mirror it for the right half, and set the centre sample
        # (x == 0) to 1 explicitly to avoid the 0/0 singularity.
        x_left = x[:, 0 : int((x.shape[1] - 1) / 2)]
        y_left = torch.sin(x_left) / x_left
        y_right = torch.flip(y_left, dims=[1])
        sinc = torch.cat([y_left, torch.ones([x.shape[0], 1]).to(x.device), y_right], dim=1)
        return sinc

    def forward(self, waveforms):
        """
        Parameters
        ----------
        waveforms : `torch.Tensor` (batch_size, 1, n_samples)
            Batch of waveforms.
        Returns
        -------
        features : `torch.Tensor` (batch_size, out_channels, n_samples_out)
            Batch of sinc filters activations.
        """
        self.n_ = self.n_.to(waveforms.device)
        self.window_ = self.window_.to(waveforms.device)
        # abs() keeps the learned cutoffs non-negative; min_low_hz/min_band_hz
        # enforce lower bounds on cutoff and bandwidth.
        low = self.min_low_hz / self.sample_rate + torch.abs(self.low_hz_)
        high = low + self.min_band_hz / self.sample_rate + torch.abs(self.band_hz_)
        # Band-pass filter = difference of two low-pass sinc filters.
        f_times_t = torch.matmul(low, self.n_)
        low_pass1 = 2 * low * self.sinc(2 * math.pi * f_times_t * self.sample_rate)
        f_times_t = torch.matmul(high, self.n_)
        low_pass2 = 2 * high * self.sinc(2 * math.pi * f_times_t * self.sample_rate)
        band_pass = low_pass2 - low_pass1
        # Normalise each filter by its maximum, then apply the Hamming window.
        max_, _ = torch.max(band_pass, dim=1, keepdim=True)
        band_pass = band_pass / max_
        self.filters = (band_pass * self.window_).view(self.out_channels, 1, self.kernel_size)
        return F.conv1d(
            waveforms,
            self.filters,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            bias=None,
            groups=1,
        )
class SincConv_fast(nn.Module):
    """Sinc-based convolution
    Parameters
    ----------
    in_channels : `int`
        Number of input channels. Must be 1.
    out_channels : `int`
        Number of filters.
    kernel_size : `int`
        Filter length.
    sample_rate : `int`, optional
        Sample rate. Defaults to 16000.
    Usage
    -----
    See `torch.nn.Conv1d`
    Reference
    ---------
    Mirco Ravanelli, Yoshua Bengio,
    "Speaker Recognition from raw waveform with SincNet".
    https://arxiv.org/abs/1808.00158
    """

    @staticmethod
    def to_mel(hz):
        # Convert a frequency in Hz to the mel scale.
        return 2595 * np.log10(1 + hz / 700)

    @staticmethod
    def to_hz(mel):
        # Convert a mel value back to Hz (inverse of to_mel).
        return 700 * (10 ** (mel / 2595) - 1)

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        bias=False,
        groups=1,
        sample_rate=16000,
        min_low_hz=50,
        min_band_hz=50,
    ):
        super(SincConv_fast, self).__init__()
        if in_channels != 1:
            # msg = (f'SincConv only support one input channel '
            #        f'(here, in_channels = {in_channels:d}).')
            msg = "SincConv only support one input channel (here, in_channels = {%i})" % (in_channels)
            raise ValueError(msg)
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # Forcing the filters to be odd (i.e, perfectly symmetrics)
        if kernel_size % 2 == 0:
            self.kernel_size = self.kernel_size + 1
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        if bias:
            raise ValueError("SincConv does not support bias.")
        if groups > 1:
            raise ValueError("SincConv does not support groups.")
        self.sample_rate = sample_rate
        self.min_low_hz = min_low_hz
        self.min_band_hz = min_band_hz
        # initialize filterbanks such that they are equally spaced in Mel scale
        low_hz = 30
        high_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)
        mel = np.linspace(self.to_mel(low_hz), self.to_mel(high_hz), self.out_channels + 1)
        # Cutoffs are kept in Hz here (unlike SincConv, which normalises them
        # by the sample rate).
        hz = self.to_hz(mel)
        # filter lower frequency (out_channels, 1) -- learnable parameter
        self.low_hz_ = nn.Parameter(torch.Tensor(hz[:-1]).view(-1, 1))
        # filter frequency band (out_channels, 1) -- learnable parameter
        self.band_hz_ = nn.Parameter(torch.Tensor(np.diff(hz)).view(-1, 1))
        # Hamming window
        # self.window_ = torch.hamming_window(self.kernel_size)
        n_lin = torch.linspace(
            0, (self.kernel_size / 2) - 1, steps=int((self.kernel_size / 2))
        )  # computing only half of the window
        self.window_ = 0.54 - 0.46 * torch.cos(2 * math.pi * n_lin / self.kernel_size)
        # (kernel_size, 1)
        n = (self.kernel_size - 1) / 2.0
        self.n_ = (
            2 * math.pi * torch.arange(-n, 0).view(1, -1) / self.sample_rate
        )  # Due to symmetry, I only need half of the time axes

    def forward(self, waveforms):
        """
        Parameters
        ----------
        waveforms : `torch.Tensor` (batch_size, 1, n_samples)
            Batch of waveforms.
        Returns
        -------
        features : `torch.Tensor` (batch_size, out_channels, n_samples_out)
            Batch of sinc filters activations.
        """
        self.n_ = self.n_.to(waveforms.device)
        self.window_ = self.window_.to(waveforms.device)
        # abs() keeps the learned cutoffs non-negative; clamp bounds the upper
        # cutoff between min_low_hz and the Nyquist frequency.
        low = self.min_low_hz + torch.abs(self.low_hz_)
        high = torch.clamp(low + self.min_band_hz + torch.abs(self.band_hz_), self.min_low_hz, self.sample_rate / 2)
        band = (high - low)[:, 0]
        f_times_t_low = torch.matmul(low, self.n_)
        f_times_t_high = torch.matmul(high, self.n_)
        # Only the left half of each (symmetric) filter is computed; the right
        # half is obtained by mirroring and the centre sample equals 2*band.
        band_pass_left = (
            (torch.sin(f_times_t_high) - torch.sin(f_times_t_low)) / (self.n_ / 2)
        ) * self.window_  # Equivalent of Eq.4 of the reference paper (SPEAKER RECOGNITION FROM RAW WAVEFORM WITH SINCNET). I just have expanded the sinc and simplified the terms. This way I avoid several useless computations.
        band_pass_center = 2 * band.view(-1, 1)
        band_pass_right = torch.flip(band_pass_left, dims=[1])
        band_pass = torch.cat([band_pass_left, band_pass_center, band_pass_right], dim=1)
        # Normalise each filter by twice its bandwidth.
        band_pass = band_pass / (2 * band[:, None])
        self.filters = (band_pass).view(self.out_channels, 1, self.kernel_size)
        return F.conv1d(
            waveforms,
            self.filters,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            bias=None,
            groups=1,
        )
def flip(x, dim):
    """Return a copy of tensor ``x`` reversed along dimension ``dim``.

    ``dim`` may be negative (counted from the end).  This replaces the
    original manual view/index-gather implementation with the equivalent
    built-in ``torch.flip`` (already used elsewhere in this file), which
    handles negative dims and device placement natively.
    """
    return torch.flip(x, dims=[dim])
class SRU(nn.Module):
    """Wrapper around the external ``sru`` package (Simple Recurrent Unit).

    All hyper-parameters are read from the string-valued ``options`` dict;
    ``self.out_dim`` doubles when the layer is bidirectional.
    """

    def __init__(self, options, inp_dim):
        super(SRU, self).__init__()
        self.input_dim = inp_dim
        self.hidden_size = int(options["sru_hidden_size"])
        self.num_layers = int(options["sru_num_layers"])
        self.dropout = float(options["sru_dropout"])
        self.rnn_dropout = float(options["sru_rnn_dropout"])
        self.use_tanh = bool(strtobool(options["sru_use_tanh"]))
        self.use_relu = bool(strtobool(options["sru_use_relu"]))
        self.use_selu = bool(strtobool(options["sru_use_selu"]))
        self.weight_norm = bool(strtobool(options["sru_weight_norm"]))
        self.layer_norm = bool(strtobool(options["sru_layer_norm"]))
        self.bidirectional = bool(strtobool(options["sru_bidirectional"]))
        self.is_input_normalized = bool(strtobool(options["sru_is_input_normalized"]))
        self.has_skip_term = bool(strtobool(options["sru_has_skip_term"]))
        self.rescale = bool(strtobool(options["sru_rescale"]))
        self.highway_bias = float(options["sru_highway_bias"])
        self.n_proj = int(options["sru_n_proj"])
        # The actual recurrence is delegated to the third-party sru package.
        self.sru = sru.SRU(
            self.input_dim,
            self.hidden_size,
            num_layers=self.num_layers,
            dropout=self.dropout,
            rnn_dropout=self.rnn_dropout,
            bidirectional=self.bidirectional,
            n_proj=self.n_proj,
            use_tanh=self.use_tanh,
            use_selu=self.use_selu,
            use_relu=self.use_relu,
            weight_norm=self.weight_norm,
            layer_norm=self.layer_norm,
            has_skip_term=self.has_skip_term,
            is_input_normalized=self.is_input_normalized,
            highway_bias=self.highway_bias,
            rescale=self.rescale,
        )
        self.out_dim = self.hidden_size + self.bidirectional * self.hidden_size

    def forward(self, x):
        """Run the SRU over ``x`` starting from a zero initial state (sized
        by ``x.shape[1]``, i.e. the batch dimension); returns only the
        output sequence, discarding the final hidden state."""
        if self.bidirectional:
            h0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size * 2)
        else:
            h0 = torch.zeros(self.num_layers, x.shape[1], self.hidden_size)
        if x.is_cuda:
            h0 = h0.cuda()
        output, hn = self.sru(x, c0=h0)
        return output
class PASE(nn.Module):
    """Wrapper around a pre-trained PASE (problem-agnostic speech encoder)
    front-end; the output embedding dimension is read from the PASE config
    file (``emb_dim``)."""

    def __init__(self, options, inp_dim):
        super(PASE, self).__init__()
        # To use PASE within PyTorch-Kaldi, please clone the current PASE repository: https://github.com/santi-pdp/pase
        # Note that you have to clone the dev branch.
        # Take a look into the requirements (requirements.txt) and install in your environment what is missing. An important requirement is QRNN (https://github.com/salesforce/pytorch-qrnn).
        # Before starting working with PASE, it could make sense to a quick test with QRNN independently (see "usage" section in the QRNN repository).
        # Remember to install pase. This way it can be used outside the pase folder directory. To do it, go into the pase folder and type:
        # "python setup.py install"
        from pase.models.frontend import wf_builder

        self.input_dim = inp_dim
        self.pase_cfg = options["pase_cfg"]
        self.pase_model = options["pase_model"]
        # Build the PASE front-end from its config and load the pre-trained
        # checkpoint weights.
        self.pase = wf_builder(self.pase_cfg)
        self.pase.load_pretrained(self.pase_model, load_last=True, verbose=True)
        # Reading the out_dim from the config file:
        with open(self.pase_cfg) as json_file:
            config = json.load(json_file)
        self.out_dim = int(config["emb_dim"])

    def forward(self, x):
        # Add the leading batch and channel dimensions expected by PASE.
        x = x.unsqueeze(0).unsqueeze(0)
        output = self.pase(x)
        return output
class FusionLinearConv(nn.Module):
    r"""Applies a FusionLayer as described in:
    'FusionRNN: Shared Neural Parameters for
    Multi-Channel Distant Speech Recognition', Titouan P. et Al.
    Input channels are supposed to be concatenated along the last dimension.

    A single Conv1d whose kernel size and stride both equal the per-mic
    feature size applies the same weights to every microphone; the per-mic
    outputs are then reduced by sum (default) or mean.
    """

    def __init__(self, in_features, out_features, number_of_mic=1, bias=True,seed=None,act="leaky",reduce="sum"):
        super(FusionLinearConv, self).__init__()
        # Per-microphone input size (inputs arrive concatenated mic-by-mic).
        # NOTE(review): the ``bias`` and ``seed`` parameters are accepted but
        # never used below (the Conv1d hard-codes bias=True).
        self.in_features = in_features // number_of_mic
        self.out_features = out_features
        self.number_of_mic = number_of_mic
        self.reduce = reduce
        # NOTE(review): the default act="leaky" matches none of the branches
        # below ("leaky_relu"/"prelu"/"relu"), so the default falls through to
        # Tanh -- confirm this is intended.
        if act == "leaky_relu":
            self.act_function = nn.LeakyReLU()
        elif act == "prelu":
            self.act_function = nn.PReLU()
        elif act == "relu":
            self.act_function = nn.ReLU()
        else:
            self.act_function = nn.Tanh()
        # kernel == stride == per-mic size: one conv step per microphone,
        # sharing the same weights across microphones.
        self.conv = nn.Conv1d(1, self.out_features, kernel_size=self.in_features, stride=self.in_features, bias=True, padding=0)
        self.conv.bias.data.fill_(0)
        torch.nn.init.xavier_normal_(self.conv.weight.data)

    def forward(self, input):
        orig_shape = input.shape
        # Flatten (dim0, dim1) into the conv batch; each mic becomes one
        # stride position of the shared Conv1d.
        out = self.act_function(self.conv(input.view(orig_shape[0]*orig_shape[1], 1, -1)))
        # Reduce across the per-mic outputs (last dimension).
        if self.reduce == "mean":
            out = torch.mean(out, dim=-1)
        else:
            out = torch.sum(out, dim=-1)
        return out.view(orig_shape[0],orig_shape[1], -1)
| 73,602 | 34.049048 | 226 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/multistyle_training.py | from augmentation_utils import *
import configparser
import sox
import logging
logging.getLogger('sox').setLevel(logging.ERROR)
# Reading global cfg file (first argument-mandatory file)
cfg_file = sys.argv[1]
if not (os.path.exists(cfg_file)):
    sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
    sys.exit(0)
else:
    config = configparser.ConfigParser()
    config.read(cfg_file)

# Output folder creation
out_folder = config["data"]["out_folder"]
if not os.path.exists(out_folder):
    os.makedirs(out_folder)
data_folder = config["data"]["data_folder"]

# Read cfg file options: SNRs are given in dB and converted to linear ratios.
snr_array = np.array(list(map(int, config["impulse"]["snrs"].split(","))))
snr_list = 10 ** (snr_array / 10)

print("- Reading config file......OK!")

if config["data"]["dataset"] == "librispeech":
    speaker_lst = os.listdir(data_folder)
    speaker_lst = validate_dir(speaker_lst)

    # Create parallel dataset
    print("\n- Starting dataset parallelization.\n")
    speaker_count = 1
    for speaker in speaker_lst:
        print(" Speaker {} / {} ".format(speaker_count, len(speaker_lst)).center(40, "="))
        speaker_count += 1
        speaker_dir = os.path.join(data_folder, speaker)
        # Get chapters by speaker
        chapter_lst = os.listdir(speaker_dir)
        chapter_lst = validate_dir(chapter_lst)
        chapter_count = 1
        for chap in chapter_lst:
            print("Chapter {} / {} \r".format(chapter_count, len(chapter_lst)), end = '')
            chapter_count += 1
            chapter_dir = os.path.join(speaker_dir, chap)
            # Get utterances by speaker per chapter
            utterance_lst = os.listdir(chapter_dir)
            utt_transcripitons, utterance_lst = get_utterances(utterance_lst)
            output_dir = os.path.join(out_folder, speaker, chap)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            # Copy the transcription file alongside the augmented audio.
            transcription_from_dir = os.path.join(chapter_dir, utt_transcripitons)
            transcription_to_dir = os.path.join(output_dir, utt_transcripitons)
            shutil.copyfile(transcription_from_dir, transcription_to_dir)
            for utt in utterance_lst:
                utterance_dir = os.path.join(chapter_dir, utt)
                utt_save_dir = os.path.join(output_dir, utt)
                # BUGFIX: the style switches were swapped in the original code
                # ("change_speed" gated the volume fraction and "change_volume"
                # gated the speed fraction).  Each random +/- fraction is now
                # controlled by its matching switch.
                if config["styles"]["change_volume"] == "True":
                    # Randomly raise or lower the volume by the configured amount.
                    random_number = random.randint(0, 1)
                    if random_number == 1:
                        vol_fraction = 1 + float(config["impulse"]["volume_change"])
                    else:
                        vol_fraction = 1 - float(config["impulse"]["volume_change"])
                else:
                    vol_fraction = 1
                if config["styles"]["change_speed"] == "True":
                    # Randomly speed up or slow down by the configured amount.
                    random_number = random.randint(0, 1)
                    if random_number == 1:
                        speed_fraction = 1 + float(config["impulse"]["speed_change"])
                    else:
                        speed_fraction = 1 - float(config["impulse"]["speed_change"])
                else:
                    speed_fraction = 1
                if config["styles"]["change_speed"] == "True" or config["styles"]["change_volume"] == "True":
                    # create a transformer applying tempo and volume changes
                    tfm = sox.Transformer()
                    tfm.tempo(speed_fraction, 's')
                    tfm.vol(vol_fraction)
                    tfm.build_file(
                        input_filepath = utterance_dir, sample_rate_in = int(config["impulse"]["sample_rate"]),
                        output_filepath = utt_save_dir
                    )
                if config["styles"]["add_impulse"] == "True":
                    # Convolve the (normalised) recording with a random noise
                    # impulse at a randomly chosen SNR, then re-normalise.
                    recording, sample_rate = torchaudio.load(utt_save_dir)
                    noise = get_random_noise_file(config["impulse"]["impulse_dir"])
                    recording = normalize_tensor(recording)
                    random_snr_value = random.randrange(len(snr_list))
                    recording = convolve_impulse(recording[0], noise[0], snr_list[random_snr_value])
                    recording = normalize_tensor(recording)
                    torchaudio.save(utt_save_dir, recording, sample_rate = sample_rate)

    # Build the Kaldi-side parallel dataset from the augmented audio.
    cmd = "kaldi_decoding_scripts/create_parallel_dataset.sh " \
        + os.path.basename(config["data"]["out_folder"]) + " " \
        + os.path.dirname(config["data"]["root_folder"])
    invoke_process_popen_poll_live(cmd)
    print("\n\nDataset created successfully\n")
| 4,581 | 34.796875 | 111 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/utils.py | ##########################################################
# pytorch-kaldi-gan
# Walter Heymans
# North West University
# 2020
# Adapted from:
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
import configparser
import sys
import os.path
import random
import subprocess
import numpy as np
import re
import glob
from distutils.util import strtobool
import importlib
import torch
import torch.nn as nn
import torch.optim as optim
import math
import matplotlib.pyplot as plt
import weights_and_biases as wandb
import math
from torch.optim.optimizer import Optimizer
def run_command(cmd):
    """Run *cmd* in a shell, echo each output line as it arrives, and return
    the full decoded stdout (stderr is merged into stdout).

    Fix: under Python 3 ``readline()`` returns bytes, so the original EOF
    check ``line == ""`` could never be true (the loop never terminated)
    and ``"".join`` over a list of bytes raised TypeError.  Lines are now
    decoded once, up front.

    from http://blog.kagesenshi.org/2008/02/teeing-python-subprocesspopen-output.html
    """
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout = []
    while True:
        line = p.stdout.readline().decode("utf-8")
        stdout.append(line)
        print(line)
        # readline() yields "" only at EOF; also require the process to have
        # exited so trailing output is not lost.
        if line == "" and p.poll() is not None:
            break
    return "".join(stdout)
def run_shell_display(cmd):
    """Execute *cmd* in a shell and stream its stdout to this process's
    stdout one character at a time, until the command has terminated and
    its output is exhausted (stderr is captured but not shown)."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    while True:
        ch = proc.stdout.read(1).decode("utf-8")
        at_eof = ch == ""
        # Stop only once the stream is drained AND the process has exited.
        if at_eof and proc.poll() is not None:
            break
        if not at_eof:
            sys.stdout.write(ch)
            sys.stdout.flush()
    return
def run_shell(cmd, log_file):
    """Run *cmd* in a shell, append its decoded stdout and stderr (each
    followed by a newline) to *log_file*, and return the raw stdout bytes."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out_bytes, err_bytes = proc.communicate()
    proc.wait()
    with open(log_file, "a+") as handle:
        handle.write(out_bytes.decode("utf-8") + "\n")
        handle.write(err_bytes.decode("utf-8") + "\n")
    # print(out_bytes.decode("utf-8"))
    return out_bytes
def read_args_command_line(args, config):
    """Override config values from command-line options.

    Each argument from ``args[2:]`` must look like ``--section,field=value``
    (two-level form) or ``--section,field,N,subfield="value"`` (four-level
    form, which replaces the N-th occurrence of ``subfield=...`` inside the
    string stored at ``config[section][field]``).  ``config`` is mutated in
    place; returns the parsed ``[sections, fields, values]`` lists.  Exits
    the process via ``sys.exit(0)`` on any malformed or unknown option.
    """
    sections = []
    fields = []
    values = []
    for i in range(2, len(args)):
        # check if the option is valid for second level
        r2 = re.compile("--.*,.*=.*")
        # check if the option is valid for 4 level
        r4 = re.compile('--.*,.*,.*,.*=".*"')
        if r2.match(args[i]) is None and r4.match(args[i]) is None:
            sys.stderr.write(
                'ERROR: option "%s" from command line is not valid! (the format must be "--section,field=value")\n'
                % (args[i])
            )
            sys.exit(0)
        # Split "--section,rest=value" into its three components.
        sections.append(re.search("--(.*),", args[i]).group(1))
        fields.append(re.search(",(.*)", args[i].split("=")[0]).group(1))
        values.append(re.search("=(.*)", args[i]).group(1))
    # parsing command line arguments
    for i in range(len(sections)):
        # Remove multi level is level >= 2
        sections[i] = sections[i].split(",")[0]
        if sections[i] in config.sections():
            # Case of args level > than 2 like --sec,fields,0,field="value"
            if len(fields[i].split(",")) >= 2:
                splitted = fields[i].split(",")
                # Get the actual fields
                field = splitted[0]
                number = int(splitted[1])
                f_name = splitted[2]
                if field in list(config[sections[i]]):
                    # Get the current string of the corresponding field
                    current_config_field = config[sections[i]][field]
                    # Count the number of occurence of the required field
                    matching = re.findall(f_name + ".", current_config_field)
                    if number >= len(matching):
                        sys.stderr.write(
                            'ERROR: the field number "%s" provided from command line is not valid, we found "%s" "%s" field(s) in section "%s"!\n'
                            % (number, len(matching), f_name, field)
                        )
                        sys.exit(0)
                    else:
                        # Now replace the (number+1)-th occurrence of
                        # "f_name=..." with the new value inside the stored string.
                        str_to_be_replaced = re.findall(f_name + ".*", current_config_field)[number]
                        new_str = str(f_name + "=" + values[i])
                        replaced = nth_replace_string(current_config_field, str_to_be_replaced, new_str, number + 1)
                        config[sections[i]][field] = replaced
                else:
                    sys.stderr.write(
                        'ERROR: field "%s" of section "%s" from command line is not valid!")\n' % (field, sections[i])
                    )
                    sys.exit(0)
            else:
                # Simple two-level form: direct assignment.
                if fields[i] in list(config[sections[i]]):
                    config[sections[i]][fields[i]] = values[i]
                else:
                    sys.stderr.write(
                        'ERROR: field "%s" of section "%s" from command line is not valid!")\n'
                        % (fields[i], sections[i])
                    )
                    sys.exit(0)
        else:
            sys.stderr.write('ERROR: section "%s" from command line is not valid!")\n' % (sections[i]))
            sys.exit(0)
    return [sections, fields, values]
def compute_avg_performance(info_lst):
    """Aggregate chunk results from a list of ini files.

    Each file in *info_lst* must contain a ``[results]`` section with
    ``loss``, ``err`` and ``elapsed_time_chunk`` entries.  Returns
    ``[mean loss, mean error, total elapsed time]``.
    """
    result_sections = []
    for res_file in info_lst:
        parser = configparser.ConfigParser()
        parser.read(res_file)
        result_sections.append(parser["results"])
    losses = [float(sec["loss"]) for sec in result_sections]
    errors = [float(sec["err"]) for sec in result_sections]
    times = [float(sec["elapsed_time_chunk"]) for sec in result_sections]
    return [np.mean(losses), np.mean(errors), np.sum(times)]
def check_field(inp, type_inp, field):
    """Validate a single config value against its proto type descriptor.

    Parameters
    ----------
    inp : str
        Raw value read from the config file.
    type_inp : str
        Type descriptor from the proto file, e.g. "path", "bool",
        "int(0,inf)", "float(0,1)", "int_list(-inf,inf)",
        "float_list(0,1)", "bool_list", or a closed choice set such as
        "{sgd,rmsprop,adam}". Bounds may be "-inf"/"inf".
    field : str
        Field name, used only for error messages.

    Returns True when the value is valid; otherwise prints an error on
    stderr and terminates the process with sys.exit(0).
    """
    valid_field = True
    # Empty values are only allowed for the "cmd" field
    if inp == "" and field != "cmd":
        sys.stderr.write('ERROR: The the field "%s" of the config file is empty! \n' % (field))
        valid_field = False
        sys.exit(0)
    if type_inp == "path":
        # "none" is an accepted placeholder for a missing path
        if not (os.path.isfile(inp)) and not (os.path.isdir(inp)) and inp != "none":
            sys.stderr.write(
                'ERROR: The path "%s" specified in the field "%s" of the config file does not exists! \n'
                % (inp, field)
            )
            valid_field = False
            sys.exit(0)
    # Closed set of choices, e.g. "{sgd,rmsprop,adam}".
    # BUGFIX: the previous test `"{" and "}" in type_inp` only checked for
    # "}" because the literal "{" is always truthy.
    if "{" in type_inp and "}" in type_inp:
        arg_list = type_inp[1:-1].split(",")
        if inp not in arg_list:
            sys.stderr.write('ERROR: The field "%s" can only contain %s arguments \n' % (field, arg_list))
            valid_field = False
            sys.exit(0)
    if "int(" in type_inp:
        try:
            int(inp)
        except ValueError:
            sys.stderr.write('ERROR: The field "%s" can only contain an integer (got "%s") \n' % (field, inp))
            valid_field = False
            sys.exit(0)
        # Check if the value is within the expected range, e.g. "int(0,inf)"
        lower_bound = type_inp.split(",")[0][4:]
        upper_bound = type_inp.split(",")[1][:-1]
        if lower_bound != "-inf":
            if int(inp) < int(lower_bound):
                sys.stderr.write(
                    'ERROR: The field "%s" can only contain an integer greater than %s (got "%s") \n'
                    % (field, lower_bound, inp)
                )
                valid_field = False
                sys.exit(0)
        if upper_bound != "inf":
            if int(inp) > int(upper_bound):
                sys.stderr.write(
                    'ERROR: The field "%s" can only contain an integer smaller than %s (got "%s") \n'
                    % (field, upper_bound, inp)
                )
                valid_field = False
                sys.exit(0)
    if "float(" in type_inp:
        try:
            float(inp)
        except ValueError:
            sys.stderr.write('ERROR: The field "%s" can only contain a float (got "%s") \n' % (field, inp))
            valid_field = False
            sys.exit(0)
        # Check if the value is within the expected range, e.g. "float(0,1)"
        lower_bound = type_inp.split(",")[0][6:]
        upper_bound = type_inp.split(",")[1][:-1]
        if lower_bound != "-inf":
            if float(inp) < float(lower_bound):
                sys.stderr.write(
                    'ERROR: The field "%s" can only contain a float greater than %s (got "%s") \n'
                    % (field, lower_bound, inp)
                )
                valid_field = False
                sys.exit(0)
        if upper_bound != "inf":
            if float(inp) > float(upper_bound):
                sys.stderr.write(
                    'ERROR: The field "%s" can only contain a float smaller than %s (got "%s") \n'
                    % (field, upper_bound, inp)
                )
                valid_field = False
                sys.exit(0)
    if type_inp == "bool":
        lst = {"True", "true", "1", "False", "false", "0"}
        if not (inp in lst):
            sys.stderr.write('ERROR: The field "%s" can only contain a boolean (got "%s") \n' % (field, inp))
            valid_field = False
            sys.exit(0)
    if "int_list(" in type_inp:
        # Comma-separated list of integers, each checked against the bounds
        lst = inp.split(",")
        try:
            list(map(int, lst))
        except ValueError:
            sys.stderr.write(
                'ERROR: The field "%s" can only contain a list of integer (got "%s"). Make also sure there aren\'t white spaces between commas.\n'
                % (field, inp)
            )
            valid_field = False
            sys.exit(0)
        lower_bound = type_inp.split(",")[0][9:]
        upper_bound = type_inp.split(",")[1][:-1]
        for elem in lst:
            if lower_bound != "-inf":
                if int(elem) < int(lower_bound):
                    sys.stderr.write(
                        'ERROR: The field "%s" can only contain an integer greater than %s (got "%s") \n'
                        % (field, lower_bound, elem)
                    )
                    valid_field = False
                    sys.exit(0)
            if upper_bound != "inf":
                if int(elem) > int(upper_bound):
                    sys.stderr.write(
                        'ERROR: The field "%s" can only contain an integer smaller than %s (got "%s") \n'
                        % (field, upper_bound, elem)
                    )
                    valid_field = False
                    sys.exit(0)
    if "float_list(" in type_inp:
        # Comma-separated list of floats, each checked against the bounds
        lst = inp.split(",")
        try:
            list(map(float, lst))
        except ValueError:
            sys.stderr.write(
                'ERROR: The field "%s" can only contain a list of floats (got "%s"). Make also sure there aren\'t white spaces between commas. \n'
                % (field, inp)
            )
            valid_field = False
            sys.exit(0)
        lower_bound = type_inp.split(",")[0][11:]
        upper_bound = type_inp.split(",")[1][:-1]
        for elem in lst:
            if lower_bound != "-inf":
                if float(elem) < float(lower_bound):
                    sys.stderr.write(
                        'ERROR: The field "%s" can only contain a float greater than %s (got "%s") \n'
                        % (field, lower_bound, elem)
                    )
                    valid_field = False
                    sys.exit(0)
            if upper_bound != "inf":
                if float(elem) > float(upper_bound):
                    sys.stderr.write(
                        'ERROR: The field "%s" can only contain a float smaller than %s (got "%s") \n'
                        % (field, upper_bound, elem)
                    )
                    valid_field = False
                    sys.exit(0)
    if type_inp == "bool_list":
        lst = {"True", "true", "1", "False", "false", "0"}
        inps = inp.split(",")
        for elem in inps:
            if not (elem in lst):
                sys.stderr.write(
                    'ERROR: The field "%s" can only contain a list of boolean (got "%s"). Make also sure there aren\'t white spaces between commas.\n'
                    % (field, inp)
                )
                valid_field = False
                sys.exit(0)
    return valid_field
def get_all_archs(config):
    """Return every config section whose name contains 'architecture'."""
    return [sec for sec in config.sections() if "architecture" in sec]
def expand_section(config_proto, config):
    """Expand the [dataset]/[architecture] templates of config_proto.

    For every concrete [dataset*] / [architecture*] section found in
    *config*, a matching section is added to *config_proto*, seeded with
    the template fields. Architecture sections additionally pull in the
    fields declared in their arch_proto file and in the proto file of the
    selected optimizer. Returns [config_proto, name_data, name_arch].
    """
    dataset_names = []
    arch_names = []
    for section in config.sections():
        if "dataset" in section:
            config_proto.add_section(section)
            config_proto[section] = config_proto["dataset"]
            dataset_names.append(config[section]["data_name"])
        if "architecture" in section:
            arch_names.append(config[section]["arch_name"])
            config_proto.add_section(section)
            config_proto[section] = config_proto["architecture"]
            # Merge the architecture-specific proto fields
            proto_path = config[section]["arch_proto"]
            arch_parser = configparser.ConfigParser()
            arch_parser.read(proto_path)
            for key, value in arch_parser.items("proto"):
                config_proto.set(section, key, value)
            # Merge the optimizer proto fields; an unrecognized optimizer
            # keeps proto_path unchanged (same fallthrough as before)
            opt_type = config[section]["arch_opt"]
            if opt_type == "sgd":
                proto_path = "proto/sgd.proto"
            if opt_type == "rmsprop":
                proto_path = "proto/rmsprop.proto"
            if opt_type == "adam":
                proto_path = "proto/adam.proto"
            opt_parser = configparser.ConfigParser()
            opt_parser.read(proto_path)
            for key, value in opt_parser.items("proto"):
                config_proto.set(section, key, value)
    config_proto.remove_section("dataset")
    config_proto.remove_section("architecture")
    return [config_proto, dataset_names, arch_names]
def expand_section_proto(config_proto, config):
    """Merge the [proto] entries of the optimization proto file into the
    [optimization] section of *config_proto*."""
    opt_proto_parser = configparser.ConfigParser()
    opt_proto_parser.read(config["optimization"]["opt_proto"])
    for key, value in opt_proto_parser.items("proto"):
        config_proto.set("optimization", key, value)
def check_cfg_fields(config_proto, config, cfg_file):
    """Verify that *config* contains every mandatory section and field
    declared in *config_proto*, validating each value with check_field().

    A proto section counts as present when its name occurs inside any
    section name of *config*. Exits the process when anything is missing
    or invalid; returns True otherwise.
    """
    sec_parse = True
    for proto_sec in config_proto.sections():
        if not any(proto_sec in cfg_sec for cfg_sec in config.sections()):
            # A mandatory section is missing entirely
            sys.stderr.write(
                'ERROR: The confg file %s does not contain "[%s]" section (mandatory)!\n' % (cfg_file, proto_sec)
            )
            sec_parse = False
            continue
        for proto_field, proto_type in config_proto.items(proto_sec):
            if proto_field not in config[proto_sec]:
                sys.stderr.write(
                    'ERROR: The confg file %s does not contain the field "%s=" in section "[%s]" (mandatory)!\n'
                    % (cfg_file, proto_field, proto_sec)
                )
                sec_parse = False
            elif not check_field(config[proto_sec][proto_field], proto_type, proto_field):
                sec_parse = False
    if sec_parse == False:
        sys.stderr.write("ERROR: Revise the confg file %s \n" % (cfg_file))
        sys.exit(0)
    return sec_parse
def check_consistency_with_proto(cfg_file, cfg_file_proto):
    """Parse *cfg_file* and verify it against the structure declared in
    *cfg_file_proto*.

    Returns [config_proto, name_data, name_arch]; exits the process when a
    file is missing or a mandatory section/field check fails.
    """
    sec_parse = True
    # Both the config file and its proto description must exist
    for path in (cfg_file, cfg_file_proto):
        try:
            open(path, "r")
        except IOError:
            sys.stderr.write("ERROR: The confg file %s does not exist!\n" % (path))
            sys.exit(0)
    # Parse the user config
    config = configparser.ConfigParser()
    config.read(cfg_file)
    # Parse the proto description
    config_proto = configparser.ConfigParser()
    config_proto.read(cfg_file_proto)
    # Replicate the [dataset]/[architecture] templates for each concrete section
    [config_proto, name_data, name_arch] = expand_section(config_proto, config)
    # Validate all mandatory sections and fields (exits inside on failure)
    sec_parse = check_cfg_fields(config_proto, config, cfg_file)
    if sec_parse == False:
        sys.exit(0)
    return [config_proto, name_data, name_arch]
def check_cfg(cfg_file, config, cfg_file_proto):
    """Validate the global config against its proto file and normalize it.

    Checks [data_use] vs [dataset*] consistency, disables the first layer
    norm for sequential models, detects the "production" case (forwarding a
    dataset with no labels), validates feature/label naming across datasets,
    resolves posterior-count files (running Kaldi's hmm-info/analyze-counts
    when needed), replaces "N_out_<lab>" placeholders with the detected
    output sizes, and validates the [model] section.

    Returns [config, name_data, name_arch]; exits the process on any error.
    """
    # Check consistency between cfg_file and cfg_file_proto
    [config_proto, name_data, name_arch] = check_consistency_with_proto(cfg_file, cfg_file_proto)
    # Reload data_name because they might be altered by arguments
    name_data = []
    for sec in config.sections():
        if "dataset" in sec:
            name_data.append(config[sec]["data_name"])
    # check consistency between [data_use] vs [data*]
    sec_parse = True
    data_use_with = []
    for data in list(dict(config.items("data_use")).values()):
        data_use_with.append(data.split(","))
    data_use_with = sum(data_use_with, [])
    if not (set(data_use_with).issubset(name_data)):
        sys.stderr.write("ERROR: in [data_use] you are using a dataset not specified in [dataset*] %s \n" % (cfg_file))
        sec_parse = False
        sys.exit(0)
    # Set to false the first layer norm layer if the architecture is sequential (to avoid numerical instabilities)
    seq_model = False
    for sec in config.sections():
        if "architecture" in sec:
            if strtobool(config[sec]["arch_seq_model"]):
                seq_model = True
                break
    if seq_model:
        # NOTE(review): only [architecture1] is patched here — presumably the
        # input-facing architecture; confirm against the recipe layout.
        for item in list(config["architecture1"].items()):
            if "use_laynorm" in item[0] and "_inp" not in item[0]:
                ln_list = item[1].split(",")
                if ln_list[0] == "True":
                    ln_list[0] = "False"
                    config["architecture1"][item[0]] = ",".join(ln_list)
    # Production case (We don't have the alignement for the forward_with), by default the prod
    # Flag is set to False, and the dataset prod number to 1, corresponding to no prod dataset
    config["exp"]["production"] = str("False")
    prod_dataset_number = "dataset1"
    for data in name_data:
        [lab_names, _, _] = parse_lab_field(config[cfg_item2sec(config, "data_name", data)]["lab"])
        if "none" in lab_names and data == config["data_use"]["forward_with"]:
            config["exp"]["production"] = str("True")
            prod_data_name = data
            for sec in config.sections():
                if "dataset" in sec:
                    if config[sec]["data_name"] == data:
                        prod_dataset_number = sec
        else:
            continue
    # If production case is detected, remove all the other datasets except production
    if config["exp"]["production"] == str("True"):
        name_data = [elem for elem in name_data if elem == prod_data_name]
    # Parse fea and lab fields in datasets*
    cnt = 0
    fea_names_lst = []
    lab_names_lst = []
    for data in name_data:
        [lab_names, _, _] = parse_lab_field(config[cfg_item2sec(config, "data_name", data)]["lab"])
        if "none" in lab_names:
            continue
        [fea_names, fea_lsts, fea_opts, cws_left, cws_right] = parse_fea_field(
            config[cfg_item2sec(config, "data_name", data)]["fea"]
        )
        # NOTE(review): lab_folders/lab_opts keep the values of the LAST
        # labelled dataset after this loop; they are reused further below.
        [lab_names, lab_folders, lab_opts] = parse_lab_field(config[cfg_item2sec(config, "data_name", data)]["lab"])
        fea_names_lst.append(sorted(fea_names))
        lab_names_lst.append(sorted(lab_names))
        # Check that fea_name doesn't contain special characters
        for name_features in fea_names_lst[cnt]:
            if not (re.match("^[a-zA-Z0-9]*$", name_features)):
                sys.stderr.write(
                    'ERROR: features names (fea_name=) must contain only letters or numbers (no special characters as "_,$,..") \n'
                )
                sec_parse = False
                sys.exit(0)
        # All datasets must declare identical (sorted) feature and label names
        if cnt > 0:
            if fea_names_lst[cnt - 1] != fea_names_lst[cnt]:
                sys.stderr.write("ERROR: features name (fea_name) must be the same of all the datasets! \n")
                sec_parse = False
                sys.exit(0)
            if lab_names_lst[cnt - 1] != lab_names_lst[cnt]:
                sys.stderr.write("ERROR: labels name (lab_name) must be the same of all the datasets! \n")
                sec_parse = False
                sys.exit(0)
        cnt = cnt + 1
    # Create the output folder
    out_folder = config["exp"]["out_folder"]
    if not os.path.exists(out_folder) or not (os.path.exists(out_folder + "/exp_files")):
        os.makedirs(out_folder + "/exp_files")
    # Parsing forward field
    model = config["model"]["model"]
    possible_outs = list(re.findall("(.*)=", model.replace(" ", "")))
    forward_out_lst = config["forward"]["forward_out"].split(",")
    forward_norm_lst = config["forward"]["normalize_with_counts_from"].split(",")
    forward_norm_bool_lst = config["forward"]["normalize_posteriors"].split(",")
    lab_lst = list(re.findall("lab_name=(.*)\n", config[prod_dataset_number]["lab"].replace(" ", "")))
    lab_folders = list(re.findall("lab_folder=(.*)\n", config[prod_dataset_number]["lab"].replace(" ", "")))
    N_out_lab = ["none"] * len(lab_lst)
    if config["exp"]["production"] == str("False"):
        for i in range(len(lab_opts)):
            # Compute number of monophones if needed
            if "ali-to-phones" in lab_opts[i]:
                log_file = config["exp"]["out_folder"] + "/log.log"
                folder_lab_count = lab_folders[i]
                cmd = "hmm-info " + folder_lab_count + "/final.mdl | awk '/phones/{print $4}'"
                output = run_shell(cmd, log_file)
                if output.decode().rstrip() == "":
                    sys.stderr.write(
                        "ERROR: hmm-info command doesn't exist. Make sure your .bashrc contains the Kaldi paths and correctly exports it.\n"
                    )
                    sys.exit(0)
                N_out = int(output.decode().rstrip())
                N_out_lab[i] = N_out
    for i in range(len(forward_out_lst)):
        # Every forwarded output must be produced by the [model] section
        if forward_out_lst[i] not in possible_outs:
            sys.stderr.write(
                'ERROR: the output "%s" in the section "forward_out" is not defined in section model)\n'
                % (forward_out_lst[i])
            )
            sys.exit(0)
        if strtobool(forward_norm_bool_lst[i]):
            if forward_norm_lst[i] not in lab_lst:
                # The entry is a count-file path rather than a label name
                if not os.path.exists(forward_norm_lst[i]):
                    sys.stderr.write(
                        'ERROR: the count_file "%s" in the section "forward_out" does not exist)\n'
                        % (forward_norm_lst[i])
                    )
                    sys.exit(0)
                else:
                    # Check if the specified file is in the right format
                    # NOTE(review): this file handle is never closed.
                    f = open(forward_norm_lst[i], "r")
                    cnts = f.read()
                    if not (bool(re.match("(.*)\[(.*)\]", cnts))):
                        sys.stderr.write(
                            'ERROR: the count_file "%s" in the section "forward_out" not in the right format)\n'
                            % (forward_norm_lst[i])
                        )
            else:
                # Try to automatically retrieve the count file from the config file
                # Compute the number of context-dependent phone states
                if "ali-to-pdf" in lab_opts[lab_lst.index(forward_norm_lst[i])]:
                    log_file = config["exp"]["out_folder"] + "/log.log"
                    folder_lab_count = lab_folders[lab_lst.index(forward_norm_lst[i])]
                    cmd = "hmm-info " + folder_lab_count + "/final.mdl | awk '/pdfs/{print $4}'"
                    output = run_shell(cmd, log_file)
                    if output.decode().rstrip() == "":
                        sys.stderr.write(
                            "ERROR: hmm-info command doesn't exist. Make sure your .bashrc contains the Kaldi paths and correctly exports it.\n"
                        )
                        sys.exit(0)
                    N_out = int(output.decode().rstrip())
                    N_out_lab[lab_lst.index(forward_norm_lst[i])] = N_out
                    count_file_path = (
                        out_folder
                        + "/exp_files/forward_"
                        + forward_out_lst[i]
                        + "_"
                        + forward_norm_lst[i]
                        + ".count"
                    )
                    cmd = (
                        "analyze-counts --print-args=False --verbose=0 --binary=false --counts-dim="
                        + str(N_out)
                        + ' "ark:ali-to-pdf '
                        + folder_lab_count
                        + '/final.mdl \\"ark:gunzip -c '
                        + folder_lab_count
                        + '/ali.*.gz |\\" ark:- |" '
                        + count_file_path
                    )
                    run_shell(cmd, log_file)
                    forward_norm_lst[i] = count_file_path
                else:
                    sys.stderr.write(
                        'ERROR: Not able to automatically retrieve count file for the label "%s". Please add a valid count file path in "normalize_with_counts_from" or set normalize_posteriors=False \n'
                        % (forward_norm_lst[i])
                    )
                    sys.exit(0)
    # Update the config file with the count_file paths
    config["forward"]["normalize_with_counts_from"] = ",".join(forward_norm_lst)
    # When possible replace the pattern "N_out_lab*" with the detected number of output
    for sec in config.sections():
        for field in list(config[sec]):
            for i in range(len(lab_lst)):
                pattern = "N_out_" + lab_lst[i]
                if pattern in config[sec][field]:
                    if N_out_lab[i] != "none":
                        config[sec][field] = config[sec][field].replace(pattern, str(N_out_lab[i]))
                    else:
                        sys.stderr.write(
                            "ERROR: Cannot automatically retrieve the number of output in %s. Please, add manually the number of outputs \n"
                            % (pattern)
                        )
                        sys.exit(0)
    # Check the model field
    parse_model_field(cfg_file)
    # Create block diagram picture of the model
    create_block_diagram(cfg_file)
    if sec_parse == False:
        sys.exit(0)
    return [config, name_data, name_arch]
def cfg_item2sec(config, field, value):
    """Return the first section of *config* where *field* equals *value*.

    Exits the process with an error on stderr when no section matches.

    BUGFIX: the previous implementation accepted a section whenever *value*
    appeared under ANY of its fields, not specifically under *field*, which
    could return the wrong section when the same string occurs elsewhere.
    """
    for sec in config.sections():
        # Exact field/value match within this section
        if field in config[sec] and config[sec][field] == value:
            return sec
    sys.stderr.write("ERROR: %s=%s not found in config file \n" % (field, value))
    sys.exit(0)
    return -1  # unreachable; kept for parity with the original
def split_chunks(seq, size):
    """Partition *seq* into *size* contiguous, near-equal slices (in order)."""
    # Same float arithmetic as before so the rounding of the boundaries
    # (and hence the chunk contents) is unchanged.
    splitsize = 1.0 / size * len(seq)
    return [seq[int(round(i * splitsize)) : int(round((i + 1) * splitsize))] for i in range(size)]
def get_chunks_after_which_to_validate(N_ck_tr, nr_of_valid_per_epoch):
    """Return the training-chunk indices after which a validation runs.

    The N_ck_tr chunks are partitioned into nr_of_valid_per_epoch groups of
    up to ceil(N_ck_tr / nr_of_valid_per_epoch) chunks each; the last chunk
    index of every group is returned.
    """
    group_size = int(np.ceil(N_ck_tr / float(nr_of_valid_per_epoch)))
    last_ids = []
    for g in range(nr_of_valid_per_epoch):
        group = range(0, N_ck_tr)[g * group_size : (g + 1) * group_size]
        last_ids.append(group[-1])
    return last_ids
def do_validation_after_chunk(ck, N_ck_tr, config):
    """Return True when a validation pass should run after training chunk *ck*.

    The number of validations per epoch is read from
    config["exp"]["nr_of_valid_per_epoch"] and defaults to 1.
    """
    if "nr_of_valid_per_epoch" in config["exp"]:
        nr_of_valid_per_epoch = int(config["exp"]["nr_of_valid_per_epoch"])
    else:
        nr_of_valid_per_epoch = 1
    return ck in get_chunks_after_which_to_validate(N_ck_tr, nr_of_valid_per_epoch)
def _get_val_file_name_base(dataset, ep, ck, ck_val, N_ep_str_format, N_ck_str_format, N_ck_str_format_val):
file_name = "valid_" + dataset + "_ep" + format(ep, N_ep_str_format) + "_trCk" + format(ck, N_ck_str_format)
if ck_val is None:
file_name += "*"
else:
file_name += "_ck" + format(ck_val, N_ck_str_format_val)
return file_name
def get_val_lst_file_path(
    out_folder, valid_data, ep, ck, ck_val, fea_name, N_ep_str_format, N_ck_str_format, N_ck_str_format_val
):
    """Path of the feature-list (.lst) file for one validation chunk.

    None for ck_val or fea_name yields a '*' glob in the corresponding slot.
    """
    base = _get_val_file_name_base(
        valid_data, ep, ck, ck_val, N_ep_str_format, N_ck_str_format, N_ck_str_format_val
    )
    fea_part = fea_name if fea_name is not None else "*"
    return out_folder + "/exp_files/" + base + "_" + fea_part + ".lst"
def get_val_info_file_path(
    out_folder, valid_data, ep, ck, ck_val, N_ep_str_format, N_ck_str_format, N_ck_str_format_val
):
    """Path of the .info file for one validation chunk ('*' glob when
    ck_val is None)."""
    base = _get_val_file_name_base(
        valid_data, ep, ck, ck_val, N_ep_str_format, N_ck_str_format, N_ck_str_format_val
    )
    return out_folder + "/exp_files/" + base + ".info"
def get_val_cfg_file_path(
    out_folder, valid_data, ep, ck, ck_val, N_ep_str_format, N_ck_str_format, N_ck_str_format_val
):
    """Path of the chunk-specific .cfg file for one validation chunk
    ('*' glob when ck_val is None)."""
    base = _get_val_file_name_base(
        valid_data, ep, ck, ck_val, N_ep_str_format, N_ck_str_format, N_ck_str_format_val
    )
    return out_folder + "/exp_files/" + base + ".cfg"
def create_configs(config):
    """Create all chunk-specific config files for training/validation/forward.

    For every epoch, training dataset and chunk, writes one .cfg file (plus
    the matching .info and model file paths) under out_folder/exp_files, and
    records every cfg path in exp_files/list_chunks.txt. Also handles the
    production (forward-only) case, optional GAN list creation, and the
    "increase_seq_length_train" schedule.
    """
    # This function create the chunk-specific config files
    cfg_file_proto_chunk = config["cfg_proto"]["cfg_proto_chunk"]
    N_ep = int(config["exp"]["N_epochs_tr"])
    # Zero-padded format strings so file names sort correctly
    N_ep_str_format = "0" + str(max(math.ceil(np.log10(N_ep)), 1)) + "d"
    tr_data_lst = config["data_use"]["train_with"].split(",")
    valid_data_lst = config["data_use"]["valid_with"].split(",")
    max_seq_length_train = config["batches"]["max_seq_length_train"]
    forward_data_lst = config["data_use"]["forward_with"].split(",")
    is_production = strtobool(config["exp"]["production"])
    out_folder = config["exp"]["out_folder"]
    cfg_file = out_folder + "/conf.cfg"
    chunk_lst = out_folder + "/exp_files/list_chunks.txt"
    lst_chunk_file = open(chunk_lst, "w")
    # Read the batch size string
    batch_size_tr_str = config["batches"]["batch_size_train"]
    batch_size_tr_arr = expand_str_ep(batch_size_tr_str, "int", N_ep, "|", "*")
    # Read the max_seq_length_train
    if len(max_seq_length_train.split(",")) == 1:
        max_seq_length_tr_arr = expand_str_ep(max_seq_length_train, "int", N_ep, "|", "*")
    else:
        max_seq_length_tr_arr = [max_seq_length_train] * N_ep
    cfg_file_proto = config["cfg_proto"]["cfg_proto"]
    [config, name_data, name_arch] = check_cfg(cfg_file, config, cfg_file_proto)
    arch_lst = get_all_archs(config)
    # Per-architecture training hyper-parameters
    lr = {}
    improvement_threshold = {}
    halving_factor = {}
    pt_files = {}
    drop_rates = {}
    for arch in arch_lst:
        lr_arr = expand_str_ep(config[arch]["arch_lr"], "float", N_ep, "|", "*")
        lr[arch] = lr_arr
        improvement_threshold[arch] = float(config[arch]["arch_improvement_threshold"])
        halving_factor[arch] = float(config[arch]["arch_halving_factor"])
        pt_files[arch] = config[arch]["arch_pretrain_file"]
        # Loop over all the sections and look for a "_drop" field (to perform dropout scheduling
        for (field_key, field_val) in config.items(arch):
            if "_drop" in field_key:
                drop_lay = field_val.split(",")
                N_lay = len(drop_lay)
                drop_rates[arch] = []
                for lay_id in range(N_lay):
                    drop_rates[arch].append(expand_str_ep(drop_lay[lay_id], "float", N_ep, "|", "*"))
                # Check dropout factors
                for dropout_factor in drop_rates[arch][0]:
                    if float(dropout_factor) < 0.0 or float(dropout_factor) > 1.0:
                        sys.stderr.write(
                            "The dropout rate should be between 0 and 1. Got %s in %s.\n" % (dropout_factor, field_key)
                        )
                        sys.exit(0)
    # Production case, we don't want to train, only forward without labels
    if is_production:
        # ep keeps this value for the forward section below; N_ep=0 skips training
        ep = N_ep - 1
        N_ep = 0
        model_files = {}
        max_seq_length_train_curr = max_seq_length_train
        for arch in pt_files.keys():
            model_files[arch] = out_folder + "/exp_files/final_" + arch + ".pkl"
    if strtobool(config["batches"]["increase_seq_length_train"]):
        max_seq_length_train_curr = config["batches"]["start_seq_len_train"]
        if len(max_seq_length_train.split(",")) == 1:
            max_seq_length_train_curr = int(max_seq_length_train_curr)
        else:
            # TODO: add support for increasing seq length when fea and lab have different time dimensionality
            pass
    for ep in range(N_ep):
        for tr_data in tr_data_lst:
            # Compute the total number of chunks for each training epoch
            N_ck_tr = compute_n_chunks(out_folder, tr_data, ep, N_ep_str_format, "train")
            N_ck_str_format = "0" + str(max(math.ceil(np.log10(N_ck_tr)), 1)) + "d"
            # ***Epoch training***
            for ck in range(N_ck_tr):
                # path of the list of features for this chunk
                lst_file = (
                    out_folder
                    + "/exp_files/train_"
                    + tr_data
                    + "_ep"
                    + format(ep, N_ep_str_format)
                    + "_ck"
                    + format(ck, N_ck_str_format)
                    + "_*.lst"
                )
                # paths of the output files (info,model,chunk_specific cfg file)
                info_file = (
                    out_folder
                    + "/exp_files/train_"
                    + tr_data
                    + "_ep"
                    + format(ep, N_ep_str_format)
                    + "_ck"
                    + format(ck, N_ck_str_format)
                    + ".info"
                )
                if ep + ck == 0:
                    model_files_past = {}
                else:
                    model_files_past = model_files
                model_files = {}
                for arch in pt_files.keys():
                    model_files[arch] = info_file.replace(".info", "_" + arch + ".pkl")
                config_chunk_file = (
                    out_folder
                    + "/exp_files/train_"
                    + tr_data
                    + "_ep"
                    + format(ep, N_ep_str_format)
                    + "_ck"
                    + format(ck, N_ck_str_format)
                    + ".cfg"
                )
                lst_chunk_file.write(config_chunk_file + "\n")
                if strtobool(config["batches"]["increase_seq_length_train"]) == False:
                    if len(max_seq_length_train.split(",")) == 1:
                        max_seq_length_train_curr = int(max_seq_length_tr_arr[ep])
                    else:
                        max_seq_length_train_curr = max_seq_length_tr_arr[ep]
                # Write chunk-specific cfg file
                write_cfg_chunk(
                    cfg_file,
                    config_chunk_file,
                    cfg_file_proto_chunk,
                    pt_files,
                    lst_file,
                    info_file,
                    "train",
                    tr_data,
                    lr,
                    max_seq_length_train_curr,
                    name_data,
                    ep,
                    ck,
                    batch_size_tr_arr[ep],
                    drop_rates,
                )
                # update pt_file (used to initialized the DNN for the next chunk)
                for pt_arch in pt_files.keys():
                    pt_files[pt_arch] = (
                        out_folder
                        + "/exp_files/train_"
                        + tr_data
                        + "_ep"
                        + format(ep, N_ep_str_format)
                        + "_ck"
                        + format(ck, N_ck_str_format)
                        + "_"
                        + pt_arch
                        + ".pkl"
                    )
                # Validation only after the configured chunks and only for the last training dataset
                if do_validation_after_chunk(ck, N_ck_tr, config) and tr_data == tr_data_lst[-1]:
                    for valid_data in valid_data_lst:
                        N_ck_valid = compute_n_chunks(out_folder, valid_data, ep, N_ep_str_format, "valid")
                        N_ck_str_format_val = "0" + str(max(math.ceil(np.log10(N_ck_valid)), 1)) + "d"
                        for ck_val in range(N_ck_valid):
                            lst_file = get_val_lst_file_path(
                                out_folder,
                                valid_data,
                                ep,
                                ck,
                                ck_val,
                                None,
                                N_ep_str_format,
                                N_ck_str_format,
                                N_ck_str_format_val,
                            )
                            info_file = get_val_info_file_path(
                                out_folder,
                                valid_data,
                                ep,
                                ck,
                                ck_val,
                                N_ep_str_format,
                                N_ck_str_format,
                                N_ck_str_format_val,
                            )
                            config_chunk_file = get_val_cfg_file_path(
                                out_folder,
                                valid_data,
                                ep,
                                ck,
                                ck_val,
                                N_ep_str_format,
                                N_ck_str_format,
                                N_ck_str_format_val,
                            )
                            lst_chunk_file.write(config_chunk_file + "\n")
                            write_cfg_chunk(
                                cfg_file,
                                config_chunk_file,
                                cfg_file_proto_chunk,
                                model_files,
                                lst_file,
                                info_file,
                                "valid",
                                valid_data,
                                lr,
                                max_seq_length_train_curr,
                                name_data,
                                ep,
                                ck_val,
                                batch_size_tr_arr[ep],
                                drop_rates,
                            )
        # Grow the training sequence length at the end of each epoch (capped)
        if strtobool(config["batches"]["increase_seq_length_train"]):
            if len(max_seq_length_train.split(",")) == 1:
                max_seq_length_train_curr = max_seq_length_train_curr * int(
                    config["batches"]["multply_factor_seq_len_train"]
                )
                if max_seq_length_train_curr > int(max_seq_length_tr_arr[ep]):
                    max_seq_length_train_curr = int(max_seq_length_tr_arr[ep])
            else:
                # TODO: add support for increasing seq length when fea and lab have different time dimensionality
                pass
    # Create GAN LST files
    # NOTE(review): this block and the forward section below rely on `ep`,
    # `N_ck_str_format`, `model_files` and `max_seq_length_train_curr`
    # keeping their last values from the loops above (or from the
    # production branch) — confirm before reordering.
    try:
        if config["gan"]["arch_gan"] == "True":
            clean_gan_data_name = config["data_use"]["clean_gan_with"].split(",")
            for dataset in clean_gan_data_name:
                # Compute the total number of chunks for each training epoch
                N_ck_tr = compute_n_chunks(out_folder, dataset, ep, N_ep_str_format, "gan")
                # ***Epoch training***
                for ck in range(N_ck_tr):
                    # path of the list of features for this chunk
                    lst_file = (
                        out_folder
                        + "/exp_files/gan_"
                        + dataset
                        + "_ep"
                        + format(ep, N_ep_str_format)
                        + "_ck"
                        + format(ck, N_ck_str_format)
                        + "_*.lst"
                    )
                    # paths of the output files (info,model,chunk_specific cfg file)
                    info_file = (
                        out_folder
                        + "/exp_files/gan_"
                        + dataset
                        + "_ep"
                        + format(ep, N_ep_str_format)
                        + "_ck"
                        + format(ck, N_ck_str_format)
                        + ".info"
                    )
                    config_chunk_file = (
                        out_folder
                        + "/exp_files/gan_"
                        + dataset
                        + "_ep"
                        + format(ep, N_ep_str_format)
                        + "_ck"
                        + format(ck, N_ck_str_format)
                        + ".cfg"
                    )
                    if strtobool(config["batches"]["increase_seq_length_train"]) == False:
                        if len(max_seq_length_train.split(",")) == 1:
                            max_seq_length_train_curr = int(max_seq_length_tr_arr[ep])
                        else:
                            max_seq_length_train_curr = max_seq_length_tr_arr[ep]
                    # Write chunk-specific cfg file
                    write_cfg_chunk(
                        cfg_file,
                        config_chunk_file,
                        cfg_file_proto_chunk,
                        pt_files,
                        lst_file,
                        info_file,
                        "train",
                        dataset,
                        lr,
                        max_seq_length_train_curr,
                        name_data,
                        ep,
                        ck,
                        batch_size_tr_arr[ep],
                        drop_rates,
                    )
    except KeyError:
        # No [gan] section configured: nothing to do
        pass
    for forward_data in forward_data_lst:
        # Compute the number of chunks
        N_ck_forward = compute_n_chunks(out_folder, forward_data, ep, N_ep_str_format, "forward")
        N_ck_str_format = "0" + str(max(math.ceil(np.log10(N_ck_forward)), 1)) + "d"
        for ck in range(N_ck_forward):
            # path of the list of features for this chunk
            lst_file = (
                out_folder
                + "/exp_files/forward_"
                + forward_data
                + "_ep"
                + format(ep, N_ep_str_format)
                + "_ck"
                + format(ck, N_ck_str_format)
                + "_*.lst"
            )
            # output file
            info_file = (
                out_folder
                + "/exp_files/forward_"
                + forward_data
                + "_ep"
                + format(ep, N_ep_str_format)
                + "_ck"
                + format(ck, N_ck_str_format)
                + ".info"
            )
            config_chunk_file = (
                out_folder
                + "/exp_files/forward_"
                + forward_data
                + "_ep"
                + format(ep, N_ep_str_format)
                + "_ck"
                + format(ck, N_ck_str_format)
                + ".cfg"
            )
            lst_chunk_file.write(config_chunk_file + "\n")
            # Write chunk-specific cfg file
            write_cfg_chunk(
                cfg_file,
                config_chunk_file,
                cfg_file_proto_chunk,
                model_files,
                lst_file,
                info_file,
                "forward",
                forward_data,
                lr,
                max_seq_length_train_curr,
                name_data,
                ep,
                ck,
                batch_size_tr_arr[ep],
                drop_rates,
            )
    lst_chunk_file.close()
def create_lists(config):
    """Split every dataset referenced in the config into chunk-level .lst files.

    For each epoch and each dataset named in the [data_use] section
    (train_with, valid_with, forward_with and, when a [gan] section exists,
    clean_gan_with), the feature lists declared in the dataset's "fea" field
    are shuffled consistently across feature streams, split into N_chunks
    pieces, and written to
    <out_folder>/exp_files/<step>_<dataset>_ep<EP>_ck<CK>_<fea_name>.lst.

    Relies on helpers defined elsewhere in this file: cfg_item2sec,
    parse_fea_field, split_chunks, do_validation_after_chunk,
    get_val_lst_file_path.
    """

    def _get_validation_data_for_chunks(fea_names, list_fea, N_chunks):
        # Read every feature list, sort it, and concatenate the streams
        # line-by-line (comma separated) so one shuffle keeps them aligned.
        full_list = []
        for i in range(len(fea_names)):
            full_list.append([line.rstrip("\n") + "," for line in open(list_fea[i])])
            full_list[i] = sorted(full_list[i])
        full_list_fea_conc = full_list[0]
        for i in range(1, len(full_list)):
            full_list_fea_conc = list(map(str.__add__, full_list_fea_conc, full_list[i]))
        # Shuffling is skipped when a GAN set is being created
        # ([ganset] create_set = True).
        ganset = True
        try:
            if str(config["ganset"]["create_set"]) == "True":
                ganset = False
        except KeyError:
            pass
        if ganset:
            random.shuffle(full_list_fea_conc)
        valid_chunks_fea = list(split_chunks(full_list_fea_conc, N_chunks))
        return valid_chunks_fea

    def _shuffle_forward_data(config):
        # Forwarding data is shuffled unless explicitly disabled via
        # [forward] shuffle_forwarding_data = False.
        if "shuffle_forwarding_data" in config["forward"]:
            suffle_on_forwarding = strtobool(config["forward"]["shuffle_forwarding_data"])
            if not suffle_on_forwarding:
                return False
        return True

    # splitting data into chunks (see out_folder/additional_files)
    out_folder = config["exp"]["out_folder"]
    seed = int(config["exp"]["seed"])
    N_ep = int(config["exp"]["N_epochs_tr"])
    # Zero-padded field width for epoch indices in file names (e.g. "03d").
    N_ep_str_format = "0" + str(max(math.ceil(np.log10(N_ep)), 1)) + "d"
    # Setting the random seed
    random.seed(seed)
    # training chunk lists creation
    tr_data_name = config["data_use"]["train_with"].split(",")
    # Reading training feature lists
    for dataset in tr_data_name:
        sec_data = cfg_item2sec(config, "data_name", dataset)
        [fea_names, list_fea, fea_opts, cws_left, cws_right] = parse_fea_field(
            config[cfg_item2sec(config, "data_name", dataset)]["fea"]
        )
        N_chunks = int(config[sec_data]["N_chunks"])
        N_ck_str_format = "0" + str(max(math.ceil(np.log10(N_chunks)), 1)) + "d"
        full_list = []
        for i in range(len(fea_names)):
            full_list.append([line.rstrip("\n") + "," for line in open(list_fea[i])])
            full_list[i] = sorted(full_list[i])
        # concatenating all the features in a single list (useful for shuffling consistently)
        full_list_fea_conc = full_list[0]
        for i in range(1, len(full_list)):
            full_list_fea_conc = list(map(str.__add__, full_list_fea_conc, full_list[i]))
        for ep in range(N_ep):
            # randomize the list (unless a GAN set is being created)
            ganset = True
            try:
                if str(config["ganset"]["create_set"]) == "True":
                    ganset = False
            except KeyError:
                pass
            if ganset:
                random.shuffle(full_list_fea_conc)
            tr_chunks_fea = list(split_chunks(full_list_fea_conc, N_chunks))
            tr_chunks_fea.reverse()
            for ck in range(N_chunks):
                for i in range(len(fea_names)):
                    # Undo the per-line concatenation: column i belongs to feature i.
                    tr_chunks_fea_split = []
                    for snt in tr_chunks_fea[ck]:
                        tr_chunks_fea_split.append(snt.split(",")[i])
                    output_lst_file = (
                        out_folder
                        + "/exp_files/train_"
                        + dataset
                        + "_ep"
                        + format(ep, N_ep_str_format)
                        + "_ck"
                        + format(ck, N_ck_str_format)
                        + "_"
                        + fea_names[i]
                        + ".lst"
                    )
                    f = open(output_lst_file, "w")
                    tr_chunks_fea_wr = map(lambda x: x + "\n", tr_chunks_fea_split)
                    f.writelines(tr_chunks_fea_wr)
                    f.close()
                if do_validation_after_chunk(ck, N_chunks, config):
                    # Also materialize the validation lists tied to this training chunk.
                    # NOTE(review): the assignments below clobber the training
                    # dataset's fea_names/list_fea for the remaining ck
                    # iterations — confirm train/valid feature sets always match.
                    valid_data_name = config["data_use"]["valid_with"].split(",")
                    for dataset_val in valid_data_name:
                        sec_data = cfg_item2sec(config, "data_name", dataset_val)
                        fea_names, list_fea, fea_opts, cws_left, cws_right = parse_fea_field(
                            config[cfg_item2sec(config, "data_name", dataset_val)]["fea"]
                        )
                        N_chunks_val = int(config[sec_data]["N_chunks"])
                        N_ck_str_format_val = "0" + str(max(math.ceil(np.log10(N_chunks_val)), 1)) + "d"
                        valid_chunks_fea = _get_validation_data_for_chunks(fea_names, list_fea, N_chunks_val)
                        for ck_val in range(N_chunks_val):
                            for fea_idx in range(len(fea_names)):
                                valid_chunks_fea_split = []
                                for snt in valid_chunks_fea[ck_val]:
                                    valid_chunks_fea_split.append(snt.split(",")[fea_idx])
                                output_lst_file = get_val_lst_file_path(
                                    out_folder,
                                    dataset_val,
                                    ep,
                                    ck,
                                    ck_val,
                                    fea_names[fea_idx],
                                    N_ep_str_format,
                                    N_ck_str_format,
                                    N_ck_str_format_val,
                                )
                                f = open(output_lst_file, "w")
                                valid_chunks_fea_wr = map(lambda x: x + "\n", valid_chunks_fea_split)
                                f.writelines(valid_chunks_fea_wr)
                                f.close()
    # Create GAN LST files
    try:
        if config["gan"]["arch_gan"] == "True":
            clean_gan_data_name = config["data_use"]["clean_gan_with"].split(",")
            # Feature lists for clean GAN dataset
            for dataset in clean_gan_data_name:
                sec_data = cfg_item2sec(config, "data_name", dataset)
                [fea_names, list_fea, fea_opts, cws_left, cws_right] = parse_fea_field(
                    config[cfg_item2sec(config, "data_name", dataset)]["fea"]
                )
                N_chunks = int(config[sec_data]["N_chunks"])
                full_list = []
                for i in range(len(fea_names)):
                    full_list.append([line.rstrip("\n") + "," for line in open(list_fea[i])])
                    full_list[i] = sorted(full_list[i])
                # concatenating all the features in a single list (useful for shuffling consistently)
                full_list_fea_conc = full_list[0]
                for i in range(1, len(full_list)):
                    full_list_fea_conc = list(map(str.__add__, full_list_fea_conc, full_list[i]))
                for ep in range(N_ep):
                    # randomize the list
                    ganset = True
                    try:
                        if str(config["ganset"]["create_set"]) == "True":
                            ganset = False
                    except KeyError:
                        pass
                    if ganset:
                        random.shuffle(full_list_fea_conc)
                    tr_chunks_fea = list(split_chunks(full_list_fea_conc, N_chunks))
                    tr_chunks_fea.reverse()
                    for ck in range(N_chunks):
                        for i in range(len(fea_names)):
                            tr_chunks_fea_split = []
                            for snt in tr_chunks_fea[ck]:
                                tr_chunks_fea_split.append(snt.split(",")[i])
                            # NOTE(review): N_ck_str_format is reused from the
                            # training loop above and is NOT recomputed for this
                            # dataset's own N_chunks — confirm this is intended.
                            output_lst_file = (
                                out_folder
                                + "/exp_files/gan_"
                                + dataset
                                + "_ep"
                                + format(ep, N_ep_str_format)
                                + "_ck"
                                + format(ck, N_ck_str_format)
                                + "_"
                                + fea_names[i]
                                + ".lst"
                            )
                            f = open(output_lst_file, "w")
                            tr_chunks_fea_wr = map(lambda x: x + "\n", tr_chunks_fea_split)
                            f.writelines(tr_chunks_fea_wr)
                            f.close()
    except KeyError:
        # No [gan] section (or missing keys) in the config: nothing to do.
        pass
    # forward chunk lists creation
    forward_data_name = config["data_use"]["forward_with"].split(",")
    # Reading forwarding feature lists
    for dataset in forward_data_name:
        sec_data = cfg_item2sec(config, "data_name", dataset)
        [fea_names, list_fea, fea_opts, cws_left, cws_right] = parse_fea_field(
            config[cfg_item2sec(config, "data_name", dataset)]["fea"]
        )
        N_chunks = int(config[sec_data]["N_chunks"])
        N_ck_str_format = "0" + str(max(math.ceil(np.log10(N_chunks)), 1)) + "d"
        full_list = []
        for i in range(len(fea_names)):
            full_list.append([line.rstrip("\n") + "," for line in open(list_fea[i])])
            full_list[i] = sorted(full_list[i])
        # concatenating all the features in a single list (useful for shuffling consistently)
        full_list_fea_conc = full_list[0]
        for i in range(1, len(full_list)):
            full_list_fea_conc = list(map(str.__add__, full_list_fea_conc, full_list[i]))
        # randomize the list
        if _shuffle_forward_data(config):
            random.shuffle(full_list_fea_conc)
        forward_chunks_fea = list(split_chunks(full_list_fea_conc, N_chunks))
        for ck in range(N_chunks):
            for i in range(len(fea_names)):
                forward_chunks_fea_split = []
                for snt in forward_chunks_fea[ck]:
                    forward_chunks_fea_split.append(snt.split(",")[i])
                # NOTE(review): `ep` here is the leftover value from the last
                # training-epoch loop (N_ep - 1) — confirm forwarding lists are
                # meant to be tagged with the final epoch index.
                output_lst_file = (
                    out_folder
                    + "/exp_files/forward_"
                    + dataset
                    + "_ep"
                    + format(ep, N_ep_str_format)
                    + "_ck"
                    + format(ck, N_ck_str_format)
                    + "_"
                    + fea_names[i]
                    + ".lst"
                )
                f = open(output_lst_file, "w")
                forward_chunks_fea_wr = map(lambda x: x + "\n", forward_chunks_fea_split)
                f.writelines(forward_chunks_fea_wr)
                f.close()
def write_cfg_chunk(
    cfg_file,
    config_chunk_file,
    cfg_file_proto_chunk,
    pt_files,
    lst_file,
    info_file,
    to_do,
    data_set_name,
    lr,
    max_seq_length_train_curr,
    name_data,
    ep,
    ck,
    batch_size,
    drop_rates,
):
    """Derive a chunk-specific cfg file from the global cfg file and write it.

    Starting from a copy of `cfg_file`, this sets the per-chunk mode
    (`to_do`), output info file, seed, batch size, pretrained-model files,
    current learning rates and dropout rates, rewrites the dataset section as
    a single [data_chunk] section whose feature lists point at the
    chunk-specific .lst files matched by the `lst_file` glob, strips the
    sections/options that only make sense globally, writes the result to
    `config_chunk_file`, and finally validates it against
    `cfg_file_proto_chunk` via check_consistency_with_proto.

    NOTE(review): the `name_data` parameter is currently unused here.
    """
    # writing the chunk-specific cfg file
    config = configparser.ConfigParser()
    config.read(cfg_file)
    config_chunk = configparser.ConfigParser()
    config_chunk.read(cfg_file)
    # Exp section
    config_chunk["exp"]["to_do"] = to_do
    config_chunk["exp"]["out_info"] = info_file
    # change seed for randomness: each (epoch, chunk) pair gets its own seed
    config_chunk["exp"]["seed"] = str(int(config_chunk["exp"]["seed"]) + ep + ck)
    config_chunk["batches"]["batch_size_train"] = batch_size
    # Point every architecture at its pretrained-parameter file for this chunk.
    for arch in pt_files.keys():
        config_chunk[arch]["arch_pretrain_file"] = pt_files[arch]
    # writing the current learning rate (and epoch-specific dropout rates)
    for lr_arch in lr.keys():
        config_chunk[lr_arch]["arch_lr"] = str(lr[lr_arch][ep])
        for (field_key, field_val) in config.items(lr_arch):
            if "_drop" in field_key:
                N_lay = len(drop_rates[lr_arch])
                drop_arr = []
                for lay in range(N_lay):
                    drop_arr.append(drop_rates[lr_arch][lay][ep])
                config_chunk[lr_arch][field_key] = str(",".join(drop_arr))
    # Data_chunk section: copy of the dataset section for data_set_name
    config_chunk.add_section("data_chunk")
    config_chunk["data_chunk"] = config[cfg_item2sec(config, "data_name", data_set_name)]
    lst_files = sorted(glob.glob(lst_file))
    current_fea = config_chunk["data_chunk"]["fea"]
    list_current_fea = re.findall("fea_name=(.*)\nfea_lst=(.*)\n", current_fea)
    # Replace each global fea_lst path with the chunk-specific .lst whose
    # trailing "_<fea_name>.lst" suffix matches the feature name.
    for (fea, path) in list_current_fea:
        for path_cand in lst_files:
            fea_type_cand = re.findall("_(.*).lst", path_cand)[0].split("_")[-1]
            if fea_type_cand == fea:
                config_chunk["data_chunk"]["fea"] = config_chunk["data_chunk"]["fea"].replace(path, path_cand)
    # Drop options/sections that are only meaningful in the global cfg.
    config_chunk.remove_option("data_chunk", "data_name")
    config_chunk.remove_option("data_chunk", "N_chunks")
    config_chunk.remove_section("decoding")
    config_chunk.remove_section("data_use")
    data_to_del = []
    for sec in config.sections():
        if "dataset" in sec:
            data_to_del.append(config[sec]["data_name"])
    for dataset in data_to_del:
        config_chunk.remove_section(cfg_item2sec(config_chunk, "data_name", dataset))
    # Create batches section
    config_chunk.remove_option("batches", "increase_seq_length_train")
    config_chunk.remove_option("batches", "start_seq_len_train")
    config_chunk.remove_option("batches", "multply_factor_seq_len_train")
    config_chunk["batches"]["max_seq_length_train"] = str(max_seq_length_train_curr)
    # Write cfg_file_chunk
    with open(config_chunk_file, "w") as configfile:
        config_chunk.write(configfile)
    # Check cfg_file_chunk against the chunk proto file
    [config_proto_chunk, name_data_ck, name_arch_ck] = check_consistency_with_proto(
        config_chunk_file, cfg_file_proto_chunk
    )
def parse_fea_field(fea):
    """Parse the "fea" field of a [dataset*] config section.

    Extracts the fea_name / fea_lst / fea_opts / cw_left / cw_right entries
    line by line, validates that feature names are unique, that context
    windows pass check_field, that every list file exists, and that all list
    files cover the same set of sentence ids. On any validation failure an
    error is written to stderr and the process exits.

    Returns [fea_names, fea_lsts, fea_opts, cws_left, cws_right].
    """
    fea_names = []
    fea_lsts = []
    fea_opts = []
    cws_left = []
    cws_right = []
    for entry in fea.split("\n"):
        # Collapse runs of spaces so "key = value" and "key=value" parse alike.
        entry = re.sub(" +", " ", entry)
        if "fea_name=" in entry:
            fea_names.append(entry.split("=")[1])
        if "fea_lst=" in entry:
            fea_lsts.append(entry.split("=")[1])
        if "fea_opts=" in entry:
            fea_opts.append(entry.split("fea_opts=")[1])
        if "cw_left=" in entry:
            cws_left.append(entry.split("=")[1])
            if not check_field(entry.split("=")[1], "int(0,inf)", "cw_left"):
                sys.exit(0)
        if "cw_right=" in entry:
            cws_right.append(entry.split("=")[1])
            if not check_field(entry.split("=")[1], "int(0,inf)", "cw_right"):
                sys.exit(0)
    # Feature names must be unique.
    if len(fea_names) != len(set(fea_names)):
        sys.stderr.write("ERROR fea_names must be different! (got %s)" % (fea_names))
        sys.exit(0)
    # Every list file must exist and name exactly the same sentence ids.
    prev_snts = None
    for lst_path in fea_lsts:
        if not os.path.isfile(lst_path):
            sys.stderr.write(
                'ERROR: The path "%s" specified in the field "fea_lst" of the config file does not exists! \n'
                % (lst_path)
            )
            sys.exit(0)
        snts = sorted([line.rstrip("\n").split(" ")[0] for line in open(lst_path)])
        if prev_snts is not None and snts != prev_snts:
            sys.stderr.write(
                "ERROR: the files %s in fea_lst contain a different set of sentences! \n" % (lst_path)
            )
            sys.exit(0)
        prev_snts = snts
    return [fea_names, fea_lsts, fea_opts, cws_left, cws_right]
def parse_lab_field(lab):
    """Parse the "lab" field of a [dataset*] config section.

    Extracts the lab_name / lab_folder / lab_opts entries line by line,
    checks that label names are unique and that every label folder exists on
    disk (writing an error to stderr and exiting otherwise).

    Returns [lab_names, lab_folders, lab_opts].
    """
    lab_names = []
    lab_folders = []
    lab_opts = []
    for entry in lab.split("\n"):
        # Collapse runs of spaces so "key = value" and "key=value" parse alike.
        entry = re.sub(" +", " ", entry)
        if "lab_name=" in entry:
            lab_names.append(entry.split("=")[1])
        if "lab_folder=" in entry:
            lab_folders.append(entry.split("=")[1])
        if "lab_opts=" in entry:
            lab_opts.append(entry.split("lab_opts=")[1])
    # Label names must be unique.
    if len(lab_names) != len(set(lab_names)):
        sys.stderr.write("ERROR lab_names must be different! (got %s)" % (lab_names))
        sys.exit(0)
    # Every label folder must exist on disk.
    for folder in lab_folders:
        if not os.path.isdir(folder):
            sys.stderr.write(
                'ERROR: The path "%s" specified in the field "lab_folder" of the config file does not exists! \n'
                % (folder)
            )
            sys.exit(0)
    return [lab_names, lab_folders, lab_opts]
def compute_n_chunks(out_folder, data_list, ep, N_ep_str_format, step):
    """Return the number of chunk list files written for a given epoch/step.

    Globs <out_folder>/exp_files/<step>_<data_list>_ep<EP>*.lst and derives
    the chunk count from the highest "_ck<NN>_" index found in the last
    (lexicographically greatest) matching file name, plus one.

    Args:
        out_folder: experiment output folder containing exp_files/.
        data_list: dataset name embedded in the .lst file names.
        ep: epoch index.
        N_ep_str_format: zero-padded format spec for the epoch (e.g. "03d").
        step: file-name prefix, e.g. "train", "valid" or "forward".

    Raises:
        FileNotFoundError: if no chunk list file matches (previously this
            surfaced as an opaque IndexError on an empty glob result).
    """
    pattern = (
        out_folder + "/exp_files/" + step + "_" + data_list + "_ep" + format(ep, N_ep_str_format) + "*.lst"
    )
    list_ck = sorted(glob.glob(pattern))
    if not list_ck:
        # Fail loudly with the offending pattern instead of IndexError on [-1].
        raise FileNotFoundError("compute_n_chunks: no chunk list files match %s" % pattern)
    last_ck = list_ck[-1]
    # "_ck<NN>_" carries the zero-padded chunk index; indices start at 0.
    N_ck = int(re.findall("_ck(.+)_", last_ck)[-1].split("_")[0]) + 1
    return N_ck
def parse_model_field(cfg_file):
    """Validate the [model] section of a cfg file against its proto file.

    Every line of the model must have the shape output=operation(arg1,arg2).
    Each operation must be declared in the proto file, and each argument must
    match the proto's declared kind: "architecture" (an arch_name from the
    cfg), "label" (a lab_name of dataset1), "input" (a fea_name of dataset1
    or the output of a previous line), or "float". On any violation an error
    is written to stderr and the process exits. The model must also define
    the mandatory loss_final and err_final variables.
    """
    # Reading the config file
    config = configparser.ConfigParser()
    config.read(cfg_file)
    # reading the proto file describing the legal operations
    model_proto_file = config["model"]["model_proto"]
    f = open(model_proto_file, "r")
    proto_model = f.read()
    # reading the model string
    model = config["model"]["model"]
    # Reading fea,lab,arch names from the cfg file (spaces stripped first)
    fea_lst = list(re.findall("fea_name=(.*)\n", config["dataset1"]["fea"].replace(" ", "")))
    lab_lst = list(re.findall("lab_name=(.*)\n", config["dataset1"]["lab"].replace(" ", "")))
    arch_lst = list(re.findall("arch_name=(.*)\n", open(cfg_file, "r").read().replace(" ", "")))
    # Each proto line "op(kind1,kind2)" declares an operation and its arg kinds.
    possible_operations = re.findall("(.*)\((.*),(.*)\)\n", proto_model)
    # Outputs of earlier model lines become legal inputs of later ones.
    possible_inputs = fea_lst
    model_arch = list(filter(None, model.replace(" ", "").split("\n")))
    # Reading the model field line by line
    for line in model_arch:
        pattern = "(.*)=(.*)\((.*),(.*)\)"
        if not re.match(pattern, line):
            sys.stderr.write(
                "ERROR: all the entries must be of the following type: output=operation(str,str), got (%s)\n" % (line)
            )
            sys.exit(0)
        else:
            # Analyze the line and check that it is compliant with proto_model
            [out_name, operation, inp1, inp2] = list(re.findall(pattern, line)[0])
            inps = [inp1, inp2]
            found = False
            for i in range(len(possible_operations)):
                if operation == possible_operations[i][0]:
                    found = True
                    # Check both arguments against the proto's declared kinds.
                    for k in range(1, 3):
                        if possible_operations[i][k] == "architecture":
                            if inps[k - 1] not in arch_lst:
                                sys.stderr.write(
                                    'ERROR: the architecture "%s" is not in the architecture lists of the config file (possible architectures are %s)\n'
                                    % (inps[k - 1], arch_lst)
                                )
                                sys.exit(0)
                        if possible_operations[i][k] == "label":
                            if inps[k - 1] not in lab_lst:
                                sys.stderr.write(
                                    'ERROR: the label "%s" is not in the label lists of the config file (possible labels are %s)\n'
                                    % (inps[k - 1], lab_lst)
                                )
                                sys.exit(0)
                        if possible_operations[i][k] == "input":
                            if inps[k - 1] not in possible_inputs:
                                sys.stderr.write(
                                    'ERROR: the input "%s" is not defined before (possible inputs are %s)\n'
                                    % (inps[k - 1], possible_inputs)
                                )
                                sys.exit(0)
                        if possible_operations[i][k] == "float":
                            try:
                                float(inps[k - 1])
                            except ValueError:
                                sys.stderr.write(
                                    'ERROR: the input "%s" must be a float, got %s\n' % (inps[k - 1], line)
                                )
                                sys.exit(0)
                    # Update the list of possible inputs with this line's output
                    possible_inputs.append(out_name)
                    break
            if found == False:
                sys.stderr.write(
                    ('ERROR: operation "%s" does not exists (not defined into the model proto file)\n' % (operation))
                )
                sys.exit(0)
    # Check for the mandatory fields
    if "loss_final" not in "".join(model_arch):
        sys.stderr.write("ERROR: the variable loss_final should be defined in model\n")
        sys.exit(0)
    if "err_final" not in "".join(model_arch):
        sys.stderr.write("ERROR: the variable err_final should be defined in model\n")
        sys.exit(0)
def terminal_node_detection(model_arch, node):
    """Return True if `node` is never consumed as an input by any model line.

    Each line of `model_arch` has the shape output=operation(inp1,inp2); a
    node is terminal when it appears as neither inp1 nor inp2 of any line.

    The original implementation kept scanning (and re-running the regex on)
    every remaining line even after finding a consumer; this version returns
    as soon as the answer is known.
    """
    pattern = r"(.*)=(.*)\((.*),(.*)\)"
    for line in model_arch:
        [out_name, operation, inp1, inp2] = list(re.findall(pattern, line)[0])
        if inp1 == node or inp2 == node:
            return False
    return True
def create_block_connection(lst_inp, model_arch, diag_lines, cnt_names, arch_dict):
    """Recursively append blockdiag edges for every node reachable from lst_inp.

    For each element of `lst_inp`, find the model lines that consume it and
    emit an edge "src_index -> dst_index [label=...]" using the node indices
    recorded in `cnt_names`. `model_arch` is MUTATED in place: consumed
    inputs are rewritten to the placeholder "processed" so the same edge is
    not emitted twice on the recursive pass over the newly produced outputs.

    Returns [arch_current, output_conn_pruned, diag_lines] where
    output_conn_pruned is the produced outputs that are still consumed
    somewhere downstream (terminal nodes removed).
    """
    # Base case: nothing left to connect.
    if lst_inp == []:
        return [[], [], diag_lines]
    pattern = "(.*)=(.*)\((.*),(.*)\)"
    arch_current = []
    output_conn = []
    current_inp = []
    for input_element in lst_inp:
        for l in range(len(model_arch)):
            [out_name, operation, inp1, inp2] = list(re.findall(pattern, model_arch[l])[0])
            if inp1 == input_element or inp2 == input_element:
                if operation == "compute":
                    # For compute(arch, data) the destination block is the
                    # architecture (inp1) and the data input is inp2.
                    arch_current.append(inp1)
                    output_conn.append(out_name)
                    current_inp.append(inp2)
                    model_arch[l] = "processed" + "=" + operation + "(" + inp1 + ",processed)"
                else:
                    # For other operations the destination is the output node;
                    # mark whichever argument was just consumed as processed.
                    arch_current.append(out_name)
                    output_conn.append(out_name)
                    if inp1 == input_element:
                        current_inp.append(inp1)
                        model_arch[l] = out_name + "=" + operation + "(processed," + inp2 + ")"
                    if inp2 == input_element:
                        current_inp.append(inp2)
                        model_arch[l] = out_name + "=" + operation + "(" + inp1 + ",processed)"
    for i in range(len(arch_current)):
        # Create connections (indices into cnt_names are the blockdiag node ids)
        diag_lines = (
            diag_lines
            + str(cnt_names.index(arch_dict[current_inp[i]]))
            + " -> "
            + str(cnt_names.index(arch_current[i]))
            + ' [label = "'
            + current_inp[i]
            + '"]\n'
        )
    # remove terminal nodes from the output list returned to the caller
    output_conn_pruned = []
    for node in output_conn:
        if not (terminal_node_detection(model_arch, node)):
            output_conn_pruned.append(node)
    # Recurse on the (unpruned) outputs; recursion terminates because the
    # in-place rewrites above eventually leave no line consuming them.
    [arch_current, output_conn, diag_lines] = create_block_connection(
        output_conn, model_arch, diag_lines, cnt_names, arch_dict
    )
    return [arch_current, output_conn_pruned, diag_lines]
def create_block_diagram(cfg_file):
    """Render the model described in cfg_file as a blockdiag SVG diagram.

    Parses the [model] lines, emits one blockdiag node per architecture,
    feature and label, connects them via create_block_connection, writes
    <out_folder>/model.diag, and shells out to the external `blockdiag` tool
    (via run_shell) to produce <out_folder>/model.svg.
    """
    # Reading the config file
    config = configparser.ConfigParser()
    config.read(cfg_file)
    # reading the model string
    model = config["model"]["model"]
    # Reading fea,lab,arch names from the cfg file
    pattern = "(.*)=(.*)\((.*),(.*)\)"
    fea_lst = list(re.findall("fea_name=(.*)\n", config["dataset1"]["fea"].replace(" ", "")))
    lab_lst = list(re.findall("lab_name=(.*)\n", config["dataset1"]["lab"].replace(" ", "")))
    arch_lst = list(re.findall("arch_name=(.*)\n", open(cfg_file, "r").read().replace(" ", "")))
    out_diag_file = config["exp"]["out_folder"] + "/model.diag"
    model_arch = list(filter(None, model.replace(" ", "").split("\n")))
    diag_lines = "blockdiag {\n"
    cnt = 0            # next blockdiag node id
    cnt_names = []     # node name at index i has blockdiag id i
    arch_lst = []      # re-initialized: collects architecture node names
    fea_lst_used = []
    lab_lst_used = []
    # Drop the error-computation line from the diagram.
    # NOTE(review): removing from model_arch while iterating over it can skip
    # the element after a removed one — confirm only a single err_final=
    # line ever occurs.
    for line in model_arch:
        if "err_final=" in line:
            model_arch.remove(line)
    # Initialization of the blocks (one blockdiag node per entity)
    for line in model_arch:
        [out_name, operation, inp1, inp2] = list(re.findall(pattern, line)[0])
        if operation != "compute":
            # node for the operation itself
            diag_lines = diag_lines + str(cnt) + ' [label="' + operation + '",shape = roundedbox];\n'
            arch_lst.append(out_name)
            cnt_names.append(out_name)
            cnt = cnt + 1
            # labels
            if inp2 in lab_lst:
                diag_lines = diag_lines + str(cnt) + ' [label="' + inp2 + '",shape = roundedbox];\n'
                if inp2 not in lab_lst_used:
                    lab_lst_used.append(inp2)
                cnt_names.append(inp2)
                cnt = cnt + 1
            # features
            if inp1 in fea_lst:
                diag_lines = diag_lines + str(cnt) + ' [label="' + inp1 + '",shape = circle];\n'
                if inp1 not in fea_lst_used:
                    fea_lst_used.append(inp1)
                cnt_names.append(inp1)
                cnt = cnt + 1
            if inp2 in fea_lst:
                diag_lines = diag_lines + str(cnt) + ' [label="' + inp2 + '",shape = circle];\n'
                if inp2 not in fea_lst_used:
                    fea_lst_used.append(inp2)
                cnt_names.append(inp2)
                cnt = cnt + 1
        else:
            # compute(arch, data): node for the architecture
            diag_lines = diag_lines + str(cnt) + ' [label="' + inp1 + '",shape = box];\n'
            arch_lst.append(inp1)
            cnt_names.append(inp1)
            cnt = cnt + 1
            # feature consumed by the architecture
            if inp2 in fea_lst:
                diag_lines = diag_lines + str(cnt) + ' [label="' + inp2 + '",shape = circle];\n'
                if inp2 not in fea_lst_used:
                    fea_lst_used.append(inp2)
                cnt_names.append(inp2)
                cnt = cnt + 1
    # Connections across blocks: map every producible name to the node that
    # should be drawn as its source (the architecture for compute outputs).
    lst_conc = fea_lst_used + lab_lst_used
    arch_dict = {}
    for elem in lst_conc:
        arch_dict[elem] = elem
    for model_line in model_arch:
        [out_name, operation, inp1, inp2] = list(re.findall(pattern, model_line)[0])
        if operation == "compute":
            arch_dict[out_name] = inp1
        else:
            arch_dict[out_name] = out_name
    output_conn = lst_conc
    [arch_current, output_conn, diag_lines] = create_block_connection(
        output_conn, model_arch, diag_lines, cnt_names, arch_dict
    )
    diag_lines = diag_lines + "}"
    # Write the diag file describing the model
    with open(out_diag_file, "w") as text_file:
        text_file.write("%s" % diag_lines)
    # Create image from the diag file (requires the blockdiag executable)
    log_file = config["exp"]["out_folder"] + "/log.log"
    cmd = "blockdiag -Tsvg " + out_diag_file + " -o " + config["exp"]["out_folder"] + "/model.svg"
    run_shell(cmd, log_file)
def list_fea_lab_arch(config):  # cancel
    """Collect the features, labels and architectures the model actually uses.

    (Tagged "# cancel": superseded by dict_fea_lab_arch, which returns the
    same information as dictionaries.)

    Scans every [model] line output=operation(inp1,inp2) and, for each input
    that is a declared feature or label of [data_chunk], re-parses its
    fea_lst/fea_opts/cw_* (resp. lab_folder/lab_opts) sub-fields; for each
    "compute" line it records the architecture section id, name and its
    arch_seq_model flag.

    Returns [fea_lst_used, lab_lst_used, arch_lst_used] where each element
    is a list of lists, e.g. fea entries are
    [fea_name, fea_lst, fea_opts, cw_left, cw_right].
    """
    model = config["model"]["model"].split("\n")
    fea_lst = list(re.findall("fea_name=(.*)\n", config["data_chunk"]["fea"].replace(" ", "")))
    lab_lst = list(re.findall("lab_name=(.*)\n", config["data_chunk"]["lab"].replace(" ", "")))
    fea_lst_used = []
    lab_lst_used = []
    arch_lst_used = []
    fea_dict_used = {}
    lab_dict_used = {}
    arch_dict_used = {}
    # *_name lists track what has already been recorded (dedup).
    fea_lst_used_name = []
    lab_lst_used_name = []
    arch_lst_used_name = []
    fea_field = config["data_chunk"]["fea"]
    lab_field = config["data_chunk"]["lab"]
    pattern = "(.*)=(.*)\((.*),(.*)\)"
    for line in model:
        [out_name, operation, inp1, inp2] = list(re.findall(pattern, line)[0])
        if inp1 in fea_lst and inp1 not in fea_lst_used_name:
            pattern_fea = "fea_name=" + inp1 + "\nfea_lst=(.*)\nfea_opts=(.*)\ncw_left=(.*)\ncw_right=(.*)"
            fea_lst_used.append((inp1 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).split(","))
            fea_dict_used[inp1] = (inp1 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).split(",")
            fea_lst_used_name.append(inp1)
        if inp2 in fea_lst and inp2 not in fea_lst_used_name:
            pattern_fea = "fea_name=" + inp2 + "\nfea_lst=(.*)\nfea_opts=(.*)\ncw_left=(.*)\ncw_right=(.*)"
            fea_lst_used.append((inp2 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).split(","))
            fea_dict_used[inp2] = (inp2 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).split(",")
            fea_lst_used_name.append(inp2)
        if inp1 in lab_lst and inp1 not in lab_lst_used_name:
            pattern_lab = "lab_name=" + inp1 + "\nlab_folder=(.*)\nlab_opts=(.*)"
            lab_lst_used.append((inp1 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).split(","))
            lab_dict_used[inp1] = (inp1 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).split(",")
            lab_lst_used_name.append(inp1)
        if inp2 in lab_lst and inp2 not in lab_lst_used_name:
            pattern_lab = "lab_name=" + inp2 + "\nlab_folder=(.*)\nlab_opts=(.*)"
            lab_lst_used.append((inp2 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).split(","))
            lab_dict_used[inp2] = (inp2 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).split(",")
            lab_lst_used_name.append(inp2)
        if operation == "compute" and inp1 not in arch_lst_used_name:
            arch_id = cfg_item2sec(config, "arch_name", inp1)
            arch_seq_model = strtobool(config[arch_id]["arch_seq_model"])
            arch_lst_used.append([arch_id, inp1, arch_seq_model])
            arch_dict_used[inp1] = [arch_id, inp1, arch_seq_model]
            arch_lst_used_name.append(inp1)
    # convert to unicode (for python 2)
    for i in range(len(fea_lst_used)):
        fea_lst_used[i] = list(map(str, fea_lst_used[i]))
    for i in range(len(lab_lst_used)):
        lab_lst_used[i] = list(map(str, lab_lst_used[i]))
    for i in range(len(arch_lst_used)):
        arch_lst_used[i] = list(map(str, arch_lst_used[i]))
    return [fea_lst_used, lab_lst_used, arch_lst_used]
def dict_fea_lab_arch(config, fea_only):
    """Collect the features, labels and architectures the model actually uses.

    Dictionary-returning counterpart of list_fea_lab_arch. Scans every
    [model] line output=operation(inp1,inp2); inputs that are declared
    features/labels of [data_chunk] get their sub-fields re-parsed, and each
    "compute" line records its architecture's section id and seq-model flag.
    When `fea_only` is truthy, label handling is skipped entirely
    (production/forward case with no labels available).

    Returns [fea_dict_used, lab_dict_used, arch_dict_used] mapping
    fea_name -> [name, fea_lst, fea_opts, cw_left, cw_right],
    lab_name -> [name, lab_folder, lab_opts], and
    arch_name -> [section_id, name, arch_seq_model].
    """
    model = config["model"]["model"].split("\n")
    fea_lst = list(re.findall("fea_name=(.*)\n", config["data_chunk"]["fea"].replace(" ", "")))
    lab_lst = list(re.findall("lab_name=(.*)\n", config["data_chunk"]["lab"].replace(" ", "")))
    fea_lst_used = []
    lab_lst_used = []
    arch_lst_used = []
    fea_dict_used = {}
    lab_dict_used = {}
    arch_dict_used = {}
    # *_name lists track what has already been recorded (dedup).
    fea_lst_used_name = []
    lab_lst_used_name = []
    arch_lst_used_name = []
    fea_field = config["data_chunk"]["fea"]
    lab_field = config["data_chunk"]["lab"]
    pattern = "(.*)=(.*)\((.*),(.*)\)"
    for line in model:
        [out_name, operation, inp1, inp2] = list(re.findall(pattern, line)[0])
        if inp1 in fea_lst and inp1 not in fea_lst_used_name:
            pattern_fea = "fea_name=" + inp1 + "\nfea_lst=(.*)\nfea_opts=(.*)\ncw_left=(.*)\ncw_right=(.*)"
            # Python 2 needs an explicit utf8 encode before split.
            if sys.version_info[0] == 2:
                fea_lst_used.append(
                    (inp1 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).encode("utf8").split(",")
                )
                fea_dict_used[inp1] = (
                    (inp1 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).encode("utf8").split(",")
                )
            else:
                fea_lst_used.append((inp1 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).split(","))
                fea_dict_used[inp1] = (inp1 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).split(",")
            fea_lst_used_name.append(inp1)
        if inp2 in fea_lst and inp2 not in fea_lst_used_name:
            pattern_fea = "fea_name=" + inp2 + "\nfea_lst=(.*)\nfea_opts=(.*)\ncw_left=(.*)\ncw_right=(.*)"
            if sys.version_info[0] == 2:
                fea_lst_used.append(
                    (inp2 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).encode("utf8").split(",")
                )
                fea_dict_used[inp2] = (
                    (inp2 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).encode("utf8").split(",")
                )
            else:
                fea_lst_used.append((inp2 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).split(","))
                fea_dict_used[inp2] = (inp2 + "," + ",".join(list(re.findall(pattern_fea, fea_field)[0]))).split(",")
            fea_lst_used_name.append(inp2)
        if inp1 in lab_lst and inp1 not in lab_lst_used_name and not fea_only:
            pattern_lab = "lab_name=" + inp1 + "\nlab_folder=(.*)\nlab_opts=(.*)"
            if sys.version_info[0] == 2:
                lab_lst_used.append(
                    (inp1 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).encode("utf8").split(",")
                )
                lab_dict_used[inp1] = (
                    (inp1 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).encode("utf8").split(",")
                )
            else:
                lab_lst_used.append((inp1 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).split(","))
                lab_dict_used[inp1] = (inp1 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).split(",")
            lab_lst_used_name.append(inp1)
        if inp2 in lab_lst and inp2 not in lab_lst_used_name and not fea_only:
            # Testing production case (no labels): skipped when fea_only is set
            pattern_lab = "lab_name=" + inp2 + "\nlab_folder=(.*)\nlab_opts=(.*)"
            if sys.version_info[0] == 2:
                lab_lst_used.append(
                    (inp2 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).encode("utf8").split(",")
                )
                lab_dict_used[inp2] = (
                    (inp2 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).encode("utf8").split(",")
                )
            else:
                lab_lst_used.append((inp2 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).split(","))
                lab_dict_used[inp2] = (inp2 + "," + ",".join(list(re.findall(pattern_lab, lab_field)[0]))).split(",")
            lab_lst_used_name.append(inp2)
        if operation == "compute" and inp1 not in arch_lst_used_name:
            arch_id = cfg_item2sec(config, "arch_name", inp1)
            arch_seq_model = strtobool(config[arch_id]["arch_seq_model"])
            arch_lst_used.append([arch_id, inp1, arch_seq_model])
            arch_dict_used[inp1] = [arch_id, inp1, arch_seq_model]
            arch_lst_used_name.append(inp1)
    # convert to unicode (for python 2); note only the *_lst_used lists are
    # normalized here — the returned dicts keep their original entries.
    for i in range(len(fea_lst_used)):
        fea_lst_used[i] = list(map(str, fea_lst_used[i]))
    for i in range(len(lab_lst_used)):
        lab_lst_used[i] = list(map(str, lab_lst_used[i]))
    for i in range(len(arch_lst_used)):
        arch_lst_used[i] = list(map(str, arch_lst_used[i]))
    return [fea_dict_used, lab_dict_used, arch_dict_used]
def is_sequential(config, arch_lst):  # To cancel
    """Return True if any architecture in `arch_lst` is flagged sequential.

    Each arch_lst entry is [arch_section_id, arch_name, arch_seq_model]; the
    flag actually consulted is the config's arch_seq_model option for the
    entry's section. (Deprecated — see is_sequential_dict.)
    """
    for arch_id, _arch_name, _arch_seq in arch_lst:
        if strtobool(config[arch_id]["arch_seq_model"]):
            return True
    return False
def is_sequential_dict(config, arch_dict):
    """Return True if any architecture in `arch_dict` is flagged sequential.

    `arch_dict` maps arch_name -> [arch_section_id, arch_name,
    arch_seq_model]; the flag actually consulted is the config's
    arch_seq_model option for the entry's section.
    """
    for arch_info in arch_dict.values():
        if strtobool(config[arch_info[0]]["arch_seq_model"]):
            return True
    return False
def compute_cw_max(fea_dict):
    """Return [max_left, max_right] context-window sizes over all features.

    Each fea_dict entry is [fea_name, fea_lst, fea_opts, cw_left, cw_right];
    indices 3 and 4 hold the (string) left/right context-window sizes.
    """
    left_sizes = [int(entry[3]) for entry in fea_dict.values()]
    right_sizes = [int(entry[4]) for entry in fea_dict.values()]
    return [max(left_sizes), max(right_sizes)]
def model_init(inp_out_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do):
    """Instantiate the networks and cost functions described by the model lines.

    For every "compute" line the architecture class named by the config's
    arch_library/arch_class is imported dynamically and constructed with its
    input dimensionality; the net is moved to CUDA and put in train/eval
    mode as required. `inp_out_dict` is MUTATED in place: each line's output
    name is mapped to its (list-wrapped) output dimension so later lines can
    look up their input dims.

    Returns [nns, costs]: nns maps arch_name -> instantiated network,
    costs maps output name -> loss module (currently only cost_nll).

    NOTE(review): the `multi_gpu` parameter is currently unused here.
    """
    pattern = "(.*)=(.*)\((.*),(.*)\)"
    nns = {}
    costs = {}
    for line in model:
        [out_name, operation, inp1, inp2] = list(re.findall(pattern, line)[0])
        if operation == "compute":
            # computing input dim (last element of the input's dim list)
            inp_dim = inp_out_dict[inp2][-1]
            # import the architecture class dynamically
            module = importlib.import_module(config[arch_dict[inp1][0]]["arch_library"])
            nn_class = getattr(module, config[arch_dict[inp1][0]]["arch_class"])
            # propagate use_cuda and to_do options into the arch section
            config.set(arch_dict[inp1][0], "use_cuda", config["exp"]["use_cuda"])
            config.set(arch_dict[inp1][0], "to_do", config["exp"]["to_do"])
            arch_freeze_flag = strtobool(config[arch_dict[inp1][0]]["arch_freeze"])
            # initialize the neural network; with [gan] double_features=True
            # the input dimensionality is doubled
            double_features = False
            try:
                if config["gan"]["double_features"] == "True":
                    double_features = True
            except KeyError:
                pass
            if double_features:
                net = nn_class(config[arch_dict[inp1][0]], (inp_dim*2))
            else:
                net = nn_class(config[arch_dict[inp1][0]], inp_dim)
            if use_cuda:
                net.cuda()
            if to_do == "train":
                if not (arch_freeze_flag):
                    net.train()
                else:
                    # Switch to eval modality if architecture is frozen (mainly for batch_norm/dropout functions)
                    net.eval()
            else:
                net.eval()
            # adding nn into the nns dict
            nns[arch_dict[inp1][1]] = net
            out_dim = net.out_dim
            # updating output dim
            inp_out_dict[out_name] = [out_dim]
        if operation == "concatenate":
            inp_dim1 = inp_out_dict[inp1][-1]
            inp_dim2 = inp_out_dict[inp2][-1]
            inp_out_dict[out_name] = [inp_dim1 + inp_dim2]
        if operation == "cost_nll":
            costs[out_name] = nn.NLLLoss()
            inp_out_dict[out_name] = [1]
        if operation == "cost_err":
            inp_out_dict[out_name] = [1]
        # element-wise / scalar operations preserve the first input's dims
        if (
            operation == "mult"
            or operation == "sum"
            or operation == "mult_constant"
            or operation == "sum_constant"
            or operation == "avg"
            or operation == "mse"
        ):
            inp_out_dict[out_name] = inp_out_dict[inp1]
    return [nns, costs]
def optimizer_init(nns, config, arch_dict):
    """Build one torch optimizer per network from its config section.

    For each net in `nns`, reads its architecture section (via arch_dict)
    and constructs the optimizer selected by arch_opt ("sgd", "adam" or
    "rmsprop") with the corresponding opt_* hyper-parameters and arch_lr.

    Returns a dict mapping net name -> torch.optim optimizer.

    NOTE(review): an arch_opt value matching none of the three branches
    leaves that net without an entry in the returned dict.
    """
    # optimizer init
    optimizers = {}
    for net in nns.keys():
        lr = float(config[arch_dict[net][0]]["arch_lr"])
        if config[arch_dict[net][0]]["arch_opt"] == "sgd":
            opt_momentum = float(config[arch_dict[net][0]]["opt_momentum"])
            opt_weight_decay = float(config[arch_dict[net][0]]["opt_weight_decay"])
            opt_dampening = float(config[arch_dict[net][0]]["opt_dampening"])
            opt_nesterov = strtobool(config[arch_dict[net][0]]["opt_nesterov"])
            optimizers[net] = optim.SGD(
                nns[net].parameters(),
                lr=lr,
                momentum=opt_momentum,
                weight_decay=opt_weight_decay,
                dampening=opt_dampening,
                nesterov=opt_nesterov,
            )
        if config[arch_dict[net][0]]["arch_opt"] == "adam":
            opt_betas = list(map(float, (config[arch_dict[net][0]]["opt_betas"].split(","))))
            opt_eps = float(config[arch_dict[net][0]]["opt_eps"])
            opt_weight_decay = float(config[arch_dict[net][0]]["opt_weight_decay"])
            opt_amsgrad = strtobool(config[arch_dict[net][0]]["opt_amsgrad"])
            optimizers[net] = optim.Adam(
                nns[net].parameters(),
                lr=lr,
                betas=opt_betas,
                eps=opt_eps,
                weight_decay=opt_weight_decay,
                amsgrad=opt_amsgrad,
            )
        if config[arch_dict[net][0]]["arch_opt"] == "rmsprop":
            opt_momentum = float(config[arch_dict[net][0]]["opt_momentum"])
            opt_alpha = float(config[arch_dict[net][0]]["opt_alpha"])
            opt_eps = float(config[arch_dict[net][0]]["opt_eps"])
            opt_centered = strtobool(config[arch_dict[net][0]]["opt_centered"])
            opt_weight_decay = float(config[arch_dict[net][0]]["opt_weight_decay"])
            optimizers[net] = optim.RMSprop(
                nns[net].parameters(),
                lr=lr,
                momentum=opt_momentum,
                alpha=opt_alpha,
                eps=opt_eps,
                centered=opt_centered,
                weight_decay=opt_weight_decay,
            )
    return optimizers
def forward_model_refac01(
    fea_dict,
    lab_dict,
    arch_dict,
    model,
    nns,
    costs,
    inp,
    ref,
    inp_out_dict,
    max_len_fea,
    max_len_lab,
    batch_size,
    to_do,
    forward_outs,
):
    """Refactored forward pass: interpret the proto `model` description line by line.

    Each line of `model` has the shape "out=operation(inp1,inp2)" and is either
    a network computation ("compute": apply nns[inp1] to inp2), a cost/metric
    ("cost_nll"/"cost_err"), or an element-wise combination (concatenate, mult,
    sum, avg, mse, *_constant). Intermediate results are accumulated in
    `outs_dict`, keyed by the declared output names, and the dict is returned.

    In "forward" mode the loop stops as soon as the last requested output
    (forward_outs[-1]) has been produced, and no costs are evaluated.

    NOTE(review): unlike forward_model, labels come from the separate `ref`
    tensor here; `lab_dict` is accepted but unused by _get_labels_from_input —
    confirm against callers.
    """

    def _add_input_features_to_outs_dict(fea_dict, outs_dict, inp):
        # Slice each feature's column range (fea_dict[fea][5:7]) out of a
        # 3-D (time-major) or 2-D (flattened) input tensor.
        for fea in fea_dict.keys():
            if len(inp.shape) == 3 and len(fea_dict[fea]) > 1:
                outs_dict[fea] = inp[:, :, fea_dict[fea][5] : fea_dict[fea][6]]
            if len(inp.shape) == 2 and len(fea_dict[fea]) > 1:
                outs_dict[fea] = inp[:, fea_dict[fea][5] : fea_dict[fea][6]]
        return outs_dict

    def _compute_layer_values(
        inp_out_dict, inp2, inp, inp1, max_len, batch_size, arch_dict, out_name, nns, outs_dict, to_do
    ):
        # Apply network nns[inp1] to inp2 (raw input feature or a previously
        # computed output). Returns the updated dict and whether the caller's
        # interpreter loop should stop ("forward" mode short-circuit).
        def _is_input_feature(inp_out_dict, inp2):
            if len(inp_out_dict[inp2]) > 1:
                return True
            return False

        def _extract_respective_feature_from_input(inp, inp_out_dict, inp2, arch_dict, inp1, max_len, batch_size):
            # Reshape between (max_len, batch, feat) and (max_len*batch, feat)
            # depending on whether the architecture is recurrent
            # (arch_dict[inp1][2] truthy) and the current input rank.
            if len(inp.shape) == 3:
                inp_dnn = inp
                if not (bool(arch_dict[inp1][2])):
                    inp_dnn = inp_dnn.view(max_len * batch_size, -1)
            if len(inp.shape) == 2:
                inp_dnn = inp
                if bool(arch_dict[inp1][2]):
                    inp_dnn = inp_dnn.view(max_len, batch_size, -1)
            return inp_dnn

        do_break = False
        if _is_input_feature(inp_out_dict, inp2):
            inp_dnn = _extract_respective_feature_from_input(
                inp, inp_out_dict, inp2, arch_dict, inp1, max_len, batch_size
            )
            outs_dict[out_name] = nns[inp1](inp_dnn)
        else:
            # Input is a previously computed layer output: reshape it in place
            # to the rank expected by the next architecture.
            if not (bool(arch_dict[inp1][2])) and len(outs_dict[inp2].shape) == 3:
                outs_dict[inp2] = outs_dict[inp2].view(outs_dict[inp2].shape[0] * outs_dict[inp2].shape[1], -1)
            if bool(arch_dict[inp1][2]) and len(outs_dict[inp2].shape) == 2:
                # TODO: This computation needs to be made independent of max_len in case the network is performing sub sampling in time
                outs_dict[inp2] = outs_dict[inp2].view(max_len, batch_size, -1)
            outs_dict[out_name] = nns[inp1](outs_dict[inp2])
        if to_do == "forward" and out_name == forward_outs[-1]:
            do_break = True
        return outs_dict, do_break

    def _get_labels_from_input(ref, inp2, lab_dict):
        # NOTE(review): both branches return `ref` unchanged (flattened to a
        # long vector); the rank check on `inp` and the lab_dict/inp2
        # parameters look vestigial — verify against the original forward_model.
        if len(inp.shape) == 3:
            lab_dnn = ref
        if len(inp.shape) == 2:
            lab_dnn = ref
        lab_dnn = lab_dnn.view(-1).long()
        return lab_dnn

    def _get_network_output(outs_dict, inp1, max_len, batch_size):
        # Flatten a time-major output to (max_len*batch, dim) for the cost.
        out = outs_dict[inp1]
        if len(out.shape) == 3:
            out = out.view(max_len * batch_size, -1)
        return out

    outs_dict = {}
    _add_input_features_to_outs_dict(fea_dict, outs_dict, inp)
    # Parses "out=operation(inp1,inp2)"; see module proto files for the syntax.
    layer_string_pattern = "(.*)=(.*)\((.*),(.*)\)"
    for line in model:
        out_name, operation, inp1, inp2 = list(re.findall(layer_string_pattern, line)[0])
        if operation == "compute":
            outs_dict, do_break = _compute_layer_values(
                inp_out_dict, inp2, inp, inp1, max_len_fea, batch_size, arch_dict, out_name, nns, outs_dict, to_do
            )
            if do_break:
                break
        elif operation == "cost_nll":
            lab_dnn = _get_labels_from_input(ref, inp2, lab_dict)
            out = _get_network_output(outs_dict, inp1, max_len_lab, batch_size)
            if to_do != "forward":
                outs_dict[out_name] = costs[out_name](out, lab_dnn)
        elif operation == "cost_err":
            lab_dnn = _get_labels_from_input(ref, inp2, lab_dict)
            out = _get_network_output(outs_dict, inp1, max_len_lab, batch_size)
            if to_do != "forward":
                # Frame-level classification error rate.
                pred = torch.max(out, dim=1)[1]
                err = torch.mean((pred != lab_dnn).float())
                outs_dict[out_name] = err
        elif operation == "concatenate":
            dim_conc = len(outs_dict[inp1].shape) - 1
            outs_dict[out_name] = torch.cat((outs_dict[inp1], outs_dict[inp2]), dim_conc)  # check concat axis
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        elif operation == "mult":
            outs_dict[out_name] = outs_dict[inp1] * outs_dict[inp2]
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        elif operation == "sum":
            outs_dict[out_name] = outs_dict[inp1] + outs_dict[inp2]
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        elif operation == "mult_constant":
            outs_dict[out_name] = outs_dict[inp1] * float(inp2)
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        elif operation == "sum_constant":
            outs_dict[out_name] = outs_dict[inp1] + float(inp2)
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        elif operation == "avg":
            outs_dict[out_name] = (outs_dict[inp1] + outs_dict[inp2]) / 2
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        elif operation == "mse":
            outs_dict[out_name] = torch.mean((outs_dict[inp1] - outs_dict[inp2]) ** 2)
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
    return outs_dict
def plot_waveforms(waveform_tensor):
    """Display one or more waveforms with matplotlib.

    A multi-row tensor is drawn as stacked subplots (one waveform per row);
    a single waveform is drawn in one figure. Data is detached and moved to
    the CPU before plotting.
    """
    with torch.no_grad():
        n_waveforms = len(waveform_tensor)
        if n_waveforms > 1:
            plt.figure()
            for idx in range(n_waveforms):
                plt.subplot(n_waveforms, 1, idx + 1)
                plt.plot(waveform_tensor[idx].t().detach().to("cpu").numpy())
            plt.show()
        else:
            plt.figure()
            plt.plot(waveform_tensor.t().detach().to("cpu").numpy())
            plt.show()
def forward_model(
    fea_dict, lab_dict, arch_dict, model, nns, costs, inp, inp_out_dict, max_len, batch_size, to_do, forward_outs,
    generator = None, discriminator = None, gan_on = False, double_features = False,
):
    """Interpret the proto `model` description and run one forward pass.

    Each `model` line has the form "out=operation(inp1,inp2)"; supported
    operations are "compute" (apply nns[inp1] to inp2), the costs
    "cost_nll"/"cost_err", and element-wise combinations (concatenate, mult,
    sum, avg, mse, *_constant). Results are collected in `outs_dict`.

    GAN extension: when `gan_on` is True, raw input features are first passed
    through `generator`; if a `discriminator` is provided, samples it
    classifies as clean (class 1) keep their original features. With
    `double_features`, original and enhanced features are concatenated before
    the acoustic model. In "forward" mode the loop stops once the last
    requested output (forward_outs[-1]) is produced and no costs are evaluated.
    """
    # Forward Step
    outs_dict = {}
    # Parses "out=operation(inp1,inp2)" proto lines.
    pattern = "(.*)=(.*)\((.*),(.*)\)"
    # adding input features to out_dict:
    # (columns fea_dict[fea][5:7] of the 3-D time-major or 2-D flattened input)
    for fea in fea_dict.keys():
        if len(inp.shape) == 3 and len(fea_dict[fea]) > 1:
            outs_dict[fea] = inp[:, :, fea_dict[fea][5] : fea_dict[fea][6]]
        if len(inp.shape) == 2 and len(fea_dict[fea]) > 1:
            outs_dict[fea] = inp[:, fea_dict[fea][5] : fea_dict[fea][6]]
    for line in model:
        [out_name, operation, inp1, inp2] = list(re.findall(pattern, line)[0])
        if operation == "compute":
            if len(inp_out_dict[inp2]) > 1:  # if it is an input feature
                # Selection of the right feature in the inp tensor; reshape to
                # the rank expected by the architecture (arch_dict[inp1][2]
                # truthy means recurrent, i.e. (max_len, batch, feat)).
                if len(inp.shape) == 3:
                    inp_dnn = inp[:, :, inp_out_dict[inp2][-3] : inp_out_dict[inp2][-2]]
                    if not (bool(arch_dict[inp1][2])):
                        inp_dnn = inp_dnn.view(max_len * batch_size, -1)
                if len(inp.shape) == 2:
                    inp_dnn = inp[:, inp_out_dict[inp2][-3] : inp_out_dict[inp2][-2]]
                    if bool(arch_dict[inp1][2]):
                        inp_dnn = inp_dnn.view(max_len, batch_size, -1)
                # Run features trough generator network
                if gan_on and to_do == "train":
                    # Using GAN on raw features
                    outs_dict["inp_dnn"] = inp_dnn
                    outs_dict["gan_training"] = True
                    # Use gan for Acoustic model training
                    # no_grad: the GAN is fixed while the acoustic model trains.
                    with torch.no_grad():
                        outs_dict["generator_output"] = generator(inp_dnn)
                        if discriminator is not None:
                            d_output = discriminator(inp_dnn)
                            pred = d_output.data.max(1, keepdim=True)[1]
                            for i in range(d_output.shape[0]):
                                if pred[i] == 1:  # Replace clean samples with original
                                    outs_dict["generator_output"][i] = inp_dnn[i]
                    if double_features:
                        outs_dict[out_name] = nns[inp1](torch.cat((inp_dnn, outs_dict["generator_output"]), dim = 1))
                    else:
                        outs_dict[out_name] = nns[inp1](outs_dict["generator_output"])
                elif gan_on:  # Validation and forward with GAN
                    # Use GAN for evaluation
                    outs_dict["inp_dnn"] = inp_dnn
                    outs_dict["generator_output"] = generator(inp_dnn)
                    if discriminator is not None:
                        d_output = discriminator(inp_dnn)
                        pred = d_output.data.max(1, keepdim=True)[1]
                        for i in range(d_output.shape[0]):
                            if pred[i] == 1:  # Replace clean samples with original
                                outs_dict["generator_output"][i] = inp_dnn[i]
                    # detach(): no gradients flow back into the generator here.
                    if double_features:
                        outs_dict[out_name] = nns[inp1](
                            torch.cat((inp_dnn, outs_dict["generator_output"].detach()), dim = 1))
                    else:
                        outs_dict[out_name] = nns[inp1](outs_dict["generator_output"].detach())
                    outs_dict["gan_training"] = False
                else:
                    # Do not use GAN at all
                    if double_features:
                        # Feature dimension is doubled by duplicating the input
                        # so the acoustic model signature stays the same.
                        inp_dnn = torch.cat((inp_dnn, inp_dnn), dim = 1)
                    outs_dict["inp_dnn"] = inp_dnn
                    outs_dict[out_name] = nns[inp1](inp_dnn)
                    outs_dict["gan_training"] = False
            else:
                # Input is a previously computed output: reshape in place.
                if not (bool(arch_dict[inp1][2])) and len(outs_dict[inp2].shape) == 3:
                    outs_dict[inp2] = outs_dict[inp2].view(max_len * batch_size, -1)
                if bool(arch_dict[inp1][2]) and len(outs_dict[inp2].shape) == 2:
                    outs_dict[inp2] = outs_dict[inp2].view(max_len, batch_size, -1)
                outs_dict[out_name] = nns[inp1](outs_dict[inp2])
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        if operation == "cost_nll":
            # Put labels in the right format
            # (labels live in column lab_dict[inp2][3] of the input tensor)
            if len(inp.shape) == 3:
                lab_dnn = inp[:, :, lab_dict[inp2][3]]
            if len(inp.shape) == 2:
                lab_dnn = inp[:, lab_dict[inp2][3]]
            lab_dnn = lab_dnn.view(-1).long()
            # put output in the right format
            out = outs_dict[inp1]
            if len(out.shape) == 3:
                out = out.view(max_len * batch_size, -1)
            if to_do != "forward":
                outs_dict[out_name] = costs[out_name](out, lab_dnn)
                outs_dict["lab_dnn"] = lab_dnn
        if operation == "cost_err":
            if len(inp.shape) == 3:
                lab_dnn = inp[:, :, lab_dict[inp2][3]]
            if len(inp.shape) == 2:
                lab_dnn = inp[:, lab_dict[inp2][3]]
            lab_dnn = lab_dnn.view(-1).long()
            # put output in the right format
            out = outs_dict[inp1]
            if len(out.shape) == 3:
                out = out.view(max_len * batch_size, -1)
            if to_do != "forward":
                # Frame-level classification error rate.
                pred = torch.max(out, dim=1)[1]
                err = torch.mean((pred != lab_dnn).float())
                outs_dict[out_name] = err
        if operation == "concatenate":
            dim_conc = len(outs_dict[inp1].shape) - 1
            outs_dict[out_name] = torch.cat((outs_dict[inp1], outs_dict[inp2]), dim_conc)  # check concat axis
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        if operation == "mult":
            outs_dict[out_name] = outs_dict[inp1] * outs_dict[inp2]
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        if operation == "sum":
            outs_dict[out_name] = outs_dict[inp1] + outs_dict[inp2]
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        if operation == "mult_constant":
            outs_dict[out_name] = outs_dict[inp1] * float(inp2)
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        if operation == "sum_constant":
            outs_dict[out_name] = outs_dict[inp1] + float(inp2)
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        if operation == "avg":
            outs_dict[out_name] = (outs_dict[inp1] + outs_dict[inp2]) / 2
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
        if operation == "mse":
            outs_dict[out_name] = torch.mean((outs_dict[inp1] - outs_dict[inp2]) ** 2)
            if to_do == "forward" and out_name == forward_outs[-1]:
                break
    return outs_dict
def dump_epoch_results(
    res_file_path, ep, tr_data_lst, tr_loss_tot, tr_error_tot, tot_time, valid_data_lst, valid_peformance_dict, lr, N_ep
):
    """Append one epoch's summary line to the results file and echo it to stdout.

    Args:
        res_file_path: path of the "res.res" file accumulating one line per epoch.
        ep: zero-based epoch index.
        tr_data_lst: list of training dataset names (totals are averaged over it).
        tr_loss_tot / tr_error_tot: summed loss / error over the training datasets.
        tot_time: elapsed wall-clock seconds for the epoch.
        valid_data_lst: list of validation dataset names.
        valid_peformance_dict: maps each validation name to (loss, error).
        lr: maps each architecture name to its per-epoch learning-rate list.
        N_ep: total number of epochs (used to zero-pad the epoch number).

    NOTE: the exact "key=value " layout written here is parsed back by
    export_loss_acc_to_txt, so the format strings must stay in sync with it.
    """
    # Zero-pad the epoch number to the number of digits of N_ep (at least one).
    N_ep_str_format = "0" + str(max(math.ceil(np.log10(N_ep)), 1)) + "d"
    # Fix: open with a context manager so the handle is closed even if a
    # formatting error is raised mid-way (the original leaked it on error).
    with open(res_file_path, "a") as res_file:
        res_file.write(
            "ep=%s tr=%s loss=%s err=%s "
            % (
                format(ep, N_ep_str_format),
                tr_data_lst,
                format(tr_loss_tot / len(tr_data_lst), "0.3f"),
                format(tr_error_tot / len(tr_data_lst), "0.3f"),
            )
        )
        print(" ")
        print("----- Summary epoch %s / %s" % (format(ep+1, N_ep_str_format), format(N_ep, N_ep_str_format)))
        print("Training on %s" % (tr_data_lst))
        print(
            "Loss = %s | err = %s "
            % (format(tr_loss_tot / len(tr_data_lst), "0.3f"), format(tr_error_tot / len(tr_data_lst), "0.3f"))
        )
        print("-----")
        for valid_data in valid_data_lst:
            res_file.write(
                "valid=%s loss=%s err=%s "
                % (
                    valid_data,
                    format(valid_peformance_dict[valid_data][0], "0.3f"),
                    format(valid_peformance_dict[valid_data][1], "0.3f"),
                )
            )
            print("Validating on %s" % (valid_data))
            print(
                "Loss = %s | err = %s "
                % (
                    format(valid_peformance_dict[valid_data][0], "0.3f"),
                    format(valid_peformance_dict[valid_data][1], "0.3f"),
                )
            )
            print("-----")
        for lr_arch in lr.keys():
            res_file.write("lr_%s=%s " % (lr_arch, lr[lr_arch][ep]))
            print("Learning rate on %s = %s " % (lr_arch, lr[lr_arch][ep]))
        print("-----")
        res_file.write("time(s)=%i\n" % (int(tot_time)))
        print("Elapsed time (s) = %i\n" % (int(tot_time)))
        print(" ")
def progress(count, total, status=""):
    """Render a simple in-place console progress bar on stdout.

    On the final step (count == total - 1) the bar is forced to 100% and a
    newline is emitted; otherwise the line is rewritten in place with '\r'.
    """
    bar_len = 30
    done = int(round(bar_len * count / float(total)))
    pct = round(100.0 * count / float(total), 1)
    bar = "=" * done + "-" * (bar_len - done)
    if count == total - 1:
        sys.stdout.write("[%s] %s%s %s \r" % (bar, 100, "%", status))
        sys.stdout.write("\n")
    else:
        sys.stdout.write("[%s] %s%s %s\r" % (bar, pct, "%", status))
    sys.stdout.flush()
def export_loss_acc_to_txt(out_folder, N_ep, val_lst):
    """Parse out_folder/res.res and dump per-epoch curves as text files.

    Writes tr_loss.txt / tr_acc.txt plus val_<i>_loss.txt / val_<i>_acc.txt
    (one per validation set) under out_folder/generated_outputs. Accuracy is
    derived as 1 - error. The expected res.res layout is the one produced by
    dump_epoch_results: "ep=.. tr=.. loss=.. err=.. valid=.. loss=.. err=.. ...".

    Args:
        out_folder: experiment output folder containing res.res.
        N_ep: number of epochs (= number of res.res lines to parse).
        val_lst: list of validation dataset names (only its length is used).
    """
    out_dir = out_folder + "/generated_outputs"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    nb_val = len(val_lst)
    # Fix: close the file handle (the original left open(...).readlines() dangling).
    with open(out_folder + "/res.res", "r") as res_handle:
        res = res_handle.readlines()
    tr_loss = []
    tr_acc = []
    val_loss = np.ndarray((nb_val, N_ep))
    val_acc = np.ndarray((nb_val, N_ep))
    # Fix: distinct loop variables (the original reused `i` for both loops,
    # shadowing the epoch index inside the validation loop).
    for ep in range(N_ep):
        splitted = res[ep].split(" ")
        # Unique training loss and accuracy: columns 2 and 3 ("loss=", "err=").
        tr_loss.append(float(splitted[2].split("=")[1]))
        tr_acc.append(1 - float(splitted[3].split("=")[1]))
        # Validation columns start after the 5 leading fields
        # (ep/tr/loss/err/valid); each validation set occupies 3 columns.
        for v in range(nb_val):
            val_loss[v][ep] = float(splitted[(v * 3) + 5].split("=")[1])
            val_acc[v][ep] = 1 - float(splitted[(v * 3) + 6].split("=")[1])
    # Saving to files
    np.savetxt(out_dir + "/tr_loss.txt", np.asarray(tr_loss), "%0.3f", delimiter=",")
    np.savetxt(out_dir + "/tr_acc.txt", np.asarray(tr_acc), "%0.3f", delimiter=",")
    for v in range(nb_val):
        np.savetxt(out_dir + "/val_" + str(v) + "_loss.txt", val_loss[v], "%0.5f", delimiter=",")
        np.savetxt(out_dir + "/val_" + str(v) + "_acc.txt", val_acc[v], "%0.5f", delimiter=",")
def create_curves(out_folder, N_ep, val_lst):
    """Export per-epoch loss/accuracy text files and plot them as PNG curves.

    Calls export_loss_acc_to_txt first, then draws loss.png and acc.png under
    out_folder/generated_outputs (training curve plus one curve per validation
    set). Exits the process with a warning if matplotlib is unavailable.
    """
    try:
        import matplotlib as mpl

        # Headless backend: plots are written to files, never shown.
        mpl.use("Agg")
        import matplotlib.pyplot as plt
    except ImportError:
        # Fix: a failed import raises ImportError, not ValueError — the
        # original would crash instead of printing this warning.
        print("WARNING: matplotlib is not installed. The plots of the training curves have not been created.")
        sys.exit(0)
    print(" ")
    print("-----")
    print("Generating output files and plots ... ")
    export_loss_acc_to_txt(out_folder, N_ep, val_lst)
    if not os.path.exists(out_folder + "/generated_outputs"):
        # Fix: was the garbled "sys.stdacc.write("accOR: ... export_loss_err_to_txt ...")".
        sys.stderr.write("ERROR: No results generated please call export_loss_acc_to_txt() before")
        sys.exit(0)
    nb_epoch = len(open(out_folder + "/generated_outputs/tr_loss.txt", "r").readlines())
    x = np.arange(nb_epoch)
    nb_val = len(val_lst)
    # Loading train Loss and acc
    tr_loss = np.loadtxt(out_folder + "/generated_outputs/tr_loss.txt")
    tr_acc = np.loadtxt(out_folder + "/generated_outputs/tr_acc.txt")
    # Loading val loss and acc
    val_loss = []
    val_acc = []
    for i in range(nb_val):
        val_loss.append(np.loadtxt(out_folder + "/generated_outputs/val_" + str(i) + "_loss.txt"))
        val_acc.append(np.loadtxt(out_folder + "/generated_outputs/val_" + str(i) + "_acc.txt"))
    #
    # LOSS PLOT
    #
    # Getting maximum values to size the y-axis
    max_loss = np.amax(tr_loss)
    for i in range(nb_val):
        if np.amax(val_loss[i]) > max_loss:
            max_loss = np.amax(val_loss[i])
    # Plot train loss
    plt.plot(x, tr_loss, label="train_loss")
    # Plot val losses
    for i in range(nb_val):
        plt.plot(x, val_loss[i], label="val_" + str(i) + "_loss")
    plt.ylabel("Loss")
    plt.xlabel("Epoch")
    plt.title("Evolution of the loss function")
    plt.axis([0, nb_epoch - 1, 0, max_loss + 1])
    plt.legend()
    plt.savefig(out_folder + "/generated_outputs/loss.png")
    # Clear plot before drawing the accuracy figure
    plt.gcf().clear()
    #
    # ACC PLOT
    #
    # Plot train accuracy
    plt.plot(x, tr_acc, label="train_acc")
    # Plot val accuracies
    for i in range(nb_val):
        plt.plot(x, val_acc[i], label="val_" + str(i) + "_acc")
    plt.ylabel("Accuracy")
    plt.xlabel("Epoch")
    plt.title("Evolution of the accuracy")
    plt.axis([0, nb_epoch - 1, 0, 1])
    plt.legend()
    plt.savefig(out_folder + "/generated_outputs/acc.png")
    print("OK")
# Replace the nth pattern in a string
def nth_replace_string(s, sub, repl, nth):
    """Replace the nth occurrence of `sub` in `s` with `repl`.

    Returns `s` unchanged when there are fewer than `nth` occurrences.
    """
    pos = s.find(sub)
    # Count starts at 1 when a first match exists, 0 otherwise.
    count = 1 if pos != -1 else 0
    # Walk forward until the nth match is reached or no match remains.
    while pos != -1 and count != nth:
        pos = s.find(sub, pos + 1)
        count += 1
    if count == nth:
        return s[:pos] + repl + s[pos + len(sub) :]
    return s
def change_lr_cfg(cfg_file, lr, ep):
    """Rewrite cfg_file so every architecture section carries epoch `ep`'s learning rate.

    Args:
        cfg_file: path of the INI-style configuration file to update in place.
        lr: maps architecture section names to per-epoch learning-rate lists.
        ep: epoch index selecting the rate to write into each "arch_lr" field.
    """
    parser = configparser.ConfigParser()
    parser.read(cfg_file)
    for section in lr:
        parser.set(section, "arch_lr", str(lr[section][ep]))
    # Persist the modified configuration back to the same file.
    with open(cfg_file, "w") as handle:
        parser.write(handle)
def shift(arr, num, fill_value=np.nan):
    """Shift a 1-D array by `num` positions, padding vacated slots with `fill_value`.

    Positive `num` shifts right (padding at the front), negative shifts left
    (padding at the back). Always returns a new ndarray.

    Fix: the original handled num == 0 in the `num >= 0` branch, where
    `arr[:-0]` collapses to an empty slice and an empty array was returned.
    """
    if num == 0:
        return np.array(arr)
    if num > 0:
        return np.concatenate((np.full(num, fill_value), arr[:-num]))
    return np.concatenate((arr[-num:], np.full(-num, fill_value)))
def expand_str_ep(str_compact, type_inp, N_ep, split_elem, mult_elem):
    """Expand a compact per-epoch spec (e.g. "0.1*3|0.05") into N_ep strings.

    `split_elem` separates the groups, `mult_elem` separates a value from its
    repetition count ("value*count"). A single value without a count is
    broadcast over all N_ep epochs. Values are validated against `type_inp`
    ("int" or "float"); any validation or length mismatch exits the process
    with a message on stderr.
    """
    expanded = []
    chunks = str_compact.split(split_elem)
    for chunk in chunks:
        parts = chunk.split(mult_elem)
        # Validate the value token against the declared type.
        if type_inp == "int":
            try:
                int(parts[0])
            except ValueError:
                sys.stderr.write('The string "%s" must contain integers. Got %s.\n' % (str_compact, parts[0]))
                sys.exit(0)
        if type_inp == "float":
            try:
                float(parts[0])
            except ValueError:
                sys.stderr.write('The string "%s" must contain floats. Got %s.\n' % (str_compact, parts[0]))
                sys.exit(0)
        if len(parts) == 2:
            # "value*count": repeat the value `count` times.
            try:
                int(parts[1])
                expanded.extend([parts[0]] * int(parts[1]))
            except ValueError:
                sys.stderr.write('The string "%s" must contain integers. Got %s\n' % (str_compact, parts[1]))
                sys.exit(0)
        if len(parts) == 1:
            expanded.append(parts[0])
        if len(chunks) == 1 and len(parts) == 1:
            # A lone value is broadcast over every remaining epoch.
            expanded.extend([parts[0]] * (N_ep - 1))
    # Final check
    if len(expanded) != N_ep:
        sys.stderr.write(
            'The total number of elements specified in the string "%s" is equal to %i not equal to the total number of epochs %s.\n'
            % (str_compact, len(expanded), N_ep)
        )
        sys.exit(0)
    return expanded
| 110,615 | 36.598912 | 206 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/train_gan.py | ##########################################################
# pytorch-kaldi-gan
# Walter Heymans
# North West University
# 2020
##########################################################
import sys
import configparser
import os
import time
import numpy
import numpy as np
import random
import torch
import torch.nn.functional as functional
from torch.optim.optimizer import Optimizer
import gan_networks
import itertools
from shutil import copyfile
import math
import matplotlib.pyplot as plt
import weights_and_biases as wandb
import importlib
import warnings
warnings.filterwarnings("ignore", '', UserWarning)
def print_version_info():
    """Print the project banner (name, author, institution, year) to stdout."""
    banner_lines = [
        " Pytorch-Kaldi-GAN ",
        " Walter Heymans ",
        " North West University ",
        " 2020 ",
    ]
    print("")
    print("#" * 40)  # same as "".center(40, "#")
    for text in banner_lines:
        print(text.center(38, " ").center(40, "#"))
    print("#" * 40, end="\n\n")
def save_tensor_list_to_png(array, titles=[], fig_name="tensor.png"):
    """Save the given 1-D tensors as stacked line plots in one PNG file.

    Colour convention: with exactly four tensors, the first two are blue and
    the last two red; otherwise only the second subplot is red. Titles are
    applied only when one is supplied per tensor.
    (`titles` default is never mutated, so the shared-default pitfall does not apply.)
    """
    plt.figure(figsize=(8, 6), dpi=300)
    n = len(array)
    for row in range(1, n + 1):
        plt.subplot(n, 1, row)
        if n == 4:
            colour = "b" if row <= 2 else "r"
        else:
            colour = "r" if row == 2 else "b"
        plt.plot(array[row - 1].detach().numpy(), colour)
        if len(titles) == n:
            plt.title(titles[row - 1])
    plt.tight_layout()
    plt.savefig(fig_name)
    plt.close()
def format_time(time_in_seconds):
    """Format a duration in seconds as 'Xh Ym Zs ', 'Ym Zs ' or 'Zs '.

    The widest non-zero unit decides the shape; each string carries a
    trailing space (kept for callers that append further text).
    """
    hours = math.floor(time_in_seconds / 3600)
    minutes = math.floor(time_in_seconds / 60) - hours * 60
    seconds = math.floor(time_in_seconds) - minutes * 60 - hours * 3600
    if hours > 0:
        return "{}h {}m {}s ".format(hours, minutes, seconds)
    if minutes > 0:
        return "{}m {}s ".format(minutes, seconds)
    return "{}s ".format(seconds)
def get_labels(bs, label):
    """Return a (bs, 1) float tensor where every entry equals `label`."""
    labels = torch.ones((bs, 1))
    labels = labels * label
    return labels
def get_pearson_correlation(tensor1, tensor2):
    """Mean Pearson correlation between two tensors of identical shape.

    For 2-D inputs the correlation is computed row by row and averaged; rows
    where pearsonr fails (e.g. constant input) contribute 0. Returns 0 when
    the shapes differ. Inputs are detached and moved to the CPU first.
    """
    from scipy.stats import pearsonr

    output1 = tensor1.detach().cpu().numpy()
    output2 = tensor2.detach().cpu().numpy()
    if output1.shape == output2.shape:
        # calculate Pearson's correlation
        if len(output1.shape) > 1:
            correlation = 0
            for i in range(output1.shape[0]):
                try:
                    temp_corr, _ = pearsonr(output1[i], output2[i])
                except Exception:
                    # Fix: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    temp_corr = 0
                correlation += temp_corr
            if output1.shape[0] > 0:
                correlation = correlation / output1.shape[0]
        else:
            correlation, _ = pearsonr(output1, output2)
        return correlation
    else:
        return 0
def get_mean_squared_error(tensor1, tensor2):
    """Mean absolute difference between the magnitudes of two tensors.

    NOTE: despite the name, this computes mean(|abs(t1) - abs(t2)|), not a
    squared error. Returns 0 when the shapes differ; otherwise returns the
    value as a NumPy scalar. Inputs are detached and moved to the CPU first.
    """
    left = tensor1.detach().cpu()
    right = tensor2.detach().cpu()
    if left.shape != right.shape:
        return 0
    if left.dim() > 1:
        # Each iteration adds the mean over the *whole* tensors; dividing by
        # the row count afterwards reproduces the overall mean (kept as-is
        # for behavioural parity with the original implementation).
        total = 0
        for _ in range(left.shape[0]):
            total += torch.mean(torch.abs(torch.abs(left) - torch.abs(right)))
        if left.shape[0] > 0:
            total = total / left.shape[0]
    else:
        total = torch.mean(torch.abs(torch.abs(left) - torch.abs(right)))
    return total.numpy()
def get_g_performance(clean, noisy, generated):
    ''' Performance metric using Pearson correlation, mean squared error and L1 loss.
    Metric is comparing generator relative to noisy signal.
    Higher is better. '''
    # Each term is an "improvement over the noisy baseline": positive when the
    # generated signal is closer to the clean reference than the noisy one is.
    l1_gain = (
        torch.nn.functional.l1_loss(clean, noisy).item()
        - torch.nn.functional.l1_loss(clean, generated).item()
    )
    corr_gain = get_pearson_correlation(clean, generated) - get_pearson_correlation(clean, noisy)
    mse_gain = get_mean_squared_error(clean, noisy) - get_mean_squared_error(clean, generated)
    # Same summation order as the original (r + mse + l1).
    return corr_gain + mse_gain + l1_gain
def compute_gradient_penalty(D, real_samples, fake_samples):
    """Calculates the gradient penalty loss for WGAN GP.

    Interpolates element-wise between real and fake samples with random
    weights, runs the interpolates through discriminator D, and penalises the
    deviation of the gradient norm from 1.

    Args:
        D: discriminator network (on the GPU).
        real_samples / fake_samples: (batch, feat) CUDA tensors of equal shape.

    Returns:
        Scalar gradient-penalty tensor.
    """
    Tensor = torch.cuda.FloatTensor
    from torch.autograd import Variable

    # Random weight term for interpolation between real and fake samples.
    # Generalized: use the actual feature dimension instead of the previously
    # hard-coded 440 (11 frames x 40 fbanks), so other context sizes work too.
    alpha = Tensor(np.random.random((real_samples.size(0), real_samples.size(1))))
    # Get random interpolation between real and fake samples
    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
    d_interpolates = D(interpolates)
    fake = Variable(Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)
    # Get gradient w.r.t. interpolates
    gradients = torch.autograd.grad(
        outputs=d_interpolates,
        inputs=interpolates,
        grad_outputs=fake,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
# --- Script setup: parse the experiment config and read all hyper-parameters ---
script_start_time = time.time()
print_version_info()
# Reading global cfg file (first argument-mandatory file)
cfg_file = sys.argv[1]
if not (os.path.exists(cfg_file)):
    sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
    sys.exit(0)
else:
    config = configparser.ConfigParser()
    config.read(cfg_file)
# Output folder creation
out_folder = config["exp"]["out_folder"]
if not os.path.exists(out_folder):
    os.makedirs(out_folder)
# Copy the global cfg file into the output folder
# (from here on, the experiment-local copy is the authoritative config path)
cfg_file = out_folder + "/conf.cfg"
with open(cfg_file, "w") as configfile:
    config.write(configfile)
# Read hyper-parameters from config file
seed = int(config['hyperparameters']['seed'])
max_epochs = int(config['hyperparameters']['max_epochs'])
batch_size = int(config['hyperparameters']['batch_size'])
lr_g = float(config['hyperparameters']['lr_g'])  # generator learning rate
lr_d = float(config['hyperparameters']['lr_d'])  # discriminator learning rate
# Optional: number of discriminator updates per generator update (default 1).
try:
    d_updates = int(config['hyperparameters']['d_updates'])
except KeyError:
    d_updates = 1
real_label = float(config['hyperparameters']['real_label'])  # label-smoothing target for "real"
criterion = str(config['hyperparameters']['criterion'])
optimizer = str(config['hyperparameters']['optimizer'])
cycle_consistency_lambda = int(config['hyperparameters']['cycle_consistency_lambda'])
acoustic_model_lambda = float(config['hyperparameters']['acoustic_model_lambda'])
gp_lambda = int(config['hyperparameters']['gp_lambda'])  # WGAN-GP gradient-penalty weight
# Optional L1/L2 loss weights; silently absent when not configured.
try:
    l1_lambda = int(config['hyperparameters']['l1_lambda'])
    l2_lambda = int(config['hyperparameters']['l2_lambda'])
except KeyError:
    pass
# Resolve the compute device: an integer CUDA index or the string 'cpu'.
if config.getboolean("exp", "use_cuda"):
    try:
        cuda_device = int(config['exp']['cuda_device'])
    except ValueError:
        cuda_device = 'cpu'
else:
    cuda_device = 'cpu'
# Weights & Biases experiment tracking toggle.
if config["wandb"]["wandb"] == "True":
    wandb_on = True
else:
    wandb_on = False
# Seed both torch and random for reproducibility.
torch.manual_seed(seed = seed)
random.seed(seed)
# Dataset locations and feature context window (left + current + right frames).
clean_dataset_path = str(config['datasets']['clean_dataset'])
noisy_dataset_path = str(config['datasets']['noisy_dataset'])
valid_dataset_path = str(config['datasets']['valid_dataset'])
cw_left = int(config['datasets']['cw_left'])
cw_right = int(config['datasets']['cw_right'])
frames_per_sample = cw_left + cw_right + 1
# Optional boolean flags, all defaulting to False when missing from the config.
double_features = False
try:
    if config["hyperparameters"]["double_features"] == "True":
        double_features = True
except KeyError:
    pass
early_stopping = False
try:
    if config["hyperparameters"]["early_stopping"] == "True":
        early_stopping = True
except KeyError:
    pass
train_d_with_noisy = False
try:
    if config["hyperparameters"]["train_d_with_noisy"] == "True":
        train_d_with_noisy = True
except KeyError:
    pass
print("@ Progress: Reading config complete\n")
def print_settings():
    """Echo the resolved hyper-parameters (read from the module-level config) to stdout."""
    print_width = 64
    print(" Hyper-parameters ".center(print_width, "="))
    print("# Seed:\t\t\t", seed)
    print("# Epochs:\t\t", max_epochs)
    print("# Batch size:\t\t", batch_size)
    print("# Learning rate G:\t", lr_g)
    print("# Learning rate D:\t", lr_d)
    print("# Acoustic model lambda:", acoustic_model_lambda)
    print("# Gradient penalty lambda:", gp_lambda)
    print("# Real label:\t\t", real_label)
    print("# Criterion:\t\t", criterion)
    print("# Optimizer:\t\t", optimizer)
    print("# Cuda device:\t\t", cuda_device)
    print("# Weights and Biases:\t", wandb_on)
    print("# Output directory:\t", out_folder)
    print("# Double features:\t", double_features)
    print("# Early stopping:\t", early_stopping)
    print("=".center(print_width, "="), end = "\n\n")


print_settings()
class Dataset(torch.utils.data.Dataset):
    'Characterizes a dataset for PyTorch'

    # Loads one pre-saved chunk ("chunk_<n>.pt") of clean — and optionally
    # noisy — features. Each stored row holds 40 fbank coefficients plus a
    # trailing label column; __getitem__ concatenates `frames_per_sample`
    # consecutive rows into one context-window sample.
    # When the noisy path is missing/empty the instance acts as a
    # clean-only validation set (validation_set = True).
    def __init__(self, dataset_path_clean, dataset_path_noisy, chunk):
        self.validation_set = False
        if not os.path.exists(dataset_path_noisy) or dataset_path_noisy == "":
            self.validation_set = True
        'Initialization'
        self.dataset_path_clean = dataset_path_clean
        files = sorted(os.listdir(self.dataset_path_clean))
        if chunk <= len(files):
            self.dataset_object = torch.load(os.path.join(self.dataset_path_clean, ("chunk_" + str(chunk) + ".pt")), map_location = 'cpu')
        # Length in whole context windows, so index + frame offsets stay in bounds.
        clean_length = int(math.floor(self.dataset_object.shape[0] / frames_per_sample))
        if not self.validation_set:
            # Noisy dataset
            self.dataset_path_noisy = dataset_path_noisy
            files = sorted(os.listdir(self.dataset_path_noisy))
            if chunk <= len(files):
                self.dataset_object_noisy = torch.load(os.path.join(self.dataset_path_noisy, ("chunk_" + str(chunk) + ".pt")), map_location = 'cpu')
            noisy_lenght = int(math.floor(self.dataset_object_noisy.shape[0] / frames_per_sample))
            # Paired training requires both tensors; clamp to the shorter one.
            self.dataset_len = min([clean_length, noisy_lenght])
        else:
            self.dataset_len = clean_length

    def __len__(self):
        'Denotes the total number of samples'
        return self.dataset_len

    def __getitem__(self, index):
        'Generates one sample of data'
        # Build the context window by concatenating frames_per_sample rows of
        # 40 features each; the label is taken from the anchor row's last column.
        # NOTE(review): the label is re-read on every loop iteration although
        # only the index-row value is used — presumably intentional but redundant.
        for frame in range(frames_per_sample):
            label = self.dataset_object[index,-1]
            if frame == 0:
                clean = self.dataset_object[index + frame, :40]
            else:
                clean = torch.cat((clean, self.dataset_object[index + frame, :40]), dim = 0)
        if not self.validation_set:
            for frame in range(frames_per_sample):
                label_noisy = self.dataset_object_noisy[index, -1]
                if frame == 0:
                    noisy = self.dataset_object_noisy[index + frame, :40]
                else:
                    noisy = torch.cat((noisy, self.dataset_object_noisy[index + frame, :40]), dim = 0)
            return clean, noisy, label, label_noisy
        else:
            return clean, label

    def getbatch(self, index, batch_size):
        # Manually assemble a (batch_size, feat) batch of consecutive
        # clean/noisy sample pairs starting at `index` (labels are dropped).
        clean, noisy, _, _ = self.__getitem__(index)
        clean = torch.unsqueeze(clean, dim = 0)
        noisy = torch.unsqueeze(noisy, dim = 0)
        for bs in range(batch_size-1):
            tempclean, tempnoisy, _, _ = self.__getitem__(index+bs+1)
            tempclean = torch.unsqueeze(tempclean, dim = 0)
            tempnoisy = torch.unsqueeze(tempnoisy, dim = 0)
            clean = torch.cat((clean, tempclean), dim = 0)
            noisy = torch.cat((noisy, tempnoisy), dim = 0)
        return clean, noisy
# --- Build datasets and loaders (first chunk only) and select the CUDA device ---
number_of_chunks = len(os.listdir(clean_dataset_path))
train_set = Dataset(clean_dataset_path, noisy_dataset_path, 1)
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size = batch_size,
                                           shuffle = True,
                                           num_workers = 4)
# Validation uses clean features only (empty noisy path => validation mode).
validation_set = Dataset(valid_dataset_path, "", 1)
valid_loader = torch.utils.data.DataLoader(validation_set,
                                           batch_size = batch_size,
                                           shuffle = True,
                                           num_workers = 4)
print("@ Progress: Dataset loaded")

if cuda_device != 'cpu':
    torch.cuda.set_device(cuda_device)
    print("@ Progress: Cuda device set to", cuda_device)
# Create acoustic model
# Loads a pretrained pytorch-kaldi acoustic model whose architecture class is
# named in the config; the output dimensionality (N_out_lab_cd) is recovered
# from the checkpoint's last "wx" layer.
# NOTE(review): the "external model" branch and the default branch below are
# near-identical copies; the only differences are that the default branch sets
# train_with_am = True and reports a missing model path.
acoustic_model_path = str(config["acoustic_model"]["pretrained_file"])
train_with_am = False
use_external_model = False
try:
    if str(config["acoustic_model"]["use_external_model"]) == "True":
        use_external_model = True
except KeyError:
    pass
if use_external_model:
    if os.path.exists(acoustic_model_path):
        def get_number_hidden_layers(dictionary_keys):
            # Each hidden layer contributes a weight and a bias entry named
            # "wx.*", hence the division by two.
            layer_count = 0
            for key in dictionary_keys:
                if 'wx' in key:
                    layer_count += 1
            layer_count /= 2
            return int(layer_count)

        def get_n_out_dim(dictionary):
            # Output dimension = row count of the last "wx" weight matrix.
            num_layers = get_number_hidden_layers(dictionary.keys())
            last_layer_key = 'wx.' + str(num_layers - 1) + '.weight'
            for key in dictionary.keys():
                if last_layer_key == key:
                    return dictionary[key].shape[0]
            return 0

        try:
            # NOTE(review): if cuda_device is an index other than 0/1,
            # checkpoint_load is never assigned and the next line raises a
            # NameError that the RuntimeError handler does not catch — confirm.
            if cuda_device != 'cpu':
                if int(config["exp"]["cuda_device"]) == 0:
                    checkpoint_load = torch.load(acoustic_model_path, map_location="cuda:0")
                elif int(config["exp"]["cuda_device"]) == 1:
                    checkpoint_load = torch.load(acoustic_model_path, map_location="cuda:1")
            else:
                checkpoint_load = torch.load(acoustic_model_path, map_location="cpu")
            N_out_lab_cd = get_n_out_dim(checkpoint_load["model_par"])
            # import the class
            module = importlib.import_module(config["acoustic_model"]["arch_library"])
            nn_class = getattr(module, config["acoustic_model"]["arch_class"])
            # Substitute the placeholder with the checkpoint's output size.
            config["acoustic_model"]["dnn_lay"] = config["acoustic_model"]["dnn_lay"].replace('N_out_lab_cd', str(N_out_lab_cd))
            # Input size doubles when original + enhanced features are concatenated.
            if double_features:
                acoustic_model = nn_class(config["acoustic_model"], int(2 * frames_per_sample * 40))
            else:
                acoustic_model = nn_class(config["acoustic_model"], int(frames_per_sample * 40))
            acoustic_model.load_state_dict(checkpoint_load["model_par"])
            acoustic_model = acoustic_model.cuda()
        except RuntimeError:
            print("Error loading acoustic model! Check that models in config file match.")
else:
    if os.path.exists(acoustic_model_path):
        def get_number_hidden_layers(dictionary_keys):
            # Each hidden layer contributes a weight and a bias entry named
            # "wx.*", hence the division by two.
            layer_count = 0
            for key in dictionary_keys:
                if 'wx' in key:
                    layer_count += 1
            layer_count /= 2
            return int(layer_count)

        def get_n_out_dim(dictionary):
            # Output dimension = row count of the last "wx" weight matrix.
            num_layers = get_number_hidden_layers(dictionary.keys())
            last_layer_key = 'wx.' + str(num_layers - 1) + '.weight'
            for key in dictionary.keys():
                if last_layer_key == key:
                    return dictionary[key].shape[0]
            return 0

        try:
            if cuda_device != 'cpu':
                if int(config["exp"]["cuda_device"]) == 0:
                    checkpoint_load = torch.load(acoustic_model_path, map_location="cuda:0")
                elif int(config["exp"]["cuda_device"]) == 1:
                    checkpoint_load = torch.load(acoustic_model_path, map_location="cuda:1")
            else:
                checkpoint_load = torch.load(acoustic_model_path, map_location="cpu")
            N_out_lab_cd = get_n_out_dim(checkpoint_load["model_par"])
            # import the class
            module = importlib.import_module(config["acoustic_model"]["arch_library"])
            nn_class = getattr(module, config["acoustic_model"]["arch_class"])
            config["acoustic_model"]["dnn_lay"] = config["acoustic_model"]["dnn_lay"].replace('N_out_lab_cd', str(N_out_lab_cd))
            if double_features:
                acoustic_model = nn_class(config["acoustic_model"], int(2 * frames_per_sample * 40))
            else:
                acoustic_model = nn_class(config["acoustic_model"], int(frames_per_sample * 40))
            acoustic_model.load_state_dict(checkpoint_load["model_par"])
            acoustic_model = acoustic_model.cuda()
            # Only this branch enables acoustic-model-guided GAN training.
            train_with_am = True
        except RuntimeError:
            print("Error loading acoustic model! Check that models in config file match.")
    else:
        print("Acoustic model path doesnt exist!")
# Create networks and optimizers
# Create Generator
input_dim = train_set.__getitem__(0)[0].shape[0]
generator_class = getattr(gan_networks, config["generator"]["arch_name"])
generator = generator_class(input_dim,
input_dim,
config["generator"])
if config["hyperparameters"]["criterion"] == "cycle":
generator_f = generator_class(input_dim,
input_dim,
config["generator"])
# Create Discriminator
discriminator_class = getattr(gan_networks, config["discriminator"]["arch_name"])
discriminator = discriminator_class(input_dim, config["discriminator"])
if config["hyperparameters"]["criterion"] == "cycle":
discriminator_h = discriminator_class(input_dim, config["discriminator"])
generator = generator.cuda()
discriminator = discriminator.cuda()
if config["hyperparameters"]["criterion"] == "cycle":
generator_f = generator_f.cuda()
discriminator_h = discriminator_h.cuda()
# Creating directories
directory_g = os.path.join(out_folder, config["gan"]["output_path_g"])
directory_d = os.path.join(out_folder, config["gan"]["output_path_d"])
gan_dir = os.path.dirname(directory_g)
if not os.path.exists(gan_dir):
os.mkdir(gan_dir)
if not os.path.exists(gan_dir + "/images"):
os.mkdir(gan_dir + "/images")
# Copy pretrained models into directory if it is set
try:
if str(config["generator"]["pretrained_file"]) != "none":
if os.path.exists(str(config["generator"]["pretrained_file"])):
copyfile(str(config["generator"]["pretrained_file"]), directory_g)
print("Loaded pretrained G.")
except KeyError:
pass
try:
if str(config["discriminator"]["pretrained_file"]) != "none":
if os.path.exists(str(config["discriminator"]["pretrained_file"])):
copyfile(str(config["discriminator"]["pretrained_file"]), directory_d)
print("Loaded pretrained D.")
except KeyError:
pass
# Load pretrained models
if os.path.exists(directory_g):
try:
generator.load_state_dict(torch.load(directory_g))
if criterion == "cycle":
generator_f.load_state_dict(torch.load(os.path.dirname(directory_g) + "/generator_f.pt"))
except RuntimeError:
print("Load error loading G, network will be recreated.")
if os.path.exists(directory_d):
try:
discriminator.load_state_dict(torch.load(directory_d))
if criterion == "cycle":
discriminator_h.load_state_dict(torch.load(os.path.dirname(directory_d) + "/discriminator_h.pt"))
except RuntimeError:
print("Load error loading D, network will be recreated.")
# Optimizer initialization
if config["hyperparameters"]["optimizer"] == "adam":
if criterion == "cycle":
optimizer_g = torch.optim.Adam(itertools.chain(generator.parameters(), generator_f.parameters()), lr = lr_g)
optimizer_d = torch.optim.Adam(discriminator.parameters(), lr = lr_d)
optimizer_h = torch.optim.Adam(discriminator_h.parameters(), lr = lr_d)
else:
optimizer_g = torch.optim.Adam(generator.parameters(), lr = lr_g, betas = (0.5, 0.999), weight_decay = 0.001)
optimizer_d = torch.optim.Adam(discriminator.parameters(), lr = lr_d, betas = (0.5, 0.999), weight_decay = 0.001)
elif config["hyperparameters"]["optimizer"] == "rmsprop":
if criterion == "cycle":
optimizer_g = torch.optim.RMSprop(itertools.chain(generator.parameters(), generator_f.parameters()), lr = lr_g)
optimizer_d = torch.optim.RMSprop(discriminator.parameters(), lr = lr_d)
optimizer_h = torch.optim.RMSprop(discriminator_h.parameters(), lr = lr_d)
else:
optimizer_g = torch.optim.RMSprop(generator.parameters(), lr = lr_g)
optimizer_d = torch.optim.RMSprop(discriminator.parameters(), lr = lr_d)
elif config["hyperparameters"]["optimizer"] == "sgd":
if criterion == "cycle":
optimizer_g = torch.optim.SGD(itertools.chain(generator.parameters(), generator_f.parameters()), lr = lr_g)
optimizer_d = torch.optim.SGD(discriminator.parameters(), lr = lr_d)
optimizer_h = torch.optim.SGD(discriminator_h.parameters(), lr = lr_d)
else:
optimizer_g = torch.optim.SGD(generator.parameters(), lr = lr_g)
optimizer_d = torch.optim.SGD(discriminator.parameters(), lr = lr_d)
# Start training
print("\n@ Progress: Starting training")
train_start_time = time.time()
number_of_batches = len(train_loader)
if str(config["wandb"]["wandb"]) == "True":
wandb_cfg = wandb.load_cfg_dict_from_yaml(str(config["wandb"]["config"]))
# UPDATE config file if Weights and Biases file is different
wandb_cfg["max_epochs"] = max_epochs
wandb_cfg["seed"] = seed
wandb_cfg["batch_size"] = batch_size
wandb_cfg["lr_g"] = lr_g
wandb_cfg["lr_d"] = lr_d
wandb_cfg["criterion"] = criterion
wandb_cfg["optimizer"] = optimizer
wandb_cfg["generator"] = str(config["generator"]["arch_name"])
wandb_cfg["discriminator"] = str(config["discriminator"]["arch_name"])
wandb_cfg["dataset"] = str(config["exp"]["dataset_name"])
wandb_cfg["acoustic_model_lambda"] = acoustic_model_lambda
wandb_cfg["cycle_consistency_lambda"] = cycle_consistency_lambda
wandb_cfg["gp_lambda"] = gp_lambda
wandb_details = os.path.join(out_folder, "wandb_details.txt")
if not os.path.exists(wandb_details):
wandb_details_file = open(wandb_details, "w")
wandb.initialize_wandb(project = str(config["wandb"]["project"]),
config = wandb_cfg,
directory = out_folder,
resume = False)
try:
wandb_details_file.write(wandb.get_run_id() + '\n')
wandb_details_file.write(wandb.get_run_name())
except TypeError:
pass
wandb_details_file.close()
else:
wandb_details_file = open(wandb_details, "r")
try:
file_content = wandb_details_file.read().splitlines()
wandb_run_id = file_content[0]
wandb_run_name = file_content[1]
except IndexError:
pass
wandb_details_file.close()
try:
wandb.initialize_wandb(project = str(config["wandb"]["project"]),
config = wandb_cfg,
directory = out_folder,
resume = True,
identity = wandb_run_id,
name = wandb_run_name)
except NameError:
wandb.initialize_wandb(project = str(config["wandb"]["project"]),
config = wandb_cfg,
directory = out_folder,
resume = True)
def create_log_file():
if not os.path.exists(os.path.join(out_folder, 'log.log')):
log_file = open(os.path.join(out_folder, 'log.log'), "w")
log_file.close()
def update_log_file(text_str):
log_file = open(os.path.join(out_folder, 'log.log'), "a")
log_file.write(text_str + "\n")
log_file.close()
def get_last_trained_epoch():
log_file = open(os.path.join(out_folder, 'log.log'), "r")
file_lines = log_file.readlines()
log_file.close()
if len(file_lines) > 0:
epoch_last, chunk_last = (file_lines[-1].replace("epoch_", "")).split("_")
return int(epoch_last), int(chunk_last)
else:
return 0, 0
def validate_generator_results():
with torch.no_grad():
number_of_valid_batches = len(valid_loader)
validation_loss = 0
correct = 0
total_samples = 0
for valid_batch, valid_label_batch in valid_loader:
valid_batch = valid_batch.cuda()
valid_label_batch = valid_label_batch.cuda()
if criterion == "am-gan":
gen_output, _ = generator(valid_batch)
else:
gen_output = generator(valid_batch)
if g_output.shape[0] > 1:
if double_features:
am_evaluation = acoustic_model(torch.cat((valid_batch, gen_output), dim = 1))
else:
am_evaluation = acoustic_model(gen_output)
validation_loss += functional.nll_loss(am_evaluation, valid_label_batch.long()).item()
pred = am_evaluation.data.max(1, keepdim = True)[1]
correct += torch.sum(pred.eq(valid_label_batch.data.view_as(pred))).item()
total_samples += valid_label_batch.shape[0]
validation_loss = validation_loss / number_of_valid_batches
validation_error = 1 - (correct / total_samples)
return validation_loss, validation_error
def check_discriminator_classification():
with torch.no_grad():
v_set = Dataset(clean_dataset_path, noisy_dataset_path, chunk)
v_loader = torch.utils.data.DataLoader(v_set,
batch_size=batch_size,
shuffle=True,
num_workers=4)
nob = len(v_loader)
validation_loss = 0
correct = 0
total_samples = 0
for v_clean_batch, v_noisy_batch, _, _ in v_loader:
nob += 1
v_clean_batch, v_noisy_batch = v_clean_batch.cuda(), v_noisy_batch.cuda()
v_input = torch.cat((v_clean_batch, v_noisy_batch), dim=0)
v_target = torch.cat((torch.ones(v_clean_batch.shape[0]).long(), torch.zeros(v_noisy_batch.shape[0]).long()), dim=0).to(cuda_device)
v_output = discriminator(v_input)
validation_loss += functional.cross_entropy(v_output, v_target).item()
pred = v_output.data.max(1, keepdim=True)[1]
correct += torch.sum(pred.eq(v_target.data.view_as(pred))).item()
total_samples += v_target.shape[0]
validation_loss = validation_loss / nob
validation_error = 1 - (correct / total_samples)
return validation_loss, validation_error
create_log_file()
if wandb_on:
wandb.quick_log("status", "training", commit = False)
epochs_skipped = 0
lowest_valid_error = 1
early_stopping_epoch = 0
file_loss = open(os.path.join(out_folder, "losses"), "w")
file_loss.close()
for epoch in range(1, max_epochs+1):
# Check if epoch has been processed
last_ep, last_ch = get_last_trained_epoch()
if (epoch < last_ep) or (last_ep == epoch and last_ch == number_of_chunks):
print("")
print(" Previously completed epoch: {} ".format(epoch).center(64, "#"))
epochs_skipped += 1
continue
if wandb_on:
wandb.quick_log("epoch", epoch, commit = False)
# Training
epoch_start_time = time.time()
print("")
print(" Optimizing epoch: {}/{} ".format(epoch, max_epochs).center(64, "#"))
for chunk in range(1, number_of_chunks + 1):
# Check if chunk has been processed
if (last_ep == epoch) and (chunk <= last_ch):
continue
if wandb_on:
wandb.quick_log("chunk", chunk, commit = True)
current_batch = 0
g_loss = 0
d_loss = 0
tot_am_loss = 0
train_set = Dataset(clean_dataset_path, noisy_dataset_path, chunk)
train_loader = torch.utils.data.DataLoader(train_set,
batch_size = batch_size,
shuffle = True,
num_workers = 6)
number_of_batches = len(train_loader)
print(" Chunk: {}/{} ".format(chunk, number_of_chunks).center(64, "-"))
for clean_batch, noisy_batch, label_batch, label_noisy_batch in train_loader:
current_batch += 1
# Transfer to GPU
clean_batch, noisy_batch = clean_batch.cuda(), noisy_batch.cuda()
label_batch, label_noisy_batch = label_batch.cuda(), label_noisy_batch.cuda()
d_clean_batch = clean_batch
d_noisy_batch = noisy_batch
if d_clean_batch.shape[0] > 1 and d_noisy_batch.shape[0] > 1:
for k in range(d_updates):
# TRAIN DISCRIMINATOR
optimizer_d.zero_grad()
d_output_clean = discriminator(d_clean_batch)
if criterion == "am-gan":
g_output, am_gan_output = generator(d_noisy_batch)
g_output = g_output.detach()
else:
g_output = generator(d_noisy_batch).detach()
d_output_g = discriminator(g_output)
real_labels = get_labels(d_clean_batch.shape[0], real_label).cuda()
fake_labels = get_labels(d_clean_batch.shape[0], 0).cuda()
if criterion == "bce" or criterion == "bce-l1" or criterion == "bce-l2" or criterion == "bce-all" or criterion == "am-gan":
loss_clean = functional.binary_cross_entropy(d_output_clean, real_labels)
loss_noisy = functional.binary_cross_entropy(d_output_g, fake_labels)
loss_discriminator = loss_clean + loss_noisy
loss_discriminator.backward()
optimizer_d.step()
d_loss += loss_discriminator.item()
file_loss = open(os.path.join(out_folder, "losses"), "a")
file_loss.write(str(epoch) + "," + str(chunk) + "," + str(loss_discriminator.item()) + ",")
file_loss.close()
elif criterion == "wgan":
if gp_lambda > 0:
gp_loss = compute_gradient_penalty(discriminator, d_clean_batch, g_output)
else:
gp_loss = 0
if train_d_with_noisy:
loss_discriminator = - torch.mean(d_output_clean) + torch.mean(d_output_g) + (0.1*torch.mean(discriminator(d_noisy_batch))) + (gp_lambda * gp_loss)
else:
loss_discriminator = - torch.mean(d_output_clean) + torch.mean(d_output_g) + (gp_lambda * gp_loss)
loss_discriminator.backward()
optimizer_d.step()
d_loss += loss_discriminator.item()
temp_d_loss = - torch.mean(d_output_clean) + torch.mean(d_output_g)
if gp_lambda > 0:
file_loss = open(os.path.join(out_folder, "losses"), "a")
file_loss.write(str(epoch) + "," + str(chunk) + "," + str(temp_d_loss.item()) + "," + str(gp_loss.item()) + ",")
file_loss.close()
elif criterion == "cycle":
optimizer_h.zero_grad()
h_output_noisy = discriminator_h(d_noisy_batch) # H_f output of noisy signal
h_output_f = discriminator_h(generator_f(d_clean_batch)) # H_f output of F
criterion_GAN = torch.nn.MSELoss()
criterion_GAN = criterion_GAN.cuda()
# TRAIN Discriminator D
# Real loss
loss_real = criterion_GAN(d_output_clean, real_labels)
# Fake loss
loss_fake = criterion_GAN(d_output_g, fake_labels)
# Total loss
loss_d = loss_real + loss_fake
loss_d.backward()
optimizer_d.step()
# TRAIN Discriminator H
# Real loss
loss_real = criterion_GAN(h_output_noisy, real_labels)
# Fake loss
loss_fake = criterion_GAN(h_output_f, fake_labels)
# Total loss
loss_h = loss_real + loss_fake
loss_h.backward()
optimizer_h.step()
d_loss += (loss_d.item() + loss_h.item()) / 2
if k < (d_updates - 1):
d_clean_batch, d_noisy_batch = train_set.getbatch(random.randint(0, train_set.__len__() - batch_size - 1), batch_size)
d_clean_batch = d_clean_batch.to(cuda_device)
d_noisy_batch = d_noisy_batch.to(cuda_device)
# TRAIN GENERATOR
optimizer_g.zero_grad()
if criterion == "am-gan":
g_output, am_gan_output = generator(noisy_batch)
else:
g_output = generator(noisy_batch)
d_verdict = discriminator(g_output)
am_loss = 0
if train_with_am:
if g_output.shape[0] > 1:
if double_features:
am_output = acoustic_model(torch.cat((noisy_batch, g_output), dim = 1))
else:
am_output = acoustic_model(g_output)
am_loss = functional.nll_loss(am_output, label_noisy_batch.long())
f = open(os.path.join(out_folder, "am_loss.txt"), 'a')
f.writelines(str(am_loss))
f.close()
tot_am_loss += am_loss.item()
else:
am_loss = 0
elif use_external_model:
if g_output.shape[0] > 1:
am_output = acoustic_model(g_output)
numpy_output = am_output.detach().cpu().numpy()
imported_am_output = torch.from_numpy(numpy_output).cuda()
am_loss = functional.nll_loss(imported_am_output, label_noisy_batch.long())
f = open(os.path.join(out_folder, "am_loss.txt"), 'a')
f.writelines(str(am_loss))
f.close()
tot_am_loss += am_loss.item()
else:
am_loss = 0
if criterion == "bce":
gen_labels = get_labels(clean_batch.shape[0], real_label).cuda()
bce_loss = functional.binary_cross_entropy(d_verdict, gen_labels)
loss_generator = bce_loss + (acoustic_model_lambda * am_loss)
loss_generator.backward()
optimizer_g.step()
if am_loss > 0:
g_loss += loss_generator.item() - (acoustic_model_lambda * am_loss.item())
file_loss = open(os.path.join(out_folder, "losses"), "a")
file_loss.write(str(bce_loss.item()) + "," + str(am_loss.item()) + "\n")
file_loss.close()
elif criterion == "wgan":
loss_generator = -torch.mean(d_verdict) + (acoustic_model_lambda * am_loss)
loss_generator.backward()
optimizer_g.step()
if am_loss > 0:
g_loss += loss_generator.item() - (acoustic_model_lambda * am_loss.item())
file_loss = open(os.path.join(out_folder, "losses"), "a")
temp_g_loss = -torch.mean(d_verdict)
file_loss.write(str(temp_g_loss.item()) + "," + str(am_loss.item()) + "\n")
file_loss.close()
elif criterion == "cycle":
criterion_GAN = torch.nn.MSELoss()
criterion_cycle = torch.nn.L1Loss()
criterion_identity = torch.nn.L1Loss()
criterion_GAN = criterion_GAN.cuda()
criterion_cycle = criterion_cycle.cuda()
criterion_identity = criterion_identity.cuda()
f_verdict = discriminator_h(generator_f(clean_batch))
# TRAIN CYCLE GENERATORS
# GAN loss
loss_GAN_g = criterion_GAN(d_verdict, real_labels)
loss_GAN_f = criterion_GAN(f_verdict, real_labels)
loss_GAN = (loss_GAN_g + loss_GAN_f) / 2
# Cycle loss
cycle_input_g = torch.unsqueeze(generator_f(clean_batch), dim = 1).cuda()
cycle_input_g = torch.cat((cycle_input_g, torch.randn(cycle_input_g.shape).cuda()), dim = 1)
cycle_input_f = torch.unsqueeze(generator(noisy_batch), dim = 1).cuda()
cycle_input_f = torch.cat((cycle_input_f, torch.randn(cycle_input_f.shape).cuda()), dim = 1)
recov_A = generator(generator_f(clean_batch))
loss_cycle_A = criterion_cycle(recov_A, clean_batch)
recov_B = generator_f(generator(noisy_batch))
loss_cycle_B = criterion_cycle(recov_B, noisy_batch)
loss_cycle = (loss_cycle_A + loss_cycle_B) / 2
# Total loss
loss_generator = loss_GAN + (cycle_consistency_lambda * loss_cycle) + (acoustic_model_lambda * am_loss)
loss_generator.backward()
optimizer_g.step()
g_loss += loss_generator.item()
elif criterion == "am-gan":
gen_labels = get_labels(clean_batch.shape[0], real_label).cuda()
bce_loss = functional.binary_cross_entropy(d_verdict, gen_labels)
am_gan_loss = functional.nll_loss(am_gan_output, label_noisy_batch.long())
loss_generator = bce_loss + (acoustic_model_lambda * am_gan_loss) + (acoustic_model_lambda * am_loss)
loss_generator.backward()
optimizer_g.step()
g_loss += loss_generator.item()
# Print end of batch results
print("Processing batch", current_batch, "/", number_of_batches, "\r", end = '')
try:
am_loss = am_loss.item()
except:
pass
print("\nD-loss:\t %.4f | G-loss:\t %.4f | AM-loss:\t %.4f" % (round(d_loss / current_batch, 4), round(g_loss / current_batch, 4), round(tot_am_loss / current_batch, 4)))
f = open(os.path.join(out_folder, "performance.txt"), 'a')
f.writelines(["Epoch " + str(epoch) + " Chunk: " + str(chunk),
"\nD-loss:\t %.4f | G-loss:\t %.4f | AM-loss:\t %.4f\n" % (round(d_loss / current_batch, 4), round(g_loss / current_batch, 4), round(tot_am_loss / current_batch, 4))])
f.close()
if wandb_on:
wandb.quick_log("d-loss", (d_loss / current_batch), commit = False)
wandb.quick_log("g-loss", (g_loss / current_batch), commit = False)
if train_with_am:
wandb.quick_log("am-loss", (tot_am_loss / current_batch), commit = False)
torch.save(generator.state_dict(), directory_g)
torch.save(discriminator.state_dict(), directory_d)
if criterion == "cycle":
torch.save(generator_f.state_dict(), os.path.dirname(directory_g) + "/generator_f.pt")
torch.save(discriminator_h.state_dict(), os.path.dirname(directory_d) + "/discriminator_h.pt")
if config["gan"]["save_figures"] == "True":
figure_name = out_folder + '/gan/images/e' + str(epoch) + 'c' + str(chunk) + '.png'
with torch.no_grad():
numpyarr = [clean_batch[0].cpu(), noisy_batch[0].cpu(), g_output[0].cpu()]
titles = ["Clean", "Encoded", "Generator"]
save_tensor_list_to_png(numpyarr, titles, figure_name)
update_log_file('epoch_' + str(epoch) + '_' + str(chunk))
print(" ".center(30, " "), end = '\r')
if train_with_am:
print("")
print(" Validation ".center(64, "-"))
valid_loss, valid_error = validate_generator_results()
print("Validation-loss: %.4f | Validation-error: %.4f" % (valid_loss, valid_error))
f = open(os.path.join(out_folder, "performance.txt"), 'a')
f.writelines(["\nValidation\n", "Validation-loss: %.4f | Validation-error: %.4f\n" % (valid_loss, valid_error)])
f.close()
if early_stopping:
if valid_error <= lowest_valid_error:
torch.save(generator.state_dict(), os.path.dirname(directory_g) + "/generator_es.pt")
torch.save(discriminator.state_dict(), os.path.dirname(directory_d) + "/discriminator_es.pt")
lowest_valid_error = valid_error
early_stopping_epoch = epoch
if wandb_on:
wandb.quick_log("early-stopping-epoch", early_stopping_epoch, commit=False)
wandb.quick_log("early-stopping-valid-error", lowest_valid_error, commit=False)
if wandb_on:
wandb.quick_log("valid-loss", valid_loss, commit = False)
wandb.quick_log("valid-error", valid_error, commit = False)
# Print end of epoch summary
epoch_end_time = time.time()
ave_epoch_time = (epoch_end_time - train_start_time) / (epoch - epochs_skipped)
epochs_remaining = max_epochs - (epoch - epochs_skipped)
estimated_time_left = ave_epoch_time * epochs_remaining
d_loss = d_loss / number_of_batches
g_loss = g_loss / number_of_batches
print(" Epoch {} completed in: {} | ETA: {} ".format(epoch,
format_time(epoch_end_time - epoch_start_time),
format_time(estimated_time_left)).center(64, "#"))
f = open(os.path.join(out_folder, "performance.txt"), 'a')
f.writelines("\nEpoch {} completed in: {} | ETA: {} \n".format(epoch,
format_time(epoch_end_time - epoch_start_time),
format_time(estimated_time_left)))
f.close()
if wandb_on:
wandb.quick_log("ETA", format_time(estimated_time_left), commit = False)
wandb.quick_log("epoch_time", format_time(epoch_end_time - epoch_start_time), commit = False)
if early_stopping:
print("Early-stopping | Epoch: %d | Validation-error: %.4f" % (early_stopping_epoch, lowest_valid_error))
f = open(os.path.join(out_folder, "performance.txt"), 'a')
f.writelines("\nEarly-stopping | Epoch: %d | Validation-error: %.4f\n\n" % (early_stopping_epoch, lowest_valid_error))
f.close()
print("\n@ Progress: Training complete\n")
if wandb_on:
wandb.quick_log("status", "complete", commit = True)
print("Completed in:", format_time(time.time() - script_start_time))
| 44,242 | 36.621599 | 191 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/gan_networks.py | import torch
import torch.nn as nn
from distutils.util import strtobool
from torch.nn.utils import spectral_norm
import math
class LayerNorm(nn.Module):
    """Layer normalisation over the last dimension with a learnable
    scale (gamma) and shift (beta).

    Normalises by the sample std of the last axis; `eps` keeps the
    division finite for constant inputs.
    """

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(features))
        self.beta = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalised = (x - mu) / (sigma + self.eps)
        return self.gamma * normalised + self.beta
def act_fun(act_type):
    """Map an activation name to a freshly constructed nn module.

    Returns None for unrecognised names. 'linear' yields LeakyReLU(1)
    purely as a placeholder — callers skip it in forward.
    """
    factories = {
        "relu": nn.ReLU,
        "tanh": nn.Tanh,
        "sigmoid": nn.Sigmoid,
        "leaky_relu": lambda: nn.LeakyReLU(0.2),
        "elu": nn.ELU,
        "softmax": lambda: nn.LogSoftmax(dim=1),
        "linear": lambda: nn.LeakyReLU(1),  # initialized like this, but not used in forward!
    }
    factory = factories.get(act_type)
    return factory() if factory is not None else None
def create_module_list(input_dim, output_dim, cfg):
    """Build an nn.ModuleList of Linear (+ BatchNorm / activation / Dropout)
    stages from the comma-separated strings in `cfg`.

    `cfg["dnn_lay"]` lists hidden sizes; `output_dim` is appended as the final
    layer. Only the very first Linear carries a bias. Per-layer batchnorm,
    activation ('linear' means none) and dropout come from the parallel
    cfg lists.
    """
    layer_sizes = [int(s) for s in cfg["dnn_lay"].split(",")]
    drop_rates = [float(s) for s in cfg["dnn_drop"].split(",")]
    use_bn = [strtobool(s) for s in cfg["dnn_batchnorm"].split(",")]
    act_names = cfg["dnn_act"].split(",")

    layer_sizes.append(output_dim)
    modules = nn.ModuleList([])
    fan_in = input_dim
    for idx, size in enumerate(layer_sizes):
        # Bias only on the first Linear, matching the original scheme.
        modules.append(nn.Linear(fan_in, size, bias=(idx == 0)))
        if use_bn[idx]:
            modules.append(nn.BatchNorm1d(size, momentum=0.05))
        if act_names[idx] != "linear":
            modules.append(act_fun(act_names[idx]))
        if drop_rates[idx] > 0:
            modules.append(nn.Dropout(p=drop_rates[idx]))
        fan_in = size
    return modules
class Block_Encode(nn.Module):
    """Downsampling unit: strided Conv1d -> ReLU -> Dropout.

    'same'-style padding of (kernel_size - 1) // 2 means the output length
    is governed purely by the stride.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, dropout):
        super().__init__()
        same_pad = int((kernel_size - 1) / 2)
        stages = [
            nn.Conv1d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=same_pad),
            nn.ReLU(),
            nn.Dropout(dropout),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the stage to a (batch, channels, length) tensor."""
        return self.block(x)
class Block_Decode(nn.Module):
    """Upsampling unit: ConvTranspose1d -> ReLU -> Dropout.

    Padding of ceil((kernel_size - 2) / 2) makes a stride-2 transpose conv
    double the input length for even kernels.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, dropout):
        super().__init__()
        up_pad = int(math.ceil((kernel_size - 2) / 2))
        stages = [
            nn.ConvTranspose1d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=up_pad),
            nn.ReLU(),
            nn.Dropout(dropout),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the stage to a (batch, channels, length) tensor."""
        return self.block(x)
class Block_Encode_BN(nn.Module):
    """Downsampling unit with batch norm: Conv1d -> BatchNorm1d -> ReLU -> Dropout."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, dropout):
        super().__init__()
        same_pad = int((kernel_size - 1) / 2)
        stages = [
            nn.Conv1d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=same_pad),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(),
            nn.Dropout(dropout),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the stage to a (batch, channels, length) tensor."""
        return self.block(x)
class Block_Decode_BN(nn.Module):
    """Upsampling unit with batch norm: ConvTranspose1d -> BatchNorm1d -> ReLU -> Dropout."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, dropout):
        super().__init__()
        up_pad = int(math.ceil((kernel_size - 2) / 2))
        stages = [
            nn.ConvTranspose1d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=up_pad),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(),
            nn.Dropout(dropout),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the stage to a (batch, channels, length) tensor."""
        return self.block(x)
class Block_Encode_SN(nn.Module):
    """Downsampling unit with spectral normalisation on the conv weight:
    spectral_norm(Conv1d) -> ReLU -> Dropout."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, dropout):
        super().__init__()
        same_pad = int((kernel_size - 1) / 2)
        conv = nn.Conv1d(in_channels=in_channels,
                         out_channels=out_channels,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding=same_pad)
        stages = [
            spectral_norm(conv),
            nn.ReLU(),
            nn.Dropout(dropout),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the stage to a (batch, channels, length) tensor."""
        return self.block(x)
class Block_Decode_SN(nn.Module):
    """Upsampling unit with spectral normalisation on the transpose-conv
    weight: spectral_norm(ConvTranspose1d) -> ReLU -> Dropout."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, dropout):
        super().__init__()
        up_pad = int(math.ceil((kernel_size - 2) / 2))
        deconv = nn.ConvTranspose1d(in_channels=in_channels,
                                    out_channels=out_channels,
                                    kernel_size=kernel_size,
                                    stride=stride,
                                    padding=up_pad)
        stages = [
            spectral_norm(deconv),
            nn.ReLU(),
            nn.Dropout(dropout),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the stage to a (batch, channels, length) tensor."""
        return self.block(x)
class Discriminator_BCE(nn.Module):
    """Binary real/fake discriminator for BCE-style GAN training.

    Eight Conv1d stages (stride 1, length-preserving padding), each followed
    by MaxPool1d(2), shrink the input length by a factor of 256, so
    `inp_dim` must collapse to a single 128-channel step before the sigmoid
    head (i.e. inp_dim == 256 for the standard setup). Only the first two
    stages apply Dropout(0.3). `cfg` is accepted for interface parity but
    unused.
    """

    def __init__(self, inp_dim, cfg):
        super(Discriminator_BCE, self).__init__()
        self.input_dim = inp_dim
        self.output_dim = 1
        leaky_alpha = 0.2
        # (in_channels, out_channels, kernel_size, padding, apply_dropout)
        conv_specs = [
            (1, 16, 41, 20, True),
            (16, 16, 13, 6, True),
            (16, 32, 13, 6, False),
            (32, 32, 13, 6, False),
            (32, 64, 13, 6, False),
            (64, 64, 13, 6, False),
            (64, 128, 13, 6, False),
            (128, 128, 13, 6, False),
        ]
        stages = []
        for in_ch, out_ch, kernel, pad, use_dropout in conv_specs:
            stages.append(nn.Conv1d(in_channels=in_ch,
                                    out_channels=out_ch,
                                    kernel_size=kernel,
                                    padding=pad,
                                    stride=1))
            stages.append(nn.MaxPool1d(kernel_size=2, stride=2))
            stages.append(nn.LeakyReLU(leaky_alpha, inplace=True))
            if use_dropout:
                stages.append(nn.Dropout(0.3))
        self.block = nn.Sequential(*stages)
        self.out_block = nn.Sequential(
            nn.Linear(128, self.output_dim),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Score a (batch, input_dim) feature vector; returns values in (0, 1)."""
        features = self.block(x.view(-1, 1, self.input_dim))
        return self.out_block(features.view(-1, 128))
def create_conv_module_list(input_dim, output_dim, type, cfg):
    """Build an nn.ModuleList of Conv1d stages from the comma-separated
    strings in `cfg`.

    The convolutional body is created only for the CycleGAN block types
    ('Cycle_Generator' / 'Cycle_Discriminator'); any type whose name
    contains 'discriminator' additionally gets a Linear + Sigmoid head.
    """
    feature_plan = [int(s) for s in cfg["conv_features"].replace(" ", "").split(",")]
    kernel_plan = [int(s) for s in cfg["conv_kernals"].replace(" ", "").split(",")]
    drop_plan = [float(s) for s in cfg["conv_dropout"].replace(" ", "").split(",")]
    bn_plan = [str(s) for s in cfg["conv_batchnorm"].replace(" ", "").split(",")]
    act_plan = [str(s) for s in cfg["conv_act"].replace(" ", "").split(",")]

    layers = nn.ModuleList([])
    if type in ("Cycle_Generator", "Cycle_Discriminator"):
        for i in range(len(feature_plan) - 1):
            # Length-preserving convolution between consecutive channel counts.
            layers.append(nn.Conv1d(in_channels=feature_plan[i],
                                    out_channels=feature_plan[i + 1],
                                    kernel_size=kernel_plan[i],
                                    padding=int((kernel_plan[i] - 1) / 2),
                                    stride=1))
            if bn_plan[i] == "True":
                layers.append(nn.BatchNorm1d(feature_plan[i + 1]))
            if act_plan[i] != "linear":
                layers.append(act_fun(act_plan[i]))
            if drop_plan[i] > 0:
                layers.append(nn.Dropout(p=drop_plan[i]))
    if "discriminator" in type.lower():
        layers.append(nn.Linear(input_dim, output_dim))
        layers.append(nn.Sigmoid())
    return layers
class Conv2dAuto(nn.Conv2d):
    """Conv2d that derives 'same'-style padding from its own kernel size."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        kh, kw = self.kernel_size
        # Half-kernel padding per axis keeps spatial size for stride 1.
        self.padding = (kh // 2, kw // 2)
class Conv1dAuto(nn.Conv1d):
    """Conv1d that derives 'same'-style padding from its own kernel size.

    NOTE: padding is stored as a plain int rather than the usual tuple;
    F.conv1d accepts either.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.padding = (self.kernel_size[0] - 1) // 2
class Discriminator_wgan_spectral_norm(nn.Module):
    """WGAN critic: eight Conv1d / MaxPool / LeakyReLU / BatchNorm / Dropout
    stages followed by a spectral-normalised linear head.

    Outputs an unbounded score (no sigmoid), as required by the Wasserstein
    objective. `inp_dim` must collapse to one 128-channel step after the
    eight /2 poolings (i.e. inp_dim == 256 for the standard setup).
    `cfg` is accepted for interface parity but unused.
    """

    def __init__(self, inp_dim, cfg):
        # BUG FIX: the original called super(Dis_WGAN_SN, self).__init__(),
        # referencing an undefined class name and raising NameError on every
        # instantiation.
        super(Discriminator_wgan_spectral_norm, self).__init__()
        self.input_dim = inp_dim
        self.output_dim = 1
        leaky_alpha = 0.2
        dropout = 0.3
        # (in_channels, out_channels, kernel_size, padding) per stage.
        conv_specs = [
            (1, 16, 41, 20),
            (16, 16, 13, 6),
            (16, 32, 13, 6),
            (32, 32, 13, 6),
            (32, 64, 13, 6),
            (64, 64, 3, 1),
            (64, 128, 3, 1),
            (128, 128, 3, 1),
        ]
        stages = []
        for in_ch, out_ch, kernel, pad in conv_specs:
            stages.append(nn.Conv1d(in_channels=in_ch,
                                    out_channels=out_ch,
                                    kernel_size=kernel,
                                    padding=pad,
                                    stride=1))
            stages.append(nn.MaxPool1d(kernel_size=2, stride=2))
            stages.append(nn.LeakyReLU(leaky_alpha, inplace=True))
            stages.append(nn.BatchNorm1d(out_ch))
            stages.append(nn.Dropout(dropout))
        self.block = nn.Sequential(*stages)
        self.out_block = nn.Sequential(
            spectral_norm(nn.Linear(128, self.output_dim)),
        )

    def forward(self, x):
        """Return a raw critic score for a (batch, input_dim) input."""
        features = self.block(x.view(-1, 1, self.input_dim))
        return self.out_block(features.view(-1, 128))
class Generator_small(nn.Module):
    """Compact fully convolutional enhancer.

    Five length-preserving Conv1d stages (1->16->16->32->16->1 channels,
    kernel 5) with LeakyReLU(0.2) between them; the final projection has no
    activation. Input and output are flat (batch, inp_dim) vectors.
    `out_dim` and `cfg` are accepted for interface parity but unused.
    """

    def __init__(self, inp_dim, out_dim, cfg):
        super(Generator_small, self).__init__()
        self.inp_dim = inp_dim
        leaky_alpha = 0.2
        kernel_size = 5
        dropout = 0
        same_pad = int((kernel_size - 1) / 2)
        channel_plan = [(1, 16), (16, 16), (16, 32), (32, 16)]
        stages = []
        for in_ch, out_ch in channel_plan:
            stages.append(nn.Conv1d(in_channels=in_ch,
                                    out_channels=out_ch,
                                    kernel_size=kernel_size,
                                    padding=same_pad,
                                    stride=1))
            stages.append(nn.LeakyReLU(leaky_alpha, inplace=True))
            stages.append(nn.Dropout(dropout))
        # Final projection back to a single channel, no activation.
        stages.append(nn.Conv1d(in_channels=16,
                                out_channels=1,
                                kernel_size=kernel_size,
                                padding=same_pad,
                                stride=1))
        self.block = nn.Sequential(*stages)
        # initialize weights
        self.init_weights()

    def init_weights(self):
        """
        Initialize weights for convolution layers using Xavier initialization.
        """
        for m in self.modules():
            if isinstance(m, (nn.Conv1d, nn.ConvTranspose1d, nn.Linear)):
                nn.init.xavier_normal_(m.weight.data)

    def forward(self, x):
        """Enhance a (batch, inp_dim) vector; shape is preserved."""
        return self.block(x.view(-1, 1, self.inp_dim)).view(-1, self.inp_dim)
class Generator_large(nn.Module):
    """U-Net style enhancement generator built from project Block_Encode /
    Block_Decode modules, with skip connections between mirrored stages.

    NOTE(review): ``inp_dim``, ``out_dim`` and ``cfg`` are accepted but never
    used; the shape comments in forward() suggest a fixed 440-sample input —
    confirm against the callers.
    """
    def __init__(self, inp_dim, out_dim, cfg):
        super(Generator_large, self).__init__()
        dropout = 0
        # Encoder: each Block_Encode halves the time dimension (stride 2).
        self.encode1 = Block_Encode(in_channels=1, out_channels=16, kernel_size=7, stride=2, dropout=0)
        self.encode2 = Block_Encode(16, 16, 7, 2, dropout)
        self.encode3 = Block_Encode(16, 32, 5, 2, dropout)
        self.encode4 = Block_Encode(32, 32, 5, 2, dropout)
        self.encode5 = Block_Encode(32, 64, 3, 2, dropout)
        # Decoder: decode2..decode5 receive the previous decoder output
        # concatenated with the mirrored encoder output, hence the doubled
        # in_channels (64 = 32 + 32, 32 = 16 + 16).
        self.decode1 = Block_Decode(64, 32, 4, 2, dropout)
        self.decode2 = Block_Decode(64, 32, 5, 2, dropout)
        self.decode3 = Block_Decode(64, 16, 6, 2, dropout)
        self.decode4 = Block_Decode(32, 16, 6, 2, dropout)
        self.decode5 = Block_Decode(32, 1, 6, 2, dropout)
        # initialize weights
        self.init_weights()
    def init_weights(self):
        """
        Initialize weights for convolution layers using Xavier initialization.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d) or isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data)
    def forward(self, x):
        # Shapes in the comments below assume a 440-sample input (see class note).
        x = torch.unsqueeze(x, dim=1)  # [b x 1 x 440]
        e1 = self.encode1(x)  # [b x 16 x 220]
        e2 = self.encode2(e1)  # [b x 16 x 110]
        e3 = self.encode3(e2)  # [b x 32 x 55]
        e4 = self.encode4(e3)  # [b x 32 x 28]
        e5 = self.encode5(e4)  # [b x 64 x 14]
        d1 = self.decode1(e5)
        # Skip connections: concatenate decoder output with the mirrored
        # encoder activation along the channel dim before each decode stage.
        d2 = self.decode2(torch.cat((d1, e4), dim=1))
        d3 = self.decode3(torch.cat((d2, e3), dim=1))
        d4 = self.decode4(torch.cat((d3, e2), dim=1))
        d5 = self.decode5(torch.cat((d4, e1), dim=1))
        return torch.squeeze(d5, dim=1)
class Discriminator_spectral_norm(nn.Module):
    """Convolutional discriminator whose output layer is spectrally normalized.

    Five conv/maxpool stages reduce a flat ``inp_dim``-sample input, and a
    sigmoid-activated linear layer emits one probability per sample.
    NOTE(review): the Linear expects 832 features (64 channels x 13 steps),
    which implies inp_dim == 440 — confirm with callers. ``cfg`` is unused.
    """

    def __init__(self, inp_dim, cfg):
        super(Discriminator_spectral_norm, self).__init__()
        self.input_dim = inp_dim
        self.output_dim = 1
        slope = 0.2
        drop_p = 0.25
        k = 5
        pad = int((k - 1) / 2)
        # (in_channels, out_channels, apply_dropout) per stage; the last two
        # stages have no dropout, mirroring the original layer sequence so
        # Sequential indices (state_dict keys) are unchanged.
        stages = [(1, 16, True), (16, 16, True), (16, 32, True),
                  (32, 32, False), (32, 64, False)]
        layers = []
        for c_in, c_out, with_dropout in stages:
            layers.append(nn.Conv1d(in_channels=c_in,
                                    out_channels=c_out,
                                    kernel_size=k,
                                    padding=pad,
                                    stride=1))
            layers.append(nn.MaxPool1d(kernel_size=2, stride=2))
            layers.append(nn.LeakyReLU(slope, inplace=True))
            if with_dropout:
                layers.append(nn.Dropout(drop_p))
        self.block = nn.Sequential(*layers)
        self.out_block = nn.Sequential(
            spectral_norm(nn.Linear(832, self.output_dim)),
            nn.Sigmoid(),
        )

    def forward(self, x):
        hidden = self.block(x.view(-1, 1, self.input_dim))
        return self.out_block(hidden.view(-1, 832))
| 20,229 | 32.001631 | 111 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/tune_hyperparameters.py | #!/usr/bin/env python
##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
#
# Description:
# This scripts generates config files with the random hyperparamters specified by the user.
# python tune_hyperparameters.py cfg_file out_folder N_exp hyperparameters_spec
# e.g., python tune_hyperparameters.py cfg/TIMIT_MLP_mfcc.cfg exp/TIMIT_MLP_mfcc_tuning 10 arch_lr=randfloat(0.001,0.01) batch_size_train=randint(32,256) dnn_act=choose_str{relu,relu,relu,relu,softmax|tanh,tanh,tanh,tanh,softmax}
##########################################################
import random
import re
import os
import sys
from random import randint
if __name__ == "__main__":
cfg_file = sys.argv[1]
output_folder = sys.argv[2]
N_exp = int(sys.argv[3])
hyperparam_list = sys.argv[4:]
seed = 1234
print("Generating config file for hyperparameter tuning...")
if not os.path.exists(output_folder):
os.makedirs(output_folder)
random.seed(seed)
for i in range(N_exp):
cfg_file_out = output_folder + "/exp" + str(i) + ".cfg"
with open(cfg_file_out, "wt") as cfg_out, open(cfg_file, "rt") as cfg_in:
for line in cfg_in:
key = line.split("=")[0]
if key == "out_folder":
line = "out_folder=" + output_folder + "/exp" + str(i) + "\n"
hyper_found = False
for hyperparam in hyperparam_list:
key_hyper = hyperparam.split("=")[0]
if key == key_hyper:
if "randint" in hyperparam:
lower, higher = re.search("randint\((.+?)\)", hyperparam).group(1).split(",")
value_hyper = randint(int(lower), int(higher))
hyper_found = True
if "randfloat" in hyperparam:
lower, higher = re.search("randfloat\((.+?)\)", hyperparam).group(1).split(",")
value_hyper = random.uniform(float(lower), float(higher))
hyper_found = True
if "choose_str" in hyperparam:
value_hyper = random.choice(re.search("\{(.+?)\}", hyperparam).group(1).split("|"))
hyper_found = True
if "choose_int" in hyperparam:
value_hyper = int(random.choice(re.search("\{(.+?)\}", hyperparam).group(1).split("|")))
hyper_found = True
if "choose_float" in hyperparam:
value_hyper = float(random.choice(re.search("\{(.+?)\}", hyperparam).group(1).split("|")))
hyper_found = True
line_out = key + "=" + str(value_hyper) + "\n"
if not hyper_found:
line_out = line
cfg_out.write(line_out)
print("Done %s" % cfg_file_out)
| 3,100 | 35.916667 | 229 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/weights_and_biases.py | ##########################################################
# pytorch-kaldi-gan v.1.0
# Walter Heymans
# North West University
# 2020
##########################################################
import wandb
import yaml
import os
from sys import exit
def initialize_wandb(project, config, directory, resume, identity = "", name = ""):
    """Start (or resume) a Weights & Biases run.

    When both ``identity`` and ``name`` are non-empty they are forwarded as
    the run id/name; otherwise wandb chooses them.
    """
    init_kwargs = {
        "project": project,
        "config": config,
        "dir": directory,
        "resume": resume,
        "reinit": True,
    }
    if identity != "" and name != "":
        init_kwargs["id"] = identity
        init_kwargs["name"] = name
    wandb.init(**init_kwargs)
def quick_log(key, value, commit = True):
    # Convenience wrapper: log one key/value pair to the active wandb run.
    # With commit=False the value is buffered until the next committed log call.
    wandb.log({key: value}, commit = commit)
def load_cfg_dict_from_yaml(cfg_filename):
    """Load a wandb-style YAML config file into a flat dict.

    Each top-level key is expected to map to a dict with a 'value' entry
    (wandb config export format); the returned dict maps key -> value.
    Exits with status 101 if the YAML cannot be parsed. A missing file
    still raises FileNotFoundError, as in the original implementation.
    """
    # `with` guarantees the handle is closed on every path (the original
    # used a manual try/finally around open/close).
    with open(cfg_filename, 'r') as f:
        try:
            cfg_yaml = yaml.full_load(f)
        except Exception as e:
            print("Error loading WANDB config file.", e)
            exit(101)
    return {key: entry['value'] for key, entry in cfg_yaml.items()}
def get_api():
    # Return a wandb public-API client for querying past runs/projects.
    return wandb.Api()
def get_run_name():
    # Name of the currently active wandb run (requires a prior wandb.init).
    return wandb.run.name
def get_run_id():
    # Unique id of the currently active wandb run (requires a prior wandb.init).
    return wandb.run.id
| 1,414 | 22.983051 | 83 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/audio_processing.py | import torch
import torchaudio
import numpy as np
import matplotlib.pyplot as plt
import configparser
import os
import sys
import random
import shutil
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.spectral_norm as spectral_norm
# Reading global cfg file (first argument-mandatory file)
cfg_file = sys.argv[1]
if not (os.path.exists(cfg_file)):
    sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
    # Bug fix: exit with a non-zero status on this error path (the original
    # used sys.exit(0), which signals success to the calling shell).
    sys.exit(1)
else:
    config = configparser.ConfigParser()
    config.read(cfg_file)

# Output folder creation
out_folder = config["exp"]["out_folder"]
if not os.path.exists(out_folder):
    os.makedirs(out_folder)

# Dataset locations: parallel trees of clean and noise-corrupted audio.
clean_dataset_dir = config["exp"]["clean_dataset"]
noisy_dataset_dir = config["exp"]["noisy_dataset"]

print("- Reading config file......OK!")
def normalize_batch(tensor):
    """Rescale a batch of tensors to the range [-1, 1].

    Shifts by |min|, scales by |max| + |min|, then maps [0, 1] -> [-1, 1].
    """
    upper = tensor.max().abs()
    lower = tensor.min().abs()
    shifted = (tensor + lower) / (upper + lower)
    return (shifted - 0.5) * 2
def validate_dir(dir_list):
    """Remove entries containing '.' (files / hidden entries) from a directory list.

    Mutates ``dir_list`` in place and also returns it, preserving the
    original call pattern ``lst = validate_dir(lst)``.

    Bug fix: the original removed items while iterating over the same list,
    which silently skipped the element following every removal.
    """
    dir_list[:] = [entry for entry in dir_list if "." not in entry]
    return dir_list
def get_utterances(dir_list):
    """Separate utterance files from the transcription (.txt) file.

    Returns ``(transcriptions, dir_list)`` where ``transcriptions`` is the
    last entry containing '.txt' (or '' if none) and ``dir_list`` is mutated
    in place to hold only the non-.txt entries.

    Bug fix: the original removed items while iterating over the same list,
    which could skip entries that followed a removed one.
    """
    transcriptions = ''
    utterances = []
    for entry in dir_list:
        if '.txt' in entry:
            transcriptions = entry
        else:
            utterances.append(entry)
    dir_list[:] = utterances
    return transcriptions, dir_list
def plot_spectrogram(specgram):
    """Show the log2 spectrogram of the first channel in a new figure."""
    with torch.no_grad():
        image = specgram.log2()[0, :, :].numpy()
        plt.figure()
        plt.imshow(image, cmap = 'gray')
        plt.show()
def plot_spectrogram_list(specgram_list):
    """Show each spectrogram's first channel stacked in one figure."""
    with torch.no_grad():
        plt.figure()
        for row, specgram in enumerate(specgram_list, start=1):
            plt.subplot(len(specgram_list), 1, row)
            plt.imshow(specgram.log2()[0, :, :].numpy(), cmap = 'gray')
        plt.show()
def plot_waveform(waveform_tensor):
    """Plot a waveform tensor in a new figure (channels as columns)."""
    with torch.no_grad():
        # Prefer the transposed layout; fall back for inputs without .t().
        try:
            samples = waveform_tensor.t().detach().to("cpu").numpy()
        except AttributeError:
            samples = waveform_tensor.detach().to("cpu").numpy()
        plt.figure()
        plt.plot(samples)
        plt.show()
def plot_waveform_list(waveform_tensor_list):
    """Plot each waveform tensor as its own stacked subplot."""
    with torch.no_grad():
        plt.figure()
        for row, waveform in enumerate(waveform_tensor_list, start=1):
            plt.subplot(len(waveform_tensor_list), 1, row)
            plt.plot(waveform.detach().to("cpu").numpy())
        plt.show()
def normalize_tensor(tensor):
    """Rescale a tensor to the range [-1, 1].

    NOTE: duplicates normalize_batch() above; kept for interface stability.
    """
    lo = tensor.min().abs()
    hi = tensor.max().abs()
    return ((tensor + lo) / (hi + lo) - 0.5) * 2
def get_context(mfcc_tensor, context_width_left, context_width_right):
    """Return a list of sliding context windows over the time axis (dim 2).

    Each window spans ``context_width_left`` frames before and
    ``context_width_right`` frames after the center index, with the first
    coefficient row (index 0 on dim 1) dropped.

    Bug fix: the original computed each slice and discarded it, always
    returning None; the slices are now collected and returned. This is
    backward compatible because the original return value was never used.
    """
    frames = []
    for i in range(context_width_left, mfcc_tensor.shape[2] - context_width_right):
        frames.append(
            mfcc_tensor[:, 1:, (i - context_width_left):(i + context_width_right + 1)]
        )
    return frames
def get_batch(mfcc_tensor, batch_nr, batchsize):
    """Return mini-batch number ``batch_nr`` (rows) from a feature tensor.

    The last batch may be shorter when the row count is not divisible by
    ``batchsize`` (plain slice semantics).
    """
    start = batch_nr * batchsize
    return mfcc_tensor[start:start + batchsize, :]
def reshape_utterance(mfcc_tensor):
    """Drop the leading batch dim and transpose to (frames, coefficients).

    NOTE: the original docstring claimed MFCC 0 is removed, but no
    coefficient is dropped here — only squeeze + transpose.
    """
    squeezed = torch.squeeze(mfcc_tensor, dim = 0)
    return squeezed.transpose(0, 1)
def get_pearson_correlation(tensor1, tensor2):
    """Pearson correlation between two tensors of identical shape.

    1-D inputs: plain pearsonr. 2-D (or higher) inputs: the mean of the
    per-row correlations along dim 0, with failing rows counted as 0.
    Returns 0 when the shapes differ.
    """
    from scipy.stats import pearsonr

    a = tensor1.detach().cpu().numpy()
    b = tensor2.detach().cpu().numpy()
    if a.shape != b.shape:
        return 0
    if len(a.shape) <= 1:
        r, _ = pearsonr(a, b)
        return r
    total = 0
    for row_a, row_b in zip(a, b):
        try:
            r, _ = pearsonr(row_a, row_b)
        except Exception:
            # e.g. constant rows / too-short rows — treat as no correlation
            r = 0
        total += r
    if a.shape[0] > 0:
        total = total / a.shape[0]
    return total
def get_mean_squared_error(tensor1, tensor2):
    """Mean absolute difference between the magnitudes of two tensors.

    NOTE: despite the name this is an L1-style metric on |x| (mean of
    ||t1| - |t2||), not a squared error; the name is kept for the callers.
    Returns 0 (int) when the shapes differ; otherwise a numpy scalar.

    Bug fixes vs. the original: the per-row loop ignored the row index and
    recomputed the full-tensor mean each iteration (same value for
    equal-length rows, but wasted work), and an empty leading dimension
    crashed on ``error.numpy()`` because ``error`` stayed a plain int.
    """
    output1 = tensor1.detach().cpu()
    output2 = tensor2.detach().cpu()
    if output1.shape != output2.shape:
        return 0
    if len(output1.shape) > 1:
        error = torch.tensor(0.0)
        for i in range(output1.shape[0]):
            # Compare row i to row i (original compared full tensors here).
            error += torch.mean(torch.abs(torch.abs(output1[i]) - torch.abs(output2[i])))
        if output1.shape[0] > 0:
            error = error / output1.shape[0]
    else:
        error = torch.mean(torch.abs(torch.abs(output1) - torch.abs(output2)))
    return error.numpy()
def get_g_performance(clean, noisy, generated):
    """Composite generator-quality metric relative to the noisy signal.

    Sums three improvements of ``generated`` over ``noisy`` w.r.t. ``clean``:
    correlation gain, magnitude-error reduction, and L1-loss reduction.
    Higher is better; 0 means no improvement over the noisy input.
    """
    l1_gain = (torch.nn.functional.l1_loss(clean, noisy).item()
               - torch.nn.functional.l1_loss(clean, generated).item())
    corr_gain = (get_pearson_correlation(clean, generated)
                 - get_pearson_correlation(clean, noisy))
    mae_gain = (get_mean_squared_error(clean, noisy)
                - get_mean_squared_error(clean, generated))
    return corr_gain + mae_gain + l1_gain
def get_labels(batch_size, label):
    """Return a (batch_size, 1) float tensor filled with ``label``."""
    ones_column = torch.ones((batch_size, 1))
    return ones_column * label
class Conv2dAuto(nn.Conv2d):
    """nn.Conv2d that derives 'same'-style padding from its own kernel size."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Half-kernel padding per spatial dim keeps H/W unchanged for odd
        # kernels with stride 1.
        self.padding = tuple(k // 2 for k in self.kernel_size)
class Conv1dAuto(nn.Conv1d):
    """nn.Conv1d that derives 'same'-style padding from its own kernel size."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # (k - 1) // 2 keeps the sequence length unchanged for odd k, stride 1.
        self.padding = (self.kernel_size[0] - 1) // 2
class Generator(nn.Module):
    """Densely-connected 1-D convolutional generator for feature enhancement.

    forward() expects a 2-channel input (noise and noisy features stacked on
    dim 1 — see the training loop below) and returns a single-channel output
    with the channel dim squeezed out.

    NOTE(review): ``input_dim`` is accepted but unused — confirm whether it
    can be dropped at the callers.
    """
    def __init__(self, input_dim):
        super(Generator, self).__init__()
        # Stem: 2 -> 32 channels; Conv1dAuto pads to preserve length.
        self.input_block = nn.Sequential(
            Conv1dAuto(in_channels = 2, out_channels = 32, kernel_size = 3, bias = True),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            Conv1dAuto(in_channels = 32, out_channels = 32, kernel_size = 3, bias = False),
            nn.BatchNorm1d(32),
            nn.ReLU(),
        )
        self.block1 = nn.Sequential(
            Conv1dAuto(in_channels = 32, out_channels = 32, kernel_size = 3, bias = False),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            Conv1dAuto(in_channels = 32, out_channels = 32, kernel_size = 3, bias = False),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            Conv1dAuto(in_channels = 32, out_channels = 32, kernel_size = 3, bias = False),
            nn.BatchNorm1d(32),
            nn.ReLU(),
        )
        # block2/block3 take 64 input channels: the concatenation of the two
        # preceding blocks' outputs (dense skip connections in forward()).
        self.block2 = nn.Sequential(
            Conv1dAuto(in_channels = 64, out_channels = 32, kernel_size = 3, bias = False),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            Conv1dAuto(in_channels = 32, out_channels = 32, kernel_size = 3, bias = False),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            Conv1dAuto(in_channels = 32, out_channels = 32, kernel_size = 3, bias = False),
            nn.BatchNorm1d(32),
            nn.ReLU(),
        )
        self.block3 = nn.Sequential(
            Conv1dAuto(in_channels = 64, out_channels = 32, kernel_size = 3, bias = False),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            Conv1dAuto(in_channels = 32, out_channels = 32, kernel_size = 3, bias = False),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            # Final projection to one channel, no activation.
            Conv1dAuto(in_channels = 32, out_channels = 1, kernel_size = 3, bias = False),
        )
    def forward(self, x):
        x1 = self.input_block(x)
        x2 = self.block1(x1)
        # Dense skips: each later block sees the two previous activations.
        x3 = self.block2(torch.cat((x1, x2), dim = 1))
        x4 = self.block3(torch.cat((x2, x3), dim = 1))
        return torch.squeeze(x4, dim = 1)
class Discriminator(nn.Module):
    """Convolutional real/fake discriminator with spectral-norm regularization.

    Four conv/pool stages reduce a flat ``input_dim``-sample input, then a
    spectrally-normalized linear layer plus sigmoid emits one probability
    per sample.

    NOTE(review): the Linear expects 128 features (64 channels x remaining
    time steps); with input_dim = 40 (as used in the training loop below)
    four /2 poolings leave 2 steps -> 64 * 2 = 128 — confirm for other sizes.
    """
    def __init__(self, input_dim):
        super(Discriminator, self).__init__()
        self.input_dim = input_dim
        self.block = nn.Sequential(
            Conv1dAuto(in_channels = 1, out_channels = 32, kernel_size = 3, bias = True),
            nn.BatchNorm1d(32),
            nn.MaxPool1d(kernel_size = 2, stride = 2),
            nn.ReLU(),
            nn.Dropout(0.15),
            spectral_norm(Conv1dAuto(in_channels = 32, out_channels = 32, kernel_size = 3, bias = False)),
            nn.BatchNorm1d(32),
            nn.MaxPool1d(kernel_size = 2, stride = 2),
            nn.ReLU(),
            nn.Dropout(0.15),
            spectral_norm(Conv1dAuto(in_channels = 32, out_channels = 64, kernel_size = 3, bias = False)),
            nn.BatchNorm1d(64),
            nn.MaxPool1d(kernel_size = 2, stride = 2),
            nn.ReLU(),
            nn.Dropout(0.15),
            spectral_norm(Conv1dAuto(in_channels = 64, out_channels = 64, kernel_size = 3, bias = False)),
            nn.BatchNorm1d(64),
            nn.MaxPool1d(kernel_size = 2, stride = 2),
            nn.ReLU(),
            nn.Dropout(0.15),
        )
        self.out_block = nn.Sequential(
            spectral_norm(nn.Linear(128, 1)),
            nn.Sigmoid(),
        )
    def forward(self, x):
        x = self.block(x.view(-1, 1, self.input_dim))
        x = self.out_block(x.view(-1, 128))
        return x
if config["exp"]["dataset_name"] == "LibriSpeech":
speaker_lst = os.listdir(clean_dataset_dir)
speaker_lst = validate_dir(speaker_lst)
N_epochs_tr = int(config['gan']['N_epochs_tr'])
batch_size = int(config['gan']['batch_size'])
# Create networks and optimizers
input_dim = 40
generator = Generator(input_dim).cuda()
discriminator = Discriminator(input_dim).cuda()
if os.path.exists('gan/generator_audio.pt'):
try:
generator.load_state_dict(torch.load('gan/generator_audio.pt'))
except RuntimeError:
print("Load error loading G, network will be recreated.")
if os.path.exists('gan/discriminator_audio.pt'):
try:
discriminator.load_state_dict(torch.load('gan/discriminator_audio.pt'))
except RuntimeError:
print("Load error loading D, network will be recreated.")
if config["gan"]["optimizer"] == "adam":
optimizer_g = torch.optim.Adam(generator.parameters(), lr = float(config["gan"]["learning_rate_g"]))
optimizer_d = torch.optim.Adam(discriminator.parameters(), lr = float(config["gan"]["learning_rate_d"]))
elif config["gan"]["optimizer"] == "rmsprop":
optimizer_g = torch.optim.RMSprop(generator.parameters(), lr = float(config["gan"]["learning_rate_g"]))
optimizer_d = torch.optim.RMSprop(discriminator.parameters(), lr = float(config["gan"]["learning_rate_d"]))
# Training
print("\nTraining started.\n")
for epoch in range(1, N_epochs_tr + 1):
print("------------------ Epoch", epoch, "/", N_epochs_tr, "------------------")
generator.train()
discriminator.train()
d_loss = 0
g_loss = 0
correlation_noisy = 0
correlation_g = 0
mse_noisy = 0
mse_g = 0
performance_metric = 0
total_batches = 0
for speaker in speaker_lst:
speaker_dir_clean = os.path.join(clean_dataset_dir, speaker)
speaker_dir_noisy = os.path.join(noisy_dataset_dir, speaker)
# Get chapters by speaker
chapter_lst = os.listdir(speaker_dir_clean)
chapter_lst = validate_dir(chapter_lst)
for chap in chapter_lst:
chapter_dir_clean = os.path.join(speaker_dir_clean, chap)
chapter_dir_noisy = os.path.join(speaker_dir_noisy, chap)
# Get utterances by speaker per chapter
utterance_lst = os.listdir(chapter_dir_clean)
utt_transcripitons, utterance_lst = get_utterances(utterance_lst)
j = 0
for utt in utterance_lst:
utterance_dir_clean = os.path.join(chapter_dir_clean, utt)
utterance_dir_noisy = os.path.join(chapter_dir_noisy, utt)
audio_clean, sample_rate = torchaudio.load(utterance_dir_clean)
audio_noisy, _ = torchaudio.load(utterance_dir_noisy)
#CMN_extraction = torchaudio.transforms.SlidingWindowCmn(cmn_window = 600, min_cmn_window = 100)
MFCC_extraction = torchaudio.transforms.MFCC(sample_rate = sample_rate, n_mfcc = 40).cuda()
#Spectrogram_extraction = torchaudio.transforms.Spectrogram()
mfcc_clean = MFCC_extraction(audio_clean.cuda())
mfcc_noisy = MFCC_extraction(audio_noisy.cuda())
utterance_features_clean = reshape_utterance(mfcc_clean)
utterance_features_noisy = reshape_utterance(mfcc_noisy)
number_of_batches = int(utterance_features_clean.shape[0] / batch_size)
last_batch_size = utterance_features_clean.shape[0] % batch_size
if last_batch_size > 0:
number_of_batches += 1
for batch in range(number_of_batches):
total_batches += 1
print("Batch: {} \r".format(total_batches), end = '')
data_clean = get_batch(utterance_features_clean, batch, batch_size)
data_noisy = get_batch(utterance_features_noisy, batch, batch_size)
#=== TRAINING ==================================================================================
real_labels = get_labels(data_clean.shape[0], float(config['gan']['real_label'])).cuda()
fake_labels = get_labels(data_clean.shape[0], 0).cuda()
# Train Discriminator
optimizer_d.zero_grad()
d_output_real = discriminator(data_clean)
z_noise = torch.randn(torch.unsqueeze(data_noisy, dim = 1).shape).cuda()
g_output = generator(torch.cat((z_noise, torch.unsqueeze(data_noisy, dim = 1)), dim = 1))
d_output_fake = discriminator(g_output)
loss_real = F.binary_cross_entropy(d_output_real, real_labels)
loss_fake = F.binary_cross_entropy(d_output_fake, fake_labels)
loss_d = loss_real + loss_fake
loss_d.backward()
optimizer_d.step()
# Train Generator
optimizer_g.zero_grad()
d_verdict = discriminator(generator(torch.cat((z_noise, torch.unsqueeze(data_noisy, dim = 1)), dim = 1)))
bce_loss = F.binary_cross_entropy(d_verdict, real_labels)
cycle_loss = F.l1_loss(data_clean, generator(torch.cat((z_noise, torch.unsqueeze(data_noisy, dim = 1)), dim = 1)))
loss_g = bce_loss + cycle_loss
loss_g.backward()
optimizer_g.step()
#==== Statistics ====
d_loss += loss_d.item()
g_loss += loss_g.item()
correlation_noisy += get_pearson_correlation(data_clean, data_noisy)
correlation_g += get_pearson_correlation(data_clean, g_output)
mse_noisy += get_mean_squared_error(data_clean, data_noisy)
mse_g += get_mean_squared_error(data_clean, g_output)
performance_metric += get_g_performance(data_clean, data_noisy, g_output)
print("Discriminator loss:\t %.4f | Generator loss:\t %.4f" % (round(d_loss/total_batches, 4), round(g_loss/total_batches, 4)))
print("Correlation G:\t\t %.4f | Correlation noisy:\t %.4f" % (
round(correlation_g/total_batches, 4), round(correlation_noisy/total_batches, 4)))
print("MSE G:\t\t\t %.4f | MSE noisy:\t\t %.4f" % (round(mse_g/total_batches, 4), round(mse_noisy/total_batches, 4)))
print("Performance:", round(performance_metric/total_batches, 4))
# Save after each speaker
torch.save(generator.state_dict(), 'gan/generator_audio.pt')
torch.save(discriminator.state_dict(), 'gan/discriminator_audio.pt')
print("")
d_loss = d_loss / total_batches
g_loss = g_loss / total_batches
correlation_noisy = correlation_noisy / total_batches
correlation_g = correlation_g / total_batches
mse_noisy = mse_noisy / total_batches
mse_g = mse_g / total_batches
performance_metric = performance_metric / total_batches
print("\nEpoch complete")
print("Discriminator loss:\t %.4f | Generator loss:\t %.4f" % (round(d_loss, 4), round(g_loss, 4)))
print("Correlation G:\t\t %.4f | Correlation noisy:\t %.4f" % (
round(correlation_g, 4), round(correlation_noisy, 4)))
print("MSE G:\t\t\t %.4f | MSE noisy:\t\t %.4f" % (round(mse_g, 4), round(mse_noisy, 4)))
print("Performance:", round(performance_metric, 4))
print("Total batches", total_batches)
#===============================================================================================================
print("\n\nTraining complete\n")
| 18,066 | 35.061876 | 144 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/data_io.py | ##########################################################
# pytorch-kaldi-gan
# Walter Heymans
# North West University
# 2020
# Adapted from:
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
import numpy as np
import sys
from utils import compute_cw_max, dict_fea_lab_arch, is_sequential_dict
import os
import configparser
import re, gzip, struct
def load_dataset(
    fea_scp, fea_opts, lab_folder, lab_opts, left, right, max_sequence_length, output_folder, fea_only=False
):
    """Read Kaldi features (and optionally alignments), chunk long
    sequences, and concatenate everything into flat arrays.

    Returns [chunk_names, fea_conc, lab_conc, end_index_fea, end_index_lab]
    where the end-index arrays give cumulative chunk boundaries into the
    concatenated feature/label arrays.
    """
    def _input_is_wav_file(fea_scp):
        # Peek at the first ark referenced by the scp: RIFF magic => raw wav.
        with open(fea_scp, "r") as f:
            first_line = f.readline()
            ark_file = first_line.split(" ")[1].split(":")[0]
        with open(ark_file, "rb") as f:
            first_ark_line = f.readline()
        return b"RIFF" in first_ark_line

    def _input_is_feature_file(fea_scp):
        return not _input_is_wav_file(fea_scp)

    def _read_features_and_labels_with_kaldi(fea_scp, fea_opts, fea_only, lab_folder, lab_opts, output_folder):
        # Streams features/labels through Kaldi command-line tools (pipes).
        fea = dict()
        lab = dict()
        if _input_is_feature_file(fea_scp):
            kaldi_bin = "copy-feats"
            read_function = read_mat_ark
        elif _input_is_wav_file(fea_scp):
            kaldi_bin = "wav-copy"
            read_function = read_vec_flt_ark
        fea = {
            k: m
            for k, m in read_function("ark:" + kaldi_bin + " scp:" + fea_scp + " ark:- |" + fea_opts, output_folder)
        }
        if not fea_only:
            lab = {
                k: v
                for k, v in read_vec_int_ark(
                    "gunzip -c " + lab_folder + "/ali*.gz | " + lab_opts + " " + lab_folder + "/final.mdl ark:- ark:-|",
                    output_folder,
                )
                if k in fea
            }  # Note that I'm copying only the aligments of the loaded fea
            fea = {
                k: v for k, v in fea.items() if k in lab
            }  # This way I remove all the features without an aligment (see log file in alidir "Did not Succeded")
        if "_ivector.lst" in fea_scp:
            # i-vectors are one per ~10 frames: repeat each vector so the
            # feature count matches the per-frame label count.
            for utterance in fea.keys():
                temp_features = fea[utterance]
                new_features = np.zeros((lab[utterance].shape[0], temp_features.shape[1]))
                for ivector in range(lab[utterance].shape[0]):
                    i_temp = int(ivector / 10)
                    new_features[ivector] = temp_features[i_temp]
                fea[utterance] = new_features
        return fea, lab

    def _chunk_features_and_labels(max_sequence_length, fea, lab, fea_only, input_is_wav):
        def _append_to_concat_list(fea_chunked, lab_chunked, fea_conc, lab_conc, name):
            # NOTE(review): appends to the enclosing-scope `snt_name` list and
            # uses the loop variable `k` for the single-chunk case (closure).
            for j in range(0, len(fea_chunked)):
                fea_conc.append(fea_chunked[j])
                lab_conc.append(lab_chunked[j])
                if len(fea_chunked) > 1:
                    snt_name.append(name + "_split" + str(j))
                else:
                    snt_name.append(k)
            return fea_conc, lab_conc

        def _chunk(max_sequence_length, fea, lab, fea_only):
            def _chunk_by_input_and_output_chunk_config(chunk_config, fea, lab, fea_only):
                """
                If the sequence length is above the threshold, we split it with a minimal length max/4
                If max length = 500, then the split will start at 500 + (500/4) = 625.
                A seq of length 625 will be splitted in one of 500 and one of 125
                """
                chunk_size_fea, chunk_step_fea, chunk_size_lab, chunk_step_lab = (
                    chunk_config["chunk_size_fea"],
                    chunk_config["chunk_step_fea"],
                    chunk_config["chunk_size_lab"],
                    chunk_config["chunk_step_lab"],
                )
                fea_chunked = list()
                lab_chunked = list()
                split_threshold_fea = chunk_size_fea + (chunk_size_fea / 4)
                if (len(fea) > chunk_size_fea) and chunk_size_fea > 0:
                    nr_of_chunks = (len(fea) + chunk_size_fea - 1) // chunk_size_fea
                    for i in range(nr_of_chunks):
                        chunk_start_fea = i * chunk_step_fea
                        if len(fea[chunk_start_fea:]) > split_threshold_fea:
                            chunk_end_fea = chunk_start_fea + chunk_size_fea
                            fea_chunk = fea[chunk_start_fea:chunk_end_fea]
                            if not fea_only:
                                chunk_start_lab = i * chunk_step_lab
                                chunk_end_lab = chunk_start_lab + chunk_size_lab
                                lab_chunk = lab[chunk_start_lab:chunk_end_lab]
                            else:
                                # Dummy labels so downstream shapes stay consistent.
                                lab_chunk = np.zeros((fea_chunk.shape[0],))
                            fea_chunked.append(fea_chunk)
                            lab_chunked.append(lab_chunk)
                        else:
                            # Remainder is short enough: emit it as the final chunk.
                            fea_chunk = fea[chunk_start_fea:]
                            if not fea_only:
                                chunk_start_lab = i * chunk_step_lab
                                lab_chunk = lab[chunk_start_lab:]
                            else:
                                lab_chunk = np.zeros((fea_chunk.shape[0],))
                            lab_chunked.append(lab_chunk)
                            fea_chunked.append(fea_chunk)
                            break
                else:
                    fea_chunked.append(fea)
                    if not fea_only:
                        lab_chunked.append(lab)
                    else:
                        lab_chunked.append(np.zeros((fea.shape[0],)))
                return fea_chunked, lab_chunked

            # Normalize max_sequence_length into a chunk-config dict: either a
            # single int (same size/step for fea and lab) or a 6-element spec.
            chunk_config = dict()
            if type(max_sequence_length) == dict:
                chunk_config["chunk_size_fea"] = max_sequence_length["chunk_size_fea"]
                chunk_config["chunk_step_fea"] = max_sequence_length["chunk_step_fea"]
                chunk_config["chunk_size_lab"] = max_sequence_length["chunk_size_lab"]
                chunk_config["chunk_step_lab"] = max_sequence_length["chunk_step_lab"]
            elif type(max_sequence_length) == int:
                chunk_config["chunk_size_fea"] = max_sequence_length
                chunk_config["chunk_step_fea"] = max_sequence_length
                chunk_config["chunk_size_lab"] = max_sequence_length
                chunk_config["chunk_step_lab"] = max_sequence_length
            else:
                raise ValueError("Unknown type of max_sequence_length")
            return _chunk_by_input_and_output_chunk_config(chunk_config, fea, lab, fea_only)

        snt_name = list()
        fea_conc = list()
        lab_conc = list()
        # Stable sort: by key first, then by sequence length.
        feature_keys_soted_by_sequence_length = sorted(sorted(fea.keys()), key=lambda k: len(fea[k]))
        for k in feature_keys_soted_by_sequence_length:
            fea_el = fea[k]
            lab_el = None
            if not fea_only:
                lab_el = lab[k]
            fea_chunked, lab_chunked = _chunk(max_sequence_length, fea_el, lab_el, fea_only)
            fea_conc, lab_conc = _append_to_concat_list(fea_chunked, lab_chunked, fea_conc, lab_conc, k)
        return fea_conc, lab_conc, snt_name

    def _concatenate_features_and_labels(fea_conc, lab_conc):
        def _sort_chunks_by_length(fea_conc, lab_conc):
            fea_zipped = zip(fea_conc, lab_conc)
            fea_sorted = sorted(fea_zipped, key=lambda x: x[0].shape[0])
            fea_conc, lab_conc = zip(*fea_sorted)
            return fea_conc, lab_conc

        def _get_end_index_from_list(conc):
            # Cumulative lengths: boundary index of each chunk in the concat array.
            end_snt = 0
            end_index = list()
            for entry in conc:
                end_snt = end_snt + entry.shape[0]
                end_index.append(end_snt)
            return end_index

        fea_conc, lab_conc = _sort_chunks_by_length(fea_conc, lab_conc)
        end_index_fea = _get_end_index_from_list(fea_conc)
        end_index_lab = _get_end_index_from_list(lab_conc)
        fea_conc = np.concatenate(fea_conc)
        lab_conc = np.concatenate(lab_conc)
        return fea_conc, lab_conc, end_index_fea, end_index_lab

    def _match_feature_and_label_sequence_lengths(fea, lab, max_sequence_length):
        ALLOW_FRAME_DIFF_LARGER_ONE = False

        def _adjust_feature_sequence_length(fea, nr_of_fea_for_lab):
            # Truncate or zero-pad the sample axis to the expected length.
            nr_of_fea = fea.shape[0]
            if nr_of_fea > nr_of_fea_for_lab:
                fea_adj = np.take(fea, range(nr_of_fea_for_lab), axis=0)
            elif nr_of_fea < nr_of_fea_for_lab:
                padding = np.zeros(shape=(nr_of_fea_for_lab - nr_of_fea,) + fea.shape[1:])
                fea_adj = np.concatenate([fea, padding], axis=0)
            else:
                fea_adj = fea
            return fea_adj

        chunk_size_fea = max_sequence_length["chunk_size_fea"]
        chunk_step_fea = max_sequence_length["chunk_step_fea"]
        chunk_size_lab = max_sequence_length["chunk_size_lab"]
        chunk_step_lab = max_sequence_length["chunk_step_lab"]
        window_shift = max_sequence_length["window_shift"]
        window_size = max_sequence_length["window_size"]
        for k in fea.keys():
            nr_of_fea = fea[k].shape[0]
            nr_of_lab = lab[k].shape[0]
            # Expected raw-sample count implied by the label frame count.
            nr_of_fea_for_lab = (nr_of_lab - 1) * window_shift + window_size
            if abs(nr_of_fea - nr_of_fea_for_lab) > window_shift and not ALLOW_FRAME_DIFF_LARGER_ONE:
                raise ValueError(
                    "Nr. of features: "
                    + str(nr_of_fea)
                    + " does not match nr. of labels: "
                    + str(nr_of_lab)
                    + " with expected nr. of features: "
                    + str(nr_of_fea_for_lab)
                )
            fea[k] = _adjust_feature_sequence_length(fea[k], nr_of_fea_for_lab)
        return fea, lab

    fea, lab = _read_features_and_labels_with_kaldi(fea_scp, fea_opts, fea_only, lab_folder, lab_opts, output_folder)
    if _input_is_wav_file(fea_scp) and (not fea_only):
        fea, lab = _match_feature_and_label_sequence_lengths(fea, lab, max_sequence_length)
    fea_chunks, lab_chunks, chunk_names = _chunk_features_and_labels(
        max_sequence_length, fea, lab, fea_only, _input_is_wav_file(fea_scp)
    )
    fea_conc, lab_conc, end_index_fea, end_index_lab = _concatenate_features_and_labels(fea_chunks, lab_chunks)
    return [chunk_names, fea_conc, lab_conc, np.asarray(end_index_fea), np.asarray(end_index_lab)]
def context_window_old(fea, left, right):
    """Reference (loop-based) context-window splicing.

    Row i of the result is rows [i-left, i+right] of ``fea`` flattened, so
    the output has (N - left - right) rows of width N_fea*(left+right+1).
    """
    n_rows, n_feats = fea.shape
    frames = np.empty((n_rows - left - right, n_feats * (left + right + 1)))
    for center in range(left, n_rows - right):
        # One contiguous slice covers left context, center frame and right context.
        frames[center - left] = fea[center - left : center + right + 1].flatten()
    return frames
def context_window(fea, left, right):
    """Vectorized context-window splicing (np.roll per lag).

    Equivalent to context_window_old for the retained interior rows.
    """
    n_rows, n_feats = fea.shape
    spliced = np.empty([n_rows, n_feats * (left + right + 1)])
    col = 0
    for lag in range(-left, right + 1):
        # Shifted copy of the whole matrix fills one band of columns.
        spliced[:, col : col + n_feats] = np.roll(fea, -lag, axis=0)
        col += n_feats
    # Edge rows contain wrapped-around frames; drop them.
    return spliced[left : n_rows - right]
def load_chunk(
    fea_scp, fea_opts, lab_folder, lab_opts, left, right, max_sequence_length, output_folder, fea_only=False
):
    ''' CUSTOM CODE
    Load one chunk of Kaldi features + labels, splice the context window,
    apply mean/variance normalization, and append the labels as the last
    column of the returned data matrix.
    Returns [sentence_names, data_set (features + label column), end_index_fea].
    '''
    # open the file
    [data_name, data_set, data_lab, end_index_fea, end_index_lab] = load_dataset(
        fea_scp, fea_opts, lab_folder, lab_opts, left, right, max_sequence_length, output_folder, fea_only
    )
    # TODO: currently end_index_lab is ignored
    # Context window
    if left != 0 or right != 0:
        data_set = context_window(data_set, left, right)
        # Splicing drops `left` rows at the start and `right` at the end,
        # so shift the chunk boundaries accordingly.
        end_index_fea = end_index_fea - left
        end_index_fea[-1] = end_index_fea[-1] - right
    # mean and variance normalization
    data_set = (data_set - np.mean(data_set, axis=0)) / np.std(data_set, axis=0)
    # Label processing
    # Shift labels so the smallest id becomes 0, then trim the frames lost
    # to the context window.
    data_lab = data_lab - data_lab.min()
    if right > 0:
        data_lab = data_lab[left:-right]
    else:
        data_lab = data_lab[left:]
    data_set = np.column_stack((data_set, data_lab))
    return [data_name, data_set, end_index_fea]
def load_counts(class_counts_file):
    """Read a Kaldi class-count file: one line like '[ 1.0 2.0 ... ]'."""
    with open(class_counts_file) as counts_file:
        first_line = counts_file.readline().strip().strip("[]").strip()
    return np.array([np.float32(token) for token in first_line.split()])
def read_lab_fea_refac01(cfg_file, fea_only, shared_list, output_folder):
    """Load one data chunk (features and, unless fea_only, labels) as
    described by the chunk-specific cfg file.

    Appends to `shared_list`, in order: data_name, feature end-indices,
    label end-indices, fea_dict, lab_dict, arch_dict, and a dict
    {"input": feature_matrix, "ref": label_vector}.

    Refactored variant of read_lab_fea() below; relies on the sibling
    helpers load_dataset/context_window/dict_fea_lab_arch/is_sequential_dict/
    compute_cw_max defined elsewhere in this file.
    """
    def _read_chunk_specific_config(cfg_file):
        # Parse the chunk cfg file; abort the process if it does not exist.
        if not (os.path.exists(cfg_file)):
            sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
            sys.exit(0)
        else:
            config = configparser.ConfigParser()
            config.read(cfg_file)
        return config
    def _read_from_config(config, fea_only):
        # Pull to_do / max_seq_length / fea / lab / arch settings out of the config.
        def _get_max_seq_length_from_config_str(config_str):
            # Either one int, or six comma-separated ints describing the
            # fea/lab chunking and windowing setup.
            max_seq_length = [int(e) for e in config_str.split(",")]
            if len(max_seq_length) == 1:
                max_seq_length = max_seq_length[0]
            else:
                assert len(max_seq_length) == 6
                max_seq_length_list = max_seq_length
                max_seq_length = dict()
                max_seq_length["chunk_size_fea"] = max_seq_length_list[0]
                max_seq_length["chunk_step_fea"] = max_seq_length_list[1]
                max_seq_length["chunk_size_lab"] = max_seq_length_list[2]
                max_seq_length["chunk_step_lab"] = max_seq_length_list[3]
                max_seq_length["window_shift"] = max_seq_length_list[4]
                max_seq_length["window_size"] = max_seq_length_list[5]
            return max_seq_length
        to_do = config["exp"]["to_do"]
        if to_do == "train":
            max_seq_length = _get_max_seq_length_from_config_str(config["batches"]["max_seq_length_train"])
        if to_do == "valid":
            max_seq_length = _get_max_seq_length_from_config_str(config["batches"]["max_seq_length_valid"])
        if to_do == "forward":
            max_seq_length = -1  # -1: do not break forward sentences into pieces
            fea_only = True  # no labels are available/needed at forward time
        fea_dict, lab_dict, arch_dict = dict_fea_lab_arch(config, fea_only)
        seq_model = is_sequential_dict(config, arch_dict)
        return to_do, max_seq_length, fea_dict, lab_dict, arch_dict, seq_model
    def _read_features_and_labels(fea_dict, lab_dict, max_seq_length, fea_only, output_folder):
        # Load every (feature, label) combination, align their context
        # windows, and column-stack all features into one matrix.
        def _get_fea_config_from_dict(fea_dict_entr):
            fea_scp = fea_dict_entr[1]
            fea_opts = fea_dict_entr[2]
            cw_left = int(fea_dict_entr[3])
            cw_right = int(fea_dict_entr[4])
            return fea_scp, fea_opts, cw_left, cw_right
        def _get_lab_config_from_dict(lab_dict_entr, fea_only):
            if fea_only:
                lab_folder = None
                lab_opts = None
            else:
                lab_folder = lab_dict_entr[1]
                lab_opts = lab_dict_entr[2]
            return lab_folder, lab_opts
        def _compensate_for_different_context_windows(
            data_set_fea,
            data_set_lab,
            cw_left_max,
            cw_left,
            cw_right_max,
            cw_right,
            data_end_index_fea,
            data_end_index_lab,
        ):
            # Trim every feature stream to the rows valid under the LARGEST
            # context window, so all streams share the same time axis.
            data_set_lab = np.take(
                data_set_lab,
                range(cw_left_max - cw_left, data_set_lab.shape[0] - (cw_right_max - cw_right)),
                axis=0,
                mode="clip",
            )
            data_set_fea = np.take(
                data_set_fea,
                range(cw_left_max - cw_left, data_set_fea.shape[0] - (cw_right_max - cw_right)),
                axis=0,
                mode="clip",
            )
            data_end_index_fea = data_end_index_fea - (cw_left_max - cw_left)
            data_end_index_lab = data_end_index_lab - (cw_left_max - cw_left)
            data_end_index_fea[-1] = data_end_index_fea[-1] - (cw_right_max - cw_right)
            data_end_index_lab[-1] = data_end_index_lab[-1] - (cw_right_max - cw_right)
            return data_set_lab, data_set_fea, data_end_index_fea, data_end_index_lab
        def _update_data(data_set, labs, fea_dict, fea, fea_index, data_set_fea, labs_fea, cnt_fea, cnt_lab):
            # First feature+label: initialize; afterwards column-stack new
            # features/labels and record each feature's column range
            # (start, end, width) in fea_dict[fea][5:8].
            if cnt_fea == 0 and cnt_lab == 0:
                data_set = data_set_fea
                labs = labs_fea
                fea_dict[fea].append(fea_index)
                fea_index = fea_index + data_set_fea.shape[1]
                fea_dict[fea].append(fea_index)
                fea_dict[fea].append(fea_dict[fea][6] - fea_dict[fea][5])
            elif cnt_fea == 0 and (not cnt_lab == 0):
                labs = np.column_stack((labs, labs_fea))
            elif (not cnt_fea == 0) and cnt_lab == 0:
                data_set = np.column_stack((data_set, data_set_fea))
                fea_dict[fea].append(fea_index)
                fea_index = fea_index + data_set_fea.shape[1]
                fea_dict[fea].append(fea_index)
                fea_dict[fea].append(fea_dict[fea][6] - fea_dict[fea][5])
            return data_set, labs, fea_dict, fea_index
        def _check_consistency(
            data_name,
            data_name_fea,
            data_end_index_fea_ini,
            data_end_index_fea,
            data_end_index_lab_ini,
            data_end_index_lab,
        ):
            # All feature streams must cover the same sentences with the same
            # per-sentence end indices; otherwise abort.
            if not (data_name == data_name_fea):
                sys.stderr.write(
                    'ERROR: different sentence ids are detected for the different features. Plase check again input feature lists"\n'
                )
                sys.exit(0)
            if not (data_end_index_fea_ini == data_end_index_fea).all():
                sys.stderr.write('ERROR end_index must be the same for all the sentences"\n')
                sys.exit(0)
            if not (data_end_index_lab_ini == data_end_index_lab).all():
                sys.stderr.write('ERROR end_index must be the same for all the sentences"\n')
                sys.exit(0)
        def _update_lab_dict(lab_dict, data_set):
            # Record, per label, the column index it would occupy after the
            # feature columns.
            cnt_lab = 0
            for lab in lab_dict.keys():
                lab_dict[lab].append(data_set.shape[1] + cnt_lab)
                cnt_lab = cnt_lab + 1
            return lab_dict
        def _load_chunk_refac01(
            fea_scp, fea_opts, lab_folder, lab_opts, left, right, max_sequence_length, output_folder, fea_only=False
        ):
            # Like module-level load_chunk(), but keeps features and labels
            # separate and returns both end-index arrays.
            [data_name, data_set, data_lab, end_index_fea, end_index_lab] = load_dataset(
                fea_scp, fea_opts, lab_folder, lab_opts, left, right, max_sequence_length, output_folder, fea_only
            )
            # TODO: this function will currently only work well if no context window is given or fea and lab have the same time dimensionality
            # Context window
            if left != 0 or right != 0:
                data_set = context_window(data_set, left, right)
                end_index_fea = end_index_fea - left
                end_index_lab = end_index_lab - left
                end_index_fea[-1] = end_index_fea[-1] - right
                end_index_lab[-1] = end_index_lab[-1] - right
            # mean and variance normalization
            data_set = (data_set - np.mean(data_set, axis=0)) / np.std(data_set, axis=0)
            # Label processing: remap to start at 0, trim border frames.
            data_lab = data_lab - data_lab.min()
            if right > 0:
                data_lab = data_lab[left:-right]
            else:
                data_lab = data_lab[left:]
            if len(data_set.shape) == 1:
                data_set = np.expand_dims(data_set, -1)
            return [data_name, data_set, data_lab, end_index_fea, end_index_lab]
        cw_left_max, cw_right_max = compute_cw_max(fea_dict)
        fea_index = 0
        cnt_fea = 0
        data_name = None
        data_end_index_fea_ini = None
        data_end_index_lab_ini = None
        data_set = None
        labs = None
        for fea in fea_dict.keys():
            fea_scp, fea_opts, cw_left, cw_right = _get_fea_config_from_dict(fea_dict[fea])
            cnt_lab = 0
            if fea_only:
                # Production case: no labels, use a single dummy entry.
                lab_dict.update({"lab_name": "none"})
            for lab in lab_dict.keys():
                lab_folder, lab_opts = _get_lab_config_from_dict(lab_dict[lab], fea_only)
                data_name_fea, data_set_fea, data_set_lab, data_end_index_fea, data_end_index_lab = _load_chunk_refac01(
                    fea_scp, fea_opts, lab_folder, lab_opts, cw_left, cw_right, max_seq_length, output_folder, fea_only
                )
                if sum([abs(e) for e in [cw_left_max, cw_right_max, cw_left, cw_right]]) != 0:
                    data_set_lab, data_set_fea, data_end_index_fea, data_end_index_lab = _compensate_for_different_context_windows(
                        data_set_fea,
                        data_set_lab,
                        cw_left_max,
                        cw_left,
                        cw_right_max,
                        cw_right,
                        data_end_index_fea,
                        data_end_index_lab,
                    )
                if cnt_fea == 0 and cnt_lab == 0:
                    # Reference sentence list/end-indices for consistency checks.
                    data_end_index_fea_ini = data_end_index_fea
                    data_end_index_lab_ini = data_end_index_lab
                    data_name = data_name_fea
                data_set, labs, fea_dict, fea_index = _update_data(
                    data_set, labs, fea_dict, fea, fea_index, data_set_fea, data_set_lab, cnt_fea, cnt_lab
                )
                _check_consistency(
                    data_name,
                    data_name_fea,
                    data_end_index_fea_ini,
                    data_end_index_fea,
                    data_end_index_lab_ini,
                    data_end_index_lab,
                )
                cnt_lab = cnt_lab + 1
            cnt_fea = cnt_fea + 1
        if not fea_only:
            lab_dict = _update_lab_dict(lab_dict, data_set)
        return data_name, data_end_index_fea_ini, data_end_index_lab_ini, fea_dict, lab_dict, data_set, labs
    def _reorder_data_set(data_set, labs, seq_model, to_do):
        # For non-sequential models (and not at forward time) shuffle frames,
        # keeping feature rows and labels paired via a joint matrix.
        if not (seq_model) and to_do != "forward" and (data_set.shape[0] == labs.shape[0]):
            data_set_shape = data_set.shape[1]
            data_set_joint = np.column_stack((data_set, labs))
            ganset = True
            try:
                # Frame shuffling is disabled when a GAN set is being created.
                if str(config["ganset"]["create_set"]) == "True":
                    ganset = False
            except KeyError:
                pass
            if ganset:
                # NOTE(review): this shuffles 'data_set', but both outputs are
                # re-read from the (unshuffled) 'data_set_joint' below, so the
                # shuffle appears to have no effect. Compare read_lab_fea(),
                # which shuffles the joint matrix. Likely should be
                # np.random.shuffle(data_set_joint) -- confirm before changing.
                np.random.shuffle(data_set)
            data_set = data_set_joint[:, :data_set_shape]
            labs = np.squeeze(data_set_joint[:, data_set_shape:], axis=-1)
        return data_set, labs
    def _append_to_shared_list(
        shared_list, data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set
    ):
        # Output protocol consumed by the caller: order matters.
        shared_list.append(data_name)
        shared_list.append(data_end_index_fea)
        shared_list.append(data_end_index_lab)
        shared_list.append(fea_dict)
        shared_list.append(lab_dict)
        shared_list.append(arch_dict)
        shared_list.append(data_set)
        return shared_list
    config = _read_chunk_specific_config(cfg_file)
    to_do, max_seq_length, fea_dict, lab_dict, arch_dict, seq_model = _read_from_config(config, fea_only)
    data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, data_set, labs = _read_features_and_labels(
        fea_dict, lab_dict, max_seq_length, fea_only, output_folder
    )
    data_set, labs = _reorder_data_set(data_set, labs, seq_model, to_do)
    data_set = {"input": data_set, "ref": labs}
    shared_list = _append_to_shared_list(
        shared_list, data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set
    )
def read_lab_fea(cfg_file, fea_only, shared_list, output_folder):
    """Load one data chunk (features and, unless fea_only, labels) as
    described by the chunk-specific cfg file, and append the results to
    `shared_list` in the order: data_name, end indices, fea_dict, lab_dict,
    arch_dict, data_set (features with labels as trailing columns).

    Original (non-refactored) variant; see read_lab_fea_refac01 above.
    """
    # Reading chunk-specific cfg file (first argument-mandatory file)
    if not (os.path.exists(cfg_file)):
        sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
        sys.exit(0)
    else:
        config = configparser.ConfigParser()
        config.read(cfg_file)
    # Reading some cfg parameters
    to_do = config["exp"]["to_do"]
    if to_do == "train":
        max_seq_length = int(
            config["batches"]["max_seq_length_train"]
        )  # *(int(info_file[-13:-10])+1) # increasing over the epochs
    if to_do == "valid":
        max_seq_length = int(config["batches"]["max_seq_length_valid"])
    if to_do == "forward":
        max_seq_length = -1  # -1: do not break forward sentences into pieces
    [fea_dict, lab_dict, arch_dict] = dict_fea_lab_arch(config, fea_only)
    [cw_left_max, cw_right_max] = compute_cw_max(fea_dict)
    fea_index = 0
    cnt_fea = 0
    for fea in fea_dict.keys():
        # reading the features
        fea_scp = fea_dict[fea][1]
        fea_opts = fea_dict[fea][2]
        cw_left = int(fea_dict[fea][3])
        cw_right = int(fea_dict[fea][4])
        cnt_lab = 0
        # Production case, we don't have labels (lab_name = none)
        if fea_only:
            lab_dict.update({"lab_name": "none"})
        for lab in lab_dict.keys():
            # Production case, we don't have labels (lab_name = none)
            if fea_only:
                lab_folder = None
                lab_opts = None
            else:
                lab_folder = lab_dict[lab][1]
                lab_opts = lab_dict[lab][2]
            [data_name_fea, data_set_fea, data_end_index_fea] = load_chunk(
                fea_scp, fea_opts, lab_folder, lab_opts, cw_left, cw_right, max_seq_length, output_folder, fea_only
            )
            # making the same dimension for all the features (compensating for different context windows)
            # load_chunk() returns labels as the last column: split them off here.
            labs_fea = data_set_fea[cw_left_max - cw_left : data_set_fea.shape[0] - (cw_right_max - cw_right), -1]
            data_set_fea = data_set_fea[cw_left_max - cw_left : data_set_fea.shape[0] - (cw_right_max - cw_right), 0:-1]
            data_end_index_fea = data_end_index_fea - (cw_left_max - cw_left)
            data_end_index_fea[-1] = data_end_index_fea[-1] - (cw_right_max - cw_right)
            if cnt_fea == 0 and cnt_lab == 0:
                # First feature+label pair initializes the chunk-wide arrays.
                data_set = data_set_fea
                labs = labs_fea
                data_end_index = data_end_index_fea
                data_name = data_name_fea
                # Record this feature's column range (start, end, width) in
                # fea_dict[fea][5:8].
                fea_dict[fea].append(fea_index)
                fea_index = fea_index + data_set_fea.shape[1]
                fea_dict[fea].append(fea_index)
                fea_dict[fea].append(fea_dict[fea][6] - fea_dict[fea][5])
            else:
                # New label for an existing feature: stack the label column.
                if cnt_fea == 0:
                    labs = np.column_stack((labs, labs_fea))
                # New feature: stack its columns and record its column range.
                if cnt_lab == 0:
                    data_set = np.column_stack((data_set, data_set_fea))
                    fea_dict[fea].append(fea_index)
                    fea_index = fea_index + data_set_fea.shape[1]
                    fea_dict[fea].append(fea_index)
                    fea_dict[fea].append(fea_dict[fea][6] - fea_dict[fea][5])
            # Checks if lab_names are the same for all the features
            if not (data_name == data_name_fea):
                sys.stderr.write(
                    'ERROR: different sentence ids are detected for the different features. Plase check again input feature lists"\n'
                )
                sys.exit(0)
            # Checks if end indexes are the same for all the features
            if not (data_end_index == data_end_index_fea).all():
                sys.stderr.write('ERROR end_index must be the same for all the sentences"\n')
                sys.exit(0)
            cnt_lab = cnt_lab + 1
        cnt_fea = cnt_fea + 1
    cnt_lab = 0
    if not fea_only:
        # Record, per label, the column index it occupies after the features.
        for lab in lab_dict.keys():
            lab_dict[lab].append(data_set.shape[1] + cnt_lab)
            cnt_lab = cnt_lab + 1
        data_set = np.column_stack((data_set, labs))  # appends labels next to features
    # check automatically if the model is sequential
    seq_model = is_sequential_dict(config, arch_dict)
    ganset = True
    try:
        # Frame shuffling is disabled when a GAN set is being created.
        if str(config["ganset"]["create_set"]) == "True":
            ganset = False
    except KeyError:
        pass
    # Randomize if the model is not sequential
    if not (seq_model) and to_do != "forward" and ganset:
        np.random.shuffle(data_set)
    # Split dataset in many part. If the dataset is too big, we can have issues to copy it into the shared memory (due to pickle limits)
    # N_split=10
    # data_set=np.array_split(data_set, N_split)
    # Adding all the elements in the shared list
    shared_list.append(data_name)
    shared_list.append(data_end_index)
    shared_list.append(fea_dict)
    shared_list.append(lab_dict)
    shared_list.append(arch_dict)
    shared_list.append(data_set)
# The following libraries are copied from kaldi-io-for-python project (https://github.com/vesis84/kaldi-io-for-python)
# Copyright 2014-2016 Brno University of Technology (author: Karel Vesely)
# Licensed under the Apache License, Version 2.0 (the "License")
#################################################
# Define all custom exceptions,
class UnsupportedDataType(Exception):
    """Raised by write_vec_flt/write_mat when dtype is not float32/float64."""
    pass
class UnknownVectorHeader(Exception):
    """Raised when a binary vector header is neither 'FV ' nor 'DV '."""
    pass
class UnknownMatrixHeader(Exception):
    """Raised when a binary matrix header is not 'CM*'/'FM '/'DM '."""
    pass
class BadSampleSize(Exception):
    """Raised when a vector/matrix sample size is not 4 or 8 bytes."""
    pass
class BadInputFormat(Exception):
    """Raised on unexpected EOF while reading an ascii kaldi matrix."""
    pass
class SubprocessFailed(Exception):
    """Raised by popen()'s cleanup thread when a piped command exits with a
    positive return code."""
    pass
#################################################
# Data-type independent helper functions,
def open_or_fd(file, output_folder, mode="rb"):
    """ fd = open_or_fd(file)
    Open file, gzipped file, pipe, or forward the file-descriptor.
    Eventually seeks in the 'file' argument contains ':offset' suffix.

    file : filename (optionally 'ark:'/'scp:'-prefixed and/or ':offset'
           suffixed), a pipe spec ('cmd |' for reading, '| cmd' for writing),
           or an already-opened file object (forwarded unchanged, apart from
           an optional seek).
    output_folder : folder whose log.log receives stderr of spawned pipes.
    mode : open mode used for plain and gzipped files (pipes pick their own).

    Fixes: 'offset != None' -> 'is not None' (idiomatic None test);
    'file[-1]'/'file[0]' indexing -> endswith/startswith, which cannot raise
    IndexError on an empty filename.
    """
    offset = None
    try:
        # strip 'ark:' prefix from r{x,w}filename (optional),
        if re.search("^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:", file):
            (prefix, file) = file.split(":", 1)
        # separate offset from filename (optional),
        if re.search(":[0-9]+$", file):
            (file, offset) = file.rsplit(":", 1)
        # input pipe?
        if file.endswith("|"):
            fd = popen(file[:-1], output_folder, "rb")  # custom,
        # output pipe?
        elif file.startswith("|"):
            fd = popen(file[1:], output_folder, "wb")  # custom,
        # is it gzipped?
        elif file.split(".")[-1] == "gz":
            fd = gzip.open(file, mode)
        # a normal file...
        else:
            fd = open(file, mode)
    except TypeError:
        # 'file' is an already-opened file descriptor (re.search raised above),
        fd = file
    # Eventually seek to offset,
    if offset is not None:
        fd.seek(int(offset))
    return fd
# based on '/usr/local/lib/python3.4/os.py'
def popen(cmd, output_folder, mode="rb"):
    """Run 'cmd' through a shell and return one of its standard streams.

    mode 'r'/'rb' : the child's stdout (text / binary).
    mode 'w'/'wb' : the child's stdin (text / binary).
    The child's stderr is appended to '<output_folder>/log.log'.
    A background thread waits for the child and raises SubprocessFailed if
    it exits with a positive return code.
    """
    if not isinstance(cmd, str):
        raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
    import subprocess, io, threading

    # cleanup function for subprocesses,
    def cleanup(proc, cmd):
        ret = proc.wait()
        if ret > 0:
            raise SubprocessFailed("cmd %s returned %d !" % (cmd, ret))
        return

    # sanity,
    if mode not in ("r", "rb", "w", "wb"):
        raise ValueError("invalid mode %s" % mode)
    err = open(output_folder + "/log.log", "a")
    reading = mode in ("r", "rb")
    if reading:
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=err)
        stream = proc.stdout
    else:
        proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stderr=err)
        stream = proc.stdin
    threading.Thread(target=cleanup, args=(proc, cmd)).start()  # clean-up thread,
    # text modes get a decoding wrapper, binary modes the raw pipe,
    return io.TextIOWrapper(stream) if mode in ("r", "w") else stream
def read_key(fd):
    """ [key] = read_key(fd)
    Read the utterance-key from the opened ark/stream descriptor 'fd'.

    The key is the byte sequence up to the first space (latin1-decoded).
    Returns None at end of file.

    Fixes: the format-check regex used "\\S" inside a non-raw string (an
    invalid escape sequence, a SyntaxWarning on modern Python) -> raw string;
    '!= None' -> 'is not None'.
    """
    key = ""
    while True:
        char = fd.read(1).decode("latin1")
        if char == "" or char == " ":  # EOF, or the key/data separator
            break
        key += char
    key = key.strip()
    if key == "":
        return None  # end of file,
    assert re.match(r"^\S+$", key) is not None  # check format (no whitespace!)
    return key
#################################################
# Integer vectors (alignments, ...),
def read_ali_ark(file_or_fd, output_folder):
    """ Alias to 'read_vec_int_ark()' """
    # Kaldi alignments are stored as integer vectors, so reading an
    # alignment ark is exactly reading an int-vector ark.
    return read_vec_int_ark(file_or_fd, output_folder)
def read_vec_int_ark(file_or_fd, output_folder):
    """ generator(key,vec) = read_vec_int_ark(file_or_fd)
    Create generator of (key,vector<int>) tuples, which reads from the ark file/stream.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
    Read ark to a 'dictionary':
    d = { u:d for u,d in kaldi_io.read_vec_int_ark(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        while True:
            key = read_key(fd)
            if not key:
                break  # read_key() returned None: end of stream
            yield key, read_vec_int(fd, output_folder)
    finally:
        # Only close descriptors this function opened itself.
        if fd is not file_or_fd:
            fd.close()
def read_vec_int(file_or_fd, output_folder):
    """ [int-vec] = read_vec_int(file_or_fd)
    Read kaldi integer vector, ascii or binary input,

    Binary layout: '\\0B' flag, '\\4' int32-size marker, int32 dim, then
    `dim` (int8 size, int32 value) pairs. Ascii: whitespace-separated ints,
    optionally wrapped in '[' ']'.
    """
    fd = open_or_fd(file_or_fd, output_folder)
    binary = fd.read(2).decode()
    if binary == "\0B":  # binary flag
        assert fd.read(1).decode() == "\4"
        # int-size
        vec_size = np.frombuffer(fd.read(4), dtype="int32", count=1)[0]  # vector dim
        if vec_size == 0:
            return np.array([], dtype="int32")
        # Elements from int32 vector are sored in tuples: (sizeof(int32), value),
        vec = np.frombuffer(fd.read(vec_size * 5), dtype=[("size", "int8"), ("value", "int32")], count=vec_size)
        assert vec[0]["size"] == 4  # int32 size,
        ans = vec[:]["value"]  # values are in 2nd column,
    else:  # ascii,
        # The two bytes already read are part of the first token.
        arr = (binary + fd.readline().decode()).strip().split()
        try:
            arr.remove("[")
            arr.remove("]")  # optionally
        except ValueError:
            pass
        ans = np.array(arr, dtype=int)
    if fd is not file_or_fd:
        fd.close()  # cleanup
    return ans
# Writing,
def write_vec_int(file_or_fd, output_folder, v, key=""):
    """ write_vec_int(f, v, key='')
    Write a binary kaldi integer vector to filename or stream.
    Arguments:
    file_or_fd : filename or opened file descriptor for writing,
    v : the vector to be stored,
    key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
    Example of writing single vector:
    kaldi_io.write_vec_int(filename, vec)
    Example of writing arkfile:
    with open(ark_file,'w') as f:
    for key,vec in dict.iteritems():
    kaldi_io.write_vec_flt(f, vec, key=key)
    """
    fd = open_or_fd(file_or_fd, output_folder, mode="wb")
    if sys.version_info[0] == 3:
        # NOTE(review): assumes fd exposes a '.mode' attribute (a real file);
        # in-memory streams like BytesIO have no '.mode' -- confirm callers.
        assert fd.mode == "wb"
    try:
        if key != "":
            fd.write((key + " ").encode("latin1"))  # ark-files have keys (utterance-id),
        fd.write("\0B".encode())  # we write binary!
        # dim,
        fd.write("\4".encode())  # int32 type,
        fd.write(struct.pack(np.dtype("int32").char, v.shape[0]))
        # data: each element as an (int32-size marker, int32 value) pair,
        for i in range(len(v)):
            fd.write("\4".encode())  # int32 type,
            fd.write(struct.pack(np.dtype("int32").char, v[i]))  # binary,
    finally:
        if fd is not file_or_fd:
            fd.close()
#################################################
# Float vectors (confidences, ivectors, ...),
# Reading,
def read_vec_flt_scp(file_or_fd, output_folder):
    """ generator(key,mat) = read_vec_flt_scp(file_or_fd)
    Returns generator of (key,vector) tuples, read according to kaldi scp.
    file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
    Iterate the scp:
    for key,vec in kaldi_io.read_vec_flt_scp(file):
    ...
    Read scp to a 'dictionary':
    d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        for line in fd:
            # NOTE(review): 'rxfile' keeps the trailing newline. With a
            # ':offset' suffix open_or_fd() tolerates it (the regex '$'
            # matches before '\n' and int() strips whitespace); for an
            # offset-less scp entry the newline reaches open() -- confirm
            # scp files used here always carry offsets.
            (key, rxfile) = line.decode().split(" ")
            vec = read_vec_flt(rxfile, output_folder)
            yield key, vec
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_vec_flt_ark(file_or_fd, output_folder):
    """ generator(key,vec) = read_vec_flt_ark(file_or_fd)
    Create generator of (key,vector<float>) tuples, reading from an ark file/stream.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
    Read ark to a 'dictionary':
    d = { u:d for u,d in kaldi_io.read_vec_flt_ark(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        while True:
            key = read_key(fd)
            if not key:
                break  # read_key() returned None: end of stream
            yield key, read_vec_flt(fd, output_folder)
    finally:
        # Only close descriptors this function opened itself.
        if fd is not file_or_fd:
            fd.close()
def read_vec_flt(file_or_fd, output_folder):
    """ [flt-vec] = read_vec_flt(file_or_fd)
    Read kaldi float vector, ascii or binary input,

    Dispatches on the first two bytes: '\\0B' -> kaldi binary vector,
    'RI' -> RIFF/WAVE audio, anything else -> ascii numbers.
    """
    fd = open_or_fd(file_or_fd, output_folder)
    binary = fd.read(2).decode()
    if binary == "\0B":  # binary flag
        return _read_vec_flt_binary(fd)
    elif binary == "RI":
        # Start of a "RIFF" magic: treat the stream as a WAVE file.
        return _read_vec_flt_riff(fd)
    else:  # ascii,
        # The two bytes already read are part of the first token.
        arr = (binary + fd.readline().decode()).strip().split()
        try:
            arr.remove("[")
            arr.remove("]")  # optionally
        except ValueError:
            pass
        ans = np.array(arr, dtype=float)
    if fd is not file_or_fd:
        fd.close()  # cleanup
    return ans
def _read_vec_flt_riff(fd):
RIFF_CHUNK_DESCR_HEADER_SIZE = 12
ALREADY_READ_HEADER_BYTES = 2
SUB_CHUNK_HEADER_SIZE = 8
DATA_CHUNK_HEADER_SIZE = 8
def pcm2float(signal, dtype="float32"):
signal = np.asarray(signal)
dtype = np.dtype(dtype)
return signal.astype(dtype) / dtype.type(-np.iinfo(signal.dtype).min)
import struct
header = fd.read(RIFF_CHUNK_DESCR_HEADER_SIZE - ALREADY_READ_HEADER_BYTES)
assert header[:2] == b"FF"
chunk_header = fd.read(SUB_CHUNK_HEADER_SIZE)
subchunk_id, subchunk_size = struct.unpack("<4sI", chunk_header)
aformat, channels, samplerate, byterate, block_align, bps = struct.unpack("HHIIHH", fd.read(subchunk_size))
subchunk2_id, subchunk2_size = struct.unpack("<4sI", fd.read(DATA_CHUNK_HEADER_SIZE))
pcm_data = np.frombuffer(fd.read(subchunk2_size), dtype="int" + str(bps))
return pcm2float(pcm_data)
def _read_vec_flt_binary(fd):
header = fd.read(3).decode()
if header == "FV ":
sample_size = 4 # floats
elif header == "DV ":
sample_size = 8 # doubles
else:
raise UnknownVectorHeader("The header contained '%s'" % header)
assert sample_size > 0
# Dimension,
assert fd.read(1).decode() == "\4"
# int-size
vec_size = np.frombuffer(fd.read(4), dtype="int32", count=1)[0] # vector dim
if vec_size == 0:
return np.array([], dtype="float32")
# Read whole vector,
buf = fd.read(vec_size * sample_size)
if sample_size == 4:
ans = np.frombuffer(buf, dtype="float32")
elif sample_size == 8:
ans = np.frombuffer(buf, dtype="float64")
else:
raise BadSampleSize
return ans
# Writing,
def write_vec_flt(file_or_fd, output_folder, v, key=""):
    """ write_vec_flt(f, v, key='')
    Write a binary kaldi vector to filename or stream. Supports 32bit and 64bit floats.
    Arguments:
    file_or_fd : filename or opened file descriptor for writing,
    v : the vector to be stored,
    key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
    Example of writing single vector:
    kaldi_io.write_vec_flt(filename, vec)
    Example of writing arkfile:
    with open(ark_file,'w') as f:
    for key,vec in dict.iteritems():
    kaldi_io.write_vec_flt(f, vec, key=key)

    Raises UnsupportedDataType for dtypes other than float32/float64.
    """
    fd = open_or_fd(file_or_fd, output_folder, mode="wb")
    if sys.version_info[0] == 3:
        # NOTE(review): assumes fd exposes a '.mode' attribute (a real file);
        # in-memory streams like BytesIO have no '.mode' -- confirm callers.
        assert fd.mode == "wb"
    try:
        if key != "":
            fd.write((key + " ").encode("latin1"))  # ark-files have keys (utterance-id),
        fd.write("\0B".encode())  # we write binary!
        # Data-type,
        if v.dtype == "float32":
            fd.write("FV ".encode())
        elif v.dtype == "float64":
            fd.write("DV ".encode())
        else:
            raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % v.dtype)
        # Dim,
        fd.write("\04".encode())
        fd.write(struct.pack(np.dtype("uint32").char, v.shape[0]))  # dim
        # Data,
        fd.write(v.tobytes())
    finally:
        if fd is not file_or_fd:
            fd.close()
#################################################
# Float matrices (features, transformations, ...),
# Reading,
def read_mat_scp(file_or_fd, output_folder):
    """ generator(key,mat) = read_mat_scp(file_or_fd)
    Returns generator of (key,matrix) tuples, read according to kaldi scp.
    file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
    Iterate the scp:
    for key,mat in kaldi_io.read_mat_scp(file):
    ...
    Read scp to a 'dictionary':
    d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        for line in fd:
            # NOTE(review): 'rxfile' keeps the trailing newline; see the
            # matching note on read_vec_flt_scp() regarding ':offset' entries.
            (key, rxfile) = line.decode().split(" ")
            mat = read_mat(rxfile, output_folder)
            yield key, mat
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_mat_ark(file_or_fd, output_folder):
    """ generator(key,mat) = read_mat_ark(file_or_fd)
    Returns generator of (key,matrix) tuples, read from ark file/stream.
    file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
    Iterate the ark:
    for key,mat in kaldi_io.read_mat_ark(file):
    ...
    Read ark to a 'dictionary':
    d = { key:mat for key,mat in kaldi_io.read_mat_ark(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        while True:
            key = read_key(fd)
            if not key:
                break  # read_key() returned None: end of stream
            yield key, read_mat(fd, output_folder)
    finally:
        # Only close descriptors this function opened itself.
        if fd is not file_or_fd:
            fd.close()
def read_mat(file_or_fd, output_folder):
    """ [mat] = read_mat(file_or_fd)
    Reads single kaldi matrix, supports ascii and binary.
    file_or_fd : file, gzipped file, pipe or opened file descriptor.
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        # Dispatch on the 2-byte flag: '\0B' binary, ' [' ascii.
        binary = fd.read(2).decode()
        if binary == "\0B":
            mat = _read_mat_binary(fd)
        else:
            assert binary == " ["
            mat = _read_mat_ascii(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
    return mat
def _read_mat_binary(fd):
# Data type
header = fd.read(3).decode()
# 'CM', 'CM2', 'CM3' are possible values,
if header.startswith("CM"):
return _read_compressed_mat(fd, header)
elif header == "FM ":
sample_size = 4 # floats
elif header == "DM ":
sample_size = 8 # doubles
else:
raise UnknownMatrixHeader("The header contained '%s'" % header)
assert sample_size > 0
# Dimensions
s1, rows, s2, cols = np.frombuffer(fd.read(10), dtype="int8,int32,int8,int32", count=1)[0]
# Read whole matrix
buf = fd.read(rows * cols * sample_size)
if sample_size == 4:
vec = np.frombuffer(buf, dtype="float32")
elif sample_size == 8:
vec = np.frombuffer(buf, dtype="float64")
else:
raise BadSampleSize
mat = np.reshape(vec, (rows, cols))
return mat
def _read_mat_ascii(fd):
rows = []
while 1:
line = fd.readline().decode()
if len(line) == 0:
raise BadInputFormat # eof, should not happen!
if len(line.strip()) == 0:
continue # skip empty line
arr = line.strip().split()
if arr[-1] != "]":
rows.append(np.array(arr, dtype="float32")) # not last line
else:
rows.append(np.array(arr[:-1], dtype="float32")) # last line
mat = np.vstack(rows)
return mat
def _read_compressed_mat(fd, format):
    """ Read a compressed matrix,
    see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
    methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),
    """
    assert format == "CM "  # The formats CM2, CM3 are not supported...
    # Format of header 'struct',
    global_header = np.dtype(
        [("minvalue", "float32"), ("range", "float32"), ("num_rows", "int32"), ("num_cols", "int32")]
    )  # member '.format' is not written,
    per_col_header = np.dtype(
        [
            ("percentile_0", "uint16"),
            ("percentile_25", "uint16"),
            ("percentile_75", "uint16"),
            ("percentile_100", "uint16"),
        ]
    )
    # Read global header,
    globmin, globrange, rows, cols = np.frombuffer(fd.read(16), dtype=global_header, count=1)[0]
    # The data is structed as [Colheader, ... , Colheader, Data, Data , .... ]
    # { cols }{ size }
    col_headers = np.frombuffer(fd.read(cols * 8), dtype=per_col_header, count=cols)
    # Dequantize the per-column uint16 percentiles into floats:
    # 1.52590218966964e-05 == 1/65535, mapping uint16 -> [0,1] before scaling.
    col_headers = np.array(
        [np.array([x for x in y]) * globrange * 1.52590218966964e-05 + globmin for y in col_headers], dtype=np.float32
    )
    data = np.reshape(
        np.frombuffer(fd.read(cols * rows), dtype="uint8", count=cols * rows), newshape=(cols, rows)
    )  # stored as col-major,
    mat = np.zeros((cols, rows), dtype="float32")
    p0 = col_headers[:, 0].reshape(-1, 1)
    p25 = col_headers[:, 1].reshape(-1, 1)
    p75 = col_headers[:, 2].reshape(-1, 1)
    p100 = col_headers[:, 3].reshape(-1, 1)
    # Each uint8 value is decoded piecewise-linearly within one of three
    # percentile bands: [0,64] -> [p0,p25], (64,192] -> [p25,p75],
    # (192,255] -> [p75,p100].
    mask_0_64 = data <= 64
    mask_193_255 = data > 192
    mask_65_192 = ~(mask_0_64 | mask_193_255)
    mat += (p0 + (p25 - p0) / 64.0 * data) * mask_0_64.astype(np.float32)
    mat += (p25 + (p75 - p25) / 128.0 * (data - 64)) * mask_65_192.astype(np.float32)
    mat += (p75 + (p100 - p75) / 63.0 * (data - 192)) * mask_193_255.astype(np.float32)
    return mat.T  # transpose! col-major -> row-major,
# Writing,
def write_mat(output_folder, file_or_fd, m, key=""):
    """ write_mat(f, m, key='')
    Write a binary kaldi matrix to filename or stream. Supports 32bit and 64bit floats.
    Arguments:
    file_or_fd : filename of opened file descriptor for writing,
    m : the matrix to be stored,
    key (optional) : used for writing ark-file, the utterance-id gets written before the matrix.
    Example of writing single matrix:
    kaldi_io.write_mat(filename, mat)
    Example of writing arkfile:
    with open(ark_file,'w') as f:
    for key,mat in dict.iteritems():
    kaldi_io.write_mat(f, mat, key=key)

    NOTE: unlike the other writers in this file, 'output_folder' comes FIRST
    here (write_vec_int/write_vec_flt take (file_or_fd, output_folder, ...)).
    Raises UnsupportedDataType for dtypes other than float32/float64.
    """
    fd = open_or_fd(file_or_fd, output_folder, mode="wb")
    if sys.version_info[0] == 3:
        # NOTE(review): assumes fd exposes a '.mode' attribute (a real file);
        # in-memory streams like BytesIO have no '.mode' -- confirm callers.
        assert fd.mode == "wb"
    try:
        if key != "":
            fd.write((key + " ").encode("latin1"))  # ark-files have keys (utterance-id),
        fd.write("\0B".encode())  # we write binary!
        # Data-type,
        if m.dtype == "float32":
            fd.write("FM ".encode())
        elif m.dtype == "float64":
            fd.write("DM ".encode())
        else:
            raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % m.dtype)
        # Dims,
        fd.write("\04".encode())
        fd.write(struct.pack(np.dtype("uint32").char, m.shape[0]))  # rows
        fd.write("\04".encode())
        fd.write(struct.pack(np.dtype("uint32").char, m.shape[1]))  # cols
        # Data,
        fd.write(m.tobytes())
    finally:
        if fd is not file_or_fd:
            fd.close()
#################################################
# 'Posterior' kaldi type (posteriors, confusion network, nnet1 training targets, ...)
# Corresponds to: vector<vector<tuple<int,float> > >
# - outer vector: time axis
# - inner vector: records at the time
# - tuple: int = index, float = value
#
def read_cnet_ark(file_or_fd, output_folder):
    """ Alias of function 'read_post_ark()', 'cnet' = confusion network """
    # Confusion networks are stored as kaldi 'Posterior' objects.
    return read_post_ark(file_or_fd, output_folder)
def read_post_rxspec(file_, output_folder=None):
    """ adaptor to read both 'ark:...' and 'scp:...' inputs of posteriors,

    output_folder : forwarded to the ark/scp reader (new, defaulted keyword
    for backward compatibility).

    Fix: read_post_ark()/read_post_scp() require an 'output_folder'
    argument; the previous code called them with only 'file_', raising a
    TypeError on every invocation.
    """
    if file_.startswith("ark:"):
        return read_post_ark(file_, output_folder)
    elif file_.startswith("scp:"):
        return read_post_scp(file_, output_folder)
    else:
        print("unsupported intput type: %s" % file_)
        print("it should begint with 'ark:' or 'scp:'")
        sys.exit(1)
def read_post_scp(file_or_fd, output_folder):
    """ generator(key,post) = read_post_scp(file_or_fd)
    Returns generator of (key,post) tuples, read according to kaldi scp.
    file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
    Iterate the scp:
    for key,post in kaldi_io.read_post_scp(file):
    ...
    Read scp to a 'dictionary':
    d = { key:post for key,post in kaldi_io.read_post_scp(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        for line in fd:
            (key, rxfile) = line.decode().split(" ")
            # Fix: read_post() requires 'output_folder'; it was previously
            # called with only 'rxfile', raising a TypeError.
            post = read_post(rxfile, output_folder)
            yield key, post
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_post_ark(file_or_fd, output_folder):
    """ generator(key,vec<vec<int,float>>) = read_post_ark(file)
    Returns generator of (key,posterior) tuples, read from ark file.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
    Iterate the ark:
    for key,post in kaldi_io.read_post_ark(file):
    ...
    Read ark to a 'dictionary':
    d = { key:post for key,post in kaldi_io.read_post_ark(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        key = read_key(fd)
        while key:
            # Fix: read_post() requires 'output_folder'; it was previously
            # called with only 'fd', raising a TypeError.
            post = read_post(fd, output_folder)
            yield key, post
            key = read_key(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_post(file_or_fd, output_folder):
    """ [post] = read_post(file_or_fd)
    Reads single kaldi 'Posterior' in binary format.
    The 'Posterior' is C++ type 'vector<vector<tuple<int,float> > >',
    the outer-vector is usually time axis, inner-vector are the records
    at given time, and the tuple is composed of an 'index' (integer)
    and a 'float-value'. The 'float-value' can represent a probability
    or any other numeric value.
    Returns vector of vectors of tuples.
    """
    fd = open_or_fd(file_or_fd, output_folder)
    ans = []
    binary = fd.read(2).decode()
    assert binary == "\0B"
    # binary flag
    assert fd.read(1).decode() == "\4"
    # int-size
    outer_vec_size = np.frombuffer(fd.read(4), dtype="int32", count=1)[0]  # number of frames (or bins)
    # Loop over 'outer-vector',
    for i in range(outer_vec_size):
        assert fd.read(1).decode() == "\4"
        # int-size
        inner_vec_size = np.frombuffer(fd.read(4), dtype="int32", count=1)[0]  # number of records for frame (or bin)
        # Each record is 10 bytes: (int8 size, int32 idx, int8 size, float32 post).
        data = np.frombuffer(
            fd.read(inner_vec_size * 10),
            dtype=[("size_idx", "int8"), ("idx", "int32"), ("size_post", "int8"), ("post", "float32")],
            count=inner_vec_size,
        )
        assert data[0]["size_idx"] == 4
        assert data[0]["size_post"] == 4
        ans.append(data[["idx", "post"]].tolist())
    if fd is not file_or_fd:
        fd.close()
    return ans
#################################################
# Kaldi Confusion Network bin begin/end times,
# (kaldi stores CNs time info separately from the Posterior).
#
def read_cntime_ark(file_or_fd, output_folder):
    """ generator(key,vec<tuple<float,float>>) = read_cntime_ark(file_or_fd)
    Returns generator of (key,cntime) tuples, read from ark file.
    file_or_fd : file, gzipped file, pipe or opened file descriptor.
    output_folder : passed through to open_or_fd() when resolving the path.
    Iterate the ark:
    for key,time in kaldi_io.read_cntime_ark(file):
    ...
    Read ark to a 'dictionary':
    d = { key:time for key,time in kaldi_io.read_cntime_ark(file) }
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        key = read_key(fd)
        while key:
            # BUGFIX: read_cntime() takes two required arguments; it was
            # previously called as read_cntime(fd), which raises TypeError.
            cntime = read_cntime(fd, output_folder)
            yield key, cntime
            key = read_key(fd)
    finally:
        # Only close descriptors we opened ourselves.
        if fd is not file_or_fd:
            fd.close()
def read_cntime(file_or_fd, output_folder):
    """ [cntime] = read_cntime(file_or_fd)
    Reads single kaldi 'Confusion Network time info', in binary format:
    C++ type: vector<tuple<float,float> >.
    (begin/end times of bins at the confusion network).
    Binary layout is '<num-bins> <beg1> <end1> <beg2> <end2> ...'
    file_or_fd : file, gzipped file, pipe or opened file descriptor.
    output_folder : passed through to open_or_fd() when resolving the path.
    Returns vector of tuples.
    """
    fd = open_or_fd(file_or_fd, output_folder)
    try:
        binary = fd.read(2).decode()
        assert binary == "\0B"
        # assuming it's binary
        assert fd.read(1).decode() == "\4"
        # int-size
        vec_size = np.frombuffer(fd.read(4), dtype="int32", count=1)[0]  # number of frames (or bins)
        # Each bin is 10 bytes on disk:
        # int8 size marker + float32 t_beg + int8 size marker + float32 t_end.
        data = np.frombuffer(
            fd.read(vec_size * 10),
            dtype=[("size_beg", "int8"), ("t_beg", "float32"), ("size_end", "int8"), ("t_end", "float32")],
            count=vec_size,
        )
        # ROBUSTNESS: the original unconditionally indexed data[0], which
        # raises IndexError for an empty (zero-bin) network.
        if vec_size > 0:
            assert data[0]["size_beg"] == 4
            assert data[0]["size_end"] == 4
        # Return vector of tuples (t_beg,t_end),
        return data[["t_beg", "t_end"]].tolist()
    finally:
        # Close even on a parse error (the original leaked the descriptor),
        # but only if we opened it ourselves.
        if fd is not file_or_fd:
            fd.close()
#################################################
# Segments related,
#
# Segments as 'Bool vectors' can be handy,
# - for 'superposing' the segmentations,
# - for frame-selection in Speaker-ID experiments,
def read_segments_as_bool_vec(segments_file):
    """ [ bool_vec ] = read_segments_as_bool_vec(segments_file)
    Build a per-frame boolean speech/non-speech vector from a kaldi
    'segments' file describing a single wav, format:
    '<utt> <rec> <t-beg> <t-end>' (times in seconds, 100 frames/second).
    """
    records = np.loadtxt(segments_file, dtype="object,object,f,f", ndmin=1)
    # Sanity checks: a non-empty segmentation referring to exactly one wav,
    assert len(records) > 0
    assert len(np.unique([r[1] for r in records])) == 1
    # Convert begin/end times to frame indexes (100 fps),
    beg_frm = np.rint([100 * r[2] for r in records]).astype(int)
    end_frm = np.rint([100 * r[3] for r in records]).astype(int)
    # Expand [gap, segment, gap, segment, ...] run-lengths into booleans
    # (same repeat-based trick as 'read_lab_to_bool_vec' in htk.py),
    pattern = np.r_[np.tile([False, True], len(end_frm)), False]
    gap_lens = beg_frm - np.r_[0, end_frm[:-1]]
    seg_lens = end_frm - beg_frm
    run_lens = np.r_[np.c_[gap_lens, seg_lens].flat, 0]
    frames = np.repeat(pattern, run_lens)
    # Total 'True' frames must equal the summed segment lengths,
    assert np.sum(seg_lens) == np.sum(frames)
    return frames
| 55,933 | 36.767725 | 142 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.