repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/d_2_tsne.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/d_2_tsne.py | from methods import *
from numpy.random import seed
from keras import backend as K
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
seed(0)
################################
#### get dense layer output ####
################################
#getting the x and y inputs in numpy array form from the text file
def train_x(train_txt, word2vec_len, input_size, word2vec):
#read in lines
train_lines = open(train_txt, 'r').readlines()
num_lines = len(train_lines)
x_matrix = np.zeros((num_lines, input_size, word2vec_len))
#insert values
for i, line in enumerate(train_lines):
parts = line[:-1].split('\t')
label = int(parts[0])
sentence = parts[1]
#insert x
words = sentence.split(' ')
words = words[:x_matrix.shape[1]] #cut off if too long
for j, word in enumerate(words):
if word in word2vec:
x_matrix[i, j, :] = word2vec[word]
return x_matrix
def get_dense_output(model_checkpoint, file, num_classes):
x = train_x(file, word2vec_len, input_size, word2vec)
model = load_model(model_checkpoint)
get_3rd_layer_output = K.function([model.layers[0].input], [model.layers[4].output])
layer_output = get_3rd_layer_output([x])[0]
return layer_output
def get_tsne_labels(file):
labels = []
alphas = []
lines = open(file, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
_class = int(parts[0])
alpha = i % 10
labels.append(_class)
alphas.append(alpha)
return labels, alphas
def get_plot_vectors(layer_output):
tsne = TSNE(n_components=2).fit_transform(layer_output)
return tsne
def plot_tsne(tsne, labels, output_path):
label_to_legend_label = { 'outputs_f4/pc_tsne.png':{ 0:'Con (augmented)',
100:'Con (original)',
1: 'Pro (augmented)',
101:'Pro (original)'},
'outputs_f4/trec_tsne.png':{0:'Description (augmented)',
100:'Description (original)',
1:'Entity (augmented)',
101:'Entity (original)',
2:'Abbreviation (augmented)',
102:'Abbreviation (original)',
3:'Human (augmented)',
103:'Human (original)',
4:'Location (augmented)',
104:'Location (original)',
5:'Number (augmented)',
105:'Number (original)'}}
plot_to_legend_size = {'outputs_f4/pc_tsne.png':11, 'outputs_f4/trec_tsne.png':6}
labels = labels.tolist()
big_groups = [label for label in labels if label < 100]
big_groups = list(sorted(set(big_groups)))
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#ff1493', '#FF4500']
fig, ax = plt.subplots()
for big_group in big_groups:
for group in [big_group, big_group+100]:
x, y = [], []
for j, label in enumerate(labels):
if label == group:
x.append(tsne[j][0])
y.append(tsne[j][1])
#params
color = colors[int(group % 100)]
marker = 'x' if group < 100 else 'o'
size = 1 if group < 100 else 27
legend_label = label_to_legend_label[output_path][group]
ax.scatter(x, y, color=color, marker=marker, s=size, label=legend_label)
plt.axis('off')
legend_size = plot_to_legend_size[output_path]
plt.legend(prop={'size': legend_size})
plt.savefig(output_path, dpi=1000)
plt.clf()
if __name__ == "__main__":
#global variables
word2vec_len = 300
input_size = 25
datasets = ['pc'] #['pc', 'trec']
num_classes_list =[2] #[2, 6]
for i, dataset in enumerate(datasets):
#load parameters
model_checkpoint = 'outputs_f4/' + dataset + '.h5'
file = 'special_f4/' + dataset + '/test_short_aug.txt'
num_classes = num_classes_list[i]
word2vec_pickle = 'special_f4/' + dataset + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
#do tsne
layer_output = get_dense_output(model_checkpoint, file, num_classes)
print(layer_output.shape)
t = get_plot_vectors(layer_output)
labels, alphas = get_tsne_labels(file)
print(labels, alphas)
writer = open("outputs_f4/new_tsne.txt", 'w')
label_to_mark = {0:'x', 1:'o'}
for i, label in enumerate(labels):
alpha = alphas[i]
line = str(t[i, 0]) + ' ' + str(t[i, 1]) + ' ' + str(label_to_mark[label]) + ' ' + str(alpha/10)
writer.write(line + '\n')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/c_2_train_eval.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/c_2_train_eval.py | from c_config import *
from methods import *
from numpy.random import seed
seed(5)
###############################
#### run model and get acc ####
###############################
def run_cnn(train_file, test_file, num_classes, percent_dataset):
#initialize model
model = build_cnn(input_size, word2vec_len, num_classes)
#load data
train_x, train_y = get_x_y(train_file, num_classes, word2vec_len, input_size, word2vec, percent_dataset)
test_x, test_y = get_x_y(test_file, num_classes, word2vec_len, input_size, word2vec, 1)
#implement early stopping
callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
#train model
model.fit( train_x,
train_y,
epochs=100000,
callbacks=callbacks,
validation_split=0.1,
batch_size=1024,
shuffle=True,
verbose=0)
#model.save('checkpoints/lol')
#model = load_model('checkpoints/lol')
#evaluate model
y_pred = model.predict(test_x)
test_y_cat = one_hot_to_categorical(test_y)
y_pred_cat = one_hot_to_categorical(y_pred)
acc = accuracy_score(test_y_cat, y_pred_cat)
#clean memory???
train_x, train_y = None, None
gc.collect()
#return the accuracy
#print("data with shape:", train_x.shape, train_y.shape, 'train=', train_file, 'test=', test_file, 'with fraction', percent_dataset, 'had acc', acc)
return acc
###############################
############ main #############
###############################
if __name__ == "__main__":
for see in range(5):
seed(see)
print('seed:', see)
writer = open('outputs_f3/' + get_now_str() + '.txt', 'w')
#for each size dataset
for size_folder in size_folders:
writer.write(size_folder + '\n')
#get all six datasets
dataset_folders = [size_folder + '/' + s for s in datasets]
#for storing the performances
performances = {num_aug:[] for num_aug in num_aug_list}
#for each dataset
for i in range(len(dataset_folders)):
#initialize all the variables
dataset_folder = dataset_folders[i]
dataset = datasets[i]
num_classes = num_classes_list[i]
input_size = input_size_list[i]
word2vec_pickle = dataset_folder + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
#test each num_aug value
for num_aug in num_aug_list:
train_path = dataset_folder + '/train_' + str(num_aug) + '.txt'
test_path = 'size_data_f3/test/' + dataset + '/test.txt'
acc = run_cnn(train_path, test_path, num_classes, percent_dataset=1)
performances[num_aug].append(acc)
writer.write(train_path + ',' + str(acc))
writer.write(str(performances) + '\n')
print()
for num_aug in performances:
line = str(num_aug) + ' : ' + str(sum(performances[num_aug])/len(performances[num_aug]))
writer.write(line + '\n')
print(line)
print(performances)
writer.close()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/b_config.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/b_config.py | #user inputs
#dataset folder
datasets = ['pc']#['cr', 'sst2', 'subj', 'trec', 'pc']
dataset_folders = ['increment_datasets_f2/' + dataset for dataset in datasets]
#number of output classes
num_classes_list = [2]#[2, 2, 2, 6, 2]
#dataset increments
increments = [0.7, 0.8, 0.9, 1]#[0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
#number of words for input
input_size_list = [25]#[50, 50, 40, 25, 25]
#word2vec dictionary
huge_word2vec = 'word2vec/glove.840B.300d.txt'
word2vec_len = 300 | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/a_config.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/a_config.py | #user inputs
#size folders
sizes = ['1_tiny', '2_small', '3_standard', '4_full']
size_folders = ['size_data_f1/' + size for size in sizes]
#augmentation methods
a_methods = ['sr', 'ri', 'rd', 'rs']
#dataset folder
datasets = ['cr', 'sst2', 'subj', 'trec', 'pc']
#number of output classes
num_classes_list = [2, 2, 2, 6, 2]
#number of augmentations
n_aug_list_dict = {'size_data_f1/1_tiny': [16, 16, 16, 16, 16],
'size_data_f1/2_small': [16, 16, 16, 16, 16],
'size_data_f1/3_standard': [8, 8, 8, 8, 4],
'size_data_f1/4_full': [8, 8, 8, 8, 4]}
#alpha values we care about
alphas = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5]
#number of words for input
input_size_list = [50, 50, 40, 25, 25]
#word2vec dictionary
huge_word2vec = 'word2vec/glove.840B.300d.txt'
word2vec_len = 300 # don't want to load the huge pickle every time, so just save the words that are actually used into a smaller dictionary
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/e_2_cnn_aug.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/e_2_cnn_aug.py | from methods import *
from numpy.random import seed
seed(0)
from e_config import *
###############################
#### run model and get acc ####
###############################
def run_cnn(train_file, test_file, num_classes, input_size, percent_dataset, word2vec):
#initialize model
model = build_cnn(input_size, word2vec_len, num_classes)
#load data
train_x, train_y = get_x_y(train_file, num_classes, word2vec_len, input_size, word2vec, percent_dataset)
test_x, test_y = get_x_y(test_file, num_classes, word2vec_len, input_size, word2vec, 1)
#implement early stopping
callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
#train model
model.fit( train_x,
train_y,
epochs=100000,
callbacks=callbacks,
validation_split=0.1,
batch_size=1024,
shuffle=True,
verbose=0)
#model.save('checkpoints/lol')
#model = load_model('checkpoints/lol')
#evaluate model
y_pred = model.predict(test_x)
test_y_cat = one_hot_to_categorical(test_y)
y_pred_cat = one_hot_to_categorical(y_pred)
acc = accuracy_score(test_y_cat, y_pred_cat)
#clean memory???
train_x, train_y, model = None, None, None
gc.collect()
#return the accuracy
#print("data with shape:", train_x.shape, train_y.shape, 'train=', train_file, 'test=', test_file, 'with fraction', percent_dataset, 'had acc', acc)
return acc
###############################
### get baseline accuracies ###
###############################
def compute_baselines(writer):
#baseline computation
for size_folder in size_folders:
#get all six datasets
dataset_folders = [size_folder + '/' + s for s in datasets]
performances = []
#for each dataset
for i in range(len(dataset_folders)):
#initialize all the variables
dataset_folder = dataset_folders[i]
dataset = datasets[i]
num_classes = num_classes_list[i]
input_size = input_size_list[i]
word2vec_pickle = dataset_folder + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
train_path = dataset_folder + '/train_aug_st.txt'
test_path = 'size_data_t1/test/' + dataset + '/test.txt'
acc = run_cnn(train_path, test_path, num_classes, input_size, 1, word2vec)
performances.append(str(acc))
line = ','.join(performances)
print(line)
writer.write(line+'\n')
###############################
############ main #############
###############################
if __name__ == "__main__":
writer = open('baseline_cnn/' + get_now_str() + '.csv', 'w')
for i in range(0, 10):
seed(i)
print(i)
compute_baselines(writer)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/e_config.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/e_config.py | #user inputs
#load hyperparameters
sizes = ['4_full']#['1_tiny', '2_small', '3_standard', '4_full']
size_folders = ['size_data_t1/' + size for size in sizes]
#datasets
datasets = ['cr', 'sst2', 'subj', 'trec', 'pc']
#number of output classes
num_classes_list = [2, 2, 2, 6, 2]
#number of augmentations per original sentence
n_aug_list_dict = {'size_data_t1/1_tiny': [32, 32, 32, 32, 32],
'size_data_t1/2_small': [32, 32, 32, 32, 32],
'size_data_t1/3_standard': [16, 16, 16, 16, 4],
'size_data_t1/4_full': [16, 16, 16, 16, 4]}
#number of words for input
input_size_list = [50, 50, 40, 25, 25]
#word2vec dictionary
huge_word2vec = 'word2vec/glove.840B.300d.txt'
word2vec_len = 300 | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/b_2_train_eval.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/b_2_train_eval.py | from b_config import *
from methods import *
from numpy.random import seed
seed(0)
###############################
#### run model and get acc ####
###############################
def run_model(train_file, test_file, num_classes, percent_dataset):
#initialize model
model = build_model(input_size, word2vec_len, num_classes)
#load data
train_x, train_y = get_x_y(train_file, num_classes, word2vec_len, input_size, word2vec, percent_dataset)
test_x, test_y = get_x_y(test_file, num_classes, word2vec_len, input_size, word2vec, 1)
#implement early stopping
callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
#train model
model.fit( train_x,
train_y,
epochs=100000,
callbacks=callbacks,
validation_split=0.1,
batch_size=1024,
shuffle=True,
verbose=0)
#model.save('checkpoints/lol')
#model = load_model('checkpoints/lol')
#evaluate model
y_pred = model.predict(test_x)
test_y_cat = one_hot_to_categorical(test_y)
y_pred_cat = one_hot_to_categorical(y_pred)
acc = accuracy_score(test_y_cat, y_pred_cat)
#clean memory???
train_x, train_y = None, None
gc.collect()
#return the accuracy
#print("data with shape:", train_x.shape, train_y.shape, 'train=', train_file, 'test=', test_file, 'with fraction', percent_dataset, 'had acc', acc)
return acc
if __name__ == "__main__":
#get the accuracy at each increment
orig_accs = {dataset:{} for dataset in datasets}
aug_accs = {dataset:{} for dataset in datasets}
writer = open('outputs_f2/' + get_now_str() + '.csv', 'w')
#for each dataset
for i, dataset_folder in enumerate(dataset_folders):
dataset = datasets[i]
num_classes = num_classes_list[i]
input_size = input_size_list[i]
train_orig = dataset_folder + '/train_orig.txt'
train_aug_st = dataset_folder + '/train_aug_st.txt'
test_path = dataset_folder + '/test.txt'
word2vec_pickle = dataset_folder + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
for increment in increments:
#calculate augmented accuracy
aug_acc = run_model(train_aug_st, test_path, num_classes, increment)
aug_accs[dataset][increment] = aug_acc
#calculate original accuracy
orig_acc = run_model(train_orig, test_path, num_classes, increment)
orig_accs[dataset][increment] = orig_acc
print(dataset, increment, orig_acc, aug_acc)
writer.write(dataset + ',' + str(increment) + ',' + str(orig_acc) + ',' + str(aug_acc) + '\n')
gc.collect()
print(orig_accs, aug_accs)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/a_1_data_process.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/a_1_data_process.py | from methods import *
from a_config import *
if __name__ == "__main__":
#for each method
for a_method in a_methods:
#for each data size
for size_folder in size_folders:
n_aug_list = n_aug_list_dict[size_folder]
dataset_folders = [size_folder + '/' + s for s in datasets]
#for each dataset
for i, dataset_folder in enumerate(dataset_folders):
train_orig = dataset_folder + '/train_orig.txt'
n_aug = n_aug_list[i]
#for each alpha value
for alpha in alphas:
output_file = dataset_folder + '/train_' + a_method + '_' + str(alpha) + '.txt'
#generate the augmented data
if a_method == 'sr':
gen_sr_aug(train_orig, output_file, alpha, n_aug)
if a_method == 'ri':
gen_ri_aug(train_orig, output_file, alpha, n_aug)
if a_method == 'rd':
gen_rd_aug(train_orig, output_file, alpha, n_aug)
if a_method == 'rs':
gen_rs_aug(train_orig, output_file, alpha, n_aug)
#generate the vocab dictionary
word2vec_pickle = dataset_folder + '/word2vec.p'
gen_vocab_dicts(dataset_folder, word2vec_pickle, huge_word2vec)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/a_2_train_eval.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/a_2_train_eval.py | from a_config import *
from methods import *
from numpy.random import seed
seed(5)
###############################
#### run model and get acc ####
###############################
def run_cnn(train_file, test_file, num_classes, percent_dataset):
#initialize model
model = build_cnn(input_size, word2vec_len, num_classes)
#load data
train_x, train_y = get_x_y(train_file, num_classes, word2vec_len, input_size, word2vec, percent_dataset)
test_x, test_y = get_x_y(test_file, num_classes, word2vec_len, input_size, word2vec, 1)
#implement early stopping
callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
#train model
model.fit( train_x,
train_y,
epochs=100000,
callbacks=callbacks,
validation_split=0.1,
batch_size=1024,
shuffle=True,
verbose=0)
#model.save('checkpoints/lol')
#model = load_model('checkpoints/lol')
#evaluate model
y_pred = model.predict(test_x)
test_y_cat = one_hot_to_categorical(test_y)
y_pred_cat = one_hot_to_categorical(y_pred)
acc = accuracy_score(test_y_cat, y_pred_cat)
#clean memory???
train_x, train_y, test_x, test_y, model = None, None, None, None, None
gc.collect()
#return the accuracy
#print("data with shape:", train_x.shape, train_y.shape, 'train=', train_file, 'test=', test_file, 'with fraction', percent_dataset, 'had acc', acc)
return acc
###############################
############ main #############
###############################
if __name__ == "__main__":
#for each method
for a_method in a_methods:
writer = open('outputs_f1/' + a_method + '_' + get_now_str() + '.txt', 'w')
#for each size dataset
for size_folder in size_folders:
writer.write(size_folder + '\n')
#get all six datasets
dataset_folders = [size_folder + '/' + s for s in datasets]
#for storing the performances
performances = {alpha:[] for alpha in alphas}
#for each dataset
for i in range(len(dataset_folders)):
#initialize all the variables
dataset_folder = dataset_folders[i]
dataset = datasets[i]
num_classes = num_classes_list[i]
input_size = input_size_list[i]
word2vec_pickle = dataset_folder + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
#test each alpha value
for alpha in alphas:
train_path = dataset_folder + '/train_' + a_method + '_' + str(alpha) + '.txt'
test_path = 'size_data_f1/test/' + dataset + '/test.txt'
acc = run_cnn(train_path, test_path, num_classes, percent_dataset=1)
performances[alpha].append(acc)
writer.write(str(performances) + '\n')
for alpha in performances:
line = str(alpha) + ' : ' + str(sum(performances[alpha])/len(performances[alpha]))
writer.write(line + '\n')
print(line)
print(performances)
writer.close()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/d_0_preprocess.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/d_0_preprocess.py | from methods import *
def generate_short(input_file, output_file, alpha):
lines = open(input_file, 'r').readlines()
increment = int(len(lines)/alpha)
lines = lines[::increment]
writer = open(output_file, 'w')
for line in lines:
writer.write(line)
if __name__ == "__main__":
#global params
huge_word2vec = 'word2vec/glove.840B.300d.txt'
datasets = ['pc']#, 'trec']
for dataset in datasets:
dataset_folder = 'special_f4/' + dataset
test_short = 'special_f4/' + dataset + '/test_short.txt'
test_aug_short = dataset_folder + '/test_short_aug.txt'
word2vec_pickle = dataset_folder + '/word2vec.p'
#augment the data
gen_tsne_aug(test_short, test_aug_short)
#generate the vocab dictionaries
gen_vocab_dicts(dataset_folder, word2vec_pickle, huge_word2vec)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/e_1_data_process.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/e_1_data_process.py | from methods import *
from e_config import *
if __name__ == "__main__":
for size_folder in size_folders:
dataset_folders = [size_folder + '/' + s for s in datasets]
n_aug_list = n_aug_list_dict[size_folder]
#for each dataset
for i, dataset_folder in enumerate(dataset_folders):
n_aug = n_aug_list[i]
#pre-existing file locations
train_orig = dataset_folder + '/train_orig.txt'
#file to be created
train_aug_st = dataset_folder + '/train_aug_st.txt'
#standard augmentation
gen_standard_aug(train_orig, train_aug_st, n_aug)
#generate the vocab dictionary
word2vec_pickle = dataset_folder + '/word2vec.p'
gen_vocab_dicts(dataset_folder, word2vec_pickle, huge_word2vec)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/e_2_rnn_aug.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/e_2_rnn_aug.py | from methods import *
from numpy.random import seed
seed(0)
from e_config import *
###############################
#### run model and get acc ####
###############################
def run_model(train_file, test_file, num_classes, input_size, percent_dataset, word2vec):
#initialize model
model = build_model(input_size, word2vec_len, num_classes)
#load data
train_x, train_y = get_x_y(train_file, num_classes, word2vec_len, input_size, word2vec, percent_dataset)
test_x, test_y = get_x_y(test_file, num_classes, word2vec_len, input_size, word2vec, 1)
#implement early stopping
callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
#train model
model.fit( train_x,
train_y,
epochs=100000,
callbacks=callbacks,
validation_split=0.1,
batch_size=1024,
shuffle=True,
verbose=0)
#model.save('checkpoints/lol')
#model = load_model('checkpoints/lol')
#evaluate model
y_pred = model.predict(test_x)
test_y_cat = one_hot_to_categorical(test_y)
y_pred_cat = one_hot_to_categorical(y_pred)
acc = accuracy_score(test_y_cat, y_pred_cat)
#clean memory???
train_x, train_y, model = None, None, None
gc.collect()
#return the accuracy
#print("data with shape:", train_x.shape, train_y.shape, 'train=', train_file, 'test=', test_file, 'with fraction', percent_dataset, 'had acc', acc)
return acc
###############################
### get baseline accuracies ###
###############################
def compute_baselines(writer):
#baseline computation
for size_folder in size_folders:
#get all six datasets
dataset_folders = [size_folder + '/' + s for s in datasets]
performances = []
#for each dataset
for i in range(len(dataset_folders)):
#initialize all the variables
dataset_folder = dataset_folders[i]
dataset = datasets[i]
num_classes = num_classes_list[i]
input_size = input_size_list[i]
word2vec_pickle = dataset_folder + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
train_path = dataset_folder + '/train_aug_st.txt'
test_path = 'size_data_t1/test/' + dataset + '/test.txt'
acc = run_model(train_path, test_path, num_classes, input_size, 1, word2vec)
performances.append(str(acc))
line = ','.join(performances)
print(line)
writer.write(line+'\n')
###############################
############ main #############
###############################
if __name__ == "__main__":
writer = open('baseline_rnn/' + get_now_str() + '.csv', 'w')
for i in range(0, 10):
seed(i)
print(i)
compute_baselines(writer)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/d_neg_1_balance_trec.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/d_neg_1_balance_trec.py | lines = open('special_f4/trec/test_orig.txt', 'r').readlines()
label_to_lines = {x:[] for x in range(0, 6)}
for line in lines:
label = int(line[0])
label_to_lines[label].append(line)
for label in label_to_lines:
print(label, len(label_to_lines[label])) | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/e_2_cnn_baselines.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/e_2_cnn_baselines.py | from methods import *
from numpy.random import seed
seed(0)
from e_config import *
###############################
#### run model and get acc ####
###############################
def run_model(train_file, test_file, num_classes, input_size, percent_dataset, word2vec):
#initialize model
model = build_model(input_size, word2vec_len, num_classes)
#load data
train_x, train_y = get_x_y(train_file, num_classes, word2vec_len, input_size, word2vec, percent_dataset)
test_x, test_y = get_x_y(test_file, num_classes, word2vec_len, input_size, word2vec, 1)
#implement early stopping
callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
#train model
model.fit( train_x,
train_y,
epochs=100000,
callbacks=callbacks,
validation_split=0.1,
batch_size=1024,
shuffle=True,
verbose=0)
#model.save('checkpoints/lol')
#model = load_model('checkpoints/lol')
#evaluate model
y_pred = model.predict(test_x)
test_y_cat = one_hot_to_categorical(test_y)
y_pred_cat = one_hot_to_categorical(y_pred)
acc = accuracy_score(test_y_cat, y_pred_cat)
#clean memory???
train_x, train_y = None, None
gc.collect()
#return the accuracy
#print("data with shape:", train_x.shape, train_y.shape, 'train=', train_file, 'test=', test_file, 'with fraction', percent_dataset, 'had acc', acc)
return acc
###############################
### get baseline accuracies ###
###############################
def compute_baselines(writer):
#baseline computation
for size_folder in size_folders:
#get all six datasets
dataset_folders = [size_folder + '/' + s for s in datasets]
performances = []
#for each dataset
for i in range(len(dataset_folders)):
#initialize all the variables
dataset_folder = dataset_folders[i]
dataset = datasets[i]
num_classes = num_classes_list[i]
input_size = input_size_list[i]
word2vec_pickle = dataset_folder + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
train_path = dataset_folder + '/train_orig.txt'
test_path = 'size_data_t1/test/' + dataset + '/test.txt'
acc = run_model(train_path, test_path, num_classes, input_size, 1, word2vec)
performances.append(str(acc))
line = ','.join(performances)
print(line)
writer.write(line+'\n')
###############################
############ main #############
###############################
if __name__ == "__main__":
writer = open('baseline_rnn/' + get_now_str() + '.csv', 'w')
for i in range(10, 24):
seed(i)
print(i)
compute_baselines(writer)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/methods.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/methods.py | from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.layers import Bidirectional
import keras.layers as layers
from keras.models import Sequential
from keras.models import load_model
from keras.callbacks import EarlyStopping
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
import math
import time
import numpy as np
import random
from random import randint
random.seed(3)
import datetime, re, operator
from random import shuffle
from time import gmtime, strftime
import gc
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #get rid of warnings
from os import listdir
from os.path import isfile, join, isdir
import pickle
#import data augmentation methods
from nlp_aug import *
###################################################
######### loading folders and txt files ###########
###################################################
#loading a pickle file
def load_pickle(file):
return pickle.load(open(file, 'rb'))
#create an output folder if it does not already exist
def confirm_output_folder(output_folder):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
#get full image paths
def get_txt_paths(folder):
txt_paths = [join(folder, f) for f in listdir(folder) if isfile(join(folder, f)) and '.txt' in f]
if join(folder, '.DS_Store') in txt_paths:
txt_paths.remove(join(folder, '.DS_Store'))
txt_paths = sorted(txt_paths)
return txt_paths
#get subfolders
def get_subfolder_paths(folder):
subfolder_paths = [join(folder, f) for f in listdir(folder) if (isdir(join(folder, f)) and '.DS_Store' not in f)]
if join(folder, '.DS_Store') in subfolder_paths:
subfolder_paths.remove(join(folder, '.DS_Store'))
subfolder_paths = sorted(subfolder_paths)
return subfolder_paths
#get all image paths
def get_all_txt_paths(master_folder):
all_paths = []
subfolders = get_subfolder_paths(master_folder)
if len(subfolders) > 1:
for subfolder in subfolders:
all_paths += get_txt_paths(subfolder)
else:
all_paths = get_txt_paths(master_folder)
return all_paths
###################################################
################ data processing ##################
###################################################
#get the pickle file for the word2vec so you don't have to load the entire huge file each time
def gen_vocab_dicts(folder, output_pickle_path, huge_word2vec):
vocab = set()
text_embeddings = open(huge_word2vec, 'r').readlines()
word2vec = {}
#get all the vocab
all_txt_paths = get_all_txt_paths(folder)
print(all_txt_paths)
#loop through each text file
for txt_path in all_txt_paths:
# get all the words
try:
all_lines = open(txt_path, "r").readlines()
for line in all_lines:
words = line[:-1].split(' ')
for word in words:
vocab.add(word)
except:
print(txt_path, "has an error")
print(len(vocab), "unique words found")
# load the word embeddings, and only add the word to the dictionary if we need it
for line in text_embeddings:
items = line.split(' ')
word = items[0]
if word in vocab:
vec = items[1:]
word2vec[word] = np.asarray(vec, dtype = 'float32')
print(len(word2vec), "matches between unique words and word2vec dictionary")
pickle.dump(word2vec, open(output_pickle_path, 'wb'))
print("dictionaries outputted to", output_pickle_path)
#getting the x and y inputs in numpy array form from the text file
def get_x_y(train_txt, num_classes, word2vec_len, input_size, word2vec, percent_dataset):
#read in lines
train_lines = open(train_txt, 'r').readlines()
shuffle(train_lines)
train_lines = train_lines[:int(percent_dataset*len(train_lines))]
num_lines = len(train_lines)
#initialize x and y matrix
x_matrix = None
y_matrix = None
try:
x_matrix = np.zeros((num_lines, input_size, word2vec_len))
except:
print("Error!", num_lines, input_size, word2vec_len)
y_matrix = np.zeros((num_lines, num_classes))
#insert values
for i, line in enumerate(train_lines):
parts = line[:-1].split('\t')
label = int(parts[0])
sentence = parts[1]
#insert x
words = sentence.split(' ')
words = words[:x_matrix.shape[1]] #cut off if too long
for j, word in enumerate(words):
if word in word2vec:
x_matrix[i, j, :] = word2vec[word]
#insert y
y_matrix[i][label] = 1.0
return x_matrix, y_matrix
###################################################
############### data augmentation #################
###################################################
def gen_tsne_aug(train_orig, output_file):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
writer.write(line)
for alpha in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
aug_sentence = eda_4(sentence, alpha_sr=alpha, alpha_ri=alpha, alpha_rs=alpha, p_rd=alpha, num_aug=2)[0]
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished eda for tsne for", train_orig, "to", output_file)
#generate more data with standard augmentation
def gen_standard_aug(train_orig, output_file, num_aug=9):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
aug_sentences = eda_4(sentence, num_aug=num_aug)
for aug_sentence in aug_sentences:
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished eda for", train_orig, "to", output_file)
#generate more data with only synonym replacement (SR)
def gen_sr_aug(train_orig, output_file, alpha_sr, n_aug):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
aug_sentences = SR(sentence, alpha_sr=alpha_sr, n_aug=n_aug)
for aug_sentence in aug_sentences:
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished SR for", train_orig, "to", output_file, "with alpha", alpha_sr)
#generate more data with only random insertion (RI)
def gen_ri_aug(train_orig, output_file, alpha_ri, n_aug):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
aug_sentences = RI(sentence, alpha_ri=alpha_ri, n_aug=n_aug)
for aug_sentence in aug_sentences:
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished RI for", train_orig, "to", output_file, "with alpha", alpha_ri)
#generate more data with only random swap (RS)
def gen_rs_aug(train_orig, output_file, alpha_rs, n_aug):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
aug_sentences = RS(sentence, alpha_rs=alpha_rs, n_aug=n_aug)
for aug_sentence in aug_sentences:
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished RS for", train_orig, "to", output_file, "with alpha", alpha_rs)
#generate more data with only random deletion (RD)
def gen_rd_aug(train_orig, output_file, alpha_rd, n_aug):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
aug_sentences = RD(sentence, alpha_rd=alpha_rd, n_aug=n_aug)
for aug_sentence in aug_sentences:
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished RD for", train_orig, "to", output_file, "with alpha", alpha_rd)
###################################################
##################### model #######################
###################################################
#building the model in keras
def build_model(sentence_length, word2vec_len, num_classes):
model = None
model = Sequential()
model.add(Bidirectional(LSTM(64, return_sequences=True), input_shape=(sentence_length, word2vec_len)))
model.add(Dropout(0.5))
model.add(Bidirectional(LSTM(32, return_sequences=False)))
model.add(Dropout(0.5))
model.add(Dense(20, activation='relu'))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#print(model.summary())
return model
#building the cnn in keras
def build_cnn(sentence_length, word2vec_len, num_classes):
model = None
model = Sequential()
model.add(layers.Conv1D(128, 5, activation='relu', input_shape=(sentence_length, word2vec_len)))
model.add(layers.GlobalMaxPooling1D())
model.add(Dense(20, activation='relu'))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
#one hot to categorical
def one_hot_to_categorical(y):
assert len(y.shape) == 2
return np.argmax(y, axis=1)
def get_now_str():
return str(strftime("%Y-%m-%d_%H:%M:%S", gmtime()))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/c_config.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/c_config.py | #user inputs
#size folders
sizes = ['3_standard']#, '4_full']#['1_tiny', '2_small', '3_standard', '4_full']
size_folders = ['size_data_f3/' + size for size in sizes]
#dataset folder
datasets = ['cr', 'sst2', 'subj', 'trec', 'pc']
#number of output classes
num_classes_list = [2, 2, 2, 6, 2]
#alpha values we care about
num_aug_list = [0.125, 0.25, 0.5, 1, 2, 4, 8, 16, 32]
#number of words for input
input_size_list = [50, 50, 50, 25, 25]
#word2vec dictionary
huge_word2vec = 'word2vec/glove.840B.300d.txt'
word2vec_len = 300 # don't want to load the huge pickle every time, so just save the words that are actually used into a smaller dictionary
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/nlp_aug.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/nlp_aug.py | # Easy data augmentation techniques for text classification
# Jason Wei, Chengyu Huang, Yifang Wei, Fei Xing, Kai Zou
import random
from random import shuffle
random.seed(1)
#stop words list
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our',
'ours', 'ourselves', 'you', 'your', 'yours',
'yourself', 'yourselves', 'he', 'him', 'his',
'himself', 'she', 'her', 'hers', 'herself',
'it', 'its', 'itself', 'they', 'them', 'their',
'theirs', 'themselves', 'what', 'which', 'who',
'whom', 'this', 'that', 'these', 'those', 'am',
'is', 'are', 'was', 'were', 'be', 'been', 'being',
'have', 'has', 'had', 'having', 'do', 'does', 'did',
'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or',
'because', 'as', 'until', 'while', 'of', 'at',
'by', 'for', 'with', 'about', 'against', 'between',
'into', 'through', 'during', 'before', 'after',
'above', 'below', 'to', 'from', 'up', 'down', 'in',
'out', 'on', 'off', 'over', 'under', 'again',
'further', 'then', 'once', 'here', 'there', 'when',
'where', 'why', 'how', 'all', 'any', 'both', 'each',
'few', 'more', 'most', 'other', 'some', 'such', 'no',
'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too',
'very', 's', 't', 'can', 'will', 'just', 'don',
'should', 'now', '']
#cleaning up text
import re
def get_only_chars(line):
clean_line = ""
line = line.replace("’", "")
line = line.replace("'", "")
line = line.replace("-", " ") #replace hyphens with spaces
line = line.replace("\t", " ")
line = line.replace("\n", " ")
line = line.lower()
for char in line:
if char in 'qwertyuiopasdfghjklzxcvbnm ':
clean_line += char
else:
clean_line += ' '
clean_line = re.sub(' +',' ',clean_line) #delete extra spaces
if clean_line[0] == ' ':
clean_line = clean_line[1:]
return clean_line
########################################################################
# Synonym replacement
# Replace n words in the sentence with synonyms from wordnet
########################################################################
#for the first time you use wordnet
#import nltk
#nltk.download('wordnet')
from nltk.corpus import wordnet
def synonym_replacement(words, n):
new_words = words.copy()
random_word_list = list(set([word for word in words if word not in stop_words]))
random.shuffle(random_word_list)
num_replaced = 0
for random_word in random_word_list:
synonyms = get_synonyms(random_word)
if len(synonyms) >= 1:
synonym = random.choice(list(synonyms))
new_words = [synonym if word == random_word else word for word in new_words]
#print("replaced", random_word, "with", synonym)
num_replaced += 1
if num_replaced >= n: #only replace up to n words
break
#this is stupid but we need it, trust me
sentence = ' '.join(new_words)
new_words = sentence.split(' ')
return new_words
def get_synonyms(word):
synonyms = set()
for syn in wordnet.synsets(word):
for l in syn.lemmas():
synonym = l.name().replace("_", " ").replace("-", " ").lower()
synonym = "".join([char for char in synonym if char in ' qwertyuiopasdfghjklzxcvbnm'])
synonyms.add(synonym)
if word in synonyms:
synonyms.remove(word)
return list(synonyms)
########################################################################
# Random deletion
# Randomly delete words from the sentence with probability p
########################################################################
def random_deletion(words, p):
#obviously, if there's only one word, don't delete it
if len(words) == 1:
return words
#randomly delete words with probability p
new_words = []
for word in words:
r = random.uniform(0, 1)
if r > p:
new_words.append(word)
#if you end up deleting all words, just return a random word
if len(new_words) == 0:
rand_int = random.randint(0, len(words)-1)
return [words[rand_int]]
return new_words
########################################################################
# Random swap
# Randomly swap two words in the sentence n times
########################################################################
def random_swap(words, n):
new_words = words.copy()
for _ in range(n):
new_words = swap_word(new_words)
return new_words
def swap_word(new_words):
random_idx_1 = random.randint(0, len(new_words)-1)
random_idx_2 = random_idx_1
counter = 0
while random_idx_2 == random_idx_1:
random_idx_2 = random.randint(0, len(new_words)-1)
counter += 1
if counter > 3:
return new_words
new_words[random_idx_1], new_words[random_idx_2] = new_words[random_idx_2], new_words[random_idx_1]
return new_words
########################################################################
# Random addition
# Randomly add n words into the sentence
########################################################################
def random_addition(words, n):
new_words = words.copy()
for _ in range(n):
add_word(new_words)
return new_words
def add_word(new_words):
synonyms = []
counter = 0
while len(synonyms) < 1:
random_word = new_words[random.randint(0, len(new_words)-1)]
synonyms = get_synonyms(random_word)
counter += 1
if counter >= 10:
return
random_synonym = synonyms[0]
random_idx = random.randint(0, len(new_words)-1)
new_words.insert(random_idx, random_synonym)
########################################################################
# main data augmentation function
########################################################################
def eda_4(sentence, alpha_sr=0.3, alpha_ri=0.2, alpha_rs=0.1, p_rd=0.15, num_aug=9):
sentence = get_only_chars(sentence)
words = sentence.split(' ')
words = [word for word in words if word is not '']
num_words = len(words)
augmented_sentences = []
num_new_per_technique = int(num_aug/4)+1
n_sr = max(1, int(alpha_sr*num_words))
n_ri = max(1, int(alpha_ri*num_words))
n_rs = max(1, int(alpha_rs*num_words))
#sr
for _ in range(num_new_per_technique):
a_words = synonym_replacement(words, n_sr)
augmented_sentences.append(' '.join(a_words))
#ri
for _ in range(num_new_per_technique):
a_words = random_addition(words, n_ri)
augmented_sentences.append(' '.join(a_words))
#rs
for _ in range(num_new_per_technique):
a_words = random_swap(words, n_rs)
augmented_sentences.append(' '.join(a_words))
#rd
for _ in range(num_new_per_technique):
a_words = random_deletion(words, p_rd)
augmented_sentences.append(' '.join(a_words))
augmented_sentences = [get_only_chars(sentence) for sentence in augmented_sentences]
shuffle(augmented_sentences)
#trim so that we have the desired number of augmented sentences
if num_aug >= 1:
augmented_sentences = augmented_sentences[:num_aug]
else:
keep_prob = num_aug / len(augmented_sentences)
augmented_sentences = [s for s in augmented_sentences if random.uniform(0, 1) < keep_prob]
#append the original sentence
augmented_sentences.append(sentence)
return augmented_sentences
def SR(sentence, alpha_sr, n_aug=9):
sentence = get_only_chars(sentence)
words = sentence.split(' ')
num_words = len(words)
augmented_sentences = []
n_sr = max(1, int(alpha_sr*num_words))
for _ in range(n_aug):
a_words = synonym_replacement(words, n_sr)
augmented_sentences.append(' '.join(a_words))
augmented_sentences = [get_only_chars(sentence) for sentence in augmented_sentences]
shuffle(augmented_sentences)
augmented_sentences.append(sentence)
return augmented_sentences
def RI(sentence, alpha_ri, n_aug=9):
sentence = get_only_chars(sentence)
words = sentence.split(' ')
num_words = len(words)
augmented_sentences = []
n_ri = max(1, int(alpha_ri*num_words))
for _ in range(n_aug):
a_words = random_addition(words, n_ri)
augmented_sentences.append(' '.join(a_words))
augmented_sentences = [get_only_chars(sentence) for sentence in augmented_sentences]
shuffle(augmented_sentences)
augmented_sentences.append(sentence)
return augmented_sentences
def RS(sentence, alpha_rs, n_aug=9):
sentence = get_only_chars(sentence)
words = sentence.split(' ')
num_words = len(words)
augmented_sentences = []
n_rs = max(1, int(alpha_rs*num_words))
for _ in range(n_aug):
a_words = random_swap(words, n_rs)
augmented_sentences.append(' '.join(a_words))
augmented_sentences = [get_only_chars(sentence) for sentence in augmented_sentences]
shuffle(augmented_sentences)
augmented_sentences.append(sentence)
return augmented_sentences
def RD(sentence, alpha_rd, n_aug=9):
sentence = get_only_chars(sentence)
words = sentence.split(' ')
words = [word for word in words if word is not '']
num_words = len(words)
augmented_sentences = []
for _ in range(n_aug):
a_words = random_deletion(words, alpha_rd)
augmented_sentences.append(' '.join(a_words))
augmented_sentences = [get_only_chars(sentence) for sentence in augmented_sentences]
shuffle(augmented_sentences)
augmented_sentences.append(sentence)
return augmented_sentences
########################################################################
# Testing
########################################################################
if __name__ == '__main__':
line = 'Hi. My name is Jason. I’m a third-year computer science major at Dartmouth College, interested in deep learning and computer vision. My advisor is Saeed Hassanpour. I’m currently working on deep learning for lung cancer classification.'
########################################################################
# Sliding window
# Slide a window of size w over the sentence with stride s
# Returns a list of lists of words
########################################################################
# def sliding_window_sentences(words, w, s):
# windows = []
# for i in range(0, len(words)-w+1, s):
# window = words[i:i+w]
# windows.append(window)
# return windows
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/e_2_rnn_baselines.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/e_2_rnn_baselines.py | from methods import *
from numpy.random import seed
seed(0)
from e_config import *
###############################
#### run model and get acc ####
###############################
def run_model(train_file, test_file, num_classes, input_size, percent_dataset, word2vec):
#initialize model
model = build_model(input_size, word2vec_len, num_classes)
#load data
train_x, train_y = get_x_y(train_file, num_classes, word2vec_len, input_size, word2vec, percent_dataset)
test_x, test_y = get_x_y(test_file, num_classes, word2vec_len, input_size, word2vec, 1)
#implement early stopping
callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
#train model
model.fit( train_x,
train_y,
epochs=100000,
callbacks=callbacks,
validation_split=0.1,
batch_size=1024,
shuffle=True,
verbose=0)
#model.save('checkpoints/lol')
#model = load_model('checkpoints/lol')
#evaluate model
y_pred = model.predict(test_x)
test_y_cat = one_hot_to_categorical(test_y)
y_pred_cat = one_hot_to_categorical(y_pred)
acc = accuracy_score(test_y_cat, y_pred_cat)
#clean memory???
train_x, train_y = None, None
gc.collect()
#return the accuracy
#print("data with shape:", train_x.shape, train_y.shape, 'train=', train_file, 'test=', test_file, 'with fraction', percent_dataset, 'had acc', acc)
return acc
###############################
### get baseline accuracies ###
###############################
def compute_baselines(writer):
#baseline computation
for size_folder in size_folders:
#get all six datasets
dataset_folders = [size_folder + '/' + s for s in datasets]
performances = []
#for each dataset
for i in range(len(dataset_folders)):
#initialize all the variables
dataset_folder = dataset_folders[i]
dataset = datasets[i]
num_classes = num_classes_list[i]
input_size = input_size_list[i]
word2vec_pickle = dataset_folder + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
train_path = dataset_folder + '/train_orig.txt'
test_path = 'size_data_t1/test/' + dataset + '/test.txt'
acc = run_model(train_path, test_path, num_classes, input_size, 1, word2vec)
performances.append(str(acc))
line = ','.join(performances)
print(line)
writer.write(line+'\n')
###############################
############ main #############
###############################
if __name__ == "__main__":
writer = open('baseline_rnn/' + get_now_str() + '.csv', 'w')
for i in range(10, 24):
seed(i)
print(i)
compute_baselines(writer)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/c_1_data_process.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/c_1_data_process.py | from methods import *
from c_config import *
if __name__ == "__main__":
#generate the augmented data sets
for size_folder in size_folders:
dataset_folders = [size_folder + '/' + s for s in datasets]
#for each dataset
for dataset_folder in dataset_folders:
train_orig = dataset_folder + '/train_orig.txt'
#for each n_aug value
for num_aug in num_aug_list:
output_file = dataset_folder + '/train_' + str(num_aug) + '.txt'
#generate the augmented data
if num_aug > 4 and '4_full/pc' in train_orig:
gen_standard_aug(train_orig, output_file, num_aug=4)
else:
gen_standard_aug(train_orig, output_file, num_aug=num_aug)
#generate the vocab dictionary
word2vec_pickle = dataset_folder + '/word2vec.p'
gen_vocab_dicts(dataset_folder, word2vec_pickle, huge_word2vec)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/d_1_train_models.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/d_1_train_models.py | from methods import *
from numpy.random import seed
seed(0)
###############################
#### run model and get acc ####
###############################
def run_model(train_file, test_file, num_classes, model_output_path):
#initialize model
model = build_model(input_size, word2vec_len, num_classes)
#load data
train_x, train_y = get_x_y(train_file, num_classes, word2vec_len, input_size, word2vec, 1)
test_x, test_y = get_x_y(test_file, num_classes, word2vec_len, input_size, word2vec, 1)
#implement early stopping
callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
#train model
model.fit( train_x,
train_y,
epochs=100000,
callbacks=callbacks,
validation_split=0.1,
batch_size=1024,
shuffle=True,
verbose=0)
#save the model
model.save(model_output_path)
#model = load_model('checkpoints/lol')
#evaluate model
y_pred = model.predict(test_x)
test_y_cat = one_hot_to_categorical(test_y)
y_pred_cat = one_hot_to_categorical(y_pred)
acc = accuracy_score(test_y_cat, y_pred_cat)
#clean memory???
train_x, train_y = None, None
#return the accuracy
#print("data with shape:", train_x.shape, train_y.shape, 'train=', train_file, 'test=', test_file, 'with fraction', percent_dataset, 'had acc', acc)
return acc
if __name__ == "__main__":
#parameters
dataset_folders = ['increment_datasets_f2/trec', 'increment_datasets_f2/pc']
output_paths = ['outputs_f4/trec_aug.h5', 'outputs_f4/pc_aug.h5']
num_classes_list = [6, 2]
input_size_list = [25, 25]
#word2vec dictionary
word2vec_len = 300
for i, dataset_folder in enumerate(dataset_folders):
num_classes = num_classes_list[i]
input_size = input_size_list[i]
output_path = output_paths[i]
train_orig = dataset_folder + '/train_aug_st.txt'
test_path = dataset_folder + '/test.txt'
word2vec_pickle = dataset_folder + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
#train model and save
acc = run_model(train_orig, test_path, num_classes, output_path)
print(dataset_folder, acc) | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/bg_clean.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/bg_clean.py |
from utils import *
def clean_csv(input_file, output_file):
input_r = open(input_file, 'r').read()
lines = input_r.split(',,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,')
print(len(lines))
for line in lines[:10]:
print(line[-3:])
if __name__ == "__main__":
    #clean the raw blog-gender dataset into the training csv
    clean_csv('raw/blog-gender-dataset.csv', 'datasets/bg/train.csv')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/subj_clean.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/subj_clean.py | from utils import *
if __name__ == "__main__":

    #subjective vs objective sentences from the rotten_imdb corpus
    subj_path = "subj/rotten_imdb/subj.txt"
    obj_path = "subj/rotten_imdb/plot.tok.gt9.5000"

    subj_lines = open(subj_path, 'r').readlines()
    obj_lines = open(obj_path, 'r').readlines()
    print(len(subj_lines), len(obj_lines))

    #first 90% of each class goes to train, the rest to test
    test_split = int(0.9*len(subj_lines))

    train_lines = []
    test_lines = []

    #label 1 = subjective, label 0 = objective
    for label, source_lines in (('1', subj_lines), ('0', obj_lines)):
        for raw_line in source_lines[:test_split]:
            train_lines.append(label + '\t' + get_only_chars(raw_line[:-1]))
        for raw_line in source_lines[test_split:]:
            test_lines.append(label + '\t' + get_only_chars(raw_line[:-1]))

    print(len(test_lines), len(train_lines))

    #print training set
    with open('datasets/subj/train_orig.txt', 'w') as writer:
        for line in train_lines:
            writer.write(line + '\n')

    #print testing set
    with open('datasets/subj/test.txt', 'w') as writer:
        for line in test_lines:
            writer.write(line + '\n')
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/sst1_clean.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/sst1_clean.py | from utils import *
def get_label(decimal):
    """Bucket a sentiment score in [0, 1] into one of five classes (0-4).

    Band edges are inclusive on the right (e.g. 0.2 maps to class 0);
    out-of-range scores yield -1.
    """
    if decimal < 0 or decimal > 1:
        return -1
    for label, upper_bound in enumerate((0.2, 0.4, 0.6, 0.8)):
        if decimal <= upper_bound:
            return label
    #anything remaining lies in (0.8, 1]
    return 4
def get_label_binary(decimal):
    """Map a sentiment score in [0, 1] to 0 (negative) or 1 (positive).

    Scores in the neutral band (0.4, 0.6] and out-of-range values yield -1.
    """
    if 0 <= decimal <= 0.4:
        return 0
    if 0.6 < decimal <= 1:
        return 1
    return -1
def get_split(split_num):
    """Map a datasetSplit.txt split id to 'train' (ids 1 and 3) or 'test' (id 2).

    Any other id yields None, matching the original implicit fall-through.
    """
    return {1: 'train', 2: 'test', 3: 'train'}.get(split_num)
if __name__ == "__main__":

    #source files from the Stanford Sentiment Treebank distribution
    data_path = 'raw/sst_1/stanfordSentimentTreebank/datasetSentences.txt'
    labels_path = 'raw/sst_1/stanfordSentimentTreebank/sentiment_labels.txt'
    split_path = 'raw/sst_1/stanfordSentimentTreebank/datasetSplit.txt'
    dictionary_path = 'raw/sst_1/stanfordSentimentTreebank/dictionary.txt'

    sentence_lines = open(data_path, 'r').readlines()
    labels_lines = open(labels_path, 'r').readlines()
    split_lines = open(split_path, 'r').readlines()
    dictionary_lines = open(dictionary_path, 'r').readlines()
    #line counts, for eyeballing that the four files are consistent
    print(len(sentence_lines))
    print(len(split_lines))
    print(len(labels_lines))
    print(len(dictionary_lines))

    #create dictionary for id to label
    #sentiment_labels.txt rows look like "phrase_id|score" (header skipped);
    #scores are binarized, with -1 marking neutral phrases to be dropped later
    id_to_label = {}
    for line in labels_lines[1:]:
        parts = line[:-1].split("|")
        _id = parts[0]
        score = float(parts[1])
        label = get_label_binary(score)
        id_to_label[_id] = label
    print(len(id_to_label), "id to labels read in")

    #create dictionary for phrase to label
    #dictionary.txt rows look like "phrase|phrase_id"
    phrase_to_label = {}
    for line in dictionary_lines:
        parts = line[:-1].split("|")
        phrase = parts[0]
        _id = parts[1]
        label = id_to_label[_id]
        phrase_to_label[phrase] = label
    print(len(phrase_to_label), "phrase to id read in")

    #create id to split
    #datasetSplit.txt rows look like "sentence_id,split_num"; get_split sends
    #split ids 1 and 3 to 'train' and 2 to 'test'
    id_to_split = {}
    for line in split_lines[1:]:
        parts = line[:-1].split(",")
        _id = parts[0]
        split_num = float(parts[1])
        split = get_split(split_num)
        id_to_split[_id] = split
    print(len(id_to_split), "id to split read in")

    #NOTE(review): despite the sst_1 input folder, binary labels are written
    #to the sst2 output files — confirm this is intended
    train_writer = open('datasets/sst2/train_orig.txt', 'w')
    test_writer = open('datasets/sst2/test.txt', 'w')

    #create sentence to split and label
    #only sentences whose raw text appears in the phrase dictionary with a
    #non-neutral (0/1) label are kept; the sentence text itself is cleaned
    for sentence_line in sentence_lines[1:]:
        parts = sentence_line[:-1].split('\t')
        _id = parts[0]
        sentence = get_only_chars(parts[1])
        split = id_to_split[_id]
        if parts[1] in phrase_to_label:
            label = phrase_to_label[parts[1]]
            if label in {0, 1}:
                #print(label, sentence, split)
                if split == 'train':
                    train_writer.write(str(label) + '\t' + sentence + '\n')
                elif split == 'test':
                    test_writer.write(str(label) + '\t' + sentence + '\n')

    #print(parts, split)
    #label = []
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/shuffle_lines.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/shuffle_lines.py | import random
def shuffle_lines(text_file):
    """Randomly reorder the lines of *text_file* in place."""
    with open(text_file) as f:
        lines = f.readlines()
    random.shuffle(lines)
    #write back under a context manager so the file is flushed and closed
    #deterministically (the original relied on GC to close both handles)
    with open(text_file, 'w') as f:
        f.writelines(lines)

#guard the script action so importing this module doesn't shuffle anything
if __name__ == "__main__":
    shuffle_lines('special_f4/pc/test_short_aug_shuffle.txt')
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/procon_clean.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/procon_clean.py |
from utils import *
def get_good_stuff(line):
    """Strip the surrounding tags from a raw pros/cons line and clean the text."""
    #the payload starts right after the first 's>' (end of the opening tag)
    #and stops 8 characters before the end of the line (the closing tag)
    start = line.find('s>') + 2
    return get_only_chars(line[start:-8])
def clean_file(con_file, pro_file, output_train, output_test):
    """Convert the raw pros/cons files into labeled train/test files.

    Cons get label 0 and pros get label 1. The first 90% of each input file
    goes to *output_train* and the remaining 10% to *output_test*; lines whose
    cleaned content is shorter than 8 characters are dropped.
    """
    train_writer = open(output_train, 'w')
    test_writer = open(output_test, 'w')
    for input_file, label in ((con_file, '0'), (pro_file, '1')):
        lines = open(input_file, 'r').readlines()
        #bug fix: the 90/10 split point is now computed per file; previously
        #the pros file was split at 90% of the *cons* file's length
        split = int(len(lines) * 0.9)
        for line in lines[:split]:
            content = get_good_stuff(line)
            if len(content) >= 8:
                train_writer.write(label + '\t' + content + '\n')
        for line in lines[split:]:
            content = get_good_stuff(line)
            if len(content) >= 8:
                test_writer.write(label + '\t' + content + '\n')
    #close explicitly so the outputs are flushed even under non-CPython GC
    train_writer.close()
    test_writer.close()
if __name__ == '__main__':
    #raw pros/cons corpus in, labeled train/test files out
    clean_file('raw/pros-cons/integratedCons.txt',
               'raw/pros-cons/integratedPros.txt',
               'datasets/procon/train.txt',
               'datasets/procon/test.txt')
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/trej_clean.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/trej_clean.py |
from utils import *
#TREC question-type tag -> integer class id
class_name_to_num = {'DESC': 0, 'ENTY':1, 'ABBR':2, 'HUM': 3, 'LOC': 4, 'NUM': 5}
def clean(input_file, output_file):
    """Convert raw TREC lines ("TAG:subtag sentence") into "class<TAB>sentence" lines."""
    with open(output_file, 'w') as writer:
        for line in open(input_file, 'r').readlines():
            tokens = line[:-1].split(' ')
            #the first token carries the coarse tag before the colon
            tag = tokens[0].split(':')[0]
            class_num = class_name_to_num[tag]
            sentence = get_only_chars(' '.join(tokens[1:]))
            print(tag, class_num, sentence)
            writer.write(str(class_num) + '\t' + sentence + '\n')
if __name__ == "__main__":
    #clean both the training and the testing split
    for raw_path, clean_path in (('raw/trec/train_copy.txt', 'datasets/trec/train_orig.txt'),
                                 ('raw/trec/test_copy.txt', 'datasets/trec/test.txt')):
        clean(raw_path, clean_path)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/utils.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/utils.py | import re
#cleaning up text
def get_only_chars(line):
    """Lowercase *line* and reduce it to a-z words separated by single spaces.

    " 's" becomes " is", hyphens/tabs/newlines become spaces, apostrophes are
    dropped, and every other non-letter character is turned into a space
    before runs of spaces are collapsed.
    """
    line = line.lower()
    line = line.replace(" 's", " is")
    line = line.replace("-", " ") #replace hyphens with spaces
    line = line.replace("\t", " ")
    line = line.replace("\n", " ")
    line = line.replace("'", "")

    #keep only lowercase letters and spaces
    clean_line = ""
    for char in line:
        if char in 'qwertyuiopasdfghjklzxcvbnm ':
            clean_line += char
        else:
            clean_line += ' '

    clean_line = re.sub(' +',' ',clean_line) #delete extra spaces
    #removed a leftover debug print (the eda.py copy of this function has
    #none); startswith also fixes the IndexError the old clean_line[0]
    #check raised on empty input
    if clean_line.startswith(' '):
        clean_line = clean_line[1:]
    return clean_line
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/create_dataset_increments.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/create_dataset_increments.py | import os
#copy each dataset's test file into the sized-datasets tree via the shell
datasets = ['cr', 'pc', 'sst1', 'sst2', 'subj', 'trec']
for dataset in datasets:
    command = ('cat increment_datasets_f2/' + dataset + '/test.txt'
               ' > sized_datasets_f1/test/' + dataset + '/test.txt')
    os.system(command)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/cr_clean.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/cr_clean.py | #0 = neg, 1 = pos
from utils import *
def retrieve_reviews(line):
reviews = set()
chars = list(line)
for i, char in enumerate(chars):
if char == '[':
if chars[i+1] == '-':
reviews.add(0)
elif chars[i+1] == '+':
reviews.add(1)
reviews = list(reviews)
if len(reviews) == 2:
return -2
elif len(reviews) == 1:
return reviews[0]
else:
return -1
def clean_files(input_files, output_file):
writer = open(output_file, 'w')
for input_file in input_files:
print(input_file)
input_lines = open(input_file, 'r').readlines()
counter = 0
bad_counter = 0
for line in input_lines:
review = retrieve_reviews(line)
if review in {0, 1}:
good_line = get_only_chars(re.sub("([\(\[]).*?([\)\]])", "\g<1>\g<2>", line))
output_line = str(review) + '\t' + good_line
writer.write(output_line + '\n')
counter += 1
elif review == -2:
bad_counter +=1
print(input_file, counter, bad_counter)
writer.close()
if __name__ == '__main__':
input_files = ['all.txt']#['canon_power.txt', 'canon_s1.txt', 'diaper.txt', 'hitachi.txt', 'ipod.txt', 'micromp3.txt', 'nokia6600.txt', 'norton.txt', 'router.txt']
input_files = ['raw/cr/data_new/' + f for f in input_files]
output_file = 'datasets/cr/apex_clean.txt'
clean_files(input_files, output_file)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/copy_sized_datasets.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/copy_sized_datasets.py | import os
#copy every size/dataset training file into the size_data_t1 tree,
#creating the destination folders on demand
sizes = ['1_tiny', '2_small', '3_standard', '4_full']
datasets = ['sst2', 'cr', 'subj', 'trec', 'pc']
for size in sizes:
    for dataset in datasets:
        folder = 'size_data_t1/' + size + '/' + dataset
        if not os.path.exists(folder):
            os.makedirs(folder)
        origin = 'sized_datasets_f1/' + size + '/' + dataset + '/train_orig.txt'
        destination = folder + '/train_orig.txt'
        os.system('cp ' + origin + ' ' + destination)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/preprocess/get_stats.py | augmentation/text_augmentation/helpers/eda_nlp/preprocess/get_stats.py | import statistics
#the five benchmark datasets and their original (unaugmented) training files
datasets = ['sst2', 'cr', 'subj', 'trec', 'pc']
filenames = ['increment_datasets_f2/' + x + '/train_orig.txt' for x in datasets]
def get_vocab_size(filename):
    """Count distinct space-separated tokens in *filename* (label column included)."""
    vocab = set()
    for line in open(filename, 'r').readlines():
        vocab.update(line[:-1].split(' '))
    return len(vocab)
def get_mean_and_std(filename):
    """Print mean, stdev and max sentence length for *filename*.

    Length is the space-separated token count minus one (the label column).
    """
    line_lengths = [len(line[:-1].split(' ')) - 1
                    for line in open(filename, 'r').readlines()]
    print(filename, statistics.mean(line_lengths), statistics.stdev(line_lengths), max(line_lengths))
#report length statistics for every dataset's training file
#(vocab-size reporting is left disabled)
for filename in filenames:
    #print(get_vocab_size(filename))
    get_mean_and_std(filename)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/code/eda.py | augmentation/text_augmentation/helpers/eda_nlp/code/eda.py | # Easy data augmentation techniques for text classification
# Jason Wei and Kai Zou
import random
from random import shuffle
random.seed(1)
#stop words list
#words that are never chosen for synonym replacement; the final '' entry
#also filters out empty tokens
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our',
            'ours', 'ourselves', 'you', 'your', 'yours',
            'yourself', 'yourselves', 'he', 'him', 'his',
            'himself', 'she', 'her', 'hers', 'herself',
            'it', 'its', 'itself', 'they', 'them', 'their',
            'theirs', 'themselves', 'what', 'which', 'who',
            'whom', 'this', 'that', 'these', 'those', 'am',
            'is', 'are', 'was', 'were', 'be', 'been', 'being',
            'have', 'has', 'had', 'having', 'do', 'does', 'did',
            'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or',
            'because', 'as', 'until', 'while', 'of', 'at',
            'by', 'for', 'with', 'about', 'against', 'between',
            'into', 'through', 'during', 'before', 'after',
            'above', 'below', 'to', 'from', 'up', 'down', 'in',
            'out', 'on', 'off', 'over', 'under', 'again',
            'further', 'then', 'once', 'here', 'there', 'when',
            'where', 'why', 'how', 'all', 'any', 'both', 'each',
            'few', 'more', 'most', 'other', 'some', 'such', 'no',
            'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too',
            'very', 's', 't', 'can', 'will', 'just', 'don',
            'should', 'now', '']
#cleaning up text
import re
def get_only_chars(line):
    """Lowercase *line* and reduce it to a-z words separated by single spaces.

    Curly and straight apostrophes are dropped, hyphens/tabs/newlines become
    spaces, and any other non-letter character is replaced by a space before
    runs of spaces are collapsed.
    """
    line = line.replace("’", "")
    line = line.replace("'", "")
    line = line.replace("-", " ") #replace hyphens with spaces
    line = line.replace("\t", " ")
    line = line.replace("\n", " ")
    line = line.lower()

    #keep only lowercase letters and spaces
    clean_line = ""
    for char in line:
        if char in 'qwertyuiopasdfghjklzxcvbnm ':
            clean_line += char
        else:
            clean_line += ' '

    clean_line = re.sub(' +',' ',clean_line) #delete extra spaces
    #drop the single possible leading space; startswith avoids the
    #IndexError the old clean_line[0] check raised on empty input
    if clean_line.startswith(' '):
        clean_line = clean_line[1:]
    return clean_line
########################################################################
# Synonym replacement
# Replace n words in the sentence with synonyms from wordnet
########################################################################
#for the first time you use wordnet
#import nltk
#nltk.download('wordnet')
from nltk.corpus import wordnet
def synonym_replacement(words, n):
	"""Return a copy of *words* with up to *n* distinct non-stop-words each
	replaced (in all their occurrences) by a randomly chosen WordNet synonym."""
	new_words = words.copy()
	#candidate words to replace: unique non-stop-words, visited in random order
	random_word_list = list(set([word for word in words if word not in stop_words]))
	random.shuffle(random_word_list)
	num_replaced = 0
	for random_word in random_word_list:
		synonyms = get_synonyms(random_word)
		if len(synonyms) >= 1:
			synonym = random.choice(list(synonyms))
			#replace every occurrence of the chosen word
			new_words = [synonym if word == random_word else word for word in new_words]
			#print("replaced", random_word, "with", synonym)
			num_replaced += 1
		if num_replaced >= n: #only replace up to n words
			break

	#this is stupid but we need it, trust me
	#(rejoining and re-splitting makes multi-word synonyms count as
	#separate tokens instead of one token containing a space)
	sentence = ' '.join(new_words)
	new_words = sentence.split(' ')

	return new_words
def get_synonyms(word):
    """Return lowercase, letters-and-spaces-only WordNet lemma names for
    *word*, excluding *word* itself."""
    results = set()
    for synset in wordnet.synsets(word):
        for lemma in synset.lemmas():
            name = lemma.name().replace("_", " ").replace("-", " ").lower()
            name = "".join(ch for ch in name if ch in ' qwertyuiopasdfghjklzxcvbnm')
            results.add(name)
    # never offer the original word as its own synonym
    results.discard(word)
    return list(results)
########################################################################
# Random deletion
# Randomly delete words from the sentence with probability p
########################################################################
def random_deletion(words, p):
    """Drop each token independently with probability p.

    A single-token input is returned untouched, and if every token is
    dropped one surviving token is picked at random so the result is
    never empty.
    """
    if len(words) == 1:
        #obviously, if there's only one word, don't delete it
        return words
    kept = [w for w in words if random.uniform(0, 1) > p]
    if not kept:
        # everything was deleted: fall back to one random original token
        return [random.choice(words)]
    return kept
########################################################################
# Random swap
# Randomly swap two words in the sentence n times
########################################################################
def random_swap(words, n):
    """Return a copy of *words* with n random pair-swaps applied."""
    swapped = words.copy()
    count = 0
    while count < n:
        swapped = swap_word(swapped)
        count += 1
    return swapped
def swap_word(new_words):
    """Swap two distinct random positions of *new_words* in place.

    If a distinct second index is not found within a few draws (only
    possible for very short lists), the list is returned unchanged.
    """
    idx_a = random.randint(0, len(new_words)-1)
    idx_b = idx_a
    attempts = 0
    while idx_b == idx_a:
        idx_b = random.randint(0, len(new_words)-1)
        attempts += 1
        if attempts > 3:
            # give up rather than loop forever on a length-1 list
            return new_words
    new_words[idx_a], new_words[idx_b] = new_words[idx_b], new_words[idx_a]
    return new_words
########################################################################
# Random insertion
# Randomly insert n words into the sentence
########################################################################
def random_insertion(words, n):
    """Return a copy of *words* with n synonyms inserted at random spots."""
    augmented = words.copy()
    count = 0
    while count < n:
        add_word(augmented)
        count += 1
    return augmented
def add_word(new_words):
    """Insert one synonym of a randomly chosen token into *new_words* (in place).

    Gives up silently after 10 failed attempts to find a token that has
    any synonyms at all.
    """
    syns = []
    tries = 0
    while not syns:
        pick = new_words[random.randint(0, len(new_words)-1)]
        syns = get_synonyms(pick)
        tries += 1
        if tries >= 10:
            return
    new_words.insert(random.randint(0, len(new_words)-1), syns[0])
########################################################################
# main data augmentation function
########################################################################
def eda(sentence, alpha_sr=0.1, alpha_ri=0.1, alpha_rs=0.1, p_rd=0.1, num_aug=9):
    """Generate augmented variants of *sentence* via the four EDA operations.

    alpha_sr/alpha_ri/alpha_rs: fraction of words to synonym-replace,
    randomly insert, and randomly swap; p_rd: per-word deletion
    probability; num_aug: number of augmented sentences to return
    (the original cleaned sentence is appended as well).
    """
    sentence = get_only_chars(sentence)
    words = sentence.split(' ')
    # use '!=' rather than 'is not': identity comparison against a str
    # literal is implementation-dependent and warns on Python 3.8+
    words = [word for word in words if word != '']
    num_words = len(words)

    augmented_sentences = []
    num_new_per_technique = int(num_aug/4)+1
    n_sr = max(1, int(alpha_sr*num_words))
    n_ri = max(1, int(alpha_ri*num_words))
    n_rs = max(1, int(alpha_rs*num_words))

    #sr
    for _ in range(num_new_per_technique):
        a_words = synonym_replacement(words, n_sr)
        augmented_sentences.append(' '.join(a_words))
    #ri
    for _ in range(num_new_per_technique):
        a_words = random_insertion(words, n_ri)
        augmented_sentences.append(' '.join(a_words))
    #rs
    for _ in range(num_new_per_technique):
        a_words = random_swap(words, n_rs)
        augmented_sentences.append(' '.join(a_words))
    #rd
    for _ in range(num_new_per_technique):
        a_words = random_deletion(words, p_rd)
        augmented_sentences.append(' '.join(a_words))

    augmented_sentences = [get_only_chars(sentence) for sentence in augmented_sentences]
    shuffle(augmented_sentences)

    #trim so that we have the desired number of augmented sentences
    if num_aug >= 1:
        augmented_sentences = augmented_sentences[:num_aug]
    else:
        # fractional num_aug keeps each candidate with probability num_aug/len
        keep_prob = num_aug / len(augmented_sentences)
        augmented_sentences = [s for s in augmented_sentences if random.uniform(0, 1) < keep_prob]

    #append the original sentence
    augmented_sentences.append(sentence)
    return augmented_sentences
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/code/augment.py | augmentation/text_augmentation/helpers/eda_nlp/code/augment.py | # Easy data augmentation techniques for text classification
# Jason Wei and Kai Zou
from eda import *
#arguments to be parsed from command line
import argparse
# command-line interface for the EDA augmenter
ap = argparse.ArgumentParser()
ap.add_argument("--input", required=True, type=str, help="input file of unaugmented data")
# NOTE(review): this help string says "unaugmented" but --output receives the
# AUGMENTED data -- runtime string left untouched here, flagged for a fix
ap.add_argument("--output", required=False, type=str, help="output file of unaugmented data")
ap.add_argument("--num_aug", required=False, type=int, help="number of augmented sentences per original sentence")
ap.add_argument("--alpha", required=False, type=float, help="percent of words in each sentence to be changed")
args = ap.parse_args()
#the output file: default to eda_<inputname> next to the input file
output = None
if args.output:
	output = args.output
else:
	from os.path import dirname, basename, join
	output = join(dirname(args.input), 'eda_' + basename(args.input))
#number of augmented sentences to generate per original sentence
num_aug = 9 #default
if args.num_aug:
	num_aug = args.num_aug
#how much to change each sentence (shared alpha for sr/ri/rs/rd)
alpha = 0.1#default
if args.alpha:
	alpha = args.alpha
#generate more data with standard augmentation
def gen_eda(train_orig, output_file, alpha, num_aug=9):
    """Augment every '<label>\\t<sentence>' line of *train_orig* with EDA
    and write all variants (plus the original) to *output_file*.

    Uses context managers so both files are closed even if eda() raises;
    the previous version leaked the writer handle on any exception.
    """
    with open(train_orig, 'r') as reader, open(output_file, 'w') as writer:
        for line in reader:
            parts = line[:-1].split('\t')
            label = parts[0]
            sentence = parts[1]
            aug_sentences = eda(sentence, alpha_sr=alpha, alpha_ri=alpha, alpha_rs=alpha, p_rd=alpha, num_aug=num_aug)
            for aug_sentence in aug_sentences:
                writer.write(label + "\t" + aug_sentence + '\n')
    print("generated augmented sentences with eda for " + train_orig + " to " + output_file + " with num_aug=" + str(num_aug))
#main function
if __name__ == "__main__":
    #generate augmented sentences and output into a new file
    # (removed dataset-export metadata that was fused onto this line and
    # made the file a syntax error)
    gen_eda(args.input, output, alpha=alpha, num_aug=num_aug)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/augment_time.py | augmentation/audio_augmentation/augment_time.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ ___ _ _
/ _ \ | ___ \_ _| _ / _ \ | (_)
/ /_\ \| |_/ / | | (_) / /_\ \_ _ __| |_ ___
| _ || __/ | | | _ | | | |/ _` | |/ _ \
| | | || | _| |_ _ | | | | |_| | (_| | | (_) |
\_| |_/\_| \___/ (_) \_| |_/\__,_|\__,_|_|\___/
Stretches files by 0.5x, 1.5x, and 2x.
'''
import os, librosa
def augment_time(filename):
    """Write two time-stretched copies of *filename*.

    '_stretch_0' is 1.5x faster and '_stretch_2' is 0.75x speed (i.e.
    slower); returns [original, fast_copy, slow_copy].
    """
    stem = filename[0:-4]
    samples, rate = librosa.load(filename)
    # speed up by 50%
    faster = librosa.effects.time_stretch(samples, 1.5)
    librosa.output.write_wav(stem+'_stretch_0.wav', faster, rate)
    # slow down to three-quarter speed
    slower = librosa.effects.time_stretch(samples, 0.75)
    librosa.output.write_wav(stem+'_stretch_2.wav', slower, rate)
    return [filename, stem+'_stretch_0.wav', stem+'_stretch_2.wav']
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/augment_volume.py | augmentation/audio_augmentation/augment_volume.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ ___ _ _
/ _ \ | ___ \_ _| _ / _ \ | (_)
/ /_\ \| |_/ / | | (_) / /_\ \_ _ __| |_ ___
| _ || __/ | | | _ | | | |/ _` | |/ _ \
| | | || | _| |_ _ | | | | |_| | (_| | | (_) |
\_| |_/\_| \___/ (_) \_| |_/\__,_|\__,_|_|\___/
takes in an audio file and outputs files normalized to
different volumes. This corrects for microphone distance and ages.
Note that in using ffmpeg-normalize, this mimicks real world-use.
An alternative could be to use SoX to move up or down volume.
'''
import os
def augment_volume(filename):
    """Create volume-shifted copies of *filename* via SoX.

    Writes a peak-normalized copy (not returned) with ffmpeg-normalize,
    then a 3x louder and a 0.33x quieter copy.  Returns
    [original, louder_file, quieter_file].

    Filenames are shell-quoted so paths with spaces (or shell
    metacharacters) no longer break/abuse the os.system() calls.
    """
    import shlex  # local import: only needed to quote shell arguments

    def change_volume(path, vol):
        # scale amplitude by `vol`; filename records the direction of change
        if vol > 1:
            new_file = path[0:-4]+'_increase_'+str(vol)+'.wav'
        else:
            new_file = path[0:-4]+'_decrease_'+str(vol)+'.wav'
        os.system('sox -v %s %s %s'%(str(vol), shlex.quote(path), shlex.quote(new_file)))
        return new_file

    basefile = filename[0:-4]
    # peak normalization (corrects for microphone distance differences)
    os.system('ffmpeg-normalize %s -nt peak -t 0 -o %s'%(
        shlex.quote(filename), shlex.quote(basefile+'_peak_normalized.wav')))
    filenames = [filename]
    # boost volume by 3x (earlier comments said "2x"; the factor is 3)
    filenames.append(change_volume(filename, 3))
    # cut volume to roughly one third
    filenames.append(change_volume(filename, 0.33))
    return filenames
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/augment_randomsplice.py | augmentation/audio_augmentation/augment_randomsplice.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ ___ _ _
/ _ \ | ___ \_ _| _ / _ \ | (_)
/ /_\ \| |_/ / | | (_) / /_\ \_ _ __| |_ ___
| _ || __/ | | | _ | | | |/ _` | |/ _ \
| | | || | _| |_ _ | | | | |_| | (_| | | (_) |
\_| |_/\_| \___/ (_) \_| |_/\__,_|\__,_|_|\___/
Randomly cut an audio file into a segment of 1 to 5 seconds.
'''
import os, random
import soundfile as sf
def augment_randomsplice(filename):
    """Write a random 1-5 second slice of *filename* to 'snipped<n>_<filename>'.

    Returns [original, sliced_file].  The slice length is clamped to the
    clip duration: the previous version called
    random.randint(0, totalseconds-(slicenum+1)) and raised ValueError
    whenever the clip was shorter than slicenum+1 seconds.
    """
    data, samplerate = sf.read(filename)
    totalseconds = int(len(data)/samplerate)
    # clamp slice length into [1, min(5, duration-1)]
    slicenum = random.randint(1, max(1, min(5, totalseconds-1)))
    startsec = random.randint(0, max(0, totalseconds-(slicenum+1)))
    endsec = startsec+slicenum
    startframe = samplerate*startsec
    endframe = samplerate*endsec
    newfile = 'snipped%s_'%(str(slicenum))+filename
    # numpy slicing clamps automatically if endframe overshoots the data
    sf.write(newfile, data[int(startframe):int(endframe)], samplerate)
    return [filename, newfile]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/augment_silence.py | augmentation/audio_augmentation/augment_silence.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ ___ _ _
/ _ \ | ___ \_ _| _ / _ \ | (_)
/ /_\ \| |_/ / | | (_) / /_\ \_ _ __| |_ ___
| _ || __/ | | | _ | | | |/ _` | |/ _ \
| | | || | _| |_ _ | | | | |_| | (_| | | (_) |
\_| |_/\_| \___/ (_) \_| |_/\__,_|\__,_|_|\___/
Adds silence to an audio file.
'''
import os
def augment_silence(filename):
    """Trim silence from *filename* with the SoX `silence` effect.

    Writes '<stem>_trimmed.wav' (leading silence removed, internal
    pauses longer than 2 s collapsed) and returns [original, trimmed].
    """
    trimmed = filename[0:-4]+'_trimmed.wav'
    os.system(f'sox {filename} {trimmed} silence -l 1 0.1 1% -1 2.0 1%')
    return [filename, trimmed]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/augment_tsaug.py | augmentation/audio_augmentation/augment_tsaug.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ ___ _ _
/ _ \ | ___ \_ _| _ / _ \ | (_)
/ /_\ \| |_/ / | | (_) / /_\ \_ _ __| |_ ___
| _ || __/ | | | _ | | | |/ _` | |/ _ \
| | | || | _| |_ _ | | | | |_| | (_| | | (_) |
\_| |_/\_| \___/ (_) \_| |_/\__,_|\__,_|_|\___/
Random crop subsequences of randomly spliced audio file.
with 50% probability, add random noise up to 1% - 5%,
drop out 10% of the time points (dropped out units are 1 ms,
10 ms, or 100 ms) and fill the dropped out points with zeros.
More info @ https://tsaug.readthedocs.io/en/stable/
'''
import os, librosa
import numpy as np
import random
from tsaug import Crop, AddNoise, Dropout
import soundfile as sf
def augment_tsaug(filename):
	"""Augment *filename* with a tsaug pipeline and write 'tsaug_<filename>'.

	Pipeline: 5 random crops of a random `splice`-second window, random
	noise (1%-5% scale) applied with 50% probability, then 10% dropout
	filled with zeros.  Returns [original, augmented_file].

	NOTE(review): random.randint(1, duration-1) raises ValueError for
	clips shorter than 2 seconds (duration <= 1) -- confirm inputs are
	always longer than that.
	"""
	y, sr = librosa.load(filename, mono=False)
	duration=int(librosa.core.get_duration(y,sr))
	print(y.shape)
	# y=np.expand_dims(y.swapaxes(0,1), 0)
	# N second splice between 1 second to N-1 seconds
	splice=random.randint(1,duration-1)
	my_augmenter = (Crop(size=sr * splice) * 5 # random crop subsequences of splice seconds
	                + AddNoise(scale=(0.01, 0.05)) @ 0.5 # with 50% probability, add random noise up to 1% - 5%
	                + Dropout(
	                    p=0.1,
	                    fill=0,
	                    size=[int(0.001 * sr), int(0.01 * sr), int(0.1 * sr)]
	                ) # drop out 10% of the time points (dropped out units are 1 ms, 10 ms, or 100 ms) and fill the dropped out points with zeros
	                )
	y_aug = my_augmenter.augment(y)
	newfile='tsaug_'+filename
	# transpose back to (frames, channels) for soundfile
	sf.write(newfile, y_aug.T, sr)
	return [filename, newfile]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/augment_noise.py | augmentation/audio_augmentation/augment_noise.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ ___ _ _
/ _ \ | ___ \_ _| _ / _ \ | (_)
/ /_\ \| |_/ / | | (_) / /_\ \_ _ __| |_ ___
| _ || __/ | | | _ | | | |/ _` | |/ _ \
| | | || | _| |_ _ | | | | |_| | (_| | | (_) |
\_| |_/\_| \___/ (_) \_| |_/\__,_|\__,_|_|\___/
Following remove_noise.py from voicebook.
'''
import os
import soundfile as sf
def augment_noise(filename):
    """Denoise *filename* with SoX using a profile built from its first 0.1 s.

    Writes '<stem>_noise_remove.wav' and returns [newfile].  Scratch
    files ('noiseprof.wav', 'noise.prof') are created in the current
    directory and removed afterwards.

    Fixes from the previous version: `duration = data/samplerate` was an
    element-wise numpy division (not a duration) and was never used, and
    the profile window was copied sample-by-sample in a Python loop
    instead of sliced.
    """
    # build a noise profile from the first tenth of a second of audio
    data, samplerate = sf.read(filename)
    profile_frames = int(samplerate/10)
    noisefile = 'noiseprof.wav'
    sf.write(noisefile, data[:profile_frames], samplerate)
    os.system('sox %s -n noiseprof noise.prof'%(noisefile))
    # apply the profile at strength 0.21
    filename2 = 'tempfile.wav'
    command = "sox %s %s noisered noise.prof 0.21 "%(filename, filename2)
    os.system(command)
    print(command)
    # rename output and remove scratch files
    newfile = filename[0:-4]+'_noise_remove.wav'
    os.rename(filename2, newfile)
    os.remove(noisefile)
    os.remove('noise.prof')
    return [newfile]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/augment.py | augmentation/audio_augmentation/augment.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ ___ _ _
/ _ \ | ___ \_ _| / _ \ | (_)
/ /_\ \| |_/ / | |(_) / /_\ \_ _ __| |_ ___
| _ || __/ | | | _ | | | |/ _` | |/ _ \
| | | || | _| |__ | | | | |_| | (_| | | (_) |
\_| |_/\_| \___(_) \_| |_/\__,_|\__,_|_|\___/
This is Allie's Augmentation API for audio files.
Usage: python3 augment.py [folder] [augment_type]
All augment_type options include:
['normalize_volume', 'normalize_pitch', 'time_stretch', 'opus_enhance',
'trim_silence', 'remove_noise', 'add_noise', "augment_tsaug"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/augmentation/audio_augmentation
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random
import numpy as np
# import helpers.transcribe as ts
# import speech_recognition as sr
from tqdm import tqdm
def prev_dir(directory):
    """Return *directory* with its last '/'-separated component removed.

    Pure string manipulation (no filesystem access); a path with no '/'
    yields ''.
    """
    return '/'.join(directory.split('/')[:-1])
################################################
## Helper functions ##
################################################
def transcribe(file, default_audio_transcriber, settingsdir):
	"""Transcribe the audio *file* with the engine named by
	*default_audio_transcriber* and return the transcript string.

	Supported engines: pocketsphinx, deepspeech_nodict, deepspeech_dict,
	google, wit, azure, bing, houndify, ibm.  Cloud engines read their
	credentials from environment variables; unknown engines (and any
	recognition/request error) yield an empty transcript.

	NOTE(review): this function uses `sr` (speech_recognition), but the
	module-level `import speech_recognition as sr` is commented out at
	the top of this file -- calling transcribe() as-is raises NameError;
	confirm the import should be restored.
	"""
	# create all transcription methods here
	print('%s transcribing: %s'%(default_audio_transcriber, file))
	# use the audio file as the audio source
	r = sr.Recognizer()
	transcript_engine = default_audio_transcriber
	with sr.AudioFile(file) as source:
		audio = r.record(source)  # read the entire audio file
	if transcript_engine == 'pocketsphinx':
		# recognize speech using Sphinx (fully offline)
		try:
			transcript= r.recognize_sphinx(audio)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'deepspeech_nodict':
		# Mozilla DeepSpeech without a language-model scorer
		curdir=os.getcwd()
		os.chdir(settingsdir+'/features/audio_features/helpers')
		listdir=os.listdir()
		deepspeech_dir=os.getcwd()
		# download models if not in helper directory
		if 'deepspeech-0.7.0-models.pbmm' not in listdir:
			os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.pbmm')
		# initialize filenames
		textfile=file[0:-4]+'.txt'
		newaudio=file[0:-4]+'_newaudio.wav'
		if deepspeech_dir.endswith('/'):
			deepspeech_dir=deepspeech_dir[0:-1]
		# go back to main directory
		os.chdir(curdir)
		# convert audio file to 16000 Hz mono audio (DeepSpeech's input format)
		os.system('ffmpeg -i "%s" -acodec pcm_s16le -ac 1 -ar 16000 "%s" -y'%(file, newaudio))
		command='deepspeech --model %s/deepspeech-0.7.0-models.pbmm --audio "%s" >> "%s"'%(deepspeech_dir, newaudio, textfile)
		print(command)
		os.system(command)
		# get transcript
		transcript=open(textfile).read().replace('\n','')
		# remove temporary files
		os.remove(textfile)
		os.remove(newaudio)
	elif transcript_engine == 'deepspeech_dict':
		# Mozilla DeepSpeech with the external scorer (dictionary) model
		curdir=os.getcwd()
		os.chdir(settingsdir+'/features/audio_features/helpers')
		listdir=os.listdir()
		deepspeech_dir=os.getcwd()
		# download models if not in helper directory
		if 'deepspeech-0.7.0-models.pbmm' not in listdir:
			os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.pbmm')
		if 'deepspeech-0.7.0-models.scorer' not in listdir:
			os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.scorer')
		# initialize filenames
		textfile=file[0:-4]+'.txt'
		newaudio=file[0:-4]+'_newaudio.wav'
		if deepspeech_dir.endswith('/'):
			deepspeech_dir=deepspeech_dir[0:-1]
		# go back to main directory
		os.chdir(curdir)
		# convert audio file to 16000 Hz mono audio
		os.system('ffmpeg -i "%s" -acodec pcm_s16le -ac 1 -ar 16000 "%s" -y'%(file, newaudio))
		command='deepspeech --model %s/deepspeech-0.7.0-models.pbmm --scorer %s/deepspeech-0.7.0-models.scorer --audio "%s" >> "%s"'%(deepspeech_dir, deepspeech_dir, newaudio, textfile)
		print(command)
		os.system(command)
		# get transcript
		transcript=open(textfile).read().replace('\n','')
		# remove temporary files
		os.remove(textfile)
		os.remove(newaudio)
	elif transcript_engine == 'google':
		# recognize speech using Google Speech Recognition
		# for testing purposes, we're just using the default API key
		# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
		# instead of `r.recognize_google(audio)`
		# recognize speech using Google Cloud Speech
		GOOGLE_CLOUD_SPEECH_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
		print(GOOGLE_CLOUD_SPEECH_CREDENTIALS)
		try:
			transcript=r.recognize_google_cloud(audio, credentials_json=open(GOOGLE_CLOUD_SPEECH_CREDENTIALS).read())
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'wit':
		# recognize speech using Wit.ai
		WIT_AI_KEY = os.environ['WIT_AI_KEY']
		try:
			transcript=r.recognize_wit(audio, key=WIT_AI_KEY)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'azure':
		# recognize speech using Microsoft Azure Speech
		AZURE_SPEECH_KEY = os.environ['AZURE_SPEECH_KEY']
		print(AZURE_SPEECH_KEY)
		try:
			transcript=r.recognize_azure(audio, key=AZURE_SPEECH_KEY)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'bing':
		# recognize speech using Microsoft Bing Voice Recognition
		BING_KEY = os.environ['BING_KEY']
		try:
			transcript=r.recognize_bing(audio, key=BING_KEY)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'houndify':
		# recognize speech using Houndify
		HOUNDIFY_CLIENT_ID = os.environ['HOUNDIFY_CLIENT_ID']
		HOUNDIFY_CLIENT_KEY = os.environ['HOUNDIFY_CLIENT_KEY']
		try:
			transcript=r.recognize_houndify(audio, client_id=HOUNDIFY_CLIENT_ID, client_key=HOUNDIFY_CLIENT_KEY)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'ibm':
		# recognize speech using IBM Speech to Text
		IBM_USERNAME = os.environ['IBM_USERNAME']
		IBM_PASSWORD = os.environ['IBM_PASSWORD']
		try:
			transcript=r.recognize_ibm(audio, username=IBM_USERNAME, password=IBM_PASSWORD)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	else:
		print('no transcription engine specified')
		transcript=''
	# show transcript
	print(transcript_engine.upper())
	print('--> '+ transcript)
	return transcript
def audio_augment(augmentation_set, audiofile, basedir):
	"""Dispatch *audiofile* to the augment_* module named by *augmentation_set*.

	The augment_* modules are imported conditionally at the top of this
	script (only the requested ones), which is why this stays a lazy
	if/elif chain rather than a dispatch table.  The helpers write their
	output files next to *audiofile*; this function itself returns None.
	"""
	# only load the relevant featuresets for featurization to save memory
	if augmentation_set=='augment_addnoise':
		# addnoise additionally needs the bundled background-noise directory
		augment_addnoise.augment_addnoise(audiofile,os.getcwd(),basedir+'/helpers/audio_augmentation/noise/')
	elif augmentation_set=='augment_noise':
		augment_noise.augment_noise(audiofile)
	elif augmentation_set=='augment_pitch':
		augment_pitch.augment_pitch(audiofile)
	elif augmentation_set=='augment_randomsplice':
		augment_randomsplice.augment_randomsplice(audiofile)
	elif augmentation_set=='augment_silence':
		augment_silence.augment_silence(audiofile)
	elif augmentation_set=='augment_time':
		augment_time.augment_time(audiofile)
	elif augmentation_set=='augment_tsaug':
		augment_tsaug.augment_tsaug(audiofile)
	elif augmentation_set=='augment_volume':
		augment_volume.augment_volume(audiofile)
################################################
## Load main settings ##
################################################
# directory=sys.argv[1]
# locate settings.json two directories above this script
basedir=os.getcwd()
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
audio_transcribe=settings['transcribe_audio']
default_audio_transcribers=settings['default_audio_transcriber']
try:
	# assume 1 type of feature_set
	augmentation_sets=[sys.argv[2]]
except:
	# if none provided in command line, then load default features
	augmentation_sets=settings['default_audio_augmenters']
################################################
##	    Import According to settings          ##
################################################
# only load the relevant featuresets for featurization to save memory
if 'augment_addnoise' in augmentation_sets:
	import augment_addnoise
if 'augment_noise' in augmentation_sets:
	import augment_noise
if 'augment_pitch' in augmentation_sets:
	import augment_pitch
if 'augment_randomsplice' in augmentation_sets:
	import augment_randomsplice
if 'augment_silence' in augmentation_sets:
	import augment_silence
if 'augment_time' in augmentation_sets:
	import augment_time
if 'augment_tsaug' in augmentation_sets:
	import augment_tsaug
if 'augment_volume' in augmentation_sets:
	import augment_volume
################################################
##	      Get featurization folder            ##
################################################
# target folder of audio files to augment, passed as argv[1]
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
random.shuffle(listdir)
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'
# get class label from folder name (handle a possible trailing slash)
labelname=foldername.split('/')
if labelname[-1]=='':
	labelname=labelname[-2]
else:
	labelname=labelname[-1]
################################################
## NOW AUGMENT!! ##
################################################
listdir=os.listdir()
random.shuffle(listdir)
# run every requested augmenter over every audio file in the folder.
# Fix: audio_augment() returns None, so the previous
# "filename=audio_augment(...)" rebinding crashed with
# "len(None)" (TypeError) as soon as a second augmenter ran;
# each augmenter is now applied to the original file instead.
for i in tqdm(range(len(listdir)), desc=labelname):
    if listdir[i][-4:] in ['.wav', '.mp3', '.m4a']:
        for augmentation_set in augmentation_sets:
            audio_augment(augmentation_set, listdir[i], basedir)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/augment_addnoise.py | augmentation/audio_augmentation/augment_addnoise.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ ___ _ _
/ _ \ | ___ \_ _| / _ \ | (_)
/ /_\ \| |_/ / | |(_) / /_\ \_ _ __| |_ ___
| _ || __/ | | | _ | | | |/ _` | |/ _ \
| | | || | _| |__ | | | | |_| | (_| | | (_) |
\_| |_/\_| \___(_) \_| |_/\__,_|\__,_|_|\___/
Add noise to an audio file.
'''
import os, uuid, random, math
from pydub import AudioSegment
def augment_addnoise(filename,curdir, noisedir):
    """Overlay a randomly chosen background-noise MP3 from `noisedir` onto `filename`.

    The noise clip is tiled and trimmed to exactly match the input's
    duration, exported to a temporary 'noise.wav', then mixed with the
    input via ffmpeg's join filter into '<base>_noise.wav'.

    Returns [filename, newfile]: the original path and the noisy output path.
    """
    # load the input with pydub (only .wav/.mp3 are handled; anything else
    # would leave `audioseg` unbound — assumes callers pre-filter extensions)
    if filename[-4:]=='.wav':
        audioseg=AudioSegment.from_wav(filename)
    elif filename[-4:]=='.mp3':
        audioseg=AudioSegment.from_mp3(filename)
    hostdir=os.getcwd()
    # hop into the noise-sample directory to pick a random noise file
    os.chdir(curdir)
    os.chdir(noisedir)
    listdir=os.listdir()
    # clear any stale temp file left over from a previous run
    if 'noise.wav' in listdir:
        os.remove('noise.wav')
    mp3files=list()
    for i in range(len(listdir)):
        if listdir[i][-4:]=='.mp3':
            mp3files.append(listdir[i])
    noise=random.choice(mp3files)
    # add noise to the regular file
    noise_seg = AudioSegment.from_mp3(noise)
    # find number of whole noise repetitions needed to cover the input
    cuts=math.floor(len(audioseg)/len(noise_seg))
    noise_seg_2=noise_seg * cuts
    # pad with a partial noise slice so the lengths match exactly
    noise_seg_3=noise_seg[:(len(audioseg)-len(noise_seg_2))]
    noise_seg_4=noise_seg_2 + noise_seg_3
    os.chdir(hostdir)
    print(len(noise_seg_4))
    print(len(audioseg))
    noise_seg_4.export("noise.wav", format="wav")
    # now combine audio file and noise file into a stereo mix
    newfile=filename[0:-4]+'_noise.wav'
    os.system('ffmpeg -i %s -i %s -filter_complex "[0:a][1:a]join=inputs=2:channel_layout=stereo[a]" -map "[a]" %s'%(filename, 'noise.wav',newfile))
    os.remove('noise.wav')
    return [filename, newfile]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/augment_pitch.py | augmentation/audio_augmentation/augment_pitch.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ ___ _ _
/ _ \ | ___ \_ _| _ / _ \ | (_)
/ /_\ \| |_/ / | | (_) / /_\ \_ _ __| |_ ___
| _ || __/ | | | _ | | | |/ _` | |/ _ \
| | | || | _| |_ _ | | | | |_| | (_| | | (_) |
\_| |_/\_| \___/ (_) \_| |_/\__,_|\__,_|_|\___/
Takes in an audio file and outputs files normalized to
different pitches. This corrects for gender ane time-of-day differences.
Where gives the pitch shift as positive or negative ‘cents’
(i.e. 100ths of a semitone). There are 12 semitones to an octave,
so that would mean ±1200 as a parameter.
'''
import os, random
def augment_pitch(filename):
    """Create two pitch-shifted copies of an audio file with SoX.

    Shifts the pitch up and down by a random 300-600 cents (100ths of a
    semitone), writing '<base>_freq_1.wav' (up) and '<base>_freq_2.wav'
    (down) next to the input.

    Returns [filename, '<base>_freq_1.wav', '<base>_freq_2.wav'].
    """
    basefile=filename[0:-4]
    # shift up by a random amount (in cents)
    randint=random.randint(300,600)
    os.system('sox %s %s pitch %s'%(filename, basefile+'_freq_1.wav', str(randint)))
    # shift down by a (freshly drawn) random amount
    randint=random.randint(300,600)
    # BUG FIX: the output filename argument was missing from the format
    # tuple here ("%(filename, , str(randint))"), which was a SyntaxError;
    # supply the second output file so the downward shift is written.
    os.system('sox %s %s pitch -%s'%(filename, basefile+'_freq_2.wav', str(randint)))
    return [filename, basefile+'_freq_1.wav',basefile+'_freq_2.wav']
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/helpers/audio_augment.py | augmentation/audio_augmentation/helpers/audio_augment.py | import os, sys, librosa, shutil, time, random, math
from pydub import AudioSegment
import soundfile as sf
def remove_json():
    """Delete every *.json file in the current working directory."""
    for entry in os.listdir():
        if entry.endswith('.json'):
            os.remove(entry)
def convert_audio():
    """Transcode every non-.wav file in the cwd to .wav via ffmpeg, then delete the original."""
    for entry in os.listdir():
        if not entry.endswith('.wav'):
            target = entry[0:-4] + '.wav'
            os.system('ffmpeg -i %s %s' % (entry, target))
            os.remove(entry)
def augment_dataset(filename, opusdir, curdir):
    """Apply the full suite of audio augmentations to one .wav file.

    Writes volume-normalized, pitch-shifted, time-stretched, opus-transcoded,
    silence-trimmed, denoised, and noise-overlaid variants next to the input.

    Args:
        filename: a .wav file in the current working directory.
        opusdir: directory containing the opusenc/opusdec tooling.
        curdir: script home directory (must contain a 'noise' subfolder
            of .mp3 background-noise samples).

    Returns:
        List of the augmented filenames that were produced.
    """
    def microphone_adjust(filename):
        '''
        takes in an audio file and filters it to different microphone
        configurations (sample rates). Perhaps change sample rate here?
        '''
        # placeholder — not implemented yet
        return ''
    def normalize_volume(filename):
        '''
        takes in an audio file and outputs files normalized to
        different volumes. This corrects for microphone distance and ages.
        Note that in using ffmpeg-normalize, this mimicks real world-use.
        An alternative could be to use SoX to move up or down volume.
        '''
        def change_volume(filename, vol):
            # name the output by whether the gain goes up or down
            if vol > 1:
                new_file=filename[0:-4]+'_increase_'+str(vol)+'.wav'
            else:
                new_file=filename[0:-4]+'_decrease_'+str(vol)+'.wav'
            # changes volume, vol, by input
            os.system('sox -v %s %s %s'%(str(vol),filename,new_file))
            return new_file
        filenames=list()
        basefile=filename[0:-4]
        # using peak normalization
        os.system('ffmpeg-normalize %s -nt peak -t 0 -o %s_peak_normalized.wav'%(filename, basefile))
        # BUG FIX: record the name of the file actually written — the
        # '.wav' suffix was missing (sibling script records it correctly).
        filenames.append('%s_peak_normalized.wav'%(basefile))
        # increase volume (3x gain)
        new_file=change_volume(filename, 3)
        filenames.append(new_file)
        # decrease volume (0.33x gain)
        new_file=change_volume(filename, 0.33)
        filenames.append(new_file)
        return filenames
    def normalize_pitch(filename):
        '''
        takes in an audio file and outputs files normalized to
        different pitches. This corrects for gender ane time-of-day differences.
        where gives the pitch shift as positive or negative 'cents' (i.e. 100ths of a semitone).
        There are 12 semitones to an octave, so that would mean ±1200 as a parameter.
        '''
        filenames=list()
        basefile=filename[0:-4]
        # shift down 600 cents (half an octave)
        os.system('sox %s %s pitch -600'%(filename, basefile+'_freq_1.wav'))
        filenames.append(basefile+'_freq_1.wav')
        # shift up 600 cents
        os.system('sox %s %s pitch 600'%(filename, basefile+'_freq_2.wav'))
        filenames.append(basefile+'_freq_2.wav')
        return filenames
    def time_stretch(filename):
        '''
        stretches files to 1.5x and 0.75x speed.
        '''
        basefile=filename[0:-4]
        filenames=list()
        y, sr = librosa.load(filename)
        # speed up to 1.5x
        y_fast = librosa.effects.time_stretch(y, 1.5)
        librosa.output.write_wav(basefile+'_stretch_0.wav', y_fast, sr)
        filenames.append(basefile+'_stretch_0.wav')
        # slow down to 0.75x
        y_slow = librosa.effects.time_stretch(y, 0.75)
        librosa.output.write_wav(basefile+'_stretch_2.wav', y_slow, sr)
        filenames.append(basefile+'_stretch_2.wav')
        return filenames
    def codec_enhance(filename, opusdir):
        '''
        round-trip the file through the lossy opus codec to simulate
        VoIP/telephony compression artifacts.
        '''
        filenames=list()
        curdir=os.getcwd()
        newfile=filename[0:-4]+'.opus'
        # copy file to opus encoding folder
        shutil.copy(curdir+'/'+filename, opusdir+'/'+filename)
        os.chdir(opusdir)
        print(os.getcwd())
        # encode with opus codec, then decode back to wav
        os.system('opusenc %s %s'%(filename,newfile))
        os.remove(filename)
        filename=filename[0:-4]+'_opus.wav'
        os.system('opusdec %s %s'%(newfile, filename))
        os.remove(newfile)
        # copy the decoded file back to the working directory
        shutil.copy(opusdir+'/'+filename, curdir+'/'+filename)
        os.remove(filename)
        os.chdir(curdir)
        # BUG FIX: `filename` is already '<base>_opus.wav' here; the old
        # code appended filename[0:-4]+'_opus.wav', i.e. a nonexistent
        # '<base>_opus_opus.wav' (sibling script records it correctly).
        filenames.append(filename)
        return filenames
    def trim_silence(filename):
        # sox 'silence' strips leading/trailing low-level audio
        new_filename=filename[0:-4]+'_trimmed.wav'
        command='sox %s %s silence -l 1 0.1 1'%(filename, new_filename)+"% -1 2.0 1%"
        os.system(command)
        return [new_filename]
    def remove_noise(filename):
        '''
        following remove_noise.py from voicebook.
        '''
        # use the first 100 ms of the file as the noise profile
        data, samplerate =sf.read(filename)
        first_data=samplerate/10
        filter_data=list()
        for i in range(int(first_data)):
            filter_data.append(data[i])
        noisefile='noiseprof.wav'
        sf.write(noisefile, filter_data, samplerate)
        os.system('sox %s -n noiseprof noise.prof'%(noisefile))
        # denoise with sox using the extracted profile
        filename2='tempfile.wav'
        command="sox %s %s noisered noise.prof 0.21 "%(filename,filename2)
        os.system(command)
        print(command)
        # rename output and remove temporary files
        os.rename(filename2,filename[0:-4]+'_noise_remove.wav')
        os.remove(noisefile)
        os.remove('noise.prof')
        return [filename[0:-4]+'_noise_remove.wav']
    def add_noise(filename,curdir, newfilename):
        # load the input with pydub (wav or mp3)
        if filename[-4:]=='.wav':
            audioseg=AudioSegment.from_wav(filename)
        elif filename[-4:]=='.mp3':
            audioseg=AudioSegment.from_mp3(filename)
        hostdir=os.getcwd()
        os.chdir(curdir)
        os.chdir('noise')
        listdir=os.listdir()
        # clear any stale temp file from a previous run
        if 'noise.wav' in listdir:
            os.remove('noise.wav')
        mp3files=list()
        for i in range(len(listdir)):
            if listdir[i][-4:]=='.mp3':
                mp3files.append(listdir[i])
        noise=random.choice(mp3files)
        noise_seg = AudioSegment.from_mp3(noise)
        # tile the noise clip to cover the input, then pad the remainder
        cuts=math.floor(len(audioseg)/len(noise_seg))
        noise_seg_2=noise_seg * cuts
        noise_seg_3=noise_seg[:(len(audioseg)-len(noise_seg_2))]
        noise_seg_4=noise_seg_2 + noise_seg_3
        os.chdir(hostdir)
        print(len(noise_seg_4))
        print(len(audioseg))
        noise_seg_4.export("noise.wav", format="wav")
        # mix source + noise into a stereo output with ffmpeg
        os.system('ffmpeg -i %s -i %s -filter_complex "[0:a][1:a]join=inputs=2:channel_layout=stereo[a]" -map "[a]" %s'%(filename, 'noise.wav',filename[0:-4]+'_noise.wav'))
        os.remove('noise.wav')
        os.rename(filename[0:-4]+'_noise.wav', newfilename)
        return [newfilename]
    def random_splice(filename):
        # need to do this only in non-speaking regions. Need pause detection for this. will do later.
        return ''
    def insert_pauses(filename):
        # need to do this only in non-speaking regions. Need pause detection for this. will do later.
        return ''
    # run every implemented augmenter; each returns a list of output files
    _1=normalize_volume(filename)
    _2=normalize_pitch(filename)
    _3=time_stretch(filename)
    _4=codec_enhance(filename, opusdir)
    _5=trim_silence(filename)
    _6=remove_noise(filename)
    _7=add_noise(filename, curdir, filename[0:-4]+'_add_noise_1.wav')
    _8=add_noise(filename, curdir, filename[0:-4]+'_add_noise_2.wav')
    augmented_filenames=_1+_2+_3+_4+_5+_6+_7+_8
    return augmented_filenames
## augment by 'python3 augment.py [folderpath]
## e.g. '/Users/jim/desktop/files'
# locate the opus tooling relative to the script's start directory
opusdir=os.getcwd()+'/opustools'
directory=sys.argv[1]
curdir=os.getcwd()
os.chdir(directory)
# drop stale featurization output and convert everything to .wav first
remove_json()
convert_audio()
time.sleep(5)
listdir=os.listdir()
wavfiles=list()
# replace spaces in names so shell commands built later don't break
for i in range(len(listdir)):
    if listdir[i][-4:] in ['.wav']:
        new_name=listdir[i].replace(' ','_')
        os.rename(listdir[i],new_name)
        wavfiles.append(new_name)
print(wavfiles)
# accumulate every augmented output across all inputs
augmented_files=list()
for i in range(len(wavfiles)):
    #try:
    temp=augment_dataset(wavfiles[i], opusdir, curdir)
    augmented_files=augmented_files+temp
    #except:
    #print('error')
print('augmented dataset with %s files'%(str(len(augmented_files))))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/helpers/audio_augmentation/setup.py | augmentation/audio_augmentation/helpers/audio_augmentation/setup.py | '''
Quick installation script to get everything up-and-running.
'''
import os

# Native audio tooling installed via Homebrew (macOS).
for package in ('opus-tools', 'opus', 'sox', 'ffmpeg'):
    os.system('brew install %s' % package)

# NLTK plus the wordnet corpus used by the text augmenters.
os.system('pip3 install -U nltk')
import nltk
nltk.download('wordnet')

# remaining Python requirements
os.system('pip3 install -r requirements.txt')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/audio_augmentation/helpers/audio_augmentation/audio_augment_folder.py | augmentation/audio_augmentation/helpers/audio_augmentation/audio_augment_folder.py | import os, sys, librosa, shutil, time, random, math
from pydub import AudioSegment
import soundfile as sf
def remove_json():
    """Remove all .json files from the current working directory."""
    stale = [name for name in os.listdir() if name[-5:] == '.json']
    for name in stale:
        os.remove(name)
def convert_audio():
    """Transcode every non-.wav file in the cwd to .wav with ffmpeg and delete the source."""
    entries = os.listdir()
    non_wav = [name for name in entries if name[-4:] != '.wav']
    for name in non_wav:
        os.system('ffmpeg -i %s %s' % (name, name[0:-4] + '.wav'))
        os.remove(name)
def augsort(filename,augmented_files, directory):
    """Sort augmented outputs into per-augmentation subfolders of `directory`.

    For each produced file '<base>_<suffix>.wav', a folder named
    '<dirname>_<suffix>' is created (if missing) and the file is moved
    into it.  Assumes every entry in augmented_files starts with the
    source file's basename followed by '_'.
    """
    dirname=directory.split('/')[-1]
    curdir=os.getcwd()
    os.chdir(directory)
    newdir=os.getcwd()
    # NOTE(review): this listing is taken once and not refreshed after
    # os.mkdir below — two augmented files mapping to the same suffix in
    # one call would attempt a duplicate mkdir; confirm suffixes are unique.
    listdir=os.listdir()
    file=filename.split('.wav')[0]
    for i in range(len(augmented_files)):
        print(augmented_files[i])
        print(file)
        # derive the augmentation suffix from the output filename
        foldername=augmented_files[i].split(file+'_')
        print(foldername)
        foldername=foldername[1].replace('.wav','')
        if dirname+'_'+foldername not in listdir:
            os.mkdir(dirname+'_'+foldername)
        shutil.move(newdir+'/'+augmented_files[i], newdir+'/'+dirname+'_'+foldername+'/'+augmented_files[i])
    os.chdir(curdir)
def augment_dataset(filename, opusdir, curdir):
    """Apply the full suite of audio augmentations to one .wav file.

    Writes volume-normalized, pitch-shifted, time-stretched,
    opus-transcoded, silence-trimmed, denoised, and noise-overlaid
    variants next to the input and returns the list of produced
    filenames.

    Args:
        filename: a .wav file in the current working directory.
        opusdir: directory containing the opusenc/opusdec tooling.
        curdir: script home directory (contains the 'noise' subfolder).
    """
    def microphone_adjust(filename):
        '''
        takes in an audio file and filters it to different microphone
        configurations (sample rates). Perhaps change sample rate here?
        '''
        # placeholder — not implemented yet
        return ''
    def normalize_volume(filename):
        '''
        takes in an audio file and outputs files normalized to
        different volumes. This corrects for microphone distance and ages.
        Note that in using ffmpeg-normalize, this mimicks real world-use.
        An alternative could be to use SoX to move up or down volume.
        '''
        def change_volume(filename, vol):
            # rename file by whether the gain goes up or down
            if vol > 1:
                new_file=filename[0:-4]+'_increase_vol.wav'
            else:
                new_file=filename[0:-4]+'_decrease_vol.wav'
            # changes volume, vol, by input
            os.system('sox -v %s %s %s'%(str(vol),filename,new_file))
            return new_file
        filenames=list()
        basefile=filename[0:-4]
        # using peak normalization
        os.system('ffmpeg-normalize %s -nt peak -t 0 -o %s_peak_normalized.wav'%(filename, basefile))
        filenames.append('%s_peak_normalized.wav'%(basefile))
        # using low volume
        # os.system('ffmpeg-normalize %s low.wav -o file1-normalized.wav -o %s-normalized_1.wav'%(filename, basefile))
        # filenames.append('%s-normalized_1.wav'%(basefile))
        # using moderate volume
        # os.system('ffmpeg-normalize %s moderate.wav -o file3-normalized.wav -o %s-normalized_2.wav'%(filename, basefile))
        # filenames.append('%s-normalized_2.wav'%(basefile))
        # using high volume
        # os.system('ffmpeg-normalize %s high.wav -o file1-normalized.wav -o %s-normalized_3.wav'(filename, basefile))
        # filenames.append('%s-normalized_3.wav'%(basefile))
        # increase volume (3x gain)
        new_file=change_volume(filename, 3)
        filenames.append(new_file)
        # decrease volume (0.33x gain)
        new_file=change_volume(filename, 0.33)
        filenames.append(new_file)
        return filenames
    def normalize_pitch(filename):
        '''
        takes in an audio file and outputs files normalized to
        different pitches. This corrects for gender ane time-of-day differences.
        where gives the pitch shift as positive or negative 'cents' (i.e. 100ths of a semitone).
        There are 12 semitones to an octave, so that would mean ±1200 as a parameter.
        '''
        filenames=list()
        basefile=filename[0:-4]
        # down two octave
        # os.system('sox %s %s pitch -2400'%(filename, basefile+'_freq_0.wav'))
        # filenames.append(basefile+'_freq_0.wav')
        # down 600 cents (half an octave)
        os.system('sox %s %s pitch -600'%(filename, basefile+'_freq_one.wav'))
        filenames.append(basefile+'_freq_one.wav')
        # up 600 cents
        os.system('sox %s %s pitch 600'%(filename, basefile+'_freq_two.wav'))
        filenames.append(basefile+'_freq_two.wav')
        # up two octaves
        # os.system('sox %s %s pitch 2400'%(filename, basefile+'_freq_3.wav'))
        # filenames.append(basefile+'_freq_3.wav')
        return filenames
    def time_stretch(filename):
        '''
        stretches files to 1.5x and 0.75x speed.
        '''
        basefile=filename[0:-4]
        filenames=list()
        y, sr = librosa.load(filename)
        # speed up to 1.5x
        y_fast = librosa.effects.time_stretch(y, 1.5)
        librosa.output.write_wav(basefile+'_stretch_one.wav', y_fast, sr)
        filenames.append(basefile+'_stretch_one.wav')
        # y_fast_2 = librosa.effects.time_stretch(y, 1.5)
        # librosa.output.write_wav(basefile+'_stretch_1.wav', y, sr)
        # filenames.append(basefile+'_stretch_1.wav')
        # slow down to 0.75x
        y_slow = librosa.effects.time_stretch(y, 0.75)
        librosa.output.write_wav(basefile+'_stretch_two.wav', y_slow, sr)
        filenames.append(basefile+'_stretch_two.wav')
        # y_slow_2 = librosa.effects.time_stretch(y, 0.25)
        # librosa.output.write_wav(basefile+'_stretch_3.wav', y, sr)
        # filenames.append(basefile+'_stretch_3.wav')
        return filenames
    def codec_enhance(filename, opusdir):
        # round-trip through the lossy opus codec to simulate VoIP artifacts
        filenames=list()
        #########################
        # lossy codec - .mp3
        #########################
        # os.system('ffmpeg -i %s %s'%(filename, filename[0:-4]+'.mp3'))
        # os.system('ffmpeg -i %s %s'%(filename[0:-4]+'.mp3', filename[0:-4]+'_mp3.wav'))
        # os.remove(filename[0:-4]+'.mp3')
        # filenames.append(filename[0:-4]+'_mp3.wav')
        #########################
        # lossy codec - .opus
        #########################
        curdir=os.getcwd()
        newfile=filename[0:-4]+'.opus'
        # copy file to opus encoding folder
        shutil.copy(curdir+'/'+filename, opusdir+'/'+filename)
        os.chdir(opusdir)
        print(os.getcwd())
        # encode with opus codec, then decode back to wav
        os.system('opusenc %s %s'%(filename,newfile))
        os.remove(filename)
        filename=filename[0:-4]+'_opus.wav'
        os.system('opusdec %s %s'%(newfile, filename))
        os.remove(newfile)
        # copy the decoded file back to the original working directory
        shutil.copy(opusdir+'/'+filename, curdir+'/'+filename)
        os.remove(filename)
        os.chdir(curdir)
        # `filename` is '<base>_opus.wav', so this records '<base>_opus.wav'
        filenames.append(filename[0:-4]+'.wav')
        return filenames
    def trim_silence(filename):
        # sox 'silence' strips leading/trailing low-level audio
        new_filename=filename[0:-4]+'_trimmed.wav'
        command='sox %s %s silence -l 1 0.1 1'%(filename, new_filename)+"% -1 2.0 1%"
        os.system(command)
        return [new_filename]
    def remove_noise(filename):
        '''
        following remove_noise.py from voicebook.
        '''
        # use the first 100 ms of the file as the noise profile
        data, samplerate =sf.read(filename)
        duration=data/samplerate
        first_data=samplerate/10
        filter_data=list()
        for i in range(int(first_data)):
            filter_data.append(data[i])
        noisefile='noiseprof.wav'
        sf.write(noisefile, filter_data, samplerate)
        os.system('sox %s -n noiseprof noise.prof'%(noisefile))
        # denoise with sox using the extracted profile
        filename2='tempfile.wav'
        filename3='tempfile2.wav'
        noisereduction="sox %s %s noisered noise.prof 0.21 "%(filename,filename2)
        command=noisereduction
        #run command
        os.system(command)
        print(command)
        #reduce silence again
        #os.system(silenceremove)
        #print(silenceremove)
        #rename and remove files
        os.rename(filename2,filename[0:-4]+'_noise_remove.wav')
        #os.remove(filename2)
        os.remove(noisefile)
        os.remove('noise.prof')
        return [filename[0:-4]+'_noise_remove.wav']
    def add_noise(filename,curdir, newfilename):
        # load the input with pydub (wav or mp3)
        if filename[-4:]=='.wav':
            audioseg=AudioSegment.from_wav(filename)
        elif filename[-4:]=='.mp3':
            audioseg=AudioSegment.from_mp3(filename)
        hostdir=os.getcwd()
        os.chdir(curdir)
        os.chdir('noise')
        listdir=os.listdir()
        # clear any stale temp file from a previous run
        if 'noise.wav' in listdir:
            os.remove('noise.wav')
        mp3files=list()
        for i in range(len(listdir)):
            if listdir[i][-4:]=='.mp3':
                mp3files.append(listdir[i])
        noise=random.choice(mp3files)
        # add noise to the regular file
        noise_seg = AudioSegment.from_mp3(noise)
        # tile the noise clip to cover the input, then pad the remainder
        cuts=math.floor(len(audioseg)/len(noise_seg))
        noise_seg_2=noise_seg * cuts
        noise_seg_3=noise_seg[:(len(audioseg)-len(noise_seg_2))]
        noise_seg_4=noise_seg_2 + noise_seg_3
        os.chdir(hostdir)
        print(len(noise_seg_4))
        print(len(audioseg))
        noise_seg_4.export("noise.wav", format="wav")
        # mix source + noise into a stereo output with ffmpeg
        os.system('ffmpeg -i %s -i %s -filter_complex "[0:a][1:a]join=inputs=2:channel_layout=stereo[a]" -map "[a]" %s'%(filename, 'noise.wav',filename[0:-4]+'_noise.wav'))
        os.remove('noise.wav')
        os.rename(filename[0:-4]+'_noise.wav', newfilename)
        return [newfilename]
    def random_splice(filename):
        # need to do this only in non-speaking regions. Need pause detection for this. will do later.
        return ''
    def insert_pauses(filename):
        # need to do this only in non-speaking regions. Need pause detection for this. will do later.
        return ''
    # run every implemented augmenter; each returns a list of output files
    # _0=microphone_adjust(filename)
    _1=normalize_volume(filename)
    _2=normalize_pitch(filename)
    _3=time_stretch(filename)
    _4=codec_enhance(filename, opusdir)
    _5=trim_silence(filename)
    _6=remove_noise(filename)
    _7=add_noise(filename, curdir, filename[0:-4]+'_add_noise_one.wav')
    _8=add_noise(filename, curdir, filename[0:-4]+'_add_noise_two.wav')
    # _9=random_splice(filename)
    # _10=insert_pauses(filename)
    augmented_filenames=_1+_2+_3+_4+_5+_6+_7+_8
    return augmented_filenames
## augment by 'python3 augment.py [folderpath]
## e.g. '/Users/jim/desktop/files'
# locate the opus tooling relative to the script's start directory
opusdir=os.getcwd()+'/opustools'
directory=sys.argv[1]
curdir=os.getcwd()
os.chdir(directory)
# drop stale featurization output and convert everything to .wav first
remove_json()
convert_audio()
time.sleep(5)
listdir=os.listdir()
wavfiles=list()
# replace spaces in names so shell commands built later don't break
for i in range(len(listdir)):
    if listdir[i][-4:] in ['.wav']:
        new_name=listdir[i].replace(' ','_')
        os.rename(listdir[i],new_name)
        wavfiles.append(new_name)
print(wavfiles)
augmented_files=list()
for i in range(len(wavfiles)):
    try:
        print(os.getcwd())
        # NOTE(review): this overwrites augmented_files each iteration
        # instead of accumulating, so the final count below reflects only
        # the last file — confirm whether accumulation was intended.
        augmented_files=augment_dataset(wavfiles[i], opusdir, curdir)
        print(augmented_files)
        # move the outputs into per-augmentation subfolders
        augsort(wavfiles[i], augmented_files, directory)
        print(augmented_files)
    except:
        print('error')
print('augmented dataset with %s files'%(str(len(augmented_files))))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/augment_imgaug.py | augmentation/image_augmentation/augment_imgaug.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
___ _ _ _
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ _____
/ _ \ | ___ \_ _| _ |_ _|
/ /_\ \| |_/ / | | (_) | | _ __ ___ __ _ __ _ ___
| _ || __/ | | | || '_ ` _ \ / _` |/ _` |/ _ \
| | | || | _| |_ _ _| || | | | | | (_| | (_| | __/
\_| |_/\_| \___/ (_) \___/_| |_| |_|\__,_|\__, |\___|
Following this tutorial:
https://nbviewer.jupyter.org/github/aleju/imgaug-doc/blob/master/notebooks/A01%20-%20Load%20and%20Augment%20an%20Image.ipynb
'''
import os
try:
import imgaug.augmenters as iaa
except:
os.system('pip3 install git+https://github.com/aleju/imgaug.git')
import imgaug.augmenters as iaa
import imageio
import matplotlib.pyplot as plt
def augment_imgaug(imagefile):
    """Write one randomly augmented copy of imagefile (rotate/noise/crop).

    Returns a list containing the original path followed by the path of
    the augmented image ('augmented_0<imagefile>').
    """
    source = imageio.imread(imagefile)
    pipeline = iaa.Sequential(
        [
            iaa.Affine(rotate=(-25, 25)),
            iaa.AdditiveGaussianNoise(scale=(30, 90)),
            iaa.Crop(percent=(0, 0.4)),
        ],
        random_order=True,
    )
    outputs = [imagefile]
    for index in range(1):
        augmented = pipeline.augment_image(source)
        out_name = 'augmented_%s' % (str(index)) + imagefile
        plt.imsave(out_name, augmented)
        outputs.append(out_name)
    return outputs
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/augment.py | augmentation/image_augmentation/augment.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
___ _ _ _
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ _____
/ _ \ | ___ \_ _| _ |_ _|
/ /_\ \| |_/ / | | (_) | | _ __ ___ __ _ __ _ ___
| _ || __/ | | | || '_ ` _ \ / _` |/ _` |/ _ \
| | | || | _| |_ _ _| || | | | | | (_| | (_| | __/
\_| |_/\_| \___/ (_) \___/_| |_| |_|\__,_|\__, |\___|
This section of Allie's Augmentation API augments folders of images via
the default_image_augmenters.
Usage: python3 augment.py [folder] [augment_type]
All augment_type options include:
["augment_imaug"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/augmentation/image_augmentation
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random
import numpy as np
# import helpers.transcribe as ts
# import speech_recognition as sr
from tqdm import tqdm
def prev_dir(directory):
    """Return the parent of a '/'-separated path: everything before the last component."""
    components = directory.split('/')
    return '/'.join(components[:-1])
################################################
## Helper functions ##
################################################
def transcribe(file, default_audio_transcriber, settingsdir):
    """Transcribe an audio file with the engine named by default_audio_transcriber.

    Supported engines: 'pocketsphinx', 'deepspeech_nodict', 'deepspeech_dict',
    'google', 'wit', 'azure', 'bing', 'houndify', 'ibm'.  Cloud engines read
    credentials from environment variables; the deepspeech engines download
    model files on first use and shell out to the deepspeech CLI.

    Returns the transcript string ('' on failure or unknown engine).

    NOTE(review): this helper transcribes audio but lives in the image
    augmentation script (the speech_recognition import above is commented
    out) — it appears to be copied over and unused here; confirm.
    """
    # create all transcription methods here
    print('%s transcribing: %s'%(default_audio_transcriber, file))
    # use the audio file as the audio source
    r = sr.Recognizer()
    transcript_engine = default_audio_transcriber
    with sr.AudioFile(file) as source:
        audio = r.record(source) # read the entire audio file
    if transcript_engine == 'pocketsphinx':
        # recognize speech using Sphinx (offline)
        try:
            transcript= r.recognize_sphinx(audio)
        except sr.UnknownValueError:
            transcript=''
        except sr.RequestError as e:
            transcript=''
    elif transcript_engine == 'deepspeech_nodict':
        curdir=os.getcwd()
        os.chdir(settingsdir+'/features/audio_features/helpers')
        listdir=os.listdir()
        deepspeech_dir=os.getcwd()
        # download models if not in helper directory
        if 'deepspeech-0.7.0-models.pbmm' not in listdir:
            os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.pbmm')
        # initialize filenames
        textfile=file[0:-4]+'.txt'
        newaudio=file[0:-4]+'_newaudio.wav'
        if deepspeech_dir.endswith('/'):
            deepspeech_dir=deepspeech_dir[0:-1]
        # go back to main directory
        os.chdir(curdir)
        # convert audio file to 16000 Hz mono audio (deepspeech requirement)
        os.system('ffmpeg -i "%s" -acodec pcm_s16le -ac 1 -ar 16000 "%s" -y'%(file, newaudio))
        command='deepspeech --model %s/deepspeech-0.7.0-models.pbmm --audio "%s" >> "%s"'%(deepspeech_dir, newaudio, textfile)
        print(command)
        os.system(command)
        # get transcript
        transcript=open(textfile).read().replace('\n','')
        # remove temporary files
        os.remove(textfile)
        os.remove(newaudio)
    elif transcript_engine == 'deepspeech_dict':
        curdir=os.getcwd()
        os.chdir(settingsdir+'/features/audio_features/helpers')
        listdir=os.listdir()
        deepspeech_dir=os.getcwd()
        # download models (acoustic model + scorer) if not in helper directory
        if 'deepspeech-0.7.0-models.pbmm' not in listdir:
            os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.pbmm')
        if 'deepspeech-0.7.0-models.scorer' not in listdir:
            os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.scorer')
        # initialize filenames
        textfile=file[0:-4]+'.txt'
        newaudio=file[0:-4]+'_newaudio.wav'
        if deepspeech_dir.endswith('/'):
            deepspeech_dir=deepspeech_dir[0:-1]
        # go back to main directory
        os.chdir(curdir)
        # convert audio file to 16000 Hz mono audio (deepspeech requirement)
        os.system('ffmpeg -i "%s" -acodec pcm_s16le -ac 1 -ar 16000 "%s" -y'%(file, newaudio))
        command='deepspeech --model %s/deepspeech-0.7.0-models.pbmm --scorer %s/deepspeech-0.7.0-models.scorer --audio "%s" >> "%s"'%(deepspeech_dir, deepspeech_dir, newaudio, textfile)
        print(command)
        os.system(command)
        # get transcript
        transcript=open(textfile).read().replace('\n','')
        # remove temporary files
        os.remove(textfile)
        os.remove(newaudio)
    elif transcript_engine == 'google':
        # recognize speech using Google Speech Recognition
        # for testing purposes, we're just using the default API key
        # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
        # instead of `r.recognize_google(audio)`
        # recognize speech using Google Cloud Speech
        GOOGLE_CLOUD_SPEECH_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
        print(GOOGLE_CLOUD_SPEECH_CREDENTIALS)
        try:
            transcript=r.recognize_google_cloud(audio, credentials_json=open(GOOGLE_CLOUD_SPEECH_CREDENTIALS).read())
        except sr.UnknownValueError:
            transcript=''
        except sr.RequestError as e:
            transcript=''
    elif transcript_engine == 'wit':
        # recognize speech using Wit.ai
        WIT_AI_KEY = os.environ['WIT_AI_KEY']
        try:
            transcript=r.recognize_wit(audio, key=WIT_AI_KEY)
        except sr.UnknownValueError:
            transcript=''
        except sr.RequestError as e:
            transcript=''
    elif transcript_engine == 'azure':
        # recognize speech using Microsoft Azure Speech
        AZURE_SPEECH_KEY = os.environ['AZURE_SPEECH_KEY']
        print(AZURE_SPEECH_KEY)
        try:
            transcript=r.recognize_azure(audio, key=AZURE_SPEECH_KEY)
        except sr.UnknownValueError:
            transcript=''
        except sr.RequestError as e:
            transcript=''
    elif transcript_engine == 'bing':
        # recognize speech using Microsoft Bing Voice Recognition
        BING_KEY = os.environ['BING_KEY']
        try:
            transcript=r.recognize_bing(audio, key=BING_KEY)
        except sr.UnknownValueError:
            transcript=''
        except sr.RequestError as e:
            transcript=''
    elif transcript_engine == 'houndify':
        # recognize speech using Houndify
        HOUNDIFY_CLIENT_ID = os.environ['HOUNDIFY_CLIENT_ID']
        HOUNDIFY_CLIENT_KEY = os.environ['HOUNDIFY_CLIENT_KEY']
        try:
            transcript=r.recognize_houndify(audio, client_id=HOUNDIFY_CLIENT_ID, client_key=HOUNDIFY_CLIENT_KEY)
        except sr.UnknownValueError:
            transcript=''
        except sr.RequestError as e:
            transcript=''
    elif transcript_engine == 'ibm':
        # recognize speech using IBM Speech to Text
        IBM_USERNAME = os.environ['IBM_USERNAME']
        IBM_PASSWORD = os.environ['IBM_PASSWORD']
        try:
            transcript=r.recognize_ibm(audio, username=IBM_USERNAME, password=IBM_PASSWORD)
        except sr.UnknownValueError:
            transcript=''
        except sr.RequestError as e:
            transcript=''
    else:
        print('no transcription engine specified')
        transcript=''
    # show transcript
    print(transcript_engine.upper())
    print('--> '+ transcript)
    return transcript
def image_augment(augmentation_set, imagefile, basedir):
    """Dispatch *imagefile* to the augmenter named by *augmentation_set*.

    Only the module matching the requested set is invoked, so unused
    augmentation featuresets never need to be loaded (saves memory).
    The augmenter works by side effect; nothing is returned.
    """
    if augmentation_set == 'augment_imgaug':
        augment_imgaug.augment_imgaug(imagefile)
################################################
##            Load main settings              ##
################################################
# directory=sys.argv[1]
basedir=os.getcwd()
# settings.json lives two directories above this helper script
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
# NOTE(review): despite the name, this reads the *image* transcription
# settings — confirm the variable name is a copy/paste leftover.
audio_transcribe=settings['transcribe_image']
default_image_transcribers=settings['default_image_transcriber']
try:
	# assume 1 type of feature_set passed on the command line
	augmentation_sets=[sys.argv[2]]
except:
	# if none provided in command line, then load default augmenters
	augmentation_sets=settings['default_image_augmenters']
################################################
##        Import according to settings        ##
################################################
# only load the relevant featuresets for featurization to save memory
if 'augment_imgaug' in augmentation_sets:
	import augment_imgaug
################################################
##          Get featurization folder          ##
################################################
# The target folder (first CLI argument) also provides the class label
# shown in the progress bar.
foldername = sys.argv[1]
os.chdir(foldername)
listdir = os.listdir()
random.shuffle(listdir)
cur_dir = os.getcwd()
help_dir = basedir + '/helpers/'

# get class label from folder name (tolerate a trailing '/')
labelname = foldername.split('/')
if labelname[-1] == '':
    labelname = labelname[-2]
else:
    labelname = labelname[-1]

################################################
##               NOW AUGMENT!!                ##
################################################
listdir = os.listdir()
random.shuffle(listdir)
# augment every .png file with each requested augmentation set
for i in tqdm(range(len(listdir)), desc=labelname):
    if listdir[i].endswith('.png'):
        filenames = [listdir[i]]
        for augmentation_set in augmentation_sets:
            for file_ in filenames:
                # BUGFIX: the original rebound the filename list to the
                # return value of image_augment(), which is None — that
                # crashed on the next augmentation set / file.  The call
                # is for its side effect only.
                image_augment(augmentation_set, file_, basedir)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/multicore.py | augmentation/image_augmentation/helpers/imgaug/multicore.py | """Classes and functions dealing with multicore augmentation."""
from __future__ import print_function, division, absolute_import
import sys
import multiprocessing
import threading
import traceback
import time
import random
import numpy as np
import imgaug.imgaug as ia
from imgaug.augmentables.batches import Batch, UnnormalizedBatch
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Empty as QueueEmpty, Full as QueueFull
import socket
BrokenPipeError = socket.error
elif sys.version_info[0] == 3:
import pickle
from queue import Empty as QueueEmpty, Full as QueueFull
class Pool(object):
    """
    Wrapper around the standard library's multiprocessing.Pool for multicore augmentation.
    """
    # This attribute saves the augmentation sequence for background workers so that it does not have to be resend with
    # every batch. The attribute is set once per worker in the worker's initializer. As each worker has its own
    # process, it is a different variable per worker (though usually should be of equal content).
    _WORKER_AUGSEQ = None
    # This attribute saves the initial seed for background workers so that for any future batch the batch's specific
    # seed can be derived, roughly via SEED_START+SEED_BATCH. As each worker has its own process, this seed can be
    # unique per worker even though all seemingly use the same constant attribute.
    _WORKER_SEED_START = None

    def __init__(self, augseq, processes=None, maxtasksperchild=None, seed=None):
        """
        Initialize augmentation pool.

        Parameters
        ----------
        augseq : Augmenter
            The augmentation sequence to apply to batches.

        processes : None or int, optional
            The number of background workers, similar to the same parameter in multiprocessing.Pool.
            If ``None``, the number of the machine's CPU cores will be used (this counts hyperthreads as CPU cores).
            If this is set to a negative value ``p``, then ``P - abs(p)`` will be used, where ``P`` is the number
            of CPU cores. E.g. ``-1`` would use all cores except one (this is useful to e.g. reserve one core to
            feed batches to the GPU).

        maxtasksperchild : None or int, optional
            The number of tasks done per worker process before the process is killed and restarted, similar to the
            same parameter in multiprocessing.Pool. If ``None``, worker processes will not be automatically restarted.

        seed : None or int, optional
            The seed to use for child processes. If ``None``, a random seed will be used.
        """
        # make sure that don't call pool again in a child process
        assert Pool._WORKER_AUGSEQ is None, "_WORKER_AUGSEQ was already set when calling " \
            "Pool.__init__(). Did you try to instantiate a Pool within a Pool?"
        assert processes is None or processes != 0
        self.augseq = augseq
        self.processes = processes
        self.maxtasksperchild = maxtasksperchild
        self.seed = seed
        if self.seed is not None:
            assert ia.SEED_MIN_VALUE <= self.seed <= ia.SEED_MAX_VALUE
        # multiprocessing.Pool instance, created lazily via the `pool` property
        self._pool = None
        # Running counter of the number of augmented batches. This will be used to send indexes for each batch to
        # the workers so that they can augment using SEED_BASE+SEED_BATCH and ensure consistency of applied
        # augmentation order between script runs.
        self._batch_idx = 0

    @property
    def pool(self):
        """Return the multiprocessing.Pool instance or create it if not done yet.

        Returns
        -------
        multiprocessing.Pool
            The multiprocessing.Pool used internally by this imgaug.multicore.Pool.
        """
        if self._pool is None:
            processes = self.processes
            if processes is not None and processes < 0:
                try:
                    # cpu count includes the hyperthreads, e.g. 8 for 4 cores + hyperthreading
                    processes = multiprocessing.cpu_count() - abs(processes)
                    # never drop below one worker
                    processes = max(processes, 1)
                except (ImportError, NotImplementedError):
                    processes = None
            self._pool = multiprocessing.Pool(processes,
                                              initializer=_Pool_initialize_worker,
                                              initargs=(self.augseq, self.seed),
                                              maxtasksperchild=self.maxtasksperchild)
        return self._pool

    def map_batches(self, batches, chunksize=None):
        """
        Augment batches.

        Parameters
        ----------
        batches : list of imgaug.augmentables.batches.Batch
            The batches to augment.

        chunksize : None or int, optional
            Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
            performance.

        Returns
        -------
        list of imgaug.augmentables.batches.Batch
            Augmented batches.
        """
        assert isinstance(batches, list), ("Expected to get a list as 'batches', got type %s. "
                                           + "Call imap_batches() if you use generators.") % (type(batches),)
        return self.pool.map(_Pool_starworker, self._handle_batch_ids(batches), chunksize=chunksize)

    def map_batches_async(self, batches, chunksize=None, callback=None, error_callback=None):
        """
        Augment batches asynchonously.

        Parameters
        ----------
        batches : list of imgaug.augmentables.batches.Batch
            The batches to augment.

        chunksize : None or int, optional
            Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
            performance.

        callback : None or callable, optional
            Function to call upon finish. See `multiprocessing.Pool`.

        error_callback : None or callable, optional
            Function to call upon errors. See `multiprocessing.Pool`.
            NOTE(review): ``error_callback`` exists only in Python 3's
            ``map_async`` — this method cannot run under Python 2.

        Returns
        -------
        multiprocessing.MapResult
            Asynchonous result. See `multiprocessing.Pool`.
        """
        assert isinstance(batches, list), ("Expected to get a list as 'batches', got type %s. "
                                           + "Call imap_batches() if you use generators.") % (type(batches),)
        return self.pool.map_async(_Pool_starworker, self._handle_batch_ids(batches),
                                   chunksize=chunksize, callback=callback, error_callback=error_callback)

    def imap_batches(self, batches, chunksize=1, output_buffer_size=None):
        """
        Augment batches from a generator.

        Pattern for output buffer constraint is from
        https://stackoverflow.com/a/47058399.

        Parameters
        ----------
        batches : generator of imgaug.augmentables.batches.Batch
            The batches to augment, provided as a generator. Each call to the generator should yield exactly one
            batch.

        chunksize : None or int, optional
            Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
            performance.

        output_buffer_size : None or int, optional
            Max number of batches to handle *at the same time* in the *whole*
            pipeline (including already augmented batches that are waiting to
            be requested). If the buffer size is reached, no new batches will
            be loaded from `batches` until a produced (i.e. augmented) batch is
            consumed (i.e. requested from this method).
            The buffer is unlimited if this is set to ``None``. For large
            datasets, this should be set to an integer value to avoid filling
            the whole RAM if loading+augmentation happens faster than training.

            *New in version 0.3.0.*

        Yields
        ------
        imgaug.augmentables.batches.Batch
            Augmented batch.
        """
        assert ia.is_generator(batches), ("Expected to get a generator as 'batches', got type %s. "
                                          + "Call map_batches() if you use lists.") % (type(batches),)
        # buffer is either None or a Semaphore
        output_buffer_left = _create_output_buffer_left(output_buffer_size)
        # TODO change this to 'yield from' once switched to 3.3+
        gen = self.pool.imap(
            _Pool_starworker,
            self._ibuffer_batch_loading(
                self._handle_batch_ids_gen(batches),
                output_buffer_left
            ),
            chunksize=chunksize)
        for batch in gen:
            yield batch
            # one slot frees up for every batch the consumer takes
            if output_buffer_left is not None:
                output_buffer_left.release()

    def imap_batches_unordered(self, batches, chunksize=1,
                               output_buffer_size=None):
        """
        Augment batches from a generator in a way that does not guarantee to preserve order.

        Pattern for output buffer constraint is from
        https://stackoverflow.com/a/47058399.

        Parameters
        ----------
        batches : generator of imgaug.augmentables.batches.Batch
            The batches to augment, provided as a generator. Each call to the generator should yield exactly one
            batch.

        chunksize : None or int, optional
            Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve
            performance.

        output_buffer_size : None or int, optional
            Max number of batches to handle *at the same time* in the *whole*
            pipeline (including already augmented batches that are waiting to
            be requested). If the buffer size is reached, no new batches will
            be loaded from `batches` until a produced (i.e. augmented) batch is
            consumed (i.e. requested from this method).
            The buffer is unlimited if this is set to ``None``. For large
            datasets, this should be set to an integer value to avoid filling
            the whole RAM if loading+augmentation happens faster than training.

            *New in version 0.3.0.*

        Yields
        ------
        imgaug.augmentables.batches.Batch
            Augmented batch.
        """
        assert ia.is_generator(batches), ("Expected to get a generator as 'batches', got type %s. "
                                          + "Call map_batches() if you use lists.") % (type(batches),)
        # buffer is either None or a Semaphore
        output_buffer_left = _create_output_buffer_left(output_buffer_size)
        # TODO change this to 'yield from' once switched to 3.3+
        gen = self.pool.imap_unordered(
            _Pool_starworker,
            self._ibuffer_batch_loading(
                self._handle_batch_ids_gen(batches),
                output_buffer_left
            ),
            chunksize=chunksize
        )
        for batch in gen:
            yield batch
            # one slot frees up for every batch the consumer takes
            if output_buffer_left is not None:
                output_buffer_left.release()

    def __enter__(self):
        # context-manager entry: eagerly create the worker pool
        assert self._pool is None, "Tried to __enter__ a pool that has already been initialized."
        _ = self.pool  # initialize multiprocessing pool
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # We don't check here if _pool is still not None here. Should we?
        self.close()

    def close(self):
        """Close the pool gracefully."""
        if self._pool is not None:
            self._pool.close()
            self._pool.join()
            self._pool = None

    def terminate(self):
        """Terminate the pool immediately."""
        if self._pool is not None:
            self._pool.terminate()
            self._pool.join()
            self._pool = None

    def join(self):
        """
        Wait for the workers to exit.

        This may only be called after calling :func:`imgaug.multicore.Pool.join` or
        :func:`imgaug.multicore.Pool.terminate`.
        """
        if self._pool is not None:
            self._pool.join()

    def _handle_batch_ids(self, batches):
        # Pair every batch with a monotonically increasing id; workers use the
        # id to derive a per-batch seed (see _Pool_worker).
        ids = np.arange(self._batch_idx, self._batch_idx + len(batches))
        inputs = list(zip(ids, batches))
        self._batch_idx += len(batches)
        return inputs

    def _handle_batch_ids_gen(self, batches):
        # Generator variant of _handle_batch_ids for imap/imap_unordered.
        for batch in batches:
            batch_idx = self._batch_idx
            yield batch_idx, batch
            self._batch_idx += 1

    @classmethod
    def _ibuffer_batch_loading(cls, batches, output_buffer_left):
        # Block on the semaphore before handing out the next batch, so at most
        # `output_buffer_size` batches are in flight at any time.
        for batch in batches:
            if output_buffer_left is not None:
                output_buffer_left.acquire()
            yield batch
def _create_output_buffer_left(output_buffer_size):
output_buffer_left = None
if output_buffer_size:
assert output_buffer_size > 0, (
("Expected non-zero buffer size, "
+ "but got %d") % (output_buffer_size,))
output_buffer_left = multiprocessing.Semaphore(output_buffer_size)
return output_buffer_left
# could be a classmethod or staticmethod of Pool in 3.x, but in 2.7 that leads to pickle errors
def _Pool_initialize_worker(augseq, seed_start):
    """Initializer executed once per worker process.

    Stores the augmentation sequence and the seed base in process-local
    class attributes of ``Pool`` so they don't have to be pickled for
    every batch.
    """
    if seed_start is None:
        # pylint falsely thinks in older versions that multiprocessing.current_process() was not
        # callable, see https://github.com/PyCQA/pylint/issues/1699
        # pylint: disable=not-callable
        process_name = multiprocessing.current_process().name
        # pylint: enable=not-callable
        # time_ns() exists only in 3.7+
        if sys.version_info[0] == 3 and sys.version_info[1] >= 7:
            seed_offset = time.time_ns()
        else:
            seed_offset = int(time.time() * 10**6) % 10**6
        # Mix the process name with the current time so each worker starts
        # from a different RNG state.  NOTE(review): hash() of a str varies
        # across runs when PYTHONHASHSEED randomization is active — fine
        # here, since the unseeded path is not meant to be reproducible.
        seed = hash(process_name) + seed_offset
        seed_global = ia.SEED_MIN_VALUE + (seed - 10**9) % (ia.SEED_MAX_VALUE - ia.SEED_MIN_VALUE)
        seed_local = ia.SEED_MIN_VALUE + seed % (ia.SEED_MAX_VALUE - ia.SEED_MIN_VALUE)
        ia.seed(seed_global)
        augseq.reseed(seed_local)
    Pool._WORKER_SEED_START = seed_start
    Pool._WORKER_AUGSEQ = augseq
    Pool._WORKER_AUGSEQ.localize_random_state_()  # not sure if really necessary, but won't hurt either
# could be a classmethod or staticmethod of Pool in 3.x, but in 2.7 that leads to pickle errors
def _Pool_worker(batch_idx, batch):
    """Augment one batch inside a worker process.

    Uses the process-local augmentation sequence stored by
    ``_Pool_initialize_worker()``.  When a seed base was configured, the
    global and augmenter RNG states are re-seeded deterministically per
    batch index, making augmentation reproducible across runs.
    """
    assert ia.is_single_integer(batch_idx)
    assert isinstance(batch, (UnnormalizedBatch, Batch))
    assert Pool._WORKER_AUGSEQ is not None
    aug = Pool._WORKER_AUGSEQ
    if Pool._WORKER_SEED_START is not None:
        seed = Pool._WORKER_SEED_START + batch_idx
        # derive separate in-range seeds for the global RNG and the augmenter
        seed_global = ia.SEED_MIN_VALUE + (seed - 10**9) % (ia.SEED_MAX_VALUE - ia.SEED_MIN_VALUE)
        seed_local = ia.SEED_MIN_VALUE + seed % (ia.SEED_MAX_VALUE - ia.SEED_MIN_VALUE)
        ia.seed(seed_global)
        aug.reseed(seed_local)
    result = aug.augment_batch(batch)
    return result
# could be a classmethod or staticmethod of Pool in 3.x, but in 2.7 that leads to pickle errors
# starworker is here necessary, because starmap does not exist in 2.7
def _Pool_starworker(inputs):
    """Unpack a single ``(batch_idx, batch)`` tuple and call ``_Pool_worker``."""
    return _Pool_worker(*inputs)
class BatchLoader(object):
    """
    Class to load batches in the background.

    Loaded batches can be accesses using :attr:`imgaug.BatchLoader.queue`.

    Parameters
    ----------
    load_batch_func : callable or generator
        Generator or generator function (i.e. function that yields Batch objects)
        or a function that returns a list of Batch objects.
        Background loading automatically stops when the last batch was yielded or the
        last batch in the list was reached.

    queue_size : int, optional
        Maximum number of batches to store in the queue. May be set higher
        for small images and/or small batches.

    nb_workers : int, optional
        Number of workers to run in the background.

    threaded : bool, optional
        Whether to run the background processes using threads (True) or full processes (False).
    """

    @ia.deprecated(alt_func="imgaug.multicore.Pool")
    def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
        ia.do_assert(queue_size >= 2, "Queue size for BatchLoader must be at least 2, got %d." % (queue_size,))
        ia.do_assert(nb_workers >= 1, "Number of workers for BatchLoader must be at least 1, got %d" % (nb_workers,))
        # Two-stage pipeline: workers fill _queue_internal, a dedicated main
        # worker thread moves entries to the public `queue`. Each queue gets
        # half of the requested total size.
        self._queue_internal = multiprocessing.Queue(queue_size//2)
        self.queue = multiprocessing.Queue(queue_size//2)
        self.join_signal = multiprocessing.Event()
        self.workers = []
        self.threaded = threaded
        seeds = ia.current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            if threaded:
                # threads share RNG state with the parent, so no seed is passed
                worker = threading.Thread(
                    target=self._load_batches,
                    args=(load_batch_func, self._queue_internal, self.join_signal, None)
                )
            else:
                worker = multiprocessing.Process(
                    target=self._load_batches,
                    args=(load_batch_func, self._queue_internal, self.join_signal, seeds[i])
                )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
        self.main_worker_thread = threading.Thread(
            target=self._main_worker,
            args=()
        )
        self.main_worker_thread.daemon = True
        self.main_worker_thread.start()

    def count_workers_alive(self):
        # number of loader workers (threads or processes) still running
        return sum([int(worker.is_alive()) for worker in self.workers])

    def all_finished(self):
        """
        Determine whether the workers have finished the loading process.

        Returns
        -------
        out : bool
            True if all workers have finished. Else False.
        """
        return self.count_workers_alive() == 0

    def _main_worker(self):
        # Moves pickled batches from the internal queue to the public one.
        # An empty string in the internal queue is a worker's "I'm done"
        # marker (see _load_batches' finally block).
        workers_running = self.count_workers_alive()
        while workers_running > 0 and not self.join_signal.is_set():
            # wait for a new batch in the source queue and load it
            try:
                batch_str = self._queue_internal.get(timeout=0.1)
                if batch_str == "":
                    workers_running -= 1
                else:
                    self.queue.put(batch_str)
            except QueueEmpty:
                time.sleep(0.01)
            except (EOFError, BrokenPipeError):
                break
            workers_running = self.count_workers_alive()
        # All workers have finished, move the remaining entries from internal to external queue
        while True:
            try:
                batch_str = self._queue_internal.get(timeout=0.005)
                if batch_str != "":
                    self.queue.put(batch_str)
            except QueueEmpty:
                break
            except (EOFError, BrokenPipeError):
                break
        # pickled None signals "loading finished" to consumers of `queue`
        self.queue.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)

    def _load_batches(self, load_batch_func, queue_internal, join_signal, seedval):
        # Runs inside a worker thread/process; pulls batches from the user
        # callable/generator and pushes them (pickled) onto the internal queue.
        if seedval is not None:
            random.seed(seedval)
            np.random.seed(seedval)
            ia.seed(seedval)
        try:
            gen = load_batch_func() if not ia.is_generator(load_batch_func) else load_batch_func
            for batch in gen:
                ia.do_assert(isinstance(batch, Batch),
                             "Expected batch returned by load_batch_func to be of class imgaug.Batch, got %s." % (
                                 type(batch),))
                batch_pickled = pickle.dumps(batch, protocol=-1)
                while not join_signal.is_set():
                    try:
                        queue_internal.put(batch_pickled, timeout=0.005)
                        break
                    except QueueFull:
                        pass
                if join_signal.is_set():
                    break
        except Exception:
            traceback.print_exc()
        finally:
            # always emit the "worker finished" marker, even after an error
            queue_internal.put("")
        time.sleep(0.01)

    def terminate(self):
        """Stop all workers."""
        if not self.join_signal.is_set():
            self.join_signal.set()
        # give minimal time to put generated batches in queue and gracefully shut down
        time.sleep(0.01)
        if self.main_worker_thread.is_alive():
            self.main_worker_thread.join()
        if self.threaded:
            for worker in self.workers:
                if worker.is_alive():
                    worker.join()
        else:
            for worker in self.workers:
                if worker.is_alive():
                    worker.terminate()
                    worker.join()
        # wait until all workers are fully terminated
        while not self.all_finished():
            time.sleep(0.001)
        # empty queue until at least one element can be added and place None as signal that BL finished
        if self.queue.full():
            self.queue.get()
        self.queue.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)
        # clean the queue, this reportedly prevents hanging threads
        while True:
            try:
                self._queue_internal.get(timeout=0.005)
            except QueueEmpty:
                break
        # NOTE(review): `_closed` is a private attribute of
        # multiprocessing.Queue — works on CPython but is not a public API.
        if not self._queue_internal._closed:
            self._queue_internal.close()
        if not self.queue._closed:
            self.queue.close()
        self._queue_internal.join_thread()
        self.queue.join_thread()
        time.sleep(0.025)

    def __del__(self):
        # best-effort shutdown signal if the loader is garbage collected
        if not self.join_signal.is_set():
            self.join_signal.set()
class BackgroundAugmenter(object):
    """
    Class to augment batches in the background (while training on the GPU).

    This is a wrapper around the multiprocessing module.

    Parameters
    ----------
    batch_loader : BatchLoader or multiprocessing.Queue
        BatchLoader object that loads the data fed into the BackgroundAugmenter, or alternatively a Queue.
        If a Queue, then it must be made sure that a final ``None`` in the Queue signals that the loading is
        finished and no more batches will follow. Otherwise the BackgroundAugmenter will wait forever for the next
        batch.

    augseq : Augmenter
        An augmenter to apply to all loaded images.
        This may be e.g. a Sequential to apply multiple augmenters.

    queue_size : int
        Size of the queue that is used to temporarily save the augmentation
        results. Larger values offer the background processes more room
        to save results when the main process doesn't load much, i.e. they
        can lead to smoother and faster training. For large images, high
        values can block a lot of RAM though.

    nb_workers : 'auto' or int
        Number of background workers to spawn.
        If ``auto``, it will be set to ``C-1``, where ``C`` is the number of CPU cores.
    """

    @ia.deprecated(alt_func="imgaug.multicore.Pool")
    def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
        ia.do_assert(queue_size > 0)
        self.augseq = augseq
        # accept either a raw queue or a BatchLoader (use its public queue)
        self.queue_source = batch_loader if isinstance(batch_loader, multiprocessing.queues.Queue) else batch_loader.queue
        self.queue_result = multiprocessing.Queue(queue_size)
        if nb_workers == "auto":
            try:
                nb_workers = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                nb_workers = 1
            # try to reserve at least one core for the main process
            nb_workers = max(1, nb_workers - 1)
        else:
            ia.do_assert(nb_workers >= 1)
        self.nb_workers = nb_workers
        self.workers = []
        self.nb_workers_finished = 0
        # one deterministic seed per worker, drawn from imgaug's global RNG
        seeds = ia.current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            worker = multiprocessing.Process(
                target=self._augment_images_worker,
                args=(augseq, self.queue_source, self.queue_result, seeds[i])
            )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)

    def all_finished(self):
        # True once every worker has emitted its final None marker
        return self.nb_workers_finished == self.nb_workers

    def get_batch(self):
        """
        Returns a batch from the queue of augmented batches.

        If workers are still running and there are no batches in the queue,
        it will automatically wait for the next batch.

        Returns
        -------
        out : None or imgaug.Batch
            One batch or None if all workers have finished.
        """
        if self.all_finished():
            return None
        batch_str = self.queue_result.get()
        batch = pickle.loads(batch_str)
        if batch is not None:
            return batch
        else:
            # a pickled None means one worker finished
            self.nb_workers_finished += 1
            if self.nb_workers_finished >= self.nb_workers:
                try:
                    self.queue_source.get(timeout=0.001)  # remove the None from the source queue
                except QueueEmpty:
                    pass
                return None
            else:
                # other workers may still produce batches; retry
                return self.get_batch()

    def _augment_images_worker(self, augseq, queue_source, queue_result, seedval):
        """
        Augment endlessly images in the source queue.

        This is a worker function for that endlessly queries the source queue (input batches),
        augments batches in it and sends the result to the output queue.
        """
        np.random.seed(seedval)
        random.seed(seedval)
        augseq.reseed(seedval)
        ia.seed(seedval)
        loader_finished = False
        while not loader_finished:
            # wait for a new batch in the source queue and load it
            try:
                batch_str = queue_source.get(timeout=0.1)
                batch = pickle.loads(batch_str)
                if batch is None:
                    loader_finished = True
                    # put it back in so that other workers know that the loading queue is finished
                    queue_source.put(pickle.dumps(None, protocol=-1))
                else:
                    batch_aug = augseq.augment_batch(batch)
                    # send augmented batch to output queue
                    batch_str = pickle.dumps(batch_aug, protocol=-1)
                    queue_result.put(batch_str)
            except QueueEmpty:
                time.sleep(0.01)
        # signal "this worker is done" to the consumer
        queue_result.put(pickle.dumps(None, protocol=-1))
        time.sleep(0.01)

    def terminate(self):
        """
        Terminates all background processes immediately.

        This will also free their RAM.
        """
        for worker in self.workers:
            if worker.is_alive():
                worker.terminate()
        self.nb_workers_finished = len(self.workers)
        # NOTE(review): `_closed` is a private multiprocessing.Queue attribute.
        if not self.queue_result._closed:
            self.queue_result.close()
        time.sleep(0.01)

    def __del__(self):
        time.sleep(0.1)
        self.terminate()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/testutils.py | augmentation/image_augmentation/helpers/imgaug/testutils.py | """
Some utility functions that are only used for unittests.
Placing them in test/ directory seems to be against convention, so they are part of the library.
"""
from __future__ import print_function, division, absolute_import
import random
import numpy as np
import six.moves as sm
import imgaug as ia
def create_random_images(size):
    """Return a uint8 array of shape *size* filled with uniform noise in [0, 255)."""
    noise = np.random.uniform(0, 255, size)
    return noise.astype(np.uint8)
def create_random_keypoints(size_images, nb_keypoints_per_img):
    """Build one KeypointsOnImage per image, each holding random integer keypoints.

    ``size_images`` is an image-batch shape ``(N, H, W, ...)``; coordinates
    are drawn uniformly from ``[0, W-1)`` and ``[0, H-1)``.
    """
    result = []
    for _ in sm.xrange(size_images[0]):
        height, width = size_images[1], size_images[2]
        kps = []
        for _ in sm.xrange(nb_keypoints_per_img):
            # x is drawn before y, matching the original call order
            x_coord = np.random.randint(0, width - 1)
            y_coord = np.random.randint(0, height - 1)
            kps.append(ia.Keypoint(x=x_coord, y=y_coord))
        result.append(ia.KeypointsOnImage(kps, shape=size_images[1:]))
    return result
def array_equal_lists(list1, list2):
    """True iff both lists have equal length and pairwise np.array_equal entries."""
    ia.do_assert(isinstance(list1, list))
    ia.do_assert(isinstance(list2, list))
    if len(list1) != len(list2):
        return False
    # all() short-circuits on the first mismatch, like the original loop
    return all(np.array_equal(arr_a, arr_b) for arr_a, arr_b in zip(list1, list2))
def keypoints_equal(kps1, kps2, eps=0.001):
    """True iff both KeypointsOnImage lists match pairwise within *eps* per coordinate."""
    if len(kps1) != len(kps2):
        return False
    for koi_a, koi_b in zip(kps1, kps2):
        pts_a = koi_a.keypoints
        pts_b = koi_b.keypoints
        if len(pts_a) != len(pts_b):
            return False
        for kp_a, kp_b in zip(pts_a, pts_b):
            # identical comparison form to the original (inclusive bounds)
            x_ok = float(kp_b.x) - eps <= float(kp_a.x) <= float(kp_b.x) + eps
            y_ok = float(kp_b.y) - eps <= float(kp_a.y) <= float(kp_b.y) + eps
            if not (x_ok and y_ok):
                return False
    return True
def reseed(seed=0):
    """Reset imgaug's, numpy's and the stdlib ``random`` RNGs for reproducible tests.

    The seeding order (imgaug first, then numpy, then random) is kept as-is.
    """
    ia.seed(seed)
    np.random.seed(seed)
    random.seed(seed)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/__init__.py | augmentation/image_augmentation/helpers/imgaug/__init__.py | from __future__ import absolute_import
# this contains some deprecated classes/functions pointing to the new
# classes/functions, hence always place the other imports below this so that
# the deprecated stuff gets overwritten as much as possible
from imgaug.imgaug import *
import imgaug.augmentables as augmentables
from imgaug.augmentables import *
import imgaug.augmenters as augmenters
import imgaug.parameters as parameters
import imgaug.dtypes as dtypes
__version__ = '0.2.9'
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/dtypes.py | augmentation/image_augmentation/helpers/imgaug/dtypes.py | from __future__ import print_function, division
import warnings
import numpy as np
import six.moves as sm
import imgaug as ia
# Maps a numpy dtype "kind" character (as in np.dtype.kind) to the dtype
# names of that kind, ordered from smallest to largest itemsize.
# NOTE(review): "float128" is unavailable on some platforms (e.g. Windows);
# the names here are resolved lazily, so that entry only matters if reached.
KIND_TO_DTYPES = {
    "i": ["int8", "int16", "int32", "int64"],
    "u": ["uint8", "uint16", "uint32", "uint64"],
    "b": ["bool"],
    "f": ["float16", "float32", "float64", "float128"]
}
def restore_dtypes_(images, dtypes, clip=True, round=True):
    """Cast *images* back to the given dtype(s), with optional rounding/clipping.

    Parameters
    ----------
    images : ndarray or iterable of ndarray
        A single array (cast as a whole) or a list of arrays (each cast to the
        corresponding entry in *dtypes*).
    dtypes : numpy.dtype or str or iterable
        Target dtype(s). For a single-array input with an iterable of dtypes,
        all entries must be identical.
    clip : bool, optional
        Whether to clip values into the target dtype's value range before
        casting (prevents wrap-around artifacts).
    round : bool, optional
        Whether to round floats before casting to an int-like target dtype.
        (The builtin name is shadowed for API compatibility.)

    Returns
    -------
    ndarray or iterable of ndarray
        The converted image(s); the input list is modified in place.
    """
    if ia.is_np_array(images):
        if ia.is_iterable(dtypes):
            assert len(dtypes) > 0
            if len(dtypes) > 1:
                # a single array can only be restored to one dtype
                assert all([dtype_i == dtypes[0] for dtype_i in dtypes])
            dtypes = dtypes[0]
        dtypes = np.dtype(dtypes)
        dtype_to = dtypes
        if images.dtype.type == dtype_to:
            result = images
        else:
            if round and dtype_to.kind in ["u", "i", "b"]:
                images = np.round(images)
            if clip:
                min_value, _, max_value = get_value_range_of_dtype(dtype_to)
                images = clip_(images, min_value, max_value)
            result = images.astype(dtype_to, copy=False)
    elif ia.is_iterable(images):
        result = images
        dtypes = dtypes if not isinstance(dtypes, np.dtype) else [dtypes] * len(images)
        for i, (image, dtype) in enumerate(zip(images, dtypes)):
            assert ia.is_np_array(image)
            # BUGFIX: the original recursion dropped the `round` argument,
            # silently re-enabling rounding for list inputs with round=False.
            result[i] = restore_dtypes_(image, dtype, clip=clip, round=round)
    else:
        raise Exception("Expected numpy array or iterable of numpy arrays, got type '%s'." % (type(images),))
    return result
def copy_dtypes_for_restore(images, force_list=False):
    """Record the dtype(s) of *images* so they can later be restored via restore_dtypes_()."""
    if not ia.is_np_array(images):
        # list of arrays -> one dtype per entry
        return [image.dtype for image in images]
    if force_list:
        # replicate the single array dtype once per row
        return [images.dtype] * len(images)
    return images.dtype
def get_minimal_dtype(arrays, increase_itemsize_factor=1):
    """Determine the smallest dtype able to represent all given arrays/dtypes.

    Parameters
    ----------
    arrays : list of ndarray or numpy.dtype
        Arrays (their dtypes are used) or dtypes to promote over.
        Must contain at least one entry.
    increase_itemsize_factor : int, optional
        Factor by which to additionally grow the promoted dtype's itemsize
        (e.g. ``2`` turns uint8 into uint16).

    Returns
    -------
    numpy.dtype
        The promoted dtype.

    Raises
    ------
    TypeError
        If the itemsize-increased dtype does not exist (e.g. float256).
    """
    from functools import reduce

    input_dts = [array.dtype if not isinstance(array, np.dtype) else array
                 for array in arrays]
    assert len(input_dts) > 0
    # BUGFIX: np.promote_types() accepts exactly two dtypes; the original
    # `np.promote_types(*input_dts)` crashed for any other input length.
    # Folding with reduce() handles one or more inputs.
    promoted_dt = reduce(np.promote_types, input_dts)
    if increase_itemsize_factor > 1:
        promoted_dt_highres = "%s%d" % (promoted_dt.kind, promoted_dt.itemsize * increase_itemsize_factor)
        try:
            promoted_dt_highres = np.dtype(promoted_dt_highres)
            return promoted_dt_highres
        except TypeError:
            raise TypeError(
                ("Unable to create a numpy dtype matching the name '%s'. "
                 + "This error was caused when trying to find a minimal dtype covering the dtypes '%s' (which was "
                 + "determined to be '%s') and then increasing its resolution (aka itemsize) by a factor of %d. "
                 + "This error can be avoided by choosing arrays with lower resolution dtypes as inputs, e.g. by "
                 + "reducing float32 to float16.") % (
                    promoted_dt_highres,
                    ", ".join([input_dt.name for input_dt in input_dts]),
                    promoted_dt.name,
                    increase_itemsize_factor
                )
            )
    return promoted_dt
def get_minimal_dtype_for_values(values, allowed_kinds, default, allow_bool_as_intlike=True):
    """Find the smallest dtype of an allowed kind covering all given values.

    Parameters
    ----------
    values : iterable of number or ndarray
        Scalars and/or arrays; arrays contribute their min and max.
    allowed_kinds : iterable of str
        Dtype kind characters to try, in order of preference.
    default : any or "raise"
        Returned when no kind matches; the string ``"raise"`` raises instead.
    allow_bool_as_intlike : bool, optional
        Whether int-like values within [0, 1] may also match kind ``"b"``.

    Returns
    -------
    numpy.dtype or `default`
    """
    values_normalized = []
    for value in values:
        if ia.is_np_array(value):
            # BUGFIX: the original reduced over the whole `values` list
            # (np.min(values)/np.max(values)) instead of the current array,
            # which breaks for mixed scalar/array or ragged inputs.
            values_normalized.extend([np.min(value), np.max(value)])
        else:
            values_normalized.append(value)
    vmin = np.min(values_normalized)
    vmax = np.max(values_normalized)
    possible_kinds = []
    if ia.is_single_float(vmin) or ia.is_single_float(vmax):
        # at least one is a float
        possible_kinds.append("f")
    elif ia.is_single_bool(vmin) and ia.is_single_bool(vmax):
        # both are bools
        possible_kinds.extend(["b", "u", "i"])
    else:
        # at least one of them is an integer and none is float
        if vmin >= 0:
            possible_kinds.append("u")
        possible_kinds.append("i")
        # vmin and vmax are already guarantueed to not be float due to if-statement above
        if allow_bool_as_intlike and 0 <= vmin <= 1 and 0 <= vmax <= 1:
            possible_kinds.append("b")
    for allowed_kind in allowed_kinds:
        if allowed_kind in possible_kinds:
            dt = get_minimal_dtype_by_value_range(vmin, vmax, allowed_kind, default=None)
            if dt is not None:
                return dt
    if ia.is_string(default) and default == "raise":
        raise Exception(("Did not find matching dtypes for vmin=%s (type %s) and vmax=%s (type %s). "
                         + "Got %s input values of types %s.") % (
            vmin, type(vmin), vmax, type(vmax), ", ".join([str(type(value)) for value in values])))
    return default
def get_minimal_dtype_by_value_range(low, high, kind, default):
    """Return the smallest dtype of *kind* whose value range covers [low, high].

    Falls back to *default*; the string ``"raise"`` raises instead.
    """
    assert low <= high, "Expected low to be less or equal than high, got %s and %s." % (low, high)
    for candidate in KIND_TO_DTYPES[kind]:
        min_value, _center_value, max_value = get_value_range_of_dtype(candidate)
        if min_value <= low and high <= max_value:
            return np.dtype(candidate)
    # nothing in this kind was wide enough
    if ia.is_string(default) and default == "raise":
        raise Exception("Could not find dtype of kind '%s' within value range [%s, %s]" % (kind, low, high))
    return default
def promote_array_dtypes_(arrays, dtypes=None, increase_itemsize_factor=1, affects=None):
    """Cast the arrays in *affects* (default: *arrays*) to a dtype covering all *dtypes*."""
    if dtypes is None:
        dtypes = [array.dtype for array in arrays]
    target = get_minimal_dtype(dtypes, increase_itemsize_factor=increase_itemsize_factor)
    if affects is None:
        affects = arrays
    # arrays already at the target dtype are passed through unchanged
    return [array if array.dtype.type == target
            else array.astype(target, copy=False)
            for array in affects]
def increase_array_resolutions_(arrays, factor):
    """Cast every array (in place in the list) to a dtype with `factor`-times larger itemsize."""
    assert ia.is_single_integer(factor)
    assert factor in [1, 2, 4, 8]
    if factor == 1:
        return arrays
    for idx, arr in enumerate(arrays):
        current = arr.dtype
        target = np.dtype("%s%d" % (current.kind, current.itemsize * factor))
        arrays[idx] = arr.astype(target, copy=False)
    return arrays
def get_value_range_of_dtype(dtype):
    """Return ``(minimum, center, maximum)`` of the value range of `dtype`.

    `dtype` may be a string (e.g. ``"uint8"``), a plain type (e.g.
    ``np.uint8``) or a proper ``numpy.dtype`` instance. The center value is
    ``0.0`` for floats, ``-0.5`` for signed integers, the rounded-down
    midpoint for unsigned integers and ``None`` for booleans. Unsupported
    kinds raise an Exception.
    """
    # np.dtype() normalizes strings, plain types and dtype instances alike.
    dtype = np.dtype(dtype)
    kind = dtype.kind
    if kind == "f":
        info = np.finfo(dtype)
        return info.min, 0.0, info.max
    if kind == "u":
        info = np.iinfo(dtype)
        return info.min, int(info.min + 0.5 * info.max), info.max
    if kind == "i":
        info = np.iinfo(dtype)
        return info.min, -0.5, info.max
    if kind == "b":
        return 0, None, 1
    raise Exception("Cannot estimate value range of dtype '%s' (type: %s)" % (str(dtype), type(dtype)))
# TODO call this function wherever data is clipped
def clip_(array, min_value, max_value):
    """Clip `array` to ``[min_value, max_value]``, in place where possible.

    Either bound may be None to leave that side unclipped. Bounds that the
    array's dtype cannot exceed anyway are dropped, as clipping against them
    is a no-op and can even trigger numpy bugs (see comment below).
    """
    # uint64 is disallowed, because numpy's clip seems to convert it to float64
    # TODO find the cause for that
    gate_dtypes(array,
                allowed=["bool",
                         "uint8", "uint16", "uint32",
                         "int8", "int16", "int32", "int64",
                         "float16", "float32", "float64", "float128"],
                disallowed=["uint64"],
                augmenter=None)
    # Drop bounds that lie outside the dtype's own value range. Keeping them
    # can lead to errors in numpy's clip, e.g.
    #   >>> arr = np.zeros((1,), dtype=np.int32)
    #   >>> np.clip(arr, 0, np.iinfo(np.dtype("uint32")).max)
    # returns array([-1], dtype=int32) (observed on numpy version 1.15.2).
    dt_min, _, dt_max = get_value_range_of_dtype(array.dtype)
    if min_value is not None and min_value < dt_min:
        min_value = None
    if max_value is not None and dt_max < max_value:
        max_value = None
    if min_value is not None or max_value is not None:
        if len(array.shape) == 0:
            # scalar arrays (shape == ()) do not accept an "out" argument
            array = np.clip(array, min_value, max_value)
        else:
            array = np.clip(array, min_value, max_value, out=array)
    return array
def clip_to_dtype_value_range_(array, dtype, validate=True, validate_values=None):
    """Clip `array` into the value range of `dtype`, in place where possible.

    If `validate` is truthy, the observed (or supplied) min/max of the array
    are asserted to already lie inside the target range. Passing an integer
    for `validate` restricts the observation to that many leading elements;
    `validate_values` may supply a precomputed ``(min, max)`` pair instead.
    """
    # for some reason, using 'out' did not work for uint64 (would clip max value to 0)
    # but removing out then results in float64 array instead of uint64
    assert array.dtype.name not in ["uint64", "uint128"]
    dtype = np.dtype(dtype)
    min_value, _, max_value = get_value_range_of_dtype(dtype)
    if validate:
        subset = array
        if ia.is_single_integer(validate):
            assert validate_values is None
            subset = array.flat[0:validate]
        if validate_values is not None:
            observed_min, observed_max = validate_values
        else:
            observed_min = np.min(subset)
            observed_max = np.max(subset)
        assert min_value <= observed_min <= max_value
        assert min_value <= observed_max <= max_value
    return clip_(array, min_value, max_value)
def gate_dtypes(dtypes, allowed, disallowed, augmenter=None):
    """Verify that `dtypes` contains only allowed dtypes.

    `dtypes` may be a single array/scalar (anything with a ``.dtype``
    attribute) or an iterable of dtypes and/or arrays. Dtypes listed in
    `disallowed` raise a ValueError; dtypes in neither list only emit a
    warning. `augmenter` (if given) is named in the generated messages.
    """
    assert len(allowed) > 0
    assert ia.is_string(allowed[0])
    if len(disallowed) > 0:
        assert ia.is_string(disallowed[0])
    # don't use is_np_array() here, because this is supposed to handle numpy
    # scalars too
    if hasattr(dtypes, "dtype"):
        dtypes = [dtypes.dtype]
    else:
        dtypes = [entry.dtype if hasattr(entry, "dtype") else entry
                  for entry in dtypes]
    for dtype in dtypes:
        if dtype.name in allowed:
            continue
        if dtype.name in disallowed:
            if augmenter is None:
                raise ValueError("Got dtype '%s', which is a forbidden dtype (%s)." % (
                    dtype.name, ", ".join(disallowed)
                ))
            raise ValueError("Got dtype '%s' in augmenter '%s' (class '%s'), which is a forbidden dtype (%s)." % (
                dtype.name, augmenter.name, augmenter.__class__.__name__, ", ".join(disallowed)
            ))
        # neither explicitly allowed nor disallowed -> warn, don't fail
        if augmenter is None:
            warnings.warn(("Got dtype '%s', which was neither explicitly allowed "
                           + "(%s), nor explicitly disallowed (%s). Generated outputs may contain errors.") % (
                dtype.name, ", ".join(allowed), ", ".join(disallowed)
            ))
        else:
            warnings.warn(("Got dtype '%s' in augmenter '%s' (class '%s'), which was neither explicitly allowed "
                           + "(%s), nor explicitly disallowed (%s). Generated outputs may contain errors.") % (
                dtype.name, augmenter.name, augmenter.__class__.__name__, ", ".join(allowed), ", ".join(disallowed)
            ))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/parameters.py | augmentation/image_augmentation/helpers/imgaug/parameters.py | from __future__ import print_function, division, absolute_import
import copy as copy_module
from collections import defaultdict
from abc import ABCMeta, abstractmethod
import tempfile
import numpy as np
import six
import six.moves as sm
import scipy
import scipy.stats
import imageio
from . import imgaug as ia
from . import dtypes as iadt
from .external.opensimplex import OpenSimplex
def _check_value_range(v, name, value_range):
    """Assert that value `v` of parameter `name` lies within `value_range`.

    `value_range` may be None (no check), a ``(low, high)`` tuple where
    either end may be None (half-open check on that side), or a callable
    performing its own validation. Returns True when the check passed;
    otherwise an assertion error is raised via ``ia.do_assert``. An
    unrecognized `value_range` raises an Exception.
    """
    if value_range is None:
        return True
    if isinstance(value_range, tuple):
        ia.do_assert(len(value_range) == 2)
        lower, upper = value_range
        if lower is None and upper is None:
            return True
        if lower is None:
            ia.do_assert(
                v <= upper,
                "Parameter '%s' is outside of the expected value range (x <= %.4f)" % (name, upper))
            return True
        if upper is None:
            ia.do_assert(
                lower <= v,
                "Parameter '%s' is outside of the expected value range (%.4f <= x)" % (name, lower))
            return True
        ia.do_assert(
            lower <= v <= upper,
            "Parameter '%s' is outside of the expected value range (%.4f <= x <= %.4f)" % (
                name, lower, upper))
        return True
    if ia.is_callable(value_range):
        value_range(v)
        return True
    raise Exception("Unexpected input for value_range, got %s." % (str(value_range),))
def handle_continuous_param(param, name, value_range=None, tuple_to_uniform=True, list_to_choice=True):
    """Normalize a user-provided continuous parameter into a StochasticParameter.

    Numbers become ``Deterministic``, 2-tuples become ``Uniform`` (when
    `tuple_to_uniform` is set), other iterables become ``Choice`` (when
    `list_to_choice` is set) and StochasticParameter instances pass through
    untouched. All literal values are validated against `value_range`.
    """
    if isinstance(param, StochasticParameter):
        return param
    if ia.is_single_number(param):
        _check_value_range(param, name, value_range)
        return Deterministic(param)
    if tuple_to_uniform and isinstance(param, tuple):
        ia.do_assert(
            len(param) == 2,
            "Expected parameter '%s' with type tuple to have exactly two entries, but got %d." % (name, len(param)))
        ia.do_assert(
            all([ia.is_single_number(v) for v in param]),
            "Expected parameter '%s' with type tuple to only contain numbers, got %s." % (
                name, [type(v) for v in param],))
        for bound in param:
            _check_value_range(bound, name, value_range)
        return Uniform(param[0], param[1])
    if list_to_choice and ia.is_iterable(param) and not isinstance(param, tuple):
        ia.do_assert(
            all([ia.is_single_number(v) for v in param]),
            "Expected iterable parameter '%s' to only contain numbers, got %s." % (
                name, [type(v) for v in param],))
        for entry in param:
            _check_value_range(entry, name, value_range)
        return Choice(param)
    allowed_type = "number"
    list_str = ", list of %s" % (allowed_type,) if list_to_choice else ""
    raise Exception("Expected %s, tuple of two %s%s or StochasticParameter for %s, got %s." % (
        allowed_type, allowed_type, list_str, name, type(param),))
def handle_discrete_param(param, name, value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True):
    """Normalize a user-provided discrete parameter into a StochasticParameter.

    Analogous to :func:`handle_continuous_param`, but all literal values are
    cast to int (2-tuples become ``DiscreteUniform``) and floats are only
    accepted when `allow_floats` is True.
    """
    if isinstance(param, StochasticParameter):
        return param
    if ia.is_single_integer(param) or (allow_floats and ia.is_single_float(param)):
        _check_value_range(param, name, value_range)
        return Deterministic(int(param))
    if tuple_to_uniform and isinstance(param, tuple):
        ia.do_assert(len(param) == 2)
        ia.do_assert(
            all([ia.is_single_number(v) if allow_floats else ia.is_single_integer(v) for v in param]),
            "Expected parameter '%s' of type tuple to only contain %s, got %s." % (
                name, "number" if allow_floats else "integer", [type(v) for v in param],))
        _check_value_range(param[0], name, value_range)
        _check_value_range(param[1], name, value_range)
        return DiscreteUniform(int(param[0]), int(param[1]))
    if list_to_choice and ia.is_iterable(param) and not isinstance(param, tuple):
        ia.do_assert(
            all([ia.is_single_number(v) if allow_floats else ia.is_single_integer(v) for v in param]),
            "Expected iterable parameter '%s' to only contain %s, got %s." % (
                name, "number" if allow_floats else "integer", [type(v) for v in param],))
        for entry in param:
            _check_value_range(entry, name, value_range)
        return Choice([int(entry) for entry in param])
    allowed_type = "number" if allow_floats else "int"
    list_str = ", list of %s" % (allowed_type,) if list_to_choice else ""
    raise Exception(
        "Expected %s, tuple of two %s%s or StochasticParameter for %s, got %s." % (
            allowed_type, allowed_type, list_str, name, type(param),))
def handle_discrete_kernel_size_param(param, name, value_range=(1, None), allow_floats=True):
    """Normalize a user-provided kernel size into one or two StochasticParameters.

    Returns a tuple ``(param_x, param_y)``. ``param_y`` is None whenever a
    single shared kernel-size parameter was derived; it is non-None only when
    the input was a 2-tuple interpreted as separate x/y kernel sizes.

    Parameters
    ----------
    param : int or float or tuple or iterable or StochasticParameter
        The kernel size input to normalize.
    name : str
        Parameter name, used in validation/error messages.
    value_range : tuple, optional
        Range each literal value is validated against (default: at least 1).
    allow_floats : bool, optional
        Whether float literals are accepted (they are cast to int).
    """
    if ia.is_single_integer(param) or (allow_floats and ia.is_single_float(param)):
        _check_value_range(param, name, value_range)
        return Deterministic(int(param)), None
    elif isinstance(param, tuple):
        ia.do_assert(len(param) == 2)
        # a 2-tuple of plain numbers denotes a sampling range for one shared parameter
        if all([ia.is_single_integer(param_i) for param_i in param]) \
                or (allow_floats and all([ia.is_single_float(param_i) for param_i in param])):
            _check_value_range(param[0], name, value_range)
            _check_value_range(param[1], name, value_range)
            return DiscreteUniform(int(param[0]), int(param[1])), None
        elif all([isinstance(param_i, StochasticParameter) for param_i in param]):
            # two parameters given -> use them directly as separate x/y kernel sizes
            return param[0], param[1]
        else:
            # mixed tuple entries: normalize each entry as its own kernel-size input
            handled = (
                handle_discrete_param(param[0], "%s[0]" % (name,), value_range, allow_floats=allow_floats),
                handle_discrete_param(param[1], "%s[1]" % (name,), value_range, allow_floats=allow_floats)
            )
            return handled
    elif ia.is_iterable(param) and not isinstance(param, tuple):
        ia.do_assert(
            all([ia.is_single_number(v) if allow_floats else ia.is_single_integer(v) for v in param]),
            "Expected iterable parameter '%s' to only contain %s, got %s." % (
                name, "number" if allow_floats else "integer", [type(v) for v in param],))
        for param_i in param:
            _check_value_range(param_i, name, value_range)
        return Choice([int(param_i) for param_i in param]), None
    elif isinstance(param, StochasticParameter):
        return param, None
    else:
        raise Exception("Expected int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(param),))
def handle_probability_param(param, name, tuple_to_uniform=False, list_to_choice=False):
    """Normalize a user-provided probability into a StochasticParameter.

    Booleans and (numerically near-)exact 0/1 values become ``Deterministic``;
    other numbers in [0, 1] become ``Binomial``. Tuples and lists are handled
    analogously to :func:`handle_continuous_param` when the corresponding
    flags are set, wrapped in a ``Binomial``.
    """
    eps = 1e-6
    if param in [True, False, 0, 1]:
        return Deterministic(int(param))
    if ia.is_single_number(param):
        ia.do_assert(0.0 <= param <= 1.0)
        # snap values numerically indistinguishable from 0/1 to a constant
        near_zero = 0.0 - eps < param < 0.0 + eps
        near_one = 1.0 - eps < param < 1.0 + eps
        if near_zero or near_one:
            return Deterministic(int(np.round(param)))
        return Binomial(param)
    if tuple_to_uniform and isinstance(param, tuple):
        ia.do_assert(
            all([ia.is_single_number(v) for v in param]),
            "Expected parameter '%s' of type tuple to only contain number, got %s." % (
                name, [type(v) for v in param],))
        ia.do_assert(len(param) == 2)
        ia.do_assert(0 <= param[0] <= 1.0)
        ia.do_assert(0 <= param[1] <= 1.0)
        return Binomial(Uniform(param[0], param[1]))
    if list_to_choice and ia.is_iterable(param):
        ia.do_assert(
            all([ia.is_single_number(v) for v in param]),
            "Expected iterable parameter '%s' to only contain number, got %s." % (
                name, [type(v) for v in param],))
        ia.do_assert(all([0 <= p_i <= 1.0 for p_i in param]))
        return Binomial(Choice(param))
    if isinstance(param, StochasticParameter):
        return param
    raise Exception("Expected boolean or number or StochasticParameter for %s, got %s." % (name, type(param),))
def force_np_float_dtype(val):
    """Return `val` unchanged if it is a float array, else cast it to float64."""
    return val if val.dtype.kind == "f" else val.astype(np.float64)
def both_np_float_if_one_is_float(a, b):
    """Return ``(a, b)`` where any non-float array is cast to float64.

    If both inputs are already float arrays they are returned unchanged;
    if at least one is not, each non-float input is cast to float64.
    """
    a_is_float = a.dtype.type in ia.NP_FLOAT_TYPES
    b_is_float = b.dtype.type in ia.NP_FLOAT_TYPES
    if not a_is_float:
        a = a.astype(np.float64)
    if not b_is_float:
        b = b.astype(np.float64)
    return a, b
def draw_distributions_grid(params, rows=None, cols=None, graph_sizes=(350, 350), sample_sizes=None, titles=None):
    """Render the distribution plots of several parameters as one grid image.

    `titles` may be None (auto-title per parameter), False (no titles) or a
    list of per-parameter titles. `sample_sizes` optionally provides a
    per-parameter sampling size. Each plot is resized to `graph_sizes`
    before being arranged into a ``rows`` x ``cols`` grid.
    """
    if titles is None:
        titles = [None] * len(params)
    elif titles is False:
        titles = [False] * len(params)
    if sample_sizes is None:
        images = [param.draw_distribution_graph(title=title)
                  for param, title in zip(params, titles)]
    else:
        images = [param.draw_distribution_graph(size=size, title=title)
                  for param, size, title in zip(params, sample_sizes, titles)]
    resized = ia.imresize_many_images(images, sizes=graph_sizes)
    return ia.draw_grid(resized, rows=rows, cols=cols)
def show_distributions_grid(params, rows=None, cols=None, graph_sizes=(350, 350), sample_sizes=None, titles=None):
    """Render the distribution plots of several parameters and display them."""
    grid = draw_distributions_grid(
        params,
        graph_sizes=graph_sizes,
        sample_sizes=sample_sizes,
        rows=rows,
        cols=cols,
        titles=titles)
    ia.imshow(grid)
@six.add_metaclass(ABCMeta)
class StochasticParameter(object): # pylint: disable=locally-disabled, unused-variable, line-too-long
    """
    Abstract parent class for all stochastic parameters.
    Stochastic parameters are here all parameters from which values are
    supposed to be sampled. Usually the sampled values are to a degree random.
    E.g. a stochastic parameter may be the range [-10, 10], with sampled
    values being 5.2, -3.7, -9.7 and 6.4.
    """
    def __init__(self):
        super(StochasticParameter, self).__init__()
    def draw_sample(self, random_state=None):
        """
        Draws a single sample value from this parameter.
        Parameters
        ----------
        random_state : None or numpy.random.RandomState, optional
            A random state to use during the sampling process.
            If None, the libraries global random state will be used.
        Returns
        -------
        any
            A single sample value.
        """
        # convenience wrapper: sample a 1-element batch and unwrap it
        return self.draw_samples(1, random_state=random_state)[0]
    def draw_samples(self, size, random_state=None):
        """
        Draws one or more sample values from the parameter.
        Parameters
        ----------
        size : tuple of int or int
            Number of sample values by dimension.
        random_state : None or np.random.RandomState, optional
            A random state to use during the sampling process.
            If None, the libraries global random state will be used.
        Returns
        -------
        samples : iterable
            Sampled values. Usually a numpy ndarray of basically any dtype,
            though not strictly limited to numpy arrays. Its shape is expected to
            match `size`.
        """
        # TODO convert int to random state here
        random_state = random_state if random_state is not None else ia.current_random_state()
        # normalize a bare int size to a 1-tuple before delegating to the
        # subclass-specific sampling routine
        samples = self._draw_samples(
            size if not ia.is_single_integer(size) else tuple([size]),
            random_state)
        # NOTE(review): presumably advances the random state so consecutive
        # draws do not repeat -- see ia.forward_random_state for the details
        ia.forward_random_state(random_state)
        return samples
    @abstractmethod
    def _draw_samples(self, size, random_state):
        # subclasses implement the actual sampling logic here
        raise NotImplementedError()
    # ---- Arithmetic operators ----
    # Combining a parameter with a number or another parameter does not
    # sample anything immediately; it builds a lazily evaluated derived
    # parameter (Add, Subtract, Multiply, Divide, Power, Discretize).
    def __add__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Add(self, other)
        else:
            raise Exception(("Invalid datatypes in: StochasticParameter + %s. "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __sub__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Subtract(self, other)
        else:
            raise Exception(("Invalid datatypes in: StochasticParameter - %s. "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __mul__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Multiply(self, other)
        else:
            raise Exception(("Invalid datatypes in: StochasticParameter * %s. "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __pow__(self, other, z=None):
        # the three-argument pow(a, b, z) form is not supported
        if z is not None:
            raise NotImplementedError("Modulo power is currently not supported by StochasticParameter.")
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Power(self, other)
        else:
            raise Exception(("Invalid datatypes in: StochasticParameter ** %s. "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __div__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Divide(self, other)
        else:
            raise Exception(("Invalid datatypes in: StochasticParameter / %s. "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __truediv__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Divide(self, other)
        else:
            raise Exception(("Invalid datatypes in: StochasticParameter / %s (truediv). "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __floordiv__(self, other):
        # floor division = true division followed by discretization
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Discretize(Divide(self, other))
        else:
            raise Exception(("Invalid datatypes in: StochasticParameter // %s (floordiv). "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    # ---- Reflected operators: support "number <op> StochasticParameter" ----
    def __radd__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Add(other, self)
        else:
            raise Exception(("Invalid datatypes in: %s + StochasticParameter. "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __rsub__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Subtract(other, self)
        else:
            raise Exception(("Invalid datatypes in: %s - StochasticParameter. "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __rmul__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Multiply(other, self)
        else:
            raise Exception(("Invalid datatypes in: %s * StochasticParameter. "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __rpow__(self, other, z=None):
        if z is not None:
            raise NotImplementedError("Modulo power is currently not supported by StochasticParameter.")
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Power(other, self)
        else:
            raise Exception(("Invalid datatypes in: %s ** StochasticParameter. "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __rdiv__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Divide(other, self)
        else:
            raise Exception(("Invalid datatypes in: %s / StochasticParameter. "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __rtruediv__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Divide(other, self)
        else:
            raise Exception(("Invalid datatypes in: %s / StochasticParameter (rtruediv). "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def __rfloordiv__(self, other):
        if ia.is_single_number(other) or isinstance(other, StochasticParameter):
            return Discretize(Divide(other, self))
        else:
            raise Exception(("Invalid datatypes in: StochasticParameter // %s (rfloordiv). "
                             + "Expected second argument to be number or StochasticParameter.") % (type(other),))
    def copy(self):
        """
        Create a shallow copy of this parameter.
        Returns
        -------
        imgaug.parameters.StochasticParameter
            Shallow copy.
        """
        return copy_module.copy(self)
    def deepcopy(self):
        """
        Create a deep copy of this parameter.
        Returns
        -------
        imgaug.parameters.StochasticParameter
            Deep copy.
        """
        return copy_module.deepcopy(self)
    def draw_distribution_graph(self, title=None, size=(1000, 1000), bins=100):
        """
        Generate a plot (image) that shows the parameter's distribution of
        values.
        Parameters
        ----------
        title : None or False or str, optional
            Title of the plot. None is automatically replaced by a title
            derived from ``str(param)``. If set to False, no title will be
            shown.
        size : tuple of int
            Number of points to sample. This is always expected to have at
            least two values. The first defines the number of sampling runs,
            the second (and further) dimensions define the size assigned
            to each :func:`imgaug.parameters.StochasticParameter.draw_samples`
            call. E.g. ``(10, 20, 15)`` will lead to ``10`` calls of
            ``draw_samples(size=(20, 15))``. The results will be merged to a single 1d array.
        bins : int
            Number of bins in the plot histograms.
        Returns
        -------
        data : (H,W,3) ndarray
            Image of the plot.
        """
        # import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
        import matplotlib.pyplot as plt
        # run several sampling passes and merge everything into one flat array
        points = []
        for _ in sm.xrange(size[0]):
            points.append(self.draw_samples(size[1:]).flatten())
        points = np.concatenate(points)
        fig = plt.figure()
        fig.add_subplot(111)
        ax = fig.gca()
        # normalize bar heights so they sum to 1 (relative frequencies)
        heights, bins = np.histogram(points, bins=bins)
        heights = heights / sum(heights)
        ax.bar(bins[:-1], heights, width=(max(bins) - min(bins))/len(bins), color="blue", alpha=0.75)
        if title is None:
            title = str(self)
        if title is not False:
            # split long titles - otherwise matplotlib generates errors
            title_fragments = [title[i:i+50] for i in sm.xrange(0, len(title), 50)]
            ax.set_title("\n".join(title_fragments))
        fig.tight_layout(pad=0)
        # render the figure to a temporary PNG, then read it back as an array
        with tempfile.NamedTemporaryFile(suffix=".png") as f:
            # we don't add bbox_inches='tight' here so that draw_distributions_grid has an easier
            # time combining many plots
            fig.savefig(f.name)
            data = imageio.imread(f)[..., 0:3]
        plt.close()
        return data
class Deterministic(StochasticParameter):
    """
    Parameter that always yields one constant value.

    Parameters
    ----------
    value : number or str or imgaug.parameters.StochasticParameter
        The constant to return on every draw. A string may be provided to
        generate arrays of strings. If a StochasticParameter is given, a
        single value is sampled from it exactly once and then used as the
        constant value.

    Examples
    --------
    >>> param = Deterministic(10)

    Will always sample the value 10.
    """

    def __init__(self, value):
        super(Deterministic, self).__init__()
        if isinstance(value, StochasticParameter):
            # freeze the parameter: draw once, keep the result forever
            self.value = value.draw_sample()
        elif ia.is_single_number(value) or ia.is_string(value):
            self.value = value
        else:
            raise Exception("Expected StochasticParameter object or number or string, got %s." % (type(value),))

    def _draw_samples(self, size, random_state):
        # the random state is irrelevant for a constant
        return np.full(size, self.value)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        if ia.is_single_integer(self.value):
            return "Deterministic(int %d)" % (self.value,)
        if ia.is_single_float(self.value):
            return "Deterministic(float %.8f)" % (self.value,)
        return "Deterministic(%s)" % (str(self.value),)
class Choice(StochasticParameter):
    """
    Parameter that samples value from a list of allowed values.
    Parameters
    ----------
    a : iterable
        List of allowed values.
        Usually expected to be integers, floats or strings.
        May also contain StochasticParameter instances, which are then
        sampled from recursively.
    replace : bool, optional
        Whether to perform sampling with or without replacing.
    p : None or iterable of number, optional
        Optional probabilities of each element in `a`.
        Must have the same length as `a` (if provided).
    Examples
    --------
    >>> param = Choice([0.25, 0.5, 0.75], p=[0.25, 0.5, 0.25])
    Parameter of which 50 percent of all sampled values will be 0.5.
    The other 50 percent will be either 0.25 or 0.75.
    """
    def __init__(self, a, replace=True, p=None):
        super(Choice, self).__init__()
        ia.do_assert(ia.is_iterable(a), "Expected a to be an iterable (e.g. list), got %s." % (type(a),))
        self.a = a
        self.replace = replace
        if p is not None:
            ia.do_assert(ia.is_iterable(p), "Expected p to be None or an iterable, got %s." % (type(p),))
            ia.do_assert(len(p) == len(a),
                         "Expected lengths of a and p to be identical, got %d and %d." % (len(a), len(p)))
        self.p = p
    def _draw_samples(self, size, random_state):
        # fast path below; the slow path resolves StochasticParameter entries
        # of self.a by sampling from them after the initial choice
        if any([isinstance(a_i, StochasticParameter) for a_i in self.a]):
            # TODO replace by derive_random_state()
            seed = random_state.randint(0, 10**6, 1)[0]
            samples = ia.new_random_state(seed).choice(self.a, np.prod(size), replace=self.replace, p=self.p)
            # collect the sampled parameters and how many samples must be taken
            # from each of them
            params_counter = defaultdict(lambda: 0)
            for sample in samples:
                if isinstance(sample, StochasticParameter):
                    key = str(sample)
                    params_counter[key] += 1
            # collect per parameter once the required number of samples
            # iterate here over self.a to always use the same seed for
            # the same parameter
            # TODO this might fail if the same parameter is added multiple times to self.a?
            # TODO this will fail if a parameter cant handle size=(N,)
            param_to_samples = dict()
            for i, param in enumerate(self.a):
                key = str(param)
                if key in params_counter:
                    param_to_samples[key] = param.draw_samples(
                        size=(params_counter[key],),
                        random_state=ia.new_random_state(seed+1+i)
                    )
            # assign the values sampled from the parameters to the `samples`
            # array by replacing the respective parameter
            param_to_readcount = defaultdict(lambda: 0)
            for i, sample in enumerate(samples):
                if isinstance(sample, StochasticParameter):
                    key = str(sample)
                    readcount = param_to_readcount[key]
                    samples[i] = param_to_samples[key][readcount]
                    param_to_readcount[key] += 1
            samples = samples.reshape(size)
        else:
            # plain values only: delegate directly to numpy's choice
            samples = random_state.choice(self.a, size, replace=self.replace, p=self.p)
        return samples
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "Choice(a=%s, replace=%s, p=%s)" % (str(self.a), str(self.replace), str(self.p),)
class Binomial(StochasticParameter):
    """
    Binomial distribution, yielding 0s and 1s.

    Parameters
    ----------
    p : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Probability of drawing a 1, expected to be in the range [0, 1].
        * A number is used as a constant probability.
        * A tuple ``(a, b)`` leads to a probability sampled from ``a<=x<b``
          per call to :func:`imgaug.parameters.Binomial._draw_samples`.
        * A list of numbers leads to a probability picked from the list per call.
        * A StochasticParameter is sampled once per call.

    Examples
    --------
    >>> param = Binomial(Uniform(0.01, 0.2))

    Uses a varying probability `p` between 0.01 and 0.2 per sampling.
    """

    def __init__(self, p):
        super(Binomial, self).__init__()
        self.p = handle_continuous_param(p, "p")

    def _draw_samples(self, size, random_state):
        prob = self.p.draw_sample(random_state=random_state)
        ia.do_assert(0 <= prob <= 1.0, "Expected probability p to be in range [0.0, 1.0], got %s." % (prob,))
        return random_state.binomial(1, prob, size)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "Binomial(%s)" % (self.p,)
class DiscreteUniform(StochasticParameter):
    """
    Parameter that resembles a discrete range of values ``[a .. b]``.

    Parameters
    ----------
    a : int or imgaug.parameters.StochasticParameter
        Lower bound of the sampling range (inclusive). If a
        StochasticParameter, it is queried once per sampling. If ``a>b``,
        the bounds are swapped automatically; if ``a==b``, all generated
        values equal ``a``.
    b : int or imgaug.parameters.StochasticParameter
        Upper bound of the sampling range (inclusive); otherwise analogous
        to `a`.

    Examples
    --------
    >>> param = DiscreteUniform(10, Choice([20, 30, 40]))

    Sampled values will be discrete and come from the either [10..20] or [10..30] or [10..40].
    """

    def __init__(self, a, b):
        super(DiscreteUniform, self).__init__()
        self.a = handle_discrete_param(a, "a")
        self.b = handle_discrete_param(b, "b")

    def _draw_samples(self, size, random_state):
        lower = self.a.draw_sample(random_state=random_state)
        upper = self.b.draw_sample(random_state=random_state)
        if lower == upper:
            # degenerate range: constant output, no RNG draw needed
            return np.full(size, lower)
        if lower > upper:
            lower, upper = upper, lower
        # +1 because randint's upper bound is exclusive
        return random_state.randint(lower, upper + 1, size)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "DiscreteUniform(%s, %s)" % (self.a, self.b)
class Poisson(StochasticParameter):
    """
    Parameter that resembles a poisson distribution.

    A poisson distribution with ``lambda=0`` has its highest probability at
    point 0 and decreases quickly from there. Poisson distributions are
    discrete and never negative.

    Parameters
    ----------
    lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Lambda parameter of the poisson distribution.
        * A number is used as a constant value.
        * A tuple ``(a, b)`` leads to a value sampled from ``[a, b)`` per
          call to :func:`imgaug.parameters.Poisson._draw_samples`.
        * A list of numbers leads to a value picked from the list per call.
        * A StochasticParameter is queried once per call.

    Examples
    --------
    >>> param = Poisson(1)

    Sample from a poisson distribution with ``lambda=1``.
    """

    def __init__(self, lam):
        super(Poisson, self).__init__()
        self.lam = handle_continuous_param(lam, "lam")

    def _draw_samples(self, size, random_state):
        lam = self.lam.draw_sample(random_state=random_state)
        # negative lambdas are invalid; silently floor them at zero
        if lam < 0:
            lam = 0
        return random_state.poisson(lam=lam, size=size)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "Poisson(%s)" % (self.lam,)
class Normal(StochasticParameter):
    """
    Parameter that resembles a (continuous) normal distribution.

    This is a wrapper around numpy's ``random.normal()``.

    Parameters
    ----------
    loc : number or imgaug.parameters.StochasticParameter
        The mean of the normal distribution. If a StochasticParameter, it is
        sampled once per call to :func:`imgaug.parameters.Normal._draw_samples`.
    scale : number or imgaug.parameters.StochasticParameter
        The standard deviation of the normal distribution (>=0). If a
        StochasticParameter, it is sampled once per call.

    Examples
    --------
    >>> param = Normal(Choice([-1.0, 1.0]), 1.0)

    A standard normal distribution, which's mean is shifted either 1.0 to
    the left or 1.0 to the right.
    """

    def __init__(self, loc, scale):
        super(Normal, self).__init__()
        self.loc = handle_continuous_param(loc, "loc")
        self.scale = handle_continuous_param(scale, "scale", value_range=(0, None))

    def _draw_samples(self, size, random_state):
        mean = self.loc.draw_sample(random_state=random_state)
        std = self.scale.draw_sample(random_state=random_state)
        ia.do_assert(std >= 0, "Expected scale to be in range [0, inf), got %s." % (std,))
        if std == 0:
            # zero spread degenerates to a constant
            return np.full(size, mean)
        return random_state.normal(mean, std, size=size)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "Normal(loc=%s, scale=%s)" % (self.loc, self.scale)
class TruncatedNormal(StochasticParameter):
"""
Parameter that resembles a truncated normal distribution.
A truncated normal distribution is very close to a normal distribution
except the domain is smoothly bounded.
This is a wrapper around scipy.stats.truncnorm.
Parameters
----------
loc : number or imgaug.parameters.StochasticParameter
The mean of the normal distribution.
If StochasticParameter, the mean will be sampled once per call
to :func:`imgaug.parameters.TruncatedNormal._draw_samples`.
scale : number or imgaug.parameters.StochasticParameter
The standard deviation of the normal distribution.
If StochasticParameter, the scale will be sampled once per call
to :func:`imgaug.parameters.TruncatedNormal._draw_samples`.
low : number or imgaug.parameters.StochasticParameter
The minimum value of the truncated normal distribution.
If StochasticParameter, the scale will be sampled once per call
to :func:`imgaug.parameters.TruncatedNormal._draw_samples`.
high : number or imgaug.parameters.StochasticParameter
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/imgaug.py | augmentation/image_augmentation/helpers/imgaug/imgaug.py | from __future__ import print_function, division, absolute_import
import math
import numbers
import sys
import os
import json
import types
import functools
import numpy as np
import cv2
import imageio
import six
import six.moves as sm
import skimage.draw
import skimage.measure
import collections
from PIL import Image as PIL_Image, ImageDraw as PIL_ImageDraw, ImageFont as PIL_ImageFont
# Sentinel used by augmenters to mean "all possible values/channels".
ALL = "ALL"
# Directory containing this module; used to locate bundled data files below.
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")
# Font bundled with the library; used by draw_text().
DEFAULT_FONT_FP = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
# Valid range for seeds sampled from the global random state.
SEED_MIN_VALUE = 0
SEED_MAX_VALUE = 2**31-1  # use 2**31 instead of 2**32 here because 2**31 errored on some systems
# to check if a dtype instance is among these dtypes, use e.g. `dtype.type in NP_FLOAT_TYPES`
# do not just use `dtype in NP_FLOAT_TYPES` as that would fail
NP_FLOAT_TYPES = set(np.sctypes["float"])
NP_INT_TYPES = set(np.sctypes["int"])
NP_UINT_TYPES = set(np.sctypes["uint"])
# Default rendering backend for imshow().
IMSHOW_BACKEND_DEFAULT = "matplotlib"
# Interpolation modes accepted by the imresize_* functions, either as
# strings or directly as cv2 constants.
IMRESIZE_VALID_INTERPOLATIONS = ["nearest", "linear", "area", "cubic",
                                 cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
def is_np_array(val):
    """
    Checks whether a variable is a numpy array.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    out : bool
        True if the variable is a numpy array. Otherwise False.
    """
    # using np.generic here via isinstance(val, (np.ndarray, np.generic)) seems to also fire for scalar numpy values
    # even though those are not arrays
    return isinstance(val, np.ndarray)
def is_single_integer(val):
    """
    Checks whether a variable is a single (non-bool) integer.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True if the variable is an integer. Otherwise False.
    """
    # bool is a subclass of int in Python, but is never treated as an
    # integer by this library.
    if isinstance(val, bool):
        return False
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """
    Checks whether a variable is a single float.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True if the variable is a float. Otherwise False.
    """
    # Exclude bools and integral values first; what remains of the reals
    # is considered a float.
    if isinstance(val, bool) or is_single_integer(val):
        return False
    return isinstance(val, numbers.Real)
def is_single_number(val):
    """
    Checks whether a variable is a single number, i.e. an integer or float.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True if the variable is a number. Otherwise False.
    """
    if is_single_integer(val):
        return True
    return is_single_float(val)
def is_iterable(val):
    """
    Checks whether a variable is iterable.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True if the variable is an iterable. Otherwise False.
    """
    # ``collections.Iterable`` was deprecated in Python 3.3 and removed in
    # Python 3.10; the ABC lives in ``collections.abc``. Fall back to the
    # old location for Python 2 compatibility.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        from collections import Iterable
    return isinstance(val, Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """
    Checks whether a variable is a string.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True if the variable is a string. Otherwise False.
    """
    # six.string_types is ``(str,)`` on py3 and ``(basestring,)`` on py2.
    return isinstance(val, six.string_types)
def is_single_bool(val):
    """
    Checks whether a variable is a boolean.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True if the variable is a boolean. Otherwise False.
    """
    # isinstance() is the idiomatic check. ``bool`` cannot be subclassed in
    # Python, so this is exactly equivalent to the previous
    # ``type(val) == type(True)`` comparison (numpy bools still return False).
    return isinstance(val, bool)
def is_integer_array(val):
    """
    Checks whether a variable is a numpy integer array.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True if the variable is a numpy integer array. Otherwise False.
    """
    # Non-arrays (including numpy scalars) can never qualify.
    if not isinstance(val, np.ndarray):
        return False
    # np.integer covers both signed and unsigned integer dtypes.
    return issubclass(val.dtype.type, np.integer)
def is_float_array(val):
    """
    Checks whether a variable is a numpy float array.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True if the variable is a numpy float array. Otherwise False.
    """
    # Non-arrays (including numpy scalars) can never qualify.
    if not isinstance(val, np.ndarray):
        return False
    # np.floating covers float16/32/64/128 dtypes.
    return issubclass(val.dtype.type, np.floating)
def is_callable(val):
    """
    Checks whether a variable is a callable, e.g. a function.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True if the variable is a callable. Otherwise False.
    """
    # The builtin callable() was missing in Python 3.0-3.2; keep the
    # attribute-based fallback for those interpreters.
    major, minor = sys.version_info[0], sys.version_info[1]
    if major == 3 and minor <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def is_generator(val):
    """
    Checks whether a variable is a generator.
    Parameters
    ----------
    val
        The variable to check.
    Returns
    -------
    bool
        True is the variable is a generator. Otherwise False.
    """
    # Matches generator *objects* only, not generator functions and not
    # other lazy iterables such as ``range``.
    return isinstance(val, types.GeneratorType)
def flatten(nested_iterable):
    """
    Flattens arbitrarily nested lists/tuples.
    Code partially taken from https://stackoverflow.com/a/10824420.
    Parameters
    ----------
    nested_iterable
        A list or tuple of arbitrarily nested values.
    Yields
    ------
    any
        Non-list and non-tuple values in `nested_iterable`.
    """
    # Only recurse into lists/tuples. Strings and ndarrays are iterable
    # too, but must be yielded whole instead of being split into elements.
    if isinstance(nested_iterable, (list, tuple)):
        for element in nested_iterable:
            for leaf in flatten(element):
                yield leaf
    else:
        yield nested_iterable
def caller_name():
    """
    Returns the name of the caller, e.g. a function.
    Returns
    -------
    str
        The name of the caller as a string
    """
    # _getframe(1) is the direct caller's frame; co_name is the name of
    # the code object executing in that frame.
    return sys._getframe(1).f_code.co_name
def seed(seedval):
    """
    Set the seed used by the global random state and thereby all randomness
    in the library.
    This random state is used by default by all augmenters. Under special
    circumstances (e.g. when an augmenter is switched to deterministic mode),
    the global random state is replaced by another -- local -- one.
    The replacement is dependent on the global random state.
    Parameters
    ----------
    seedval : int
        The seed to use.
    """
    # Re-seeds the module-level RandomState in place, so every consumer
    # holding a reference to it is affected.
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Returns the current/global random state of the library.
    Returns
    ----------
    numpy.random.RandomState
        The current/global random state.
    """
    # Returned by reference, not copied: mutating it affects the library.
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Returns a new random state.
    Parameters
    ----------
    seed : None or int, optional
        Optional seed value to use.
        The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.
    fully_random : bool, optional
        Whether to use numpy's random initialization for the
        RandomState (used if set to True). If False, a seed is sampled from
        the global random state, which is a bit faster and hence the default.
    Returns
    -------
    numpy.random.RandomState
        The new random state.
    """
    if seed is None and not fully_random:
        # Deriving a seed from the global state is much cheaper than
        # letting numpy gather OS entropy via RandomState().
        seed = CURRENT_RANDOM_STATE.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Returns a dummy random state that is always based on a seed of 1.
    Returns
    -------
    numpy.random.RandomState
        The new random state.
    """
    # Callers typically overwrite the state immediately (see
    # copy_random_state()); the fixed seed just makes creation cheap.
    return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
    """
    Creates a copy of a random state.
    Parameters
    ----------
    random_state : numpy.random.RandomState
        The random state to copy.
    force_copy : bool, optional
        If True, this function will always create a copy of every random
        state. If False, it will not copy numpy's default random state,
        but all other random states.
    Returns
    -------
    rs_copy : numpy.random.RandomState
        The copied random state.
    """
    # numpy's module-level global state is intentionally shared unless the
    # caller explicitly forces a copy.
    if random_state == np.random and not force_copy:
        return random_state
    # The seed value is irrelevant; the full state is overwritten below.
    rs_copy = np.random.RandomState(1)
    rs_copy.set_state(random_state.get_state())
    return rs_copy
def derive_random_state(random_state):
    """
    Create a new random states based on an existing random state or seed.
    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state or seed from which to derive the new random state.
    Returns
    -------
    numpy.random.RandomState
        Derived random state.
    """
    # Delegate to the plural variant and unpack its single element.
    (derived,) = derive_random_states(random_state, n=1)
    return derived
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """
    Create N new random states based on an existing random state or seed.
    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state or seed from which to derive new random states.
    n : int, optional
        Number of random states to derive.
    Returns
    -------
    list of numpy.random.RandomState
        Derived random states.
    """
    # Advance the source state once and fan the drawn seed out into n
    # consecutive seeds.
    base_seed = random_state.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
    derived = []
    for offset in sm.xrange(n):
        derived.append(new_random_state(base_seed + offset))
    return derived
def forward_random_state(random_state):
    """
    Forward the internal state of a random state.
    This makes sure that future calls to the random_state will produce new random values.
    Parameters
    ----------
    random_state : numpy.random.RandomState
        Random state to forward.
    """
    # Drawing (and discarding) a single sample mutates the generator state.
    random_state.uniform()
def _quokka_normalize_extract(extract):
    """
    Generate a normalized rectangle to be extract from the standard quokka image.
    Parameters
    ----------
    extract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Unnormalized representation of the image subarea to be extracted.
            * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``
              will be extracted from the image.
            * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
              and ``y2``.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the
              one bounding box will be used similar to BoundingBox.
    Returns
    -------
    bb : imgaug.BoundingBox
        Normalized representation of the area to extract from the standard quokka image.
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
    if extract == "square":
        # 643 is the height of the full quokka image, so this is the largest
        # square anchored at the top-left corner.
        bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)
    elif isinstance(extract, tuple) and len(extract) == 4:
        bb = BoundingBox(x1=extract[0], y1=extract[1], x2=extract[2], y2=extract[3])
    elif isinstance(extract, BoundingBox):
        bb = extract
    elif isinstance(extract, BoundingBoxesOnImage):
        # Only a single box defined over the full-resolution image is supported.
        do_assert(len(extract.bounding_boxes) == 1)
        do_assert(extract.shape[0:2] == (643, 960))
        bb = extract.bounding_boxes[0]
    else:
        raise Exception(
            "Expected 'square' or tuple of four entries or BoundingBox or BoundingBoxesOnImage "
            + "for parameter 'extract', got %s." % (type(extract),)
        )
    return bb
def _compute_resized_shape(from_shape, to_shape):
    """
    Computes the intended new shape of an image-like array after resizing.
    Parameters
    ----------
    from_shape : tuple or ndarray
        Old shape of the array. Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or
        alternatively an array with two or three dimensions.
    to_shape : None or tuple of ints or tuple of floats or int or float or ndarray
        New shape of the array.
            * If None, then `from_shape` will be used as the new shape.
            * If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it
              is part of `from_shape`.
            * If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old
              height/width.
            * If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height
              and width.
            * If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will
              be used as the new height and width.
            * If a numpy array, then the array's shape will be used.
    Returns
    -------
    to_shape_computed : tuple of int
        New shape.
    """
    # Arrays stand in for their own shapes.
    if is_np_array(from_shape):
        from_shape = from_shape.shape
    if is_np_array(to_shape):
        to_shape = to_shape.shape
    to_shape_computed = list(from_shape)
    if to_shape is None:
        # None keeps the old shape unchanged.
        pass
    elif isinstance(to_shape, tuple):
        do_assert(len(from_shape) in [2, 3])
        do_assert(len(to_shape) in [2, 3])
        # Channel counts must agree if both shapes specify them; otherwise
        # the channel count is taken from whichever shape has one.
        if len(from_shape) == 3 and len(to_shape) == 3:
            do_assert(from_shape[2] == to_shape[2])
        elif len(to_shape) == 3:
            to_shape_computed.append(to_shape[2])
        do_assert(all([v is None or is_single_number(v) for v in to_shape[0:2]]),
                  "Expected the first two entries in to_shape to be None or numbers, "
                  + "got types %s." % (str([type(v) for v in to_shape[0:2]]),))
        for i, from_shape_i in enumerate(from_shape[0:2]):
            if to_shape[i] is None:
                # None keeps the original extent on this axis.
                to_shape_computed[i] = from_shape_i
            elif is_single_integer(to_shape[i]):
                # Integers are absolute pixel sizes.
                to_shape_computed[i] = to_shape[i]
            else:  # float
                # Floats are relative scale factors.
                to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))
    elif is_single_integer(to_shape) or is_single_float(to_shape):
        # A scalar means: use this size/factor for both height and width.
        to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))
    else:
        raise Exception("Expected to_shape to be None or ndarray or tuple of floats or tuple of ints or single int "
                        + "or single float, got %s." % (type(to_shape),))
    return tuple(to_shape_computed)
def quokka(size=None, extract=None):
    """
    Returns an image of a quokka as a numpy array.
    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.
        Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
        and ``W`` is the width. If None, then the image will not be resized.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea of the quokka image to extract:
            * If None, then the whole image will be used.
            * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)`` will
              be extracted from the image.
            * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``
              and ``y2``.
            * If a BoundingBox, then that bounding box's area will be extracted from the image.
            * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box
              and a shape matching the full image dimensions (i.e. ``(643, 960, *)``). Then the
              one bounding box will be used similar to BoundingBox.
    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    img = imageio.imread(QUOKKA_FP, pilmode="RGB")
    if extract is not None:
        # Crop first so that any resizing below applies to the cropped area.
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is not None:
        shape_resized = _compute_resized_shape(img.shape, size)
        img = imresize_single_image(img, shape_resized[0:2])
    return img
def quokka_square(size=None):
    """
    Returns a (square) image of a quokka as a numpy array.
    Parameters
    ----------
    size : None or float or tuple of int, optional
        Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.
        Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height
        and ``W`` is the width. If None, then the image will not be resized.
    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    # Convenience wrapper around quokka() with the predefined square crop.
    return quokka(size=size, extract="square")
def quokka_heatmap(size=None, extract=None):
    """
    Returns a heatmap (here: depth map) for the standard example quokka image.
    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.
    Returns
    -------
    result : imgaug.HeatmapsOnImage
        Depth map as an heatmap object. Values close to 0.0 denote objects that are close to
        the camera. Values close to 1.0 denote objects that are furthest away (among all shown
        objects).
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.heatmaps import HeatmapsOnImage
    img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
    # The stored depth map is half resolution; upscale it to the full
    # 643x960 quokka image size before any cropping.
    img = imresize_single_image(img, (643, 960), interpolation="cubic")
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img = bb.extract_from_image(img)
    if size is None:
        size = img.shape[0:2]
    shape_resized = _compute_resized_shape(img.shape, size)
    img = imresize_single_image(img, shape_resized[0:2])
    img_0to1 = img[..., 0]  # depth map was saved as 3-channel RGB
    img_0to1 = img_0to1.astype(np.float32) / 255.0
    img_0to1 = 1 - img_0to1  # depth map was saved as 0 being furthest away
    return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,))
def quokka_segmentation_map(size=None, extract=None):
    """
    Returns a segmentation map for the standard example quokka image.
    Parameters
    ----------
    size : None or float or tuple of int, optional
        See :func:`imgaug.quokka`.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        See :func:`imgaug.quokka`.
    Returns
    -------
    result : imgaug.SegmentationMapOnImage
        Segmentation map object.
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.segmaps import SegmentationMapOnImage
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    # Collect the polygon outline of the quokka from the annotations file.
    xx = []
    yy = []
    for kp_dict in json_dict["polygons"][0]["keypoints"]:
        x = kp_dict["x"]
        y = kp_dict["y"]
        xx.append(x)
        yy.append(y)
    # Rasterize the polygon into a binary (0.0/1.0) single-channel mask.
    img_seg = np.zeros((643, 960, 1), dtype=np.float32)
    rr, cc = skimage.draw.polygon(np.array(yy), np.array(xx), shape=img_seg.shape)
    img_seg[rr, cc] = 1.0
    if extract is not None:
        bb = _quokka_normalize_extract(extract)
        img_seg = bb.extract_from_image(img_seg)
    segmap = SegmentationMapOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))
    if size is not None:
        shape_resized = _compute_resized_shape(img_seg.shape, size)
        segmap = segmap.resize(shape_resized[0:2])
        segmap.shape = tuple(shape_resized[0:2]) + (3,)
    return segmap
def quokka_keypoints(size=None, extract=None):
    """
    Returns example keypoints on the standard example quokka image.
    The keypoints cover the eyes, ears, nose and paws.
    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the keypoints are placed. If None, then the keypoints
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.
    Returns
    -------
    kpsoi : imgaug.KeypointsOnImage
        Example keypoints on the quokka image.
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
    # When extracting a subarea, shift all coordinates into its frame.
    left, top = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        left = bb_extract.x1
        top = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    keypoints = []
    for kp_dict in json_dict["keypoints"]:
        keypoints.append(Keypoint(x=kp_dict["x"] - left, y=kp_dict["y"] - top))
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    kpsoi = KeypointsOnImage(keypoints, shape=shape)
    if size is not None:
        shape_resized = _compute_resized_shape(shape, size)
        kpsoi = kpsoi.on(shape_resized)
    return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
    """
    Returns example bounding boxes on the standard example quokka image.
    Currently only a single bounding box is returned that covers the quokka.
    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the BBs are placed. If None, then the BBs
        are not projected to any new size (positions on the original image are used).
        Floats lead to relative size changes, ints to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.
    Returns
    -------
    bbsoi : imgaug.BoundingBoxesOnImage
        Example BBs on the quokka image.
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
    # When extracting a subarea, shift all coordinates into its frame.
    left, top = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        left = bb_extract.x1
        top = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    bbs = []
    for bb_dict in json_dict["bounding_boxes"]:
        bbs.append(
            BoundingBox(
                x1=bb_dict["x1"] - left,
                y1=bb_dict["y1"] - top,
                x2=bb_dict["x2"] - left,
                y2=bb_dict["y2"] - top
            )
        )
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
    if size is not None:
        shape_resized = _compute_resized_shape(shape, size)
        bbsoi = bbsoi.on(shape_resized)
    return bbsoi
def quokka_polygons(size=None, extract=None):
    """
    Returns example polygons on the standard example quokka image.
    The result contains one polygon, covering the quokka's outline.
    Parameters
    ----------
    size : None or float or tuple of int or tuple of float, optional
        Size of the output image on which the polygons are placed. If None,
        then the polygons are not projected to any new size (positions on the
        original image are used). Floats lead to relative size changes, ints
        to absolute sizes in pixels.
    extract : None or 'square' or tuple of number or imgaug.BoundingBox or \
              imgaug.BoundingBoxesOnImage
        Subarea to extract from the image. See :func:`imgaug.quokka`.
    Returns
    -------
    psoi : imgaug.PolygonsOnImage
        Example polygons on the quokka image.
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.polys import Polygon, PolygonsOnImage
    # When extracting a subarea, shift all coordinates into its frame.
    left, top = 0, 0
    if extract is not None:
        bb_extract = _quokka_normalize_extract(extract)
        left = bb_extract.x1
        top = bb_extract.y1
    with open(QUOKKA_ANNOTATIONS_FP, "r") as f:
        json_dict = json.load(f)
    polygons = []
    for poly_json in json_dict["polygons"]:
        polygons.append(
            Polygon([(point["x"] - left, point["y"] - top)
                     for point in poly_json["keypoints"]])
        )
    if extract is not None:
        shape = (bb_extract.height, bb_extract.width, 3)
    else:
        shape = (643, 960, 3)
    psoi = PolygonsOnImage(polygons, shape=shape)
    if size is not None:
        shape_resized = _compute_resized_shape(shape, size)
        psoi = psoi.on(shape_resized)
    return psoi
def angle_between_vectors(v1, v2):
    """
    Returns the angle in radians between vectors `v1` and `v2`.
    From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
    Parameters
    ----------
    v1 : (N,) ndarray
        First vector.
    v2 : (N,) ndarray
        Second vector.
    Returns
    -------
    out : float
        Angle in radians.
    Examples
    --------
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))
    1.570796...
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))
    0.0
    >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))
    3.141592...
    """
    norm1 = np.linalg.norm(v1)
    norm2 = np.linalg.norm(v2)
    # Zero-length vectors cannot be normalized; substitute null vectors.
    unit1 = (v1 / norm1) if norm1 > 0 else np.float32(v1) * 0
    unit2 = (v2 / norm2) if norm2 > 0 else np.float32(v2) * 0
    # Clip protects arccos against tiny floating point overshoots beyond +-1.
    return np.arccos(np.clip(np.dot(unit1, unit2), -1.0, 1.0))
# TODO is this used anywhere?
def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):
    """
    Compute the intersection point of two lines.
    Taken from https://stackoverflow.com/a/20679579 .
    Parameters
    ----------
    x1 : number
        x coordinate of the first point on line 1. (The lines extends beyond this point.)
    y1 : number
        y coordinate of the first point on line 1. (The lines extends beyond this point.)
    x2 : number
        x coordinate of the second point on line 1. (The lines extends beyond this point.)
    y2 : number
        y coordinate of the second point on line 1. (The lines extends beyond this point.)
    x3 : number
        x coordinate of the first point on line 2. (The lines extends beyond this point.)
    y3 : number
        y coordinate of the first point on line 2. (The lines extends beyond this point.)
    x4 : number
        x coordinate of the second point on line 2. (The lines extends beyond this point.)
    y4 : number
        y coordinate of the second point on line 2. (The lines extends beyond this point.)
    Returns
    -------
    tuple of number or bool
        The coordinate of the intersection point as a tuple ``(x, y)``.
        If the lines are parallel (no intersection point or an infinite number of them), the result is False.
    """
    def _homogeneous(xa, ya, xb, yb):
        # Line through two points in the form a*x + b*y = c.
        a = ya - yb
        b = xb - xa
        c = -(xa * yb - xb * ya)
        return a, b, c
    a1, b1, c1 = _homogeneous(x1, y1, x2, y2)
    a2, b2, c2 = _homogeneous(x3, y3, x4, y4)
    # Cramer's rule; a zero determinant means the lines are parallel.
    denom = a1 * b2 - b1 * a2
    if denom == 0:
        return False
    x = (c1 * b2 - b1 * c2) / denom
    y = (a1 * c2 - c1 * a2) / denom
    return x, y
# TODO replace by cv2.putText()?
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
    """
    Draw text on an image.
    This uses by default DejaVuSans as its font, which is included in this library.
    dtype support::
        * ``uint8``: yes; fully tested
        * ``uint16``: no
        * ``uint32``: no
        * ``uint64``: no
        * ``int8``: no
        * ``int16``: no
        * ``int32``: no
        * ``int64``: no
        * ``float16``: no
        * ``float32``: yes; not tested
        * ``float64``: no
        * ``float128``: no
        * ``bool``: no
        TODO check if other dtypes could be enabled
    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
    y : int
        y-coordinate of the top left corner of the text.
    x : int
        x-coordinate of the top left corner of the text.
    text : str
        The text to draw.
    color : iterable of int, optional
        Color of the text to draw. For RGB-images this is expected to be an RGB color.
    size : int, optional
        Font size of the text to draw.
    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it.
    """
    do_assert(img.dtype in [np.uint8, np.float32])
    # Remember the input dtype so the result can be converted back at the end;
    # PIL requires uint8 input.
    input_dtype = img.dtype
    if img.dtype == np.float32:
        img = img.astype(np.uint8)
    img = PIL_Image.fromarray(img)
    font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = PIL_ImageDraw.Draw(img)
    # PIL expects the anchor as (x, y).
    context.text((x, y), text, fill=tuple(color), font=font)
    img_np = np.asarray(img)
    # PIL/asarray returns read only array
    if not img_np.flags["WRITEABLE"]:
        try:
            # this seems to no longer work with np 1.16 (or was pillow updated?)
            img_np.setflags(write=True)
        except ValueError as ex:
            if "cannot set WRITEABLE flag to True of this array" in str(ex):
                img_np = np.copy(img_np)
    if img_np.dtype != input_dtype:
        img_np = img_np.astype(input_dtype)
    return img_np
# TODO rename sizes to size?
def imresize_many_images(images, sizes=None, interpolation=None):
"""
Resize many images to a specified size.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: limited; tested (4)
* ``int64``: no (2)
* ``float16``: yes; tested (5)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (6)
- (1) rejected by ``cv2.imresize``
- (2) results too inaccurate
- (3) mapped internally to ``int16`` when interpolation!="nearest"
- (4) only supported for interpolation="nearest", other interpolations lead to cv2 error
- (5) mapped internally to ``float32``
- (6) mapped internally to ``uint8``
Parameters
----------
images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray
Array of the images to resize.
Usually recommended to be of dtype uint8.
sizes : float or iterable of int or iterable of float
The new size of the images, given either as a fraction (a single float) or as
a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``
tuple of two floats.
interpolation : None or str or int, optional
The interpolation to use during resize.
If int, then expected to be one of:
* ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)
* ``cv2.INTER_LINEAR`` (linear interpolation)
* ``cv2.INTER_AREA`` (area interpolation)
* ``cv2.INTER_CUBIC`` (cubic interpolation)
If string, then expected to be one of:
* ``nearest`` (identical to ``cv2.INTER_NEAREST``)
* ``linear`` (identical to ``cv2.INTER_LINEAR``)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/weather.py | augmentation/image_augmentation/helpers/imgaug/augmenters/weather.py | """
Augmenters that create weather effects.
Do not import directly from this file, as the categorization is not final.
Use instead::
from imgaug import augmenters as iaa
and then e.g.::
seq = iaa.Sequential([iaa.Snowflakes()])
List of augmenters:
* FastSnowyLandscape
* Clouds
* Fog
* CloudLayer
* Snowflakes
* SnowflakesLayer
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import cv2
from . import meta, arithmetic, blur, contrast, color as augmenters_color
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
class FastSnowyLandscape(meta.Augmenter):
"""
Augmenter to convert non-snowy landscapes to snowy ones.
This expects to get an image that roughly shows a landscape.
This is based on the method proposed by
https://medium.freecodecamp.org/image-augmentation-make-it-rain-make-it-snow-how-to-modify-a-photo-with-machine-learning-163c0cb3843f?gi=bca4a13e634c
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) This augmenter is based on a colorspace conversion to HLS. Hence, only RGB uint8
inputs are sensible.
Parameters
----------
lightness_threshold : number or tuple of number or list of number\
or imgaug.parameters.StochasticParameter, optional
All pixels with lightness in HLS colorspace below this value will have their lightness increased by
`lightness_multiplier`.
* If an int, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the discrete range ``[a .. b]`` will be used.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
lightness_multiplier : number or tuple of number or list of number\
or imgaug.parameters.StochasticParameter, optional
Multiplier for pixel's lightness value in HLS colorspace. Affects all pixels selected via `lightness_threshold`.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
from_colorspace : str, optional
The source colorspace of the input images. See :func:`imgaug.augmenters.color.ChangeColorspace.__init__`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.FastSnowyLandscape(lightness_threshold=140, lightness_multiplier=2.5)
Search for all pixels in the image with a lightness value in HLS colorspace of less than 140 and increase their
lightness by a factor of 2.5. This is the configuration proposed in the original article (see link above).
>>> aug = iaa.FastSnowyLandscape(lightness_threshold=[128, 200], lightness_multiplier=(1.5, 3.5))
Search for all pixels in the image with a lightness value in HLS colorspace of less than 128 or less than 200
(one of these values is picked per image) and multiply their lightness by a factor of ``x`` with ``x`` being
sampled from ``uniform(1.5, 3.5)`` (once per image).
>>> aug = iaa.FastSnowyLandscape(lightness_threshold=(100, 255), lightness_multiplier=(1.0, 4.0))
Similar to above, but the lightness threshold is sampled from ``uniform(100, 255)`` (per image) and the multiplier
from ``uniform(1.0, 4.0)`` (per image). This seems to produce good and varied results.
"""
def __init__(self, lightness_threshold=(100, 255), lightness_multiplier=(1.0, 4.0), from_colorspace="RGB",
name=None, deterministic=False, random_state=None):
super(FastSnowyLandscape, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.lightness_threshold = iap.handle_continuous_param(lightness_threshold, "lightness_threshold",
value_range=(0, 255),
tuple_to_uniform=True,
list_to_choice=True)
self.lightness_multiplier = iap.handle_continuous_param(lightness_multiplier, "lightness_multiplier",
value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
self.from_colorspace = from_colorspace
def _draw_samples(self, augmentables, random_state):
nb_augmentables = len(augmentables)
rss = ia.derive_random_states(random_state, 2)
thresh_samples = self.lightness_threshold.draw_samples((nb_augmentables,), rss[1])
lmul_samples = self.lightness_multiplier.draw_samples((nb_augmentables,), rss[0])
return thresh_samples, lmul_samples
def _augment_images(self, images, random_state, parents, hooks):
thresh_samples, lmul_samples = self._draw_samples(images, random_state)
result = images
for i, (image, thresh, lmul) in enumerate(zip(images, thresh_samples, lmul_samples)):
color_transform = augmenters_color.ChangeColorspace.CV_VARS["%s2HLS" % (self.from_colorspace,)]
color_transform_inverse = augmenters_color.ChangeColorspace.CV_VARS["HLS2%s" % (self.from_colorspace,)]
image_hls = cv2.cvtColor(image, color_transform)
cvt_dtype = image_hls.dtype
image_hls = image_hls.astype(np.float64)
lightness = image_hls[..., 1]
lightness[lightness < thresh] *= lmul
image_hls = iadt.restore_dtypes_(image_hls, cvt_dtype)
image_rgb = cv2.cvtColor(image_hls, color_transform_inverse)
result[i] = image_rgb
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
return [self.lightness_threshold, self.lightness_multiplier]
# TODO add vertical gradient alpha to have clouds only at skylevel/groundlevel
# TODO add configurable parameters
def Clouds(name=None, deterministic=False, random_state=None):
    """
    Augmenter that draws clouds in images.

    This is a wrapper around ``CloudLayer``: per image, one or two of the
    two preconfigured layers below are executed, producing varying cloud
    densities and frequency patterns.

    This augmenter seems to be fairly robust w.r.t. the image size. Tested
    with ``96x128``, ``192x256`` and ``960x1280``.

    dtype support::

        * ``uint8``: yes; tested
        * all other dtypes (``uint16/32/64``, ``int8/16/32/64``,
          ``float16/32/64/128``, ``bool``): no (1)

        - (1) Parameters of this augmenter are optimized for the value range
          of uint8. While other dtypes may be accepted, they will lead to
          images augmented in ways inappropriate for the respective dtype.

    Parameters
    ----------
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Clouds()

    Creates an augmenter that adds clouds to images.

    """
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    # Layer 1: fine, high-frequency, low-alpha cloud structures.
    layer_fine = CloudLayer(
        intensity_mean=(196, 255), intensity_freq_exponent=(-2.5, -2.0), intensity_coarse_scale=10,
        alpha_min=0, alpha_multiplier=(0.25, 0.75), alpha_size_px_max=(2, 8), alpha_freq_exponent=(-2.5, -2.0),
        sparsity=(0.8, 1.0), density_multiplier=(0.5, 1.0)
    )
    # Layer 2: coarse, low-frequency, denser cloud structures.
    layer_coarse = CloudLayer(
        intensity_mean=(196, 255), intensity_freq_exponent=(-2.0, -1.0), intensity_coarse_scale=10,
        alpha_min=0, alpha_multiplier=(0.5, 1.0), alpha_size_px_max=(64, 128), alpha_freq_exponent=(-2.0, -1.0),
        sparsity=(1.0, 1.4), density_multiplier=(0.8, 1.5)
    )

    return meta.SomeOf((1, 2), children=[layer_fine, layer_coarse],
                       random_order=False, name=name,
                       deterministic=deterministic, random_state=random_state)
# TODO add vertical gradient alpha to have fog only at skylevel/groundlevel
# TODO add configurable parameters
def Fog(name=None, deterministic=False, random_state=None):
    """
    Augmenter that draws fog in images.

    This is a wrapper around ``CloudLayer``: a single layer is executed per
    image, configured for fairly dense clouds with low-frequency patterns.

    This augmenter seems to be fairly robust w.r.t. the image size. Tested
    with ``96x128``, ``192x256`` and ``960x1280``.

    dtype support::

        * ``uint8``: yes; tested
        * all other dtypes (``uint16/32/64``, ``int8/16/32/64``,
          ``float16/32/64/128``, ``bool``): no (1)

        - (1) Parameters of this augmenter are optimized for the value range
          of uint8. While other dtypes may be accepted, they will lead to
          images augmented in ways inappropriate for the respective dtype.

    Parameters
    ----------
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Fog()

    Creates an augmenter that adds fog to images.

    """
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    # High alpha_min + low density give an even haze rather than distinct clouds.
    return CloudLayer(
        intensity_mean=(220, 255),
        intensity_freq_exponent=(-2.0, -1.5),
        intensity_coarse_scale=2,
        alpha_min=(0.7, 0.9),
        alpha_multiplier=0.3,
        alpha_size_px_max=(2, 8),
        alpha_freq_exponent=(-4.0, -2.0),
        sparsity=0.9,
        density_multiplier=(0.4, 0.9),
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
# TODO add perspective transform to each cloud layer to make them look more distant?
# TODO alpha_mean and density overlap - remove one of them
class CloudLayer(meta.Augmenter):
    """
    Augmenter to add a single layer of clouds to an image.

    dtype support::

        * ``uint8``: yes; indirectly tested (1)
        * ``uint16``: no
        * ``uint32``: no
        * ``uint64``: no
        * ``int8``: no
        * ``int16``: no
        * ``int32``: no
        * ``int64``: no
        * ``float16``: yes; not tested
        * ``float32``: yes; not tested
        * ``float64``: yes; not tested
        * ``float128``: yes; not tested (2)
        * ``bool``: no

        - (1) indirectly tested via tests for ``Clouds`` and ``Fog``
        - (2) Note that random values are usually sampled as ``int64`` or ``float64``, which
          ``float128`` images would exceed. Note also that random values might have to upscaled,
          which is done via :func:`imgaug.imgaug.imresize_many_images` and has its own limited
          dtype support (includes however floats up to ``64bit``).

    Parameters
    ----------
    intensity_mean : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Mean intensity of the clouds (i.e. mean color). Recommended to be around ``(190, 255)``.
        May be a number, a tuple ``(a, b)`` (uniform range), a list (random choice per image) or
        a StochasticParameter (sampled per image).

    intensity_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Exponent of the frequency noise used to add fine intensity to the mean intensity.
        Recommended to be somewhere around ``(-2.5, -1.5)``.
        See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.

    intensity_coarse_scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Standard deviation of the gaussian distribution used to add more localized intensity to the mean intensity.
        Sampled in low resolution space, i.e. affects final intensity on a coarse level. Recommended to be
        around ``(0, 10)``.

    alpha_min : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Minimum alpha when blending cloud noise with the image. High values will lead to clouds being "everywhere".
        Recommended to usually be at around ``0.0`` for clouds and ``>0`` for fog.

    alpha_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Multiplier for the sampled alpha values. High values will lead to denser clouds wherever they are visible.
        Recommended to be at around ``(0.3, 1.0)``. Note that this parameter currently overlaps with
        `density_multiplier`, which is applied a bit later to the alpha mask.

    alpha_size_px_max : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Controls the image size at which the alpha mask is sampled. Lower values will lead to coarser alpha masks
        and hence larger clouds (and empty areas).
        See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.

    alpha_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Exponent of the frequency noise used to sample the alpha mask. Similarly to `alpha_size_max_px`, lower values
        will lead to coarser alpha patterns. Recommended to be somewhere around ``(-4.0, -1.5)``.
        See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.

    sparsity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Exponent applied late to the alpha mask. Lower values will lead to coarser cloud patterns, higher values
        to finer patterns. Recommended to be somewhere around ``1.0``. Do not deviate far from that value, otherwise
        the alpha mask might get weird patterns with sudden fall-offs to zero that look very unnatural.

    density_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Late multiplier for the alpha mask, similar to `alpha_multiplier`. Set this higher to get "denser" clouds
        wherever they are visible. Recommended to be around ``(0.5, 1.5)``.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    """
    def __init__(self, intensity_mean, intensity_freq_exponent, intensity_coarse_scale,
                 alpha_min, alpha_multiplier, alpha_size_px_max, alpha_freq_exponent,
                 sparsity, density_multiplier,
                 name=None, deterministic=False, random_state=None):
        super(CloudLayer, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        self.intensity_mean = iap.handle_continuous_param(intensity_mean, "intensity_mean")
        self.intensity_freq_exponent = intensity_freq_exponent
        self.intensity_coarse_scale = intensity_coarse_scale
        self.alpha_min = iap.handle_continuous_param(alpha_min, "alpha_min")
        self.alpha_multiplier = iap.handle_continuous_param(alpha_multiplier, "alpha_multiplier")
        self.alpha_size_px_max = alpha_size_px_max
        self.alpha_freq_exponent = alpha_freq_exponent
        self.sparsity = iap.handle_continuous_param(sparsity, "sparsity")
        self.density_multiplier = iap.handle_continuous_param(density_multiplier, "density_multiplier")

    def _augment_images(self, images, random_state, parents, hooks):
        # One derived random state per image so each image gets its own clouds.
        rss = ia.derive_random_states(random_state, len(images))
        result = images
        for i, (image, rs) in enumerate(zip(images, rss)):
            result[i] = self.draw_on_image(image, rs)
        return result

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Clouds only alter pixel intensities; heatmaps stay untouched.
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Clouds do not move anything; keypoints stay untouched.
        return keypoints_on_images

    def get_parameters(self):
        """Return the stochastic parameters of this augmenter."""
        return [self.intensity_mean, self.alpha_min, self.alpha_multiplier, self.alpha_size_px_max,
                self.alpha_freq_exponent, self.intensity_freq_exponent, self.sparsity,
                self.density_multiplier,
                self.intensity_coarse_scale]

    def draw_on_image(self, image, random_state):
        """Alpha-blend a sampled cloud layer onto `image` and return the result.

        Float inputs are returned in their own dtype; uint8 inputs are
        clipped to ``[0, 255]`` and returned as uint8.
        """
        iadt.gate_dtypes(image,
                         allowed=["uint8", "float16", "float32", "float64", "float96", "float128", "float256"],
                         disallowed=["bool",
                                     "uint16", "uint32", "uint64", "uint128", "uint256",
                                     "int8", "int16", "int32", "int64", "int128", "int256"])

        alpha, intensity = self.generate_maps(image, random_state)
        alpha = alpha[..., np.newaxis]
        intensity = intensity[..., np.newaxis]

        if image.dtype.kind == "f":
            intensity = intensity.astype(image.dtype)
            # BUGFIX: a stray trailing comma here previously made this branch
            # return a 1-tuple containing the array instead of the array itself.
            return (1 - alpha) * image + alpha * intensity
        else:
            intensity = np.clip(intensity, 0, 255)
            # TODO use blend_alpha_() here
            return np.clip(
                (1 - alpha) * image.astype(alpha.dtype) + alpha * intensity.astype(alpha.dtype),
                0,
                255
            ).astype(np.uint8)

    def generate_maps(self, image, random_state):
        """Sample and return the ``(alpha, intensity)`` maps for `image`."""
        intensity_mean_sample = self.intensity_mean.draw_sample(random_state)
        alpha_min_sample = self.alpha_min.draw_sample(random_state)
        alpha_multiplier_sample = self.alpha_multiplier.draw_sample(random_state)
        alpha_size_px_max = self.alpha_size_px_max
        intensity_freq_exponent = self.intensity_freq_exponent
        alpha_freq_exponent = self.alpha_freq_exponent
        sparsity_sample = self.sparsity.draw_sample(random_state)
        density_multiplier_sample = self.density_multiplier.draw_sample(random_state)

        height, width = image.shape[0:2]
        rss_alpha, rss_intensity = ia.derive_random_states(random_state, 2)

        # Intensity = coarse (low-res gaussian offsets) + fine (frequency noise).
        intensity_coarse = self._generate_intensity_map_coarse(
            height, width, intensity_mean_sample,
            iap.Normal(0, scale=self.intensity_coarse_scale),
            rss_intensity
        )
        intensity_fine = self._generate_intensity_map_fine(height, width, intensity_mean_sample,
                                                           intensity_freq_exponent, rss_intensity)
        intensity = intensity_coarse + intensity_fine

        alpha = self._generate_alpha_mask(height, width, alpha_min_sample, alpha_multiplier_sample,
                                          alpha_freq_exponent, alpha_size_px_max,
                                          sparsity_sample, density_multiplier_sample, rss_alpha)

        return alpha, intensity

    @classmethod
    def _generate_intensity_map_coarse(cls, height, width, intensity_mean, intensity_local_offset, random_state):
        """Sample an 8x8 offset grid around the mean and upscale it to image size."""
        height_intensity, width_intensity = (8, 8)  # TODO this might be too simplistic for some image sizes
        intensity = intensity_mean\
                    + intensity_local_offset.draw_samples((height_intensity, width_intensity), random_state)
        intensity = ia.imresize_single_image(intensity, (height, width), interpolation="cubic")

        return intensity

    @classmethod
    def _generate_intensity_map_fine(cls, height, width, intensity_mean, exponent, random_state):
        """Sample full-resolution frequency noise scaled relative to the mean intensity."""
        intensity_details_generator = iap.FrequencyNoise(
            exponent=exponent,
            size_px_max=max(height, width),
            upscale_method="cubic"
        )
        intensity_details = intensity_details_generator.draw_samples((height, width), random_state)
        return intensity_mean * ((2*intensity_details - 1.0)/5.0)

    @classmethod
    def _generate_alpha_mask(cls, height, width, alpha_min, alpha_multiplier, exponent, alpha_size_px_max, sparsity,
                             density_multiplier, random_state):
        """Sample the per-pixel blend mask, clipped to ``[0.0, 1.0]``."""
        alpha_generator = iap.FrequencyNoise(
            exponent=exponent,
            size_px_max=alpha_size_px_max,
            upscale_method="cubic"
        )
        alpha_local = alpha_generator.draw_samples((height, width), random_state)
        alpha = alpha_min + (alpha_multiplier * alpha_local)
        # Sparsity exponent shapes the pattern; density multiplier darkens/lightens it.
        alpha = (alpha ** sparsity) * density_multiplier
        alpha = np.clip(alpha, 0.0, 1.0)

        return alpha
def Snowflakes(density=(0.005, 0.075), density_uniformity=(0.3, 0.9), flake_size=(0.2, 0.7),
               flake_size_uniformity=(0.4, 0.8), angle=(-30, 30), speed=(0.007, 0.03),
               name=None, deterministic=False, random_state=None):
    """
    Augmenter that adds falling snowflakes to images.

    This is a wrapper around ``SnowflakesLayer``. It executes 1 to 3 layers
    per image.

    dtype support::

        * ``uint8``: yes; tested
        * all other dtypes (``uint16/32/64``, ``int8/16/32/64``,
          ``float16/32/64/128``, ``bool``): no (1)

        - (1) Parameters of this augmenter are optimized for the value range
          of uint8. While other dtypes may be accepted, they will lead to
          images augmented in ways inappropriate for the respective dtype.

    Parameters
    ----------
    density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Density of the snowflake layer, as a probability of each pixel in low resolution space to
        be a snowflake. Valid value range is ``(0.0, 1.0)``. Recommended to be around
        ``(0.01, 0.075)``. A number is used as-is; a tuple ``(a, b)`` samples uniformly from
        ``[a, b]`` per image; a list samples one of its values per image; a StochasticParameter
        is sampled per image.

    density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Size uniformity of the snowflakes. Higher values denote more similarly sized snowflakes.
        Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.
        Accepts the same datatypes as `density`.

    flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Size of the snowflakes. Controls the resolution at which snowflakes are sampled; higher
        values mean a resolution closer to the input image's and hence smaller flakes.
        Valid value range is ``[0.0, 1.0)``. Recommended values: ``(0.1, 0.4)`` on ``96x128``,
        ``(0.2, 0.7)`` on ``192x256``, ``(0.7, 0.95)`` on ``960x1280``.
        Accepts the same datatypes as `density`.

    flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Controls the size uniformity of the snowflakes. Higher values mean that the snowflakes are
        more similarly sized. Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.
        Accepts the same datatypes as `density`.

    angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Angle in degrees of motion blur applied to the snowflakes, where ``0.0`` is motion blur
        that points straight upwards. Recommended to be around ``(-30, 30)``.
        See also :func:`imgaug.augmenters.blur.MotionBlur.__init__`.
        Accepts the same datatypes as `density`.

    speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Perceived falling speed of the snowflakes; controls the motion blur's kernel size, roughly
        ``kernel_size = image_size * speed``. Values around ``1.0`` stretch each snowflake over
        the whole image. Valid value range is ``(0.0, 1.0)``. Recommended values:
        ``(0.01, 0.05)`` on ``96x128``, ``(0.007, 0.03)`` on ``192x256``,
        ``(0.001, 0.03)`` on ``960x1280``.
        Accepts the same datatypes as `density`.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05))

    Adds snowflakes to small images (around ``96x128``).

    >>> aug = iaa.Snowflakes(flake_size=(0.2, 0.7), speed=(0.007, 0.03))

    Adds snowflakes to medium-sized images (around ``192x256``).

    >>> aug = iaa.Snowflakes(flake_size=(0.7, 0.95), speed=(0.001, 0.03))

    Adds snowflakes to large images (around ``960x1280``).

    """
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    # Build a single template layer and deep-copy it, so that all three
    # children carry the exact same configuration.
    template = SnowflakesLayer(
        density=density, density_uniformity=density_uniformity,
        flake_size=flake_size, flake_size_uniformity=flake_size_uniformity,
        angle=angle, speed=speed,
        blur_sigma_fraction=(0.0001, 0.001)
    )
    children = [template.deepcopy() for _ in range(3)]

    return meta.SomeOf(
        (1, 3), children=children,
        random_order=False, name=name, deterministic=deterministic, random_state=random_state
    )
# TODO snowflakes are all almost 100% white, add some grayish tones and maybe color to them
class SnowflakesLayer(meta.Augmenter):
"""
Augmenter to add a single layer of falling snowflakes to images.
dtype support::
* ``uint8``: yes; indirectly tested (1)
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: no
* ``float64``: no
* ``float128``: no
* ``bool``: no
- (1) indirectly tested via tests for ``Snowflakes``
Parameters
----------
density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Density of the snowflake layer, as a probability of each pixel in low resolution space to be a snowflake.
Valid value range is ``(0.0, 1.0)``. Recommended to be around ``(0.01, 0.075)``.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
* If a list, then a random value will be sampled from that list per image.
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
"""
Augmenters that deal with edge detection.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Canny()
])
List of augmenters:
* Canny
EdgeDetect and DirectedEdgeDetect are currently still in `convolutional.py`.
"""
from __future__ import print_function, division, absolute_import
from abc import ABCMeta, abstractmethod
import numpy as np
import cv2
import six
from . import meta
from . import blend
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
# TODO this should be placed in some other file than edges.py as it could be
# re-used wherever a binary image is the result
@six.add_metaclass(ABCMeta)
class BinaryImageColorizerIf(object):
    """Interface for strategies that convert binary images to color images."""

    @abstractmethod
    def colorize(self, image_binary, image_original, nth_image, random_state):
        """
        Convert a binary image to a colorized one.

        Parameters
        ----------
        image_binary : ndarray
            Boolean ``(H,W)`` image.

        image_original : ndarray
            Original ``(H,W,C)`` input image.

        nth_image : int
            Index of the image in the batch.

        random_state : numpy.random.RandomState
            Random state to use.

        Returns
        -------
        ndarray
            Colorized form of `image_binary`.

        """
# TODO see above, this should be moved to another file
class RandomColorsBinaryImageColorizer(BinaryImageColorizerIf):
    """
    Colorizer using two randomly sampled foreground/background colors.

    Parameters
    ----------
    color_true : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
        Color of the foreground, i.e. all pixels in binary images that are
        ``True``. Queried once per image for ``(3,)`` samples denoting the
        color. (Even for grayscale images three values are sampled and then
        converted to grayscale via ``0.299*R + 0.587*G + 0.114*B``, the same
        equation OpenCV uses.)

            * If an int, exactly that value will always be used, i.e. every
              color will be ``(v, v, v)`` for value ``v``.
            * If a tuple ``(a, b)``, three random values from the range
              ``a <= x <= b`` will be sampled per image.
            * If a list, then three random values will be sampled from that
              list per image.
            * If a StochasticParameter, three values will be sampled from the
              parameter per image.

    color_false : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
        Analogous to `color_true`, but denotes the color for all pixels that
        are ``False`` in the binary input image.

    """

    def __init__(self, color_true=(0, 255), color_false=(0, 255)):
        self.color_true = iap.handle_discrete_param(
            color_true, "color_true", value_range=(0, 255),
            tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
        self.color_false = iap.handle_discrete_param(
            color_false, "color_false", value_range=(0, 255),
            tuple_to_uniform=True, list_to_choice=True, allow_floats=False)

    def _draw_samples(self, random_state):
        """Sample one ``(3,)`` RGB color for foreground and one for background."""
        sampled_true = self.color_true.draw_samples((3,),
                                                    random_state=random_state)
        sampled_false = self.color_false.draw_samples((3,),
                                                      random_state=random_state)
        return sampled_true, sampled_false

    def colorize(self, image_binary, image_original, nth_image, random_state):
        """Map the binary mask to two random colors; keep any alpha channel."""
        assert image_binary.ndim == 2
        assert image_binary.dtype.kind == "b"
        assert image_original.ndim == 3
        assert image_original.shape[-1] in [1, 3, 4]
        assert image_original.dtype.name == "uint8"

        color_true, color_false = self._draw_samples(random_state)

        height, width = image_original.shape[0], image_original.shape[1]
        nb_channels = min(image_original.shape[-1], 3)
        colorized = np.zeros((height, width, nb_channels),
                             dtype=image_original.dtype)

        if nb_channels == 1:
            # Single-channel input: collapse each sampled color to grayscale
            # with the same weights OpenCV uses.
            def _to_gray(rgb):
                return 0.299*rgb[0] + 0.587*rgb[1] + 0.114*rgb[2]

            colorized[image_binary] = _to_gray(color_true)
            colorized[~image_binary] = _to_gray(color_false)
        else:
            colorized[image_binary] = color_true
            colorized[~image_binary] = color_false

        # Re-attach the alpha channel if it was present in the input image.
        if image_original.shape[-1] == 4:
            colorized = np.dstack(
                [colorized, image_original[:, :, 3:4]])

        return colorized

    def __str__(self):
        pattern = ("RandomColorsBinaryImageColorizer("
                   "color_true=%s, color_false=%s)")
        return pattern % (self.color_true, self.color_false)
class Canny(meta.Augmenter):
"""
Apply a canny edge detector to input images.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no; not tested
* ``uint32``: no; not tested
* ``uint64``: no; not tested
* ``int8``: no; not tested
* ``int16``: no; not tested
* ``int32``: no; not tested
* ``int64``: no; not tested
* ``float16``: no; not tested
* ``float32``: no; not tested
* ``float64``: no; not tested
* ``float128``: no; not tested
* ``bool``: no; not tested
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Blending factor to use in alpha blending.
A value close to 1.0 means that only the edge image is visible.
A value close to 0.0 means that only the original image is visible.
A value close to 0.5 means that the images are merged according to
`0.5*image + 0.5*edge_image`.
If a sample from this parameter is 0, no action will be performed for
the corresponding image.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range
``a <= x <= b`` will be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
hysteresis_thresholds : number or tuple of number or list of number or imgaug.parameters.StochasticParameter or tuple of tuple of number or tuple of list of number or tuple of imgaug.parameters.StochasticParameter, optional
Min and max values to use in hysteresis thresholding.
(This parameter seems to have not very much effect on the results.)
Either a single parameter or a tuple of two parameters.
If a single parameter is provided, the sampling happens once for all
images with `(N,2)` samples being requested from the parameter,
where each first value denotes the hysteresis minimum and each second
the maximum.
If a tuple of two parameters is provided, one sampling of `(N,)` values
is independently performed per parameter (first parameter: hysteresis
minimum, second: hysteresis maximum).
* If this is a single number, both min and max value will always be
exactly that value.
* If this is a tuple of numbers ``(a, b)``, two random values from
the range ``a <= x <= b`` will be sampled per image.
* If this is a list, two random values will be sampled from that
list per image.
* If this is a StochasticParameter, two random values will be
sampled from that parameter per image.
* If this is a tuple ``(min, max)`` with ``min`` and ``max``
both *not* being numbers, they will be treated according to the
rules above (i.e. may be a number, tuple, list or
StochasticParameter). A single value will be sampled per image
and parameter.
sobel_kernel_size : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
Kernel size of the sobel operator initially applied to each image.
This corresponds to ``apertureSize`` in ``cv2.Canny()``.
If a sample from this parameter is ``<=1``, no action will be performed
for the corresponding image.
The maximum for this parameter is ``7`` (inclusive). Higher values are
not accepted by OpenCV.
If an even value ``v`` is sampled, it is automatically changed to
``v-1``.
* If this is a single integer, the kernel size always matches that
value.
* If this is a tuple of integers ``(a, b)``, a random discrete
value will be sampled from the range ``a <= x <= b`` per image.
* If this is a list, a random value will be sampled from that
list per image.
* If this is a StochasticParameter, a random value will be sampled
from that parameter per image.
colorizer : None or imgaug.augmenters.edges.IBinaryImageColorizer, optional
A strategy to convert binary edge images to color images.
If this is ``None``, an instance of ``RandomColorBinaryImageColorizer``
is created, which means that each edge image is converted into an
``uint8`` image, where edge and non-edge pixels each have a different
color that was uniformly randomly sampled from the space of all
``uint8`` colors.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Canny()
Creates an augmenter that generates random blends between images and
their canny edge representations. Apply the augmenter to images using
e.g. ``images_aug = aug(images=<list of numpy array>)``.
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Canny(sobel_kernel_size=(0, 7))
Creates a canny edge augmenter that initially preprocesses images using
a sobel filter with kernel size ``3x3`` to ``7x7`` and will sometimes
not modify images at all (if a value ``<=2`` is sampled).
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Canny(alpha=(0.0, 0.5))
Creates a canny edge augmenter that generates edge images with a blending
factor of max 50%, i.e. the original (non-edge) image is always at least
partially visible.
"""
def __init__(self,
             alpha=(0.0, 1.0),
             hysteresis_thresholds=((100-40, 100+40), (200-40, 200+40)),
             sobel_kernel_size=(3, 7),
             colorizer=None,
             name=None, deterministic=False, random_state=None):
    """Create a new Canny augmenter (see the class docstring for details)."""
    super(Canny, self).__init__(
        name=name, deterministic=deterministic, random_state=random_state)

    # Blending factor between the input image and its edge representation.
    self.alpha = iap.handle_continuous_param(
        alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True,
        list_to_choice=True)

    def _to_hysteresis_param(value, value_name):
        # Shared conversion used for all hysteresis-threshold inputs.
        return iap.handle_discrete_param(
            value, value_name, value_range=(0, 255), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)

    # A 2-tuple whose entries are both *not* plain numbers denotes two
    # independent parameters (minimum, maximum); everything else is a
    # single parameter sampled as (N,2) pairs.
    is_min_max_pair = (
        isinstance(hysteresis_thresholds, tuple)
        and len(hysteresis_thresholds) == 2
        and not ia.is_single_number(hysteresis_thresholds[0])
        and not ia.is_single_number(hysteresis_thresholds[1]))
    if is_min_max_pair:
        self.hysteresis_thresholds = (
            _to_hysteresis_param(hysteresis_thresholds[0],
                                 "hysteresis_thresholds[0]"),
            _to_hysteresis_param(hysteresis_thresholds[1],
                                 "hysteresis_thresholds[1]"))
    else:
        self.hysteresis_thresholds = _to_hysteresis_param(
            hysteresis_thresholds, "hysteresis_thresholds")

    # we don't use handle_discrete_kernel_size_param() here, because
    # cv2.Canny() can't handle independent height/width values, only a
    # single kernel size
    self.sobel_kernel_size = iap.handle_discrete_param(
        sobel_kernel_size,
        "sobel_kernel_size",
        value_range=(0, 7),  # OpenCV only accepts ksize up to 7
        tuple_to_uniform=True,
        list_to_choice=True,
        allow_floats=False)

    # Default colorizer assigns uniformly random uint8 colors to edge and
    # non-edge pixels.
    self.colorizer = (
        colorizer
        if colorizer is not None
        else RandomColorsBinaryImageColorizer())
def _draw_samples(self, augmentables, random_state):
    """Sample per-image alpha, hysteresis-threshold pairs and kernel sizes."""
    nb_images = len(augmentables)
    rss = ia.derive_random_states(random_state, 4)

    alpha_samples = self.alpha.draw_samples((nb_images,), rss[0])

    hthresh = self.hysteresis_thresholds
    if isinstance(hthresh, tuple):
        # Two independent parameters: sample min and max separately,
        # then pair them up as rows of an (N,2) array.
        assert len(hthresh) == 2
        mins = hthresh[0].draw_samples((nb_images,), rss[1])
        maxs = hthresh[1].draw_samples((nb_images,), rss[2])
        hthresh_samples = np.stack([mins, maxs], axis=-1)
    else:
        # Single parameter: request both values of each pair from it.
        hthresh_samples = hthresh.draw_samples((nb_images, 2), rss[1])

    sobel_samples = self.sobel_kernel_size.draw_samples((nb_images,),
                                                        rss[3])

    # Enforce min <= max within each threshold pair by swapping the
    # offending rows.
    swap_mask = (hthresh_samples[:, 0] > hthresh_samples[:, 1])
    if np.any(swap_mask):
        hthresh_samples[swap_mask, :] = \
            hthresh_samples[swap_mask, :][:, [1, 0]]

    # OpenCV only accepts odd aperture sizes and nothing above 7; even
    # samples are decremented to the next smaller odd value below.
    assert not np.any(sobel_samples < 0), (
        "Sampled a sobel kernel size below 0 in Canny. "
        "Allowed value range is 0 to 7.")
    assert not np.any(sobel_samples > 7), (
        "Sampled a sobel kernel size above 7 in Canny. "
        "Allowed value range is 0 to 7.")
    is_even = (np.mod(sobel_samples, 2) == 0)
    sobel_samples[is_even] -= 1

    return alpha_samples, hthresh_samples, sobel_samples
def _augment_images(self, images, random_state, parents, hooks):
    """Blend each uint8 image with a colorized canny-edge version of itself."""
    # Only ``uint8`` inputs are accepted; every other dtype is rejected.
    iadt.gate_dtypes(images,
                     allowed=["uint8"],
                     disallowed=[
                         "bool",
                         "uint16", "uint32", "uint64", "uint128",
                         "uint256",
                         "int8", "int16", "int32", "int64", "int128",
                         "int256",
                         "float32", "float64", "float96", "float128",
                         "float256"],
                     augmenter=self)

    # One derived random state per image (used by the colorizer below);
    # the last one also drives the parameter sampling.
    # NOTE(review): rss[-1] is the same state as the last image's
    # colorizer state — upstream imgaug derives len(images)+1 states
    # here; confirm whether this sharing is intended.
    rss = ia.derive_random_states(random_state, len(images))
    samples = self._draw_samples(images, rss[-1])
    alpha_samples = samples[0]
    hthresh_samples = samples[1]
    sobel_samples = samples[2]

    # Images are replaced in-place in the input list.
    result = images
    gen = enumerate(zip(images, alpha_samples, hthresh_samples,
                        sobel_samples))
    for i, (image, alpha, hthreshs, sobel) in gen:
        assert image.ndim == 3
        assert image.shape[-1] in [1, 3, 4], (
            "Canny edge detector can currently only handle images with "
            "channel numbers that are 1, 3 or 4. Got %d.") % (
                image.shape[-1],)

        # alpha == 0 (result identical to input) and sobel <= 1 (invalid
        # aperture) both mean "leave this image untouched".
        if alpha > 0 and sobel > 1:
            image_canny = cv2.Canny(
                image[:, :, 0:3],
                threshold1=hthreshs[0],
                threshold2=hthreshs[1],
                apertureSize=sobel,
                L2gradient=True)
            image_canny = (image_canny > 0)

            # canny returns a boolean (H,W) image, so we change it to
            # (H,W,C) and then uint8
            image_canny_color = self.colorizer.colorize(
                image_canny, image, nth_image=i, random_state=rss[i])

            result[i] = blend.blend_alpha(image_canny_color, image, alpha)

    return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
# pylint: disable=no-self-use
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents,
hooks):
# pylint: disable=no-self-use
return keypoints_on_images
def get_parameters(self):
    """Return the stochastic parameters and the colorizer of this augmenter."""
    params = [self.alpha, self.hysteresis_thresholds]
    params.append(self.sobel_kernel_size)
    params.append(self.colorizer)
    return params
def __str__(self):
return ("Canny("
"alpha=%s, "
"hysteresis_thresholds=%s, "
"sobel_kernel_size=%s, "
"colorizer=%s, "
"name=%s, "
"deterministic=%s)" % (
self.alpha, self.hysteresis_thresholds,
self.sobel_kernel_size, self.colorizer,
self.name, self.deterministic))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/blend.py | augmentation/image_augmentation/helpers/imgaug/augmenters/blend.py | """
Augmenters that blend two images with each other.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Alpha(0.5, iaa.Add((-5, 5)))
])
List of augmenters:
* Alpha
* AlphaElementwise
* SimplexNoiseAlpha
* FrequencyNoiseAlpha
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import six.moves as sm
from . import meta
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
def blend_alpha(image_fg, image_bg, alpha, eps=1e-2):
    """
    Blend two images using an alpha blending.

    In an alpha blending, the two images are naively mixed. Let ``A`` be the
    foreground image and ``B`` the background image and ``a`` is the alpha
    value. Each pixel intensity is then computed as
    ``a * A_ij + (1-a) * B_ij``.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; fully tested
        * ``uint32``: yes; fully tested
        * ``uint64``: yes; fully tested (1)
        * ``int8``: yes; fully tested
        * ``int16``: yes; fully tested
        * ``int32``: yes; fully tested
        * ``int64``: yes; fully tested (1)
        * ``float16``: yes; fully tested
        * ``float32``: yes; fully tested
        * ``float64``: yes; fully tested (1)
        * ``float128``: no (2)
        * ``bool``: yes; fully tested (3)

        - (1) Tests show that these dtypes work, but a conversion to
          float128 happens, which only has 96 bits of size instead of true
          128 bits and hence not twice as much resolution. It is possible
          that these dtypes result in inaccuracies, though the tests did
          not indicate that.
        - (2) Not available due to the input dtype having to be increased
          to an equivalent float dtype with two times the input resolution.
        - (3) Mapped internally to ``float16``.
          (NOTE(review): the code below actually converts bool inputs to
          ``float32`` for speed — confirm which statement is current.)

    Parameters
    ----------
    image_fg : (H,W,[C]) ndarray
        Foreground image. Shape and dtype kind must match the one of the
        background image.

    image_bg : (H,W,[C]) ndarray
        Background image. Shape and dtype kind must match the one of the
        foreground image.

    alpha : number or iterable of number or ndarray
        The blending factor, between 0.0 and 1.0. Can be interpreted as the
        opacity of the foreground image. Values around 1.0 result in only
        the foreground image being visible. Values around 0.0 result in
        only the background image being visible. Multiple alphas may be
        provided. In these cases, there must be exactly one alpha per
        channel in the foreground/background image. Alternatively, for
        ``(H,W,C)`` images, either one ``(H,W)`` array or an ``(H,W,C)``
        array of alphas may be provided, denoting the elementwise alpha
        value.

    eps : number, optional
        Controls when an alpha is to be interpreted as exactly 1.0 or
        exactly 0.0, resulting in only the foreground/background being
        visible and skipping the actual computation.

    Returns
    -------
    image_blend : (H,W,C) ndarray
        Blend of foreground and background image.
    """
    assert image_fg.shape == image_bg.shape
    assert image_fg.dtype.kind == image_bg.dtype.kind
    # TODO switch to gate_dtypes()
    assert image_fg.dtype.name not in ["float128"]
    assert image_bg.dtype.name not in ["float128"]

    # Normalize (H,W) inputs to (H,W,1); the extra axis is removed again
    # before returning.
    # TODO add test for this
    input_was_2d = (len(image_fg.shape) == 2)
    if input_was_2d:
        image_fg = np.atleast_3d(image_fg)
        image_bg = np.atleast_3d(image_bg)

    input_was_bool = False
    if image_fg.dtype.kind == "b":
        input_was_bool = True
        # use float32 instead of float16 here because it seems to be faster
        image_fg = image_fg.astype(np.float32)
        image_bg = image_bg.astype(np.float32)

    # Bring alpha into a shape that broadcasts against (H,W,C):
    # scalar stays scalar, (H,W) gains a channel axis, per-channel vectors
    # become (1,1,C) and are tiled to C channels if necessary.
    alpha = np.array(alpha, dtype=np.float64)
    if alpha.size == 1:
        pass
    else:
        if alpha.ndim == 2:
            assert alpha.shape == image_fg.shape[0:2]
            alpha = alpha.reshape((alpha.shape[0], alpha.shape[1], 1))
        elif alpha.ndim == 3:
            assert alpha.shape == image_fg.shape or alpha.shape == image_fg.shape[0:2] + (1,)
        else:
            alpha = alpha.reshape((1, 1, -1))
        if alpha.shape[2] != image_fg.shape[2]:
            alpha = np.tile(alpha, (1, 1, image_fg.shape[2]))

    # Shortcut: if alpha is (almost) exactly 0 or 1 everywhere, one of the
    # inputs can be returned directly without any blending arithmetic.
    if not input_was_bool:
        if np.all(alpha >= 1.0 - eps):
            return np.copy(image_fg)
        elif np.all(alpha <= eps):
            return np.copy(image_bg)

    # for efficiency reasons, only test one value of alpha here, even if
    # alpha is much larger
    assert 0 <= alpha.item(0) <= 1.0

    dt_images = iadt.get_minimal_dtype([image_fg, image_bg])

    # Blend in a float dtype with twice the input's item size to avoid
    # overflow/precision loss.
    # doing this only for non-float images led to inaccuracies for large
    # float values
    isize = dt_images.itemsize * 2
    isize = max(isize, 4)  # at least 4 bytes (=float32), tends to be faster than float16
    dt_blend = np.dtype("f%d" % (isize,))

    if alpha.dtype != dt_blend:
        alpha = alpha.astype(dt_blend)
    if image_fg.dtype != dt_blend:
        image_fg = image_fg.astype(dt_blend)
    if image_bg.dtype != dt_blend:
        image_bg = image_bg.astype(dt_blend)

    # the following is equivalent to
    #     image_blend = alpha * image_fg + (1 - alpha) * image_bg
    # but supposedly faster
    image_blend = image_bg + alpha * (image_fg - image_bg)

    if input_was_bool:
        image_blend = image_blend > 0.5
    else:
        # skip clip, because alpha is expected to be in range [0.0, 1.0]
        # and both images must have same dtype
        # dont skip round, because otherwise it is very unlikely to hit the
        # image's max possible value
        image_blend = iadt.restore_dtypes_(image_blend, dt_images, clip=False, round=True)

    if input_was_2d:
        return image_blend[:, :, 0]

    return image_blend
class Alpha(meta.Augmenter):  # pylint: disable=locally-disabled, unused-variable, line-too-long
    """
    Augmenter to blend two image sources using an alpha/transparency value.

    The two image sources can be imagined as branches.
    If a source is not given, it is automatically the same as the input.
    Let A be the first branch and B be the second branch.
    Then the result images are defined as ``factor * A + (1-factor) * B``,
    where ``factor`` is an overlay factor.

    For keypoint augmentation this augmenter will pick the keypoints either
    from the first or the second branch. The first one is picked if
    ``factor >= 0.5`` is true (per image). It is recommended to *not* use
    augmenters that change keypoint positions with this class.

    dtype support::

        See :func:`imgaug.augmenters.blend.blend_alpha`.

    Parameters
    ----------
    factor : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Weighting of the results of the first branch. Values close to 0 mean
        that the results from the second branch (see parameter `second`)
        make up most of the final image.

            * If float, then that value will be used for all images.
            * If tuple ``(a, b)``, then a random value from range
              ``a <= x <= b`` will be sampled per image.
            * If a list, then a random value will be picked from that list
              per image.
            * If StochasticParameter, then that parameter will be used to
              sample a value per image.

    first : None or imgaug.augmenters.meta.Augmenter or iterable of imgaug.augmenters.meta.Augmenter, optional
        Augmenter(s) that make up the first of the two branches.

            * If None, then the input images will be reused as the output
              of the first branch.
            * If Augmenter, then that augmenter will be used as the branch.
            * If iterable of Augmenter, then that iterable will be converted
              into a Sequential and used as the augmenter.

    second : None or imgaug.augmenters.meta.Augmenter or iterable of imgaug.augmenters.meta.Augmenter, optional
        Augmenter(s) that make up the second of the two branches.

            * If None, then the input images will be reused as the output
              of the second branch.
            * If Augmenter, then that augmenter will be used as the branch.
            * If iterable of Augmenter, then that iterable will be converted
              into a Sequential and used as the augmenter.

    per_channel : bool or float, optional
        Whether to use the same factor for all channels (False)
        or to sample a new value for each channel (True).
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as True, otherwise as False.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Alpha(0.5, iaa.Grayscale(1.0))

    Converts each image to grayscale and alpha-blends it by 50 percent with
    the original image, thereby removing about 50 percent of all color. This
    is equivalent to ``iaa.Grayscale(0.5)``.

    >>> aug = iaa.Alpha((0.0, 1.0), iaa.Grayscale(1.0))

    Converts each image to grayscale and alpha-blends it by a random
    percentage (sampled per image) with the original image, thereby removing
    a random percentage of all colors. This is equivalent to
    ``iaa.Grayscale((0.0, 1.0))``.

    >>> aug = iaa.Alpha((0.0, 1.0), iaa.Affine(rotate=(-20, 20)), per_channel=0.5)

    Rotates each image by a random degree from the range ``[-20, 20]``. Then
    alpha-blends that new image with the original one by a random factor from
    the range ``[0.0, 1.0]``. In 50 percent of all cases, the blending
    happens channel-wise and the factor is sampled independently per channel.
    As a result, e.g. the red channel may look visible rotated (factor near
    1.0), while the green and blue channels may not look rotated (factors
    near 0.0). NOTE: It is not recommended to use Alpha with augmenters that
    change the positions of pixels if you *also* want to augment keypoints,
    as it is unclear which of the two keypoint results (first or second
    branch) should be used as the final result.

    >>> aug = iaa.Alpha((0.0, 1.0), first=iaa.Add(10), second=iaa.Multiply(0.8))

    (A) Adds 10 to each image and (B) multiplies each image by 0.8. Then per
    image a blending factor is sampled from the range ``[0.0, 1.0]``. If it
    is close to 1.0, the results from (A) are mostly used, otherwise the
    ones from (B). This is equivalent to
    ``iaa.Sequential([iaa.Multiply(0.8), iaa.Alpha((0.0, 1.0), iaa.Add(10))])``.

    >>> aug = iaa.Alpha(iap.Choice([0.25, 0.75]), iaa.MedianBlur((3, 7)))

    Applies a random median blur to each image and alpha-blends the result
    with the original image by either 25 or 75 percent strength.

    """

    # TODO rename first/second to foreground/background?
    def __init__(self, factor=0, first=None, second=None, per_channel=False,
                 name=None, deterministic=False, random_state=None):
        """Initialize the augmenter; at least one branch must be given."""
        super(Alpha, self).__init__(name=name, deterministic=deterministic, random_state=random_state)

        # Blending factor in [0.0, 1.0]; 1.0 means only the first branch
        # is visible.
        self.factor = iap.handle_continuous_param(factor, "factor", value_range=(0, 1.0), tuple_to_uniform=True,
                                                  list_to_choice=True)

        ia.do_assert(first is not None or second is not None,
                     "Expected 'first' and/or 'second' to not be None (i.e. at least one Augmenter), "
                     + "but got two None values.")
        self.first = meta.handle_children_list(first, self.name, "first", default=None)
        self.second = meta.handle_children_list(second, self.name, "second", default=None)

        self.per_channel = iap.handle_probability_param(per_channel, "per_channel")

        # Factors within epsilon of 0.0/1.0 are treated as exactly 0.0/1.0
        # during blending (see blend_alpha()).
        self.epsilon = 1e-2

    def _augment_images(self, images, random_state, parents, hooks):
        """Blend the images of both branches per image (and maybe channel)."""
        result = images  # entries are replaced in-place below
        nb_images = len(images)
        nb_channels = meta.estimate_max_number_of_channels(images)
        rss = ia.derive_random_states(random_state, 2)
        per_channel = self.per_channel.draw_samples(nb_images, random_state=rss[0])
        alphas = self.factor.draw_samples((nb_images, nb_channels), random_state=rss[1])

        if hooks is None or hooks.is_propagating(images, augmenter=self, parents=parents, default=True):
            # A branch set to None simply reuses the inputs; otherwise the
            # branch augments copies so the originals stay intact.
            if self.first is None:
                images_first = images
            else:
                images_first = self.first.augment_images(
                    images=meta.copy_arrays(images),
                    parents=parents + [self],
                    hooks=hooks
                )

            if self.second is None:
                images_second = images
            else:
                images_second = self.second.augment_images(
                    images=meta.copy_arrays(images),
                    parents=parents + [self],
                    hooks=hooks
                )
        else:
            images_first = images
            images_second = images

        for i, (image_first, image_second) in enumerate(zip(images_first, images_second)):
            if per_channel[i] > 0.5:
                nb_channels_i = image_first.shape[2]
                alphas_i = alphas[i, 0:nb_channels_i]
            else:
                alphas_i = alphas[i, 0]
            result[i] = blend_alpha(image_first, image_second, alphas_i, eps=self.epsilon)

        return result

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        """Pick per heatmap the branch result whose alpha dominates (>= 0.5)."""
        result = heatmaps
        nb_heatmaps = len(heatmaps)
        if nb_heatmaps == 0:
            return heatmaps
        nb_channels = meta.estimate_max_number_of_channels(heatmaps)
        rss = ia.derive_random_states(random_state, 2)
        per_channel = self.per_channel.draw_samples(nb_heatmaps, random_state=rss[0])
        alphas = self.factor.draw_samples((nb_heatmaps, nb_channels), random_state=rss[1])

        if hooks is None or hooks.is_propagating(heatmaps, augmenter=self, parents=parents, default=True):
            if self.first is None:
                heatmaps_first = heatmaps
            else:
                heatmaps_first = self.first.augment_heatmaps(
                    [heatmaps_i.deepcopy() for heatmaps_i in heatmaps],
                    parents=parents + [self],
                    hooks=hooks
                )

            if self.second is None:
                heatmaps_second = heatmaps
            else:
                heatmaps_second = self.second.augment_heatmaps(
                    [heatmaps_i.deepcopy() for heatmaps_i in heatmaps],
                    parents=parents + [self],
                    hooks=hooks
                )
        else:
            heatmaps_first = heatmaps
            heatmaps_second = heatmaps

        for i, (heatmaps_first_i, heatmaps_second_i) in enumerate(zip(heatmaps_first, heatmaps_second)):
            # sample alphas channelwise if necessary and try to use the
            # image's channel number; keeps the random values properly
            # synchronized with the image augmentation
            # per_channel = self.per_channel.draw_sample(random_state=rs_image)
            if per_channel[i] > 0.5:
                nb_channels_i = heatmaps[i].shape[2] if len(heatmaps[i].shape) >= 3 else 1
                alpha = np.average(alphas[i, 0:nb_channels_i])
            else:
                alpha = alphas[i, 0]
            ia.do_assert(0 <= alpha <= 1.0)

            # Heatmaps cannot be blended smoothly here; take the result of
            # the more visible branch instead.
            if alpha >= 0.5:
                result[i].arr_0to1 = heatmaps_first_i.arr_0to1
            else:
                result[i].arr_0to1 = heatmaps_second_i.arr_0to1

        return result

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        """Pick keypoint results from one of the two branches (see class doc)."""
        def _augfunc(augs_, keypoints_on_images_, parents_, hooks_):
            return augs_.augment_keypoints(
                keypoints_on_images=[kpsoi_i.deepcopy() for kpsoi_i in keypoints_on_images_],
                parents=parents_,
                hooks=hooks_
            )
        return self._augment_coordinate_based(
            keypoints_on_images, random_state, parents, hooks, _augfunc
        )

    def _augment_polygons(self, polygons_on_images, random_state, parents, hooks):
        """Pick polygon results from one of the two branches (see class doc)."""
        def _augfunc(augs_, polygons_on_images_, parents_, hooks_):
            return augs_.augment_polygons(
                polygons_on_images=[polysoi_i.deepcopy() for polysoi_i in polygons_on_images_],
                parents=parents_,
                hooks=hooks_
            )
        return self._augment_coordinate_based(
            polygons_on_images, random_state, parents, hooks, _augfunc
        )

    def _augment_coordinate_based(self, inputs, random_state, parents, hooks, func):
        """Shared keypoint/polygon logic: run both branches via `func`, then
        select per input the branch whose (averaged) alpha dominates."""
        nb_images = len(inputs)
        if nb_images == 0:
            return inputs
        nb_channels = meta.estimate_max_number_of_channels(inputs)
        rss = ia.derive_random_states(random_state, 2)
        per_channel = self.per_channel.draw_samples(nb_images, random_state=rss[0])
        alphas = self.factor.draw_samples((nb_images, nb_channels), random_state=rss[1])
        result = inputs

        if hooks is None or hooks.is_propagating(inputs, augmenter=self, parents=parents, default=True):
            if self.first is None:
                outputs_first = inputs
            else:
                outputs_first = func(self.first, inputs, parents + [self], hooks)

            if self.second is None:
                outputs_second = inputs
            else:
                outputs_second = func(self.second, inputs, parents + [self], hooks)
        else:
            outputs_first = inputs
            outputs_second = inputs

        for i, (outputs_first_i, outputs_second_i) in enumerate(zip(outputs_first, outputs_second)):
            # coordinate augmentation also works channel-wise -- even though
            # e.g. keypoints do not have channels -- in order to keep the
            # random values properly synchronized with the image augmentation
            # per_channel = self.per_channel.draw_sample(random_state=rs_image)
            if per_channel[i] > 0.5:
                nb_channels_i = inputs[i].shape[2] if len(inputs[i].shape) >= 3 else 1
                alpha = np.average(alphas[i, 0:nb_channels_i])
            else:
                alpha = alphas[i, 0]
            ia.do_assert(0 <= alpha <= 1.0)

            # We cant choose "just a bit" of one keypoint augmentation result
            # without messing up the positions (interpolation doesn't make
            # much sense here),
            # so if the alpha is >= 0.5 (branch A is more visible than
            # branch B), the result of branch A, otherwise branch B.
            if alpha >= 0.5:
                result[i] = outputs_first_i
            else:
                result[i] = outputs_second_i

        return result

    def _to_deterministic(self):
        """Make this augmenter and both of its branches deterministic."""
        aug = self.copy()
        aug.first = aug.first.to_deterministic() if aug.first is not None else None
        aug.second = aug.second.to_deterministic() if aug.second is not None else None
        aug.deterministic = True
        aug.random_state = ia.derive_random_state(self.random_state)
        return aug

    def get_parameters(self):
        """Return the stochastic parameters of this augmenter."""
        return [self.factor, self.per_channel]

    def get_children_lists(self):
        """Return the non-None branch augmenter lists."""
        return [lst for lst in [self.first, self.second] if lst is not None]

    def __str__(self):
        return "%s(factor=%s, per_channel=%s, name=%s, first=%s, second=%s, deterministic=%s)" % (
            self.__class__.__name__, self.factor, self.per_channel, self.name,
            self.first, self.second, self.deterministic)
# TODO merge this with Alpha
class AlphaElementwise(Alpha): # pylint: disable=locally-disabled, unused-variable, line-too-long
"""
Augmenter to blend two image sources using pixelwise alpha/transparency values.
This is the same as ``Alpha``, except that the transparency factor is
sampled per pixel instead of once per image (or a few times per image, if
per_channel is True).
See ``Alpha`` for more description.
dtype support::
See :func:`imgaug.augmenters.blend.blend_alpha`.
Parameters
----------
factor : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Weighting of the results of the first branch. Values close to 0 mean
that the results from the second branch (see parameter `second`)
make up most of the final image.
* If float, then that value will be used for all images.
* If tuple ``(a, b)``, then a random value from range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be picked from that list per
image.
* If StochasticParameter, then that parameter will be used to
sample a value per image.
first : None or imgaug.augmenters.meta.Augmenter or iterable of imgaug.augmenters.meta.Augmenter, optional
Augmenter(s) that make up the first of the two branches.
* If None, then the input images will be reused as the output
of the first branch.
* If Augmenter, then that augmenter will be used as the branch.
* If iterable of Augmenter, then that iterable will be converted
into a Sequential and used as the augmenter.
second : None or imgaug.augmenters.meta.Augmenter or iterable of imgaug.augmenters.meta.Augmenter, optional
Augmenter(s) that make up the second of the two branches.
* If None, then the input images will be reused as the output
of the second branch.
* If Augmenter, then that augmenter will be used as the branch.
* If iterable of Augmenter, then that iterable will be converted
into a Sequential and used as the augmenter.
per_channel : bool or float, optional
Whether to use the same factor for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AlphaElementwise(0.5, iaa.Grayscale(1.0))
Converts each image to grayscale and overlays it by 50 percent with the
original image, thereby removing about 50 percent of all color. This
is equivalent to ``iaa.Grayscale(0.5)``. This is also equivalent to
``iaa.Alpha(0.5, iaa.Grayscale(1.0))``, as the transparency factor is the
same for all pixels.
>>> aug = iaa.AlphaElementwise((0, 1.0), iaa.Grayscale(1.0))
Converts each image to grayscale and alpha-blends it by a random percentage
(sampled per pixel) with the original image, thereby removing a random
percentage of all colors per pixel.
>>> aug = iaa.AlphaElementwise((0.0, 1.0), iaa.Affine(rotate=(-20, 20)), per_channel=0.5)
Rotates each image by a random degree from the range ``[-20, 20]``. Then
alpha-blends that new image with the original one by a random factor from
the range ``[0.0, 1.0]``, sampled per pixel. In 50 percent of all cases, the
blending happens channel-wise and the factor is sampled independently per
channel. As a result, e.g. the red channel may look visible rotated (factor
near 1.0), while the green and blue channels may not look rotated (factors
near 0.0). NOTE: It is not recommended to use Alpha with augmenters that
change the positions of pixels if you *also* want to augment keypoints, as
it is unclear which of the two keypoint results (first or second branch)
should be used as the final result.
>>> aug = iaa.AlphaElementwise((0.0, 1.0), first=iaa.Add(10), second=iaa.Multiply(0.8))
(A) Adds 10 to each image and (B) multiplies each image by 0.8. Then per
pixel a blending factor is sampled from the range ``[0.0, 1.0]``. If it is
close to 1.0, the results from (A) are mostly used, otherwise the ones
from (B).
>>> aug = iaa.AlphaElementwise(iap.Choice([0.25, 0.75]), iaa.MedianBlur((3, 7)))
Applies a random median blur to each image and alpha-blends the result with
the original image by either 25 or 75 percent strength (sampled per pixel).
"""
def __init__(self, factor=0, first=None, second=None, per_channel=False,
             name=None, deterministic=False, random_state=None):
    """Initialize the augmenter; identical signature/behavior to ``Alpha``."""
    # Pure pass-through to Alpha.__init__; the elementwise behaviour comes
    # solely from the overridden _augment_* methods of this class.
    super(AlphaElementwise, self).__init__(
        factor=factor,
        first=first,
        second=second,
        per_channel=per_channel,
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
def _augment_images(self, images, random_state, parents, hooks):
    """Blend branch outputs with an alpha factor sampled per *pixel*."""
    result = images  # entries are replaced in-place below
    nb_images = len(images)
    # One seed per image so that per_channel and the alpha masks can be
    # re-derived deterministically inside the loop.
    seeds = random_state.randint(0, 10**6, (nb_images,))

    if hooks is None or hooks.is_propagating(images, augmenter=self, parents=parents, default=True):
        if self.first is None:
            images_first = images
        else:
            images_first = self.first.augment_images(
                images=meta.copy_arrays(images),
                parents=parents + [self],
                hooks=hooks
            )

        if self.second is None:
            images_second = images
        else:
            images_second = self.second.augment_images(
                images=meta.copy_arrays(images),
                parents=parents + [self],
                hooks=hooks
            )
    else:
        images_first = images
        images_second = images

    # TODO simplify this loop and the ones for heatmaps, keypoints; similar to Alpha
    for i in sm.xrange(nb_images):
        image = images[i]
        h, w, nb_channels = image.shape[0:3]
        image_first = images_first[i]
        image_second = images_second[i]
        per_channel = self.per_channel.draw_sample(random_state=ia.new_random_state(seeds[i]))
        if per_channel > 0.5:
            # Sample one (H,W) alpha mask per channel, then stack to (H,W,C).
            alphas = []
            for c in sm.xrange(nb_channels):
                samples_c = self.factor.draw_samples((h, w), random_state=ia.new_random_state(seeds[i]+1+c))
                ia.do_assert(0 <= samples_c.item(0) <= 1.0)  # validate only first value
                alphas.append(samples_c)
            alphas = np.float64(alphas).transpose((1, 2, 0))
        else:
            # One (H,W) alpha mask shared by all channels.
            alphas = self.factor.draw_samples((h, w), random_state=ia.new_random_state(seeds[i]))
            ia.do_assert(0.0 <= alphas.item(0) <= 1.0)
        result[i] = blend_alpha(image_first, image_second, alphas, eps=self.epsilon)

    return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
    """Choose per heatmap *cell* between the two branch results, using an
    alpha mask sampled at image resolution and resized to heatmap size."""
    def _sample_factor_mask(h_images, w_images, h_heatmaps, w_heatmaps, seed):
        # Sample the alpha mask at image resolution, then (if needed)
        # resize it to the heatmap resolution via a uint8 round-trip.
        samples_c = self.factor.draw_samples((h_images, w_images), random_state=ia.new_random_state(seed))
        ia.do_assert(0 <= samples_c.item(0) <= 1.0)  # validate only first value

        if (h_images, w_images) != (h_heatmaps, w_heatmaps):
            samples_c = np.clip(samples_c * 255, 0, 255).astype(np.uint8)
            samples_c = ia.imresize_single_image(samples_c, (h_heatmaps, w_heatmaps), interpolation="cubic")
            samples_c = samples_c.astype(np.float32) / 255.0

        return samples_c

    result = heatmaps
    nb_heatmaps = len(heatmaps)
    seeds = random_state.randint(0, 10**6, (nb_heatmaps,))

    if hooks is None or hooks.is_propagating(heatmaps, augmenter=self, parents=parents, default=True):
        if self.first is None:
            heatmaps_first = heatmaps
        else:
            heatmaps_first = self.first.augment_heatmaps(
                [heatmaps_i.deepcopy() for heatmaps_i in heatmaps],
                parents=parents + [self],
                hooks=hooks
            )

        if self.second is None:
            heatmaps_second = heatmaps
        else:
            heatmaps_second = self.second.augment_heatmaps(
                [heatmaps_i.deepcopy() for heatmaps_i in heatmaps],
                parents=parents + [self],
                hooks=hooks
            )
    else:
        heatmaps_first = heatmaps
        heatmaps_second = heatmaps

    for i in sm.xrange(nb_heatmaps):
        heatmaps_i = heatmaps[i]
        h_img, w_img = heatmaps_i.shape[0:2]
        h_heatmaps, w_heatmaps = heatmaps_i.arr_0to1.shape[0:2]
        nb_channels_img = heatmaps_i.shape[2] if len(heatmaps_i.shape) >= 3 else 1
        nb_channels_heatmaps = heatmaps_i.arr_0to1.shape[2]
        heatmaps_first_i = heatmaps_first[i]
        heatmaps_second_i = heatmaps_second[i]
        per_channel = self.per_channel.draw_sample(random_state=ia.new_random_state(seeds[i]))
        if per_channel > 0.5:
            samples = []
            for c in sm.xrange(nb_channels_img):
                # We sample here at the same size as the original image, as
                # some effects might not scale with image size. The sampled
                # mask is then downscaled to the heatmap size.
                samples_c = _sample_factor_mask(h_img, w_img, h_heatmaps, w_heatmaps, seeds[i]+1+c)
                samples.append(samples_c[..., np.newaxis])
            samples = np.concatenate(samples, axis=2)
            samples_avg = np.average(samples, axis=2)
            samples_tiled = np.tile(samples_avg[..., np.newaxis], (1, 1, nb_channels_heatmaps))
        else:
            samples = _sample_factor_mask(h_img, w_img, h_heatmaps, w_heatmaps, seeds[i])
            samples_tiled = np.tile(samples[..., np.newaxis], (1, 1, nb_channels_heatmaps))

        # Hard per-cell selection: alpha >= 0.5 takes the first branch,
        # everything else the second branch.
        mask = samples_tiled >= 0.5
        heatmaps_arr_aug = mask * heatmaps_first_i.arr_0to1 + (~mask) * heatmaps_second_i.arr_0to1

        result[i].arr_0to1 = heatmaps_arr_aug

    return result
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
    """Augment keypoints by delegating to the shared coordinate-based routine.

    The callback runs one branch augmenter on deep copies of the inputs so
    the caller's KeypointsOnImage instances are never mutated.
    """
    def _run_branch(branch, kpsois, branch_parents, branch_hooks):
        copies = [kpsoi.deepcopy() for kpsoi in kpsois]
        return branch.augment_keypoints(
            keypoints_on_images=copies,
            parents=branch_parents,
            hooks=branch_hooks)

    return self._augment_coordinate_based(
        keypoints_on_images, random_state, parents, hooks, _run_branch)
def _augment_polygons(self, polygons_on_images, random_state, parents, hooks):
    """Augment polygons by delegating to the shared coordinate-based routine.

    The callback runs one branch augmenter on deep copies of the inputs so
    the caller's PolygonsOnImage instances are never mutated.
    """
    def _run_branch(branch, polysois, branch_parents, branch_hooks):
        copies = [polysoi.deepcopy() for polysoi in polysois]
        return branch.augment_polygons(
            polygons_on_images=copies,
            parents=branch_parents,
            hooks=branch_hooks)

    return self._augment_coordinate_based(
        polygons_on_images, random_state, parents, hooks, _run_branch)
def _augment_coordinate_based(self, inputs, random_state, parents, hooks, func):
result = inputs
nb_images = len(inputs)
seeds = random_state.randint(0, 10**6, (nb_images,))
if hooks is None or hooks.is_propagating(inputs, augmenter=self, parents=parents, default=True):
if self.first is None:
outputs_first = inputs
else:
outputs_first = func(self.first, inputs, parents + [self], hooks)
if self.second is None:
outputs_second = inputs
else:
outputs_second = func(self.second, inputs, parents + [self], hooks)
else:
outputs_first = inputs
outputs_second = inputs
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/contrast.py | augmentation/image_augmentation/helpers/imgaug/augmenters/contrast.py | """
Augmenters that perform contrast changes.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([iaa.GammaContrast((0.5, 1.5))])
List of augmenters:
* GammaContrast
* SigmoidContrast
* LogContrast
* LinearContrast
* AllChannelsHistogramEqualization
* HistogramEqualization
* AllChannelsCLAHE
* CLAHE
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import six.moves as sm
import skimage.exposure as ski_exposure
import cv2
import warnings
from . import meta
from . import color as color_lib
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
# TODO quite similar to the other adjust_contrast_*() functions, make DRY
def adjust_contrast_gamma(arr, gamma):
    """
    Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.

    dtype support::

        * ``uint8``: yes; fully tested (handled by ``cv2``)
        * ``uint16``/``uint32``/``uint64``: yes; tested (handled by ``skimage``)
        * ``int8``/``int16``/``int32``/``int64``: limited; tested
          (must not contain negative values)
        * ``float16``/``float32``/``float64``: limited; tested
          (must not contain negative values)
        * ``float128``: no (leads to error in scikit-image)
        * ``bool``: no (does not make sense for contrast adjustments)

        Normalization for non-uint8 dtypes is done as ``I_ij/max`` (and
        reversed afterwards); integer results are floored, not rounded.

    Parameters
    ----------
    arr : numpy.ndarray
        Array for which to adjust the contrast. Dtype ``uint8`` is fastest.

    gamma : number
        Exponent for the contrast adjustment. Higher values darken the image.

    Returns
    -------
    numpy.ndarray
        Array with adjusted contrast.

    """
    if arr.dtype.name != "uint8":
        # All non-uint8 dtypes are delegated to scikit-image.
        return ski_exposure.adjust_gamma(arr, gamma)

    # uint8 path: build a 256-entry lookup table once and let cv2 apply it.
    # NOTE: int8 should also work with cv2.LUT according to its docs
    # (https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT),
    # but `d` appeared to be 0 for CV_8S, causing failures, hence uint8-only.
    min_value, _center, max_value = iadt.get_value_range_of_dtype(arr.dtype)
    dynamic_range = max_value - min_value
    normalized = np.linspace(0, 1.0, num=dynamic_range + 1, dtype=np.float32)
    # 255 * ((I_ij/255)**gamma)
    # np.float32(.) also works when `gamma` is a numpy array of size 1
    lut = min_value + (normalized ** np.float32(gamma)) * dynamic_range
    lut = np.clip(lut, min_value, max_value).astype(arr.dtype)
    arr_aug = cv2.LUT(arr, lut)
    # cv2.LUT drops a trailing singleton channel axis; restore it
    if arr.ndim == 3 and arr_aug.ndim == 2:
        return arr_aug[..., np.newaxis]
    return arr_aug
# TODO quite similar to the other adjust_contrast_*() functions, make DRY
def adjust_contrast_sigmoid(arr, gain, cutoff):
    """
    Adjust contrast by scaling each pixel value to
    ``255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))``.

    dtype support::

        * ``uint8``: yes; fully tested (handled by ``cv2``)
        * ``uint16``/``uint32``/``uint64``: yes; tested (handled by ``skimage``)
        * ``int8``/``int16``/``int32``/``int64``: limited; tested
          (must not contain negative values)
        * ``float16``/``float32``/``float64``: limited; tested
          (must not contain negative values)
        * ``float128``: no (leads to error in scikit-image)
        * ``bool``: no (does not make sense for contrast adjustments)

        Normalization for non-uint8 dtypes is done as ``I_ij/max`` (and
        reversed afterwards); integer results are floored, not rounded.

    Parameters
    ----------
    arr : numpy.ndarray
        Array for which to adjust the contrast. Dtype ``uint8`` is fastest.

    gain : number
        Multiplier for the sigmoid function's output.
        Higher values lead to quicker changes from dark to light pixels.

    cutoff : number
        Cutoff that shifts the sigmoid function in horizontal direction.
        Higher values mean that the switch from dark to light pixels happens
        later, i.e. the pixels will remain darker.

    Returns
    -------
    numpy.ndarray
        Array with adjusted contrast.

    """
    if arr.dtype.name != "uint8":
        # All non-uint8 dtypes are delegated to scikit-image.
        return ski_exposure.adjust_sigmoid(arr, cutoff=cutoff, gain=gain)

    # uint8 path: build a 256-entry lookup table once and let cv2 apply it.
    # NOTE: int8 should also work with cv2.LUT according to its docs
    # (https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT),
    # but `d` appeared to be 0 for CV_8S, causing failures, hence uint8-only.
    min_value, _center, max_value = iadt.get_value_range_of_dtype(arr.dtype)
    dynamic_range = max_value - min_value
    normalized = np.linspace(0, 1.0, num=dynamic_range + 1, dtype=np.float32)
    # 255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))
    # np.float32(.) also works when gain/cutoff are numpy arrays of size 1
    gain32 = np.float32(gain)
    cutoff32 = np.float32(cutoff)
    lut = min_value + dynamic_range * 1 / (1 + np.exp(gain32 * (cutoff32 - normalized)))
    arr_aug = cv2.LUT(arr, np.clip(lut, min_value, max_value).astype(arr.dtype))
    # cv2.LUT drops a trailing singleton channel axis; restore it
    if arr.ndim == 3 and arr_aug.ndim == 2:
        return arr_aug[..., np.newaxis]
    return arr_aug
# TODO quite similar to the other adjust_contrast_*() functions, make DRY
def adjust_contrast_log(arr, gain):
    """
    Adjust contrast by scaling each pixel value to ``255 * gain * log_2(1 + I_ij/255)``.

    dtype support::

        * ``uint8``: yes; fully tested (1) (2) (3)
        * ``uint16``: yes; tested (2) (3)
        * ``uint32``: yes; tested (2) (3)
        * ``uint64``: yes; tested (2) (3) (4)
        * ``int8``: limited; tested (2) (3) (5)
        * ``int16``: limited; tested (2) (3) (5)
        * ``int32``: limited; tested (2) (3) (5)
        * ``int64``: limited; tested (2) (3) (4) (5)
        * ``float16``: limited; tested (5)
        * ``float32``: limited; tested (5)
        * ``float64``: limited; tested (5)
        * ``float128``: no (6)
        * ``bool``: no (7)

        - (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``.
        - (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the
              dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards,
              e.g. ``result*255`` for ``uint8``.
        - (3) Integer-like values are not rounded after applying the contrast adjustment equation
              (before inverting the normalization to 0.0-1.0 space), i.e. projection from continuous
              space to discrete happens according to floor function.
        - (4) Note that scikit-image doc says that integers are converted to ``float64`` values before
              applying the contrast normalization method. This might lead to inaccuracies for large
              64bit integer values. Tests showed no indication of that happening though.
        - (5) Must not contain negative values. Values >=0 are fully supported.
        - (6) Leads to error in scikit-image.
        - (7) Does not make sense for contrast adjustments.

    Parameters
    ----------
    arr : numpy.ndarray
        Array for which to adjust the contrast. Dtype ``uint8`` is fastest.

    gain : number
        Multiplier for the logarithm result. Values around 1.0 lead to
        contrast-adjusted images. Values above 1.0 quickly lead to partially
        broken images due to exceeding the datatype's value range.

    Returns
    -------
    numpy.ndarray
        Array with adjusted contrast.

    """
    # int8 is also possible according to docs
    # https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT , but here it seemed
    # like `d` was 0 for CV_8S, causing that to fail
    if arr.dtype.name == "uint8":
        min_value, _center_value, max_value = iadt.get_value_range_of_dtype(arr.dtype)
        dynamic_range = max_value - min_value
        # one LUT entry per representable uint8 value, normalized to [0, 1]
        value_range = np.linspace(0, 1.0, num=dynamic_range+1, dtype=np.float32)
        # 255 * gain * log_2(1 + I_ij/255)
        # (previous comment here wrongly showed the sigmoid formula)
        # using np.float32(.) here still works when the input is a numpy array of size 1
        gain = np.float32(gain)
        table = min_value + dynamic_range * gain * np.log2(1 + value_range)
        arr_aug = cv2.LUT(arr, np.clip(table, min_value, max_value).astype(arr.dtype))
        # cv2.LUT drops a trailing singleton channel axis; restore it
        if arr.ndim == 3 and arr_aug.ndim == 2:
            return arr_aug[..., np.newaxis]
        return arr_aug
    else:
        # non-uint8 dtypes are delegated to scikit-image
        return ski_exposure.adjust_log(arr, gain=gain)
# TODO quite similar to the other adjust_contrast_*() functions, make DRY
def adjust_contrast_linear(arr, alpha):
    """Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.

    dtype support::

        * ``uint8``: yes; fully tested (handled by ``cv2``)
        * ``uint16``/``uint32``: yes; tested (handled by raw ``numpy``)
        * ``int8``/``int16``/``int32``: yes; tested
        * ``float16``/``float32``/``float64``: yes; tested
        * ``uint64``/``int64``/``float128``: no (conversion to ``float64``
          is done during augmentation, hence support cannot be guaranteed)
        * ``bool``: no (does not make sense for contrast adjustments)

        Only tested for reasonable alphas up to a value of around 100.

    Parameters
    ----------
    arr : numpy.ndarray
        Array for which to adjust the contrast. Dtype ``uint8`` is fastest.

    alpha : number
        Multiplier to linearly pronounce (>1.0), dampen (0.0 to 1.0) or
        invert (<0.0) the difference between each pixel value and the center
        value, e.g. ``127`` for ``uint8``.

    Returns
    -------
    numpy.ndarray
        Array with adjusted contrast.

    """
    if arr.dtype.name != "uint8":
        # Generic path: compute in float64, then restore the input dtype.
        input_dtype = arr.dtype
        _minv, center, _maxv = iadt.get_value_range_of_dtype(input_dtype)
        if input_dtype.kind in ["u", "i"]:
            # use an integer center for integer dtypes
            center = int(center)
        adjusted = center + alpha * (arr.astype(np.float64) - center)
        return iadt.restore_dtypes_(adjusted, input_dtype)

    # uint8 path: build a 256-entry lookup table once and let cv2 apply it.
    # NOTE: int8 should also work with cv2.LUT according to its docs
    # (https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT),
    # but `d` appeared to be 0 for CV_8S, causing failures, hence uint8-only.
    min_value, center_value, max_value = iadt.get_value_range_of_dtype(arr.dtype)
    levels = np.arange(0, 256, dtype=np.float32)
    # 127 + alpha*(I_ij-127)
    # np.float32(.) also works when `alpha` is a numpy array of size 1
    lut = center_value + np.float32(alpha) * (levels - center_value)
    arr_aug = cv2.LUT(arr, np.clip(lut, min_value, max_value).astype(arr.dtype))
    # cv2.LUT drops a trailing singleton channel axis; restore it
    if arr.ndim == 3 and arr_aug.ndim == 2:
        return arr_aug[..., np.newaxis]
    return arr_aug
def GammaContrast(gamma=1, per_channel=False, name=None, deterministic=False, random_state=None):
    """
    Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.

    Values in the range ``gamma=(0.5, 2.0)`` seem to be sensible.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_gamma`.

    Parameters
    ----------
    gamma : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Exponent for the contrast adjustment. Higher values darken the image.
        A number is used as-is for all images; a tuple ``(a, b)`` samples a
        value from ``[a, b]`` per image; a list samples one of its values per
        image; a StochasticParameter is sampled per image.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to sample a
        new value for each channel (True). A float ``p`` treats `per_channel`
        as True for ``p`` percent of all images, otherwise as False.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter to perform gamma contrast adjustment.

    """
    if name is None:
        name = ia.caller_name()
    gamma_param = iap.handle_continuous_param(
        gamma, "gamma", value_range=None,
        tuple_to_uniform=True, list_to_choice=True)
    return _ContrastFuncWrapper(
        adjust_contrast_gamma, [gamma_param], per_channel,
        dtypes_allowed=["uint8", "uint16", "uint32", "uint64",
                        "int8", "int16", "int32", "int64",
                        "float16", "float32", "float64"],
        dtypes_disallowed=["float96", "float128", "float256", "bool"],
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
def SigmoidContrast(gain=10, cutoff=0.5, per_channel=False, name=None, deterministic=False, random_state=None):
    """
    Adjust contrast by scaling each pixel value to
    ``255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))``.

    Values in the range ``gain=(5, 20)`` and ``cutoff=(0.25, 0.75)`` seem to
    be sensible.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_sigmoid`.

    Parameters
    ----------
    gain : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Multiplier for the sigmoid function's output. Higher values lead to
        quicker changes from dark to light pixels. A number is used as-is for
        all images; a tuple ``(a, b)`` samples a value from ``[a, b]`` per
        image; a list samples one of its values per image; a
        StochasticParameter is sampled per image.

    cutoff : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Cutoff that shifts the sigmoid function in horizontal direction.
        Higher values mean that the switch from dark to light pixels happens
        later, i.e. the pixels will remain darker. Accepts the same forms as
        `gain`.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to sample a
        new value for each channel (True). A float ``p`` treats `per_channel`
        as True for ``p`` percent of all images, otherwise as False.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter to perform sigmoid contrast adjustment.

    """
    # TODO add inv parameter?
    if name is None:
        name = ia.caller_name()
    gain_param = iap.handle_continuous_param(
        gain, "gain", value_range=(0, None),
        tuple_to_uniform=True, list_to_choice=True)
    cutoff_param = iap.handle_continuous_param(
        cutoff, "cutoff", value_range=(0, 1.0),
        tuple_to_uniform=True, list_to_choice=True)
    return _ContrastFuncWrapper(
        adjust_contrast_sigmoid, [gain_param, cutoff_param], per_channel,
        dtypes_allowed=["uint8", "uint16", "uint32", "uint64",
                        "int8", "int16", "int32", "int64",
                        "float16", "float32", "float64"],
        dtypes_disallowed=["float96", "float128", "float256", "bool"],
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
def LogContrast(gain=1, per_channel=False, name=None, deterministic=False, random_state=None):
    """
    Adjust contrast by scaling each pixel value to ``255 * gain * log_2(1 + I_ij/255)``.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_log`.

    Parameters
    ----------
    gain : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Multiplier for the logarithm result. Values around 1.0 lead to
        contrast-adjusted images. Values above 1.0 quickly lead to partially
        broken images due to exceeding the datatype's value range. A number
        is used as-is for all images; a tuple ``(a, b)`` samples a value from
        ``[a, b]`` per image; a list samples one of its values per image; a
        StochasticParameter is sampled per image.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to sample a
        new value for each channel (True). A float ``p`` treats `per_channel`
        as True for ``p`` percent of all images, otherwise as False.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter to perform logarithmic contrast adjustment.

    """
    # TODO add inv parameter?
    if name is None:
        name = ia.caller_name()
    gain_param = iap.handle_continuous_param(
        gain, "gain", value_range=(0, None),
        tuple_to_uniform=True, list_to_choice=True)
    return _ContrastFuncWrapper(
        adjust_contrast_log, [gain_param], per_channel,
        dtypes_allowed=["uint8", "uint16", "uint32", "uint64",
                        "int8", "int16", "int32", "int64",
                        "float16", "float32", "float64"],
        dtypes_disallowed=["float96", "float128", "float256", "bool"],
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
def LinearContrast(alpha=1, per_channel=False, name=None, deterministic=False, random_state=None):
    """Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_linear`.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Multiplier to linearly pronounce (>1.0), dampen (0.0 to 1.0) or
        invert (<0.0) the difference between each pixel value and the center
        value, e.g. ``127`` for ``uint8``. A number is used as-is for all
        images; a tuple ``(a, b)`` samples a value from ``[a, b]`` per image;
        a list samples one of its values per image; a StochasticParameter is
        sampled per image.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to sample a
        new value for each channel (True). A float ``p`` treats `per_channel`
        as True for ``p`` percent of all images, otherwise as False.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter to perform contrast adjustment by linearly scaling the
        distance to 128.

    """
    if name is None:
        name = ia.caller_name()
    alpha_param = iap.handle_continuous_param(
        alpha, "alpha", value_range=None,
        tuple_to_uniform=True, list_to_choice=True)
    return _ContrastFuncWrapper(
        adjust_contrast_linear, [alpha_param], per_channel,
        dtypes_allowed=["uint8", "uint16", "uint32",
                        "int8", "int16", "int32",
                        "float16", "float32", "float64"],
        dtypes_disallowed=["uint64", "int64", "float96", "float128", "float256", "bool"],
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
# TODO maybe offer the other contrast augmenters also wrapped in this, similar to CLAHE and HistogramEqualization?
# this is essentially tested by tests for CLAHE
class _IntensityChannelBasedApplier(object):
    """Helper that applies a channelwise image function only to the
    intensity-like channel of images.

    3- and 4-channel images are converted to a colorspace with an explicit
    intensity/lightness channel (HSV, HLS or Lab), the function is applied to
    that single channel, and the conversion is inverted afterwards.
    Single-channel images are passed to the function directly.
    """

    # colorspace constants, mirrored from ChangeColorspace for convenience
    RGB = color_lib.ChangeColorspace.RGB
    BGR = color_lib.ChangeColorspace.BGR
    HSV = color_lib.ChangeColorspace.HSV
    HLS = color_lib.ChangeColorspace.HLS
    Lab = color_lib.ChangeColorspace.Lab

    # index of the intensity-like channel in each supported target
    # colorspace: V in HSV (idx 2), L in HLS (idx 1), L in Lab (idx 0)
    _CHANNEL_MAPPING = {
        HSV: 2,
        HLS: 1,
        Lab: 0
    }

    def __init__(self, from_colorspace, to_colorspace, name):
        """Create forward and inverse colorspace converters.

        `from_colorspace` must be one of RGB/BGR/Lab/HLS/HSV;
        `to_colorspace` must be one of Lab/HLS/HSV (i.e. must have an
        intensity channel per ``_CHANNEL_MAPPING``). `name` is only used to
        derive names for the internal ChangeColorspace augmenters.
        """
        super(_IntensityChannelBasedApplier, self).__init__()
        # TODO maybe add CIE, Luv?
        ia.do_assert(from_colorspace in [self.RGB,
                                         self.BGR,
                                         self.Lab,
                                         self.HLS,
                                         self.HSV])
        ia.do_assert(to_colorspace in [self.Lab,
                                       self.HLS,
                                       self.HSV])
        # forward conversion: input colorspace -> intensity colorspace
        self.change_colorspace = color_lib.ChangeColorspace(
            to_colorspace=to_colorspace,
            from_colorspace=from_colorspace,
            name="%s_IntensityChannelBasedApplier_ChangeColorspace" % (name,))
        # inverse conversion: intensity colorspace -> input colorspace
        self.change_colorspace_inv = color_lib.ChangeColorspace(
            to_colorspace=from_colorspace,
            from_colorspace=to_colorspace,
            name="%s_IntensityChannelBasedApplier_ChangeColorspaceInverse" % (name,))

    def apply(self, images, random_state, parents, hooks, func):
        """Apply `func` to the intensity channel of each image in `images`.

        `func` is a callable taking ``(list of (H,W,1) arrays, random_state)``
        and returning the augmented arrays (e.g. a bound
        ``AllChannelsCLAHE._augment_images`` partial). Returns the augmented
        images, as an array if the input was an array.
        """
        input_was_array = ia.is_np_array(images)
        rss = ia.derive_random_states(random_state, 3)
        # NOTE(review): rss[2] is never used below and rss[0] is reused for
        # the inverse colorspace conversion — presumably harmless if the
        # conversion is deterministic, but confirm.

        # normalize images
        # (H, W, 1) will be used directly in AllChannelsCLAHE
        # (H, W, 3) will be converted to target colorspace in the next block
        # (H, W, 4) will be reduced to (H, W, 3) (remove 4th channel) and converted to target colorspace in next block
        # (H, W, <else>) will raise a warning and be treated channelwise by AllChannelsCLAHE
        images_normalized = []
        images_change_cs = []
        images_change_cs_indices = []
        for i, image in enumerate(images):
            nb_channels = image.shape[2]
            if nb_channels == 1:
                images_normalized.append(image)
            elif nb_channels == 3:
                # placeholder; filled in after colorspace conversion below
                images_normalized.append(None)
                images_change_cs.append(image)
                images_change_cs_indices.append(i)
            elif nb_channels == 4:
                # assume that 4th channel is an alpha channel, e.g. in RGBA
                images_normalized.append(None)
                images_change_cs.append(image[..., 0:3])
                images_change_cs_indices.append(i)
            else:
                warnings.warn("Got image with %d channels in _IntensityChannelBasedApplier (parents: %s), "
                              "expected 0, 1, 3 or 4 channels." % (
                                  nb_channels, ", ".join(parent.name for parent in parents)))
                images_normalized.append(image)

        # convert colorspaces of normalized 3-channel images
        images_after_color_conversion = [None] * len(images_normalized)
        if len(images_change_cs) > 0:
            images_new_cs = self.change_colorspace._augment_images(images_change_cs, rss[0], parents + [self], hooks)
            for image_new_cs, target_idx in zip(images_new_cs, images_change_cs_indices):
                # keep only the intensity channel (as (H, W, 1)) for `func`
                chan_idx = self._CHANNEL_MAPPING[self.change_colorspace.to_colorspace.value]
                images_normalized[target_idx] = image_new_cs[..., chan_idx:chan_idx+1]
                images_after_color_conversion[target_idx] = image_new_cs

        # apply CLAHE channelwise
        # images_aug = self.all_channel_clahe._augment_images(images_normalized, rss[1], parents + [self], hooks)
        images_aug = func(images_normalized, rss[1])

        # denormalize
        result = []
        images_change_cs = []
        images_change_cs_indices = []
        for i, (image, image_conv, image_aug) in enumerate(zip(images, images_after_color_conversion, images_aug)):
            nb_channels = image.shape[2]
            if nb_channels in [3, 4]:
                chan_idx = self._CHANNEL_MAPPING[self.change_colorspace.to_colorspace.value]
                # NOTE: this writes into `image_conv` in place (image_tmp is
                # the same array), which is fine as the converted images are
                # internal copies
                image_tmp = image_conv
                image_tmp[..., chan_idx:chan_idx+1] = image_aug
                # for 4-channel inputs, stash the alpha channel here so it
                # can be re-attached after the inverse conversion
                result.append(None if nb_channels == 3 else image[..., 3:4])
                images_change_cs.append(image_tmp)
                images_change_cs_indices.append(i)
            else:
                result.append(image_aug)

        # invert colorspace conversion
        if len(images_change_cs) > 0:
            images_new_cs = self.change_colorspace_inv._augment_images(images_change_cs, rss[0], parents + [self],
                                                                       hooks)
            for image_new_cs, target_idx in zip(images_new_cs, images_change_cs_indices):
                if result[target_idx] is None:
                    result[target_idx] = image_new_cs
                else:  # input image had four channels, 4th channel is already in result
                    result[target_idx] = np.dstack((image_new_cs, result[target_idx]))

        # convert to array if necessary
        if input_was_array:
            result = np.array(result, dtype=result[0].dtype)

        return result
# TODO add parameter `tile_grid_size_percent`
class AllChannelsCLAHE(meta.Augmenter):
"""
Contrast Limited Adaptive Histogram Equalization, applied to all channels of the input images.
    CLAHE performs histogram equalization within image patches, i.e. over local neighbourhoods.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: no (2)
* ``int16``: no (2)
* ``int32``: no (2)
* ``int64``: no (2)
* ``float16``: no (2)
* ``float32``: no (2)
* ``float64``: no (2)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) rejected by cv2
- (2) results in error in cv2: ``cv2.error: OpenCV(3.4.2) (...)/clahe.cpp:351: error: (-215:Assertion failed)
src.type() == (((0) & ((1 << 3) - 1)) + (((1)-1) << 3))
|| _src.type() == (((2) & ((1 << 3) - 1)) + (((1)-1) << 3)) in function 'apply'``
Parameters
----------
clip_limit : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See ``imgaug.augmenters.contrast.CLAHE``.
tile_grid_size_px : int or tuple of int or list of int or imgaug.parameters.StochasticParameter \
or tuple of tuple of int or tuple of list of int \
or tuple of imgaug.parameters.StochasticParameter, optional
See ``imgaug.augmenters.contrast.CLAHE``.
tile_grid_size_px_min : int, optional
See ``imgaug.augmenters.contrast.CLAHE``.
per_channel : bool or float, optional
Whether to use the same values for all channels (False)
or to sample new values for each channel (True).
If this parameter is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
"""
def __init__(self, clip_limit=40, tile_grid_size_px=8, tile_grid_size_px_min=3, per_channel=False, name=None,
             deterministic=False, random_state=None):
    """Initialize the augmenter; see the class docstring for parameter details."""
    super(AllChannelsCLAHE, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
    # clip limit must be strictly positive, hence the 1e-4 lower bound
    self.clip_limit = iap.handle_continuous_param(clip_limit, "clip_limit", value_range=(0+1e-4, None),
                                                  tuple_to_uniform=True, list_to_choice=True)
    # kernel-size style parameter (integers only); per class docstring this
    # may describe the tile grid's width/height
    self.tile_grid_size_px = iap.handle_discrete_kernel_size_param(tile_grid_size_px, "tile_grid_size_px",
                                                                   value_range=(0, None),
                                                                   allow_floats=False)
    # minimum for sampled tile grid sizes — presumably enforced later in
    # _augment_images (not visible here); TODO confirm
    self.tile_grid_size_px_min = tile_grid_size_px_min
    # probability-style parameter controlling per-channel sampling
    self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
def _augment_images(self, images, random_state, parents, hooks):
iadt.gate_dtypes(images,
allowed=["uint8", "uint16"],
disallowed=["bool",
"uint32", "uint64", "uint128", "uint256",
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/arithmetic.py | augmentation/image_augmentation/helpers/imgaug/augmenters/arithmetic.py | """
Augmenters that perform simple arithmetic changes.
Do not import directly from this file, as the categorization is not final.
Use instead::
from imgaug import augmenters as iaa
and then e.g.::
seq = iaa.Sequential([iaa.Add((-5, 5)), iaa.Multiply((0.9, 1.1))])
List of augmenters:
* Add
* AddElementwise
* AdditiveGaussianNoise
* AdditiveLaplaceNoise
* AdditivePoissonNoise
* Multiply
* MultiplyElementwise
* Dropout
* CoarseDropout
* ReplaceElementwise
* ImpulseNoise
* SaltAndPepper
* CoarseSaltAndPepper
* Salt
* CoarseSalt
* Pepper
* CoarsePepper
* Invert
* ContrastNormalization
* JpegCompression
"""
from __future__ import print_function, division, absolute_import
from PIL import Image as PIL_Image
import imageio
import tempfile
import numpy as np
import cv2
from . import meta
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
class Add(meta.Augmenter):
"""
Add a value to all pixels in an image.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no
* ``uint64``: no
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: no
* ``int64``: no
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: no
* ``float128``: no
* ``bool``: yes; tested
Parameters
----------
value : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Value to add to all pixels.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the discrete range ``[a, b]``
will be used.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image
from that parameter.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Add(10)
always adds a value of 10 to all pixels in the image.
>>> aug = iaa.Add((-10, 10))
adds a value from the discrete range ``[-10 .. 10]`` to all pixels of
the input images. The exact value is sampled per image.
>>> aug = iaa.Add((-10, 10), per_channel=True)
adds a value from the discrete range ``[-10 .. 10]`` to all pixels of
the input images. The exact value is sampled per image AND channel,
i.e. to a red-channel it might add 5 while subtracting 7 from the
blue channel of the same image.
>>> aug = iaa.Add((-10, 10), per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
def __init__(self, value=0, per_channel=False, name=None, deterministic=False, random_state=None):
    """Create a new Add augmenter.

    ``value`` (number, ``(a, b)`` tuple, list or StochasticParameter) and
    ``per_channel`` (bool or probability float) are normalized here into
    stochastic parameters, so that sampling later on is uniform regardless
    of the argument type the caller used.
    """
    super(Add, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
    # Normalize the user-facing arguments once, up front.
    value_param = iap.handle_continuous_param(
        value, "value",
        value_range=None,
        tuple_to_uniform=True,
        list_to_choice=True,
    )
    self.value = value_param
    self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
def _augment_images(self, images, random_state, parents, hooks):
    """Add the sampled per-image (and optionally per-channel) constants to a batch of images.

    For uint8 images a 256-entry lookup table applied via ``cv2.LUT`` is
    used, which is considerably faster than arithmetic on the full array.
    For the other supported dtypes the sampled value is clipped and both
    arrays promoted so the addition cannot overflow, after which the
    original dtype is restored.
    """
    # Reject dtypes for which the clip/promote logic below is not validated.
    iadt.gate_dtypes(images,
                     allowed=["bool", "uint8", "uint16", "int8", "int16", "float16", "float32"],
                     disallowed=["uint32", "uint64", "uint128", "uint256", "int32", "int64", "int128", "int256",
                                 "float64", "float96", "float128", "float256"],
                     augmenter=self)
    input_dtypes = iadt.copy_dtypes_for_restore(images, force_list=True)
    nb_images = len(images)
    nb_channels_max = meta.estimate_max_number_of_channels(images)
    rss = ia.derive_random_states(random_state, 2)
    # Sample one value per image AND channel; whether the per-channel values
    # are actually used is decided per image via per_channel_samples below.
    value_samples = self.value.draw_samples((nb_images, nb_channels_max), random_state=rss[0])
    per_channel_samples = self.per_channel.draw_samples((nb_images,), random_state=rss[1])
    gen = enumerate(zip(images, value_samples, per_channel_samples, input_dtypes))
    for i, (image, value_samples_i, per_channel_samples_i, input_dtype) in gen:
        nb_channels = image.shape[2]
        if image.dtype.name == "uint8":
            # LUT approach: build a 256-entry table of clipped "x + value"
            # results and map the image through it. Per the original authors'
            # benchmarks this is ~3-4x faster than direct arithmetic and
            # still faster than image+sample without a LUT.
            value_samples_i = np.clip(np.round(value_samples_i), -255, 255).astype(np.int16)
            value_range = np.arange(0, 256, dtype=np.int16)
            if per_channel_samples_i > 0.5:
                result = []
                # One table per channel, each shifted by that channel's value.
                tables = np.tile(value_range[np.newaxis, :], (nb_channels, 1)) \
                    + value_samples_i[0:nb_channels, np.newaxis]
                tables = np.clip(tables, 0, 255).astype(image.dtype)
                for c, table in enumerate(tables):
                    arr_aug = cv2.LUT(image[..., c], table)
                    result.append(arr_aug[..., np.newaxis])
                images[i] = np.concatenate(result, axis=2)
            else:
                table = value_range + value_samples_i[0]
                image_aug = cv2.LUT(image, np.clip(table, 0, 255).astype(image.dtype))
                if image_aug.ndim == 2:
                    # cv2.LUT drops the channel axis for single-channel input;
                    # restore it so downstream shape assumptions hold.
                    image_aug = image_aug[..., np.newaxis]
                images[i] = image_aug
        else:
            if per_channel_samples_i > 0.5:
                value = value_samples_i[0:nb_channels].reshape((1, 1, nb_channels))
            else:
                value = value_samples_i[0:1].reshape((1, 1, 1))
            # Limit the value range of the sampled values to a dtype with 2x
            # the image's itemsize. This prevents overflow and makes image
            # up-casting less likely (performance/memory), at the cost of
            # limiting the value parameter's range. 2x is needed so that e.g.
            # a uint8 image's max can be shifted to its min (-255..255).
            itemsize = image.dtype.itemsize * 2
            dtype_target = np.dtype("%s%d" % (value.dtype.kind, itemsize))
            value = iadt.clip_to_dtype_value_range_(value, dtype_target, validate=True)
            image, value = iadt.promote_array_dtypes_([image, value], dtypes=[image.dtype, dtype_target],
                                                      increase_itemsize_factor=2)
            # casting="no": dtypes were already promoted to match exactly.
            image = np.add(image, value, out=image, casting="no")
            image = iadt.restore_dtypes_(image, input_dtype)
            images[i] = image
    return images
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
    # Adding constants to pixel intensities does not move anything spatially,
    # so heatmaps are returned unchanged.
    return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
    # Purely intensity-based augmentation; keypoint coordinates are unaffected.
    return keypoints_on_images
def get_parameters(self):
    """Return this augmenter's stochastic parameters: [value, per_channel]."""
    params = [self.value]
    params.append(self.per_channel)
    return params
# TODO merge this with Add
class AddElementwise(meta.Augmenter):
    """
    Add values to the pixels of images with possibly different values for neighbouring pixels.

    While the Add Augmenter adds a constant value per image, this one can
    add different values (sampled per pixel).

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: no
        * ``uint64``: no
        * ``int8``: yes; tested
        * ``int16``: yes; tested
        * ``int32``: no
        * ``int64``: no
        * ``float16``: yes; tested
        * ``float32``: yes; tested
        * ``float64``: no
        * ``float128``: no
        * ``bool``: yes; tested

    Parameters
    ----------
    value : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
        Value to add to the pixels.

        * If an int, then that value will be used for all images.
        * If a tuple ``(a, b)``, then values from the discrete range ``[a .. b]``
          will be sampled.
        * If a list of integers, a random value will be sampled from the list
          per image.
        * If a StochasticParameter, then values will be sampled per pixel
          (and possibly channel) from that parameter.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False)
        or to sample a new value for each channel (True).
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as True, otherwise as False.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.AddElementwise(10)

    always adds a value of 10 to all pixels in the image.

    >>> aug = iaa.AddElementwise((-10, 10))

    samples per pixel a value from the discrete range ``[-10 .. 10]`` and
    adds that value to the pixel.

    >>> aug = iaa.AddElementwise((-10, 10), per_channel=True)

    samples per pixel *and channel* a value from the discrete
    range ``[-10 .. 10]`` and adds it to the pixel's value. Therefore,
    added values may differ between channels of the same pixel.

    >>> aug = iaa.AddElementwise((-10, 10), per_channel=0.5)

    same as previous example, but the `per_channel` feature is only active
    for 50 percent of all images.

    """

    def __init__(self, value=0, per_channel=False, name=None, deterministic=False, random_state=None):
        super(AddElementwise, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        # TODO open to continuous values, similar to Add
        self.value = iap.handle_discrete_param(value, "value", value_range=(-255, 255), tuple_to_uniform=True,
                                               list_to_choice=True, allow_floats=False)
        self.per_channel = iap.handle_probability_param(per_channel, "per_channel")

    def _augment_images(self, images, random_state, parents, hooks):
        """Add per-pixel (and optionally per-channel) sampled values to each image."""
        # Reject dtypes for which the clip/promote logic below is not validated.
        iadt.gate_dtypes(images,
                         allowed=["bool", "uint8", "uint16", "int8", "int16", "float16", "float32"],
                         disallowed=["uint32", "uint64", "uint128", "uint256", "int32", "int64", "int128", "int256",
                                     "float64", "float96", "float128", "float256"],
                         augmenter=self)
        input_dtypes = iadt.copy_dtypes_for_restore(images, force_list=True)
        nb_images = len(images)
        # One random state per image (for per-pixel sampling) plus one final
        # state for the per_channel decisions.
        rss = ia.derive_random_states(random_state, nb_images+1)
        per_channel_samples = self.per_channel.draw_samples((nb_images,), random_state=rss[-1])
        gen = enumerate(zip(images, per_channel_samples, rss[:-1], input_dtypes))
        for i, (image, per_channel_samples_i, rs, input_dtype) in gen:
            height, width, nb_channels = image.shape
            # One sampled value per pixel; per channel too if per_channel is active.
            sample_shape = (height, width, nb_channels if per_channel_samples_i > 0.5 else 1)
            value = self.value.draw_samples(sample_shape, random_state=rs)
            if image.dtype.name == "uint8":
                # Fast path for uint8: add in int16 and clip to [0, 255].
                # Per the original authors' benchmarks this is around 60-100%
                # faster than the generic else-block below (more speedup for
                # smaller images); computing min/max first to skip conversions
                # was measured to be 20-30% slower.
                if value.dtype.kind == "f":
                    value = np.round(value)
                image = image.astype(np.int16)
                value = np.clip(value, -255, 255).astype(np.int16)
                image_aug = image + value
                image_aug = np.clip(image_aug, 0, 255).astype(np.uint8)
                images[i] = image_aug
            else:
                # Limit the sampled values to a dtype with 2x the image's
                # itemsize. This prevents overflow and makes up-casting of the
                # image less likely (performance/memory), at the cost of
                # limiting the value parameter's range. 2x is needed so that
                # e.g. a uint8 image's max can be shifted to its min (-255..255).
                itemsize = image.dtype.itemsize * 2
                dtype_target = np.dtype("%s%d" % (value.dtype.kind, itemsize))
                # validate=100: presumably spot-checks only 100 samples for
                # speed, since `value` is a full per-pixel array here (Add uses
                # validate=True on its small per-channel array) — TODO confirm.
                value = iadt.clip_to_dtype_value_range_(value, dtype_target, validate=100)
                if value.shape[2] == 1:
                    value = np.tile(value, (1, 1, nb_channels))
                image, value = iadt.promote_array_dtypes_([image, value], dtypes=[image.dtype, dtype_target],
                                                          increase_itemsize_factor=2)
                # casting="no": dtypes were already promoted to match exactly.
                image = np.add(image, value, out=image, casting="no")
                image = iadt.restore_dtypes_(image, input_dtype)
                images[i] = image
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Intensity-only augmentation; heatmaps are unaffected.
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Intensity-only augmentation; keypoint coordinates are unaffected.
        return keypoints_on_images

    def get_parameters(self):
        # Stochastic parameters of this augmenter, in a fixed order.
        return [self.value, self.per_channel]
def AdditiveGaussianNoise(loc=0, scale=0, per_channel=False, name=None, deterministic=False, random_state=None):
    """
    Add gaussian noise (aka white noise) to images.

    dtype support::

        See ``imgaug.augmenters.arithmetic.AddElementwise``.

    Parameters
    ----------
    loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Mean of the normal distribution that generates the noise. A number is
        used as-is; a tuple ``(a, b)`` samples uniformly from ``[a, b]`` per
        image; a list samples one of its entries per image; a
        StochasticParameter is sampled per image.

    scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Standard deviation of the normal distribution that generates the
        noise. Must be ``>= 0``; if 0, only `loc` will be used. Accepts the
        same input forms as `loc`.

    per_channel : bool or float, optional
        Whether to use the same noise value per pixel for all channels
        (False) or to sample a new value for each channel (True). A float
        ``p`` treats `per_channel` as True for ``p`` percent of all images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)

    adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images.

    >>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)

    same, but the noise is sampled per channel for 50 percent of all images.

    """
    # Normalize both distribution parameters, then delegate the actual
    # per-pixel work to AddElementwise with a Normal parameter.
    loc_param = iap.handle_continuous_param(
        loc, "loc", value_range=None, tuple_to_uniform=True, list_to_choice=True)
    scale_param = iap.handle_continuous_param(
        scale, "scale", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)
    noise = iap.Normal(loc=loc_param, scale=scale_param)
    return AddElementwise(noise, per_channel=per_channel, name=name,
                          deterministic=deterministic, random_state=random_state)
def AdditiveLaplaceNoise(loc=0, scale=0, per_channel=False, name=None, deterministic=False, random_state=None):
    """
    Add laplace noise to images.

    The laplace distribution is similar to the gaussian distribution, but
    puts more weight on the long tail, so this noise adds more outliers
    (very high/low values). It sits somewhere between gaussian noise and
    salt and pepper noise.

    Values of around ``255 * 0.05`` for `scale` lead to visible noise (for
    uint8); around ``255 * 0.10`` to very visible noise. It is recommended
    to usually set `per_channel` to True.

    dtype support::

        See ``imgaug.augmenters.arithmetic.AddElementwise``.

    Parameters
    ----------
    loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Mean of the laplace distribution that generates the noise. A number
        is used as-is; a tuple ``(a, b)`` samples uniformly from ``[a, b]``
        per image; a list samples one of its entries per image; a
        StochasticParameter is sampled per image.

    scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Standard deviation of the laplace distribution that generates the
        noise. Must be ``>= 0``; if 0, only `loc` will be used. Recommended
        to be around ``255 * 0.05``. Accepts the same input forms as `loc`.

    per_channel : bool or float, optional
        Whether to use the same noise value per pixel for all channels
        (False) or to sample a new value for each channel (True). A float
        ``p`` treats `per_channel` as True for ``p`` percent of all images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255)

    Adds laplace noise from the distribution ``Laplace(0, 0.1*255)`` to images.

    >>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255, per_channel=0.5)

    Same, but the noise is sampled per channel for 50 percent of all images.

    """
    # Normalize both distribution parameters, then delegate the actual
    # per-pixel work to AddElementwise with a Laplace parameter.
    loc_param = iap.handle_continuous_param(
        loc, "loc", value_range=None, tuple_to_uniform=True, list_to_choice=True)
    scale_param = iap.handle_continuous_param(
        scale, "scale", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)
    noise = iap.Laplace(loc=loc_param, scale=scale_param)
    return AddElementwise(noise, per_channel=per_channel, name=name,
                          deterministic=deterministic, random_state=random_state)
def AdditivePoissonNoise(lam=0, per_channel=False, name=None, deterministic=False, random_state=None):
    """
    Create an augmenter to add poisson noise to images.

    Poisson noise is comparable to gaussian noise as in
    ``AdditiveGaussianNoise``, but the values are sampled from a poisson
    distribution instead of a gaussian distribution. As poisson
    distributions produce only positive numbers, the sign of each sampled
    value is randomly flipped here.

    Values of around ``10.0`` for `lam` lead to visible noise (for uint8);
    around ``20.0`` to very visible noise. It is recommended to usually
    set `per_channel` to True.

    dtype support::

        See ``imgaug.augmenters.arithmetic.AddElementwise``.

    Parameters
    ----------
    lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Lambda parameter of the poisson distribution. Recommended values
        are around ``0.0`` to ``10.0``. A number is used as-is; a tuple
        ``(a, b)`` samples uniformly from ``[a, b]`` per image; a list
        samples one of its entries per image; a StochasticParameter is
        sampled per image.

    per_channel : bool or float, optional
        Whether to use the same noise value per pixel for all channels
        (False) or to sample a new value for each channel (True). A float
        ``p`` treats `per_channel` as True for ``p`` percent of all images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.AdditivePoissonNoise(lam=5.0)

    Adds poisson noise sampled from ``Poisson(5.0)`` to images.

    >>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=True)

    Adds poisson noise sampled from ``Poisson(x)`` with ``x`` drawn from
    ``uniform(0.0, 10.0)`` per image, pixel and channel. This is the
    *recommended* configuration.

    """
    # Normalize lambda, then delegate to AddElementwise with a sign-flipped
    # Poisson parameter (Poisson alone would only brighten images).
    lam_param = iap.handle_continuous_param(
        lam, "lam", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)
    noise = iap.RandomSign(iap.Poisson(lam=lam_param))
    return AddElementwise(noise, per_channel=per_channel, name=name,
                          deterministic=deterministic, random_state=random_state)
class Multiply(meta.Augmenter):
"""
Multiply all pixels in an image with a specific value.
This augmenter can be used to make images lighter or darker.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no
* ``uint64``: no
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: no
* ``int64``: no
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: no
* ``float128``: no
* ``bool``: yes; tested
Note: tests were only conducted for rather small multipliers, around -10.0 to +10.0.
In general, the multipliers sampled from `mul` must be in a value range that corresponds to
the input image's dtype. E.g. if the input image has dtype uint16 and the samples generated
from `mul` are float64, this augmenter will still force all samples to be within the value
range of float16, as it has the same number of bytes (two) as uint16. This is done to
make overflows less likely to occur.
Parameters
----------
mul : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
The value with which to multiply the pixel values in each image.
* If a number, then that value will always be used.
* If a tuple ``(a, b)``, then a value from the range ``a <= x <= b`` will
be sampled per image and used for all pixels.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then that parameter will be used to
sample a new value per image.
per_channel : bool or float, optional
Whether to use the same multiplier per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Multiply(2.0)
would multiply all images by a factor of 2, making the images
significantly brighter.
>>> aug = iaa.Multiply((0.5, 1.5))
would multiply images by a random value from the range ``0.5 <= x <= 1.5``,
making some images darker and others brighter.
"""
def __init__(self, mul=1.0, per_channel=False, name=None, deterministic=False, random_state=None):
    """Create a new Multiply augmenter.

    ``mul`` (number, ``(a, b)`` tuple, list or StochasticParameter) and
    ``per_channel`` (bool or probability float) are normalized here into
    stochastic parameters for uniform sampling later on.
    """
    super(Multiply, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
    # Normalize the user-facing arguments once, up front.
    mul_param = iap.handle_continuous_param(
        mul, "mul",
        value_range=None,
        tuple_to_uniform=True,
        list_to_choice=True,
    )
    self.mul = mul_param
    self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
def _augment_images(self, images, random_state, parents, hooks):
iadt.gate_dtypes(images,
allowed=["bool", "uint8", "uint16", "int8", "int16", "float16", "float32"],
disallowed=["uint32", "uint64", "uint128", "uint256", "int32", "int64", "int128", "int256",
"float64", "float96", "float128", "float256"],
augmenter=self)
input_dtypes = iadt.copy_dtypes_for_restore(images, force_list=True)
nb_images = len(images)
nb_channels_max = meta.estimate_max_number_of_channels(images)
rss = ia.derive_random_states(random_state, 2)
mul_samples = self.mul.draw_samples((nb_images, nb_channels_max), random_state=rss[0])
per_channel_samples = self.per_channel.draw_samples((nb_images,), random_state=rss[1])
gen = enumerate(zip(images, mul_samples, per_channel_samples, input_dtypes))
for i, (image, mul_samples_i, per_channel_samples_i, input_dtype) in gen:
nb_channels = image.shape[2]
# Example code to directly multiply images via image*sample (uint8 only) -- apparently slower than LUT
# if per_channel_samples_i > 0.5:
# result = []
# image = image.astype(np.float32)
# mul_samples_i = mul_samples_i.astype(np.float32)
# for c, mul in enumerate(mul_samples_i[0:nb_channels]):
# result.append(np.clip(image[..., c:c+1] * mul, 0, 255).astype(np.uint8))
# images[i] = np.concatenate(result, axis=2)
# else:
# images[i] = np.clip(
# image.astype(np.float32) * mul_samples_i[0].astype(np.float32), 0, 255).astype(np.uint8)
if image.dtype.name == "uint8":
# Using this LUT approach is significantly faster than else-block code (more than 10x speedup)
# and is still faster than the simpler image*sample approach without LUT (1.5-3x speedup,
# maybe dependent on installed BLAS libraries?)
value_range = np.arange(0, 256, dtype=np.float32)
if per_channel_samples_i > 0.5:
result = []
mul_samples_i = mul_samples_i.astype(np.float32)
tables = np.tile(value_range[np.newaxis, :], (nb_channels, 1)) \
* mul_samples_i[0:nb_channels, np.newaxis]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/flip.py | augmentation/image_augmentation/helpers/imgaug/augmenters/flip.py | """
Augmenters that apply mirroring/flipping operations to images.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Fliplr((0.0, 1.0)),
iaa.Flipud((0.0, 1.0))
])
List of augmenters:
* Fliplr
* Flipud
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from . import meta
from .. import parameters as iap
def HorizontalFlip(*args, **kwargs):
    """Alias for Fliplr."""
    # Pure delegation; this name exists only for readability.
    aug = Fliplr(*args, **kwargs)
    return aug
def VerticalFlip(*args, **kwargs):
    """Alias for Flipud."""
    # Pure delegation; this name exists only for readability.
    aug = Flipud(*args, **kwargs)
    return aug
class Fliplr(meta.Augmenter):  # pylint: disable=locally-disabled, unused-variable, line-too-long
    """
    Flip/mirror input images horizontally.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: yes; tested
        * ``uint64``: yes; tested
        * ``int8``: yes; tested
        * ``int16``: yes; tested
        * ``int32``: yes; tested
        * ``int64``: yes; tested
        * ``float16``: yes; tested
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: yes; tested
        * ``bool``: yes; tested

    Parameters
    ----------
    p : number or imgaug.parameters.StochasticParameter, optional
        Probability of each image to get flipped.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Fliplr(0.5)

    would horizontally flip/mirror 50 percent of all input images.

    >>> aug = iaa.Fliplr(1.0)

    would horizontally flip/mirror all input images.

    """

    def __init__(self, p=0, name=None, deterministic=False, random_state=None):
        super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        self.p = iap.handle_probability_param(p, "p")

    def _augment_images(self, images, random_state, parents, hooks):
        """Flip each image left-right when its sampled probability exceeds 0.5."""
        nb_images = len(images)
        samples = self.p.draw_samples((nb_images,), random_state=random_state)
        for i, (image, sample) in enumerate(zip(images, samples)):
            if sample > 0.5:
                images[i] = np.fliplr(image)
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Flip the heatmap arrays with the exact same sampling logic as the
        # images (same random_state), keeping both aligned.
        arrs_flipped = self._augment_images(
            [heatmaps_i.arr_0to1 for heatmaps_i in heatmaps],
            random_state=random_state,
            parents=parents,
            hooks=hooks
        )
        for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped):
            heatmaps_i.arr_0to1 = arr_flipped
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        """Mirror keypoint x-coordinates for every image that was flipped."""
        nb_images = len(keypoints_on_images)
        samples = self.p.draw_samples((nb_images,), random_state=random_state)
        for i, keypoints_on_image in enumerate(keypoints_on_images):
            if not keypoints_on_image.keypoints:
                continue
            # BUG FIX: use the same ">0.5" threshold as _augment_images.
            # This previously compared "samples[i] == 1", which could flip an
            # image without flipping its keypoints whenever the sampled
            # probability was in (0.5, 1.0), desynchronizing the annotations.
            elif samples[i] > 0.5:
                width = keypoints_on_image.shape[1]
                for keypoint in keypoints_on_image.keypoints:
                    keypoint.x = width - float(keypoint.x)
        return keypoints_on_images

    def _augment_polygons(self, polygons_on_images, random_state, parents,
                          hooks):
        # TODO maybe reverse the order of points afterwards? the flip probably inverts them
        return self._augment_polygons_as_keypoints(
            polygons_on_images, random_state, parents, hooks)

    def get_parameters(self):
        return [self.p]
# TODO merge with Fliplr
class Flipud(meta.Augmenter):  # pylint: disable=locally-disabled, unused-variable, line-too-long
    """
    Flip/mirror input images vertically.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: yes; tested
        * ``uint64``: yes; tested
        * ``int8``: yes; tested
        * ``int16``: yes; tested
        * ``int32``: yes; tested
        * ``int64``: yes; tested
        * ``float16``: yes; tested
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: yes; tested
        * ``bool``: yes; tested

    Parameters
    ----------
    p : number or imgaug.parameters.StochasticParameter, optional
        Probability of each image to get flipped.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Flipud(0.5)

    would vertically flip/mirror 50 percent of all input images.

    >>> aug = iaa.Flipud(1.0)

    would vertically flip/mirror all input images.

    """

    def __init__(self, p=0, name=None, deterministic=False, random_state=None):
        super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        self.p = iap.handle_probability_param(p, "p")

    def _augment_images(self, images, random_state, parents, hooks):
        """Flip each image top-bottom when its sampled probability exceeds 0.5."""
        nb_images = len(images)
        samples = self.p.draw_samples((nb_images,), random_state=random_state)
        for i, (image, sample) in enumerate(zip(images, samples)):
            if sample > 0.5:
                images[i] = np.flipud(image)
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Flip the heatmap arrays with the exact same sampling logic as the
        # images (same random_state), keeping both aligned.
        arrs_flipped = self._augment_images(
            [heatmaps_i.arr_0to1 for heatmaps_i in heatmaps],
            random_state=random_state,
            parents=parents,
            hooks=hooks
        )
        for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped):
            heatmaps_i.arr_0to1 = arr_flipped
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        """Mirror keypoint y-coordinates for every image that was flipped."""
        nb_images = len(keypoints_on_images)
        samples = self.p.draw_samples((nb_images,), random_state=random_state)
        for i, keypoints_on_image in enumerate(keypoints_on_images):
            if not keypoints_on_image.keypoints:
                continue
            # BUG FIX: use the same ">0.5" threshold as _augment_images.
            # This previously compared "samples[i] == 1", which could flip an
            # image without flipping its keypoints whenever the sampled
            # probability was in (0.5, 1.0), desynchronizing the annotations.
            elif samples[i] > 0.5:
                height = keypoints_on_image.shape[0]
                for keypoint in keypoints_on_image.keypoints:
                    keypoint.y = height - float(keypoint.y)
        return keypoints_on_images

    def _augment_polygons(self, polygons_on_images, random_state, parents,
                          hooks):
        # TODO how does flipping affect the point order?
        return self._augment_polygons_as_keypoints(
            polygons_on_images, random_state, parents, hooks)

    def get_parameters(self):
        return [self.p]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/size.py | augmentation/image_augmentation/helpers/imgaug/augmenters/size.py | """
Augmenters that somehow change the size of the images.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Resize({"height": 32, "width": 64})
iaa.Crop((0, 20))
])
List of augmenters:
* Resize
* CropAndPad
* Crop
* Pad
* PadToFixedSize
* CropToFixedSize
* KeepSizeByResize
"""
from __future__ import print_function, division, absolute_import
import re
import numpy as np
import six.moves as sm
from . import meta
import imgaug as ia
from .. import parameters as iap
# TODO somehow integrate this with ia.pad()
def _handle_pad_mode_param(pad_mode):
    """Normalize a user-supplied pad mode into a StochasticParameter.

    Accepts ``ia.ALL``, a single mode name, a list of mode names or an
    already-built StochasticParameter; raises for anything else.
    """
    pad_modes_available = {"constant", "edge", "linear_ramp", "maximum", "mean", "median", "minimum", "reflect",
                           "symmetric", "wrap"}
    if pad_mode == ia.ALL:
        # sample uniformly over every known mode
        return iap.Choice(list(pad_modes_available))
    if ia.is_string(pad_mode):
        ia.do_assert(
            pad_mode in pad_modes_available,
            "Value '%s' is not a valid pad mode. Valid pad modes are: %s." % (pad_mode, ", ".join(pad_modes_available))
        )
        return iap.Deterministic(pad_mode)
    if isinstance(pad_mode, list):
        ia.do_assert(
            all(v in pad_modes_available for v in pad_mode),
            "At least one in list %s is not a valid pad mode. Valid pad modes are: %s." % (
                str(pad_mode), ", ".join(pad_modes_available))
        )
        return iap.Choice(pad_mode)
    if isinstance(pad_mode, iap.StochasticParameter):
        return pad_mode
    raise Exception("Expected pad_mode to be ia.ALL or string or list of strings or StochasticParameter, got %s." % (
        type(pad_mode),))
def _crop_prevent_zero_size(height, width, crop_top, crop_right, crop_bottom, crop_left):
remaining_height = height - (crop_top + crop_bottom)
remaining_width = width - (crop_left + crop_right)
if remaining_height < 1:
regain = abs(remaining_height) + 1
regain_top = regain // 2
regain_bottom = regain // 2
if regain_top + regain_bottom < regain:
regain_top += 1
if regain_top > crop_top:
diff = regain_top - crop_top
regain_top = crop_top
regain_bottom += diff
elif regain_bottom > crop_bottom:
diff = regain_bottom - crop_bottom
regain_bottom = crop_bottom
regain_top += diff
ia.do_assert(regain_top <= crop_top)
ia.do_assert(regain_bottom <= crop_bottom)
crop_top = crop_top - regain_top
crop_bottom = crop_bottom - regain_bottom
if remaining_width < 1:
regain = abs(remaining_width) + 1
regain_right = regain // 2
regain_left = regain // 2
if regain_right + regain_left < regain:
regain_right += 1
if regain_right > crop_right:
diff = regain_right - crop_right
regain_right = crop_right
regain_left += diff
elif regain_left > crop_left:
diff = regain_left - crop_left
regain_left = crop_left
regain_right += diff
ia.do_assert(regain_right <= crop_right)
ia.do_assert(regain_left <= crop_left)
crop_right = crop_right - regain_right
crop_left = crop_left - regain_left
return crop_top, crop_right, crop_bottom, crop_left
def _handle_position_parameter(position):
    """Normalize a position argument into ``(x_param, y_param)``.

    Accepts the strings "uniform"/"normal"/"center", a "<x>-<y>" keyword
    such as "left-top", a StochasticParameter, or a tuple of two numbers
    and/or StochasticParameters. Raises for anything else.
    """
    if position == "uniform":
        return iap.Uniform(0.0, 1.0), iap.Uniform(0.0, 1.0)
    if position == "normal":
        # gaussian centered on the image middle, clipped to the valid range
        x_param = iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2), minval=0.0, maxval=1.0)
        y_param = iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2), minval=0.0, maxval=1.0)
        return x_param, y_param
    if position == "center":
        return iap.Deterministic(0.5), iap.Deterministic(0.5)
    if ia.is_string(position) and re.match(r"^(left|center|right)-(top|center|bottom)$", position):
        mapping = {"top": 0.0, "center": 0.5, "bottom": 1.0, "left": 0.0, "right": 1.0}
        x_str, y_str = position.split("-")
        return iap.Deterministic(mapping[x_str]), iap.Deterministic(mapping[y_str])
    if isinstance(position, iap.StochasticParameter):
        return position
    if isinstance(position, tuple):
        ia.do_assert(
            len(position) == 2,
            "Expected tuple with two entries as position parameter. Got %d entries with types %s.." % (
                len(position), str([type(el) for el in position])
            ))
        for el in position:
            if ia.is_single_number(el) and (el < 0 or el > 1.0):
                raise Exception(
                    "Both position values must be within the value range [0.0, 1.0]. Got type %s with value %.8f." % (
                        type(el), el,)
                )
        # plain numbers become Deterministic; parameters pass through
        as_params = [iap.Deterministic(el) if ia.is_single_number(el) else el for el in position]
        ia.do_assert(
            all([isinstance(el, iap.StochasticParameter) for el in as_params]),
            "Expected tuple with two entries that are both either StochasticParameter or float/int. Got types %s." % (
                str([type(el) for el in as_params])
            )
        )
        return tuple(as_params)
    raise Exception(
        ("Expected one of the following as position parameter: string 'uniform', string 'normal', string 'center', "
         + "a string matching regex ^(left|center|right)-(top|center|bottom)$, a single StochasticParameter or a "
         + "tuple of two entries, both being either StochasticParameter or floats or int. Got instead type %s with "
         + "content '%s'.") % (
            type(position), str(position) if len(str(position)) < 20 else str(position)[0:20] + "..."
        )
    )
@ia.deprecated(alt_func="Resize",
               comment="Resize has the exactly same interface as Scale.")
def Scale(*args, **kwargs):
    # Deprecated alias kept for backwards compatibility; forwards to Resize.
    return Resize(*args, **kwargs)
class Resize(meta.Augmenter):
    """
    Augmenter that resizes images to specified heights and widths.
    dtype support::
        See :func:`imgaug.imgaug.imresize_many_images`.
    Parameters
    ----------
    size : 'keep' or int or float or tuple of int or tuple of float or list of int or list of float or\
    imgaug.parameters.StochasticParameter or dict
        The new size of the images.
            * If this has the string value "keep", the original height and
              width values will be kept (image is not resized).
            * If this is an integer, this value will always be used as the new
              height and width of the images.
            * If this is a float v, then per image the image's height H and
              width W will be changed to ``H*v`` and ``W*v``.
            * If this is a tuple, it is expected to have two entries ``(a, b)``.
              If at least one of these are floats, a value will be sampled from
              range ``[a, b]`` and used as the float value to resize the image
              (see above). If both are integers, a value will be sampled from
              the discrete range ``[a..b]`` and used as the integer value
              to resize the image (see above).
            * If this is a list, a random value from the list will be picked
              to resize the image. All values in the list must be integers or
              floats (no mixture is possible).
            * If this is a StochasticParameter, then this parameter will first
              be queried once per image. The resulting value will be used
              for both height and width.
            * If this is a dictionary, it may contain the keys "height" and
              "width". Each key may have the same datatypes as above and
              describes the scaling on x and y-axis. Both axis are sampled
              independently. Additionally, one of the keys may have the value
              "keep-aspect-ratio", which means that the respective side of the
              image will be resized so that the original aspect ratio is kept.
              This is useful when only resizing one image size by a pixel
              value (e.g. resize images to a height of 64 pixels and resize
              the width so that the overall aspect ratio is maintained).
    interpolation : imgaug.ALL or int or str or list of int or list of str or imgaug.parameters.StochasticParameter,\
    optional
        Interpolation to use.
            * If imgaug.ALL, then a random interpolation from ``nearest``,
              ``linear``, ``area`` or ``cubic`` will be picked (per image).
            * If int, then this interpolation will always be used.
              Expected to be any of the following:
              ``cv2.INTER_NEAREST``, ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``,
              ``cv2.INTER_CUBIC``
            * If string, then this interpolation will always be used.
              Expected to be any of the following:
              ``nearest``, ``linear``, ``area``, ``cubic``
            * If list of ints/strings, then a random one of the values will be
              picked per image as the interpolation.
            * If a StochasticParameter, then this parameter will be queried per
              image and is expected to return an integer or string.
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    Examples
    --------
    >>> aug = iaa.Resize(32)
    resizes all images to ``32x32`` pixels.
    >>> aug = iaa.Resize(0.5)
    resizes all images to 50 percent of their original size.
    >>> aug = iaa.Resize((16, 22))
    resizes all images to a random height and width within the
    discrete range ``16<=x<=22``.
    >>> aug = iaa.Resize((0.5, 0.75))
    resizes all image's height and width to ``H*v`` and ``W*v``,
    where ``v`` is randomly sampled from the range ``0.5<=x<=0.75``.
    >>> aug = iaa.Resize([16, 32, 64])
    resizes all images either to ``16x16``, ``32x32`` or ``64x64`` pixels.
    >>> aug = iaa.Resize({"height": 32})
    resizes all images to a height of 32 pixels and keeps the original
    width.
    >>> aug = iaa.Resize({"height": 32, "width": 48})
    resizes all images to a height of 32 pixels and a width of 48.
    >>> aug = iaa.Resize({"height": 32, "width": "keep-aspect-ratio"})
    resizes all images to a height of 32 pixels and resizes the x-axis
    (width) so that the aspect ratio is maintained.
    >>> aug = iaa.Resize({"height": (0.5, 0.75), "width": [16, 32, 64]})
    resizes all images to a height of ``H*v``, where ``H`` is the original height
    and v is a random value sampled from the range ``0.5<=x<=0.75``.
    The width/x-axis of each image is resized to either 16 or 32 or
    64 pixels.
    >>> aug = iaa.Resize(32, interpolation=["linear", "cubic"])
    resizes all images to ``32x32`` pixels. Randomly uses either ``linear``
    or ``cubic`` interpolation.
    """
    def __init__(self, size, interpolation="cubic", name=None, deterministic=False, random_state=None):
        super(Resize, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        # "handle" normalizes a user-provided size spec into either a single
        # StochasticParameter or, for dicts, a (height_param, width_param) tuple.
        def handle(val, allow_dict):
            if val == "keep":
                return iap.Deterministic("keep")
            elif ia.is_single_integer(val):
                ia.do_assert(val > 0)
                return iap.Deterministic(val)
            elif ia.is_single_float(val):
                ia.do_assert(val > 0)
                return iap.Deterministic(val)
            elif allow_dict and isinstance(val, dict):
                if len(val.keys()) == 0:
                    return iap.Deterministic("keep")
                else:
                    ia.do_assert(all([key in ["height", "width"] for key in val.keys()]))
                    if "height" in val and "width" in val:
                        # at most one of the two sides may be "keep-aspect-ratio"
                        ia.do_assert(val["height"] != "keep-aspect-ratio" or val["width"] != "keep-aspect-ratio")
                    size_tuple = []
                    for k in ["height", "width"]:
                        if k in val:
                            if val[k] == "keep-aspect-ratio" or val[k] == "keep":
                                entry = iap.Deterministic(val[k])
                            else:
                                # recurse without dict support (no nested dicts)
                                entry = handle(val[k], False)
                        else:
                            entry = iap.Deterministic("keep")
                        size_tuple.append(entry)
                    return tuple(size_tuple)
            elif isinstance(val, tuple):
                ia.do_assert(len(val) == 2)
                ia.do_assert(val[0] > 0 and val[1] > 0)
                if ia.is_single_float(val[0]) or ia.is_single_float(val[1]):
                    return iap.Uniform(val[0], val[1])
                else:
                    return iap.DiscreteUniform(val[0], val[1])
            elif isinstance(val, list):
                if len(val) == 0:
                    return iap.Deterministic("keep")
                else:
                    all_int = all([ia.is_single_integer(v) for v in val])
                    all_float = all([ia.is_single_float(v) for v in val])
                    ia.do_assert(all_int or all_float)
                    ia.do_assert(all([v > 0 for v in val]))
                    return iap.Choice(val)
            elif isinstance(val, iap.StochasticParameter):
                return val
            else:
                raise Exception(
                    "Expected number, tuple of two numbers, list of numbers, dictionary of "
                    "form {'height': number/tuple/list/'keep-aspect-ratio'/'keep', "
                    "'width': <analogous>}, or StochasticParameter, got %s." % (type(val),)
                )
        self.size = handle(size, True)
        if interpolation == ia.ALL:
            self.interpolation = iap.Choice(["nearest", "linear", "area", "cubic"])
        elif ia.is_single_integer(interpolation):
            self.interpolation = iap.Deterministic(interpolation)
        elif ia.is_string(interpolation):
            self.interpolation = iap.Deterministic(interpolation)
        elif ia.is_iterable(interpolation):
            self.interpolation = iap.Choice(interpolation)
        elif isinstance(interpolation, iap.StochasticParameter):
            self.interpolation = interpolation
        else:
            raise Exception("Expected int or string or iterable or StochasticParameter, got %s." % (
                type(interpolation),))
    def _augment_images(self, images, random_state, parents, hooks):
        result = []
        nb_images = len(images)
        samples_h, samples_w, samples_ip = self._draw_samples(nb_images, random_state, do_sample_ip=True)
        for i in sm.xrange(nb_images):
            image = images[i]
            sample_h, sample_w, sample_ip = samples_h[i], samples_w[i], samples_ip[i]
            h, w = self._compute_height_width(image.shape, sample_h, sample_w)
            image_rs = ia.imresize_single_image(image, (h, w), interpolation=sample_ip)
            result.append(image_rs)
        if not isinstance(images, list):
            # input was an array: rebuild an array iff all outputs share one shape
            all_same_size = (len(set([image.shape for image in result])) == 1)
            if all_same_size:
                # NOTE(review): dtype is forced to uint8 here — confirm behavior
                # for non-uint8 input arrays
                result = np.array(result, dtype=np.uint8)
        return result
    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        result = []
        nb_heatmaps = len(heatmaps)
        samples_h, samples_w, samples_ip = self._draw_samples(nb_heatmaps, random_state, do_sample_ip=True)
        for i in sm.xrange(nb_heatmaps):
            heatmaps_i = heatmaps[i]
            sample_h, sample_w, sample_ip = samples_h[i], samples_w[i], samples_ip[i]
            # new size of the underlying image the heatmap is attached to
            h_img, w_img = self._compute_height_width(heatmaps_i.shape, sample_h, sample_w)
            # scale the heatmap array proportionally, since it may have a
            # different resolution than the image it describes
            h = int(np.round(h_img * (heatmaps_i.arr_0to1.shape[0] / heatmaps_i.shape[0])))
            w = int(np.round(w_img * (heatmaps_i.arr_0to1.shape[1] / heatmaps_i.shape[1])))
            h = max(h, 1)
            w = max(w, 1)
            heatmaps_i_resized = heatmaps_i.resize((h, w), interpolation=sample_ip)
            heatmaps_i_resized.shape = (h_img, w_img) + heatmaps_i.shape[2:]
            result.append(heatmaps_i_resized)
        return result
    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        result = []
        nb_images = len(keypoints_on_images)
        samples_h, samples_w, _samples_ip = self._draw_samples(nb_images, random_state, do_sample_ip=False)
        for i in sm.xrange(nb_images):
            keypoints_on_image = keypoints_on_images[i]
            sample_h, sample_w = samples_h[i], samples_w[i]
            h, w = self._compute_height_width(keypoints_on_image.shape, sample_h, sample_w)
            new_shape = (h, w) + keypoints_on_image.shape[2:]
            # .on() projects the keypoint coordinates onto the new image shape
            keypoints_on_image_rs = keypoints_on_image.on(new_shape)
            result.append(keypoints_on_image_rs)
        return result
    def _augment_polygons(self, polygons_on_images, random_state, parents,
                          hooks):
        # polygons are resized via their corner points
        return self._augment_polygons_as_keypoints(
            polygons_on_images, random_state, parents, hooks)
    def _draw_samples(self, nb_images, random_state, do_sample_ip=True):
        # derive fixed offsets from one seed so that images, heatmaps and
        # keypoints of the same batch sample identical sizes
        seed = random_state.randint(0, 10**6, 1)[0]
        if isinstance(self.size, tuple):
            samples_h = self.size[0].draw_samples(nb_images, random_state=ia.new_random_state(seed + 0))
            samples_w = self.size[1].draw_samples(nb_images, random_state=ia.new_random_state(seed + 1))
        else:
            samples_h = self.size.draw_samples(nb_images, random_state=ia.new_random_state(seed + 0))
            samples_w = samples_h
        if do_sample_ip:
            samples_ip = self.interpolation.draw_samples(nb_images, random_state=ia.new_random_state(seed + 2))
        else:
            samples_ip = None
        return samples_h, samples_w, samples_ip
    @classmethod
    def _compute_height_width(cls, image_shape, sample_h, sample_w):
        # sampled values may be ints (absolute px), floats (relative factor)
        # or the strings "keep"/"keep-aspect-ratio"
        imh, imw = image_shape[0:2]
        h, w = sample_h, sample_w
        if ia.is_single_float(h):
            ia.do_assert(0 < h)
            h = int(np.round(imh * h))
            h = h if h > 0 else 1
        elif h == "keep":
            h = imh
        if ia.is_single_float(w):
            ia.do_assert(0 < w)
            w = int(np.round(imw * w))
            w = w if w > 0 else 1
        elif w == "keep":
            w = imw
        # at least the checks for keep-aspect-ratio must come after
        # the float checks, as they are dependent on the results
        # this is also why these are not written as elifs
        if h == "keep-aspect-ratio":
            h_per_w_orig = imh / imw
            h = int(np.round(w * h_per_w_orig))
        if w == "keep-aspect-ratio":
            w_per_h_orig = imw / imh
            w = int(np.round(h * w_per_h_orig))
        return h, w
    def get_parameters(self):
        # size and interpolation parameters, in that order
        return [self.size, self.interpolation]
class CropAndPad(meta.Augmenter):
"""
Augmenter that crops/pads images by defined amounts in pixels or
percent (relative to input image size).
Cropping removes pixels at the sides (i.e. extracts a subimage from
a given full image). Padding adds pixels to the sides (e.g. black pixels).
dtype support::
if (keep_size=False)::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested
* ``uint64``: yes; tested
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: yes; tested
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: yes; tested
* ``bool``: yes; tested
if (keep_size=True)::
minimum of (
``imgaug.augmenters.size.CropAndPad(keep_size=False)``,
:func:`imgaug.imgaug.imresize_many_images`
)
Parameters
----------
px : None or int or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to crop (negative values) or pad (positive values)
on each side of the image. Either this or the parameter `percent` may
be set, not both at the same time.
* If None, then pixel-based cropping/padding will not be used.
* If int, then that exact number of pixels will always be
cropped/padded.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left), unless `sample_independently` is set to False, as then
only one value will be sampled per image and used for all sides.
* If a tuple of two ints with values ``a`` and ``b``, then each
side will be cropped/padded by a random amount in the range
``a <= x <= b``. ``x`` is sampled per image side. If however
`sample_independently` is set to False, only one value will be
sampled per image and used for all sides.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single integer (always crop/pad
by exactly that value), a tuple of two ints ``a`` and ``b``
(crop/pad by an amount ``a <= x <= b``), a list of ints (crop/pad
by a random value that is contained in the list) or a
StochasticParameter (sample the amount to crop/pad from that
parameter).
percent : None or int or float or imgaug.parameters.StochasticParameter \
or tuple, optional
The number of pixels to crop (negative values) or pad (positive values)
on each side of the image given *in percent* of the image height/width.
E.g. if this is set to 0.1, the augmenter will always crop away 10
percent of the image's height at the top, 10 percent of the width on
the right, 10 percent of the height at the bottom and 10 percent of
the width on the left. Either this or the parameter `px` may be set,
not both at the same time.
* If None, then percent-based cropping/padding will not be used.
* If int, then expected to be 0 (no cropping/padding).
* If float, then that percentage will always be cropped/padded.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left). If however `sample_independently` is set to False, only
one value will be sampled per image and used for all sides.
* If a tuple of two floats with values ``a`` and ``b``, then each
side will be cropped/padded by a random percentage in the range
``a <= x <= b``. ``x`` is sampled per image side.
If however `sample_independently` is set to False, only one value
will be sampled per image and used for all sides.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single float (always crop/pad
by exactly that percent value), a tuple of two floats ``a`` and
``b`` (crop/pad by a percentage ``a <= x <= b``), a list of
floats (crop by a random value that is contained in the list) or
a StochasticParameter (sample the percentage to crop/pad from
that parameter).
pad_mode : imgaug.ALL or str or list of str or \
imgaug.parameters.StochasticParameter, optional
Padding mode to use. The available modes match the numpy padding modes,
i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``,
``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes
``constant`` and ``linear_ramp`` use extra values, which are provided
by ``pad_cval`` when necessary. See :func:`imgaug.imgaug.pad` for
more details.
* If ``imgaug.ALL``, then a random mode from all available modes
will be sampled per image.
* If a string, it will be used as the pad mode for all images.
* If a list of strings, a random one of these will be sampled per
image and used as the mode.
* If StochasticParameter, a random mode will be sampled from this
parameter per image.
pad_cval : number or tuple of number list of number or \
imgaug.parameters.StochasticParameter, optional
The constant value to use if the pad mode is ``constant`` or the end
value to use if the mode is ``linear_ramp``.
See :func:`imgaug.imgaug.pad` for more details.
* If number, then that value will be used.
* If a tuple of two numbers and at least one of them is a float,
then a random number will be sampled from the continuous range
``a <= x <= b`` and used as the value. If both numbers are
integers, the range is discrete.
* If a list of number, then a random value will be chosen from the
elements of the list and used as the value.
* If StochasticParameter, a random value will be sampled from that
parameter per image.
keep_size : bool, optional
After cropping and padding, the result image will usually have a
different height/width compared to the original input image. If this
parameter is set to True, then the cropped/padded image will be resized
to the input image's size, i.e. the augmenter's output shape is always
identical to the input shape.
sample_independently : bool, optional
If False AND the values for `px`/`percent` result in exactly one
probability distribution for the amount to crop/pad, only one single
value will be sampled from that probability distribution and used for
all sides. I.e. the crop/pad amount then is the same for all sides.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CropAndPad(px=(-10, 0))
crops each side by a random value from the range -10px to 0px (the value
is sampled per side).
>>> aug = iaa.CropAndPad(px=(0, 10))
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). The padding happens by zero-padding (i.e. adds
black pixels).
>>> aug = iaa.CropAndPad(px=(0, 10), pad_mode="edge")
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). The padding uses the ``edge`` mode from numpy's
pad function.
>>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=["constant", "edge"])
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). The padding uses randomly either the ``constant``
or ``edge`` mode from numpy's pad function.
>>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). It uses a random mode for numpy's pad function.
If the mode is ``constant`` or ``linear_ramp``, it samples a random value
``v`` from the range ``[0, 255]`` and uses that as the constant
value (``mode=constant``) or end value (``mode=linear_ramp``).
>>> aug = iaa.CropAndPad(px=(0, 10), sample_independently=False)
samples one value v from the discrete range ``[0..10]`` and pads all sides
by v pixels.
>>> aug = iaa.CropAndPad(px=(0, 10), keep_size=False)
pads each side by a random value from the range 0px to 10px (the value
is sampled per side). After padding, the images are NOT resized to
their original size (i.e. the images may end up having different
heights/widths).
>>> aug = iaa.CropAndPad(px=((0, 10), (0, 5), (0, 10), (0, 5)))
pads the top and bottom by a random value from the range 0px to 10px
and the left and right by a random value in the range 0px to 5px.
>>> aug = iaa.CropAndPad(percent=(0, 0.1))
pads each side by a random value from the range 0 percent to
10 percent. (Percent with respect to the side's size, e.g. for the
top side it uses the image's height.)
>>> aug = iaa.CropAndPad(percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))
pads each side by either 5 percent or 10 percent.
>>> aug = iaa.CropAndPad(px=(-10, 10))
samples per side and image a value ``v`` from the discrete range ``[-10..10]``
and either crops (negative value) or pads (positive value) the side
by ``v`` pixels.
"""
def __init__(self, px=None, percent=None, pad_mode="constant", pad_cval=0, keep_size=True,
sample_independently=True, name=None, deterministic=False, random_state=None):
super(CropAndPad, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.all_sides = None
self.top = None
self.right = None
self.bottom = None
self.left = None
if px is None and percent is None:
self.mode = "noop"
elif px is not None and percent is not None:
raise Exception("Can only pad by pixels or percent, not both.")
elif px is not None:
self.mode = "px"
if ia.is_single_integer(px):
self.all_sides = iap.Deterministic(px)
elif isinstance(px, tuple):
ia.do_assert(len(px) in [2, 4])
def handle_param(p):
if ia.is_single_integer(p):
return iap.Deterministic(p)
elif isinstance(p, tuple):
ia.do_assert(len(p) == 2)
ia.do_assert(ia.is_single_integer(p[0]))
ia.do_assert(ia.is_single_integer(p[1]))
return iap.DiscreteUniform(p[0], p[1])
elif isinstance(p, list):
ia.do_assert(len(p) > 0)
ia.do_assert(all([ia.is_single_integer(val) for val in p]))
return iap.Choice(p)
elif isinstance(p, iap.StochasticParameter):
return p
else:
raise Exception("Expected int, tuple of two ints, list of ints or StochasticParameter, "
+ "got type %s." % (type(p),))
if len(px) == 2:
self.all_sides = handle_param(px)
else: # len == 4
self.top = handle_param(px[0])
self.right = handle_param(px[1])
self.bottom = handle_param(px[2])
self.left = handle_param(px[3])
elif isinstance(px, iap.StochasticParameter):
self.top = self.right = self.bottom = self.left = px
else:
raise Exception("Expected int, tuple of 4 ints/tuples/lists/StochasticParameters or "
+ "StochasticParameter, got type %s." % (type(px),))
else: # = elif percent is not None:
self.mode = "percent"
if ia.is_single_number(percent):
ia.do_assert(-1.0 < percent)
self.all_sides = iap.Deterministic(percent)
elif isinstance(percent, tuple):
ia.do_assert(len(percent) in [2, 4])
def handle_param(p):
if ia.is_single_number(p):
return iap.Deterministic(p)
elif isinstance(p, tuple):
ia.do_assert(len(p) == 2)
ia.do_assert(ia.is_single_number(p[0]))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/meta.py | augmentation/image_augmentation/helpers/imgaug/augmenters/meta.py | """
Augmenters that don't apply augmentations themselves, but are needed
for meta usage.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([...])
List of augmenters:
* Augmenter (base class for all augmenters)
* Sequential
* SomeOf
* OneOf
* Sometimes
* WithChannels
* Noop
* Lambda
* AssertLambda
* AssertShape
* ChannelShuffle
Note: WithColorspace is in ``color.py``.
"""
from __future__ import print_function, division, absolute_import
import warnings
from abc import ABCMeta, abstractmethod
import copy as copy_module
import re
import itertools
import sys
import numpy as np
import six
import six.moves as sm
import imgaug as ia
from .. import parameters as iap
from imgaug.augmentables.batches import Batch, UnnormalizedBatch
def clip_augmented_image_(image, min_value, max_value):
    # In-place clip of a single augmented image; delegates to the batch variant.
    return clip_augmented_images_(image, min_value, max_value)
def clip_augmented_image(image, min_value, max_value):
    # Copy-then-clip of a single augmented image; delegates to the batch variant.
    return clip_augmented_images(image, min_value, max_value)
def clip_augmented_images_(images, min_value, max_value):
    """Clip image values to ``[min_value, max_value]`` in-place.

    Accepts either a single ndarray batch or a list of ndarrays.
    """
    if not ia.is_np_array(images):
        # list of arrays: clip each array in-place
        return [np.clip(image, min_value, max_value, out=image) for image in images]
    return np.clip(images, min_value, max_value, out=images)
def clip_augmented_images(images, min_value, max_value):
    """Clip image values to ``[min_value, max_value]``, returning copies.

    The input array/list is left unmodified; clipping happens on copies.
    """
    if ia.is_np_array(images):
        copies = np.copy(images)
    else:
        copies = [np.copy(image) for image in images]
    return clip_augmented_images_(copies, min_value, max_value)
def handle_children_list(lst, augmenter_name, lst_name, default="sequential"):
    """Normalize a children argument (None/Augmenter/iterable) into an Augmenter.

    ``None`` and empty iterables fall back to an empty Sequential, unless a
    non-"sequential" ``default`` was provided.
    """
    if lst is None:
        if default != "sequential":
            return default
        return Sequential([], name="%s-%s" % (augmenter_name, lst_name))
    if isinstance(lst, Augmenter):
        if ia.is_iterable(lst):
            # TODO why was this assert added here? seems to make no sense
            ia.do_assert(all([isinstance(child, Augmenter) for child in lst]))
            return lst
        return Sequential(lst, name="%s-%s" % (augmenter_name, lst_name))
    if ia.is_iterable(lst):
        if len(lst) == 0 and default != "sequential":
            return default
        ia.do_assert(all([isinstance(child, Augmenter) for child in lst]))
        return Sequential(lst, name="%s-%s" % (augmenter_name, lst_name))
    raise Exception(("Expected None, Augmenter or list/tuple as children list %s for augmenter with name %s, "
                     + "got %s.") % (lst_name, augmenter_name, type(lst),))
def reduce_to_nonempty(objs):
    """Drop objects whose ``empty`` attribute is True.

    Returns the kept objects together with their indices in the input, so
    that the reduction can later be inverted via invert_reduce_to_nonempty().
    """
    kept = []
    kept_indices = []
    for index, obj in enumerate(objs):
        # every object must expose an "empty" flag
        ia.do_assert(hasattr(obj, "empty"))
        if obj.empty:
            continue
        kept.append(obj)
        kept_indices.append(index)
    return kept, kept_indices
def invert_reduce_to_nonempty(objs, ids, objs_reduced):
    """Merge processed objects back into the original sequence.

    ``objs_reduced[k]`` replaces the element at position ``ids[k]``; all
    other elements of ``objs`` are kept unchanged. Returns a new list.
    """
    result = list(objs)
    for position, replacement in zip(ids, objs_reduced):
        result[position] = replacement
    return result
def estimate_max_number_of_channels(images):
    """Return the largest channel count among the given images.

    Accepts an (N,H,W,C) array or an iterable of per-image arrays; returns
    None for an empty iterable. Arrays with fewer than 3 axes count as
    single-channel.
    """
    if ia.is_np_array(images):
        # a single array is expected to be a (N,H,W,C) batch
        assert images.ndim == 4
        return images.shape[3]
    assert ia.is_iterable(images)
    if len(images) == 0:
        return None
    return max(el.shape[2] if len(el.shape) >= 3 else 1 for el in images)
def copy_arrays(arrays):
    """Return a copy of an ndarray, or copies of each ndarray in an iterable."""
    if ia.is_np_array(arrays):
        return np.copy(arrays)
    assert ia.is_iterable(arrays), "Expected ndarray or iterable of ndarray, got type %s." % (type(arrays),)
    return [np.copy(arr) for arr in arrays]
@six.add_metaclass(ABCMeta)
class Augmenter(object): # pylint: disable=locally-disabled, unused-variable, line-too-long
"""
Base class for Augmenter objects.
All augmenters derive from this class.
"""
def __init__(self, name=None, deterministic=False, random_state=None):
"""
Create a new Augmenter instance.
Parameters
----------
name : None or str, optional
Name given to an Augmenter object. This name is used in ``print()``
statements as well as find and remove functions.
If None, ``UnnamedX`` will be used as the name, where ``X`` is the
Augmenter's class name.
deterministic : bool, optional
Whether the augmenter instance's random state will be saved before
augmenting images and then reset to that saved state after an
augmentation (of multiple images/keypoints) is finished.
I.e. if set to True, each batch of images will be augmented in the
same way (e.g. first image might always be flipped horizontally,
second image will never be flipped etc.).
This is useful when you want to transform multiple batches of images
in the same way, or when you want to augment images and keypoints
on these images.
Usually, there is no need to set this variable by hand. Instead,
instantiate the augmenter with the defaults and then use
:func:`imgaug.augmenters.Augmenter.to_deterministic`.
random_state : None or int or numpy.random.RandomState, optional
The random state to use for this augmenter.
* If int, a new ``numpy.random.RandomState`` will be created using this
value as the seed.
* If ``numpy.random.RandomState`` instance, the instance will be used directly.
* If None, imgaug's default RandomState will be used, which's state can
be controlled using ``imgaug.seed(int)``.
Usually there is no need to set this variable by hand. Instead,
instantiate the augmenter with the defaults and then use
:func:`imgaug.augmenters.Augmenter.to_deterministic`.
"""
super(Augmenter, self).__init__()
ia.do_assert(name is None or ia.is_string(name),
"Expected name to be None or string-like, got %s." % (type(name),))
if name is None:
self.name = "Unnamed%s" % (self.__class__.__name__,)
else:
self.name = name
ia.do_assert(ia.is_single_bool(deterministic),
"Expected deterministic to be a boolean, got %s." % (type(deterministic),))
self.deterministic = deterministic
if random_state is None:
if self.deterministic:
self.random_state = ia.new_random_state()
else:
self.random_state = ia.current_random_state()
elif isinstance(random_state, np.random.RandomState):
self.random_state = random_state
else:
self.random_state = np.random.RandomState(random_state)
self.activated = True
def augment_batches(self, batches, hooks=None, background=False):
    """
    Augment multiple batches.

    In contrast to other augment functions, this function _yields_ batches
    instead of just returning a full list. This is more suited for most
    training loops. It also supports augmentation on multiple cpu cores,
    activated via the `background` flag.

    Parameters
    ----------
    batches : imgaug.augmentables.batches.Batch \
              or imgaug.augmentables.batches.UnnormalizedBatch \
              or iterable of imgaug.augmentables.batches.Batch \
              or iterable of imgaug.augmentables.batches.UnnormalizedBatch
        A single batch or a list of batches to augment.

    hooks : None or imgaug.HooksImages, optional
        HooksImages object to dynamically interfere with the augmentation
        process.

    background : bool, optional
        Whether to augment the batches in background processes.
        If true, hooks can currently not be used as that would require
        pickling functions.
        Note that multicore augmentation distributes the batches onto
        different CPU cores. It does not split the data within batches.
        It is therefore not sensible to use ``background=True`` for a
        single batch.
        Note also that multicore augmentation needs some time to start. It
        is therefore not recommended to use it for very few batches.

    Yields
    -------
    imgaug.augmentables.batches.Batch \
              or imgaug.augmentables.batches.UnnormalizedBatch \
              or iterable of imgaug.augmentables.batches.Batch \
              or iterable of imgaug.augmentables.batches.UnnormalizedBatch
        Augmented batches.

    """
    # A single batch instance is wrapped in a list so the code below can
    # treat the input uniformly as an iterable of batches.
    if isinstance(batches, (Batch, UnnormalizedBatch)):
        batches = [batches]
    ia.do_assert(
        (ia.is_iterable(batches)
         and not ia.is_np_array(batches)
         and not ia.is_string(batches))
        or ia.is_generator(batches),
        ("Expected either (a) an iterable that is not an array or a string "
         + "or (b) a generator. Got: %s") % (type(batches),))
    if background:
        # Hooks contain arbitrary callables, which the multicore workers
        # would have to pickle -- not supported, hence forbidden here.
        ia.do_assert(
            hooks is None,
            "Hooks can not be used when background augmentation is "
            "activated.")

    def _normalize_batch(idx, batch):
        # Convert any accepted (incl. legacy) input datatype into a Batch
        # instance and remember the original datatype string so the result
        # can be converted back by _unnormalize_batch(). The batch index
        # `idx` is smuggled through via the batch's `data` attribute so
        # that results of background augmentation (which may arrive out of
        # order) can be matched back to their inputs.
        if isinstance(batch, Batch):
            batch_copy = batch.deepcopy()
            batch_copy.data = (idx, batch_copy.data)
            batch_normalized = batch_copy
            batch_orig_dt = "imgaug.Batch"
        elif isinstance(batch, UnnormalizedBatch):
            batch_copy = batch.to_normalized_batch()
            batch_copy.data = (idx, batch_copy.data)
            batch_normalized = batch_copy
            batch_orig_dt = "imgaug.UnnormalizedBatch"
        elif ia.is_np_array(batch):
            ia.do_assert(
                batch.ndim in (3, 4),
                ("Expected numpy array to have shape (N, H, W) or "
                 + "(N, H, W, C), got %s.") % (batch.shape,))
            batch_normalized = Batch(images=batch, data=(idx,))
            batch_orig_dt = "numpy_array"
        elif isinstance(batch, list):
            # Legacy support: a list of augmentables. The type of the
            # first element decides how the whole list is interpreted.
            if len(batch) == 0:
                batch_normalized = Batch(data=(idx,))
                batch_orig_dt = "empty_list"
            elif ia.is_np_array(batch[0]):
                batch_normalized = Batch(images=batch, data=(idx,))
                batch_orig_dt = "list_of_numpy_arrays"
            elif isinstance(batch[0], ia.HeatmapsOnImage):
                batch_normalized = Batch(heatmaps=batch, data=(idx,))
                batch_orig_dt = "list_of_imgaug.HeatmapsOnImage"
            elif isinstance(batch[0], ia.SegmentationMapOnImage):
                batch_normalized = Batch(segmentation_maps=batch,
                                         data=(idx,))
                batch_orig_dt = "list_of_imgaug.SegmentationMapOnImage"
            elif isinstance(batch[0], ia.KeypointsOnImage):
                batch_normalized = Batch(keypoints=batch, data=(idx,))
                batch_orig_dt = "list_of_imgaug.KeypointsOnImage"
            elif isinstance(batch[0], ia.BoundingBoxesOnImage):
                batch_normalized = Batch(bounding_boxes=batch, data=(idx,))
                batch_orig_dt = "list_of_imgaug.BoundingBoxesOnImage"
            elif isinstance(batch[0], ia.PolygonsOnImage):
                batch_normalized = Batch(polygons=batch, data=(idx,))
                batch_orig_dt = "list_of_imgaug.PolygonsOnImage"
            else:
                raise Exception(
                    ("Unknown datatype in batch[0]. Expected numpy array "
                     + "or imgaug.HeatmapsOnImage or "
                     + "imgaug.SegmentationMapOnImage or "
                     + "imgaug.KeypointsOnImage or "
                     + "imgaug.BoundingBoxesOnImage, "
                     + "or imgaug.PolygonsOnImage, "
                     + "got %s.") % (type(batch[0]),))
        else:
            raise Exception(
                ("Unknown datatype of batch. Expected imgaug.Batch or "
                 + "imgaug.UnnormalizedBatch or "
                 + "numpy array or list of (numpy array or "
                 + "imgaug.HeatmapsOnImage or "
                 + "imgaug.SegmentationMapOnImage "
                 + "or imgaug.KeypointsOnImage or "
                 + "imgaug.BoundingBoxesOnImage or "
                 + "imgaug.PolygonsOnImage). Got %s.") % (type(batch),))

        if batch_orig_dt not in ["imgaug.Batch",
                                 "imgaug.UnnormalizedBatch"]:
            ia.warn_deprecated(
                ("Received an input in augment_batches() that was not an "
                 + "instance of imgaug.augmentables.batches.Batch "
                 + "or imgaug.augmentables.batches.UnnormalizedBatch, but "
                 + "instead %s. This is outdated. Use augment() for such "
                 + "data or wrap it in a Batch instance.") % (
                    batch_orig_dt,))
        return batch_normalized, batch_orig_dt

    # unnormalization of non-Batch/UnnormalizedBatch is for legacy support
    def _unnormalize_batch(batch_aug, batch_orig, batch_orig_dt):
        # Convert the augmented Batch back to the datatype of the original
        # input, as recorded by _normalize_batch().
        if batch_orig_dt == "imgaug.Batch":
            batch_unnormalized = batch_aug
            # change (i, .data) back to just .data
            batch_unnormalized.data = batch_unnormalized.data[1]
        elif batch_orig_dt == "imgaug.UnnormalizedBatch":
            # change (i, .data) back to just .data
            batch_aug.data = batch_aug.data[1]
            batch_unnormalized = \
                batch_orig.fill_from_augmented_normalized_batch(batch_aug)
        elif batch_orig_dt == "numpy_array":
            batch_unnormalized = batch_aug.images_aug
        elif batch_orig_dt == "empty_list":
            batch_unnormalized = []
        elif batch_orig_dt == "list_of_numpy_arrays":
            batch_unnormalized = batch_aug.images_aug
        elif batch_orig_dt == "list_of_imgaug.HeatmapsOnImage":
            batch_unnormalized = batch_aug.heatmaps_aug
        elif batch_orig_dt == "list_of_imgaug.SegmentationMapOnImage":
            batch_unnormalized = batch_aug.segmentation_maps_aug
        elif batch_orig_dt == "list_of_imgaug.KeypointsOnImage":
            batch_unnormalized = batch_aug.keypoints_aug
        elif batch_orig_dt == "list_of_imgaug.BoundingBoxesOnImage":
            batch_unnormalized = batch_aug.bounding_boxes_aug
        else:  # only option left
            ia.do_assert(batch_orig_dt == "list_of_imgaug.PolygonsOnImage")
            batch_unnormalized = batch_aug.polygons_aug
        return batch_unnormalized

    if not background:
        # singlecore augmentation
        for idx, batch in enumerate(batches):
            batch_normalized, batch_orig_dt = _normalize_batch(idx, batch)
            batch_normalized = self.augment_batch(
                batch_normalized, hooks=hooks)
            batch_unnormalized = _unnormalize_batch(
                batch_normalized, batch, batch_orig_dt)
            yield batch_unnormalized
    else:
        # multicore augmentation
        # imported lazily to avoid pulling in multiprocessing machinery
        # for the common singlecore path
        import imgaug.multicore as multicore

        # maps batch index -> (original batch, original datatype string);
        # needed because the pool may yield results in arbitrary order
        id_to_batch_orig = dict()

        def load_batches():
            for idx, batch in enumerate(batches):
                batch_normalized, batch_orig_dt = _normalize_batch(
                    idx, batch)
                id_to_batch_orig[idx] = (batch, batch_orig_dt)
                yield batch_normalized

        with multicore.Pool(self) as pool:
            for batch_aug in pool.imap_batches(load_batches()):
                # recover the index smuggled through .data
                idx = batch_aug.data[0]
                assert idx in id_to_batch_orig
                batch_orig, batch_orig_dt = id_to_batch_orig[idx]
                batch_unnormalized = _unnormalize_batch(
                    batch_aug, batch_orig, batch_orig_dt)
                # free the reference to keep memory bounded
                del id_to_batch_orig[idx]
                yield batch_unnormalized
def augment_batch(self, batch, hooks=None):
    """
    Augment a single batch.

    Parameters
    ----------
    batch : imgaug.augmentables.batches.Batch \
            or imgaug.augmentables.batches.UnnormalizedBatch
        A single batch to augment.

    hooks : None or imgaug.HooksImages, optional
        HooksImages object to dynamically interfere with the augmentation
        process.

    Returns
    -------
    imgaug.augmentables.batches.Batch \
            or imgaug.augmentables.batches.UnnormalizedBatch
        Augmented batch, of the same type as the input.

    """
    input_batch = batch
    was_unnormalized = isinstance(input_batch, UnnormalizedBatch)
    if was_unnormalized:
        batch = input_batch.to_normalized_batch()

    # Collect (column name, data) pairs for every augmentable column that
    # is actually filled, e.g. ("images", <array>) from "images_unaug".
    suffix = "_unaug"
    columns = []
    for attr_name, attr_value in batch.__dict__.items():
        if attr_name.endswith(suffix) and attr_value is not None:
            columns.append((attr_name[:-len(suffix)], attr_value))

    # With more than one column, the same random state must be applied to
    # all of them so that e.g. images and keypoints stay aligned.
    augmenter = (
        self.to_deterministic()
        if len(columns) > 1 and not self.deterministic
        else self)

    # Fill batch.<column>_aug with the result of augment_<column>() for
    # every non-None batch.<column>_unaug.
    for column_name, column_value in columns:
        augment_func = getattr(augmenter, "augment_%s" % (column_name,))
        setattr(batch, "%s_aug" % (column_name,),
                augment_func(column_value, hooks=hooks))

    if was_unnormalized:
        batch = input_batch.fill_from_augmented_normalized_batch(batch)
    return batch
def augment_image(self, image, hooks=None):
    """
    Augment a single image.

    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        The image to augment.
        Channel-axis is optional, but expected to be the last axis if
        present.
        In most cases, this array should be of dtype ``uint8``, which is
        supported by all augmenters. Support for other dtypes varies by
        augmenter -- see the respective augmenter-specific documentation
        for more details.

    hooks : None or imgaug.HooksImages, optional
        HooksImages object to dynamically interfere with the augmentation
        process.

    Returns
    -------
    ndarray
        The corresponding augmented image.

    """
    ia.do_assert(
        image.ndim in (2, 3),
        "Expected image to have shape (height, width, [channels]), "
        "got shape %s." % (image.shape,))
    # Delegate to the batch API and unwrap the single result.
    images_aug = self.augment_images([image], hooks=hooks)
    return images_aug[0]
def augment_images(self, images, parents=None, hooks=None):
    """
    Augment multiple images.

    Parameters
    ----------
    images : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        Images to augment.
        The input can be a list of numpy arrays or a single array. Each array is expected to
        have shape ``(H, W, C)`` or ``(H, W)``, where H is the height, ``W`` is the width and
        ``C`` are the channels. Number of channels may differ between images.
        If a list is chosen, height and width may differ per between images.
        In most cases, this array (or these arrays) should be of dtype ``uint8``, which is
        supported by all augmenters. Support for other dtypes varies by augmenter -- see the
        respective augmenter-specific documentation for more details.

    parents : None or list of imgaug.augmenters.Augmenter, optional
        Parent augmenters that have previously been called before the
        call to this function. Usually you can leave this parameter as None.
        It is set automatically for child augmenters.

    hooks : None or imgaug.HooksImages, optional
        HooksImages object to dynamically interfere with the augmentation process.

    Returns
    -------
    images_result : ndarray or list
        Corresponding augmented images.

    """
    if parents is not None and len(parents) > 0 and hooks is None:
        # This is a child call. The data has already been validated and copied. We don't need to copy it again
        # for hooks, as these don't exist. So we can augment here fully in-place.
        if not self.activated or len(images) == 0:
            return images

        if self.deterministic:
            # save the RNG state so it can be restored below, making
            # repeated calls produce identical augmentations
            state_orig = self.random_state.get_state()
        images_result = self._augment_images(
            images,
            random_state=ia.copy_random_state(self.random_state),
            parents=parents,
            hooks=hooks
        )
        # move "forward" the random state, so that the next call to
        # augment_images() will use different random values
        ia.forward_random_state(self.random_state)
        if self.deterministic:
            self.random_state.set_state(state_orig)

        return images_result

    #
    # Everything below is for non-in-place augmentation.
    # It was either the first call (no parents) or hooks were provided.
    #
    if self.deterministic:
        state_orig = self.random_state.get_state()

    if parents is None:
        parents = []

    # Normalize the input to a copied, channel-axis-carrying form.
    # `input_type`/`input_added_axis` record what was done so the output
    # can be converted back at the end.
    if ia.is_np_array(images):
        input_type = "array"
        input_added_axis = False

        ia.do_assert(images.ndim in [3, 4],
                     "Expected 3d/4d array of form (N, height, width) or (N, height, width, channels), "
                     "got shape %s." % (images.shape,))

        # copy the input, we don't want to augment it in-place
        images_copy = np.copy(images)

        # Heuristic: a (H, W, 1) or (H, W, 3) array was likely a single
        # image mistakenly passed to the batch API -- warn, don't fail.
        if images_copy.ndim == 3 and images_copy.shape[-1] in [1, 3]:
            warnings.warn("You provided a numpy array of shape %s as input to augment_images(), "
                          "which was interpreted as (N, H, W). The last dimension however has "
                          "value 1 or 3, which indicates that you provided a single image "
                          "with shape (H, W, C) instead. If that is the case, you should use "
                          "augment_image(image) or augment_images([image]), otherwise "
                          "you will not get the expected augmentations." % (images_copy.shape,))

        # for 2D input images (i.e. shape (N, H, W)), we add a channel axis (i.e. (N, H, W, 1)),
        # so that all augmenters can rely on the input having a channel axis and
        # don't have to add if/else statements for 2D images
        if images_copy.ndim == 3:
            images_copy = images_copy[..., np.newaxis]
            input_added_axis = True
    elif ia.is_iterable(images):
        input_type = "list"
        input_added_axis = []

        if len(images) == 0:
            images_copy = []
        else:
            ia.do_assert(all(image.ndim in [2, 3] for image in images),
                         "Expected list of images with each image having shape (height, width) or "
                         + "(height, width, channels), got shapes %s." % ([image.shape for image in images],))

            # copy images and add channel axis for 2D images (see above,
            # as for list inputs each image can have different shape, it
            # is done here on a per images basis)
            images_copy = []
            input_added_axis = []
            for image in images:
                image_copy = np.copy(image)
                if image.ndim == 2:
                    image_copy = image_copy[:, :, np.newaxis]
                    input_added_axis.append(True)
                else:
                    input_added_axis.append(False)
                images_copy.append(image_copy)
    else:
        raise Exception("Expected images as one numpy array or list/tuple of numpy arrays, got %s." % (
            type(images),))

    if hooks is not None:
        images_copy = hooks.preprocess(images_copy, augmenter=self, parents=parents)

    # the is_activated() call allows to use hooks that selectively
    # deactivate specific augmenters in previously defined augmentation
    # sequences
    if (hooks is None and self.activated) \
            or (hooks is not None
                and hooks.is_activated(images_copy, augmenter=self, parents=parents, default=self.activated)):
        if len(images) > 0:
            images_result = self._augment_images(
                images_copy,
                random_state=ia.copy_random_state(self.random_state),
                parents=parents,
                hooks=hooks
            )
            # move "forward" the random state, so that the next call to
            # augment_images() will use different random values
            ia.forward_random_state(self.random_state)
        else:
            images_result = images_copy
    else:
        # deactivated (by hooks or .activated flag): pass data through
        images_result = images_copy

    if hooks is not None:
        images_result = hooks.postprocess(images_result, augmenter=self, parents=parents)

    # remove temporarily added channel axis for 2D input images
    output_type = "list" if isinstance(images_result, list) else "array"
    if input_type == "array":
        if input_added_axis is True:
            if output_type == "array":
                images_result = np.squeeze(images_result, axis=3)
            else:
                # augmenters/hooks may turn an array into a list of
                # differently-shaped images; squeeze each one separately
                images_result = [np.squeeze(image, axis=2) for image in images_result]
    else:  # if input_type == "list":
        # This test was removed for now because hooks can change the type
        # ia.do_assert(
        #    isinstance(images_result, list),
        #    "INTERNAL ERROR: Input was list, output was expected to be list too "
        #    "but got %s." % (type(images_result),)
        # )
        ia.do_assert(
            len(images_result) == len(images),
            "INTERNAL ERROR: Expected number of images to be unchanged after augmentation, "
            "but was changed from %d to %d." % (len(images), len(images_result))
        )
        for i in sm.xrange(len(images_result)):
            if input_added_axis[i] is True:
                images_result[i] = np.squeeze(images_result[i], axis=2)

    if self.deterministic:
        self.random_state.set_state(state_orig)

    return images_result
@abstractmethod
def _augment_images(self, images, random_state, parents, hooks):
    """
    Augment multiple images.

    This is the internal variation of `augment_images()`.
    It is called from `augment_images()` and should usually not be called
    directly.
    It has to be implemented by every augmenter.
    This method may transform the images in-place.
    This method does not have to care about determinism or the
    Augmenter instance's ``random_state`` variable. The parameter
    ``random_state`` takes care of both of these.

    Parameters
    ----------
    images : (N,H,W,C) ndarray or list of (H,W,C) ndarray
        Images to augment.
        They may be changed in-place.
        Either a list of ``(H, W, C)`` arrays or a single ``(N, H, W, C)`` array,
        where ``N`` is the number of images, ``H`` is the height of images, ``W``
        is the width of images and ``C`` is the number of channels of images.
        In the case of a list as input, ``H``, ``W`` and ``C`` may change per image.

    random_state : numpy.random.RandomState
        The random state to use for all sampling tasks during the augmentation.

    parents : list of imgaug.augmenters.meta.Augmenter
        See :func:`imgaug.augmenters.meta.Augmenter.augment_images`.

    hooks : imgaug.HooksImages or None
        See :func:`imgaug.augmenters.meta.Augmenter.augment_images`.

    Returns
    -------
    images : (N,H,W,C) ndarray or list of (H,W,C) ndarray
        The augmented images.

    """
    raise NotImplementedError()
def augment_heatmaps(self, heatmaps, parents=None, hooks=None):
    """
    Augment heatmaps.

    Parameters
    ----------
    heatmaps : imgaug.HeatmapsOnImage or list of imgaug.HeatmapsOnImage
        Heatmap(s) to augment. Either a single heatmap or a list of
        heatmaps.

    parents : None or list of imgaug.augmenters.meta.Augmenter, optional
        Parent augmenters that have previously been called before the
        call to this function. Usually you can leave this parameter as
        None. It is set automatically for child augmenters.

    hooks : None or imaug.HooksHeatmaps, optional
        HooksHeatmaps object to dynamically interfere with the
        augmentation process.

    Returns
    -------
    imgaug.HeatmapsOnImage or list of imgaug.HeatmapsOnImage
        Corresponding augmented heatmap(s), matching the input's
        single-instance/list form.

    """
    # In deterministic mode the RNG state is restored afterwards so that
    # repeated calls produce identical augmentations.
    if self.deterministic:
        saved_state = self.random_state.get_state()

    parents = [] if parents is None else parents

    # Normalize a single heatmap instance to a one-element list and
    # remember that, so the output can be unwrapped again at the end.
    single_instance = isinstance(heatmaps, ia.HeatmapsOnImage)
    if single_instance:
        heatmaps = [heatmaps]

    ia.do_assert(
        ia.is_iterable(heatmaps),
        "Expected to get list of imgaug.HeatmapsOnImage() instances, got %s." % (type(heatmaps),))
    ia.do_assert(
        all([isinstance(heatmaps_i, ia.HeatmapsOnImage) for heatmaps_i in heatmaps]),
        "Expected to get list of imgaug.HeatmapsOnImage() instances, got %s." % (
            [type(el) for el in heatmaps],))

    # Copy the input, but only at the topmost call or when hooks are
    # involved; child calls may augment fully in-place.
    if len(parents) == 0 or hooks is not None:
        heatmaps_in = [heatmaps_i.deepcopy() for heatmaps_i in heatmaps]
    else:
        heatmaps_in = heatmaps

    if hooks is not None:
        heatmaps_in = hooks.preprocess(
            heatmaps_in, augmenter=self, parents=parents)

    # Hooks may selectively deactivate this augmenter within a sequence.
    if hooks is None:
        activated = self.activated
    else:
        activated = hooks.is_activated(
            heatmaps_in, augmenter=self, parents=parents,
            default=self.activated)

    if activated and len(heatmaps_in) > 0:
        heatmaps_out = self._augment_heatmaps(
            heatmaps_in,
            random_state=ia.copy_random_state(self.random_state),
            parents=parents,
            hooks=hooks
        )
        # advance the RNG so the next call samples different values
        ia.forward_random_state(self.random_state)
    else:
        heatmaps_out = heatmaps_in

    if hooks is not None:
        heatmaps_out = hooks.postprocess(
            heatmaps_out, augmenter=self, parents=parents)

    if self.deterministic:
        self.random_state.set_state(saved_state)

    return heatmaps_out[0] if single_instance else heatmaps_out
@abstractmethod
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
"""
Augment heatmaps on multiple images.
This is the internal version of ``augment_heatmaps()``.
It is called from ``augment_heatmaps()`` and should usually not be called
directly.
This method may heatmaps in-place.
This method does not have to care about determinism or the
Augmenter instance's ``random_state`` variable. The parameter
``random_state`` takes care of both of these.
Parameters
----------
heatmaps : list of imgaug.HeatmapsOnImage
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/pooling.py | augmentation/image_augmentation/helpers/imgaug/augmenters/pooling.py | """
Augmenters that apply pooling operations to images.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.AveragePooling((1, 3))
])
List of augmenters:
* AveragePooling
* MaxPooling
* MinPooling
* MedianPooling
"""
from __future__ import print_function, division, absolute_import
from abc import ABCMeta, abstractmethod
import numpy as np
import six
from . import meta
import imgaug as ia
from .. import parameters as iap
@six.add_metaclass(ABCMeta)
class _AbstractPoolingBase(meta.Augmenter):
    """Base class for the pooling augmenters.

    Subclasses implement :func:`_pool_image` to define the actual pooling
    operation (average/max/min/median); this base class handles kernel
    size sampling and the per-image pooling/resizing loop.
    """

    # TODO add floats as ksize denoting fractions of image sizes
    # (note possible overlap with fractional kernel sizes here)
    def __init__(self, kernel_size, keep_size=True,
                 name=None, deterministic=False, random_state=None):
        super(_AbstractPoolingBase, self).__init__(
            name=name, deterministic=deterministic, random_state=random_state)
        # normalized to a (param_height, param_width) tuple; the second
        # entry is None when a single size is used for both axes
        self.kernel_size = iap.handle_discrete_kernel_size_param(
            kernel_size,
            "kernel_size",
            value_range=(0, None),
            allow_floats=False)
        # whether to resize pooled images back to their input size
        self.keep_size = keep_size

    @abstractmethod
    def _pool_image(self, image, kernel_size_h, kernel_size_w):
        """Apply pooling method with given kernel height/width to an image."""

    def _draw_samples(self, augmentables, random_state):
        # Sample one (kernel_height, kernel_width) pair per augmentable.
        # NOTE: the derivation/draw order from `rss` is part of the
        # reproducibility contract -- do not reorder.
        nb_images = len(augmentables)
        rss = ia.derive_random_states(random_state, 2)
        mode = "single" if self.kernel_size[1] is None else "two"
        kernel_sizes_h = self.kernel_size[0].draw_samples(
            (nb_images,),
            random_state=rss[0])
        if mode == "single":
            # symmetric kernels: width mirrors the sampled heights
            kernel_sizes_w = kernel_sizes_h
        else:
            kernel_sizes_w = self.kernel_size[1].draw_samples(
                (nb_images,), random_state=rss[1])
        return kernel_sizes_h, kernel_sizes_w

    def _augment_images(self, images, random_state, parents, hooks):
        if not self.keep_size:
            # without resizing, output shapes may differ per image, so an
            # (N,H,W,C) array cannot hold the results -- switch to a list
            images = list(images)

        kernel_sizes_h, kernel_sizes_w = self._draw_samples(
            images, random_state)

        gen = enumerate(zip(images, kernel_sizes_h, kernel_sizes_w))
        for i, (image, ksize_h, ksize_w) in gen:
            # a 1x1 (or smaller) kernel is a no-op; skip the work
            if ksize_h >= 2 or ksize_w >= 2:
                image_pooled = self._pool_image(
                    image,
                    max(ksize_h, 1), max(ksize_w, 1)
                )
                if self.keep_size:
                    image_pooled = ia.imresize_single_image(
                        image_pooled, image.shape[0:2])
                images[i] = image_pooled

        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # pylint: disable=no-self-use
        # For some reason pylint raises a warning here, which it doesn't seem
        # to do for other classes that also implement this method with self use.
        # Pooling intentionally leaves heatmaps unchanged.
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents,
                           hooks):
        # pylint: disable=no-self-use
        # For some reason pylint raises a warning here, which it doesn't seem
        # to do for other classes that also implement this method with self use.
        # Pooling intentionally leaves keypoints unchanged.
        return keypoints_on_images

    def get_parameters(self):
        return [self.kernel_size, self.keep_size]
# TODO rename kernel size parameters in all augmenters to kernel_size
# TODO add per_channel
# TODO add upscaling interpolation mode?
# TODO add dtype support
class AveragePooling(_AbstractPoolingBase):
    """
    Apply average pooling to images.

    Each image is divided into windows of size ``H x W`` (the kernel
    size) and every window is replaced by the mean of its pixel values.
    A ``2 x 2`` kernel hence halves the image's height and width.
    By default (``keep_size=True``) the pooled image is automatically
    upscaled back to the input image's size afterwards, which produces a
    "pixelated"/"blocky" look.

    Heatmaps, segmentation maps and coordinate-based augmentables
    (keypoints, bounding boxes, ...) are not affected by this augmenter.

    This augmenter is closely related to ``AverageBlur``: ``AverageBlur``
    averages over windows of the given kernel size *without* striding,
    while ``AveragePooling`` strides by the kernel size, with optional
    upscaling afterwards.

    Parameters
    ----------
    kernel_size : int or tuple of int or list of int \
            or imgaug.parameters.StochasticParameter \
            or tuple of tuple of int or tuple of list of int \
            or tuple of imgaug.parameters.StochasticParameter, optional
        The kernel size of the pooling operation.

        * If an int, that value is used for both kernel height and width
          for all images.
        * If a tuple ``(a, b)``, a value from the discrete range
          ``[a..b]`` is sampled per image.
        * If a list, a random value is sampled from that list per image
          and used for both kernel height and width.
        * If a StochasticParameter, a value is sampled from it per image
          and used for both kernel height and width.
        * If a tuple of tuples ``((a, b), (c, d))``, two values are
          sampled independently from ``[a..b]`` and ``[c..d]`` per image
          and used as kernel height and width.
        * If a tuple of lists of int, two values are sampled
          independently per image, one from each list, and used as kernel
          height and width.
        * If a tuple of StochasticParameter, two values are sampled
          independently per image, one from each parameter, and used as
          kernel height and width.

    keep_size : bool, optional
        Whether to resize the pooled image back to the input image's
        size, so that the augmenter's output shape always matches its
        input shape.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.AveragePooling(2)

    Always pool with a ``2 x 2`` kernel.

    >>> aug = iaa.AveragePooling(2, keep_size=False)

    Pool with a ``2 x 2`` kernel and do *not* resize back to the input
    size, i.e. output images have half the resolution.

    >>> aug = iaa.AveragePooling([2, 8])

    Pool with a kernel size of either ``2 x 2`` or ``8 x 8``.

    >>> aug = iaa.AveragePooling((1, 7))

    Pool with a symmetric kernel size sampled from ``1 x 1`` (no-op) to
    ``7 x 7``.

    >>> aug = iaa.AveragePooling(((1, 7), (1, 7)))

    Pool with a ``H x W`` kernel where ``H`` and ``W`` are sampled
    independently from ``[1..7]``, e.g. ``3 x 7`` or ``5 x 1``.

    """

    # TODO add floats as ksize denoting fractions of image sizes
    # (note possible overlap with fractional kernel sizes here)
    def __init__(self, kernel_size, keep_size=True,
                 name=None, deterministic=False, random_state=None):
        super(AveragePooling, self).__init__(
            kernel_size=kernel_size, keep_size=keep_size,
            name=name, deterministic=deterministic, random_state=random_state)

    def _pool_image(self, image, kernel_size_h, kernel_size_w):
        # TODO extend avg_pool to support pad_mode and set it here
        # to reflection padding
        ksize = (max(kernel_size_h, 1), max(kernel_size_w, 1))
        return ia.avg_pool(image, ksize, cval=128)
class MaxPooling(_AbstractPoolingBase):
    """
    Apply max pooling to images.

    Each image is divided into windows of size ``H x W`` (the kernel
    size) and every window is replaced by the maximum of its pixel
    values, taken channelwise. A ``2 x 2`` kernel hence halves the
    image's height and width. By default (``keep_size=True``) the pooled
    image is automatically upscaled back to the input image's size
    afterwards.

    Heatmaps, segmentation maps and coordinate-based augmentables
    (keypoints, bounding boxes, ...) are not affected by this augmenter.

    Parameters
    ----------
    kernel_size : int or tuple of int or list of int \
            or imgaug.parameters.StochasticParameter \
            or tuple of tuple of int or tuple of list of int \
            or tuple of imgaug.parameters.StochasticParameter, optional
        The kernel size of the pooling operation.

        * If an int, that value is used for both kernel height and width
          for all images.
        * If a tuple ``(a, b)``, a value from the discrete range
          ``[a..b]`` is sampled per image.
        * If a list, a random value is sampled from that list per image
          and used for both kernel height and width.
        * If a StochasticParameter, a value is sampled from it per image
          and used for both kernel height and width.
        * If a tuple of tuples ``((a, b), (c, d))``, two values are
          sampled independently from ``[a..b]`` and ``[c..d]`` per image
          and used as kernel height and width.
        * If a tuple of lists of int, two values are sampled
          independently per image, one from each list, and used as kernel
          height and width.
        * If a tuple of StochasticParameter, two values are sampled
          independently per image, one from each parameter, and used as
          kernel height and width.

    keep_size : bool, optional
        Whether to resize the pooled image back to the input image's
        size, so that the augmenter's output shape always matches its
        input shape.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.MaxPooling(2)

    Always pool with a ``2 x 2`` kernel.

    >>> aug = iaa.MaxPooling(2, keep_size=False)

    Pool with a ``2 x 2`` kernel and do *not* resize back to the input
    size, i.e. output images have half the resolution.

    >>> aug = iaa.MaxPooling([2, 8])

    Pool with a kernel size of either ``2 x 2`` or ``8 x 8``.

    >>> aug = iaa.MaxPooling((1, 7))

    Pool with a symmetric kernel size sampled from ``1 x 1`` (no-op) to
    ``7 x 7``.

    >>> aug = iaa.MaxPooling(((1, 7), (1, 7)))

    Pool with a ``H x W`` kernel where ``H`` and ``W`` are sampled
    independently from ``[1..7]``, e.g. ``3 x 7`` or ``5 x 1``.

    """

    # TODO add floats as ksize denoting fractions of image sizes
    # (note possible overlap with fractional kernel sizes here)
    def __init__(self, kernel_size, keep_size=True,
                 name=None, deterministic=False, random_state=None):
        super(MaxPooling, self).__init__(
            kernel_size=kernel_size, keep_size=keep_size,
            name=name, deterministic=deterministic, random_state=random_state)

    def _pool_image(self, image, kernel_size_h, kernel_size_w):
        # TODO extend max_pool to support pad_mode and set it here
        # to reflection padding
        ksize = (max(kernel_size_h, 1), max(kernel_size_w, 1))
        return ia.max_pool(image, ksize, cval=0)
class MinPooling(_AbstractPoolingBase):
    """
    Apply minimum pooling to images.

    This pools images with kernel sizes ``H x W`` by taking the minimum
    pixel value over windows. For e.g. ``2 x 2`` this halves the image
    size. Optionally, the augmenter will automatically re-upscale the image
    to the input size (by default this is activated).

    The minimum within each pixel window is always taken channelwise.

    This augmenter does not affect heatmaps, segmentation maps or
    coordinates-based augmentables (e.g. keypoints, bounding boxes, ...).

    Attributes
    ----------
    kernel_size : int or tuple of int or list of int \
                  or imgaug.parameters.StochasticParameter \
                  or tuple of tuple of int or tuple of list of int \
                  or tuple of imgaug.parameters.StochasticParameter, optional
        The kernel size of the pooling operation.

            * If an int, then that value will be used for all images for both
              kernel height and width.
            * If a tuple ``(a, b)``, then a value from the discrete range
              ``[a..b]`` will be sampled per image.
            * If a list, then a random value will be sampled from that list
              per image and used for both kernel height and width.
            * If a StochasticParameter, then a value will be sampled per
              image from that parameter and used for both kernel height and
              width.
            * If a tuple of tuple of int given as ``((a, b), (c, d))``, then
              two values will be sampled independently from the discrete
              ranges ``[a..b]`` and ``[c..d]`` per image and used as the
              kernel height and width.
            * If a tuple of lists of int, then two values will be sampled
              independently per image, one from the first list and one from
              the second, and used as the kernel height and width.
            * If a tuple of StochasticParameter, then two values will be
              sampled independently per image, one from the first parameter
              and one from the second, and used as the kernel height and
              width.

    keep_size : bool, optional
        After pooling, the result image will usually have a different
        height/width compared to the original input image. If this
        parameter is set to True, then the pooled image will be resized
        to the input image's size, i.e. the augmenter's output shape is
        always identical to the input shape.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = MinPooling(2)

    Creates an augmenter that always pools with a kernel size of ``2 x 2``.

    >>> aug = MinPooling(2, keep_size=False)

    Creates an augmenter that always pools with a kernel size of ``2 x 2``
    and does *not* resize back to the input image size, i.e. the resulting
    images have half the resolution.

    >>> aug = MinPooling([2, 8])

    Creates an augmenter that always pools either with a kernel size
    of ``2 x 2`` or ``8 x 8``.

    >>> aug = MinPooling((1, 7))

    Creates an augmenter that always pools with a kernel size of
    ``1 x 1`` (does nothing) to ``7 x 7``. The kernel sizes are always
    symmetric.

    >>> aug = MinPooling(((1, 7), (1, 7)))

    Creates an augmenter that always pools with a kernel size of
    ``H x W`` where ``H`` and ``W`` are both sampled independently from the
    range ``[1..7]``. E.g. resulting kernel sizes could be ``3 x 7``
    or ``5 x 1``.
    """

    # TODO add floats as ksize denoting fractions of image sizes
    #      (note possible overlap with fractional kernel sizes here)

    def __init__(self, kernel_size, keep_size=True,
                 name=None, deterministic=False, random_state=None):
        """Delegating constructor; see the class docstring for parameters."""
        super(MinPooling, self).__init__(
            kernel_size=kernel_size, keep_size=keep_size,
            name=name, deterministic=deterministic, random_state=random_state)

    def _pool_image(self, image, kernel_size_h, kernel_size_w):
        """Min-pool a single image with the sampled kernel height/width."""
        # TODO extend pool to support pad_mode and set it here
        #      to reflection padding
        # Clamp kernel dims to >=1 for consistency with
        # MaxPooling._pool_image and to protect against degenerate samples.
        # We use cval=255 here so that the minimum is always a pixel value,
        # even if the image was automatically padded (assumes a uint8-like
        # [0..255] value range).
        return ia.pool(
            image,
            (max(kernel_size_h, 1), max(kernel_size_w, 1)),
            np.min,
            cval=255,
            preserve_dtype=True)
class MedianPooling(_AbstractPoolingBase):
    """
    Apply median pooling to images.

    This pools images with kernel sizes ``H x W`` by taking the median
    pixel value over windows. For e.g. ``2 x 2`` this halves the image
    size. Optionally, the augmenter will automatically re-upscale the image
    to the input size (by default this is activated).

    The median within each pixel window is always taken channelwise.

    This augmenter does not affect heatmaps, segmentation maps or
    coordinates-based augmentables (e.g. keypoints, bounding boxes, ...).

    Attributes
    ----------
    kernel_size : int or tuple of int or list of int \
                  or imgaug.parameters.StochasticParameter \
                  or tuple of tuple of int or tuple of list of int \
                  or tuple of imgaug.parameters.StochasticParameter, optional
        The kernel size of the pooling operation.

            * If an int, then that value will be used for all images for both
              kernel height and width.
            * If a tuple ``(a, b)``, then a value from the discrete range
              ``[a..b]`` will be sampled per image.
            * If a list, then a random value will be sampled from that list
              per image and used for both kernel height and width.
            * If a StochasticParameter, then a value will be sampled per
              image from that parameter and used for both kernel height and
              width.
            * If a tuple of tuple of int given as ``((a, b), (c, d))``, then
              two values will be sampled independently from the discrete
              ranges ``[a..b]`` and ``[c..d]`` per image and used as the
              kernel height and width.
            * If a tuple of lists of int, then two values will be sampled
              independently per image, one from the first list and one from
              the second, and used as the kernel height and width.
            * If a tuple of StochasticParameter, then two values will be
              sampled independently per image, one from the first parameter
              and one from the second, and used as the kernel height and
              width.

    keep_size : bool, optional
        After pooling, the result image will usually have a different
        height/width compared to the original input image. If this
        parameter is set to True, then the pooled image will be resized
        to the input image's size, i.e. the augmenter's output shape is
        always identical to the input shape.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = MedianPooling(2)

    Creates an augmenter that always pools with a kernel size of ``2 x 2``.

    >>> aug = MedianPooling(2, keep_size=False)

    Creates an augmenter that always pools with a kernel size of ``2 x 2``
    and does *not* resize back to the input image size, i.e. the resulting
    images have half the resolution.

    >>> aug = MedianPooling([2, 8])

    Creates an augmenter that always pools either with a kernel size
    of ``2 x 2`` or ``8 x 8``.

    >>> aug = MedianPooling((1, 7))

    Creates an augmenter that always pools with a kernel size of
    ``1 x 1`` (does nothing) to ``7 x 7``. The kernel sizes are always
    symmetric.

    >>> aug = MedianPooling(((1, 7), (1, 7)))

    Creates an augmenter that always pools with a kernel size of
    ``H x W`` where ``H`` and ``W`` are both sampled independently from the
    range ``[1..7]``. E.g. resulting kernel sizes could be ``3 x 7``
    or ``5 x 1``.
    """

    # TODO add floats as ksize denoting fractions of image sizes
    #      (note possible overlap with fractional kernel sizes here)

    def __init__(self, kernel_size, keep_size=True,
                 name=None, deterministic=False, random_state=None):
        """Delegating constructor; see the class docstring for parameters."""
        super(MedianPooling, self).__init__(
            kernel_size=kernel_size, keep_size=keep_size,
            name=name, deterministic=deterministic, random_state=random_state)

    def _pool_image(self, image, kernel_size_h, kernel_size_w):
        """Median-pool a single image with the sampled kernel height/width."""
        # TODO extend pool to support pad_mode and set it here
        #      to reflection padding
        # Clamp kernel dims to >=1 for consistency with
        # MaxPooling._pool_image and to protect against degenerate samples.
        # We use cval=128 here to decrease the likelihood of
        # unrepresentative results around the border.
        return ia.pool(
            image,
            (max(kernel_size_h, 1), max(kernel_size_w, 1)),
            np.median,
            cval=128,
            preserve_dtype=True)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/convolutional.py | augmentation/image_augmentation/helpers/imgaug/augmenters/convolutional.py | """
Augmenters that apply convolutions to images.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Sharpen((0.0, 1.0)),
iaa.Emboss((0.0, 1.0))
])
List of augmenters:
* Convolve
* Sharpen
* Emboss
* EdgeDetect
* DirectedEdgeDetect
For MotionBlur, see ``blur.py``.
"""
from __future__ import print_function, division, absolute_import
import types
import numpy as np
import cv2
import six.moves as sm
from . import meta
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
class Convolve(meta.Augmenter):
    """
    Apply a Convolution to input images.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: yes; tested
        * ``uint32``: no (1)
        * ``uint64``: no (2)
        * ``int8``: yes; tested (3)
        * ``int16``: yes; tested
        * ``int32``: no (2)
        * ``int64``: no (2)
        * ``float16``: yes; tested (4)
        * ``float32``: yes; tested
        * ``float64``: yes; tested
        * ``float128``: no (1)
        * ``bool``: yes; tested (4)

        - (1) rejected by ``cv2.filter2D()``.
        - (2) causes error: cv2.error: OpenCV(3.4.2) (...)/filter.cpp:4487:
          error: (-213:The function/feature is not implemented) Unsupported
          combination of source format (=1), and destination format (=1) in
          function 'getLinearFilter'.
        - (3) mapped internally to ``int16``.
        - (4) mapped internally to ``float32``.

    Parameters
    ----------
    matrix : None or (H, W) ndarray or imgaug.parameters.StochasticParameter or callable, optional
        The weight matrix of the convolution kernel to apply.

            * If None, the input images will not be changed.
            * If a numpy array, that array will be used for all images and
              channels as the kernel.
            * If a callable, the parameter will be called for each image
              via ``param(image, C, random_state)``. The function must either
              return a list of ``C`` matrices (i.e. one per channel) or a 2D
              numpy array (will be used for all channels) or a 3D ``HxWxC``
              numpy array. If a list is returned, each entry may be None,
              which will result in no changes to the respective channel.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> matrix = np.array([[0, -1, 0],
    >>>                    [-1, 4, -1],
    >>>                    [0, -1, 0]])
    >>> aug = iaa.Convolve(matrix=matrix)

    convolves all input images with the kernel shown in the `matrix`
    variable.

    >>> def gen_matrix(image, nb_channels, random_state):
    >>>     matrix_A = np.array([[0, -1, 0],
    >>>                          [-1, 4, -1],
    >>>                          [0, -1, 0]])
    >>>     matrix_B = np.array([[0, 1, 0],
    >>>                          [1, -4, 1],
    >>>                          [0, 1, 0]])
    >>>     if image.shape[0] % 2 == 0:
    >>>         return [matrix_A] * nb_channels
    >>>     else:
    >>>         return [matrix_B] * nb_channels
    >>> aug = iaa.Convolve(matrix=gen_matrix)

    convolves images that have an even height with matrix A and images
    with an odd height with matrix B.
    """

    def __init__(self, matrix=None, name=None, deterministic=False, random_state=None):
        super(Convolve, self).__init__(name=name, deterministic=deterministic, random_state=random_state)

        # `matrix_type` records which of the three accepted forms was given,
        # so that _augment_images() can branch without re-inspecting `matrix`.
        if matrix is None:
            self.matrix = None
            self.matrix_type = "None"
        elif ia.is_np_array(matrix):
            ia.do_assert(len(matrix.shape) == 2,
                         "Expected convolution matrix to have 2 axis, got %d (shape %s)." % (
                             len(matrix.shape), matrix.shape))
            self.matrix = matrix
            self.matrix_type = "constant"
        elif isinstance(matrix, types.FunctionType):
            self.matrix = matrix
            self.matrix_type = "function"
        else:
            # Fixed error message: this parameter accepts None, a 2D numpy
            # array or a function -- the previous message described a
            # different parameter contract (float/int/tuple/StochasticParameter).
            raise Exception("Expected None, a 2D numpy array or a function for 'matrix'. Got %s." % (
                type(matrix),))

    def _augment_images(self, images, random_state, parents, hooks):
        # Reject dtypes that cv2.filter2D() cannot handle at all (see the
        # class docstring's dtype support table).
        iadt.gate_dtypes(images,
                         allowed=["bool", "uint8", "uint16", "int8", "int16", "float16", "float32", "float64"],
                         disallowed=["uint32", "uint64", "uint128", "uint256",
                                     "int32", "int64", "int128", "int256",
                                     "float96", "float128", "float256"],
                         augmenter=self)

        # One base seed per batch; each image's matrix-generating callable
        # receives a deterministic, distinct RNG derived as seed+i.
        seed = random_state.randint(0, 10**6, 1)[0]
        for i, image in enumerate(images):
            _height, _width, nb_channels = images[i].shape

            input_dtype = image.dtype
            # cv2.filter2D() cannot process bool/float16/int8 inputs; map
            # them to supported dtypes and restore after filtering.
            if image.dtype.type in [np.bool_, np.float16]:
                image = image.astype(np.float32, copy=False)
            elif image.dtype.type == np.int8:
                image = image.astype(np.int16, copy=False)

            if self.matrix_type == "None":
                matrices = [None] * nb_channels
            elif self.matrix_type == "constant":
                matrices = [self.matrix] * nb_channels
            elif self.matrix_type == "function":
                matrices = self.matrix(images[i], nb_channels, ia.new_random_state(seed+i))
                if ia.is_np_array(matrices) and matrices.ndim == 2:
                    # A single 2D kernel is broadcast over all channels.
                    matrices = np.tile(matrices[..., np.newaxis], (1, 1, nb_channels))

                ia.do_assert(
                    (isinstance(matrices, list) and len(matrices) == nb_channels)
                    or (ia.is_np_array(matrices) and matrices.ndim == 3 and matrices.shape[2] == nb_channels),
                    "Callable provided to Convolve must return either a list of 2D matrices (one per image channel) "
                    "or a 2D numpy array "
                    "or a 3D numpy array where the last dimension's size matches the number of image channels. "
                    "Got type %s." % (type(matrices),)
                )

                if ia.is_np_array(matrices):
                    # Shape of matrices is currently (H, W, C), but in the loop below we need the
                    # first axis to be the channel index to unify handling of lists of arrays
                    # and arrays. So we move the channel axis here to the start.
                    matrices = matrices.transpose((2, 0, 1))
            else:
                raise Exception("Invalid matrix type")

            image_aug = image
            for channel in sm.xrange(nb_channels):
                if matrices[channel] is not None:
                    # ndimage.convolve caused problems here
                    # cv2.filter2D() always returns same output dtype as input dtype
                    image_aug[..., channel] = cv2.filter2D(image_aug[..., channel], -1, matrices[channel])

            if input_dtype == np.bool_:
                image_aug = image_aug > 0.5
            elif input_dtype in [np.int8, np.float16]:
                image_aug = iadt.restore_dtypes_(image_aug, input_dtype)

            images[i] = image_aug
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # TODO this can fail for some matrices, e.g. [[0, 0, 1]]
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # TODO this can fail for some matrices, e.g. [[0, 0, 1]]
        return keypoints_on_images

    def get_parameters(self):
        return [self.matrix, self.matrix_type]
def Sharpen(alpha=0, lightness=1, name=None, deterministic=False, random_state=None):
    """
    Augmenter that sharpens images and blends the result with the original
    image.

    dtype support::

        See ``imgaug.augmenters.convolutional.Convolve``.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Visibility of the sharpened image. At ``0``, only the original image
        is visible, at ``1.0`` only its sharpened version is visible.

            * If an int or float, exactly that value will be used.
            * If a tuple ``(a, b)``, a random value from the range
              ``a <= x <= b`` will be sampled per image.
            * If a list, a random value will be sampled from that list
              per image.
            * If a StochasticParameter, a value will be sampled from the
              parameter per image.

    lightness : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Lightness/brightness of the sharpened image. Sane values are
        somewhere in the range ``(0.5, 2)``. The value ``0`` results in an
        edge map, values higher than ``1`` create bright images. Default
        value is ``1``. Accepts the same input types as `alpha`.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = Sharpen(alpha=(0.0, 1.0))

    sharpens input images and blends the sharpened image by a variable
    amount over the old image.

    >>> aug = Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))

    sharpens input images with a variable lightness in the range
    ``0.75 <= x <= 2.0`` and with a variable alpha.
    """
    alpha_param = iap.handle_continuous_param(
        alpha, "alpha", value_range=(0, 1.0),
        tuple_to_uniform=True, list_to_choice=True)
    lightness_param = iap.handle_continuous_param(
        lightness, "lightness", value_range=(0, None),
        tuple_to_uniform=True, list_to_choice=True)

    def create_matrices(image, nb_channels, random_state_func):
        # Sample the per-image blending factor and lightness.
        alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
        ia.do_assert(0 <= alpha_sample <= 1.0)
        lightness_sample = lightness_param.draw_sample(random_state=random_state_func)

        # Identity kernel (leaves the image unchanged) ...
        identity = np.array(
            [[0, 0, 0],
             [0, 1, 0],
             [0, 0, 0]], dtype=np.float32)
        # ... and the full-strength sharpening kernel.
        sharpening = np.array(
            [[-1, -1, -1],
             [-1, 8 + lightness_sample, -1],
             [-1, -1, -1]], dtype=np.float32)

        # Linearly interpolate between identity and sharpening kernel and
        # use the same blended kernel for every channel.
        blended = (1 - alpha_sample) * identity + alpha_sample * sharpening
        return [blended] * nb_channels

    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    return Convolve(create_matrices, name=name,
                    deterministic=deterministic, random_state=random_state)
def Emboss(alpha=0, strength=1, name=None, deterministic=False, random_state=None):
    """
    Augmenter that embosses images and blends the result with the original
    image.

    The embossed version pronounces highlights and shadows, letting the
    image look as if it was recreated on a metal plate ("embossed").

    dtype support::

        See ``imgaug.augmenters.convolutional.Convolve``.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Visibility of the embossed image. At ``0``, only the original image
        is visible, at ``1.0`` only its embossed version is visible.

            * If an int or float, exactly that value will be used.
            * If a tuple ``(a, b)``, a random value from the range
              ``a <= x <= b`` will be sampled per image.
            * If a list, a random value will be sampled from that list
              per image.
            * If a StochasticParameter, a value will be sampled from the
              parameter per image.

    strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Strength of the embossing. Sane values are somewhere in the range
        ``(0, 2)`` with ``1`` being the standard embossing effect. Default
        value is ``1``. Accepts the same input types as `alpha`.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))

    embosses an image with a variable strength in the range
    ``0.5 <= x <= 1.5`` and blends the result with a variable alpha in the
    range ``0.0 <= a <= 1.0`` over the old image.
    """
    alpha_param = iap.handle_continuous_param(
        alpha, "alpha", value_range=(0, 1.0),
        tuple_to_uniform=True, list_to_choice=True)
    strength_param = iap.handle_continuous_param(
        strength, "strength", value_range=(0, None),
        tuple_to_uniform=True, list_to_choice=True)

    def create_matrices(image, nb_channels, random_state_func):
        # Sample the per-image blending factor and embossing strength.
        alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
        ia.do_assert(0 <= alpha_sample <= 1.0)
        strength_sample = strength_param.draw_sample(random_state=random_state_func)

        # Identity kernel (leaves the image unchanged) ...
        identity = np.array(
            [[0, 0, 0],
             [0, 1, 0],
             [0, 0, 0]], dtype=np.float32)
        # ... and the diagonal embossing kernel, scaled by the strength.
        embossing = np.array(
            [[-1 - strength_sample, 0 - strength_sample, 0],
             [0 - strength_sample, 1, 0 + strength_sample],
             [0, 0 + strength_sample, 1 + strength_sample]], dtype=np.float32)

        # Linearly interpolate between identity and embossing kernel and
        # use the same blended kernel for every channel.
        blended = (1 - alpha_sample) * identity + alpha_sample * embossing
        return [blended] * nb_channels

    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    return Convolve(create_matrices, name=name,
                    deterministic=deterministic, random_state=random_state)
# TODO tests
def EdgeDetect(alpha=0, name=None, deterministic=False, random_state=None):
    """
    Augmenter that detects all edges in images, marks them in a black and
    white image and then blends the result with the original image.

    dtype support::

        See ``imgaug.augmenters.convolutional.Convolve``.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Visibility of the edge image. At ``0``, only the original image is
        visible, at ``1.0`` only the edge image is visible.

            * If an int or float, exactly that value will be used.
            * If a tuple ``(a, b)``, a random value from the range
              ``a <= x <= b`` will be sampled per image.
            * If a list, a random value will be sampled from that list
              per image.
            * If a StochasticParameter, a value will be sampled from the
              parameter per image.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = EdgeDetect(alpha=(0.0, 1.0))

    detects edges in an image and blends the result with a variable alpha
    in the range ``0.0 <= a <= 1.0`` over the old image.
    """
    alpha_param = iap.handle_continuous_param(
        alpha, "alpha", value_range=(0, 1.0),
        tuple_to_uniform=True, list_to_choice=True)

    def create_matrices(_image, nb_channels, random_state_func):
        # Sample the per-image blending factor.
        alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
        ia.do_assert(0 <= alpha_sample <= 1.0)

        # Identity kernel (leaves the image unchanged) ...
        identity = np.array(
            [[0, 0, 0],
             [0, 1, 0],
             [0, 0, 0]], dtype=np.float32)
        # ... and a Laplacian kernel, producing an edge map.
        laplacian = np.array(
            [[0, 1, 0],
             [1, -4, 1],
             [0, 1, 0]], dtype=np.float32)

        # Linearly interpolate between identity and edge kernel and use the
        # same blended kernel for every channel.
        blended = (1 - alpha_sample) * identity + alpha_sample * laplacian
        return [blended] * nb_channels

    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    return Convolve(create_matrices, name=name,
                    deterministic=deterministic, random_state=random_state)
# TODO tests
# TODO merge EdgeDetect and DirectedEdgeDetect?
def DirectedEdgeDetect(alpha=0, direction=(0.0, 1.0), name=None, deterministic=False, random_state=None):
    """
    Augmenter that detects edges that have certain directions, marks them
    in a black and white image and then blends the result with the original
    image.

    dtype support::

        See ``imgaug.augmenters.convolutional.Convolve``.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Visibility of the edge image. At ``0``, only the original image is
        visible, at ``1.0`` only the edge image is visible.

            * If an int or float, exactly that value will be used.
            * If a tuple ``(a, b)``, a random value from the range
              ``a <= x <= b`` will be sampled per image.
            * If a list, a random value will be sampled from that list
              per image.
            * If a StochasticParameter, a value will be sampled from the
              parameter per image.

    direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Angle of edges to pronounce, where ``0`` represents 0 degrees and
        ``1.0`` represents 360 degrees (both clockwise, starting at the
        top). Default value is ``(0.0, 1.0)``, i.e. pick a random angle per
        image. Accepts the same input types as `alpha`.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = DirectedEdgeDetect(alpha=1.0, direction=0)

    turns input images into edge images in which edges are detected from
    the top side of the image (i.e. the top sides of horizontal edges are
    added to the output).

    >>> aug = DirectedEdgeDetect(alpha=1.0, direction=90/360)

    same as before, but detecting edges from the right (right side of each
    vertical edge).

    >>> aug = DirectedEdgeDetect(alpha=1.0, direction=(0.0, 1.0))

    same as before, but detecting edges from a variable direction (anything
    between 0 and 1.0, i.e. 0 degrees and 360 degrees, starting from the
    top and moving clockwise).

    >>> aug = DirectedEdgeDetect(alpha=(0.0, 0.3), direction=0)

    generates edge images (edges detected from the top) and blends them
    with the input images by a variable amount between 0 and 30 percent
    (e.g. for 0.3 then ``0.7*old_image + 0.3*edge_image``).
    """
    alpha_param = iap.handle_continuous_param(
        alpha, "alpha", value_range=(0, 1.0),
        tuple_to_uniform=True, list_to_choice=True)
    direction_param = iap.handle_continuous_param(
        direction, "direction", value_range=None,
        tuple_to_uniform=True, list_to_choice=True)

    def create_matrices(_image, nb_channels, random_state_func):
        # Sample the per-image blending factor and edge direction.
        alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
        ia.do_assert(0 <= alpha_sample <= 1.0)
        direction_sample = direction_param.draw_sample(random_state=random_state_func)

        # Convert the [0.0, 1.0] direction value to a unit vector, with 0
        # pointing at the top of the kernel and angles growing clockwise.
        deg = int(direction_sample * 360) % 360
        rad = np.deg2rad(deg)
        direction_vector = np.array([np.cos(rad - 0.5*np.pi),
                                     np.sin(rad - 0.5*np.pi)])

        # Weight each non-center kernel cell by how well its offset vector
        # aligns with the sampled direction (sharpened via the 4th power).
        edge_kernel = np.zeros((3, 3), dtype=np.float32)
        for cell_x in [-1, 0, 1]:
            for cell_y in [-1, 0, 1]:
                if (cell_x, cell_y) == (0, 0):
                    continue
                cell_vector = np.array([cell_x, cell_y])
                distance_deg = np.rad2deg(ia.angle_between_vectors(cell_vector, direction_vector))
                similarity = (1 - distance_deg / 180)**4
                edge_kernel[cell_y + 1, cell_x + 1] = similarity

        # Normalize the weights, negate them and put 1 at the center so
        # that the kernel responds to intensity differences (edges).
        edge_kernel = edge_kernel / np.sum(edge_kernel)
        edge_kernel = edge_kernel * (-1)
        edge_kernel[1, 1] = 1

        # Identity kernel for blending with the unchanged image.
        identity = np.array(
            [[0, 0, 0],
             [0, 1, 0],
             [0, 0, 0]], dtype=np.float32)

        blended = (1 - alpha_sample) * identity + alpha_sample * edge_kernel
        return [blended] * nb_channels

    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    return Convolve(create_matrices, name=name,
                    deterministic=deterministic, random_state=random_state)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/geometric.py | augmentation/image_augmentation/helpers/imgaug/augmenters/geometric.py | """
Augmenters that apply affine transformations or other similar augmentations.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Affine(...),
iaa.PerspectiveTransform(...)
])
List of augmenters:
* Affine
* AffineCv2
* PiecewiseAffine
* PerspectiveTransform
* ElasticTransformation
* Rot90
"""
from __future__ import print_function, division, absolute_import
import math
from functools import partial
import numpy as np
from scipy import ndimage
from skimage import transform as tf
import cv2
import six.moves as sm
from . import meta
from . import blur as blur_lib
import imgaug as ia
from imgaug.augmentables.polys import _ConcavePolygonRecoverer
from .. import parameters as iap
from .. import dtypes as iadt
class Affine(meta.Augmenter):
"""
Augmenter to apply affine transformations to images.
This is mostly a wrapper around skimage's AffineTransform class and warp function.
Affine transformations involve:
- Translation ("move" image on the x-/y-axis)
- Rotation
- Scaling ("zoom" in/out)
- Shear (move one side of the image, turning a square into a trapezoid)
All such transformations can create "new" pixels in the image without a
defined content, e.g. if the image is translated to the left, pixels
are created on the right.
A method has to be defined to deal with these pixel values. The
parameters `cval` and `mode` of this class deal with this.
Some transformations involve interpolations between several pixels
of the input image to generate output pixel values. The parameter `order`
deals with the method of interpolation used for this.
dtype support::
if (backend="skimage", order in [0, 1])::
* ``uint8``: yes; tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (1)
* ``uint64``: no (2)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (1)
* ``int64``: no (2)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (2)
* ``bool``: yes; tested
- (1) scikit-image converts internally to float64, which might affect the accuracy of
large integers. In tests this seemed to not be an issue.
- (2) results too inaccurate
if (backend="skimage", order in [3, 4])::
* ``uint8``: yes; tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (1)
* ``uint64``: no (2)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (1)
* ``int64``: no (2)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: limited; tested (3)
* ``float128``: no (2)
* ``bool``: yes; tested
- (1) scikit-image converts internally to float64, which might affect the accuracy of
large integers. In tests this seemed to not be an issue.
- (2) results too inaccurate
- (3) ``NaN`` around minimum and maximum of float64 value range
if (backend="skimage", order=5])::
* ``uint8``: yes; tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested (1)
* ``uint64``: no (2)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested (1)
* ``int64``: no (2)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: limited; not tested (3)
* ``float128``: no (2)
* ``bool``: yes; tested
- (1) scikit-image converts internally to float64, which might affect the accuracy of
large integers. In tests this seemed to not be an issue.
- (2) results too inaccurate
- (3) ``NaN`` around minimum and maximum of float64 value range
if (backend="cv2", order=0)::
* ``uint8``: yes; tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: no (2)
* ``float16``: yes; tested (3)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (3)
- (1) rejected by cv2
- (2) changed to ``int32`` by cv2
- (3) mapped internally to ``float32``
if (backend="cv2", order=1):
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: no (2)
* ``int64``: no (2)
* ``float16``: yes; tested (4)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (4)
- (1) rejected by cv2
- (2) causes cv2 error: ``cv2.error: OpenCV(3.4.4) (...)imgwarp.cpp:1805: error:
(-215:Assertion failed) ifunc != 0 in function 'remap'``
- (3) mapped internally to ``int16``
- (4) mapped internally to ``float32``
if (backend="cv2", order=3):
* ``uint8``: yes; tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: no (2)
* ``int64``: no (2)
* ``float16``: yes; tested (4)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no (1)
* ``bool``: yes; tested (4)
- (1) rejected by cv2
- (2) causes cv2 error: ``cv2.error: OpenCV(3.4.4) (...)imgwarp.cpp:1805: error:
(-215:Assertion failed) ifunc != 0 in function 'remap'``
- (3) mapped internally to ``int16``
- (4) mapped internally to ``float32``
Parameters
----------
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\
or dict {"x": number/tuple/list/StochasticParameter, "y": number/tuple/list/StochasticParameter},\
optional
Scaling factor to use,
where 1.0 represents no change and 0.5 is zoomed out to 50 percent of the original size.
* If a single number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be sampled from the range
``a <= x <= b`` per image. That value will be used identically for
both x- and y-axis.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then from that parameter a value will
be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys "x" and/or "y".
Each of these keys can have the same values as described before
for this whole parameter (`scale`). Using a dictionary allows to
set different values for the axis. If they are set to the same
ranges, different values may still be sampled per axis.
translate_percent : None or number or tuple of number or list of number or imgaug.parameters.StochasticParameter or\
dict {"x": number/tuple/list/StochasticParameter, "y": number/tuple/list/StochasticParameter},\
optional
Translation in percent relative to the image height/width (x-translation, y-translation) to use,
where 0 represents no change and 0.5 is half of the image height/width.
* If None then equivalent to 0 unless translate_px has a non-None value.
* If a single number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be sampled from the range
``a <= x <= b`` per image. That percent value will be used identically
for both x- and y-axis.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then from that parameter a value will
be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys "x" and/or "y".
Each of these keys can have the same values as described before
for this whole parameter (`translate_percent`).
Using a dictionary allows to set different values for the axis.
If they are set to the same ranges, different values may still
be sampled per axis.
translate_px : None or int or tuple of int or list of int or imgaug.parameters.StochasticParameter or\
dict {"x": int/tuple/list/StochasticParameter, "y": int/tuple/list/StochasticParameter},\
optional
Translation in pixels.
* If None then equivalent to 0.0 unless translate_percent has a non-None value.
* If a single int, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be sampled from the discrete
range ``[a..b]`` per image. That number will be used identically
for both x- and y-axis.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then from that parameter a value will
be sampled per image (again, used for both x- and y-axis).
* If a dictionary, then it is expected to have the keys "x" and/or "y".
Each of these keys can have the same values as described before
for this whole parameter (`translate_px`).
Using a dictionary allows to set different values for the axis.
If they are set to the same ranges, different values may still
be sampled per axis.
rotate : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Rotation in degrees (_NOT_ radians), i.e. expected value range is
0 to 360 for positive rotations (may also be negative). Rotation
happens around the _center_ of the image, not the top left corner
as in some other frameworks.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be sampled per image from the
range ``a <= x <= b`` and be used as the rotation value.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used to
sample the rotation value per image.
shear : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Shear in degrees (_NOT_ radians), i.e. expected value range is
0 to 360 for positive shear (may also be negative).
* If a float/int, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be sampled per image from the
range ``a <= x <= b`` and be used as the rotation value.
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used to
sample the shear value per image.
order : int or iterable of int or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Interpolation order to use. Same meaning as in skimage:
* ``0``: ``Nearest-neighbor``
* ``1``: ``Bi-linear`` (default)
* ``2``: ``Bi-quadratic`` (not recommended by skimage)
* ``3``: ``Bi-cubic``
* ``4``: ``Bi-quartic``
* ``5``: ``Bi-quintic``
Method 0 and 1 are fast, 3 is a bit slower, 4 and 5 are very slow.
If the backend is ``cv2``, the mapping to OpenCV's interpolation modes
is as follows:
* ``0`` -> ``cv2.INTER_NEAREST``
* ``1`` -> ``cv2.INTER_LINEAR``
* ``2`` -> ``cv2.INTER_CUBIC``
* ``3`` -> ``cv2.INTER_CUBIC``
* ``4`` -> ``cv2.INTER_CUBIC``
As datatypes this parameter accepts:
* If a single int, then that order will be used for all images.
* If an iterable, then for each image a random value will be sampled
from that iterable (i.e. list of allowed order values).
* If imgaug.ALL, then equivalent to list ``[0, 1, 3, 4, 5]`` in case of backend ``skimage``
and otherwise ``[0, 1, 3]``.
* If StochasticParameter, then that parameter is queried per image
to sample the order value to use.
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
The constant value used for skimage's transform function.
This is the value used to fill up pixels in the result image that
didn't exist in the input image (e.g. when translating to the left,
some new pixels are created at the right). Such a fill-up with a
constant value only happens, when `mode` is "constant".
The expected value range is ``[0, 255]``. It may be a float value.
* If this is a single number, then that value will be used
(e.g. 0 results in black pixels).
* If a tuple ``(a, b)``, then a random value from the range ``a <= x <= b``
is picked per image.
* If a list, then a random value will be sampled from that list
per image.
* If imgaug.ALL, a value from the discrete range ``[0 .. 255]`` will be
sampled per image.
* If a StochasticParameter, a new value will be sampled from the
parameter per image.
fit_output : bool, optional
Whether the image after affine transformation is completely contained in the output image.
If False, parts of the image may be outside of the image plane or the image might make up only a small
part of the image plane. Activating this can be useful e.g. for rotations by 45 degrees to avoid that the
image corners are outside of the image plane.
Note that activating this will negate translation.
Note also that activating this may lead to image sizes differing from the input image sizes. To avoid this,
wrap ``Affine`` in ``KeepSizeByResize``, e.g. ``KeepSizeByResize(Affine(...))``.
mode : str or list of str or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
Parameter that defines the handling of newly created pixels.
Same meaning as in skimage (and numpy.pad):
* ``constant``: Pads with a constant value
* ``edge``: Pads with the edge values of array
* ``symmetric``: Pads with the reflection of the vector mirrored
along the edge of the array.
* ``reflect``: Pads with the reflection of the vector mirrored on
the first and last values of the vector along each axis.
* ``wrap``: Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the end values
are used to pad the beginning.
If ``cv2`` is chosen as the backend the mapping is as follows:
* ``constant`` -> ``cv2.BORDER_CONSTANT``
* ``edge`` -> ``cv2.BORDER_REPLICATE``
* ``symmetric`` -> ``cv2.BORDER_REFLECT``
* ``reflect`` -> ``cv2.BORDER_REFLECT_101``
* ``wrap`` -> ``cv2.BORDER_WRAP``
The datatype of the parameter may be:
* If a single string, then that mode will be used for all images.
* If a list of strings, then per image a random mode will be picked
from that list.
* If imgaug.ALL, then a random mode from all possible modes will be
picked.
* If StochasticParameter, then the mode will be sampled from that
parameter per image, i.e. it must return only the above mentioned
strings.
backend : str, optional
Framework to use as a backend. Valid values are ``auto``, ``skimage``
(scikit-image's warp) and ``cv2`` (OpenCV's warp).
If ``auto`` is used, the augmenter will automatically try
to use ``cv2`` where possible (order must be in ``[0, 1, 3]`` and
image's dtype uint8, otherwise skimage is chosen). It will
silently fall back to skimage if order/dtype is not supported by cv2.
cv2 is generally faster than skimage. It also supports RGB cvals,
while skimage will resort to intensity cvals (i.e. 3x the same value
as RGB). If ``cv2`` is chosen and order is 2 or 4, it will automatically
fall back to order 3.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Affine(scale=2.0)
zooms all images by a factor of 2.
>>> aug = iaa.Affine(translate_px=16)
translates all images on the x- and y-axis by 16 pixels (to the
right/top), fills up any new pixels with zero (black values).
>>> aug = iaa.Affine(translate_percent=0.1)
translates all images on the x- and y-axis by 10 percent of their
width/height (to the right/top), fills up any new pixels with zero
(black values).
>>> aug = iaa.Affine(rotate=35)
rotates all images by 35 degrees, fills up any new pixels with zero
(black values).
>>> aug = iaa.Affine(shear=15)
shears all images by 15 degrees, fills up any new pixels with zero
(black values).
>>> aug = iaa.Affine(translate_px=(-16, 16))
translates all images on the x- and y-axis by a random value
between -16 and 16 pixels (to the right/top) (same for both axis, i.e.
sampled once per image), fills up any new pixels with zero (black values).
>>> aug = iaa.Affine(translate_px={"x": (-16, 16), "y": (-4, 4)})
translates all images on the x-axis by a random value
between -16 and 16 pixels (to the right) and on the y-axis by a
random value between -4 and 4 pixels to the top. Even if both ranges
were the same, both axis could use different samples.
Fills up any new pixels with zero (black values).
>>> aug = iaa.Affine(scale=2.0, order=[0, 1])
same as previously, but uses (randomly) either nearest neighbour
interpolation or linear interpolation.
>>> aug = iaa.Affine(translate_px=16, cval=(0, 255))
same as previously, but fills up any new pixels with a random
brightness (same for the whole image).
>>> aug = iaa.Affine(translate_px=16, mode=["constant", "edge"])
same as previously, but fills up the new pixels in only 50 percent
of all images with black values. In the other 50 percent of all cases,
the value of the nearest edge is used.
"""
VALID_DTYPES_CV2_ORDER_0 = {"uint8", "uint16", "int8", "int16", "int32",
"float16", "float32", "float64",
"bool"}
VALID_DTYPES_CV2_ORDER_NOT_0 = {"uint8", "uint16", "int8", "int16",
"float16", "float32", "float64",
"bool"}
def __init__(self, scale=1.0, translate_percent=None, translate_px=None, rotate=0.0, shear=0.0, order=1, cval=0,
             mode="constant", fit_output=False, backend="auto", name=None, deterministic=False, random_state=None):
    """Create a new Affine augmenter; see the class docstring for details on all parameters."""
    super(Affine, self).__init__(name=name, deterministic=deterministic, random_state=random_state)

    ia.do_assert(backend in ["auto", "skimage", "cv2"])
    self.backend = backend

    # Mapping from skimage interpolation orders to cv2 interpolation flags.
    # Orders 2 and 4 have no cv2 equivalent and fall back to cubic.
    # skimage | cv2
    # 0       | cv2.INTER_NEAREST
    # 1       | cv2.INTER_LINEAR
    # 2       | -
    # 3       | cv2.INTER_CUBIC
    # 4       | -
    self.order_map_skimage_cv2 = {
        0: cv2.INTER_NEAREST,
        1: cv2.INTER_LINEAR,
        2: cv2.INTER_CUBIC,
        3: cv2.INTER_CUBIC,
        4: cv2.INTER_CUBIC
    }
    # Performance in skimage:
    #  1.0x order 0
    #  1.5x order 1
    #  3.0x order 3
    # 30.0x order 4
    # 60.0x order 5
    # measurement based on 256x256x3 batches, difference is smaller
    # on smaller images (seems to grow more like exponentially with image
    # size)
    if order == ia.ALL:
        if backend == "auto" or backend == "cv2":
            # restrict to the orders cv2 supports, since "auto" may pick cv2
            self.order = iap.Choice([0, 1, 3])
        else:
            # dont use order=2 (bi-quadratic) because that is apparently currently not recommended (and throws
            # a warning)
            self.order = iap.Choice([0, 1, 3, 4, 5])
    elif ia.is_single_integer(order):
        ia.do_assert(0 <= order <= 5,
                     "Expected order's integer value to be in range 0 <= x <= 5, got %d." % (order,))
        if backend == "cv2":
            ia.do_assert(order in [0, 1, 3])
        self.order = iap.Deterministic(order)
    elif isinstance(order, list):
        ia.do_assert(all([ia.is_single_integer(val) for val in order]),
                     "Expected order list to only contain integers, got types %s." % (
                         str([type(val) for val in order]),))
        ia.do_assert(all([0 <= val <= 5 for val in order]),
                     "Expected all of order's integer values to be in range 0 <= x <= 5, got %s." % (str(order),))
        if backend == "cv2":
            ia.do_assert(all([val in [0, 1, 3] for val in order]))
        self.order = iap.Choice(order)
    elif isinstance(order, iap.StochasticParameter):
        self.order = order
    else:
        raise Exception("Expected order to be imgaug.ALL, int, list of int or StochasticParameter, got %s." % (
            type(order),))

    if cval == ia.ALL:
        # TODO change this so that it is dynamically created per image (or once per dtype)
        self.cval = iap.Uniform(0, 255)  # skimage transform expects float
    else:
        self.cval = iap.handle_continuous_param(cval, "cval", value_range=None, tuple_to_uniform=True,
                                                list_to_choice=True)

    # Mapping from skimage border modes to cv2 border flags.
    # constant, edge, symmetric, reflect, wrap
    # skimage   | cv2
    # constant  | cv2.BORDER_CONSTANT
    # edge      | cv2.BORDER_REPLICATE
    # symmetric | cv2.BORDER_REFLECT
    # reflect   | cv2.BORDER_REFLECT_101
    # wrap      | cv2.BORDER_WRAP
    self.mode_map_skimage_cv2 = {
        "constant": cv2.BORDER_CONSTANT,
        "edge": cv2.BORDER_REPLICATE,
        "symmetric": cv2.BORDER_REFLECT,
        "reflect": cv2.BORDER_REFLECT_101,
        "wrap": cv2.BORDER_WRAP
    }
    if mode == ia.ALL:
        self.mode = iap.Choice(["constant", "edge", "symmetric", "reflect", "wrap"])
    elif ia.is_string(mode):
        self.mode = iap.Deterministic(mode)
    elif isinstance(mode, list):
        ia.do_assert(all([ia.is_string(val) for val in mode]))
        self.mode = iap.Choice(mode)
    elif isinstance(mode, iap.StochasticParameter):
        self.mode = mode
    else:
        raise Exception("Expected mode to be imgaug.ALL, a string, a list of strings or StochasticParameter, "
                        + "got %s." % (type(mode),))

    # scale: a dict selects independent parameters per axis; any other input
    # becomes a single parameter shared by both axes. The lower bound of
    # 1e-4 rules out a scale of exactly zero.
    if isinstance(scale, dict):
        ia.do_assert("x" in scale or "y" in scale)
        x = scale.get("x", 1.0)
        y = scale.get("y", 1.0)
        self.scale = (
            iap.handle_continuous_param(x, "scale['x']", value_range=(0+1e-4, None), tuple_to_uniform=True,
                                        list_to_choice=True),
            iap.handle_continuous_param(y, "scale['y']", value_range=(0+1e-4, None), tuple_to_uniform=True,
                                        list_to_choice=True)
        )
    else:
        self.scale = iap.handle_continuous_param(scale, "scale", value_range=(0+1e-4, None), tuple_to_uniform=True,
                                                 list_to_choice=True)

    # translate: exactly one of translate_percent / translate_px may be
    # given; with neither given, default to translating by 0 pixels.
    if translate_percent is None and translate_px is None:
        translate_px = 0

    ia.do_assert(translate_percent is None or translate_px is None)

    if translate_percent is not None:
        # translate by percent
        if isinstance(translate_percent, dict):
            ia.do_assert("x" in translate_percent or "y" in translate_percent)
            x = translate_percent.get("x", 0)
            y = translate_percent.get("y", 0)
            self.translate = (
                iap.handle_continuous_param(x, "translate_percent['x']", value_range=None, tuple_to_uniform=True,
                                            list_to_choice=True),
                iap.handle_continuous_param(y, "translate_percent['y']", value_range=None, tuple_to_uniform=True,
                                            list_to_choice=True)
            )
        else:
            self.translate = iap.handle_continuous_param(translate_percent, "translate_percent", value_range=None,
                                                         tuple_to_uniform=True, list_to_choice=True)
    else:
        # translate by pixels
        if isinstance(translate_px, dict):
            ia.do_assert("x" in translate_px or "y" in translate_px)
            x = translate_px.get("x", 0)
            y = translate_px.get("y", 0)
            self.translate = (
                iap.handle_discrete_param(x, "translate_px['x']", value_range=None, tuple_to_uniform=True,
                                          list_to_choice=True, allow_floats=False),
                iap.handle_discrete_param(y, "translate_px['y']", value_range=None, tuple_to_uniform=True,
                                          list_to_choice=True, allow_floats=False)
            )
        else:
            self.translate = iap.handle_discrete_param(translate_px, "translate_px", value_range=None,
                                                       tuple_to_uniform=True, list_to_choice=True,
                                                       allow_floats=False)

    self.rotate = iap.handle_continuous_param(rotate, "rotate", value_range=None, tuple_to_uniform=True,
                                              list_to_choice=True)
    self.shear = iap.handle_continuous_param(shear, "shear", value_range=None, tuple_to_uniform=True,
                                             list_to_choice=True)
    self.fit_output = fit_output
def _augment_images(self, images, random_state, parents, hooks):
    """Sample one affine transformation per image and apply it.

    Delegates sampling to ``_draw_samples`` and the actual warping to
    ``_augment_images_by_samples``.
    """
    # _draw_samples returns (scale, translate, rotate, shear, cval, mode,
    # order) samples -- exactly the positional order that
    # _augment_images_by_samples expects, so the tuple can be splatted.
    samples = self._draw_samples(len(images), random_state)
    return self._augment_images_by_samples(images, *samples)
def _augment_images_by_samples(self, images, scale_samples, translate_samples, rotate_samples, shear_samples,
                               cval_samples, mode_samples, order_samples, return_matrices=False):
    """Warp ``images`` according to pre-drawn per-image samples.

    ``scale_samples`` and ``translate_samples`` each hold a pair of arrays
    (x-axis values, then y-axis values); the remaining ``*_samples`` hold one
    entry per image. If ``return_matrices`` is True, the per-image
    transformation matrices are returned alongside the warped images.
    """
    nb_images = len(images)
    input_was_array = ia.is_np_array(images)
    input_dtype = None if not input_was_array else images.dtype
    result = []
    if return_matrices:
        matrices = [None] * nb_images
    for i in sm.xrange(nb_images):
        image = images[i]

        min_value, _center_value, max_value = iadt.get_value_range_of_dtype(image.dtype)

        scale_x, scale_y = scale_samples[0][i], scale_samples[1][i]
        translate_x, translate_y = translate_samples[0][i], translate_samples[1][i]
        # Float translation samples are fractions of the image height/width
        # and are converted to whole pixels; int samples are pixels already.
        if ia.is_single_float(translate_y):
            translate_y_px = int(np.round(translate_y * images[i].shape[0]))
        else:
            translate_y_px = translate_y
        if ia.is_single_float(translate_x):
            translate_x_px = int(np.round(translate_x * images[i].shape[1]))
        else:
            translate_x_px = translate_x

        rotate = rotate_samples[i]
        shear = shear_samples[i]
        cval = cval_samples[i]
        mode = mode_samples[i]
        order = order_samples[i]

        # Only warp when the sampled transform is not the identity.
        if scale_x != 1.0 or scale_y != 1.0 or translate_x_px != 0 or translate_y_px != 0 or rotate != 0 \
                or shear != 0:
            # Decide per image whether cv2 can handle it (order, dtype and
            # channel count restrictions); otherwise fall back to skimage.
            cv2_bad_order = order not in [0, 1, 3]
            if order == 0:
                cv2_bad_dtype = image.dtype.name not in self.VALID_DTYPES_CV2_ORDER_0
            else:
                cv2_bad_dtype = image.dtype.name not in self.VALID_DTYPES_CV2_ORDER_NOT_0
            cv2_bad_shape = image.shape[2] > 4
            cv2_impossible = cv2_bad_order or cv2_bad_dtype or cv2_bad_shape
            if self.backend == "skimage" or (self.backend == "auto" and cv2_impossible):
                # cval contains 3 values as cv2 can handle 3, but skimage only 1
                cval = cval[0]
                # skimage does not clip automatically
                cval = max(min(cval, max_value), min_value)
                image_warped = self._warp_skimage(
                    image,
                    scale_x, scale_y,
                    translate_x_px, translate_y_px,
                    rotate, shear,
                    cval,
                    mode, order,
                    self.fit_output,
                    return_matrix=return_matrices,
                )
            else:
                # NOTE(review): the message understates the supported dtypes
                # (see VALID_DTYPES_CV2_*) -- consider updating it.
                ia.do_assert(not cv2_bad_dtype,
                             "cv2 backend can only handle images of dtype uint8, float32 and float64, got %s." % (
                                 image.dtype,))
                image_warped = self._warp_cv2(
                    image,
                    scale_x, scale_y,
                    translate_x_px, translate_y_px,
                    rotate, shear,
                    tuple([int(v) for v in cval]),
                    self.mode_map_skimage_cv2[mode],
                    self.order_map_skimage_cv2[order],
                    self.fit_output,
                    return_matrix=return_matrices,
                )
            if return_matrices:
                image_warped, matrix = image_warped
                matrices[i] = matrix

            result.append(image_warped)
        else:
            result.append(images[i])

    # the shapes can change due to fit_output, then it may not be possible to return an array, even when the input
    # was an array
    if input_was_array:
        nb_shapes = len(set([image.shape for image in result]))
        if nb_shapes == 1:
            result = np.array(result, input_dtype)

    if return_matrices:
        result = (result, matrices)

    return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
nb_heatmaps = len(heatmaps)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/__init__.py | augmentation/image_augmentation/helpers/imgaug/augmenters/__init__.py | from __future__ import absolute_import
from imgaug.augmenters.arithmetic import *
from imgaug.augmenters.blend import *
from imgaug.augmenters.blur import *
from imgaug.augmenters.color import *
from imgaug.augmenters.contrast import *
from imgaug.augmenters.convolutional import *
from imgaug.augmenters.edges import *
from imgaug.augmenters.flip import *
from imgaug.augmenters.geometric import *
from imgaug.augmenters.meta import *
from imgaug.augmenters.pooling import *
from imgaug.augmenters.segmentation import *
from imgaug.augmenters.size import *
from imgaug.augmenters.weather import *
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/color.py | augmentation/image_augmentation/helpers/imgaug/augmenters/color.py | """
Augmenters that apply color space oriented changes.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Grayscale((0.0, 1.0)),
iaa.AddToHueAndSaturation((-10, 10))
])
List of augmenters:
* InColorspace (deprecated)
* WithColorspace
* AddToHueAndSaturation
* ChangeColorspace
* Grayscale
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import cv2
import six.moves as sm
from . import meta
from . import blend
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
@ia.deprecated(alt_func="WithColorspace")
def InColorspace(to_colorspace, from_colorspace="RGB", children=None, name=None, deterministic=False,
                 random_state=None):
    """Convert images to another colorspace."""
    # Deprecated alias: forward every argument to WithColorspace unchanged.
    return WithColorspace(to_colorspace=to_colorspace, from_colorspace=from_colorspace, children=children,
                          name=name, deterministic=deterministic, random_state=random_state)
class WithColorspace(meta.Augmenter):
    """
    Apply child augmenters within a specific colorspace.

    This augmenter takes a source colorspace A and a target colorspace B
    as well as children C. It changes images from A to B, then applies the
    child augmenters C and finally changes the colorspace back from B to A.
    See also ChangeColorspace() for more.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: ?
        * ``uint32``: ?
        * ``uint64``: ?
        * ``int8``: ?
        * ``int16``: ?
        * ``int32``: ?
        * ``int64``: ?
        * ``float16``: ?
        * ``float32``: ?
        * ``float64``: ?
        * ``float128``: ?
        * ``bool``: ?

    Parameters
    ----------
    to_colorspace : str
        See :func:`imgaug.augmenters.ChangeColorspace.__init__`.

    from_colorspace : str, optional
        See :func:`imgaug.augmenters.ChangeColorspace.__init__`.

    children : None or Augmenter or list of Augmenters, optional
        See :func:`imgaug.augmenters.ChangeColorspace.__init__`.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.WithColorspace(to_colorspace="HSV", from_colorspace="RGB",
    >>>                          children=iaa.WithChannels(0, iaa.Add(10)))

    This augmenter will add 10 to Hue value in HSV colorspace,
    then change the colorspace back to the original (RGB).

    """

    def __init__(self, to_colorspace, from_colorspace="RGB", children=None, name=None, deterministic=False,
                 random_state=None):
        super(WithColorspace, self).__init__(name=name, deterministic=deterministic, random_state=random_state)

        self.to_colorspace = to_colorspace
        self.from_colorspace = from_colorspace
        self.children = meta.handle_children_list(children, self.name, "then")

    def _augment_images(self, images, random_state, parents, hooks):
        # Convert A -> B, run the children inside colorspace B, then convert
        # back B -> A.
        result = images
        if hooks is None or hooks.is_propagating(images, augmenter=self, parents=parents, default=True):
            result = ChangeColorspace(
                to_colorspace=self.to_colorspace,
                from_colorspace=self.from_colorspace
            ).augment_images(images=result)
            result = self.children.augment_images(
                images=result,
                parents=parents + [self],
                hooks=hooks
            )
            result = ChangeColorspace(
                to_colorspace=self.from_colorspace,
                from_colorspace=self.to_colorspace
            ).augment_images(images=result)
        return result

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Heatmaps are not colorspace-converted; only the children run.
        result = heatmaps
        if hooks is None or hooks.is_propagating(heatmaps, augmenter=self, parents=parents, default=True):
            result = self.children.augment_heatmaps(
                result,
                parents=parents + [self],
                hooks=hooks,
            )
        return result

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Keypoints are not colorspace-converted; only the children run.
        result = keypoints_on_images
        if hooks is None or hooks.is_propagating(keypoints_on_images, augmenter=self, parents=parents, default=True):
            result = self.children.augment_keypoints(
                result,
                parents=parents + [self],
                hooks=hooks,
            )
        return result

    def _to_deterministic(self):
        aug = self.copy()
        aug.children = aug.children.to_deterministic()
        aug.deterministic = True
        aug.random_state = ia.derive_random_state(self.random_state)
        return aug

    def get_parameters(self):
        # Bugfix: this previously returned ``[self.channels]``, but no
        # ``channels`` attribute is ever set on this class (apparently
        # copy-pasted from WithChannels), so every call raised an
        # AttributeError. Return the actual configuration instead.
        return [self.to_colorspace, self.from_colorspace]

    def get_children_lists(self):
        return [self.children]

    def __str__(self):
        return "WithColorspace(from_colorspace=%s, to_colorspace=%s, name=%s, children=[%s], deterministic=%s)" % (
            self.from_colorspace, self.to_colorspace, self.name, self.children, self.deterministic)
# TODO removed deterministic and random_state here as parameters, because this
# function creates multiple child augmenters. not sure if this is sensible
# (give them all the same random state instead?)
# TODO this is for now deactivated, because HSV images returned by opencv have value range 0-180 for the hue channel
# and are supposed to be angular representations, i.e. if values go below 0 or above 180 they are supposed to overflow
# to 180 and 0
"""
def AddToHueAndSaturation(value=0, per_channel=False, from_colorspace="RGB", channels=[0, 1], name=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
""
Augmenter that transforms images into HSV space, selects the H and S
channels and then adds a given range of values to these.
Parameters
----------
value : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
See :func:`imgaug.augmenters.arithmetic.Add.__init__()`.
per_channel : bool or float, optional
See :func:`imgaug.augmenters.arithmetic.Add.__init__()`.
from_colorspace : str, optional
See :func:`imgaug.augmenters.color.ChangeColorspace.__init__()`.
channels : int or list of int or None, optional
See :func:`imgaug.augmenters.meta.WithChannels.__init__()`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = AddToHueAndSaturation((-20, 20), per_channel=True)
Adds random values between -20 and 20 to the hue and saturation
(independently per channel and the same value for all pixels within
that channel).
""
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return WithColorspace(
to_colorspace="HSV",
from_colorspace=from_colorspace,
children=meta.WithChannels(
channels=channels,
children=arithmetic.Add(value=value, per_channel=per_channel)
),
name=name
)
"""
class AddToHueAndSaturation(meta.Augmenter):
"""
Augmenter that increases/decreases hue and saturation by random values.
The augmenter first transforms images to HSV colorspace, then adds random values to the H and S channels
and afterwards converts back to RGB.
TODO add float support
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: no
* ``float64``: no
* ``float128``: no
* ``bool``: no
Parameters
----------
value : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
See :func:`imgaug.augmenters.arithmetic.Add.__init__()`.
per_channel : bool or float, optional
See :func:`imgaug.augmenters.arithmetic.Add.__init__()`.
from_colorspace : str, optional
See :func:`imgaug.augmenters.color.ChangeColorspace.__init__()`.
channels : int or list of int or None, optional
See :func:`imgaug.augmenters.meta.WithChannels.__init__()`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = AddToHueAndSaturation((-20, 20), per_channel=True)
Adds random values between -20 and 20 to the hue and saturation
(independently per channel and the same value for all pixels within
that channel).
"""
_LUT_CACHE = None
def __init__(self, value=0, per_channel=False, from_colorspace="RGB", name=None, deterministic=False,
random_state=None):
super(AddToHueAndSaturation, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.value = iap.handle_discrete_param(value, "value", value_range=(-255, 255), tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.per_channel = iap.handle_probability_param(per_channel, "per_channel")
# we don't change these in a modified to_deterministic() here, because they are called in _augment_images()
# with random states
self.colorspace_changer = ChangeColorspace(from_colorspace=from_colorspace, to_colorspace="HSV")
self.colorspace_changer_inv = ChangeColorspace(from_colorspace="HSV", to_colorspace=from_colorspace)
self.backend = "cv2"
# precompute tables for cv2.LUT
if self.backend == "cv2" and self._LUT_CACHE is None:
self._LUT_CACHE = (np.zeros((256*2, 256), dtype=np.int8),
np.zeros((256*2, 256), dtype=np.int8))
value_range = np.arange(0, 256, dtype=np.int16)
# this could be done slightly faster by vectorizing the loop
for i in sm.xrange(-255, 255+1):
table_hue = np.mod(value_range + i, 180)
table_saturation = np.clip(value_range + i, 0, 255)
self._LUT_CACHE[0][i, :] = table_hue
self._LUT_CACHE[1][i, :] = table_saturation
def _augment_images(self, images, random_state, parents, hooks):
input_dtypes = iadt.copy_dtypes_for_restore(images, force_list=True)
result = images
nb_images = len(images)
# surprisingly, placing this here seems to be slightly slower than placing it inside the loop
# if isinstance(images_hsv, list):
# images_hsv = [img.astype(np.int32) for img in images_hsv]
# else:
# images_hsv = images_hsv.astype(np.int32)
rss = ia.derive_random_states(random_state, 3)
images_hsv = self.colorspace_changer._augment_images(images, rss[0], parents + [self], hooks)
samples = self.value.draw_samples((nb_images, 2), random_state=rss[1]).astype(np.int32)
samples_hue = ((samples.astype(np.float32) / 255.0) * (360/2)).astype(np.int32)
per_channel = self.per_channel.draw_samples((nb_images,), random_state=rss[2])
rs_inv = random_state
ia.do_assert(-255 <= samples[0, 0] <= 255)
# this is needed if no cache for LUT is used:
# value_range = np.arange(0, 256, dtype=np.int16)
gen = enumerate(zip(images_hsv, samples, samples_hue, per_channel))
for i, (image_hsv, samples_i, samples_hue_i, per_channel_i) in gen:
assert image_hsv.dtype.name == "uint8"
sample_saturation = samples_i[0]
if per_channel_i > 0.5:
sample_hue = samples_hue_i[1]
else:
sample_hue = samples_hue_i[0]
if self.backend == "cv2":
# this has roughly the same speed as the numpy backend for 64x64 and is about 25% faster for 224x224
# code without using cache:
# table_hue = np.mod(value_range + sample_hue, 180)
# table_saturation = np.clip(value_range + sample_saturation, 0, 255)
# table_hue = table_hue.astype(np.uint8, copy=False)
# table_saturation = table_saturation.astype(np.uint8, copy=False)
# image_hsv[..., 0] = cv2.LUT(image_hsv[..., 0], table_hue)
# image_hsv[..., 1] = cv2.LUT(image_hsv[..., 1], table_saturation)
# code with using cache (at best maybe 10% faster for 64x64):
image_hsv[..., 0] = cv2.LUT(image_hsv[..., 0], self._LUT_CACHE[0][int(sample_hue)])
image_hsv[..., 1] = cv2.LUT(image_hsv[..., 1], self._LUT_CACHE[1][int(sample_saturation)])
else:
image_hsv = image_hsv.astype(np.int16) # int16 seems to be slightly faster than int32
# np.mod() works also as required here for negative values
image_hsv[..., 0] = np.mod(image_hsv[..., 0] + sample_hue, 180)
image_hsv[..., 1] = np.clip(image_hsv[..., 1] + sample_saturation, 0, 255)
image_hsv = image_hsv.astype(input_dtypes[i])
# the inverse colorspace changer has a deterministic output (always <from_colorspace>, so that can
# always provide it the same random state as input
image_rgb = self.colorspace_changer_inv._augment_images([image_hsv], rs_inv, parents + [self], hooks)[0]
result[i] = image_rgb
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def get_parameters(self):
    """Return the stochastic parameters of this augmenter: ``[value, per_channel]``."""
    params = [self.value, self.per_channel]
    return params
# TODO tests
# Note: Not clear whether this class will be kept (for anything aside from grayscale)
# other colorspaces don't really make sense and they also might not work correctly
# due to having no clearly limited range (like 0-255 or 0-1)
# TODO rename to ChangeColorspace3D and then introduce ChangeColorspace, which does not enforce 3d images?
class ChangeColorspace(meta.Augmenter):
    """
    Augmenter to change the colorspace of images.

    NOTE: This augmenter is not tested. Some colorspaces might work, others
    might not.

    NOTE: This augmenter tries to project the colorspace value range on
    0-255. It outputs dtype=uint8 images.

    TODO check dtype support

    dtype support::

        * ``uint8``: yes; not tested
        * ``uint16``: ?
        * ``uint32``: ?
        * ``uint64``: ?
        * ``int8``: ?
        * ``int16``: ?
        * ``int32``: ?
        * ``int64``: ?
        * ``float16``: ?
        * ``float32``: ?
        * ``float64``: ?
        * ``float128``: ?
        * ``bool``: ?

    Parameters
    ----------
    to_colorspace : str or list of str or imgaug.parameters.StochasticParameter
        The target colorspace.
        Allowed strings are: ``RGB``, ``BGR``, ``GRAY``, ``CIE``, ``YCrCb``,
        ``HSV``, ``HLS``, ``Lab``, ``Luv``.
        These are also accessible via ``ChangeColorspace.<NAME>``,
        e.g. ``ChangeColorspace.YCrCb``.

            * If a string, it must be among the allowed colorspaces.
            * If a list, it is expected to be a list of strings, each one
              being an allowed colorspace. A random element from the list
              will be chosen per image.
            * If a StochasticParameter, it is expected to return string. A new
              sample will be drawn per image.

    from_colorspace : str, optional
        The source colorspace (of the input images).
        See `to_colorspace`. Only a single string is allowed.

    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        The alpha value of the new colorspace when overlaid over the
        old one. A value close to 1.0 means that mostly the new
        colorspace is visible. A value close to 0.0 means that mostly the
        old image is visible.

            * If an int or float, exactly that value will be used.
            * If a tuple ``(a, b)``, a random value from the range
              ``a <= x <= b`` will be sampled per image.
            * If a list, then a random value will be sampled from that list
              per image.
            * If a StochasticParameter, a value will be sampled from the
              parameter per image.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    """

    # Names of all colorspaces accepted by this augmenter.
    RGB = "RGB"
    BGR = "BGR"
    GRAY = "GRAY"
    CIE = "CIE"
    YCrCb = "YCrCb"
    HSV = "HSV"
    HLS = "HLS"
    Lab = "Lab"
    Luv = "Luv"
    COLORSPACES = {RGB, BGR, GRAY, CIE, YCrCb, HSV, HLS, Lab, Luv}

    # TODO access cv2 COLOR_ variables directly instead of indirectly via dictionary mapping
    # Mapping of "<source>2<target>" strings to cv2 conversion flags.
    # NOTE(review): only RGB/BGR sources plus a few inverse conversions
    # (HSV/HLS/Lab back to RGB/BGR) are listed here; any other combination
    # would raise a KeyError in _augment_images -- confirm whether intended.
    CV_VARS = {
        # RGB
        "RGB2BGR": cv2.COLOR_RGB2BGR,
        "RGB2GRAY": cv2.COLOR_RGB2GRAY,
        "RGB2CIE": cv2.COLOR_RGB2XYZ,
        "RGB2YCrCb": cv2.COLOR_RGB2YCR_CB,
        "RGB2HSV": cv2.COLOR_RGB2HSV,
        "RGB2HLS": cv2.COLOR_RGB2HLS,
        "RGB2Lab": cv2.COLOR_RGB2LAB,
        "RGB2Luv": cv2.COLOR_RGB2LUV,
        # BGR
        "BGR2RGB": cv2.COLOR_BGR2RGB,
        "BGR2GRAY": cv2.COLOR_BGR2GRAY,
        "BGR2CIE": cv2.COLOR_BGR2XYZ,
        "BGR2YCrCb": cv2.COLOR_BGR2YCR_CB,
        "BGR2HSV": cv2.COLOR_BGR2HSV,
        "BGR2HLS": cv2.COLOR_BGR2HLS,
        "BGR2Lab": cv2.COLOR_BGR2LAB,
        "BGR2Luv": cv2.COLOR_BGR2LUV,
        # HSV
        "HSV2RGB": cv2.COLOR_HSV2RGB,
        "HSV2BGR": cv2.COLOR_HSV2BGR,
        # HLS
        "HLS2RGB": cv2.COLOR_HLS2RGB,
        "HLS2BGR": cv2.COLOR_HLS2BGR,
        # Lab
        # some cv2 builds expose COLOR_Lab2*, others COLOR_LAB2*
        "Lab2RGB": cv2.COLOR_Lab2RGB if hasattr(cv2, "COLOR_Lab2RGB") else cv2.COLOR_LAB2RGB,
        "Lab2BGR": cv2.COLOR_Lab2BGR if hasattr(cv2, "COLOR_Lab2BGR") else cv2.COLOR_LAB2BGR
    }

    def __init__(self, to_colorspace, from_colorspace="RGB", alpha=1.0, name=None, deterministic=False,
                 random_state=None):
        super(ChangeColorspace, self).__init__(name=name, deterministic=deterministic, random_state=random_state)

        # TODO somehow merge this with Alpha augmenter?
        # Blending factor of the converted image over the original one.
        self.alpha = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True,
                                                 list_to_choice=True)

        # Normalize `to_colorspace` into a StochasticParameter that yields
        # one colorspace name per image.
        if ia.is_string(to_colorspace):
            ia.do_assert(to_colorspace in ChangeColorspace.COLORSPACES)
            self.to_colorspace = iap.Deterministic(to_colorspace)
        elif ia.is_iterable(to_colorspace):
            ia.do_assert(all([ia.is_string(colorspace) for colorspace in to_colorspace]))
            ia.do_assert(all([(colorspace in ChangeColorspace.COLORSPACES) for colorspace in to_colorspace]))
            self.to_colorspace = iap.Choice(to_colorspace)
        elif isinstance(to_colorspace, iap.StochasticParameter):
            self.to_colorspace = to_colorspace
        else:
            raise Exception("Expected to_colorspace to be string, list of strings or StochasticParameter, got %s." % (
                type(to_colorspace),))

        self.from_colorspace = from_colorspace
        ia.do_assert(self.from_colorspace in ChangeColorspace.COLORSPACES)
        # grayscale is not supported as a *source* colorspace
        ia.do_assert(from_colorspace != ChangeColorspace.GRAY)

        self.eps = 0.001  # epsilon value to check if alpha is close to 1.0 or 0.0

    def _augment_images(self, images, random_state, parents, hooks):
        # `result` aliases `images`; entries are replaced in place below.
        result = images
        nb_images = len(images)
        # Both draws intentionally use copies of the same incoming random
        # state, matching the original sampling behaviour.
        alphas = self.alpha.draw_samples((nb_images,), random_state=ia.copy_random_state(random_state))
        to_colorspaces = self.to_colorspace.draw_samples((nb_images,), random_state=ia.copy_random_state(random_state))
        for i in sm.xrange(nb_images):
            alpha = alphas[i]
            to_colorspace = to_colorspaces[i]
            image = images[i]

            ia.do_assert(0.0 <= alpha <= 1.0)
            ia.do_assert(to_colorspace in ChangeColorspace.COLORSPACES)

            if alpha == 0 or self.from_colorspace == to_colorspace:
                pass  # no change necessary
            else:
                # some colorspaces here should use image/255.0 according to the docs,
                # but at least for conversion to grayscale that results in errors,
                # ie uint8 is expected

                # Warn (but do not fail) on shapes that are unlikely to
                # convert cleanly via cv2.
                if image.ndim != 3:
                    import warnings
                    warnings.warn(
                        "Received an image with %d dimensions in "
                        "ChangeColorspace._augment_image(), but expected 3 dimensions, i.e. shape "
                        "(height, width, channels)." % (image.ndim,)
                    )
                elif image.shape[2] != 3:
                    import warnings
                    warnings.warn(
                        "Received an image with shape (H, W, C) and C=%d in "
                        "ChangeColorspace._augment_image(). Expected C to usually be 3 -- any "
                        "other value will likely result in errors. (Note that this function is "
                        "e.g. called during grayscale conversion and hue/saturation "
                        "changes.)" % (image.shape[2],)
                    )

                if self.from_colorspace in [ChangeColorspace.RGB, ChangeColorspace.BGR]:
                    # direct conversion from RGB/BGR to the target colorspace
                    from_to_var_name = "%s2%s" % (self.from_colorspace, to_colorspace)
                    from_to_var = ChangeColorspace.CV_VARS[from_to_var_name]
                    img_to_cs = cv2.cvtColor(image, from_to_var)
                else:
                    # convert to RGB
                    from_to_var_name = "%s2%s" % (self.from_colorspace, ChangeColorspace.RGB)
                    from_to_var = ChangeColorspace.CV_VARS[from_to_var_name]
                    img_rgb = cv2.cvtColor(image, from_to_var)

                    if to_colorspace == ChangeColorspace.RGB:
                        img_to_cs = img_rgb
                    else:
                        # convert from RGB to desired target colorspace
                        from_to_var_name = "%s2%s" % (ChangeColorspace.RGB, to_colorspace)
                        from_to_var = ChangeColorspace.CV_VARS[from_to_var_name]
                        img_to_cs = cv2.cvtColor(img_rgb, from_to_var)

                # this will break colorspaces that have values outside 0-255 or 0.0-1.0
                # TODO dont convert to uint8
                if ia.is_integer_array(img_to_cs):
                    img_to_cs = np.clip(img_to_cs, 0, 255).astype(np.uint8)
                else:
                    img_to_cs = np.clip(img_to_cs * 255, 0, 255).astype(np.uint8)

                # for grayscale: convert from (H, W) to (H, W, 3)
                if len(img_to_cs.shape) == 2:
                    img_to_cs = img_to_cs[:, :, np.newaxis]
                    img_to_cs = np.tile(img_to_cs, (1, 1, 3))

                # blend the converted image over the original with the
                # per-image alpha (eps decides the near-0/near-1 shortcuts)
                result[i] = blend.blend_alpha(img_to_cs, image, alpha, self.eps)

        # `result` is the same list object as `images`, so both names refer
        # to the (partially) modified input list.
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # colorspace changes do not affect heatmaps
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # colorspace changes do not affect keypoint coordinates
        return keypoints_on_images

    def get_parameters(self):
        return [self.to_colorspace, self.alpha]
# TODO rename to Grayscale3D and add Grayscale that keeps the image at 1D?
def Grayscale(alpha=0, from_colorspace="RGB", name=None, deterministic=False, random_state=None):
"""
Augmenter to convert images to their grayscale versions.
NOTE: Number of output channels is still 3, i.e. this augmenter just "removes" color.
TODO check dtype support
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
The alpha value of the grayscale image when overlayed over the
old image. A value close to 1.0 means, that mostly the new grayscale
image is visible. A value close to 0.0 means, that mostly the
old image is visible.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
from_colorspace : str, optional
The source colorspace (of the input images).
Allowed strings are: ``RGB``, ``BGR``, ``GRAY``, ``CIE``, ``YCrCb``, ``HSV``, ``HLS``, ``Lab``, ``Luv``.
See :func:`imgaug.augmenters.color.ChangeColorspace.__init__`.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Grayscale(alpha=1.0)
creates an augmenter that turns images to their grayscale versions.
>>> aug = iaa.Grayscale(alpha=(0.0, 1.0))
creates an augmenter that turns images to their grayscale versions with
an alpha value in the range ``0 <= alpha <= 1``. An alpha value of 0.5 would
mean, that the output image is 50 percent of the input image and 50
percent of the grayscale image (i.e. 50 percent of color removed).
"""
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ChangeColorspace(to_colorspace=ChangeColorspace.GRAY, alpha=alpha, from_colorspace=from_colorspace,
name=name, deterministic=deterministic, random_state=random_state)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/segmentation.py | augmentation/image_augmentation/helpers/imgaug/augmenters/segmentation.py | """
Augmenters that apply changes to images based on forms of segmentation.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.Superpixels(...)
])
List of augmenters:
* Superpixels
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from skimage import segmentation, measure
import six.moves as sm
from . import meta
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
class Superpixels(meta.Augmenter):
    """
    Completely or partially transform images to their superpixel representation.

    This implementation uses skimage's version of the SLIC algorithm.

    dtype support::

        if (image size <= max_size)::

            * ``uint8``: yes; fully tested
            * ``uint16``: yes; tested
            * ``uint32``: yes; tested
            * ``uint64``: limited (1)
            * ``int8``: yes; tested
            * ``int16``: yes; tested
            * ``int32``: yes; tested
            * ``int64``: limited (1)
            * ``float16``: no (2)
            * ``float32``: no (2)
            * ``float64``: no (3)
            * ``float128``: no (2)
            * ``bool``: yes; tested

            - (1) Superpixel mean intensity replacement requires computing
                  these means as float64s. This can cause inaccuracies for
                  large integer values.
            - (2) Error in scikit-image.
            - (3) Loss of resolution in scikit-image.

        if (image size > max_size)::

            minimum of (
                ``imgaug.augmenters.segmentation.Superpixels(image size <= max_size)``,
                :func:`imgaug.imgaug.imresize_many_images`
            )

    Parameters
    ----------
    p_replace : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Defines the probability of any superpixel area being replaced by the
        superpixel, i.e. by the average pixel color within its area:

            * A probability of 0 would mean, that no superpixel area is
              replaced by its average (image is not changed at all).
            * A probability of 0.5 would mean, that half of all superpixels
              are replaced by their average color.
            * A probability of 1.0 would mean, that all superpixels are
              replaced by their average color (resulting in a standard
              superpixel image).

        Behaviour based on chosen datatypes for this parameter:

            * If number, then that number will always be used.
            * If tuple ``(a, b)``, then a random probability will be sampled
              from the interval ``[a, b]`` per image.
            * If a list, then a random value will be sampled from that list
              per image.
            * If this parameter is a StochasticParameter, it is expected to
              return values between 0 and 1. Values ``>=0.5`` will be
              interpreted as the command to replace a superpixel region with
              its mean. Recommended to be some form of ``Binomial(...)``.

    n_segments : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
        Target number of superpixels to generate. Lower numbers are faster.

            * If a single int, then that value will always be used as the
              number of segments.
            * If a tuple ``(a, b)``, then a value from the discrete interval
              ``[a..b]`` will be sampled per image.
            * If a list, then a random value will be sampled from that list
              per image.
            * If a StochasticParameter, then that parameter will be queried
              to draw one value per image.

    max_size : int or None, optional
        Maximum image size at which the superpixels are generated.
        If the width or height of an image exceeds this value, it will be
        downscaled so that the longest side matches `max_size`.
        Though, the final output (superpixel) image has the same size as the
        input image.
        This is done to speed up the superpixel algorithm.
        Use None to apply no downscaling.

    interpolation : int or str, optional
        Interpolation method to use during downscaling when `max_size` is
        exceeded. Valid methods are the same as in
        :func:`imgaug.imgaug.imresize_single_image`.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Superpixels(p_replace=1.0, n_segments=64)

    generates ~64 superpixels per image and replaces all of them with
    their average color (standard superpixel image).

    >>> aug = iaa.Superpixels(p_replace=0.5, n_segments=64)

    generates always ~64 superpixels per image and replaces half of them
    with their average color, while the other half are left unchanged (i.e.
    they still show the input image's content).

    >>> aug = iaa.Superpixels(p_replace=(0.25, 1.0), n_segments=(16, 128))

    generates between ~16 and ~128 superpixels per image and replaces
    25 to 100 percent of them with their average color.

    """

    def __init__(self, p_replace=0, n_segments=100, max_size=128, interpolation="linear",
                 name=None, deterministic=False, random_state=None):
        super(Superpixels, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        # probability per superpixel of being replaced by its mean color
        self.p_replace = iap.handle_probability_param(p_replace, "p_replace", tuple_to_uniform=True,
                                                      list_to_choice=True)
        # target number of superpixels handed to skimage's slic()
        self.n_segments = iap.handle_discrete_param(n_segments, "n_segments", value_range=(1, None),
                                                    tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
        self.max_size = max_size
        self.interpolation = interpolation

    def _augment_images(self, images, random_state, parents, hooks):
        iadt.gate_dtypes(images,
                         allowed=["bool", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
                         disallowed=["uint128", "uint256", "int128", "int256",
                                     "float16", "float32", "float64", "float96", "float128", "float256"],
                         augmenter=self)

        nb_images = len(images)
        # one random state for the n_segments draw + one per image
        rss = ia.derive_random_states(random_state, 1+nb_images)
        n_segments_samples = self.n_segments.draw_samples((nb_images,), random_state=rss[0])
        for i, (image, rs) in enumerate(zip(images, rss[1:])):
            # TODO this results in an error when n_segments is 0
            # one replace-decision per requested superpixel
            replace_samples = self.p_replace.draw_samples((n_segments_samples[i],), random_state=rs)

            if np.max(replace_samples) == 0:
                # not a single superpixel would be replaced by its average color,
                # i.e. the image would not be changed, so just keep it
                pass
            else:
                image = images[i]

                min_value, _center_value, max_value = iadt.get_value_range_of_dtype(image.dtype)

                orig_shape = image.shape
                # optionally downscale before running SLIC (for speed); the
                # result is upscaled back to orig_shape at the end
                if self.max_size is not None:
                    size = max(image.shape[0], image.shape[1])
                    if size > self.max_size:
                        resize_factor = self.max_size / size
                        new_height, new_width = int(image.shape[0] * resize_factor), int(image.shape[1] * resize_factor)
                        image = ia.imresize_single_image(image, (new_height, new_width),
                                                         interpolation=self.interpolation)

                image_sp = np.copy(image)
                segments = segmentation.slic(image, n_segments=n_segments_samples[i], compactness=10)
                nb_channels = image.shape[2]
                for c in sm.xrange(nb_channels):
                    # segments+1 here because otherwise regionprops always misses
                    # the last label
                    regions = measure.regionprops(segments+1, intensity_image=image[..., c])
                    for ridx, region in enumerate(regions):
                        # with mod here, because slic can sometimes create more superpixel
                        # than requested. replace_samples then does not have enough
                        # values, so we just start over with the first one again.
                        if replace_samples[ridx % len(replace_samples)] >= 0.5:
                            mean_intensity = region.mean_intensity
                            image_sp_c = image_sp[..., c]

                            if image_sp_c.dtype.kind in ["i", "u", "b"]:
                                # After rounding the value can end up slightly outside of the value_range.
                                # Hence, we need to clip. We do clip via min(max(...)) instead of np.clip
                                # because the latter one does not seem to keep dtypes for dtypes with
                                # large itemsizes (e.g. uint64).
                                value = int(np.round(mean_intensity))
                                value = min(max(value, min_value), max_value)
                                image_sp_c[segments == ridx] = value
                            else:
                                image_sp_c[segments == ridx] = mean_intensity

                # restore the original resolution if we downscaled above
                if orig_shape != image.shape:
                    image_sp = ia.imresize_single_image(image_sp, orig_shape[0:2], interpolation=self.interpolation)

                images[i] = image_sp
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # superpixel replacement does not alter heatmaps
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # superpixel replacement does not alter keypoint coordinates
        return keypoints_on_images

    def get_parameters(self):
        return [self.p_replace, self.n_segments, self.max_size, self.interpolation]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/blur.py | augmentation/image_augmentation/helpers/imgaug/augmenters/blur.py | """
Augmenters that blur images.
Do not import directly from this file, as the categorization is not final.
Use instead ::
from imgaug import augmenters as iaa
and then e.g. ::
seq = iaa.Sequential([
iaa.GaussianBlur((0.0, 3.0)),
iaa.AverageBlur((2, 5))
])
List of augmenters:
* GaussianBlur
* AverageBlur
* MedianBlur
* BilateralBlur
* MotionBlur
"""
from __future__ import print_function, division, absolute_import
import warnings
import numpy as np
from scipy import ndimage
import cv2
import six.moves as sm
from . import meta
from . import convolutional as iaa_convolutional
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
# TODO add border mode, cval
def blur_gaussian_(image, sigma, ksize=None, backend="auto", eps=1e-3):
    """
    Blur an image using gaussian blurring.

    This operation might change the input image in-place.

    dtype support::

        if (backend="auto")::

            * ``uint8``: yes; fully tested (1)
            * ``uint16``: yes; tested (1)
            * ``uint32``: yes; tested (2)
            * ``uint64``: yes; tested (2)
            * ``int8``: yes; tested (1)
            * ``int16``: yes; tested (1)
            * ``int32``: yes; tested (1)
            * ``int64``: yes; tested (2)
            * ``float16``: yes; tested (1)
            * ``float32``: yes; tested (1)
            * ``float64``: yes; tested (1)
            * ``float128``: no
            * ``bool``: yes; tested (1)

            - (1) Handled by ``cv2``. See ``backend="cv2"``.
            - (2) Handled by ``scipy``. See ``backend="scipy"``.

        if (backend="cv2")::

            * ``uint8``: yes; fully tested
            * ``uint16``: yes; tested
            * ``uint32``: no (2)
            * ``uint64``: no (3)
            * ``int8``: yes; tested (4)
            * ``int16``: yes; tested
            * ``int32``: yes; tested (5)
            * ``int64``: no (6)
            * ``float16``: yes; tested (7)
            * ``float32``: yes; tested
            * ``float64``: yes; tested
            * ``float128``: no (8)
            * ``bool``: yes; tested (1)

            - (1) Mapped internally to ``float32``. Otherwise causes ``TypeError: src data type = 0 is not supported``.
            - (2) Causes ``TypeError: src data type = 6 is not supported``.
            - (3) Causes ``cv2.error: OpenCV(3.4.5) (...)/filter.cpp:2957: error: (-213:The function/feature is not
                  implemented) Unsupported combination of source format (=4), and buffer format (=5) in function
                  'getLinearRowFilter'``.
            - (4) Mapped internally to ``int16``. Otherwise causes ``cv2.error: OpenCV(3.4.5) (...)/filter.cpp:2957:
                  error: (-213:The function/feature is not implemented) Unsupported combination of source format (=1),
                  and buffer format (=5) in function 'getLinearRowFilter'``.
            - (5) Mapped internally to ``float64``. Otherwise causes ``cv2.error: OpenCV(3.4.5) (...)/filter.cpp:2957:
                  error: (-213:The function/feature is not implemented) Unsupported combination of source format (=4),
                  and buffer format (=5) in function 'getLinearRowFilter'``.
            - (6) Causes ``cv2.error: OpenCV(3.4.5) (...)/filter.cpp:2957: error: (-213:The function/feature is not
                  implemented) Unsupported combination of source format (=4), and buffer format (=5) in function
                  'getLinearRowFilter'``.
            - (7) Mapped internally to ``float32``. Otherwise causes ``TypeError: src data type = 23 is not supported``.
            - (8) Causes ``TypeError: src data type = 13 is not supported``.

        if (backend="scipy")::

            * ``uint8``: yes; fully tested
            * ``uint16``: yes; tested
            * ``uint32``: yes; tested
            * ``uint64``: yes; tested
            * ``int8``: yes; tested
            * ``int16``: yes; tested
            * ``int32``: yes; tested
            * ``int64``: yes; tested
            * ``float16``: yes; tested (1)
            * ``float32``: yes; tested
            * ``float64``: yes; tested
            * ``float128``: no (2)
            * ``bool``: yes; tested (3)

            - (1) Mapped internally to ``float32``. Otherwise causes ``RuntimeError: array type dtype('float16')
                  not supported``.
            - (2) Causes ``RuntimeError: array type dtype('float128') not supported``.
            - (3) Mapped internally to ``float32``. Otherwise too inaccurate.

    Parameters
    ----------
    image : numpy.ndarray
        The image to blur. Expected to be of shape ``(H, W)`` or ``(H, W, C)``.

    sigma : number
        Standard deviation of the gaussian blur. Larger numbers result in more large-scale blurring, which is
        overall slower than small-scale blurring.

    ksize : None or int, optional
        Size in height/width of the gaussian kernel. This argument is only understood by the ``cv2`` backend.
        If it is set to None, an appropriate value for `ksize` will automatically be derived from `sigma`.
        The value is chosen tighter for larger sigmas to avoid as much as possible very large kernel sizes
        and thereby improve performance.

    backend : {'auto', 'cv2', 'scipy'}, optional
        Backend library to use. If ``auto``, then the likely best library will be automatically picked per image.
        That is usually equivalent to ``cv2`` (OpenCV) and it will fall back to ``scipy`` for datatypes not
        supported by OpenCV.

    eps : number, optional
        A threshold used to decide whether `sigma` can be considered zero.

    Returns
    -------
    image : numpy.ndarray
        The blurred image. Same shape and dtype as the input.

    """
    # sigma at or below eps is treated as "no blur": the input is returned
    # unchanged at the bottom of the function
    if sigma > 0 + eps:
        dtype = image.dtype

        iadt.gate_dtypes(image,
                         allowed=["bool",
                                  "uint8", "uint16", "uint32",
                                  "int8", "int16", "int32", "int64", "uint64",
                                  "float16", "float32", "float64"],
                         disallowed=["uint128", "uint256",
                                     "int128", "int256",
                                     "float96", "float128", "float256"],
                         augmenter=None)

        # pick the concrete backend; 'auto' prefers cv2 and falls back to
        # scipy for dtypes cv2 cannot handle
        dts_not_supported_by_cv2 = ["uint32", "uint64", "int64", "float128"]
        backend_to_use = backend
        if backend == "auto":
            backend_to_use = "cv2" if image.dtype.name not in dts_not_supported_by_cv2 else "scipy"
        elif backend == "cv2":
            assert image.dtype.name not in dts_not_supported_by_cv2,\
                ("Requested 'cv2' backend, but provided %s input image, which "
                 + "cannot be handled by that backend. Choose a different backend or "
                 + "set backend to 'auto' or use a different datatype.") % (image.dtype.name,)
        elif backend == "scipy":
            # can handle all dtypes that were allowed in gate_dtypes()
            pass

        if backend_to_use == "scipy":
            if dtype.name == "bool":
                # We convert bool to float32 here, because gaussian_filter() seems to only return True when
                # the underlying value is approximately 1.0, not when it is above 0.5. So we do that here manually.
                # cv2 does not support bool for gaussian blur
                image = image.astype(np.float32, copy=False)
            elif dtype.name == "float16":
                image = image.astype(np.float32, copy=False)

            # gaussian_filter() has no ksize argument
            # TODO it does have a truncate argument that truncates at x standard deviations -- maybe can be used
            # similarly to ksize
            if ksize is not None:
                warnings.warn("Requested 'scipy' backend or picked it automatically by backend='auto' "
                              "in blur_gaussian_(), but also provided 'ksize' argument, which is not understood by "
                              "that backend and will be ignored.")

            # Note that while gaussian_filter can be applied to all channels at the same time, that should not
            # be done here, because then the blurring would also happen across channels (e.g. red values might
            # be mixed with blue values in RGB)
            if image.ndim == 2:
                image[:, :] = ndimage.gaussian_filter(image[:, :], sigma, mode="mirror")
            else:
                nb_channels = image.shape[2]
                for channel in sm.xrange(nb_channels):
                    image[:, :, channel] = ndimage.gaussian_filter(image[:, :, channel], sigma, mode="mirror")
        else:
            # cv2 backend: map dtypes cv2 cannot filter to wider ones; the
            # original dtype is restored at the end of the function
            if dtype.name == "bool":
                image = image.astype(np.float32, copy=False)
            elif dtype.name == "float16":
                image = image.astype(np.float32, copy=False)
            elif dtype.name == "int8":
                image = image.astype(np.int16, copy=False)
            elif dtype.name == "int32":
                image = image.astype(np.float64, copy=False)

            # ksize here is derived from the equation to compute sigma based on ksize,
            # see https://docs.opencv.org/3.1.0/d4/d86/group__imgproc__filter.html -> cv::getGaussianKernel()
            # example values:
            #   sig = 0.1 -> ksize = -1.666
            #   sig = 0.5 -> ksize = 0.9999
            #   sig = 1.0 -> ksize = 1.0
            #   sig = 2.0 -> ksize = 11.0
            #   sig = 3.0 -> ksize = 17.666
            # ksize = ((sig - 0.8)/0.3 + 1)/0.5 + 1
            if ksize is None:
                if sigma < 3.0:
                    ksize = 3.3 * sigma  # 99% of weight
                elif sigma < 5.0:
                    ksize = 2.9 * sigma  # 97% of weight
                else:
                    ksize = 2.6 * sigma  # 95% of weight

                # we use 5x5 here as the minimum size as that simplifies comparisons with gaussian_filter() in
                # the tests
                # TODO reduce this to 3x3
                ksize = int(max(ksize, 5))
            else:
                assert ia.is_single_integer(ksize), "Expected 'ksize' argument to be a number, got %s." % (type(ksize),)

            # cv2 requires an odd kernel size
            ksize = ksize + 1 if ksize % 2 == 0 else ksize

            if ksize > 0:
                image_warped = cv2.GaussianBlur(image, (ksize, ksize), sigmaX=sigma, sigmaY=sigma,
                                                borderType=cv2.BORDER_REFLECT_101)
                # re-add channel axis removed by cv2 if input was (H, W, 1)
                image = image_warped[..., np.newaxis] if image.ndim == 3 and image_warped.ndim == 2 else image_warped

        # restore the original dtype (bool via thresholding at 0.5)
        if dtype.name == "bool":
            image = image > 0.5
        elif dtype.name != image.dtype.name:
            image = iadt.restore_dtypes_(image, dtype)

    return image
# TODO offer different values for sigma on x/y-axis, supported by cv2 but not by scipy
# TODO add channelwise flag - channelwise=False would be supported by scipy
class GaussianBlur(meta.Augmenter):  # pylint: disable=locally-disabled, unused-variable, line-too-long
    """
    Blur images using gaussian kernels.

    dtype support::

        See :func:`imgaug.augmenters.blur.blur_gaussian_(backend="auto")`.

    Parameters
    ----------
    sigma : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Standard deviation of the gaussian kernel. Common values lie in the
        range ``0.0`` (no blur) to ``3.0`` (strong blur).

            * number: always use exactly that standard deviation.
            * tuple ``(a, b)``: sample a value from ``a <= x <= b`` per image.
            * list: sample one of the listed values per image.
            * StochasticParameter: draw ``N`` samples for ``N`` input images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.GaussianBlur(sigma=1.5)

    blurs all images with a gaussian kernel of standard deviation 1.5.

    >>> aug = iaa.GaussianBlur(sigma=(0.0, 3.0))

    blurs each image with a standard deviation sampled from
    ``0.0 <= x <= 3.0``.
    """

    def __init__(self, sigma=0, name=None, deterministic=False, random_state=None):
        super(GaussianBlur, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        self.sigma = iap.handle_continuous_param(
            sigma, "sigma", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
        # sigmas at or below this epsilon are treated as "no blur"
        self.eps = 1e-3

    def _augment_images(self, images, random_state, parents, hooks):
        # one sigma per image; blurring happens in-place via image[...] = ...
        sigmas = self.sigma.draw_samples((len(images),), random_state=random_state)
        for image, sigma_i in zip(images, sigmas):
            image[...] = blur_gaussian_(image, sigma=sigma_i, eps=self.eps)
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # heatmaps are not blurred
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # blurring does not move keypoint coordinates
        return keypoints_on_images

    def get_parameters(self):
        return [self.sigma]
class AverageBlur(meta.Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long
"""
Blur an image by computing simple means over neighbourhoods.
The padding behaviour around the image borders is cv2's ``BORDER_REFLECT_101``.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: no (1)
* ``uint64``: no (2)
* ``int8``: yes; tested (3)
* ``int16``: yes; tested
* ``int32``: no (4)
* ``int64``: no (5)
* ``float16``: yes; tested (6)
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no
* ``bool``: yes; tested (7)
- (1) rejected by ``cv2.blur()``
- (2) loss of resolution in ``cv2.blur()`` (result is ``int32``)
- (3) ``int8`` is mapped internally to ``int16``, ``int8`` itself leads to cv2 error "Unsupported combination
of source format (=1), and buffer format (=4) in function 'getRowSumFilter'" in ``cv2``
- (4) results too inaccurate
- (5) loss of resolution in ``cv2.blur()`` (result is ``int32``)
- (6) ``float16`` is mapped internally to ``float32``
- (7) ``bool`` is mapped internally to ``float32``
Parameters
----------
k : int or tuple of int or tuple of tuple of int or imgaug.parameters.StochasticParameter\
or tuple of StochasticParameter, optional
Kernel size to use.
* If a single int, then that value will be used for the height
and width of the kernel.
* If a tuple of two ints ``(a, b)``, then the kernel size will be
sampled from the interval ``[a..b]``.
* If a tuple of two tuples of ints ``((a, b), (c, d))``, then per image
a random kernel height will be sampled from the interval ``[a..b]``
and a random kernel width will be sampled from the interval ``[c..d]``.
* If a StochasticParameter, then ``N`` samples will be drawn from
that parameter per ``N`` input images, each representing the kernel
size for the nth image.
* If a tuple ``(a, b)``, where either ``a`` or ``b`` is a tuple, then ``a``
and ``b`` will be treated according to the rules above. This leads
to different values for height and width of the kernel.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AverageBlur(k=5)
Blurs all images using a kernel size of ``5x5``.
>>> aug = iaa.AverageBlur(k=(2, 5))
Blurs images using a varying kernel size per image, which is sampled
from the interval ``[2..5]``.
>>> aug = iaa.AverageBlur(k=((5, 7), (1, 3)))
    Blurs images using a varying kernel size per image, whose height
    is sampled from the interval ``[5..7]`` and whose width is sampled
    from ``[1..3]``.
"""
def __init__(self, k=1, name=None, deterministic=False, random_state=None):
super(AverageBlur, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
# TODO replace this by iap.handle_discrete_kernel_size()
self.mode = "single"
if ia.is_single_number(k):
self.k = iap.Deterministic(int(k))
elif ia.is_iterable(k):
ia.do_assert(len(k) == 2)
if all([ia.is_single_number(ki) for ki in k]):
self.k = iap.DiscreteUniform(int(k[0]), int(k[1]))
elif all([isinstance(ki, iap.StochasticParameter) for ki in k]):
self.mode = "two"
self.k = (k[0], k[1])
else:
k_tuple = [None, None]
if ia.is_single_number(k[0]):
k_tuple[0] = iap.Deterministic(int(k[0]))
elif ia.is_iterable(k[0]) and all([ia.is_single_number(ki) for ki in k[0]]):
k_tuple[0] = iap.DiscreteUniform(int(k[0][0]), int(k[0][1]))
else:
raise Exception("k[0] expected to be int or tuple of two ints, got %s" % (type(k[0]),))
if ia.is_single_number(k[1]):
k_tuple[1] = iap.Deterministic(int(k[1]))
elif ia.is_iterable(k[1]) and all([ia.is_single_number(ki) for ki in k[1]]):
k_tuple[1] = iap.DiscreteUniform(int(k[1][0]), int(k[1][1]))
else:
raise Exception("k[1] expected to be int or tuple of two ints, got %s" % (type(k[1]),))
self.mode = "two"
self.k = k_tuple
elif isinstance(k, iap.StochasticParameter):
self.k = k
else:
raise Exception("Expected int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(k),))
    def _augment_images(self, images, random_state, parents, hooks):
        # Reject dtypes for which cv2.blur() produces wrong or unsupported
        # results (see the dtype support notes in the class docstring).
        iadt.gate_dtypes(images,
                         allowed=["bool", "uint8", "uint16", "int8", "int16", "float16", "float32", "float64"],
                         disallowed=["uint32", "uint64", "uint128", "uint256",
                                     "int32", "int64", "int128", "int256",
                                     "float96", "float128", "float256"],
                         augmenter=self)
        nb_images = len(images)
        if self.mode == "single":
            # one kernel size per image, shared by height and width
            samples = self.k.draw_samples((nb_images,), random_state=random_state)
            samples = (samples, samples)
        else:
            # kernel height and width sampled from independent parameters
            rss = ia.derive_random_states(random_state, 2)
            samples = (
                self.k[0].draw_samples((nb_images,), random_state=rss[0]),
                self.k[1].draw_samples((nb_images,), random_state=rss[1]),
            )
        for i, (image, kh, kw) in enumerate(zip(images, samples[0], samples[1])):
            kernel_impossible = (kh == 0 or kw == 0)
            kernel_does_nothing = (kh == 1 and kw == 1)
            if not kernel_impossible and not kernel_does_nothing:
                input_dtype = image.dtype
                # cv2.blur() cannot handle bool/float16/int8 directly; map
                # them to supported dtypes and restore the input dtype below.
                if image.dtype in [np.bool_, np.float16]:
                    image = image.astype(np.float32, copy=False)
                elif image.dtype == np.int8:
                    image = image.astype(np.int16, copy=False)
                # NOTE(review): cv2.blur() takes ksize as (width, height);
                # passing (kh, kw) appears to swap the sampled height/width
                # relative to the docstring -- confirm intended axis order.
                image_aug = cv2.blur(image, (kh, kw))
                # cv2.blur() removes channel axis for single-channel images
                if image_aug.ndim == 2:
                    image_aug = image_aug[..., np.newaxis]
                if input_dtype == np.bool_:
                    # threshold the float result back to a boolean mask
                    image_aug = image_aug > 0.5
                elif input_dtype in [np.int8, np.float16]:
                    image_aug = iadt.restore_dtypes_(image_aug, input_dtype)
                images[i] = image_aug
        return images
    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Blurring changes only pixel intensities; heatmaps pass through.
        return heatmaps
    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Blurring does not move coordinates; keypoints pass through.
        return keypoints_on_images
    def get_parameters(self):
        # The kernel size is the only parameter of this augmenter.
        return [self.k]
class MedianBlur(meta.Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long
    """
    Blur an image by replacing each pixel with the median over its
    neighbourhood.

    Median blurring is useful to remove small dirt from images. At larger
    kernel sizes its effects start to resemble those of Superpixels.

    dtype support::

        * ``uint8``: yes; fully tested
        * ``uint16``: ?
        * ``uint32``: ?
        * ``uint64``: ?
        * ``int8``: ?
        * ``int16``: ?
        * ``int32``: ?
        * ``int64``: ?
        * ``float16``: ?
        * ``float32``: ?
        * ``float64``: ?
        * ``float128``: ?
        * ``bool``: ?

    Parameters
    ----------
    k : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
        Kernel size.

            * If a single int, then that value will be used for the height and
              width of the kernel. Must be an odd value.
            * If a tuple of two ints ``(a, b)``, then an odd kernel size will
              be sampled per image from the interval ``[a..b]``. ``a`` and
              ``b`` must both be odd values.
            * If a list, then a random value will be sampled from that list
              per image.
            * If a StochasticParameter, then ``N`` samples will be drawn from
              that parameter per ``N`` input images, each representing the
              kernel size for the nth image. Expected to be discrete. Sampled
              values that are even are increased by 1.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.MedianBlur(k=5)

    Blurs all images using a kernel size of ``5x5``.

    >>> aug = iaa.MedianBlur(k=(3, 7))

    Blurs images using a varying kernel size per image, which is an odd
    value sampled from the interval ``[3..7]``, i.e. 3, 5 or 7.

    """

    def __init__(self, k=1, name=None, deterministic=False, random_state=None):
        super(MedianBlur, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        # TODO replace this by iap.handle_discrete_kernel_size()
        self.k = iap.handle_discrete_param(k, "k", value_range=(1, None), tuple_to_uniform=True, list_to_choice=True,
                                           allow_floats=False)
        # cv2.medianBlur() accepts only odd kernel sizes, so validate any
        # directly provided numbers here. (Values drawn from stochastic
        # parameters are fixed up during augmentation instead.)
        if ia.is_single_integer(k):
            ia.do_assert(k % 2 != 0, "Expected k to be odd, got %d. Add or subtract 1." % (int(k),))
        elif ia.is_iterable(k):
            ia.do_assert(all([k_i % 2 != 0 for k_i in k]),
                         "Expected all values in iterable k to be odd, but at least one was not. "
                         + "Add or subtract 1 to/from that value.")

    def _augment_images(self, images, random_state, parents, hooks):
        ks = self.k.draw_samples((len(images),), random_state=random_state)
        for i, (image, k_i) in enumerate(zip(images, ks)):
            if k_i <= 1:
                # a kernel of size 1 would be an identity operation
                continue
            if k_i % 2 == 0:
                # enforce oddness as required by cv2.medianBlur()
                k_i = k_i + 1
            image_aug = cv2.medianBlur(image, k_i)
            # cv2.medianBlur() removes the channel axis for single-channel
            # images; restore it
            if image_aug.ndim == 2:
                image_aug = image_aug[..., np.newaxis]
            images[i] = image_aug
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # intensity-only augmentation; heatmaps pass through unchanged
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # intensity-only augmentation; keypoints pass through unchanged
        return keypoints_on_images

    def get_parameters(self):
        return [self.k]
# TODO tests
class BilateralBlur(meta.Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long
    """
    Blur/Denoise an image using a bilateral filter.

    Bilateral filters blur homogenous and textured areas, while trying to
    preserve edges.

    See http://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html#bilateralfilter
    for more information regarding the parameters.

    dtype support::

        * ``uint8``: yes; not tested
        * ``uint16``: ?
        * ``uint32``: ?
        * ``uint64``: ?
        * ``int8``: ?
        * ``int16``: ?
        * ``int32``: ?
        * ``int64``: ?
        * ``float16``: ?
        * ``float32``: ?
        * ``float64``: ?
        * ``float128``: ?
        * ``bool``: ?

    Parameters
    ----------
    d : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
        Diameter of each pixel neighborhood with value range ``[1 .. inf)``.
        High values for d lead to significantly worse performance. Values
        equal or less than 10 seem to be good. Use ``<5`` for real-time
        applications.

            * If a single int, then that value will be used as the diameter.
            * If a tuple of two ints ``(a, b)``, then the diameter will be
              sampled per image from the interval ``[a..b]``.
            * If a list, then a random value will be sampled from that list
              per image.
            * If a StochasticParameter, then ``N`` samples will be drawn from
              that parameter per ``N`` input images, each representing the
              diameter for the nth image. Expected to be discrete.

    sigma_color : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Filter sigma in the color space with value range ``[1, inf)``. A
        larger value means that farther colors within the pixel neighborhood
        (see `sigma_space`) will be mixed together, resulting in larger areas
        of semi-equal color. Accepts the same input formats as `d`, except
        that sampled/provided values may be continuous.

    sigma_space : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Filter sigma in the coordinate space with value range ``[1, inf)``. A
        larger value means that farther pixels will influence each other as
        long as their colors are close enough (see `sigma_color`). Accepts
        the same input formats as `sigma_color`.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.BilateralBlur(d=(3, 10), sigma_color=(10, 250), sigma_space=(10, 250))

    Blurs all images using a bilateral filter with a max distance sampled
    from the interval ``[3, 10]`` and wide ranges for `sigma_color` and
    `sigma_space`.

    """

    def __init__(self, d=1, sigma_color=(10, 250), sigma_space=(10, 250), name=None, deterministic=False,
                 random_state=None):
        super(BilateralBlur, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        # d is a discrete kernel diameter; both sigmas are continuous values
        self.d = iap.handle_discrete_param(d, "d", value_range=(1, None), tuple_to_uniform=True, list_to_choice=True,
                                           allow_floats=False)
        self.sigma_color = iap.handle_continuous_param(sigma_color, "sigma_color", value_range=(1, None),
                                                       tuple_to_uniform=True, list_to_choice=True)
        self.sigma_space = iap.handle_continuous_param(sigma_space, "sigma_space", value_range=(1, None),
                                                       tuple_to_uniform=True, list_to_choice=True)

    def _augment_images(self, images, random_state, parents, hooks):
        # cv2.bilateralFilter() is only applied to 3-channel (color) images
        ia.do_assert(all([image.shape[2] == 3 for image in images]),
                     ("BilateralBlur can currently only be applied to images with 3 channels."
                      + "Got channels: %s") % ([image.shape[2] for image in images],))
        nb_images = len(images)
        rss = ia.derive_random_states(random_state, 3)
        ds = self.d.draw_samples((nb_images,), random_state=rss[0])
        sigmas_color = self.sigma_color.draw_samples((nb_images,), random_state=rss[1])
        sigmas_space = self.sigma_space.draw_samples((nb_images,), random_state=rss[2])
        for i, (image, d_i, sigma_color_i, sigma_space_i) in enumerate(
                zip(images, ds, sigmas_color, sigmas_space)):
            if d_i != 1:
                # d == 1 is treated as a no-op here and the image kept as-is
                images[i] = cv2.bilateralFilter(image, d_i, sigma_color_i, sigma_space_i)
        return images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # intensity-only augmentation; heatmaps pass through unchanged
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # intensity-only augmentation; keypoints pass through unchanged
        return keypoints_on_images

    def get_parameters(self):
        return [self.d, self.sigma_color, self.sigma_space]
# TODO add k sizing via float/percentage
def MotionBlur(k=5, angle=(0, 360), direction=(-1.0, 1.0), order=1, name=None, deterministic=False, random_state=None):
"""
    Augmenter that applies motion blur to images by convolving them with a
    directed blur kernel.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
k : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
Kernel size to use.
* If a single int, then that value will be used for the height
and width of the kernel.
* If a tuple of two ints ``(a, b)``, then the kernel size will be
sampled from the interval ``[a..b]``.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then ``N`` samples will be drawn from
that parameter per ``N`` input images, each representing the kernel
size for the nth image.
angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Angle of the motion blur in degrees (clockwise, relative to top center direction).
* If a number, exactly that value will be used.
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmenters/overlay.py | augmentation/image_augmentation/helpers/imgaug/augmenters/overlay.py | """Alias for module blend.
Deprecated module. Original name for module blend.py. Was changed in 0.2.8.
"""
from __future__ import print_function, division, absolute_import
from . import blend
import imgaug as ia
# Each wrapper below forwards to the renamed symbol in augmenters.blend and
# emits a deprecation warning via ia.deprecated (module renamed in 0.2.8).
@ia.deprecated(alt_func="imgaug.augmenters.blend.blend_alpha()",
               comment="It has the exactly same interface.")
def blend_alpha(*args, **kwargs):
    # Deprecated alias; forwards all arguments to blend.blend_alpha().
    return blend.blend_alpha(*args, **kwargs)
@ia.deprecated(alt_func="imgaug.augmenters.blend.Alpha",
               comment="It has the exactly same interface.")
def Alpha(*args, **kwargs):
    # Deprecated alias; forwards all arguments to blend.Alpha.
    return blend.Alpha(*args, **kwargs)
@ia.deprecated(alt_func="imgaug.augmenters.blend.AlphaElementwise",
               comment="It has the exactly same interface.")
def AlphaElementwise(*args, **kwargs):
    # Deprecated alias; forwards all arguments to blend.AlphaElementwise.
    return blend.AlphaElementwise(*args, **kwargs)
@ia.deprecated(alt_func="imgaug.augmenters.blend.SimplexNoiseAlpha",
               comment="It has the exactly same interface.")
def SimplexNoiseAlpha(*args, **kwargs):
    # Deprecated alias; forwards all arguments to blend.SimplexNoiseAlpha.
    return blend.SimplexNoiseAlpha(*args, **kwargs)
@ia.deprecated(alt_func="imgaug.augmenters.blend.FrequencyNoiseAlpha",
               comment="It has the exactly same interface.")
def FrequencyNoiseAlpha(*args, **kwargs):
    # Deprecated alias; forwards all arguments to blend.FrequencyNoiseAlpha.
    return blend.FrequencyNoiseAlpha(*args, **kwargs)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmentables/lines.py | augmentation/image_augmentation/helpers/imgaug/augmentables/lines.py | from __future__ import print_function, division, absolute_import
import copy as copylib
import numpy as np
import skimage.draw
import skimage.measure
import cv2
from .. import imgaug as ia
from .utils import normalize_shape, project_coords, interpolate_points
# TODO Add Line class and make LineString a list of Line elements
# TODO add to_distance_maps(), compute_hausdorff_distance(), intersects(),
# find_self_intersections(), is_self_intersecting(),
# remove_self_intersections()
class LineString(object):
"""
Class representing line strings.
A line string is a collection of connected line segments, each
having a start and end point. Each point is given as its ``(x, y)``
absolute (sub-)pixel coordinates. The end point of each segment is
also the start point of the next segment.
The line string is not closed, i.e. start and end point are expected to
differ and will not be connected in drawings.
Parameters
----------
coords : iterable of tuple of number or ndarray
The points of the line string.
label : None or str, optional
The label of the line string.
"""
    def __init__(self, coords, label=None):
        """Create a new LineString instance.

        Normalizes `coords` to a float32 ndarray of shape ``(N, 2)``.
        """
        # use the conditions here to avoid unnecessary copies of ndarray inputs
        if ia.is_np_array(coords):
            if coords.dtype.name != "float32":
                coords = coords.astype(np.float32)
        elif len(coords) == 0:
            # empty input -> canonical empty (0, 2) float32 array
            coords = np.zeros((0, 2), dtype=np.float32)
        else:
            assert ia.is_iterable(coords), (
                "Expected 'coords' to be an iterable, "
                "got type %s." % (type(coords),))
            assert all([len(coords_i) == 2 for coords_i in coords]), (
                "Expected 'coords' to contain (x,y) tuples, "
                "got %s." % (str(coords),))
            coords = np.float32(coords)
        # final sanity check on the normalized array
        assert coords.ndim == 2 and coords.shape[-1] == 2, (
            "Expected 'coords' to have shape (N, 2), got shape %s." % (
                coords.shape,))
        self.coords = coords
        self.label = label
@property
def length(self):
"""
Get the total euclidean length of the line string.
Returns
-------
float
The length based on euclidean distance.
"""
if len(self.coords) == 0:
return 0
return np.sum(self.compute_neighbour_distances())
    @property
    def xx(self):
        """Get an array of x-coordinates of all points of the line string."""
        # column 0 of the float32 (N, 2) coords array
        return self.coords[:, 0]
    @property
    def yy(self):
        """Get an array of y-coordinates of all points of the line string."""
        # column 1 of the float32 (N, 2) coords array
        return self.coords[:, 1]
    @property
    def xx_int(self):
        """Get an array of discrete x-coordinates of all points."""
        # rounded to the nearest integer, returned as int32
        return np.round(self.xx).astype(np.int32)
    @property
    def yy_int(self):
        """Get an array of discrete y-coordinates of all points."""
        # rounded to the nearest integer, returned as int32
        return np.round(self.yy).astype(np.int32)
@property
def height(self):
"""Get the height of a bounding box encapsulating the line."""
if len(self.coords) <= 1:
return 0
return np.max(self.yy) - np.min(self.yy)
@property
def width(self):
"""Get the width of a bounding box encapsulating the line."""
if len(self.coords) <= 1:
return 0
return np.max(self.xx) - np.min(self.xx)
def get_pointwise_inside_image_mask(self, image):
"""
Get for each point whether it is inside of the given image plane.
Parameters
----------
image : ndarray or tuple of int
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
Returns
-------
ndarray
Boolean array with one value per point indicating whether it is
inside of the provided image plane (``True``) or not (``False``).
"""
if len(self.coords) == 0:
return np.zeros((0,), dtype=bool)
shape = normalize_shape(image)
height, width = shape[0:2]
x_within = np.logical_and(0 <= self.xx, self.xx < width)
y_within = np.logical_and(0 <= self.yy, self.yy < height)
return np.logical_and(x_within, y_within)
# TODO add closed=False/True?
def compute_neighbour_distances(self):
"""
Get the euclidean distance between each two consecutive points.
Returns
-------
ndarray
Euclidean distances between point pairs.
Same order as in `coords`. For ``N`` points, ``N-1`` distances
are returned.
"""
if len(self.coords) <= 1:
return np.zeros((0,), dtype=np.float32)
return np.sqrt(
np.sum(
(self.coords[:-1, :] - self.coords[1:, :]) ** 2,
axis=1
)
)
def compute_pointwise_distances(self, other, default=None):
"""
Compute the minimal distance between each point on self and other.
Parameters
----------
other : tuple of number \
or imgaug.augmentables.kps.Keypoint \
or imgaug.augmentables.LineString
Other object to which to compute the distances.
default
Value to return if `other` contains no points.
Returns
-------
list of float
Distances to `other` or `default` if not distance could be computed.
"""
import shapely.geometry
from .kps import Keypoint
if isinstance(other, Keypoint):
other = shapely.geometry.Point((other.x, other.y))
elif isinstance(other, LineString):
if len(other.coords) == 0:
return default
elif len(other.coords) == 1:
other = shapely.geometry.Point(other.coords[0, :])
else:
other = shapely.geometry.LineString(other.coords)
elif isinstance(other, tuple):
assert len(other) == 2
other = shapely.geometry.Point(other)
else:
raise ValueError(
("Expected Keypoint or LineString or tuple (x,y), "
+ "got type %s.") % (type(other),))
return [shapely.geometry.Point(point).distance(other)
for point in self.coords]
def compute_distance(self, other, default=None):
"""
Compute the minimal distance between the line string and `other`.
Parameters
----------
other : tuple of number \
or imgaug.augmentables.kps.Keypoint \
or imgaug.augmentables.LineString
Other object to which to compute the distance.
default
Value to return if this line string or `other` contain no points.
Returns
-------
float
Distance to `other` or `default` if not distance could be computed.
"""
# FIXME this computes distance pointwise, does not have to be identical
# with the actual min distance (e.g. edge center to other's point)
distances = self.compute_pointwise_distances(other, default=[])
if len(distances) == 0:
return default
return min(distances)
# TODO update BB's contains(), which can only accept Keypoint currently
def contains(self, other, max_distance=1e-4):
"""
Estimate whether the bounding box contains a point.
Parameters
----------
other : tuple of number or imgaug.augmentables.kps.Keypoint
Point to check for.
max_distance : float
Maximum allowed euclidean distance between the point and the
closest point on the line. If the threshold is exceeded, the point
is not considered to be contained in the line.
Returns
-------
bool
True if the point is contained in the line string, False otherwise.
It is contained if its distance to the line or any of its points
is below a threshold.
"""
return self.compute_distance(other, default=np.inf) < max_distance
def project(self, from_shape, to_shape):
"""
Project the line string onto a differently shaped image.
E.g. if a point of the line string is on its original image at
``x=(10 of 100 pixels)`` and ``y=(20 of 100 pixels)`` and is projected
onto a new image with size ``(width=200, height=200)``, its new
position will be ``(x=20, y=40)``.
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int or ndarray
Shape of the original image. (Before resize.)
to_shape : tuple of int or ndarray
Shape of the new image. (After resize.)
Returns
-------
out : imgaug.augmentables.lines.LineString
Line string with new coordinates.
"""
coords_proj = project_coords(self.coords, from_shape, to_shape)
return self.copy(coords=coords_proj)
def is_fully_within_image(self, image, default=False):
"""
Estimate whether the line string is fully inside the image area.
Parameters
----------
image : ndarray or tuple of int
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
default
Default value to return if the line string contains no points.
Returns
-------
bool
True if the line string is fully inside the image area.
False otherwise.
"""
if len(self.coords) == 0:
return default
return np.all(self.get_pointwise_inside_image_mask(image))
def is_partly_within_image(self, image, default=False):
"""
Estimate whether the line string is at least partially inside the image.
Parameters
----------
image : ndarray or tuple of int
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
default
Default value to return if the line string contains no points.
Returns
-------
bool
True if the line string is at least partially inside the image area.
False otherwise.
"""
if len(self.coords) == 0:
return default
# check mask first to avoid costly computation of intersection points
# whenever possible
mask = self.get_pointwise_inside_image_mask(image)
if np.any(mask):
return True
return len(self.clip_out_of_image(image)) > 0
def is_out_of_image(self, image, fully=True, partly=False, default=True):
"""
Estimate whether the line is partially/fully outside of the image area.
Parameters
----------
image : ndarray or tuple of int
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
fully : bool, optional
Whether to return True if the bounding box is fully outside fo the
image area.
partly : bool, optional
Whether to return True if the bounding box is at least partially
outside fo the image area.
default
Default value to return if the line string contains no points.
Returns
-------
bool
`default` if the line string has no points.
True if the line string is partially/fully outside of the image
area, depending on defined parameters.
False otherwise.
"""
if len(self.coords) == 0:
return default
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
else:
return fully
    def clip_out_of_image(self, image):
        """
        Clip off all parts of the line_string that are outside of the image.

        Parameters
        ----------
        image : ndarray or tuple of int
            Either an image with shape ``(H,W,[C])`` or a tuple denoting
            such an image shape.

        Returns
        -------
        list of imgaug.augmentables.lines.LineString
            Line strings, clipped to the image shape.
            The result may contain any number of line strings, including zero.
        """
        if len(self.coords) == 0:
            return []
        inside_image_mask = self.get_pointwise_inside_image_mask(image)
        ooi_mask = ~inside_image_mask
        # single point: keep it only if it lies inside the image plane
        if len(self.coords) == 1:
            if not np.any(inside_image_mask):
                return []
            return [self.copy()]
        # all points inside: nothing to clip
        if np.all(inside_image_mask):
            return [self.copy()]
        # top, right, bottom, left image edges
        # we subtract eps here, because intersection() works inclusively,
        # i.e. not subtracting eps would be equivalent to 0<=x<=C for C being
        # height or width
        # don't set the eps too low, otherwise points at height/width seem
        # to get rounded to height/width by shapely, which can cause problems
        # when first clipping and then calling is_fully_within_image()
        # returning false
        height, width = normalize_shape(image)[0:2]
        eps = 1e-3
        edges = [
            LineString([(0.0, 0.0), (width - eps, 0.0)]),
            LineString([(width - eps, 0.0), (width - eps, height - eps)]),
            LineString([(width - eps, height - eps), (0.0, height - eps)]),
            LineString([(0.0, height - eps), (0.0, 0.0)])
        ]
        # per segment: where does it cross the image border?
        intersections = self.find_intersections_with(edges)
        # Build one ordered list of (point, was_added, is_out_of_image)
        # tuples, interleaving original segment endpoints with the computed
        # border intersection points.
        points = []
        gen = enumerate(zip(self.coords[:-1], self.coords[1:],
                            ooi_mask[:-1], ooi_mask[1:],
                            intersections))
        for i, (line_start, line_end, ooi_start, ooi_end, inter_line) in gen:
            points.append((line_start, False, ooi_start))
            for p_inter in inter_line:
                points.append((p_inter, True, False))
            # only the very last segment contributes its end point; for all
            # other segments the end point doubles as the next start point
            is_last = (i == len(self.coords) - 2)
            if is_last and not ooi_end:
                points.append((line_end, False, ooi_end))
        # Walk the ordered points and split into sub-lines whenever an
        # out-of-image point is encountered.
        lines = []
        line = []
        for i, (coord, was_added, ooi) in enumerate(points):
            # remove any point that is outside of the image,
            # also start a new line once such a point is detected
            if ooi:
                if len(line) > 0:
                    lines.append(line)
                    line = []
                continue
            if not was_added:
                # add all points that were part of the original line string
                # AND that are inside the image plane
                line.append(coord)
            else:
                is_last_point = (i == len(points)-1)
                # ooi is a numpy.bool_, hence the bool(.)
                is_next_ooi = (not is_last_point
                               and bool(points[i+1][2]) is True)
                # Add all points that were new (i.e. intersections), so
                # long that they aren't essentially identical to other point.
                # This prevents adding overlapping intersections multiple times.
                # (E.g. when a line intersects with a corner of the image plane
                # and also with one of its edges.)
                p_prev = line[-1] if len(line) > 0 else None
                # ignore next point if end reached or next point is out of image
                p_next = None
                if not is_last_point and not is_next_ooi:
                    p_next = points[i+1][0]
                dist_prev = None
                dist_next = None
                if p_prev is not None:
                    dist_prev = np.linalg.norm(
                        np.float32(coord) - np.float32(p_prev))
                if p_next is not None:
                    dist_next = np.linalg.norm(
                        np.float32(coord) - np.float32(p_next))
                dist_prev_ok = (dist_prev is None or dist_prev > 1e-2)
                dist_next_ok = (dist_next is None or dist_next > 1e-2)
                if dist_prev_ok and dist_next_ok:
                    line.append(coord)
        # flush the trailing line, if any
        if len(line) > 0:
            lines.append(line)
        lines = [line for line in lines if len(line) > 0]
        return [self.deepcopy(coords=line) for line in lines]
    # TODO add tests for this
    def find_intersections_with(self, other):
        """
        Find all intersection points between the line string and `other`.

        Parameters
        ----------
        other : tuple of number or list of tuple of number or \
                list of LineString or LineString
            The other geometry to use during intersection tests.

        Returns
        -------
        list of list of tuple of number
            All intersection points. One list per pair of consecutive start
            and end point, i.e. `N-1` lists of `N` points. Each list may
            be empty or may contain multiple points.
        """
        import shapely.geometry
        # normalize `other` to a shapely geometry once, reused per segment
        geom = _convert_var_to_shapely_geometry(other)
        result = []
        # intersect each consecutive segment of this line string with `geom`
        for p_start, p_end in zip(self.coords[:-1], self.coords[1:]):
            ls = shapely.geometry.LineString([p_start, p_end])
            intersections = ls.intersection(geom)
            intersections = list(_flatten_shapely_collection(intersections))
            intersections_points = []
            for inter in intersections:
                if isinstance(inter, shapely.geometry.linestring.LineString):
                    # overlapping (collinear) intersection: represent it by
                    # its start and end point
                    inter_start = (inter.coords[0][0], inter.coords[0][1])
                    inter_end = (inter.coords[-1][0], inter.coords[-1][1])
                    intersections_points.extend([inter_start, inter_end])
                else:
                    assert isinstance(inter, shapely.geometry.point.Point), (
                        "Expected to find shapely.geometry.point.Point or "
                        "shapely.geometry.linestring.LineString intersection, "
                        "actually found %s." % (type(inter),))
                    intersections_points.append((inter.x, inter.y))
            # sort by distance to start point, this makes it later on easier
            # to remove duplicate points
            inter_sorted = sorted(
                intersections_points,
                key=lambda p: np.linalg.norm(np.float32(p) - p_start)
            )
            result.append(inter_sorted)
        return result
# TODO convert this to x/y params?
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift/move the line string from one or more image sides.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the
top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the
right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the
bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the
left.
Returns
-------
result : imgaug.augmentables.lines.LineString
Shifted line string.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
coords = np.copy(self.coords)
coords[:, 0] += left - right
coords[:, 1] += top - bottom
return self.copy(coords=coords)
def draw_mask(self, image_shape, size_lines=1, size_points=0,
raise_if_out_of_image=False):
"""
Draw this line segment as a binary image mask.
Parameters
----------
image_shape : tuple of int
The shape of the image onto which to draw the line mask.
size_lines : int, optional
Thickness of the line segments.
size_points : int, optional
Size of the points in pixels.
raise_if_out_of_image : bool, optional
Whether to raise an error if the line string is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
ndarray
Boolean line mask of shape `image_shape` (no channel axis).
"""
heatmap = self.draw_heatmap_array(
image_shape,
alpha_lines=1.0, alpha_points=1.0,
size_lines=size_lines, size_points=size_points,
antialiased=False,
raise_if_out_of_image=raise_if_out_of_image)
return heatmap > 0.5
def draw_lines_heatmap_array(self, image_shape, alpha=1.0,
size=1, antialiased=True,
raise_if_out_of_image=False):
"""
Draw the line segments of the line string as a heatmap array.
Parameters
----------
image_shape : tuple of int
The shape of the image onto which to draw the line mask.
alpha : float, optional
Opacity of the line string. Higher values denote a more visible
line string.
size : int, optional
Thickness of the line segments.
antialiased : bool, optional
Whether to draw the line with anti-aliasing activated.
raise_if_out_of_image : bool, optional
Whether to raise an error if the line string is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
ndarray
Float array of shape `image_shape` (no channel axis) with drawn
line string. All values are in the interval ``[0.0, 1.0]``.
"""
assert len(image_shape) == 2 or (
len(image_shape) == 3 and image_shape[-1] == 1), (
"Expected (H,W) or (H,W,1) as image_shape, got %s." % (
image_shape,))
arr = self.draw_lines_on_image(
np.zeros(image_shape, dtype=np.uint8),
color=255, alpha=alpha, size=size,
antialiased=antialiased,
raise_if_out_of_image=raise_if_out_of_image
)
return arr.astype(np.float32) / 255.0
def draw_points_heatmap_array(self, image_shape, alpha=1.0,
                              size=1, raise_if_out_of_image=False):
    """
    Draw the points of the line string as a heatmap array.

    Parameters
    ----------
    image_shape : tuple of int
        The shape of the image onto which to draw the point mask.

    alpha : float, optional
        Opacity of the line string points. Higher values denote more
        visible points.

    size : int, optional
        Size of the points in pixels.

    raise_if_out_of_image : bool, optional
        Whether to raise an error if the line string is fully
        outside of the image. If set to False, no error will be raised
        and only the parts inside the image will be drawn.

    Returns
    -------
    ndarray
        Float array of shape `image_shape` (no channel axis) with drawn
        line string points. All values are in the interval
        ``[0.0, 1.0]``.

    """
    shape_ok = (
        len(image_shape) == 2
        or (len(image_shape) == 3 and image_shape[-1] == 1))
    assert shape_ok, (
        "Expected (H,W) or (H,W,1) as image_shape, got %s." % (
            image_shape,))

    # Render onto a black uint8 canvas, then rescale to [0.0, 1.0].
    canvas = np.zeros(image_shape, dtype=np.uint8)
    drawn = self.draw_points_on_image(
        canvas, color=255, alpha=alpha, size=size,
        raise_if_out_of_image=raise_if_out_of_image)
    return drawn.astype(np.float32) / 255.0
def draw_heatmap_array(self, image_shape, alpha_lines=1.0, alpha_points=1.0,
                       size_lines=1, size_points=0, antialiased=True,
                       raise_if_out_of_image=False):
    """
    Draw the line segments and points of the line string as a heatmap
    array.

    Parameters
    ----------
    image_shape : tuple of int
        The shape of the image onto which to draw the line mask.

    alpha_lines : float, optional
        Opacity of the line string. Higher values denote a more visible
        line string.

    alpha_points : float, optional
        Opacity of the line string points. Higher values denote more
        visible points.

    size_lines : int, optional
        Thickness of the line segments.

    size_points : int, optional
        Size of the points in pixels.

    antialiased : bool, optional
        Whether to draw the line with anti-aliasing activated.

    raise_if_out_of_image : bool, optional
        Whether to raise an error if the line string is fully
        outside of the image. If set to False, no error will be raised
        and only the parts inside the image will be drawn.

    Returns
    -------
    ndarray
        Float array of shape `image_shape` (no channel axis) with drawn
        line segments and points. All values are in the interval
        ``[0.0, 1.0]``.

    """
    heatmap_lines = self.draw_lines_heatmap_array(
        image_shape,
        alpha=alpha_lines,
        size=size_lines,
        antialiased=antialiased,
        raise_if_out_of_image=raise_if_out_of_image)

    # A non-positive point size means "draw no points at all".
    if size_points <= 0:
        return heatmap_lines

    heatmap_points = self.draw_points_heatmap_array(
        image_shape,
        alpha=alpha_points,
        size=size_points,
        raise_if_out_of_image=raise_if_out_of_image)

    # Merge the two layers via the pixel-wise maximum (same result as
    # stacking both and reducing with max over the stack axis).
    return np.maximum(heatmap_lines, heatmap_points)
# TODO only draw line on image of size BB around line, then paste into full
# sized image
def draw_lines_on_image(self, image, color=(0, 255, 0),
                        alpha=1.0, size=3,
                        antialiased=True,
                        raise_if_out_of_image=False):
    """
    Draw the line segments of the line string on a given image.

    Parameters
    ----------
    image : ndarray or tuple of int
        The image onto which to draw.
        Expected to be ``uint8`` and of shape ``(H, W, C)`` with ``C``
        usually being ``3`` (other values are not tested).
        If a tuple, expected to be ``(H, W, C)`` and will lead to a new
        ``uint8`` array of zeros being created.

    color : int or iterable of int
        Color to use as RGB, i.e. three values.

    alpha : float, optional
        Opacity of the line string. Higher values denote a more visible
        line string.

    size : int, optional
        Thickness of the line segments.

    antialiased : bool, optional
        Whether to draw the line with anti-aliasing activated.

    raise_if_out_of_image : bool, optional
        Whether to raise an error if the line string is fully
        outside of the image. If set to False, no error will be raised and
        only the parts inside the image will be drawn.

    Returns
    -------
    ndarray
        `image` with line drawn on it.

    """
    # Deferred relative imports to avoid circular dependencies.
    from .. import dtypes as iadt
    from ..augmenters import blend as blendlib

    image_was_empty = False
    if isinstance(image, tuple):
        # caller gave only a shape -> draw onto a fresh black canvas
        image_was_empty = True
        image = np.zeros(image, dtype=np.uint8)
    assert image.ndim in [2, 3], (
        ("Expected image or shape of form (H,W) or (H,W,C), "
         + "got shape %s.") % (image.shape,))

    # Nothing to draw for <2 points, (near-)zero alpha or zero size;
    # return an untouched copy in that case.
    if len(self.coords) <= 1 or alpha < 0 + 1e-4 or size < 1:
        return np.copy(image)

    if raise_if_out_of_image \
            and self.is_out_of_image(image, partly=False, fully=True):
        raise Exception(
            "Cannot draw line string '%s' on image with shape %s, because "
            "it would be out of bounds." % (
                self.__str__(), image.shape))

    # Normalize `color` to one value per channel.
    if image.ndim == 2:
        assert ia.is_single_number(color), (
            "Got a 2D image. Expected then 'color' to be a single number, "
            "but got %s." % (str(color),))
        color = [color]
    elif image.ndim == 3 and ia.is_single_number(color):
        color = [color] * image.shape[-1]

    image = image.astype(np.float32)
    height, width = image.shape[0:2]

    # We can't trivially exclude lines outside of the image here, because
    # even if start and end point are outside, there can still be parts of
    # the line inside the image.
    # TODO Do this with edge-wise intersection tests
    lines = []
    for line_start, line_end in zip(self.coords[:-1], self.coords[1:]):
        # note that line() expects order (y1, x1, y2, x2), hence ([1], [0])
        lines.append((line_start[1], line_start[0],
                      line_end[1], line_end[0]))

    # skimage.draw.line can only handle integers
    lines = np.round(np.float32(lines)).astype(np.int32)

    # size == 0 is already covered above
    # Note here that we have to be careful not to draw lines two times
    # at their intersection points, e.g. for (p0, p1), (p1, 2) we could
    # end up drawing at p1 twice, leading to higher values if alpha is used.
    color = np.float32(color)
    # Per-pixel intensity in [0, alpha]; later used as the blend factor.
    heatmap = np.zeros(image.shape[0:2], dtype=np.float32)
    for line in lines:
        if antialiased:
            # line_aa() additionally yields per-pixel coverage values
            rr, cc, val = skimage.draw.line_aa(*line)
        else:
            rr, cc = skimage.draw.line(*line)
            val = 1.0

        # mask check here, because line() can generate coordinates
        # outside of the image plane
        rr_mask = np.logical_and(0 <= rr, rr < height)
        cc_mask = np.logical_and(0 <= cc, cc < width)
        mask = np.logical_and(rr_mask, cc_mask)

        if np.any(mask):
            rr = rr[mask]
            cc = cc[mask]
            val = val[mask] if not ia.is_single_number(val) else val
            # assignment (not +=) avoids double-drawing shared endpoints
            heatmap[rr, cc] = val * alpha

    if size > 1:
        # thicken the 1px lines via morphological dilation
        kernel = np.ones((size, size), dtype=np.uint8)
        heatmap = cv2.dilate(heatmap, kernel)

    if image_was_empty:
        # NOTE(review): heatmap is (H,W) and color is (C,); this branch
        # appears to rely on broadcasting-compatible shapes — confirm
        # against callers (e.g. single-channel canvases).
        image_blend = image + heatmap * color
    else:
        image_color_shape = image.shape[0:2]
        if image.ndim == 3:
            image_color_shape = image_color_shape + (1,)
        image_color = np.tile(color, image_color_shape)

        image_blend = blendlib.blend_alpha(image_color, image, heatmap)

    # convert float32 working copy back to uint8
    image_blend = iadt.restore_dtypes_(image_blend, np.uint8)

    return image_blend
def draw_points_on_image(self, image, color=(0, 128, 0),
alpha=1.0, size=3,
copy=True, raise_if_out_of_image=False):
"""
Draw the points of the line string on a given image.
Parameters
----------
image : ndarray or tuple of int
The image onto which to draw.
Expected to be ``uint8`` and of shape ``(H, W, C)`` with ``C``
usually being ``3`` (other values are not tested).
If a tuple, expected to be ``(H, W, C)`` and will lead to a new
``uint8`` array of zeros being created.
color : iterable of int
Color to use as RGB, i.e. three values.
alpha : float, optional
Opacity of the line string points. Higher values denote a more
visible points.
size : int, optional
Size of the points in pixels.
copy : bool, optional
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmentables/polys.py | augmentation/image_augmentation/helpers/imgaug/augmentables/polys.py | from __future__ import print_function, division, absolute_import
import copy
import warnings
import numpy as np
import scipy.spatial.distance
import six.moves as sm
import skimage.draw
import skimage.measure
import collections
from .. import imgaug as ia
from .utils import normalize_shape, interpolate_points
# TODO somehow merge with BoundingBox
# TODO add functions: simplify() (eg via shapely.ops.simplify()),
# extend(all_sides=0, top=0, right=0, bottom=0, left=0),
# intersection(other, default=None), union(other), iou(other), to_heatmap, to_mask
class Polygon(object):
    """
    Class representing polygons.

    Each polygon is parameterized by its corner points, given as absolute
    x- and y-coordinates with sub-pixel accuracy.

    Parameters
    ----------
    exterior : list of imgaug.Keypoint or list of tuple of float or (N,2) ndarray
        List of points defining the polygon. May be either a list of
        Keypoint objects or a list of tuples in xy-form or a numpy array
        of shape (N,2) for N points in xy-form.
        All coordinates are expected to be the absolute coordinates in the
        image, given as floats, e.g. x=10.7 and y=3.4 for a point at
        coordinates (10.7, 3.4). Their order is expected to be clock-wise.
        They are expected to not be closed (i.e. first and last coordinate
        differ).

    label : None or str, optional
        Label of the polygon, e.g. a string representing the class.

    """

    def __init__(self, exterior, label=None):
        """Create a new Polygon instance."""
        # TODO get rid of this deferred import
        from imgaug.augmentables.kps import Keypoint

        if isinstance(exterior, list):
            if not exterior:
                # for empty lists, make sure that the shape is (0, 2) and
                # not (0,) as that is also expected when the input is a
                # numpy array
                self.exterior = np.zeros((0, 2), dtype=np.float32)
            elif isinstance(exterior[0], Keypoint):
                # list of Keypoint
                self.exterior = np.float32(
                    [[point.x, point.y] for point in exterior])
            else:
                # list of tuples (x, y)
                self.exterior = np.float32(
                    [[point[0], point[1]] for point in exterior])
        else:
            # BUGFIX: the message promises a *type*, but the old code
            # formatted the value itself into it; report type(exterior).
            ia.do_assert(ia.is_np_array(exterior),
                         ("Expected exterior to be a list of tuples (x, y) or "
                          + "an (N, 2) array, got type %s") % (type(exterior),))
            ia.do_assert(exterior.ndim == 2 and exterior.shape[1] == 2,
                         ("Expected exterior to be a list of tuples (x, y) or "
                          + "an (N, 2) array, got an array of shape %s") % (
                             exterior.shape,))
            self.exterior = np.float32(exterior)

        # Remove last point if it is essentially the same as the first point
        # (polygons are always assumed to be closed anyways). This also
        # prevents problems with shapely, which seems to add the last point
        # automatically.
        if len(self.exterior) >= 2 \
                and np.allclose(self.exterior[0, :], self.exterior[-1, :]):
            self.exterior = self.exterior[:-1]

        self.label = label
@property
def xx(self):
"""
Return the x-coordinates of all points in the exterior.
Returns
-------
(N,2) ndarray
X-coordinates of all points in the exterior as a float32 ndarray.
"""
return self.exterior[:, 0]
@property
def yy(self):
"""
Return the y-coordinates of all points in the exterior.
Returns
-------
(N,2) ndarray
Y-coordinates of all points in the exterior as a float32 ndarray.
"""
return self.exterior[:, 1]
@property
def xx_int(self):
"""
Return the x-coordinates of all points in the exterior, rounded to the closest integer value.
Returns
-------
(N,2) ndarray
X-coordinates of all points in the exterior, rounded to the closest integer value.
Result dtype is int32.
"""
return np.int32(np.round(self.xx))
@property
def yy_int(self):
"""
Return the y-coordinates of all points in the exterior, rounded to the closest integer value.
Returns
-------
(N,2) ndarray
Y-coordinates of all points in the exterior, rounded to the closest integer value.
Result dtype is int32.
"""
return np.int32(np.round(self.yy))
@property
def is_valid(self):
    """
    Estimate whether the polygon has a valid shape.

    To be considered valid, the polygon must be made up of at least 3
    points and its exterior must be well-formed (validity is delegated
    to shapely's ``is_valid``, which e.g. rejects self-intersections).
    Multiple consecutive points are allowed to have the same coordinates.

    Returns
    -------
    bool
        True if the polygon has at least 3 points and shapely considers
        it valid, otherwise False.

    """
    if len(self.exterior) < 3:
        return False
    return self.to_shapely_polygon().is_valid
@property
def area(self):
    """
    Estimate the area of the polygon.

    Returns
    -------
    number
        Area of the polygon.

    """
    if len(self.exterior) < 3:
        raise Exception("Cannot compute the polygon's area because it contains less than three points.")
    # delegate the actual computation to shapely
    return self.to_shapely_polygon().area
@property
def height(self):
"""
Estimate the height of the polygon.
Returns
-------
number
Height of the polygon.
"""
yy = self.yy
return max(yy) - min(yy)
@property
def width(self):
"""
Estimate the width of the polygon.
Returns
-------
number
Width of the polygon.
"""
xx = self.xx
return max(xx) - min(xx)
def project(self, from_shape, to_shape):
    """
    Project the polygon onto an image with different shape.

    The relative coordinates of all points remain the same.
    E.g. a point at (x=20, y=20) on an image (width=100, height=200)
    will be projected on a new image (width=200, height=100) to
    (x=40, y=10).

    This is intended for cases where the original image is resized.
    It cannot be used for more complex changes (e.g. padding, cropping).

    Parameters
    ----------
    from_shape : tuple of int
        Shape of the original image. (Before resize.)

    to_shape : tuple of int
        Shape of the new image. (After resize.)

    Returns
    -------
    imgaug.Polygon
        Polygon object with new coordinates.

    """
    # identical height/width -> nothing to project
    if from_shape[0:2] == to_shape[0:2]:
        return self.copy()
    projected = self.to_line_string(closed=False).project(
        from_shape, to_shape)
    return self.copy(exterior=projected.coords)
def find_closest_point_index(self, x, y, return_distance=False):
    """
    Find the index of the exterior point closest to the given coordinates.

    "Closeness" is here defined based on euclidean distance.
    This method will raise an AssertionError if the exterior contains
    no points.

    Parameters
    ----------
    x : number
        X-coordinate around which to search for close points.

    y : number
        Y-coordinate around which to search for close points.

    return_distance : bool, optional
        Whether to also return the distance of the closest point.

    Returns
    -------
    int
        Index of the closest point.

    number
        Euclidean distance to the closest point.
        This value is only returned if `return_distance` was set to True.

    """
    ia.do_assert(len(self.exterior) > 0)
    # Vectorized squared distances instead of a per-point Python loop.
    # Computed in float64 for accuracy; sqrt is only applied to the
    # single winning distance instead of all of them.
    deltas = self.exterior.astype(np.float64) - np.float64([x, y])
    dists_sq = np.sum(deltas ** 2, axis=1)
    closest_idx = int(np.argmin(dists_sq))
    if return_distance:
        return closest_idx, float(np.sqrt(dists_sq[closest_idx]))
    return closest_idx
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_fully_within_image(self, image):
    """
    Estimate whether the polygon is fully inside the image area.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use. If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape and must
        contain at least two integers.

    Returns
    -------
    bool
        True if the polygon is fully inside the image area, otherwise
        False.

    """
    # fully inside <=> not even partially outside
    outside_somewhere = self.is_out_of_image(image, fully=True, partly=True)
    return not outside_somewhere
# TODO keep this method? it is almost an alias for is_out_of_image()
def is_partly_within_image(self, image):
    """
    Estimate whether the polygon is at least partially inside the image
    area.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use. If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape and must
        contain at least two integers.

    Returns
    -------
    bool
        True if the polygon is at least partially inside the image area,
        otherwise False.

    """
    # partially inside <=> not fully outside
    fully_outside = self.is_out_of_image(image, fully=True, partly=False)
    return not fully_outside
def is_out_of_image(self, image, fully=True, partly=False):
    """
    Estimate whether the polygon is partially or fully outside of the
    image area.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use. If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape and must
        contain at least two integers.

    fully : bool, optional
        Whether to return True if the polygon is fully outside of the
        image area.

    partly : bool, optional
        Whether to return True if the polygon is at least partially
        outside of the image area.

    Returns
    -------
    bool
        True if the polygon is partially/fully outside of the image
        area, depending on defined parameters. False otherwise.

    """
    # TODO this is inconsistent with line strings, which return a default
    # value in these cases
    if len(self.exterior) == 0:
        raise Exception("Cannot determine whether the polygon is inside the image, because it contains no points.")
    # delegate to the equivalent line string check
    return self.to_line_string().is_out_of_image(
        image, fully=fully, partly=partly)
@ia.deprecated(alt_func="Polygon.clip_out_of_image()",
               comment="clip_out_of_image() has the exactly same "
                       "interface.")
def cut_out_of_image(self, image):
    """Deprecated alias; delegates to :func:`Polygon.clip_out_of_image`."""
    return self.clip_out_of_image(image)
# TODO this currently can mess up the order of points - change somehow to
# keep the order
def clip_out_of_image(self, image):
    """
    Cut off all parts of the polygon that are outside of the image.

    This operation may lead to new points being created.
    As a single polygon may be split into multiple new polygons, the
    result is always a list, which may contain more than one output
    polygon.

    This operation will return an empty list if the polygon is completely
    outside of the image plane.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image dimensions to use for the clipping of the polygon.
        If an ndarray, its shape will be used.
        If a tuple, it is assumed to represent the image shape and must
        contain at least two integers.

    Returns
    -------
    list of imgaug.Polygon
        Polygon, clipped to fall within the image dimensions.
        Returned as a list, because the clipping can split the polygon
        into multiple parts. The list may also be empty, if the polygon
        was fully outside of the image plane.

    """
    # load shapely lazily, which makes the dependency more optional
    import shapely.geometry

    # if fully out of image, clip everything away, nothing remaining
    if self.is_out_of_image(image, fully=True, partly=False):
        return []

    h, w = image.shape[0:2] if ia.is_np_array(image) else image[0:2]
    poly_shapely = self.to_shapely_polygon()
    # rectangle covering the full image plane
    poly_image = shapely.geometry.Polygon([(0, 0), (w, 0), (w, h), (0, h)])
    multipoly_inter_shapely = poly_shapely.intersection(poly_image)
    # the intersection may be a single Polygon or a MultiPolygon;
    # normalize to MultiPolygon so the loop below covers both cases
    if not isinstance(multipoly_inter_shapely, shapely.geometry.MultiPolygon):
        ia.do_assert(isinstance(multipoly_inter_shapely, shapely.geometry.Polygon))
        multipoly_inter_shapely = shapely.geometry.MultiPolygon([multipoly_inter_shapely])

    polygons = []
    for poly_inter_shapely in multipoly_inter_shapely.geoms:
        polygons.append(Polygon.from_shapely(poly_inter_shapely, label=self.label))

    # Shapely changes the order of points, we try here to preserve it as
    # much as possible.
    # Note here, that all points of the new polygon might have high
    # distance to the points on the old polygon. This can happen if the
    # polygon overlaps with the image plane, but all of its points are
    # outside of the image plane. The new polygon will not be made up of
    # any of the old points.
    polygons_reordered = []
    for polygon in polygons:
        # find the new point closest to any of the original points and
        # rotate the exterior so that it becomes the first point
        best_idx = None
        best_dist = None
        for x, y in self.exterior:
            point_idx, dist = polygon.find_closest_point_index(x=x, y=y, return_distance=True)
            if best_idx is None or dist < best_dist:
                best_idx = point_idx
                best_dist = dist
        if best_idx is not None:
            polygon_reordered = polygon.change_first_point_by_index(best_idx)
            polygons_reordered.append(polygon_reordered)

    return polygons_reordered
def shift(self, top=None, right=None, bottom=None, left=None):
    """
    Shift the polygon on the x/y-axis, expressed as movement away from
    one or more image sides.

    Parameters
    ----------
    top : None or int, optional
        Amount of pixels by which to shift the polygon from the top.

    right : None or int, optional
        Amount of pixels by which to shift the polygon from the right.

    bottom : None or int, optional
        Amount of pixels by which to shift the polygon from the bottom.

    left : None or int, optional
        Amount of pixels by which to shift the polygon from the left.

    Returns
    -------
    imgaug.Polygon
        Shifted polygon.

    """
    # delegate the coordinate arithmetic to the line string representation
    shifted_coords = self.to_line_string(closed=False).shift(
        top=top, right=right, bottom=bottom, left=left).coords
    return self.copy(exterior=shifted_coords)
# TODO separate this into draw_face_on_image() and draw_border_on_image()
# TODO add tests for line thickness
def draw_on_image(self,
                  image,
                  color=(0, 255, 0), color_face=None,
                  color_lines=None, color_points=None,
                  alpha=1.0, alpha_face=None,
                  alpha_lines=None, alpha_points=None,
                  size=1, size_lines=None, size_points=None,
                  raise_if_out_of_image=False):
    """
    Draw the polygon on an image.

    Parameters
    ----------
    image : (H,W,C) ndarray
        The image onto which to draw the polygon. Usually expected to be
        of dtype ``uint8``, though other dtypes are also handled.

    color : iterable of int, optional
        The color to use for the whole polygon.
        Must correspond to the channel layout of the image. Usually RGB.
        The values for `color_face`, `color_lines` and `color_points`
        will be derived from this color if they are set to ``None``.
        This argument has no effect if `color_face`, `color_lines`
        and `color_points` are all set anything other than ``None``.

    color_face : None or iterable of int, optional
        The color to use for the inner polygon area (excluding perimeter).
        Must correspond to the channel layout of the image. Usually RGB.
        If this is ``None``, it will be derived from ``color * 1.0``.

    color_lines : None or iterable of int, optional
        The color to use for the line (aka perimeter/border) of the
        polygon. Must correspond to the channel layout of the image.
        Usually RGB. If this is ``None``, it will be derived from
        ``color * 0.5``.

    color_points : None or iterable of int, optional
        The color to use for the corner points of the polygon.
        Must correspond to the channel layout of the image. Usually RGB.
        If this is ``None``, it will be derived from ``color * 0.5``.

    alpha : float, optional
        The opacity of the whole polygon, where ``1.0`` denotes a
        completely visible polygon and ``0.0`` an invisible one.
        The values for `alpha_face`, `alpha_lines` and `alpha_points`
        will be derived from this alpha value if they are set to ``None``.
        This argument has no effect if `alpha_face`, `alpha_lines`
        and `alpha_points` are all set anything other than ``None``.

    alpha_face : None or number, optional
        The opacity of the polygon's inner area (excluding the
        perimeter), where ``1.0`` denotes a completely visible inner area
        and ``0.0`` an invisible one.
        If this is ``None``, it will be derived from ``alpha * 0.5``.

    alpha_lines : None or number, optional
        The opacity of the polygon's line (aka perimeter/border),
        where ``1.0`` denotes a completely visible line and ``0.0`` an
        invisible one.
        If this is ``None``, it will be derived from ``alpha * 1.0``.

    alpha_points : None or number, optional
        The opacity of the polygon's corner points, where ``1.0`` denotes
        completely visible corners and ``0.0`` invisible ones.
        If this is ``None``, it will be derived from ``alpha * 1.0``.

    size : int, optional
        Size of the polygon.
        The sizes of the line and points are derived from this value,
        unless they are set.

    size_lines : None or int, optional
        Thickness of the polygon's line (aka perimeter/border).
        If ``None``, this value is derived from `size`.

    size_points : int, optional
        Size of the points in pixels.
        If ``None``, this value is derived from ``3 * size``.

    raise_if_out_of_image : bool, optional
        Whether to raise an error if the polygon is fully
        outside of the image. If set to False, no error will be raised
        and only the parts inside the image will be drawn.

    Returns
    -------
    result : (H,W,C) ndarray
        Image with polygon drawn on it. Result dtype is the same as the
        input dtype.

    """
    assert color is not None
    assert alpha is not None
    assert size is not None

    # Derive the per-element colors/alphas/sizes from the aggregate
    # arguments wherever no explicit value was given.
    color_face = color_face if color_face is not None else np.array(color)
    color_lines = color_lines if color_lines is not None else np.array(color) * 0.5
    color_points = color_points if color_points is not None else np.array(color) * 0.5

    alpha_face = alpha_face if alpha_face is not None else alpha * 0.5
    alpha_lines = alpha_lines if alpha_lines is not None else alpha
    alpha_points = alpha_points if alpha_points is not None else alpha

    size_lines = size_lines if size_lines is not None else size
    size_points = size_points if size_points is not None else size * 3

    # normalize color_face to one value per channel
    if image.ndim == 2:
        assert ia.is_single_number(color_face), (
            "Got a 2D image. Expected then 'color_face' to be a single "
            "number, but got %s." % (str(color_face),))
        color_face = [color_face]
    elif image.ndim == 3 and ia.is_single_number(color_face):
        color_face = [color_face] * image.shape[-1]

    # snap near-extreme alphas to the exact fast-path values 0 and 1
    if alpha_face < 0.01:
        alpha_face = 0
    elif alpha_face > 0.99:
        alpha_face = 1

    if raise_if_out_of_image and self.is_out_of_image(image):
        raise Exception("Cannot draw polygon %s on image with shape %s." % (
            str(self), image.shape
        ))

    # TODO np.clip to image plane if is_fully_within_image(), similar to how it is done for bounding boxes
    # TODO improve efficiency by only drawing in rectangle that covers poly instead of drawing in the whole image
    # TODO for a rectangular polygon, the face coordinates include the top/left boundary but not the right/bottom
    #      boundary. This may be unintuitive when not drawing the boundary. Maybe somehow remove the boundary
    #      coordinates from the face coordinates after generating both?

    # work in float32, restore the caller's dtype at the end
    input_dtype = image.dtype
    result = image.astype(np.float32)

    # 1) fill the polygon face
    rr, cc = skimage.draw.polygon(self.yy_int, self.xx_int, shape=image.shape)
    if len(rr) > 0:
        if alpha_face == 1:
            result[rr, cc] = np.float32(color_face)
        elif alpha_face == 0:
            pass
        else:
            # alpha-blend face color with the existing pixels
            result[rr, cc] = (
                    (1 - alpha_face) * result[rr, cc, :]
                    + alpha_face * np.float32(color_face)
            )

    # 2) draw the border (closed ring) and 3) the corner points
    #    (open ring, so the first corner is not drawn twice)
    ls_open = self.to_line_string(closed=False)
    ls_closed = self.to_line_string(closed=True)
    result = ls_closed.draw_lines_on_image(
        result, color=color_lines, alpha=alpha_lines,
        size=size_lines, raise_if_out_of_image=raise_if_out_of_image)
    result = ls_open.draw_points_on_image(
        result, color=color_points, alpha=alpha_points,
        size=size_points, raise_if_out_of_image=raise_if_out_of_image)

    if input_dtype.type == np.uint8:
        result = np.clip(np.round(result), 0, 255).astype(input_dtype)  # TODO make clipping more flexible
    else:
        result = result.astype(input_dtype)

    return result
def extract_from_image(self, image):
    """
    Extract the image pixels within the polygon.

    This function will zero-pad the image if the polygon is
    partially/fully outside of the image.

    Parameters
    ----------
    image : (H,W) ndarray or (H,W,C) ndarray
        The image from which to extract the pixels within the polygon.

    Returns
    -------
    result : (H',W') ndarray or (H',W',C) ndarray
        Pixels within the polygon. Zero-padded if the polygon is
        partially/fully outside of the image.

    """
    ia.do_assert(image.ndim in [2, 3])
    if len(self.exterior) <= 2:
        raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.")

    # Work on the (zero-padded) axis-aligned bounding box of the polygon,
    # then black out everything outside the polygon's face.
    bb = self.to_bounding_box()
    bb_area = bb.extract_from_image(image)
    if self.is_out_of_image(image, fully=True, partly=False):
        return bb_area

    xx = self.xx_int
    yy = self.yy_int

    # polygon coordinates relative to the bounding box's top-left corner
    xx_mask = xx - np.min(xx)
    yy_mask = yy - np.min(yy)
    height_mask = np.max(yy_mask)
    width_mask = np.max(xx_mask)

    rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask))

    # BUGFIX: np.bool was a deprecated alias of the builtin bool and was
    # removed in NumPy 1.24; use the builtin directly (identical dtype).
    mask = np.zeros((height_mask, width_mask), dtype=bool)
    mask[rr_face, cc_face] = True

    if image.ndim == 3:
        # broadcast the 2D mask over all channels
        mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2]))

    return bb_area * mask
def change_first_point_by_coords(self, x, y, max_distance=1e-4,
                                 raise_if_too_far_away=True):
    """
    Set the first point of the exterior to the given point based on its
    coordinates.

    If multiple points are found, the closest one will be picked.
    If no matching points are found, an exception is raised.

    Note: This method does *not* work in-place.

    Parameters
    ----------
    x : number
        X-coordinate of the point.

    y : number
        Y-coordinate of the point.

    max_distance : None or number, optional
        Maximum distance past which possible matches are ignored.
        If ``None`` the distance limit is deactivated.

    raise_if_too_far_away : bool, optional
        Whether to raise an exception if the closest found point is too
        far away (``True``) or simply return an unchanged copy of this
        object (``False``).

    Returns
    -------
    imgaug.Polygon
        Copy of this polygon with the new point order.

    """
    if len(self.exterior) == 0:
        raise Exception("Cannot reorder polygon points, because it contains no points.")

    closest_idx, closest_dist = self.find_closest_point_index(x=x, y=y, return_distance=True)
    if max_distance is not None and closest_dist > max_distance:
        if not raise_if_too_far_away:
            return self.deepcopy()

        closest_point = self.exterior[closest_idx, :]
        # BUGFIX: the old message read "exceeds max_distance of %.9f
        # exceeded" and formatted the measured distance where the limit
        # was announced; report both values, grammatically.
        raise Exception(
            "Closest found point (%.9f, %.9f) with distance %.9f exceeded "
            "max_distance of %.9f" % (
                closest_point[0], closest_point[1], closest_dist,
                max_distance)
        )
    return self.change_first_point_by_index(closest_idx)
def change_first_point_by_index(self, point_idx):
    """
    Set the first point of the exterior to the given point based on its
    index.

    Note: This method does *not* work in-place.

    Parameters
    ----------
    point_idx : int
        Index of the desired starting point.

    Returns
    -------
    imgaug.Polygon
        Copy of this polygon with the new point order.

    """
    ia.do_assert(0 <= point_idx < len(self.exterior))
    if point_idx == 0:
        return self.deepcopy()
    # rotate the exterior so that point_idx becomes index 0
    reordered = np.roll(self.exterior, -point_idx, axis=0)
    return self.deepcopy(exterior=reordered)
def to_shapely_polygon(self):
    """
    Convert this polygon to a Shapely polygon.

    Returns
    -------
    shapely.geometry.Polygon
        The Shapely polygon matching this polygon's exterior.

    """
    # load shapely lazily, which makes the dependency more optional
    import shapely.geometry
    points = [(x, y) for (x, y) in self.exterior]
    return shapely.geometry.Polygon(points)
def to_shapely_line_string(self, closed=False, interpolate=0):
    """
    Convert this polygon to a Shapely LineString object.

    Parameters
    ----------
    closed : bool, optional
        Whether to return the line string with the last point being
        identical to the first point.

    interpolate : int, optional
        Number of points to interpolate between any pair of two
        consecutive points. These points are added to the final line
        string.

    Returns
    -------
    shapely.geometry.LineString
        The Shapely LineString matching the polygon's exterior.

    """
    # delegates to the module-level conversion helper shared with
    # other coordinate-based augmentables
    return _convert_points_to_shapely_line_string(self.exterior, closed=closed, interpolate=interpolate)
def to_bounding_box(self):
    """
    Convert this polygon to a bounding box tightly containing the whole
    polygon.

    Returns
    -------
    imgaug.BoundingBox
        Tight bounding box around the polygon.

    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.bbs import BoundingBox

    xx = self.xx
    yy = self.yy
    return BoundingBox(x1=np.min(xx), x2=np.max(xx),
                       y1=np.min(yy), y2=np.max(yy),
                       label=self.label)
def to_keypoints(self):
    """
    Convert this polygon's `exterior` to ``Keypoint`` instances.

    Returns
    -------
    list of imgaug.Keypoint
        Exterior vertices as ``Keypoint`` instances.

    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.kps import Keypoint

    keypoints = []
    for xy in self.exterior:
        keypoints.append(Keypoint(x=xy[0], y=xy[1]))
    return keypoints
def to_line_string(self, closed=True):
    """
    Convert this polygon's `exterior` to a ``LineString`` instance.

    Parameters
    ----------
    closed : bool, optional
        Whether to close the line string, i.e. to add the first point of
        the `exterior` also as the last point at the end of the line
        string. This has no effect if the polygon has a single point or
        zero points.

    Returns
    -------
    imgaug.augmentables.lines.LineString
        Exterior of the polygon as a line string.

    """
    from imgaug.augmentables.lines import LineString

    coords = self.exterior
    if closed and len(coords) > 1:
        # repeat the first vertex at the end to close the ring
        coords = np.concatenate([coords, coords[0:1, :]], axis=0)
    return LineString(coords, label=self.label)
@staticmethod
def from_shapely(polygon_shapely, label=None):
    """
    Create a polygon from a Shapely polygon.

    Note: This will remove any holes in the Shapely polygon.

    Parameters
    ----------
    polygon_shapely : shapely.geometry.Polygon
        The shapely polygon.

    label : None or str, optional
        The label of the new polygon.

    Returns
    -------
    imgaug.Polygon
        A polygon with the same exterior as the Shapely polygon.

    """
    # load shapely lazily, which makes the dependency more optional
    import shapely.geometry

    ia.do_assert(isinstance(polygon_shapely, shapely.geometry.Polygon))
    exterior_shapely = polygon_shapely.exterior
    # shapely polygons instantiated without points have exterior None
    if exterior_shapely is None or len(exterior_shapely.coords) == 0:
        return Polygon([], label=label)
    coords = np.float32([[x, y] for (x, y) in exterior_shapely.coords])
    return Polygon(coords, label=label)
def exterior_almost_equals(self, other, max_distance=1e-4, points_per_edge=8):
"""
Estimate if this and other polygon's exterior are almost identical.
The two exteriors can have different numbers of points, but any point
randomly sampled on the exterior of one polygon should be close to the
closest point on the exterior of the other polygon.
Note that this method works approximately. One can come up with
polygons with fairly different shapes that will still be estimated as
equal by this method. In practice however this should be unlikely to be
the case. The probability for something like that goes down as the
interpolation parameter is increased.
Parameters
----------
other : imgaug.Polygon or (N,2) ndarray or list of tuple
The other polygon with which to compare the exterior.
If this is an ndarray, it is assumed to represent an exterior.
It must then have dtype ``float32`` and shape ``(N,2)`` with the
second dimension denoting xy-coordinates.
If this is a list of tuples, it is assumed to represent an exterior.
Each tuple then must contain exactly two numbers, denoting
xy-coordinates.
max_distance : number, optional
The maximum euclidean distance between a point on one polygon and
the closest point on the other polygon. If the distance is exceeded
for any such pair, the two exteriors are not viewed as equal. The
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmentables/kps.py | augmentation/image_augmentation/helpers/imgaug/augmentables/kps.py | from __future__ import print_function, division, absolute_import
import copy
import numpy as np
import scipy.spatial.distance
import six.moves as sm
from .. import imgaug as ia
from .utils import normalize_shape, project_coords
def compute_geometric_median(X, eps=1e-5):
    """
    Estimate the geometric median of points in 2D.

    Code from https://stackoverflow.com/a/30305181
    (Weiszfeld's algorithm with Vardi-Zhang handling of estimates that
    coincide with input points.)

    Parameters
    ----------
    X : (N,2) ndarray
        Points in 2D. Second axis must be given in xy-form.
    eps : float, optional
        Distance threshold when to return the median.

    Returns
    -------
    (2,) ndarray
        Geometric median as xy-coordinate.
    """
    # start from the centroid and iteratively refine
    estimate = np.mean(X, 0)
    while True:
        dists = scipy.spatial.distance.cdist(X, [estimate])
        mask_nonzero = (dists != 0)[:, 0]
        inv_dists = 1 / dists[mask_nonzero]
        inv_sum = np.sum(inv_dists)
        weights = inv_dists / inv_sum
        # inverse-distance weighted mean over all points that do not
        # coincide with the current estimate
        weighted_mean = np.sum(weights * X[mask_nonzero], 0)
        nb_coincident = len(X) - np.sum(mask_nonzero)
        if nb_coincident == len(X):
            # every point coincides with the estimate -- it is the median
            return estimate
        if nb_coincident == 0:
            candidate = weighted_mean
        else:
            # estimate sits exactly on >=1 input point; blend towards it
            residual = (weighted_mean - estimate) * inv_sum
            res_norm = np.linalg.norm(residual)
            ratio = 0 if res_norm == 0 else nb_coincident / res_norm
            candidate = max(0, 1 - ratio) * weighted_mean + min(1, ratio) * estimate
        if scipy.spatial.distance.euclidean(estimate, candidate) < eps:
            return candidate
        estimate = candidate
class Keypoint(object):
    """
    A single keypoint (aka landmark) on an image.

    Parameters
    ----------
    x : number
        Coordinate of the keypoint on the x axis.
    y : number
        Coordinate of the keypoint on the y axis.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    @property
    def x_int(self):
        """
        Return the keypoint's x-coordinate, rounded to the closest integer.

        Returns
        -------
        result : int
            Keypoint's x-coordinate, rounded to the closest integer.
        """
        return int(np.round(self.x))

    @property
    def y_int(self):
        """
        Return the keypoint's y-coordinate, rounded to the closest integer.

        Returns
        -------
        result : int
            Keypoint's y-coordinate, rounded to the closest integer.
        """
        return int(np.round(self.y))

    def project(self, from_shape, to_shape):
        """
        Project the keypoint onto a new position on a new image.

        E.g. if the keypoint is on its original image at x=(10 of 100 pixels)
        and y=(20 of 100 pixels) and is projected onto a new image with
        size (width=200, height=200), its new position will be (20, 40).

        This is intended for cases where the original image is resized.
        It cannot be used for more complex changes (e.g. padding, cropping).

        Parameters
        ----------
        from_shape : tuple of int
            Shape of the original image. (Before resize.)
        to_shape : tuple of int
            Shape of the new image. (After resize.)

        Returns
        -------
        imgaug.Keypoint
            Keypoint object with new coordinates.
        """
        xy_proj = project_coords([(self.x, self.y)], from_shape, to_shape)
        return self.deepcopy(x=xy_proj[0][0], y=xy_proj[0][1])

    def shift(self, x=0, y=0):
        """
        Move the keypoint around on an image.

        Parameters
        ----------
        x : number, optional
            Move by this value on the x axis.
        y : number, optional
            Move by this value on the y axis.

        Returns
        -------
        imgaug.Keypoint
            Keypoint object with new coordinates.
        """
        return self.deepcopy(self.x + x, self.y + y)

    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=3,
                      copy=True, raise_if_out_of_image=False):
        """
        Draw the keypoint onto a given image.

        The keypoint is drawn as a square.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoint.
        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of the keypoint. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.
        alpha : float, optional
            The opacity of the drawn keypoint, where ``1.0`` denotes a fully
            visible keypoint and ``0.0`` an invisible one.
        size : int, optional
            The size of the keypoint. If set to ``S``, each square will have
            size ``S x S``.
        copy : bool, optional
            Whether to copy the image before drawing the keypoint.
        raise_if_out_of_image : bool, optional
            Whether to raise an exception if the keypoint is outside of the
            image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoint.
        """
        if copy:
            image = np.copy(image)

        if image.ndim == 2:
            assert ia.is_single_number(color), (
                "Got a 2D image. Expected then 'color' to be a single number, "
                "but got %s." % (str(color),))
        elif image.ndim == 3 and ia.is_single_number(color):
            # broadcast a scalar color to all channels
            color = [color] * image.shape[-1]

        input_dtype = image.dtype
        alpha_color = color
        if alpha < 0.01:
            # keypoint invisible, nothing to do
            return image
        elif alpha > 0.99:
            alpha = 1
        else:
            # blending happens in float space; converted back at the end
            image = image.astype(np.float32, copy=False)
            alpha_color = alpha * np.array(color)

        height, width = image.shape[0:2]

        y, x = self.y_int, self.x_int

        x1 = max(x - size//2, 0)
        x2 = min(x + 1 + size//2, width)
        y1 = max(y - size//2, 0)
        y2 = min(y + 1 + size//2, height)

        x1_clipped, x2_clipped = np.clip([x1, x2], 0, width)
        y1_clipped, y2_clipped = np.clip([y1, y2], 0, height)

        x1_clipped_ooi = (x1_clipped < 0 or x1_clipped >= width)
        x2_clipped_ooi = (x2_clipped < 0 or x2_clipped >= width+1)
        y1_clipped_ooi = (y1_clipped < 0 or y1_clipped >= height)
        y2_clipped_ooi = (y2_clipped < 0 or y2_clipped >= height+1)
        x_ooi = (x1_clipped_ooi and x2_clipped_ooi)
        y_ooi = (y1_clipped_ooi and y2_clipped_ooi)
        x_zero_size = (x2_clipped - x1_clipped) < 1  # min size is 1px
        y_zero_size = (y2_clipped - y1_clipped) < 1
        if not x_ooi and not y_ooi and not x_zero_size and not y_zero_size:
            if alpha == 1:
                image[y1_clipped:y2_clipped, x1_clipped:x2_clipped] = color
            else:
                image[y1_clipped:y2_clipped, x1_clipped:x2_clipped] = (
                    (1 - alpha)
                    * image[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
                    + alpha_color
                )
        else:
            if raise_if_out_of_image:
                # bugfix: x and y were previously passed in swapped order
                # to this format string
                raise Exception(
                    "Cannot draw keypoint x=%.8f, y=%.8f on image with "
                    "shape %s." % (x, y, image.shape))

        if image.dtype.name != input_dtype.name:
            if input_dtype.name == "uint8":
                image = np.clip(image, 0, 255, out=image)
            image = image.astype(input_dtype, copy=False)

        return image

    def generate_similar_points_manhattan(self, nb_steps, step_size, return_array=False):
        """
        Generate nearby points to this keypoint based on manhattan distance.

        To generate the first neighbouring points, a distance of S (step size) is moved from the
        center point (this keypoint) to the top, right, bottom and left, resulting in four new
        points. From these new points, the pattern is repeated. Overlapping points are ignored.

        The resulting points have a shape similar to a square rotated by 45 degrees.

        Parameters
        ----------
        nb_steps : int
            The number of steps to move from the center point. nb_steps=1 results in a total of
            5 output points (1 center point + 4 neighbours).
        step_size : number
            The step size to move from every point to its neighbours.
        return_array : bool, optional
            Whether to return the generated points as a list of keypoints or an array
            of shape ``(N,2)``, where ``N`` is the number of generated points and the second axis contains
            the x- (first value) and y- (second value) coordinates.

        Returns
        -------
        points : list of imgaug.Keypoint or (N,2) ndarray
            If return_array was False, then a list of Keypoint.
            Otherwise a numpy array of shape ``(N,2)``, where ``N`` is the number of generated points and
            the second axis contains the x- (first value) and y- (second value) coordinates.
            The center keypoint (the one on which this function was called) is always included.
        """
        # TODO add test
        # Points generates in manhattan style with S steps have a shape similar to a 45deg rotated
        # square. The center line with the origin point has S+1+S = 1+2*S points (S to the left,
        # S to the right). The lines above contain (S+1+S)-2 + (S+1+S)-2-2 + ... + 1 points. E.g.
        # for S=2 it would be 3+1=4 and for S=3 it would be 5+3+1=9. Same for the lines below the
        # center. Hence the total number of points is S+1+S + 2*(S^2).
        points = np.zeros((nb_steps + 1 + nb_steps + 2*(nb_steps**2), 2), dtype=np.float32)

        # we start at the bottom-most line and move towards the top-most line
        yy = np.linspace(self.y - nb_steps * step_size, self.y + nb_steps * step_size, nb_steps + 1 + nb_steps)

        # bottom-most line contains only one point
        width = 1

        nth_point = 0
        for i_y, y in enumerate(yy):
            if width == 1:
                xx = [self.x]
            else:
                xx = np.linspace(self.x - (width-1)//2 * step_size, self.x + (width-1)//2 * step_size, width)
            for x in xx:
                points[nth_point] = [x, y]
                nth_point += 1
            # grow the row width until the center line is reached, then shrink
            if i_y < nb_steps:
                width += 2
            else:
                width -= 2

        if return_array:
            return points
        return [self.deepcopy(x=points[i, 0], y=points[i, 1]) for i in sm.xrange(points.shape[0])]

    def copy(self, x=None, y=None):
        """
        Create a shallow copy of the Keypoint object.

        Parameters
        ----------
        x : None or number, optional
            Coordinate of the keypoint on the x axis.
            If ``None``, the instance's value will be copied.
        y : None or number, optional
            Coordinate of the keypoint on the y axis.
            If ``None``, the instance's value will be copied.

        Returns
        -------
        imgaug.Keypoint
            Shallow copy.
        """
        # a Keypoint holds only two numbers, so shallow and deep copy
        # are equivalent
        return self.deepcopy(x=x, y=y)

    def deepcopy(self, x=None, y=None):
        """
        Create a deep copy of the Keypoint object.

        Parameters
        ----------
        x : None or number, optional
            Coordinate of the keypoint on the x axis.
            If ``None``, the instance's value will be copied.
        y : None or number, optional
            Coordinate of the keypoint on the y axis.
            If ``None``, the instance's value will be copied.

        Returns
        -------
        imgaug.Keypoint
            Deep copy.
        """
        x = self.x if x is None else x
        y = self.y if y is None else y
        return Keypoint(x=x, y=y)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
    """
    Object that represents all keypoints on a single image.

    Parameters
    ----------
    keypoints : list of imgaug.Keypoint
        List of keypoints on the image.
    shape : tuple of int
        The shape of the image on which the keypoints are placed.

    Examples
    --------
    >>> image = np.zeros((70, 70))
    >>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
    >>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
    """

    def __init__(self, keypoints, shape):
        self.keypoints = keypoints
        # normalize_shape() accepts both a shape tuple and an ndarray and
        # always stores a plain shape tuple here
        self.shape = normalize_shape(shape)

    @property
    def height(self):
        # height of the underlying image in pixels
        return self.shape[0]

    @property
    def width(self):
        # width of the underlying image in pixels
        return self.shape[1]

    @property
    def empty(self):
        """
        Returns whether this object contains zero keypoints.

        Returns
        -------
        result : bool
            True if this object contains zero keypoints.
        """
        return len(self.keypoints) == 0

    def on(self, image):
        """
        Project keypoints from one image to a new one.

        Parameters
        ----------
        image : ndarray or tuple of int
            New image onto which the keypoints are to be projected.
            May also simply be that new image's shape tuple.

        Returns
        -------
        keypoints : imgaug.KeypointsOnImage
            Object containing all projected keypoints.
        """
        shape = normalize_shape(image)
        if shape[0:2] == self.shape[0:2]:
            # spatial size unchanged -- no reprojection necessary
            return self.deepcopy()
        else:
            keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
            return self.deepcopy(keypoints, shape)

    def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=3,
                      copy=True, raise_if_out_of_image=False):
        """
        Draw all keypoints onto a given image.

        Each keypoint is marked by a square of a chosen color and size.

        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoints.
            This image should usually have the same shape as
            set in KeypointsOnImage.shape.
        color : int or list of int or tuple of int or (3,) ndarray, optional
            The RGB color of all keypoints. If a single int ``C``, then that is
            equivalent to ``(C,C,C)``.
        alpha : float, optional
            The opacity of the drawn keypoint, where ``1.0`` denotes a fully
            visible keypoint and ``0.0`` an invisible one.
        size : int, optional
            The size of each point. If set to ``C``, each square will have
            size ``C x C``.
        copy : bool, optional
            Whether to copy the image before drawing the points.
        raise_if_out_of_image : bool, optional
            Whether to raise an exception if any keypoint is outside of the image.

        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoints.
        """
        # copy once here and pass copy=False below, instead of copying once
        # per keypoint
        image = np.copy(image) if copy else image
        for keypoint in self.keypoints:
            image = keypoint.draw_on_image(
                image, color=color, alpha=alpha, size=size, copy=False,
                raise_if_out_of_image=raise_if_out_of_image)
        return image

    def shift(self, x=0, y=0):
        """
        Move the keypoints around on an image.

        Parameters
        ----------
        x : number, optional
            Move each keypoint by this value on the x axis.
        y : number, optional
            Move each keypoint by this value on the y axis.

        Returns
        -------
        out : KeypointsOnImage
            Keypoints after moving them.
        """
        keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
        return self.deepcopy(keypoints)

    @ia.deprecated(alt_func="KeypointsOnImage.to_xy_array()")
    def get_coords_array(self):
        """
        Convert the coordinates of all keypoints in this object to an array of shape (N,2).

        Returns
        -------
        result : (N, 2) ndarray
            Where N is the number of keypoints. Each first value is the
            x coordinate, each second value is the y coordinate.
        """
        return self.to_xy_array()

    def to_xy_array(self):
        """
        Convert keypoint coordinates to ``(N,2)`` array.

        Returns
        -------
        (N, 2) ndarray
            Array containing the coordinates of all keypoints.
            Shape is ``(N,2)`` with coordinates in xy-form.
        """
        result = np.zeros((len(self.keypoints), 2), dtype=np.float32)
        for i, keypoint in enumerate(self.keypoints):
            result[i, 0] = keypoint.x
            result[i, 1] = keypoint.y
        return result

    @staticmethod
    @ia.deprecated(alt_func="KeypointsOnImage.from_xy_array()")
    def from_coords_array(coords, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage object.

        Parameters
        ----------
        coords : (N, 2) ndarray
            Coordinates of ``N`` keypoints on the original image.
            Each first entry ``coords[i, 0]`` is expected to be the x coordinate.
            Each second entry ``coords[i, 1]`` is expected to be the y coordinate.
        shape : tuple
            Shape tuple of the image on which the keypoints are placed.

        Returns
        -------
        KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.
        """
        return KeypointsOnImage.from_xy_array(coords, shape)

    @classmethod
    def from_xy_array(cls, xy, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage object.

        Parameters
        ----------
        xy : (N, 2) ndarray
            Coordinates of ``N`` keypoints on the original image, given
            as ``(N,2)`` array of xy-coordinates.
        shape : tuple of int or ndarray
            Shape tuple of the image on which the keypoints are placed.

        Returns
        -------
        KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.
        """
        keypoints = [Keypoint(x=coord[0], y=coord[1]) for coord in xy]
        return KeypointsOnImage(keypoints, shape)

    # TODO add to_gaussian_heatmaps(), from_gaussian_heatmaps()
    def to_keypoint_image(self, size=1):
        """
        Draws a new black image of shape ``(H,W,N)`` in which all keypoint coordinates are set to 255.

        (H=shape height, W=shape width, N=number of keypoints)

        This function can be used as a helper when augmenting keypoints with a method that only supports the
        augmentation of images.

        Parameters
        ----------
        size : int
            Size of each (squared) point.

        Returns
        -------
        image : (H,W,N) ndarray
            Image in which the keypoints are marked. H is the height,
            defined in KeypointsOnImage.shape[0] (analogous W). N is the
            number of keypoints.
        """
        ia.do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
        # size must be odd so the square can be centered on the keypoint
        ia.do_assert(size % 2 != 0)
        sizeh = max(0, (size-1)//2)
        for i, keypoint in enumerate(self.keypoints):
            # TODO for float values spread activation over several cells
            # here and do voting at the end
            y = keypoint.y_int
            x = keypoint.x_int

            x1 = np.clip(x - sizeh, 0, width-1)
            x2 = np.clip(x + sizeh + 1, 0, width)
            y1 = np.clip(y - sizeh, 0, height-1)
            y2 = np.clip(y + sizeh + 1, 0, height)

            # surrounding square gets value 128, the exact keypoint pixel 255
            if x1 < x2 and y1 < y2:
                image[y1:y2, x1:x2, i] = 128
            if 0 <= y < height and 0 <= x < width:
                image[y, x, i] = 255
        return image

    @staticmethod
    def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None):  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Converts an image generated by ``to_keypoint_image()`` back to a KeypointsOnImage object.

        Parameters
        ----------
        image : (H,W,N) ndarray
            The keypoints image. N is the number of keypoints.
        if_not_found_coords : tuple or list or dict or None, optional
            Coordinates to use for keypoints that cannot be found in `image`.
            If this is a list/tuple, it must have two integer values.
            If it is a dictionary, it must have the keys ``x`` and ``y`` with
            each containing one integer value.
            If this is None, then the keypoint will not be added to the final
            KeypointsOnImage object.
        threshold : int, optional
            The search for keypoints works by searching for the argmax in
            each channel. This parameters contains the minimum value that
            the max must have in order to be viewed as a keypoint.
        nb_channels : None or int, optional
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to ``(height, width)``, otherwise ``(height, width, nb_channels)``.

        Returns
        -------
        out : KeypointsOnImage
            The extracted keypoints.
        """
        ia.do_assert(len(image.shape) == 3)
        height, width, nb_keypoints = image.shape

        # normalize the three accepted forms of if_not_found_coords
        # (None / tuple-list / dict) to a drop-flag plus fallback x/y
        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            ia.do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
                type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            # each channel encodes one keypoint; take its brightest pixel
            maxidx_flat = np.argmax(image[..., i])
            maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
            found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
            if found:
                keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass  # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def to_distance_maps(self, inverted=False):
        """
        Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints.

        The k-th distance map contains at every location ``(y, x)`` the euclidean distance to the k-th keypoint.

        This function can be used as a helper when augmenting keypoints with a method that only supports
        the augmentation of images.

        Parameters
        ----------
        inverted : bool, optional
            If True, inverted distance maps are returned where each distance value d is replaced
            by ``d/(d+1)``, i.e. the distance maps have values in the range ``(0.0, 1.0]`` with 1.0
            denoting exactly the position of the respective keypoint.

        Returns
        -------
        distance_maps : (H,W,K) ndarray
            A ``float32`` array containing ``K`` distance maps for ``K`` keypoints. Each location
            ``(y, x, k)`` in the array denotes the euclidean distance at ``(y, x)`` to the ``k``-th keypoint.
            In inverted mode the distance ``d`` is replaced by ``d/(d+1)``. The height and width
            of the array match the height and width in ``KeypointsOnImage.shape``.
        """
        ia.do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        distance_maps = np.zeros((height, width, len(self.keypoints)), dtype=np.float32)

        yy = np.arange(0, height)
        xx = np.arange(0, width)
        grid_xx, grid_yy = np.meshgrid(xx, yy)

        # accumulate squared distances per keypoint, take the sqrt only
        # once over the whole array at the end
        for i, keypoint in enumerate(self.keypoints):
            y, x = keypoint.y, keypoint.x
            distance_maps[:, :, i] = (grid_xx - x) ** 2 + (grid_yy - y) ** 2
        distance_maps = np.sqrt(distance_maps)
        if inverted:
            return 1/(distance_maps+1)
        return distance_maps

    # TODO add option to if_not_found_coords to reuse old keypoint coords
    @staticmethod
    def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={"x": -1, "y": -1}, threshold=None,  # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
                           nb_channels=None):
        """
        Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.

        Parameters
        ----------
        distance_maps : (H,W,N) ndarray
            The distance maps. N is the number of keypoints.
        inverted : bool, optional
            Whether the given distance maps were generated in inverted or normal mode.
        if_not_found_coords : tuple or list or dict or None, optional
            Coordinates to use for keypoints that cannot be found in ``distance_maps``.
            If this is a list/tuple, it must have two integer values.
            If it is a dictionary, it must have the keys ``x`` and ``y``, with each
            containing one integer value.
            If this is None, then the keypoint will not be added to the final
            KeypointsOnImage object.
        threshold : float, optional
            The search for keypoints works by searching for the argmin (non-inverted) or
            argmax (inverted) in each channel. This parameters contains the maximum (non-inverted)
            or minimum (inverted) value to accept in order to view a hit as a keypoint.
            Use None to use no min/max.
        nb_channels : None or int, optional
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to ``(height, width)``, otherwise ``(height, width, nb_channels)``.

        Returns
        -------
        imgaug.KeypointsOnImage
            The extracted keypoints.
        """
        ia.do_assert(len(distance_maps.shape) == 3)
        height, width, nb_keypoints = distance_maps.shape

        # normalize if_not_found_coords the same way as in
        # from_keypoint_image()
        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            ia.do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (
                type(if_not_found_coords),))

        keypoints = []
        for i in sm.xrange(nb_keypoints):
            # TODO introduce voting here among all distance values that have min/max values
            # in inverted maps the keypoint is the brightest pixel,
            # otherwise the darkest one
            if inverted:
                hitidx_flat = np.argmax(distance_maps[..., i])
            else:
                hitidx_flat = np.argmin(distance_maps[..., i])
            hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))
            if not inverted and threshold is not None:
                found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] < threshold)
            elif inverted and threshold is not None:
                found = (distance_maps[hitidx_ndim[0], hitidx_ndim[1], i] >= threshold)
            else:
                found = True
            if found:
                keypoints.append(Keypoint(x=hitidx_ndim[1], y=hitidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass  # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))

        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)

    def copy(self, keypoints=None, shape=None):
        """
        Create a shallow copy of the KeypointsOnImage object.

        Parameters
        ----------
        keypoints : None or list of imgaug.Keypoint, optional
            List of keypoints on the image. If ``None``, the instance's
            keypoints will be copied.
        shape : tuple of int, optional
            The shape of the image on which the keypoints are placed.
            If ``None``, the instance's shape will be copied.

        Returns
        -------
        imgaug.KeypointsOnImage
            Shallow copy.
        """
        result = copy.copy(self)
        if keypoints is not None:
            result.keypoints = keypoints
        if shape is not None:
            result.shape = shape
        return result

    def deepcopy(self, keypoints=None, shape=None):
        """
        Create a deep copy of the KeypointsOnImage object.

        Parameters
        ----------
        keypoints : None or list of imgaug.Keypoint, optional
            List of keypoints on the image. If ``None``, the instance's
            keypoints will be copied.
        shape : tuple of int, optional
            The shape of the image on which the keypoints are placed.
            If ``None``, the instance's shape will be copied.

        Returns
        -------
        imgaug.KeypointsOnImage
            Deep copy.
        """
        # for some reason deepcopy is way slower here than manual copy
        if keypoints is None:
            keypoints = [kp.deepcopy() for kp in self.keypoints]
        if shape is None:
            shape = tuple(self.shape)
        return KeypointsOnImage(keypoints, shape)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmentables/segmaps.py | augmentation/image_augmentation/helpers/imgaug/augmentables/segmaps.py | from __future__ import print_function, division, absolute_import
import warnings
import numpy as np
import six.moves as sm
from .. import imgaug as ia
class SegmentationMapOnImage(object):
"""
Object representing a segmentation map associated with an image.
Attributes
----------
DEFAULT_SEGMENT_COLORS : list of tuple of int
Standard RGB colors to use during drawing, ordered by class index.
Parameters
----------
arr : (H,W) ndarray or (H,W,1) ndarray or (H,W,C) ndarray
Array representing the segmentation map. May have datatypes bool, integer or float.
* If bool: Assumed to be of shape (H,W), (H,W,1) or (H,W,C). If (H,W) or (H,W,1) it
is assumed to be for the case of having a single class (where any False denotes
background). Otherwise there are assumed to be C channels, one for each class,
with each of them containing a mask for that class. The masks may overlap.
* If integer: Assumed to be of shape (H,W) or (H,W,1). Each pixel is assumed to
contain an integer denoting the class index. Classes are assumed to be
non-overlapping. The number of classes cannot be guessed from this input, hence
nb_classes must be set.
* If float: Assumed to be of shape (H,W), (H,W,1) or (H,W,C) with meanings being
similar to the case of `bool`. Values are expected to fall always in the range
0.0 to 1.0 and are usually expected to be either 0.0 or 1.0 upon instantiation
of a new segmentation map. Classes may overlap.
shape : iterable of int
Shape of the corresponding image (NOT the segmentation map array). This is expected
to be ``(H, W)`` or ``(H, W, C)`` with ``C`` usually being 3. If there is no corresponding image,
then use the segmentation map's shape instead.
nb_classes : int or None
Total number of unique classes that may appear in an segmentation map, i.e. the max
class index plus 1. This may be None if the input array is of type bool or float. The number
of classes however must be provided if the input array is of type int, as then the
number of classes cannot be guessed.
"""
DEFAULT_SEGMENT_COLORS = [
(0, 0, 0), # black
(230, 25, 75), # red
(60, 180, 75), # green
(255, 225, 25), # yellow
(0, 130, 200), # blue
(245, 130, 48), # orange
(145, 30, 180), # purple
(70, 240, 240), # cyan
(240, 50, 230), # magenta
(210, 245, 60), # lime
(250, 190, 190), # pink
(0, 128, 128), # teal
(230, 190, 255), # lavender
(170, 110, 40), # brown
(255, 250, 200), # beige
(128, 0, 0), # maroon
(170, 255, 195), # mint
(128, 128, 0), # olive
(255, 215, 180), # coral
(0, 0, 128), # navy
(128, 128, 128), # grey
(255, 255, 255), # white
# --
(115, 12, 37), # dark red
(30, 90, 37), # dark green
(127, 112, 12), # dark yellow
(0, 65, 100), # dark blue
(122, 65, 24), # dark orange
(72, 15, 90), # dark purple
(35, 120, 120), # dark cyan
(120, 25, 115), # dark magenta
(105, 122, 30), # dark lime
(125, 95, 95), # dark pink
(0, 64, 64), # dark teal
(115, 95, 127), # dark lavender
(85, 55, 20), # dark brown
(127, 125, 100), # dark beige
(64, 0, 0), # dark maroon
(85, 127, 97), # dark mint
(64, 64, 0), # dark olive
(127, 107, 90), # dark coral
(0, 0, 64), # dark navy
(64, 64, 64), # dark grey
]
def __init__(self, arr, shape, nb_classes=None):
    # `arr` may be bool, int/uint or float; it is normalized below to a
    # float32 (H,W,C) per-class heatmap representation. `input_was` records
    # the original dtype/ndim so the caller's view can be reconstructed later.
    ia.do_assert(ia.is_np_array(arr), "Expected to get numpy array, got %s." % (type(arr),))
    if arr.dtype.name == "bool":
        # boolean mask(s): (H,W)/(H,W,1) means a single foreground class,
        # (H,W,C) means one (possibly overlapping) mask per class
        ia.do_assert(arr.ndim in [2, 3])
        self.input_was = ("bool", arr.ndim)
        if arr.ndim == 2:
            arr = arr[..., np.newaxis]
        arr = arr.astype(np.float32)
    elif arr.dtype.kind in ["i", "u"]:
        # integer class-index map: one class id per pixel; nb_classes is
        # required because the max id cannot be guessed from the data
        ia.do_assert(arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] == 1))
        ia.do_assert(nb_classes is not None)
        ia.do_assert(nb_classes > 0)
        # NOTE(review): only the first 100 values are validated here,
        # presumably for speed -- out-of-range class ids beyond that window
        # are not caught. TODO confirm this is intentional.
        ia.do_assert(np.min(arr.flat[0:100]) >= 0)
        ia.do_assert(np.max(arr.flat[0:100]) < nb_classes)
        self.input_was = ("int", arr.dtype.type, arr.ndim)
        if arr.ndim == 3:
            arr = arr[..., 0]
        # TODO improve efficiency here by building only sub-heatmaps for classes actually
        # present in the image. This would also get rid of nb_classes.
        arr = np.eye(nb_classes)[arr]  # from class indices to one hot
        arr = arr.astype(np.float32)
    elif arr.dtype.kind == "f":
        # float heatmaps: must already be channelwise, i.e. (H,W,C)
        ia.do_assert(arr.ndim == 3)
        self.input_was = ("float", arr.dtype.type, arr.ndim)
        arr = arr.astype(np.float32)
    else:
        raise Exception(("Input was expected to be an ndarray any bool, int, uint or float dtype. "
                         + "Got dtype %s.") % (arr.dtype.name,))
    # after normalization the internal representation is always
    # float32 with shape (H,W,C)
    ia.do_assert(arr.ndim == 3)
    ia.do_assert(arr.dtype.name == "float32")
    self.arr = arr
    # don't allow arrays here as an alternative to tuples as input
    # as allowing arrays introduces risk to mix up 'arr' and 'shape' args
    self.shape = shape
    # for bool/float input, the channel count defines the number of classes
    self.nb_classes = nb_classes if nb_classes is not None else arr.shape[2]
def get_arr_int(self, background_threshold=0.01, background_class_id=None):
    """
    Get the segmentation map array as an integer array of shape (H, W).

    Each pixel in that array contains an integer value representing the pixel's class.
    If multiple classes overlap, the one with the highest local float value is picked.
    If that highest local value is below `background_threshold`, the method instead uses
    the background class id as the pixel's class value.

    By default, class id 0 is the background class. This may only be changed if the original
    input to the segmentation map object was an integer map.

    Parameters
    ----------
    background_threshold : float, optional
        At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the
        class-heatmaps has a value above this threshold, the method uses the background class
        id instead.
    background_class_id : None or int, optional
        Class id to fall back to if no class-heatmap passes the threshold at a spatial
        location. May only be provided if the original input was an integer mask and in these
        cases defaults to 0. If the input were float or boolean masks, the background class id
        may not be set as it is assumed that the background is implicitly defined
        as 'any spatial location that has zero-like values in all masks'.

    Returns
    -------
    result : (H,W) ndarray
        Segmentation map array (int32).
        If the original input consisted of boolean or float masks, then the highest possible
        class id is ``1+C``, where ``C`` is the number of provided float/boolean masks. The value
        ``0`` in the integer mask then denotes the background class.
    """
    # for bool/float input masks the background is implicit (all-zero
    # channels); for int input, class 0 is the explicit background
    implicit_background = self.input_was[0] in ["bool", "float"]
    if implicit_background:
        ia.do_assert(background_class_id is None,
                     "The background class id may only be changed if the original input to SegmentationMapOnImage "
                     + "was an *integer* based segmentation map.")
    if background_class_id is None:
        background_class_id = 0

    # index of the strongest class-heatmap at each pixel; shifted by +1
    # when the background is implicit so that 0 stays reserved for it
    winner_idx = np.argmax(self.arr, axis=2)
    result = 1 + winner_idx if implicit_background else winner_idx

    if background_threshold is not None and background_threshold > 0:
        strongest = np.amax(self.arr, axis=2)
        result[strongest < background_threshold] = background_class_id

    return result.astype(np.int32)
# TODO
# def get_arr_bool(self, allow_overlapping=False, threshold=0.5, background_threshold=0.01, background_class_id=0):
# raise NotImplementedError()
def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None,
return_foreground_mask=False):
"""
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of int or iterable of float, optional
Size of the rendered RGB image as ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
If set to None, no resizing is performed and the size of the segmentation map array is used.
background_threshold : float, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
background_class_id : None or int, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw. If None, then default colors will be used.
return_foreground_mask : bool, optional
Whether to return a mask of the same size as the drawn segmentation map, containing
True at any spatial location that is not the background class and False everywhere else.
Returns
-------
segmap_drawn : (H,W,3) ndarray
Rendered segmentation map (dtype is uint8).
foreground_mask : (H,W) ndarray
Mask indicating the locations of foreground classes (dtype is bool).
This value is only returned if `return_foreground_mask` is True.
"""
arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
nb_classes = 1 + np.max(arr)
segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
if colors is None:
colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
ia.do_assert(nb_classes <= len(colors),
"Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (
nb_classes, len(colors),))
ids_in_map = np.unique(arr)
for c, color in zip(sm.xrange(nb_classes), colors):
if c in ids_in_map:
class_mask = (arr == c)
segmap_drawn[class_mask] = color
if return_foreground_mask:
background_class_id = 0 if background_class_id is None else background_class_id
foreground_mask = (arr != background_class_id)
else:
foreground_mask = None
if size is not None:
segmap_drawn = ia.imresize_single_image(segmap_drawn, size, interpolation="nearest")
if foreground_mask is not None:
foreground_mask = ia.imresize_single_image(
foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
if foreground_mask is not None:
return segmap_drawn, foreground_mask
return segmap_drawn
def draw_on_image(self, image, alpha=0.75, resize="segmentation_map", background_threshold=0.01,
background_class_id=None, colors=None, draw_background=False):
"""
Draw the segmentation map as an overlay over an image.
Parameters
----------
image : (H,W,3) ndarray
Image onto which to draw the segmentation map. Dtype is expected to be uint8.
alpha : float, optional
Alpha/opacity value to use for the mixing of image and segmentation map.
Higher values mean that the segmentation map will be more visible and the image less visible.
resize : {'segmentation_map', 'image'}, optional
In case of size differences between the image and segmentation map, either the image or
the segmentation map can be resized. This parameter controls which of the two will be
resized to the other's size.
background_threshold : float, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
background_class_id : None or int, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw. If None, then default colors will be used.
draw_background : bool, optional
If True, the background will be drawn like any other class.
If False, the background will not be drawn, i.e. the respective background pixels
will be identical with the image's RGB color at the corresponding spatial location
and no color overlay will be applied.
Returns
-------
mix : (H,W,3) ndarray
Rendered overlays (dtype is uint8).
"""
# assert RGB image
ia.do_assert(image.ndim == 3)
ia.do_assert(image.shape[2] == 3)
ia.do_assert(image.dtype.type == np.uint8)
ia.do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
ia.do_assert(resize in ["segmentation_map", "image"])
if resize == "image":
image = ia.imresize_single_image(image, self.arr.shape[0:2], interpolation="cubic")
segmap_drawn, foreground_mask = self.draw(
background_threshold=background_threshold,
background_class_id=background_class_id,
size=image.shape[0:2] if resize == "segmentation_map" else None,
colors=colors,
return_foreground_mask=True
)
if draw_background:
mix = np.clip(
(1-alpha) * image + alpha * segmap_drawn,
0,
255
).astype(np.uint8)
else:
foreground_mask = foreground_mask[..., np.newaxis]
mix = np.zeros_like(image)
mix += (~foreground_mask).astype(np.uint8) * image
mix += foreground_mask.astype(np.uint8) * np.clip(
(1-alpha) * image + alpha * segmap_drawn,
0,
255
).astype(np.uint8)
return mix
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""
Pad the segmentation map on its top/right/bottom/left side.
Parameters
----------
top : int, optional
Amount of pixels to add at the top side of the segmentation map. Must be 0 or greater.
right : int, optional
Amount of pixels to add at the right side of the segmentation map. Must be 0 or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or greater.
left : int, optional
Amount of pixels to add at the left side of the segmentation map. Must be 0 or greater.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Padded segmentation map of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
"""
arr_padded = ia.pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
segmap.input_was = self.input_was
return segmap
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the segmentation map on its sides so that its matches a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Padded segmentation map as SegmentationMapOnImage object.
pad_amounts : tuple of int
Amounts by which the segmentation map was padded on each side, given as a
tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True.
"""
arr_padded, pad_amounts = ia.pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval,
return_pad_amounts=True)
segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)
segmap.input_was = self.input_was
if return_pad_amounts:
return segmap, pad_amounts
else:
return segmap
@ia.deprecated(alt_func="SegmentationMapOnImage.resize()",
comment="resize() has the exactly same interface.")
def scale(self, *args, **kwargs):
return self.resize(*args, **kwargs)
def resize(self, sizes, interpolation="cubic"):
"""
Resize the segmentation map array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of int or iterable of float
New size of the array in ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
The interpolation to use during resize.
See :func:`imgaug.imgaug.imresize_single_image` for details.
Note: The segmentation map is internally stored as multiple float-based heatmaps,
making smooth interpolations potentially more reasonable than nearest neighbour
interpolation.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Resized segmentation map object.
"""
arr_resized = ia.imresize_single_image(self.arr, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_resized = np.clip(arr_resized, 0.0, 1.0)
segmap = SegmentationMapOnImage(arr_resized, shape=self.shape)
segmap.input_was = self.input_was
return segmap
def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):
"""
Convert segmentation map to heatmaps object.
Each segmentation map class will be represented as a single heatmap channel.
Parameters
----------
only_nonempty : bool, optional
If True, then only heatmaps for classes that appear in the segmentation map will be
generated. Additionally, a list of these class ids will be returned.
not_none_if_no_nonempty : bool, optional
If `only_nonempty` is True and for a segmentation map no channel was non-empty,
this function usually returns None as the heatmaps object. If however this parameter
is set to True, a heatmaps object with one channel (representing class 0)
will be returned as a fallback in these cases.
Returns
-------
imgaug.HeatmapsOnImage or None
Segmentation map as a heatmaps object.
If `only_nonempty` was set to True and no class appeared in the segmentation map,
then this is None.
class_indices : list of int
Class ids (0 to C-1) of the classes that were actually added to the heatmaps.
Only returned if `only_nonempty` was set to True.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.heatmaps import HeatmapsOnImage
if not only_nonempty:
return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=0.0, max_value=1.0)
else:
nonempty_mask = np.sum(self.arr, axis=(0, 1)) > 0 + 1e-4
if np.sum(nonempty_mask) == 0:
if not_none_if_no_nonempty:
nonempty_mask[0] = True
else:
return None, []
class_indices = np.arange(self.arr.shape[2])[nonempty_mask]
channels = self.arr[..., class_indices]
return HeatmapsOnImage(channels, self.shape, min_value=0.0, max_value=1.0), class_indices
@staticmethod
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
"""
Convert heatmaps to segmentation map.
Assumes that each class is represented as a single heatmap channel.
Parameters
----------
heatmaps : imgaug.HeatmapsOnImage
Heatmaps to convert.
class_indices : None or list of int, optional
List of class indices represented by each heatmap channel. See also the
secondary output of :func:`imgaug.SegmentationMapOnImage.to_heatmap`.
If this is provided, it must have the same length as the number of heatmap channels.
nb_classes : None or int, optional
Number of classes. Must be provided if class_indices is set.
Returns
-------
imgaug.SegmentationMapOnImage
Segmentation map derived from heatmaps.
"""
if class_indices is None:
return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
else:
ia.do_assert(nb_classes is not None)
ia.do_assert(min(class_indices) >= 0)
ia.do_assert(max(class_indices) < nb_classes)
ia.do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])
arr_0to1 = heatmaps.arr_0to1
arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
for heatmap_channel, mapped_channel in enumerate(class_indices):
arr_0to1_full[:, :, mapped_channel] = arr_0to1[:, :, heatmap_channel]
return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape)
def copy(self):
"""
Create a shallow copy of the segmentation map object.
Returns
-------
imgaug.SegmentationMapOnImage
Shallow copy.
"""
return self.deepcopy()
def deepcopy(self):
"""
Create a deep copy of the segmentation map object.
Returns
-------
imgaug.SegmentationMapOnImage
Deep copy.
"""
segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
segmap.input_was = self.input_was
return segmap
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmentables/batches.py | augmentation/image_augmentation/helpers/imgaug/augmentables/batches.py | from __future__ import print_function, division, absolute_import
import copy
import warnings
import numpy as np
from .. import imgaug as ia
from . import normalization as nlib
DEFAULT = "DEFAULT"
# TODO also support (H,W,C) for heatmaps of len(images) == 1
# TODO also support (H,W) for segmaps of len(images) == 1
class UnnormalizedBatch(object):
"""
Class for batches of unnormalized data before and after augmentation.
Parameters
----------
images : None \
or (N,H,W,C) ndarray \
or (N,H,W) ndarray \
or iterable of (H,W,C) ndarray \
or iterable of (H,W) ndarray
The images to augment.
heatmaps : None \
or (N,H,W,C) ndarray \
or imgaug.augmentables.heatmaps.HeatmapsOnImage \
or iterable of (H,W,C) ndarray \
or iterable of imgaug.augmentables.heatmaps.HeatmapsOnImage
The heatmaps to augment.
If anything else than ``HeatmapsOnImage``, then the number of heatmaps
must match the number of images provided via parameter `images`.
The number is contained either in ``N`` or the first iterable's size.
segmentation_maps : None \
or (N,H,W) ndarray \
or imgaug.augmentables.segmaps.SegmentationMapOnImage \
or iterable of (H,W) ndarray \
or iterable of imgaug.augmentables.segmaps.SegmentationMapOnImage
The segmentation maps to augment.
If anything else than ``SegmentationMapOnImage``, then the number of
segmaps must match the number of images provided via parameter
`images`. The number is contained either in ``N`` or the first
iterable's size.
keypoints : None \
or list of (N,K,2) ndarray \
or tuple of number \
or imgaug.augmentables.kps.Keypoint \
or iterable of (K,2) ndarray \
or iterable of tuple of number \
or iterable of imgaug.augmentables.kps.Keypoint \
or iterable of imgaug.augmentables.kps.KeypointOnImage \
or iterable of iterable of tuple of number \
or iterable of iterable of imgaug.augmentables.kps.Keypoint
The keypoints to augment.
If a tuple (or iterable(s) of tuple), then iterpreted as (x,y)
coordinates and must hence contain two numbers.
A single tuple represents a single coordinate on one image, an
iterable of tuples the coordinates on one image and an iterable of
iterable of tuples the coordinates on several images. Analogous if
``Keypoint`` objects are used instead of tuples.
If an ndarray, then ``N`` denotes the number of images and ``K`` the
number of keypoints on each image.
If anything else than ``KeypointsOnImage`` is provided, then the
number of keypoint groups must match the number of images provided
via parameter `images`. The number is contained e.g. in ``N`` or
in case of "iterable of iterable of tuples" in the first iterable's
size.
bounding_boxes : None \
or (N,B,4) ndarray \
or tuple of number \
or imgaug.augmentables.bbs.BoundingBox \
or imgaug.augmentables.bbs.BoundingBoxesOnImage \
or iterable of (B,4) ndarray \
or iterable of tuple of number \
or iterable of imgaug.augmentables.bbs.BoundingBox \
or iterable of imgaug.augmentables.bbs.BoundingBoxesOnImage \
or iterable of iterable of tuple of number \
or iterable of iterable imgaug.augmentables.bbs.BoundingBox
The bounding boxes to augment.
This is analogous to the `keypoints` parameter. However, each
tuple -- and also the last index in case of arrays -- has size 4,
denoting the bounding box coordinates ``x1``, ``y1``, ``x2`` and ``y2``.
polygons : None \
or (N,#polys,#points,2) ndarray \
or imgaug.augmentables.polys.Polygon \
or imgaug.augmentables.polys.PolygonsOnImage \
or iterable of (#polys,#points,2) ndarray \
or iterable of tuple of number \
or iterable of imgaug.augmentables.kps.Keypoint \
or iterable of imgaug.augmentables.polys.Polygon \
or iterable of imgaug.augmentables.polys.PolygonsOnImage \
or iterable of iterable of (#points,2) ndarray \
or iterable of iterable of tuple of number \
or iterable of iterable of imgaug.augmentables.kps.Keypoint \
or iterable of iterable of imgaug.augmentables.polys.Polygon \
or iterable of iterable of iterable of tuple of number \
or iterable of iterable of iterable of tuple of \
imgaug.augmentables.kps.Keypoint
The polygons to augment.
This is similar to the `keypoints` parameter. However, each polygon
may be made up of several ``(x,y)`` coordinates (three or more are
required for valid polygons).
The following datatypes will be interpreted as a single polygon on a
single image:
* ``imgaug.augmentables.polys.Polygon``
* ``iterable of tuple of number``
* ``iterable of imgaug.augmentables.kps.Keypoint``
The following datatypes will be interpreted as multiple polygons on a
single image:
* ``imgaug.augmentables.polys.PolygonsOnImage``
* ``iterable of imgaug.augmentables.polys.Polygon``
* ``iterable of iterable of tuple of number``
* ``iterable of iterable of imgaug.augmentables.kps.Keypoint``
* ``iterable of iterable of imgaug.augmentables.polys.Polygon``
The following datatypes will be interpreted as multiple polygons on
multiple images:
* ``(N,#polys,#points,2) ndarray``
* ``iterable of (#polys,#points,2) ndarray``
* ``iterable of iterable of (#points,2) ndarray``
* ``iterable of iterable of iterable of tuple of number``
* ``iterable of iterable of iterable of tuple of imgaug.augmentables.kps.Keypoint``
line_strings : None \
or (N,#lines,#points,2) ndarray \
or imgaug.augmentables.lines.LineString \
or imgaug.augmentables.lines.LineStringOnImage \
or iterable of (#lines,#points,2) ndarray \
or iterable of tuple of number \
or iterable of imgaug.augmentables.kps.Keypoint \
or iterable of imgaug.augmentables.lines.LineString \
or iterable of imgaug.augmentables.lines.LineStringOnImage \
or iterable of iterable of (#points,2) ndarray \
or iterable of iterable of tuple of number \
or iterable of iterable of imgaug.augmentables.kps.Keypoint \
or iterable of iterable of imgaug.augmentables.polys.LineString \
or iterable of iterable of iterable of tuple of number \
or iterable of iterable of iterable of tuple of \
imgaug.augmentables.kps.Keypoint
The line strings to augment.
See `polygons` for more details as polygons follow a similar
structure to line strings.
data
Additional data that is saved in the batch and may be read out
after augmentation. This could e.g. contain filepaths to each image
in `images`. As this object is usually used for background
augmentation with multiple processes, the augmented Batch objects might
not be returned in the original order, making this information useful.
"""
def __init__(self, images=None, heatmaps=None, segmentation_maps=None,
keypoints=None, bounding_boxes=None, polygons=None,
line_strings=None, data=None):
self.images_unaug = images
self.images_aug = None
self.heatmaps_unaug = heatmaps
self.heatmaps_aug = None
self.segmentation_maps_unaug = segmentation_maps
self.segmentation_maps_aug = None
self.keypoints_unaug = keypoints
self.keypoints_aug = None
self.bounding_boxes_unaug = bounding_boxes
self.bounding_boxes_aug = None
self.polygons_unaug = polygons
self.polygons_aug = None
self.line_strings_unaug = line_strings
self.line_strings_aug = None
self.data = data
def to_normalized_batch(self):
"""Convert this unnormalized batch to an instance of Batch.
As this method is intended to be called before augmentation, it
assumes that none of the ``*_aug`` attributes is yet set.
It will produce an AssertionError otherwise.
The newly created Batch's ``*_unaug`` attributes will match the ones
in this batch, just in normalized form.
Returns
-------
imgaug.augmentables.batches.Batch
The batch, with ``*_unaug`` attributes being normalized.
"""
assert all([
attr is None for attr_name, attr in self.__dict__.items()
if attr_name.endswith("_aug")]), \
"Expected UnnormalizedBatch to not contain any augmented data " \
"before normalization, but at least one '*_aug' attribute was " \
"already set."
images_unaug = nlib.normalize_images(self.images_unaug)
shapes = None
if images_unaug is not None:
shapes = [image.shape for image in images_unaug]
return Batch(
images=images_unaug,
heatmaps=nlib.normalize_heatmaps(
self.heatmaps_unaug, shapes),
segmentation_maps=nlib.normalize_segmentation_maps(
self.segmentation_maps_unaug, shapes),
keypoints=nlib.normalize_keypoints(
self.keypoints_unaug, shapes),
bounding_boxes=nlib.normalize_bounding_boxes(
self.bounding_boxes_unaug, shapes),
polygons=nlib.normalize_polygons(
self.polygons_unaug, shapes),
line_strings=nlib.normalize_line_strings(
self.line_strings_unaug, shapes),
data=self.data
)
def fill_from_augmented_normalized_batch(self, batch_aug_norm):
"""
Fill this batch with (normalized) augmentation results.
This method receives a (normalized) Batch instance, takes all
``*_aug`` attributes out if it and assigns them to this
batch *in unnormalized form*. Hence, the datatypes of all ``*_aug``
attributes will match the datatypes of the ``*_unaug`` attributes.
Parameters
----------
batch_aug_norm: imgaug.augmentables.batches.Batch
Batch after normalization and augmentation.
Returns
-------
imgaug.augmentables.batches.UnnormalizedBatch
New UnnormalizedBatch instance. All ``*_unaug`` attributes are
taken from the old UnnormalizedBatch (without deepcopying them)
and all ``*_aug`` attributes are taken from `batch_normalized`
converted to unnormalized form.
"""
# we take here the .data from the normalized batch instead of from
# self for the rare case where one has decided to somehow change it
# during augmentation
batch = UnnormalizedBatch(
images=self.images_unaug,
heatmaps=self.heatmaps_unaug,
segmentation_maps=self.segmentation_maps_unaug,
keypoints=self.keypoints_unaug,
bounding_boxes=self.bounding_boxes_unaug,
polygons=self.polygons_unaug,
line_strings=self.line_strings_unaug,
data=batch_aug_norm.data
)
batch.images_aug = nlib.invert_normalize_images(
batch_aug_norm.images_aug, self.images_unaug)
batch.heatmaps_aug = nlib.invert_normalize_heatmaps(
batch_aug_norm.heatmaps_aug, self.heatmaps_unaug)
batch.segmentation_maps_aug = nlib.invert_normalize_segmentation_maps(
batch_aug_norm.segmentation_maps_aug, self.segmentation_maps_unaug)
batch.keypoints_aug = nlib.invert_normalize_keypoints(
batch_aug_norm.keypoints_aug, self.keypoints_unaug)
batch.bounding_boxes_aug = nlib.invert_normalize_bounding_boxes(
batch_aug_norm.bounding_boxes_aug, self.bounding_boxes_unaug)
batch.polygons_aug = nlib.invert_normalize_polygons(
batch_aug_norm.polygons_aug, self.polygons_unaug)
batch.line_strings_aug = nlib.invert_normalize_line_strings(
batch_aug_norm.line_strings_aug, self.line_strings_unaug)
return batch
class Batch(object):
"""
Class encapsulating a batch before and after augmentation.
Parameters
----------
images : None or (N,H,W,C) ndarray or list of (H,W,C) ndarray
The images to augment.
heatmaps : None or list of imgaug.augmentables.heatmaps.HeatmapsOnImage
The heatmaps to augment.
segmentation_maps : None or list of \
imgaug.augmentables.segmaps.SegmentationMapOnImage
The segmentation maps to augment.
keypoints : None or list of imgaug.augmentables.kps.KeypointOnImage
The keypoints to augment.
bounding_boxes : None \
or list of imgaug.augmentables.bbs.BoundingBoxesOnImage
The bounding boxes to augment.
polygons : None or list of imgaug.augmentables.polys.PolygonsOnImage
The polygons to augment.
line_strings : None or list of imgaug.augmentables.lines.LineStringsOnImage
The line strings to augment.
data
Additional data that is saved in the batch and may be read out
after augmentation. This could e.g. contain filepaths to each image
in `images`. As this object is usually used for background
augmentation with multiple processes, the augmented Batch objects might
not be returned in the original order, making this information useful.
"""
def __init__(self, images=None, heatmaps=None, segmentation_maps=None,
keypoints=None, bounding_boxes=None, polygons=None,
line_strings=None, data=None):
self.images_unaug = images
self.images_aug = None
self.heatmaps_unaug = heatmaps
self.heatmaps_aug = None
self.segmentation_maps_unaug = segmentation_maps
self.segmentation_maps_aug = None
self.keypoints_unaug = keypoints
self.keypoints_aug = None
self.bounding_boxes_unaug = bounding_boxes
self.bounding_boxes_aug = None
self.polygons_unaug = polygons
self.polygons_aug = None
self.line_strings_unaug = line_strings
self.line_strings_aug = None
self.data = data
@property
@ia.deprecated("Batch.images_unaug")
def images(self):
return self.images_unaug
@property
@ia.deprecated("Batch.heatmaps_unaug")
def heatmaps(self):
return self.heatmaps_unaug
@property
@ia.deprecated("Batch.segmentation_maps_unaug")
def segmentation_maps(self):
return self.segmentation_maps_unaug
@property
@ia.deprecated("Batch.keypoints_unaug")
def keypoints(self):
return self.keypoints_unaug
@property
@ia.deprecated("Batch.bounding_boxes_unaug")
def bounding_boxes(self):
return self.bounding_boxes_unaug
@classmethod
def _deepcopy_obj(cls, obj):
if obj is None:
return None
elif ia.is_single_number(obj) or ia.is_string(obj):
return obj
elif isinstance(obj, list):
return [cls._deepcopy_obj(el) for el in obj]
elif isinstance(obj, tuple):
return tuple([cls._deepcopy_obj(el) for el in obj])
elif ia.is_np_array(obj):
return np.copy(obj)
elif hasattr(obj, "deepcopy"):
return obj.deepcopy()
else:
return copy.deepcopy(obj)
def deepcopy(self,
images_unaug=DEFAULT,
images_aug=DEFAULT,
heatmaps_unaug=DEFAULT,
heatmaps_aug=DEFAULT,
segmentation_maps_unaug=DEFAULT,
segmentation_maps_aug=DEFAULT,
keypoints_unaug=DEFAULT,
keypoints_aug=DEFAULT,
bounding_boxes_unaug=DEFAULT,
bounding_boxes_aug=DEFAULT,
polygons_unaug=DEFAULT,
polygons_aug=DEFAULT,
line_strings_unaug=DEFAULT,
line_strings_aug=DEFAULT):
def _copy_optional(self_attr, arg):
return self._deepcopy_obj(arg if arg is not DEFAULT else self_attr)
batch = Batch(
images=_copy_optional(self.images_unaug, images_unaug),
heatmaps=_copy_optional(self.heatmaps_unaug, heatmaps_unaug),
segmentation_maps=_copy_optional(self.segmentation_maps_unaug,
segmentation_maps_unaug),
keypoints=_copy_optional(self.keypoints_unaug, keypoints_unaug),
bounding_boxes=_copy_optional(self.bounding_boxes_unaug,
bounding_boxes_unaug),
polygons=_copy_optional(self.polygons_unaug, polygons_unaug),
line_strings=_copy_optional(self.line_strings_unaug,
line_strings_unaug),
data=copy.deepcopy(self.data)
)
batch.images_aug = _copy_optional(self.images_aug, images_aug)
batch.heatmaps_aug = _copy_optional(self.heatmaps_aug, heatmaps_aug)
batch.segmentation_maps_aug = _copy_optional(self.segmentation_maps_aug,
segmentation_maps_aug)
batch.keypoints_aug = _copy_optional(self.keypoints_aug, keypoints_aug)
batch.bounding_boxes_aug = _copy_optional(self.bounding_boxes_aug,
bounding_boxes_aug)
batch.polygons_aug = _copy_optional(self.polygons_aug, polygons_aug)
batch.line_strings_aug = _copy_optional(self.line_strings_aug,
line_strings_aug)
return batch
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmentables/utils.py | augmentation/image_augmentation/helpers/imgaug/augmentables/utils.py | from __future__ import print_function, absolute_import, division
import numpy as np
import six.moves as sm
import imgaug as ia
# TODO integrate into keypoints
def normalize_shape(shape):
"""
Normalize a shape tuple or array to a shape tuple.
Parameters
----------
shape : tuple of int or ndarray
The input to normalize. May optionally be an array.
Returns
-------
tuple of int
Shape tuple.
"""
if isinstance(shape, tuple):
return shape
assert ia.is_np_array(shape), (
"Expected tuple of ints or array, got %s." % (type(shape),))
return shape.shape
# TODO integrate into keypoints
def project_coords(coords, from_shape, to_shape):
"""
Project coordinates from one image shape to another.
This performs a relative projection, e.g. a point at 60% of the old
image width will be at 60% of the new image width after projection.
Parameters
----------
coords : ndarray or tuple of number
Coordinates to project. Either a ``(N,2)`` numpy array or a tuple
of `(x,y)` coordinates.
from_shape : tuple of int or ndarray
Old image shape.
to_shape : tuple of int or ndarray
New image shape.
Returns
-------
ndarray
Projected coordinates as ``(N,2)`` ``float32`` numpy array.
"""
from_shape = normalize_shape(from_shape)
to_shape = normalize_shape(to_shape)
if from_shape[0:2] == to_shape[0:2]:
return coords
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
assert all([v > 0 for v in [from_height, from_width, to_height, to_width]])
# make sure to not just call np.float32(coords) here as the following lines
# perform in-place changes and np.float32(.) only copies if the input
# was *not* a float32 array
coords_proj = np.array(coords).astype(np.float32)
coords_proj[:, 0] = (coords_proj[:, 0] / from_width) * to_width
coords_proj[:, 1] = (coords_proj[:, 1] / from_height) * to_height
return coords_proj
def interpolate_point_pair(point_a, point_b, nb_steps):
if nb_steps < 1:
return []
x1, y1 = point_a
x2, y2 = point_b
vec = np.float32([x2 - x1, y2 - y1])
step_size = vec / (1 + nb_steps)
return [(x1 + (i + 1) * step_size[0], y1 + (i + 1) * step_size[1]) for i in sm.xrange(nb_steps)]
def interpolate_points(points, nb_steps, closed=True):
if len(points) <= 1:
return points
if closed:
points = list(points) + [points[0]]
points_interp = []
for point_a, point_b in zip(points[:-1], points[1:]):
points_interp.extend([point_a] + interpolate_point_pair(point_a, point_b, nb_steps))
if not closed:
points_interp.append(points[-1])
# close does not have to be reverted here, as last point is not included in the extend()
return points_interp
def interpolate_points_by_max_distance(points, max_distance, closed=True):
ia.do_assert(max_distance > 0, "max_distance must have value greater than 0, got %.8f" % (max_distance,))
if len(points) <= 1:
return points
if closed:
points = list(points) + [points[0]]
points_interp = []
for point_a, point_b in zip(points[:-1], points[1:]):
dist = np.sqrt((point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2)
nb_steps = int((dist / max_distance) - 1)
points_interp.extend([point_a] + interpolate_point_pair(point_a, point_b, nb_steps))
if not closed:
points_interp.append(points[-1])
return points_interp
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmentables/__init__.py | augmentation/image_augmentation/helpers/imgaug/augmentables/__init__.py | from __future__ import absolute_import
from imgaug.augmentables.kps import *
from imgaug.augmentables.bbs import *
from imgaug.augmentables.polys import *
from imgaug.augmentables.lines import *
from imgaug.augmentables.heatmaps import *
from imgaug.augmentables.segmaps import *
from imgaug.augmentables.batches import *
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmentables/heatmaps.py | augmentation/image_augmentation/helpers/imgaug/augmentables/heatmaps.py | from __future__ import print_function, division, absolute_import
import numpy as np
import six.moves as sm
from .. import imgaug as ia
class HeatmapsOnImage(object):
    """
    Object representing heatmaps on images.
    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Array representing the heatmap(s).
        Must be of dtype float32.
        If multiple heatmaps are provided, then ``C`` is expected to denote their number.
    shape : tuple of int
        Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
        heatmap(s) array, unless it is identical to the image shape (note the likely
        difference between the arrays in the number of channels).
        If there is not a corresponding image, use the shape of the heatmaps array.
    min_value : float, optional
        Minimum value for the heatmaps that `arr` represents. This will usually be ``0.0``.
    max_value : float, optional
        Maximum value for the heatmaps that `arr` represents. This will usually be ``1.0``.
    """
    def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
        """Construct a new HeatmapsOnImage object."""
        ia.do_assert(ia.is_np_array(arr), "Expected numpy array as heatmap input array, got type %s" % (type(arr),))
        # TODO maybe allow 0-sized heatmaps? in that case the min() and max() must be adjusted
        ia.do_assert(arr.shape[0] > 0 and arr.shape[1] > 0,
                     "Expected numpy array as heatmap with height and width greater than 0, got shape %s." % (
                         arr.shape,))
        ia.do_assert(arr.dtype.type in [np.float32],
                     "Heatmap input array expected to be of dtype float32, got dtype %s." % (arr.dtype,))
        ia.do_assert(arr.ndim in [2, 3], "Heatmap input array must be 2d or 3d, got shape %s." % (arr.shape,))
        ia.do_assert(len(shape) in [2, 3],
                     "Argument 'shape' in HeatmapsOnImage expected to be 2d or 3d, got shape %s." % (shape,))
        ia.do_assert(min_value < max_value)
        # NOTE: this range check only samples the first 50 components of the
        # array (cheap heuristic); the warning below reports the exact global
        # min/max via np.min(arr)/np.max(arr).
        if np.min(arr.flat[0:50]) < min_value - np.finfo(arr.dtype).eps \
                or np.max(arr.flat[0:50]) > max_value + np.finfo(arr.dtype).eps:
            import warnings
            warnings.warn(
                ("Value range of heatmap was chosen to be (%.8f, %.8f), but "
                 "found actual min/max of (%.8f, %.8f). Array will be "
                 "clipped to chosen value range.") % (
                    min_value, max_value, np.min(arr), np.max(arr)))
            arr = np.clip(arr, min_value, max_value)
        # remember whether the input was (H,W) so that get_arr() can restore
        # that shape later
        if arr.ndim == 2:
            arr = arr[..., np.newaxis]
            self.arr_was_2d = True
        else:
            self.arr_was_2d = False
        # float-tolerant test whether the provided value range is already
        # (0.0, 1.0); only then the array can be stored without rescaling
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < min_value < 0.0 + eps
        max_is_one = 1.0 - eps < max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            self.arr_0to1 = arr
        else:
            # internal representation is always normalized to (0.0, 1.0)
            self.arr_0to1 = (arr - min_value) / (max_value - min_value)
        # don't allow arrays here as an alternative to tuples as input
        # as allowing arrays introduces risk to mix up 'arr' and 'shape' args
        self.shape = shape
        self.min_value = min_value
        self.max_value = max_value
    def get_arr(self):
        """
        Get the heatmap's array within the value range originally provided in ``__init__()``.
        The HeatmapsOnImage object saves heatmaps internally in the value range ``(min=0.0, max=1.0)``.
        This function converts the internal representation to ``(min=min_value, max=max_value)``,
        where ``min_value`` and ``max_value`` are provided upon instantiation of the object.
        Returns
        -------
        result : (H,W) ndarray or (H,W,C) ndarray
            Heatmap array. Dtype is float32.
        """
        # restore the original 2d shape if the input array was (H,W)
        if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
            arr = self.arr_0to1[:, :, 0]
        else:
            arr = self.arr_0to1
        # if the original range already was (0.0, 1.0), no denormalization is
        # needed -- just return a copy of the internal array
        eps = np.finfo(np.float32).eps
        min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
        max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
        if min_is_zero and max_is_one:
            return np.copy(arr)
        else:
            # project (0.0, 1.0) back to (min_value, max_value)
            diff = self.max_value - self.min_value
            return self.min_value + diff * arr
    # TODO
    # def find_global_maxima(self):
    #     raise NotImplementedError()
    def draw(self, size=None, cmap="jet"):
        """
        Render the heatmaps as RGB images.
        Parameters
        ----------
        size : None or float or iterable of int or iterable of float, optional
            Size of the rendered RGB image as ``(height, width)``.
            See :func:`imgaug.imgaug.imresize_single_image` for details.
            If set to None, no resizing is performed and the size of the heatmaps array is used.
        cmap : str or None, optional
            Color map of ``matplotlib`` to use in order to convert the heatmaps to RGB images.
            If set to None, no color map will be used and the heatmaps will be converted
            to simple intensity maps.
        Returns
        -------
        heatmaps_drawn : list of (H,W,3) ndarray
            Rendered heatmaps. One per heatmap array channel. Dtype is uint8.
        """
        heatmaps_uint8 = self.to_uint8()
        heatmaps_drawn = []
        for c in sm.xrange(heatmaps_uint8.shape[2]):
            # c:c+1 here, because the additional axis is needed by imresize_single_image
            heatmap_c = heatmaps_uint8[..., c:c+1]
            if size is not None:
                heatmap_c_rs = ia.imresize_single_image(heatmap_c, size, interpolation="nearest")
            else:
                heatmap_c_rs = heatmap_c
            # back to 2d float in [0.0, 1.0] as expected by matplotlib cmaps
            heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
            if cmap is not None:
                # import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
                import matplotlib.pyplot as plt
                cmap_func = plt.get_cmap(cmap)
                heatmap_cmapped = cmap_func(heatmap_c_rs)
                # drop the alpha channel produced by the colormap, keep RGB
                heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
            else:
                # no colormap: replicate the intensity over three channels
                heatmap_cmapped = np.tile(heatmap_c_rs[..., np.newaxis], (1, 1, 3))
            heatmap_cmapped = np.clip(heatmap_cmapped * 255, 0, 255).astype(np.uint8)
            heatmaps_drawn.append(heatmap_cmapped)
        return heatmaps_drawn
    def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
        """
        Draw the heatmaps as overlays over an image.
        Parameters
        ----------
        image : (H,W,3) ndarray
            Image onto which to draw the heatmaps. Expected to be of dtype uint8.
        alpha : float, optional
            Alpha/opacity value to use for the mixing of image and heatmaps.
            Higher values mean that the heatmaps will be more visible and the image less visible.
        cmap : str or None, optional
            Color map to use. See :func:`imgaug.HeatmapsOnImage.draw` for details.
        resize : {'heatmaps', 'image'}, optional
            In case of size differences between the image and heatmaps, either the image or
            the heatmaps can be resized. This parameter controls which of the two will be resized
            to the other's size.
        Returns
        -------
        mix : list of (H,W,3) ndarray
            Rendered overlays. One per heatmap array channel. Dtype is uint8.
        """
        # assert RGB image
        ia.do_assert(image.ndim == 3)
        ia.do_assert(image.shape[2] == 3)
        ia.do_assert(image.dtype.type == np.uint8)
        ia.do_assert(0 - 1e-8 <= alpha <= 1.0 + 1e-8)
        ia.do_assert(resize in ["heatmaps", "image"])
        if resize == "image":
            image = ia.imresize_single_image(image, self.arr_0to1.shape[0:2], interpolation="cubic")
        heatmaps_drawn = self.draw(
            size=image.shape[0:2] if resize == "heatmaps" else None,
            cmap=cmap
        )
        # alpha-blend each rendered heatmap channel with the image
        mix = [
            np.clip((1-alpha) * image + alpha * heatmap_i, 0, 255).astype(np.uint8)
            for heatmap_i
            in heatmaps_drawn
        ]
        return mix
    def invert(self):
        """
        Inverts each value in the heatmap, shifting low towards high values and vice versa.
        This changes each value to::
            v' = max - (v - min)
        where ``v`` is the value at some spatial location, ``min`` is the minimum value in the heatmap
        and ``max`` is the maximum value.
        As the heatmap uses internally a 0.0 to 1.0 representation, this simply becomes ``v' = 1.0 - v``.
        Note that the attributes ``min_value`` and ``max_value`` are not switched. They both keep their values.
        This function can be useful e.g. when working with depth maps, where algorithms might have
        an easier time representing the furthest away points with zeros, requiring an inverted
        depth map.
        Returns
        -------
        arr_inv : imgaug.HeatmapsOnImage
            Inverted heatmap.
        """
        arr_inv = HeatmapsOnImage.from_0to1(1 - self.arr_0to1, shape=self.shape, min_value=self.min_value,
                                            max_value=self.max_value)
        arr_inv.arr_was_2d = self.arr_was_2d
        return arr_inv
    def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
        """
        Pad the heatmaps on their top/right/bottom/left side.
        Parameters
        ----------
        top : int, optional
            Amount of pixels to add at the top side of the heatmaps. Must be 0 or greater.
        right : int, optional
            Amount of pixels to add at the right side of the heatmaps. Must be 0 or greater.
        bottom : int, optional
            Amount of pixels to add at the bottom side of the heatmaps. Must be 0 or greater.
        left : int, optional
            Amount of pixels to add at the left side of the heatmaps. Must be 0 or greater.
        mode : string, optional
            Padding mode to use. See :func:`numpy.pad` for details.
        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Padded heatmaps of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.
        """
        arr_0to1_padded = ia.pad(self.arr_0to1, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)
        return HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
        """
        Pad the heatmaps on their sides so that they match a target aspect ratio.
        Depending on which dimension is smaller (height or width), only the corresponding
        sides (left/right or top/bottom) will be padded. In each case, both of the sides will
        be padded equally.
        Parameters
        ----------
        aspect_ratio : float
            Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
            as much width as height.
        mode : str, optional
            Padding mode to use. See :func:`numpy.pad` for details.
        cval : number, optional
            Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
        return_pad_amounts : bool, optional
            If False, then only the padded image will be returned. If True, a tuple with two
            entries will be returned, where the first entry is the padded image and the second
            entry are the amounts by which each image side was padded. These amounts are again a
            tuple of the form (top, right, bottom, left), with each value being an integer.
        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Padded heatmaps as HeatmapsOnImage object.
        pad_amounts : tuple of int
            Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
            This tuple is only returned if `return_pad_amounts` was set to True.
        """
        arr_0to1_padded, pad_amounts = ia.pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode,
                                                              cval=cval, return_pad_amounts=True)
        heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
                                             max_value=self.max_value)
        if return_pad_amounts:
            return heatmaps, pad_amounts
        else:
            return heatmaps
    def avg_pool(self, block_size):
        """
        Resize the heatmap(s) array using average pooling of a given block/kernel size.
        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after average pooling.
        """
        arr_0to1_reduced = ia.avg_pool(self.arr_0to1, block_size, cval=0.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def max_pool(self, block_size):
        """
        Resize the heatmap(s) array using max-pooling of a given block/kernel size.
        Parameters
        ----------
        block_size : int or tuple of int
            Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps after max-pooling.
        """
        arr_0to1_reduced = ia.max_pool(self.arr_0to1, block_size)
        return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    @ia.deprecated(alt_func="HeatmapsOnImage.resize()",
                   comment="resize() has the exactly same interface.")
    def scale(self, *args, **kwargs):
        # deprecated alias kept for backwards compatibility; forwards to resize()
        return self.resize(*args, **kwargs)
    def resize(self, sizes, interpolation="cubic"):
        """
        Resize the heatmap(s) array to the provided size given the provided interpolation.
        Parameters
        ----------
        sizes : float or iterable of int or iterable of float
            New size of the array in ``(height, width)``.
            See :func:`imgaug.imgaug.imresize_single_image` for details.
        interpolation : None or str or int, optional
            The interpolation to use during resize.
            See :func:`imgaug.imgaug.imresize_single_image` for details.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Resized heatmaps object.
        """
        arr_0to1_resized = ia.imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)
        # cubic interpolation can lead to values outside of [0.0, 1.0],
        # see https://github.com/opencv/opencv/issues/7195
        # TODO area interpolation too?
        arr_0to1_resized = np.clip(arr_0to1_resized, 0.0, 1.0)
        return HeatmapsOnImage.from_0to1(arr_0to1_resized, shape=self.shape, min_value=self.min_value,
                                         max_value=self.max_value)
    def to_uint8(self):
        """
        Convert this heatmaps object to a 0-to-255 array.
        Returns
        -------
        arr_uint8 : (H,W,C) ndarray
            Heatmap as a 0-to-255 array (dtype is uint8).
        """
        # TODO this always returns (H,W,C), even if input ndarray was originall (H,W)
        # does it make sense here to also return (H,W) if self.arr_was_2d?
        arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
        arr_uint8 = arr_0to255.astype(np.uint8)
        return arr_uint8
    @staticmethod
    def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0 to 255.
        Parameters
        ----------
        arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is uint8.
        shape : tuple of int
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.
        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.
        max_value : float, optional
            Maximum value for the heatmaps that 0-to-255 array represents.
            See parameter `min_value` for details.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Heatmaps object.
        """
        arr_0to1 = arr_uint8.astype(np.float32) / 255.0
        return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
    @staticmethod
    def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
        """
        Create a heatmaps object from an heatmap array containing values ranging from 0.0 to 1.0.
        Parameters
        ----------
        arr_0to1 : (H,W) or (H,W,C) ndarray
            Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
            Expected dtype is float32.
        shape : tuple of ints
            Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
            heatmap(s) array, unless it is identical to the image shape (note the likely
            difference between the arrays in the number of channels).
            If there is not a corresponding image, use the shape of the heatmaps array.
        min_value : float, optional
            Minimum value for the heatmaps that the 0-to-1 array represents. This will usually
            be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
            underlying ``(0.0, 1.0)`` array to value range ``(min_value, max_value)``.
            E.g. if you started with heatmaps in the range ``(-1.0, 1.0)`` and projected these
            to (0.0, 1.0), you should call this function with ``min_value=-1.0``, ``max_value=1.0``
            so that :func:`imgaug.HeatmapsOnImage.get_arr` returns heatmap arrays having value
            range (-1.0, 1.0).
        max_value : float, optional
            Maximum value for the heatmaps that to 0-to-255 array represents.
            See parameter min_value for details.
        Returns
        -------
        heatmaps : imgaug.HeatmapsOnImage
            Heatmaps object.
        """
        # construct with range (0.0, 1.0) so that arr_0to1 is stored unchanged,
        # then overwrite min/max with the caller-provided output range
        heatmaps = HeatmapsOnImage(arr_0to1, shape, min_value=0.0, max_value=1.0)
        heatmaps.min_value = min_value
        heatmaps.max_value = max_value
        return heatmaps
    @classmethod
    def change_normalization(cls, arr, source, target):
        """
        Change the value range of a heatmap from one min-max to another min-max.
        E.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.
        Parameters
        ----------
        arr : ndarray
            Heatmap array to modify.
        source : tuple of float
            Current value range of the input array, given as (min, max), where both are float values.
        target : tuple of float
            Desired output value range of the array, given as (min, max), where both are float values.
        Returns
        -------
        arr_target : ndarray
            Input array, with value range projected to the desired target value range.
        """
        ia.do_assert(ia.is_np_array(arr))
        # HeatmapsOnImage instances may be given instead of (min, max) tuples;
        # their stored value range is used then
        if isinstance(source, HeatmapsOnImage):
            source = (source.min_value, source.max_value)
        else:
            ia.do_assert(isinstance(source, tuple))
            ia.do_assert(len(source) == 2)
            ia.do_assert(source[0] < source[1])
        if isinstance(target, HeatmapsOnImage):
            target = (target.min_value, target.max_value)
        else:
            ia.do_assert(isinstance(target, tuple))
            ia.do_assert(len(target) == 2)
            ia.do_assert(target[0] < target[1])
        # Check if source and target are the same (with a tiny bit of tolerance)
        # if so, evade compuation and just copy the array instead.
        # This is reasonable, as source and target will often both be (0.0, 1.0).
        eps = np.finfo(arr.dtype).eps
        mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
        maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
        if mins_same and maxs_same:
            return np.copy(arr)
        min_source, max_source = source
        min_target, max_target = target
        diff_source = max_source - min_source
        diff_target = max_target - min_target
        # normalize to (0.0, 1.0), then project to the target range
        arr_0to1 = (arr - min_source) / diff_source
        arr_target = min_target + arr_0to1 * diff_target
        return arr_target
    def copy(self):
        """
        Create a shallow copy of the Heatmaps object.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Shallow copy.
        """
        # NOTE: implemented as a deep copy; the array is copied via get_arr()
        return self.deepcopy()
    def deepcopy(self):
        """
        Create a deep copy of the Heatmaps object.
        Returns
        -------
        imgaug.HeatmapsOnImage
            Deep copy.
        """
        return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmentables/normalization.py | augmentation/image_augmentation/helpers/imgaug/augmentables/normalization.py | from __future__ import print_function, division, absolute_import
import functools
import numpy as np
from .. import imgaug as ia
from .. import dtypes as iadt
def _preprocess_shapes(shapes):
if shapes is None:
return None
elif ia.is_np_array(shapes):
assert shapes.ndim in [3, 4]
return [image.shape for image in shapes]
else:
assert isinstance(shapes, list)
result = []
for shape_i in shapes:
if isinstance(shape_i, tuple):
result.append(shape_i)
else:
assert ia.is_np_array(shape_i)
result.append(shape_i.shape)
return result
def _assert_exactly_n_shapes(shapes, n, from_ntype, to_ntype):
if shapes is None:
raise ValueError(
("Tried to convert data of form '%s' to '%s'. This required %d "
+ "corresponding image shapes, but argument 'shapes' was set to "
+ "None. This can happen e.g. if no images were provided in a "
+ "Batch, as these would usually be used to automatically derive "
+ "image shapes.") % (from_ntype, to_ntype, n)
)
elif len(shapes) != n:
raise ValueError(
("Tried to convert data of form '%s' to '%s'. This required "
+ "exactly %d corresponding image shapes, but instead %d were "
+ "provided. This can happen e.g. if more images were provided "
+ "than corresponding augmentables, e.g. 10 images but only 5 "
+ "segmentation maps. It can also happen if there was a "
+ "misunderstanding about how an augmentable input would be "
+ "parsed. E.g. if a list of N (x,y)-tuples was provided as "
+ "keypoints and the expectation was that this would be parsed "
+ "as one keypoint per image for N images, but instead it was "
+ "parsed as N keypoints on 1 image (i.e. 'shapes' would have to "
+ "contain 1 shape, but N would be provided). To avoid this, it "
+ "is recommended to provide imgaug standard classes, e.g. "
+ "KeypointsOnImage for keypoints instead of lists of "
+ "tuples.") % (from_ntype, to_ntype, n, len(shapes))
)
def _assert_single_array_ndim(arr, ndim, shape_str, to_ntype):
if arr.ndim != ndim:
raise ValueError(
("Tried to convert an array to list of %s. Expected "
+ "that array to be of shape %s, i.e. %d-dimensional, but "
+ "got %d dimensions instead.") % (
to_ntype, shape_str, ndim, arr.ndim,)
)
def _assert_many_arrays_ndim(arrs, ndim, shape_str, to_ntype):
# For polygons, this can be a list of lists of arrays, hence we must
# flatten the lists here.
# itertools.chain.from_iterable() seems to flatten the arrays too, so it
# cannot be used here.
iterable_type_str = "iterable"
if len(arrs) == 0:
arrs_flat = []
elif ia.is_np_array(arrs[0]):
arrs_flat = arrs
else:
iterable_type_str = "iterable of iterable"
arrs_flat = [arr for arrs_sublist in arrs for arr in arrs_sublist]
if any([arr.ndim != ndim for arr in arrs_flat]):
raise ValueError(
("Tried to convert an %s of arrays to a list of "
+ "%s. Expected each array to be of shape %s, "
+ "i.e. to be %d-dimensional, but got dimensions %s "
+ "instead (array shapes: %s).") % (
iterable_type_str, to_ntype, shape_str, ndim,
", ".join([str(arr.ndim) for arr in arrs_flat]),
", ".join([str(arr.shape) for arr in arrs_flat]))
)
def _assert_single_array_last_dim_exactly(arr, size, to_ntype):
if arr.shape[-1] != size:
raise ValueError(
("Tried to convert an array to a list of %s. Expected the array's "
+ "last dimension to have size %d, but got %d instead (array "
+ "shape: %s).") % (
to_ntype, size, arr.shape[-1], str(arr.shape))
)
def _assert_many_arrays_last_dim_exactly(arrs, size, to_ntype):
# For polygons, this can be a list of lists of arrays, hence we must
# flatten the lists here.
# itertools.chain.from_iterable() seems to flatten the arrays too, so it
# cannot be used here.
iterable_type_str = "iterable"
if len(arrs) == 0:
arrs_flat = []
elif ia.is_np_array(arrs[0]):
arrs_flat = arrs
else:
iterable_type_str = "iterable of iterable"
arrs_flat = [arr for arrs_sublist in arrs for arr in arrs_sublist]
if any([arr.shape[-1] != size for arr in arrs_flat]):
raise ValueError(
("Tried to convert an %s of array to a list of %s. Expected the "
+ "arrays' last dimensions to have size %d, but got %s instead "
+ "(array shapes: %s).") % (
iterable_type_str, to_ntype, size,
", ".join([str(arr.shape[-1]) for arr in arrs_flat]),
", ".join([str(arr.shape) for arr in arrs_flat]))
)
def normalize_images(images):
    """Normalize an images argument to None, an (N,H,W,C) array or a list of (H,W,C) arrays.

    A single 2d array is treated as one grayscale image and expanded to
    (1,H,W,1); a 3d array as a batch of grayscale images, expanded to
    (N,H,W,1). For iterables, each 2d entry gains a channel axis.
    """
    if images is None:
        return None
    if ia.is_np_array(images):
        if images.ndim == 2:
            # single (H,W) image -> (1,H,W,1)
            return images[np.newaxis, ..., np.newaxis]
        if images.ndim == 3:
            # (N,H,W) batch of grayscale images -> (N,H,W,1)
            return images[..., np.newaxis]
        return images
    if ia.is_iterable(images):
        # single pass so that one-shot iterables are handled like lists
        normalized = []
        for image in images:
            assert image.ndim in [2, 3], (
                ("Got a list of arrays as argument 'images'. Expected each "
                 "array in that list to have 2 or 3 dimensions, i.e. shape "
                 "(H,W) or (H,W,C). Got %d dimensions "
                 "instead.") % (image.ndim,)
            )
            normalized.append(
                image[..., np.newaxis] if image.ndim == 2 else image)
        return normalized
    raise ValueError(
        ("Expected argument 'images' to be any of the following: "
         "None or array or iterable of array. Got type: %s.") % (
            type(images),)
    )
def normalize_heatmaps(inputs, shapes=None):
    """Normalize heatmap inputs to a list of ``HeatmapsOnImage`` or ``None``.

    Dispatches on the normalization type estimated from `inputs` (see
    ``estimate_heatmaps_norm_type``): ``None``, a single ``(N,H,W,C)`` float
    array, a single ``HeatmapsOnImage``, or iterables of ``(H,W,C)`` float
    arrays / ``HeatmapsOnImage`` instances. `shapes` supplies the
    corresponding image shapes for array inputs; raises ValueError (via the
    shape-count assertion) when their number does not match.
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.heatmaps import HeatmapsOnImage
    shapes = _preprocess_shapes(shapes)
    ntype = estimate_heatmaps_norm_type(inputs)
    _assert_exactly_n_shapes_partial = functools.partial(
        _assert_exactly_n_shapes,
        from_ntype=ntype, to_ntype="List[HeatmapsOnImage]", shapes=shapes)
    if ntype == "None":
        return None
    elif ntype == "array[float]":
        # one (N,H,W,C) batch array -> one HeatmapsOnImage per batch entry
        _assert_single_array_ndim(inputs, 4, "(N,H,W,C)", "HeatmapsOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [HeatmapsOnImage(attr_i, shape=shape_i)
                for attr_i, shape_i in zip(inputs, shapes)]
    elif ntype == "HeatmapsOnImage":
        return [inputs]
    elif ntype == "iterable[empty]":
        return None
    elif ntype == "iterable-array[float]":
        # iterable of (H,W,C) arrays, one per image
        _assert_many_arrays_ndim(inputs, 3, "(H,W,C)", "HeatmapsOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [HeatmapsOnImage(attr_i, shape=shape_i)
                for attr_i, shape_i in zip(inputs, shapes)]
    else:
        assert ntype == "iterable-HeatmapsOnImage", (
            "Got unknown normalization type '%s'." % (ntype,))
        return inputs  # len allowed to differ from len of images
def normalize_segmentation_maps(inputs, shapes=None):
    """Normalize segmentation map inputs to a list of ``SegmentationMapOnImage`` or ``None``.

    Dispatches on the normalization type estimated from `inputs`: ``None``,
    a single ``(N,H,W)`` int/uint/bool array, a single
    ``SegmentationMapOnImage``, or iterables of ``(H,W)`` arrays / instances.
    For non-bool integer arrays, the class count is derived from the array's
    maximum value (``nb_classes=1+np.max``). `shapes` supplies the
    corresponding image shapes for array inputs.
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.segmaps import SegmentationMapOnImage
    shapes = _preprocess_shapes(shapes)
    ntype = estimate_segmaps_norm_type(inputs)
    _assert_exactly_n_shapes_partial = functools.partial(
        _assert_exactly_n_shapes,
        from_ntype=ntype, to_ntype="List[SegmentationMapOnImage]",
        shapes=shapes)
    if ntype == "None":
        return None
    elif ntype in ["array[int]", "array[uint]", "array[bool]"]:
        # one (N,H,W) batch array -> one SegmentationMapOnImage per entry
        _assert_single_array_ndim(inputs, 3, "(N,H,W)",
                                  "SegmentationMapOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        if ntype == "array[bool]":
            # bool maps carry no class ids, hence no nb_classes argument
            return [SegmentationMapOnImage(attr_i, shape=shape)
                    for attr_i, shape in zip(inputs, shapes)]
        return [SegmentationMapOnImage(
                    attr_i, shape=shape, nb_classes=1+np.max(attr_i))
                for attr_i, shape in zip(inputs, shapes)]
    elif ntype == "SegmentationMapOnImage":
        return [inputs]
    elif ntype == "iterable[empty]":
        return None
    elif ntype in ["iterable-array[int]",
                   "iterable-array[uint]",
                   "iterable-array[bool]"]:
        # iterable of (H,W) arrays, one per image
        _assert_many_arrays_ndim(inputs, 2, "(H,W)", "SegmentationMapsOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        if ntype == "iterable-array[bool]":
            return [SegmentationMapOnImage(attr_i, shape=shape)
                    for attr_i, shape in zip(inputs, shapes)]
        return [SegmentationMapOnImage(
                    attr_i, shape=shape, nb_classes=1+np.max(attr_i))
                for attr_i, shape in zip(inputs, shapes)]
    else:
        assert ntype == "iterable-SegmentationMapOnImage", (
            "Got unknown normalization type '%s'." % (ntype,))
        return inputs  # len allowed to differ from len of images
def normalize_keypoints(inputs, shapes=None):
    """Normalize keypoint inputs to a list of ``KeypointsOnImage`` or ``None``.

    Accepted forms include ``None``, a single ``(N,K,2)`` numeric array, a
    single ``(x,y)`` tuple, a single ``Keypoint``/``KeypointsOnImage``, and
    one or two levels of iterables of these. `shapes` supplies the
    corresponding image shapes; the single-keypoint forms require exactly one
    shape.
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
    shapes = _preprocess_shapes(shapes)
    ntype = estimate_keypoints_norm_type(inputs)
    _assert_exactly_n_shapes_partial = functools.partial(
        _assert_exactly_n_shapes,
        from_ntype=ntype, to_ntype="List[KeypointsOnImage]",
        shapes=shapes)
    if ntype == "None":
        return inputs  # inputs is None here
    elif ntype in ["array[float]", "array[int]", "array[uint]"]:
        # one (N,K,2) batch array -> one KeypointsOnImage per batch entry
        _assert_single_array_ndim(inputs, 3, "(N,K,2)", "KeypointsOnImage")
        _assert_single_array_last_dim_exactly(inputs, 2, "KeypointsOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            KeypointsOnImage.from_xy_array(attr_i, shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
    elif ntype == "tuple[number,size=2]":
        # a single (x,y) tuple on a single image
        _assert_exactly_n_shapes_partial(n=1)
        return [KeypointsOnImage([Keypoint(x=inputs[0], y=inputs[1])],
                                 shape=shapes[0])]
    elif ntype == "Keypoint":
        _assert_exactly_n_shapes_partial(n=1)
        return [KeypointsOnImage([inputs], shape=shapes[0])]
    elif ntype == "KeypointsOnImage":
        return [inputs]
    elif ntype == "iterable[empty]":
        return None
    elif ntype in ["iterable-array[float]",
                   "iterable-array[int]",
                   "iterable-array[uint]"]:
        # iterable of (K,2) arrays, one per image
        _assert_many_arrays_ndim(inputs, 2, "(K,2)", "KeypointsOnImage")
        _assert_many_arrays_last_dim_exactly(inputs, 2, "KeypointsOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            KeypointsOnImage.from_xy_array(attr_i, shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
    elif ntype == "iterable-tuple[number,size=2]":
        # list of (x,y) tuples is interpreted as many keypoints on ONE image
        _assert_exactly_n_shapes_partial(n=1)
        return [KeypointsOnImage([Keypoint(x=x, y=y) for x, y in inputs],
                                 shape=shapes[0])]
    elif ntype == "iterable-Keypoint":
        _assert_exactly_n_shapes_partial(n=1)
        return [KeypointsOnImage(inputs, shape=shapes[0])]
    elif ntype == "iterable-KeypointsOnImage":
        return inputs
    elif ntype == "iterable-iterable[empty]":
        return None
    elif ntype == "iterable-iterable-tuple[number,size=2]":
        # nested iterables: one inner iterable of (x,y) tuples per image
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            KeypointsOnImage.from_xy_array(
                np.array(attr_i, dtype=np.float32),
                shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
    else:
        assert ntype == "iterable-iterable-Keypoint", (
            "Got unknown normalization type '%s'." % (ntype,))
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [KeypointsOnImage(attr_i, shape=shape)
                for attr_i, shape
                in zip(inputs, shapes)]
def normalize_bounding_boxes(inputs, shapes=None):
    """Normalize bounding box inputs to a list of ``BoundingBoxesOnImage`` or ``None``.

    Accepted forms include ``None``, a single ``(N,B,4)`` numeric array of
    xyxy coordinates, a single ``(x1,y1,x2,y2)`` tuple, a single
    ``BoundingBox``/``BoundingBoxesOnImage``, and one or two levels of
    iterables of these. `shapes` supplies the corresponding image shapes;
    the single-box forms require exactly one shape.
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
    shapes = _preprocess_shapes(shapes)
    ntype = estimate_bounding_boxes_norm_type(inputs)
    _assert_exactly_n_shapes_partial = functools.partial(
        _assert_exactly_n_shapes,
        from_ntype=ntype, to_ntype="List[BoundingBoxesOnImage]",
        shapes=shapes)
    if ntype == "None":
        return None
    elif ntype in ["array[float]", "array[int]", "array[uint]"]:
        # one (N,B,4) batch array -> one BoundingBoxesOnImage per batch entry
        _assert_single_array_ndim(inputs, 3, "(N,B,4)", "BoundingBoxesOnImage")
        _assert_single_array_last_dim_exactly(inputs, 4, "BoundingBoxesOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            BoundingBoxesOnImage.from_xyxy_array(attr_i, shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
    elif ntype == "tuple[number,size=4]":
        # a single (x1,y1,x2,y2) tuple on a single image
        _assert_exactly_n_shapes_partial(n=1)
        return [
            BoundingBoxesOnImage(
                [BoundingBox(
                    x1=inputs[0], y1=inputs[1],
                    x2=inputs[2], y2=inputs[3])],
                shape=shapes[0])
        ]
    elif ntype == "BoundingBox":
        _assert_exactly_n_shapes_partial(n=1)
        return [BoundingBoxesOnImage([inputs], shape=shapes[0])]
    elif ntype == "BoundingBoxesOnImage":
        return [inputs]
    elif ntype == "iterable[empty]":
        return None
    elif ntype in ["iterable-array[float]",
                   "iterable-array[int]",
                   "iterable-array[uint]"]:
        # iterable of (B,4) arrays, one per image
        _assert_many_arrays_ndim(inputs, 2, "(B,4)", "BoundingBoxesOnImage")
        _assert_many_arrays_last_dim_exactly(inputs, 4, "BoundingBoxesOnImage")
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            BoundingBoxesOnImage.from_xyxy_array(attr_i, shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
    elif ntype == "iterable-tuple[number,size=4]":
        # list of xyxy tuples is interpreted as many boxes on ONE image
        _assert_exactly_n_shapes_partial(n=1)
        return [
            BoundingBoxesOnImage(
                [BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2)
                 for x1, y1, x2, y2 in inputs],
                shape=shapes[0])
        ]
    elif ntype == "iterable-BoundingBox":
        _assert_exactly_n_shapes_partial(n=1)
        return [BoundingBoxesOnImage(inputs, shape=shapes[0])]
    elif ntype == "iterable-BoundingBoxesOnImage":
        return inputs
    elif ntype == "iterable-iterable[empty]":
        return None
    elif ntype == "iterable-iterable-tuple[number,size=4]":
        # nested iterables: one inner iterable of xyxy tuples per image
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            BoundingBoxesOnImage.from_xyxy_array(
                np.array(attr_i, dtype=np.float32),
                shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
    else:
        assert ntype == "iterable-iterable-BoundingBox", (
            "Got unknown normalization type '%s'." % (ntype,))
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [BoundingBoxesOnImage(attr_i, shape=shape)
                for attr_i, shape
                in zip(inputs, shapes)]
def normalize_polygons(inputs, shapes=None):
    """Normalize polygon inputs to a list of ``PolygonsOnImage`` (or ``None``)."""
    # TODO get rid of this deferred import
    from imgaug.augmentables.polys import Polygon, PolygonsOnImage
    return _normalize_polygons_and_line_strings(
        inputs=inputs,
        shapes=shapes,
        cls_single=Polygon,
        cls_oi=PolygonsOnImage,
        axis_names=["#polys", "#points"],
        estimate_ntype_func=estimate_polygons_norm_type,
    )
def normalize_line_strings(inputs, shapes=None):
    """Normalize line string inputs to a list of ``LineStringsOnImage`` (or ``None``)."""
    # TODO get rid of this deferred import
    from imgaug.augmentables.lines import LineString, LineStringsOnImage
    return _normalize_polygons_and_line_strings(
        inputs=inputs,
        shapes=shapes,
        cls_single=LineString,
        cls_oi=LineStringsOnImage,
        axis_names=["#lines", "#points"],
        estimate_ntype_func=estimate_line_strings_norm_type,
    )
def _normalize_polygons_and_line_strings(cls_single, cls_oi, axis_names,
                                         estimate_ntype_func,
                                         inputs, shapes=None):
    """Shared normalization for polygons and line strings.

    Estimates the layout of `inputs` via `estimate_ntype_func` and
    converts each supported layout (single array, single instance,
    nested iterables of arrays/tuples/Keypoints/instances) into a list
    of `cls_oi` containers, one per image shape.

    Parameters
    ----------
    cls_single : type
        Class of one geometry item, e.g. ``Polygon`` or ``LineString``.
    cls_oi : type
        Corresponding ``*OnImage`` container class.
    axis_names : list of str
        Axis labels used in assertion error messages,
        e.g. ``["#polys", "#points"]``.
    estimate_ntype_func : callable
        Function mapping `inputs` to a normalization-type string.
    inputs : object
        The un-normalized input data.
    shapes : None or iterable, optional
        Image shapes (or images) onto which the geometries are placed.

    Returns
    -------
    None or list of cls_oi
        ``None`` for None/empty inputs, otherwise one container per
        image shape.
    """
    cls_single_name = cls_single.__name__
    cls_oi_name = cls_oi.__name__
    # Axis-label strings for error messages of the array-shape asserts below.
    axis_names_4_str = "(N,%s,%s,2)" % (axis_names[0], axis_names[1])
    axis_names_3_str = "(%s,%s,2)" % (axis_names[0], axis_names[1])
    axis_names_2_str = "(%s,2)" % (axis_names[1],)
    shapes = _preprocess_shapes(shapes)
    ntype = estimate_ntype_func(inputs)
    # Pre-bind the constant arguments of the shape-count assert so each
    # branch below only has to supply the expected count `n`.
    _assert_exactly_n_shapes_partial = functools.partial(
        _assert_exactly_n_shapes,
        from_ntype=ntype, to_ntype=("List[%s]" % (cls_oi_name,)),
        shapes=shapes)
    if ntype == "None":
        return None
    elif ntype in ["array[float]", "array[int]", "array[uint]"]:
        # One 4-dimensional array covering all images.
        _assert_single_array_ndim(inputs, 4, axis_names_4_str,
                                  cls_oi_name)
        _assert_single_array_last_dim_exactly(inputs, 2, cls_oi_name)
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            cls_oi(
                [cls_single(points) for points in attr_i],
                shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
    elif ntype == cls_single_name:
        # A single geometry instance -> wrap in one container.
        _assert_exactly_n_shapes_partial(n=1)
        return [cls_oi([inputs], shape=shapes[0])]
    elif ntype == cls_oi_name:
        # Already a container -> only wrap in a list.
        return [inputs]
    elif ntype == "iterable[empty]":
        return None
    elif ntype in ["iterable-array[float]",
                   "iterable-array[int]",
                   "iterable-array[uint]"]:
        # One 3-dimensional array per image.
        _assert_many_arrays_ndim(inputs, 3, axis_names_3_str,
                                 cls_oi_name)
        _assert_many_arrays_last_dim_exactly(inputs, 2, cls_oi_name)
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            cls_oi([cls_single(points) for points in attr_i], shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
    elif ntype == "iterable-tuple[number,size=2]":
        # Flat list of (x, y) tuples -> a single geometry on one image.
        _assert_exactly_n_shapes_partial(n=1)
        return [cls_oi([cls_single(inputs)], shape=shapes[0])]
    elif ntype == "iterable-Keypoint":
        # Flat list of Keypoints -> a single geometry on one image.
        _assert_exactly_n_shapes_partial(n=1)
        return [cls_oi([cls_single(inputs)], shape=shapes[0])]
    elif ntype == ("iterable-%s" % (cls_single_name,)):
        # List of geometry instances -> all on one image.
        _assert_exactly_n_shapes_partial(n=1)
        return [cls_oi(inputs, shape=shapes[0])]
    elif ntype == ("iterable-%s" % (cls_oi_name,)):
        return inputs
    elif ntype == "iterable-iterable[empty]":
        return None
    elif ntype in ["iterable-iterable-array[float]",
                   "iterable-iterable-array[int]",
                   "iterable-iterable-array[uint]"]:
        # Per image: a list of 2-dimensional point arrays.
        _assert_many_arrays_ndim(inputs, 2, axis_names_2_str, cls_oi_name)
        _assert_many_arrays_last_dim_exactly(inputs, 2, cls_oi_name)
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            cls_oi(
                [cls_single(points) for points in attr_i],
                shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
    elif ntype == "iterable-iterable-tuple[number,size=2]":
        # List of geometries, each given as a list of (x, y) tuples,
        # all on one image.
        _assert_exactly_n_shapes_partial(n=1)
        return [
            cls_oi([cls_single(attr_i) for attr_i in inputs],
                   shape=shapes[0])
        ]
    elif ntype == "iterable-iterable-Keypoint":
        # Same as above, but points are given as Keypoints.
        _assert_exactly_n_shapes_partial(n=1)
        return [
            cls_oi([cls_single(attr_i) for attr_i in inputs],
                   shape=shapes[0])
        ]
    elif ntype == ("iterable-iterable-%s" % (cls_single_name,)):
        # Per image: a list of geometry instances.
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            cls_oi(attr_i, shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
    elif ntype == "iterable-iterable-iterable[empty]":
        return None
    else:
        # Deepest nesting: per image, per geometry, a list of points
        # given either as tuples or as Keypoints.
        assert ntype in ["iterable-iterable-iterable-tuple[number,size=2]",
                         "iterable-iterable-iterable-Keypoint"], (
            "Got unknown normalization type '%s'." % (ntype,))
        _assert_exactly_n_shapes_partial(n=len(inputs))
        return [
            cls_oi(
                [cls_single(points) for points in attr_i],
                shape=shape)
            for attr_i, shape
            in zip(inputs, shapes)
        ]
def invert_normalize_images(images, images_old):
    """Invert the normalization of images using the pre-normalization input.

    Restores the container layout and channel/batch axes of `images_old`
    on the (augmented) `images`.
    """
    if images_old is None:
        assert images is None
        return None

    if ia.is_np_array(images_old):
        ndim_old = images_old.ndim
        if ndim_old == 2:
            # a single channelless image was expanded by batch + channel axes
            assert images.shape[0] == 1
            assert images.shape[3] == 1
            return images[0, ..., 0]
        if ndim_old == 3:
            # channelless images were expanded by a channel axis
            assert images.shape[3] == 1
            return images[..., 0]
        return images

    if ia.is_iterable(images_old):
        inverted = []
        for image_aug, image_orig in zip(images, images_old):
            if image_orig.ndim == 2:
                # drop the channel axis that normalization added
                assert image_aug.shape[2] == 1
                inverted.append(image_aug[:, :, 0])
            else:
                assert image_aug.ndim == 3
                assert image_orig.ndim == 3
                inverted.append(image_aug)
        return inverted

    raise ValueError(
        ("Expected argument 'images_old' to be any of the following: "
         + "None or array or iterable of array. Got type: %s.") % (
             type(images_old),)
    )
def invert_normalize_heatmaps(heatmaps, heatmaps_old):
    """Invert the normalization of heatmaps.

    Converts the normalized list of ``HeatmapsOnImage`` instances back to
    the container layout and dtype of `heatmaps_old`.
    """
    ntype = estimate_heatmaps_norm_type(heatmaps_old)
    if ntype == "None":
        assert heatmaps is None
        return heatmaps
    elif ntype == "array[float]":
        # single float array over all images -> merge back into one array
        assert len(heatmaps) == heatmaps_old.shape[0]
        input_dtype = heatmaps_old.dtype
        return restore_dtype_and_merge(
            [hm_i.arr_0to1 for hm_i in heatmaps],
            input_dtype)
    elif ntype == "HeatmapsOnImage":
        assert len(heatmaps) == 1
        return heatmaps[0]
    elif ntype == "iterable[empty]":
        assert heatmaps is None
        return []
    elif ntype == "iterable-array[float]":
        # restore the dtype of the first non-empty original array
        nonempty, _, _ = find_first_nonempty(heatmaps_old)
        input_dtype = nonempty.dtype
        return [restore_dtype_and_merge(hm_i.arr_0to1, input_dtype)
                for hm_i in heatmaps]
    else:
        assert ntype == "iterable-HeatmapsOnImage", (
            "Got unknown normalization type '%s'." % (ntype,))
        return heatmaps
def invert_normalize_segmentation_maps(segmentation_maps,
                                       segmentation_maps_old):
    """Invert the normalization of segmentation maps.

    Converts the normalized list of ``SegmentationMapOnImage`` instances
    back to the container layout and dtype of `segmentation_maps_old`.
    """
    ntype = estimate_segmaps_norm_type(segmentation_maps_old)
    if ntype == "None":
        assert segmentation_maps is None
        return segmentation_maps
    elif ntype in ["array[int]", "array[uint]", "array[bool]"]:
        # single array over all images -> merge back into one array
        assert len(segmentation_maps) == segmentation_maps_old.shape[0]
        input_dtype = segmentation_maps_old.dtype
        return restore_dtype_and_merge(
            [segmap_i.get_arr_int() for segmap_i in segmentation_maps],
            input_dtype)
    elif ntype == "SegmentationMapOnImage":
        assert len(segmentation_maps) == 1
        return segmentation_maps[0]
    elif ntype == "iterable[empty]":
        assert segmentation_maps is None
        return []
    elif ntype in ["iterable-array[int]",
                   "iterable-array[uint]",
                   "iterable-array[bool]"]:
        # restore the dtype of the first non-empty original array
        nonempty, _, _ = find_first_nonempty(segmentation_maps_old)
        input_dtype = nonempty.dtype
        return [restore_dtype_and_merge(segmap_i.get_arr_int(), input_dtype)
                for segmap_i in segmentation_maps]
    else:
        assert ntype == "iterable-SegmentationMapOnImage", (
            "Got unknown normalization type '%s'." % (ntype,))
        return segmentation_maps
def invert_normalize_keypoints(keypoints, keypoints_old):
    """Invert the normalization of keypoints.

    Converts the normalized list of ``KeypointsOnImage`` instances back
    to the container layout (array, tuples, Keypoints, nested lists)
    and dtype of `keypoints_old`.
    """
    ntype = estimate_keypoints_norm_type(keypoints_old)
    if ntype == "None":
        assert keypoints is None
        return keypoints
    elif ntype in ["array[float]", "array[int]", "array[uint]"]:
        # a single coordinate array -> merge back and restore dtype
        assert len(keypoints) == 1
        input_dtype = keypoints_old.dtype
        return restore_dtype_and_merge(
            [kpsoi.to_xy_array() for kpsoi in keypoints],
            input_dtype)
    elif ntype == "tuple[number,size=2]":
        # originally a single (x, y) tuple
        assert len(keypoints) == 1
        assert len(keypoints[0].keypoints) == 1
        return (keypoints[0].keypoints[0].x,
                keypoints[0].keypoints[0].y)
    elif ntype == "Keypoint":
        assert len(keypoints) == 1
        assert len(keypoints[0].keypoints) == 1
        return keypoints[0].keypoints[0]
    elif ntype == "KeypointsOnImage":
        assert len(keypoints) == 1
        return keypoints[0]
    elif ntype == "iterable[empty]":
        assert keypoints is None
        return []
    elif ntype in ["iterable-array[float]",
                   "iterable-array[int]",
                   "iterable-array[uint]"]:
        # restore the dtype of the first non-empty original array
        nonempty, _, _ = find_first_nonempty(keypoints_old)
        input_dtype = nonempty.dtype
        return [
            restore_dtype_and_merge(kps_i.to_xy_array(), input_dtype)
            for kps_i in keypoints]
    elif ntype == "iterable-tuple[number,size=2]":
        assert len(keypoints) == 1
        return [
            (kp.x, kp.y) for kp in keypoints[0].keypoints]
    elif ntype == "iterable-Keypoint":
        assert len(keypoints) == 1
        return keypoints[0].keypoints
    elif ntype == "iterable-KeypointsOnImage":
        return keypoints
    elif ntype == "iterable-iterable[empty]":
        # original was a nesting of empty iterables; return a shallow copy
        assert keypoints is None
        return keypoints_old[:]
    elif ntype == "iterable-iterable-tuple[number,size=2]":
        return [
            [(kp.x, kp.y) for kp in kpsoi.keypoints]
            for kpsoi in keypoints]
    else:
        assert ntype == "iterable-iterable-Keypoint", (
            "Got unknown normalization type '%s'." % (ntype,))
        return [
            [kp for kp in kpsoi.keypoints]
            for kpsoi in keypoints]
def invert_normalize_bounding_boxes(bounding_boxes, bounding_boxes_old):
    """Invert the normalization of bounding boxes.

    Converts the normalized list of ``BoundingBoxesOnImage`` instances
    back to the container layout (xyxy array, tuples, BoundingBox
    instances, nested lists) and dtype of `bounding_boxes_old`.
    """
    ntype = estimate_normalization_type(bounding_boxes_old)
    if ntype == "None":
        assert bounding_boxes is None
        return bounding_boxes
    elif ntype in ["array[float]", "array[int]", "array[uint]"]:
        # a single xyxy coordinate array -> merge back and restore dtype
        assert len(bounding_boxes) == 1
        input_dtype = bounding_boxes_old.dtype
        return restore_dtype_and_merge([
            bbsoi.to_xyxy_array() for bbsoi in bounding_boxes
        ], input_dtype)
    elif ntype == "tuple[number,size=4]":
        # originally a single (x1, y1, x2, y2) tuple
        assert len(bounding_boxes) == 1
        assert len(bounding_boxes[0].bounding_boxes) == 1
        bb = bounding_boxes[0].bounding_boxes[0]
        return bb.x1, bb.y1, bb.x2, bb.y2
    elif ntype == "BoundingBox":
        assert len(bounding_boxes) == 1
        assert len(bounding_boxes[0].bounding_boxes) == 1
        return bounding_boxes[0].bounding_boxes[0]
    elif ntype == "BoundingBoxesOnImage":
        assert len(bounding_boxes) == 1
        return bounding_boxes[0]
    elif ntype == "iterable[empty]":
        assert bounding_boxes is None
        return []
    elif ntype in ["iterable-array[float]",
                   "iterable-array[int]",
                   "iterable-array[uint]"]:
        # restore the dtype of the first non-empty original array
        nonempty, _, _ = find_first_nonempty(bounding_boxes_old)
        input_dtype = nonempty.dtype
        return [
            restore_dtype_and_merge(bbsoi.to_xyxy_array(), input_dtype)
            for bbsoi in bounding_boxes]
    elif ntype == "iterable-tuple[number,size=4]":
        assert len(bounding_boxes) == 1
        return [
            (bb.x1, bb.y1, bb.x2, bb.y2)
            for bb in bounding_boxes[0].bounding_boxes]
    elif ntype == "iterable-BoundingBox":
        assert len(bounding_boxes) == 1
        return bounding_boxes[0].bounding_boxes
    elif ntype == "iterable-BoundingBoxesOnImage":
        return bounding_boxes
    elif ntype == "iterable-iterable[empty]":
        # original was a nesting of empty iterables; return a shallow copy
        assert bounding_boxes is None
        return bounding_boxes_old[:]
    elif ntype == "iterable-iterable-tuple[number,size=4]":
        return [
            [(bb.x1, bb.y1, bb.x2, bb.y2) for bb in bbsoi.bounding_boxes]
            for bbsoi in bounding_boxes]
    else:
        assert ntype == "iterable-iterable-BoundingBox", (
            "Got unknown normalization type '%s'." % (ntype,))
        return [
            [bb for bb in bbsoi.bounding_boxes]
            for bbsoi in bounding_boxes]
def invert_normalize_polygons(polygons, polygons_old):
    """Invert the normalization of polygons.

    Delegates to the shared polygon/line-string inversion routine with
    polygon-specific accessors.
    """
    def _entities_of(psoi):
        # polygons contained in a PolygonsOnImage instance
        return psoi.polygons

    def _points_of(poly):
        # exterior points of a single Polygon
        return poly.exterior

    return _invert_normalize_polygons_and_line_strings(
        polygons, polygons_old, estimate_polygons_norm_type,
        "Polygon", "PolygonsOnImage", _entities_of, _points_of)
def invert_normalize_line_strings(line_strings, line_strings_old):
    """Invert the normalization of line strings.

    Delegates to the shared polygon/line-string inversion routine with
    line-string-specific accessors.
    """
    def _entities_of(lsoi):
        # line strings contained in a LineStringsOnImage instance
        return lsoi.line_strings

    def _points_of(ls):
        # coordinate points of a single LineString
        return ls.coords

    return _invert_normalize_polygons_and_line_strings(
        line_strings, line_strings_old, estimate_line_strings_norm_type,
        "LineString", "LineStringsOnImage", _entities_of, _points_of)
def _invert_normalize_polygons_and_line_strings(inputs, inputs_old,
estimate_ntype_func,
cls_single_name,
cls_oi_name,
get_entities_func,
get_points_func):
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint
ntype = estimate_ntype_func(inputs_old)
if ntype == "None":
assert inputs is None
return inputs
elif ntype in ["array[float]", "array[int]", "array[uint]"]:
input_dtype = inputs_old.dtype
return restore_dtype_and_merge([
[get_points_func(entity) for entity in get_entities_func(oi)]
for oi in inputs
], input_dtype)
elif ntype == cls_single_name:
assert len(inputs) == 1
assert len(get_entities_func(inputs[0])) == 1
return get_entities_func(inputs[0])[0]
elif ntype == cls_oi_name:
assert len(inputs) == 1
return inputs[0]
elif ntype == "iterable[empty]":
assert inputs is None
return []
elif ntype in ["iterable-array[float]",
"iterable-array[int]",
"iterable-array[uint]"]:
nonempty, _, _ = find_first_nonempty(inputs_old)
input_dtype = nonempty.dtype
return [
restore_dtype_and_merge(
[get_points_func(entity) for entity in get_entities_func(oi)],
input_dtype)
for oi in inputs
]
elif ntype == "iterable-tuple[number,size=2]":
assert len(inputs) == 1
assert len(get_entities_func(inputs[0])) == 1
return [(point[0], point[1])
for point in get_points_func(get_entities_func(inputs[0])[0])]
elif ntype == "iterable-Keypoint":
assert len(inputs) == 1
assert len(get_entities_func(inputs[0])) == 1
return [Keypoint(x=point[0], y=point[1])
for point in get_points_func(get_entities_func(inputs[0])[0])]
elif ntype == ("iterable-%s" % (cls_single_name,)):
assert len(inputs) == 1
assert len(get_entities_func(inputs[0])) == len(inputs_old)
return get_entities_func(inputs[0])
elif ntype == ("iterable-%s" % (cls_oi_name,)):
return inputs
elif ntype == "iterable-iterable[empty]":
assert inputs is None
return inputs_old[:]
elif ntype in ["iterable-iterable-array[float]",
"iterable-iterable-array[int]",
"iterable-iterable-array[uint]"]:
nonempty, _, _ = find_first_nonempty(inputs_old)
input_dtype = nonempty.dtype
return [
[restore_dtype_and_merge(get_points_func(entity), input_dtype)
for entity in get_entities_func(oi)]
for oi in inputs
]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/augmentables/bbs.py | augmentation/image_augmentation/helpers/imgaug/augmentables/bbs.py | from __future__ import print_function, division, absolute_import
import copy
import numpy as np
import skimage.draw
import skimage.measure
from .. import imgaug as ia
from .utils import normalize_shape, project_coords
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""
Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right corners. Both are given
as x and y-coordinates. The corners are intended to lie inside the bounding box area.
As a result, a bounding box that lies completely inside the image but has maximum extensions
would have coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that coordinates
are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
    """Create a new BoundingBox instance.

    Coordinates passed in the wrong order (``x1 > x2`` or ``y1 > y2``)
    are silently swapped so that ``(x1, y1)`` is always the top left
    and ``(x2, y2)`` the bottom right corner.
    """
    if x1 > x2:
        x1, x2 = x2, x1
    ia.do_assert(x2 >= x1)
    if y1 > y2:
        y1, y2 = y2, y1
    ia.do_assert(y2 >= y1)

    self.x1 = x1
    self.y1 = y1
    self.x2 = x2
    self.y2 = y2
    self.label = label
@property
def x1_int(self):
    """X-coordinate of the top left corner as an integer.

    Returns
    -------
    int
        ``x1`` rounded to the closest integer.
    """
    # numpy's round gives consistent behaviour between python versions
    rounded = np.round(self.x1)
    return int(rounded)
@property
def y1_int(self):
    """Y-coordinate of the top left corner as an integer.

    Returns
    -------
    int
        ``y1`` rounded to the closest integer.
    """
    # numpy's round gives consistent behaviour between python versions
    rounded = np.round(self.y1)
    return int(rounded)
@property
def x2_int(self):
    """
    Return the x-coordinate of the bottom right corner as an integer.

    Note: the docstring previously said "bottom left", but per the class
    contract (x2, y2) is the bottom right corner.

    Returns
    -------
    int
        X-coordinate of the bottom right corner, rounded to the closest integer.
    """
    return int(np.round(self.x2))  # use numpy's round to have consistent behaviour between python versions
@property
def y2_int(self):
    """
    Return the y-coordinate of the bottom right corner as an integer.

    Note: the docstring previously said "bottom left", but per the class
    contract (x2, y2) is the bottom right corner.

    Returns
    -------
    int
        Y-coordinate of the bottom right corner, rounded to the closest integer.
    """
    return int(np.round(self.y2))  # use numpy's round to have consistent behaviour between python versions
@property
def height(self):
    """Height of the bounding box.

    Returns
    -------
    number
        ``y2 - y1``.
    """
    top, bottom = self.y1, self.y2
    return bottom - top
@property
def width(self):
    """Width of the bounding box.

    Returns
    -------
    number
        ``x2 - x1``.
    """
    left, right = self.x1, self.x2
    return right - left
@property
def center_x(self):
    """X-coordinate of the center point of the bounding box.

    Returns
    -------
    number
        Horizontal center, i.e. ``x1`` plus half the width.
    """
    # 0.5 * width is bit-identical to width / 2 for IEEE floats
    return self.x1 + 0.5 * self.width
@property
def center_y(self):
    """Y-coordinate of the center point of the bounding box.

    Returns
    -------
    number
        Vertical center, i.e. ``y1`` plus half the height.
    """
    # 0.5 * height is bit-identical to height / 2 for IEEE floats
    return self.y1 + 0.5 * self.height
@property
def area(self):
    """Area of the bounding box.

    Returns
    -------
    number
        ``width * height``.
    """
    return self.width * self.height
# TODO add test for tuple of number
def contains(self, other):
    """Check whether a point lies within (or on the border of) the box.

    Parameters
    ----------
    other : tuple of number or imgaug.Keypoint
        The point, either as an ``(x, y)`` tuple or as a Keypoint-like
        object with ``x``/``y`` attributes.

    Returns
    -------
    bool
        True if the point is contained in the bounding box.
    """
    if isinstance(other, tuple):
        px, py = other
    else:
        px, py = other.x, other.y
    within_x = self.x1 <= px <= self.x2
    within_y = self.y1 <= py <= self.y2
    return within_x and within_y
# TODO add tests for ndarray inputs
def project(self, from_shape, to_shape):
    """Project the bounding box onto a differently sized image.

    Intended for plain resizes: the top left and bottom right corners
    are rescaled from `from_shape` to `to_shape`. Cannot model more
    complex changes such as padding or cropping.

    Parameters
    ----------
    from_shape : tuple of int or ndarray
        Shape of the original image (before resize).
    to_shape : tuple of int or ndarray
        Shape of the new image (after resize).

    Returns
    -------
    imgaug.BoundingBox
        Bounding box with projected coordinates; the label is kept.
    """
    corners = [(self.x1, self.y1), (self.x2, self.y2)]
    (new_x1, new_y1), (new_x2, new_y2) = project_coords(
        corners, from_shape, to_shape)
    return self.copy(
        x1=new_x1,
        y1=new_y1,
        x2=new_x2,
        y2=new_y2,
        label=self.label)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
    """
    Extend the size of the bounding box along its sides.

    Parameters
    ----------
    all_sides : number, optional
        Value by which to extend the bounding box size along all sides.
    top : number, optional
        Value by which to extend the bounding box size along its top side.
    right : number, optional
        Value by which to extend the bounding box size along its right side.
    bottom : number, optional
        Value by which to extend the bounding box size along its bottom side.
    left : number, optional
        Value by which to extend the bounding box size along its left side.

    Returns
    -------
    imgaug.BoundingBox
        Extended bounding box. The label is preserved.
    """
    # Use copy() so that the label is kept. Previously a bare
    # BoundingBox(...) was created here, silently dropping the label --
    # inconsistent with project(), shift() and clip_out_of_image(),
    # which all preserve it.
    return self.copy(
        x1=self.x1 - all_sides - left,
        x2=self.x2 + all_sides + right,
        y1=self.y1 - all_sides - top,
        y2=self.y2 + all_sides + bottom
    )
def intersection(self, other, default=None):
    """Compute the intersection of this bounding box with another one.

    The intersection can degenerate to a single point, i.e. a bounding
    box with zero height and width.

    Parameters
    ----------
    other : imgaug.BoundingBox
        The other bounding box.
    default : any, optional
        Value returned when the boxes do not intersect.

    Returns
    -------
    imgaug.BoundingBox or any
        The intersection box, or `default` if there is none.
    """
    inter_x1 = max(self.x1, other.x1)
    inter_y1 = max(self.y1, other.y1)
    inter_x2 = min(self.x2, other.x2)
    inter_y2 = min(self.y2, other.y2)

    overlaps = inter_x1 <= inter_x2 and inter_y1 <= inter_y2
    if not overlaps:
        return default
    return BoundingBox(x1=inter_x1, y1=inter_y1, x2=inter_x2, y2=inter_y2)
def union(self, other):
    """Compute the union of this bounding box with another one.

    Equivalent to the axis-aligned box spanning all corner points of
    both boxes.

    Parameters
    ----------
    other : imgaug.BoundingBox
        The other bounding box.

    Returns
    -------
    imgaug.BoundingBox
        Union bounding box.
    """
    union_x1 = min(self.x1, other.x1)
    union_y1 = min(self.y1, other.y1)
    union_x2 = max(self.x2, other.x2)
    union_y2 = max(self.y2, other.y2)
    return BoundingBox(x1=union_x1, y1=union_y1,
                       x2=union_x2, y2=union_y2)
def iou(self, other):
    """Compute the intersection-over-union with another bounding box.

    IoU is ``area(A ∩ B) / area(A ∪ B)``; 0.0 is returned when the
    boxes do not intersect or the union area is zero.

    Parameters
    ----------
    other : imgaug.BoundingBox
        The other bounding box.

    Returns
    -------
    float
        IoU value in ``[0.0, 1.0]``.
    """
    inter = self.intersection(other)
    if inter is None:
        return 0.0
    union_area = self.area + other.area - inter.area
    if union_area <= 0:
        return 0.0
    return inter.area / union_area
def is_fully_within_image(self, image):
    """Check whether the bounding box lies fully inside the image area.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image (its shape is used) or image shape with at least two ints.

    Returns
    -------
    bool
        True if the box is fully inside the image area.
    """
    height, width = normalize_shape(image)[0:2]
    within_x = self.x1 >= 0 and self.x2 < width
    within_y = self.y1 >= 0 and self.y2 < height
    return within_x and within_y
def is_partly_within_image(self, image):
    """Check whether the bounding box is at least partially inside the image.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image (its shape is used) or image shape with at least two ints.

    Returns
    -------
    bool
        True if the box overlaps the image area at all.
    """
    height, width = normalize_shape(image)[0:2]
    # Box covering the image area; right/bottom shrunk by eps so the
    # exclusive image border does not count as overlap.
    eps = np.finfo(np.float32).eps
    image_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
    return self.intersection(image_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
    """Check whether the bounding box is partially/fully outside the image.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image (its shape is used) or image shape with at least two ints.
    fully : bool, optional
        Value returned when the box is fully outside the image area.
    partly : bool, optional
        Value returned when the box is only partially outside the
        image area.

    Returns
    -------
    bool
        True/False depending on position and the `fully`/`partly` flags.
    """
    if self.is_fully_within_image(image):
        return False
    if self.is_partly_within_image(image):
        return partly
    return fully
@ia.deprecated(alt_func="BoundingBox.clip_out_of_image()",
               comment="clip_out_of_image() has the exactly same "
                       "interface.")
def cut_out_of_image(self, *args, **kwargs):
    # Deprecated alias; forwards all arguments to clip_out_of_image().
    return self.clip_out_of_image(*args, **kwargs)
def clip_out_of_image(self, image):
    """Clip the bounding box so that it lies fully within the image area.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image (its shape is used) or image shape with at least two ints.

    Returns
    -------
    imgaug.BoundingBox
        Clipped bounding box; the label is kept.
    """
    shape = normalize_shape(image)
    height, width = shape[0:2]
    ia.do_assert(height > 0)
    ia.do_assert(width > 0)

    # Clip to [0, size - eps] so coordinates stay strictly below the
    # exclusive right/bottom image border.
    eps = np.finfo(np.float32).eps
    clipped_x1 = np.clip(self.x1, 0, width - eps)
    clipped_x2 = np.clip(self.x2, 0, width - eps)
    clipped_y1 = np.clip(self.y1, 0, height - eps)
    clipped_y2 = np.clip(self.y2, 0, height - eps)

    return self.copy(
        x1=clipped_x1,
        y1=clipped_y1,
        x2=clipped_x2,
        y2=clipped_y2,
        label=self.label
    )
# TODO convert this to x/y params?
def shift(self, top=None, right=None, bottom=None, left=None):
    """Move the bounding box along the x/y axes.

    Parameters
    ----------
    top : None or int, optional
        Pixels to shift away from the top side (i.e. downwards).
    right : None or int, optional
        Pixels to shift away from the right side (i.e. leftwards).
    bottom : None or int, optional
        Pixels to shift away from the bottom side (i.e. upwards).
    left : None or int, optional
        Pixels to shift away from the left side (i.e. rightwards).

    Returns
    -------
    imgaug.BoundingBox
        Shifted bounding box; the label is kept.
    """
    # Treat missing values as zero shift.
    top, right, bottom, left = [
        0 if amount is None else amount
        for amount in (top, right, bottom, left)]
    return self.copy(
        x1=self.x1+left-right,
        x2=self.x2+left-right,
        y1=self.y1+top-bottom,
        y2=self.y2+top-bottom
    )
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
                  copy=True, raise_if_out_of_image=False, thickness=None):
    """
    Draw the bounding box on an image.

    Parameters
    ----------
    image : (H,W,C) ndarray(uint8)
        The image onto which to draw the bounding box.
    color : iterable of int, optional
        The color to use, corresponding to the channel layout of the image. Usually RGB.
    alpha : float, optional
        The transparency of the drawn bounding box, where 1.0 denotes no transparency and
        0.0 is invisible.
    size : int, optional
        The thickness of the bounding box in pixels. If the value is larger than 1, then
        additional pixels will be added around the bounding box (i.e. extension towards the
        outside).
    copy : bool, optional
        Whether to copy the input image or change it in-place.
    raise_if_out_of_image : bool, optional
        Whether to raise an error if the bounding box is fully outside of the
        image. If set to False, no error will be raised and only the parts inside the image
        will be drawn.
    thickness : None or int, optional
        Deprecated alias for `size`.

    Returns
    -------
    result : (H,W,C) ndarray(uint8)
        Image with bounding box drawn on it.
    """
    if thickness is not None:
        ia.warn_deprecated(
            "Usage of argument 'thickness' in BoundingBox.draw_on_image() "
            "is deprecated. The argument was renamed to 'size'."
        )
        size = thickness
    if raise_if_out_of_image and self.is_out_of_image(image):
        raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (
            self.x1, self.y1, self.x2, self.y2, image.shape))
    result = np.copy(image) if copy else image
    if isinstance(color, (tuple, list)):
        color = np.uint8(color)
    # Each iteration draws one 1px perimeter ring, moving outwards by i
    # pixels, so `size` controls the border thickness.
    for i in range(size):
        y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
        # When y values get into the range (H-0.5, H), the *_int functions round them to H.
        # That is technically sensible, but in the case of drawing means that the border lies
        # just barely outside of the image, making the border disappear, even though the BB
        # is fully inside the image. Here we correct for that because of beauty reasons.
        # Same is the case for x coordinates.
        if self.is_fully_within_image(image):
            y1 = np.clip(y1, 0, image.shape[0]-1)
            y2 = np.clip(y2, 0, image.shape[0]-1)
            x1 = np.clip(x1, 0, image.shape[1]-1)
            x2 = np.clip(x2, 0, image.shape[1]-1)
        y = [y1-i, y1-i, y2+i, y2+i]
        x = [x1-i, x2+i, x2+i, x1-i]
        rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
        if alpha >= 0.99:
            # effectively opaque: assign the color directly
            result[rr, cc, :] = color
        else:
            if ia.is_float_array(result):
                # blend border color into the float image
                result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                result = np.clip(result, 0, 255)
            else:
                # blend in float32, then restore the original integer dtype
                input_dtype = result.dtype
                result = result.astype(np.float32)
                result[rr, cc, :] = (1 - alpha) * result[rr, cc, :] + alpha * color
                result = np.clip(result, 0, 255).astype(input_dtype)
    return result
# TODO add tests for pad and pad_max
def extract_from_image(self, image, pad=True, pad_max=None,
                       prevent_zero_size=True):
    """
    Extract the image pixels within the bounding box.

    This function will zero-pad the image if the bounding box is partially/fully outside of
    the image.

    Parameters
    ----------
    image : (H,W) ndarray or (H,W,C) ndarray
        The image from which to extract the pixels within the bounding box.
    pad : bool, optional
        Whether to zero-pad the image if the object is partially/fully
        outside of it.
    pad_max : None or int, optional
        The maximum number of pixels that may be zero-paded on any side,
        i.e. if this has value ``N`` the total maximum of added pixels
        is ``4*N``.
        This option exists to prevent extremely large images as a result of
        single points being moved very far away during augmentation.
    prevent_zero_size : bool, optional
        Whether to prevent height or width of the extracted image from becoming zero.
        If this is set to True and height or width of the bounding box is below 1, the height/width will
        be increased to 1. This can be useful to prevent problems, e.g. with image saving or plotting.
        If it is set to False, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or
        ``W`` potentially being 0.

    Returns
    -------
    image : (H',W') ndarray or (H',W',C) ndarray
        Pixels within the bounding box. Zero-padded if the bounding box is partially/fully
        outside of the image. If prevent_zero_size is activated, it is guarantueed that ``H'>0``
        and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
    """
    pad_top = 0
    pad_right = 0
    pad_bottom = 0
    pad_left = 0
    height, width = image.shape[0], image.shape[1]
    x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
    # When y values get into the range (H-0.5, H), the *_int functions round them to H.
    # That is technically sensible, but in the case of extraction leads to a black border,
    # which is both ugly and unexpected after calling cut_out_of_image(). Here we correct for
    # that because of beauty reasons.
    # Same is the case for x coordinates.
    fully_within = self.is_fully_within_image(image)
    if fully_within:
        y1, y2 = np.clip([y1, y2], 0, height-1)
        x1, x2 = np.clip([x1, x2], 0, width-1)
    # TODO add test
    if prevent_zero_size:
        if abs(x2 - x1) < 1:
            x2 = x1 + 1
        if abs(y2 - y1) < 1:
            y2 = y1 + 1
    if pad:
        # if the bb is outside of the image area, the following pads the image
        # first with black pixels until the bb is inside the image
        # and only then extracts the image area
        # TODO probably more efficient to initialize an array of zeros
        # and copy only the portions of the bb into that array that are
        # natively inside the image area
        if x1 < 0:
            pad_left = abs(x1)
            x2 = x2 + pad_left
            width = width + pad_left
            x1 = 0
        if y1 < 0:
            pad_top = abs(y1)
            y2 = y2 + pad_top
            height = height + pad_top
            y1 = 0
        if x2 >= width:
            pad_right = x2 - width
        if y2 >= height:
            pad_bottom = y2 - height
        paddings = [pad_top, pad_right, pad_bottom, pad_left]
        any_padded = any([val > 0 for val in paddings])
        if any_padded:
            if pad_max is None:
                pad_max = max(paddings)
            image = ia.pad(
                image,
                top=min(pad_top, pad_max),
                right=min(pad_right, pad_max),
                bottom=min(pad_bottom, pad_max),
                left=min(pad_left, pad_max)
            )
        return image[y1:y2, x1:x2]
    else:
        # BUGFIX: previously this used a chained tuple comparison
        # ``(0, 0, 0, 0) <= (x1, y1, x2, y2) < (width, height, width, height)``
        # which compares tuples lexicographically, not element-wise --
        # e.g. a negative y1 combined with a positive x1 was wrongly
        # classified as "within image". Check each coordinate explicitly.
        within_image = (
            0 <= x1 < width
            and 0 <= y1 < height
            and 0 <= x2 < width
            and 0 <= y2 < height
        )
        out_height, out_width = (y2 - y1), (x2 - x1)
        nonzero_height = (out_height > 0)
        nonzero_width = (out_width > 0)
        if within_image and nonzero_height and nonzero_width:
            return image[y1:y2, x1:x2]
        # Out of image (or zero-sized) and no padding requested: return
        # an empty/minimal extraction of matching dtype and channel count.
        if prevent_zero_size:
            out_height = 1
            out_width = 1
        else:
            out_height = 0
            out_width = 0
        if image.ndim == 2:
            return np.zeros((out_height, out_width), dtype=image.dtype)
        return np.zeros((out_height, out_width, image.shape[-1]),
                        dtype=image.dtype)
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
    """Return the four corners as keypoints, clockwise from the top left.

    Returns
    -------
    list of imgaug.Keypoint
        Corners of the bounding box as keypoints.
    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.kps import Keypoint

    corners = [
        (self.x1, self.y1),  # top left
        (self.x2, self.y1),  # top right
        (self.x2, self.y2),  # bottom right
        (self.x1, self.y2),  # bottom left
    ]
    return [Keypoint(x=corner_x, y=corner_y)
            for corner_x, corner_y in corners]
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
    """
    Create a shallow copy of the BoundingBox object.

    Parameters
    ----------
    x1 : None or number
        If not None, the x1 coordinate of the copy is set to this value.
    y1 : None or number
        If not None, the y1 coordinate of the copy is set to this value.
    x2 : None or number
        If not None, the x2 coordinate of the copy is set to this value.
    y2 : None or number
        If not None, the y2 coordinate of the copy is set to this value.
    label : None or string
        If not None, the label of the copy is set to this value.

    Returns
    -------
    imgaug.BoundingBox
        Shallow copy.
    """
    def _pick(override, current):
        # Keep the existing attribute unless an override was supplied.
        return current if override is None else override

    return BoundingBox(
        x1=_pick(x1, self.x1),
        x2=_pick(x2, self.x2),
        y1=_pick(y1, self.y1),
        y2=_pick(y2, self.y2),
        label=_pick(label, self.label),
    )
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
    """
    Create a deep copy of the BoundingBox object.

    Parameters
    ----------
    x1 : None or number
        If not None, the x1 coordinate of the copy is set to this value.
    y1 : None or number
        If not None, the y1 coordinate of the copy is set to this value.
    x2 : None or number
        If not None, the x2 coordinate of the copy is set to this value.
    y2 : None or number
        If not None, the y2 coordinate of the copy is set to this value.
    label : None or string
        If not None, the label of the copy is set to this value.

    Returns
    -------
    imgaug.BoundingBox
        Deep copy.
    """
    # Coordinates are plain numbers, so the shallow copy() already yields
    # a fully independent object.
    return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
    # Mirror __str__ so the repr and printed forms are identical.
    return str(self)
def __str__(self):
    # Coordinates with four decimals; label rendered via plain %s.
    template = "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)"
    return template % (self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
"""
Object that represents all bounding boxes on a single image.
Parameters
----------
bounding_boxes : list of imgaug.BoundingBox
List of bounding boxes on the image.
shape : tuple of int
The shape of the image on which the bounding boxes are placed.
Examples
--------
>>> image = np.zeros((100, 100))
>>> bbs = [
>>> BoundingBox(x1=10, y1=20, x2=20, y2=30),
>>> BoundingBox(x1=25, y1=50, x2=30, y2=70)
>>> ]
>>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
"""
def __init__(self, bounding_boxes, shape):
    # `shape` may be an image array or a shape tuple; normalize_shape
    # converts either form to a plain tuple.
    self.bounding_boxes = bounding_boxes
    self.shape = normalize_shape(shape)
# TODO remove this? here it is image height at BoundingBox it is bounding box height
@property
def height(self):
    """Height (in pixels) of the image the bounding boxes are placed on."""
    return self.shape[0]
# TODO remove this? here it is image width at BoundingBox it is bounding box width
@property
def width(self):
    """Width (in pixels) of the image the bounding boxes are placed on."""
    return self.shape[1]
@property
def empty(self):
    """True when this object contains zero bounding boxes."""
    return not self.bounding_boxes
def on(self, image):
    """
    Project bounding boxes from one image to a new one.

    Parameters
    ----------
    image : ndarray or tuple of int
        New image onto which the bounding boxes are to be projected.
        May also simply be that new image's shape tuple.

    Returns
    -------
    bounding_boxes : imgaug.BoundingBoxesOnImage
        Object containing all projected bounding boxes.
    """
    target_shape = normalize_shape(image)
    # Same spatial size -> coordinates are unchanged, just copy.
    if target_shape[0:2] == self.shape[0:2]:
        return self.deepcopy()
    projected = [box.project(self.shape, target_shape)
                 for box in self.bounding_boxes]
    return BoundingBoxesOnImage(projected, target_shape)
@classmethod
def from_xyxy_array(cls, xyxy, shape):
    """
    Convert an (N,4) ndarray to a BoundingBoxesOnImage object.

    This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.

    Parameters
    ----------
    xyxy : (N,4) ndarray
        Corner coordinates ``(x1, y1, x2, y2)`` (top-left, bottom-right)
        of ``N`` bounding boxes, usually of dtype ``float32``.
    shape : tuple of int
        Shape of the image on which the bounding boxes are placed,
        usually ``(H, W, C)`` or ``(H, W)``.

    Returns
    -------
    imgaug.BoundingBoxesOnImage
        Object containing one BoundingBox per input row.
    """
    ia.do_assert(xyxy.shape[1] == 4, "Expected input array of shape (N, 4), got shape %s." % (xyxy.shape,))
    boxes = []
    for row in xyxy:
        # Row order matches the BoundingBox positional signature.
        boxes.append(BoundingBox(*row))
    return cls(boxes, shape)
def to_xyxy_array(self, dtype=np.float32):
    """
    Convert the BoundingBoxesOnImage object to an (N,4) ndarray.

    This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.

    Parameters
    ----------
    dtype : numpy.dtype, optional
        Desired output datatype of the ndarray.

    Returns
    -------
    ndarray
        (N,4) ndarray array, where ``N`` denotes the number of bounding boxes and ``4`` denotes the
        top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``.
    """
    # Allocate directly with the requested dtype. The previous version
    # filled a float32 buffer and cast afterwards, which silently lost
    # precision whenever a wider dtype (e.g. float64) was requested.
    xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=dtype)
    for i, box in enumerate(self.bounding_boxes):
        xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
    return xyxy_array
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""
Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as
set in BoundingBoxesOnImage.shape.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all bounding boxes. If a single int ``C``, then
that is equivalent to ``(C,C,C)``.
alpha : float, optional
Alpha/transparency of the bounding box.
size : int, optional
Thickness in pixels.
copy : bool, optional
Whether to copy the image before drawing the bounding boxes.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any bounding box is outside of the
image.
thickness : None or int, optional
Deprecated.
Returns
-------
image : (H,W,3) ndarray
Image with drawn bounding boxes.
"""
image = np.copy(image) if copy else image
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/external/poly_point_isect_py2py3.py | augmentation/image_augmentation/helpers/imgaug/external/poly_point_isect_py2py3.py | # BentleyOttmann sweep-line implementation
# (for finding all intersections in a set of line segments)
# Public API of this module; everything else is an implementation detail.
__all__ = (
    "isect_segments",
    "isect_polygon",

    # same as above but includes segments with each intersections
    "isect_segments_include_segments",
    "isect_polygon_include_segments",

    # for testing only (correct but slow)
    "isect_segments__naive",
    "isect_polygon__naive",
    )
# ----------------------------------------------------------------------------
# Main Poly Intersection

# Defines to change behavior.
#
# Whether to ignore intersections of line segments when both
# their end points form the intersection point.
USE_IGNORE_SEGMENT_ENDINGS = True

USE_DEBUG = True

USE_VERBOSE = False

# checks we should NOT need,
# but do them in case we find a test-case that fails.
USE_PARANOID = False

# Support vertical segments,
# (the bentley-ottmann method doesn't support this).
# We use the term 'START_VERTICAL' for a vertical segment,
# to differentiate it from START/END/INTERSECTION
USE_VERTICAL = True
# end defines!
# ------------

# ---------
# Constants

# Tuple indices for 2D points.
X, Y = 0, 1

# -----------------------------------------------------------------------------
# Switchable Number Implementation
#
# ``Real`` is the numeric constructor used for all coordinates; only the
# 'native' (float) backend is known to pass the test-suite.

NUMBER_TYPE = 'native'

if NUMBER_TYPE == 'native':
    Real = float
    NUM_EPS = Real("1e-10")
    NUM_INF = Real(float("inf"))
elif NUMBER_TYPE == 'decimal':
    # Not passing tests!
    import decimal
    Real = decimal.Decimal
    decimal.getcontext().prec = 80
    NUM_EPS = Real("1e-10")
    NUM_INF = Real(float("inf"))
elif NUMBER_TYPE == 'numpy':
    import numpy
    Real = numpy.float64
    del numpy
    NUM_EPS = Real("1e-10")
    NUM_INF = Real(float("inf"))
elif NUMBER_TYPE == 'gmpy2':
    # Not passing tests!
    import gmpy2
    gmpy2.set_context(gmpy2.ieee(128))
    Real = gmpy2.mpz
    NUM_EPS = Real(float("1e-10"))
    NUM_INF = gmpy2.get_emax_max()
    del gmpy2
else:
    raise Exception("Type not found")

# Derived numeric constants shared across the module.
NUM_EPS_SQ = NUM_EPS * NUM_EPS
NUM_ZERO = Real(0.0)
NUM_ONE = Real(1.0)
class Event:
    """A single sweep event: the START or END of a segment, an
    INTERSECTION point, or START_VERTICAL for a vertical segment
    (vertical segments need special handling because plain
    Bentley-Ottmann cannot order them on the sweep line)."""

    __slots__ = (
        "type",
        "point",
        "segment",

        # this is just cache,
        # we may remove or calculate slope on the fly
        "slope",
        "span",
    ) + (() if not USE_DEBUG else (
        # debugging only
        "other",
        "in_sweep",
    ))

    class Type:
        # Numeric values double as bucket indices in EventQueue.offer().
        END = 0
        INTERSECTION = 1
        START = 2
        if USE_VERTICAL:
            START_VERTICAL = 3

    def __init__(self, type, point, segment, slope):
        assert(isinstance(point, tuple))
        self.type = type
        self.point = point
        self.segment = segment

        # will be None for INTERSECTION
        self.slope = slope
        if segment is not None:
            # Horizontal extent of the segment; zero means vertical.
            self.span = segment[1][X] - segment[0][X]

        if USE_DEBUG:
            self.other = None
            self.in_sweep = False

    # note that this isn't essential,
    # it just avoids non-deterministic ordering, see #9.
    def __hash__(self):
        return hash(self.point)

    def is_vertical(self):
        # return self.segment[0][X] == self.segment[1][X]
        return self.span == NUM_ZERO

    def y_intercept_x(self, x):
        # Y coordinate where this event's segment crosses the vertical
        # line at ``x`` (None for vertical segments).
        # vertical events only for comparison (above_all check)
        # never added into the binary-tree its self
        if USE_VERTICAL:
            if self.is_vertical():
                return None

        if x <= self.segment[0][X]:
            return self.segment[0][Y]
        elif x >= self.segment[1][X]:
            return self.segment[1][Y]

        # use the largest to avoid float precision error with nearly vertical lines.
        delta_x0 = x - self.segment[0][X]
        delta_x1 = self.segment[1][X] - x
        if delta_x0 > delta_x1:
            ifac = delta_x0 / self.span
            fac = NUM_ONE - ifac
        else:
            fac = delta_x1 / self.span
            ifac = NUM_ONE - fac
        assert(fac <= NUM_ONE)
        return (self.segment[0][Y] * fac) + (self.segment[1][Y] * ifac)

    @staticmethod
    def Compare(sweep_line, this, that):
        # Three-way comparison ordering events on the sweep line:
        # primarily by Y at the current sweep X, then by slope
        # (direction depends on whether we are just before or just after
        # the event point), then by segment end points as a tiebreaker.
        if this is that:
            return 0
        if USE_DEBUG:
            if this.other is that:
                return 0

        current_point_x = sweep_line._current_event_point_x
        this_y = this.y_intercept_x(current_point_x)
        that_y = that.y_intercept_x(current_point_x)
        # print(this_y, that_y)
        if USE_VERTICAL:
            if this_y is None:
                this_y = this.point[Y]
            if that_y is None:
                that_y = that.point[Y]

        delta_y = this_y - that_y

        assert((delta_y < NUM_ZERO) == (this_y < that_y))
        # NOTE, VERY IMPORTANT TO USE EPSILON HERE!
        # otherwise w/ float precision errors we get incorrect comparisons
        # can get very strange & hard to debug output without this.
        if abs(delta_y) > NUM_EPS:
            return -1 if (delta_y < NUM_ZERO) else 1
        else:
            this_slope = this.slope
            that_slope = that.slope
            if this_slope != that_slope:
                if sweep_line._before:
                    return -1 if (this_slope > that_slope) else 1
                else:
                    return 1 if (this_slope > that_slope) else -1

        delta_x_p1 = this.segment[0][X] - that.segment[0][X]
        if delta_x_p1 != NUM_ZERO:
            return -1 if (delta_x_p1 < NUM_ZERO) else 1

        delta_x_p2 = this.segment[1][X] - that.segment[1][X]
        if delta_x_p2 != NUM_ZERO:
            return -1 if (delta_x_p2 < NUM_ZERO) else 1

        return 0

    def __repr__(self):
        return ("Event(0x%x, s0=%r, s1=%r, p=%r, type=%d, slope=%r)" % (
            id(self),
            self.segment[0], self.segment[1],
            self.point,
            self.type,
            self.slope,
        ))
class SweepLine:
    """State of the Bentley-Ottmann sweep: the ordered set of segments
    currently crossing the sweep line, plus all intersections found so
    far. Driven by :func:`isect_segments_impl`."""

    __slots__ = (
        # A map holding all intersection points mapped to the Events
        # that form these intersections.
        # {Point: set(Event, ...), ...}
        "intersections",

        "queue",

        # Events (sorted set of ordered events, no values)
        #
        # note: START & END events are considered the same so checking if an event is in the tree
        # will return true if its opposite side is found.
        # This is essential for the algorithm to work, and why we don't explicitly remove START events.
        # Instead, the END events are never added to the current sweep, and removing them also removes the start.
        "_events_current_sweep",
        # The point of the current Event.
        "_current_event_point_x",
        # A flag to indicate if we're slightly before or after the line.
        "_before",
    )

    def __init__(self):
        self.intersections = {}
        self._current_event_point_x = None
        # The tree orders events with Event.Compare, which needs access
        # to this SweepLine instance (passed as cmp_data).
        self._events_current_sweep = RBTree(cmp=Event.Compare, cmp_data=self)
        self._before = True

    def get_intersections(self):
        """
        Return a list of unordered intersection points.
        """
        if Real is float:
            return list(self.intersections.keys())
        else:
            return [(float(p[0]), float(p[1])) for p in self.intersections.keys()]

    # Not essential for implementing this algorithm, but useful.
    def get_intersections_with_segments(self):
        """
        Return a list of unordered intersection '(point, segment)' pairs,
        where segments may contain 2 or more values.
        """
        if Real is float:
            return [
                (p, [event.segment for event in event_set])
                for p, event_set in self.intersections.items()
            ]
        else:
            return [
                (
                    (float(p[0]), float(p[1])),
                    [((float(event.segment[0][0]), float(event.segment[0][1])),
                      (float(event.segment[1][0]), float(event.segment[1][1])))
                     for event in event_set],
                )
                for p, event_set in self.intersections.items()
            ]

    # Checks if an intersection exists between two Events 'a' and 'b'.
    def _check_intersection(self, a, b):
        # Return immediately in case either of the events is null, or
        # if one of them is an INTERSECTION event.
        if ((a is None or b is None) or
                (a.type == Event.Type.INTERSECTION) or
                (b.type == Event.Type.INTERSECTION)):
            return

        if a is b:
            return

        # Get the intersection point between 'a' and 'b'.
        p = isect_seg_seg_v2_point(
            a.segment[0], a.segment[1],
            b.segment[0], b.segment[1])

        # No intersection exists.
        if p is None:
            return

        # If the intersection is formed by both the segment endings, AND
        # USE_IGNORE_SEGMENT_ENDINGS is true,
        # return from this method.
        if USE_IGNORE_SEGMENT_ENDINGS:
            if ((len_squared_v2v2(p, a.segment[0]) < NUM_EPS_SQ or
                 len_squared_v2v2(p, a.segment[1]) < NUM_EPS_SQ) and
                    (len_squared_v2v2(p, b.segment[0]) < NUM_EPS_SQ or
                     len_squared_v2v2(p, b.segment[1]) < NUM_EPS_SQ)):
                return

        # Add the intersection.
        events_for_point = self.intersections.pop(p, set())
        is_new = len(events_for_point) == 0
        events_for_point.add(a)
        events_for_point.add(b)
        self.intersections[p] = events_for_point

        # If the intersection occurs to the right of the sweep line, OR
        # if the intersection is on the sweep line and it's above the
        # current event-point, add it as a new Event to the queue.
        if is_new and p[X] >= self._current_event_point_x:
            event_isect = Event(Event.Type.INTERSECTION, p, None, None)
            self.queue.offer(p, event_isect)

    def _sweep_to(self, p):
        if p[X] == self._current_event_point_x:
            # happens in rare cases,
            # we can safely ignore
            return

        self._current_event_point_x = p[X]

    def insert(self, event):
        # Insert a START event into the current sweep ordering.
        assert(event not in self._events_current_sweep)
        assert(not USE_VERTICAL or event.type != Event.Type.START_VERTICAL)
        if USE_DEBUG:
            assert(event.in_sweep == False)
            assert(event.other.in_sweep == False)

        self._events_current_sweep.insert(event, None)

        if USE_DEBUG:
            event.in_sweep = True
            event.other.in_sweep = True

    def remove(self, event):
        # Remove an event from the sweep; returns False when it was
        # not present (callers rely on this to detect double-removal).
        try:
            self._events_current_sweep.remove(event)

            if USE_DEBUG:
                assert(event.in_sweep == True)
                assert(event.other.in_sweep == True)
                event.in_sweep = False
                event.other.in_sweep = False

            return True
        except KeyError:
            if USE_DEBUG:
                assert(event.in_sweep == False)
                assert(event.other.in_sweep == False)
            return False

    def above(self, event):
        return self._events_current_sweep.succ_key(event, None)

    def below(self, event):
        return self._events_current_sweep.prev_key(event, None)

    '''
    def above_all(self, event):
        while True:
            event = self.above(event)
            if event is None:
                break
            yield event
    '''

    def above_all(self, event):
        # assert(event not in self._events_current_sweep)
        return self._events_current_sweep.key_slice(event, None, reverse=False)

    def handle(self, p, events_current):
        # Process all events of a single type-bucket at point ``p``.
        if len(events_current) == 0:
            return
        # done already
        # self._sweep_to(events_current[0])
        assert(p[0] == self._current_event_point_x)

        if not USE_IGNORE_SEGMENT_ENDINGS:
            if len(events_current) > 1:
                for i in range(0, len(events_current) - 1):
                    for j in range(i + 1, len(events_current)):
                        self._check_intersection(
                            events_current[i], events_current[j])

        for e in events_current:
            self.handle_event(e)

    def handle_event(self, event):
        t = event.type
        if t == Event.Type.START:
            # print(" START")
            self._before = False
            self.insert(event)

            e_above = self.above(event)
            e_below = self.below(event)

            self._check_intersection(event, e_above)
            self._check_intersection(event, e_below)
            if USE_PARANOID:
                self._check_intersection(e_above, e_below)

        elif t == Event.Type.END:
            # print(" END")
            self._before = True

            e_above = self.above(event)
            e_below = self.below(event)

            self.remove(event)

            # Neighbors become adjacent once this segment leaves.
            self._check_intersection(e_above, e_below)
            if USE_PARANOID:
                self._check_intersection(event, e_above)
                self._check_intersection(event, e_below)

        elif t == Event.Type.INTERSECTION:
            # print(" INTERSECTION")
            # Segments crossing here swap order: remove them all, flip
            # the before/after flag, then re-insert under the new order.
            self._before = True
            event_set = self.intersections[event.point]
            # note: events_current aren't sorted.
            reinsert_stack = []  # Stack
            for e in event_set:
                # Since we know the Event wasn't already removed,
                # we want to insert it later on.
                if self.remove(e):
                    reinsert_stack.append(e)
            self._before = False

            # Insert all Events that we were able to remove.
            while reinsert_stack:
                e = reinsert_stack.pop()

                self.insert(e)

                e_above = self.above(e)
                e_below = self.below(e)

                self._check_intersection(e, e_above)
                self._check_intersection(e, e_below)
                if USE_PARANOID:
                    self._check_intersection(e_above, e_below)

        elif (USE_VERTICAL and
              (t == Event.Type.START_VERTICAL)):

            # just check sanity
            assert(event.segment[0][X] == event.segment[1][X])
            assert(event.segment[0][Y] <= event.segment[1][Y])

            # In this case we only need to find all segments in this span.
            y_above_max = event.segment[1][Y]

            # self.insert(event)
            for e_above in self.above_all(event):
                if e_above.type == Event.Type.START_VERTICAL:
                    continue
                y_above = e_above.y_intercept_x(
                    self._current_event_point_x)
                if USE_IGNORE_SEGMENT_ENDINGS:
                    if y_above >= y_above_max - NUM_EPS:
                        break
                else:
                    if y_above > y_above_max:
                        break

                # We know this intersects,
                # so we could use a faster function now:
                # ix = (self._current_event_point_x, y_above)
                # ...however best use existing functions
                # since it does all sanity checks on endpoints... etc.
                self._check_intersection(event, e_above)

            # self.remove(event)
class EventQueue:
    """Priority queue of sweep events, keyed by point. Each point maps
    to per-type buckets (END/INTERSECTION/START[/START_VERTICAL]) so all
    events at one point can be handled together, type by type."""

    __slots__ = (
        # note: we only ever pop_min, this could use a 'heap' structure.
        # The sorted map holding the points -> event list
        # [Point: Event] (tree)
        "events_scan",
    )

    def __init__(self, segments, line):
        self.events_scan = RBTree()
        # segments = [s for s in segments if s[0][0] != s[1][0] and s[0][1] != s[1][1]]
        for s in segments:
            assert(s[0][X] <= s[1][X])

            slope = slope_v2v2(*s)

            if s[0] == s[1]:
                # Degenerate (zero-length) segment: ignore entirely.
                pass
            elif USE_VERTICAL and (s[0][X] == s[1][X]):
                e_start = Event(Event.Type.START_VERTICAL, s[0], s, slope)

                if USE_DEBUG:
                    e_start.other = e_start  # FAKE, avoid error checking

                self.offer(s[0], e_start)
            else:
                e_start = Event(Event.Type.START, s[0], s, slope)
                e_end = Event(Event.Type.END, s[1], s, slope)

                if USE_DEBUG:
                    e_start.other = e_end
                    e_end.other = e_start

                self.offer(s[0], e_start)
                self.offer(s[1], e_end)

        line.queue = self

    def offer(self, p, e):
        """
        Offer a new event ``s`` at point ``p`` in this queue.
        """
        # Event.Type values index directly into the bucket tuple.
        existing = self.events_scan.setdefault(
            p, ([], [], [], []) if USE_VERTICAL else
            ([], [], []))
        # Can use double linked-list for easy insertion at beginning/end
        '''
        if e.type == Event.Type.END:
            existing.insert(0, e)
        else:
            existing.append(e)
        '''
        existing[e.type].append(e)

    # return a set of events
    def poll(self):
        """
        Get, and remove, the first (lowest) item from this queue.

        :return: the first (lowest) item from this queue.
        :rtype: Point, Event pair.
        """
        assert(len(self.events_scan) != 0)
        p, events_current = self.events_scan.pop_min()
        return p, events_current
def isect_segments_impl(segments, include_segments=False):
    """Run the Bentley-Ottmann sweep over ``segments``.

    ``segments`` is a sequence of ``((x1, y1), (x2, y2))`` pairs. When
    ``include_segments`` is True the result pairs every intersection
    point with the segments that produced it.
    """
    # Canonicalize: order each segment's points left -> right (x first,
    # y breaking ties for vertical segments), converting coordinates to
    # ``Real`` when a non-float number backend is configured.
    if Real is float:
        segments = [
            (seg[0], seg[1]) if (seg[0] <= seg[1]) else (seg[1], seg[0])
            for seg in segments]
    else:
        segments = [
            (
                (Real(seg[0][0]), Real(seg[0][1])),
                (Real(seg[1][0]), Real(seg[1][1])),
            ) if (seg[0] <= seg[1]) else
            (
                (Real(seg[1][0]), Real(seg[1][1])),
                (Real(seg[0][0]), Real(seg[0][1])),
            )
            for seg in segments]

    sweep_line = SweepLine()
    queue = EventQueue(segments, sweep_line)

    while len(queue.events_scan) > 0:
        if USE_VERBOSE:
            print(len(queue.events_scan), sweep_line._current_event_point_x)
        point, buckets = queue.poll()
        # Handle each non-empty per-type bucket at this point.
        for bucket in buckets:
            if bucket:
                sweep_line._sweep_to(point)
                sweep_line.handle(point, bucket)

    if include_segments is False:
        return sweep_line.get_intersections()
    return sweep_line.get_intersections_with_segments()
def isect_polygon_impl(points, include_segments=False):
    """Build the polygon's closed edge list and run the sweep on it."""
    total = len(points)
    edges = [
        (tuple(points[index]), tuple(points[(index + 1) % total]))
        for index in range(total)
    ]
    return isect_segments_impl(edges, include_segments=include_segments)
def isect_segments(segments):
    """Return all intersection points found among ``segments``."""
    return isect_segments_impl(segments, include_segments=False)


# NOTE(review): the argument actually holds the polygon's point list;
# the name is kept for API compatibility.
def isect_polygon(segments):
    """Return all self-intersection points of a polygon's outline."""
    return isect_polygon_impl(segments, include_segments=False)


def isect_segments_include_segments(segments):
    """Like :func:`isect_segments`, pairing each point with its segments."""
    return isect_segments_impl(segments, include_segments=True)


def isect_polygon_include_segments(segments):
    """Like :func:`isect_polygon`, pairing each point with its segments."""
    return isect_polygon_impl(segments, include_segments=True)
# ----------------------------------------------------------------------------
# 2D math utilities


def slope_v2v2(p1, p2):
    """Slope of the segment p1->p2; signed NUM_INF for vertical segments."""
    if p1[X] != p2[X]:
        return (p2[Y] - p1[Y]) / (p2[X] - p1[X])
    # Vertical segment: the sign encodes the direction along Y.
    return NUM_INF if p1[Y] < p2[Y] else -NUM_INF
def sub_v2v2(a, b):
    """Component-wise difference ``a - b`` of two 2D vectors."""
    return (a[0] - b[0], a[1] - b[1])
def dot_v2v2(a, b):
    """Dot product of two 2D vectors."""
    return a[0] * b[0] + a[1] * b[1]
def len_squared_v2v2(a, b):
    """Squared Euclidean distance between points ``a`` and ``b``."""
    delta = sub_v2v2(a, b)
    return dot_v2v2(delta, delta)
def line_point_factor_v2(p, l1, l2, default=NUM_ZERO):
    """Parametric position of ``p`` along the line l1->l2 (0 at l1, 1 at l2).

    Returns ``default`` for a degenerate line (``l1 == l2``).
    """
    direction = sub_v2v2(l2, l1)
    offset = sub_v2v2(p, l1)
    denominator = dot_v2v2(direction, direction)
    if denominator == NUM_ZERO:
        return default
    return dot_v2v2(direction, offset) / denominator
def isect_seg_seg_v2_point(v1, v2, v3, v4, bias=NUM_ZERO):
    """Intersection point of segments (v1, v2) and (v3, v4), or None.

    ``bias`` widens the accepted parametric range [0, 1] at both ends.
    """
    # Only for predictability and hashable point when same input is given
    if v1 > v2:
        v1, v2 = v2, v1
    if v3 > v4:
        v3, v4 = v4, v3

    if (v1, v2) > (v3, v4):
        v1, v2, v3, v4 = v3, v4, v1, v2

    # Cross product of the direction vectors; zero means parallel.
    div = (v2[0] - v1[0]) * (v4[1] - v3[1]) - (v2[1] - v1[1]) * (v4[0] - v3[0])
    if div == NUM_ZERO:
        return None

    # Line-line intersection of the infinite supporting lines.
    vi = (((v3[0] - v4[0]) *
           (v1[0] * v2[1] - v1[1] * v2[0]) - (v1[0] - v2[0]) *
           (v3[0] * v4[1] - v3[1] * v4[0])) / div,
          ((v3[1] - v4[1]) *
           (v1[0] * v2[1] - v1[1] * v2[0]) - (v1[1] - v2[1]) *
           (v3[0] * v4[1] - v3[1] * v4[0])) / div,
          )

    # Reject when the point lies outside either segment's extent.
    fac = line_point_factor_v2(vi, v1, v2, default=-NUM_ONE)
    if fac < NUM_ZERO - bias or fac > NUM_ONE + bias:
        return None

    fac = line_point_factor_v2(vi, v3, v4, default=-NUM_ONE)
    if fac < NUM_ZERO - bias or fac > NUM_ONE + bias:
        return None

    # vi = round(vi[X], 8), round(vi[Y], 8)
    return vi
# ----------------------------------------------------------------------------
# Simple naive line intersect, (for testing only)


def isect_segments__naive(segments):
    """
    Brute force O(n2) version of ``isect_segments`` for test validation.
    """
    found = []

    # Canonicalize segment direction (left -> right), mirroring the
    # sweep-line version, converting to ``Real`` when configured.
    if Real is float:
        segments = [
            (seg[0], seg[1]) if seg[0][X] <= seg[1][X] else (seg[1], seg[0])
            for seg in segments]
    else:
        segments = [
            (
                (Real(seg[0][0]), Real(seg[0][1])),
                (Real(seg[1][0]), Real(seg[1][1])),
            ) if (seg[0] <= seg[1]) else
            (
                (Real(seg[1][0]), Real(seg[1][1])),
                (Real(seg[0][0]), Real(seg[0][1])),
            )
            for seg in segments]

    for i, (a0, a1) in enumerate(segments):
        for b0, b1 in segments[i + 1:]:
            # Skip pairs that merely share an end point.
            if a0 not in (b0, b1) and a1 not in (b0, b1):
                ix = isect_seg_seg_v2_point(a0, a1, b0, b1)
                if ix is not None:
                    # USE_IGNORE_SEGMENT_ENDINGS handled already
                    found.append(ix)
    return found
def isect_polygon__naive(points):
    """
    Brute force O(n2) version of ``isect_polygon`` for test validation.
    """
    found = []
    count = len(points)

    if Real is not float:
        points = [(Real(p[0]), Real(p[1])) for p in points]

    for i in range(count):
        a0, a1 = points[i], points[(i + 1) % count]
        for j in range(i + 1, count):
            b0, b1 = points[j], points[(j + 1) % count]
            # Skip edge pairs that merely share a vertex.
            if a0 in (b0, b1) or a1 in (b0, b1):
                continue
            ix = isect_seg_seg_v2_point(a0, a1, b0, b1)
            if ix is None:
                continue
            if USE_IGNORE_SEGMENT_ENDINGS:
                # Drop hits formed purely by near-coincident end points.
                if ((len_squared_v2v2(ix, a0) < NUM_EPS_SQ or
                     len_squared_v2v2(ix, a1) < NUM_EPS_SQ) and
                        (len_squared_v2v2(ix, b0) < NUM_EPS_SQ or
                         len_squared_v2v2(ix, b1) < NUM_EPS_SQ)):
                    continue
            found.append(ix)
    return found
# ----------------------------------------------------------------------------
# Inline Libs
#
# bintrees: 2.0.2, extracted from:
# http://pypi.python.org/pypi/bintrees
#
# - Removed unused functions, such as slicing and range iteration.
# - Added 'cmp' and 'cmp_data' arguments,
#   so we can define our own comparison that takes an arg.
#   Needed for sweep-line.
# - Added support for 'default' arguments for prev_item/succ_item,
#   so we can avoid exception handling.

# -------
# ABCTree

from operator import attrgetter

# Unique marker distinguishing "no default supplied" from a None default.
_sentinel = object()
class _ABCTree(object):
def __init__(self, items=None, cmp=None, cmp_data=None):
    """Initialize an empty tree, optionally filling it from *items*.

    ``cmp(cmp_data, a, b)`` is a three-way key comparison; when omitted,
    the natural ordering of the keys is used.
    """
    self._root = None
    self._count = 0
    if cmp is None:
        # Fall back to the keys' natural ordering.
        def cmp(cmp_data, a, b):
            if a < b:
                return -1
            if a > b:
                return 1
            return 0
    self._cmp = cmp
    self._cmp_data = cmp_data
    if items is not None:
        self.update(items)
def clear(self):
    """T.clear() -> None. Remove all items from T."""
    def _release(node):
        # Post-order walk: free children before their parent.
        if node is not None:
            _release(node.left)
            _release(node.right)
            node.free()
    _release(self._root)
    self._count = 0
    self._root = None
# Number of stored key/value pairs; kept in sync by insert/remove.
@property
def count(self):
    """Get items count."""
    return self._count
def get_value(self, key):
    """Return the value stored under *key*; raise KeyError when absent."""
    node = self._root
    while node is not None:
        direction = self._cmp(self._cmp_data, key, node.key)
        if direction == 0:
            return node.value
        # Negative comparison -> key sorts before this node.
        node = node.left if direction < 0 else node.right
    raise KeyError(str(key))
def pop_item(self):
    """Remove and return some (key, value) pair as a 2-tuple.

    Raises KeyError when the tree is empty.
    """
    if self.is_empty():
        raise KeyError("pop_item(): tree is empty")
    # Walk down to any leaf; removing a leaf keeps the rebalance cheap.
    node = self._root
    while node.left is not None or node.right is not None:
        node = node.left if node.left is not None else node.right
    key, value = node.key, node.value
    self.remove(key)
    return key, value

popitem = pop_item  # for compatibility to dict()
def min_item(self):
    """Return the (key, value) pair with the smallest key.

    Raises ValueError when the tree is empty.
    """
    if self.is_empty():
        raise ValueError("Tree is empty")
    # The minimum lives at the leftmost node.
    cursor = self._root
    while cursor.left is not None:
        cursor = cursor.left
    return cursor.key, cursor.value
def max_item(self):
    """Return the (key, value) pair with the largest key.

    Raises ValueError when the tree is empty.
    """
    if self.is_empty():
        raise ValueError("Tree is empty")
    # The maximum lives at the rightmost node.
    cursor = self._root
    while cursor.right is not None:
        cursor = cursor.right
    return cursor.key, cursor.value
def succ_item(self, key, default=_sentinel):
    """Get successor (k,v) pair of key, raises KeyError if key is max key
    or key does not exist. optimized for pypy.

    When *default* is given it is returned instead of raising.
    """
    # removed graingets version, because it was little slower on CPython and much slower on pypy
    # this version runs about 4x faster with pypy than the Cython version
    # Note: Code sharing of succ_item() and ceiling_item() is possible, but has always a speed penalty.
    node = self._root
    succ_node = None
    while node is not None:
        cmp = self._cmp(self._cmp_data, key, node.key)
        if cmp == 0:
            break
        elif cmp < 0:
            # Going left: this node is a successor candidate.
            if (succ_node is None) or self._cmp(self._cmp_data, node.key, succ_node.key) < 0:
                succ_node = node
            node = node.left
        else:
            node = node.right

    if node is None:  # stay at dead end
        if default is _sentinel:
            raise KeyError(str(key))
        return default
    # found node of key
    if node.right is not None:
        # find smallest node of right subtree
        node = node.right
        while node.left is not None:
            node = node.left
        if succ_node is None:
            succ_node = node
        elif self._cmp(self._cmp_data, node.key, succ_node.key) < 0:
            succ_node = node
    elif succ_node is None:  # given key is biggest in tree
        if default is _sentinel:
            raise KeyError(str(key))
        return default
    return succ_node.key, succ_node.value
def prev_item(self, key, default=_sentinel):
    """Get predecessor (k,v) pair of key, raises KeyError if key is min key
    or key does not exist. optimized for pypy.

    When *default* is given it is returned instead of raising.
    """
    # removed graingets version, because it was little slower on CPython and much slower on pypy
    # this version runs about 4x faster with pypy than the Cython version
    # Note: Code sharing of prev_item() and floor_item() is possible, but has always a speed penalty.
    node = self._root
    prev_node = None
    while node is not None:
        cmp = self._cmp(self._cmp_data, key, node.key)
        if cmp == 0:
            break
        elif cmp < 0:
            node = node.left
        else:
            # Going right: this node is a predecessor candidate.
            if (prev_node is None) or self._cmp(self._cmp_data, prev_node.key, node.key) < 0:
                prev_node = node
            node = node.right

    if node is None:  # stay at dead end (None)
        if default is _sentinel:
            raise KeyError(str(key))
        return default
    # found node of key
    if node.left is not None:
        # find biggest node of left subtree
        node = node.left
        while node.right is not None:
            node = node.right
        if prev_node is None:
            prev_node = node
        elif self._cmp(self._cmp_data, prev_node.key, node.key) < 0:
            prev_node = node
    elif prev_node is None:  # given key is smallest in tree
        if default is _sentinel:
            raise KeyError(str(key))
        return default
    return prev_node.key, prev_node.value
def __repr__(self):
    """T.__repr__(...) <==> repr(x)"""
    # Two-stage formatting: substitute the class name first, keeping a
    # literal '%s' placeholder for the item listing.
    tpl = "%s({%s})" % (self.__class__.__name__, '%s')
    return tpl % ", ".join(("%r: %r" % item for item in self.items()))
def __contains__(self, key):
    """k in T -> True if T has a key k, else False"""
    try:
        self.get_value(key)
    except KeyError:
        return False
    return True
def __len__(self):
    """Return the number of stored items (len(T))."""
    return self.count
def is_empty(self):
    """Return True when the tree holds no items."""
    return len(self) == 0
def set_default(self, key, default=None):
    """T.set_default(k[,d]) -> T.get(k,d), also set T[k]=d if k not in T"""
    # EAFP: a single lookup; only a missing key triggers the insert.
    try:
        return self.get_value(key)
    except KeyError:
        self.insert(key, default)
        return default

setdefault = set_default  # for compatibility to dict()
def get(self, key, default=None):
    """T.get(k[,d]) -> T[k] if k in T, else d. d defaults to None."""
    # EAFP: fall back to ``default`` only when the key is missing.
    try:
        return self.get_value(key)
    except KeyError:
        return default
def pop(self, key, *args):
    """Remove *key* and return its value.

    A single optional positional argument supplies a default returned
    when the key is missing; without it, KeyError is raised.
    """
    if len(args) > 1:
        raise TypeError("pop expected at most 2 arguments, got %d" % (1 + len(args)))
    try:
        value = self.get_value(key)
        self.remove(key)
        return value
    except KeyError:
        if not args:
            raise
        return args[0]
def prev_key(self, key, default=_sentinel):
    """Return the greatest key smaller than *key*.

    Raises KeyError (or returns *default* when given) if no such key
    exists or *key* itself is absent.
    """
    result = self.prev_item(key, default)
    if result is default:
        return default
    return result[0]
def succ_key(self, key, default=_sentinel):
    """Return the smallest key greater than *key*.

    Raises KeyError (or returns *default* when given) if no such key
    exists or *key* itself is absent.
    """
    result = self.succ_item(key, default)
    if result is default:
        return default
    return result[0]
def pop_min(self):
    """Remove and return the (key, value) pair with the smallest key.

    Raises ValueError when the tree is empty.
    """
    key, value = self.min_item()
    self.remove(key)
    return key, value
def pop_max(self):
    """Remove and return the (key, value) pair with the largest key.

    Raises ValueError when the tree is empty.
    """
    key, value = self.max_item()
    self.remove(key)
    return key, value
def min_key(self):
    """Get min key of tree, raises ValueError if tree is empty."""
    key, _ = self.min_item()
    return key
def max_key(self):
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/external/__init__.py | augmentation/image_augmentation/helpers/imgaug/external/__init__.py | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false | |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/image_augmentation/helpers/imgaug/external/opensimplex.py | augmentation/image_augmentation/helpers/imgaug/external/opensimplex.py | """
This is a copy of the OpenSimplex library,
based on commit d861cb290531ad15825f21dc4cc35c5d4f407259 from 20.07.2017.
"""
# Based on: https://gist.github.com/KdotJPG/b1270127455a94ac5d19
import sys
from ctypes import c_long
from math import floor as _floor
if sys.version_info[0] < 3:
def floor(num):
return int(_floor(num))
else:
floor = _floor
STRETCH_CONSTANT_2D = -0.211324865405187 # (1/Math.sqrt(2+1)-1)/2
SQUISH_CONSTANT_2D = 0.366025403784439 # (Math.sqrt(2+1)-1)/2
STRETCH_CONSTANT_3D = -1.0 / 6 # (1/Math.sqrt(3+1)-1)/3
SQUISH_CONSTANT_3D = 1.0 / 3 # (Math.sqrt(3+1)-1)/3
STRETCH_CONSTANT_4D = -0.138196601125011 # (1/Math.sqrt(4+1)-1)/4
SQUISH_CONSTANT_4D = 0.309016994374947 # (Math.sqrt(4+1)-1)/4
NORM_CONSTANT_2D = 47
NORM_CONSTANT_3D = 103
NORM_CONSTANT_4D = 30
DEFAULT_SEED = 0
# Gradients for 2D. They approximate the directions to the
# vertices of an octagon from the center.
GRADIENTS_2D = (
5, 2, 2, 5,
-5, 2, -2, 5,
5, -2, 2, -5,
-5, -2, -2, -5,
)
# Gradients for 3D. They approximate the directions to the
# vertices of a rhombicuboctahedron from the center, skewed so
# that the triangular and square facets can be inscribed inside
# circles of the same radius.
GRADIENTS_3D = (
-11, 4, 4, -4, 11, 4, -4, 4, 11,
11, 4, 4, 4, 11, 4, 4, 4, 11,
-11, -4, 4, -4, -11, 4, -4, -4, 11,
11, -4, 4, 4, -11, 4, 4, -4, 11,
-11, 4, -4, -4, 11, -4, -4, 4, -11,
11, 4, -4, 4, 11, -4, 4, 4, -11,
-11, -4, -4, -4, -11, -4, -4, -4, -11,
11, -4, -4, 4, -11, -4, 4, -4, -11,
)
# Gradients for 4D. They approximate the directions to the
# vertices of a disprismatotesseractihexadecachoron from the center,
# skewed so that the tetrahedral and cubic facets can be inscribed inside
# spheres of the same radius.
GRADIENTS_4D = (
3, 1, 1, 1, 1, 3, 1, 1, 1, 1, 3, 1, 1, 1, 1, 3,
-3, 1, 1, 1, -1, 3, 1, 1, -1, 1, 3, 1, -1, 1, 1, 3,
3, -1, 1, 1, 1, -3, 1, 1, 1, -1, 3, 1, 1, -1, 1, 3,
-3, -1, 1, 1, -1, -3, 1, 1, -1, -1, 3, 1, -1, -1, 1, 3,
3, 1, -1, 1, 1, 3, -1, 1, 1, 1, -3, 1, 1, 1, -1, 3,
-3, 1, -1, 1, -1, 3, -1, 1, -1, 1, -3, 1, -1, 1, -1, 3,
3, -1, -1, 1, 1, -3, -1, 1, 1, -1, -3, 1, 1, -1, -1, 3,
-3, -1, -1, 1, -1, -3, -1, 1, -1, -1, -3, 1, -1, -1, -1, 3,
3, 1, 1, -1, 1, 3, 1, -1, 1, 1, 3, -1, 1, 1, 1, -3,
-3, 1, 1, -1, -1, 3, 1, -1, -1, 1, 3, -1, -1, 1, 1, -3,
3, -1, 1, -1, 1, -3, 1, -1, 1, -1, 3, -1, 1, -1, 1, -3,
-3, -1, 1, -1, -1, -3, 1, -1, -1, -1, 3, -1, -1, -1, 1, -3,
3, 1, -1, -1, 1, 3, -1, -1, 1, 1, -3, -1, 1, 1, -1, -3,
-3, 1, -1, -1, -1, 3, -1, -1, -1, 1, -3, -1, -1, 1, -1, -3,
3, -1, -1, -1, 1, -3, -1, -1, 1, -1, -3, -1, 1, -1, -1, -3,
-3, -1, -1, -1, -1, -3, -1, -1, -1, -1, -3, -1, -1, -1, -1, -3,
)
def overflow(x):
# Since normal python ints and longs can be quite humongous we have to use
# this hack to make them be able to overflow
return c_long(x).value
class OpenSimplex(object):
"""
OpenSimplex n-dimensional gradient noise functions.
"""
def __init__(self, seed=DEFAULT_SEED):
"""
Initiate the class and generate permutation arrays from a seed number.
"""
# Initializes the class using a permutation array generated from a 64-bit seed.
# Generates a proper permutation (i.e. doesn't merely perform N
# successive pair swaps on a base array)
perm = self._perm = [0] * 256 # Have to zero fill so we can properly loop over it later
perm_grad_index_3D = self._perm_grad_index_3D = [0] * 256
source = [i for i in range(0, 256)]
seed = overflow(seed * 6364136223846793005 + 1442695040888963407)
seed = overflow(seed * 6364136223846793005 + 1442695040888963407)
seed = overflow(seed * 6364136223846793005 + 1442695040888963407)
for i in range(255, -1, -1):
seed = overflow(seed * 6364136223846793005 + 1442695040888963407)
r = int((seed + 31) % (i + 1))
if r < 0:
r += i + 1
perm[i] = source[r]
perm_grad_index_3D[i] = int((perm[i] % (len(GRADIENTS_3D) / 3)) * 3)
source[r] = source[i]
def _extrapolate2d(self, xsb, ysb, dx, dy):
perm = self._perm
index = perm[(perm[xsb & 0xFF] + ysb) & 0xFF] & 0x0E
g1, g2 = GRADIENTS_2D[index:index + 2]
return g1 * dx + g2 * dy
def _extrapolate3d(self, xsb, ysb, zsb, dx, dy, dz):
perm = self._perm
index = self._perm_grad_index_3D[
(perm[(perm[xsb & 0xFF] + ysb) & 0xFF] + zsb) & 0xFF
]
g1, g2, g3 = GRADIENTS_3D[index:index + 3]
return g1 * dx + g2 * dy + g3 * dz
def _extrapolate4d(self, xsb, ysb, zsb, wsb, dx, dy, dz, dw):
perm = self._perm
index = perm[(
perm[(
perm[(perm[xsb & 0xFF] + ysb) & 0xFF] + zsb
) & 0xFF] + wsb
) & 0xFF] & 0xFC
g1, g2, g3, g4 = GRADIENTS_4D[index:index + 4]
return g1 * dx + g2 * dy + g3 * dz + g4 * dw
def noise2d(self, x, y):
"""
Generate 2D OpenSimplex noise from X,Y coordinates.
"""
# Place input coordinates onto grid.
stretch_offset = (x + y) * STRETCH_CONSTANT_2D
xs = x + stretch_offset
ys = y + stretch_offset
# Floor to get grid coordinates of rhombus (stretched square) super-cell origin.
xsb = floor(xs)
ysb = floor(ys)
# Skew out to get actual coordinates of rhombus origin. We'll need these later.
squish_offset = (xsb + ysb) * SQUISH_CONSTANT_2D
xb = xsb + squish_offset
yb = ysb + squish_offset
# Compute grid coordinates relative to rhombus origin.
xins = xs - xsb
yins = ys - ysb
# Sum those together to get a value that determines which region we're in.
in_sum = xins + yins
# Positions relative to origin point.
dx0 = x - xb
dy0 = y - yb
value = 0
# Contribution (1,0)
dx1 = dx0 - 1 - SQUISH_CONSTANT_2D
dy1 = dy0 - 0 - SQUISH_CONSTANT_2D
attn1 = 2 - dx1 * dx1 - dy1 * dy1
extrapolate = self._extrapolate2d
if attn1 > 0:
attn1 *= attn1
value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, dx1, dy1)
# Contribution (0,1)
dx2 = dx0 - 0 - SQUISH_CONSTANT_2D
dy2 = dy0 - 1 - SQUISH_CONSTANT_2D
attn2 = 2 - dx2 * dx2 - dy2 * dy2
if attn2 > 0:
attn2 *= attn2
value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, dx2, dy2)
if in_sum <= 1: # We're inside the triangle (2-Simplex) at (0,0)
zins = 1 - in_sum
if zins > xins or zins > yins: # (0,0) is one of the closest two triangular vertices
if xins > yins:
xsv_ext = xsb + 1
ysv_ext = ysb - 1
dx_ext = dx0 - 1
dy_ext = dy0 + 1
else:
xsv_ext = xsb - 1
ysv_ext = ysb + 1
dx_ext = dx0 + 1
dy_ext = dy0 - 1
else: # (1,0) and (0,1) are the closest two vertices.
xsv_ext = xsb + 1
ysv_ext = ysb + 1
dx_ext = dx0 - 1 - 2 * SQUISH_CONSTANT_2D
dy_ext = dy0 - 1 - 2 * SQUISH_CONSTANT_2D
else: # We're inside the triangle (2-Simplex) at (1,1)
zins = 2 - in_sum
if zins < xins or zins < yins: # (0,0) is one of the closest two triangular vertices
if xins > yins:
xsv_ext = xsb + 2
ysv_ext = ysb + 0
dx_ext = dx0 - 2 - 2 * SQUISH_CONSTANT_2D
dy_ext = dy0 + 0 - 2 * SQUISH_CONSTANT_2D
else:
xsv_ext = xsb + 0
ysv_ext = ysb + 2
dx_ext = dx0 + 0 - 2 * SQUISH_CONSTANT_2D
dy_ext = dy0 - 2 - 2 * SQUISH_CONSTANT_2D
else: # (1,0) and (0,1) are the closest two vertices.
dx_ext = dx0
dy_ext = dy0
xsv_ext = xsb
ysv_ext = ysb
xsb += 1
ysb += 1
dx0 = dx0 - 1 - 2 * SQUISH_CONSTANT_2D
dy0 = dy0 - 1 - 2 * SQUISH_CONSTANT_2D
# Contribution (0,0) or (1,1)
attn0 = 2 - dx0 * dx0 - dy0 * dy0
if attn0 > 0:
attn0 *= attn0
value += attn0 * attn0 * extrapolate(xsb, ysb, dx0, dy0)
# Extra Vertex
attn_ext = 2 - dx_ext * dx_ext - dy_ext * dy_ext
if attn_ext > 0:
attn_ext *= attn_ext
value += attn_ext * attn_ext * extrapolate(xsv_ext, ysv_ext, dx_ext, dy_ext)
return value / NORM_CONSTANT_2D
def noise3d(self, x, y, z):
"""
Generate 3D OpenSimplex noise from X,Y,Z coordinates.
"""
# Place input coordinates on simplectic honeycomb.
stretch_offset = (x + y + z) * STRETCH_CONSTANT_3D
xs = x + stretch_offset
ys = y + stretch_offset
zs = z + stretch_offset
# Floor to get simplectic honeycomb coordinates of rhombohedron (stretched cube) super-cell origin.
xsb = floor(xs)
ysb = floor(ys)
zsb = floor(zs)
# Skew out to get actual coordinates of rhombohedron origin. We'll need these later.
squish_offset = (xsb + ysb + zsb) * SQUISH_CONSTANT_3D
xb = xsb + squish_offset
yb = ysb + squish_offset
zb = zsb + squish_offset
# Compute simplectic honeycomb coordinates relative to rhombohedral origin.
xins = xs - xsb
yins = ys - ysb
zins = zs - zsb
# Sum those together to get a value that determines which region we're in.
in_sum = xins + yins + zins
# Positions relative to origin point.
dx0 = x - xb
dy0 = y - yb
dz0 = z - zb
value = 0
extrapolate = self._extrapolate3d
if in_sum <= 1: # We're inside the tetrahedron (3-Simplex) at (0,0,0)
# Determine which two of (0,0,1), (0,1,0), (1,0,0) are closest.
a_point = 0x01
a_score = xins
b_point = 0x02
b_score = yins
if a_score >= b_score and zins > b_score:
b_score = zins
b_point = 0x04
elif a_score < b_score and zins > a_score:
a_score = zins
a_point = 0x04
# Now we determine the two lattice points not part of the tetrahedron that may contribute.
# This depends on the closest two tetrahedral vertices, including (0,0,0)
wins = 1 - in_sum
if wins > a_score or wins > b_score: # (0,0,0) is one of the closest two tetrahedral vertices.
c = b_point if (b_score > a_score) else a_point # Our other closest vertex is the closest out of a and b.
if (c & 0x01) == 0:
xsv_ext0 = xsb - 1
xsv_ext1 = xsb
dx_ext0 = dx0 + 1
dx_ext1 = dx0
else:
xsv_ext0 = xsv_ext1 = xsb + 1
dx_ext0 = dx_ext1 = dx0 - 1
if (c & 0x02) == 0:
ysv_ext0 = ysv_ext1 = ysb
dy_ext0 = dy_ext1 = dy0
if (c & 0x01) == 0:
ysv_ext1 -= 1
dy_ext1 += 1
else:
ysv_ext0 -= 1
dy_ext0 += 1
else:
ysv_ext0 = ysv_ext1 = ysb + 1
dy_ext0 = dy_ext1 = dy0 - 1
if (c & 0x04) == 0:
zsv_ext0 = zsb
zsv_ext1 = zsb - 1
dz_ext0 = dz0
dz_ext1 = dz0 + 1
else:
zsv_ext0 = zsv_ext1 = zsb + 1
dz_ext0 = dz_ext1 = dz0 - 1
else: # (0,0,0) is not one of the closest two tetrahedral vertices.
c = (a_point | b_point) # Our two extra vertices are determined by the closest two.
if (c & 0x01) == 0:
xsv_ext0 = xsb
xsv_ext1 = xsb - 1
dx_ext0 = dx0 - 2 * SQUISH_CONSTANT_3D
dx_ext1 = dx0 + 1 - SQUISH_CONSTANT_3D
else:
xsv_ext0 = xsv_ext1 = xsb + 1
dx_ext0 = dx0 - 1 - 2 * SQUISH_CONSTANT_3D
dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_3D
if (c & 0x02) == 0:
ysv_ext0 = ysb
ysv_ext1 = ysb - 1
dy_ext0 = dy0 - 2 * SQUISH_CONSTANT_3D
dy_ext1 = dy0 + 1 - SQUISH_CONSTANT_3D
else:
ysv_ext0 = ysv_ext1 = ysb + 1
dy_ext0 = dy0 - 1 - 2 * SQUISH_CONSTANT_3D
dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_3D
if (c & 0x04) == 0:
zsv_ext0 = zsb
zsv_ext1 = zsb - 1
dz_ext0 = dz0 - 2 * SQUISH_CONSTANT_3D
dz_ext1 = dz0 + 1 - SQUISH_CONSTANT_3D
else:
zsv_ext0 = zsv_ext1 = zsb + 1
dz_ext0 = dz0 - 1 - 2 * SQUISH_CONSTANT_3D
dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_3D
# Contribution (0,0,0)
attn0 = 2 - dx0 * dx0 - dy0 * dy0 - dz0 * dz0
if attn0 > 0:
attn0 *= attn0
value += attn0 * attn0 * extrapolate(xsb + 0, ysb + 0, zsb + 0, dx0, dy0, dz0)
# Contribution (1,0,0)
dx1 = dx0 - 1 - SQUISH_CONSTANT_3D
dy1 = dy0 - 0 - SQUISH_CONSTANT_3D
dz1 = dz0 - 0 - SQUISH_CONSTANT_3D
attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1
if attn1 > 0:
attn1 *= attn1
value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, zsb + 0, dx1, dy1, dz1)
# Contribution (0,1,0)
dx2 = dx0 - 0 - SQUISH_CONSTANT_3D
dy2 = dy0 - 1 - SQUISH_CONSTANT_3D
dz2 = dz1
attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2
if attn2 > 0:
attn2 *= attn2
value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, zsb + 0, dx2, dy2, dz2)
# Contribution (0,0,1)
dx3 = dx2
dy3 = dy1
dz3 = dz0 - 1 - SQUISH_CONSTANT_3D
attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3
if attn3 > 0:
attn3 *= attn3
value += attn3 * attn3 * extrapolate(xsb + 0, ysb + 0, zsb + 1, dx3, dy3, dz3)
elif in_sum >= 2: # We're inside the tetrahedron (3-Simplex) at (1,1,1)
# Determine which two tetrahedral vertices are the closest, out of (1,1,0), (1,0,1), (0,1,1) but not (1,1,1).
a_point = 0x06
a_score = xins
b_point = 0x05
b_score = yins
if a_score <= b_score and zins < b_score:
b_score = zins
b_point = 0x03
elif a_score > b_score and zins < a_score:
a_score = zins
a_point = 0x03
# Now we determine the two lattice points not part of the tetrahedron that may contribute.
# This depends on the closest two tetrahedral vertices, including (1,1,1)
wins = 3 - in_sum
if wins < a_score or wins < b_score: # (1,1,1) is one of the closest two tetrahedral vertices.
c = b_point if (b_score < a_score) else a_point # Our other closest vertex is the closest out of a and b.
if (c & 0x01) != 0:
xsv_ext0 = xsb + 2
xsv_ext1 = xsb + 1
dx_ext0 = dx0 - 2 - 3 * SQUISH_CONSTANT_3D
dx_ext1 = dx0 - 1 - 3 * SQUISH_CONSTANT_3D
else:
xsv_ext0 = xsv_ext1 = xsb
dx_ext0 = dx_ext1 = dx0 - 3 * SQUISH_CONSTANT_3D
if (c & 0x02) != 0:
ysv_ext0 = ysv_ext1 = ysb + 1
dy_ext0 = dy_ext1 = dy0 - 1 - 3 * SQUISH_CONSTANT_3D
if (c & 0x01) != 0:
ysv_ext1 += 1
dy_ext1 -= 1
else:
ysv_ext0 += 1
dy_ext0 -= 1
else:
ysv_ext0 = ysv_ext1 = ysb
dy_ext0 = dy_ext1 = dy0 - 3 * SQUISH_CONSTANT_3D
if (c & 0x04) != 0:
zsv_ext0 = zsb + 1
zsv_ext1 = zsb + 2
dz_ext0 = dz0 - 1 - 3 * SQUISH_CONSTANT_3D
dz_ext1 = dz0 - 2 - 3 * SQUISH_CONSTANT_3D
else:
zsv_ext0 = zsv_ext1 = zsb
dz_ext0 = dz_ext1 = dz0 - 3 * SQUISH_CONSTANT_3D
else: # (1,1,1) is not one of the closest two tetrahedral vertices.
c = (a_point & b_point) # Our two extra vertices are determined by the closest two.
if (c & 0x01) != 0:
xsv_ext0 = xsb + 1
xsv_ext1 = xsb + 2
dx_ext0 = dx0 - 1 - SQUISH_CONSTANT_3D
dx_ext1 = dx0 - 2 - 2 * SQUISH_CONSTANT_3D
else:
xsv_ext0 = xsv_ext1 = xsb
dx_ext0 = dx0 - SQUISH_CONSTANT_3D
dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D
if (c & 0x02) != 0:
ysv_ext0 = ysb + 1
ysv_ext1 = ysb + 2
dy_ext0 = dy0 - 1 - SQUISH_CONSTANT_3D
dy_ext1 = dy0 - 2 - 2 * SQUISH_CONSTANT_3D
else:
ysv_ext0 = ysv_ext1 = ysb
dy_ext0 = dy0 - SQUISH_CONSTANT_3D
dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D
if (c & 0x04) != 0:
zsv_ext0 = zsb + 1
zsv_ext1 = zsb + 2
dz_ext0 = dz0 - 1 - SQUISH_CONSTANT_3D
dz_ext1 = dz0 - 2 - 2 * SQUISH_CONSTANT_3D
else:
zsv_ext0 = zsv_ext1 = zsb
dz_ext0 = dz0 - SQUISH_CONSTANT_3D
dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D
# Contribution (1,1,0)
dx3 = dx0 - 1 - 2 * SQUISH_CONSTANT_3D
dy3 = dy0 - 1 - 2 * SQUISH_CONSTANT_3D
dz3 = dz0 - 0 - 2 * SQUISH_CONSTANT_3D
attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3
if attn3 > 0:
attn3 *= attn3
value += attn3 * attn3 * extrapolate(xsb + 1, ysb + 1, zsb + 0, dx3, dy3, dz3)
# Contribution (1,0,1)
dx2 = dx3
dy2 = dy0 - 0 - 2 * SQUISH_CONSTANT_3D
dz2 = dz0 - 1 - 2 * SQUISH_CONSTANT_3D
attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2
if attn2 > 0:
attn2 *= attn2
value += attn2 * attn2 * extrapolate(xsb + 1, ysb + 0, zsb + 1, dx2, dy2, dz2)
# Contribution (0,1,1)
dx1 = dx0 - 0 - 2 * SQUISH_CONSTANT_3D
dy1 = dy3
dz1 = dz2
attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1
if attn1 > 0:
attn1 *= attn1
value += attn1 * attn1 * extrapolate(xsb + 0, ysb + 1, zsb + 1, dx1, dy1, dz1)
# Contribution (1,1,1)
dx0 = dx0 - 1 - 3 * SQUISH_CONSTANT_3D
dy0 = dy0 - 1 - 3 * SQUISH_CONSTANT_3D
dz0 = dz0 - 1 - 3 * SQUISH_CONSTANT_3D
attn0 = 2 - dx0 * dx0 - dy0 * dy0 - dz0 * dz0
if attn0 > 0:
attn0 *= attn0
value += attn0 * attn0 * extrapolate(xsb + 1, ysb + 1, zsb + 1, dx0, dy0, dz0)
else: # We're inside the octahedron (Rectified 3-Simplex) in between.
# Decide between point (0,0,1) and (1,1,0) as closest
p1 = xins + yins
if p1 > 1:
a_score = p1 - 1
a_point = 0x03
a_is_further_side = True
else:
a_score = 1 - p1
a_point = 0x04
a_is_further_side = False
# Decide between point (0,1,0) and (1,0,1) as closest
p2 = xins + zins
if p2 > 1:
b_score = p2 - 1
b_point = 0x05
b_is_further_side = True
else:
b_score = 1 - p2
b_point = 0x02
b_is_further_side = False
# The closest out of the two (1,0,0) and (0,1,1) will replace the furthest out of the two decided above, if closer.
p3 = yins + zins
if p3 > 1:
score = p3 - 1
if a_score <= b_score and a_score < score:
a_point = 0x06
a_is_further_side = True
elif a_score > b_score and b_score < score:
b_point = 0x06
b_is_further_side = True
else:
score = 1 - p3
if a_score <= b_score and a_score < score:
a_point = 0x01
a_is_further_side = False
elif a_score > b_score and b_score < score:
b_point = 0x01
b_is_further_side = False
# Where each of the two closest points are determines how the extra two vertices are calculated.
if a_is_further_side == b_is_further_side:
if a_is_further_side: # Both closest points on (1,1,1) side
# One of the two extra points is (1,1,1)
dx_ext0 = dx0 - 1 - 3 * SQUISH_CONSTANT_3D
dy_ext0 = dy0 - 1 - 3 * SQUISH_CONSTANT_3D
dz_ext0 = dz0 - 1 - 3 * SQUISH_CONSTANT_3D
xsv_ext0 = xsb + 1
ysv_ext0 = ysb + 1
zsv_ext0 = zsb + 1
# Other extra point is based on the shared axis.
c = (a_point & b_point)
if (c & 0x01) != 0:
dx_ext1 = dx0 - 2 - 2 * SQUISH_CONSTANT_3D
dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D
dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D
xsv_ext1 = xsb + 2
ysv_ext1 = ysb
zsv_ext1 = zsb
elif (c & 0x02) != 0:
dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D
dy_ext1 = dy0 - 2 - 2 * SQUISH_CONSTANT_3D
dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D
xsv_ext1 = xsb
ysv_ext1 = ysb + 2
zsv_ext1 = zsb
else:
dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D
dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D
dz_ext1 = dz0 - 2 - 2 * SQUISH_CONSTANT_3D
xsv_ext1 = xsb
ysv_ext1 = ysb
zsv_ext1 = zsb + 2
else:# Both closest points on (0,0,0) side
# One of the two extra points is (0,0,0)
dx_ext0 = dx0
dy_ext0 = dy0
dz_ext0 = dz0
xsv_ext0 = xsb
ysv_ext0 = ysb
zsv_ext0 = zsb
# Other extra point is based on the omitted axis.
c = (a_point | b_point)
if (c & 0x01) == 0:
dx_ext1 = dx0 + 1 - SQUISH_CONSTANT_3D
dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_3D
dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_3D
xsv_ext1 = xsb - 1
ysv_ext1 = ysb + 1
zsv_ext1 = zsb + 1
elif (c & 0x02) == 0:
dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_3D
dy_ext1 = dy0 + 1 - SQUISH_CONSTANT_3D
dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_3D
xsv_ext1 = xsb + 1
ysv_ext1 = ysb - 1
zsv_ext1 = zsb + 1
else:
dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_3D
dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_3D
dz_ext1 = dz0 + 1 - SQUISH_CONSTANT_3D
xsv_ext1 = xsb + 1
ysv_ext1 = ysb + 1
zsv_ext1 = zsb - 1
else: # One point on (0,0,0) side, one point on (1,1,1) side
if a_is_further_side:
c1 = a_point
c2 = b_point
else:
c1 = b_point
c2 = a_point
# One contribution is a _permutation of (1,1,-1)
if (c1 & 0x01) == 0:
dx_ext0 = dx0 + 1 - SQUISH_CONSTANT_3D
dy_ext0 = dy0 - 1 - SQUISH_CONSTANT_3D
dz_ext0 = dz0 - 1 - SQUISH_CONSTANT_3D
xsv_ext0 = xsb - 1
ysv_ext0 = ysb + 1
zsv_ext0 = zsb + 1
elif (c1 & 0x02) == 0:
dx_ext0 = dx0 - 1 - SQUISH_CONSTANT_3D
dy_ext0 = dy0 + 1 - SQUISH_CONSTANT_3D
dz_ext0 = dz0 - 1 - SQUISH_CONSTANT_3D
xsv_ext0 = xsb + 1
ysv_ext0 = ysb - 1
zsv_ext0 = zsb + 1
else:
dx_ext0 = dx0 - 1 - SQUISH_CONSTANT_3D
dy_ext0 = dy0 - 1 - SQUISH_CONSTANT_3D
dz_ext0 = dz0 + 1 - SQUISH_CONSTANT_3D
xsv_ext0 = xsb + 1
ysv_ext0 = ysb + 1
zsv_ext0 = zsb - 1
# One contribution is a _permutation of (0,0,2)
dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D
dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D
dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D
xsv_ext1 = xsb
ysv_ext1 = ysb
zsv_ext1 = zsb
if (c2 & 0x01) != 0:
dx_ext1 -= 2
xsv_ext1 += 2
elif (c2 & 0x02) != 0:
dy_ext1 -= 2
ysv_ext1 += 2
else:
dz_ext1 -= 2
zsv_ext1 += 2
# Contribution (1,0,0)
dx1 = dx0 - 1 - SQUISH_CONSTANT_3D
dy1 = dy0 - 0 - SQUISH_CONSTANT_3D
dz1 = dz0 - 0 - SQUISH_CONSTANT_3D
attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1
if attn1 > 0:
attn1 *= attn1
value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, zsb + 0, dx1, dy1, dz1)
# Contribution (0,1,0)
dx2 = dx0 - 0 - SQUISH_CONSTANT_3D
dy2 = dy0 - 1 - SQUISH_CONSTANT_3D
dz2 = dz1
attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2
if attn2 > 0:
attn2 *= attn2
value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, zsb + 0, dx2, dy2, dz2)
# Contribution (0,0,1)
dx3 = dx2
dy3 = dy1
dz3 = dz0 - 1 - SQUISH_CONSTANT_3D
attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3
if attn3 > 0:
attn3 *= attn3
value += attn3 * attn3 * extrapolate(xsb + 0, ysb + 0, zsb + 1, dx3, dy3, dz3)
# Contribution (1,1,0)
dx4 = dx0 - 1 - 2 * SQUISH_CONSTANT_3D
dy4 = dy0 - 1 - 2 * SQUISH_CONSTANT_3D
dz4 = dz0 - 0 - 2 * SQUISH_CONSTANT_3D
attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4
if attn4 > 0:
attn4 *= attn4
value += attn4 * attn4 * extrapolate(xsb + 1, ysb + 1, zsb + 0, dx4, dy4, dz4)
# Contribution (1,0,1)
dx5 = dx4
dy5 = dy0 - 0 - 2 * SQUISH_CONSTANT_3D
dz5 = dz0 - 1 - 2 * SQUISH_CONSTANT_3D
attn5 = 2 - dx5 * dx5 - dy5 * dy5 - dz5 * dz5
if attn5 > 0:
attn5 *= attn5
value += attn5 * attn5 * extrapolate(xsb + 1, ysb + 0, zsb + 1, dx5, dy5, dz5)
# Contribution (0,1,1)
dx6 = dx0 - 0 - 2 * SQUISH_CONSTANT_3D
dy6 = dy4
dz6 = dz5
attn6 = 2 - dx6 * dx6 - dy6 * dy6 - dz6 * dz6
if attn6 > 0:
attn6 *= attn6
value += attn6 * attn6 * extrapolate(xsb + 0, ysb + 1, zsb + 1, dx6, dy6, dz6)
# First extra vertex
attn_ext0 = 2 - dx_ext0 * dx_ext0 - dy_ext0 * dy_ext0 - dz_ext0 * dz_ext0
if attn_ext0 > 0:
attn_ext0 *= attn_ext0
value += attn_ext0 * attn_ext0 * extrapolate(xsv_ext0, ysv_ext0, zsv_ext0, dx_ext0, dy_ext0, dz_ext0)
# Second extra vertex
attn_ext1 = 2 - dx_ext1 * dx_ext1 - dy_ext1 * dy_ext1 - dz_ext1 * dz_ext1
if attn_ext1 > 0:
attn_ext1 *= attn_ext1
value += attn_ext1 * attn_ext1 * extrapolate(xsv_ext1, ysv_ext1, zsv_ext1, dx_ext1, dy_ext1, dz_ext1)
return value / NORM_CONSTANT_3D
def noise4d(self, x, y, z, w):
"""
Generate 4D OpenSimplex noise from X,Y,Z,W coordinates.
"""
# Place input coordinates on simplectic honeycomb.
stretch_offset = (x + y + z + w) * STRETCH_CONSTANT_4D
xs = x + stretch_offset
ys = y + stretch_offset
zs = z + stretch_offset
ws = w + stretch_offset
# Floor to get simplectic honeycomb coordinates of rhombo-hypercube super-cell origin.
xsb = floor(xs)
ysb = floor(ys)
zsb = floor(zs)
wsb = floor(ws)
# Skew out to get actual coordinates of stretched rhombo-hypercube origin. We'll need these later.
squish_offset = (xsb + ysb + zsb + wsb) * SQUISH_CONSTANT_4D
xb = xsb + squish_offset
yb = ysb + squish_offset
zb = zsb + squish_offset
wb = wsb + squish_offset
# Compute simplectic honeycomb coordinates relative to rhombo-hypercube origin.
xins = xs - xsb
yins = ys - ysb
zins = zs - zsb
wins = ws - wsb
# Sum those together to get a value that determines which region we're in.
in_sum = xins + yins + zins + wins
# Positions relative to origin po.
dx0 = x - xb
dy0 = y - yb
dz0 = z - zb
dw0 = w - wb
value = 0
extrapolate = self._extrapolate4d
if in_sum <= 1: # We're inside the pentachoron (4-Simplex) at (0,0,0,0)
# Determine which two of (0,0,0,1), (0,0,1,0), (0,1,0,0), (1,0,0,0) are closest.
a_po = 0x01
a_score = xins
b_po = 0x02
b_score = yins
if a_score >= b_score and zins > b_score:
b_score = zins
b_po = 0x04
elif a_score < b_score and zins > a_score:
a_score = zins
a_po = 0x04
if a_score >= b_score and wins > b_score:
b_score = wins
b_po = 0x08
elif a_score < b_score and wins > a_score:
a_score = wins
a_po = 0x08
# Now we determine the three lattice pos not part of the pentachoron that may contribute.
# This depends on the closest two pentachoron vertices, including (0,0,0,0)
uins = 1 - in_sum
if uins > a_score or uins > b_score: # (0,0,0,0) is one of the closest two pentachoron vertices.
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/csv_augmentation/augment_ctgan_regression.py | augmentation/csv_augmentation/augment_ctgan_regression.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ _____ _____ _ _
/ _ \ | ___ \_ _| _ / __ \/ ___|| | | |
/ /_\ \| |_/ / | | (_) | / \/\ `--. | | | |
| _ || __/ | | | | `--. \| | | |
| | | || | _| |_ _ | \__/\/\__/ /\ \_/ /
\_| |_/\_| \___/ (_) \____/\____/ \___/
Augment CSV files for classification problems using
CTGAN.
'''
import os
try:
from ctgan import CTGANSynthesizer
except:
os.system('pip3 install ctgan==0.2.1')
from ctgan import CTGANSynthesizer
import time, random
import pandas as pd
import numpy as np
def augment_ctgan_regression(csvfile):
data=pd.read_csv(csvfile)
ctgan = CTGANSynthesizer()
ctgan.fit(data,epochs=10) #15
percent_generated=1
df_gen = ctgan.sample(int(len(data)*percent_generated))
print('augmented with %s samples'%(str(len(df_gen))))
print(df_gen)
# now add both togrther to make new .CSV file
newfile1='augmented_'+csvfile
df_gen.to_csv(newfile1, index=0)
# now combine augmented and regular dataset
data2=pd.read_csv('augmented_'+csvfile)
frames = [data, data2]
result = pd.concat(frames)
newfile2='augmented_combined_'+csvfile
result.to_csv(newfile2, index=0)
return [csvfile,newfile1,newfile2] | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/csv_augmentation/augment_ctgan_classification.py | augmentation/csv_augmentation/augment_ctgan_classification.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ _____ _____ _ _
/ _ \ | ___ \_ _| _ / __ \/ ___|| | | |
/ /_\ \| |_/ / | | (_) | / \/\ `--. | | | |
| _ || __/ | | | | `--. \| | | |
| | | || | _| |_ _ | \__/\/\__/ /\ \_/ /
\_| |_/\_| \___/ (_) \____/\____/ \___/
'''
import os
try:
from ctgan import CTGANSynthesizer
except:
os.system('pip3 install ctgan==0.2.1')
from ctgan import CTGANSynthesizer
import time, random
import pandas as pd
import numpy as np
def find_nearestval(value,values):
distances=list()
for i in range(len(values)):
newvalue=values[i]-value
distances.append(newvalue)
minimum=min(distances)
minind=distances.index(minimum)
newvalue=values[minind]
# print('value --> newvalue')
# print(value)
# print(newvalue)
return newvalue
def get_index_positions(list_of_elems, element):
''' Returns the indexes of all occurrences of give element in
the list- listOfElements '''
index_pos_list = []
index_pos = 0
while True:
try:
# Search for item in list from indexPos to the end of list
index_pos = list_of_elems.index(element, index_pos)
# Add the index position in list
index_pos_list.append(index_pos)
index_pos += 1
except ValueError as e:
break
return index_pos_list
def augment_ctgan_classification(csvfile):
data=pd.read_csv(csvfile)
ctgan = CTGANSynthesizer()
ctgan.fit(data,epochs=10) #15
percent_generated=1
df_gen = ctgan.sample(int(len(data)*percent_generated))
df_gen['class_']=df_gen['class_'].apply(np.floor)
values=list(set(list(data['class_'])))
newclass=df_gen['class_']
newclass2=list()
for i in range(len(newclass)):
if newclass[i] not in values:
newvalue=find_nearestval(newclass[i], values)
newclass2.append(newvalue)
else:
newclass2.append(newclass[i])
df_gen['class_']=newclass2
# now count each value and balance
classcol=list(df_gen['class_'])
unique_classes=list(set(df_gen['class_']))
counts=list()
for i in range(len(unique_classes)):
counts.append(classcol.count(unique_classes[i]))
minval=min(counts)
print(minval)
# now balance out the classes by removing all to minimum value
for i in range(len(unique_classes)):
print(unique_classes[i])
index_pos_list=get_index_positions(classcol,unique_classes[i])
while len(index_pos_list) >= minval:
index_pos_list=get_index_positions(classcol,unique_classes[i])
random_ind=random.choice(index_pos_list)
df_gen=df_gen.drop(df_gen.index[random_ind])
classcol=list(df_gen['class_'])
print('augmented with %s samples'%(str(len(unique_classes)*minval)))
print(df_gen)
# now add both togrther to make new .CSV file
newfile1='augmented_'+csvfile
df_gen.to_csv(newfile1, index=0)
# now combine augmented and regular dataset
data2=pd.read_csv('augmented_'+csvfile)
frames = [data, data2]
result = pd.concat(frames)
newfile='augmented_combined_'+csvfile
result.to_csv(newfile, index=0)
return [csvfile, newfile1, newfile2] | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/csv_augmentation/augment.py | augmentation/csv_augmentation/augment.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ _____ _____ _ _
/ _ \ | ___ \_ _| _ / __ \/ ___|| | | |
/ /_\ \| |_/ / | | (_) | / \/\ `--. | | | |
| _ || __/ | | | | `--. \| | | |
| | | || | _| |_ _ | \__/\/\__/ /\ \_/ /
\_| |_/\_| \___/ (_) \____/\____/ \___/
This section of Allie's API augments CSV files with default_csv_augmenters.
Usage: python3 augment.py [folder] [augment_type]
All augment_type options include:
["augment_ctgan_classification", "augment_ctgan_regression"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/augmentation/csv_augmentation
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random
import numpy as np
# import helpers.transcribe as ts
# import speech_recognition as sr
from tqdm import tqdm
def prev_dir(directory):
	"""Return the parent of *directory* by dropping its final '/'-separated
	component (same result as the original accumulate-in-a-loop version)."""
	components = directory.split('/')
	return '/'.join(components[:-1])
################################################
## Helper functions ##
################################################
def csv_augment(augmentation_set, csvfile, basedir):
	'''Dispatch one CSV file to the selected augmenter.

	Args:
		augmentation_set (str): augmenter name ("augment_ctgan_classification"
			or "augment_ctgan_regression").
		csvfile (str): CSV filename to augment.
		basedir (str): base directory (kept for interface compatibility).

	Returns:
		list: filenames to feed into the next augmentation pass.

	BUG FIX: this function previously returned None, but the caller loop
	assigns the result back to `filename` and indexes it on the next pass,
	which crashed whenever more than one augmenter was configured. We now
	propagate the augmenter's return value, falling back to the input file.
	'''
	# only load the relevant featuresets for featurization to save memory
	result = None
	if augmentation_set=='augment_ctgan_classification':
		result = augment_ctgan_classification.augment_ctgan_classification(csvfile)
	elif augmentation_set=='augment_ctgan_regression':
		result = augment_ctgan_regression.augment_ctgan_regression(csvfile)
	# Fall back to the input file so the caller's chaining loop keeps working.
	if result is None:
		result = [csvfile]
	return result
################################################
## Load main settings ##
################################################
# directory=sys.argv[1]
# Resolve Allie's repository root (two levels up) and load the global settings.
basedir=os.getcwd()
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
# NOTE(review): these two transcription settings are loaded but never used
# later in this script; confirm whether transcription was meant to happen here.
csv_transcribe=settings['transcribe_csv']
default_csv_transcribers=settings['default_csv_transcriber']
try:
	# assume 1 type of feature_set
	augmentation_sets=[sys.argv[2]]
except:
	# if none provided in command line, then load deafult features
	augmentation_sets=settings['default_csv_augmenters']
################################################
## Import According to settings ##
################################################
# only load the relevant featuresets for featurization to save memory
if 'augment_ctgan_classification' in augmentation_sets:
	import augment_ctgan_classification
if 'augment_ctgan_regression' in augmentation_sets:
	import augment_ctgan_regression
################################################
## Get featurization folder ##
################################################
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
random.shuffle(listdir)
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'
# get class label from folder name
labelname=foldername.split('/')
if labelname[-1]=='':
	labelname=labelname[-2]
else:
	labelname=labelname[-1]
################################################
## NOW AUGMENT!! ##
################################################
listdir=os.listdir()
random.shuffle(listdir)
# featurize all files accoridng to librosa featurize
for i in tqdm(range(len(listdir)), desc=labelname):
	if listdir[i][-4:] in ['.csv']:
		filename=[listdir[i]]
		for j in range(len(augmentation_sets)):
			augmentation_set=augmentation_sets[j]
			for k in range(len(filename)):
				# NOTE(review): this expects csv_augment to return a list of
				# filenames for chaining into the next pass -- verify its return value.
				filename=csv_augment(augmentation_set, filename[k], basedir)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/csv_augmentation/helpers/augment_tgan.py | augmentation/csv_augmentation/helpers/augment_tgan.py | import os
import pandas as pd
try:
from tgan.model import TGANModel
except:
os.system('pip3 install tgan==0.1.0')
'''
following this tutorial:
https://nbviewer.jupyter.org/github/aleju/imgaug-doc/blob/master/notebooks/A01%20-%20Load%20and%20Augment%20an%20Image.ipynb
'''
def augment_tgan(csvfile):
	"""Fit a TGAN model on *csvfile* and print ~10% as many synthetic rows.

	Every column except the last one is identified by its integer index and
	handed to TGANModel as a continuous column.
	"""
	frame = pd.read_csv(csvfile)
	continuous_columns = list(range(len(list(frame)) - 1))
	model = TGANModel(continuous_columns)
	model.fit(frame)
	# now create number of samples (10%)
	sample_count = int(0.10 * len(frame))
	print(model.sample(sample_count))
augment_tgan('gender_all.csv') | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
mohammadasim98/met3r | https://github.com/mohammadasim98/met3r/blob/3259e41b7190eaee15b09f23d252e8aaa12b15e7/setup.py | setup.py | import os
from setuptools import setup, find_packages
import subprocess
import sys
from distutils.cmd import Command
with open('requirements.txt') as f:
    required = f.read().splitlines()
# NOTE(review): `required` is never passed to setup() below -- install_requires
# is hard-coded instead; confirm which dependency list is authoritative.
__version__ = "1.0.1"
class GetSubmodules(Command):
    """Custom setup.py command: fetch git submodules (`python setup.py submodule`).

    BUG FIX: distutils requires every Command subclass to define
    `user_options`, `initialize_options` and `finalize_options`; without
    them, invoking the command fails before `run` is ever called.
    """

    description = "initialize and update git submodules recursively"
    user_options = []

    def initialize_options(self):
        # No options to initialize.
        pass

    def finalize_options(self):
        # No options to validate.
        pass

    def run(self):
        subprocess.check_call(['git', 'submodule', 'update', "--init", "--recursive"])
setup(
    name="met3r",
    version=__version__,
    author="Mohammad Asim, Christopher Wewer, Thomas Wimmer, Bernt Schiele, Jan Eric Lenssen",
    author_email="masim@mpi-inf.mpg.de, cwewer@mpi-inf.mpg.de, twimmer@mpi-inf.mpg.de, schiele@mpi-inf.mpg.de, jlenssen@mpi-inf.mpg.de",
    description="Official Code for 'MEt3R: Measuring Multi-View Consistency in Generated Images'",
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    url='https://github.com/mohammadasim98/met3r',
    # NOTE(review): these entries are filesystem paths with '/' separators,
    # not dotted module names; confirm the vendored mast3r/dust3r trees are
    # packaged as intended by setuptools.
    packages=[
        "met3r",
        "mast3r/dust3r",
        "mast3r/dust3r/croco",
        "mast3r/dust3r/croco/models",
        "mast3r/dust3r/croco/models/curope",
        "mast3r/dust3r/croco/utils",
        "mast3r/dust3r/dust3r",
        "mast3r/dust3r/dust3r/cloud_opt",
        "mast3r/dust3r/dust3r/heads",
        "mast3r/dust3r/dust3r/utils",
        "mast3r/dust3r/dust3r/datasets",
        "mast3r/dust3r/dust3r/datasets/base",
        "mast3r/dust3r/dust3r/datasets/utils",
        "mast3r",
        "mast3r/mast3r",
        "mast3r/mast3r/cloud_opt",
        "mast3r/mast3r/cloud_opt/utils",
        "mast3r/mast3r/colmap",
        "mast3r/mast3r/utils",
        "mast3r/mast3r/datasets",
        "mast3r/mast3r/datasets/base",
        "mast3r/mast3r/datasets/utils",
    ],
    # NOTE(review): the `required` list read from requirements.txt earlier in
    # this file is unused; this hard-coded list is what actually gets installed.
    install_requires=[
        "torch",
        "torchvision",
        "iopath",
        "roma",
        "matplotlib",
        "tqdm",
        "opencv-python",
        "scipy",
        "einops",
        "numpy",
        "jaxtyping",
        "pytorch-lightning",
        "torchmetrics",
        "pyglet<2",
        "timm==0.4.12",
        "huggingface-hub[torch]>=0.22",
        "lpips",
        "featup@git+https://github.com/mhamilton723/FeatUp",
        "pytorch3d@git+https://github.com/facebookresearch/pytorch3d.git",
    ],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
    python_requires='>=3.6',
    # Registers `python setup.py submodule` to pull the git submodules.
    cmdclass={"submodule": GetSubmodules}
)
mohammadasim98/met3r | https://github.com/mohammadasim98/met3r/blob/3259e41b7190eaee15b09f23d252e8aaa12b15e7/test/random.py | test/random.py | import torch
from met3r import MET3R
import unittest
class MET3RTest(unittest.TestCase):
    """Smoke test: MET3R on clipped random inputs should land in a known band.

    BUG FIX: the test method was named `random_inputs`, so unittest discovery
    never ran it -- test methods must start with `test_`. The old name is kept
    as an alias for any caller that invoked it directly.
    """

    # NOTE(review): the import above is `from met3r import MET3R`, but the
    # package's __init__ exports `MEt3R`; verify the alias exists.

    def setUp(self):
        self.metric = MET3R().cuda()

    def test_random_inputs(self):
        inputs = torch.randn((10, 2, 3, 256, 256)).cuda()
        inputs = inputs.clip(-1, 1)
        score, mask = self.metric(inputs)
        self.assertTrue(0.3 <= score <= 0.35)

    # Backward-compatible alias for the misnamed original method.
    random_inputs = test_random_inputs
if __name__ == '__main__':
unittest.main() | python | MIT | 3259e41b7190eaee15b09f23d252e8aaa12b15e7 | 2026-01-05T07:13:39.862185Z | false |
mohammadasim98/met3r | https://github.com/mohammadasim98/met3r/blob/3259e41b7190eaee15b09f23d252e8aaa12b15e7/met3r/path_to_mast3r.py | met3r/path_to_mast3r.py | # Copyright (C) 2024-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
#
# --------------------------------------------------------
# mast3r submodule import
# --------------------------------------------------------
import sys
import os.path as path

# Side effect at import time: put the local mast3r checkout at the front of
# sys.path so `import mast3r` resolves to the git submodule.
HERE_PATH = path.normpath(path.dirname(__file__))
MASt3R_REPO_PATH = path.normpath(path.join(HERE_PATH, '../mast3r'))
MASt3R_LIB_PATH = path.join(MASt3R_REPO_PATH, 'mast3r')

# The inner library directory only exists once the submodule has been cloned.
if not path.isdir(MASt3R_LIB_PATH):
    raise ImportError(f"mast3r is not initialized, could not find: {MASt3R_LIB_PATH}.\n "
                      "Did you forget to run 'git submodule update --init --recursive' ?")
sys.path.insert(0, MASt3R_REPO_PATH)
mohammadasim98/met3r | https://github.com/mohammadasim98/met3r/blob/3259e41b7190eaee15b09f23d252e8aaa12b15e7/met3r/path_to_dust3r.py | met3r/path_to_dust3r.py | # Copyright (C) 2024-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
#
# --------------------------------------------------------
# dust3r submodule import
# --------------------------------------------------------
import sys
import os.path as path

# Side effect at import time: put the local dust3r checkout at the front of
# sys.path so `import dust3r` resolves to the git submodule.
HERE_PATH = path.normpath(path.dirname(__file__))
DUSt3R_REPO_PATH = path.normpath(path.join(HERE_PATH, '../mast3r/dust3r'))
DUSt3R_LIB_PATH = path.join(DUSt3R_REPO_PATH, 'dust3r')

# The inner library directory only exists once the submodule has been cloned.
if not path.isdir(DUSt3R_LIB_PATH):
    raise ImportError(f"dust3r is not initialized, could not find: {DUSt3R_LIB_PATH}.\n "
                      "Did you forget to run 'git submodule update --init --recursive' ?")
sys.path.insert(0, DUSt3R_REPO_PATH)
mohammadasim98/met3r | https://github.com/mohammadasim98/met3r/blob/3259e41b7190eaee15b09f23d252e8aaa12b15e7/met3r/__init__.py | met3r/__init__.py | from .met3r import MEt3R | python | MIT | 3259e41b7190eaee15b09f23d252e8aaa12b15e7 | 2026-01-05T07:13:39.862185Z | false |
mohammadasim98/met3r | https://github.com/mohammadasim98/met3r/blob/3259e41b7190eaee15b09f23d252e8aaa12b15e7/met3r/met3r.py | met3r/met3r.py |
import sys
import os
import os.path as path
from typing import Literal, Optional, Union
import torch
from torch import Tensor
from torch.nn import Identity, functional as F
from pathlib import Path
from torch.nn import Module
from jaxtyping import Float, Bool
from typing import Union, Tuple
from einops import rearrange, repeat
from torchvision.models.optical_flow import raft_large
from torchmetrics.functional.image import structural_similarity_index_measure
# Load Pytorch3D
from pytorch3d.structures import Pointclouds
from pytorch3d.renderer import (
FoVPerspectiveCameras,
PerspectiveCameras,
PointsRasterizationSettings,
PointsRenderer,
PointsRasterizer,
AlphaCompositor,
)
from lpips import LPIPS
# Side effect at import time: make the bundled mast3r/dust3r checkouts
# importable by pushing their repo roots onto sys.path.
HERE_PATH = path.normpath(path.dirname(__file__))
MASt3R_REPO_PATH = path.normpath(path.join(HERE_PATH, '../mast3r'))
DUSt3R_REPO_PATH = path.normpath(path.join(HERE_PATH, '../mast3r/dust3r'))
MASt3R_LIB_PATH = path.join(MASt3R_REPO_PATH, 'mast3r')
DUSt3R_LIB_PATH = path.join(DUSt3R_REPO_PATH, 'dust3r')

# The inner library directories only exist once the submodules are cloned.
if not (path.isdir(MASt3R_LIB_PATH) and path.isdir(DUSt3R_LIB_PATH)):
    raise ImportError(f"mast3r and dust3r is not initialized, could not find: {MASt3R_LIB_PATH}.\n "
                      "Did you forget to run 'git submodule update --init --recursive' ?")
sys.path.insert(0, MASt3R_REPO_PATH)
sys.path.insert(0, DUSt3R_REPO_PATH)
from dust3r.utils.geometry import xy_grid
def freeze_model(m: Module) -> None:
    """Disable gradients for every parameter of *m* and switch it to eval mode."""
    for p in m.parameters():
        p.requires_grad = False
    m.eval()
def convert_to_buffer(module: torch.nn.Module, persistent: bool = True):
    """Recursively replace every parameter of *module* (and re-register every
    buffer) with a detached, cloned buffer of the requested persistence."""
    # Depth-first: convert children before this module's own tensors.
    for _, child in list(module.named_children()):
        convert_to_buffer(child, persistent)
    own_tensors = list(module.named_parameters(recurse=False)) + list(module.named_buffers(recurse=False))
    for name, tensor in own_tensors:
        detached = tensor.detach().clone()
        delattr(module, name)
        module.register_buffer(name, detached, persistent=persistent)
# Hugging Face checkpoint ids used to instantiate each warping backbone.
backbone_to_weights = {
    "mast3r": "naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric",
    "dust3r": "naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt"
}
class MEt3R(Module):
    def __init__(
        self,
        img_size: Optional[int] = 256,
        use_norm: Optional[bool]=True,
        backbone: Literal["mast3r", "dust3r", "raft"] = "mast3r",
        feature_backbone: Optional[Literal["dino16", "dinov2", "maskclip", "vit", "clip", "resnet50"]] = "dino16",
        feature_backbone_weights: Optional[Union[str, Path]] = "mhamilton723/FeatUp",
        upsampler: Optional[Literal["featup", "nearest", "bilinear", "bicubic"]] = "featup",
        distance: Literal["cosine", "lpips", "rmse", "psnr", "mse", "ssim"] = "cosine",
        freeze: bool=True,
        rasterizer_kwargs: dict = {}
    ) -> None:
        """Initialize MET3R
        Args:
            img_size (int, optional): Image size for rasterization. Set to None to allow for rasterization with the input resolution on the fly. Defaults to 224.
            use_norm (bool, optional): Whether to use norm layers in FeatUp. Refer to https://github.com/mhamilton723/FeatUp?tab=readme-ov-file#using-pretrained-upsamplers. Defaults to True.
            feature_backbone (str, optional): Feature backbone for FeatUp. Select from ["dino16", "dinov2", "maskclip", "vit", "clip", "resnet50"]. Defaults to "dino16".
            feature_backbone_weights (str | Path, optional): Weight path for FeatUp upsampler. Defaults to "mhamilton723/FeatUp".
            upsampler (str, optional): Set upsampling types. Defaults to "featup".
            distance (str): Select which distance to compute. Default to "cosine" for computing feature dissimilarity.
            freeze (bool, optional): Set whether to freeze the model. Defaults to True.
            rasterizer_kwargs (dict): Additional argument for point cloud render from PyTorch3D. Default to an empty dict.
        """
        # NOTE(review): `rasterizer_kwargs` has a mutable default ({}); safe
        # only because it is never mutated here.
        super().__init__()
        self.img_size = img_size
        self.upsampler = upsampler
        self.backbone = backbone
        self.distance = distance
        if upsampler == "featup" and "FeatUp" not in feature_backbone_weights:
            raise ValueError("Need to specify the correct weight path on huggingface for using `upsampler=\"featup\"`. Set `feature_backbone_weights=\"mhamilton723/FeatUp\"`")
        # Feature extractor is only needed for the cosine (feature-space) distance.
        if distance == "cosine":
            if "FeatUp" in feature_backbone_weights:
                # Load featup
                from featup.util import norm, unnorm
                self.norm = norm
                if feature_backbone not in ["dino16", "dinov2", "maskclip", "vit", "clip", "resnet50"]:
                    raise ValueError("Provide `feature_backone` is not implemented for `FeatUp`. Please select from [\"dino16\", \"dinov2\", \"maskclip\", \"vit\", \"clip\", \"resnet50\"] in conjunction with `feature_backbone_weights=\"mhamilton723/FeatUp\"`")
                if use_norm is None:
                    raise ValueError("When using `FeatUp`, specify `use_norm` as either `True` or `False`. Currently it is set to `None`")
                featup = torch.hub.load(feature_backbone_weights, feature_backbone, use_norm=use_norm)
                self.feature_model = featup.model
                if upsampler == "featup":
                    self.upsampler_model = featup.upsampler
                    if freeze:
                        # Freeze and store weights as (non-persistent) buffers.
                        freeze_model(self.upsampler_model)
                        convert_to_buffer(self.upsampler_model, persistent=False)
            else:
                self.norm = Identity()
                self.feature_model = torch.hub.load(feature_backbone_weights, feature_backbone)
            if freeze:
                freeze_model(self.feature_model)
                convert_to_buffer(self.feature_model, persistent=False)
        if backbone == "mast3r":
            from mast3r.model import AsymmetricMASt3R
            self.backbone_model = AsymmetricMASt3R.from_pretrained(backbone_to_weights[backbone])
        elif backbone == "dust3r":
            from dust3r.model import AsymmetricCroCo3DStereo
            self.backbone_model = AsymmetricCroCo3DStereo.from_pretrained(backbone_to_weights[backbone])
        elif backbone == "raft":
            self.backbone_model = raft_large(pretrained=True, progress=False)
        else:
            raise NotImplementedError("Specificed backbone for warping is not available. Please select from ['mast3r', 'dust3r', 'raft']")
        if freeze:
            freeze_model(self.backbone_model)
            convert_to_buffer(self.backbone_model, persistent=False)
        # Point rasterization is only used by the 3D (mast3r/dust3r) backbones.
        if backbone in ["mast3r", "dust3r"]:
            if self.img_size is not None:
                self.set_rasterizer(
                    image_size=img_size,
                    points_per_pixel=10,
                    bin_size=0,
                    **rasterizer_kwargs
                )
            self.compositor = AlphaCompositor()
        if distance == "lpips":
            self.lpips = LPIPS(spatial=True)
def _distance(self, inp1: Tensor, inp2: Tensor, mask: Optional[Tensor]=None, eps: float=1e-5):
if self.distance == "cosine":
# Get feature dissimilarity score map
score_map = 1 - (inp1 * inp2).sum(1) / (torch.linalg.norm(inp1, dim=1) * torch.linalg.norm(inp2, dim=1) + eps)
score_map = score_map[:, None]
elif self.distance == "mse":
score_map = ((inp1 - inp2)**2).mean(1, keepdim=True)
elif self.distance == "psnr":
score_map = 20 * torch.log10(255.0 / (torch.sqrt(((inp1 - inp2)**2)).mean(1, keepdim=True) + eps))
elif self.distance == "rmse":
score_map = ((inp1 - inp2)**2).mean(1, keepdim=True)**0.5
elif self.distance == "lpips":
score_map = self.lpips(2 * inp1 - 1, 2 * inp2 - 1)
score_map = score_map[:, None]
elif self.distance == "ssim":
_, score_map = structural_similarity_index_measure(inp1, inp2, return_full_image=True)
print(score_map.shape)
print(mask.shape)
result = [score_map[:, 0]]
if mask is not None:
# Weighted averate of score map with computed mask
weighted = (score_map * mask[:, None]).sum(-1).sum(-1) / (mask[:, None].sum(-1).sum(-1) + eps)
result.append(weighted.mean(1))
return tuple(result)
def _interpolate(self, inp1: Tensor, inp2: Tensor):
if self.upsampler == "featup":
feat = self.upsampler_model(inp1, inp2)
# Important for specific backbone which may not return with correct dimensions
feat = F.interpolate(feat, (inp2.shape[-2:]), mode="bilinear")
else:
feat = F.interpolate(inp1, (inp2.shape[-2:]), mode=self.upsampler)
return feat
def _get_features(self, images):
return self.feature_model(self.norm(images))
    def set_rasterizer(
        self,
        image_size,
        points_per_pixel=10,
        bin_size=0,
        **kwargs
    ) -> None:
        """Create the PyTorch3D point rasterizer used by `render`.

        Args:
            image_size: Output resolution (int or (H, W) tuple).
            points_per_pixel: Number of closest points kept per pixel.
            bin_size: 0 selects PyTorch3D's naive (non-binned) rasterization path.
            **kwargs: Forwarded to `PointsRasterizationSettings`.
        """
        raster_settings = PointsRasterizationSettings(
            image_size=image_size,
            points_per_pixel=points_per_pixel,
            bin_size=bin_size,
            **kwargs
        )
        # Cameras are supplied per-call in `render`, hence cameras=None here.
        self.rasterizer = PointsRasterizer(cameras=None, raster_settings=raster_settings)
    def render(
        self,
        point_clouds: Pointclouds,
        **kwargs
    ) -> Tuple[
        Float[Tensor, "b h w c"],
        Float[Tensor, "b 2 h w n"]
    ]:
        """Adopted from Pytorch3D https://pytorch3d.readthedocs.io/en/latest/modules/renderer/points/renderer.html
        Args:
            point_clouds (pytorch3d.structures.PointCloud): Point cloud object to render
        Returns:
            images (Float[Tensor, "b h w c"]): Rendered images
            zbuf (Float[Tensor, "b k h w n"]): Z-buffers for points per pixel
        """
        # Rasterization is kept in full precision even under autocast.
        with torch.autocast("cuda", enabled=False):
            fragments = self.rasterizer(point_clouds, **kwargs)
        r = self.rasterizer.raster_settings.radius
        dists2 = fragments.dists.permute(0, 3, 1, 2)
        # Alpha weight per contributing point: 1 at the splat center, 0 at radius r.
        weights = 1 - dists2 / (r * r)
        images = self.compositor(
            fragments.idx.long().permute(0, 3, 1, 2),
            weights,
            point_clouds.features_packed().permute(1, 0),
            **kwargs,
        )
        # permute so image comes at the end
        images = images.permute(0, 2, 3, 1)
        return images, fragments.zbuf
    def warp_image(self, image: torch.Tensor, flow: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Warp an input image using an optical flow field and compute a mask for gaps.
        Args:
            image (torch.Tensor): The input image of shape (B, C, H, W), where
                B is the batch size,
                C is the number of channels,
                H is the height,
                W is the width.
            flow (torch.Tensor): The optical flow of shape (B, 2, H, W), where the 2 channels
                correspond to the horizontal and vertical flow components.
        Returns:
            tuple[torch.Tensor, torch.Tensor]: A tuple containing:
                - The warped image of shape (B, C, H, W).
                - A mask of shape (B, 1, H, W) indicating gaps due to warping (1 for valid pixels, 0 for gaps).
        """
        B, C, H, W = image.shape
        # Generate a grid of coordinates for the image
        y, x = torch.meshgrid(
            torch.arange(H, device=image.device, dtype=torch.float32),
            torch.arange(W, device=image.device, dtype=torch.float32),
            indexing="ij"
        )
        # Normalize the grid coordinates to the range [-1, 1]
        x = x / (W - 1) * 2 - 1
        y = y / (H - 1) * 2 - 1
        grid = torch.stack((x, y), dim=2).unsqueeze(0)  # Shape: (1, H, W, 2)
        grid = grid.repeat(B, 1, 1, 1)  # Repeat for batch size
        # Normalize flow from pixel space to normalized coordinates
        flow = flow.clone()
        flow[:, 0, :, :] = flow[:, 0, :, :] / (W - 1) * 2  # Normalize horizontal flow
        flow[:, 1, :, :] = flow[:, 1, :, :] / (H - 1) * 2  # Normalize vertical flow
        # Add the flow to the grid (backward warping: output pixel p samples `image` at p + flow(p))
        flow = flow.permute(0, 2, 3, 1)  # Shape: (B, H, W, 2)
        warped_grid = grid + flow
        # Clip grid values to ensure they are within bounds
        warped_grid[..., 0] = torch.clamp(warped_grid[..., 0], -1, 1)
        warped_grid[..., 1] = torch.clamp(warped_grid[..., 1], -1, 1)
        # Use grid_sample to warp the image
        warped_image = F.grid_sample(image, warped_grid, mode="bilinear", padding_mode="border", align_corners=True)
        # Compute a mask for valid pixels: sample an all-ones image with zero
        # padding so out-of-bounds samples fall below the threshold.
        mask = F.grid_sample(
            torch.ones((B, 1, H, W), device=image.device, dtype=image.dtype),
            warped_grid, mode="bilinear", padding_mode="zeros", align_corners=True
        )
        mask = (mask > 0.999).float()  # Threshold to create a binary mask
        return warped_image, mask
    def forward(
        self,
        images: Float[Tensor, "b 2 c h w"],
        return_overlap_mask: bool=False,
        return_score_map: bool=False,
        return_projections: bool=False
    ) -> Tuple[
        float,
        Bool[Tensor, "b h w"] | None,
        Float[Tensor, "b h w"] | None,
        Float[Tensor, "b 2 c h w"] | None
    ]:
        """Forward function to compute MET3R
        Args:
            images (Float[Tensor, "b 2 c h w"]): Normalized input image pairs with values ranging in [-1, 1],
            return_overlap_mask (bool, False): Return 2D map overlapping mask
            return_score_map (bool, False): Return 2D map of feature dissimlarity (Unweighted)
            return_projections (bool, False): Return projected feature maps
        Return:
            score (Float[Tensor, "b"]): MET3R score which consists of weighted mean of feature dissimlarity
            mask (bool[Tensor, "b c h w"], optional): Overlapping mask
            feat_dissim_maps (bool[Tensor, "b h w"], optional): Feature dissimilarity score map
            proj_feats (bool[Tensor, "b h w c"], optional): Projected and rendered features
        """
        *_, h, w = images.shape
        # Set rasterization settings on the fly based on input resolution
        if self.img_size is None:
            raster_settings = PointsRasterizationSettings(
                image_size=(h, w),
                radius = 0.01,
                points_per_pixel = 10,
                bin_size=0
            )
            self.rasterizer = PointsRasterizer(cameras=None, raster_settings=raster_settings)
        b, k, *_ = images.shape
        images = rearrange(images, "b k c h w -> (b k) c h w")
        # Map [-1, 1] inputs to [0, 1] for the feature extractor.
        images = (images + 1) / 2
        if self.distance == "cosine":
            # NOTE: Compute features
            lr_feat = self._get_features(images)
            # NOTE: Transform feature to higher resolution either using `interpolate` or `FeatUp`
            hr_feat = self._interpolate(lr_feat, images)
            # K=2 since we only compare an image pairs
            hr_feat = rearrange(hr_feat, "(b k) ... -> b k ...", k=2)
        images = rearrange(images, "(b k) ... -> b k ...", k=2)
        # Back to [-1, 1] for the warping backbone.
        images = 2 * images - 1
        # NOTE: Apply Backbone MASt3R/DUSt3R/RAFT to warp one view to the other and compute overlap masks
        if self.backbone == "raft":
            flow = self.backbone_model(images[:, 0, ...], images[:, 1, ...])[-1]
            if self.distance == "cosine":
                view1 = hr_feat[:, 0, ...]
                view2 = hr_feat[:, 1, ...]
            else:
                view1 = images[:, 0, ...]
                view2 = images[:, 1, ...]
            warped_view, mask = self.warp_image(view2, flow)
            rendering = torch.stack([view1, warped_view], dim=1)
        else:
            view1 = {"img": images[:, 0, ...], "instance": [""]}
            view2 = {"img": images[:, 1, ...], "instance": [""]}
            pred1, pred2 = self.backbone_model(view1, view2)
            ptmps = torch.stack([pred1["pts3d"], pred2["pts3d_in_other_view"]], dim=1).detach()
            conf = torch.stack([pred1["conf"], pred2["conf"]], dim=1).detach()
            # NOTE: Get canonical point map using the confidences
            confs11 = conf.unsqueeze(-1) - 0.999
            canon = (confs11 * ptmps).sum(1) / confs11.sum(1)
            # Define principal point
            pp = torch.tensor([w /2 , h / 2], device=canon.device)
            # NOTE: Estimating fx and fy for a given canonical point map
            B, H, W, THREE = canon.shape
            assert THREE == 3
            # centered pixel grid
            pixels = xy_grid(W, H, device=canon.device).view(1, -1, 2) - pp.view(-1, 1, 2)  # B,HW,2
            canon = canon.flatten(1, 2)  # (B, HW, 3)
            # direct estimation of focal
            u, v = pixels.unbind(dim=-1)
            x, y, z = canon.unbind(dim=-1)
            fx_votes = (u * z) / x
            fy_votes = (v * z) / y
            # assume square pixels, hence same focal for X and Y
            f_votes = torch.stack((fx_votes.view(B, -1), fy_votes.view(B, -1)), dim=-1)
            focal = torch.nanmedian(f_votes, dim=-2)[0]
            # Normalized focal length
            # NOTE(review): NDC focal is commonly 2*f/size; confirm the `1 +` offset is intended.
            focal[..., 0] = 1 + focal[..., 0]/w
            focal[..., 1] = 1 + focal[..., 1]/h
            focal = repeat(focal, "b c -> (b k) c", k=2)
            # NOTE: Unproject feature on the point cloud
            ptmps = rearrange(ptmps, "b k h w c -> (b k) (h w) c", b=b, k=2)
            if self.distance == "cosine":
                features = rearrange(hr_feat, "b k c h w -> (b k) (h w) c", k=2)
            else:
                images = (images + 1) / 2
                features = rearrange(images, "b k c h w-> (b k) (h w) c", k=2)
            point_cloud = Pointclouds(points=ptmps, features=features)
            # NOTE: Project and Render
            # NOTE(review): flipping the x/y axes (a 180-degree rotation) --
            # presumably converts between camera conventions; confirm.
            R = torch.eye(3)
            R[0, 0] *= -1
            R[1, 1] *= -1
            R = repeat(R, "... -> (b k) ...", b=b, k=2)
            T = torch.zeros((3, ))
            T = repeat(T, "... -> (b k) ...", b=b, k=2)
            # Define Pytorch3D camera for projection
            cameras = PerspectiveCameras(device=ptmps.device, R=R, T=T, focal_length=focal)
            # Render via point rasterizer to get projected features
            with torch.autocast("cuda", enabled=False):
                rendering, zbuf = self.render(point_cloud, cameras=cameras, background_color=[-10000] * features.shape[-1])
            rendering = rearrange(rendering, "(b k) h w c -> b k c h w", b=b, k=2)
            # Compute overlapping mask: background pixels keep the -10000 sentinel fill.
            non_overlap_mask = (rendering == -10000)
            overlap_mask = (1 - non_overlap_mask.float()).prod(2).prod(1)
            # Zero out regions which do not overlap
            rendering[non_overlap_mask] = 0.0
            # Mask for weighted sum
            mask = overlap_mask
            # NOTE: Uncomment for incorporating occlusion masks along with overlap mask
            # zbuf = rearrange(zbuf, "(b k) ... -> b k ...", b=b, k=2)
            # closest_z = zbuf[..., 0]
            # diff = (closest_z[:, 0, ...] - closest_z[:, 1, ...]).abs()
            # mask = (~(diff > 0.5) * (closest_z != -1).prod(1)) * mask
        # NOTE: Compute scores as either feature dissimilarity, RMSE, LPIPS, SSIM, MSE, or PSNR
        score_map, weighted = self._distance(rendering[:, 0, ...], rendering[:, 1, ...], mask=mask)
        outputs = [weighted]
        if return_overlap_mask:
            outputs.append(mask)
        if return_score_map:
            outputs.append(score_map)
        if return_projections:
            outputs.append(rendering)
        return (*outputs, )
| python | MIT | 3259e41b7190eaee15b09f23d252e8aaa12b15e7 | 2026-01-05T07:13:39.862185Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/manifest_cleaner/scripts/stats.py | people/wu_ros_tools/manifest_cleaner/scripts/stats.py | #!/usr/bin/python
import rospy
import xml.dom.minidom
import os
import os.path
import sys
import collections
import operator
from urlparse import urlparse
import httplib
def get_code(url):
    # Return the HTTP status code of a HEAD request to *url*, or None if the
    # connection fails (Python 2 httplib/StandardError).
    x = urlparse(url)
    try:
        conn = httplib.HTTPConnection(x.netloc)
        conn.request("HEAD", x.path)
        return conn.getresponse().status
    except StandardError:
        return None
def get_element(doc, name):
    """Return the first element named *name* in *doc*, or None if absent.

    BUG FIX: the body previously read the module-level global `manifest`
    instead of the `doc` argument, silently ignoring the parameter and
    depending on module state.
    """
    elements = doc.getElementsByTagName(name)
    if len(elements) == 0:
        return None
    element = elements[0]
    return element
def get_text(doc, name):
    """Return the text of the first *name* element in *doc* ('' if missing or empty)."""
    element = get_element(doc, name)
    if element is not None and element.childNodes:
        return element.childNodes[0].data
    return ''
def report(name, rdict):
    # Print a section header, then (value, count) pairs sorted by count
    # ascending (Python 2 print statements / dict.iteritems).
    sorted_report = sorted(rdict.iteritems(), key=operator.itemgetter(1))
    print
    print "=" * 5, name, "="*(40-len(name))
    for (value, count) in sorted_report:
        print value, count
def extract_items(s, split=False):
    """Split a free-form author/license string on ',' and the substring 'and'.

    Drops a leading 'Maintained by' marker (plus the single character that
    follows it) and, when *split* is true, truncates each item at its first '/'.
    """
    marker = 'Maintained by'
    if marker in s:
        start = s.index(marker)
        s = s[:start] + s[start + len(marker) + 1:]
    items = []
    for chunk in s.split(','):
        for piece in chunk.split('and'):
            if split and '/' in piece:
                piece = piece[:piece.index('/')]
            items.append(piece.strip())
    return items
#diff mainpage.dox ../pr2_sith/mainpage.dox | grep '^>'
authors = collections.defaultdict(list)
descriptions = collections.defaultdict(int)
briefs = collections.defaultdict(int)
reviews = collections.defaultdict(int)
licenses = collections.defaultdict(int)
urls = collections.defaultdict(int)
packages = []
stacks = []
if len(sys.argv)<=1 or '-h' in sys.argv:
print "Need to specify a directory to search through as the first parameter"
print " [use the -web flag to ping the address "
print " specified in the URL tag to see if it exists ] "
exit(1)
check_urls = '-web' in sys.argv
# Walk the tree: each directory containing manifest.xml (package) or
# stack.xml (stack) contributes one record to the stats.
for root, subFolders, files in os.walk(sys.argv[1]):
    if 'manifest.xml' in files:
        is_package = True
    elif 'stack.xml' in files:
        is_package = False
    else:
        continue
    package = os.path.basename(root)
    if is_package:
        manifest_xml = open("%s/manifest.xml"%root, 'r').read()
    else:
        manifest_xml = open("%s/stack.xml"%root, 'r').read()
    try:
        manifest = xml.dom.minidom.parseString(manifest_xml)
    except:
        # Unparseable manifest: skip this package/stack entirely.
        continue
    node = {'name': package}
    author = get_text(manifest, 'author')
    for a_name in extract_items(author, True):
        authors[a_name].append(package)
    node['author'] = author
    description_xml = get_element(manifest, 'description')
    if not description_xml:
        node['description'] = None
        node['brief'] = None
    else:
        # 'minimal' means the description is just the package name itself.
        description = get_text(manifest, 'description').strip()
        brief = description_xml.getAttribute('brief').strip()
        node['description'] = 'minimal' if description==package else 'detailed'
        node['brief'] = 'minimal' if brief==package else 'detailed'
        descriptions[ node['description'] ] += 1
        briefs[ node['brief'] ] += 1
    review_xml = get_element(manifest, 'review')
    if review_xml is None:
        review = None
    else:
        review = review_xml.getAttribute('status')
    node[ 'review' ] = review
    reviews[review] += 1
    license = get_text(manifest, 'license')
    node[ 'license' ] = license
    for lic in extract_items(license):
        licenses[lic] += 1
    url = get_text(manifest, 'url')
    if url is not None:
        # With -web, replace the URL by its HTTP status; otherwise just record presence.
        if check_urls:
            url = get_code(url)
        else:
            url = 'specified'
    node[ 'url' ] = url
    urls[url] += 1
    if is_package:
        packages.append(node)
    else:
        stacks.append(node)
# Column width = max over the header and all values (unicode coerced to ascii).
lengths = collections.defaultdict(int)
for d in packages + stacks:
    for a,b in d.iteritems():
        if type(b)==type(u''):
            b = b.encode('ascii', 'replace')
        if len(str(b)) > lengths[a]:
            lengths[a] = len(str(b))
        if len(str(a)) > lengths[a]:
            lengths[a] = len(str(a))
fields = ['name', 'description', 'brief', 'license', 'url', 'review', 'author']
# Print a fixed-width table of stacks, then one of packages (Python 2
# trailing-comma prints suppress the newline between columns).
if len(stacks)>0:
    for field in fields:
        print ("%%-%ds"%lengths[field])%field,
    print
    for field in fields:
        print "=" * lengths[field],
    print
    for d in stacks:
        for field in fields:
            print ("%%-%ds"%lengths[field])%str(d[field]),
        print
    print
if len(packages)>0:
    for field in fields:
        print ("%%-%ds"%lengths[field])%field,
    print
    for field in fields:
        print "=" * lengths[field],
    print
    for d in packages:
        for field in fields:
            val = d[field]
            # Unicode values are coerced to ascii and padded manually.
            if type(val)==type(u''):
                val = val.encode('ascii', 'replace')
                print val,
                n = lengths[field] - len(val)-1
                if n>0:
                    print " "*n,
            else:
                print ("%%-%ds"%lengths[field])%str(val),
        print
# Summary sections: counts per attribute value, then packages per author
# (three package names per row).
report('Descriptions', descriptions)
report('Brief Descriptions', briefs)
report('Reviews', reviews)
report('Licenses', licenses)
report('Urls', urls)
print
name = "Authors"
print "=" * 5, name, "="*(40-len(name))
for a,c in sorted(authors.items()):
    a = a.encode('ascii', 'replace')
    print "%s %4d"%(a.strip(),len(c))
    print '      ',
    for (i,b) in enumerate(c):
        print "%-30s"%b,
        if i%3==2:
            print '\n      ',
    print
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/catkinize_this/scripts/catkinize_this.py | people/wu_ros_tools/catkinize_this/scripts/catkinize_this.py | #!/usr/bin/python
import rospy
from xml.dom import minidom
import subprocess
import os.path
found = {}
def get_root(package):
    """Return the filesystem root of *package* via `rospack find` ('' if unknown)."""
    proc = subprocess.Popen(['rospack', 'find', package],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, _ = proc.communicate()
    return stdout.strip()
def is_catkinized(package, root=None):
    """Classify *package*: True if it has a package.xml (catkin), False if it
    only has a manifest.xml (rosbuild), None if it cannot be located.

    Results are memoized in the module-level `found` cache.
    """
    if package in found:
        return found[package]
    if root is None:
        root = get_root(package)
    if root == '':
        status = None
    elif os.path.exists('%s/package.xml' % root):
        status = True
    elif os.path.exists('%s/manifest.xml' % root):
        status = False
    else:
        status = None
    found[package] = status
    return status
def get_links(name):
    """Return the dependency names declared in a package.xml or manifest.xml.

    Returns [] when the file is missing or the filename is not a recognized
    manifest type. (BUG FIX: the original ended with a bare `None` expression,
    which did nothing, so unrecognized names fell through and returned None
    implicitly -- a latent crash for any caller that iterates the result.)
    """
    if not os.path.exists(name):
        return []
    xmldoc = minidom.parse(name)
    if 'package.xml' in name:
        builds = xmldoc.getElementsByTagName('build_depend')
        runs = xmldoc.getElementsByTagName('run_depend')
        return [item.firstChild.nodeValue for item in builds+runs]
    if 'manifest.xml' in name:
        itemlist = xmldoc.getElementsByTagName('depend')
        return [item.attributes['package'].value for item in itemlist]
    return []
def check_status(package, depth=0, dlimit=0):
    # Recursively print whether *package* (and, down to *dlimit* levels, its
    # dependencies) is catkinized; unknown packages are silently skipped.
    root = get_root(package)
    is_cat = is_catkinized(package, root)
    if is_cat:
        links = get_links('%s/package.xml'%root)
    elif is_cat==False:
        links = get_links('%s/manifest.xml'%root)
    else:
        return
    s = "%s%s"%(" "*depth, package)
    print s, " "*(50-len(s)), "CATKIN" if is_cat else "ROSPACK"
    if depth < dlimit:
        for p2 in links:
            # `found` doubles as a visited set, preventing infinite recursion.
            if p2 not in found:
                check_status(p2, depth+1, dlimit)
import sys
# --- command-line entry point ---
# First pass: pick up an optional --n<depth> flag limiting how deep the
# dependency tree is expanded (default 0 = only the named packages).
limit = 0
for arg in sys.argv[1:]:
    if '--n' in arg:
        limit = int(arg[3:])
# Second pass: report every argument as a package.  NOTE(review): the
# --n flag itself is also passed to check_status here; rospack will not
# find it, so check_status returns without printing — confirm intended.
for arg in sys.argv[1:]:
    check_status(arg, dlimit=limit)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/joy_listener/setup.py | people/wu_ros_tools/joy_listener/setup.py | #!/usr/bin/env python
"""Install configuration for the joy_listener catkin package."""
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

# catkin derives version/author/etc. from package.xml; we only declare
# which Python packages exist and where their sources live.
setup(**generate_distutils_setup(
    packages=['joy_listener'],
    package_dir={'': 'src'},
))
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/joy_listener/src/joy_listener/__init__.py | people/wu_ros_tools/joy_listener/src/joy_listener/__init__.py | import roslib; roslib.load_manifest('joy_listener')
import rospy
from sensor_msgs.msg import Joy
class JoyListener(dict):
    """A dict of {button_index: callable} dispatched from /joy messages.

    Populate like a normal dict; when a mapped button is pressed, its
    callable is invoked with no arguments.  A debounce window of
    *wait_time* seconds suppresses repeated triggers.  Assign a callable
    to ``axes_cb`` to also receive the raw axes tuple of each message.
    """
    def __init__(self, wait_time=1.0, joy_topic='/joy'):
        # Minimum number of seconds between two dispatched button events.
        self.wait_time = wait_time
        self.sub = rospy.Subscriber(joy_topic, Joy, self.joy_cb, queue_size=1)
        # Time of the last dispatched event; epoch start = fire immediately.
        self.last_time = rospy.Time(0)
        # Optional callable invoked with msg.axes on every (non-debounced)
        # message.
        self.axes_cb = None
    def joy_cb(self, msg):
        """Handle one sensor_msgs/Joy message: dispatch at most one
        mapped button callback, then forward the axes if requested."""
        buttons = msg.buttons
        now = rospy.Time.now()
        # Debounce: drop messages arriving within the wait window
        # (this also skips axes_cb for those messages).
        if (now- self.last_time ).to_sec() < self.wait_time:
            return
        # Fire the first pressed mapped button only.  NOTE(review):
        # Python 2 dict iteration order is arbitrary, so which button
        # wins when several are pressed at once is unspecified.
        for button, function in self.iteritems():
            if buttons[button]:
                self.last_time = now
                function()
                break
        if self.axes_cb:
            self.axes_cb(msg.axes)
# Button order as reported by the PS3 joystick driver in Joy.buttons.
PS3_BUTTONS = ['select', 'left_joy', 'right_joy', 'start', 'up', 'right', 'down', 'left', 'l2', 'r2', 'l1', 'r1', 'triangle', 'circle', 'x', 'square', 'ps3']

def PS3(name):
    """Translate a PS3 button name into its Joy.buttons index.

    Raises ValueError for an unknown name (list.index semantics).
    """
    return PS3_BUTTONS.index(name)

# Button order as reported by the Wii remote driver in Joy.buttons.
WII_BUTTONS = ['1', '2', 'a', 'b', '+', '-', 'left', 'right', 'up', 'down', 'home']

def WII(name):
    """Translate a Wii remote button name into its Joy.buttons index.

    Raises ValueError for an unknown name (list.index semantics).
    """
    return WII_BUTTONS.index(name)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/rosbaglive/scripts/play.py | people/wu_ros_tools/rosbaglive/scripts/play.py | #!/usr/bin/python
import rospy
import rosbag
import sys
# F#@K IT WE'LL DO IT LIVE
# Replays a rosbag through live publishers, restamping every message
# with the current wall-clock time so downstream nodes treat the data
# as fresh.  Usage: play.py <file.bag> [-l] [-d<secs>]
#   -l        loop the bag forever
#   -d<secs>  pause between loop iterations (default 0.1)
if __name__=='__main__':
    rospy.init_node('rosbaglive')
    bagfn = None
    should_loop = False
    loop_sleep = 0.1
    # Minimal hand-rolled argument parsing.
    for arg in sys.argv[1:]:
        if ".bag" in arg:
            bagfn = arg
        elif arg=='-l':
            should_loop = True
        elif arg[0:2]=='-d':
            loop_sleep = float(arg[2:])
    if bagfn is None:
        rospy.logerr("No Bag specified!")
        exit(1)
    bag = rosbag.Bag(bagfn)
    pubs = {}
    rospy.loginfo('Start read')
    last = None
    data = []
    # Pre-load the whole bag into memory, grouping messages that share a
    # timestamp: data becomes a list of (time, [(topic, msg), ...]).
    for topic, msg, t in bag.read_messages():
        if topic not in pubs:
            # Latch map-like topics so late subscribers still receive them.
            pub = rospy.Publisher(topic, type(msg), latch=('map' in topic))
            pubs[topic] = pub
        if t!=last:
            data.append( (t, []) )
            last = t
        data[-1][1].append( (topic, msg) )
    rospy.loginfo('Done read')
    start = rospy.Time.now()
    sim_start = None
    while not rospy.is_shutdown():
        for t, msgs in data:
            now = rospy.Time.now()
            if sim_start is None:
                sim_start = t
            else:
                # Sleep just long enough to reproduce the bag's original
                # inter-message timing relative to playback start.
                real_time = now - start
                sim_time = t - sim_start
                if sim_time > real_time:
                    rospy.sleep( sim_time - real_time)
            for (topic, msg) in msgs:
                # Restamp the header (or every transform of a tf message)
                # with "now" so the data appears live to consumers.
                if 'header' in dir(msg):
                    msg.header.stamp = now
                elif 'transforms' in dir(msg):
                    for tf in msg.transforms:
                        tf.header.stamp = now
                pub = pubs[topic]
                pub.publish(msg)
            if rospy.is_shutdown():
                break
        if not should_loop:
            break
        rospy.sleep(loop_sleep)
    # NOTE(review): when looping, `start`/`sim_start` are never reset, so
    # sim_time never exceeds real_time on later passes and replays run
    # unpaced (only loop_sleep applies) — confirm whether that's intended.
    bag.close()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/easy_markers/setup.py | people/wu_ros_tools/easy_markers/setup.py | #!/usr/bin/env python
"""Install configuration for the easy_markers catkin package."""
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

# catkin derives version/author/etc. from package.xml; we only declare
# which Python packages exist and where their sources live.
setup(**generate_distutils_setup(
    packages=['easy_markers'],
    package_dir={'': 'src'},
))
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/easy_markers/scripts/interactive_marker_demo.py | people/wu_ros_tools/easy_markers/scripts/interactive_marker_demo.py | #!/usr/bin/python
import rospy
from easy_markers.interactive import InteractiveGenerator
# Demo: publish two interactive markers with different control sets.
def callback(feedback):
    # Dump the interactive-marker feedback message for demo purposes.
    # NOTE(review): this callback is defined but never registered with
    # either makeMarker call below — presumably it was meant to be
    # passed as the marker callback; confirm against InteractiveGenerator.
    print feedback
if __name__=='__main__':
    rospy.init_node('itest')
    ig = InteractiveGenerator()
    ig.makeMarker(controls=["move_x", "rotate_x"])
    # Second marker offset 1m along x, labeled "X".
    ig.makeMarker(controls=["move_y", "rotate_y"], pose=[1,0,0], description="X")
    rospy.spin()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/people/wu_ros_tools/easy_markers/scripts/std_marker_demo.py | people/wu_ros_tools/easy_markers/scripts/std_marker_demo.py | #!/usr/bin/python
import rospy
from easy_markers.generator import *
# Demo: continuously publish two animated columns of spheres for rviz.
if __name__=='__main__':
    rospy.init_node('some_markers')
    pub = rospy.Publisher('/visualization_marker', Marker)
    gen = MarkerGenerator()
    gen.ns = '/awesome_markers'
    gen.type = Marker.SPHERE_LIST
    gen.scale = [.3]*3
    gen.frame_id = '/base_link'
    while not rospy.is_shutdown():
        # NOTE(review): presumably resets the generator's marker-id
        # counter so the two markers below reuse the same ids each cycle
        # instead of accumulating — confirm in easy_markers.generator.
        gen.counter = 0
        t = rospy.get_time()
        # Red spheres: z position drifts upward with time, wrapping at 5m.
        gen.color = [1,0,0,1]
        m = gen.marker(points= [(0, i, (i+t)%5.0) for i in range(10)])
        pub.publish(m)
        # Green spheres: z position drifts downward, wrapping at 5m.
        gen.color = [0,1,0,1]
        m = gen.marker(points= [(0, i, (i-t)%5.0) for i in range(10)])
        pub.publish(m)
        rospy.sleep(.1)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.