repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-8.31.18.py | from random import shuffle
import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
from scipy.stats.mstats import gmean
import argparse
# must fix: module-level knobs; several are overwritten by the CLI flags parsed below
MAX_NB_WORDS = 140000              # cap on vocabulary indices consulted for pretrained embeddings
EMBEDDING_DIM = 100                # KazumaCharEmbedding vector size
MAX_SEQUENCE_LENGTH = 10           # tokens per padded name
MARGIN=10                          # margin for triplet_loss ('our-loss')
ALPHA=45                           # angle parameter for angular_loss (see note at its definition)
DEBUG = False
DEBUG_DATA_LENGTH = 100            # sample size when DEBUG is on
DEBUG_ANN = False                  # if True, only exercise ANN mining and exit
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None                 # selected from the --loss_function flag below
TRAIN_NEIGHBOR_LEN=20              # ANN neighbors per query when mining train triplets
TEST_NEIGHBOR_LEN=20               # ANN neighbors per query when mining test triplets
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3                     # overwritten from --num_layers
USE_L2_NORM = False
filepath="weights.best.hdf5"       # default checkpoint path; overwritten by --model
output_file_name_for_hpo = "val_dict_list.json"
def f1score(positive, negative):
    """Compute an F1 score over triplet distance predictions.

    A triplet counts as a true positive when the anchor-positive distance is
    no greater than the anchor-negative distance; otherwise it counts as BOTH
    a false negative and a false positive (the pair is mis-ranked in each
    direction, matching the original accounting).

    Args:
        positive: sequence of anchor-positive distances.
        negative: sequence of anchor-negative distances (same length).

    Returns:
        float F1 score in [0, 1]; 0.0 for empty input instead of raising
        ZeroDivisionError.

    Fixes: removed the unused, misspelled `fsocre` local, corrected the
    `false_negitive` spelling, and guarded the empty-input division.
    """
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    if denominator == 0:
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Looks up a KazumaCharEmbedding vector for every word in the tokenizer's
    vocabulary and packs them into a (num_words, EMBEDDING_DIM) weight matrix.

    Args:
        tokenizer: a fitted keras Tokenizer whose word_index maps word -> int.

    Returns:
        A non-trainable keras Embedding layer carrying the pretrained weights.
    """
    word_index = tokenizer.word_index
    # +1 because Keras reserves index 0 for padding.
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            # An all-zero vector signals a lookup miss in the embedding table.
            if sum(embedding_vector) == 0:
                # encode to bytes so printing non-ASCII names cannot raise on limited consoles
                print(str("failed to find embedding for:" + word).encode('utf-8'))
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Tokenize and pad the anchor/negative/positive name lists.

    Args:
        texts: dict with 'anchor', 'negative', 'positive' lists of strings.
        tokenizer: fitted keras Tokenizer.

    Returns:
        dict with the same keys, each holding padded integer sequences of
        length MAX_SEQUENCE_LENGTH.
    """
    padded = {}
    for part in ('anchor', 'negative', 'positive'):
        token_ids = tokenizer.texts_to_sequences(texts[part])
        padded[part] = pad_sequences(token_ids, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Read one entity per line (trailing newline retained) from a UTF-8 file."""
    with open(filepath, 'r', encoding='utf8') as fl:
        return [line for line in fl]
def read_file(file_path):
    """Parse a pipe-delimited "anchor|positive|negative" file into parallel lists.

    In DEBUG mode stops after DEBUG_DATA_LENGTH lines.

    Args:
        file_path: path to the triplet file.

    Returns:
        dict with 'anchor', 'positive', 'negative' string lists.

    Fix: the file handle was never closed; use a context manager.
    """
    texts = {'anchor':[], 'negative':[], 'positive':[]}
    i = 0
    with open(file_path, 'r', encoding='utf8') as fl:
        for line in fl:
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            i += 1
            # cap the sample when debugging
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split entities into (train, test) lists.

    In DEBUG mode takes a fixed prefix; otherwise shuffles in place first.

    Fix: when the computed holdout size was 0, the original
    `ents[:-0], ents[-0:]` returned an empty train set and the FULL list as
    test. Slicing by an explicit cut point degenerates to
    (everything, nothing) instead.
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    cut = len(ents) - num_validation_samples
    return ents[:cut], ents[cut:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(0.2)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    # Contrastive-style objective ('our-loss'): pull the anchor-positive
    # distance toward zero and push the anchor-negative distance out to at
    # least MARGIN; the hinge zeroes the term once the negative is far enough.
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(K.maximum(margin - y_pred[:,1,0], K.constant(0))))
def triplet_tanh_loss(y_true, y_pred):
    # Bounded variant: tanh squashes both distance terms into [0, 1).
    return K.mean(K.tanh(y_pred[:,0,0]) + (K.constant(1) - K.tanh(y_pred[:,1,0])))
def triplet_tanh_pn_loss(y_true, y_pred):
    # Bounded variant that also separates the positive from the negative:
    # the anchor-negative and positive-negative terms are averaged.
    return K.mean(K.tanh(y_pred[:,0,0]) +
                  ((K.constant(1) - K.tanh(y_pred[:,1,0])) +
                   (K.constant(1) - K.tanh(y_pred[:,2,0]))) / K.constant(2));
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    a_p_distance = y_pred[:,0,0]   # anchor-positive distance
    a_n_distance = y_pred[:,1,0]   # anchor-negative distance
    p_n_distance = y_pred[:,2,0]   # positive-negative distance
    # phi: relative ranking term; psi: absolute pull on the positive pair.
    phi = a_p_distance - ((a_n_distance + p_n_distance) / K.constant(2)) + margin
    psi = a_p_distance - threshold
    # NOTE(review): unlike the other losses here this returns a per-sample
    # tensor with no K.mean reduction -- confirm the framework's implicit
    # averaging is the intended behavior.
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    # Fraction of triplets where the positive is closer than the negative.
    # NOTE(review): K.mean over a boolean comparison; assumes the backend
    # casts bool to float -- confirm for the backend in use.
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # Unit-normalize embeddings along the feature axis.
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    # Batchwise euclidean distance between two embedding tensors; the inner
    # value is clamped at K.epsilon() so the sqrt gradient stays finite when
    # the two vectors coincide.
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    # Distance from the negative to the midpoint of anchor and positive
    # (the "n-c" term consumed by angular_loss).
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    # Anchor-positive euclidean distance; x_n is accepted but unused so the
    # two angular Lambdas can share the same three-tensor input signature.
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an entity->aliases map into indexable structures for Annoy.

    Args:
        entity2same: dict mapping an anchor name to a list of same-entity names.

    Returns:
        (unique_text, entity2index): the flat list of all names in insertion
        order, and a map from each name to its position in that list.
    """
    unique_text = []
    entity2index = {}
    for anchor, aliases in entity2same.items():
        for name in [anchor] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_semi_hard_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine semi-hard triplets using an Annoy nearest-neighbor index.

    Embeds every unique name with `model`, indexes the embeddings in Annoy,
    and for each (anchor, positive) pair keeps neighbors of the positive that
    are farther from the anchor than the positive itself ("semi-hard").

    Args:
        model: model mapping padded sequences to embedding vectors.
        sequences: padded sequences for all of unique_text, aligned by index.
        entity2unique: name -> index into unique_text/sequences.
        entity2same: anchor name -> list of same-entity names.
        unique_text: flat list of all unique names.
        test: if True use TEST_NEIGHBOR_LEN neighbors, else TRAIN_NEIGHBOR_LEN.

    Returns:
        dict with parallel 'anchor'/'positive'/'negative' name lists.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)  # deterministic index construction
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    triplets = {}
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        expected_text = set(entity2same[key])
        expected_ids = [entity2unique[i] for i in expected_text]
        for positive in expected_text:
            k = entity2unique[positive]
            nearest = t.get_nns_by_vector(predictions[k], NNlen)
            dist_k = t.get_distance(index, k)   # anchor-to-this-positive distance
            semi_hards = []
            for n in nearest:
                # skip the anchor itself, any true match, and this positive
                if n == index or n in expected_ids or n == k:
                    continue
                n_dist = t.get_distance(index, n)
                # semi-hard: negative farther from the anchor than the positive
                if n_dist > dist_k:
                    semi_hards.append(unique_text[n])
            # shuffle(semi_hards)
            # semi_hards = semi_hards[0:20]
            for i in semi_hards:
                triplets['anchor'].append(key)
                triplets['positive'].append(unique_text[k])
                triplets['negative'].append(i)
    return triplets
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine training triplets and match statistics with an Annoy index.

    Embeds every unique name with `model`, builds a euclidean Annoy index,
    then for each anchor queries its NNlen nearest neighbors: retrieved
    neighbors in the expected alias set are positives, the rest negatives.

    Returns:
        test=True: the recall-style match ratio only.
        test=False: (triplets dict, match ratio).

    Side effects:
        Prints distance statistics and writes {'accuracy', 'steps'} as JSON
        to output_file_name_for_hpo for the HPO driver.

    NOTE(review): statistics.stdev raises on fewer than two samples, and
    `ann_accuracy / total` divides by zero when no positive/negative pair was
    retrieved -- confirm inputs always produce both.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)  # deterministic index construction
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    precise = 0
    triplets = {}
    closest_positive_counts = []
    pos_distances = []
    neg_distances = []
    all_pos_distances = []
    all_neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # ANN-level accuracy: how often a retrieved positive is closer than a retrieved negative
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # find the closest retrieved negative for the precision-style stats
        min_neg_distance = 1000000
        for i in negatives:
            dist_neg = t.get_distance(index, entity2unique[i])
            all_neg_distances.append(dist_neg)
            if dist_neg < min_neg_distance:
                min_neg_distance = dist_neg
        for j in expected_text:
            dist_pos = t.get_distance(index, entity2unique[j])
            all_pos_distances.append(dist_pos)
        # count retrieved positives that beat every retrieved negative
        closest_pos_count = 0
        for p in overlap:
            dist_pos = t.get_distance(index, entity2unique[p])
            if dist_pos < min_neg_distance:
                closest_pos_count+=1
        if closest_pos_count > 0:
            precise+=1
        closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
        # emit every (anchor, expected-positive, retrieved-negative) combination
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
    print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
    print("max all positive distance:" + str(max(all_pos_distances)))
    print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
    print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
    print("max all neg distance:" + str(max(all_neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    print("Precision at 1: " + str(precise / len(entity2same)))
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w', encoding='utf8') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity lines into a map of canonical name -> aliases.

    Uses NameDataCleanser for people (requires at least 4 name variants per
    entity) or CompanyDataCleanser (at least 2); rows yielding fewer variants
    are dropped.

    Returns:
        dict mapping the first cleansed name (anchor) to the remaining aliases.
    """
    if people:
        num_names = 4
        generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    else:
        generator = CompanyDataCleanser(limit_pairs)
        num_names = 2
    entity2same = {}
    for entity in entities:
        ret = generator.cleanse_data(entity)
        # first cleansed name becomes the anchor; the rest are its aliases
        if ret and len(ret) >= num_names:
            entity2same[ret[0]] = ret[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap the (frozen) embedding layer in a model that flattens its output,
    giving a raw character-embedding representation for ANN bootstrapping."""
    return Sequential([embedding_layer, Flatten()])
def build_model(embedder):
    """Assemble the triplet-siamese network.

    Builds a shared GRU tower on top of `embedder`, wires three weight-tied
    copies for anchor/positive/negative inputs, stacks the pairwise distances
    and compiles with either angular_loss or the globally selected
    LOSS_FUNCTION.

    Returns:
        (model, test_positive_model, test_negative_model, inter_model): the
        trainable model, two models exposing the raw pos/neg distances for
        evaluation, and the single-input embedding model used for ANN mining.
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # NUM_LAYERS stacked sequence-returning GRUs, then a final GRU that
    # collapses the sequence into a single 128-d embedding.
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # three weight-tied towers
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        # angular loss consumes [anchor-positive dist, negative-to-center dist]
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        # NOTE(review): output_shape is declared (3, 1) but only two tensors
        # are stacked on this branch -- confirm the declared shape is benign.
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # third slot: positive-negative distance, used by the 3-term losses
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    # side models for evaluation and for re-mining triplets between epochs
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# ---- Script entry: parse flags, mine triplets with ANN, train, evaluate ----
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
    help='sample size for debug run')
parser.add_argument('--margin', type=int,
    help='margin')
parser.add_argument('--loss_function', type=str,
    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
args = parser.parse_args()
# checkpoint weights are written to the file named by --model
filepath = args.model
# map the loss-function flag onto the corresponding objective above
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
# NOTE(review): --margin is parsed but never assigned to MARGIN, so the flag
# currently has no effect on the loss.
print('Margin:' + str(MARGIN))
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
# build_model adds one final GRU on top of NUM_LAYERS stacked ones
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(str(train).encode('utf-8'))
print("TEST")
print(str(test).encode('utf-8'))
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(str(entity2same_train).encode('utf-8'))
print(str(entity2same_test).encode('utf-8'))
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
# fit on train + test together so both splits share one vocabulary
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    # exercise ANN mining on raw embeddings only, then quit
    triplets = generate_triplets_from_ANN(embedder_model, sequences, entity2unique, entity2same_train, unique_text, True)
    print(len(triplets['anchor']))
    sys.exit()
# mine the fixed test triplets once, from the raw (untrained) embeddings
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
counter = 0
current_model = embedder_model
prev_match_stats = 0
train_data, train_match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
# labels are ignored by the custom losses; random placeholders satisfy Keras
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 22,999 | 35.624204 | 163 | py |
fuzzyJoiner | fuzzyJoiner-master/old/seq2seqTriplet.py | '''Sequence to sequence example in Keras (character-level).
This script demonstrates how to implement a basic character-level
sequence-to-sequence model. We apply it to translating
short English sentences into short French sentences,
character-by-character. Note that it is fairly unusual to
do character-level machine translation, as word-level
models are more common in this domain.
# Summary of the algorithm
- We start with input sequences from a domain (e.g. English sentences)
and corresponding target sequences from another domain
(e.g. French sentences).
- An encoder LSTM turns input sequences to 2 state vectors
(we keep the last LSTM state and discard the outputs).
- A decoder LSTM is trained to turn the target sequences into
the same sequence but offset by one timestep in the future,
a training process called "teacher forcing" in this context.
It uses as initial state the state vectors from the encoder.
Effectively, the decoder learns to generate `targets[t+1...]`
given `targets[...t]`, conditioned on the input sequence.
- In inference mode, when we want to decode unknown input sequences, we:
- Encode the input sequence into state vectors
- Start with a target sequence of size 1
(just the start-of-sequence character)
- Feed the state vectors and 1-char target sequence
to the decoder to produce predictions for the next character
- Sample the next character using these predictions
(we simply use argmax).
- Append the sampled character to the target sequence
- Repeat until we generate the end-of-sequence character or we
hit the character limit.
# Data download
English to French sentence pairs.
http://www.manythings.org/anki/fra-eng.zip
Lots of neat sentence pairs datasets can be found at:
http://www.manythings.org/anki/
# References
- Sequence to Sequence Learning with Neural Networks
https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
RNN Encoder-Decoder for Statistical Machine Translation
https://arxiv.org/abs/1406.1078
'''
from __future__ import print_function
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
from keras.callbacks import ModelCheckpoint, EarlyStopping
# ---- Data preparation: vectorize pipe-delimited name pairs at char level ----
batch_size = 64 # Batch size for training.
epochs = 100 # Number of epochs to train for.
latent_dim = 256 # Latent dimensionality of the encoding space.
num_samples = 100000 # Number of samples to train on.
# Path to the data txt file on disk.
data_path = 'tripletTranslate'
# Vectorize the data.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
    lines = f.read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
    # each line is "input_name|canonical_name"
    arr = line.split('|')
    input_text = arr[0]
    target_text = arr[1]
    arr = input_text.split()
    # data quality issues. In the canonical name, the first name had better not be just a letter or empty
    # NOTE(review): arr[0] raises IndexError when input_text is empty/whitespace -- confirm the data never contains such lines.
    if arr[0].strip()=='' or len(arr[0].strip()) <= 1:
        continue
    # We use "tab" as the "start sequence" character
    # for the targets, and "\n" as "end sequence" character.
    target_text = '\t' + target_text + '\n'
    input_texts.append(input_text)
    target_texts.append(target_text)
    for char in input_text:
        if char not in input_characters:
            input_characters.add(char)
    for char in target_text:
        if char not in target_characters:
            target_characters.add(char)
# sort so character indices are stable across runs
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
input_token_index = dict(
    [(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
    [(char, i) for i, char in enumerate(target_characters)])
# one-hot tensors: (samples, timesteps, vocab)
encoder_input_data = np.zeros(
    (len(input_texts), max_encoder_seq_length, num_encoder_tokens),
    dtype='float32')
decoder_input_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
decoder_target_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    for t, char in enumerate(target_text):
        # decoder_target_data is ahead of decoder_input_data by one timestep
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            # decoder_target_data will be ahead by one timestep
            # and will not include the start character.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.
# ---- Training model: char-level seq2seq encoder/decoder (teacher forcing) ----
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
    initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
filepath = 's2s.h5'
# NOTE(review): the callbacks monitor 'val_accuracy' but the model is compiled
# below with no accuracy metric -- confirm the monitored quantity exists.
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
# check 5 epochs
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
    batch_size=batch_size,
    epochs=epochs,
    callbacks=callbacks_list,
    validation_split=0.2)
# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state
# and a "start of sequence" token as target.
# Output will be the next target token
# 3) Repeat with the current target token and current states
# Define sampling models
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)
# Reverse-lookup token index to decode sequences back to
# something readable.
reverse_input_char_index = dict(
    (i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
    (i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
    """Greedily decode one encoded name sequence into a string.

    Relies on the module-level encoder_model/decoder_model, token index maps
    and num_decoder_tokens/max_decoder_seq_length globals.
    """
    # Encode the input as state vectors.
    states_value = encoder_model.predict(input_seq)
    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    # Populate the first character of target sequence with the start character.
    target_seq[0, 0, target_token_index['\t']] = 1.
    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)
        # Sample a token (greedy argmax over the vocabulary).
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char
        # Exit condition: either hit max length
        # or find stop character.
        if (sampled_char == '\n' or
            len(decoded_sentence) > max_decoder_seq_length):
            stop_condition = True
        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        # Update states
        states_value = [h, c]
    return decoded_sentence
# Demo: decode the first 100 training sequences and print them side by side.
for seq_index in range(100):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = encoder_input_data[seq_index: seq_index + 1]
    decoded_sentence = decode_sequence(input_seq)
    print('-')
    print('Input sentence:', input_texts[seq_index])
print('Decoded sentence:', decoded_sentence) | 9,670 | 39.634454 | 106 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM_hpo.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
import argparse
# must fix: module-level knobs; several are overwritten by HPO/CLI configuration
MAX_NB_WORDS = 140000              # cap on vocabulary indices consulted for pretrained embeddings
EMBEDDING_DIM = 100                # KazumaCharEmbedding vector size
MAX_SEQUENCE_LENGTH = 10           # tokens per padded name
MARGIN=10                          # margin for triplet_loss
ALPHA=45                           # angle parameter for angular_loss (see note at its definition)
DEBUG = False
DEBUG_DATA_LENGTH = 100            # sample size when DEBUG is on
DEBUG_ANN = False                  # if True, only exercise ANN mining and exit
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None                 # selected loss objective
TRAIN_NEIGHBOR_LEN=20              # ANN neighbors per query when mining train triplets
TEST_NEIGHBOR_LEN=20               # ANN neighbors per query when mining test triplets
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
output_file_name_for_hpo = "val_dict_list.json"
def f1score(positive, negative):
    """Compute an F1 score over triplet distance predictions.

    A triplet counts as a true positive when the anchor-positive distance is
    no greater than the anchor-negative distance; otherwise it counts as BOTH
    a false negative and a false positive (the pair is mis-ranked in each
    direction, matching the original accounting).

    Args:
        positive: sequence of anchor-positive distances.
        negative: sequence of anchor-negative distances (same length).

    Returns:
        float F1 score in [0, 1]; 0.0 for empty input instead of raising
        ZeroDivisionError.

    Fixes: removed the unused, misspelled `fsocre` local, corrected the
    `false_negitive` spelling, and guarded the empty-input division.
    """
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    if denominator == 0:
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Looks up a KazumaCharEmbedding vector for every word in the tokenizer's
    vocabulary and packs them into a (num_words, EMBEDDING_DIM) weight matrix.

    Args:
        tokenizer: a fitted keras Tokenizer whose word_index maps word -> int.

    Returns:
        A non-trainable keras Embedding layer carrying the pretrained weights.

    Fix: the lookup-miss print now encodes the word to UTF-8 bytes before
    printing (matching the sibling TripletLossFacenetLSTM script) so a
    non-ASCII name cannot raise UnicodeEncodeError on limited consoles.
    """
    word_index = tokenizer.word_index
    # +1 because Keras reserves index 0 for padding.
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            # An all-zero vector signals a lookup miss in the embedding table.
            if sum(embedding_vector) == 0:
                print(str("failed to find embedding for:" + word).encode('utf-8'))
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Vectorize and pad the anchor/negative/positive text lists.

    :param texts: dict with 'anchor', 'negative' and 'positive' string lists
    :param tokenizer: fitted Keras Tokenizer
    :return: dict with the same keys mapped to padded integer sequences
    """
    sequences = {}
    for part in ('anchor', 'negative', 'positive'):
        encoded = tokenizer.texts_to_sequences(texts[part])
        sequences[part] = pad_sequences(encoded, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Read every line of *filepath* (trailing newlines preserved) into a list."""
    with open(filepath) as handle:
        return [record for record in handle]
def read_file(file_path):
    """Parse a pipe-delimited triplet file of ``anchor|positive|negative`` lines.

    :param file_path: path to the triplet file
    :return: dict with 'anchor', 'positive', 'negative' lists, one entry per line
             (in DEBUG mode reading stops after DEBUG_DATA_LENGTH lines)
    """
    texts = {'anchor':[], 'negative':[], 'positive':[]}
    # context manager closes the handle even on error (the original leaked it)
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            # original counted lines starting at 1 after the append, hence i + 1
            if i + 1 > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Shuffle *entities* in place and split into (train, test) lists.

    In DEBUG mode the data is truncated to DEBUG_DATA_LENGTH and not
    shuffled so runs are repeatable.

    :param entities: list of raw entity strings (shuffled in place)
    :param test_split: fraction of records reserved for the test split
    :return: (train_list, test_list)
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    # slice with an explicit cut point: the original used ents[:-n], which for
    # n == 0 (inputs smaller than 1/test_split) returned an EMPTY training set
    cut = len(ents) - num_validation_samples
    return ents[:cut], ents[cut:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(0.2)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    """Pull the anchor-positive distance to 0 and the anchor-negative toward MARGIN."""
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed variant: minimize tanh(a-p distance), maximize tanh(a-n distance)."""
    return K.mean(K.tanh(y_pred[:,0,0]) + (K.constant(1) - K.tanh(y_pred[:,1,0])))
def triplet_tanh_pn_loss(y_true, y_pred):
    """Like triplet_tanh_loss but also pushes the positive-negative distance apart.

    y_pred[:,2,0] is the positive-negative distance; the two "push" terms are
    averaged so the loss stays on the same scale as triplet_tanh_loss.
    """
    return K.mean(K.tanh(y_pred[:,0,0]) +
                  ((K.constant(1) - K.tanh(y_pred[:,1,0])) +
                   (K.constant(1) - K.tanh(y_pred[:,2,0]))) / K.constant(2));
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Improved triplet loss: hinge on the centroid margin plus a small absolute penalty.

    y_pred[:,0,0] / [:,1,0] / [:,2,0] are the anchor-positive, anchor-negative
    and positive-negative distances respectively.
    """
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    a_p_distance = y_pred[:,0,0]
    a_n_distance = y_pred[:,1,0]
    p_n_distance = y_pred[:,2,0]
    # phi: anchor-positive must undercut the mean of the two negative distances by margin
    phi = a_p_distance - ((a_n_distance + p_n_distance) / K.constant(2)) + margin
    # psi: additionally keep the anchor-positive distance below an absolute threshold
    psi = a_p_distance - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    """Fraction of triplets where the anchor-positive distance beats the anchor-negative."""
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    """L2-normalize *x* along its last axis (used inside a Lambda layer)."""
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    """Row-wise Euclidean distance between two tensors, clamped at K.epsilon()."""
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint (angular loss term)."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive Euclidean distance (the negative input is intentionally unused)."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an anchor -> aliases mapping into a lookup table.

    :param entity2same: dict mapping an anchor string to a list of alias strings
    :return: (unique_text, entity2index) where unique_text lists every anchor
             followed by its aliases in mapping order, and entity2index maps
             each string back to its position in unique_text
    """
    unique_text = []
    entity2index = {}
    for anchor, aliases in entity2same.items():
        for name in [anchor] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine triplets with an Annoy nearest-neighbour index over model embeddings.

    Embeds every name with *model*, indexes the embeddings, then for each
    anchor pairs its known aliases (positives) with near neighbours that are
    not aliases (negatives).  Prints distance statistics and dumps the
    in-index ranking accuracy to ``output_file_name_for_hpo``.

    :return: recall (match ratio) when *test* is true, otherwise
             ``(triplets_dict, match_ratio)``
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    # neighbourhood size differs between mining (train) and scoring (test)
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = expected_text
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # full cross product: every true alias paired with every mined negative
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
    # persist the ranking accuracy so the HPO driver can read it back
    obj = {}
    obj['accuracy'] = accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity strings into an anchor -> aliases mapping.

    Uses the person cleanser (expects at least 4 name variants) when *people*
    is true, otherwise the company cleanser (2 variants).  Entities yielding
    too few variants are dropped.
    """
    if people:
        num_names = 4
        generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    else:
        generator = CompanyDataCleanser(limit_pairs)
        num_names = 2
    entity2same = {}
    for entity in entities:
        cleansed = generator.cleanse_data(entity)
        if cleansed and len(cleansed) >= num_names:
            entity2same[cleansed[0]] = cleansed[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap *embedding_layer* in a Sequential model that flattens its output."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def build_model(embedder):
    """Assemble the triplet-siamese network and its evaluation companions.

    :param embedder: frozen Embedding layer shared by all three inputs
    :return: (model, test_positive_model, test_negative_model, inter_model) —
             the trainable triplet model, two models exposing the a-p and a-n
             distances, and the shared embedding tower for ANN mining
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # NUM_LAYERS stacked GRUs returning sequences, then one final GRU collapsing to a vector
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # one shared tower applied to all three triplet members
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        # angular loss consumes the a-p distance and the negative-to-centroid distance
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # margin-based losses additionally see the positive-negative distance
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# ---- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
args = parser.parse_args()
# map the CLI name onto the actual loss callable used at compile time
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
# --use_l2_norm arrives as a string; accept common truthy spellings
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
# build_model adds one final GRU on top, hence the minus one
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
# mine test triplets once, from the raw (untrained) embedding space
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
counter = 0
current_model = embedder_model
prev_match_stats = 0
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
# labels are dummies: every loss here reads only y_pred, never y_true
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
filepath="weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 19,474 | 35.88447 | 163 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-schroffloss.py | import numpy as np
import tensorflow as tf
import random as random
# import cntk as C
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
# ---- reproducibility: pin every RNG and run TF single-threaded -------------
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# import theano as T
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
# NOTE: module-level configuration; the CLI override block below is commented out,
# so these values are what this variant actually runs with
MAX_NB_WORDS = 140000  # vocabulary cap for the tokenizer / embedding matrix
EMBEDDING_DIM = 100  # dimensionality of the Kazuma character embedding
MAX_SEQUENCE_LENGTH = 10  # token sequences are padded/truncated to this length
MARGIN=1  # margin for the margin-based triplet losses
ALPHA=30  # angle constant for the angular loss
USE_GRU=True  # hidden layers are GRUs (Dense otherwise) — see get_hidden_layer
DEBUG = False  # when True, truncate data to DEBUG_DATA_LENGTH and skip shuffling
DEBUG_DATA_LENGTH = 20000  # number of records kept in DEBUG mode
DEBUG_ANN = False  # when True, only run the ANN sanity check and exit
USE_ANGULAR_LOSS=False  # angular loss branch toggle in build_model
LOSS_FUNCTION=None  # unused in this variant; schroff_triplet_loss is hard-wired
TRAIN_NEIGHBOR_LEN=20  # nearest-neighbour window when mining training triplets
TEST_NEIGHBOR_LEN=20  # nearest-neighbour window when scoring test recall
EMBEDDING_TYPE = 'Kazuma'  # input representation; only Kazuma is wired up here
NUM_LAYERS = 3  # only used by the commented-out get_hidden_layer loop
USE_L2_NORM = True  # l2Norm is applied unconditionally in this variant's build_model
def f1score(positive, negative):
    """Compute an F1 score over paired anchor-positive / anchor-negative distances.

    Index i describes one triplet: ``positive[i]`` is the anchor-positive
    distance, ``negative[i]`` the anchor-negative distance.  A triplet counts
    as a true positive when the positive distance is no larger than the
    negative one; otherwise the ranking is wrong in both directions, so it
    counts as one false positive AND one false negative.

    :param positive: sequence (list or ndarray) of anchor-positive distances
    :param negative: sequence of anchor-negative distances, same length
    :return: F1 score in [0, 1]; 0.0 for empty input
    """
    # guard the empty case explicitly (the original raised ZeroDivisionError);
    # len() is used instead of truthiness so numpy arrays work too
    if len(positive) == 0:
        return 0.0
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negitive + false_positive)
    return fscore
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Every token known to *tokenizer* (up to MAX_NB_WORDS) is looked up in the
    Kazuma embedding and packed into a weight matrix; row 0 stays all-zero for
    padding.  The returned layer is not trainable.
    """
    word_index = tokenizer.word_index
    vocab_size = len(word_index) + 1
    weights = np.zeros((vocab_size, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for token, idx in word_index.items():
        if idx >= MAX_NB_WORDS:
            continue
        vector = kz.emb(token)
        if vector is not None:
            # an all-zero vector means the char embedding had nothing for this token
            if sum(vector) == 0:
                print("failed to find embedding for:" + token)
            weights[idx] = vector
    print("Number of words:" + str(vocab_size))
    return Embedding(vocab_size,
                     EMBEDDING_DIM,
                     weights=[weights],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_sequences(texts, tokenizer):
    """Vectorize and pad the anchor/negative/positive text lists.

    :param texts: dict with 'anchor', 'negative' and 'positive' string lists
    :param tokenizer: fitted Keras Tokenizer
    :return: dict with the same keys mapped to padded integer sequences
    """
    sequences = {}
    for part in ('anchor', 'negative', 'positive'):
        encoded = tokenizer.texts_to_sequences(texts[part])
        sequences[part] = pad_sequences(encoded, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Read every line of *filepath* (trailing newlines preserved) into a list."""
    with open(filepath) as handle:
        return [record for record in handle]
def read_file(file_path):
    """Parse a pipe-delimited triplet file of ``anchor|positive|negative`` lines.

    :param file_path: path to the triplet file
    :return: dict with 'anchor', 'positive', 'negative' lists, one entry per line
             (in DEBUG mode reading stops after DEBUG_DATA_LENGTH lines)
    """
    texts = {'anchor':[], 'negative':[], 'positive':[]}
    # context manager closes the handle even on error (the original leaked it)
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            # original counted lines starting at 1 after the append, hence i + 1
            if i + 1 > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Shuffle *entities* in place and split into (train, test) lists.

    In DEBUG mode the data is truncated to DEBUG_DATA_LENGTH and not
    shuffled so runs are repeatable.

    :param entities: list of raw entity strings (shuffled in place)
    :param test_split: fraction of records reserved for the test split
    :return: (train_list, test_list)
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    # slice with an explicit cut point: the original used ents[:-n], which for
    # n == 0 (inputs smaller than 1/test_split) returned an EMPTY training set
    cut = len(ents) - num_validation_samples
    return ents[:cut], ents[cut:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(T.tensor.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(MARGIN)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    """Pull the anchor-positive distance to 0 and the anchor-negative toward MARGIN."""
    # margin = K.constant(MARGIN)
    # return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
    # return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]) + K.square(margin - y_pred[:,2,0]))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Improved triplet loss: hinge on the centroid margin plus a small absolute penalty.

    y_pred[:,0,0] / [:,1,0] / [:,2,0] are the anchor-positive, anchor-negative
    and positive-negative distances respectively.
    """
    margin = K.constant(MARGIN)
    lambda_p = 0.02
    threshold = 0.1
    a_p_distance = y_pred[:,0,0]
    a_n_distance = y_pred[:,1,0]
    p_n_distance = y_pred[:,2,0]
    # phi: anchor-positive must undercut the mean of the two negative distances by margin
    phi = a_p_distance - ((a_n_distance + p_n_distance) / 2) + margin
    # psi: additionally keep the anchor-positive distance below an absolute threshold
    psi = a_p_distance - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    """Fraction of triplets where the anchor-positive distance beats the anchor-negative."""
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    """L2-normalize *x* along its last axis (used inside a Lambda layer)."""
    return K.l2_normalize(x, axis=-1)
def tanhNorm(x):
    """Rescale *x* so its L2 norm is squashed through tanh (norm -> tanh(norm))."""
    square_sum = K.sum(K.square(x), axis=-1, keepdims=True)
    # clamp at K.epsilon() to avoid division by zero for all-zero rows
    dist = K.sqrt(K.maximum(square_sum, K.epsilon()))
    tanh = K.tanh(dist)
    scale = tanh / dist
    return x * scale
def euclidean_distance(vects):
    """Row-wise Euclidean distance between two tensors, clamped at K.epsilon()."""
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint (angular loss term)."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive Euclidean distance (the negative input is intentionally unused)."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an anchor -> aliases mapping into a lookup table.

    :param entity2same: dict mapping an anchor string to a list of alias strings
    :return: (unique_text, entity2index) where unique_text lists every anchor
             followed by its aliases in mapping order, and entity2index maps
             each string back to its position in unique_text
    """
    unique_text = []
    entity2index = {}
    for anchor, aliases in entity2same.items():
        for name in [anchor] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine triplets with an Annoy nearest-neighbour index over model embeddings.

    Embeds every name with *model*, indexes the embeddings, then for each
    anchor pairs its known aliases (positives) with near neighbours that are
    not aliases (negatives).  Prints distance statistics along the way.

    :return: recall (match ratio) when *test* is true, otherwise
             ``(triplets_dict, match_ratio)``
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    # neighbourhood size differs between mining (train) and scoring (test)
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = expected_text
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # full cross product: every true alias paired with every mined negative
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, limit_pairs=False):
    """Cleanse raw person entries into an anchor -> aliases mapping.

    Entries that do not yield at least four name variants are dropped.
    """
    num_names = 4
    names_generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    entity2same = {}
    for entity in entities:
        cleansed = names_generator.cleanse_data(entity)
        if cleansed and len(cleansed) >= num_names:
            entity2same[cleansed[0]] = cleansed[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap *embedding_layer* in a Sequential model that flattens its output."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def get_hidden_layer(name, net, is_last):
    """Attach one 128-unit ReLU hidden layer (GRU when USE_GRU, else Dense).

    Non-final GRU layers return full sequences so further recurrent layers
    can be stacked on top.
    """
    if not USE_GRU:
        return Dense(128, activation='relu', name=name)(net)
    if is_last:
        return GRU(128, activation='relu', name=name)(net)
    return GRU(128, return_sequences=True, activation='relu', name=name)(net)
def build_model(embedder):
    """Assemble the triplet-siamese network and its evaluation companions.

    :param embedder: frozen Embedding layer shared by all three inputs
    :return: (model, test_positive_model, test_negative_model, inter_model) —
             the trainable triplet model, two models exposing the a-p and a-n
             distances, and the shared embedding tower for ANN mining
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # four hard-coded GRU layers; the configurable get_hidden_layer loop below is disabled
    net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
    net = GRU(128, activation='relu', name='embed3')(net)
    """
    for i in range(0, NUM_LAYERS):
        net = get_hidden_layer('embed' + str(i), net, False)
    net = get_hidden_layer('embed_last', net, True)
    """
    # if USE_L2_NORM:
    net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # one shared tower applied to all three triplet members
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        # this variant hard-wires the FaceNet-style loss instead of LOSS_FUNCTION
        model.compile(optimizer="rmsprop", loss=schroff_triplet_loss, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
help='sample size for debug run')
parser.add_argument('--margin', type=int,
help='margin')
parser.add_argument('--loss_function', type=str,
help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
args = parser.parse_args()
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved_triplet_loss':
LOSS_FUNCTION=improved_triplet_loss
elif args.loss_function == 'modified_loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = true
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
# mine test triplets once, from the raw (untrained) embedding space
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
# iterative mining/training loop; the body updates test_match_stats so the
# condition can stop early (indentation reconstructed — extraction lost it)
while test_match_stats < .9 and counter < num_iter:
    counter += 1
    train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
    print("Match stats:" + str(match_stats))
    number_of_names = len(train_data['anchor'])
    # print(train_data['anchor'])
    print("number of names" + str(number_of_names))
    # labels are dummies: every loss here reads only y_pred, never y_true
    Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
    filepath="weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
    callbacks_list = [checkpoint, early_stop]
    train_seq = get_sequences(train_data, tokenizer)
    # check just for 5 epochs because this gets called many times
    model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
    current_model = inter_model
    # print some statistics on this epoch
    print("training data predictions")
    positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    print("f1score for train is: {}".format(f1score(positives, negatives)))
    print("test data predictions")
    positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    print("f1score for test is: {}".format(f1score(positives, negatives)))
    test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    print("Test stats:" + str(test_match_stats))
| 21,846 | 37.463028 | 167 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-8.29.18.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
from scipy.stats.mstats import gmean
import argparse
# ---------------------------------------------------------------------------
# Global configuration for the triplet-training pipeline.  Several of these
# module-level "constants" are reassigned later from argparse, which is
# presumably what the author's "#must fix" note refers to.
# ---------------------------------------------------------------------------
#must fix
# Tokenizer vocabulary cap and embedding geometry.
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
# Loss hyper-parameters: MARGIN feeds triplet_loss, ALPHA feeds angular_loss.
# NOTE(review): ALPHA=45 looks like degrees, but tf.tan() takes radians -- confirm.
MARGIN=10
ALPHA=45
# Debug switches: DEBUG truncates the dataset to DEBUG_DATA_LENGTH entities;
# DEBUG_ANN exits the script right after one ANN evaluation.
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
# Loss selection; both are overwritten from --loss_function below.
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
# How many approximate nearest neighbours to retrieve per query.
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
# Number of stacked GRU layers (reassigned from --num_layers below).
NUM_LAYERS = 3
USE_L2_NORM = False
# Checkpoint path (reassigned from --model below) and HPO metrics output file.
filepath="weights.best.hdf5"
output_file_name_for_hpo = "val_dict_list.json"
def f1score(positive, negative):
    """Compute an F1-style score from paired triplet distances.

    Each index ``i`` corresponds to one triplet: ``positive[i]`` is the
    anchor-positive distance and ``negative[i]`` the anchor-negative
    distance.  A triplet counts as a true positive when the positive
    distance does not exceed the negative one; otherwise it counts as both
    a false negative and a false positive (the pair is mis-ranked in both
    directions), which reduces F1 to TP / len(positive).

    Fixes vs. original: removed the unused, misspelled ``fsocre`` local,
    corrected the ``false_negitive`` spelling, and guarded the empty-input
    case (previously a ZeroDivisionError).

    :param positive: sequence of anchor-positive distances
    :param negative: sequence of anchor-negative distances (same length)
    :return: F1 score = 2*TP / (2*TP + FN + FP); 0.0 for empty input
    """
    # Empty input would make the denominator zero below.
    if len(positive) == 0:
        return 0.0
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negative + false_positive)
    return fscore
def get_embedding_layer(tokenizer):
"""Build a frozen Keras Embedding layer from Kazuma character embeddings.

Looks up a KazumaCharEmbedding vector for every word in the fitted
tokenizer's vocabulary and packs them into a non-trainable Embedding
layer of shape (num_words, EMBEDDING_DIM).

:param tokenizer: fitted Keras Tokenizer (its word_index is read)
:return: keras.layers.Embedding with trainable=False
"""
word_index = tokenizer.word_index
# Keras word indices start at 1, so row 0 is reserved -- hence the +1.
num_words = len(word_index) + 1
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
print('about to get kz')
kz = KazumaCharEmbedding()
print('got kz')
for word, i in word_index.items():
# Skip indices beyond the vocabulary cap; their rows stay all-zero.
if i >= MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
if embedding_vector is not None:
# An all-zero vector signals a lookup miss; keep it but warn.
if sum(embedding_vector) == 0:
print("failed to find embedding for:" + word)
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
print("Number of words:" + str(num_words))
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
return embedding_layer
def get_sequences(texts, tokenizer):
    """Vectorize the anchor/negative/positive text lists of a triplet dict.

    Every role is tokenized with the shared *tokenizer* and padded (or
    truncated) to MAX_SEQUENCE_LENGTH so the three tensors line up.

    :param texts: dict with 'anchor', 'negative' and 'positive' string lists
    :param tokenizer: fitted Keras Tokenizer
    :return: dict mapping each role to its padded sequence array
    """
    sequences = {}
    # Same processing for each role, in the original's order.
    for role in ('anchor', 'negative', 'positive'):
        tokenized = tokenizer.texts_to_sequences(texts[role])
        sequences[role] = pad_sequences(tokenized, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Read raw entity lines from *filepath*.

    Each line (trailing newline included) becomes one list element.  The
    file is now opened via a context manager so the handle is closed even
    on error -- the original implementation leaked it.

    :param filepath: path to a text file with one entity per line
    :return: list of raw lines
    """
    with open(filepath) as fl:
        return list(fl)
def read_file(file_path):
    """Load pre-built triplets from a pipe-delimited file.

    Each line must contain at least three '|'-separated fields in the
    order anchor|positive|negative.  In DEBUG mode only the first
    DEBUG_DATA_LENGTH + 1 lines are consumed, matching the original
    post-increment check.

    Fix vs. original: the file is opened with a context manager so the
    handle is closed (it was previously leaked).

    :param file_path: path to the triplet file
    :return: dict with 'anchor', 'positive' and 'negative' string lists
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            # Original incremented i after appending, then tested i > limit.
            if i + 1 > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split=0.2):
    """Split entities into (train, test) lists.

    In DEBUG mode a fixed prefix of the data is used without shuffling so
    runs are reproducible; otherwise the input list is shuffled in place
    first.

    Bug fix: the original returned ``ents[:-0] == []`` as the *training*
    set whenever ``int(test_split * len(ents))`` evaluated to 0 (small
    inputs), i.e. everything landed in test.  The split index is now
    computed explicitly, so that case yields (all, []).

    :param entities: list of entity strings (shuffled in place unless DEBUG)
    :param test_split: fraction of the data reserved for the test set
    :return: (train, test) pair of lists
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    cut = len(ents) - num_validation_samples
    return ents[:cut], ents[cut:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(0.2)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
# Contrastive-style variant: pulls d(a,p) toward 0 while pushing d(a,n)
# beyond the global MARGIN.  y_true is ignored.
def triplet_loss(y_true, y_pred):
margin = K.constant(MARGIN)
return K.mean(K.square(y_pred[:,0,0]) + K.square(K.maximum(margin - y_pred[:,1,0], K.constant(0))))
# Bounded variant: tanh squashes both distances so neither term can
# dominate the objective.  y_true is ignored.
def triplet_tanh_loss(y_true, y_pred):
return K.mean(K.tanh(y_pred[:,0,0]) + (K.constant(1) - K.tanh(y_pred[:,1,0])))
# Like triplet_tanh_loss, but also pushes the positive away from the
# negative via the third stacked distance d(p,n).  y_true is ignored.
def triplet_tanh_pn_loss(y_true, y_pred):
return K.mean(K.tanh(y_pred[:,0,0]) +
((K.constant(1) - K.tanh(y_pred[:,1,0])) +
(K.constant(1) - K.tanh(y_pred[:,2,0]))) / K.constant(2));
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
# Stacked distances: y_pred[:,0,0]=d(a,p), y_pred[:,1,0]=d(a,n),
# y_pred[:,2,0]=d(p,n).  phi pulls d(a,p) below the mean of the two
# negative distances minus the margin; psi additionally caps d(a,p) at an
# absolute threshold, weighted by lambda_p.  y_true is ignored.
def improved_loss(y_true, y_pred):
margin = K.constant(1)
lambda_p = K.constant(0.02)
threshold = K.constant(0.1)
a_p_distance = y_pred[:,0,0]
a_n_distance = y_pred[:,1,0]
p_n_distance = y_pred[:,2,0]
phi = a_p_distance - ((a_n_distance + p_n_distance) / K.constant(2)) + margin
psi = a_p_distance - threshold
return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
# Fraction of triplets where the anchor is closer to the positive than to
# the negative -- the natural ranking metric over the stacked distances.
def accuracy(y_true, y_pred):
return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
# L2-normalize along the last axis; wrapped in a Keras Lambda when
# USE_L2_NORM is enabled in build_model.
def l2Norm(x):
return K.l2_normalize(x, axis=-1)
# Batched Euclidean distance between two embedding tensors; the squared sum
# is clamped at K.epsilon() before sqrt to keep gradients finite at zero.
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
# Distance from the negative to the midpoint of anchor and positive -- the
# "n_c" term consumed by angular_loss.
def n_c_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
# Anchor-positive distance; x_n is accepted but unused so this Lambda can
# take the same three-tensor input as n_c_angular_distance.
def a_p_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an entity->aliases mapping into indexable structures.

    Produces a flat list of every surface form (each key first, followed by
    its aliases, in dict order) together with a reverse map from surface
    form to its position in that list.  Positions line up with the rows
    later inserted into the Annoy index.

    :param entity2same: dict mapping an entity string to a list of aliases
    :return: (unique_text, entity2index) pair
    """
    unique_text = []
    entity2index = {}
    for key, aliases in entity2same.items():
        # The key itself is indexed first, then each of its aliases.
        for name in [key] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
"""Mine triplets and retrieval statistics with an Annoy approximate-NN index.

Embeds every unique surface form with *model*, indexes the embeddings in
Annoy (euclidean metric, fixed seed), then for each canonical entity
retrieves its nearest neighbours and compares them with the known
aliases.  Side effects: prints distance statistics and writes
{'accuracy', 'steps'} to output_file_name_for_hpo.

:param model: Keras model mapping padded sequences to embeddings
:param sequences: padded sequences, row-aligned with unique_text
:param entity2unique: surface form -> row index (from build_unique_entities)
:param entity2same: canonical entity -> list of aliases
:param unique_text: flat list of surface forms
:param test: if True return only the match fraction; otherwise return
(triplets dict, match fraction)
"""
predictions = model.predict(sequences)
t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
t.set_seed(123)
for i in range(len(predictions)):
# print(predictions[i])
v = predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
match = 0
no_match = 0
ann_accuracy = 0
total = 0
precise = 0
triplets = {}
closest_positive_counts = []
pos_distances = []
neg_distances = []
all_pos_distances = []
all_neg_distances = []
triplets['anchor'] = []
triplets['positive'] = []
triplets['negative'] = []
# Retrieve more neighbours at test time than at train time.
if test:
NNlen = TEST_NEIGHBOR_LEN
else:
NNlen = TRAIN_NEIGHBOR_LEN
for key in entity2same:
index = entity2unique[key]
nearest = t.get_nns_by_vector(predictions[index], NNlen)
nearest_text = set([unique_text[i] for i in nearest])
expected_text = set(entity2same[key])
# annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
if key in nearest_text:
nearest_text.remove(key)
# print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
overlap = expected_text.intersection(nearest_text)
# collect up some statistics on how well we did on the match
m = len(overlap)
match += m
# since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
# make sure we adjust our estimate of no match appropriately
no_match += min(len(expected_text), NNlen - 1) - m
# sample only the negatives that are true negatives
# that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
# positives = expected_text - nearest_text
positives = overlap
negatives = nearest_text - expected_text
# print(key + str(expected_text) + str(nearest_text))
# Count how often the index already orders (anchor, positive) closer
# than (anchor, negative) -- the "ann_accuracy" statistic.
for i in negatives:
for j in positives:
dist_pos = t.get_distance(index, entity2unique[j])
pos_distances.append(dist_pos)
dist_neg = t.get_distance(index, entity2unique[i])
neg_distances.append(dist_neg)
if dist_pos < dist_neg:
ann_accuracy += 1
total += 1
# print(key + "|" + j + "|" + i)
# print(dist_pos)
# print(dist_neg)
# Precision-at-1 style statistic: does at least one true positive sit
# closer than the closest retrieved negative?
min_neg_distance = 1000000
for i in negatives:
dist_neg = t.get_distance(index, entity2unique[i])
all_neg_distances.append(dist_neg)
if dist_neg < min_neg_distance:
min_neg_distance = dist_neg
for j in expected_text:
dist_pos = t.get_distance(index, entity2unique[j])
all_pos_distances.append(dist_pos)
closest_pos_count = 0
for p in overlap:
dist_pos = t.get_distance(index, entity2unique[p])
if dist_pos < min_neg_distance:
closest_pos_count+=1
if closest_pos_count > 0:
precise+=1
closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
# Emit one triplet per (expected positive, retrieved negative) pair.
for i in negatives:
for j in expected_text:
triplets['anchor'].append(key)
triplets['positive'].append(j)
triplets['negative'].append(i)
print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
print("mean positive distance:" + str(statistics.mean(pos_distances)))
print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
print("max positive distance:" + str(max(pos_distances)))
print("mean neg distance:" + str(statistics.mean(neg_distances)))
print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
print("max neg distance:" + str(max(neg_distances)))
print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
print("max all positive distance:" + str(max(all_pos_distances)))
print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
print("max all neg distance:" + str(max(all_neg_distances)))
print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
print("Precision at 1: " + str(precise / len(entity2same)))
# Persist the accuracy so an external HPO driver can read it.
obj = {}
obj['accuracy'] = ann_accuracy / total
obj['steps'] = 1
with open(output_file_name_for_hpo, 'w') as out:
json.dump(obj, out)
if test:
return match/(match + no_match)
else:
return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
"""Expand raw entity lines into a {canonical name: [aliases]} mapping.

Uses NameDataCleanser for people (4 required name variants) or
CompanyDataCleanser for companies (2 variants).  Entities whose cleansed
output has fewer than the required number of variants are dropped.

:param entities: raw entity strings
:param people: True to use the people cleanser, False for companies
:param limit_pairs: forwarded to the cleanser
:return: dict mapping the first cleansed variant to the remaining ones
"""
if people:
num_names = 4
generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
else:
generator = CompanyDataCleanser(limit_pairs)
num_names = 2
entity2same = {}
for entity in entities:
ret = generator.cleanse_data(entity)
if ret and len(ret) >= num_names:
entity2same[ret[0]] = ret[1:]
return entity2same
# Wrap the frozen embedding layer in a trivial Sequential (embed + flatten).
# Used below as the very first "model" for the initial round of ANN-based
# triplet mining, before any GRU training has happened.
def embedded_representation_model(embedding_layer):
seq = Sequential()
seq.add(embedding_layer)
seq.add(Flatten())
return seq
def build_model(embedder):
"""Assemble the triple-siamese GRU network and its evaluation sub-models.

The shared base model is the frozen embedding followed by NUM_LAYERS
return-sequence GRUs plus one final GRU (optionally L2-normalized).  The
same base is applied to anchor/positive/negative inputs, distances are
computed with Lambda layers and stacked so the loss functions can slice
them as y_pred[:, k, 0].

:param embedder: frozen Embedding layer from get_embedding_layer
:return: (model, test_positive_model, test_negative_model, inter_model)
where the test models expose the raw pos/neg distances and
inter_model maps a single input to its embedding (used for
ANN mining).
"""
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
for i in range(0, NUM_LAYERS):
net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
if USE_L2_NORM:
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
base_model.summary()
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
# Weight sharing: the same base_model embeds all three triplet members.
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
if USE_ANGULAR_LOSS:
# Angular loss consumes [a_p, n_c] instead of the euclidean pair.
n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
stacked_dists = Lambda(
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([a_p, n_c])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
else:
# Third stacked distance d(p,n) is only used by the pn-style losses.
exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
stacked_dists = Lambda(
# lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([positive_dist, negative_dist, exemplar_negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
inter_model = Model(input_anchor, net_anchor)
print("output_shapes")
model.summary()
# print(positive_dist.output_shape)
# print(negative_dist.output_shape)
# print(exemplar_negative_dist)
# print(neg_dist.output_shape)
return model, test_positive_model, test_negative_model, inter_model
# ---------------------------------------------------------------------------
# Script entry: parse CLI flags, build data structures, train once, evaluate.
# NOTE(review): leading indentation appears stripped from this extract; the
# statements after the if/elif headers are presumably their bodies.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
help='sample size for debug run')
parser.add_argument('--margin', type=int,
help='margin')
parser.add_argument('--loss_function', type=str,
help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
args = parser.parse_args()
filepath = args.model
# Map the CLI loss name onto the corresponding loss function above.
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = True
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
# build_model adds one extra non-return-sequence GRU, hence the -1 here.
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
# DEBUG_ANN short-circuits: evaluate the untrained embedding and exit.
if DEBUG_ANN:
generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
counter = 0
current_model = embedder_model
prev_match_stats = 0
# Single mine-train-evaluate pass (unlike the 8.31.18 variant's while loop).
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
# Labels are random dummies: the triplet loss functions ignore y_true.
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 21,235 | 36.061082 | 163 | py |
smt | smt-master/doc/conf.py | #!/usr/bin/env python3
# Sphinx build configuration for the SMT documentation.  Flat config module:
# Sphinx imports it and reads the module-level names below.
# -*- coding: utf-8 -*-
#
# SMT documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 6 19:36:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Version string is taken from the installed smt package so docs and code
# cannot drift apart.
from smt import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"matplotlib.sphinxext.plot_directive",
"numpydoc",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "SMT"
copyright = "2017, John Hwang"
author = "John Hwang, Mohamed Amine Bouhlel, Remi Lafage"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Take the full version when no need to distinguish version and release
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases warn on language = None; "en" is the
# modern equivalent -- confirm the targeted Sphinx version.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "bizstyle"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"rightsidebar": False,
"sidebarwidth": 250,
"body_min_width": 1100,
"body_max_width": 1100,
}
html_logo = "smt_logo.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "SMTdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"SMT.tex",
"SMT Documentation",
"John Hwang, Mohamed Amine Bouhlel, Remi Lafage",
"manual",
)
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "smt", "SMT Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"SMT",
"SMT Documentation",
author,
"SMT",
"One line description of project.",
"Miscellaneous",
)
]
| 5,197 | 28.873563 | 79 | py |
pyzor | pyzor-master/docs/conf.py | # -*- coding: utf-8 -*-
# Sphinx build configuration for the Pyzor documentation.  Flat config
# module: Sphinx imports it and reads the module-level names below.
#
# Pyzor documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 7 15:20:07 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTE(review): sys/os appear unused in this config -- presumably left over
# from the sys.path template below; harmless.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pyzor'
copyright = u'2014, Frank Tobin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pyzordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Pyzor.tex', u'Pyzor Documentation',
u'Frank Tobin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyzor', u'Pyzor Documentation',
[u'Frank Tobin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Pyzor', u'Pyzor Documentation',
u'Frank Tobin', 'Pyzor', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 8,310 | 30.244361 | 79 | py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import logging
import multiprocessing
import re
import ftfy
import torch
import tqdm
import pickle
from seal.index import FMIndex
from datasets import load_dataset
import json
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
def process(line):
    """Tokenize one corpus line with the module-level ``tokenize`` function.

    Kept as a top-level function so it can be pickled and dispatched to
    ``multiprocessing.Pool`` workers by ``build_index``.
    """
    return tokenize(line)
def preprocess_file(input_path, labels, format="kilt", lowercase=False, tokenize=False, pid2query=None):
    """Stream passages from *input_path* and yield one formatted string each.

    For every yielded passage the passage id is appended to *labels*, so
    ``labels[i]`` identifies the i-th yielded string.

    :param input_path: TSV corpus path for "kilt"/"dpr", or a HuggingFace
        dataset name for "msmarco".
    :param labels: output list of passage ids, mutated in place.
    :param format: one of "kilt", "dpr", "msmarco".
    :param lowercase: lowercase the final string.
    :param tokenize: re-tokenize title/text with the module-level
        ``word_tokenize`` (defined in the ``__main__`` block).
    :param pid2query: truthy to load the pickled id->queries map from
        ``args.pid2query`` (the global ``args`` namespace is also read for
        all formatting flags).

    Fix vs. the original: the "msmarco" branch computed the ``code`` suffix
    when ``args.include_code`` was set but never appended it to the text,
    unlike the "kilt"/"dpr" branch; both paths now share one formatter.
    """
    if pid2query:
        with open(args.pid2query, 'rb') as f:
            pid2query = pickle.load(f)
    if args.id2code:
        with open(args.id2code, 'rb') as f:
            id2code = pickle.load(f)

    def format_passage(idx, title, text):
        # Shared per-passage formatting; returns None for empty passages.
        idx = idx.strip()
        title = title.strip()
        text = re.sub(r"\s+", " ", text)
        text = ftfy.fix_text(text)
        text = text.replace("BULLET::::", "")
        text = text.replace("SECTION::::", "")
        text = text.strip()
        if not text:
            return None
        query = None
        if args.include_query:
            queries = pid2query[idx]
            if args.query_format == 'free':
                # Plain space-joined concatenation of all queries.
                query = ''
                for s in queries:
                    query = query + " " + s
            elif args.query_format == 'stable':
                # Each query wrapped in " || ... @@" delimiters.
                query = ''
                for s in queries:
                    query = query + " || " + s + ' @@'
        code = None
        if args.include_code:
            # Character-level spelling of the passage code, same delimiters.
            code = " || " + ' '.join([*id2code[idx].strip()]) + ' @@'
        if tokenize:
            title = " ".join(word_tokenize(title))
            text = " ".join(word_tokenize(text))
        title = f"{title} {args.delim}"
        if args.include_title and title:
            text = f"{title} {text}"
        if args.include_query and query:
            text = f"{text} {query}"
        if args.include_code and code:
            text = f"{text} {code}"
        if lowercase:
            text = text.lower()
        labels.append(idx)
        return text

    if format == "msmarco":
        dataset = load_dataset(input_path, split="train")
        pieces_it = ((pp['docid'], pp["title"], pp["text"]) for pp in dataset)
        pieces_it = tqdm.tqdm(pieces_it)
        for idx, title, text in pieces_it:
            out = format_passage(idx, title, text)
            if out is not None:
                yield out
    else:
        with open(input_path, "r", 2**16) as f:
            if format == "dpr":
                next(f)  # skip the TSV header row
                pieces_it = csv.reader(f, delimiter="\t", quotechar='"')
                # DPR columns are (id, text, title) -> reorder to (id, title, text).
                pieces_it = ((pp[0], pp[2], pp[1]) for pp in pieces_it if len(pp) == 3)
            elif format == "kilt":
                pieces_it = (line.strip() for line in f)
                pieces_it = (line.split("\t", 2) for line in pieces_it)
                pieces_it = ((pp[0], pp[1], pp[2]) for pp in pieces_it if len(pp) == 3)
            pieces_it = tqdm.tqdm(pieces_it)
            for idx, title, text in pieces_it:
                out = format_passage(idx, title, text)
                if out is not None:
                    yield out
def build_index(input_path):
    """Build an :class:`FMIndex` over the corpus at *input_path*.

    Passages are streamed via ``preprocess_file``, tokenized in parallel by
    ``process`` workers, and fed to the index. ``index.labels`` records the
    passage id of every indexed sequence, aligned by position.
    """
    passage_ids = []
    fm_index = FMIndex()
    text_stream = preprocess_file(input_path, passage_ids, args.format, lowercase=args.lowercase, tokenize=args.tokenize, pid2query=args.pid2query)
    print('start build index')
    with multiprocessing.Pool(args.jobs) as pool:
        token_stream = pool.imap(process, text_stream)
        fm_index.initialize(token_stream)
    print('start build index 2')
    fm_index.labels = passage_ids
    return fm_index
def parse_args():
    """Parse command-line options for FM-index construction.

    Positional arguments are the corpus path/name and the output index path;
    the remaining flags control corpus format, tokenization, and the optional
    query/code suffixes appended to each passage.
    """
    cli = argparse.ArgumentParser()
    # I/O
    cli.add_argument("input")
    cli.add_argument("output")
    cli.add_argument("--jobs", type=int, default=1)
    # Passage formatting
    cli.add_argument("--include_title", action="store_true")
    cli.add_argument("--delim", default="@@")
    cli.add_argument("--format", choices=["kilt", "dpr", "msmarco"], default="kilt")
    # Tokenization
    cli.add_argument("--hf_model", default=None, type=str)
    cli.add_argument("--lowercase", action="store_true")
    cli.add_argument("--tokenize", action="store_true")
    # Optional query/code augmentation
    cli.add_argument('--pid2query', default=None, type=str)
    cli.add_argument('--id2code', default=None, type=str)
    cli.add_argument("--include_query", action="store_true")
    cli.add_argument("--include_code", action="store_true")
    cli.add_argument("--query_format", choices=["free", "stable"], default="free")
    return cli.parse_args()
if __name__ == "__main__":
    # Script entry point: configure tokenizers from CLI flags, then build and
    # save the FM-index. The functions defined here (`word_tokenize`,
    # `tokenize`) are module-level by design: `preprocess_file` and `process`
    # call them by name.
    args = parse_args()
    print(args)
    if args.tokenize:
        # Optional word-level retokenization via spaCy's rule-based English tokenizer.
        from spacy.lang.en import English
        nlp = English()
        _tokenizer = nlp.tokenizer
        def word_tokenize(text):
            return [t.text.strip() for t in _tokenizer(text)]
    if args.hf_model is not None:
        # Subword tokenization with a HuggingFace tokenizer; every sequence is
        # terminated with the tokenizer's EOS id.
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(args.hf_model, use_fast=False)
        is_bart = "bart" in args.hf_model
        def tokenize(text):
            text = text.strip()
            if is_bart:
                # BART's BPE is whitespace-sensitive: the leading space selects
                # mid-sentence subword pieces.
                text = " " + text
            with tokenizer.as_target_tokenizer():
                return tokenizer(text, add_special_tokens=False)["input_ids"] + [tokenizer.eos_token_id]
    else:
        # Fall back to fairseq's pretrained BART encoder (downloads via torch.hub);
        # [1:] drops the leading BOS token.
        bart = torch.hub.load("pytorch/fairseq", "bart.large").eval()
        def tokenize(text):
            return bart.encode(" " + text.strip()).tolist()[1:]
    # NOTE(review): `delim` is computed but never used below — confirm whether
    # it was meant to be passed to the index.
    delim = tokenize(args.delim)[:-1]
    index = build_index(args.input)
    print('start build index 3')
    index.save(args.output)
| 7,216 | 29.974249 | 136 | py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import Counter, defaultdict
from heapq import heappop, heappush
from itertools import chain, islice, product
from typing import *
import torch
from more_itertools import chunked
from tqdm import tqdm
from seal import FMIndex
def deduplicate(list_of_lists):
    """Drop duplicate sequences, keeping the first occurrence of each in order.

    Items may be plain sequences (lists or 1-D tensors) or ``(score, sequence)``
    pairs — a pair is detected by its first element being a float — in which
    case uniqueness is decided on the sequence part only, and the original
    item (pair included) is what gets returned.
    """
    seen = set()
    kept = []
    for item in list_of_lists:
        seq = item
        if isinstance(seq[0], float):
            seq = seq[1]
        key = tuple(seq.tolist()) if isinstance(seq, torch.Tensor) else tuple(seq)
        if key in seen:
            continue
        seen.add(key)
        kept.append(item)
    return kept
def decompose_query_into_keys(query, word_tokenizer, length=3):
    """Enumerate candidate retrieval keys from *query*.

    Generates every span of up to *length* consecutive tokens, in every
    combination of per-token initial capitalization, each prefixed with a
    single space (matching mid-sentence occurrences in the corpus).
    *word_tokenizer* must return objects exposing a ``.text`` attribute.
    """
    tokens = [t.text for t in word_tokenizer(query.strip())]
    keys = set()
    n = len(tokens)
    for start in range(n):
        for end in range(start + 1, min(n, start + length) + 1):
            span = tokens[start:end]
            # Every True/False choice of capitalizing each token's first letter.
            for flags in product(*([[True, False]] * (end - start))):
                variant = [w[0].upper() + w[1:] if flag else w for flag, w in zip(flags, span)]
                keys.add(" " + " ".join(variant))
    return list(keys)
def strip(seq, symbols_start, symbols_end):
    """Trim leading elements found in *symbols_start* and trailing elements
    found in *symbols_end*; return the remaining slice of *seq*."""
    start = 0
    end = len(seq)
    while start < end and seq[start] in symbols_start:
        start += 1
    while end > start and seq[end - 1] in symbols_end:
        end -= 1
    return seq[start:end]
@torch.inference_mode()
def rescore_keys(model, inputs, list_of_decoded, batch_size=100, length_penalty=0.0, progress_bar=False, prefix=[],
                 strip_from_bos=[], strip_from_eos=[]):
    """Score candidate key sequences with the seq2seq *model*.

    For every input (encoder side) there is a list of decoded candidates
    (decoder side); the encoder is run once per input and candidates are
    teacher-forced in batches of *batch_size*. Returns, for each input, a
    list of ``(score, candidate)`` pairs where score is the summed token
    log-probability divided by ``len(candidate) ** length_penalty``.

    :param inputs: token-id lists for the encoder; None to use a bare
        [BOS, EOS] input for every entry.
    :param list_of_decoded: per-input candidate lists; entries may be raw
        sequences or (score, sequence) pairs (the old score is discarded).
    :param prefix: token ids force-prepended to every candidate (their
        positions are excluded from the returned score).
    :param strip_from_bos: tokens stripped from candidate starts before scoring.
    :param strip_from_eos: tokens stripped from candidate ends before scoring.
    """
    device = next(model.parameters()).device
    if inputs is None:
        batch_in = [[model.config.bos_token_id, model.config.eos_token_id]] * len(list_of_decoded)
    else:
        batch_in = list(inputs)
    # Normalize candidates: unwrap (score, sequence) pairs to bare sequences.
    list_of_decoded = [[x[1] if isinstance(x[0], float) else x for x in xx] for xx in list_of_decoded]
    # Right-pad encoder inputs to a common length.
    maxlen = max([len(i) for i in batch_in])
    input_ids = [i + ([model.config.pad_token_id] * (maxlen - len(i))) for i in batch_in]
    input_ids = [torch.LongTensor(i).to(device) for i in input_ids]
    input_ids = torch.stack(input_ids, 0)
    attention_mask = input_ids != model.config.pad_token_id
    attention_mask = attention_mask.byte()
    # Encode each input once; encoder states are reused for all its candidates.
    encoder_outputs = model._prepare_encoder_decoder_kwargs_for_generation(
        input_ids, {'attention_mask': attention_mask})['encoder_outputs'].last_hidden_state
    # Flatten to (input index, candidate) pairs so batches can mix inputs.
    decoder_inputs = enumerate(list_of_decoded)
    decoder_inputs = [(idx, di) for idx, ddi in decoder_inputs for di in ddi]
    all_out = {i: [] for i, _ in enumerate(list_of_decoded)}
    for batch in chunked(tqdm(decoder_inputs) if progress_bar else decoder_inputs, batch_size):
        idxs = []
        batch_in_decoder_orig = []
        batch_in_decoder = []
        for i, di in batch:
            # Strip boundary symbols and force the decoder-start + prefix tokens.
            stripped = [model.config.decoder_start_token_id] + prefix + strip(di, strip_from_bos, strip_from_eos)
            if stripped:
                idxs.append(i)
                batch_in_decoder_orig.append(di)
                batch_in_decoder.append(stripped)
        batch_in_decoder = [torch.LongTensor(di) for di in batch_in_decoder]
        batch_in_decoder = [
            torch.cat(
                [torch.LongTensor([model.config.decoder_start_token_id]), di]
            ) if di[0] != model.config.decoder_start_token_id else di for di in batch_in_decoder]
        # Right-pad decoder inputs to a common length.
        maxlen = max([len(di) for di in batch_in_decoder])
        batch_decoder_input_ids = [
            torch.cat(
                [di, torch.LongTensor([model.config.pad_token_id] * (maxlen - len(di)))])
            for di in batch_in_decoder]
        batch_decoder_input_ids = [di for di in batch_decoder_input_ids]
        batch_decoder_input_ids = torch.stack(batch_decoder_input_ids, 0).to(device)
        # Gather the encoder states for the inputs present in this batch.
        batch_input_ids = torch.stack([input_ids[idx] for idx in idxs], 0)
        batch_attention_mask = torch.stack([attention_mask[idx] for idx in idxs], 0)
        batch_encoder_outputs = torch.stack([encoder_outputs[idx] for idx in idxs], 0)
        logits = model(
            input_ids=batch_input_ids,
            attention_mask=batch_attention_mask,
            encoder_outputs=(batch_encoder_outputs, None, None),
            decoder_input_ids=batch_decoder_input_ids[:, :-1],
        ).logits
        logprobs = logits.log_softmax(-1)
        # Pick the log-prob of each gold next token.
        logprobs = torch.gather(logprobs, -1, batch_decoder_input_ids[:, 1:].unsqueeze(-1))
        # Zero out special-token positions (ids < 2, i.e. padding/bos region).
        logprobs[batch_decoder_input_ids[:, 1:] < 2] = 0.0
        # Exclude the forced prefix positions from the score.
        logprobs = logprobs[:, len(prefix):]
        logprobs = logprobs.squeeze(-1).sum(-1)
        logprobs = logprobs.tolist()
        for i, di, bdi, ll in zip(idxs, batch_in_decoder_orig, batch_decoder_input_ids, logprobs):
            sco = ll / (len(di) ** length_penalty)
            all_out[i].append((sco, di))
    return [v for k, v in sorted(all_out.items())]
# @torch.inference_mode()
@torch.no_grad()
def compute_unigram_scores(model, inputs, index: FMIndex, tokenizer=None, tolist=True, temperature=1.0, prefix=[]):
    """Return the model's first-step next-token log-probabilities for each input.

    The decoder is fed only the decoder-start token plus *prefix*, so the
    returned distribution scores every vocabulary item as the first generated
    token (used as unigram evidence scores downstream).

    :param inputs: either a list of strings (requires *tokenizer*) or a list
        of already-tokenized id lists.
    :param temperature: divides the logits before the softmax when != 1.0.
    :param tolist: return a nested Python list instead of a tensor.
    """
    device = next(model.parameters()).device
    if isinstance(inputs[0], str):
        batch = tokenizer(inputs, padding=True, return_tensors='pt')
    else:
        # Manual right-padding of pre-tokenized inputs.
        batch_in = list(inputs)
        maxlen = max([len(i) for i in batch_in])
        input_ids = [i + ([model.config.pad_token_id] * (maxlen - len(i))) for i in batch_in]
        input_ids = [torch.LongTensor(i).to(device) for i in input_ids]
        input_ids = torch.stack(input_ids, 0)
        attention_mask = input_ids != model.config.pad_token_id
        attention_mask = attention_mask.byte()
        batch = dict(input_ids=input_ids, attention_mask=attention_mask)
    batch = {k: v.to(device) for k, v in batch.items()}
    # Decoder input: decoder-start token followed by the forced prefix.
    decoder_input_ids = torch.full_like(batch['input_ids'][:, :1 + len(prefix)], model.config.decoder_start_token_id)
    for i, idx in enumerate(prefix, start=1):
        decoder_input_ids[:, i] = idx
    # Logits at the position right after the prefix = first free generation step.
    logits = model(**batch, decoder_input_ids=decoder_input_ids).logits[:, 0 + len(prefix)]
    if temperature != 1.0:
        logits /= temperature
    logprobs = logits.log_softmax(-1)
    if tolist:
        return logprobs.tolist()
    else:
        return logprobs
def aggregate_evidence(ngrams_and_scores: List[Tuple[List[int], float]], unigram_scores: Optional[List[float]] = None,
                       index: Optional[FMIndex] = None, max_occurrences_1: int = 1500,
                       max_occurrences_2: int = 10_000_000, n_docs_complete_score: int = 500, alpha: float = 2.0,
                       beta: float = 0.8, length_penalty: float = 0.0, use_fm_index_frequency: bool = True,
                       add_best_unigrams_to_ngrams: bool = False, use_top_k_unigrams=1000, sort_by_length=False,
                       sort_by_freq=False, smoothing=5.0, allow_overlaps=False, single_key=0.0,
                       single_key_add_unigrams=False, unigrams_ignore_free_places=False, tokenizer=None) -> Tuple[List[int], List[float]]:
    """Turn scored ngram keys into scored documents via the FM-index.

    Pipeline: (1) rescale each ngram's model log-prob against its corpus
    frequency; (2) locate rare ngrams' occurrences to pick candidate
    documents (first stage); (3) re-score the top *n_docs_complete_score*
    candidates exhaustively by matching all ngrams (via a token trie) over
    each document's full token sequence, with a repetition discount
    controlled by *beta*.

    Returns ``(results, all_ngrams)`` where ``results`` maps doc index to
    ``[score, matched ngrams, None, doc tokens, best single ngram]`` sorted
    by descending score, and ``all_ngrams`` maps ngram tuples to their
    rescaled scores.
    """
    def repetition(ngram, score, coverage):
        # Discount `score` by the fraction of the ngram's token types that
        # were already covered by previously accepted ngrams.
        if not coverage:
            return score
        ngram = set(ngram)
        coeff = 1.0 - beta + (beta * len(ngram.difference(coverage)) / len(ngram))
        return coeff * score
    # Total number of tokens in the indexed corpus.
    ntokens = float(index.beginnings[-1])
    ngrams_and_scores = [(ngram.tolist() if isinstance(ngram, torch.Tensor) else ngram, sr) for ngram, sr in ngrams_and_scores]
    counts = {tuple(): len(index)}
    if not use_fm_index_frequency:
        try:
            # Score floor: slightly below the lowest candidate score.
            cutoff = sorted(ngrams_and_scores, key=lambda x: x[1])[0][1] - 0.1
        except IndexError as e:
            print(ngrams_and_scores)
            raise e
    else:
        cutoff = None
    # Special token ids (pad/bos/eos region) always treated as seen unigrams.
    unigrams = {0, 1, 2}
    for i in range(len(ngrams_and_scores)):
        ngram, sr = ngrams_and_scores[i]
        if ngram[0] == 45056:
            # NOTE(review): special-cased token id (presumably a marker token
            # introduced at indexing time) — its raw score is shifted up and
            # capped at -1.0; confirm against the tokenizer vocabulary.
            sr = min(sr+ 14, -1.0)
        # sr *= 0.1
        if len(ngram) == 1:
            unigrams.add(ngram[0])
        count = index.get_count(ngram)
        # print(tokenizer.decode(ngram),sr,count)
        # if count < 2000:
        #     count = index.get_doc_count(ngram)
        # print(ngram, count)
        counts[tuple(ngram)] = count
        if count == 0:
            sco = 0.0
        elif use_fm_index_frequency:
            # Contrast model log-prob (sr) against corpus log-frequency (snr):
            # log-odds difference, clipped at zero and raised to alpha.
            sr -= 1e-10
            sr *= (1.0 - length_penalty) ** (len(ngram) - 1.0)
            snr = math.log((count + smoothing) / (ntokens + smoothing))
            sco = \
                (sr + math.log(1 - math.exp(snr))) - \
                (snr + math.log(1 - math.exp(sr)))
            sco = max(sco, 0.0)
            sco **= alpha
        else:
            sco = sr - cutoff
            sco = max(sco, 0.0)
            sco *= (1.0 - length_penalty) ** (len(ngram) - 1.0)
            sco **= alpha
        ngrams_and_scores[i] = (ngram, sco)
        # print(tokenizer.decode(ngram),'**', sco)
    if unigram_scores is not None:
        # Rescale per-token unigram scores the same way, keeping only the
        # top `use_top_k_unigrams` tokens.
        unigram_scores = unigram_scores[:]
        best = sorted(range(len(unigram_scores)), reverse=True, key=lambda i: unigram_scores[i])
        best = best[:use_top_k_unigrams]
        best = set(best)
        unigram_scores = [s if i in best else float('-inf') for i, s in enumerate(unigram_scores)]
        for i in range(len(unigram_scores)):
            if i in unigrams:
                # Token already handled as an explicit unigram key (or special).
                unigram_scores[i] = 0.0
                continue
            sr = unigram_scores[i]
            ngram = [i]
            count = index.get_count(ngram)
            if count == 0:
                sco = 0.0
            elif use_fm_index_frequency:
                snr = math.log((count + smoothing) / (ntokens + smoothing))
                sco = \
                    (sr + math.log(1 - math.exp(snr))) - \
                    (snr + math.log(1 - math.exp(sr)))
                sco = max(sco, 0.0)
            else:
                sco = sr - cutoff
                sco = max(sco, 0.0)
            sco **= alpha
            if sco == 0.0:
                unigram_scores[i] = 0.0
                continue
            unigram_scores[i] = sco
    if add_best_unigrams_to_ngrams:
        # Promote the highest-scoring unigrams to full ngram keys.
        best_unigrams = sorted(list(range(len(unigram_scores))), key=lambda x: -unigram_scores[x])[:len(ngrams_and_scores)]
        for i in best_unigrams:
            counts[tuple([i])] = index.get_count([i])
            ngrams_and_scores.append(([i], unigram_scores[i]))
    # rare ngrams (occurring less than max_hits) --> used for the first stage and full scoring
    rare_ngrams = defaultdict(float)
    # frequent ngrams --> used just for full scoring
    freq_ngrams = defaultdict(float)
    # computing scores for all ngrams
    for ngram, sco in ngrams_and_scores:
        count = index.get_count(ngram)
        if count > max_occurrences_2:
            continue
        elif sco == 0.0:
            continue
        elif count > max_occurrences_1 or sco < 0.0:
            ngrams = freq_ngrams
            # ngrams = rare_ngrams
        else:
            ngrams = rare_ngrams
        ngram = tuple(ngram)
        ngrams[ngram] = sco
    # else:
    rare_ngrams = {k: v for k, v in sorted(rare_ngrams.items(), key=lambda x: x[1], reverse=True)}
    # rare_ngrams = remove_redundant_ngrams(rare_ngrams)
    freq_ngrams = {k: v for k, v in sorted(freq_ngrams.items(), key=lambda x: x[1], reverse=True)}
    # freq_ngrams = remove_redundant_ngrams(freq_ngrams)
    all_ngrams = {k: v for k, v in \
                  sorted(
                      chain(rare_ngrams.items(), freq_ngrams.items()),
                      key=lambda x: x[1], reverse=True)}
    # ---- First stage: locate rare ngrams in the corpus to shortlist docs ----
    covered_points = set()
    # first_stage[doc] = [accumulated score, [(ngram, score), ...], [best ngram, best score]]
    first_stage = defaultdict(lambda: [0.0, [], [[], 0.0]])
    for ngram, sco in rare_ngrams.items():
        # idfs[ngram] = idf(ngram, index)
        # each ngram only considered once for doc
        doc_done = defaultdict(set)
        for row in islice(range(*index.get_range(list(ngram))), max_occurrences_1):
            tok_end = index.locate(row)
            tok_start = tok_end - len(ngram)
            doc = index.get_doc_index(tok_end)
            # `new` is True when this occurrence doesn't overlap a previously
            # claimed corpus span.
            new = all([i not in covered_points for i in range(tok_start, tok_end)])
            if sort_by_length:
                order = (len(ngram), sco)
                max_order = (len(first_stage[doc][2][0]), first_stage[doc][2][1])
            elif sort_by_freq:
                order = (-counts[tuple(ngram)], sco)
                max_order = (-counts[tuple(first_stage[doc][2][0])], first_stage[doc][2][1])
            else:
                order = sco
                max_order = first_stage[doc][2][1]
            if order > max_order:
                first_stage[doc][2] = [ngram, sco]
            if new:
                for tok in range(tok_start, tok_end):
                    covered_points.add(tok)
            if new or allow_overlaps:
                if ngram not in doc_done[doc]:
                    doc_done[doc].add(ngram)
                    first_stage[doc][0] += sco
                    first_stage[doc][1].append((ngram, sco))
    # Apply the repetition discount to each doc's accumulated ngram list.
    for doc, doc_info in first_stage.items():
        current_coverage = set()
        current_score = 0.0
        for i in range(len(doc_info[1])):
            tt, sco = doc_info[1][i]
            tts = set(tt)
            new_sco = repetition(tts, sco, current_coverage)
            current_score += new_sco
            doc_info[1][i] = [tt, new_sco]
            current_coverage |= tts
        doc_info[0] = current_score
    # Shortlist by a mix of the summed score and the single best ngram score.
    to_fully_score = sorted(first_stage.items(),
                            key=lambda x: (1.0 - single_key) * (-x[1][0]) + single_key * (-x[1][2][1]))[:n_docs_complete_score]
    results = defaultdict(lambda:
                          [
                              0.0,  # score
                              [],  # ngrams found
                              None,  # places filled
                              None,  # full doc tokens
                              [[], 0.0]  # max ngram
                          ])
    # ---- Second stage: exhaustive matching over each shortlisted doc ----
    # Build a token trie over all ngrams; key -1 marks an ngram end + score.
    trie = {}
    for ngram, score in all_ngrams.items():
        if len(ngram) < 1 or score <= 0.0:
            continue
        current = trie
        for t in ngram:
            current = current.setdefault(t, {})
        current[-1] = score
    for doc, _ in to_fully_score:
        # Prepend eos/bos id 2 so ngrams anchored at the doc start can match.
        doc_tokens = [2] + index.get_doc(doc)[:-1]
        results[doc][3] = doc_tokens
        if unigram_scores is not None:
            type_scores = {t: unigram_scores[t] for t in doc_tokens}
        else:
            type_scores = {t: 0.0 for t in doc_tokens}
        # Scan the doc once, advancing all partial trie matches in parallel.
        matches = {}
        open_matches = []
        for i in range(len(doc_tokens)):
            open_matches = [(m.get(doc_tokens[i]), l + 1, n) for (m, l, n) in open_matches] + [
                (trie.get(doc_tokens[i]), 1, [])]
            for _, _, n in open_matches:
                n.append(doc_tokens[i])
            new_open_matches = []
            while open_matches:
                m, l, n = open_matches.pop()
                if m is None:
                    continue
                new_open_matches.append((m, l, n))
                if -1 in m:
                    # Complete ngram match ending at position i.
                    start = i - l + 1
                    end = i + 1
                    matches.setdefault(tuple(n), [m[-1], []])[1].append((start, end))
            open_matches = new_open_matches
        # Greedy selection of non-overlapping matches, best score first.
        greedy_matches = []
        for n, (s, d) in matches.items():
            if sort_by_length:
                order = (-len(n), -s)
                max_order = (-len(results[doc][4][0]), -results[doc][4][1])
            elif sort_by_freq:
                order = (counts[tuple(n)], -s)
                max_order = (counts[tuple(results[doc][4][0])], -results[doc][4][1])
            else:
                order = -s
                max_order = -results[doc][4][1]
            for (i, j) in d:
                heappush(greedy_matches, (-s, n, s, i, j))
            if order < max_order:
                results[doc][4] = [n, s]
        current_coverage = set()
        ngrams = []
        prev = None
        f = 0
        # free[k] is True while doc position k is not claimed by any match.
        free = [True] * len(doc_tokens)
        while greedy_matches:
            order, n, s, i, j = heappop(greedy_matches)
            n_set = set(n)
            if prev == n:
                # Repeated occurrence of the same ngram keeps its last score.
                new_s = ngrams[-1][1]
            elif not n_set:
                new_s = 0.0
            else:
                new_s = repetition(n_set, s, current_coverage)
            if new_s <= 0.0:
                continue
            if allow_overlaps or all(free[i:j]):
                pass
            else:
                continue
            if prev == n:
                f += 1
                ngrams[-1] = (n, new_s)
            else:
                f = 1
                prev = n
                current_coverage |= n_set
                ngrams.append((n, new_s))
            free[i:j] = [False] * (j - i)
        if unigrams_ignore_free_places:
            free = [True for _ in free]
        single_key_score = results[doc][4][1]
        multi_key_score = sum([s for n, s in ngrams])
        # Add unigram evidence for doc positions not claimed by any ngram.
        unigram_score = 0.0
        for t, f in Counter([t for t, b in zip(doc_tokens, free) if b]).items():
            s = type_scores[t]
            if s > 0.0:
                n = (t,)
                s = repetition(n, s, current_coverage)
                if s != 0.0:
                    unigram_score += s
                    ngrams.append((n, s))
        if single_key_add_unigrams:
            single_key_score += unigram_score
        multi_key_score += unigram_score
        # Final doc score: interpolation between summed and best-single-key scores.
        results[doc][0] = (1.0 - single_key) * multi_key_score + single_key * single_key_score
        results[doc][1] = ngrams
    results = {k: v for k, v in sorted(results.items(), key=lambda x: -x[1][0])}
    return results, all_ngrams
###################test for remove intersestive
# if unigrams_ignore_free_places:
# free = [True for _ in free]
# single_key_score = results[doc][4][1]
# multi_key_score = sum([s for n, s in ngrams])
# unigram_score = 0.0
# for t, f in Counter([t for t, b in zip(doc_tokens, free) if b]).items():
# s = type_scores[t]
# if s > 0.0:
# n = (t,)
# s = repetition(n, s, current_coverage)
# if s != 0.0:
# unigram_score += s
# ngrams.append((n, s))
# max_s = 0.0
# max_n = ''
# for n, s in ngrams:
# if s > max_s:
# max_s = s
# max_n = n
# results[doc][0] = max_s
# results[doc][1] = [(max_n,max_s)]
# results = {k: v for k, v in sorted(results.items(), key=lambda x: -x[1][0])}
#     return results, all_ngrams
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
def _remove_ignore_keys_(state_dict):
ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(k, None)
def _make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data
return lin_layer
def load_state_dict_from_lightning_checkpoint(model, path):
    """Load weights saved as a plain ``state_dict`` (PyTorch-Lightning style)
    from *path* into *model*, mapping all tensors onto the CPU."""
    checkpoint = torch.load(path, map_location="cpu")
    model.load_state_dict(checkpoint)
def load_state_dict_from_fairseq_checkpoint(model, path):
    """Load a fairseq BART checkpoint from *path* into a transformers-style
    *model* (expects the weights under ``model.model``).

    Fairseq stores the weights under the checkpoint's "model" key and does
    not keep a separate shared embedding, so the decoder embedding is copied
    to ``shared.weight`` and every embedding matrix gets one extra zero row
    (transformers BART vocabularies carry an additional token). When the
    model has an ``lm_head`` it is re-tied to the shared embedding.

    Fix vs. the original: the last line of this function was corrupted by
    stray concatenated metadata; the statement is reconstructed verbatim.
    """
    state_dict = torch.load(path, map_location="cpu")["model"]
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    for key in ["shared.weight", "encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]:
        # Pad the vocabulary dimension with one zero row.
        state_dict[key] = torch.cat([state_dict[key], torch.zeros_like(state_dict[key][:1])], 0)
    _remove_ignore_keys_(state_dict)
    if hasattr(model, "lm_head"):
        model.lm_head = _make_linear_from_emb(model.model.shared)
    # Keep only the keys the wrapped model actually declares.
    new_state_dict = {}
    for key in model.model.state_dict():
        new_state_dict[key] = state_dict[key]
    model.model.load_state_dict(new_state_dict)
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import UserDict
from typing import *
import warnings
from more_itertools import chunked
import torch
from torch import nn
import torch.distributed as dist
from transformers import LogitsProcessor, BeamScorer, BeamSearchScorer, LogitsProcessorList, StoppingCriteriaList, HammingDiversityLogitsProcessor
from transformers.generation_utils import BeamSearchOutput, validate_stopping_criteria, BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput
from transformers.generation_logits_process import TopKLogitsWarper
from seal.index import FMIndex
# Subword-token ids of common English stopwords/articles, presumably in the
# BART BPE vocabulary (used to optionally block them at decoding start) —
# TODO confirm the ids against the tokenizer in use.
stopword_token_ids = [
    10, # a
    41, # an
    660, # An
    5, # the
    1941, # THE
    20, # The
    7, # to
    6, # and
]
class IndexBasedLogitsProcessor(LogitsProcessor):
    """
    Class that masks logit, meant to be used during decoding. The logit mask is determined by finding the range of rows
    in the FM-index that correspond to the previously decoded token ( $O(n log V)$ ), then finding all tokens in that
    interval ( $O(V log V)$ ).
    """
    def __init__(
        self,
        index: FMIndex,
        num_beams: int,
        pad_token_id: int = 0,
        eos_token_id: int = 2,
        force_decoding_from: Optional[List[int]] = None,
        stop_at_count: int = 0,
        always_allow_eos: bool = False,
        forced_bos_token_id: Optional[int] = None,
    ):
        """Configure the constrained-decoding mask.

        :param index: FM-index over the corpus; every generated sequence is
            constrained to be one of its substrings.
        :param num_beams: beams per batch element (needed to regroup the
            flattened ``input_ids`` rows).
        :param force_decoding_from: token ids virtually prepended to every
            hypothesis when querying the index.
        :param stop_at_count: if > 0, force EOS once a hypothesis's occurrence
            count in the index drops to this value or below.
        :param always_allow_eos: never mask the EOS token.
        :param forced_bos_token_id: token forced at the first decoding step
            (e.g. mBART language codes).
        """
        self.index = index
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        self._num_beams = num_beams
        self.log_odds_weight = 0.0
        self.force_decoding_from = force_decoding_from
        # The two attributes below are initialized but not used in this class
        # (kept for compatibility with external callers that may set them).
        self.force_decoding_second_token = None
        self.block_initial_stopwords = False
        self.stop_at_count = stop_at_count
        self.always_allow_eos = always_allow_eos
        self.forced_bos_token_id = forced_bos_token_id
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        """Return *scores* with -inf added to tokens that cannot continue any
        corpus substring."""
        # Start from an everything-masked tensor and selectively unmask.
        mask = torch.full_like(scores, float('-inf'))
        if self.forced_bos_token_id is not None:
            if input_ids.size(1) == 1:
                # First step: only the forced BOS token is allowed.
                mask[:, self.forced_bos_token_id] = 0.0
                return scores + mask
        # else:
        #     input_ids = input_ids[:, 1:]
        if input_ids.size(1) == 1:
            # First step without a forced BOS: any token occurring in the corpus.
            distinct = self.index.occurring_distinct
            distinct = torch.LongTensor(distinct).to(scores.device)
            mask[:, distinct] = 0.0
        else:
            # Regroup flat rows into (batch, beam) and query the FM-index for
            # the row range matching each hypothesis suffix.
            input_ids_list = input_ids.view(-1, self._num_beams, input_ids.shape[-1]).tolist()
            lows = []
            highs = []
            fm_index_counts = []
            for batch_id, beam_sent in enumerate(input_ids_list):
                for beam_id, sent in enumerate(beam_sent):
                    if sent[-1] in (self.eos_token_id, self.pad_token_id):
                        # Finished hypothesis: empty range, nothing to extend.
                        low = 0
                        high = 0
                        count = 0
                    elif self.force_decoding_from is not None:
                        low, high = self.index.get_range(self.force_decoding_from + sent[1:])
                        count = self.index.get_count(self.force_decoding_from + sent[1:-1])
                    else:
                        low, high = self.index.get_range(sent[1:])
                        count = self.index.get_count(sent[1:-1])
                    lows.append(low)
                    highs.append(high)
                    fm_index_counts.append(count)
            # Batched distinct-successor query; reversed so .pop() yields
            # results in the original hypothesis order below.
            fm_index_result = self.index.get_distinct_count_multi(lows, highs)
            fm_index_result = fm_index_result[::-1]
            fm_index_counts = fm_index_counts[::-1]
            for batch_id, beam_sent in enumerate(input_ids_list):
                for beam_id, sent in enumerate(beam_sent):
                    if self.stop_at_count > 0 and fm_index_counts[-1] <= self.stop_at_count:
                        # Occurrence count small enough: force the hypothesis to end.
                        fm_index_result.pop()
                        fm_index_counts.pop()
                        distinct = [self.eos_token_id]
                    elif sent[-1] == self.eos_token_id:
                        fm_index_result.pop()
                        fm_index_counts.pop()
                        distinct = [self.pad_token_id]
                    elif sent[-1] == self.pad_token_id:
                        fm_index_result.pop()
                        fm_index_counts.pop()
                        distinct = [self.pad_token_id]
                    else:
                        # Allow exactly the tokens that continue some corpus match.
                        fm_index_counts.pop()
                        distinct, _ = fm_index_result.pop()
                    distinct = torch.LongTensor(distinct).to(scores.device)
                    mask[batch_id * self._num_beams + beam_id, distinct] = 0
        if self.always_allow_eos:
            mask[:, self.eos_token_id] = 0.0
        return scores + mask
def constrained_beam_search(
    model,
    input_ids: torch.LongTensor,
    beam_scorer: BeamScorer,
    logits_processor: Optional[LogitsProcessorList] = None,
    constrained_decoding_processor: Optional[IndexBasedLogitsProcessor] = None,
    stopping_criteria: Optional[StoppingCriteriaList] = None,
    max_length: Optional[int] = None,
    pad_token_id: Optional[int] = None,
    eos_token_id: Optional[int] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    output_scores: Optional[bool] = None,
    return_dict_in_generate: Optional[bool] = None,
    synced_gpus: Optional[bool] = None,
    sample: bool = False,
    topk: int = 0,
    **model_kwargs,
) -> Union[BeamSearchOutput, torch.LongTensor]:
    """Beam search adapted from ``transformers.generation_utils.beam_search``.

    Differences from the stock implementation: candidate *selection* uses the
    scores masked by *constrained_decoding_processor* (so only FM-index-valid
    continuations are expanded), while the *accumulated* beam scores use the
    unmasked distribution; optional multinomial sampling (``sample=True``)
    and top-k logit truncation (``topk``) are also supported.
    """
    if topk > 0:
        topk_warper = TopKLogitsWarper(topk)
    # init values
    logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
    stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
    if max_length is not None:
        warnings.warn(
            "`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
            UserWarning,
        )
        stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
    if len(stopping_criteria) == 0:
        warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning)
    pad_token_id = pad_token_id if pad_token_id is not None else model.config.pad_token_id
    eos_token_id = eos_token_id if eos_token_id is not None else model.config.eos_token_id
    output_scores = output_scores if output_scores is not None else model.config.output_scores
    output_attentions = output_attentions if output_attentions is not None else model.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else model.config.output_hidden_states
    )
    return_dict_in_generate = (
        return_dict_in_generate if return_dict_in_generate is not None else model.config.return_dict_in_generate
    )
    batch_size = len(beam_scorer._beam_hyps)
    num_beams = beam_scorer.num_beams
    batch_beam_size, cur_len = input_ids.shape
    if num_beams * batch_size != batch_beam_size:
        raise ValueError(
            f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
        )
    # init attention / hidden states / scores tuples
    scores = () if (return_dict_in_generate and output_scores) else None
    beam_indices = (
        tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
    )
    decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
    cross_attentions = () if (return_dict_in_generate and output_attentions) else None
    decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
    # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
    if return_dict_in_generate and model.config.is_encoder_decoder:
        encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
        encoder_hidden_states = (
            model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
        )
    # All beams but the first start at -1e9 so the first step expands only
    # one hypothesis per batch element.
    beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
    beam_scores[:, 1:] = -1e9
    beam_scores = beam_scores.view((batch_size * num_beams,))
    this_peer_finished = False  # used by synced_gpus only
    while True:
        if synced_gpus:
            # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
            # The following logic allows an early break if all peers finished generating their sequence
            this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
            # send 0.0 if we finished, 1.0 otherwise
            dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
            # did all peers finish? the reduced sum will be 0.0 then
            if this_peer_finished_flag.item() == 0.0:
                break
        model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)
        outputs = model(
            **model_inputs,
            return_dict=True,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        if synced_gpus and this_peer_finished:
            cur_len = cur_len + 1
            continue  # don't waste resources running the code we don't need
        next_token_logits = outputs.logits[:, -1, :]
        # hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id`
        # cannot be generated both before and after the `nn.functional.log_softmax` operation.
        next_token_logits = model.adjust_logits_during_generation(next_token_logits, cur_len=cur_len)
        if topk:
            next_token_logits = topk_warper(input_ids, next_token_logits)
        next_token_scores = nn.functional.log_softmax(
            next_token_logits, dim=-1
        )  # (batch_size * num_beams, vocab_size)
        next_token_scores_processed = logits_processor(input_ids, next_token_scores)
        next_token_scores_no_prev = next_token_scores_processed
        next_token_scores = next_token_scores_no_prev + beam_scores[:, None].expand_as(next_token_scores)
        if constrained_decoding_processor is not None:
            # Masked copy of the scores: used only to *pick* candidates; the
            # accumulated beam scores keep the unmasked values.
            next_token_scores_constrained_no_prev = constrained_decoding_processor(input_ids, next_token_scores_processed)
            next_token_scores_constrained = next_token_scores_constrained_no_prev + beam_scores[:, None].expand_as(next_token_scores)
            # if return_masked_scores:
            #     next_token_scores = next_token_scores_constrained
        else:
            next_token_scores_constrained_no_prev = next_token_scores_no_prev
            next_token_scores_constrained = next_token_scores
        # Store scores, attentions and hidden_states when required
        if return_dict_in_generate:
            if output_scores:
                scores += (next_token_scores_processed,)
            if output_attentions:
                decoder_attentions += (
                    (outputs.decoder_attentions,) if model.config.is_encoder_decoder else (outputs.attentions,)
                )
                if model.config.is_encoder_decoder:
                    cross_attentions += (outputs.cross_attentions,)
            if output_hidden_states:
                decoder_hidden_states += (
                    (outputs.decoder_hidden_states,)
                    if model.config.is_encoder_decoder
                    else (outputs.hidden_states,)
                )
        # reshape for beam search
        vocab_size = next_token_scores.shape[-1]
        if sample:
            # Ancestral sampling from the constrained distribution; finished
            # (all -inf / NaN) rows fall back to emitting EOS.
            next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
            weights = next_token_scores_constrained_no_prev.view(batch_size * num_beams, vocab_size).log_softmax(-1).exp()
            nans = torch.isnan(weights.sum(-1))
            weights[nans, :] = 0.0
            weights[nans, eos_token_id] = 1.0
            next_tokens = torch.multinomial(weights, 1, replacement=True).view(batch_size, 1 * num_beams)
            next_token_scores = next_token_scores.gather(-1, next_tokens)
            # next_token_scores = next_token_scores.reshape(batch_size, num_beams, 1)
            # next_token_scores[:, :, :] = 0.0
            # next_token_scores = next_token_scores.reshape(batch_size, 1 * num_beams)
        else:
            # Standard top-2k expansion, ranked by the constrained scores but
            # reporting the unmasked scores to the beam scorer.
            next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
            next_token_scores_constrained = next_token_scores_constrained.view(batch_size, num_beams * vocab_size)
            next_token_scores_constrained, next_tokens = torch.topk(
                next_token_scores_constrained, 2 * num_beams, dim=1, largest=True, sorted=True
            )
            next_token_scores = next_token_scores.gather(-1, next_tokens)
        # Decode flat indices back into (beam, token) pairs.
        next_indices = (next_tokens / vocab_size).long()
        next_tokens = next_tokens % vocab_size
        # stateless
        beam_outputs = beam_scorer.process(
            input_ids,
            next_token_scores,
            next_tokens,
            next_indices,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
        )
        beam_scores = beam_outputs["next_beam_scores"]
        beam_next_tokens = beam_outputs["next_beam_tokens"]
        beam_idx = beam_outputs["next_beam_indices"]
        input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
        model_kwargs = model._update_model_kwargs_for_generation(
            outputs, model_kwargs, is_encoder_decoder=model.config.is_encoder_decoder
        )
        if model_kwargs["past"] is not None:
            model_kwargs["past"] = model._reorder_cache(model_kwargs["past"], beam_idx)
        if return_dict_in_generate and output_scores:
            beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
        # increase cur_len
        cur_len = cur_len + 1
        if beam_scorer.is_done or stopping_criteria(input_ids, scores):
            if not synced_gpus:
                break
            else:
                this_peer_finished = True
    sequence_outputs = beam_scorer.finalize(
        input_ids,
        beam_scores,
        next_tokens,
        next_indices,
        pad_token_id=pad_token_id,
        eos_token_id=eos_token_id,
        max_length=stopping_criteria.max_length,
    )
    if return_dict_in_generate:
        if not output_scores:
            sequence_outputs["sequence_scores"] = None
        else:
            num_return_sequences = beam_scorer.num_beam_hyps_to_keep
            # return only as many indices as sequences
            beam_indices = tuple(
                (beam_indices[i * num_beams : i * num_beams + num_return_sequences] for i in range(batch_size))
            )
            beam_indices = sum(beam_indices, ())
        if model.config.is_encoder_decoder:
            return BeamSearchEncoderDecoderOutput(
                sequences=sequence_outputs["sequences"],
                sequences_scores=sequence_outputs["sequence_scores"],
                scores=scores,
                beam_indices=beam_indices,
                encoder_attentions=encoder_attentions,
                encoder_hidden_states=encoder_hidden_states,
                decoder_attentions=decoder_attentions,
                cross_attentions=cross_attentions,
                decoder_hidden_states=decoder_hidden_states,
            )
        else:
            return BeamSearchDecoderOnlyOutput(
                sequences=sequence_outputs["sequences"],
                sequences_scores=sequence_outputs["sequence_scores"],
                scores=scores,
                beam_indices=beam_indices,
                attentions=decoder_attentions,
                hidden_states=decoder_hidden_states,
            )
    else:
        return sequence_outputs["sequences"]
@torch.inference_mode()
def fm_index_generate(
    model,
    index: FMIndex,
    input_ids: torch.LongTensor,
    attention_mask: torch.LongTensor,
    min_length: int = 3,
    max_length: int = 25,
    length_penalty: float = 1.0,
    num_beams: int = 3,
    diverse_bs_groups: int = 1,
    diverse_bs_penalty: float = 0.0,
    eos_token_id: Optional[int] = None,
    force_decoding_from: Optional[List[int]] = None,
    always_allow_eos: bool = False,
    keep_history: bool = False,
    disable_fm_index: bool = False,
    sample: bool = False,
    stop_at_count: int = 0,
    topk: int = 0,
    transformers_output: bool = False,
    **kwargs,
):
    """Run beam search whose decoding steps are constrained by an FM-index.

    The constraint is enforced by an ``IndexBasedLogitsProcessor`` built over
    ``index`` (unless ``disable_fm_index`` is set).  Dispatches either to
    HuggingFace's ``group_beam_search`` (when ``diverse_bs_groups > 1``) or to
    the project-local ``constrained_beam_search``.

    Returns either the raw search output (``transformers_output=True``) or a
    per-example list of ``(unnormalized_score, token_id_list)`` pairs drawn
    from the scorer's beam hypotheses.
    """
    # An explicitly passed forced-BOS id takes precedence over the model
    # config's default.
    if 'forced_bos_token_id' in kwargs:
        forced_bos_token_id = kwargs.pop('forced_bos_token_id')
    else:
        forced_bos_token_id = model.config.forced_bos_token_id
    # Sampling mode: replicate each input `num_beams` times and decode each
    # replica with a single beam; the replicas are re-grouped at the end via
    # `chunked(..., orig_num_beams)`.
    if sample:
        orig_num_beams = num_beams
        input_ids = input_ids.repeat(num_beams, 1)
        attention_mask = attention_mask.repeat(num_beams, 1)
        num_beams = 1
    device = input_ids.device
    if eos_token_id is None:
        eos_token_id = model.config.eos_token_id
    # Standard HF processors.  eos_token_id is deliberately None here: EOS
    # handling is delegated to the index-based processor below.
    logits_processor = model._get_logits_processor(
        encoder_input_ids=input_ids,
        repetition_penalty=None,
        no_repeat_ngram_size=0,
        encoder_no_repeat_ngram_size=0,
        bad_words_ids=None,
        min_length=min_length,
        max_length=max_length,
        eos_token_id=None,
        prefix_allowed_tokens_fn=None,
        forced_bos_token_id=forced_bos_token_id,
        forced_eos_token_id=None,
        num_beams=num_beams,
        num_beam_groups=1,
        diversity_penalty=0.0,
        remove_invalid_values=True)
    if diverse_bs_groups > 1 and diverse_bs_penalty > 0.0:
        logits_processor.append(
            HammingDiversityLogitsProcessor(
                diversity_penalty=diverse_bs_penalty,
                num_beams=num_beams,
                num_beam_groups=diverse_bs_groups,
            )
        )
    if not disable_fm_index:
        constrained_decoding_processor = IndexBasedLogitsProcessor(
            num_beams=num_beams // diverse_bs_groups,
            index=index,
            pad_token_id=model.config.pad_token_id,
            eos_token_id=eos_token_id or model.config.eos_token_id,
            force_decoding_from=force_decoding_from,
            stop_at_count=stop_at_count,
            always_allow_eos=always_allow_eos,
            forced_bos_token_id=forced_bos_token_id,
        )
        # With diverse groups we reuse HF's group_beam_search, so the index
        # constraint must ride along in the processor list; otherwise it is
        # handed separately to `constrained_beam_search` below.
        if diverse_bs_groups > 1:
            logits_processor.append(constrained_decoding_processor)
    else:
        constrained_decoding_processor = None
    stopping_criteria = model._get_stopping_criteria(
        max_length=max_length,
        max_time=None,
        #max_new_tokens=None,
        #start_length=None
    )
    # Pre-compute encoder outputs once; decoding reuses them via model_kwargs.
    model_kwargs = model._prepare_encoder_decoder_kwargs_for_generation(
        input_ids, {'attention_mask': attention_mask})
    model_kwargs['use_cache'] = True
    decoder_input_ids = model._prepare_decoder_input_ids_for_generation(
        batch_size=input_ids.size(0),
        decoder_start_token_id=model.config.decoder_start_token_id,
        bos_token_id=model.config.bos_token_id,
    )
    # `keep_history` swaps in a scorer whose hypotheses record every partial
    # beam ever proposed, not just finished ones.
    if keep_history:
        beam_scorer = BeamSearchScorerWithMemory(
            batch_size=decoder_input_ids.shape[0],
            num_beams=num_beams,
            device=device,
            length_penalty=length_penalty,
            do_early_stopping=False,
            num_beam_hyps_to_keep=num_beams,
            min_length=min_length,
            max_length=max_length,
            num_beam_groups=diverse_bs_groups,
        )
    else:
        beam_scorer = BeamSearchScorer(
            batch_size=decoder_input_ids.shape[0],
            num_beams=num_beams,
            device=device,
            length_penalty=length_penalty,
            do_early_stopping=False,
            num_beam_hyps_to_keep=num_beams,
            num_beam_groups=diverse_bs_groups,
        )
    decoder_input_ids, model_kwargs = model._expand_inputs_for_generation(
        decoder_input_ids,
        expand_size=num_beams,
        is_encoder_decoder=True,
        **model_kwargs)
    if diverse_bs_groups > 1:
        out = model.group_beam_search(
            input_ids=decoder_input_ids,
            beam_scorer=beam_scorer,
            logits_processor=logits_processor,
            stopping_criteria=stopping_criteria,
            output_scores=True,
            pad_token_id=model.config.pad_token_id,
            eos_token_id=eos_token_id,
            **model_kwargs)
    else:
        out = constrained_beam_search(
            model,
            input_ids=decoder_input_ids,
            beam_scorer=beam_scorer,
            logits_processor=logits_processor,
            constrained_decoding_processor=constrained_decoding_processor,
            stopping_criteria=stopping_criteria,
            output_scores=True,
            pad_token_id=model.config.pad_token_id,
            eos_token_id=eos_token_id,
            sample=sample,
            topk=topk,
            **model_kwargs)
    if transformers_output:
        return out
    # Turn each stored hypothesis back into its *unnormalized* log-prob by
    # multiplying the length-normalized score with len ** length_penalty, and
    # drop -inf (dead) hypotheses.  In sampling mode, replicas belonging to
    # the same original example are merged into one list.
    if sample:
        out = [[(h[0] * h[1].size(0) ** length_penalty, h[1].tolist()) for b in bb for h in b.beams if h[0] > float('-inf')] for bb in chunked(beam_scorer._beam_hyps, orig_num_beams)]
    else:
        out = [[(h[0] * h[1].size(0) ** length_penalty, h[1].tolist()) for h in b.beams if h[0] > float('-inf')] for b in beam_scorer._beam_hyps]
    return out
class BeamSearchScorerWithMemory(BeamScorer):
    """Beam-search scorer that remembers *every* candidate hypothesis.

    Unlike the stock ``BeamSearchScorer``, each batch element's
    ``BeamHypothesesWithMemory`` records every (prefix + next token) pair
    offered during ``process``, finished or not.  Consumers are expected to
    read ``self._beam_hyps`` directly; ``finalize`` only returns placeholder
    tensors (see note there).
    """

    def __init__(
        self,
        batch_size: int,
        num_beams: int,
        device: torch.device,
        length_penalty: Optional[float] = 1.0,
        do_early_stopping: Optional[bool] = False,
        num_beam_hyps_to_keep: Optional[int] = 1,
        num_beam_groups: Optional[int] = 1,
        min_length: Optional[int] = 15,
        max_length: Optional[int] = 25,
        **kwargs,
    ):
        self.num_beams = num_beams
        self.device = device
        self.length_penalty = length_penalty
        self.do_early_stopping = do_early_stopping
        self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
        self.num_beam_groups = num_beam_groups
        # Beams per diversity group; `process` operates group-wise.
        self.group_size = self.num_beams // self.num_beam_groups
        self.min_length = min_length
        self.max_length = max_length
        self._is_init = False
        # One memory-keeping hypothesis container per batch element.
        self._beam_hyps = [
            BeamHypothesesWithMemory(
                num_beams=self.num_beams,
                length_penalty=self.length_penalty,
                early_stopping=self.do_early_stopping,
                min_length=self.min_length,
                max_length=self.max_length)
            for _ in range(batch_size)
        ]
        self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device)
        if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
            raise ValueError(
                f"`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` "
                f"has to be divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
            )
        if "max_length" in kwargs:
            warnings.warn(
                "Passing `max_length` to BeamSearchScorer is deprecated and has no effect."
                "`max_length` should be passed directly to `beam_search(...)`, `beam_sample(...)`"
                ",or `group_beam_search(...)`."
            )

    @property
    def is_done(self) -> bool:
        # True once every batch element has reached its stopping condition.
        return self._done.all()

    def process(
        self,
        input_ids: torch.LongTensor,
        next_scores: torch.FloatTensor,
        next_tokens: torch.LongTensor,
        next_indices: torch.LongTensor,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
    ) -> Tuple[torch.Tensor]:
        """Select the next-step beams and record *all* candidates in memory.

        Mirrors HF's ``BeamSearchScorer.process`` except that every candidate
        continuation is appended to ``beam_hyp`` before the usual EOS /
        beam-capacity filtering decides which ones continue decoding.
        """
        cur_len = input_ids.shape[-1]
        batch_size = len(self._beam_hyps)
        if not (batch_size == (input_ids.shape[0] // self.group_size)):
            if self.num_beam_groups > 1:
                raise ValueError(
                    f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
                    f"size of {self.group_size} is expected by the beam scorer."
                )
            else:
                raise ValueError(
                    f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
                    f"{self.group_size} is expected by the beam scorer."
                )
        device = input_ids.device
        next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
        next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
        next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
        for batch_idx, beam_hyp in enumerate(self._beam_hyps):
            if self._done[batch_idx]:
                if self.num_beams < len(beam_hyp):
                    raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
                if eos_token_id is None or pad_token_id is None:
                    raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
                # pad the batch
                next_beam_scores[batch_idx, :] = 0
                next_beam_tokens[batch_idx, :] = pad_token_id
                next_beam_indices[batch_idx, :] = 0
                continue
            # next tokens for this sentence
            beam_idx = 0
            # `broken` marks that the group's beam slots for the next step are
            # already full; remaining candidates are still recorded in memory
            # but no longer promoted.
            broken = False
            for beam_token_rank, (next_token, next_score, next_index) in enumerate(
                zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
            ):
                batch_beam_idx = batch_idx * self.group_size + next_index
                # Memory step: record prefix + candidate token unconditionally.
                beam_hyp.add(
                    torch.cat([
                        input_ids[batch_beam_idx],
                        next_token.view(1),
                    ]),
                    next_score.item(),
                )
                # add to generated hypotheses if end of sentence
                if broken:
                    pass
                elif (eos_token_id is not None) and (next_token.item() == eos_token_id):
                    pass
                else:
                    # add next predicted token since it is not eos_token
                    next_beam_scores[batch_idx, beam_idx] = next_score
                    next_beam_tokens[batch_idx, beam_idx] = next_token
                    next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
                    beam_idx += 1
                # once the beam for next step is full, don't add more tokens to it.
                if beam_idx == self.group_size:
                    broken = True
            if beam_idx < self.group_size:
                raise ValueError(
                    f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id: {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
                )
            # Check if we are done so that we can save a pad step if all(done)
            # NOTE(review): BeamHypothesesWithMemory.is_done only checks
            # cur_len >= max_length; the score argument is ignored there.
            self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
                next_scores[batch_idx].max().item(), cur_len
            )
        return UserDict(
            {
                "next_beam_scores": next_beam_scores.view(-1),
                "next_beam_tokens": next_beam_tokens.view(-1),
                "next_beam_indices": next_beam_indices.view(-1),
            }
        )

    def finalize(
        self,
        input_ids: torch.LongTensor,
        final_beam_scores: torch.FloatTensor,
        final_beam_tokens: torch.LongTensor,
        final_beam_indices: torch.LongTensor,
        max_length: int,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
    ) -> Tuple[torch.LongTensor]:
        """Record the final beams in memory and return placeholder tensors.

        NOTE(review): unlike the stock scorer, the returned "sequences" are an
        uninitialized (batch * num_beam_hyps_to_keep, 3) tensor and the scores
        are zeros — callers are expected to read `self._beam_hyps` instead
        (see `fm_index_generate`).  Confirm no caller consumes these values.
        """
        batch_size = len(self._beam_hyps)
        for batch_idx, beam_hyp in enumerate(self._beam_hyps):
            for beam_id in range(self.num_beams):
                batch_beam_idx = batch_idx * self.num_beams + beam_id
                final_score = final_beam_scores[batch_beam_idx].item()
                final_tokens = input_ids[batch_beam_idx]
                beam_hyp.add(
                    final_tokens.clone(),
                    final_score)
        decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, 3)
        best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
        return UserDict(
            {
                "sequences": decoded,
                "sequence_scores": best_scores,
            }
        )
class BeamHypothesesWithMemory:
    """Append-only store of beam hypotheses.

    Every hypothesis handed to :meth:`add` is kept forever (nothing is ever
    evicted, regardless of ``num_beams``), each paired with its
    length-penalized score.  ``is_done`` stops generation purely on length.
    """

    def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, min_length: int, max_length: int):
        # Accumulated (score, token_tensor) pairs, in insertion order.
        self.beams = []
        self.num_beams = num_beams
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.min_length = min_length
        self.max_length = max_length
        # Kept for interface parity with HF's BeamHypotheses; unused here.
        self.worst_score = 1e9
        self._best = None

    def __len__(self):
        """Number of hypotheses recorded so far."""
        return len(self.beams)

    def add(self, hyp: torch.LongTensor, sum_logprobs: float):
        """Record *hyp* with its length-normalized score (no eviction)."""
        normalizer = hyp.size(0) ** self.length_penalty
        self.beams.append((sum_logprobs / normalizer, hyp))

    def is_done(self, best_sum_logprobs: float, cur_len: int) -> bool:
        """Stop once ``cur_len`` reaches ``max_length``; the score is ignored."""
        return cur_len >= self.max_length
| 31,228 | 40.090789 | 183 | py |
SRU_for_GCI | SRU_for_GCI-master/main.py | #!/usr/bin/env python
# coding: utf-8
# Import header files
import math
import argparse
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import matplotlib
import sys
import numpy as np
import pylab
from matplotlib import pyplot as plt
import time
import sys
from models.sru import SRU, trainSRU
from models.eSRU_1LF import eSRU_1LF, train_eSRU_1LF
from models.eSRU_2LF import eSRU_2LF, train_eSRU_2LF
from utils.utilFuncs import env_config, loadTrainingData, loadTrueNetwork, getCausalNodes, count_parameters, getGeneTrainingData
# Read input command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default='cuda:3',
                    help='device, default: cuda:3')
parser.add_argument('--dataset', type=str, default='VAR',
                    help='dataset type, default: VAR')
parser.add_argument('--dsid', type=int, default=1,
                    help='dataset id, default: 1')
parser.add_argument('--T', type=int, default=10,
                    help='training size, default: 10')
parser.add_argument('--F', type=int, default=10,
                    help='chaos, default: 10')
parser.add_argument('--n', type=int, default=10,
                    help='num of timeseries, default: 10')
parser.add_argument('--model', type=str, default='sru',
                    help='[sru, gru, lstm]: select your model')
parser.add_argument('--nepochs', type=int, default=500,
                    help='sets max_iter, default: 500')
parser.add_argument('--mu1', type=float, default=1,
                    help='sets mu1 parameter, default: 1')
parser.add_argument('--mu2', type=float, default=1,
                    help='sets mu2 parameter, default: 1')
parser.add_argument('--mu3', type=float, default=1,
                    help='sets mu3 parameter, default: 1')
parser.add_argument('--lr', type=float, default=0.005,
                    help='sets learning rate, default: 0.005')
parser.add_argument('--joblog', type=str, default="",
                    help='name of job logfile, default=""')
args = parser.parse_args()

# Unpack CLI arguments into the module-level names used by the rest of
# the script.
deviceName = args.device
model_name = args.model
max_iter = args.nepochs
mu1 = args.mu1
mu2 = args.mu2
mu3 = args.mu3
# Fix: the dataset dispatch below compares against lowercase literals
# ('gene', 'var', 'lorenz', 'netsim'), yet the advertised default is 'VAR',
# which previously fell through to "Dataset is not supported".  Normalizing
# case here makes the default (and any capitalized spelling) work while
# leaving lowercase inputs untouched.
dataset = args.dataset.lower()
dataset_id = args.dsid
T = args.T
F = args.F
n = args.n
lr = args.lr
jobLogFilename = args.joblog
###############################
# Global simulation settings
###############################
verbose = 0 # Verbosity level

#################################
# Pytorch environment
#################################
# env_config selects CPU/GPU and seeds RNGs (see utils.utilFuncs).
device, seed = env_config(True, deviceName) # true --> use GPU
print("Computational Resource: %s" % (device))

######################################
# Create input data in batch format
######################################
# Each branch loads Xtrain (n x numTotalSamples time series, float, on
# `device`) and Gref (ground-truth causal graph), then rescales the signal.
if(dataset == 'gene'):
    Xtrain, Gref = getGeneTrainingData(dataset_id, device)
    n1 = Xtrain.shape[0]
    if(n != n1):
        # NOTE(review): dimension mismatch is only reported, not fatal —
        # execution continues with the loaded shape.
        print("Error::Dimension mismatch for input training data..")
    numTotalSamples = Xtrain.shape[1]
    Xtrain = Xtrain.float().to(device)
    # Make input signal zero mean and appropriately scaled
    Xtrain = Xtrain - Xtrain.mean()
    inputSignalMultiplier = 50
    Xtrain = inputSignalMultiplier * Xtrain
elif(dataset == 'var'):
    fileName = "data/var/S_%s_T_%s_dataset_%s.npz" % (F, T, dataset_id)
    ld = np.load(fileName)
    X_np = ld['X_np']
    Gref = ld['Gref']
    numTotalSamples = T
    Xtrain = torch.from_numpy(X_np)
    Xtrain = Xtrain.float().to(device)
    inputSignalMultiplier = 1
    Xtrain = inputSignalMultiplier * Xtrain
elif(dataset == 'lorenz'):
    fileName = "data/lorenz96/F_%s_T_%s_dataset_%s.npz" % (F, T, dataset_id)
    ld = np.load(fileName)
    X_np = ld['X_np']
    Gref = ld['Gref']
    numTotalSamples = T
    Xtrain = torch.from_numpy(X_np)
    Xtrain = Xtrain.float().to(device)
    inputSignalMultiplier = 1
    Xtrain = inputSignalMultiplier * Xtrain
elif(dataset == 'netsim'):
    fileName = "data/netsim/sim3_subject_%s.npz" % (dataset_id)
    ld = np.load(fileName)
    X_np = ld['X_np']
    Gref = ld['Gref']
    numTotalSamples = T
    Xtrain = torch.from_numpy(X_np)
    Xtrain = Xtrain.float().to(device)
    inputSignalMultiplier = 1
    Xtrain = inputSignalMultiplier * Xtrain
else:
    print("Dataset is not supported")

# Optional sanity plot of the first time series.
if(verbose >= 1):
    plt.figure(1)
    plt.xlabel("t")
    plt.ylabel("x0(t)")
    plt.plot(range(numTotalSamples), Xtrain.cpu().numpy()[0][:])
    plt.show(block=False)
    plt.pause(0.1)
######################################
# SRU Cell parameters
######################################
#######################################
# Model training parameters
######################################
# Per-model, per-dataset hyperparameters: EWMA scales `A`, hidden-state
# dimensions, and batching.  All values were hand-tuned per dataset.
if(model_name == 'sru'):
    lr_gamma = 0.99          # multiplicative LR decay
    lr_update_gap = 4        # epochs between LR decays
    staggerTrainWin = 1      # 1 -> random offset of the training window
    stoppingThresh = 1e-5;
    trainVerboseLvl = 2
    lr = lr
    lambda1 = mu1            # L1 (elementwise sparsity) weight
    lambda2 = mu2            # group-sparsity weight on input columns
    n_inp_channels = n
    n_out_channels = 1
    if(dataset == 'gene'):
        A = [0.0, 0.01, 0.1, 0.5, 0.99]; #0.75
        dim_iid_stats = 10 #math.ceil(n) #1.5n
        dim_rec_stats = 10 #math.ceil(n) #1.5n
        dim_final_stats = 10 #d * len(A) #math.ceil(n/2)
        dim_rec_stats_feedback = 10 #d * len(A)
        batchSize = 21
        blk_size = batchSize
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'var'):
        A = [0.0, 0.01, 0.1, 0.99];
        dim_iid_stats = 10 #math.ceil(n) #1.5n
        dim_rec_stats = 10 #math.ceil(n) #1.5n
        dim_final_stats = 10 #d * len(A) #math.ceil(n/2) #n
        dim_rec_stats_feedback = 10 #d * len(A) #math.ceil(n/2) #n
        batchSize = 250
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'lorenz'):
        A = [0.0, 0.01, 0.1, 0.99];
        dim_iid_stats = 10
        dim_rec_stats = 10
        dim_final_stats = 10
        dim_rec_stats_feedback = 10
        batchSize = 250
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'netsim'):
        A = [0.0, 0.01, 0.05, 0.1, 0.99];
        dim_iid_stats = 10
        dim_rec_stats = 10
        dim_final_stats = 10
        dim_rec_stats_feedback = 10
        batchSize = 10 #100
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    else:
        print("Unsupported dataset encountered")
elif(model_name == 'eSRU_1LF' or model_name == 'eSRU_2LF'):
    lr_gamma = 0.99
    lr_update_gap = 4
    staggerTrainWin = 1
    stoppingThresh = 1e-5;
    trainVerboseLvl = 2
    lr = lr
    lambda1 = mu1
    lambda2 = mu2
    lambda3 = mu3            # group-sparsity weight across EWMA scales
    n_inp_channels = n
    n_out_channels = 1
    if(dataset == 'gene'):
        A = [0.05, 0.1, 0.2, 0.99];
        dim_iid_stats = 10
        dim_rec_stats = 10
        dim_final_stats = 10
        dim_rec_stats_feedback = 10
        batchSize = 21
        blk_size = int(batchSize)
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'var'):
        A = [0.0, 0.01, 0.1, 0.99];
        dim_iid_stats = 10 #math.ceil(n) #1.5n
        dim_rec_stats = 10 #math.ceil(n) #1.5n
        dim_final_stats = 10 #d * len(A) #math.ceil(n/2) #n
        dim_rec_stats_feedback = 10 #d * len(A) #math.ceil(n/2) #n
        batchSize = 250
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'lorenz'):
        #lr = 0.01
        A = [0.0, 0.01, 0.1, 0.99];
        dim_iid_stats = 10
        dim_rec_stats = 10
        dim_final_stats = 10 #d*len(A)
        dim_rec_stats_feedback = 10 #d*len(A)
        batchSize = 250
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    elif(dataset == 'netsim'):
        A = [0.0, 0.01, 0.1, 0.99];
        dim_iid_stats = 10
        dim_rec_stats = 10
        dim_final_stats = 10 #d*len(A)
        dim_rec_stats_feedback = 10 #d*len(A)
        batchSize = 10 #10 #100
        blk_size = int(batchSize/2)
        numBatches = int(numTotalSamples/batchSize)
    else:
        print("Unsupported dataset encountered")
else:
    print("Unsupported model encountered")
############################################
# Evaluate ROC plots (regress mu2)
############################################
# Train one model per target node; the estimated causal strength of input j
# on node i is the column-wise L2 norm of the input layer's weight block.
if 1:
    Gest = torch.zeros(n, n, requires_grad = False)
    if(model_name == 'sru'):
        for predictedNode in range(n):
            start = time.time()
            print("node = %d" % (predictedNode))
            model = SRU(n_inp_channels, n_out_channels, dim_iid_stats, dim_rec_stats, dim_rec_stats_feedback, dim_final_stats, A, device)
            model.to(device) # shift to CPU/GPU memory
            print(count_parameters(model))
            model, lossVec = trainSRU(model, Xtrain, device, numBatches, batchSize, blk_size, predictedNode, max_iter,
                                      lambda1, lambda2, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, trainVerboseLvl)
            # Row of Gest: L2 norm of each input column of the first layer.
            Gest.data[predictedNode, :] = torch.norm(model.lin_xr2phi.weight.data[:, :n], p=2, dim=0)
            print("Elapsed time (1) = % s seconds" % (time.time() - start))
    elif(model_name == 'eSRU_1LF'):
        for predictedNode in range(n):
            start = time.time()
            print("node = %d" % (predictedNode))
            model = eSRU_1LF(n_inp_channels, n_out_channels, dim_iid_stats, dim_rec_stats, dim_rec_stats_feedback, dim_final_stats, A, device)
            model.to(device) # shift to CPU/GPU memory
            print(count_parameters(model))
            model, lossVec = train_eSRU_1LF(model, Xtrain, device, numBatches, batchSize, blk_size, predictedNode, max_iter,
                                            lambda1, lambda2, lambda3, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, trainVerboseLvl)
            Gest.data[predictedNode, :] = torch.norm(model.lin_xr2phi.weight.data[:, :n], p=2, dim=0)
            print("Elapsed time (1) = % s seconds" % (time.time() - start))
    elif(model_name == 'eSRU_2LF'):
        for predictedNode in range(n):
            start = time.time()
            print("node = %d" % (predictedNode))
            model = eSRU_2LF(n_inp_channels, n_out_channels, dim_iid_stats, dim_rec_stats, dim_rec_stats_feedback, dim_final_stats, A, device)
            model.to(device) # shift to CPU/GPU memory
            print(count_parameters(model))
            model, lossVec = train_eSRU_2LF(model, Xtrain, device, numBatches, batchSize, blk_size, predictedNode, max_iter,
                                            lambda1, lambda2, lambda3, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, trainVerboseLvl)
            Gest.data[predictedNode, :] = torch.norm(model.lin_xr2phi.weight.data[:, :n], p=2, dim=0)
            print("Elapsed time (1) = % s seconds" % (time.time() - start))
    else:
        print("Unsupported model encountered")
    print(Gref)
    print(Gest)
    # Persist run results and full configuration for downstream scoring/HPO.
    if(jobLogFilename != ""):
        if(model_name == 'eSRU_1LF' or model_name == 'eSRU_2LF'):
            np.savez(jobLogFilename,
                     Gref=Gref,
                     Gest=Gest.detach().cpu().numpy(),
                     model=model_name,
                     dataset=dataset,
                     dsid=dataset_id,
                     T=T,
                     F=F,
                     nepochs=max_iter,
                     mu1=mu1,
                     mu2=mu2,
                     mu3=mu3,
                     lr=lr,
                     batchSize=batchSize,
                     blk_size=blk_size,
                     numBatches=numBatches,
                     dim_iid_stats=dim_iid_stats,
                     dim_rec_stats=dim_rec_stats,
                     dim_final_stats=dim_final_stats,
                     dim_rec_stats_feedback=dim_rec_stats_feedback)
        else:
            np.savez(jobLogFilename, Gref=Gref, Gest=Gest.detach().cpu().numpy(), model=model_name, dataset=dataset, dsid=dataset_id, T=T, F=F, nepochs=max_iter, mu1=mu1, mu2=mu2, lr=lr)

# sleep for one seconds followed by printing
# the exit key for tmux consumption
time.sleep(1)
print("#RUN_COMPLETE #RUN_COMPLETE #RUN_COMPLETE #RUN_COMPLETE")
| 12,432 | 33.72905 | 186 | py |
SRU_for_GCI | SRU_for_GCI-master/models/esru_2LF.py | import time
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from copy import deepcopy
# Statistical Recurrent Unit class (based on paper by Junier B. Oliva, arXiv:1703.00381v1)
class eSRU_2LF(torch.nn.Module):
    """Economy SRU cell with a two-layer feedback (2LF) path.

    The multi-scale hidden state u_t is a bank of exponentially weighted
    moving averages (one per scale in ``A``) of the iid statistics phi_t.
    The feedback r_t is produced by sketching u_{t-1} through a fixed random
    projection followed by two ELU linear layers.
    """

    def __init__(self,
                 n_inp_channels,            # dimension of input sequence
                 n_out_channels,            # dimension of output (predicted) sequence
                 dim_iid_stats,             # dimension of iid statistics \phi
                 dim_rec_stats,             # dimension of recurrent stats u
                 dim_rec_stats_feedback,    # dimension of recurrent stats fed back as 'r' to generate iid stats
                 dim_final_stats,           # dimension of final stats o
                 A,                         # set of scales for exponentially weighted moving averages
                 device                     # CPU/GPU memory for storing tensors
                 ):
        # inherit the default attributes of Module class
        super(eSRU_2LF, self).__init__()
        # initialization of SRU parameters
        self.type = 'eSRU_2LF'
        self.n_inp_channels = n_inp_channels                    # dimension of input data
        self.n_out_channels = n_out_channels                    # dimension of predicted output
        self.dim_iid_stats = dim_iid_stats                      # dimension of 'phi_t'
        self.dim_rec_stats = dim_rec_stats                      # dimension of 'u_t'
        self.dim_final_stats = dim_final_stats                  # dimension of 'o_t'
        self.dim_rec_stats_feedback = dim_rec_stats_feedback    # dimension of 'r_t'
        self.numScales = len(A)
        # Take kronecker product: A \otimes 1_{dim_iid_stats} — per-scale decay
        # factors broadcast over every component of phi.
        self.A_mask = torch.Tensor([x for x in (A) for i in range(dim_iid_stats)]).view(1, -1)
        self.A_mask.requires_grad = False
        self.A_mask = self.A_mask.to(device) # shift to GPU memory
        # Initialization of SRU cell's tensors
        self.phi_t = torch.zeros(dim_iid_stats, 1, requires_grad=True, device=device)
        self.phi_tile = torch.zeros(dim_iid_stats*self.numScales, 1, requires_grad=True, device=device)
        self.r_t = torch.zeros(dim_rec_stats_feedback, 1, requires_grad=True, device=device)
        self.o_t = torch.zeros(dim_final_stats, 1, requires_grad=True, device=device)
        self.y_t = torch.zeros(n_out_channels, 1, requires_grad=True, device=device)
        self.u_t = torch.zeros(1, dim_rec_stats * self.numScales, requires_grad=True, device=device)
        # Detached copy of the previous hidden state (updated via .data below).
        self.u_t_prev = torch.zeros(1, dim_rec_stats * self.numScales, device=device)
        # MLPs in SRU cell
        self.lin_xr2phi = nn.Linear(n_inp_channels + dim_rec_stats_feedback, dim_iid_stats, bias=True)
        self.lin_r1 = nn.Linear(dim_rec_stats_feedback, dim_rec_stats_feedback, bias=True)
        self.lin_r2 = nn.Linear(dim_rec_stats_feedback, dim_rec_stats_feedback, bias=True)
        self.lin_o = nn.Linear(self.numScales*dim_rec_stats, dim_final_stats, bias=True)
        self.lin_y = nn.Linear(dim_final_stats, n_out_channels, bias=True)
        # Fixed (non-trainable) random matrix for sketching the hidden state
        # down to the feedback dimension.
        self.intrMat_h2r_transpose = (1/math.sqrt(dim_rec_stats_feedback)) * torch.randn(self.numScales*dim_rec_stats, dim_rec_stats_feedback, requires_grad=False, device=device)

    # SRU forward pass: one time step; hidden state is carried in u_t_prev.
    def forward(self, x_t):
        # Generate feedback statistics
        self.r_t = torch.matmul(self.u_t_prev, self.intrMat_h2r_transpose) # sketch of hidden state
        self.r_t = F.elu(self.lin_r1(self.r_t))   # layer 1
        self.r_t = F.elu(self.lin_r2(self.r_t))   # layer 2
        # Generate iid statistics: phi_t
        self.phi_t = F.elu(self.lin_xr2phi(torch.cat((x_t, torch.flatten(self.r_t)))))
        # Generate multiscale recurrent statistics: u_t (per-scale EWMA update)
        self.phi_tile = self.phi_t.repeat(1, self.numScales)
        self.u_t = torch.mul(self.A_mask, self.u_t_prev) + torch.mul((1-self.A_mask), self.phi_tile)
        # Detach the carried state from the graph (truncated BPTT of length 1).
        self.u_t_prev.data = self.u_t.data
        # Generate final statistics: o_t
        self.o_t = F.elu(self.lin_o(self.u_t))
        # Generate predicted output: y_t
        self.y_t = self.lin_y(self.o_t)
        return self.y_t

    def reset_recurrent_stats(self):
        """Zero the carried hidden state (call at the start of each block)."""
        self.u_t_prev.fill_(0)
############################################
# train_eSRU_2LF
############################################
def train_eSRU_2LF(model, trainingData, device, numBatches, batchSize, blk_size, predictedIdx, max_iter,
                   lambda1, lambda2, lambda3, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, verbose):
    """Train an eSRU_2LF cell to predict node `predictedIdx` one step ahead.

    Optimization is proximal: Adam handles the smooth MSE term, then
    soft-thresholding is applied manually — elementwise L1 (lambda1) on most
    parameters, column-group sparsity (lambda2) on the n input columns of
    lin_xr2phi, and cross-scale group sparsity (lambda3) on lin_o rows.
    Stops early once the parameter change stays below `stoppingThresh` for
    10 consecutive epochs.  Returns (model, lossVec) where lossVec[e] holds
    [fit error, total regularized loss] per epoch.
    """
    stoppingCntr = 0
    stoppingCntrThr = 10
    n = trainingData.shape[0]
    numTotalSamples = trainingData.shape[1]
    wtMtxRow = torch.zeros(model.numScales * model.dim_final_stats, 1, requires_grad = False, device=device)
    # Snapshots of all trainable tensors, used by the stopping criterion.
    lin_xr2phi_weight = deepcopy(model.lin_xr2phi.weight.data)
    lin_xr2phi_bias = deepcopy(model.lin_xr2phi.bias.data)
    lin_r1_weight = deepcopy(model.lin_r1.weight.data)
    lin_r1_bias = deepcopy(model.lin_r1.bias.data)
    lin_r2_weight = deepcopy(model.lin_r2.weight.data)
    lin_r2_bias = deepcopy(model.lin_r2.bias.data)
    lin_o_weight = deepcopy(model.lin_o.weight.data)
    lin_o_bias = deepcopy(model.lin_o.bias.data)
    lin_y_weight = deepcopy(model.lin_y.weight.data)
    lin_y_bias = deepcopy(model.lin_y.bias.data)
    #####################################
    # Initialize miscellaneous tensors
    #####################################
    IdxArr = torch.unsqueeze(torch.arange(1, n+1, dtype=torch.float), 1)  # 1 to n array for plotting purposes
    estWeights = torch.zeros(n, 1, requires_grad = False)
    prevWeights = torch.zeros(model.dim_iid_stats, n, requires_grad = False, device=device)
    lossVec = torch.zeros(max_iter, 2)
    lossVec.to(device)
    mseLoss = nn.MSELoss(reduction = 'sum')
    L1Loss = nn.L1Loss(reduction = 'sum')
    softshrink1 = torch.nn.Softshrink(lambda1)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, lr_update_gap, lr_gamma)
    batchCntr = 0
    trainingLoss = 0
    fitErr = 0
    start_time = 0
    stop_time = start_time + blk_size - 1
    optimizer.zero_grad()
    for epoch in range(max_iter):
        start1 = time.time()
        # Make deep copy of trainable model parameters to use later in checking the stopping criterion
        with torch.no_grad():
            lin_xr2phi_weight[:, :] = model.lin_xr2phi.weight.data[:, :]
            lin_xr2phi_bias[:] = model.lin_xr2phi.bias.data[:]
            lin_r1_weight[:, :] = model.lin_r1.weight.data[:, :]
            lin_r1_bias[:] = model.lin_r1.bias.data[:]
            lin_r2_weight[:, :] = model.lin_r2.weight.data[:, :]
            lin_r2_bias[:] = model.lin_r2.bias.data[:]
            lin_o_weight[:, :] = model.lin_o.weight.data[:, :]
            lin_o_bias[:] = model.lin_o.bias.data[:]
            lin_y_weight[:, :] = model.lin_y.weight.data[:, :]
            lin_y_bias[:] = model.lin_y.bias.data[:]
        # Update start and stop times for next training batch
        printEpoch = 0
        batchCntr = batchCntr + 1
        if(batchCntr == numBatches+1):
            batchCntr = 1
            trainingLoss = 0
            fitErr = 0
            # print epoch summary
            if(verbose > 0):
                printEpoch = 1
        # Optionally stagger the training window by a random offset within
        # the batch so successive sweeps see different sub-blocks.
        if(staggerTrainWin == 0):
            offset = 0
        else:
            offset = math.floor(np.random.uniform()*(batchSize-blk_size))
        start_time = (batchCntr-1)*batchSize + offset
        stop_time = start_time + blk_size - 1
        # Reset recurrent stats u_t
        optimizer.zero_grad()
        model.reset_recurrent_stats()
        # Forward pass: roll the cell across the block, accumulating per-step
        # one-step-ahead prediction losses.
        smooth_loss_list = []
        for tt in range(start_time, stop_time, 1):
            model.forward(trainingData[:, tt])
            smooth_loss = (1/(blk_size-1))*mseLoss(torch.flatten(model.y_t), torch.unsqueeze(trainingData[predictedIdx, tt+1], 0))
            smooth_loss_list.append(smooth_loss)
            #lossVec[epoch][0] = smooth_loss.item()
        # Use autograd to compute the backward pass (accumulate gradients on each pass).
        model.lin_xr2phi.weight.retain_grad()
        sum([smooth_loss_list[i] for i in range(blk_size-1)]).backward()
        lossVec[epoch][0] = sum([smooth_loss_list[i].item() for i in range(blk_size-1)])
        #print("111: %s" % torch.cuda.memory_allocated(device))
        optimizer.step()
        optimizer.zero_grad()
        # Adjust for regularization (proximal soft-thresholding step).
        lr_current = optimizer.param_groups[0]['lr']
        # NOTE(review): the shrink thresholds below use the *initial* `lr`,
        # while the clamp guards at the group updates use the scheduled
        # `lr_current` — confirm whether the thresholds were meant to follow
        # the LR schedule as well.
        softshrink1 = nn.Softshrink(lambda1*lr)
        softshrink2 = nn.Softshrink(lambda2*lr)
        softshrink3 = nn.Softshrink(lambda3*lr)
        with torch.no_grad():
            # Update all network parameters except for input layer weight matrix
            model.lin_xr2phi.weight[:, n:].data = softshrink1(model.lin_xr2phi.weight[:, n:]).data
            model.lin_xr2phi.bias.data = softshrink1(model.lin_xr2phi.bias).data
            model.lin_r1.weight.data = softshrink1(model.lin_r1.weight).data
            model.lin_r1.bias.data = softshrink1(model.lin_r1.bias).data
            model.lin_r2.weight.data = softshrink1(model.lin_r2.weight).data
            model.lin_r2.bias.data = softshrink1(model.lin_r2.bias).data
            #model.lin_o.weight.data = softshrink1(model.lin_o.weight).data
            model.lin_o.bias.data = softshrink1(model.lin_o.bias).data
            model.lin_y.weight.data = softshrink1(model.lin_y.weight).data
            model.lin_y.bias.data = softshrink1(model.lin_y.bias).data
            # Update input layer weight matrix: group (column-wise) shrinkage
            # of the n input columns; the clamp avoids division by ~0 for
            # columns that are fully shrunk.
            inpWgtMtx = model.lin_xr2phi.weight[:, :n]
            l2normTensor = torch.norm(inpWgtMtx, p=2, dim=0, keepdim=True) # 1 x n row tensor
            #model.lin_xr2phi.weight.data[:,:n] = ((inpWgtMtx / torch.clamp(l2normTensor, min=(lambda2 * lr_current * 0.1)))
            #                                      * torch.clamp(l2normTensor - (lr_current * lambda2), min=0.0))
            model.lin_xr2phi.weight.data[:, :n] = inpWgtMtx*(softshrink2(l2normTensor)/torch.clamp(l2normTensor, min=lambda2*lr_current*0.1))
            # Update the weight matrix mapping multi-time scale hidden state to
            # the lag sensitive features for prediction purpose
            for rr in range(model.dim_final_stats):
                wtMtxRow.data = model.lin_o.weight.data[rr, :]
                # reshape wtMtxRow as (numScales x dim_rec_stats) matrix
                wtMtxRowReshaped = wtMtxRow.view(model.numScales, model.dim_rec_stats)
                l2normTensor1 = torch.norm(wtMtxRowReshaped, p=2, dim=0, keepdim=True) # 1 x dim_final_stats row tensor
                model.lin_o.weight.data[rr, :] = (wtMtxRowReshaped*(softshrink3(l2normTensor1)/torch.clamp(l2normTensor1, min=lambda3*lr_current*0.1))).flatten().data[:]
        # Compute and log regularization loss without updating gradients
        loss1 = lambda1*((torch.norm(model.lin_y.weight.data, 1) + torch.norm(model.lin_y.bias.data, 1) +
                          torch.norm(model.lin_xr2phi.weight[:, n:].data, 1)) + torch.norm(model.lin_xr2phi.bias.data, 1) +
                         torch.norm(model.lin_o.weight.data, 1) + torch.norm(model.lin_o.bias.data, 1) +
                         torch.norm(model.lin_r1.weight.data, 1) + torch.norm(model.lin_r1.bias.data, 1) +
                         torch.norm(model.lin_r2.weight.data, 1) + torch.norm(model.lin_r2.bias.data, 1))
        lossVec[epoch][1] = lossVec[epoch][1] + loss1.item()
        loss2 = lambda2*torch.sum(torch.norm(model.lin_xr2phi.weight.data, p=2, dim=0)[:n])
        lossVec[epoch][1] = lossVec[epoch][1] + loss2.item()
        # Again force gradient to be zero (just to be extra safe)
        optimizer.zero_grad()
        scheduler.step()
        # Record total-loss for current epoch
        lossVec[epoch][1] = lossVec[epoch][1] + lossVec[epoch][0]
        trainingLoss = trainingLoss + lossVec[epoch][1]
        fitErr = fitErr + lossVec[epoch][0]
        # Parameter movement this epoch (squared distance to the snapshot),
        # used as the convergence measure.
        with torch.no_grad():
            paramDelta = (mseLoss(model.lin_y.weight, lin_y_weight)
                          + mseLoss(model.lin_y.bias, lin_y_bias)
                          + mseLoss(model.lin_xr2phi.weight, lin_xr2phi_weight)
                          + mseLoss(model.lin_xr2phi.bias, lin_xr2phi_bias)
                          + mseLoss(model.lin_o.weight, lin_o_weight)
                          + mseLoss(model.lin_o.bias, lin_o_bias)
                          + mseLoss(model.lin_r1.weight, lin_r1_weight)
                          + mseLoss(model.lin_r1.bias, lin_r1_bias)
                          + mseLoss(model.lin_r2.weight, lin_r2_weight)
                          + mseLoss(model.lin_r2.bias, lin_r2_bias)).data
        if(printEpoch == 1):
            print('Predicted Node = %d \t epoch = %s \t lr = %.4f \t Training loss = %.4f \t Fit error = %.4f \t Delta = %f' % (predictedIdx, epoch, optimizer.param_groups[0]['lr'], trainingLoss, fitErr, paramDelta))
            #for col in range(n):
            #    estWeights.data[col] = torch.norm(model.lin_xr2phi.weight.data[:,col], 2)
            #print(torch.cat((IdxArr, estWeights), 1)[:10])
            #print(sruCell.lin_xr2phi.weight.grad.data[:,:n_inp_channels])
            #print(optimizer.param_groups[0]['lr']*sruCell.lin_o.weight.grad.data[0,:])
            #print(model.lin_o.weight.grad.data)
            #print(model.lin_xr2phi.weight.data[0,:])
            #print(optimizer.param_groups[0]['lr']*model.lin_xr2phi.weight.grad.data[0,:])
            #print(model.o_t.data)
            #print(model.lin_r.weight.data[0,:])
            #print(model.lin_y.weight.data)
            #print("-------")
        # Stopping criterion: require `stoppingCntrThr` consecutive epochs of
        # sub-threshold parameter movement before terminating.
        if(paramDelta < stoppingThresh):
            stoppingCntr = stoppingCntr + 1
            if(stoppingCntr == stoppingCntrThr):
                break
        else:
            stoppingCntr = 0
        # run your code
        if(printEpoch == 1):
            print("Elapsed time (1) = % s seconds" % (time.time() - start1))
    return model, lossVec
| 14,315 | 47.040268 | 216 | py |
SRU_for_GCI | SRU_for_GCI-master/models/esru_1LF.py | import time
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from copy import deepcopy
# Statistical Recurrent Unit class (based on paper by Junier B. Oliva, arXiv:1703.00381v1)
class eSRU_1LF(torch.nn.Module):
    """Economy SRU with one learned feedback layer (eSRU-1LF).

    Statistical Recurrent Unit variant (after Oliva et al., arXiv:1703.00381)
    whose recurrent feedback r_t is a fixed random sketch of the multi-scale
    hidden state followed by a single learned linear map.
    """

    def __init__(self,
                 n_inp_channels,          # dimension of input sequence
                 n_out_channels,          # dimension of output (predicted) sequence
                 dim_iid_stats,           # dimension of iid statistics phi
                 dim_rec_stats,           # dimension of recurrent stats u
                 dim_rec_stats_feedback,  # dimension of feedback stats r
                 dim_final_stats,         # dimension of final stats o
                 A,                       # scales for exponentially weighted moving averages
                 device):                 # CPU/GPU memory for storing tensors
        super(eSRU_1LF, self).__init__()
        self.type = 'eSRU_1LF'
        # Record the dimensions of the cell's internal signals.
        self.n_inp_channels = n_inp_channels
        self.n_out_channels = n_out_channels
        self.dim_iid_stats = dim_iid_stats
        self.dim_rec_stats = dim_rec_stats
        self.dim_final_stats = dim_final_stats
        self.dim_rec_stats_feedback = dim_rec_stats_feedback
        self.numScales = len(A)
        # Kronecker product A (x) 1_{dim_iid_stats}: one EWMA decay per hidden coordinate.
        self.A_mask = torch.Tensor(list(A)).repeat_interleave(dim_iid_stats).view(1, -1)
        self.A_mask.requires_grad = False
        self.A_mask = self.A_mask.to(device)
        # Cell state tensors.
        self.phi_t = torch.zeros(dim_iid_stats, 1, requires_grad=True, device=device)
        self.phi_tile = torch.zeros(dim_iid_stats * self.numScales, 1, requires_grad=True, device=device)
        self.r_t = torch.zeros(dim_rec_stats_feedback, 1, requires_grad=True, device=device)
        self.o_t = torch.zeros(dim_final_stats, 1, requires_grad=True, device=device)
        self.y_t = torch.zeros(n_out_channels, 1, requires_grad=True, device=device)
        self.u_t = torch.zeros(1, dim_rec_stats * self.numScales, requires_grad=True, device=device)
        self.u_t_prev = torch.zeros(1, dim_rec_stats * self.numScales, device=device)
        # Learned linear maps (creation order is fixed on purpose: it determines
        # how the global RNG state is consumed during initialization).
        self.lin_xr2phi = nn.Linear(n_inp_channels + dim_rec_stats_feedback, dim_iid_stats, bias=True)
        self.lin_r1 = nn.Linear(dim_rec_stats_feedback, dim_rec_stats_feedback, bias=True)
        self.lin_o = nn.Linear(self.numScales * dim_rec_stats, dim_final_stats, bias=True)
        self.lin_y = nn.Linear(dim_final_stats, n_out_channels, bias=True)
        # Fixed (untrained) random matrix that sketches the hidden state down to
        # the feedback dimension.
        self.intrMat_h2r_transpose = (1 / math.sqrt(dim_rec_stats_feedback)) * torch.randn(
            self.numScales * dim_rec_stats, dim_rec_stats_feedback,
            requires_grad=False, device=device)

    def forward(self, x_t):
        """One recurrent step: consume the 1-D input x_t and return y_t."""
        # Feedback statistics: random sketch of the previous state, then a learned map.
        sketch = torch.matmul(self.u_t_prev, self.intrMat_h2r_transpose)
        self.r_t = F.elu(self.lin_r1(sketch))
        # iid statistics phi_t from the joint (input, feedback) vector.
        joint_inp = torch.cat((x_t, torch.flatten(self.r_t)))
        self.phi_t = F.elu(self.lin_xr2phi(joint_inp))
        # Multi-scale recurrent statistics u_t via per-scale EWMA.
        self.phi_tile = self.phi_t.repeat(1, self.numScales)
        self.u_t = self.A_mask * self.u_t_prev + (1 - self.A_mask) * self.phi_tile
        self.u_t_prev.data = self.u_t.data
        # Final statistics o_t and predicted output y_t.
        self.o_t = F.elu(self.lin_o(self.u_t))
        self.y_t = self.lin_y(self.o_t)
        return self.y_t

    def reset_recurrent_stats(self):
        """Zero the recurrent state before processing a new sequence window."""
        self.u_t_prev.fill_(0)
############################################
# trainSRU
############################################
def train_eSRU_1LF(model, trainingData, device, numBatches, batchSize, blk_size, predictedIdx, max_iter,
                   lambda1, lambda2, lambda3, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, verbose):
    """Fit an eSRU_1LF model to one-step-ahead prediction of one time series.

    Each epoch takes an Adam step on the windowed MSE, then applies proximal
    (soft-threshold) updates implementing three penalties:
      * lambda1 — elementwise l1 on all weights/biases except the input columns
        of ``lin_xr2phi``,
      * lambda2 — column-wise group sparsity on the first ``n`` input columns of
        ``lin_xr2phi`` (drives whole input series in/out of the model),
      * lambda3 — per-row group sparsity across time scales on ``lin_o``.

    Args:
        model: eSRU_1LF instance to train (modified in place).
        trainingData: (n, T) tensor holding n series with T samples each.
        device: torch device for scratch tensors.
        numBatches, batchSize, blk_size: split of the time axis; each epoch
            trains on one window of ``blk_size`` consecutive samples.
        predictedIdx: row of ``trainingData`` being predicted.
        max_iter: maximum number of epochs.
        lambda1, lambda2, lambda3: regularization strengths (see above).
        lr, lr_gamma, lr_update_gap: Adam step size and StepLR schedule.
        staggerTrainWin: nonzero randomizes the window offset inside a batch.
        stoppingThresh: per-epoch parameter-change threshold for early stopping.
        verbose: >0 prints a summary once per full pass over all batches.

    Returns:
        (model, lossVec) where lossVec[epoch] = [fit error, total loss].
    """
    stoppingCntr = 0
    stoppingCntrThr = 10  # stop after this many consecutive sub-threshold updates
    n = trainingData.shape[0]
    numTotalSamples = trainingData.shape[1]
    # NOTE(review): sized with dim_final_stats but later rebound (via .data) to a
    # lin_o row of length numScales*dim_rec_stats; the rebind makes the initial
    # size irrelevant — confirm.
    wtMtxRow = torch.zeros(model.numScales * model.dim_final_stats, 1, requires_grad = False, device=device)
    # Snapshots of all trainable parameters, refreshed each epoch to measure the
    # parameter change (paramDelta) used by the stopping criterion.
    lin_xr2phi_weight = deepcopy(model.lin_xr2phi.weight.data)
    lin_xr2phi_bias = deepcopy(model.lin_xr2phi.bias.data)
    lin_r1_weight = deepcopy(model.lin_r1.weight.data)
    lin_r1_bias = deepcopy(model.lin_r1.bias.data)
    lin_o_weight = deepcopy(model.lin_o.weight.data)
    lin_o_bias = deepcopy(model.lin_o.bias.data)
    lin_y_weight = deepcopy(model.lin_y.weight.data)
    lin_y_bias = deepcopy(model.lin_y.bias.data)
    #####################################
    # Initialize miscellaneous tensors
    #####################################
    IdxArr = torch.unsqueeze(torch.arange(1,n+1, dtype=torch.float),1) # 1 to n array for plotting purposes
    estWeights = torch.zeros(n, 1, requires_grad = False)
    prevWeights = torch.zeros(model.dim_iid_stats, n, requires_grad = False, device=device)
    lossVec = torch.zeros(max_iter,2)
    lossVec.to(device)
    mseLoss = nn.MSELoss(reduction = 'sum')
    L1Loss = nn.L1Loss(reduction = 'sum')
    softshrink1 = torch.nn.Softshrink(lambda1)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, lr_update_gap, lr_gamma)
    batchCntr = 0
    trainingLoss = 0
    fitErr = 0
    start_time = 0
    stop_time = start_time + blk_size -1
    optimizer.zero_grad()
    for epoch in range(max_iter):
        start1 = time.time()
        # Snapshot current parameters before this epoch's update.
        with torch.no_grad():
            lin_xr2phi_weight[:,:] = model.lin_xr2phi.weight.data[:,:]
            lin_xr2phi_bias[:] = model.lin_xr2phi.bias.data[:]
            lin_r1_weight[:,:] = model.lin_r1.weight.data[:,:]
            lin_r1_bias[:] = model.lin_r1.bias.data[:]
            lin_o_weight[:,:] = deepcopy(model.lin_o.weight.data[:,:])
            lin_o_bias[:] = deepcopy(model.lin_o.bias.data[:])
            lin_y_weight[:,:] = deepcopy(model.lin_y.weight.data[:,:])
            lin_y_bias[:] = deepcopy(model.lin_y.bias.data[:])
        # Select the training window (cycle through batches; log once per pass).
        printEpoch = 0
        batchCntr = batchCntr + 1
        if(batchCntr == numBatches+1):
            batchCntr = 1
            trainingLoss = 0
            fitErr = 0
            # print epoch summary once per full pass over the data
            if(verbose > 0):
                printEpoch = 1
        if(staggerTrainWin == 0):
            offset = 0
        else:
            offset = math.floor(np.random.uniform()*(batchSize-blk_size))
        start_time = (batchCntr-1)*batchSize + offset
        stop_time = start_time + blk_size - 1
        # Reset recurrent stats u_t before replaying the window.
        optimizer.zero_grad()
        model.reset_recurrent_stats()
        # Forward pass: accumulate the per-step one-step-ahead MSE terms.
        smooth_loss_list = []
        for tt in range(start_time,stop_time,1):
            model.forward(trainingData[:,tt])
            smooth_loss = (1/(blk_size-1))*mseLoss(torch.flatten(model.y_t), torch.unsqueeze(trainingData[predictedIdx,tt+1], 0))
            smooth_loss_list.append(smooth_loss)
        # Backward pass on the summed window loss.
        model.lin_xr2phi.weight.retain_grad()
        sum([smooth_loss_list[i] for i in range(blk_size-1)]).backward()
        lossVec[epoch][0] = sum([smooth_loss_list[i].item() for i in range(blk_size-1)])
        optimizer.step()
        optimizer.zero_grad()
        # Proximal updates for the regularizers.
        # NOTE(review): the shrink thresholds use the base ``lr`` while the
        # clamp floors use ``lr_current`` — confirm this mix is intentional.
        lr_current = optimizer.param_groups[0]['lr']
        softshrink1 = nn.Softshrink(lambda1*lr)
        softshrink2 = nn.Softshrink(lambda2*lr)
        softshrink3 = nn.Softshrink(lambda3*lr)
        with torch.no_grad():
            # Elementwise l1 prox on everything except the input weight columns
            # of lin_xr2phi and the lin_o weight (handled by group proxes below).
            model.lin_xr2phi.weight[:,n:].data = softshrink1(model.lin_xr2phi.weight[:,n:]).data
            model.lin_xr2phi.bias.data = softshrink1(model.lin_xr2phi.bias).data
            model.lin_r1.weight.data = softshrink1(model.lin_r1.weight).data
            model.lin_r1.bias.data = softshrink1(model.lin_r1.bias).data
            model.lin_o.bias.data = softshrink1(model.lin_o.bias).data
            model.lin_y.weight.data = softshrink1(model.lin_y.weight).data
            model.lin_y.bias.data = softshrink1(model.lin_y.bias).data
            # Column-wise group-sparsity prox on the input weight matrix.
            inpWgtMtx = model.lin_xr2phi.weight[:,:n]
            l2normTensor = torch.norm(inpWgtMtx, p=2, dim=0, keepdim=True) # 1 x n row tensor
            model.lin_xr2phi.weight.data[:,:n] = inpWgtMtx*(softshrink2(l2normTensor)/torch.clamp(l2normTensor, min=lambda2*lr_current*0.1))
            # Row-wise, across-scale group-sparsity prox on lin_o (maps the
            # multi-time-scale hidden state to lag-sensitive features).
            for rr in range(model.dim_final_stats):
                wtMtxRow.data = model.lin_o.weight.data[rr,:]
                # reshape wtMtxRow as (numScales x dim_rec_stats) matrix
                wtMtxRowReshaped = wtMtxRow.view(model.numScales, model.dim_rec_stats)
                l2normTensor1 = torch.norm(wtMtxRowReshaped, p=2, dim=0, keepdim=True) # 1 x dim_rec_stats row tensor
                model.lin_o.weight.data[rr,:] = (wtMtxRowReshaped*(softshrink3(l2normTensor1)/torch.clamp(l2normTensor1, min=lambda3*lr_current*0.1))).flatten().data[:]
            # Compute and log regularization loss without updating gradients.
            loss1 = lambda1*((torch.norm(model.lin_y.weight.data, 1)+ torch.norm(model.lin_y.bias.data, 1) +
                torch.norm(model.lin_xr2phi.weight[:,n:].data, 1)) + torch.norm(model.lin_xr2phi.bias.data, 1) +
                torch.norm(model.lin_o.weight.data, 1) + torch.norm(model.lin_o.bias.data, 1) +
                torch.norm(model.lin_r1.weight.data, 1) + torch.norm(model.lin_r1.bias.data, 1))
            lossVec[epoch][1] = lossVec[epoch][1] + loss1.item()
            loss2 = lambda2*torch.sum(torch.norm(model.lin_xr2phi.weight.data, p=2, dim=0)[:n])
            lossVec[epoch][1] = lossVec[epoch][1] + loss2.item()
        # Again force gradient to be zero (just to be extra safe).
        optimizer.zero_grad()
        scheduler.step()
        # Record total loss (fit + regularization) for the current epoch.
        lossVec[epoch][1] = lossVec[epoch][1] + lossVec[epoch][0]
        trainingLoss = trainingLoss + lossVec[epoch][1]
        fitErr = fitErr + lossVec[epoch][0]
        # Total squared parameter change over this epoch.
        with torch.no_grad():
            paramDelta = (mseLoss(model.lin_y.weight, lin_y_weight)
                + mseLoss(model.lin_y.bias, lin_y_bias)
                + mseLoss(model.lin_xr2phi.weight, lin_xr2phi_weight)
                + mseLoss(model.lin_xr2phi.bias, lin_xr2phi_bias)
                + mseLoss(model.lin_o.weight, lin_o_weight)
                + mseLoss(model.lin_o.bias, lin_o_bias)
                + mseLoss(model.lin_r1.weight, lin_r1_weight)
                + mseLoss(model.lin_r1.bias, lin_r1_bias)).data
        if(printEpoch == 1):
            print('Predicted Node = %d \t epoch = %s \t lr = %.4f \t Training loss = %.4f \t Fit error = %.4f \t Delta = %f' % (predictedIdx, epoch, optimizer.param_groups[0]['lr'], trainingLoss, fitErr, paramDelta))
        # Stopping criterion: several consecutive epochs with tiny updates.
        if(paramDelta < stoppingThresh):
            stoppingCntr = stoppingCntr + 1
            if(stoppingCntr == stoppingCntrThr):
                break
        else:
            stoppingCntr = 0
        if(printEpoch == 1):
            print("Elapsed time (1) = % s seconds" % (time.time() - start1))
    return model, lossVec
| 13,531 | 45.501718 | 216 | py |
SRU_for_GCI | SRU_for_GCI-master/models/sru.py | import time
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from copy import deepcopy
# Statistical Recurrent Unit class (based on paper by Junier B. Oliva, arXiv:1703.00381v1)
class SRU(torch.nn.Module):
    """Statistical Recurrent Unit (Oliva et al., arXiv:1703.00381v1).

    Summarizes the input history with per-scale exponentially weighted moving
    averages of learned iid statistics, and predicts the next output from a
    learned function of the multi-scale summary.
    """

    def __init__(self,
                 n_inp_channels,          # dimension of input sequence
                 n_out_channels,          # dimension of output (predicted) sequence
                 dim_iid_stats,           # dimension of iid statistics phi
                 dim_rec_stats,           # dimension of recurrent stats u
                 dim_rec_stats_feedback,  # dimension of feedback stats r
                 dim_final_stats,         # dimension of final stats o
                 A,                       # scales for exponentially weighted moving averages
                 device):                 # CPU/GPU memory for storing tensors
        super(SRU, self).__init__()
        self.type = 'sru'
        # Record the dimensions of the cell's internal signals.
        self.n_inp_channels = n_inp_channels
        self.n_out_channels = n_out_channels
        self.dim_iid_stats = dim_iid_stats
        self.dim_rec_stats = dim_rec_stats
        self.dim_final_stats = dim_final_stats
        self.dim_rec_stats_feedback = dim_rec_stats_feedback
        self.numScales = len(A)
        # Kronecker product A (x) 1_{dim_iid_stats}: one EWMA decay per hidden coordinate.
        self.A_mask = torch.Tensor(list(A)).repeat_interleave(dim_iid_stats).view(1, -1)
        self.A_mask.requires_grad = False
        self.A_mask = self.A_mask.to(device)
        # Cell state tensors.
        self.phi_t = torch.zeros(dim_iid_stats, 1, requires_grad=True, device=device)
        self.phi_tile = torch.zeros(dim_iid_stats * self.numScales, 1, requires_grad=True, device=device)
        self.r_t = torch.zeros(dim_rec_stats_feedback, 1, requires_grad=True, device=device)
        self.o_t = torch.zeros(dim_final_stats, 1, requires_grad=True, device=device)
        self.y_t = torch.zeros(n_out_channels, 1, requires_grad=True, device=device)
        self.u_t = torch.zeros(1, dim_rec_stats * self.numScales, requires_grad=True, device=device)
        self.u_t_prev = torch.zeros(1, dim_rec_stats * self.numScales, device=device)
        # Learned linear maps (creation order is fixed on purpose: it determines
        # how the global RNG state is consumed during initialization).
        self.lin_xr2phi = nn.Linear(n_inp_channels + dim_rec_stats_feedback, dim_iid_stats, bias=True)
        self.lin_r = nn.Linear(self.numScales * dim_rec_stats, dim_rec_stats_feedback, bias=True)
        self.lin_o = nn.Linear(self.numScales * dim_rec_stats, dim_final_stats, bias=True)
        self.lin_y = nn.Linear(dim_final_stats, n_out_channels, bias=True)
        # Total number of trainable scalars: weight matrices plus bias vectors.
        weight_count = ((n_inp_channels + dim_rec_stats_feedback) * dim_iid_stats
                        + self.numScales * dim_rec_stats * dim_rec_stats_feedback
                        + self.numScales * dim_rec_stats * dim_final_stats
                        + dim_final_stats * n_out_channels)
        bias_count = dim_iid_stats + dim_rec_stats_feedback + dim_final_stats + n_out_channels
        self.numParams = weight_count + bias_count

    def forward(self, x_t):
        """One recurrent step: consume the 1-D input x_t and return y_t."""
        # Feedback statistics from the previous multi-scale state.
        self.r_t = F.elu(self.lin_r(self.u_t_prev))
        # iid statistics phi_t from the joint (input, feedback) vector.
        joint_inp = torch.cat((x_t, torch.flatten(self.r_t)))
        self.phi_t = F.elu(self.lin_xr2phi(joint_inp))
        # Multi-scale recurrent statistics u_t via per-scale EWMA.
        self.phi_tile = self.phi_t.repeat(1, self.numScales)
        self.u_t = self.A_mask * self.u_t_prev + (1 - self.A_mask) * self.phi_tile
        self.u_t_prev.data = self.u_t.data
        # Final statistics o_t and predicted output y_t.
        self.o_t = F.elu(self.lin_o(self.u_t))
        self.y_t = self.lin_y(self.o_t)
        return self.y_t

    def reset_recurrent_stats(self):
        """Zero the recurrent state before processing a new sequence window."""
        self.u_t_prev.fill_(0)
############################################
# trainSRU
############################################
def trainSRU(model, trainingData, device, numBatches, batchSize, blk_size, predictedIdx, max_iter,
             lambda1, lambda2, lr, lr_gamma, lr_update_gap, staggerTrainWin, stoppingThresh, verbose):
    """Fit an SRU model to one-step-ahead prediction of one time series.

    Each epoch takes an Adam step on the windowed MSE and then accounts for two
    penalties — lambda1 (elementwise l1 on all parameters except the input
    columns of ``lin_xr2phi``) and lambda2 (column-wise group sparsity on the
    first ``n`` input columns). With ``proxUpdate`` (hard-coded True) the
    penalties are applied as proximal soft-threshold updates; the dead ``else``
    branch applies them via additional gradient steps instead.

    Args:
        model: SRU instance to train (modified in place).
        trainingData: (n, T) tensor holding n series with T samples each.
        device: torch device for scratch tensors.
        numBatches, batchSize, blk_size: split of the time axis; each epoch
            trains on one window of ``blk_size`` consecutive samples.
        predictedIdx: row of ``trainingData`` being predicted.
        max_iter: maximum number of epochs.
        lambda1, lambda2: regularization strengths (see above).
        lr, lr_gamma, lr_update_gap: Adam step size and StepLR schedule.
        staggerTrainWin: nonzero randomizes the window offset inside a batch.
        stoppingThresh: per-epoch parameter-change threshold for early stopping.
        verbose: >0 prints a summary once per full pass over all batches.

    Returns:
        (model, lossVec) where lossVec[epoch] = [fit error, total loss].
    """
    stoppingCntr = 0
    stoppingCntrThr = 10  # stop after this many consecutive sub-threshold updates
    proxUpdate = True
    n = trainingData.shape[0]
    numTotalSamples = trainingData.shape[1]
    # Snapshots of all trainable parameters, refreshed each epoch to measure the
    # parameter change (paramDelta) used by the stopping criterion.
    lin_xr2phi_weight = deepcopy(model.lin_xr2phi.weight.data)
    lin_xr2phi_bias = deepcopy(model.lin_xr2phi.bias.data)
    lin_r_weight = deepcopy(model.lin_r.weight.data)
    lin_r_bias = deepcopy(model.lin_r.bias.data)
    lin_o_weight = deepcopy(model.lin_o.weight.data)
    lin_o_bias = deepcopy(model.lin_o.bias.data)
    lin_y_weight = deepcopy(model.lin_y.weight.data)
    lin_y_bias = deepcopy(model.lin_y.bias.data)
    #####################################
    # Initialize miscellaneous tensors
    #####################################
    IdxArr = torch.unsqueeze(torch.arange(1,n+1, dtype=torch.float),1) # 1 to n array for plotting purposes
    estWeights = torch.zeros(n, 1, requires_grad = False)
    prevWeights = torch.zeros(model.dim_iid_stats, n, requires_grad = False, device=device)
    lossVec = torch.zeros(max_iter,2)
    lossVec.to(device)
    mseLoss = nn.MSELoss(reduction = 'sum')
    L1Loss = nn.L1Loss(reduction = 'sum')
    if(proxUpdate):
        softshrink1 = torch.nn.Softshrink(lambda1)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, lr_update_gap, lr_gamma)
    batchCntr = 0
    trainingLoss = 0
    fitErr = 0
    start_time = 0
    stop_time = start_time + blk_size -1
    optimizer.zero_grad()
    for epoch in range(max_iter):
        start1 = time.time()
        # Snapshot current parameters before this epoch's update.
        with torch.no_grad():
            lin_xr2phi_weight[:,:] = model.lin_xr2phi.weight.data[:,:]
            lin_xr2phi_bias[:] = model.lin_xr2phi.bias.data[:]
            lin_r_weight[:,:] = model.lin_r.weight.data[:,:]
            lin_r_bias[:] = model.lin_r.bias.data[:]
            lin_o_weight[:,:] = deepcopy(model.lin_o.weight.data[:,:])
            lin_o_bias[:] = deepcopy(model.lin_o.bias.data[:])
            lin_y_weight[:,:] = deepcopy(model.lin_y.weight.data[:,:])
            lin_y_bias[:] = deepcopy(model.lin_y.bias.data[:])
        # Select the training window (cycle through batches; log once per pass).
        printEpoch = 0
        batchCntr = batchCntr + 1
        if(batchCntr == numBatches+1):
            batchCntr = 1
            trainingLoss = 0
            fitErr = 0
            # print epoch summary once per full pass over the data
            if(verbose > 0):
                printEpoch = 1
        if(staggerTrainWin == 0):
            offset = 0
        else:
            offset = math.floor(np.random.uniform()*(batchSize-blk_size))
        start_time = (batchCntr-1)*batchSize + offset
        stop_time = start_time + blk_size - 1
        # Reset recurrent stats u_t before replaying the window.
        optimizer.zero_grad()
        model.reset_recurrent_stats()
        # Forward pass: accumulate the per-step one-step-ahead MSE terms.
        smooth_loss_list = []
        for tt in range(start_time,stop_time,1):
            model.forward(trainingData[:,tt])
            smooth_loss = (1/(blk_size-1))*mseLoss(torch.flatten(model.y_t), torch.unsqueeze(trainingData[predictedIdx,tt+1], 0))
            smooth_loss_list.append(smooth_loss)
        # Backward pass on the summed window loss.
        model.lin_xr2phi.weight.retain_grad()
        sum([smooth_loss_list[i] for i in range(blk_size-1)]).backward()
        lossVec[epoch][0] = sum([smooth_loss_list[i].item() for i in range(blk_size-1)])
        optimizer.step()
        optimizer.zero_grad()
        # Account for the regularizers.
        if(proxUpdate):
            # NOTE(review): the shrink thresholds use the base ``lr`` while
            # lr_current is only read for logging here — confirm intentional.
            lr_current = optimizer.param_groups[0]['lr']
            softshrink1 = nn.Softshrink(lambda1*lr)
            softshrink2 = nn.Softshrink(lambda2*lr)
            with torch.no_grad():
                # Elementwise l1 prox on everything except the input weight
                # columns of lin_xr2phi.
                model.lin_xr2phi.weight[:,n:].data = softshrink1(model.lin_xr2phi.weight[:,n:]).data
                model.lin_xr2phi.bias.data = softshrink1(model.lin_xr2phi.bias).data
                model.lin_r.weight.data = softshrink1(model.lin_r.weight).data
                model.lin_r.bias.data = softshrink1(model.lin_r.bias).data
                model.lin_o.weight.data = softshrink1(model.lin_o.weight).data
                model.lin_o.bias.data = softshrink1(model.lin_o.bias).data
                model.lin_y.weight.data = softshrink1(model.lin_y.weight).data
                model.lin_y.bias.data = softshrink1(model.lin_y.bias).data
                # Column-wise group-sparsity prox on the input weight matrix.
                inpWgtMtx = model.lin_xr2phi.weight[:,:n]
                l2normTensor = torch.norm(inpWgtMtx, p=2, dim=0, keepdim=True) # 1 x n row tensor
                model.lin_xr2phi.weight.data[:,:n] = inpWgtMtx*(softshrink2(l2normTensor)/torch.clamp(l2normTensor, min=1e-8))
                # Compute and log regularization loss without updating gradients.
                loss1 = lambda1*((torch.norm(model.lin_y.weight.data, 1)+ torch.norm(model.lin_y.bias.data, 1) +
                    torch.norm(model.lin_xr2phi.weight[:,n:].data, 1)) + torch.norm(model.lin_xr2phi.bias.data, 1) +
                    torch.norm(model.lin_o.weight.data, 1) + torch.norm(model.lin_o.bias.data, 1) +
                    torch.norm(model.lin_r.weight.data, 1) + torch.norm(model.lin_r.bias.data, 1))
                lossVec[epoch][1] = lossVec[epoch][1] + loss1.item()
                loss2 = lambda2*torch.sum(torch.norm(model.lin_xr2phi.weight.data, p=2, dim=0)[:n])
                lossVec[epoch][1] = lossVec[epoch][1] + loss2.item()
            # Again force gradient to be zero (just to be extra safe).
            optimizer.zero_grad()
            scheduler.step()
        else:
            # Dead branch (proxUpdate is hard-coded True): handle the penalties
            # with extra gradient steps instead of proximal updates.
            loss1 = lambda1*((torch.norm(model.lin_y.weight, 1)+ torch.norm(model.lin_y.bias, 1) +
                torch.norm(model.lin_xr2phi.weight[:,n:], 1)) + torch.norm(model.lin_xr2phi.bias, 1) +
                torch.norm(model.lin_o.weight, 1) + torch.norm(model.lin_o.bias, 1) +
                torch.norm(model.lin_r.weight, 1) + torch.norm(model.lin_r.bias, 1))
            lossVec[epoch][1] = lossVec[epoch][1] + loss1.item()
            model.lin_xr2phi.weight.retain_grad()
            loss1.backward(retain_graph = True)
            optimizer.step()
            optimizer.zero_grad()
            loss2 = lambda2*torch.sum(torch.norm(model.lin_xr2phi.weight, p=2, dim=0)[:n])
            lossVec[epoch][1] = lossVec[epoch][1] + loss2.item()
            model.lin_xr2phi.weight.retain_grad()
            loss2.backward(retain_graph = True)
            optimizer.step()
            scheduler.step()
        # Record total loss (fit + regularization) for the current epoch.
        lossVec[epoch][1] = lossVec[epoch][1] + lossVec[epoch][0]
        trainingLoss = trainingLoss + lossVec[epoch][1]
        fitErr = fitErr + lossVec[epoch][0]
        # Total squared parameter change over this epoch.
        with torch.no_grad():
            paramDelta = (mseLoss(model.lin_y.weight, lin_y_weight)
                + mseLoss(model.lin_y.bias, lin_y_bias)
                + mseLoss(model.lin_xr2phi.weight, lin_xr2phi_weight)
                + mseLoss(model.lin_xr2phi.bias, lin_xr2phi_bias)
                + mseLoss(model.lin_o.weight, lin_o_weight)
                + mseLoss(model.lin_o.bias, lin_o_bias)
                + mseLoss(model.lin_r.weight, lin_r_weight)
                + mseLoss(model.lin_r.bias, lin_r_bias)).data
        if(printEpoch == 1):
            print('Predicted Node = %d \t epoch = %s \t lr = %.4f \t Training loss = %.4f \t Fit error = %.4f \t Delta = %f' % (predictedIdx, epoch, optimizer.param_groups[0]['lr'], trainingLoss, fitErr, paramDelta))
        # Stopping criterion: several consecutive epochs with tiny updates.
        if(paramDelta < stoppingThresh):
            stoppingCntr = stoppingCntr + 1
            if(stoppingCntr == stoppingCntrThr):
                break
        else:
            stoppingCntr = 0
        if(printEpoch == 1):
            print("Elapsed time (1) = % s seconds" % (time.time() - start1))
    return model, lossVec
| 15,047 | 45.018349 | 216 | py |
SRU_for_GCI | SRU_for_GCI-master/utils/utilFuncs.py | # Import header files
import math
import torch
from torch import autograd
import torch.nn as nn
import torch.nn.functional as F
import matplotlib
import sys
import numpy as np
import pylab
from matplotlib import pyplot as plt
import time
import sys
import csv
###########################################
# Python/numpy/pytorch environment config
###########################################
def env_config(GPUTrue, deviceName):
    """Configure the torch/numpy runtime and seed all RNGs.

    Disables autograd anomaly detection, flushes denormals to zero, and seeds
    torch (CPU + CUDA) and numpy with a fixed global seed for reproducibility.

    Args:
        GPUTrue: when truthy, try to use `deviceName` (falls back to CPU if
            CUDA is unavailable); otherwise always use CPU.
        deviceName: CUDA device string such as "cuda:0".

    Returns:
        (device, global_seed) tuple.
    """
    global_seed = 2
    torch.autograd.set_detect_anomaly(False)
    # Shrink very small values to zero in tensors for computational speedup.
    torch.set_flush_denormal(True)
    torch.manual_seed(global_seed)
    torch.cuda.manual_seed(global_seed)
    np.random.seed(global_seed)
    if GPUTrue and torch.cuda.is_available():
        device = torch.device(deviceName)
    else:
        device = torch.device("cpu")
    return device, global_seed
######################################
# Function for loading input data
######################################
def loadTrainingData(inputDataFilePath, device):
    """Load the saved 'TsData' tensor and return its transpose on `device`.

    The file (written by torch.save) holds a dict with key 'TsData' of shape
    (T, n); the returned tensor has shape (n, T), requires_grad=False.
    """
    saved = torch.load(inputDataFilePath)
    series = saved['TsData'].t()
    Xtrain = torch.zeros(series.shape[0], series.shape[1], requires_grad=False, device=device)
    Xtrain.data[:, :] = series.data[:, :]
    return Xtrain
#######################################################
# Function for reading ground truth network from file
#######################################################
def loadTrueNetwork(inputFilePath, networkSize):
    """Parse a two-column TSV of edges into an adjacency matrix.

    Each row looks like "G<src>\\tG<dst>" (a one-letter prefix followed by a
    1-based node index). The returned int16 matrix has
    Gtrue[dst-1][src-1] = 1 for every listed edge.
    """
    edges = []
    with open(inputFilePath) as tsvin:
        for row in csv.reader(tsvin, delimiter='\t'):
            # Strip the leading letter ("G12" -> 12) from both endpoints.
            edges.append((int(row[0][1:]), int(row[1][1:])))
    Gtrue = np.zeros((networkSize, networkSize), dtype=np.int16)
    for src, dst in edges:
        Gtrue[dst - 1][src - 1] = 1
    return Gtrue
#############################################
# getCausalNodes
######################################
def getCausalNodes(model, threshold):
    """Mark input series whose lin_xr2phi column norm exceeds `threshold`.

    Returns an (n_inp_channels, 1) int16 tensor with 1 in row i when the l2
    norm of column i of the input-layer weight matrix is above the threshold
    (i.e. series i is retained as a causal parent), else 0.
    """
    n = model.n_inp_channels
    mask = torch.zeros(n, 1, requires_grad=False, dtype=torch.int16)
    W = model.lin_xr2phi.weight.data
    for col in range(n):
        if torch.norm(W[:, col], 2) > threshold:
            mask.data[col] = 1
    return mask
#######################################################################
# Calculates false positive negatives and true positives negatives
#####################################################################
def calcPerfMetrics(Gtrue, Gest):
    """Score an estimated 0/1 adjacency matrix against the ground truth.

    Args:
        Gtrue: ground-truth adjacency (0/1 numpy array).
        Gest: estimated adjacency (0/1 numpy array, same shape).

    Returns:
        (TPR, FPR, Precision, Recall) as floats; any ratio with a zero
        denominator is reported as 0.0 instead of raising ZeroDivisionError.
    """
    GTGE = (Gtrue * Gest)                      # 1 exactly where both predict an edge
    GestComplement = -1 * (Gest - 1)           # 1 where no edge was predicted
    GtrueComplement = -1 * (Gtrue - 1)         # 1 where no edge truly exists
    GTCGEC = (GtrueComplement * GestComplement)
    TP = np.sum(GTGE)                          # true positives
    FP = np.sum(Gest) - TP                     # false positives
    TN = np.sum(GTCGEC)                        # true negatives
    FN = np.sum(GestComplement) - np.sum(GTCGEC)  # false negatives
    TPR = float(TP) / float(TP + FN) if (TP + FN) > 0 else 0.0
    FPR = float(FP) / float(FP + TN) if (FP + TN) > 0 else 0.0
    Recall = TPR  # recall and TPR are the same ratio
    # Bug fix: the original required both TP > 0 AND FP > 0, so a perfect
    # estimate (FP == 0) was reported with Precision 0 instead of 1.
    Precision = float(TP) / float(TP + FP) if (TP + FP) > 0 else 0.0
    return TPR, FPR, Precision, Recall
####################################################
# Calculates area under ROC curve
#
# (In) xin: numpy float array of false positive entries
# (In) yin: numpy float array of true positive entries
# (Out) auroc: calculated area under ROC curve
#
# Notes: xin and yin should sorted and be of same dimension
# and contain bounded entries in (0,1)
####################################################
def calcAUROC(xin, yin, verbose):
    """Trapezoidal area under the ROC curve.

    Args:
        xin: false-positive-rate values in (0, 1).
        yin: matching true-positive-rate values in (0, 1).
        verbose: >0 prints the sorted points before integration.

    Returns:
        (auroc, xin, yin) with the endpoint anchors (0,0) and (1,1) appended
        to the returned coordinate arrays.
    """
    xin, yin = parallel_sort(xin, yin)
    if verbose > 0:
        for idx in range(len(xin)):
            print("%d\t %.6f \t %.6f" % (idx, xin[idx], yin[idx]))
    # Anchor the curve at (0,0) and (1,1) so the integral spans the full axis.
    xin = np.insert(xin, 0, 0)
    yin = np.insert(yin, 0, 0)
    xin = np.append(xin, 1)
    yin = np.append(yin, 1)
    auroc = 0
    for k in range(len(xin) - 1):
        # Trapezoid with width x[k+1]-x[k] and parallel sides y[k], y[k+1].
        auroc = auroc + 0.5 * (xin[k + 1] - xin[k]) * (yin[k] + yin[k + 1])
    return auroc, xin, yin
####################################################
# Calculates area under Precision-Recall curve
#
# (In) xin: numpy float array of precision values
# (In) yin: numpy float array of recall values
# (Out) aupr: calculated area under precision-recall curve
#
# Notes: xin and yin should sorted and be of same dimension
# and contain bounded entries in (0,1)
####################################################
def calcAUPR(xin, yin):
    """Trapezoidal area under a precision-recall curve.

    Args:
        xin: sorted recall values in (0, 1).
        yin: matching precision values in (0, 1).

    Returns:
        The trapezoidal integral after padding the curve with the extreme
        points (0, 1) and (1, 0) when they are missing.
    """
    if xin[0] > 0:
        xin = np.insert(xin, 0, 0)
        yin = np.insert(yin, 0, 1)
    # Bug fix: the original captured ll = len(xin) BEFORE the front insert and
    # then tested xin[ll-1], which after an insert is the second-to-last
    # element — so the (1, 0) pad could be wrongly added or skipped. Test the
    # actual last element instead.
    if xin[-1] < 1:
        xin = np.append(xin, 1)
        yin = np.append(yin, 0)
    aupr = 0.0
    for k in range(len(xin) - 1):
        # Trapezoid with width x[k+1]-x[k] and parallel sides y[k], y[k+1].
        aupr += 0.5 * (xin[k + 1] - xin[k]) * (yin[k] + yin[k + 1])
    return aupr
###########################
# Count the number of tunable parameters in the model
##########################
def count_parameters(model):
    """Return the number of trainable scalars (requires_grad) in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
##################################
# Calc metrics
##################################
def calcMetrics(jobLogFilename, model, dataset, verbose):
    """Score a saved job log (.npz) against its reference network at threshold 0.

    Loads Gest/Gref and the run hyper-parameters from the log, binarizes Gest
    (entries <= 0 become non-edges), drops self loops for gene datasets, and
    scores via calcPerfMetrics.

    NOTE(review): the `model` and `dataset` arguments are immediately shadowed
    by values read from the log file; only `jobLogFilename` and `verbose`
    actually influence the result — confirm against callers.

    Returns:
        (model_name, dataset, dsid, T, F, nepochs, lr, mu1, mu2, mu3,
         TPR, FPR, Precision, Recall).
    """
    ld = np.load(jobLogFilename)
    Gest = ld['Gest']
    Gref = ld['Gref']
    model_name = ld['model']
    dataset = ld['dataset']
    dsid = ld['dsid']
    nepochs = ld['nepochs']
    T = ld['T']
    F = ld['F']
    mu1 = ld['mu1']
    mu2 = ld['mu2']
    lr = ld['lr']
    # if esru2 model, then register esru2 specific parameters, namely mu3
    if(model_name == 'esru2' or model_name == 'esru3'):
        mu3 = ld['mu3']
    else:
        mu3 = 0
    n = Gest.shape[0]
    Gest1 = np.ones((n,n), dtype=np.int16)
    thresh = 0
    # Binarize the estimate: scores at or below the threshold mean "no edge".
    thresh_idx = (Gest <= thresh)
    Gest1.fill(1)
    Gest1[thresh_idx] = 0
    # remove self loops for gene causal network estimate
    if(dataset == 'gene'):
        for ii in range(n):
            Gest1[ii][ii] = 0
    TPR, FPR, Precision, Recall = calcPerfMetrics(Gref, Gest1)
    if(verbose > 0):
        print("thresh = %1.4f, \t TPR = %1.4f, \t FPR = %1.4f, \t Precision = %.4f, \t Recall = %.4f" % (thresh, TPR, FPR, Precision, Recall))
    return model_name, dataset, dsid, T, F, nepochs, lr, mu1, mu2, mu3, TPR, FPR, Precision, Recall
##################################
# Calc metrics
##################################
def calcMetricsTCDF(jobLogFilename, model, dataset, threshold, verbose):
    """Score a saved TCDF job log (.npz) against its reference network.

    Loads Gest/Gref and the run hyper-parameters from the log, binarizes Gest
    at `threshold` (entries <= threshold become non-edges), drops self loops
    for gene datasets, and scores via calcPerfMetrics.

    NOTE(review): the `model` and `dataset` arguments are immediately shadowed
    by values read from the log file; only `jobLogFilename`, `threshold` and
    `verbose` actually influence the result — confirm against callers.

    Returns:
        (model_name, dataset, dsid, T, F, nepochs, lr, kernel, level,
         dilation, TPR, FPR, Precision, Recall).
    """
    ld = np.load(jobLogFilename)
    Gest = ld['Gest']
    Gref = ld['Gref']
    model_name = ld['model']
    dataset = ld['dataset']
    dsid = ld['dsid']
    nepochs = ld['nepochs']
    T = ld['T']
    F = ld['F']
    kernel = ld['kernel_size']
    level = ld['levels']
    lr = ld['lr']
    dilation = ld['dilation_c']
    n = Gest.shape[0]
    Gest1 = np.ones((n, n), dtype=np.int16)
    # Binarize the estimate: scores at or below the threshold mean "no edge".
    thresh_idx = (Gest <= threshold)
    Gest1.fill(1)
    Gest1[thresh_idx] = 0
    # remove self loops for gene causal network estimate
    if(dataset == 'gene'):
        for ii in range(n):
            Gest1[ii][ii] = 0
    TPR, FPR, Precision, Recall = calcPerfMetrics(Gref, Gest1)
    if(verbose > 0):
        # Bug fix: the original printed the undefined name `thresh` here,
        # raising NameError whenever verbose > 0; use the `threshold` argument.
        print("thresh = %1.4f, \t TPR = %1.4f, \t FPR = %1.4f, \t Precision = %.4f, \t Recall = %.4f" % (threshold, TPR, FPR, Precision, Recall))
    return model_name, dataset, dsid, T, F, nepochs, lr, kernel, level, dilation, TPR, FPR, Precision, Recall
###################################################
# parallel sort in ascending order
###################################################
def parallel_sort(xin, yin):
    """Sort xin ascending; within each run of equal x, also sort the paired y.

    Args:
        xin: numpy float array.
        yin: numpy float array of the same length, paired with xin.

    Returns:
        (xout, yout): xout is xin sorted ascending; yout carries the matching
        y values, additionally sorted ascending inside every group of equal x.

    NOTE(review): when the final element starts a new x-group, the `ii == n-1`
    branch sets the stop index to n-1 and sorts y across the group boundary,
    which can exchange y values between distinct x groups — confirm whether
    this edge case matters to callers.
    """
    n = len(xin)
    xin_sorted_idx = np.argsort(xin)
    yin_sorted_idx = np.argsort(yin)  # NOTE(review): computed but never used
    xout = xin[xin_sorted_idx]
    ysorted_by_x = yin[xin_sorted_idx]
    yout = yin
    # For fixed xin[.], further sort the corresponding yin block.
    x_prev = xout[0]
    same_x_start_idx = 0
    yout = []
    for ii in range(0, n, 1):
        x = xout[ii]
        # Flush a completed group when x increases or we hit the last element.
        if((x > x_prev) or (ii == n-1)):
            if(ii == n-1):
                same_x_stop_idx = n-1
            else:
                same_x_stop_idx = ii-1
            if(same_x_start_idx == same_x_stop_idx):
                y_arr_for_same_x = ysorted_by_x[same_x_start_idx]
            else:
                y_arr_for_same_x = np.sort(ysorted_by_x[same_x_start_idx:same_x_stop_idx+1:1])
            yout = np.append(yout, y_arr_for_same_x)
            same_x_start_idx = ii
            x_prev = xout[ii]
    return xout, yout
def getGeneTrainingData(dataset_id, device):
    """Load one of the DREAM3 size-100 gene datasets and its true network.

    Parameters
    ----------
    dataset_id : int in 1..5 selecting Ecoli1, Ecoli2, Yeast1, Yeast2,
        Yeast3 respectively.
    device : forwarded to loadTrainingData (presumably a torch device --
        TODO confirm against loadTrainingData's signature).

    Returns
    -------
    (Xtrain, Gref) : the training tensor and the n x n reference network.

    Raises
    ------
    ValueError : for an unknown dataset_id.  (The previous version only
        printed an error and then crashed with a NameError on the
        undefined file-path variables.)
    """
    datasets = {
        1: ("data/dream3/Dream3TensorData/Size100Ecoli1.pt",
            "data/dream3/TrueGeneNetworks/InSilicoSize100-Ecoli1.tsv"),
        2: ("data/dream3/Dream3TensorData/Size100Ecoli2.pt",
            "data/dream3/TrueGeneNetworks/InSilicoSize100-Ecoli2.tsv"),
        3: ("data/dream3/Dream3TensorData/Size100Yeast1.pt",
            "data/dream3/TrueGeneNetworks/InSilicoSize100-Yeast1.tsv"),
        4: ("data/dream3/Dream3TensorData/Size100Yeast2.pt",
            "data/dream3/TrueGeneNetworks/InSilicoSize100-Yeast2.tsv"),
        5: ("data/dream3/Dream3TensorData/Size100Yeast3.pt",
            "data/dream3/TrueGeneNetworks/InSilicoSize100-Yeast3.tsv"),
    }
    try:
        InputDataFilePath, RefNetworkFilePath = datasets[dataset_id]
    except KeyError:
        raise ValueError(
            "Error while loading gene training data: unknown dataset_id %r"
            % (dataset_id,)) from None
    Xtrain = loadTrainingData(InputDataFilePath, device)
    n = Xtrain.shape[0]
    Gref = loadTrueNetwork(RefNetworkFilePath, n)
    return Xtrain, Gref
SRU_for_GCI | SRU_for_GCI-master/utils/lorenz96Checker.py | import numpy as np
import torch
from utilFuncs import calcPerfMetrics, calcAUROC, calcAUPR
# lorenz96 params
# Script: sweep a binarization threshold over saved Lorenz-96 Granger-graph
# estimates and report AUROC / AUPR per dataset and on average.
T = 1000
F = 40.0
model_name = 'lstm'
mu = 6.6 # F = 10, mu = 0.2| F = 40, mu = 4.0
n = 10
numDatasets = 5
max_iter = 500
verbose = 0
# Thresholds at which the estimated graph is binarized.
thresholdVec = np.arange(0, 1, 0.05)
#thresholdVec = np.arange(0, 0.1, 0.001)
TPRVec = np.zeros(len(thresholdVec), dtype=np.float32)
FPRVec = np.zeros(len(thresholdVec), dtype=np.float32)
RecallVec = np.zeros(len(thresholdVec), dtype=np.float32)
PrecisionVec = np.zeros(len(thresholdVec), dtype=np.float32)
Gest1 = np.ones((n, n), dtype=np.int16)
Gest2 = np.ones((n, n), dtype=np.int16)
Gref1 = np.zeros((n, n), dtype=np.int16)  # NOTE(review): appears unused below
AUROCList = np.zeros(numDatasets)
AUPRList = np.zeros(numDatasets)
for dsid in range(numDatasets):
    # Load the estimated and reference graphs saved by the training run.
    filename = "../logs/lorenz96/LORENZ%s_T%s_F%s_%s_niter%s_mu_%s.pt" % (dsid+1, T, F, model_name, max_iter, mu)
    savedTensors = torch.load(filename)
    Gest = savedTensors['Gest']
    Gref = savedTensors['Gref']
    Gest.requires_grad = False
    Gest1 = Gest.cpu().numpy()
    Gest2.fill(1)
    for ii in range(len(thresholdVec)):
        # Binarize the estimate at each threshold and score it.
        thresh = thresholdVec[ii]
        thresh_idx = (Gest1 < thresh)
        Gest2[thresh_idx] = 0
        TPR, FPR, Precision, Recall = calcPerfMetrics(Gref, Gest2)
        if(verbose > 0):
            print("thresh = %1.4f, \t TPR = %1.4f, \t FPR = %1.4f, \t Precision = %.4f, \t Recall = %.4f" % (thresh, TPR, FPR, Precision, Recall))
        TPRVec[ii] = TPR
        FPRVec[ii] = FPR
        PrecisionVec[ii] = Precision
        RecallVec[ii] = Recall
    # np.flip: the threshold sweep produces the curve in decreasing-FPR
    # order; flip so the areas are integrated left-to-right.
    AUROCList[dsid] = calcAUROC(np.flip(FPRVec), np.flip(TPRVec))
    AUPRList[dsid] = calcAUPR(RecallVec, PrecisionVec)
    print("%s_LORENZ%d_T%s_F%s: AUROC = %.4f, \t AUPR = %.4f" % (model_name, dsid, T, F, AUROCList[dsid], AUPRList[dsid]))
print("Mean AUROC = %.4f, \t Mean AUPR = %.4f" % (AUROCList.mean(), AUPRList.mean()))
| 1,909 | 32.508772 | 146 | py |
SRU_for_GCI | SRU_for_GCI-master/utils/perfChk.py | import math
import torch
import matplotlib
#import sys
import numpy as np
import pylab
from matplotlib import pyplot as plt
#import time
from utilFuncs import loadTrueNetwork, getCausalNodes, calcPerfMetrics, calcAUROC, calcAUPR
# Script: sweep the sparsity penalty mu over saved Lorenz-96 runs at a fixed
# binarization threshold, then plot ROC and Precision/Recall curves.
dataset = 'LORENZ'
#dataset = 'VAR'
#dataset = 'GENE'
if(dataset == 'LORENZ'):
    dataset_id = 1
    T = 1000
    F = 40.0
    model_name = 'sru'
    max_iter = 500
    n = 10
    thresh = 0.05  # fixed binarization threshold for the estimated graph
    muVec = np.arange(1.0, 11.0, 1.0)
    #muVec = np.arange(18.0, 40.0, 1.0)
    TPRVec = np.zeros(len(muVec), dtype=np.float32)
    FPRVec = np.zeros(len(muVec), dtype=np.float32)
    RecallVec = np.zeros(len(muVec), dtype=np.float32)
    PrecisionVec = np.zeros(len(muVec), dtype=np.float32)
    Gest1 = np.zeros((n, n), dtype=np.int16)
    for muidx in range(len(muVec)):
        # One saved run per mu value.
        mu = muVec[muidx]
        LogPath = "logs/lorenz96/%s/%s%s_T%s_F%s_%s_niter%s_mu_%s.pt" % (model_name, dataset, dataset_id, T, F, model_name, max_iter, mu)
        savedTensors = torch.load(LogPath)
        Gref = savedTensors['Gref']
        Gest = savedTensors['Gest']
        Gest.requires_grad = False
        Gest1 = Gest.cpu().numpy()
        #print(Gest1)
        # Binarize in place: zero out weak entries, then mark the rest as edges.
        Gest1[Gest1 <= thresh] = 0
        Gest1[Gest1 > 0] = 1
        TPR, FPR, Precision, Recall = calcPerfMetrics(Gref, Gest1)
        print("mu = %.4f, \t thresh = %1.4f, \t TPR = %1.4f, \t FPR = %1.4f, \t Precision = %.4f, \t Recall = %.4f" % (mu, thresh, TPR, FPR, Precision, Recall))
        TPRVec[muidx] = TPR
        FPRVec[muidx] = FPR
        PrecisionVec[muidx] = Precision
        RecallVec[muidx] = Recall
    # NOTE(review): calcAUROC is unpacked into three values here but used as a
    # single return value in lorenz96Checker.py -- confirm its signature.
    AUROC, FPRVec, TPRVec = calcAUROC(np.flip(FPRVec), np.flip(TPRVec))
    AUPR = calcAUPR(RecallVec, PrecisionVec)
    print("AUROC = %.4f, \t AUPR = %.4f" % (AUROC, AUPR))
    plt.figure(1)
    plt.title("ROC (TPR vs FPR)")
    plt.xlabel("False positive rate")
    plt.ylabel("True positive rate")
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.plot(FPRVec, TPRVec)
    plt.show()
    plt.figure(2)
    plt.title("ROC (Precision vs Recall)")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.plot(RecallVec, PrecisionVec)
    plt.show()
else:
    print("Dataset is not supported")
##########################
# old stuff
###########################
# Dead code kept for reference (guarded by `if 0`): threshold sweep over a
# single DREAM3 gene-network run.
if 0:
    #LogPath = "logs/sru_niter1000_mu_2.0.pt"
    #LogPath = "logs/sru_mod_niter1000_mu_1.0.pt"
    #LogPath = "logs/lstm_niter200_mu_1.0.pt"
    #LogPath = "logs/lstm_niter200_mu_1.0.pt"
    #LogPath = "LORENZ_mlp_niter50000_mu_1.pt"
    InputDataFilePath = "Dream3TensorData/Size100Ecoli1.pt"
    RefNetworkFilePath = "Dream3TrueGeneNetworks/InSilicoSize100-Ecoli1.tsv"
    n = 100;
    Gref = loadTrueNetwork(RefNetworkFilePath, n)
    savedTensors = torch.load(LogPath)
    Gest = savedTensors['Gest']
    print(Gest)
    Gest1 = torch.zeros(n, n, requires_grad = False, dtype=torch.int16)
    #thresholdVec = np.arange(0, 0.2, 0.001)
    thresholdVec = np.arange(0, 2, 0.05)
    TPRVec = np.zeros(len(thresholdVec), dtype=np.float32)
    FPRVec = np.zeros(len(thresholdVec), dtype=np.float32)
    RecallVec = np.zeros(len(thresholdVec), dtype=np.float32)
    PrecisionVec = np.zeros(len(thresholdVec), dtype=np.float32)
    #ignore self loops
    for ii in range(n):
        Gest.data[ii][ii] = 0
    for ii in range(len(thresholdVec)):
        thresh = thresholdVec[ii]
        for rr in range(n):
            for cc in range(n):
                Gest1.data[rr][cc] = Gest.data[rr][cc] > thresh
        TPR, FPR, Precision, Recall = calcPerfMetrics(Gref, Gest1)
        print("thresh = %1.4f, \t TPR = %1.4f, \t FPR = %1.4f, \t Precision = %.4f, \t Recall = %.4f" % (thresh, TPR, FPR, Precision, Recall))
        TPRVec[ii] = TPR
        FPRVec[ii] = FPR
        PrecisionVec[ii] = Precision
        RecallVec[ii] = Recall
| 3,856 | 30.357724 | 160 | py |
Quantized-GBDT | Quantized-GBDT-master/experiments/generate_script.py | import os
import argparse
# Command-line options for the benchmark-script generator.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("data_path", type=str)
arg_parser.add_argument("--use-discretized-grad", action='store_true')
arg_parser.add_argument("--discretized-grad-renew", action='store_true')
arg_parser.add_argument("--stochastic-rounding", action='store_true')
arg_parser.add_argument("--for-speed", action='store_true')
arg_parser.add_argument("--device", type=str, default='cpu')
arg_parser.add_argument("--algorithm", type=str, default='lgb')
arg_parser.add_argument("--log-path", type=str, default='logs')
# The generated shell script; opened at import time and written to by
# generate_script() below.
script_fname = 'run.sh'
running = open(script_fname, 'w')
os.system(f"chmod +x {script_fname}")
# Benchmark datasets and their task types (parallel lists: task[i] is the
# task for data[i]).
data = [
    'higgs',
    'epsilon',
    'criteo',
    'bosch',
    'kitsune',
    'yahoo',
    'msltr',
    'year'
]
task = [
    'binary',
    'binary',
    'binary',
    'binary',
    'binary',
    'ranking',
    'ranking',
    'regression'
]
# Datasets for which LightGBM should use column-wise histogram building.
col_wise_data = ['epsilon', 'year', 'yahoo', 'bosch']
# Gradient-discretization bit widths to sweep (2**b - 2 bins each).
bins = [2, 3, 4, 5]
def generate_script(data_path, use_discretized_grad, discretized_grad_renew, stochastic_rounding, for_speed, device, algorithm, log_dir):
    """Write one training shell command per (dataset, seed[, bits]) into run.sh.

    Parameters
    ----------
    data_path : directory holding <name>.train / <name>.test files.
    use_discretized_grad, discretized_grad_renew, stochastic_rounding :
        LightGBM quantized-gradient options (lgb only).
    for_speed : when True, omit validation sets (pure throughput runs).
    device : 'cpu' or anything else for GPU flags.
    algorithm : 'lgb' | 'xgb' | 'cat' -- which tool's CLI syntax to emit.
    log_dir : directory for per-run log files (created here).

    Side effects: appends command lines to the module-level `running` file
    handle and creates `log_dir` via os.system.
    """
    data_path = data_path.rstrip('/')
    dataset = [
        f'data={data_path}/higgs.train',
        f'data={data_path}/epsilon.train',
        f'data={data_path}/criteo.train',
        f'data={data_path}/bosch.train',
        f'data={data_path}/kitsune.train',
        f'data={data_path}/yahoo.train',
        f'data={data_path}/msltr.train',
        f'data={data_path}/year.train',
    ]
    validset = [
        f'valid={data_path}/higgs.test',
        f'valid={data_path}/epsilon.test',
        f'valid={data_path}/criteo.test',
        f'valid={data_path}/bosch.test',
        f'valid={data_path}/kitsune.test',
        f'valid={data_path}/yahoo.test',
        f'valid={data_path}/msltr.test',
        f'valid={data_path}/year.test'
    ]
    os.system(f"mkdir -p {log_dir}")
    if algorithm == 'lgb':
        use_discretized_grad_str = str(use_discretized_grad).lower()
        discretized_grad_renew_str = str(discretized_grad_renew).lower()
        stochastic_rounding_str = str(stochastic_rounding).lower()
        # Sweep the bit widths only when gradient discretization is on.
        num_k = 4 if use_discretized_grad else 1
        for i in range(8):
            for j in range(5):
                for k in range(num_k):
                    base_conf_fname = 'train_model.conf' if task[i] == 'binary' else ('train_rank_model.conf' if task[i] == 'ranking' else 'train_reg_model.conf')
                    args = ''
                    args += dataset[i]
                    if not for_speed:
                        args += ' ' + validset[i]
                    args += ' seed=' + str(j)
                    if use_discretized_grad:
                        args += ' grad_discretize_bins='+str(2**bins[k]-2)
                        log_name = f'./{log_dir}/train_' + data[i] + '_seed'+str(j) + '_bins' + str(bins[k])+'.log'
                    else:
                        log_name = f'./{log_dir}/train_' + data[i] + '_seed'+str(j)+ '_fp32' + '.log'
                    args += f' use_discretized_grad={use_discretized_grad_str} discretized_grad_renew={discretized_grad_renew_str} stochastic_rounding={stochastic_rounding_str}'
                    if data[i] == 'bosch':
                        args += ' learning_rate=0.015 num_leaves=45'
                    if data[i] in col_wise_data:
                        args += ' force_row_wise=false force_col_wise=true'
                    if device != 'cpu':
                        args += f' device_type=cuda gpu_device_id=0 num_threads=24'
                    # fp32 baseline runs use the unmodified LightGBM build.
                    if not use_discretized_grad:
                        running.write(f'../LightGBM-master/lightgbm config={base_conf_fname} {args} > {log_name} 2>&1\n')
                    else:
                        running.write(f'../LightGBM/lightgbm config={base_conf_fname} {args} > {log_name} 2>&1\n')
    elif algorithm == 'xgb':
        for i in range(8):
            for j in range(5):
                log_name = f'./{log_dir}/train_' + data[i] + '_seed'+str(j)+ '_xgb' + '.log'
                base_conf_fname = 'xgboost.conf'
                args = ''
                args += dataset[i]
                if task[i] == 'ranking':
                    args += '.xgb?format=libsvm'
                args += ' seed=' + str(j)
                if not for_speed:
                    args += ' ' + validset[i].replace('valid=', 'eval[test]=')
                    if task[i] == 'ranking':
                        args += '.xgb?format=libsvm'
                    metric = 'auc' if task[i] == 'binary' else ('rmse' if task[i] == 'regression' else 'ndcg@10')
                    args += f' eval_metric={metric}'
                objective = 'binary:logistic' if task[i] == 'binary' else ('reg:linear' if task[i] == 'regression' else 'rank:pairwise')
                args += f' objective={objective}'
                if data[i] == 'bosch':
                    args += ' eta=0.015 max_leaves=45' # max_leaves=45 for xgboost to reduce time cost for post pruning
                if device != 'cpu':
                    args += ' tree_method=gpu_hist nthread=24'
                running.write(f'../xgboost/xgboost {base_conf_fname} {args} > {log_name} 2>&1\n')
    elif algorithm == 'cat':
        for i in range(8):
            for j in range(5):
                log_name = f'./{log_dir}/train_' + data[i] + '_seed'+str(j)+ '_cat' + '.log'
                base_conf_fname = 'catboost.json'
                args = ''
                args += f"--params-file {base_conf_fname}"
                if data[i] == 'bosch':
                    args += " --learning-rate 0.015 --max-leaves 45"
                # CatBoost reads libsvm files via a scheme prefix; ranking
                # datasets use pre-converted '.cat' files instead.
                data_path_prefix = 'libsvm://' if task[i] != 'ranking' else ''
                data_path_suffix = '' if task[i] != 'ranking' else '.cat'
                data_path_for_catboost = dataset[i].split('=')[-1]
                args += f" --learn-set {data_path_prefix}{data_path_for_catboost}{data_path_suffix}"
                if not for_speed:
                    valid_path_for_catboost = validset[i].split('=')[-1]
                    args += f" --test-set {data_path_prefix}{valid_path_for_catboost}{data_path_suffix}"
                args += f" --column-description {data_path_for_catboost.split('.')[0]}.cd"
                loss_function = "Logloss" if task[i] == 'binary' else ("RMSE" if task[i] == 'regression' else "YetiRank")
                args += f" --loss-function {loss_function}"
                if not for_speed:
                    eval_metric = "AUC" if task[i] == 'binary' else ("RMSE" if task[i] == 'regression' else "NDCG:top=10\\;type=Exp")
                    args += f" --eval-metric {eval_metric}"
                args += " --metric-period 1"
                task_type = "CPU" if device == 'cpu' else "GPU --devices 0 --thread-count 24"
                args += f" --task-type {task_type}"
                args += f" --random-seed {j}"
                args += f" --bootstrap-type No --random-strength 0.0 --rsm 1.0" # remove known randomness
                running.write(f"../catboost/catboost/app/catboost fit {args} > {log_name} 2>&1\n")
if __name__ == '__main__':
    # Parse the CLI options and emit the benchmark commands into run.sh.
    args = arg_parser.parse_args()
    generate_script(args.data_path, args.use_discretized_grad, args.discretized_grad_renew, args.stochastic_rounding, args.for_speed, args.device, args.algorithm, args.log_path)
| 7,377 | 46.294872 | 177 | py |
neuron-merging | neuron-merging-main/main.py | from __future__ import print_function
import warnings
warnings.simplefilter("ignore", UserWarning)
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
import sys
import pickle
import copy
cwd = os.getcwd()
sys.path.append(cwd+'/../')
import models
from torchvision import datasets, transforms
from torch.autograd import Variable
from decompose import Decompose
def save_state(model, acc):
    """Save the model's state_dict plus its accuracy to saved_models/.

    The checkpoint filename is derived from the global `args`
    (arch / dataset / model_type, and -- for retrained models --
    criterion and pruning ratio; ResNet/WideResNet names also embed
    depth/width).

    Parameters
    ----------
    model : the network whose parameters are saved.
    acc : accuracy value stored alongside the weights.
    """
    print('==> Saving model ...')
    state = {
        'acc': acc,
        'state_dict': model.state_dict(),
    }
    # Strip a 'module.' prefix (left by nn.DataParallel) from parameter
    # names.  BUG FIX: iterate over a *copy* of the keys -- popping from
    # the dict while iterating its live keys() view raises
    # "RuntimeError: dictionary changed size during iteration" in Python 3.
    for key in list(state['state_dict'].keys()):
        if 'module' in key:
            print(key)
            state['state_dict'][key.replace('module.', '')] = \
                state['state_dict'].pop(key)
    # save
    if args.model_type == 'original':
        if args.arch == 'WideResNet':
            model_filename = '.'.join([args.arch,
                                       args.dataset,
                                       args.model_type,
                                       '_'.join(map(str, args.depth_wide)),
                                       'pth.tar'])
        elif args.arch == 'ResNet':
            model_filename = '.'.join([args.arch,
                                       args.dataset,
                                       args.model_type,
                                       str(args.depth_wide),
                                       'pth.tar'])
        else:
            model_filename = '.'.join([args.arch,
                                       args.dataset,
                                       args.model_type,
                                       'pth.tar'])
    else:  # retrain
        if args.arch == 'WideResNet':
            model_filename = '.'.join([args.arch,
                                       '_'.join(map(str, args.depth_wide)),
                                       args.dataset,
                                       args.model_type,
                                       args.criterion,
                                       str(args.pruning_ratio),
                                       'pth.tar'])
        elif args.arch == 'ResNet':
            model_filename = '.'.join([args.arch,
                                       str(args.depth_wide),
                                       args.dataset,
                                       args.model_type,
                                       args.criterion,
                                       str(args.pruning_ratio),
                                       'pth.tar'])
        else:
            model_filename = '.'.join([args.arch,
                                       args.dataset,
                                       args.model_type,
                                       args.criterion,
                                       str(args.pruning_ratio),
                                       'pth.tar'])
    torch.save(state, os.path.join('saved_models/', model_filename))
def train(epoch):
    """Run one training epoch over the global train_loader.

    Relies on module-level globals: model, optimizer, criterion, args,
    train_loader.  Logs the batch loss every args.log_interval batches.
    """
    model.train()
    for step, (inputs, labels) in enumerate(train_loader):
        if args.cuda:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)
        optimizer.zero_grad()
        predictions = model(inputs)
        batch_loss = criterion(predictions, labels)
        batch_loss.backward()
        optimizer.step()
        if step % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(inputs), len(train_loader.dataset),
                100. * step / len(train_loader), batch_loss.data))
    return
def test(epoch, evaluate=False):
    """Evaluate the global model on test_loader and track the best accuracy.

    Updates the module-level best_acc / best_epoch; when a new best is
    reached and `evaluate` is False, the model is checkpointed via
    save_state.  Prints average loss and accuracy.
    """
    global best_acc
    global best_epoch
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # NOTE(review): `volatile=True` is ignored (with a warning) on
        # PyTorch >= 0.4; a `torch.no_grad()` block would be the modern
        # equivalent -- confirm the targeted PyTorch version before changing.
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += criterion(output, target).data
        # index of the max log-probability per sample
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    acc = 100. * float(correct) / len(test_loader.dataset)
    if (acc > best_acc):
        best_acc = acc
        best_epoch = epoch
        if not evaluate:
            save_state(model, best_acc)
    test_loss /= len(test_loader.dataset)
    # test_loss was accumulated per batch, so scale by batch size to report
    # an approximate per-sample average.
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss * args.batch_size, correct, len(test_loader.dataset),
        100. * float(correct) / len(test_loader.dataset)))
    print('Best Accuracy: {:.2f}%, Best Epoch: {}\n'.format(best_acc, best_epoch))
    return
def adjust_learning_rate(optimizer, epoch, gammas, schedule):
    """Apply step decay to the optimizer's learning rate.

    Starting from the global args.lr, multiply by `gamma` for every
    milestone in `schedule` that `epoch` has reached; past 3/4 of the
    total epochs the decay per milestone is squared (gamma**2).
    """
    lr = args.lr
    late_phase_start = args.epochs * 3 // 4
    for gamma, milestone in zip(gammas, schedule):
        if epoch < milestone:
            break
        if epoch <= late_phase_start:
            lr *= gamma
        else:
            lr *= gamma * gamma
    print('learning rate : ', lr)
    for group in optimizer.param_groups:
        group['lr'] = lr
    return
def weight_init(model, decomposed_weight_list, target):
    """Copy decomposed weights into `model`, in state_dict order.

    Parameters
    ----------
    model : target network; every tensor in its state_dict is overwritten
        in place (state_dict tensors alias the model's parameters).
    decomposed_weight_list : tensors in the same order as
        model.state_dict(); consumed destructively (popped from the front).
    target : unused here; kept for signature compatibility with callers.

    Returns the (mutated) model.
    """
    # Hoist the state_dict lookup: the original rebuilt the whole dict on
    # every loop iteration via model.state_dict()[layer].
    state = model.state_dict()
    for layer in state:
        decomposed_weight = decomposed_weight_list.pop(0)
        state[layer].copy_(decomposed_weight)
    return model
if __name__=='__main__':
    # settings
    parser = argparse.ArgumentParser(description='Neuron Merging Example')
    parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                        help='input batch size for training (default: 128)')
    parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
                        help='input batch size for testing (default: 256)')
    parser.add_argument('--epochs', type=int, default=200, metavar='N',
                        help='number of epochs to train (default: 200)')
    parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
                        help='learning rate (default: 0.1)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                        metavar='W', help='weight decay (default: 5e-4)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--arch', action='store', default='VGG',
                        help='network structure: VGG | ResNet | WideResNet | LeNet_300_100')
    parser.add_argument('--pretrained', action='store', default=None,
                        help='pretrained model')
    parser.add_argument('--evaluate', action='store_true', default=False,
                        help='whether to run evaluation')
    parser.add_argument('--retrain', action='store_true', default=False,
                        help='whether to retrain')
    parser.add_argument('--model-type', action='store', default='original',
                        help='model type: original | prune | merge')
    parser.add_argument('--target', action='store', default='conv',
                        help='decomposing target: default=None | conv | ip')
    parser.add_argument('--dataset', action='store', default='cifar10',
                        help='dataset: cifar10 | cifar100 | FashionMNIST')
    parser.add_argument('--criterion', action='store', default='l1-norm',
                        help='criterion : l1-norm | l2-norm | l2-GM')
    parser.add_argument('--threshold', type=float, default=1,
                        help='threshold (default: 1)')
    parser.add_argument('--lamda', type=float, default=0.8,
                        help='lamda (default: 0.8)')
    parser.add_argument('--pruning-ratio', type=float, default=0.7,
                        help='pruning ratio : (default: 0.7)')
    parser.add_argument('--gammas', type=float, nargs='+', default=[0.1,0.1],
                        help='gammas : (default: [0.1,0.1])')
    parser.add_argument('--schedule', type=int, nargs='+', default=[100,200],
                        help='schedule : (default: [100,200])')
    parser.add_argument('--depth-wide', action='store', default=None,
                        help='depth and wide (default: None)')
    args = parser.parse_args()
    # check options
    if not (args.model_type in [None, 'original', 'merge', 'prune']):
        print('ERROR: Please choose the correct model type')
        exit()
    if not (args.target in [None, 'conv', 'ip']):
        print('ERROR: Please choose the correct decompose target')
        exit()
    if not (args.arch in ['VGG','ResNet','WideResNet','LeNet_300_100']):
        print('ERROR: specified arch is not suppported')
        exit()
    # seed everything for reproducibility
    torch.manual_seed(args.seed)
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.deterministic=True
    # load data
    num_classes = 10
    if args.dataset == 'cifar10':
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        train_data = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
        test_data = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, num_workers=2)
        num_classes = 10
    elif args.dataset == 'cifar100':
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        train_data = datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
        test_data = datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, num_workers=2)
        num_classes = 100
    elif args.dataset == 'FashionMNIST':
        transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)) ])
        train_data = datasets.FashionMNIST('data', train=True, download=True, transform=transform)
        test_data = datasets.FashionMNIST('data', train=False, download=True, transform=transform)
        kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, **kwargs)
        test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, **kwargs)
        num_classes = 10
    else :
        pass
    # depth-wide arrives as a string like "(16,8)"; eval turns it into the
    # tuple/int the model constructors expect.
    if args.depth_wide:
        args.depth_wide = eval(args.depth_wide)
    cfg = None
    # make cfg
    # When retraining a pruned/merged model, shrink the per-layer channel
    # counts by the pruning ratio; temp_cfg is the flat per-layer list that
    # Decompose consumes.
    if args.retrain:
        if args.target == 'conv' :
            if args.arch == 'VGG':
                if args.dataset == 'cifar10':
                    cfg = [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M', 256, 256, 256]
                elif args.dataset == 'cifar100':
                    cfg = [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 256, 'M', 256, 256, 256]
                temp_cfg = list(filter(('M').__ne__, cfg))
            elif args.arch == 'ResNet':
                cfg = [16, 32, 64]
                for i in range(len(cfg)):
                    cfg[i] = int(cfg[i] * (1 - args.pruning_ratio))
                temp_cfg = cfg
            elif args.arch == 'WideResNet':
                cfg = [16, 32, 64]
                temp_cfg = [16, 32, 32]
                for i in range(len(cfg)):
                    cfg[i] = int(cfg[i] * (1 - args.pruning_ratio))
                    temp_cfg[i] = cfg[i] * args.depth_wide[1]
        elif args.target == 'ip' :
            if args.arch == 'LeNet_300_100':
                cfg = [300,100]
                for i in range(len(cfg)):
                    cfg[i] = round(cfg[i] * (1 - args.pruning_ratio))
                temp_cfg = cfg
            pass
    # generate the model
    if args.arch == 'VGG':
        model = models.VGG(num_classes, cfg=cfg)
    elif args.arch == 'LeNet_300_100':
        model = models.LeNet_300_100(bias_flag=True, cfg=cfg)
    elif args.arch == 'ResNet':
        model = models.ResNet(int(args.depth_wide) ,num_classes,cfg=cfg)
    elif args.arch == 'WideResNet':
        model = models.WideResNet(args.depth_wide[0], num_classes, widen_factor=args.depth_wide[1], cfg=cfg)
    else:
        pass
    if args.cuda:
        model.cuda()
    # pretrain
    best_acc = 0.0
    best_epoch = 0
    if args.pretrained:
        pretrained_model = torch.load(args.pretrained)
        best_epoch = 0
        if args.model_type == 'original':
            best_acc = pretrained_model['acc']
            model.load_state_dict(pretrained_model['state_dict'])
    # weight initialization
    # For prune/merge retraining, transform the pretrained weights with
    # Decompose and copy them into the (smaller) model.
    if args.retrain:
        decomposed_list = Decompose(args.arch, pretrained_model['state_dict'], args.criterion, args.threshold, args.lamda, args.model_type, temp_cfg, args.cuda).main()
        model = weight_init(model, decomposed_list, args.target)
    # print the number of model parameters
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    print('Total parameter number:', params, '\n')
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss()
    if args.evaluate:
        test(0, evaluate=True)
        exit()
    for epoch in range(1, args.epochs + 1):
        adjust_learning_rate(optimizer, epoch, args.gammas, args.schedule)
        train(epoch)
        test(epoch)
neuron-merging | neuron-merging-main/decompose.py | from __future__ import print_function
import argparse
import pickle
import numpy as np
from sklearn.utils.extmath import randomized_svd
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial.distance import cosine
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.spatial import distance
import sys
import os
import scipy
import random
cwd = os.getcwd()
sys.path.append(cwd+'/../')
def create_scaling_mat_ip_thres_bias(weight, ind, threshold, model_type):
    '''
    Build the (n_{i+1} x len(ind)) scaling matrix for a fully-connected layer.

    weight - 2D matrix (n_{i+1}, n_i), np.ndarray
    ind - chosen indices to remain, np.ndarray
    threshold - cosine similarity threshold; a falsy value disables the check
    model_type - 'prune' keeps only identity rows for the chosen neurons;
        any other value merges each pruned neuron into its most cosine-similar
        kept neuron, scaled by the ratio of their L2 norms.
    '''
    assert(type(weight) == np.ndarray)
    assert(type(ind) == np.ndarray)
    # Pairwise cosine similarity between all output neurons.  scipy's cdist
    # replaces the previous sklearn pairwise_distances call; both compute
    # the same cosine distance matrix, and scipy is already imported here.
    cosine_sim = 1 - distance.cdist(weight, weight, metric="cosine")
    weight_chosen = weight[ind, :]
    scaling_mat = np.zeros([weight.shape[0], weight_chosen.shape[0]])
    for i in range(weight.shape[0]):
        if i in ind:  # chosen: identity entry in its own column
            ind_i, = np.where(ind == i)
            assert(len(ind_i) == 1)  # check if only one index is found
            scaling_mat[i, ind_i] = 1
        else:  # not chosen
            if model_type == 'prune':
                continue
            # Most-similar kept neuron.  np.argmax replaces the equivalent
            # but obscure argpartition(..., -1)[-1] idiom (ties now resolve
            # deterministically to the first maximum).
            max_cos_value = np.max(cosine_sim[i][ind])
            max_cos_value_index = np.argmax(cosine_sim[i][ind])
            if threshold and max_cos_value < threshold:
                continue
            # Scale by the norm ratio so the merged neuron preserves the
            # pruned neuron's magnitude.
            baseline_weight = weight_chosen[max_cos_value_index]
            current_weight = weight[i]
            baseline_norm = np.linalg.norm(baseline_weight)
            current_norm = np.linalg.norm(current_weight)
            scaling_factor = current_norm / baseline_norm
            scaling_mat[i, max_cos_value_index] = scaling_factor
    return scaling_mat
def create_scaling_mat_conv_thres_bn(weight, ind, threshold,
                                     bn_weight, bn_bias,
                                     bn_mean, bn_var, lam, model_type):
    '''
    Build the scaling matrix that merges pruned conv channels into kept ones,
    accounting for the BatchNorm layer that follows the convolution.

    weight - 4D tensor(n, c, h, w), np.ndarray
    ind - chosen indices to remain
    threshold - cosine similarity threshold
    bn_weight, bn_bias - parameters of batch norm layer right after the conv layer
    bn_mean, bn_var - running_mean, running_var of BN (for inference)
    lam - how much to consider cosine sim over bias, float value between 0 and 1
    model_type - 'prune' emits only identity rows; otherwise each pruned
        channel is merged into the kept channel minimizing a lam-weighted
        combination of cosine distance and BN bias mismatch.
    '''
    assert(type(weight) == np.ndarray)
    assert(type(ind) == np.ndarray)
    assert(type(bn_weight) == np.ndarray)
    assert(type(bn_bias) == np.ndarray)
    assert(type(bn_mean) == np.ndarray)
    assert(type(bn_var) == np.ndarray)
    assert(bn_weight.shape[0] == weight.shape[0])
    assert(bn_bias.shape[0] == weight.shape[0])
    assert(bn_mean.shape[0] == weight.shape[0])
    assert(bn_var.shape[0] == weight.shape[0])
    # Flatten each output channel's filter into a row vector.
    weight = weight.reshape(weight.shape[0], -1)
    cosine_dist = pairwise_distances(weight, metric="cosine")
    weight_chosen = weight[ind, :]
    scaling_mat = np.zeros([weight.shape[0], weight_chosen.shape[0]])
    for i in range(weight.shape[0]):
        if i in ind: # chosen
            ind_i, = np.where(ind == i)
            assert(len(ind_i) == 1) # check if only one index is found
            scaling_mat[i, ind_i] = 1
        else: # not chosen
            if model_type == 'prune':
                continue
            current_weight = weight[i]
            current_norm = np.linalg.norm(current_weight)
            current_cos = cosine_dist[i]
            # BN parameters of the pruned channel.
            # NOTE(review): bn_var is the running *variance* but is used below
            # as if it were a standard deviation (sigma) -- confirm against
            # the paper / caller whether a sqrt is applied upstream.
            gamma_1 = bn_weight[i]
            beta_1 = bn_bias[i]
            mu_1 = bn_mean[i]
            sigma_1 = bn_var[i]
            # choose one
            cos_list = []
            scale_list = []
            bias_list = []
            for chosen_i in ind:
                chosen_weight = weight[chosen_i]
                chosen_norm = np.linalg.norm(chosen_weight, ord = 2)
                chosen_cos = current_cos[chosen_i]
                gamma_2 = bn_weight[chosen_i]
                beta_2 = bn_bias[chosen_i]
                mu_2 = bn_mean[chosen_i]
                sigma_2 = bn_var[chosen_i]
                # compute cosine sim
                cos_list.append(chosen_cos)
                # compute s
                s = current_norm/chosen_norm
                # compute scale term
                scale_term_inference = s * (gamma_2 / gamma_1) * (sigma_1 / sigma_2)
                scale_list.append(scale_term_inference)
                # compute bias term
                bias_term_inference = abs((gamma_2/sigma_2) * (s * (-(sigma_1*beta_1/gamma_1) + mu_1) - mu_2) + beta_2)
                bias_term_inference = bias_term_inference/scale_term_inference
                bias_list.append(bias_term_inference)
            assert(len(cos_list) == len(ind))
            assert(len(scale_list) == len(ind))
            assert(len(bias_list) == len(ind))
            # merge cosine distance and bias distance
            # NOTE(review): this min-max normalization divides by zero when all
            # bias terms are equal (e.g. a single kept channel) -- verify
            # callers never hit that case.
            bias_list = (bias_list - np.min(bias_list)) / (np.max(bias_list)-np.min(bias_list))
            score_list = lam * np.array(cos_list) + (1-lam) * np.array(bias_list)
            # find index and scale with minimum distance
            min_ind = np.argmin(score_list)
            min_scale = scale_list[min_ind]
            min_cosine_sim = 1-cos_list[min_ind]
            # check threshold - second
            if threshold and min_cosine_sim < threshold:
                continue
            scaling_mat[i, min_ind] = min_scale
    return scaling_mat
class Decompose:
    def __init__(self, arch, param_dict, criterion, threshold, lamda, model_type, cfg, cuda):
        """Store the pruning/merging configuration for a pretrained network."""
        self.param_dict = param_dict            # state_dict of the pretrained model
        self.arch = arch                        # 'VGG' | 'ResNet' | 'WideResNet' | 'LeNet_300_100'
        self.criterion = criterion              # channel-importance criterion: 'l1-norm' | 'l2-norm' | 'l2-GM'
        self.threshold = threshold              # cosine-similarity threshold used when merging
        self.lamda = lamda                      # cosine-vs-bias weighting for conv merging
        self.model_type = model_type            # 'original' | 'prune' | 'merge'
        self.cfg = cfg                          # per-layer channel counts to keep
        self.cuda = cuda                        # move tensors to GPU when True
        self.output_channel_index = {}          # layer index -> kept channel indices
        self.decompose_weight = []              # transformed weights, filled by get_decompose_weight
def get_output_channel_index(self, value, layer_id):
output_channel_index = []
if len(value.size()) :
weight_vec = value.view(value.size()[0], -1)
weight_vec = weight_vec.cuda()
# l1-norm
if self.criterion == 'l1-norm':
norm = torch.norm(weight_vec, 1, 1)
norm_np = norm.cpu().detach().numpy()
arg_max = np.argsort(norm_np)
arg_max_rev = arg_max[::-1][:self.cfg[layer_id]]
output_channel_index = sorted(arg_max_rev.tolist())
# l2-norm
elif self.criterion == 'l2-norm':
norm = torch.norm(weight_vec, 2, 1)
norm_np = norm.cpu().detach().numpy()
arg_max = np.argsort(norm_np)
arg_max_rev = arg_max[::-1][:self.cfg[layer_id]]
output_channel_index = sorted(arg_max_rev.tolist())
# l2-GM
elif self.criterion == 'l2-GM':
weight_vec = weight_vec.cpu().detach().numpy()
matrix = distance.cdist(weight_vec, weight_vec, 'euclidean')
similar_sum = np.sum(np.abs(matrix), axis=0)
output_channel_index = np.argpartition(similar_sum, -self.cfg[layer_id])[-self.cfg[layer_id]:]
return output_channel_index
def get_decompose_weight(self):
# scale matrix
z = None
# copy original weight
self.decompose_weight = list(self.param_dict.values())
# cfg index
layer_id = -1
for index, layer in enumerate(self.param_dict):
original = self.param_dict[layer]
# VGG
if self.arch == 'VGG':
# feature
if 'feature' in layer :
# conv
if len(self.param_dict[layer].shape) == 4:
layer_id += 1
# get index
self.output_channel_index[index] = self.get_output_channel_index(self.param_dict[layer], layer_id)
# Merge scale matrix
if z != None:
original = original[:,input_channel_index,:,:]
for i, f in enumerate(self.param_dict[layer]):
o = f.view(f.shape[0],-1)
o = torch.mm(z,o)
o = o.view(z.shape[0],f.shape[1],f.shape[2])
original[i,:,:,:] = o
# make scale matrix with batchNorm
bn = list(self.param_dict.values())
bn_weight = bn[index+1].cpu().detach().numpy()
bn_bias = bn[index+2].cpu().detach().numpy()
bn_mean = bn[index+3].cpu().detach().numpy()
bn_var = bn[index+4].cpu().detach().numpy()
x = create_scaling_mat_conv_thres_bn(self.param_dict[layer].cpu().detach().numpy(), np.array(self.output_channel_index[index]), self.threshold,
bn_weight, bn_bias, bn_mean, bn_var, self.lamda, self.model_type)
z = torch.from_numpy(x).type(dtype=torch.float)
if self.cuda:
z = z.cuda()
z = z.t()
# pruned
pruned = original[self.output_channel_index[index],:,:,:]
# update next input channel
input_channel_index = self.output_channel_index[index]
# update decompose weight
self.decompose_weight[index] = pruned
# batchNorm
elif len(self.param_dict[layer].shape):
# pruned
pruned = self.param_dict[layer][input_channel_index]
# update decompose weight
self.decompose_weight[index] = pruned
# first classifier
else:
pruned = torch.zeros(original.shape[0],z.shape[0])
if self.cuda:
pruned = pruned.cuda()
for i, f in enumerate(original):
o_old = f.view(z.shape[1],-1)
o = torch.mm(z,o_old).view(-1)
pruned[i,:] = o
self.decompose_weight[index] = pruned
break
# ResNet
elif self.arch == 'ResNet':
# block
if 'layer' in layer :
# last layer each block
if '0.conv1.weight' in layer :
layer_id += 1
# Pruning
if 'conv1' in layer :
# get index
self.output_channel_index[index] = self.get_output_channel_index(self.param_dict[layer], layer_id)
# make scale matrix with batchNorm
bn = list(self.param_dict.values())
bn_weight = bn[index+1].cpu().detach().numpy()
bn_bias = bn[index+2].cpu().detach().numpy()
bn_mean = bn[index+3].cpu().detach().numpy()
bn_var = bn[index+4].cpu().detach().numpy()
x = create_scaling_mat_conv_thres_bn(self.param_dict[layer].cpu().detach().numpy(), np.array(self.output_channel_index[index]), self.threshold,
bn_weight, bn_bias, bn_mean, bn_var, self.lamda, self.model_type)
z = torch.from_numpy(x).type(dtype=torch.float)
if self.cuda:
z = z.cuda()
z = z.t()
# pruned
pruned = original[self.output_channel_index[index],:,:,:]
# update next input channel
input_channel_index = self.output_channel_index[index]
# update decompose weight
self.decompose_weight[index] = pruned
# batchNorm
elif 'bn1' in layer :
if len(self.param_dict[layer].shape):
# pruned
pruned = self.param_dict[layer][input_channel_index]
# update decompose weight
self.decompose_weight[index] = pruned
# Merge scale matrix
elif 'conv2' in layer :
if z != None:
original = original[:,input_channel_index,:,:]
for i, f in enumerate(self.param_dict[layer]):
o = f.view(f.shape[0],-1)
o = torch.mm(z,o)
o = o.view(z.shape[0],f.shape[1],f.shape[2])
original[i,:,:,:] = o
scaled = original
# update decompose weight
self.decompose_weight[index] = scaled
# WideResNet
elif self.arch == 'WideResNet':
# block
if 'block' in layer :
# last layer each block
if '0.conv1.weight' in layer :
layer_id += 1
# Pruning
if 'conv1' in layer :
# get index
self.output_channel_index[index] = self.get_output_channel_index(self.param_dict[layer], layer_id)
# make scale matrix with batchNorm
bn = list(self.param_dict.values())
bn_weight = bn[index+1].cpu().detach().numpy()
bn_bias = bn[index+2].cpu().detach().numpy()
bn_mean = bn[index+3].cpu().detach().numpy()
bn_var = bn[index+4].cpu().detach().numpy()
x = create_scaling_mat_conv_thres_bn(self.param_dict[layer].cpu().detach().numpy(), np.array(self.output_channel_index[index]), self.threshold,
bn_weight, bn_bias, bn_mean, bn_var, self.lamda, self.model_type)
z = torch.from_numpy(x).type(dtype=torch.float)
if self.cuda:
z = z.cuda()
z = z.t()
# pruned
pruned = original[self.output_channel_index[index],:,:,:]
# update next input channel
input_channel_index = self.output_channel_index[index]
# update decompose weight
self.decompose_weight[index] = pruned
# BatchNorm
elif 'bn2' in layer :
if len(self.param_dict[layer].shape):
# pruned
pruned = self.param_dict[layer][input_channel_index]
# update decompose weight
self.decompose_weight[index] = pruned
# Merge scale matrix
elif 'conv2' in layer :
# scale
if z != None:
original = original[:,input_channel_index,:,:]
for i, f in enumerate(self.param_dict[layer]):
o = f.view(f.shape[0],-1)
o = torch.mm(z,o)
o = o.view(z.shape[0],f.shape[1],f.shape[2])
original[i,:,:,:] = o
scaled = original
# update decompose weight
self.decompose_weight[index] = scaled
# LeNet_300_100
elif self.arch == 'LeNet_300_100':
# ip
if layer in ['ip1.weight','ip2.weight'] :
# Merge scale matrix
if z != None:
original = torch.mm(original,z)
layer_id += 1
# concatenate weight and bias
if layer in 'ip1.weight' :
weight = self.param_dict['ip1.weight'].cpu().detach().numpy()
bias = self.param_dict['ip1.bias'].cpu().detach().numpy()
elif layer in 'ip2.weight' :
weight = self.param_dict['ip2.weight'].cpu().detach().numpy()
bias = self.param_dict['ip2.bias'].cpu().detach().numpy()
bias_reshaped = bias.reshape(bias.shape[0],-1)
concat_weight = np.concatenate([weight, bias_reshaped], axis = 1)
# get index
self.output_channel_index[index] = self.get_output_channel_index(torch.from_numpy(concat_weight), layer_id)
# make scale matrix with bias
x = create_scaling_mat_ip_thres_bias(concat_weight, np.array(self.output_channel_index[index]), self.threshold, self.model_type)
z = torch.from_numpy(x).type(dtype=torch.float)
if self.cuda:
z = z.cuda()
# pruned
pruned = original[self.output_channel_index[index],:]
# update next input channel
input_channel_index = self.output_channel_index[index]
# update decompose weight
self.decompose_weight[index] = pruned
elif layer in 'ip3.weight':
original = torch.mm(original,z)
# update decompose weight
self.decompose_weight[index] = original
# update bias
elif layer in ['ip1.bias','ip2.bias']:
self.decompose_weight[index] = original[input_channel_index]
else :
pass
def main(self):
if self.cuda == False:
for layer in self.param_dict:
self.param_dict[layer] = self.param_dict[layer].cpu()
self.get_decompose_weight()
return self.decompose_weight | 19,186 | 35.616412 | 168 | py |
neuron-merging | neuron-merging-main/models/ResNet.py | import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with 1-pixel padding."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
class BasicBlock(nn.Module):
    """Residual basic block (two 3x3 convs) in a pre-activation-style layout.

    Note the unusual ordering: ReLU is applied to the *input* at the top of
    forward, and the trailing ReLU after the residual add is commented out —
    the enclosing ResNet applies it at stage boundaries instead.
    """
    expansion = 1

    def __init__(self, inplanes, planes, cfg, stride=1, downsample=None):
        # cfg is the (possibly pruned) width of the intermediate conv
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, cfg, stride)
        self.bn1 = nn.BatchNorm2d(cfg)
        self.conv2 = conv3x3(cfg, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # pre-activation of the incoming feature map
        x = F.relu(x)
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # out = self.relu(out)
        return out

    def bn_feature(self, x):
        """Same as forward, but also return the post-bn2 (pre-residual) activation."""
        x = F.relu(x)
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        bn_feature = out
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # out = self.relu(out)
        return out, bn_feature
class Bottleneck(nn.Module):
    """Residual bottleneck block (1x1 -> 3x3 -> 1x1, 4x channel expansion).

    NOTE(review): the `cfg` parameter is accepted for signature parity with
    BasicBlock but is never used here — bottleneck widths are not pruned.
    """
    expansion = 4

    def __init__(self, inplanes, planes, cfg, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * Bottleneck.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * Bottleneck.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # pre-activation of the incoming feature map (matches BasicBlock);
        # the trailing post-add ReLU is deliberately left commented out
        x = F.relu(x)
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # out = self.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet (depth = 6n+2 with BasicBlock, 9n+2 with Bottleneck).

    `cfg` overrides the intermediate channel widths of the three stages
    (default [16, 32, 64]); `num_classes` sizes the final linear layer.
    Blocks use a pre-activation layout, so a final ReLU is applied in forward
    before pooling.
    """

    def __init__(self, depth, num_classes, cfg=None, bottleneck=False):
        super(ResNet, self).__init__()
        if cfg == None:
            cfg = [16, 32, 64]
        self.inplanes = 16
        #print(bottleneck)
        if bottleneck == True:
            n = int((depth - 2) / 9)
            block = Bottleneck
        else:
            n = int((depth - 2) / 6)
            block = BasicBlock
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, n, cfg[0])
        self.layer2 = self._make_layer(block, 32, n, cfg[1], stride=2)
        self.layer3 = self._make_layer(block, 64, n, cfg[2], stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # He-style init for convs; BatchNorm starts as identity (weight 1, bias 0)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, cfg, stride=1):
        """Build one stage of `blocks` residual blocks, downsampling if needed."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 conv shortcut when spatial size or channel count changes
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, cfg, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, cfg))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        # final pre-activation ReLU (blocks do not apply one after the add)
        x = F.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x

    def get_bn_before_relu(self):
        """Return the last BatchNorm of each stage (used by distillation code)."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
        else:
            print('ResNet unknown block error !!!')
        return [bn1, bn2, bn3]

    def get_channel_num(self):
        # nominal per-stage widths (not the pruned cfg values)
        return [16, 32, 64]

    def extract_feature(self, x, preReLU=False):
        """Return the three stage feature maps; moves input to GPU unconditionally."""
        x = x.cuda()
        x = self.conv1(x)
        x = self.bn1(x)
        feat1 = self.layer1(x)
        feat2 = self.layer2(feat1)
        feat3 = self.layer3(feat2)
        x = F.relu(feat3)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        #out = self.fc(x)
        if not preReLU:
            feat1 = F.relu(feat1)
            feat2 = F.relu(feat2)
            feat3 = F.relu(feat3)
        return [feat1, feat2, feat3]#, out

    def bn_feature(self, x):
        """Collect each BasicBlock's post-bn2 activation (as numpy) across all stages."""
        bn_feature_list = []
        x = self.conv1(x)
        x = self.bn1(x)
        for block in self.layer1:
            if isinstance(block, BasicBlock):
                x, bn_feature = block.bn_feature(x)
                temp = bn_feature.cpu().detach().numpy()
                bn_feature_list.append(temp)
        for block in self.layer2:
            if isinstance(block, BasicBlock):
                x, bn_feature = block.bn_feature(x)
                temp = bn_feature.cpu().detach().numpy()
                bn_feature_list.append(temp)
        for block in self.layer3:
            if isinstance(block, BasicBlock):
                x, bn_feature = block.bn_feature(x)
                temp = bn_feature.cpu().detach().numpy()
                bn_feature_list.append(temp)
        return bn_feature_list

    def ware(self, x):
        """Full forward pass, returned as a numpy array instead of a tensor."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = F.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x.cpu().detach().numpy()
| 7,173 | 26.381679 | 99 | py |
neuron-merging | neuron-merging-main/models/LeNet_300_100.py | from __future__ import print_function
import torch
import torch.nn as nn
import os
class LeNet_300_100(nn.Module):
def __init__(self, bias_flag, cfg):
if cfg == None:
cfg = [300,100]
super(LeNet_300_100, self).__init__()
self.ip1 = nn.Linear(28*28, cfg[0], bias=bias_flag)
self.relu_ip1 = nn.ReLU(inplace=True)
self.ip2 = nn.Linear(cfg[0], cfg[1], bias=bias_flag)
self.relu_ip2 = nn.ReLU(inplace=True)
self.ip3 = nn.Linear(cfg[1], 10, bias=bias_flag)
return
def forward(self, x):
x = x.view(x.size(0), 28*28)
x = self.ip1(x)
x = self.relu_ip1(x)
x = self.ip2(x)
x = self.relu_ip2(x)
x = self.ip3(x)
return x | 749 | 29 | 60 | py |
neuron-merging | neuron-merging-main/models/VGG.py | from __future__ import print_function
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
__all__ = ['VGG']

# Per-depth VGG layer plans: integers are conv output-channel counts,
# 'M' denotes a 2x2 max-pooling layer.
defaultcfg = {
    11: [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
    13: [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
    16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
    19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
}
class VGG(nn.Module):
    """VGG with BatchNorm for small images (CIFAR-style, 32x32 input).

    `cfg` describes the conv stack (ints = channels, 'M' = maxpool); when None
    the plan for `depth` is taken from `defaultcfg`.
    """

    def __init__(self, out_classes=10, depth=16, init_weights=True, cfg=None):
        super(VGG, self).__init__()
        if cfg is None:
            cfg = defaultcfg[depth]
        self.cfg = cfg
        self.feature = self.make_layers(cfg, True)
        self.classifier = nn.Sequential(
            nn.Linear(cfg[-1], 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Linear(512, out_classes)
        )
        if init_weights:
            self._initialize_weights()

    def make_layers(self, cfg, batch_norm=False):
        """Translate `cfg` into an nn.Sequential of conv/bn/relu and maxpool layers."""
        layers = []
        in_channels = 3
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, bias=False)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.feature(x)
        # 2x2 average pool collapses the remaining spatial dims before the classifier
        x = nn.AvgPool2d(2)(x)
        x = x.view(x.size(0), -1)
        y = self.classifier(x)
        return y

    def _initialize_weights(self):
        # He-style init for convs.
        # NOTE(review): BatchNorm weights are filled with 0.5 rather than the
        # conventional 1.0 — presumably deliberate for the merging experiments;
        # confirm before reusing elsewhere.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(0.5)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def extract_feature(self, x, preReLU=False):
        """Return the final conv feature map; moves input to GPU unconditionally."""
        x = x.cuda()
        feat1 = self.feature(x)
        if not preReLU:
            feat1 = F.relu(feat1)
        return [feat1]

    def bn_feature(self, x):
        """Collect every BatchNorm2d output (as numpy) along the feature stack."""
        bn_feature = []
        for layer in self.feature:
            x = layer(x)
            if isinstance(layer, nn.BatchNorm2d):
                # temp = torch.sum(x,0).cpu().detach().numpy()
                temp = x.cpu().detach().numpy()
                bn_feature.append(temp)
        return bn_feature

    def ware(self, x):
        """Full forward pass, returned as a numpy array instead of a tensor."""
        x = self.feature(x)
        x = nn.AvgPool2d(2)(x)
        x = x.view(x.size(0), -1)
        y = self.classifier(x)
        return y.cpu().detach().numpy()
| 3,154 | 26.920354 | 107 | py |
neuron-merging | neuron-merging-main/models/WideResNet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """WideResNet pre-activation basic block (bn-relu-conv twice + shortcut).

    When input and output widths differ (`equalInOut` False), the pre-activated
    input feeds both the conv path and a 1x1 `convShortcut`; otherwise the raw
    input is used as an identity shortcut.
    """

    def __init__(self, in_planes, out_planes, stride, cfg, dropRate=0.0):
        # cfg is the (possibly pruned) width of the first conv's output
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, cfg, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(cfg)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(cfg, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 projection shortcut, only needed when widths differ
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None

    def forward(self, x):
        # equalInOut: pre-activate into a separate `out` so the identity
        # shortcut sees the raw x; otherwise overwrite x with the activation
        # so both branches share it.
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A stage of `nb_layers` residual blocks for WideResNet.

    Only the first block changes channel count / applies `stride`; the rest
    map out_planes -> out_planes with stride 1.
    """

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, cfg, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, cfg, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, cfg, dropRate):
        layers = []
        for i in range(int(nb_layers)):
            # Replaced the legacy `cond and a or b` idiom with explicit
            # conditional expressions — equivalent for these (truthy) values,
            # but the old form silently misbehaves if the first operand is falsy.
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                cfg, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet-depth-k for CIFAR (depth = 6n+4, widen_factor = k).

    `cfg` overrides the per-stage base widths (default [16, 32, 64], each
    multiplied by `widen_factor` for the blocks' inner conv).
    """

    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0, cfg=None):
        super(WideResNet, self).__init__()
        if cfg == None:
            cfg = [16, 32, 64]
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, cfg[0]*widen_factor, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, cfg[1]*widen_factor, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, cfg[2]*widen_factor, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels
        # He-style conv init; BN identity init; only the fc bias is zeroed
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        # final bn+relu (pre-activation blocks leave the last features raw)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels[3])
        return self.fc(out)

    def get_bn_before_relu(self):
        """Return the BatchNorms sitting just before each stage's first activation."""
        bn1 = self.block2.layer[0].bn1
        bn2 = self.block3.layer[0].bn1
        bn3 = self.bn1
        return [bn1, bn2, bn3]

    def get_channel_num(self):
        # widths of the three residual stages (excludes the stem)
        return self.nChannels[1:]

    def extract_feature(self, x, preReLU=False):
        """Return the three stage feature maps; moves input to GPU unconditionally.

        NOTE(review): unlike ResNet.extract_feature, `preReLU=True` here applies
        the *next* stage's BatchNorm to each feature — verify that asymmetry is
        intended by the distillation caller.
        """
        x = x.cuda()
        out = self.conv1(x)
        feat1 = self.block1(out)
        feat2 = self.block2(feat1)
        feat3 = self.block3(feat2)
        out = self.relu(self.bn1(feat3))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels[3])
        out = self.fc(out)
        if preReLU:
            feat1 = self.block2.layer[0].bn1(feat1)
            feat2 = self.block3.layer[0].bn1(feat2)
            feat3 = self.bn1(feat3)
        return [feat1, feat2, feat3]

    def ware(self, x):
        # full forward pass, returned as a numpy array instead of a tensor
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels[3])
return self.fc(out).cpu().detach().numpy() | 4,955 | 35.711111 | 119 | py |
LAP-PAL | LAP-PAL-master/continuous/main.py | import numpy as np
import torch
import gym
import argparse
import os
import time
import utils
import TD3
import LAP_TD3
import PAL_TD3
import PER_TD3
# Runs policy for X episodes and returns average reward
# Runs policy for X episodes and returns average reward
def eval_policy(policy, env, seed, eval_episodes=10):
    """Roll out the policy deterministically and return the mean episode return."""
    eval_env = gym.make(env)
    eval_env.seed(seed + 100)

    total = 0.
    for _ in range(eval_episodes):
        obs = eval_env.reset()
        finished = False
        while not finished:
            act = policy.select_action(np.array(obs), test=True)
            obs, r, finished, _ = eval_env.step(act)
            total += r

    avg_reward = total / eval_episodes

    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--algorithm", default="LAP_TD3")           # Algorithm name
    parser.add_argument("--env", default="HalfCheetah-v3")          # OpenAI gym environment name
    parser.add_argument("--seed", default=0, type=int)              # Sets Gym, PyTorch and Numpy seeds
    parser.add_argument("--start_timesteps", default=25e3, type=int)# Time steps initial random policy is used
    parser.add_argument("--eval_freq", default=5e3, type=int)       # How often (time steps) we evaluate
    parser.add_argument("--max_timesteps", default=3e6, type=int)   # Max time steps to run environment
    parser.add_argument("--expl_noise", default=0.1)                # Std of Gaussian exploration noise
    parser.add_argument("--batch_size", default=256, type=int)      # Batch size for both actor and critic
    parser.add_argument("--discount", default=0.99)                 # Discount factor
    parser.add_argument("--tau", default=0.005)                     # Target network update rate
    parser.add_argument("--policy_noise", default=0.2)              # Noise added to target policy during critic update
    parser.add_argument("--noise_clip", default=0.5)                # Range to clip target policy noise
    parser.add_argument("--policy_freq", default=2, type=int)       # Frequency of delayed policy updates
    parser.add_argument("--alpha", default=0.4)                     # Priority = TD^alpha (only used by LAP/PAL)
    parser.add_argument("--min_priority", default=1, type=int)      # Minimum priority (set to 1 in paper, only used by LAP/PAL)
    args = parser.parse_args()

    file_name = "%s_%s_%s" % (args.algorithm, args.env, str(args.seed))

    print("---------------------------------------")
    print(f"Settings: {file_name}")
    print("---------------------------------------")

    if not os.path.exists("./results"):
        os.makedirs("./results")

    env = gym.make(args.env)

    # Set seeds
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])

    # noise/clip scale with the action range
    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
        "discount": args.discount,
        "tau": args.tau,
        "policy_noise": args.policy_noise * max_action,
        "noise_clip": args.noise_clip * max_action,
        "policy_freq": args.policy_freq
    }

    # Initialize policy and replay buffer; LAP/PAL additionally take
    # alpha/min_priority (added to kwargs only after TD3/PER construction)
    if args.algorithm == "TD3":
        policy = TD3.TD3(**kwargs)
        replay_buffer = utils.ReplayBuffer(state_dim, action_dim)

    elif args.algorithm == "PER_TD3":
        policy = PER_TD3.PER_TD3(**kwargs)
        replay_buffer = utils.PrioritizedReplayBuffer(state_dim, action_dim)

    kwargs["alpha"] = args.alpha
    kwargs["min_priority"] = args.min_priority

    if args.algorithm == "LAP_TD3":
        policy = LAP_TD3.LAP_TD3(**kwargs)
        replay_buffer = utils.PrioritizedReplayBuffer(state_dim, action_dim)

    elif args.algorithm == "PAL_TD3":
        policy = PAL_TD3.PAL_TD3(**kwargs)
        replay_buffer = utils.ReplayBuffer(state_dim, action_dim)

    # Evaluate untrained policy
    evaluations = [eval_policy(policy, args.env, args.seed)]

    state, done = env.reset(), False
    episode_reward = 0
    episode_timesteps = 0
    episode_num = 0

    for t in range(int(args.max_timesteps)):
        episode_timesteps += 1

        # Select action randomly or according to policy
        if t < args.start_timesteps:
            action = env.action_space.sample()
        else:
            action = (
                policy.select_action(np.array(state))
                + np.random.normal(0, max_action * args.expl_noise, size=action_dim)
            ).clip(-max_action, max_action)

        # Perform action
        next_state, reward, done, _ = env.step(action)
        # treat timeout terminations as non-terminal for bootstrapping
        done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0

        # Store data in replay buffer
        replay_buffer.add(state, action, next_state, reward, done_bool)

        state = next_state
        episode_reward += reward

        # Train agent after collecting sufficient data
        if t >= args.start_timesteps:
            policy.train(replay_buffer, args.batch_size)

        if done:
            # +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
            print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
            state, done = env.reset(), False
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1

        # Evaluate episode
        if (t + 1) % args.eval_freq == 0:
            evaluations.append(eval_policy(policy, args.env, args.seed))
np.save("./results/%s" % (file_name), evaluations) | 5,222 | 33.361842 | 121 | py |
LAP-PAL | LAP-PAL-master/continuous/PAL_TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
    """Deterministic policy network mapping states to actions in [-max_action, max_action]."""

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        # layer creation order kept identical so seeded initialization matches
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, action_dim)
        self.max_action = max_action

    def forward(self, state):
        # two ReLU hidden layers, then a tanh squashed to the action range
        return self.max_action * torch.tanh(
            self.l3(F.relu(self.l2(F.relu(self.l1(state)))))
        )

    def act(self, state):
        """Return both the squashed action and its pre-tanh logits."""
        hidden = F.relu(self.l2(F.relu(self.l1(state))))
        pre_tanh = self.l3(hidden)
        return self.max_action * torch.tanh(pre_tanh), pre_tanh
class Critic(nn.Module):
    """Twin Q-networks over (state, action) pairs, as used by TD3."""

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        # Q1 architecture
        self.l1 = nn.Linear(state_dim + action_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, 1)
        # Q2 architecture
        self.l4 = nn.Linear(state_dim + action_dim, 256)
        self.l5 = nn.Linear(256, 256)
        self.l6 = nn.Linear(256, 1)

    def forward(self, state, action):
        """Return both Q estimates for the concatenated (state, action) input."""
        sa = torch.cat([state, action], 1)
        q1 = self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
        q2 = self.l6(F.relu(self.l5(F.relu(self.l4(sa)))))
        return q1, q2

    def Q1(self, state, action):
        """Return only the first Q head (used for the actor loss)."""
        sa = torch.cat([state, action], 1)
        return self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
class PAL_TD3(object):
    """TD3 with Prioritized Approximation Loss (PAL).

    Uses a uniform replay buffer but reweights the critic loss so its gradient
    matches what LAP's prioritized sampling would produce (Fujimoto et al.,
    "An Equivalence between Loss Functions and Non-Uniform Sampling in
    Experience Replay").
    """

    def __init__(
        self,
        state_dim,
        action_dim,
        max_action,
        discount=0.99,
        tau=0.005,
        policy_noise=0.2,
        noise_clip=0.5,
        policy_freq=2,
        alpha=0.4,
        min_priority=1
    ):
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)

        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)

        self.max_action = max_action
        self.discount = discount
        self.tau = tau
        self.policy_noise = policy_noise
        self.noise_clip = noise_clip
        self.policy_freq = policy_freq
        self.alpha = alpha
        self.min_priority = min_priority

        self.total_it = 0

    def select_action(self, state, test=False):
        # `test` is accepted for interface parity with other agents but unused
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()

    def train(self, replay_buffer, batch_size=256):
        """One TD3 gradient step with the PAL critic loss."""
        self.total_it += 1

        # Sample replay buffer (uniformly — PAL needs no prioritized sampling)
        state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)

        with torch.no_grad():
            # Select action according to policy and add clipped noise
            noise = (
                torch.randn_like(action) * self.policy_noise
            ).clamp(-self.noise_clip, self.noise_clip)
            next_action = (
                self.actor_target(next_state) + noise
            ).clamp(-self.max_action, self.max_action)

            # Compute the target Q value (clipped double-Q)
            target_Q1, target_Q2 = self.critic_target(next_state, next_action)
            target_Q = torch.min(target_Q1, target_Q2)
            target_Q = reward + not_done * self.discount * target_Q

        # Get current Q estimates
        current_Q1, current_Q2 = self.critic(state, action)

        # signed TD errors for each head
        td_loss1 = (current_Q1 - target_Q)
        td_loss2 = (current_Q2 - target_Q)

        critic_loss = self.PAL(td_loss1) + self.PAL(td_loss2)
        # normalize by the mean priority so the effective step size matches LAP
        critic_loss /= torch.max(td_loss1.abs(), td_loss2.abs()).clamp(min=self.min_priority).pow(self.alpha).mean().detach()

        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # Delayed policy updates
        if self.total_it % self.policy_freq == 0:
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

            # Optimize the actor
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()

            # Update the frozen target models (Polyak averaging)
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    # If min_priority=1, this can be simplified.
    def PAL(self, x):
        """Huber-like PAL loss: quadratic below min_priority, |x|^(1+alpha) above."""
        return torch.where(
            x.abs() < self.min_priority,
            (self.min_priority ** self.alpha) * 0.5 * x.pow(2),
            self.min_priority * x.abs().pow(1. + self.alpha) / (1. + self.alpha)
        ).mean()

    def save(self, filename):
        # persist networks and optimizer state side by side
        torch.save(self.critic.state_dict(), filename + "_critic")
        torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
        torch.save(self.actor.state_dict(), filename + "_actor")
        torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")

    def load(self, filename):
        # NOTE: targets are not re-synced here; caller relies on saved weights only
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
| 5,164 | 26.768817 | 119 | py |
LAP-PAL | LAP-PAL-master/continuous/PER_TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
    """Deterministic policy network mapping states to actions in [-max_action, max_action]."""

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()

        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, action_dim)

        self.max_action = max_action

    def forward(self, state):
        # two ReLU hidden layers, then tanh squashed to the action range
        a = F.relu(self.l1(state))
        a = F.relu(self.l2(a))
        return self.max_action * torch.tanh(self.l3(a))

    def act(self, state):
        """Return both the squashed action and its pre-tanh logits."""
        a = F.relu(self.l1(state))
        a = F.relu(self.l2(a))
        a = self.l3(a)
        return self.max_action * torch.tanh(a), a
class Critic(nn.Module):
    """Twin Q-networks over (state, action) pairs, as used by TD3."""

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()

        # Q1 architecture
        self.l1 = nn.Linear(state_dim + action_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, 1)

        # Q2 architecture
        self.l4 = nn.Linear(state_dim + action_dim, 256)
        self.l5 = nn.Linear(256, 256)
        self.l6 = nn.Linear(256, 1)

    def forward(self, state, action):
        """Return both Q estimates for the concatenated (state, action) input."""
        sa = torch.cat([state, action], 1)

        q1 = F.relu(self.l1(sa))
        q1 = F.relu(self.l2(q1))
        q1 = self.l3(q1)

        q2 = F.relu(self.l4(sa))
        q2 = F.relu(self.l5(q2))
        q2 = self.l6(q2)
        return q1, q2

    def Q1(self, state, action):
        """Return only the first Q head (used for the actor loss)."""
        sa = torch.cat([state, action], 1)

        q1 = F.relu(self.l1(sa))
        q1 = F.relu(self.l2(q1))
        q1 = self.l3(q1)
        return q1
class PER_TD3(object):
	"""TD3 agent trained from a prioritized replay buffer (PER-style baseline).

	Differences from plain TD3: the buffer's sample() also returns leaf
	indices and importance-sampling weights; the critic loss is weighted by
	those weights and new priorities max(|td1|, |td2|) ** alpha are written
	back to the buffer after every critic update.
	"""

	def __init__(
		self,
		state_dim,
		action_dim,
		max_action,
		discount=0.99,
		tau=0.005,
		policy_noise=0.2,
		noise_clip=0.5,
		policy_freq=2,
		alpha=0.6,
	):
		# Actor, frozen target copy, and optimizer.
		self.actor = Actor(state_dim, action_dim, max_action).to(device)
		self.actor_target = copy.deepcopy(self.actor)
		self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
		# Twin critic, frozen target copy, and optimizer.
		self.critic = Critic(state_dim, action_dim).to(device)
		self.critic_target = copy.deepcopy(self.critic)
		self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
		self.max_action = max_action
		self.discount = discount          # reward discount gamma
		self.tau = tau                    # Polyak averaging coefficient
		self.policy_noise = policy_noise  # std of target-policy smoothing noise
		self.noise_clip = noise_clip      # clip range for that noise
		self.policy_freq = policy_freq    # delayed actor/target update period
		self.alpha = alpha                # priority exponent
		self.total_it = 0                 # training-iteration counter

	def select_action(self, state, test=False):
		"""Return the deterministic action for one unbatched state.

		`test` is accepted for interface compatibility but unused here.
		"""
		state = torch.FloatTensor(state.reshape(1, -1)).to(device)
		return self.actor(state).cpu().data.numpy().flatten()

	def train(self, replay_buffer, batch_size=256):
		"""Run one TD3 update step from a prioritized replay buffer."""
		self.total_it += 1
		# Sample replay buffer
		state, action, next_state, reward, not_done, ind, weights = replay_buffer.sample(batch_size)
		with torch.no_grad():
			# Select action according to policy and add clipped noise
			noise = (
				torch.randn_like(action) * self.policy_noise
			).clamp(-self.noise_clip, self.noise_clip)
			next_action = (
				self.actor_target(next_state) + noise
			).clamp(-self.max_action, self.max_action)
			# Compute the target Q value (clipped double-Q: min of both target heads)
			target_Q1, target_Q2 = self.critic_target(next_state, next_action)
			target_Q = torch.min(target_Q1, target_Q2)
			target_Q = reward + not_done * self.discount * target_Q
		# Get current Q estimates
		current_Q1, current_Q2 = self.critic(state, action)
		# Absolute TD errors; reused below to refresh priorities.
		td_loss1 = (current_Q1 - target_Q).abs()
		td_loss2 = (current_Q2 - target_Q).abs()
		# Compute critic loss (importance-weighted MSE on both heads)
		critic_loss = (
			(weights * F.mse_loss(current_Q1, target_Q, reduction='none')).mean()
			+ (weights * F.mse_loss(current_Q2, target_Q, reduction='none')).mean()
		)
		# Optimize the critic
		self.critic_optimizer.zero_grad()
		critic_loss.backward()
		self.critic_optimizer.step()
		# New priority = max TD error over both heads raised to alpha.
		# (.data detaches, so the priority write-back carries no gradient.)
		priority = torch.max(td_loss1, td_loss2).pow(self.alpha).cpu().data.numpy().flatten()
		replay_buffer.update_priority(ind, priority)
		# Delayed policy updates
		if self.total_it % self.policy_freq == 0:
			# Deterministic policy gradient through Q1 only.
			actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
			# Optimize the actor
			self.actor_optimizer.zero_grad()
			actor_loss.backward()
			self.actor_optimizer.step()
			# Update the frozen target models (Polyak averaging with tau)
			for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
			for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

	def save(self, filename):
		"""Save critic/actor weights and optimizer states under `filename` prefixes."""
		torch.save(self.critic.state_dict(), filename + "_critic")
		torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
		torch.save(self.actor.state_dict(), filename + "_actor")
		torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")

	def load(self, filename):
		"""Restore weights and optimizer states previously written by `save`."""
		self.critic.load_state_dict(torch.load(filename + "_critic"))
		self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
		self.actor.load_state_dict(torch.load(filename + "_actor"))
		self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
| 5,032 | 26.653846 | 94 | py |
LAP-PAL | LAP-PAL-master/continuous/utils.py | import numpy as np
import torch
class ReplayBuffer(object):
	"""Uniform-sampling FIFO replay buffer backed by preallocated numpy arrays."""

	def __init__(self, state_dim, action_dim, max_size=int(1e6)):
		self.max_size = max_size
		self.ptr = 0   # next write slot
		self.size = 0  # number of stored transitions
		self.state = np.zeros((max_size, state_dim))
		self.action = np.zeros((max_size, action_dim))
		self.next_state = np.zeros((max_size, state_dim))
		self.reward = np.zeros((max_size, 1))
		self.not_done = np.zeros((max_size, 1))
		self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	def add(self, state, action, next_state, reward, done):
		"""Store one transition, overwriting the oldest slot once the buffer is full."""
		self.state[self.ptr] = state
		self.action[self.ptr] = action
		self.next_state[self.ptr] = next_state
		self.reward[self.ptr] = reward
		self.not_done[self.ptr] = 1. - done
		self.ptr = (self.ptr + 1) % self.max_size
		self.size = min(self.size + 1, self.max_size)

	def sample(self, batch_size):
		"""Return a uniformly sampled batch as float tensors on self.device."""
		idx = np.random.randint(self.size, size=batch_size)
		as_tensor = lambda arr: torch.FloatTensor(arr[idx]).to(self.device)
		return (
			as_tensor(self.state),
			as_tensor(self.action),
			as_tensor(self.next_state),
			as_tensor(self.reward),
			as_tensor(self.not_done),
		)
class PrioritizedReplayBuffer():
	"""Replay buffer with proportional prioritized sampling (PER/LAP).

	Leaf priorities live in a SumTree; new transitions enter at the running
	max priority, and importance-sampling weights are annealed via beta -> 1.
	"""

	def __init__(self, state_dim, action_dim, max_size=int(1e6)):
		self.max_size = max_size
		self.ptr = 0   # next write slot
		self.size = 0  # number of stored transitions
		self.state = np.zeros((max_size, state_dim))
		self.action = np.zeros((max_size, action_dim))
		self.next_state = np.zeros((max_size, state_dim))
		self.reward = np.zeros((max_size, 1))
		self.not_done = np.zeros((max_size, 1))
		self.tree = SumTree(max_size)
		self.max_priority = 1.0
		self.beta = 0.4
		self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	def add(self, state, action, next_state, reward, done):
		"""Store one transition at max priority, overwriting the oldest slot when full."""
		self.state[self.ptr] = state
		self.action[self.ptr] = action
		self.next_state[self.ptr] = next_state
		self.reward[self.ptr] = reward
		self.not_done[self.ptr] = 1. - done
		# Priority is written for the slot just filled, before ptr advances.
		self.tree.set(self.ptr, self.max_priority)
		self.ptr = (self.ptr + 1) % self.max_size
		self.size = min(self.size + 1, self.max_size)

	def sample(self, batch_size):
		"""Sample proportionally to priority; also return indices and IS weights."""
		idx = self.tree.sample(batch_size)
		weights = self.tree.levels[-1][idx] ** -self.beta
		weights /= weights.max()
		self.beta = min(self.beta + 2e-7, 1)  # Hardcoded: 0.4 + 2e-7 * 3e6 = 1.0. Only used by PER.
		as_tensor = lambda arr: torch.FloatTensor(arr[idx]).to(self.device)
		return (
			as_tensor(self.state),
			as_tensor(self.action),
			as_tensor(self.next_state),
			as_tensor(self.reward),
			as_tensor(self.not_done),
			idx,
			torch.FloatTensor(weights).to(self.device).reshape(-1, 1),
		)

	def update_priority(self, ind, priority):
		"""Write back refreshed priorities and track the running maximum."""
		self.max_priority = max(priority.max(), self.max_priority)
		self.tree.batch_set(ind, priority)
class SumTree(object):
	"""Binary sum tree over leaf priorities; O(log n) proportional sampling and updates."""

	def __init__(self, max_size):
		# levels[0] is the root (total priority); each deeper level doubles
		# in size until the leaf level can hold max_size priorities.
		self.levels = [np.zeros(1)]
		level_size = 1
		while level_size < max_size:
			level_size *= 2
			self.levels.append(np.zeros(level_size))

	# Batch binary search through the sum tree:
	# draw a target mass in [0, total) per sample, then descend to its leaf.
	def sample(self, batch_size):
		"""Return batch_size leaf indices, each drawn proportionally to its priority."""
		targets = np.random.uniform(0, self.levels[0][0], size=batch_size)
		node = np.zeros(batch_size, dtype=int)
		for children in self.levels[1:]:
			node *= 2
			left_mass = children[node]
			go_right = np.greater(targets, left_mass)
			# Go right (+1) when the target exceeds the left subtree's mass.
			node += go_right
			# When going right, drop the left subtree's mass from the target.
			targets -= left_mass * go_right
		return node

	def set(self, ind, new_priority):
		"""Set a single leaf priority and propagate the difference up to the root."""
		delta = new_priority - self.levels[-1][ind]
		for nodes in self.levels[::-1]:
			np.add.at(nodes, ind, delta)
			ind //= 2

	def batch_set(self, ind, new_priority):
		"""Set many leaf priorities at once; duplicate indices are applied only once."""
		ind, unique_pos = np.unique(ind, return_index=True)
		delta = new_priority[unique_pos] - self.levels[-1][ind]
		for nodes in self.levels[::-1]:
			np.add.at(nodes, ind, delta)
			ind //= 2
LAP-PAL | LAP-PAL-master/continuous/TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
	"""Deterministic policy: 256-256 MLP mapping states to actions in [-max_action, max_action]."""

	def __init__(self, state_dim, action_dim, max_action):
		super(Actor, self).__init__()
		self.l1 = nn.Linear(state_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, action_dim)
		self.max_action = max_action

	def forward(self, state):
		"""Return the action for `state`, tanh-squashed and scaled by max_action."""
		hidden = F.relu(self.l1(state))
		hidden = F.relu(self.l2(hidden))
		return self.max_action * torch.tanh(self.l3(hidden))
class Critic(nn.Module):
	"""Twin Q-networks (TD3): two independent 256-256 MLP heads over (state, action)."""

	def __init__(self, state_dim, action_dim):
		super(Critic, self).__init__()
		# Q1 head
		self.l1 = nn.Linear(state_dim + action_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, 1)
		# Q2 head
		self.l4 = nn.Linear(state_dim + action_dim, 256)
		self.l5 = nn.Linear(256, 256)
		self.l6 = nn.Linear(256, 1)

	def forward(self, state, action):
		"""Return the (Q1, Q2) estimates for a batch of state-action pairs."""
		pair = torch.cat([state, action], 1)
		h1 = F.relu(self.l2(F.relu(self.l1(pair))))
		h2 = F.relu(self.l5(F.relu(self.l4(pair))))
		return self.l3(h1), self.l6(h2)

	def Q1(self, state, action):
		"""Return only the Q1 estimate (used by the actor update)."""
		pair = torch.cat([state, action], 1)
		h1 = F.relu(self.l2(F.relu(self.l1(pair))))
		return self.l3(h1)
class TD3(object):
	"""Twin Delayed DDPG (TD3) with uniform replay — the baseline agent.

	Key mechanisms visible in train(): clipped double-Q targets,
	target-policy smoothing noise, and delayed actor/target updates
	every `policy_freq` critic updates.
	"""

	def __init__(
		self,
		state_dim,
		action_dim,
		max_action,
		discount=0.99,
		tau=0.005,
		policy_noise=0.2,
		noise_clip=0.5,
		policy_freq=2
	):
		# Actor, frozen target copy, and optimizer.
		self.actor = Actor(state_dim, action_dim, max_action).to(device)
		self.actor_target = copy.deepcopy(self.actor)
		self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
		# Twin critic, frozen target copy, and optimizer.
		self.critic = Critic(state_dim, action_dim).to(device)
		self.critic_target = copy.deepcopy(self.critic)
		self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
		self.max_action = max_action
		self.discount = discount          # reward discount gamma
		self.tau = tau                    # Polyak averaging coefficient
		self.policy_noise = policy_noise  # std of target-policy smoothing noise
		self.noise_clip = noise_clip      # clip range for that noise
		self.policy_freq = policy_freq    # delayed policy update period
		self.total_it = 0                 # training-iteration counter

	def select_action(self, state, test=False):
		"""Return the deterministic action for one unbatched state (`test` unused)."""
		state = torch.FloatTensor(state.reshape(1, -1)).to(device)
		return self.actor(state).cpu().data.numpy().flatten()

	def train(self, replay_buffer, batch_size=256):
		"""Run one TD3 update step from a uniform replay buffer."""
		self.total_it += 1
		# Sample replay buffer
		state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
		with torch.no_grad():
			# Select action according to policy and add clipped noise
			noise = (
				torch.randn_like(action) * self.policy_noise
			).clamp(-self.noise_clip, self.noise_clip)
			next_action = (
				self.actor_target(next_state) + noise
			).clamp(-self.max_action, self.max_action)
			# Compute the target Q value (clipped double-Q: min of both target heads)
			target_Q1, target_Q2 = self.critic_target(next_state, next_action)
			target_Q = torch.min(target_Q1, target_Q2)
			target_Q = reward + not_done * self.discount * target_Q
		# Get current Q estimates
		current_Q1, current_Q2 = self.critic(state, action)
		# Compute critic loss
		critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
		# Optimize the critic
		self.critic_optimizer.zero_grad()
		critic_loss.backward()
		self.critic_optimizer.step()
		# Delayed policy updates
		if self.total_it % self.policy_freq == 0:
			# Compute actor loss (deterministic policy gradient through Q1 only)
			actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
			# Optimize the actor
			self.actor_optimizer.zero_grad()
			actor_loss.backward()
			self.actor_optimizer.step()
			# Update the frozen target models (Polyak averaging with tau)
			for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
			for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

	def save(self, filename):
		"""Save critic/actor weights and optimizer states under `filename` prefixes."""
		torch.save(self.critic.state_dict(), filename + "_critic")
		torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
		torch.save(self.actor.state_dict(), filename + "_actor")
		torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")

	def load(self, filename):
		"""Restore weights and optimizer states previously written by `save`."""
		self.critic.load_state_dict(torch.load(filename + "_critic"))
		self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
		self.actor.load_state_dict(torch.load(filename + "_actor"))
		self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
| 4,551 | 26.756098 | 93 | py |
LAP-PAL | LAP-PAL-master/continuous/LAP_TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
	"""Deterministic policy network; `act` additionally exposes the pre-tanh output."""

	def __init__(self, state_dim, action_dim, max_action):
		super(Actor, self).__init__()
		self.l1 = nn.Linear(state_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, action_dim)
		self.max_action = max_action

	def forward(self, state):
		"""Return the action for `state`, tanh-squashed and scaled by max_action."""
		hidden = F.relu(self.l1(state))
		hidden = F.relu(self.l2(hidden))
		return self.max_action * torch.tanh(self.l3(hidden))

	def act(self, state):
		"""Return (scaled action, raw pre-tanh output) for `state`."""
		hidden = F.relu(self.l1(state))
		hidden = F.relu(self.l2(hidden))
		pre_tanh = self.l3(hidden)
		return self.max_action * torch.tanh(pre_tanh), pre_tanh
class Critic(nn.Module):
	"""Twin Q-networks: two independent 256-256 MLP heads over (state, action)."""

	def __init__(self, state_dim, action_dim):
		super(Critic, self).__init__()
		# Q1 head
		self.l1 = nn.Linear(state_dim + action_dim, 256)
		self.l2 = nn.Linear(256, 256)
		self.l3 = nn.Linear(256, 1)
		# Q2 head
		self.l4 = nn.Linear(state_dim + action_dim, 256)
		self.l5 = nn.Linear(256, 256)
		self.l6 = nn.Linear(256, 1)

	def forward(self, state, action):
		"""Return the (Q1, Q2) estimates for a batch of state-action pairs."""
		pair = torch.cat([state, action], 1)
		h1 = F.relu(self.l2(F.relu(self.l1(pair))))
		h2 = F.relu(self.l5(F.relu(self.l4(pair))))
		return self.l3(h1), self.l6(h2)

	def Q1(self, state, action):
		"""Return only the Q1 estimate (used by the actor update)."""
		pair = torch.cat([state, action], 1)
		h1 = F.relu(self.l2(F.relu(self.l1(pair))))
		return self.l3(h1)
class LAP_TD3(object):
	"""TD3 with Loss-Adjusted Prioritized experience replay (LAP).

	Compared to PER-style TD3: the critic uses a Huber-style loss with
	threshold `min_priority` (no importance weights), and write-back
	priorities are floored at `min_priority` before raising to `alpha`.
	"""

	def __init__(
		self,
		state_dim,
		action_dim,
		max_action,
		discount=0.99,
		tau=0.005,
		policy_noise=0.2,
		noise_clip=0.5,
		policy_freq=2,
		alpha=0.4,
		min_priority=1
	):
		# Actor, frozen target copy, and optimizer.
		self.actor = Actor(state_dim, action_dim, max_action).to(device)
		self.actor_target = copy.deepcopy(self.actor)
		self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
		# Twin critic, frozen target copy, and optimizer.
		self.critic = Critic(state_dim, action_dim).to(device)
		self.critic_target = copy.deepcopy(self.critic)
		self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
		self.max_action = max_action
		self.discount = discount          # reward discount gamma
		self.tau = tau                    # Polyak averaging coefficient
		self.policy_noise = policy_noise  # std of target-policy smoothing noise
		self.noise_clip = noise_clip      # clip range for that noise
		self.policy_freq = policy_freq    # delayed actor/target update period
		self.alpha = alpha                # priority exponent
		self.min_priority = min_priority  # priority floor / Huber threshold
		self.total_it = 0                 # training-iteration counter

	def select_action(self, state, test=False):
		"""Return the deterministic action for one unbatched state (`test` unused)."""
		state = torch.FloatTensor(state.reshape(1, -1)).to(device)
		return self.actor(state).cpu().data.numpy().flatten()

	def train(self, replay_buffer, batch_size=256):
		"""Run one LAP-TD3 update step from a prioritized replay buffer."""
		self.total_it += 1
		# Sample replay buffer
		state, action, next_state, reward, not_done, ind, weights = replay_buffer.sample(batch_size)
		with torch.no_grad():
			# Select action according to policy and add clipped noise
			noise = (
				torch.randn_like(action) * self.policy_noise
			).clamp(-self.noise_clip, self.noise_clip)
			next_action = (
				self.actor_target(next_state) + noise
			).clamp(-self.max_action, self.max_action)
			# Compute the target Q value (clipped double-Q: min of both target heads)
			target_Q1, target_Q2 = self.critic_target(next_state, next_action)
			target_Q = torch.min(target_Q1, target_Q2)
			target_Q = reward + not_done * self.discount * target_Q
		# Get current Q estimates
		current_Q1, current_Q2 = self.critic(state, action)
		# Absolute TD errors; reused below to refresh priorities.
		td_loss1 = (current_Q1 - target_Q).abs()
		td_loss2 = (current_Q2 - target_Q).abs()
		# Compute critic loss — Huber-style with threshold min_priority
		# (no importance weights; LAP corrects the bias via the loss instead).
		critic_loss = self.huber(td_loss1) + self.huber(td_loss2)
		# Optimize the critic
		self.critic_optimizer.zero_grad()
		critic_loss.backward()
		self.critic_optimizer.step()
		# New priority = max TD error over both heads, floored at
		# min_priority, raised to alpha. (.data detaches the write-back.)
		priority = torch.max(td_loss1, td_loss2).clamp(min=self.min_priority).pow(self.alpha).cpu().data.numpy().flatten()
		replay_buffer.update_priority(ind, priority)
		# Delayed policy updates
		if self.total_it % self.policy_freq == 0:
			# Deterministic policy gradient through Q1 only.
			actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
			# Optimize the actor
			self.actor_optimizer.zero_grad()
			actor_loss.backward()
			self.actor_optimizer.step()
			# Update the frozen target models (Polyak averaging with tau)
			for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
			for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
				target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

	def huber(self, x):
		"""Mean Huber-style loss: quadratic below min_priority, linear above."""
		return torch.where(x < self.min_priority, 0.5 * x.pow(2), self.min_priority * x).mean()

	def save(self, filename):
		"""Save critic/actor weights and optimizer states under `filename` prefixes."""
		torch.save(self.critic.state_dict(), filename + "_critic")
		torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
		torch.save(self.actor.state_dict(), filename + "_actor")
		torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")

	def load(self, filename):
		"""Restore weights and optimizer states previously written by `save`."""
		self.critic.load_state_dict(torch.load(filename + "_critic"))
		self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
		self.actor.load_state_dict(torch.load(filename + "_actor"))
		self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
| 5,118 | 26.67027 | 116 | py |
LAP-PAL | LAP-PAL-master/discrete/main.py | import argparse
import copy
import importlib
import json
import os
import numpy as np
import torch
import DDQN
import PER_DDQN
import LAP_DDQN
import PAL_DDQN
import utils
def main(env, replay_buffer, is_atari, state_dim, num_actions, args, parameters, device):
	"""Train the selected DDQN variant on `env` and periodically evaluate it.

	Evaluations are appended to ./results/{setting}.npy. Relies on the
	module-level globals `setting` and (via eval_policy) `atari_preprocessing`
	that are set in the __main__ block.
	"""
	# Initialize and load policy
	kwargs = {
		"is_atari": is_atari,
		"num_actions": num_actions,
		"state_dim": state_dim,
		"device": device,
		"discount": parameters["discount"],
		"optimizer": parameters["optimizer"],
		"optimizer_parameters": parameters["optimizer_parameters"],
		"polyak_target_update": parameters["polyak_target_update"],
		"target_update_frequency": parameters["target_update_freq"],
		"tau": parameters["tau"],
		"initial_eps": parameters["initial_eps"],
		"end_eps": parameters["end_eps"],
		"eps_decay_period": parameters["eps_decay_period"],
		"eval_eps": parameters["eval_eps"]
	}
	if args.algorithm == "DDQN":
		policy = DDQN.DDQN(**kwargs)
	elif args.algorithm == "PER_DDQN":
		policy = PER_DDQN.PER_DDQN(**kwargs)
	# alpha / min_priority are only accepted by the LAP/PAL variants, so they
	# are added to kwargs after the DDQN / PER_DDQN constructions above.
	kwargs["alpha"] = parameters["alpha"]
	kwargs["min_priority"] = parameters["min_priority"]
	if args.algorithm == "LAP_DDQN":
		policy = LAP_DDQN.LAP_DDQN(**kwargs)
	elif args.algorithm == "PAL_DDQN":
		policy = PAL_DDQN.PAL_DDQN(**kwargs)
	evaluations = []
	state, done = env.reset(), False
	episode_start = True
	episode_reward = 0
	episode_timesteps = 0
	episode_num = 0
	# Interact with the environment for max_timesteps
	for t in range(int(args.max_timesteps)):
		episode_timesteps += 1
		#if args.train_behavioral:
		# Uniform-random exploration before start_timesteps, then the policy.
		if t < parameters["start_timesteps"]:
			action = env.action_space.sample()
		else:
			action = policy.select_action(np.array(state))
		# Perform action and log results
		next_state, reward, done, info = env.step(action)
		episode_reward += reward
		# Only consider "done" if episode terminates due to failure condition
		done_float = float(done) if episode_timesteps < env._max_episode_steps else 0
		# For atari, info[0] = clipped reward, info[1] = done_float
		if is_atari:
			reward = info[0]
			done_float = info[1]
		# Store data in replay buffer
		replay_buffer.add(state, action, next_state, reward, done_float, done, episode_start)
		state = copy.copy(next_state)
		episode_start = False
		# Train agent after collecting sufficient data
		if t >= parameters["start_timesteps"] and (t + 1) % parameters["train_freq"] == 0:
			policy.train(replay_buffer)
		if done:
			# +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
			print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
			# Reset environment
			state, done = env.reset(), False
			episode_start = True
			episode_reward = 0
			episode_timesteps = 0
			episode_num += 1
		# Evaluate episode
		if (t + 1) % parameters["eval_freq"] == 0:
			evaluations.append(eval_policy(policy, args.env, args.seed))
			np.save(f"./results/{setting}.npy", evaluations)
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval_policy(policy, env_name, seed, eval_episodes=10):
	"""Run `policy` greedily for eval_episodes on a fresh env (seeded seed + 100).

	Returns the mean episode return; uses the module-level `atari_preprocessing`.
	"""
	eval_env, _, _, _ = utils.make_env(env_name, atari_preprocessing)
	eval_env.seed(seed + 100)
	total_reward = 0.
	for _ in range(eval_episodes):
		state, done = eval_env.reset(), False
		while not done:
			action = policy.select_action(np.array(state), eval=True)
			state, reward, done, _ = eval_env.step(action)
			total_reward += reward
	avg_reward = total_reward / eval_episodes
	print("---------------------------------------")
	print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
	print("---------------------------------------")
	return avg_reward
if __name__ == "__main__":
	# Atari Specific
	atari_preprocessing = {
		"frame_skip": 4,
		"frame_size": 84,
		"state_history": 4,
		"done_on_life_loss": False,
		"reward_clipping": True,
		"max_episode_timesteps": 27e3
	}
	# Hyper-parameters used when the chosen env is an Atari game.
	atari_parameters = {
		# LAP/PAL
		"alpha": 0.6,
		"min_priority": 1e-2,
		# Exploration
		"start_timesteps": 2e4,
		"initial_eps": 1,
		"end_eps": 1e-2,
		"eps_decay_period": 25e4,
		# Evaluation
		"eval_freq": 5e4,
		"eval_eps": 1e-3,
		# Learning
		"discount": 0.99,
		"buffer_size": 1e6,
		"batch_size": 32,
		"optimizer": "RMSprop",
		"optimizer_parameters": {
			"lr": 0.0000625,
			"alpha": 0.95,
			"centered": True,
			"eps": 0.00001
		},
		"train_freq": 4,
		"polyak_target_update": False,
		"target_update_freq": 8e3,
		"tau": 1
	}
	# Hyper-parameters used for standard (non-Atari) gym tasks.
	regular_parameters = {
		# LAP/PAL
		"alpha": 0.4,
		"min_priority": 1,
		# Exploration
		"start_timesteps": 1e3,
		"initial_eps": 0.1,
		"end_eps": 0.1,
		"eps_decay_period": 1,
		# Evaluation
		"eval_freq": 5e3,
		"eval_eps": 0,
		# Learning
		"discount": 0.99,
		"buffer_size": 1e6,
		"batch_size": 64,
		"optimizer": "Adam",
		"optimizer_parameters": {
			"lr": 3e-4
		},
		"train_freq": 1,
		"polyak_target_update": True,
		"target_update_freq": 1,
		"tau": 0.005
	}
	# Load parameters
	parser = argparse.ArgumentParser()
	parser.add_argument("--algorithm", default="LAP_DDQN")  # OpenAI gym environment name
	parser.add_argument("--env", default="PongNoFrameskip-v0")  # OpenAI gym environment name #PongNoFrameskip-v0
	parser.add_argument("--seed", default=0, type=int)  # Sets Gym, PyTorch and Numpy seeds
	parser.add_argument("--buffer_name", default="Default")  # Prepends name to filename
	parser.add_argument("--max_timesteps", default=50e6, type=int)  # Max time steps to run environment or train for
	args = parser.parse_args()
	print("---------------------------------------")
	print(f"Setting: Algorithm: {args.algorithm}, Env: {args.env}, Seed: {args.seed}")
	print("---------------------------------------")
	# `setting` is read as a global by main() when saving results.
	setting = f"{args.algorithm}_{args.env}_{args.seed}"
	if not os.path.exists("./results"):
		os.makedirs("./results")
	# Make env and determine properties
	env, is_atari, state_dim, num_actions = utils.make_env(args.env, atari_preprocessing)
	parameters = atari_parameters if is_atari else regular_parameters
	# Seed everything for reproducibility.
	env.seed(args.seed)
	torch.manual_seed(args.seed)
	np.random.seed(args.seed)
	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
	# Initialize buffer (prioritized only for the PER/LAP variants).
	prioritized = True if args.algorithm == "PER_DDQN" or args.algorithm == "LAP_DDQN" else False
	replay_buffer = utils.ReplayBuffer(
		state_dim,
		prioritized,
		is_atari,
		atari_preprocessing,
		parameters["batch_size"],
		parameters["buffer_size"],
		device
	)
	main(env, replay_buffer, is_atari, state_dim, num_actions, args, parameters, device)
| 6,543 | 26.846809 | 116 | py |
LAP-PAL | LAP-PAL-master/discrete/utils.py | import cv2
import gym
import numpy as np
import torch
def ReplayBuffer(state_dim, prioritized, is_atari, atari_preprocessing, batch_size, buffer_size, device):
	"""Factory: return the Atari or standard-gym replay buffer for this env type."""
	if is_atari:
		return PrioritizedAtariBuffer(state_dim, atari_preprocessing, batch_size, buffer_size, device, prioritized)
	return PrioritizedStandardBuffer(state_dim, batch_size, buffer_size, device, prioritized)
class PrioritizedAtariBuffer(object):
	"""Replay buffer for Atari with optional proportional prioritization.

	Stores single uint8 frames and reconstructs each sample's stacked frame
	history at sample time, zeroing frames that belong to a previous episode.
	When `prioritized`, leaf priorities live in a SumTree and
	importance-sampling weights are annealed via beta -> 1.
	"""

	def __init__(self, state_dim, atari_preprocessing, batch_size, buffer_size, device, prioritized):
		self.batch_size = batch_size
		self.max_size = int(buffer_size)
		self.device = device
		self.state_history = atari_preprocessing["state_history"]
		self.ptr = 0   # next write slot
		self.size = 0  # number of stored transitions
		# One frame per slot; histories are rebuilt in sample().
		self.state = np.zeros((
			self.max_size + 1,
			atari_preprocessing["frame_size"],
			atari_preprocessing["frame_size"]
		), dtype=np.uint8)
		self.action = np.zeros((self.max_size, 1), dtype=np.int64)
		self.reward = np.zeros((self.max_size, 1))
		# not_done only consider "done" if episode terminates due to failure condition
		# if episode terminates due to timelimit, the transition is not added to the buffer
		self.not_done = np.zeros((self.max_size, 1))
		self.first_timestep = np.zeros(self.max_size, dtype=np.uint8)
		self.prioritized = prioritized
		if self.prioritized:
			self.tree = SumTree(self.max_size)
			self.max_priority = 1.0
			self.beta = 0.4

	def add(self, state, action, next_state, reward, done, env_done, first_timestep):
		"""Store one transition; skipped entirely on timelimit resets."""
		# If dones don't match, env has reset due to timelimit
		# and we don't add the transition to the buffer
		if done != env_done:
			return
		self.state[self.ptr] = state[0]
		self.action[self.ptr] = action
		self.reward[self.ptr] = reward
		self.not_done[self.ptr] = 1. - done
		self.first_timestep[self.ptr] = first_timestep
		# BUGFIX: give the transition just written the current max priority
		# BEFORE advancing self.ptr. The previous code set the priority after
		# the increment, i.e. on the *next* slot — the new transition kept a
		# stale/zero priority and a not-yet-written slot became samplable.
		# This now matches PrioritizedStandardBuffer.add below.
		if self.prioritized:
			self.tree.set(self.ptr, self.max_priority)
		self.ptr = (self.ptr + 1) % self.max_size
		self.size = min(self.size + 1, self.max_size)

	def sample(self):
		"""Sample a batch, rebuilding each state's frame-history stack.

		Returns (state, action, next_state, reward, not_done) tensors, plus
		(indices, IS-weights) when prioritized.
		"""
		ind = self.tree.sample(self.batch_size) if self.prioritized \
			else np.random.randint(0, self.size, size=self.batch_size)
		# Note + is concatenate here
		state = np.zeros(((self.batch_size, self.state_history) + self.state.shape[1:]), dtype=np.uint8)
		next_state = np.array(state)
		state_not_done = 1.
		next_not_done = 1.
		for i in range(self.state_history):
			# Wrap around if the buffer is filled
			if self.size == self.max_size:
				j = (ind - i) % self.max_size
				k = (ind - i + 1) % self.max_size
			else:
				j = ind - i
				k = (ind - i + 1).clip(min=0)
			# If j == -1, then we set state_not_done to 0.
			state_not_done *= (j + 1).clip(min=0, max=1).reshape(-1, 1, 1)
			j = j.clip(min=0)
			# State should be all 0s if the episode terminated previously
			state[:, i] = self.state[j] * state_not_done
			next_state[:, i] = self.state[k] * next_not_done
			# If this was the first timestep, make everything previous = 0
			next_not_done *= state_not_done
			state_not_done *= (1. - self.first_timestep[j]).reshape(-1, 1, 1)
		batch = (
			torch.ByteTensor(state).to(self.device).float(),
			torch.LongTensor(self.action[ind]).to(self.device),
			torch.ByteTensor(next_state).to(self.device).float(),
			torch.FloatTensor(self.reward[ind]).to(self.device),
			torch.FloatTensor(self.not_done[ind]).to(self.device)
		)
		if self.prioritized:
			weights = np.array(self.tree.nodes[-1][ind]) ** -self.beta
			weights /= weights.max()
			self.beta = min(self.beta + 4.8e-8, 1)  # Hardcoded: 0.4 + 4.8e-8 * 12.5e6 = 1.0. Only used by PER.
			batch += (ind, torch.FloatTensor(weights).to(self.device).reshape(-1, 1))
		return batch

	def update_priority(self, ind, priority):
		"""Write back refreshed priorities and track the running maximum."""
		self.max_priority = max(priority.max(), self.max_priority)
		self.tree.batch_set(ind, priority)
# Replay buffer for standard gym tasks
class PrioritizedStandardBuffer():
def __init__(self, state_dim, batch_size, buffer_size, device, prioritized):
self.batch_size = batch_size
self.max_size = int(buffer_size)
self.device = device
self.ptr = 0
self.size = 0
self.state = np.zeros((self.max_size, state_dim))
self.action = np.zeros((self.max_size, 1))
self.next_state = np.array(self.state)
self.reward = np.zeros((self.max_size, 1))
self.not_done = np.zeros((self.max_size, 1))
self.prioritized = prioritized
if self.prioritized:
self.tree = SumTree(self.max_size)
self.max_priority = 1.0
self.beta = 0.4
def add(self, state, action, next_state, reward, done, env_done, first_timestep):
self.state[self.ptr] = state
self.action[self.ptr] = action
self.next_state[self.ptr] = next_state
self.reward[self.ptr] = reward
self.not_done[self.ptr] = 1. - done
if self.prioritized:
self.tree.set(self.ptr, self.max_priority)
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample(self):
ind = self.tree.sample(self.batch_size) if self.prioritized \
else np.random.randint(0, self.size, size=self.batch_size)
batch = (
torch.FloatTensor(self.state[ind]).to(self.device),
torch.LongTensor(self.action[ind]).to(self.device),
torch.FloatTensor(self.next_state[ind]).to(self.device),
torch.FloatTensor(self.reward[ind]).to(self.device),
torch.FloatTensor(self.not_done[ind]).to(self.device)
)
if self.prioritized:
weights = np.array(self.tree.nodes[-1][ind]) ** -self.beta
weights /= weights.max()
self.beta = min(self.beta + 2e-7, 1) # Hardcoded: 0.4 + 2e-7 * 3e6 = 1.0. Only used by PER.
batch += (ind, torch.FloatTensor(weights).to(self.device).reshape(-1, 1))
return batch
def update_priority(self, ind, priority):
self.max_priority = max(priority.max(), self.max_priority)
self.tree.batch_set(ind, priority)
class SumTree(object):
	"""Binary sum tree over leaf priorities; O(log n) proportional sampling and updates."""

	def __init__(self, max_size):
		# nodes[0] is the root (total priority); each deeper level doubles
		# in size until the leaves can hold max_size priorities.
		self.nodes = []
		level_size = 1
		for _ in range(int(np.ceil(np.log2(max_size))) + 1):
			self.nodes.append(np.zeros(level_size))
			level_size *= 2

	# Batch binary search through the sum tree:
	# draw a target mass in [0, total) per sample, then descend to its leaf.
	def sample(self, batch_size):
		"""Return batch_size leaf indices, each drawn proportionally to its priority."""
		query_value = np.random.uniform(0, self.nodes[0][0], size=batch_size)
		leaf = np.zeros(batch_size, dtype=int)
		for children in self.nodes[1:]:
			leaf *= 2
			left_mass = children[leaf]
			go_right = np.greater(query_value, left_mass)
			# Go right (+1) when the target exceeds the left subtree's mass.
			leaf += go_right
			# When going right, drop the left subtree's mass from the target.
			query_value -= left_mass * go_right
		return leaf

	def set(self, node_index, new_priority):
		"""Set a single leaf priority and propagate the difference up to the root."""
		delta = new_priority - self.nodes[-1][node_index]
		for nodes in self.nodes[::-1]:
			np.add.at(nodes, node_index, delta)
			node_index //= 2

	def batch_set(self, node_index, new_priority):
		"""Set many leaf priorities at once; duplicate indices are applied only once."""
		node_index, unique_pos = np.unique(node_index, return_index=True)
		delta = new_priority[unique_pos] - self.nodes[-1][node_index]
		for nodes in self.nodes[::-1]:
			np.add.at(nodes, node_index, delta)
			node_index //= 2
# Atari Preprocessing
# Code is based on https://github.com/openai/gym/blob/master/gym/wrappers/atari_preprocessing.py
class AtariPreprocessing(object):
	"""Gym wrapper applying standard DQN-style Atari preprocessing.

	Handles frame skipping with max-pooling over the last two raw frames,
	grayscale conversion + resize to frame_size x frame_size, a rolling
	state history of stacked frames, reward clipping info, optional
	done-on-life-loss, and an episode timestep limit.
	"""

	def __init__(
		self,
		env,
		frame_skip=4,
		frame_size=84,
		state_history=4,
		done_on_life_loss=False,
		reward_clipping=True, # Clips to a range of -1,1
		max_episode_timesteps=27000
	):
		self.env = env.env
		self.done_on_life_loss = done_on_life_loss
		self.frame_skip = frame_skip
		self.frame_size = frame_size
		self.reward_clipping = reward_clipping
		self._max_episode_steps = max_episode_timesteps
		self.observation_space = np.zeros((frame_size, frame_size))
		self.action_space = self.env.action_space
		self.lives = 0
		self.episode_length = 0
		# Tracks previous 2 frames
		self.frame_buffer = np.zeros(
			(2,
			self.env.observation_space.shape[0],
			self.env.observation_space.shape[1]),
			dtype=np.uint8
		)
		# Tracks previous 4 states
		self.state_buffer = np.zeros((state_history, frame_size, frame_size), dtype=np.uint8)

	def reset(self):
		"""Reset the underlying env and return the (mutable, reused) state buffer."""
		self.env.reset()
		self.lives = self.env.ale.lives()
		self.episode_length = 0
		self.env.ale.getScreenGrayscale(self.frame_buffer[0])
		self.frame_buffer[1] = 0
		self.state_buffer[0] = self.adjust_frame()
		self.state_buffer[1:] = 0
		return self.state_buffer

	# Takes single action is repeated for frame_skip frames (usually 4)
	# Reward is accumulated over those frames
	def step(self, action):
		"""Repeat `action` for frame_skip frames.

		Returns (state_buffer, total_reward, done, [clipped_reward, done_float])
		where done_float excludes timelimit terminations.
		"""
		total_reward = 0.
		self.episode_length += 1
		for frame in range(self.frame_skip):
			_, reward, done, _ = self.env.step(action)
			total_reward += reward
			if self.done_on_life_loss:
				crt_lives = self.env.ale.lives()
				done = True if crt_lives < self.lives else done
				self.lives = crt_lives
			if done:
				break
			# Second last and last frame
			f = frame + 2 - self.frame_skip
			if f >= 0:
				self.env.ale.getScreenGrayscale(self.frame_buffer[f])
		# Shift the history and prepend the newest processed frame.
		self.state_buffer[1:] = self.state_buffer[:-1]
		self.state_buffer[0] = self.adjust_frame()
		# done_float is captured before applying the timelimit below, so
		# timelimit terminations are not treated as failures by the buffer.
		done_float = float(done)
		if self.episode_length >= self._max_episode_steps:
			done = True
		return self.state_buffer, total_reward, done, [np.clip(total_reward, -1, 1), done_float]

	def adjust_frame(self):
		"""Max-pool the last two raw frames and resize to frame_size x frame_size."""
		# Take maximum over last two frames
		np.maximum(
			self.frame_buffer[0],
			self.frame_buffer[1],
			out=self.frame_buffer[0]
		)
		# Resize
		image = cv2.resize(
			self.frame_buffer[0],
			(self.frame_size, self.frame_size),
			interpolation=cv2.INTER_AREA
		)
		return np.array(image, dtype=np.uint8)

	def seed(self, seed):
		"""Seed the wrapped environment."""
		self.env.seed(seed)
# Create environment, add wrapper if necessary and create env_properties
def make_env(env_name, atari_preprocessing):
	"""Create a gym env (Atari-wrapped when needed).

	Returns (env, is_atari, state_dim, num_actions); for Atari, state_dim is
	the (history, H, W) frame-stack shape, otherwise the flat observation size.
	"""
	env = gym.make(env_name)
	is_atari = gym.envs.registry.spec(env_name).entry_point == 'gym.envs.atari:AtariEnv'
	if is_atari:
		env = AtariPreprocessing(env, **atari_preprocessing)
		state_dim = (
			atari_preprocessing["state_history"],
			atari_preprocessing["frame_size"],
			atari_preprocessing["frame_size"]
		)
	else:
		state_dim = env.observation_space.shape[0]
	return env, is_atari, state_dim, env.action_space.n
LAP-PAL | LAP-PAL-master/discrete/PER_DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
    """DQN-style convolutional Q-network for stacked Atari frames.

    Maps a (batch, frames, 84, 84) stack to per-action Q-values.
    """

    def __init__(self, frames, num_actions):
        super(Conv_Q, self).__init__()
        self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
        self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.l1 = nn.Linear(3136, 512)
        self.l2 = nn.Linear(512, num_actions)

    def forward(self, state):
        """Return Q-values of shape (batch, num_actions)."""
        hidden = state
        for conv in (self.c1, self.c2, self.c3):
            hidden = F.relu(conv(hidden))
        # 3136 = 64 channels * 7 * 7 spatial cells for an 84x84 input.
        hidden = F.relu(self.l1(hidden.reshape(-1, 3136)))
        return self.l2(hidden)
# Used for Box2D / Toy problems
class FC_Q(nn.Module):
    """Two-hidden-layer MLP Q-network for low-dimensional observations."""

    def __init__(self, state_dim, num_actions):
        super(FC_Q, self).__init__()
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, num_actions)

    def forward(self, state):
        """Return Q-values of shape (batch, num_actions)."""
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class PER_DDQN(object):
    """Double DQN trained with Prioritized Experience Replay (PER).

    Absolute TD errors drive both the importance-weighted loss and the
    priority refresh of the replay buffer.
    """

    def __init__(
        self,
        is_atari,
        num_actions,
        state_dim,
        device,
        discount=0.99,
        optimizer="Adam",
        optimizer_parameters={},
        polyak_target_update=False,
        target_update_frequency=8e3,
        tau=0.005,
        initial_eps = 1,
        end_eps = 0.001,
        eps_decay_period = 25e4,
        eval_eps=0.001,
    ):
        self.device = device

        # Determine network type (conv net for Atari, MLP otherwise).
        self.Q = Conv_Q(4, num_actions).to(self.device) if is_atari else FC_Q(state_dim, num_actions).to(self.device)
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer = getattr(torch.optim, optimizer)(self.Q.parameters(), **optimizer_parameters)

        self.discount = discount

        # Target update rule
        self.maybe_update_target = self.polyak_target_update if polyak_target_update else self.copy_target_update
        self.target_update_frequency = target_update_frequency
        self.tau = tau

        # Linear decay schedule for the exploration rate.
        self.initial_eps = initial_eps
        self.end_eps = end_eps
        self.slope = (self.end_eps - self.initial_eps) / eps_decay_period

        # Evaluation hyper-parameters. For Atari, state_dim is the
        # (history, frame_size, frame_size) tuple returned by make_env, so
        # the frame size no longer needs to be hard-coded (was (-1, 4, 84, 84)).
        self.state_shape = (-1,) + state_dim if is_atari else (-1, state_dim)
        self.eval_eps = eval_eps
        self.num_actions = num_actions

        # Number of training iterations
        self.iterations = 0

    def select_action(self, state, eval=False):
        """Epsilon-greedy action selection (greedy with probability 1 - eps)."""
        eps = self.eval_eps if eval \
            else max(self.slope * self.iterations + self.initial_eps, self.end_eps)

        # Select action according to policy with probability (1-eps)
        # otherwise, select random action
        if np.random.uniform(0, 1) > eps:
            with torch.no_grad():
                state = torch.FloatTensor(state).reshape(self.state_shape).to(self.device)
                return int(self.Q(state).argmax(1))
        else:
            return np.random.randint(self.num_actions)

    def train(self, replay_buffer):
        """One PER-weighted Double-DQN update; refreshes buffer priorities.

        Note: a dead duplicate `train` definition (it referenced
        `self.huber` and `replay_buffer.reinsert`, neither of which exists
        here, and was shadowed by this one) has been removed.
        """
        # Sample replay buffer (with priority indices and IS weights).
        state, action, next_state, reward, done, ind, weights = replay_buffer.sample()

        # Compute the target Q value: action chosen by the online network,
        # evaluated by the target network (Double DQN).
        with torch.no_grad():
            next_action = self.Q(next_state).argmax(1, keepdim=True)
            target_Q = (
                reward + done * self.discount *
                self.Q_target(next_state).gather(1, next_action).reshape(-1, 1)
            )

        # Get current Q estimate
        current_Q = self.Q(state).gather(1, action)

        # Importance-sampling-weighted Huber loss.
        Q_loss = (weights * F.smooth_l1_loss(current_Q, target_Q, reduction='none')).mean()

        # Optimize the Q network
        self.Q_optimizer.zero_grad()
        Q_loss.backward()
        self.Q_optimizer.step()

        # Update target network by polyak or full copy every X iterations.
        self.iterations += 1
        self.maybe_update_target()

        # New priorities: |TD error|^0.6 with a small epsilon so no
        # transition gets zero sampling probability.
        priority = ((current_Q - target_Q).abs() + 1e-10).pow(0.6).cpu().data.numpy().flatten()
        replay_buffer.update_priority(ind, priority)

    def polyak_target_update(self):
        # Soft update: target <- tau * online + (1 - tau) * target.
        for param, target_param in zip(self.Q.parameters(), self.Q_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def copy_target_update(self):
        # Hard update: copy weights every `target_update_frequency` iterations.
        if self.iterations % self.target_update_frequency == 0:
            self.Q_target.load_state_dict(self.Q.state_dict())

    def save(self, filename):
        torch.save(self.iterations, filename + "iterations")
        # Use the caller-supplied prefix for the Q weights as well
        # (previously a hard-coded "(unknown)" path that ignored `filename`).
        torch.save(self.Q.state_dict(), f"{filename}Q_{self.iterations}")
        torch.save(self.Q_optimizer.state_dict(), filename + "optimizer")

    def load(self, filename):
        self.iterations = torch.load(filename + "iterations")
        self.Q.load_state_dict(torch.load(f"{filename}Q_{self.iterations}"))
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer.load_state_dict(torch.load(filename + "optimizer"))
LAP-PAL | LAP-PAL-master/discrete/PAL_DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
    """DQN-style convolutional Q-network for stacked Atari frames.

    Maps a (batch, frames, 84, 84) stack to per-action Q-values.
    """

    def __init__(self, frames, num_actions):
        super(Conv_Q, self).__init__()
        self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
        self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.l1 = nn.Linear(3136, 512)
        self.l2 = nn.Linear(512, num_actions)

    def forward(self, state):
        """Return Q-values of shape (batch, num_actions)."""
        hidden = state
        for conv in (self.c1, self.c2, self.c3):
            hidden = F.relu(conv(hidden))
        # 3136 = 64 channels * 7 * 7 spatial cells for an 84x84 input.
        hidden = F.relu(self.l1(hidden.reshape(-1, 3136)))
        return self.l2(hidden)
# Used for Box2D / Toy problems
class FC_Q(nn.Module):
    """Two-hidden-layer MLP Q-network for low-dimensional observations."""

    def __init__(self, state_dim, num_actions):
        super(FC_Q, self).__init__()
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, num_actions)

    def forward(self, state):
        """Return Q-values of shape (batch, num_actions)."""
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class PAL_DDQN(object):
    """Double DQN with Prioritized Approximation Loss (PAL).

    Samples uniformly from the replay buffer but reshapes the loss so its
    expected gradient matches prioritized sampling.
    """

    def __init__(
        self,
        is_atari,
        num_actions,
        state_dim,
        device,
        discount=0.99,
        optimizer="Adam",
        optimizer_parameters={},
        polyak_target_update=False,
        target_update_frequency=8e3,
        tau=0.005,
        initial_eps = 1,
        end_eps = 0.001,
        eps_decay_period = 25e4,
        eval_eps=0.001,
        alpha=0.6,
        min_priority=1e-2
    ):
        self.device = device

        # Determine network type (conv net for Atari, MLP otherwise).
        self.Q = Conv_Q(4, num_actions).to(self.device) if is_atari else FC_Q(state_dim, num_actions).to(self.device)
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer = getattr(torch.optim, optimizer)(self.Q.parameters(), **optimizer_parameters)

        self.discount = discount

        # PAL hyper-parameters
        self.alpha = alpha
        self.min_priority = min_priority

        # Target update rule
        self.maybe_update_target = self.polyak_target_update if polyak_target_update else self.copy_target_update
        self.target_update_frequency = target_update_frequency
        self.tau = tau

        # Linear decay schedule for the exploration rate.
        self.initial_eps = initial_eps
        self.end_eps = end_eps
        self.slope = (self.end_eps - self.initial_eps) / eps_decay_period

        # Evaluation hyper-parameters
        self.state_shape = (-1,) + state_dim if is_atari else (-1, state_dim)
        self.eval_eps = eval_eps
        self.num_actions = num_actions

        # Number of training iterations
        self.iterations = 0

    def select_action(self, state, eval=False):
        """Epsilon-greedy action selection (greedy with probability 1 - eps)."""
        eps = self.eval_eps if eval \
            else max(self.slope * self.iterations + self.initial_eps, self.end_eps)

        # Select action according to policy with probability (1-eps)
        # otherwise, select random action
        if np.random.uniform(0, 1) > eps:
            with torch.no_grad():
                state = torch.FloatTensor(state).reshape(self.state_shape).to(self.device)
                return int(self.Q(state).argmax(1))
        else:
            return np.random.randint(self.num_actions)

    def train(self, replay_buffer):
        """One PAL-weighted Double-DQN update (uniform sampling)."""
        # Sample replay buffer
        state, action, next_state, reward, done = replay_buffer.sample()

        # Compute the target Q value: action chosen by the online network,
        # evaluated by the target network (Double DQN).
        with torch.no_grad():
            next_action = self.Q(next_state).argmax(1, keepdim=True)
            target_Q = (
                reward + done * self.discount *
                self.Q_target(next_state).gather(1, next_action).reshape(-1, 1)
            )

        # Get current Q estimate
        current_Q = self.Q(state).gather(1, action)

        td_loss = (current_Q - target_Q).abs()
        # Normalizer: mean clipped priority mass of the batch (no gradient).
        weight = td_loss.clamp(min=self.min_priority).pow(self.alpha).mean().detach()

        # Compute critic loss
        Q_loss = self.PAL(td_loss)/weight.detach()

        # Optimize the Q
        self.Q_optimizer.zero_grad()
        Q_loss.backward()
        self.Q_optimizer.step()

        # Update target network by polyak or full copy every X iterations.
        self.iterations += 1
        self.maybe_update_target()

    def PAL(self, x):
        """Prioritized Approximation Loss: quadratic below min_priority,
        power-law (1 + alpha exponent) above it."""
        return torch.where(
            x.abs() < self.min_priority,
            (self.min_priority ** self.alpha) * 0.5 * x.pow(2),
            self.min_priority * x.abs().pow(1. + self.alpha)/(1. + self.alpha)
        ).mean()

    def polyak_target_update(self):
        # Soft update: target <- tau * online + (1 - tau) * target.
        for param, target_param in zip(self.Q.parameters(), self.Q_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def copy_target_update(self):
        # Hard update: copy weights every `target_update_frequency` iterations.
        if self.iterations % self.target_update_frequency == 0:
            self.Q_target.load_state_dict(self.Q.state_dict())

    def save(self, filename):
        torch.save(self.iterations, filename + "iterations")
        # Use the caller-supplied prefix for the Q weights as well
        # (previously a hard-coded "(unknown)" path that ignored `filename`).
        torch.save(self.Q.state_dict(), f"{filename}Q_{self.iterations}")
        torch.save(self.Q_optimizer.state_dict(), filename + "optimizer")

    def load(self, filename):
        self.iterations = torch.load(filename + "iterations")
        self.Q.load_state_dict(torch.load(f"{filename}Q_{self.iterations}"))
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer.load_state_dict(torch.load(filename + "optimizer"))
LAP-PAL | LAP-PAL-master/discrete/LAP_DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
    """DQN-style convolutional Q-network for stacked Atari frames.

    Maps a (batch, frames, 84, 84) stack to per-action Q-values.
    """

    def __init__(self, frames, num_actions):
        super(Conv_Q, self).__init__()
        self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
        self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.l1 = nn.Linear(3136, 512)
        self.l2 = nn.Linear(512, num_actions)

    def forward(self, state):
        """Return Q-values of shape (batch, num_actions)."""
        hidden = state
        for conv in (self.c1, self.c2, self.c3):
            hidden = F.relu(conv(hidden))
        # 3136 = 64 channels * 7 * 7 spatial cells for an 84x84 input.
        hidden = F.relu(self.l1(hidden.reshape(-1, 3136)))
        return self.l2(hidden)
# Used for Box2D / Toy problems
class FC_Q(nn.Module):
    """Two-hidden-layer MLP Q-network for low-dimensional observations."""

    def __init__(self, state_dim, num_actions):
        super(FC_Q, self).__init__()
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, num_actions)

    def forward(self, state):
        """Return Q-values of shape (batch, num_actions)."""
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class LAP_DDQN(object):
    """Double DQN with Loss-Adjusted Prioritized (LAP) experience replay.

    Samples from a prioritized buffer and pairs it with a Huber-style loss
    so that no importance-sampling weights are required.
    """

    def __init__(
        self,
        is_atari,
        num_actions,
        state_dim,
        device,
        discount=0.99,
        optimizer="Adam",
        optimizer_parameters={},
        polyak_target_update=False,
        target_update_frequency=8e3,
        tau=0.005,
        initial_eps = 1,
        end_eps = 0.001,
        eps_decay_period = 25e4,
        eval_eps=0.001,
        alpha=0.6,
        min_priority=1e-2
    ):
        self.device = device

        # Determine network type (conv net for Atari, MLP otherwise).
        self.Q = Conv_Q(4, num_actions).to(self.device) if is_atari else FC_Q(state_dim, num_actions).to(self.device)
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer = getattr(torch.optim, optimizer)(self.Q.parameters(), **optimizer_parameters)

        self.discount = discount

        # LAP hyper-parameters
        self.alpha = alpha
        self.min_priority = min_priority

        # Target update rule
        self.maybe_update_target = self.polyak_target_update if polyak_target_update else self.copy_target_update
        self.target_update_frequency = target_update_frequency
        self.tau = tau

        # Linear decay schedule for the exploration rate.
        self.initial_eps = initial_eps
        self.end_eps = end_eps
        self.slope = (self.end_eps - self.initial_eps) / eps_decay_period

        # Evaluation hyper-parameters
        self.state_shape = (-1,) + state_dim if is_atari else (-1, state_dim)
        self.eval_eps = eval_eps
        self.num_actions = num_actions

        # Number of training iterations
        self.iterations = 0

    def select_action(self, state, eval=False):
        """Epsilon-greedy action selection (greedy with probability 1 - eps)."""
        eps = self.eval_eps if eval \
            else max(self.slope * self.iterations + self.initial_eps, self.end_eps)

        # Select action according to policy with probability (1-eps)
        # otherwise, select random action
        if np.random.uniform(0, 1) > eps:
            with torch.no_grad():
                state = torch.FloatTensor(state).reshape(self.state_shape).to(self.device)
                return int(self.Q(state).argmax(1))
        else:
            return np.random.randint(self.num_actions)

    def train(self, replay_buffer):
        """One LAP Double-DQN update; refreshes the buffer priorities."""
        # Sample replay buffer
        state, action, next_state, reward, done, ind, weights = replay_buffer.sample()

        # Compute the target Q value: action chosen by the online network,
        # evaluated by the target network (Double DQN).
        with torch.no_grad():
            next_action = self.Q(next_state).argmax(1, keepdim=True)
            target_Q = (
                reward + done * self.discount *
                self.Q_target(next_state).gather(1, next_action).reshape(-1, 1)
            )

        # Get current Q estimate
        current_Q = self.Q(state).gather(1, action)

        td_loss = (current_Q - target_Q).abs()
        Q_loss = self.huber(td_loss)

        # Optimize the Q network
        self.Q_optimizer.zero_grad()
        Q_loss.backward()
        self.Q_optimizer.step()

        # Update target network by polyak or full copy every X iterations.
        self.iterations += 1
        self.maybe_update_target()

        # New priorities: clipped |TD error|^alpha.
        priority = td_loss.clamp(min=self.min_priority).pow(self.alpha).cpu().data.numpy().flatten()
        replay_buffer.update_priority(ind, priority)

    def huber(self, x):
        """Huber-style loss: quadratic below min_priority, linear above."""
        return torch.where(x < self.min_priority, 0.5 * x.pow(2), self.min_priority * x).mean()

    def polyak_target_update(self):
        # Soft update: target <- tau * online + (1 - tau) * target.
        for param, target_param in zip(self.Q.parameters(), self.Q_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def copy_target_update(self):
        # Hard update: copy weights every `target_update_frequency` iterations.
        if self.iterations % self.target_update_frequency == 0:
            self.Q_target.load_state_dict(self.Q.state_dict())

    def save(self, filename):
        torch.save(self.iterations, filename + "iterations")
        # Use the caller-supplied prefix for the Q weights as well
        # (previously a hard-coded "(unknown)" path that ignored `filename`).
        torch.save(self.Q.state_dict(), f"{filename}Q_{self.iterations}")
        torch.save(self.Q_optimizer.state_dict(), filename + "optimizer")

    def load(self, filename):
        self.iterations = torch.load(filename + "iterations")
        self.Q.load_state_dict(torch.load(f"{filename}Q_{self.iterations}"))
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer.load_state_dict(torch.load(filename + "optimizer"))
LAP-PAL | LAP-PAL-master/discrete/DDQN.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Used for Atari
class Conv_Q(nn.Module):
    """DQN-style convolutional Q-network for stacked Atari frames.

    Maps a (batch, frames, 84, 84) stack to per-action Q-values.
    """

    def __init__(self, frames, num_actions):
        super(Conv_Q, self).__init__()
        self.c1 = nn.Conv2d(frames, 32, kernel_size=8, stride=4)
        self.c2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.c3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.l1 = nn.Linear(3136, 512)
        self.l2 = nn.Linear(512, num_actions)

    def forward(self, state):
        """Return Q-values of shape (batch, num_actions)."""
        hidden = state
        for conv in (self.c1, self.c2, self.c3):
            hidden = F.relu(conv(hidden))
        # 3136 = 64 channels * 7 * 7 spatial cells for an 84x84 input.
        hidden = F.relu(self.l1(hidden.reshape(-1, 3136)))
        return self.l2(hidden)
# Used for Box2D / Toy problems
class FC_Q(nn.Module):
    """Two-hidden-layer MLP Q-network for low-dimensional observations."""

    def __init__(self, state_dim, num_actions):
        super(FC_Q, self).__init__()
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, num_actions)

    def forward(self, state):
        """Return Q-values of shape (batch, num_actions)."""
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class DDQN(object):
    """Vanilla Double DQN agent with uniform replay sampling."""

    def __init__(
        self,
        is_atari,
        num_actions,
        state_dim,
        device,
        discount=0.99,
        optimizer="Adam",
        optimizer_parameters={},
        polyak_target_update=False,
        target_update_frequency=8e3,
        tau=0.005,
        initial_eps = 1,
        end_eps = 0.001,
        eps_decay_period = 25e4,
        eval_eps=0.001,
    ):
        self.device = device

        # Determine network type (conv net for Atari, MLP otherwise).
        self.Q = Conv_Q(4, num_actions).to(self.device) if is_atari else FC_Q(state_dim, num_actions).to(self.device)
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer = getattr(torch.optim, optimizer)(self.Q.parameters(), **optimizer_parameters)

        self.discount = discount

        # Target update rule
        self.maybe_update_target = self.polyak_target_update if polyak_target_update else self.copy_target_update
        self.target_update_frequency = target_update_frequency
        self.tau = tau

        # Linear decay schedule for the exploration rate.
        self.initial_eps = initial_eps
        self.end_eps = end_eps
        self.slope = (self.end_eps - self.initial_eps) / eps_decay_period

        # Evaluation hyper-parameters
        self.state_shape = (-1,) + state_dim if is_atari else (-1, state_dim)
        self.eval_eps = eval_eps
        self.num_actions = num_actions

        # Number of training iterations
        self.iterations = 0

    def select_action(self, state, eval=False):
        """Epsilon-greedy action selection (greedy with probability 1 - eps)."""
        eps = self.eval_eps if eval \
            else max(self.slope * self.iterations + self.initial_eps, self.end_eps)

        # Select action according to policy with probability (1-eps)
        # otherwise, select random action
        if np.random.uniform(0, 1) > eps:
            with torch.no_grad():
                state = torch.FloatTensor(state).reshape(self.state_shape).to(self.device)
                return int(self.Q(state).argmax(1))
        else:
            return np.random.randint(self.num_actions)

    def train(self, replay_buffer):
        """One Double-DQN update with a smooth-L1 (Huber) loss."""
        # Sample replay buffer
        state, action, next_state, reward, done = replay_buffer.sample()

        # Compute the target Q value: action chosen by the online network,
        # evaluated by the target network (Double DQN).
        with torch.no_grad():
            next_action = self.Q(next_state).argmax(1, keepdim=True)
            target_Q = (
                reward + done * self.discount *
                self.Q_target(next_state).gather(1, next_action).reshape(-1, 1)
            )

        # Get current Q estimate
        current_Q = self.Q(state).gather(1, action)

        # Compute Q loss
        Q_loss = F.smooth_l1_loss(current_Q, target_Q)

        # Optimize the Q network
        self.Q_optimizer.zero_grad()
        Q_loss.backward()
        self.Q_optimizer.step()

        # Update target network by polyak or full copy every X iterations.
        self.iterations += 1
        self.maybe_update_target()

    def polyak_target_update(self):
        # Soft update: target <- tau * online + (1 - tau) * target.
        for param, target_param in zip(self.Q.parameters(), self.Q_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def copy_target_update(self):
        # Hard update: copy weights every `target_update_frequency` iterations.
        if self.iterations % self.target_update_frequency == 0:
            self.Q_target.load_state_dict(self.Q.state_dict())

    def save(self, filename):
        torch.save(self.iterations, filename + "iterations")
        # Use the caller-supplied prefix for the Q weights as well
        # (previously a hard-coded "(unknown)" path that ignored `filename`).
        torch.save(self.Q.state_dict(), f"{filename}Q_{self.iterations}")
        torch.save(self.Q_optimizer.state_dict(), filename + "optimizer")

    def load(self, filename):
        self.iterations = torch.load(filename + "iterations")
        self.Q.load_state_dict(torch.load(f"{filename}Q_{self.iterations}"))
        self.Q_target = copy.deepcopy(self.Q)
        self.Q_optimizer.load_state_dict(torch.load(filename + "optimizer"))
sa-nmt | sa-nmt-master/Loss.py | """
This file handles the details of the loss function during training.
This includes: loss criterion, training statistics, and memory optimizations.
"""
from __future__ import division
import time
import sys
import math
import torch
import torch.nn as nn
def nmt_criterion(vocab_size, pad_id=0):
    """
    Construct the standard NMT criterion: summed NLL that ignores padding.

    Args:
        vocab_size: size of the target vocabulary.
        pad_id: index of the padding token; its class weight is zero so
            padded positions contribute no loss.
    """
    weight = torch.ones(vocab_size)
    weight[pad_id] = 0
    # `size_average=False` is deprecated; reduction='sum' is the equivalent
    # modern spelling.
    crit = nn.NLLLoss(weight, reduction='sum')
    return crit
class Statistics:
    """
    Accumulator for training loss statistics (loss, token and correct-token
    counts) plus wall-clock timing and reporting helpers.
    """

    def __init__(self, loss=0, n_words=0, n_correct=0):
        self.loss = loss
        self.n_words = n_words
        self.n_correct = n_correct
        self.start_time = time.time()

    def update(self, stat):
        """Merge another Statistics object into this one."""
        self.loss += stat.loss
        self.n_words += stat.n_words
        self.n_correct += stat.n_correct

    def accuracy(self):
        """Token-level accuracy in percent."""
        return 100 * (self.n_correct / self.n_words)

    def ppl(self):
        """Perplexity, with the exponent capped at 100 to avoid overflow."""
        # (Removed a leftover debug print of loss / n_words here.)
        return math.exp(min(self.loss / self.n_words, 100))

    def elapsed_time(self):
        return time.time() - self.start_time

    def output(self, epoch, uidx, max_updates, start):
        """Print a one-line progress summary and flush stdout."""
        t = self.elapsed_time()
        print(("Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; " +
               "%3.0f tgt tok/s; %6.0f s elapsed") %
              (epoch, uidx, max_updates,
               self.accuracy(),
               self.ppl(),
               self.n_words / (t + 1e-5),
               time.time() - start))
        sys.stdout.flush()

    def log(self, prefix, experiment, optim):
        """Push the current metrics to an experiment tracker."""
        t = self.elapsed_time()
        experiment.add_scalar_value(prefix + "_ppl", self.ppl())
        experiment.add_scalar_value(prefix + "_accuracy", self.accuracy())
        experiment.add_scalar_value(prefix + "_tgtper", self.n_words / t)
        experiment.add_scalar_value(prefix + "_lr", optim.lr)

    @staticmethod
    def score(loss, scores, targ, pad):
        """Build a Statistics object from generator scores for one batch.

        Padding positions (targ == pad) are excluded from both the word
        count and the correct count.
        """
        pred = scores.max(1)[1]
        non_padding = targ.ne(pad)
        num_correct = pred.eq(targ) \
                          .masked_select(non_padding).int() \
                          .sum().item()
        return Statistics(loss, non_padding.int().sum().item(), num_correct)
def filter_gen_state(state):
    """Yield (key, value) pairs of `state`, skipping entries whose value is None."""
    return ((key, value) for key, value in state.items() if value is not None)
def new_split(x, size):
    """Split `x` into detached chunks of `size`, re-enabling gradient
    tracking on chunks whose source tensor required grad."""
    def _detach(piece):
        out = piece.detach()
        if piece.requires_grad:
            out.requires_grad_(True)
        return out

    return tuple(_detach(piece) for piece in torch.split(x, size))
def shards(state, shard_size, eval=False):
    """Split `state` (a dict of tensors) into shards for memory-efficient loss
    computation.

    In eval mode the whole state is yielded once and no backward pass runs.
    In training mode, each yielded shard is a dict of detached slices
    (grad-enabled where the source required grad); after the caller has
    backpropagated through every shard, the gradients accumulated on the
    slices are propagated back into the original tensors with a single
    torch.autograd.backward call.
    """
    if eval:
        yield state
    else:
        # Drop None entries; keys and per-key shard tuples stay aligned.
        non_none = dict(filter_gen_state(state))
        keys, values = zip(*((k, new_split(v, shard_size))
                             for k, v in non_none.items()))
        # Yield one dict per shard, e.g. {"out": out_shard, "target": tgt_shard}.
        for shard_tensors in zip(*values):
            yield dict(zip(keys, shard_tensors))
        # Assumed backprop'd
        # By this point the caller is assumed to have called backward() on
        # each shard's loss, filling .grad on the detached slices.
        variables = []
        for i, k in enumerate(keys):
            dv = [v.grad for v in values[i] if v.grad is not None]
            if dv:
                dv = torch.cat(dv)
                variables += [(state[k], dv)]
        # Re-inject the accumulated shard gradients into the original graph.
        inputs, grads = zip(*variables)
        torch.autograd.backward(inputs, grads)
class LossCompute:
    """Couples a generator (projection to vocabulary log-probs) with a
    loss criterion."""

    def __init__(self, generator, crit):
        self.generator = generator
        self.crit = crit

    def make_loss_batch(self, outputs, targets):
        """
        Create all the variables that need to be sharded.
        This needs to match compute loss exactly.
        """
        return {"out": outputs, "target": targets}

    def compute_loss(self, out, target):
        """Compute the loss and scoring statistics for one (sharded) batch."""
        flat_out = out.view(-1, out.size(2))
        flat_target = target.view(-1)

        # Standard generator: project decoder states to vocab log-probs.
        scores = self.generator(flat_out)
        loss = self.crit(scores, flat_target)

        # Detach/copy before scoring so statistics never touch the graph.
        stats = Statistics.score(loss.item(), scores.detach(), flat_target.clone(), 0)
        return loss, stats
| 4,092 | 27.227586 | 77 | py |
sa-nmt | sa-nmt-master/translate.py | import argparse
import torch
import modelx as models
import infer
import string
# build args parser
parser = argparse.ArgumentParser(description='Training NMT')
parser.add_argument('-checkpoint', required=True,
                    help='saved checkpoit.')
parser.add_argument('-input', required=True,
                    help='Text file to translate.')
parser.add_argument('-output', default='trans.bpe', help='output file')
parser.add_argument('-beam_size', default=5, type=int,
                    help="Beam size.")
parser.add_argument('-gpuid', default=[], nargs='+', type=int,
                    help="Use CUDA")
opt = parser.parse_args()

use_gpu = len(opt.gpuid) > 0
# Warn when CUDA is available but no GPU id was requested.
if torch.cuda.is_available() and not use_gpu:
    print("so you should probably run with -gpus 0")

# Restore the model configuration that was saved with the checkpoint.
checkpoint = torch.load(opt.checkpoint)
train_opt = checkpoint['opt']
print('| train configuration')
# Override attention-score clamp thresholds (older checkpoints may lack
# these options).
train_opt.min_thres = -5.0
train_opt.max_thres = 7.0
#if train_opt.hard is None:
#train_opt.hard = False
print(train_opt)
model = models.make_base_model(train_opt, use_gpu, checkpoint)

# For the structured-attention encoder, register vocabulary ids of
# punctuation tokens so they can be masked out as heads.
if train_opt.encoder_type == "sabrnn":
    punct_idx = set()
    for p in set(string.punctuation):
        if p in model.dicts[0]:
            punct_idx.add(model.dicts[0][p])
    model.encoder.punct(punct_idx)

# over-write some options
train_opt.beam_size = opt.beam_size
agent = infer.Beam(train_opt, model)
agent.translate(opt.input, opt.output)
| 1,416 | 28.520833 | 71 | py |
sa-nmt | sa-nmt-master/extract_tree.py | import argparse
import torch
from torch.autograd import Variable
import modelx as models
import networkx as nx
from networkx.algorithms.tree import maximum_spanning_arborescence
import string
# build args parser
parser = argparse.ArgumentParser(description='Training NMT')
parser.add_argument('-checkpoint', required=True,
help='saved checkpoit.')
parser.add_argument('-input', required=True,
help='Text file to translate.')
parser.add_argument('-output', default='tree.txt', help='output file')
parser.add_argument('-gpuid', default=[], nargs='+', type=int,
help="Use CUDA")
opt = parser.parse_args()
use_gpu = True
if torch.cuda.is_available() and not use_gpu:
print("so you should probably run with -gpus 0")
checkpoint = torch.load(opt.checkpoint)
train_opt = checkpoint['opt']
print('| train configuration')
print(train_opt)
use_gpu = len(train_opt.gpuid) > 0
model = models.make_base_model(train_opt, use_gpu, checkpoint)
if train_opt.encoder_type in ["sabrnn", "fabrnn"]:
punct_idx = set()
for p in set(string.punctuation):
if p in model.dicts[0]:
punct_idx.add(model.dicts[0][p])
print('Add punctuation constraint')
model.encoder.punct(punct_idx)
# get the encoder
encoder = model.encoder
dicts = model.dicts
tt = torch.cuda if use_gpu else torch
def encode_string(ss):
    """Map a whitespace-tokenized sentence to a (len, 1) LongTensor Variable
    of source-vocab ids (unknown words map to id 1)."""
    ids = [dicts[0].get(w, 1) for w in ss.split()]
    return Variable(tt.LongTensor(ids).view(-1, 1), volatile=True)
def collapse_bpe(s, score):
    """Collapse BPE sub-tokens and merge their attention scores.

    Args:
        s: a BPE-segmented sentence ("@@" marks a split token).
        score: beta in the paper, a 2D tensor of p(z|s);
            sum over the last dimension should be 1.

    Returns:
        (sentence with BPE markers removed,
         score matrix with one row/column per full word, sub-token
         rows/columns summed).
    """
    # (1) group the token indices belonging to each full word.
    # (Removed dead code that collected punctuation indices but never
    # used them.)
    tokens = s.split()
    bpe = []
    indices = []
    for i, w in enumerate(tokens):
        if w.endswith("@@"):
            bpe += [i]
        else:
            if len(bpe) == 0:
                indices += [[i]]
            else:
                bpe += [i]  # add the last bped token
                indices += [bpe]
                bpe = []
    # collapsing from here
    s_ = []
    for bpe in indices:
        # (2) collapse heads: sum the columns of one word's sub-tokens.
        s_.append(score[:, bpe].sum(1).view(-1, 1))
    s_ = torch.cat(s_, 1)
    collapsed_score = []
    for bpe in indices:
        # (3) collapse childs: sum the rows of one word's sub-tokens.
        collapsed_score += [s_[bpe, :].sum(0).view(1, -1)]
    collapsed_score = torch.cat(collapsed_score, 0)
    s = s.replace("@@ ", "")  # the original string
    return s, collapsed_score
def build_graph(score):
    """Build graph from potential score matrix

    Args:
        score: FloatTensor (n, n), score.sum(1) = 1; the diagonal holds
            the root-attachment scores.
    Returns:
        a graph object (networkx DiGraph; node 0 is the artificial root,
        word i is node i + 1)
    """
    # return a list of (parent, child, weight)
    arcs = []
    n = score.size(0)
    # find the root first: the word with the highest root score.
    # NOTE(review): `val[0]` / `idx[0]` index into 1-element tensors, which
    # matches older torch; on torch >= 0.4 max(0) returns 0-dim tensors and
    # this would need .item() — confirm the pinned torch version.
    val, idx = score.diag().max(0)
    arcs.append((0, idx[0] + 1, val[0]))
    for i in range(n):
        for j in range(n):
            if i == j:  # root on the diagonal
                continue
                # arcs.append([0, i+1, score[i, j]])
            else:
                # Arc from head j to child i, weighted by score[i, j].
                arcs.append([j+1, i+1, score[i, j]])
    g = nx.DiGraph()
    g.add_weighted_edges_from(arcs)
    return g
def mst(score):
    """Extract the maximum spanning arborescence from the score matrix and
    format it as space-separated 'head->child' arcs."""
    arborescence = maximum_spanning_arborescence(build_graph(score))
    arcs = ['%s->%s' % (head, child) for head, child in arborescence.edges()]
    return ' '.join(arcs)
def renorm(m):
    """Renormalize log-scores along dim 1 (equivalent to log_softmax)."""
    expm = m.exp()
    return (expm / expm.sum(1, keepdim=True)).log()
def collapse():
    """For each input line: encode, run the encoder, pull the attention
    score matrix, collapse BPE and write 'sentence ||| head->child ...'
    lines to the output file."""
    with open(opt.output, 'w') as fw:
        for line in open(opt.input):
            x = encode_string(line)
            model.encoder(x)
            if train_opt.encoder_type == 'sabrnn':
                # Structured attention stashes its clamped arc scores here.
                score = model.encoder.tree_attn.edge_score.squeeze(0)
            else:
                score = model.encoder.attn.score.squeeze(0).log()
            s, score = collapse_bpe(line, score.data)
            try:
                tree = mst(score)
                out = '%s ||| %s\n' % (s.strip(), tree)
                fw.write(out)
            except Exception:
                # MST extraction can fail (e.g. no spanning arborescence);
                # skip the sentence. Previously a bare `except:` that also
                # swallowed KeyboardInterrupt/SystemExit.
                pass
collapse()
| 4,276 | 26.242038 | 70 | py |
sa-nmt | sa-nmt-master/models.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from attention import GlobalAttention, SelfAttention
from Utils import aeq
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
import math
class EncoderBase(nn.Module):
    """Common base class shared by the encoders in this module.

    Subclasses implement forward(); _check_args() provides shared input
    validation.
    """

    def _check_args(self, input, lengths=None, hidden=None):
        # The batch dimension of the input must agree with the lengths vector.
        s_len, n_batch = input.size()
        if lengths is not None:
            aeq(n_batch, len(lengths))

    def forward(self, input, lengths=None, hidden=None):
        """
        Args:
            input (LongTensor): len x batch x nfeat.
            lengths (LongTensor): batch
            hidden: Initial hidden state.

        Returns:
            hidden_t (Variable): Pair of layers x batch x rnn_size - final
                encoder state
            outputs (FloatTensor): len x batch x rnn_size - Memory bank
        """
        raise NotImplementedError
class Encoder(EncoderBase):
    """Standard (bi)LSTM encoder over word embeddings."""

    def __init__(self, word_vocab_size, embedding_size, word_padding_idx,
                 num_layers, hidden_size, dropout, bidirectional=True):
        super(Encoder, self).__init__()
        directions = 2 if bidirectional else 1
        assert hidden_size % directions == 0
        self.embeddings = nn.Embedding(word_vocab_size,
                                       embedding_size,
                                       padding_idx=word_padding_idx)
        # Per-direction hidden size, so the concatenated output is hidden_size.
        self.rnn = nn.LSTM(input_size=embedding_size,
                           hidden_size=hidden_size // directions,
                           num_layers=num_layers,
                           dropout=dropout,
                           bidirectional=bidirectional)

    def forward(self, input, lengths=None, hidden=None):
        """See EncoderBase.forward() for description of args and returns."""
        self._check_args(input, lengths, hidden)

        emb = self.embeddings(input)
        self.word_embs = emb  # keep embeddings around for later access

        # Pack when lengths are known so the LSTM skips padded positions.
        rnn_input = pack(emb, lengths) if lengths is not None else emb
        outputs, hidden = self.rnn(rnn_input, hidden)
        if lengths is not None:
            outputs = unpack(outputs)[0]
        return outputs, hidden
# Structured Attention and our models
class MatrixTree(nn.Module):
    """Implementation of the matrix-tree theorem for computing marginals
    of non-projective dependency parsing. This attention layer is used
    in the paper "Learning Structured Text Representations."
    """
    def __init__(self, eps=1e-5, device=torch.device('cpu')):
        self.eps = eps  # numerical-stability constant (not used in forward below)
        super(MatrixTree, self).__init__()
        self.device = device

    def forward(self, input, lengths=None):
        """Return per-batch edge marginals computed from arc potentials.

        Args:
            input: (batch, n, n) log-potentials; the diagonal holds the
                root-attachment scores.
            lengths: optional per-batch sentence lengths; positions beyond
                the length are left at zero in the output.
        """
        laplacian = input.exp()
        output = input.clone()
        output.data.fill_(0)
        for b in range(input.size(0)):
            lx = lengths[b] if lengths is not None else input.size(1)
            input_b = input[b, :lx, :lx]
            # Graph Laplacian of the exponentiated scores, diagonal zeroed.
            lap = laplacian[b, :lx, :lx].masked_fill(
                torch.eye(lx).to(self.device).ne(0), 0)
            lap = -lap + torch.diag(lap.sum(0))
            # store roots on diagonal
            lap[0] = input_b.diag().exp()
            inv_laplacian = lap.inverse()
            # Edge marginals from the inverse Laplacian (matrix-tree theorem).
            factor = inv_laplacian.diag().unsqueeze(1)\
                                  .expand(lx, lx).transpose(0, 1)
            term1 = input_b.exp().mul(factor).clone()
            term2 = input_b.exp().mul(inv_laplacian.transpose(0, 1)).clone()
            term1[:, 0] = 0
            term2[0] = 0
            output_b = term1 - term2
            # Root-attachment marginals are placed on the output diagonal.
            roots_output = input_b.diag().exp().mul(
                inv_laplacian.transpose(0, 1)[0])
            output[b, :lx, :lx] = output_b + torch.diag(roots_output)
        return output
class TreeAttention(nn.Module):
    """Structured attention: scaled dot-product arc scores fed through the
    matrix-tree theorem to obtain (soft or hard) dependency marginals."""
    def __init__(self, dim, min_thres=-5, max_thres=7, hard=False,
                 device=torch.device('cpu')):
        super(TreeAttention, self).__init__()
        # Query/key/value projections for the pairwise arc scores.
        self.q = nn.Linear(dim, dim, bias=False)
        self.k = nn.Linear(dim, dim, bias=False)
        self.v = nn.Linear(dim, dim, bias=False)
        # Learned query representing the artificial root node.
        self.root_query = nn.Parameter(torch.randn(dim))
        self.scale = math.sqrt(1 / dim)  # scaled dot-product factor
        self.dtree = MatrixTree()
        # Scores are clamped into [min_thres, max_thres] before the
        # matrix-tree computation for numerical stability.
        self.min_thres = min_thres
        self.max_thres = max_thres
        self.hard = hard  # if True, one-hot attention with straight-through grads
        self.device = device

    def forward(self, input, punct_mask=None, lengths=None):
        """
        Args:
            input: (s_len, batch, dim) encoder states.
            punct_mask: optional (s_len, batch) byte mask of punctuation
                positions excluded from acting as heads.
            lengths: optional per-batch sentence lengths.
        Returns:
            (batch, s_len, dim) tree-attention context vectors.
        """
        s_len, batch, dim = input.size()
        input = input.contiguous().transpose(0, 1) \
                     .contiguous().view(-1, dim)
        q = self.q(input).view(batch, s_len, -1)  # (batch, s_len, dim)
        k = self.k(input).view(batch, s_len, -1)  # (batch, s_len, dim)
        v = self.v(input).view(batch, s_len, -1)  # (batch, s_len, dim)
        _score = torch.bmm(q, k.transpose(1, 2))  # (batch, s_len, s_len)
        # compute root
        r_ = self.root_query.view(1, -1, 1).expand(batch, dim, 1)
        root = torch.bmm(k, r_).squeeze(-1)  # (batch, s_len)
        # Place root scores on the diagonal of the score matrix.
        # NOTE(review): `_score[b] * mask` keeps only the DIAGONAL of the
        # pairwise scores and then overwrites it with `root`; confirm the
        # intended mask is (1 - eye) to keep the off-diagonal arc scores.
        mask = torch.eye(s_len).to(self.device)
        score = _score.clone()
        for b in range(batch):
            score[b] = _score[b] * mask + torch.diag(root[b])
        # normalized
        score *= self.scale
        if punct_mask is not None:
            # Forbid punctuation tokens from being heads by sending their
            # scores to -inf before normalization.
            punct_mask = punct_mask.transpose(0, 1)
            punct_mask = punct_mask[:, None, :].expand(batch, s_len, s_len) \
                                               .transpose(1, 2)
            score.data.masked_fill_(punct_mask, -math.inf)
        score = score.clamp(self.min_thres, self.max_thres)
        self.edge_score = score.transpose(1, 2)  # stashed for tree extraction
        edge_score = self.dtree(score, lengths).transpose(1, 2)
        # edge_score.sum(2) == 1
        if self.hard:
            # Straight-through hard attention: forward pass uses the one-hot
            # argmax, backward pass uses the soft marginals.
            y = edge_score.data.new(edge_score.size()).fill_(0)
            _, max_idx = edge_score.data.max(2)
            y.scatter_(2, max_idx[:, :, None], 1)
            hard_edge = (Variable(y) - edge_score).detach() + edge_score
            edge_score = hard_edge
        return torch.bmm(edge_score, v)
class SAEncoder(Encoder):
    """ The structured attention RNN encoder: a biLSTM whose states are
    additionally combined through TreeAttention. """
    def __init__(self, word_vocab_size, embedding_size, word_padding_idx,
                 num_layers, hidden_size, dropout, bidirectional=True,
                 encode_multi_key=True, min_thres=-5, max_thres=7, hard=False):
        super(SAEncoder, self).__init__(word_vocab_size, embedding_size,
                                        word_padding_idx,
                                        num_layers, hidden_size, dropout,
                                        bidirectional=True)
        # Structured (dependency-tree) attention over the RNN states.
        self.tree_attn = TreeAttention(hidden_size, min_thres, max_thres, hard)
        self.encode_multi_key = encode_multi_key
        if not self.encode_multi_key:
            # Gate used to mix tree-attention output into the RNN states.
            self.linear = nn.Linear(hidden_size, hidden_size, bias=False)
        self.punct_idx = None  # vocab ids of punctuation tokens; set via punct()

    def punct(self, punct_idx):
        # Register punctuation ids; matching positions are masked out of the
        # tree attention in forward().
        self.punct_idx = punct_idx

    def forward(self, input, lengths=None):
        outputs, hidden = Encoder.forward(self, input, lengths)
        # find puncts
        # Build a (len, batch) byte mask of punctuation positions, limited to
        # punctuation ids actually present in this batch.
        punct_mask = None
        if self.punct_idx is not None:
            punct = set(input.data.contiguous().view(-1).tolist())
            punct &= self.punct_idx
            if len(punct) > 0:
                punct_mask = 0
                for p in punct:
                    punct_mask += input.data.eq(p)
        tree_outputs = self.tree_attn(outputs, punct_mask, lengths)
        # tree_outputs has size (batch_size, s_len, hidden_size)
        if not self.encode_multi_key:
            # compute gate syntax
            z = self.linear(outputs.view(-1, outputs.size(2))).sigmoid()
            gtree = z.view_as(outputs) * tree_outputs.transpose(0, 1)
            outputs = outputs + gtree
            return outputs, hidden
        return (outputs, tree_outputs), hidden
class FAEncoder(Encoder):
    """The flat attention RNN encoder.

    Like ``SAEncoder`` but uses plain scaled-dot self attention instead
    of structured (tree) attention over the recurrent states.
    """
    def __init__(self, word_vocab_size, embedding_size, word_padding_idx,
                 num_layers, hidden_size, dropout, bidirectional=True,
                 encode_multi_key=True):
        # BUGFIX: forward the caller's ``bidirectional`` flag instead of
        # hard-coding True, so the parameter actually takes effect.
        super(FAEncoder, self).__init__(word_vocab_size, embedding_size,
                                        word_padding_idx,
                                        num_layers, hidden_size, dropout,
                                        bidirectional=bidirectional)
        self.attn = SelfAttention(hidden_size)
        self.encode_multi_key = encode_multi_key
        # BUGFIX: initialize punct_idx (as SAEncoder does); otherwise
        # forward() raises AttributeError when punct() was never called.
        self.punct_idx = None

    def punct(self, punct_idx):
        """Register the set of punctuation token ids to mask out."""
        self.punct_idx = punct_idx

    def forward(self, input, lengths=None):
        # padding mask: positions equal to 0 are <pad>
        mask = input.data.eq(0).t()
        outputs, hidden = Encoder.forward(self, input, lengths)
        # build a mask over punctuation tokens occurring in this batch
        punct_mask = None
        if self.punct_idx is not None:
            punct = set(input.data.contiguous().view(-1).tolist())
            punct &= self.punct_idx
            if len(punct) > 0:
                punct_mask = 0
                for p in punct:
                    punct_mask += input.data.eq(p)
        self_attn_outputs = self.attn(outputs, mask, punct_mask)
        if not self.encode_multi_key:
            # plain residual combination of the two views
            outputs = outputs + self_attn_outputs.transpose(0, 1)
            return outputs, hidden
        return (outputs, self_attn_outputs), hidden
class StackedLSTM(nn.Module):
    """Stack of LSTMCells advanced one time step per call.

    A hand-rolled stack is required by the decoder because input feeding
    forces the recurrence to be unrolled a single step at a time.
    """
    def __init__(self, num_layers, input_size, rnn_size, dropout):
        super(StackedLSTM, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList(
            nn.LSTMCell(input_size if i == 0 else rnn_size, rnn_size)
            for i in range(num_layers))

    def forward(self, input, hidden):
        """Advance the whole stack one step.

        input: (batch, input_size) features for this step.
        hidden: tuple (h, c), each (num_layers, batch, rnn_size).
        Returns the top layer's output and the updated (h, c) stack.
        """
        prev_h, prev_c = hidden
        next_h, next_c = [], []
        x = input
        last = self.num_layers - 1
        for idx, cell in enumerate(self.layers):
            h_i, c_i = cell(x, (prev_h[idx], prev_c[idx]))
            next_h.append(h_i)
            next_c.append(c_i)
            # dropout between layers, but not on the final output
            x = h_i if idx == last else self.dropout(h_i)
        return x, (torch.stack(next_h), torch.stack(next_c))
class Decoder(nn.Module):
    """Input-feeding LSTM decoder with global attention.

    Each step consumes the token embedding concatenated with the previous
    attentional output, which is why the recurrence is a StackedLSTM
    advanced one step at a time rather than an nn.LSTM.
    """
    def __init__(self, word_vocab_size, embedding_size, word_padding_idx,
                 num_layers, hidden_size, dropout, multi_key=False,
                 shared_attn=False):
        # CLEANUP: call super().__init__() before any attribute assignment;
        # setting attributes on an uninitialized nn.Module is fragile.
        super(Decoder, self).__init__()
        # input feeding: embedding plus the fed-back attentional output
        input_size = embedding_size + hidden_size
        self.hidden_size = hidden_size
        self.embeddings = nn.Embedding(word_vocab_size, embedding_size,
                                       padding_idx=word_padding_idx)
        self.rnn = StackedLSTM(num_layers, input_size, hidden_size, dropout)
        self.attn = GlobalAttention(hidden_size, multi_key, shared_attn)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, hidden, context, mask=None, init_output=None):
        """Run the decoder over a whole target sequence.

        input: (tgt_len, batch) target token ids.
        hidden: initial (h, c) stacks for the LSTM.
        context: encoder states (or a (states, tree_states) tuple).
        mask: source padding mask applied inside the attention.
        init_output: attentional output to feed at step 0; zeros when None.
        Returns (outputs, hidden, attns).
        """
        emb = self.embeddings(input)
        batch_size = input.size(1)
        h_size = (batch_size, self.hidden_size)
        outputs = []
        if init_output is None:
            output = Variable(emb.data.new(*h_size).zero_(),
                              requires_grad=False)
        else:
            output = init_output
        attns = []
        # set mask
        if mask is not None:
            self.attn.apply_mask(mask)
        for i, emb_t in enumerate(emb.split(1)):
            emb_t = emb_t.squeeze(0)
            # input feeding: concatenate the previous attentional output
            emb_t = torch.cat([emb_t, output], 1)
            rnn_output, hidden = self.rnn(emb_t, hidden)
            attn_output, attn = self.attn(rnn_output,
                                          context)
            output = self.dropout(attn_output)
            outputs += [output]
            attns.append(attn)
        attns = torch.stack(attns)
        outputs = torch.stack(outputs)
        return outputs, hidden, attns
class NMT(nn.Module):
    """Sequence-to-sequence model: an encoder plus an attentional decoder."""
    def __init__(self, encoder, decoder):
        super(NMT, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def _fix_enc_hidden(self, h):
        """
        The encoder hidden is (layers*directions) x batch x dim.
        We need to convert it to layers x batch x (directions*dim).
        """
        fwd = h[0:h.size(0):2]
        bwd = h[1:h.size(0):2]
        return torch.cat([fwd, bwd], 2)

    def _init_hidden(self, enc_hidden):
        """Reshape both the h and c encoder states for the decoder."""
        return tuple(self._fix_enc_hidden(part) for part in enc_hidden[:2])

    def forward(self, src, tgt, src_lengths):
        """Encode src, then decode tgt with attention over the source."""
        context, enc_hidden = self.encoder(src, src_lengths)
        # multi-key encoders return a (rnn_states, attn_states) tuple
        if isinstance(context, tuple):
            context_ = (context[0].transpose(0, 1), context[1])
        else:
            context_ = context.transpose(0, 1)
        enc_hidden = self._init_hidden(enc_hidden)
        src_pad_mask = src.data.eq(0).t()
        out, dec_hidden, attn = self.decoder(tgt, enc_hidden,
                                             context_, src_pad_mask)
        return out
def make_encoder(opt):
    """
    Various encoder dispatcher function.
    Args:
        opt: the option in current environment.
    """
    # arguments shared by every encoder flavour
    common = (opt.src_vocab_size, opt.word_vec_size, 0,
              opt.layers, opt.rnn_size, opt.dropout)
    if opt.encoder_type == "sabrnn":
        # structured (tree) attention encoder
        return SAEncoder(*common, bidirectional=True,
                         encode_multi_key=opt.encode_multi_key,
                         min_thres=opt.min_thres, max_thres=opt.max_thres,
                         hard=opt.hard)
    if opt.encoder_type == "fabrnn":
        # flat self-attention encoder
        return FAEncoder(*common, bidirectional=True,
                         encode_multi_key=opt.encode_multi_key)
    # default: plain RNN encoder
    return Encoder(*common)
def make_decoder(opt):
    """
    Various decoder dispatcher function.
    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this decoder.
    """
    return Decoder(opt.tgt_vocab_size, opt.word_vec_size, 0,
                   opt.layers, opt.rnn_size, opt.dropout,
                   multi_key=opt.encode_multi_key,
                   shared_attn=opt.share_attn)
def make_base_model(model_opt, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        checkpoint: the snapshot model.
    Returns:
        the NMTModel.
    """
    # NMT = encoder + decoder
    model = NMT(make_encoder(model_opt), make_decoder(model_opt))
    # output projection + log-softmax over the target vocabulary
    generator = nn.Sequential(
        nn.Linear(model_opt.rnn_size, model_opt.tgt_vocab_size),
        nn.LogSoftmax(dim=-1))
    if model_opt.share_decoder_embeddings:
        # tie the output projection to the decoder embedding table
        generator[0].weight = model.decoder.embeddings.weight
    model.generator = generator
    # Load the model states from checkpoint.
    if checkpoint is not None:
        print('Loading model')
        model.load_state_dict(checkpoint['model'])
        model.dicts = checkpoint['dicts']
    return model
| 15,669 | 36.488038 | 79 | py |
sa-nmt | sa-nmt-master/infer.py | import torch
from torch.autograd import Variable
import pickle as pkl
import math
# TODO: documentation of functions
class Beam(object):
    r"""Beam search class for NMT.
    This is a simple beam search object. It takes a model, which is used to
    compute the next probable output, and dictionaries that are used to
    map from word indices to the real strings.
    Args:
        opt (argparser): options that contain paths to dictionaries
        model (pytorch network): this should be created beforehand
    """
    def __init__(self, opt, model):
        self.opt = opt
        # tensor factory: CUDA tensors when a GPU is configured
        self.tt = torch.cuda if len(opt.gpuid) > 0 else torch
        self.model = model
        self.model.eval()
        self.dicts = model.dicts
        # create an inverse map from int->word for target side
        self.idx2w = {}
        for w, idx in self.dicts[1].items():
            self.idx2w[idx] = w
        self.bos_idx = self.dicts[1]['<bos>']
        self.eos_idx = self.dicts[1]['<eos>']
        self.pad_idx = self.dicts[1]['<pad>']
    def encode_string(self, ss):
        """Map a whitespace-tokenized source string to a (len, 1) id tensor.
        Unknown / out-of-vocabulary tokens are mapped to id 1."""
        ss = ss.split()
        ss = [self.dicts[0].get(w, 1) for w in ss]
        if self.opt.src_vocab_size > 0:
            ss = [w if w < self.opt.src_vocab_size else 1 for w in ss]
        # NOTE(review): volatile=True is a pre-0.4 PyTorch API; on modern
        # torch this should be torch.no_grad() — confirm the torch version.
        ss = Variable(self.tt.LongTensor(ss).view(-1, 1),
                      volatile=True)
        return ss
    def decode_string(self, tidx):
        """Map a sequence of target ids back to a string, stopping at <eos>."""
        ts = []
        for i in list(tidx):
            if i == self.eos_idx:
                break
            else:
                ts += [self.idx2w[i]]
        return ' '.join(ts)
    def beam_search(self, input):
        """
        Beam search function.
        opt: trained options
        input: Tensor (bptt x 1)
        """
        k = self.opt.beam_size
        completed_hyps = []
        # replicate the source column across the k beams
        input = input.expand(input.size(0), k)
        max_len = int(input.size(0) * 1.5)
        hypos = self.tt.LongTensor(max_len, k).fill_(2)
        init_target = self.tt.LongTensor(1, k).fill_(2)
        init_target = Variable(init_target, volatile=True)
        # only beam 0 starts alive; the rest are at -inf
        scores = self.tt.FloatTensor(k).fill_(-math.inf)
        scores[0] = 0
        #lengths = [input.size(0) for i in range(k)]
        context, enc_hidden = self.model.encoder(input)
        init_hidden = self.model._init_hidden(enc_hidden)
        # alias
        decoder = self.model.decoder
        generator = self.model.generator
        init_output = None
        if isinstance(context, tuple):
            context = (context[0].transpose(0, 1), context[1])
        else:
            context = context.transpose(0, 1)
        decoder.attn.mask = None
        for t in range(max_len):
            out, dec_hidden, attn = decoder(init_target, init_hidden, context,
                                            init_output=init_output)
            log_probs = generator(out.squeeze(0)).data
            # top k continuations per beam, then top k over all k*k of them
            scores_t, idx_t = log_probs.topk(k, 1)
            scores_t = scores_t + scores.view(-1, 1).expand_as(scores_t)
            scores, k_idx = scores_t.view(-1).topk(k)
            # NOTE(review): relies on integer (floor) division of a
            # LongTensor; in newer PyTorch Tensor.div performs true
            # division — confirm the torch version this targets.
            next_hp = k_idx.div(k)
            next_ys = idx_t.view(-1).index_select(0, k_idx)
            done_y = next_ys.eq(self.eos_idx)
            if done_y.sum() > 0 and t > 0:
                # retire finished hypotheses (length-normalized score)
                # and shrink the live beam accordingly
                for i in range(k):
                    if next_ys[i] == self.eos_idx:
                        j = next_hp[i]
                        text = self.decode_string(hypos[0:t, j])
                        completed_hyps.append((text, scores[i] / (t+1)))
                        k -= 1
                if k > 0:
                    cont_y = next_ys.ne(self.eos_idx)
                    next_ys = next_ys.masked_select(cont_y)
                    next_hp = next_hp.masked_select(cont_y)
                    if isinstance(context, tuple):
                        context = (context[0][:k], context[1][:k])
                    else:
                        context = context[:k]
                    scores = scores.masked_select(cont_y)
            if k == 0:
                break
            # reorder histories to follow the surviving beams
            hypos = hypos.index_select(1, next_hp)
            hypos[t] = next_ys
            init_target = Variable(next_ys.view(1, -1), volatile=True)
            next_hp = Variable(next_hp)
            init_output = out.squeeze(0).index_select(0, next_hp)
            init_hidden = [h.index_select(1, next_hp) for h in dec_hidden]
        if len(completed_hyps) > 0:
            # best completed hypothesis by normalized score
            completed_hyps.sort(key=lambda tup: tup[1])
            best_h = completed_hyps.pop()
            return best_h[0]
        else:
            # nothing finished within max_len: take the best live beam
            best_s, idx = scores.topk(1)
            best_h = hypos.index_select(1, idx).view(-1)
            return self.decode_string(best_h)
    def translate(self, text_file, out_file='output.txt'):
        """Translate every line of text_file, writing results to out_file."""
        fw = open(out_file, 'w')
        for line in open(text_file):
            src_idx = self.encode_string(line)
            s = self.beam_search(src_idx)
            fw.write(s + '\n')
        fw.close()
| 4,938 | 35.316176 | 78 | py |
sa-nmt | sa-nmt-master/attention.py | import torch
import torch.nn as nn
from Utils import aeq
import math
import torch.nn.functional as F
class SelfAttention(nn.Module):
    """Scaled dot-product self attention over a sequence of states."""
    def __init__(self, dim):
        super(SelfAttention, self).__init__()
        self.q = nn.Linear(dim, dim, bias=False)
        self.k = nn.Linear(dim, dim, bias=False)
        self.v = nn.Linear(dim, dim, bias=False)
        self.scale = math.sqrt(1 / dim)

    def forward(self, input, mask=None, punct_mask=None):
        """
        input (FloatTensor): s_len, batch, dim
        mask (ByteTensor): batch x s_len padding mask
        punct_mask: optional (s_len, batch) punctuation mask
        Returns (batch, s_len, dim) attended values.
        """
        s_len, batch, dim = input.size()
        # flatten to (batch*s_len, dim) so each projection is one matmul
        flat = input.contiguous().transpose(0, 1).contiguous().view(-1, dim)
        queries = self.q(flat).view(batch, s_len, -1)
        keys = self.k(flat).view(batch, s_len, -1)
        values = self.v(flat).view(batch, s_len, -1)
        # (batch, s_len, s_len) scaled attention logits
        score = torch.bmm(queries, keys.transpose(1, 2)) * self.scale
        if mask is not None:
            score.data.masked_fill_(
                mask[:, None, :].expand(batch, s_len, s_len), -math.inf)
        if punct_mask is not None:
            pm = punct_mask.transpose(0, 1)  # (batch, s_len)
            score.data.masked_fill_(
                pm[:, None, :].expand(batch, s_len, s_len), -math.inf)
        attn = F.softmax(score.view(-1, s_len), dim=-1).view(-1, s_len, s_len)
        # keep the normalized weights around for inspection
        self.score = attn
        return torch.bmm(attn, values)
class GlobalAttention(nn.Module):
    """
    Luong Attention. This implements general attention.
    Concrete distribution: The Concrete Distribution: A Continuous Relaxation
    of Discrete Random Variables
    """
    def __init__(self, dim, multi_key=False, share_attn=False):
        """
        dim (Int): dimension of input vector
        multi_key (Boolean): using multi keys encoder
        share_attn (Boolean): sharing attention weights between
            semantic and syntactic annotations
        """
        super(GlobalAttention, self).__init__()
        # make a local copy of hyper-parameters
        self.dim = dim
        # CLEANUP: share_attn was assigned twice in the original; once is enough.
        self.share_attn = share_attn
        self.multi_key = multi_key
        self.linear_in = nn.Linear(dim, dim, bias=False)
        if multi_key:
            if not share_attn:  # using a separate attention
                self.linear_sa = nn.Linear(dim, dim, bias=False)
            # output mixes [context, input, syntax-context]
            self.linear_out = nn.Linear(dim*3, dim, bias=False)
            self.gate = nn.Linear(dim, dim)
        else:
            self.linear_out = nn.Linear(dim*2, dim, bias=False)
        self.mask = None

    def apply_mask(self, mask):
        """Store the source padding mask used by forward()."""
        self.mask = mask

    def score(self, h_t, h_s, sa_attn=False):
        """
        h_t (FloatTensor): batch x tgt_len x dim
        h_s (FloatTensor): batch x src_len x dim
        returns scores (FloatTensor): batch x tgt_len x src_len:
            raw attention scores for each src index
        sa_attn (Boolean): using a separate attention for syntax context
        """
        # Check input sizes
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)
        h_t_ = h_t.view(tgt_batch*tgt_len, tgt_dim)
        if sa_attn:
            h_t_ = self.linear_sa(h_t_)
        else:
            h_t_ = self.linear_in(h_t_)
        h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
        h_s_ = h_s.transpose(1, 2)
        # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
        return torch.bmm(h_t, h_s_)

    def forward(self, input, context):
        """
        input (FloatTensor): batch x tgt_len x dim: decoder's rnn's output.
        context (FloatTensor): batch x src_len x dim: src hidden states
        """
        # one step input
        # NOTE(review): multi_key assumes `context` is a (states, tree_states)
        # tuple; a plain tensor with multi_key=True would hit an undefined
        # `tree_context` below.
        if isinstance(context, tuple):
            context, tree_context = context
        if input.dim() == 2:
            one_step = True
            input = input.unsqueeze(1)
        else:
            one_step = False
        batch, sourceL, dim = context.size()
        batch_, targetL, dim_ = input.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        # compute attention scores, as in Luong et al.
        align = self.score(input, context)
        if self.mask is not None:
            mask_ = self.mask[:, None, :]
            align.data.masked_fill_(mask_, -math.inf)
        # Softmax to normalize attention weights
        align_vectors = F.softmax(align, dim=-1)
        # each context vector c_t is the weighted average
        # over all the source hidden states
        c = torch.bmm(align_vectors, context)
        if self.multi_key:
            # sharing attention weight
            if self.share_attn:
                sc = torch.bmm(align_vectors, tree_context)
            else:
                # computing attention scores for syntax
                tree_align = self.score(input, tree_context, True)
                if self.mask is not None:
                    tree_align.data.masked_fill_(self.mask[:, None, :],
                                                 -math.inf)
                tree_align_vectors = F.softmax(tree_align, dim=-1)
                sc = torch.bmm(tree_align_vectors, tree_context)
            z = F.sigmoid(self.gate(input))  # batch x tgt_len x dim
            self.z = z  # for visualization
            sc = sc * z
            concat_c = torch.cat([c, input, sc], 2).view(batch*targetL, dim*3)
        else:
            concat_c = torch.cat([c, input], 2).view(batch*targetL, dim*2)
        attn_h = self.linear_out(concat_c).view(batch, targetL, dim)
        attn_h = F.tanh(attn_h)
        if one_step:
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)
            # Check output sizes
            batch_, dim_ = attn_h.size()
            aeq(batch, batch_)
            aeq(dim, dim_)
            batch_, sourceL_ = align_vectors.size()
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)
        else:
            attn_h = attn_h.transpose(0, 1).contiguous()
            align_vectors = align_vectors.transpose(0, 1).contiguous()
            # Check output sizes
            targetL_, batch_, dim_ = attn_h.size()
            aeq(targetL, targetL_)
            aeq(batch, batch_)
            aeq(dim, dim_)
            targetL_, batch_, sourceL_ = align_vectors.size()
            aeq(targetL, targetL_)
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)
        return attn_h, align_vectors
| 6,737 | 35.032086 | 78 | py |
sa-nmt | sa-nmt-master/train.py | import argparse
import torch
from Iterator import TextIterator
import models
from itertools import zip_longest
import random
import Loss
import opts
import os
import math
import subprocess
from infer import Beam
import re
from torch.optim.lr_scheduler import ReduceLROnPlateau
# Command-line configuration: data paths, beam size and vocab pickles, plus
# the shared option groups registered by opts.py.
parser = argparse.ArgumentParser(description='train.py')
# Data and loading options
parser.add_argument('-datasets', required=True, default=[],
                    nargs='+', type=str,
                    help='source_file target_file.')
parser.add_argument('-valid_datasets', required=True, default=[],
                    nargs='+', type=str,
                    help='valid_source valid target files.')
parser.add_argument('-beam_size', default=12, type=int, help="beam size")
# dictionaries
parser.add_argument('-dicts', required=True, default=[],
                    nargs='+',
                    help='source_vocab.pkl target_vocab.pkl files.')
# opts.py
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opts.preprocess_opts(parser)
opt = parser.parse_args()
# single shared device for all tensors built in this module
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# for reproducibility
torch.manual_seed(opt.seed)
random.seed(opt.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(opt.seed)
print(opt)
# batch preparation
def prepare_data(seqs_x, seqs_y, evaluation=False):
    """Sort a batch by source length and pad it into LongTensors.

    seqs_x / seqs_y: lists of token-id lists (source / target).
    evaluation: accepted for call-site compatibility — eval() passes a
        third positional flag, which crashed against the old 2-arg
        signature; padding itself is identical either way.
    Returns (src, tgt, src_lengths): src/tgt are (max_len, batch) tensors
    padded with 0, src_lengths is sorted descending.
    """
    # pair source/target and sort by source length (descending)
    mb = [(seqs_x[i], seqs_y[i]) for i in range(len(seqs_x))]
    mb.sort(key=lambda x: len(x[0]), reverse=True)
    # zip_longest transposes to time-major and pads ragged rows with 0
    xs = torch.LongTensor(
        list(zip_longest(*map(lambda x: x[0], mb), fillvalue=0))).to(device)
    ys = torch.LongTensor(
        list(zip_longest(*map(lambda x: x[1], mb), fillvalue=0))).to(device)
    lengths_x = [len(x[0]) for x in mb]
    return xs, ys, lengths_x
def eval(model, criterion, valid_data):
    """Compute loss statistics over the validation iterator.

    Puts the model in eval mode for the pass and restores train mode
    before returning the accumulated Loss.Statistics.
    """
    stats = Loss.Statistics()
    model.eval()
    loss = Loss.LossCompute(model.generator, criterion)
    for src, tgt in valid_data:
        # BUGFIX: prepare_data takes (seqs_x, seqs_y); the stray third
        # positional argument raised TypeError on every validation pass.
        src, tgt, src_lengths = prepare_data(src, tgt)
        outputs = model(src, tgt[:-1], src_lengths)
        gen_state = loss.make_loss_batch(outputs, tgt[1:])
        _, batch_stats = loss.compute_loss(**gen_state)
        stats.update(batch_stats)
    model.train()
    return stats
def init_uniform(model, init_range=0.04):
    """Initialize every parameter uniformly in [-init_range, init_range]."""
    low, high = -init_range, init_range
    for param in model.parameters():
        param.data.uniform_(low, high)
def tally_parameters(model):
    """Print the total, encoder, and decoder(+generator) parameter counts."""
    n_params = sum(p.nelement() for p in model.parameters())
    print('* number of parameters: %d' % n_params)
    enc = 0
    dec = 0
    for name, param in model.named_parameters():
        if 'encoder' in name:
            enc += param.nelement()
        # BUGFIX: the original condition `'decoder' or 'generator' in name`
        # was always true ('decoder' is a truthy literal), so every
        # non-encoder parameter was counted as decoder.
        elif 'decoder' in name or 'generator' in name:
            dec += param.nelement()
    print('encoder: ', enc)
    print('decoder: ', dec)
def check_model_path():
    """Create the directory for opt.save_model if it does not exist yet."""
    model_dirname = os.path.dirname(os.path.abspath(opt.save_model))
    if not os.path.exists(model_dirname):
        os.makedirs(model_dirname)
def train(opt):
    """Full training loop: build iterators, build/restore the model, then
    alternate parameter updates with periodic validation (perplexity-driven
    LR decay) and BLEU-based checkpointing."""
    print('| build data iterators')
    train = TextIterator(*opt.datasets, *opt.dicts,
                         src_vocab_size=opt.src_vocab_size,
                         tgt_vocab_size=opt.tgt_vocab_size,
                         batch_size=opt.batch_size,
                         max_seq_length=opt.max_seq_length)
    valid = TextIterator(*opt.valid_datasets, *opt.dicts,
                         src_vocab_size=opt.src_vocab_size,
                         tgt_vocab_size=opt.tgt_vocab_size,
                         batch_size=opt.batch_size,
                         max_seq_length=opt.max_seq_length)
    # a negative vocab size means "use the full dictionary"
    if opt.src_vocab_size < 0:
        opt.src_vocab_size = len(train.source_dict)
    if opt.tgt_vocab_size < 0:
        opt.tgt_vocab_size = len(train.target_dict)
    print('| vocabulary size. source = %d; target = %d' %
          (opt.src_vocab_size, opt.tgt_vocab_size))
    dicts = [train.source_dict, train.target_dict]
    crit = Loss.nmt_criterion(opt.tgt_vocab_size, 0).to(device)
    if opt.train_from != '':
        print('| Load trained model!')
        checkpoint = torch.load(opt.train_from)
        model = models.make_base_model(opt, checkpoint)
    else:
        model = models.make_base_model(opt)
        init_uniform(model)
    model.to(device)
    if opt.encoder_type in ["sabrnn", "fabrnn"]:
        # attention-based encoders mask punctuation positions
        print('Add punctuation constrain!')
        model.encoder.punct(train.src_punct)
    print(model)
    model.dicts = dicts
    check_model_path()
    tally_parameters(model)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)
    # decay LR whenever validation perplexity stops improving
    scheduler = ReduceLROnPlateau(optimizer, 'min',
                                  factor=opt.learning_rate_decay,
                                  patience=0)
    uidx = 0  # number of updates
    estop = False
    # stop once the LR has decayed 5 times past the initial value
    min_lr = opt.learning_rate * math.pow(opt.learning_rate_decay, 5)
    best_bleu = -1
    for eidx in range(1, opt.epochs + 1):
        closs = Loss.LossCompute(model.generator, crit)
        tot_loss = 0
        total_stats = Loss.Statistics()
        report_stats = Loss.Statistics()
        for x, y in train:
            model.zero_grad()
            src, tgt, lengths_x = prepare_data(x, y)
            # teacher forcing: feed tgt[:-1], predict tgt[1:]
            out = model(src, tgt[:-1], lengths_x)
            gen_state = closs.make_loss_batch(out, tgt[1:])
            shard_size = opt.max_generator_batches
            batch_size = len(lengths_x)
            batch_stats = Loss.Statistics()
            # sharding bounds generator memory; gradients accumulate
            for shard in Loss.shards(gen_state, shard_size):
                loss, stats = closs.compute_loss(**shard)
                loss.div(batch_size).backward()
                batch_stats.update(stats)
                tot_loss += loss.item()
            torch.nn.utils.clip_grad_norm_(model.parameters(),
                                           opt.max_grad_norm)
            optimizer.step()
            total_stats.update(batch_stats)
            report_stats.update(batch_stats)
            uidx += 1
            if uidx % opt.report_every == 0:
                report_stats.output(eidx, uidx, opt.max_updates,
                                    total_stats.start_time)
                report_stats = Loss.Statistics()
            if uidx % opt.eval_every == 0:
                valid_stats = eval(model, crit, valid)
                # maybe adjust learning rate
                scheduler.step(valid_stats.ppl())
                cur_lr = optimizer.param_groups[0]['lr']
                print('Validation perplexity %d: %g' %
                      (uidx, valid_stats.ppl()))
                print('Learning rate: %g' % cur_lr)
                if cur_lr < min_lr:
                    print('Reaching minimum learning rate. Stop training!')
                    estop = True
                    break
                model_state_dict = model.state_dict()
                if eidx >= opt.start_checkpoint_at:
                    checkpoint = {
                        'model': model_state_dict,
                        'opt': opt,
                        'dicts': dicts
                    }
                    # evaluate with BLEU score
                    inference = Beam(opt, model)
                    output_bpe = opt.save_model + '.bpe'
                    output_txt = opt.save_model + '.txt'
                    inference.translate(opt.valid_datasets[0], output_bpe)
                    model.train()
                    # strip BPE joins from hypothesis and reference,
                    # then score with the multi-bleu perl script
                    subprocess.call("sed 's/@@ //g' {:s} > {:s}"
                                    .format(output_bpe, output_txt),
                                    shell=True)
                    ref = opt.valid_datasets[1][:-4]
                    subprocess.call("sed 's/@@ //g' {:s} > {:s}"
                                    .format(opt.valid_datasets[1], ref),
                                    shell=True)
                    cmd = "perl data/multi-bleu.perl {} < {}" \
                        .format(ref, output_txt)
                    p = subprocess.Popen(cmd,
                                         shell=True,
                                         stdout=subprocess.PIPE) \
                        .stdout.read().decode('utf-8')
                    bleu = re.search("[\d]+.[\d]+", p)
                    bleu = float(bleu.group())
                    print('Validation BLEU %d: %g' % (uidx, bleu))
                    if bleu > best_bleu:
                        best_bleu = bleu
                        torch.save(checkpoint, '%s_best.pt' % opt.save_model)
                        print('Saved model: %d | BLEU %.2f' % (uidx, bleu))
            if uidx >= opt.max_updates:
                print('Finishing after {:d} iterations!'.format(uidx))
                estop = True
                break
        if estop:
            break
train(opt)
| 8,797 | 36.598291 | 77 | py |
cogcn | cogcn-main/cogcn/utils.py | import pickle as pkl
import os
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
import pandas as pd
from sklearn.metrics import roc_auc_score, average_precision_score
from matplotlib import pyplot as plt
def load_data_cma(dataset):
    """Load a CMA dataset directory: struct.csv (adjacency) and
    content.csv (node features).

    Returns (adj, features): a scipy sparse adjacency matrix and a
    FloatTensor of node features.
    """
    adj_path = os.path.join(dataset, "struct.csv")
    feat_path = os.path.join(dataset, "content.csv")
    # adjacency: dense csv -> networkx graph -> sparse matrix
    raw_adj = pd.read_csv(adj_path, header=None).values
    graph = nx.from_numpy_matrix(raw_adj)
    adj = nx.adjacency_matrix(graph)
    print("Adjacency matrix shape:", adj.shape)
    # node features straight into a float tensor
    features = torch.FloatTensor(pd.read_csv(feat_path, header=None).values)
    print("Features shape:", features.shape)
    return adj, features
def parse_index_file(filename):
    """Read one integer per line from *filename* and return them as a list."""
    with open(filename) as fh:
        return [int(line.strip()) for line in fh]
def sparse_to_tuple(sparse_mx):
    """Decompose a scipy sparse matrix into (coords, values, shape)."""
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    # (nnz, 2) array of (row, col) index pairs
    coords = np.vstack((coo.row, coo.col)).transpose()
    return coords, coo.data, coo.shape
def preprocess_graph(adj):
    """Symmetrically normalize the graph: D^-1/2 (A + I) D^-1/2,
    returned as a torch sparse tensor."""
    adj = sp.coo_matrix(adj)
    # add self-loops before normalizing
    adj_self = adj + sp.eye(adj.shape[0])
    degrees = np.array(adj_self.sum(1))
    d_inv_sqrt = sp.diags(np.power(degrees, -0.5).flatten())
    normalized = adj_self.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
    # return sparse_to_tuple(normalized)
    return sparse_mx_to_torch_sparse_tensor(normalized)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    idx = np.vstack((coo.row, coo.col)).astype(np.int64)
    return torch.sparse.FloatTensor(torch.from_numpy(idx),
                                    torch.from_numpy(coo.data),
                                    torch.Size(coo.shape))
def plot_losses(losses, epoch_mark):
    """Plot the four loss columns of *losses* in a 2x2 grid, with vertical
    markers at epoch_mark and 2*epoch_mark (phase boundaries)."""
    for idx in range(4):
        plt.subplot(2, 2, idx + 1)
        plt.plot(losses[:, idx])
        plt.axvline(epoch_mark, color='r')
        plt.axvline(epoch_mark * 2, color='g')
    plt.show()
| 2,240 | 29.69863 | 95 | py |
cogcn | cogcn-main/cogcn/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import GraphConvolution
class GCNAE(nn.Module):
    """Graph-convolutional autoencoder: two GCN layers encode node
    features into an embedding, two more decode them back."""
    def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout):
        super(GCNAE, self).__init__()
        identity = lambda x: x  # linear output layers: no activation
        self.encgc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu)
        self.encgc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=identity)
        self.decgc1 = GraphConvolution(hidden_dim2, hidden_dim1, dropout, act=F.relu)
        self.decgc2 = GraphConvolution(hidden_dim1, input_feat_dim, dropout, act=identity)

    def encode(self, x, adj):
        """Map node features to the latent embedding."""
        return self.encgc2(self.encgc1(x, adj), adj)

    def decode(self, hidden, adj):
        """Reconstruct node features from the latent embedding."""
        return self.decgc2(self.decgc1(hidden, adj), adj)

    def forward(self, x, adj):
        """Return (reconstruction, embedding)."""
        enc = self.encode(x, adj)
        return self.decode(enc, adj), enc
class InnerProductDecoder(nn.Module):
    """Decoder for using inner product for prediction."""
    def __init__(self, dropout, act=torch.sigmoid):
        super(InnerProductDecoder, self).__init__()
        self.dropout = dropout
        self.act = act

    def forward(self, z):
        """Predict an adjacency matrix as act(Z Z^T) on dropped-out Z."""
        dropped = F.dropout(z, self.dropout, training=self.training)
        return self.act(torch.mm(dropped, dropped.t()))
| 1,415 | 31.930233 | 93 | py |
cogcn | cogcn-main/cogcn/kmeans.py | import sys
import torch
import torch.nn as nn
from sklearn.cluster import KMeans
class Clustering(object):
    """K-means wrapper that supplies cluster assignments and a
    differentiable within-cluster distance loss for embeddings."""

    def __init__(self, K, n_init=5, max_iter=250):
        self.K = K                # number of clusters
        self.n_init = n_init      # k-means restarts
        self.max_iter = max_iter  # k-means iteration cap
        self.u = None             # dict: label -> center FloatTensor
        self.M = None             # per-node cluster labels

    def cluster(self, embed):
        """Run k-means on the (detached) embeddings; store labels and centers."""
        embed_np = embed.detach().cpu().numpy()
        clustering = KMeans(n_clusters=self.K, n_init=self.n_init, max_iter=self.max_iter)
        clustering.fit(embed_np)
        self.M = clustering.labels_
        self.u = self._compute_centers(self.M, embed_np)

    def get_loss(self, embed):
        """Sum of squared distances from each embedding to its cluster center.

        Returns a 1-element tensor (same shape as before) through which
        gradients flow into *embed*; the centers act as constants.
        FIX: the original per-node Python loop (its own TODO flagged it as
        inefficient) is replaced by one vectorized reduction, and the
        centers are moved to embed's device/dtype so a GPU embedding no
        longer hits a cpu/cuda mismatch.
        """
        centers = torch.stack([self.u[lbl] for lbl in self.M]) \
                       .to(embed.device, embed.dtype)
        diff = embed - centers
        return diff.pow(2).sum().reshape(1)

    def get_membership(self):
        """Return the k-means labels from the last cluster() call."""
        return self.M

    def _compute_centers(self, labels, embed):
        """
        sklearn kmeans may not give accurate cluster centers in some cases (see doc), so we compute ourselves
        """
        buckets = {}
        for i, lbl in enumerate(labels):
            buckets.setdefault(lbl, []).append(torch.FloatTensor(embed[i]))
        return {lbl: torch.mean(torch.stack(vecs), 0)
                for lbl, vecs in buckets.items()}
cogcn | cogcn-main/cogcn/layers.py | import torch
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """
    def __init__(self, in_features, out_features, dropout=0., act=F.relu):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.dropout = dropout
        self.act = act
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        self.reset_parameters()

    def reset_parameters(self):
        # Glorot/Xavier initialization of the projection matrix
        torch.nn.init.xavier_uniform_(self.weight)

    def forward(self, input, adj):
        """One propagation step: act(A @ (dropout(X) @ W))."""
        dropped = F.dropout(input, self.dropout, self.training)
        projected = torch.mm(dropped, self.weight)
        aggregated = torch.spmm(adj, projected)
        return self.act(aggregated)

    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__,
                                      self.in_features, self.out_features)
cogcn | cogcn-main/cogcn/train.py | from __future__ import division
from __future__ import print_function
import argparse
import time
import sys
import os
import pickle
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
from torch import optim
from matplotlib import pyplot as plt
from model import GCNAE
from optimizer import compute_structure_loss, compute_attribute_loss, update_o1, update_o2
from utils import load_data_cma, preprocess_graph, plot_losses
from kmeans import Clustering
# Command-line configuration for the CO-GCN trainer: model sizes, loss
# weights (lambda1..3), epoch counts for the three training phases, and
# dataset/output paths.
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='gcn_vae', help="models used")
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
# 5000 epochs is where the pretrain loss starts hovering around 5.0 - 6.0
parser.add_argument('--k', type=int, default=6, help='Number of clusters.')
parser.add_argument('--preepochs', type=int, default=350, help='Number of epochs to pre-train.')
parser.add_argument('--clusepochs', type=int, default=1, help='Number of epochs to pre-train for clustering.')
parser.add_argument('--epochs', type=int, default=300, help='Number of epochs to train.')
parser.add_argument('--hidden1', type=int, default=64, help='Number of units in hidden layer 1.')
parser.add_argument('--hidden2', type=int, default=32, help='Number of units in hidden layer 2.')
parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')
parser.add_argument('--lambda1', type=float, default=0.1, help='Structure loss weight.')
parser.add_argument('--lambda2', type=float, default=0.1, help='Attribute loss weight.')
parser.add_argument('--lambda3', type=float, default=0.8, help='Clustering loss weight.')
parser.add_argument('--dropout', type=float, default=0.2, help='Dropout rate (1 - keep probability).')
parser.add_argument('--dataset-str', type=str, default=None, help='type of dataset.')
parser.add_argument('--outfile', type=str, default='embeddings', help='output embeddings file.')
args = parser.parse_args()
def gae_for(args):
print("Using {} dataset".format(args.dataset_str))
adj, features = load_data_cma(args.dataset_str)
n_nodes, feat_dim = features.shape
# Some preprocessing
adj_norm = preprocess_graph(adj)
model = GCNAE(feat_dim, args.hidden1, args.hidden2, args.dropout)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
gamma = 0.98
schedule_update_interval = 400
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=gamma)
# initialize all outlier scores with the equal values summing to 1
init_value = [1./n_nodes] * n_nodes
o_1 = torch.FloatTensor(init_value) # structural outlier
o_2 = torch.FloatTensor(init_value) # attribute outlier
lossfn = nn.MSELoss(reduction='none')
optimizer = optim.Adam(model.parameters(), lr=args.lr)
lambda1 = args.lambda1 / (args.lambda1 + args.lambda2)
lambda2 = args.lambda2 / (args.lambda1 + args.lambda2)
kmeans = Clustering(args.k)
# PRETRAIN ON STRUCTURE AND ATTRIBUTE LOSSES, NO OUTLIER LOSS
for epoch in range(args.preepochs):
model.train()
optimizer.zero_grad()
recon, embed = model(features, adj_norm)
structure_loss = compute_structure_loss(adj_norm, embed, o_1)
attribute_loss = compute_attribute_loss(lossfn, features, recon, o_2)
loss = lambda1 * structure_loss + lambda2 * attribute_loss
# Update the functions F and G (embedding network)
loss.backward()
cur_loss = loss.item()
optimizer.step()
if (epoch+1) % 100 == 0:
#print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(cur_loss), "time=", "{:.5f}".format(time.time() - t), "Pretrain:",pretrain)
print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(cur_loss), "lr=", "{:.5f}".format(scheduler.get_last_lr()[0]))
# Initialize clusters
recon, embed = model(features, adj_norm)
kmeans.cluster(embed)
# TRAIN ON ALL THREE LOSES WITH OUTLIER UPDATES
for epoch in range(args.epochs):
# Update the values of O_i1 and O_i2
o_1 = update_o1(adj_norm, embed)
o_2 = update_o2(features, recon)
if (epoch+1) % schedule_update_interval == 0:
scheduler.step()
model.train()
optimizer.zero_grad()
recon, embed = model(features, adj_norm)
kmeans.cluster(embed)
structure_loss = compute_structure_loss(adj_norm, embed, o_1)
attribute_loss = compute_attribute_loss(lossfn, features, recon, o_2)
clustering_loss = kmeans.get_loss(embed)
loss = (args.lambda1 * structure_loss) + (args.lambda2 * attribute_loss) + (args.lambda3 * clustering_loss)
#loss = (args.lambda1 * structure_loss) + (args.lambda2 * attribute_loss)
# Update the functions F and G (embedding network)
loss.backward()
cur_loss = loss.item()
optimizer.step()
if (epoch+1) % 100 == 0:
#print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(cur_loss), "time=", "{:.5f}".format(time.time() - t), "Pretrain:",pretrain)
print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(cur_loss), "lr=", "{:.5f}".format(scheduler.get_last_lr()[0]))
# Extract embeddings
adj_norm = preprocess_graph(adj)
recon, embed = model(features, adj_norm)
embed = embed.detach().cpu().numpy()
embfile = os.path.join(args.dataset_str, args.outfile+".pkl")
with open(embfile,"wb") as f:
pickle.dump(embed, f)
o_1 = o_1.detach().cpu().numpy()
o_2 = o_2.detach().cpu().numpy()
outlfile = os.path.join(args.dataset_str, args.outfile+"_outliers.pkl")
with open(outlfile,"wb") as f:
pickle.dump([o_1, o_2], f)
membfile = os.path.join(args.dataset_str, args.outfile+"_membership.pkl")
with open(membfile,"wb") as f:
pickle.dump(kmeans.get_membership(), f)
# Script entry point: run the full training pipeline using the module-level
# parsed command-line arguments.
if __name__ == '__main__':
    gae_for(args)
| 5,980 | 39.412162 | 157 | py |
cogcn | cogcn-main/cogcn/optimizer.py | import sys
import torch
import torch.nn as nn
import torch.nn.modules.loss
import torch.nn.functional as F
from sklearn.cluster import KMeans
def compute_attribute_loss(lossfn, features, recon, outlier_wt):
    """Attribute-reconstruction loss, down-weighting likely outlier nodes.

    Each node's elementwise reconstruction error (from ``lossfn`` with
    reduction='none') is summed over features and scaled by log(1/o_i2),
    so nodes with a high outlier score contribute less to the total.
    """
    per_node_err = lossfn(features, recon).sum(dim=1)
    confidence = torch.log(1.0 / outlier_wt)
    return (confidence * per_node_err).sum()
def compute_structure_loss(adj, embed, outlier_wt):
    """Structure-reconstruction loss, down-weighting likely outlier nodes.

    Reconstructs the adjacency matrix as the Gram matrix of the embeddings,
    sums each node's squared residual (A_ij - F(x_i).F(x_j)) over its row,
    and scales it by log(1/o_i1) so high-outlier nodes contribute less.
    """
    gram = embed @ embed.t()
    residual = adj.to_dense() - gram
    per_node_err = (residual * residual).sum(dim=1)
    confidence = torch.log(1.0 / outlier_wt)
    return (confidence * per_node_err).sum()
def update_o1(adj, embed):
    """Closed-form update of the structural outlier scores o_i1.

    Each node's score is its squared structure-reconstruction error
    (A_ij - F(x_i).F(x_j)), summed over its row and normalized so the
    scores sum to 1. ``.data`` detaches the embedding from autograd.
    """
    emb = embed.data
    gram = emb @ emb.t()
    residual = adj.to_dense() - gram
    per_node_err = (residual * residual).sum(dim=1)
    return per_node_err / per_node_err.sum()
def update_o2(features, recon):
    """Closed-form update of the attribute outlier scores o_i2.

    Each node's score is its squared attribute-reconstruction error
    (x - F(G(x)))^2, summed over features and normalized so the scores
    sum to 1. ``.data`` detaches both tensors from autograd.
    """
    diff = features.data - recon.data
    per_node_err = (diff * diff).sum(dim=1)
    return per_node_err / per_node_err.sum()
| 1,768 | 23.915493 | 64 | py |
deepglo | deepglo-master/DeepGLO/DeepGLO.py | from __future__ import print_function
import torch, h5py
import numpy as np
from scipy.io import loadmat
from torch.nn.utils import weight_norm
import torch.nn as nn
import torch.optim as optim
import numpy as np
# import matplotlib
from torch.autograd import Variable
import sys
import itertools
import torch.nn.functional as F
import copy
import os
import gc
from DeepGLO.data_loader import *
from sklearn.decomposition import NMF
use_cuda = True #### Assuming you have a GPU ######
from DeepGLO.utilities import *
from DeepGLO.LocalModel import *
from DeepGLO.metrics import *
import copy
import random
import pickle
# Fix all RNG seeds so runs are reproducible across numpy, torch (CPU and
# CUDA) and Python's random module.
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
def get_model(A, y, lamb=0):
    """Ridge-regularized least squares via the normal equations.

    Solves (A^T A + lamb * I) x = A^T y and returns the full
    numpy.linalg.lstsq result tuple (solution first).
    """
    n_features = A.shape[1]
    gram = A.T.dot(A) + lamb * np.identity(n_features)
    rhs = A.T.dot(y)
    return np.linalg.lstsq(gram, rhs, rcond=None)
class DeepGLO(object):
    """DeepGLO global forecasting model.

    Factorizes the series matrix as Ymat ~ F @ X (F: n x rank, X: rank x T),
    regularizes the factor series X with a temporal convolution network
    (self.Xseq), and trains a second, local per-series TCN (self.Yseq) that
    consumes the global reconstruction as a covariate.
    """

    def __init__(
        self,
        Ymat,
        vbsize=150,
        hbsize=256,
        num_channels_X=[32, 32, 32, 32, 1],
        num_channels_Y=[32, 32, 32, 32, 1],
        kernel_size=7,
        dropout=0.2,
        rank=64,
        kernel_size_Y=7,
        lr=0.0005,
        val_len=24,
        end_index=20000,
        normalize=False,
        start_date="2016-1-1",
        freq="H",
        covariates=None,
        use_time=True,
        dti=None,
        svd=False,
        period=None,
        forward_cov=False,
    ):
        """Set up data (optionally normalized), factors F/X (SVD-seeded or
        random), the TCN for X, and the batching data loader.

        Ymat is an n x T numpy array; `rank` is the factorization rank;
        `end_index` bounds the training+validation region; `period`, if set,
        adds a seasonal-lag covariate channel; `forward_cov` shifts the
        global-prediction covariate one step forward.
        """
        self.start_date = start_date
        self.freq = freq
        self.covariates = covariates
        self.use_time = use_time
        self.dti = dti
        self.dropout = dropout
        self.period = period
        self.forward_cov = forward_cov
        # TCN applied to the rank-many factor series in X (1 input channel).
        self.Xseq = TemporalConvNet(
            num_inputs=1,
            num_channels=num_channels_X,
            kernel_size=kernel_size,
            dropout=dropout,
            init=True,
        )
        if normalize:
            # Per-series standardization using training-region statistics,
            # then a global shift by `mini` so all values are non-negative.
            self.s = np.std(Ymat[:, 0:end_index], axis=1)
            # self.s[self.s == 0] = 1.0
            self.s += 1.0
            self.m = np.mean(Ymat[:, 0:end_index], axis=1)
            self.Ymat = (Ymat - self.m[:, None]) / self.s[:, None]
            self.mini = np.abs(np.min(self.Ymat))
            self.Ymat = self.Ymat + self.mini
        else:
            self.Ymat = Ymat
        self.normalize = normalize
        n, T = self.Ymat.shape
        t0 = end_index + 1
        if t0 > T:
            # Pad one repeated column so index end_index is always valid.
            self.Ymat = np.hstack([self.Ymat, self.Ymat[:, -1].reshape(-1, 1)])
        if svd:
            # Seed X with `rank` randomly chosen (scaled) series and solve a
            # regularized least-squares problem for F.
            indices = np.random.choice(self.Ymat.shape[0], rank, replace=False)
            X = self.Ymat[indices, 0:t0]
            mX = np.std(X, axis=1)
            mX[mX == 0] = 1.0
            X = X / mX[:, None]
            Ft = get_model(X.transpose(), self.Ymat[:, 0:t0].transpose(), lamb=0.1)
            F = Ft[0].transpose()
            self.X = torch.from_numpy(X).float()
            self.F = torch.from_numpy(F).float()
        else:
            # Random small-variance initialization of both factors.
            R = torch.zeros(rank, t0).float()
            X = torch.normal(R, 0.1)
            C = torch.zeros(n, rank).float()
            F = torch.normal(C, 0.1)
            self.X = X.float()
            self.F = F.float()
        self.vbsize = vbsize
        self.hbsize = hbsize
        self.num_channels_X = num_channels_X
        self.num_channels_Y = num_channels_Y
        self.kernel_size_Y = kernel_size_Y
        self.rank = rank
        self.kernel_size = kernel_size
        self.lr = lr
        self.val_len = val_len
        self.end_index = end_index
        self.D = data_loader(
            Ymat=self.Ymat,
            vbsize=vbsize,
            hbsize=hbsize,
            end_index=end_index,
            val_len=val_len,
            shuffle=False,
        )

    def tensor2d_to_temporal(self, T):
        """Reshape a 2-D (n, t) tensor to TCN layout (n, 1, t)."""
        T = T.view(1, T.size(0), T.size(1))
        T = T.transpose(0, 1)
        return T

    def temporal_to_tensor2d(self, T):
        """Inverse of tensor2d_to_temporal: (n, 1, t) -> (n, t)."""
        T = T.view(T.size(0), T.size(2))
        return T

    def calculate_newX_loss_vanilla(self, Xn, Fn, Yn, Xf, alpha):
        """Loss for recovering new X columns: a convex combination of the
        relative factorization error (F@Xn vs Yn) and the relative distance
        of Xn from the TCN forecast Xf, mixed by `alpha`."""
        Yout = torch.mm(Fn, Xn)
        # NOTE(review): cr1 (L1) is constructed but unused; only cr2 (MSE)
        # contributes to the loss.
        cr1 = nn.L1Loss()
        cr2 = nn.MSELoss()
        l1 = cr2(Yout, Yn) / torch.mean(Yn ** 2)
        l2 = cr2(Xn, Xf) / torch.mean(Xf ** 2)
        return (1 - alpha) * l1 + alpha * l2

    def recover_future_X(
        self,
        last_step,
        future,
        cpu=True,
        num_epochs=50,
        alpha=0.5,
        vanilla=True,
        tol=1e-7,
    ):
        """Optimize fresh X columns for the window [last_step, last_step+future)
        given observed Y there, anchored to the Xseq forecast. Returns the
        recovered (rank, future) tensor.
        """
        # rg = receptive field of the deeper of the two TCNs.
        rg = max(
            1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
            1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
        )
        X = self.X[:, last_step - rg : last_step]
        X = self.tensor2d_to_temporal(X)
        outX = self.predict_future(model=self.Xseq, inp=X, future=future, cpu=cpu)
        outX = self.temporal_to_tensor2d(outX)
        Xf = outX[:, -future::]
        Yn = self.Ymat[:, last_step : last_step + future]
        Yn = torch.from_numpy(Yn).float()
        if cpu:
            self.Xseq = self.Xseq.cpu()
        else:
            Yn = Yn.cuda()
            Xf = Xf.cuda()
        Fn = self.F
        Xt = torch.zeros(self.rank, future).float()
        Xn = torch.normal(Xt, 0.1)
        if not cpu:
            Xn = Xn.cuda()
        lprev = 0
        for i in range(num_epochs):
            # A fresh optimizer each step: effectively plain Adam-style
            # gradient updates on Xn until the loss change falls below tol.
            Xn = Variable(Xn, requires_grad=True)
            optim_Xn = optim.Adam(params=[Xn], lr=self.lr)
            optim_Xn.zero_grad()
            loss = self.calculate_newX_loss_vanilla(
                Xn, Fn.detach(), Yn.detach(), Xf.detach(), alpha
            )
            loss.backward()
            optim_Xn.step()
            # Xn = torch.clamp(Xn.detach(), min=0)
            if np.abs(lprev - loss.cpu().item()) <= tol:
                break
            if i % 1000 == 0:
                print("Recovery Loss: " + str(loss.cpu().item()))
            lprev = loss.cpu().item()
        # NOTE(review): Xseq is moved back to CUDA unconditionally, even when
        # cpu=True was requested — confirm intent on CPU-only machines.
        self.Xseq = self.Xseq.cuda()
        return Xn.detach()

    def step_factX_loss(self, inp, out, last_vindex, last_hindex, reg=0.0):
        """One alternating-minimization step on the X factor block aligned
        with the current data batch (Fout held fixed)."""
        Xout = self.X[:, last_hindex + 1 : last_hindex + 1 + out.size(2)]
        Fout = self.F[self.D.I[last_vindex : last_vindex + out.size(0)], :]
        if use_cuda:
            Xout = Xout.cuda()
            Fout = Fout.cuda()
        Xout = Variable(Xout, requires_grad=True)
        out = self.temporal_to_tensor2d(out)
        optim_X = optim.Adam(params=[Xout], lr=self.lr)
        Hout = torch.matmul(Fout, Xout)
        optim_X.zero_grad()
        loss = torch.mean(torch.pow(Hout - out.detach(), 2))
        # Regularizer scaled so reg is relative to the data-fit term.
        l2 = torch.mean(torch.pow(Xout, 2))
        r = loss.detach() / l2.detach()
        loss = loss + r * reg * l2
        loss.backward()
        optim_X.step()
        # Xout = torch.clamp(Xout, min=0)
        self.X[:, last_hindex + 1 : last_hindex + 1 + inp.size(2)] = Xout.cpu().detach()
        return loss

    def step_factF_loss(self, inp, out, last_vindex, last_hindex, reg=0.0):
        """One alternating-minimization step on the F factor block aligned
        with the current data batch (Xout held fixed)."""
        Xout = self.X[:, last_hindex + 1 : last_hindex + 1 + out.size(2)]
        Fout = self.F[self.D.I[last_vindex : last_vindex + out.size(0)], :]
        if use_cuda:
            Xout = Xout.cuda()
            Fout = Fout.cuda()
        Fout = Variable(Fout, requires_grad=True)
        optim_F = optim.Adam(params=[Fout], lr=self.lr)
        out = self.temporal_to_tensor2d(out)
        Hout = torch.matmul(Fout, Xout)
        optim_F.zero_grad()
        loss = torch.mean(torch.pow(Hout - out.detach(), 2))
        # Regularizer scaled so reg is relative to the data-fit term.
        l2 = torch.mean(torch.pow(Fout, 2))
        r = loss.detach() / l2.detach()
        loss = loss + r * reg * l2
        loss.backward()
        optim_F.step()
        self.F[
            self.D.I[last_vindex : last_vindex + inp.size(0)], :
        ] = Fout.cpu().detach()
        return loss

    def step_temporal_loss_X(self, inp, last_vindex, last_hindex):
        """Regularize X toward the one-step-ahead prediction of the (frozen)
        Xseq TCN over the current horizontal batch window."""
        Xin = self.X[:, last_hindex : last_hindex + inp.size(2)]
        Xout = self.X[:, last_hindex + 1 : last_hindex + 1 + inp.size(2)]
        # Freeze the TCN — only the factor values are updated here.
        for p in self.Xseq.parameters():
            p.requires_grad = False
        if use_cuda:
            Xin = Xin.cuda()
            Xout = Xout.cuda()
        Xin = Variable(Xin, requires_grad=True)
        Xout = Variable(Xout, requires_grad=True)
        optim_out = optim.Adam(params=[Xout], lr=self.lr)
        Xin = self.tensor2d_to_temporal(Xin)
        Xout = self.tensor2d_to_temporal(Xout)
        hatX = self.Xseq(Xin)
        optim_out.zero_grad()
        loss = torch.mean(torch.pow(Xout - hatX.detach(), 2))
        loss.backward()
        optim_out.step()
        # Xout = torch.clamp(Xout, min=0)
        temp = self.temporal_to_tensor2d(Xout.detach())
        self.X[:, last_hindex + 1 : last_hindex + 1 + inp.size(2)] = temp
        return loss

    def predict_future_batch(self, model, inp, future=10, cpu=True):
        """Autoregressively roll the model `future` steps for one batch:
        each step appends the model's last output column to the input.
        Returns a numpy array of shape (n, t + future)."""
        if cpu:
            model = model.cpu()
            inp = inp.cpu()
        else:
            inp = inp.cuda()
        out = model(inp)
        output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
        out = torch.cat((inp, output), dim=2)
        torch.cuda.empty_cache()
        for i in range(future - 1):
            inp = out
            out = model(inp)
            output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
            out = torch.cat((inp, output), dim=2)
            torch.cuda.empty_cache()
        out = self.temporal_to_tensor2d(out)
        out = np.array(out.cpu().detach())
        return out

    def predict_future(self, model, inp, future=10, cpu=True, bsize=90):
        """Batched wrapper around predict_future_batch: splits the n series
        into vertical chunks of `bsize` and stacks the rollouts."""
        n = inp.size(0)
        inp = inp.cpu()
        ids = np.arange(0, n, bsize)
        ids = list(ids) + [n]
        out = self.predict_future_batch(model, inp[ids[0] : ids[1], :, :], future, cpu)
        torch.cuda.empty_cache()
        for i in range(1, len(ids) - 1):
            temp = self.predict_future_batch(
                model, inp[ids[i] : ids[i + 1], :, :], future, cpu
            )
            torch.cuda.empty_cache()
            out = np.vstack([out, temp])
        out = torch.from_numpy(out).float()
        return self.tensor2d_to_temporal(out)

    def predict_global(
        self, ind, last_step=100, future=10, cpu=False, normalize=False, bsize=90
    ):
        """Global-model forecast: roll Xseq forward on X, then reconstruct
        Y = F @ X for the series indices `ind` (all series if None).
        If `normalize`, undo the training normalization."""
        if ind is None:
            ind = np.arange(self.Ymat.shape[0])
        if cpu:
            self.Xseq = self.Xseq.cpu()
        self.Xseq = self.Xseq.eval()
        # rg = receptive field of the deeper of the two TCNs.
        rg = max(
            1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
            1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
        )
        X = self.X[:, last_step - rg : last_step]
        n = X.size(0)
        T = X.size(1)
        X = self.tensor2d_to_temporal(X)
        outX = self.predict_future(
            model=self.Xseq, inp=X, future=future, cpu=cpu, bsize=bsize
        )
        outX = self.temporal_to_tensor2d(outX)
        F = self.F
        Y = torch.matmul(F, outX)
        Y = np.array(Y[ind, :].cpu().detach())
        # NOTE(review): Xseq is moved to CUDA unconditionally, even when
        # cpu=True was requested — confirm intent on CPU-only machines.
        self.Xseq = self.Xseq.cuda()
        del F
        torch.cuda.empty_cache()
        for p in self.Xseq.parameters():
            p.requires_grad = True
        if normalize:
            Y = Y - self.mini
            Y = Y * self.s[ind, None] + self.m[ind, None]
            return Y
        else:
            return Y

    def train_Xseq(self, Ymat, num_epochs=20, early_stop=False, tenacity=3):
        """Train the Xseq TCN on the (current) factor series via a LocalModel
        wrapper; replaces self.Xseq with the trained network."""
        seq = self.Xseq
        num_channels = self.num_channels_X
        kernel_size = self.kernel_size
        vbsize = min(self.vbsize, Ymat.shape[0] / 2)
        for p in seq.parameters():
            p.requires_grad = True
        TC = LocalModel(
            Ymat=Ymat,
            num_inputs=1,
            num_channels=num_channels,
            kernel_size=kernel_size,
            vbsize=vbsize,
            hbsize=self.hbsize,
            normalize=False,
            end_index=self.end_index - self.val_len,
            val_len=self.val_len,
            lr=self.lr,
            num_epochs=num_epochs,
        )
        TC.train_model(early_stop=early_stop, tenacity=tenacity)
        self.Xseq = TC.seq

    def train_factors(
        self,
        reg_X=0.0,
        reg_F=0.0,
        mod=5,
        early_stop=False,
        tenacity=3,
        ind=None,
        seed=False,
    ):
        """Alternating training of F and X (and, unless `seed`, the temporal
        regularization of X), with validation-based early stopping on the
        global forecast. NOTE(review): relies on self.num_epochs being set by
        the caller (train_all_models) before invocation.
        """
        self.D.epoch = 0
        self.D.vindex = 0
        self.D.hindex = 0
        if use_cuda:
            self.Xseq = self.Xseq.cuda()
        for p in self.Xseq.parameters():
            p.requires_grad = True
        l_F = [0.0]
        l_X = [0.0]
        l_X_temporal = [0.0]
        iter_count = 0
        vae = float("inf")
        scount = 0
        Xbest = self.X.clone()
        Fbest = self.F.clone()
        while self.D.epoch < self.num_epochs:
            last_epoch = self.D.epoch
            last_vindex = self.D.vindex
            last_hindex = self.D.hindex
            inp, out, vindex, hindex = self.D.next_batch(option=1)
            if use_cuda:
                inp = inp.float().cuda()
                out = out.float().cuda()
            # `iter_count % mod >= 0` is always true: F and X steps run every
            # iteration; the temporal step runs once per `mod` iterations.
            if iter_count % mod >= 0:
                l1 = self.step_factF_loss(inp, out, last_vindex, last_hindex, reg=reg_F)
                l_F = l_F + [l1.cpu().item()]
            if iter_count % mod >= 0:
                l1 = self.step_factX_loss(inp, out, last_vindex, last_hindex, reg=reg_X)
                l_X = l_X + [l1.cpu().item()]
            if seed == False and iter_count % mod == 1:
                l2 = self.step_temporal_loss_X(inp, last_vindex, last_hindex)
                l_X_temporal = l_X_temporal + [l2.cpu().item()]
            iter_count = iter_count + 1
            if self.D.epoch > last_epoch:
                # End of data epoch: report losses and validate globally.
                print("Entering Epoch# ", self.D.epoch)
                print("Factorization Loss F: ", np.mean(l_F))
                print("Factorization Loss X: ", np.mean(l_X))
                print("Temporal Loss X: ", np.mean(l_X_temporal))
                if ind is None:
                    ind = np.arange(self.Ymat.shape[0])
                else:
                    ind = ind
                inp = self.predict_global(
                    ind,
                    last_step=self.end_index - self.val_len,
                    future=self.val_len,
                    cpu=False,
                )
                R = self.Ymat[ind, self.end_index - self.val_len : self.end_index]
                S = inp[:, -self.val_len : :]
                # Normalized mean absolute error on the validation window.
                ve = np.abs(R - S).mean() / np.abs(R).mean()
                print("Validation Loss (Global): ", ve)
                if ve <= vae:
                    vae = ve
                    scount = 0
                    Xbest = self.X.clone()
                    Fbest = self.F.clone()
                    # Xseqbest = TemporalConvNet(
                    #     num_inputs=1,
                    #     num_channels=self.num_channels_X,
                    #     kernel_size=self.kernel_size,
                    #     dropout=self.dropout,
                    # )
                    # Xseqbest.load_state_dict(self.Xseq.state_dict())
                    # Deep copy of the whole network snapshot.
                    Xseqbest = pickle.loads(pickle.dumps(self.Xseq))
                else:
                    scount += 1
                    if scount > tenacity and early_stop:
                        print("Early Stopped")
                        self.X = Xbest
                        self.F = Fbest
                        self.Xseq = Xseqbest
                        if use_cuda:
                            self.Xseq = self.Xseq.cuda()
                        break

    def create_Ycov(self):
        """Build the per-series covariate tensor for the local model: the
        global reconstruction F @ Xseq(X) over 0..end_index (optionally
        shifted forward) plus, if `period` is set, a seasonal-lag channel.
        Returns an (n, 1 or 2, t0) numpy array."""
        t0 = self.end_index + 1
        self.D.epoch = 0
        self.D.vindex = 0
        self.D.hindex = 0
        Ycov = copy.deepcopy(self.Ymat[:, 0:t0])
        Ymat_now = self.Ymat[:, 0:t0]
        if use_cuda:
            self.Xseq = self.Xseq.cuda()
        self.Xseq = self.Xseq.eval()
        while self.D.epoch < 1:
            last_epoch = self.D.epoch
            last_vindex = self.D.vindex
            last_hindex = self.D.hindex
            inp, out, vindex, hindex = self.D.next_batch(option=1)
            if use_cuda:
                inp = inp.cuda()
            Xin = self.tensor2d_to_temporal(self.X[:, last_hindex : last_hindex + inp.size(2)]).cuda()
            Xout = self.temporal_to_tensor2d(self.Xseq(Xin)).cpu()
            Fout = self.F[self.D.I[last_vindex : last_vindex + out.size(0)], :]
            output = np.array(torch.matmul(Fout, Xout).detach())
            Ycov[
                last_vindex : last_vindex + output.shape[0],
                last_hindex + 1 : last_hindex + 1 + output.shape[1],
            ] = output
        for p in self.Xseq.parameters():
            p.requires_grad = True
        if self.period is None:
            Ycov_wc = np.zeros(shape=[Ycov.shape[0], 1, Ycov.shape[1]])
            if self.forward_cov:
                Ycov_wc[:, 0, 0:-1] = Ycov[:, 1::]
            else:
                Ycov_wc[:, 0, :] = Ycov
        else:
            Ycov_wc = np.zeros(shape=[Ycov.shape[0], 2, Ycov.shape[1]])
            if self.forward_cov:
                Ycov_wc[:, 0, 0:-1] = Ycov[:, 1::]
            else:
                Ycov_wc[:, 0, :] = Ycov
            # Second channel: the series lagged by one period (seasonality).
            Ycov_wc[:, 1, self.period - 1 : :] = Ymat_now[:, 0 : -(self.period - 1)]
        return Ycov_wc

    def train_Yseq(self, num_epochs=20, early_stop=False, tenacity=7):
        """Train the local per-series model (Yseq) on Ymat with the global
        reconstruction supplied as a covariate."""
        Ycov = self.create_Ycov()
        self.Yseq = LocalModel(
            self.Ymat,
            num_inputs=1,
            num_channels=self.num_channels_Y,
            kernel_size=self.kernel_size_Y,
            dropout=self.dropout,
            vbsize=self.vbsize,
            hbsize=self.hbsize,
            num_epochs=num_epochs,
            lr=self.lr,
            val_len=self.val_len,
            test=True,
            end_index=self.end_index - self.val_len,
            normalize=False,
            start_date=self.start_date,
            freq=self.freq,
            covariates=self.covariates,
            use_time=self.use_time,
            dti=self.dti,
            Ycov=Ycov,
        )
        self.Yseq.train_model(early_stop=early_stop, tenacity=tenacity)

    def train_all_models(
        self, init_epochs=100, alt_iters=10, y_iters=200, tenacity=7, mod=5
    ):
        """Full training schedule: factor initialization, then alternation
        between factor training and Xseq training, then local-model training.
        """
        print("Initializing Factors.....")
        self.num_epochs = init_epochs
        self.train_factors()
        if alt_iters % 2 == 1:
            alt_iters += 1
        print("Starting Alternate Training.....")
        for i in range(1, alt_iters):
            if i % 2 == 0:
                print(
                    "--------------------------------------------Training Factors. Iter#: "
                    + str(i)
                    + "-------------------------------------------------------"
                )
                self.num_epochs = 300
                self.train_factors(
                    seed=False, early_stop=True, tenacity=tenacity, mod=mod
                )
            else:
                print(
                    "--------------------------------------------Training Local Model. Iter#: "
                    + str(i)
                    + "-------------------------------------------------------"
                )
                self.num_epochs = 300
                T = np.array(self.X.cpu().detach())
                self.train_Xseq(
                    Ymat=T,
                    num_epochs=self.num_epochs,
                    early_stop=True,
                    tenacity=tenacity,
                )
        self.num_epochs = y_iters
        self.train_Yseq(num_epochs=y_iters, early_stop=True, tenacity=tenacity)

    def predict(
        self, ind=None, last_step=100, future=10, cpu=False, normalize=False, bsize=90
    ):
        """Final (local-model) forecast for series `ind` over
        [last_step, last_step + future), using the global forecast (and the
        optional seasonal lag) as covariates for Yseq."""
        if ind is None:
            ind = np.arange(self.Ymat.shape[0])
        if cpu:
            self.Xseq = self.Xseq.cpu()
        self.Yseq.seq = self.Yseq.seq.eval()
        self.Xseq = self.Xseq.eval()
        # rg = receptive field of the deeper of the two TCNs.
        rg = max(
            1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels_X) - 1),
            1 + 2 * (self.kernel_size_Y - 1) * 2 ** (len(self.num_channels_Y) - 1),
        )
        covs = self.Yseq.covariates[:, last_step - rg : last_step + future]
        # print(covs.shape)
        yc = self.predict_global(
            ind=ind,
            last_step=last_step,
            future=future,
            cpu=cpu,
            normalize=False,
            bsize=bsize,
        )
        if self.period is None:
            ycovs = np.zeros(shape=[yc.shape[0], 1, yc.shape[1]])
            if self.forward_cov:
                ycovs[:, 0, 0:-1] = yc[:, 1::]
            else:
                ycovs[:, 0, :] = yc
        else:
            ycovs = np.zeros(shape=[yc.shape[0], 2, yc.shape[1]])
            if self.forward_cov:
                ycovs[:, 0, 0:-1] = yc[:, 1::]
            else:
                ycovs[:, 0, :] = yc
            period = self.period
            while last_step + future - (period - 1) > last_step + 1:
                period += self.period
            ycovs[:, 1, period - 1 : :] = self.Ymat[
                :, last_step - rg : last_step + future - (period - 1)
            ]  ### this seems like we are looking ahead, but it will not use the last coordinate, which is the only new point added
        # print(ycovs.shape)
        Y = self.Yseq.predict_future(
            data_in=self.Ymat[ind, last_step - rg : last_step],
            covariates=covs,
            ycovs=ycovs,
            future=future,
            cpu=cpu,
            bsize=bsize,
            normalize=False,
        )
        if normalize:
            Y = Y - self.mini
            Y = Y * self.s[ind, None] + self.m[ind, None]
            return Y
        else:
            return Y

    def rolling_validation(self, Ymat, tau=24, n=7, bsize=90, cpu=False, alpha=0.3):
        """Rolling-origin evaluation over n windows of length tau: after each
        window, recover new X columns from the observed data and roll
        forward. Restores self.X and self.end_index before returning a dict
        of error metrics (local and global) plus a lag-baseline."""
        prevX = self.X.clone()
        prev_index = self.end_index
        out = self.predict(
            last_step=self.end_index,
            future=tau,
            bsize=bsize,
            cpu=cpu,
            normalize=self.normalize,
        )
        out_global = self.predict_global(
            np.arange(self.Ymat.shape[0]),
            last_step=self.end_index,
            future=tau,
            cpu=cpu,
            normalize=self.normalize,
            bsize=bsize,
        )
        predicted_values = []
        actual_values = []
        predicted_values_global = []
        S = out[:, -tau::]
        S_g = out_global[:, -tau::]
        predicted_values += [S]
        predicted_values_global += [S_g]
        R = Ymat[:, self.end_index : self.end_index + tau]
        actual_values += [R]
        print("Current window wape: " + str(wape(S, R)))
        self.Xseq = self.Xseq.eval()
        self.Yseq.seq = self.Yseq.seq.eval()
        for i in range(n - 1):
            Xn = self.recover_future_X(
                last_step=self.end_index + 1,
                future=tau,
                num_epochs=100000,
                alpha=alpha,
                vanilla=True,
                cpu=True,
            )
            self.X = torch.cat([self.X, Xn], dim=1)
            self.end_index += tau
            out = self.predict(
                last_step=self.end_index,
                future=tau,
                bsize=bsize,
                cpu=cpu,
                normalize=self.normalize,
            )
            out_global = self.predict_global(
                np.arange(self.Ymat.shape[0]),
                last_step=self.end_index,
                future=tau,
                cpu=cpu,
                normalize=self.normalize,
                bsize=bsize,
            )
            S = out[:, -tau::]
            S_g = out_global[:, -tau::]
            predicted_values += [S]
            predicted_values_global += [S_g]
            R = Ymat[:, self.end_index : self.end_index + tau]
            actual_values += [R]
            print("Current window wape: " + str(wape(S, R)))
        predicted = np.hstack(predicted_values)
        predicted_global = np.hstack(predicted_values_global)
        actual = np.hstack(actual_values)
        dic = {}
        dic["wape"] = wape(predicted, actual)
        dic["mape"] = mape(predicted, actual)
        dic["smape"] = smape(predicted, actual)
        dic["mae"] = np.abs(predicted - actual).mean()
        dic["rmse"] = np.sqrt(((predicted - actual) ** 2).mean())
        dic["nrmse"] = dic["rmse"] / np.sqrt(((actual) ** 2).mean())
        dic["wape_global"] = wape(predicted_global, actual)
        dic["mape_global"] = mape(predicted_global, actual)
        dic["smape_global"] = smape(predicted_global, actual)
        dic["mae_global"] = np.abs(predicted_global - actual).mean()
        dic["rmse_global"] = np.sqrt(((predicted_global - actual) ** 2).mean())
        # NOTE(review): this uses dic["rmse"], not dic["rmse_global"] —
        # likely a bug; confirm whether nrmse_global should be based on the
        # global RMSE.
        dic["nrmse_global"] = dic["rmse"] / np.sqrt(((actual) ** 2).mean())
        # Baseline: the same-length window shifted back by tau (lag baseline).
        baseline = Ymat[:, Ymat.shape[1] - n * tau - tau : Ymat.shape[1] - tau]
        dic["baseline_wape"] = wape(baseline, actual)
        dic["baseline_mape"] = mape(baseline, actual)
        dic["baseline_smape"] = smape(baseline, actual)
        self.X = prevX
        self.end_index = prev_index
        return dic
| 25,258 | 32.235526 | 131 | py |
deepglo | deepglo-master/DeepGLO/data_loader.py | import torch, h5py
import numpy as np
from scipy.io import loadmat
import torch.nn as nn
import torch.optim as optim
import numpy as np
# import matplotlib
from torch.autograd import Variable
import itertools
from sklearn.preprocessing import normalize
import datetime
import json
import os, sys
import pandas as pd
import pyarrow.parquet as pq
from DeepGLO.Ftree import *
class data_loader(object):
    """
    Data Loader Class for DeepGLO
    """

    def __init__(
        self,
        Ymat,
        covariates=None,
        Ycov=None,
        vbsize=200,
        hbsize=100,
        end_index=20000,
        val_len=30,
        shuffle=False,
    ):
        """
        Argeuments:
        Ymat: time-series matrix n*T
        covariates: global covariates common for all time series r*T, where r is the number of covariates
        Ycov: per time-series covariates n*l*T, l such covariates per time-series
        All of the above arguments are numpy arrays
        vbsize: vertical batch size
        hbsize: horizontal batch size
        end_index: training and validation set is only from 0:end_index
        val_len: validation length. The last 'val_len' time-points for every time-series is the validation set
        shuffle: data is shuffles if True (this is deprecated and set to False)
        """
        n, T = Ymat.shape
        # Cursor state: vertical index, horizontal index and epoch counter.
        self.vindex = 0
        self.hindex = 0
        self.epoch = 0
        self.vbsize = vbsize
        self.hbsize = hbsize
        self.Ymat = Ymat
        self.val_len = val_len
        self.end_index = end_index
        # Random starting row for the validation batch.
        self.val_index = np.random.randint(0, n - self.vbsize - 5)
        self.shuffle = shuffle
        # Row permutation (identity unless shuffled).
        self.I = np.array(range(n))
        self.covariates = covariates
        self.Ycov = Ycov

    def next_batch(self, option=1):
        """
        Arguments:
        option = 1 means data is returned as pytorch tensor of shape nd*cd*td where nd is vbsize, hb is hsize and cd is the number os channels (depends on covariates)
        option = 0 is deprecated
        Returns:
        inp: input batch
        out: one shifted output batch
        vindex: strating vertical index of input batch
        hindex: starting horizontal index of input batch
        """
        n, T = self.Ymat.shape
        # Advance the cursor: sweep horizontally first; when the horizontal
        # window would pass end_index, wrap to the next vertical block; when
        # the vertical cursor wraps too, one epoch is complete.
        if self.hindex + self.hbsize + 1 >= self.end_index:
            pr_hindex = self.hindex
            self.hindex = 0
            if self.vindex + self.vbsize >= n:
                pr_vindex = self.vindex
                self.vindex = 0
                self.epoch = self.epoch + 1
                if self.shuffle:
                    I = np.random.choice(n, n, replace=False)
                    self.I = I
                    self.Ymat = self.Ymat[self.I, :]
            else:
                pr_vindex = self.vindex
                self.vindex = self.vindex + self.vbsize
        else:
            pr_hindex = self.hindex
            self.hindex = self.hindex + self.hbsize
            pr_vindex = self.vindex
        data = self.Ymat[
            int(pr_vindex) : int(pr_vindex + self.vbsize),
            int(pr_hindex) : int(min(self.end_index, pr_hindex + self.hbsize)),
        ]
        # Target is the same window shifted one step ahead in time.
        out_data = self.Ymat[
            int(pr_vindex) : int(pr_vindex + self.vbsize),
            int(pr_hindex + 1) : int(min(self.end_index, pr_hindex + self.hbsize) + 1),
        ]
        nd, Td = data.shape
        if self.covariates is not None:
            covs = self.covariates[
                :, int(pr_hindex) : int(min(self.end_index, pr_hindex + self.hbsize))
            ]
            # Broadcast the global covariates to every series in the batch.
            rcovs = np.repeat(
                covs.reshape(1, covs.shape[0], covs.shape[1]), repeats=nd, axis=0
            )
        if self.Ycov is not None:
            ycovs = self.Ycov[
                int(pr_vindex) : int(pr_vindex + self.vbsize),
                :,
                int(pr_hindex) : int(min(self.end_index, pr_hindex + self.hbsize)),
            ]
        if option == 1:
            inp = torch.from_numpy(data).view(1, nd, Td)
            out = torch.from_numpy(out_data).view(1, nd, Td)
            if self.covariates is not None:
                rcovs = torch.from_numpy(rcovs).float()
            if self.Ycov is not None:
                ycovs = torch.from_numpy(ycovs).float()
            inp = inp.transpose(0, 1).float()
            # Covariates are stacked as extra channels on the input only.
            if self.covariates is not None:
                inp = torch.cat((inp, rcovs), 1)
            if self.Ycov is not None:
                inp = torch.cat((inp, ycovs), 1)
            out = out.transpose(0, 1).float()
        else:
            inp = torch.from_numpy(data).float()
            out = torch.from_numpy(out_data).float()

        # Replace NaNs with zeros so missing values do not poison training.
        inp[torch.isnan(inp)] = 0
        out[torch.isnan(out)] = 0
        return inp, out, self.vindex, self.hindex

    def supply_test(self, option=1):
        """
        Supplies validation set in the same format as above
        """
        n, T = self.Ymat.shape
        index = self.val_index
        in_data = self.Ymat[
            int(index) : int(index + self.vbsize),
            int(self.end_index) : int(self.end_index + self.val_len),
        ]
        out_data = self.Ymat[
            int(index) : int(index + self.vbsize),
            int(self.end_index + 1) : int(self.end_index + self.val_len + 1),
        ]
        nd, Td = in_data.shape
        if self.covariates is not None:
            covs = self.covariates[
                :, int(self.end_index) : int(self.end_index + self.val_len)
            ]
            rcovs = np.repeat(
                covs.reshape(1, covs.shape[0], covs.shape[1]), repeats=nd, axis=0
            )
        if self.Ycov is not None:
            ycovs = self.Ycov[
                int(index) : int(index + self.vbsize),
                :,
                int(self.end_index) : int(self.end_index + self.val_len),
            ]
        if option == 1:
            inp = torch.from_numpy(in_data).view(1, nd, Td)
            inp = inp.transpose(0, 1).float()
            if self.covariates is not None:
                rcovs = torch.from_numpy(rcovs).float()
            if self.Ycov is not None:
                ycovs = torch.from_numpy(ycovs).float()
            out = torch.from_numpy(out_data).view(1, nd, Td)
            if self.covariates is not None:
                inp = torch.cat((inp, rcovs), 1)
            if self.Ycov is not None:
                inp = torch.cat((inp, ycovs), 1)
            out = out.transpose(0, 1).float()
        else:
            inp = torch.from_numpy(in_data).float()
            out = torch.from_numpy(out_data).float()

        return inp, out, self.vindex, self.hindex
| 6,610 | 34.735135 | 167 | py |
deepglo | deepglo-master/DeepGLO/LocalModel.py | import torch, h5py
import numpy as np
from scipy.io import loadmat
from torch.nn.utils import weight_norm
import torch.nn as nn
import torch.optim as optim
import numpy as np
# import matplotlib
from torch.autograd import Variable
import itertools
import torch.nn.functional as F
from DeepGLO.data_loader import *
use_cuda = True #### Assuming you have a GPU ######
from DeepGLO.utilities import *
from DeepGLO.time import *
from DeepGLO.metrics import *
import random
import pickle
# Fix all RNG seeds so runs are reproducible across numpy, torch (CPU and
# CUDA) and Python's random module.
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
class Chomp1d(nn.Module):
    """Trim the trailing `chomp_size` steps that causal padding appends.

    Used after a padded Conv1d so the output length matches the input and
    no future time steps leak into the present.
    """

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # Drop the last chomp_size entries along the time axis; contiguous()
        # gives downstream ops a densely laid-out tensor.
        trimmed = x[:, :, : -self.chomp_size]
        return trimmed.contiguous()
class TemporalBlock(nn.Module):
    """One residual block of a temporal convolution network (TCN).

    Two weight-normalized dilated causal Conv1d layers, each followed by a
    Chomp1d (to remove the causal padding), ReLU and dropout, with a
    residual connection (1x1 conv when channel counts differ) and a final
    ReLU on the sum.
    """

    def __init__(
        self,
        n_inputs,
        n_outputs,
        kernel_size,
        stride,
        dilation,
        padding,
        dropout=0.1,
        init=True,
    ):
        super(TemporalBlock, self).__init__()
        self.kernel_size = kernel_size
        self.conv1 = weight_norm(
            nn.Conv1d(
                n_inputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        self.conv2 = weight_norm(
            nn.Conv1d(
                n_outputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        self.net = nn.Sequential(
            self.conv1,
            self.chomp1,
            self.relu1,
            self.dropout1,
            self.conv2,
            self.chomp2,
            self.relu2,
            self.dropout2,
        )
        # 1x1 conv aligns channel counts for the residual path when needed.
        self.downsample = (
            nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        )
        self.init = init
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """Initialize weights. With init=True, small-noise weights are
        shifted by 1/kernel_size so each conv starts close to a moving
        average of its input; otherwise Xavier-uniform is used."""
        if self.init:
            nn.init.normal_(self.conv1.weight, std=1e-3)
            nn.init.normal_(self.conv2.weight, std=1e-3)

            self.conv1.weight[:, 0, :] += (
                1.0 / self.kernel_size
            )  ###new initialization scheme
            self.conv2.weight += 1.0 / self.kernel_size  ###new initialization scheme

            nn.init.normal_(self.conv1.bias, std=1e-6)
            nn.init.normal_(self.conv2.bias, std=1e-6)
        else:
            nn.init.xavier_uniform_(self.conv1.weight)
            nn.init.xavier_uniform_(self.conv2.weight)

        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.1)

    def forward(self, x):
        """Residual forward pass: ReLU(net(x) + skip(x))."""
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)
class TemporalBlock_last(nn.Module):
    """Final residual block of the TCN: same structure as TemporalBlock but
    with NO nonlinearities — the Sequential omits the ReLUs and the residual
    sum is returned without activation, so the network's output is linear in
    the last block. (relu1/relu2/relu are constructed but intentionally not
    applied.)
    """

    def __init__(
        self,
        n_inputs,
        n_outputs,
        kernel_size,
        stride,
        dilation,
        padding,
        dropout=0.2,
        init=True,
    ):
        super(TemporalBlock_last, self).__init__()
        self.kernel_size = kernel_size
        self.conv1 = weight_norm(
            nn.Conv1d(
                n_inputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        self.conv2 = weight_norm(
            nn.Conv1d(
                n_outputs,
                n_outputs,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            )
        )
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        # Note: no ReLUs in this Sequential — the last block stays linear.
        self.net = nn.Sequential(
            self.conv1,
            self.chomp1,
            self.dropout1,
            self.conv2,
            self.chomp2,
            self.dropout2,
        )
        # 1x1 conv aligns channel counts for the residual path when needed.
        self.downsample = (
            nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        )
        self.init = init
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """Initialize weights. With init=True, small-noise weights are
        shifted by 1/kernel_size so each conv starts close to a moving
        average of its input; otherwise Xavier-uniform is used."""
        if self.init:
            nn.init.normal_(self.conv1.weight, std=1e-3)
            nn.init.normal_(self.conv2.weight, std=1e-3)

            self.conv1.weight[:, 0, :] += (
                1.0 / self.kernel_size
            )  ###new initialization scheme
            self.conv2.weight += 1.0 / self.kernel_size  ###new initialization scheme

            nn.init.normal_(self.conv1.bias, std=1e-6)
            nn.init.normal_(self.conv2.bias, std=1e-6)
        else:
            nn.init.xavier_uniform_(self.conv1.weight)
            nn.init.xavier_uniform_(self.conv2.weight)

        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.1)

    def forward(self, x):
        """Residual forward pass without a final activation: net(x) + skip(x)."""
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return out + res
class TemporalConvNet(nn.Module):
    """Stack of dilated causal residual blocks (a TCN).

    Level i uses dilation 2**i; every level is a TemporalBlock except the
    last, which is a TemporalBlock_last (no final nonlinearity).
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.1, init=True):
        super(TemporalConvNet, self).__init__()
        self.num_channels = num_channels
        self.num_inputs = num_inputs
        self.kernel_size = kernel_size
        self.dropout = dropout
        blocks = []
        n_levels = len(num_channels)
        for level, out_ch in enumerate(num_channels):
            dilation = 2 ** level
            in_ch = num_inputs if level == 0 else num_channels[level - 1]
            # The final level gets the linear (no-ReLU) block variant.
            block_cls = TemporalBlock_last if level == n_levels - 1 else TemporalBlock
            blocks.append(
                block_cls(
                    in_ch,
                    out_ch,
                    kernel_size,
                    stride=1,
                    dilation=dilation,
                    padding=(kernel_size - 1) * dilation,
                    dropout=dropout,
                    init=init,
                )
            )
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        return self.network(x)
class LocalModel(object):
    """Per-series temporal convolution ("TCN") forecasting model.

    Wraps a :class:`TemporalConvNet` together with batching
    (``data_loader``), optional per-series normalization, training with
    early stopping, autoregressive multi-step prediction, and a rolling
    evaluation loop.
    """

    def __init__(
        self,
        Ymat,
        num_inputs=1,
        num_channels=[32, 32, 32, 32, 32, 1],
        kernel_size=7,
        dropout=0.2,
        vbsize=300,
        hbsize=128,
        num_epochs=100,
        lr=0.0005,
        val_len=10,
        test=True,
        end_index=120,
        normalize=False,
        start_date="2016-1-1",
        freq="H",
        covariates=None,
        use_time=False,
        dti=None,
        Ycov=None,
    ):
        """
        Arguments:
        Ymat: input time-series n*T
        num_inputs: always set to 1
        num_channels: list containing channel progression of temporal convolution network
        kernel_size: kernel size of temporal convolution filters
        dropout: dropout rate for each layer
        vbsize: vertical batch size
        hbsize: horizontal batch size
        num_epochs: max. number of epochs
        lr: learning rate
        val_len: validation length
        test: always set to True
        end_index: no data is touched for training or validation beyond end_index
        normalize: normalize dataset before training or not
        start_date: start date in YYYY-MM-DD format (give a random date if unknown)
        freq: "H" hourly, "D": daily and for rest see here: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases
        covariates: global covariates common for all time series r*T, where r is the number of covariates
        Ycov: per time-series covariates n*l*T, l such covariates per time-series
        use_time: if false, default time-covariates are not used
        dti: date time object can be explicitly supplied here, leave None if default options are to be used
        """
        self.start_date = start_date
        # Optionally derive calendar covariates and stack them on top of any
        # user-supplied global covariates.
        if use_time:
            self.time = TimeCovariates(
                start_date=start_date, freq=freq, normalized=True, num_ts=Ymat.shape[1]
            )
            if dti is not None:
                self.time.dti = dti
            time_covariates = self.time.get_covariates()
            if covariates is None:
                self.covariates = time_covariates
            else:
                self.covariates = np.vstack([time_covariates, covariates])
        else:
            self.covariates = covariates
        self.Ycov = Ycov
        self.freq = freq
        self.vbsize = vbsize
        self.hbsize = hbsize
        self.num_inputs = num_inputs
        self.num_channels = num_channels
        self.num_epochs = num_epochs
        self.lr = lr
        self.val_len = val_len
        self.Ymat = Ymat
        self.test = test
        self.end_index = end_index
        self.normalize = normalize
        self.kernel_size = kernel_size
        if normalize:
            # Per-series standardization using statistics from the training
            # region only, then a global shift (mini) to keep values >= 0.
            Y = Ymat
            m = np.mean(Y[:, 0 : self.end_index], axis=1)
            s = np.std(Y[:, 0 : self.end_index], axis=1)
            # s[s == 0] = 1.0
            s += 1.0
            Y = (Y - m[:, None]) / s[:, None]
            mini = np.abs(np.min(Y))
            self.Ymat = Y + mini
            self.m = m
            self.s = s
            self.mini = mini
        # Covariates are fed as extra input channels to the TCN.
        if self.Ycov is not None:
            self.num_inputs += self.Ycov.shape[1]
        if self.covariates is not None:
            self.num_inputs += self.covariates.shape[0]
        self.seq = TemporalConvNet(
            num_inputs=self.num_inputs,
            num_channels=num_channels,
            kernel_size=kernel_size,
            dropout=dropout,
            init=True,
        )
        self.seq = self.seq.float()
        self.D = data_loader(
            Ymat=self.Ymat,
            vbsize=vbsize,
            hbsize=hbsize,
            end_index=end_index,
            val_len=val_len,
            covariates=self.covariates,
            Ycov=self.Ycov,
        )
        self.val_len = val_len
        if use_cuda:
            self.seq = self.seq.cuda()

    def __loss__(self, out, target, dic=None):
        """L1 loss normalized by the mean absolute target value."""
        criterion = nn.L1Loss()
        return criterion(out, target) / torch.abs(target.data).mean()

    def __prediction__(self, data):
        """Forward pass through the TCN; ``dic`` is unused here (kept for a
        uniform (out, dic) interface)."""
        dic = None
        out = self.seq(data)
        return out, dic

    def train_model(self, early_stop=False, tenacity=10):
        """
        early_stop: set true for using early stop
        tenacity: patience for early_stop
        """
        print("Training Local Model(Tconv)")
        if use_cuda:
            self.seq = self.seq.cuda()
        optimizer = optim.Adam(params=self.seq.parameters(), lr=self.lr)
        iter_count = 0
        loss_all = []
        loss_test_all = []
        vae = float("inf")  # best validation loss seen so far
        scount = 0  # epochs since the validation loss last improved
        while self.D.epoch < self.num_epochs:
            # The loader advances its own epoch counter; compare before/after
            # next_batch() to detect an epoch boundary.
            last_epoch = self.D.epoch
            inp, out_target, _, _ = self.D.next_batch()
            if self.test:
                inp_test, out_target_test, _, _ = self.D.supply_test()
            current_epoch = self.D.epoch
            if use_cuda:
                inp = inp.cuda()
                out_target = out_target.cuda()
            inp = Variable(inp)
            out_target = Variable(out_target)
            optimizer.zero_grad()
            out, dic = self.__prediction__(inp)
            loss = self.__loss__(out, out_target, dic)
            iter_count = iter_count + 1
            for p in self.seq.parameters():
                p.requires_grad = True
            loss.backward()
            # Clip exploding gradients element-wise.
            for p in self.seq.parameters():
                p.grad.data.clamp_(max=1e5, min=-1e5)
            optimizer.step()
            loss_all = loss_all + [loss.cpu().item()]
            if self.test:
                if use_cuda:
                    inp_test = inp_test.cuda()
                    out_target_test = out_target_test.cuda()
                inp_test = Variable(inp_test)
                out_target_test = Variable(out_target_test)
                out_test, dic = self.__prediction__(inp_test)
                losst = self.__loss__(out_test, out_target_test, dic)
                loss_test_all = loss_test_all + [losst.cpu().item()]
            if current_epoch > last_epoch:
                ve = loss_test_all[-1]
                print("Entering Epoch# ", current_epoch)
                print("Train Loss:", np.mean(loss_all))
                print("Validation Loss:", ve)
                if ve <= vae:
                    vae = ve
                    scount = 0
                    # self.saved_seq = TemporalConvNet(
                    #     num_inputs=self.seq.num_inputs,
                    #     num_channels=self.seq.num_channels,
                    #     kernel_size=self.seq.kernel_size,
                    #     dropout=self.seq.dropout,
                    # )
                    # self.saved_seq.load_state_dict(self.seq.state_dict())
                    # Checkpoint by deep-copying the whole model via pickle.
                    self.saved_seq = pickle.loads(pickle.dumps(self.seq))
                else:
                    scount += 1
                    if scount > tenacity and early_stop:
                        # Restore the best checkpoint and stop.
                        self.seq = self.saved_seq
                        if use_cuda:
                            self.seq = self.seq.cuda()
                        break

    def convert_to_input(self, data, cuda=True):
        """numpy (n, m) series matrix -> float tensor of shape (n, 1, m)."""
        n, m = data.shape
        inp = torch.from_numpy(data).view(1, n, m)
        inp = inp.transpose(0, 1).float()
        if cuda:
            inp = inp.cuda()
        return inp

    def convert_covariates(self, data, covs, cuda=True):
        """Tile global covariates (r, T) to (n, r, T), one copy per series."""
        nd, td = data.shape
        rcovs = np.repeat(
            covs.reshape(1, covs.shape[0], covs.shape[1]), repeats=nd, axis=0
        )
        rcovs = torch.from_numpy(rcovs).float()
        if cuda:
            rcovs = rcovs.cuda()
        return rcovs

    def convert_ycovs(self, data, ycovs, cuda=True):
        """Convert per-series covariates (n, l, T) to a float tensor."""
        nd, td = data.shape
        ycovs = torch.from_numpy(ycovs).float()
        if cuda:
            ycovs = ycovs.cuda()
        return ycovs

    def convert_from_output(self, T):
        """Model output (n, 1, t) -> numpy array (n, t) on the CPU."""
        out = T.view(T.size(0), T.size(2))
        return np.array(out.cpu().detach())

    def predict_future_batch(
        self, data, covariates=None, ycovs=None, future=10, cpu=False
    ):
        """Autoregressively roll the model ``future`` steps ahead for one
        batch: each step's last output (plus that step's covariates) is
        appended to the input and fed back in."""
        if cpu:
            self.seq = self.seq.cpu()
        else:
            self.seq = self.seq.cuda()
        inp = self.convert_to_input(data)
        if covariates is not None:
            cov = self.convert_covariates(data, covariates)
            inp = torch.cat((inp, cov[:, :, 0 : inp.size(2)]), 1)
        if ycovs is not None:
            ycovs = self.convert_ycovs(data, ycovs)
            inp = torch.cat((inp, ycovs[:, :, 0 : inp.size(2)]), 1)
        if cpu:
            inp = inp.cpu()
            cov = cov.cpu()
            ycovs = ycovs.cpu()
        out, dic = self.__prediction__(inp)
        ci = inp.size(2)  # index of the next covariate column to append
        output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
        if covariates is not None:
            output = torch.cat(
                (output, cov[:, :, ci].view(cov.size(0), cov.size(1), 1)), 1
            )
        if ycovs is not None:
            output = torch.cat(
                (output, ycovs[:, :, ci].view(ycovs.size(0), ycovs.size(1), 1)), 1
            )
        out = torch.cat((inp, output), dim=2)
        for i in range(future - 1):
            inp = out
            out, dic = self.__prediction__(inp)
            output = out[:, :, out.size(2) - 1].view(out.size(0), out.size(1), 1)
            ci += 1
            if covariates is not None:
                output = torch.cat(
                    (output, cov[:, :, ci].view(cov.size(0), cov.size(1), 1)), 1
                )
            if ycovs is not None:
                output = torch.cat(
                    (output, ycovs[:, :, ci].view(ycovs.size(0), ycovs.size(1), 1)), 1
                )
            out = torch.cat((inp, output), dim=2)
        # Keep only channel 0 (the series itself), dropping covariate channels.
        out = out[:, 0, :].view(out.size(0), 1, out.size(2))
        # NOTE(review): unconditional .cuda() even on the cpu=True path —
        # presumably a GPU is assumed to be available here; confirm.
        out = out.cuda()
        y = self.convert_from_output(out)
        self.seq = self.seq.cuda()
        return y

    def predict_future(
        self,
        data_in,
        covariates=None,
        ycovs=None,
        future=10,
        cpu=False,
        bsize=40,
        normalize=False,
    ):
        """
        data_in: input past data in same format of Ymat
        covariates: input past covariates
        ycovs: input past individual covariates
        future: number of time-points to predict
        cpu: if true then gpu is not used
        bsize: batch size for processing (determine according to GPU memory limits)
        normalize: should be set according to the normalization used in the class initialization
        """
        if normalize:
            # Apply the same per-series standardization used during training.
            data = (data_in - self.m[:, None]) / self.s[:, None]
            data += self.mini
        else:
            data = data_in
        n, T = data.shape
        # Batch boundaries over the n series.
        I = list(np.arange(0, n, bsize))
        I.append(n)
        bdata = data[range(I[0], I[1]), :]
        if ycovs is not None:
            out = self.predict_future_batch(
                bdata, covariates, ycovs[range(I[0], I[1]), :, :], future, cpu
            )
        else:
            out = self.predict_future_batch(bdata, covariates, None, future, cpu)
        for i in range(1, len(I) - 1):
            bdata = data[range(I[i], I[i + 1]), :]
            self.seq = self.seq.cuda()
            if ycovs is not None:
                temp = self.predict_future_batch(
                    bdata, covariates, ycovs[range(I[i], I[i + 1]), :, :], future, cpu
                )
            else:
                temp = self.predict_future_batch(bdata, covariates, None, future, cpu)
            out = np.vstack([out, temp])
        if normalize:
            # Undo the normalization before returning predictions.
            temp = (out - self.mini) * self.s[:, None] + self.m[:, None]
            out = temp
        return out

    def rolling_validation(self, Ymat, tau=24, n=7, bsize=90, cpu=False, alpha=0.3):
        """Evaluate the model on the last n windows of length tau: forecast
        each window from the preceding receptive field, then report error
        metrics plus a naive "repeat last window" baseline."""
        last_step = Ymat.shape[1] - tau * n
        # Receptive field of the dilated TCN.
        rg = 1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels) - 1)
        self.seq = self.seq.eval()
        if self.covariates is not None:
            covs = self.covariates[:, last_step - rg : last_step + tau]
        else:
            covs = None
        if self.Ycov is not None:
            ycovs = self.Ycov[:, :, last_step - rg : last_step + tau]
        else:
            ycovs = None
        data_in = Ymat[:, last_step - rg : last_step]
        out = self.predict_future(
            data_in,
            covariates=covs,
            ycovs=ycovs,
            future=tau,
            cpu=cpu,
            bsize=bsize,
            normalize=self.normalize,
        )
        predicted_values = []
        actual_values = []
        S = out[:, -tau::]
        predicted_values += [S]
        R = Ymat[:, last_step : last_step + tau]
        actual_values += [R]
        print("Current window wape: " + str(wape(S, R)))
        for i in range(n - 1):
            last_step += tau
            rg = 1 + 2 * (self.kernel_size - 1) * 2 ** (len(self.num_channels) - 1)
            if self.covariates is not None:
                covs = self.covariates[:, last_step - rg : last_step + tau]
            else:
                covs = None
            if self.Ycov is not None:
                ycovs = self.Ycov[:, :, last_step - rg : last_step + tau]
            else:
                ycovs = None
            data_in = Ymat[:, last_step - rg : last_step]
            out = self.predict_future(
                data_in,
                covariates=covs,
                ycovs=ycovs,
                future=tau,
                cpu=cpu,
                bsize=bsize,
                normalize=self.normalize,
            )
            S = out[:, -tau::]
            predicted_values += [S]
            R = Ymat[:, last_step : last_step + tau]
            actual_values += [R]
            print("Current window wape: " + str(wape(S, R)))
        predicted = np.hstack(predicted_values)
        actual = np.hstack(actual_values)
        dic = {}
        dic["wape"] = wape(predicted, actual)
        dic["mape"] = mape(predicted, actual)
        dic["smape"] = smape(predicted, actual)
        dic["mae"] = np.abs(predicted - actual).mean()
        dic["rmse"] = np.sqrt(((predicted - actual) ** 2).mean())
        dic["nrmse"] = dic["rmse"] / np.sqrt(((actual) ** 2).mean())
        # Baseline: repeat the window immediately preceding each target window.
        baseline = Ymat[:, Ymat.shape[1] - n * tau - tau : Ymat.shape[1] - tau]
        dic["baseline_wape"] = wape(baseline, actual)
        dic["baseline_mape"] = mape(baseline, actual)
        dic["baseline_smape"] = smape(baseline, actual)
        return dic
| 21,683 | 31.804841 | 157 | py |
deepglo | deepglo-master/run_scripts/run_traffic.py | #### OS and command-line arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')  # make the repo-local DeepGLO package importable when run from the repo root
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
import pandas as pd
import numpy as np
import pickle
import random
# Seed every RNG in play (NumPy, PyTorch CUDA + CPU, stdlib random) for
# reproducible runs. NOTE(review): torch is presumably brought into scope by
# the DeepGLO star-imports above — confirm.
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
import json
def str2bool(v):
    """Interpret a command-line flag value as a boolean.

    Real bools pass through untouched; strings are matched
    case-insensitively against the usual yes/no spellings. Anything else
    raises ``argparse.ArgumentTypeError`` so argparse prints a clean error.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def bool2str(b):
    """Render a truthy/falsy value as the lowercase string 'true'/'false'
    (used to tag output file names)."""
    return "true" if b else "false"
# ---- Experiment configuration for the traffic dataset ----
Ymat = np.load("./datasets/traffic.npy")
vbsize = 128 ## vertical batch size
hbsize = 256 ## horizontal batch size
num_channels_X = [32, 32, 32, 32, 32, 1] ## number of channels for local model
num_channels_Y = [32, 32, 32, 32, 32, 1] ## number of channels for hybrid model
kernel_size = 7 ## kernel size for local models
dropout = 0.2 ## dropout during training
rank = 64 ## rank of global model
kernel_size_Y = 7 ## kernel size of hybrid model
lr = 0.0005 ## learning rate
val_len = 24 ## validation length
end_index = Ymat.shape[1] - 24 * 7 ## models will not look beyond this during training
start_date = "2012-1-1" ## start date time for the time-series
freq = "H" ## frequency of data
covariates = None ## no covariates specified
use_time = True ## use time covariates
dti = None ## no specified time covariates (using default)
svd = False ## factor matrices are not initialized by NMF
period = None ## periodicity of 24 is expected, leave it out if not known
y_iters = 300 ## max. number of iterations while training Tconv models
init_epochs = 100 ## max number of iterations while initializing factors
forward_cov = False
logging.basicConfig(
    stream=sys.stdout,
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main(args):
    """Train DeepGLO on the traffic dataset and pickle the rolling-validation
    metrics. Reads the module-level config globals plus ``normalize``, which
    is set from the CLI in the __main__ block (``args`` itself is unused).
    """
    DG = DeepGLO(
        Ymat,
        vbsize=vbsize,
        hbsize=hbsize,
        num_channels_X=num_channels_X,
        num_channels_Y=num_channels_Y,
        kernel_size=kernel_size,
        dropout=dropout,
        rank=rank,
        kernel_size_Y=kernel_size_Y,
        lr=lr,
        val_len=val_len,
        end_index=end_index,
        normalize=normalize,
        start_date=start_date,
        freq=freq,
        covariates=covariates,
        use_time=use_time,
        dti=dti,
        svd=svd,
        period=period,
        forward_cov=forward_cov,
    )
    DG.train_all_models(y_iters=y_iters, init_epochs=init_epochs)
    # Evaluate on 7 rolling windows of 24 steps (one week of hourly data).
    result_dic = DG.rolling_validation(
        Ymat=Ymat, tau=24, n=7, bsize=100, cpu=False, alpha=0.3
    )
    print(result_dic)
    out_path = Path(
        ".",
        "results",
        "result_dictionary_traffic_" + bool2str(normalize) + ".pkl",
    )
    # NOTE(review): file handle is left to the GC to close.
    pickle.dump(result_dic, open(out_path, "wb"))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--normalize",
        type=str2bool,
        required=True,
        help="normalize for training or not",
    )
    args = parser.parse_args()
    # `global` at module level is a no-op; `normalize` is a module global
    # consumed by main() either way.
    global normalize
    normalize = args.normalize
    main(args)
| 3,498 | 24.727941 | 87 | py |
deepglo | deepglo-master/run_scripts/run_wiki.py | #### OS and command-line arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')  # make the repo-local DeepGLO package importable when run from the repo root
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
import pandas as pd
import numpy as np
import pickle
import random
# Seed every RNG in play (NumPy, PyTorch CUDA + CPU, stdlib random) for
# reproducible runs. NOTE(review): torch is presumably brought into scope by
# the DeepGLO star-imports above — confirm.
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
import json
def str2bool(v):
    """Parse a CLI flag value into a bool.

    Booleans are returned unchanged; strings match the usual yes/no
    spellings case-insensitively; anything else raises
    ``argparse.ArgumentTypeError``.
    """
    if isinstance(v, bool):
        return v
    truthy = {"yes", "true", "t", "y", "1"}
    falsy = {"no", "false", "f", "n", "0"}
    token = v.lower()
    if token in truthy:
        return True
    if token in falsy:
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def bool2str(b):
    """Map a truthy/falsy value to 'true'/'false' (lowercase), for use in
    output file names."""
    return {True: "true", False: "false"}[bool(b)]
# ---- Experiment configuration for the wiki dataset ----
Ymat = np.load("./datasets/wiki.npy")
vbsize = 2048 ## vertical batch size
hbsize = 256 ## horizontal batch size
num_channels_X = [32, 32, 32, 32, 1] ## number of channels for local model
num_channels_Y = [32, 32, 32, 32, 1] ## number of channels for hybrid model
kernel_size = 7 ## kernel size for local models
dropout = 0.2 ## dropout during training
rank = 128 ## rank of global model
kernel_size_Y = 7 ## kernel size of hybrid model
lr = 0.0005 ## learning rate
val_len = 14 ## validation length
end_index = Ymat.shape[1] - 14 * 4 ## models will not look beyond this during training
start_date = "2012-1-1" ## start date time for the time-series
freq = "D" ## frequency of data
covariates = None ## no covariates specified
use_time = True ## use time covariates
dti = None ## no specified time covariates (using default)
svd = True ## factor matrices are initialized by NMF
period = 7 ## periodicity of 7 is expected, leave it out if not known
y_iters = 300 ## max. number of iterations while training Tconv models
init_epochs = 100 ## max number of iterations while initializing factors
forward_cov = False
logging.basicConfig(
    stream=sys.stdout,
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main(args):
    """Train DeepGLO on the wiki dataset and pickle the rolling-validation
    metrics. Reads the module-level config globals plus ``normalize``, which
    is set from the CLI in the __main__ block (``args`` itself is unused).
    """
    DG = DeepGLO(
        Ymat,
        vbsize=vbsize,
        hbsize=hbsize,
        num_channels_X=num_channels_X,
        num_channels_Y=num_channels_Y,
        kernel_size=kernel_size,
        dropout=dropout,
        rank=rank,
        kernel_size_Y=kernel_size_Y,
        lr=lr,
        val_len=val_len,
        end_index=end_index,
        normalize=normalize,
        start_date=start_date,
        freq=freq,
        covariates=covariates,
        use_time=use_time,
        dti=dti,
        svd=svd,
        period=period,
        forward_cov=forward_cov,
    )
    DG.train_all_models(y_iters=y_iters, init_epochs=init_epochs)
    # Evaluate on 4 rolling windows of 14 steps (two weeks of daily data).
    result_dic = DG.rolling_validation(
        Ymat=Ymat, tau=14, n=4, bsize=100, cpu=False, alpha=0.5
    )
    print(result_dic)
    out_path = Path(
        ".",
        "results",
        "result_dictionary_wiki_" + bool2str(normalize) + ".pkl",
    )
    # NOTE(review): file handle is left to the GC to close.
    pickle.dump(result_dic, open(out_path, "wb"))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--normalize",
        type=str2bool,
        required=True,
        help="normalize for training or not",
    )
    args = parser.parse_args()
    # `global` at module level is a no-op; `normalize` is a module global
    # consumed by main() either way.
    global normalize
    normalize = args.normalize
    main(args)
| 3,475 | 24.940299 | 87 | py |
deepglo | deepglo-master/run_scripts/run_pems.py | #### OS and command-line arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')  # make the repo-local DeepGLO package importable when run from the repo root
#sys.path.append("/efs/users/rajatse/DeepGLOv2/")
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
import pandas as pd
import numpy as np
import pickle
import random
# Seed every RNG in play (NumPy, PyTorch CUDA + CPU, stdlib random) for
# reproducible runs. NOTE(review): torch is presumably brought into scope by
# the DeepGLO star-imports above — confirm.
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
def str2bool(v):
    """Convert a CLI flag value into a bool.

    Accepts actual bools as-is and the usual case-insensitive yes/no
    spellings; anything unrecognized raises
    ``argparse.ArgumentTypeError`` so argparse reports a usage error.
    """
    if isinstance(v, bool):
        return v
    lookup = {}
    for spelling in ("yes", "true", "t", "y", "1"):
        lookup[spelling] = True
    for spelling in ("no", "false", "f", "n", "0"):
        lookup[spelling] = False
    result = lookup.get(v.lower())
    if result is None:
        raise argparse.ArgumentTypeError("Boolean value expected.")
    return result
def bool2str(b):
    """Render truthy/falsy as lowercase 'true'/'false' for file-name tags."""
    if not b:
        return "false"
    return "true"
# ---- Experiment configuration for the PEMS dataset ----
Ymat = np.load("./datasets/pems.npy")
print(Ymat.shape)
vbsize = 128 ## vertical batch size
hbsize = 256 ## horizontal batch size
num_channels_X = [32, 32, 32, 32, 32, 1] ## number of channels for local model
num_channels_Y = [16, 16, 16, 16, 16, 1] ## number of channels for hybrid model
kernel_size = 7 ## kernel size for local models
dropout = 0.1 ## dropout during training
rank = 64 ## rank of global model
kernel_size_Y = 7 ## kernel size of hybrid model
lr = 0.0005 ## learning rate
val_len = 24 ## validation length
end_index = Ymat.shape[1] - 160 * 9 ## models will not look beyond this during training
start_date = "2012-5-1" ## start date time for the time-series
freq = "5T" ## frequency of data
covariates = None ## no covariates specified
use_time = True ## use time covariates
dti = None ## no specified time covariates (using default)
svd = True ## factor matrices are initialized by NMF
period = None ## periodicity of 24 is expected, leave it out if not known
y_iters = 300 ## max. number of iterations while training Tconv models
init_epochs = 100 ## max number of iterations while initializing factors
forward_cov = False
logging.basicConfig(
    stream=sys.stdout,
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main(args):
    """Train DeepGLO on the PEMS dataset and pickle the rolling-validation
    metrics. Reads the module-level config globals plus ``normalize``, which
    is set from the CLI in the __main__ block (``args`` itself is unused).
    """
    DG = DeepGLO(
        Ymat,
        vbsize=vbsize,
        hbsize=hbsize,
        num_channels_X=num_channels_X,
        num_channels_Y=num_channels_Y,
        kernel_size=kernel_size,
        dropout=dropout,
        rank=rank,
        kernel_size_Y=kernel_size_Y,
        lr=lr,
        val_len=val_len,
        end_index=end_index,
        normalize=normalize,
        start_date=start_date,
        freq=freq,
        covariates=covariates,
        use_time=use_time,
        dti=dti,
        svd=svd,
        period=period,
        forward_cov=forward_cov,
    )
    DG.train_all_models(y_iters=y_iters, init_epochs=init_epochs)
    # Evaluate on 160 rolling windows of 9 steps each.
    result_dic = DG.rolling_validation(
        Ymat=Ymat, tau=9, n=160, bsize=100, cpu=False, alpha=0.3
    )
    print(result_dic)
    out_path = Path(
        ".",
        "results",
        "result_dictionary_pems_" + bool2str(normalize) + ".pkl",
    )
    # NOTE(review): file handle is left to the GC to close.
    pickle.dump(result_dic, open(out_path, "wb"))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--normalize",
        type=str2bool,
        required=True,
        help="normalize for training or not",
    )
    args = parser.parse_args()
    # `global` at module level is a no-op; `normalize` is a module global
    # consumed by main() either way.
    global normalize
    normalize = args.normalize
    main(args)
| 3,544 | 25.259259 | 88 | py |
deepglo | deepglo-master/run_scripts/run_electricity.py | #### OS and command-line arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')  # make the repo-local DeepGLO package importable when run from the repo root
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
import pandas as pd
import numpy as np
import pickle
import json
import random
# Seed every RNG in play (NumPy, PyTorch CUDA + CPU, stdlib random) for
# reproducible runs. NOTE(review): torch is presumably brought into scope by
# the DeepGLO star-imports above — confirm.
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
def str2bool(v):
    """Turn a command-line flag value into a bool.

    Bools pass through unchanged; strings match the standard yes/no
    spellings without case sensitivity; everything else raises
    ``argparse.ArgumentTypeError``.
    """
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    for truth_value, spellings in (
        (True, ("yes", "true", "t", "y", "1")),
        (False, ("no", "false", "f", "n", "0")),
    ):
        if normalized in spellings:
            return truth_value
    raise argparse.ArgumentTypeError("Boolean value expected.")
def bool2str(b):
    """Render truthy/falsy as the lowercase string 'true'/'false'
    (used to tag output file names)."""
    return ("false", "true")[bool(b)]
# ---- Experiment configuration for the electricity dataset ----
Ymat = np.load("./datasets/electricity.npy")
vbsize = 128 ## vertical batch size
hbsize = 256 ## horizontal batch size
num_channels_X = [32, 32, 32, 32, 32, 1] ## number of channels for local model
num_channels_Y = [32, 32, 32, 32, 32, 1] ## number of channels for hybrid model
kernel_size = 7 ## kernel size for local models
dropout = 0.2 ## dropout during training
rank = 64 ## rank of global model
kernel_size_Y = 7 ## kernel size of hybrid model
lr = 0.0005 ## learning rate
val_len = 24 ## validation length
end_index = Ymat.shape[1] - 24 * 7 ## models will not look beyond this during training
start_date = "2012-1-1" ## start date time for the time-series
freq = "H" ## frequency of data
covariates = None ## no covariates specified
use_time = True ## use time covariates
dti = None ## no specified time covariates (using default)
svd = True ## factor matrices are initialized by NMF
period = 24 ## periodicity of 24 is expected, leave it out if not known
y_iters = 300 ## max. number of iterations while training Tconv models
init_epochs = 100 ## max number of iterations while initializing factors
forward_cov = False
logging.basicConfig(
    stream=sys.stdout,
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main(args):
    """Train DeepGLO on the electricity dataset and pickle the
    rolling-validation metrics. Reads the module-level config globals plus
    ``normalize``, which is set from the CLI in the __main__ block
    (``args`` itself is unused).
    """
    DG = DeepGLO(
        Ymat,
        vbsize=vbsize,
        hbsize=hbsize,
        num_channels_X=num_channels_X,
        num_channels_Y=num_channels_Y,
        kernel_size=kernel_size,
        dropout=dropout,
        rank=rank,
        kernel_size_Y=kernel_size_Y,
        lr=lr,
        val_len=val_len,
        end_index=end_index,
        normalize=normalize,
        start_date=start_date,
        freq=freq,
        covariates=covariates,
        use_time=use_time,
        dti=dti,
        svd=svd,
        period=period,
        forward_cov=forward_cov,
    )
    DG.train_all_models(y_iters=y_iters, init_epochs=init_epochs)
    # Evaluate on 7 rolling windows of 24 steps (one week of hourly data).
    result_dic = DG.rolling_validation(
        Ymat=Ymat, tau=24, n=7, bsize=100, cpu=False, alpha=0.3
    )
    print(result_dic)
    out_path = Path("./results",
        "result_dictionary_electricity_" + bool2str(normalize) + ".pkl",
    )
    # NOTE(review): file handle is left to the GC to close.
    pickle.dump(result_dic, open(out_path, "wb"))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--normalize",
        type=str2bool,
        required=True,
        help="normalize for training or not",
    )
    args = parser.parse_args()
    # `global` at module level is a no-op; `normalize` is a module global
    # consumed by main() either way.
    global normalize
    normalize = args.normalize
    main(args)
| 3,478 | 25.157895 | 87 | py |
HyperIMBA | HyperIMBA-main/main.py | import argparse
import torch
import dataloader as dl
import torch.nn.functional as F
import numpy as np
from models import GatHyper, SageHyper, GcnHyper
import test as tt
def main(args):
    """Run the HyperIMBA benchmark: for each requested dataset/backbone pair,
    train `args.run_times` models, early-stop on validation micro-F1, and
    append per-run plus std/mean scores to results/<ds><backbone>_scores.txt.
    """
    if args.dataset == 'all':
        ds_names = ['Cora','Citeseer','Photo','Actor','chameleon','Squirrel']
    else:
        ds_names = [args.dataset]
    if args.backbone in ['all','Gcn','Gat','Sage']:
        if args.backbone == 'all':
            backbones = [b+'Hyper' for b in ['Gcn','Gat','Sage']]
        else:
            backbones = [args.backbone+'Hyper']
    else:
        # Unknown backbone: silently do nothing.
        return
    for ds in ds_names:
        for babo in backbones:
            # Placeholder lists (overwritten per run below).
            babotrain_acc={babo:[i for i in range(args.run_times)]}
            babovalid_acc={babo:[i for i in range(args.run_times)]}
            babotest_acc={babo:[i for i in range(args.run_times)]}
            babowf1={babo:[i for i in range(args.run_times)]}
            f2=open('results/'+ds+babo+'_scores.txt', 'w+')
            f2.write('{0:7} {1:7}\n'.format(ds,babo))
            f2.write('{0:7} {1:7} {2:7} {3:7} {4:7}\n'.format('run','train','valid','m-f1','w-f1'))
            f2.flush()
            for run in range(args.run_times):
                dataset,data,train_mask,val_mask,test_mask = dl.select_dataset(ds, args.split)
                # Resolve the imported backbone module (GcnHyper/GatHyper/SageHyper) by name.
                model,data = globals()[babo].call(data,dataset.name,data.x.size(1),dataset.num_classes,args.hid_dim)
                optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
                best_val_acc = test_acc = 0.0
                best_val_loss = np.inf
                for epoch in range(1, args.epoch+1):
                    model.train()
                    optimizer.zero_grad()
                    F.nll_loss(model(data,args.loss_hp)[train_mask], data.y[train_mask]).backward()
                    optimizer.step()
                    train_acc,val_acc,tmp_test_acc,val_loss,tmp_w_f1 = tt.test(model, data, train_mask, val_mask, test_mask, args.loss_hp)
                    #print("acc:", train_acc,val_acc,tmp_test_acc,val_loss.item(),tmp_w_f1)
                    if val_acc>=best_val_acc:
                        # New best validation accuracy: record the matching test scores.
                        train_re=train_acc
                        best_val_acc=val_acc
                        test_acc=tmp_test_acc
                        w_f1 = tmp_w_f1
                        best_val_loss=val_loss
                        wait_step=0
                    else:
                        wait_step += 1
                        # NOTE(review): args.stop_step is a string unless the
                        # CLI declares type=int, making this == comparison
                        # never fire — confirm.
                        if wait_step == args.stop_step:
                            #print('Early stop! Validate-- Min loss: ', best_val_loss, ', Max f1-score: ', best_val_acc)
                            break
                del model
                del data
                babotrain_acc[babo][run]=train_re
                babovalid_acc[babo][run]=best_val_acc
                babotest_acc[babo][run]=test_acc
                babowf1[babo][run]=w_f1
                log ='Epoch: 200, dataset name: '+ ds + ', Backbone: '+ babo + ', Test: {0:.4f} {1:.4f}\n'
                print((log.format(babotest_acc[babo][run],babowf1[babo][run])))
                f2.write('{0:4d} {1:4f} {2:4f} {3:4f} {4:4f}\n'.format(run,babotrain_acc[babo][run],babovalid_acc[babo][run],babotest_acc[babo][run],babowf1[babo][run]))
                f2.flush()
            f2.write('{0:4} {1:4f} {2:4f} {3:4f} {4:4f}\n'.format('std',np.std(babotrain_acc[babo]),np.std(babovalid_acc[babo]),np.std(babotest_acc[babo]),np.std(babowf1[babo])))
            f2.write('{0:4} {1:4f} {2:4f} {3:4f} {4:4f}\n'.format('mean',np.mean(babotrain_acc[babo]),np.mean(babovalid_acc[babo]),np.mean(babotest_acc[babo]),np.mean(babowf1[babo])))
            f2.close()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Hyperbolic Geometric Hierarchy-IMBAlance Learning')
    parser.add_argument("--dataset", '-d', type=str, default="Cora", help="all,Cora,Citeseer,Photo,Actor,chameleon,Squirrel")
    parser.add_argument("--backbone", '-b', type=str, default="Gcn", help="all,Gcn,Gat,Sage")
    # Fix: declared type=str before, but dataloader.get_split compares this
    # value against the ints 1..5, so "-s 1" from the CLI silently fell
    # through to the random split. The default 0 is unchanged.
    parser.add_argument("--split", '-s', type=int, default=0,
                        help="Way of train-set split: 0~5(random,(0.5,1),(0,0.05),(0.66,1),(0.33,0.66),(0,0.33))")
    parser.add_argument("--gpu", type=int, default=-1, help="GPU index. Default: -1, using CPU.")
    parser.add_argument("--hid_dim", type=int, default=256, help="Hidden layer dimension")
    parser.add_argument("--num_layers", type=int, default=2, help="Number of layers")
    parser.add_argument("--epoch", type=int, default=200, help="Number of epochs. Default: 200")
    parser.add_argument("--run_times", type=int, default=10, help="Run times")
    parser.add_argument("--lr", type=float, default=0.01, help="Learning rate. Default: 0.01")
    parser.add_argument("--weight_decay", type=float, default=0.0005, help="Weight decay. Default: 0.0005")
    parser.add_argument("--loss_hp", type=float, default=1, help="Loss hyper-parameters (alpha). Default: 1")
    #parser.add_argument('--early_stop', action='store_true', default=True, help="Indicates whether to use early stop")
    # Fix: without type=int a CLI-supplied value is a string, so the
    # `wait_step == args.stop_step` early-stop check in main() never fires.
    parser.add_argument('--stop_step', type=int, default=100, help="Step of early stop")
    args = parser.parse_args()
    print(args)
    main(args)
| 5,254 | 56.119565 | 183 | py |
HyperIMBA | HyperIMBA-main/test.py | import torch
from sklearn.metrics import f1_score
import torch.nn.functional as F
def test(model, data, train_mask, val_mask, test_mask, alpha):
    """Evaluate `model` on the three splits.

    Returns a list:
    [train micro-F1, val micro-F1, test micro-F1,
     val NLL loss (tensor), test weighted-F1].
    """
    with torch.no_grad():
        model.eval()
        logits, accs = model(data, alpha), []
        for mask in [train_mask, val_mask, test_mask]:
            pred = logits[mask].max(1)[1]
            acc = f1_score(pred.cpu(), data.y[mask].cpu(), average='micro')
            accs.append(acc)
        # Reuse the logits from the single forward pass above — the original
        # ran the model a second (and third) time for identical results.
        accs.append(F.nll_loss(logits[val_mask], data.y[val_mask]))
        # `pred`/`mask` still hold the last loop iteration (the test split),
        # so this is the weighted F1 on the test set.
        accs.append(f1_score(pred.cpu(), data.y[mask].cpu(), average='weighted'))
    return accs
HyperIMBA | HyperIMBA-main/dataloader.py | import torch_geometric.datasets as dt
import torch_geometric.transforms as T
import torch
import numpy as np
from dgl.data.utils import generate_mask_tensor, idx2mask
from sklearn.model_selection import train_test_split
def select_dataset(ds, spcial):
    """Resolve the torch_geometric loader class for dataset ``ds``, load it
    with the split scheme ``spcial``, and return
    (dataset, data, train_mask, val_mask, test_mask)."""
    loader_by_name = {
        'Cora': 'Planetoid',
        'Citeseer': 'Planetoid',
        'Photo': 'Amazon',
        'chameleon': 'WikipediaNetwork',
        'Squirrel': 'WikipediaNetwork',
    }
    # Datasets without a dedicated loader name use their own name (e.g. Actor).
    ds_loader = loader_by_name.get(ds, ds)
    dataset = load_datas(ds_loader, ds, spcial)
    if ds == 'Actor':
        data = dataset.data
        dataset.name = ds
    else:
        data = dataset[0]
    return dataset, data, data.train_mask, data.val_mask, data.test_mask
def load_datas(ds_loader,ds,spcial):
    # Instantiate the torch_geometric dataset class named `ds_loader` for
    # dataset `ds`, then replace its masks with the split selected by
    # `spcial` (see get_split).
    if ds_loader=='Planetoid':
        dataset = dt.Planetoid(root='data/'+ds, name=ds, transform=T.NormalizeFeatures())
    else:
        # Look up the loader class by name, e.g. dt.Amazon / dt.WikipediaNetwork.
        dataset = getattr(dt, ds_loader)('data/'+ds,ds)
    if ds_loader == 'Actor':
        # get_split needs dataset.name for the hyperemb file lookup;
        # presumably the Actor loader does not set it — confirm.
        dataset.name = ds
    data = get_split(dataset, spcial)
    dataset.data = data
    return dataset
def get_split(dataset, spcial):
    """Build train/val/test masks on ``dataset.data``.

    ``spcial`` selects which slice of the hyperbolic-norm ranking supplies
    the training pool (split ratio roughly 1:1:8):
      0 -> random, 1 -> top 50%, 2 -> bottom 50%, 3 -> top 33%,
      4 -> middle 33%, 5 -> bottom 33% (nodes ranked by Poincare norm).
    Requires the precomputed embedding file hyperemb/<name>_values.npy
    (produced by calculator.py).
    """
    data = dataset.data
    values=np.load('hyperemb/'+dataset.name+'_values.npy')
    # Rank nodes by Poincare-embedding norm, largest first. Only the ordering
    # is used; the sorted values are discarded (the original bound them to a
    # local named `sorted`, shadowing the builtin).
    _, indices = torch.sort(torch.norm(torch.tensor(values),dim=1),descending=True)
    #train set split ratio 1:1:8
    if spcial == 1:#Top 50% in the Poincare weight
        train_idx, val_idx, test_idx = split_idx1(indices[:data.num_nodes//2],indices[data.num_nodes//2:], 0.2, 0.1, 42)
    elif spcial == 2:#Bottom 50%
        train_idx, val_idx, test_idx = split_idx1(indices[data.num_nodes//2:],indices[:data.num_nodes//2], 0.2, 0.1, 42)
    elif spcial == 3:#Top 33%
        train_idx, val_idx, test_idx = split_idx1(indices[:data.num_nodes//3],indices[data.num_nodes//3:], 0.3, 0.1, 42)
    elif spcial == 4:#Middle 33%
        remaining = torch.cat((indices[:data.num_nodes//3],indices[data.num_nodes//3+data.num_nodes//3:]))
        train_idx, val_idx, test_idx = split_idx1(indices[data.num_nodes//3:data.num_nodes//3+data.num_nodes//3],remaining, 0.3, 0.1, 42)
    elif spcial == 5:#Bottom 33%
        train_idx, val_idx, test_idx = split_idx1(indices[data.num_nodes//3+data.num_nodes//3:],indices[:data.num_nodes//3+data.num_nodes//3], 0.3, 0.1, 42)
    else:#random
        train_idx, val_idx, test_idx = split_idx(np.arange(data.num_nodes), 0.1, 0.1, 42)
    data.train_mask = generate_mask_tensor(idx2mask(train_idx, data.num_nodes))
    data.val_mask = generate_mask_tensor(idx2mask(val_idx, data.num_nodes))
    data.test_mask = generate_mask_tensor(idx2mask(test_idx, data.num_nodes))
    return data
def split_idx(samples, train_size, val_size, random_state=None):
    """Random train/val/test split of ``samples``.

    A float ``val_size`` is interpreted as a fraction of the FULL sample
    set, so it is rescaled to a fraction of the remainder before the
    second split.
    """
    train, remainder = train_test_split(samples, train_size=train_size, random_state=random_state)
    if isinstance(val_size, float):
        val_size *= len(samples) / len(remainder)
    val, test = train_test_split(remainder, train_size=val_size, random_state=random_state)
    return train, val, test
def split_idx1(samples1, samples2, train_size, val_size, random_state=None):
    """Draw the train set from ``samples1`` only, then split val/test from
    the leftover of ``samples1`` merged with ``samples2``."""
    train, leftover = train_test_split(samples1, train_size=train_size, random_state=random_state)
    pool = torch.cat((leftover, samples2))
    val, test = train_test_split(pool, train_size=val_size, random_state=random_state)
    return train, val, test
| 3,365 | 41.607595 | 156 | py |
HyperIMBA | HyperIMBA-main/calculator.py | #Calculate Hyperbolic Embedding
import argparse
import torch
import numpy as np
from models.Poincare import PoincareModel
import dataloader as dl
from torch_geometric.utils import degree, to_networkx
from GraphRicciCurvature.OllivierRicci import OllivierRicci
# Command-line driver: pre-computes the geometric quantities HyperIMBA's
# models load later from `hyperemb/` (either Ollivier-Ricci edge curvatures
# or a trained Poincare node embedding).
parser = argparse.ArgumentParser(description='Calculate Hyperbolic Embedding')
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--manifolds', type=str, default='poincare', help="ricci, poincare")
parser.add_argument("--dataset", '-d', type=str, default="Cora", help="all,Cora,Citeseer,Photo,Actor,chameleon,Squirrel")
parser.add_argument("--split", '-s', type=str, default=0, help="Random split train-set")
args = parser.parse_args()
print(args)
dataset,data,_,_,_ = dl.select_dataset(args.dataset, args.split)
if args.manifolds=='ricci':
    # --- Ollivier-Ricci branch: compute per-edge curvature and dump an
    # edge list "src dst curvature" that the models' `call()` re-reads.
    G = to_networkx(data)
    orc = OllivierRicci(G, alpha=0.5, verbose="TRACE")
    orc.compute_ricci_curvature()
    G_orc = orc.G.copy() # save an intermediate result
    curvature="ricciCurvature"
    ricci_results = {}
    ricci = {}
    for i,(n1,n2) in enumerate(list(G_orc.edges()),0):
        #ricci_results[i] = G_orc[n1][n2][curvature]
        ricci[i] = [int(n1),int(n2),G_orc[n1][n2][curvature]]
    weights = [ricci[i] for i in ricci.keys()]
    np.savetxt('hyperemb/' + args.dataset + '.edge_list',weights,fmt="%d %d %.16f")
else:
    # --- Poincare branch: train a hyperbolic embedding of the graph and
    # save (node-key, coordinate) arrays for later lookup.
    # node weight = total (in + out) degree per node
    degrees = np.array(degree(data.edge_index[0],num_nodes=data.num_nodes)+degree(data.edge_index[1],num_nodes=data.num_nodes))
    edges_list = list(data.edge_index.t().numpy())
    # labels shifted by +1; presumably the Poincare trainer expects 1-based
    # labels — TODO confirm against models/Poincare.py
    labels = dict(enumerate(data.y.numpy()+1, 0))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dim = 2
    model = PoincareModel(edges_list,node_weights=degrees*0.2,node_labels=labels, n_components=dim,eta=0.01,n_negative=10, name="hierarchy", device=device)
    model.init_embeddings()
    model.train(args.epochs)
    weights = model.embeddings
    # persist embeddings as parallel key/value arrays
    keys = np.array([item for item in model.emb_dict.keys()])
    values = np.array([item for item in model.emb_dict.values()])
    np.save('hyperemb/' + args.dataset + '_keys.npy', keys)
    np.save('hyperemb/' + args.dataset + '_values.npy', values)
# | 2,190 | 41.134615 | 155 | py |
# HyperIMBA | HyperIMBA-main/models/GcnHyper.py
from typing import Optional, Tuple
import numpy as np
import torch
from torch import Tensor
from torch.nn import Parameter
from torch_scatter import scatter_add
from torch_sparse import SparseTensor, fill_diag, matmul, mul
from torch_sparse import sum as sparsesum
import torch.nn.functional as F
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.nn.inits import zeros
from torch_geometric.typing import Adj, OptTensor, PairTensor
from torch_geometric.utils import add_remaining_self_loops
from torch_geometric.utils.num_nodes import maybe_num_nodes
from torch.nn import Sequential as seq, Parameter,LeakyReLU,init,Linear
from torch_geometric.utils import add_self_loops, remove_self_loops,degree,softmax
# torch.jit overload stub: dense `edge_index` (Tensor) variant.
@torch.jit._overload
def gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,
             add_self_loops=True, flow="source_to_target", dtype=None):
    # type: (Tensor, OptTensor, Optional[int], bool, bool, str, Optional[int]) -> PairTensor # noqa
    pass
# torch.jit overload stub: SparseTensor adjacency variant.
@torch.jit._overload
def gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,
             add_self_loops=True, flow="source_to_target", dtype=None):
    # type: (SparseTensor, OptTensor, Optional[int], bool, bool, str, Optional[int]) -> SparseTensor # noqa
    pass
def gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,
             add_self_loops=True, flow="source_to_target", dtype=None):
    """Symmetric GCN normalization D^{-1/2} (A + fill*I) D^{-1/2}.

    Returns the rescaled SparseTensor adjacency, or a
    ``(edge_index, edge_weight)`` pair for dense edge indices.
    ``improved=True`` uses a self-loop weight of 2 instead of 1.
    """
    fill_value = 2. if improved else 1.
    if isinstance(edge_index, SparseTensor):
        assert flow in ["source_to_target"]
        adj_t = edge_index
        if not adj_t.has_value():
            adj_t = adj_t.fill_value(1., dtype=dtype)
        if add_self_loops:
            adj_t = fill_diag(adj_t, fill_value)
        # D^{-1/2} A D^{-1/2} via row scaling then column scaling
        deg = sparsesum(adj_t, dim=1)
        deg_inv_sqrt = deg.pow_(-0.5)
        # isolated nodes have deg 0 -> inf after pow; zero them out
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0.)
        adj_t = mul(adj_t, deg_inv_sqrt.view(-1, 1))
        adj_t = mul(adj_t, deg_inv_sqrt.view(1, -1))
        return adj_t
    else:
        assert flow in ["source_to_target", "target_to_source"]
        num_nodes = maybe_num_nodes(edge_index, num_nodes)
        if edge_weight is None:
            edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
                                     device=edge_index.device)
        if add_self_loops:
            edge_index, tmp_edge_weight = add_remaining_self_loops(
                edge_index, edge_weight, fill_value, num_nodes)
            assert tmp_edge_weight is not None
            edge_weight = tmp_edge_weight
        row, col = edge_index[0], edge_index[1]
        # degree is accumulated on the aggregation side of each edge
        idx = col if flow == "source_to_target" else row
        deg = scatter_add(edge_weight, idx, dim=0, dim_size=num_nodes)
        deg_inv_sqrt = deg.pow_(-0.5)
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
        return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
class GCNConv(MessagePassing):
    r"""The graph convolutional operator from the `"Semi-supervised
    Classification with Graph Convolutional Networks"
    <https://arxiv.org/abs/1609.02907>`_ paper, extended with HyperIMBA's
    curvature-based message weights and Poincare (hierarchy) bias.

    .. math::
        \mathbf{X}^{\prime} = \mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
        \mathbf{\hat{D}}^{-1/2} \mathbf{X} \mathbf{\Theta},

    where :math:`\mathbf{\hat{A}} = \mathbf{A} + \mathbf{I}` denotes the
    adjacency matrix with inserted self-loops and
    :math:`\hat{D}_{ii} = \sum_{j=0} \hat{A}_{ij}` its diagonal degree matrix.
    In this variant the aggregation weights are *learned* from per-edge
    Ricci curvature (``k_ricci``) instead of the normalized weights, and a
    node-wise term derived from Poincare coordinates (``e_poinc``) is added
    to the output, scaled by ``alpha_hp``.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        k_ricci: per-edge Ricci-curvature features (including zero entries
            for self-loops), fed through a small MLP to produce edge weights.
        e_poinc: per-node Poincare coordinates, fed through a small MLP to
            produce the hierarchy-aware output term.
        n_components (int): feature dimension of :obj:`k_ricci`.
        n_components_p (int): feature dimension of :obj:`e_poinc`.
        improved (bool, optional): If set to :obj:`True`, computes
            :math:`\mathbf{\hat{A}}` as :math:`\mathbf{A} + 2\mathbf{I}`.
            (default: :obj:`False`)
        cached (bool, optional): If set to :obj:`True`, caches the
            normalized adjacency on first execution (transductive settings
            only). (default: :obj:`False`)
        add_self_loops (bool, optional): If set to :obj:`False`, will not add
            self-loops to the input graph. (default: :obj:`True`)
        normalize (bool, optional): Whether to add self-loops and compute
            symmetric normalization coefficients on the fly.
            (default: :obj:`True`)
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
    """
    _cached_edge_index: Optional[Tuple[Tensor, Tensor]]
    _cached_adj_t: Optional[SparseTensor]
    def __init__(self, in_channels: int, out_channels: int,
                 k_ricci,e_poinc,n_components,n_components_p,
                 improved: bool = False, cached: bool = False,
                 add_self_loops: bool = True, normalize: bool = True,
                 bias: bool = True, **kwargs):
        kwargs.setdefault('aggr', 'add')
        super().__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.improved = improved
        self.cached = cached
        self.add_self_loops = add_self_loops
        self.normalize = normalize
        # HyperIMBA geometry inputs (shared tensors, not parameters)
        self.k_ricci = k_ricci
        self.e_poinc = e_poinc
        self._cached_edge_index = None
        self._cached_adj_t = None
        self.lin = Linear(in_channels, out_channels, bias=False)
        # curvature -> per-edge weight MLP, Poincare coords -> per-node bias MLP
        widths=[n_components,out_channels]
        widths_p=[n_components_p,out_channels]
        self.hmpnn=create_wmlp(widths,out_channels,1)
        self.ham=create_wmlp(widths_p,out_channels,1)
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        self.lin.reset_parameters()
        zeros(self.bias)
        # drop stale cached normalizations
        self._cached_edge_index = None
        self._cached_adj_t = None
    def forward(self, x: Tensor, edge_index: Adj, alpha_hp: float,
                edge_weight: OptTensor = None) -> Tensor:
        """Run one curvature-weighted convolution step.

        Args:
            x: node feature matrix ``[num_nodes, in_channels]``.
            edge_index: graph connectivity (Tensor or SparseTensor).
            alpha_hp: scaling of the Poincare-embedding (hierarchy) term.
            edge_weight: optional input edge weights, used only by the
                symmetric normalization below.
        """
        if self.normalize:
            if isinstance(edge_index, Tensor):
                cache = self._cached_edge_index
                if cache is None:
                    edge_index, edge_weight = gcn_norm(  # yapf: disable
                        edge_index, edge_weight, x.size(self.node_dim),
                        self.improved, self.add_self_loops, self.flow)
                    if self.cached:
                        self._cached_edge_index = (edge_index, edge_weight)
                else:
                    edge_index, edge_weight = cache[0], cache[1]
            elif isinstance(edge_index, SparseTensor):
                cache = self._cached_adj_t
                if cache is None:
                    edge_index = gcn_norm(  # yapf: disable
                        edge_index, edge_weight, x.size(self.node_dim),
                        self.improved, self.add_self_loops, self.flow)
                    if self.cached:
                        self._cached_adj_t = edge_index
                else:
                    edge_index = cache
        x = self.lin(x)
        # BUGFIX: removed the former `edge_weight = edge_weight.view(-1, 1)`
        # here — its result was unconditionally overwritten on the next line,
        # and it raised AttributeError whenever `edge_weight` was None with
        # `normalize=False`.
        # The normalization weights are replaced by learned, curvature-based
        # edge weights, softmax-normalized over edges grouped by edge_index[0].
        edge_weight=self.hmpnn(self.k_ricci)
        edge_weight=softmax(edge_weight,edge_index[0])
        # propagate_type: (x: Tensor, edge_weight: OptTensor)
        out = self.propagate(edge_index, x=x, edge_weight=edge_weight,
                             size=None)
        # hierarchy-aware node-level term from the Poincare embedding
        p_weight=self.ham(self.e_poinc)
        p_weight=F.leaky_relu(p_weight)
        if self.bias is not None:
            out += self.bias
        return out+alpha_hp*p_weight
    def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
        # scale each message by its learned edge weight
        return x_j if edge_weight is None else edge_weight * x_j
    def update(self, aggr_out):
        # identity update; kept for MessagePassing's hook protocol
        return aggr_out
    def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
        # NOTE(review): this fused sparse path does not apply the curvature
        # weights computed in forward() — confirm it is never taken.
        return matmul(adj_t, x, reduce=self.aggr)
def create_wmlp(widths, nfeato, lbias):
    """Build a small weight-producing MLP.

    Hidden structure: for each consecutive pair in ``widths`` a bias-free
    ``Linear`` followed by an in-place ``LeakyReLU(0.2)``; a final
    ``Linear(widths[-1], nfeato, bias=lbias)`` maps to the output size.
    """
    layers = []
    for fan_in, fan_out in zip(widths[:-1], widths[1:]):
        layers.append(Linear(fan_in, fan_out, bias=False))
        layers.append(LeakyReLU(0.2, True))
    layers.append(Linear(widths[-1], nfeato, bias=lbias))
    return seq(*layers)
class Net(torch.nn.Module):
    """Two-layer HyperIMBA GCN node classifier (log-softmax output)."""

    def __init__(self, data, num_features, num_hidden, num_classes,
                 k_ricci, e_poinc, n_components, n_components_p):
        super().__init__()
        # both convolutions share the same geometric side inputs
        self.conv1 = GCNConv(num_features, num_hidden, k_ricci, e_poinc,
                             n_components, n_components_p, cached=True)
        self.conv2 = GCNConv(num_hidden, num_classes, k_ricci, e_poinc,
                             n_components, n_components_p, cached=True)

    def forward(self, data, alpha):
        hidden = F.dropout(data.x, p=0.6, training=self.training)
        hidden = F.elu(self.conv1(hidden, data.edge_index, alpha))
        hidden = F.dropout(hidden, p=0.6, training=self.training)
        logits = self.conv2(hidden, data.edge_index, alpha)
        return F.log_softmax(logits, dim=1)
def num(strings):
    """Parse a numeric token: ``int`` when possible, otherwise ``float``."""
    try:
        value = int(strings)
    except ValueError:
        value = float(strings)
    return value
def call(data, name, num_features, num_classes, num_hidden):
    """Attach HyperIMBA geometry to ``data`` and build a GCN model.

    Reads the pre-computed Ricci-curvature edge list and Poincare
    embedding files from ``hyperemb/`` (produced by calculator.py),
    stores them on ``data``, moves everything to the available device
    and returns ``(model, data)``.
    """
    # --- per-edge Ricci curvature ---
    filename = 'hyperemb/' + name + '.edge_list'
    # BUGFIX: use a context manager; the file handle was previously leaked
    with open(filename) as f:
        cur_list = list(f)
    if name in ('Cora', 'Actor', 'chameleon', 'squirrel'):
        # file already stores both directions of every edge
        ricci_cur = [[] for i in range(len(cur_list))]
        for i in range(len(cur_list)):
            ricci_cur[i] = [num(s) for s in cur_list[i].split(' ', 2)]
    else:
        # mirror each stored edge so both directions carry the curvature
        ricci_cur = [[] for i in range(2 * len(cur_list))]
        for i in range(len(cur_list)):
            ricci_cur[i] = [num(s) for s in cur_list[i].split(' ', 2)]
            ricci_cur[i + len(cur_list)] = [ricci_cur[i][1], ricci_cur[i][0], ricci_cur[i][2]]
    ricci_cur = sorted(ricci_cur)
    k_ricci = [i[2] for i in ricci_cur]
    # zero curvature for the self-loops appended below
    k_ricci = k_ricci + [0 for i in range(data.x.size(0))]
    k_ricci = torch.tensor(k_ricci, dtype=torch.float)
    data.k_ricci = k_ricci.view(-1, 1)
    data.n_components = 1
    # --- per-node Poincare embedding ---
    data.edge_index, _ = remove_self_loops(data.edge_index)
    keys = np.load('hyperemb/' + name + '_keys.npy')
    values = np.load('hyperemb/' + name + '_values.npy')
    e_poinc = dict(zip(keys, values))
    data.n_components_p = values.shape[1]
    # nodes missing from the embedding file fall back to all-ones vectors
    alls = dict(enumerate(np.ones((data.num_nodes, data.n_components_p)), 0))
    alls.update(e_poinc)
    e_poinc = torch.tensor(np.array([alls[i] for i in alls]))
    data.e_poinc = e_poinc.to(torch.float32)
    data.edge_index, _ = add_self_loops(data.edge_index, num_nodes=data.x.size(0))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    data.k_ricci = data.k_ricci.to(device)
    data.e_poinc = data.e_poinc.to(device)
    data = data.to(device)
    model = Net(data, num_features, num_hidden, num_classes, data.k_ricci,
                data.e_poinc, data.n_components, data.n_components_p).to(device)
    return model, data
# HyperIMBA | HyperIMBA-main/models/SageHyper.py
import numpy as np
import torch
from torch.nn import Sequential as seq, Parameter,LeakyReLU,init,Linear
from typing import List, Optional, Tuple, Union
import torch.nn.functional as F
from torch import Tensor
from torch.nn import LSTM
from torch_sparse import SparseTensor, matmul
from torch_geometric.nn.aggr import Aggregation, MultiAggregation
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptPairTensor, Size
from torch_geometric.utils import add_self_loops, remove_self_loops,degree,softmax
class SAGEConv(MessagePassing):
    r"""The GraphSAGE operator from the `"Inductive Representation Learning on
    Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper
    .. math::
        \mathbf{x}^{\prime}_i = \mathbf{W}_1 \mathbf{x}_i + \mathbf{W}_2 \cdot
        \mathrm{mean}_{j \in \mathcal{N(i)}} \mathbf{x}_j
    If :obj:`project = True`, then :math:`\mathbf{x}_j` will first get
    projected via
    .. math::
        \mathbf{x}_j \leftarrow \sigma ( \mathbf{W}_3 \mathbf{x}_j +
        \mathbf{b})
    as described in Eq. (3) of the paper.
    Args:
        in_channels (int or tuple): Size of each input sample, or :obj:`-1` to
            derive the size from the first input(s) to the forward method.
            A tuple corresponds to the sizes of source and target
            dimensionalities.
        out_channels (int): Size of each output sample.
        k_ricci: pre-computed per-edge Ricci-curvature features; mapped by a
            small MLP to message weights (HyperIMBA).
        e_poinc: pre-computed per-node Poincare coordinates; mapped by a
            small MLP to a hierarchy-aware additive term (HyperIMBA).
        n_components (int): feature dimension of :obj:`k_ricci`.
        n_components_p (int): feature dimension of :obj:`e_poinc`.
        aggr (string or Aggregation, optional): The aggregation scheme to use.
            Any aggregation of :obj:`torch_geometric.nn.aggr` can be used,
            *e.g.*, :obj:`"mean"`, :obj:`"max"`, or :obj:`"lstm"`.
            (default: :obj:`"mean"`)
        normalize (bool, optional): If set to :obj:`True`, output features
            will be :math:`\ell_2`-normalized, *i.e.*,
            :math:`\frac{\mathbf{x}^{\prime}_i}
            {\| \mathbf{x}^{\prime}_i \|_2}`.
            (default: :obj:`False`)
        root_weight (bool, optional): If set to :obj:`False`, the layer will
            not add transformed root node features to the output.
            (default: :obj:`True`)
        project (bool, optional): If set to :obj:`True`, the layer will apply a
            linear transformation followed by an activation function before
            aggregation (as described in Eq. (3) of the paper).
            (default: :obj:`False`)
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
    Shapes:
        - **inputs:**
          node features :math:`(|\mathcal{V}|, F_{in})` or
          :math:`((|\mathcal{V_s}|, F_{s}), (|\mathcal{V_t}|, F_{t}))`
          if bipartite,
          edge indices :math:`(2, |\mathcal{E}|)`
        - **outputs:** node features :math:`(|\mathcal{V}|, F_{out})` or
          :math:`(|\mathcal{V_t}|, F_{out})` if bipartite
    """
    def __init__(
        self,
        in_channels: Union[int, Tuple[int, int]],
        out_channels: int,
        k_ricci,e_poinc,n_components,n_components_p,
        aggr: Optional[Union[str, List[str], Aggregation]] = "mean",
        normalize: bool = False,
        root_weight: bool = True,
        project: bool = False,
        bias: bool = True,
        **kwargs,
    ):
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.normalize = normalize
        self.root_weight = root_weight
        self.project = project
        # HyperIMBA geometry inputs (shared tensors, not parameters)
        self.k_ricci = k_ricci
        self.e_poinc = e_poinc
        if isinstance(in_channels, int):
            in_channels = (in_channels, in_channels)
        if aggr == 'lstm':
            kwargs.setdefault('aggr_kwargs', {})
            kwargs['aggr_kwargs'].setdefault('in_channels', in_channels[0])
            kwargs['aggr_kwargs'].setdefault('out_channels', in_channels[0])
        super().__init__(aggr, **kwargs)
        # curvature -> per-edge message weight; Poincare -> per-node term
        widths=[n_components,out_channels]
        widths_p=[n_components_p,out_channels]
        self.hmpnn=create_wmlp(widths,in_channels[0],1)
        self.ham=create_wmlp(widths_p,out_channels,1)
        if self.project:
            self.lin = Linear(in_channels[0], in_channels[0], bias=True)
        if self.aggr is None:
            self.fuse = False  # No "fused" message_and_aggregate.
            self.lstm = LSTM(in_channels[0], in_channels[0], batch_first=True)
        if isinstance(self.aggr_module, MultiAggregation):
            aggr_out_channels = self.aggr_module.get_out_channels(
                in_channels[0])
        else:
            aggr_out_channels = in_channels[0]
        self.lin_l = Linear(aggr_out_channels, out_channels, bias=bias)
        if self.root_weight:
            self.lin_r = Linear(in_channels[1], out_channels, bias=False)
        self.reset_parameters()
    def reset_parameters(self):
        # NOTE(review): the hmpnn/ham MLPs (and the lstm branch) are not
        # reset here — confirm whether that is intentional.
        if self.project:
            self.lin.reset_parameters()
        self.aggr_module.reset_parameters()
        self.lin_l.reset_parameters()
        if self.root_weight:
            self.lin_r.reset_parameters()
    def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, alpha_hp: float,
                size: Size = None) -> Tensor:
        """Aggregate neighbors with curvature-derived edge weights and add
        the Poincare-embedding term scaled by ``alpha_hp``."""
        if isinstance(x, Tensor):
            x: OptPairTensor = (x, x)
        if self.project and hasattr(self, 'lin'):
            x = (self.lin(x[0]).relu(), x[1])
        # propagate_type: (x: OptPairTensor)
        # learned edge weights from Ricci curvature, softmax-normalized over
        # edges grouped by edge_index[0]
        out_weight=self.hmpnn(self.k_ricci)
        out_weight=softmax(out_weight,edge_index[0])
        out = self.propagate(x=x,edge_index=edge_index,out_weight=out_weight)
        out = self.lin_l(out)
        # hierarchy-aware node-level term from the Poincare embedding
        p_weight=self.ham(self.e_poinc)
        p_weight=F.leaky_relu(p_weight)
        out = out+alpha_hp*p_weight
        x_r = x[1]
        if self.root_weight and x_r is not None:
            out += self.lin_r(x_r)
        if self.normalize:
            out = F.normalize(out, p=2., dim=-1)
        return out
    def message(self, x_j: Tensor, out_weight: Tensor) -> Tensor:
        # scale each incoming message by its learned curvature weight
        return out_weight*x_j
    def message_and_aggregate(self, adj_t: SparseTensor,
                              x: OptPairTensor) -> Tensor:
        # NOTE(review): this fused sparse path ignores out_weight — confirm
        # it is never taken when curvature weighting is expected.
        adj_t = adj_t.set_value(None, layout=None)
        return matmul(adj_t, x[0], reduce=self.aggr)
    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}({self.in_channels}, '
                f'{self.out_channels}, aggr={self.aggr})')
def create_wmlp(widths, nfeato, lbias):
    """Assemble a weight-producing MLP: ``Linear``/``LeakyReLU(0.2)`` pairs
    for consecutive ``widths`` entries (no bias), then a final
    ``Linear(widths[-1], nfeato, bias=lbias)``."""
    stages = []
    for idx in range(len(widths) - 1):
        stages += [Linear(widths[idx], widths[idx + 1], bias=False),
                   LeakyReLU(0.2, True)]
    stages.append(Linear(widths[-1], nfeato, bias=lbias))
    return seq(*stages)
class Net(torch.nn.Module):
    """Two-layer HyperIMBA GraphSAGE node classifier (log-softmax output)."""

    def __init__(self, data, num_features, num_hidden, num_classes,
                 k_ricci, e_poinc, n_components, n_components_p):
        super().__init__()
        self.conv1 = SAGEConv(num_features, num_hidden, k_ricci, e_poinc,
                              n_components, n_components_p)
        self.conv2 = SAGEConv(num_hidden, num_classes, k_ricci, e_poinc,
                              n_components, n_components_p)

    def forward(self, data, alpha):
        feats, edges = data.x, data.edge_index
        feats = F.dropout(feats, p=0.6, training=self.training)
        feats = F.relu(self.conv1(feats, edges, alpha))
        # second dropout keeps F.dropout's default rate (p=0.5)
        feats = F.dropout(feats, training=self.training)
        feats = self.conv2(feats, edges, alpha)
        return F.log_softmax(feats, dim=1)
def num(strings):
    """Convert a numeric string to ``int`` if exact, else ``float``."""
    try:
        return int(strings)
    except ValueError:
        pass
    return float(strings)
def call(data, name, num_features, num_classes, num_hidden):
    """Attach HyperIMBA geometry to ``data`` and build a GraphSAGE model.

    Reads the pre-computed Ricci-curvature edge list and Poincare
    embedding files from ``hyperemb/`` (produced by calculator.py),
    stores them on ``data``, moves everything to the available device
    and returns ``(model, data)``.
    """
    # --- per-edge Ricci curvature ---
    filename = 'hyperemb/' + name + '.edge_list'
    # BUGFIX: use a context manager; the file handle was previously leaked
    with open(filename) as f:
        cur_list = list(f)
    if name in ('Cora', 'Actor', 'chameleon', 'squirrel'):
        # file already stores both directions of every edge
        ricci_cur = [[] for i in range(len(cur_list))]
        for i in range(len(cur_list)):
            ricci_cur[i] = [num(s) for s in cur_list[i].split(' ', 2)]
    else:
        # mirror each stored edge so both directions carry the curvature
        ricci_cur = [[] for i in range(2 * len(cur_list))]
        for i in range(len(cur_list)):
            ricci_cur[i] = [num(s) for s in cur_list[i].split(' ', 2)]
            ricci_cur[i + len(cur_list)] = [ricci_cur[i][1], ricci_cur[i][0], ricci_cur[i][2]]
    ricci_cur = sorted(ricci_cur)
    k_ricci = [i[2] for i in ricci_cur]
    # zero curvature for the self-loops appended below
    k_ricci = k_ricci + [0 for i in range(data.x.size(0))]
    k_ricci = torch.tensor(k_ricci, dtype=torch.float)
    data.k_ricci = k_ricci.view(-1, 1)
    data.n_components = 1
    # --- per-node Poincare embedding ---
    data.edge_index, _ = remove_self_loops(data.edge_index)
    keys = np.load('hyperemb/' + name + '_keys.npy')
    values = np.load('hyperemb/' + name + '_values.npy')
    e_poinc = dict(zip(keys, values))
    data.n_components_p = values.shape[1]
    # nodes missing from the embedding file fall back to all-ones vectors
    alls = dict(enumerate(np.ones((data.num_nodes, data.n_components_p)), 0))
    alls.update(e_poinc)
    e_poinc = torch.tensor(np.array([alls[i] for i in alls]))
    data.e_poinc = e_poinc.to(torch.float32)
    data.edge_index, _ = add_self_loops(data.edge_index, num_nodes=data.x.size(0))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    data.k_ricci = data.k_ricci.to(device)
    data.e_poinc = data.e_poinc.to(device)
    data = data.to(device)
    model = Net(data, num_features, num_hidden, num_classes, data.k_ricci,
                data.e_poinc, data.n_components, data.n_components_p).to(device)
    return model, data
# | 9,477 | 38.327801 | 131 | py |
# HyperIMBA | HyperIMBA-main/models/GatHyper.py
from typing import Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter
from torch_sparse import SparseTensor, set_diag
import math
import numpy as np
from typing import Any
from torch.nn import Sequential as seq, Parameter,LeakyReLU,init,Linear
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import (
Adj,
NoneType,
OptPairTensor,
OptTensor,
Size,
)
from torch_geometric.utils import add_self_loops, remove_self_loops, softmax
#from ..inits import glorot, zeros
class GATConv(MessagePassing):
    """GAT-style convolution modified for HyperIMBA.

    Structurally follows torch_geometric's GATConv, but in :meth:`forward`
    the attention coefficients computed from node features are replaced by
    weights learned from per-edge Ricci curvature (``k_ricci``), and a
    node-wise term derived from Poincare coordinates (``e_poinc``), scaled
    by ``alpha_hp``, is added to the output.
    """
    def __init__(
        self,
        in_channels: Union[int, Tuple[int, int]],
        out_channels: int,
        heads,
        k_ricci,e_poinc,n_components,n_components_p,
        concat: bool = True,
        negative_slope: float = 0.2,
        dropout: float = 0.0,
        add_self_loops: bool = True,
        edge_dim: Optional[int] = None,
        fill_value: Union[float, Tensor, str] = 'mean',
        bias: bool = True,
        **kwargs,
    ):
        kwargs.setdefault('aggr', 'add')
        super().__init__(node_dim=0, **kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = heads
        self.concat = concat
        self.negative_slope = negative_slope
        self.dropout = dropout
        self.add_self_loops = add_self_loops
        self.edge_dim = edge_dim
        self.fill_value = fill_value
        # HyperIMBA geometry inputs (shared tensors, not parameters)
        self.k_ricci = k_ricci
        self.e_poinc = e_poinc
        # curvature -> per-edge weight; Poincare coords -> per-node term
        # (the latter sized for the concatenated multi-head output)
        widths=[n_components,out_channels]
        widths_p=[n_components_p,out_channels*heads]
        self.hmpnn=create_wmlp(widths,out_channels,1)
        self.ham=create_wmlp(widths_p,out_channels*heads,1)
        # In case we are operating in bipartite graphs, we apply separate
        # transformations 'lin_src' and 'lin_dst' to source and target nodes:
        if isinstance(in_channels, int):
            self.lin_src = Linear(in_channels, heads * out_channels,
                                  bias=False, weight_initializer='glorot')
            self.lin_dst = self.lin_src
        else:
            self.lin_src = Linear(in_channels[0], heads * out_channels, False,
                                  weight_initializer='glorot')
            self.lin_dst = Linear(in_channels[1], heads * out_channels, False,
                                  weight_initializer='glorot')
        # The learnable parameters to compute attention coefficients:
        self.att_src = Parameter(torch.Tensor(1, heads, out_channels))
        self.att_dst = Parameter(torch.Tensor(1, heads, out_channels))
        if edge_dim is not None:
            self.lin_edge = Linear(edge_dim, heads * out_channels, bias=False,
                                   weight_initializer='glorot')
            self.att_edge = Parameter(torch.Tensor(1, heads, out_channels))
        else:
            self.lin_edge = None
            self.register_parameter('att_edge', None)
        if bias and concat:
            self.bias = Parameter(torch.Tensor(heads * out_channels))
        elif bias and not concat:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        # NOTE(review): the hmpnn/ham MLPs are not reset here — confirm
        # whether that is intentional.
        self.lin_src.reset_parameters()
        self.lin_dst.reset_parameters()
        if self.lin_edge is not None:
            self.lin_edge.reset_parameters()
        glorot(self.att_src)
        glorot(self.att_dst)
        glorot(self.att_edge)
        zeros(self.bias)
    def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, alpha_hp: float,
                edge_attr: OptTensor = None, size: Size = None,
                return_attention_weights=None):
        """Run one curvature-weighted attention step; ``alpha_hp`` scales
        the Poincare-embedding term added to the output."""
        H, C = self.heads, self.out_channels
        # We first transform the input node features. If a tuple is passed, we
        # transform source and target node features via separate weights:
        if isinstance(x, Tensor):
            assert x.dim() == 2, "Static graphs not supported in 'GATConv'"
            x_src = x_dst = self.lin_src(x).view(-1, H, C)
        else:  # Tuple of source and target node features:
            x_src, x_dst = x
            assert x_src.dim() == 2, "Static graphs not supported in 'GATConv'"
            x_src = self.lin_src(x_src).view(-1, H, C)
            if x_dst is not None:
                x_dst = self.lin_dst(x_dst).view(-1, H, C)
        x = (x_src, x_dst)
        # Next, we compute node-level attention coefficients, both for source
        # and target nodes (if present):
        alpha_src = (x_src * self.att_src).sum(dim=-1)
        alpha_dst = None if x_dst is None else (x_dst * self.att_dst).sum(-1)
        alpha = (alpha_src, alpha_dst)
        if self.add_self_loops:
            if isinstance(edge_index, Tensor):
                # We only want to add self-loops for nodes that appear both as
                # source and target nodes:
                num_nodes = x_src.size(0)
                if x_dst is not None:
                    num_nodes = min(num_nodes, x_dst.size(0))
                num_nodes = min(size) if size is not None else num_nodes
                edge_index, edge_attr = remove_self_loops(
                    edge_index, edge_attr)
                edge_index, edge_attr = add_self_loops(
                    edge_index, edge_attr, fill_value=self.fill_value,
                    num_nodes=num_nodes)
            elif isinstance(edge_index, SparseTensor):
                if self.edge_dim is None:
                    edge_index = set_diag(edge_index)
                else:
                    raise NotImplementedError(
                        "The usage of 'edge_attr' and 'add_self_loops' "
                        "simultaneously is currently not yet supported for "
                        "'edge_index' in a 'SparseTensor' form")
        # edge_updater_type: (alpha: OptPairTensor, edge_attr: OptTensor)
        alpha = self.edge_updater(edge_index, alpha=alpha, edge_attr=edge_attr)
        # propagate_type: (x: OptPairTensor, alpha: Tensor)
        #hyperIMBA
        # NOTE(review): the attention coefficients computed just above are
        # discarded and replaced entirely by curvature-derived weights; the
        # commented line below would combine the two instead.
        out_weight=self.hmpnn(self.k_ricci)
        out_weight=softmax(out_weight,edge_index[0])
        alpha = out_weight
        # alpha = alpha+out_weight
        out = self.propagate(edge_index, x=x, alpha=alpha, size=size)
        if self.concat:
            out = out.view(-1, self.heads * self.out_channels)
        else:
            out = out.mean(dim=1)
        if self.bias is not None:
            out += self.bias
        # hierarchy-aware node-level term from the Poincare embedding
        p_weight=self.ham(self.e_poinc)
        p_weight=F.leaky_relu(p_weight)
        out = out+alpha_hp*p_weight
        if isinstance(return_attention_weights, bool):
            if isinstance(edge_index, Tensor):
                return out, (edge_index, alpha)
            elif isinstance(edge_index, SparseTensor):
                return out, edge_index.set_value(alpha, layout='coo')
        else:
            return out
    def edge_update(self, alpha_j: Tensor, alpha_i: OptTensor,
                    edge_attr: OptTensor, index: Tensor, ptr: OptTensor,
                    size_i: Optional[int]) -> Tensor:
        # Given edge-level attention coefficients for source and target nodes,
        # we simply need to sum them up to "emulate" concatenation:
        alpha = alpha_j if alpha_i is None else alpha_j + alpha_i
        if edge_attr is not None and self.lin_edge is not None:
            if edge_attr.dim() == 1:
                edge_attr = edge_attr.view(-1, 1)
            edge_attr = self.lin_edge(edge_attr)
            edge_attr = edge_attr.view(-1, self.heads, self.out_channels)
            alpha_edge = (edge_attr * self.att_edge).sum(dim=-1)
            alpha = alpha + alpha_edge
        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, index, ptr, size_i)
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        return alpha
    def message(self, x_j: Tensor, alpha: Tensor) -> Tensor:
        # alpha here is the curvature weight set in forward(); unsqueeze(1)
        # broadcasts it across the head dimension of x_j
        return alpha.unsqueeze(1) * x_j
        #return alpha.unsqueeze(-1) * x_j
    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}({self.in_channels}, '
                f'{self.out_channels}, heads={self.heads})')
def glorot(value: Any):
    """Xavier/Glorot uniform initialization, applied recursively.

    Tensors are filled in-place from ``U(-a, a)`` with
    ``a = sqrt(6 / (size(-2) + size(-1)))``; any object exposing
    ``parameters()`` / ``buffers()`` is traversed recursively.
    """
    if isinstance(value, Tensor):
        bound = math.sqrt(6.0 / (value.size(-2) + value.size(-1)))
        value.data.uniform_(-bound, bound)
        return
    for child in value.parameters() if hasattr(value, 'parameters') else []:
        glorot(child)
    for child in value.buffers() if hasattr(value, 'buffers') else []:
        glorot(child)
def zeros(value: Any):
    """Fill *value* (tensor, module, or parameter container) with zeros."""
    constant(value, 0.0)
def constant(value: Any, fill_value: float):
    """Recursively fill *value* with ``fill_value``.

    Tensors are filled in-place; any object exposing ``parameters()`` /
    ``buffers()`` is traversed recursively.
    """
    if isinstance(value, Tensor):
        value.data.fill_(fill_value)
        return
    for child in value.parameters() if hasattr(value, 'parameters') else []:
        constant(child, fill_value)
    for child in value.buffers() if hasattr(value, 'buffers') else []:
        constant(child, fill_value)
def create_wmlp(widths, nfeato, lbias):
    """Build a weight-producing MLP: bias-free ``Linear`` + in-place
    ``LeakyReLU(0.2)`` for each consecutive pair in ``widths``, capped by
    ``Linear(widths[-1], nfeato, bias=lbias)``."""
    modules = []
    for a, b in zip(widths, widths[1:]):
        modules.append(Linear(a, b, bias=False))
        modules.append(LeakyReLU(0.2, True))
    modules.append(Linear(widths[-1], nfeato, bias=lbias))
    return seq(*modules)
class Net(torch.nn.Module):
    """Two-layer HyperIMBA GAT node classifier (log-softmax output)."""

    def __init__(self, data, num_features, num_hidden, heads, num_classes,
                 k_ricci, e_poinc, n_components, n_components_p):
        super().__init__()
        # first layer: multi-head; second layer: single head, averaged output
        self.conv1 = GATConv(num_features, num_hidden, heads, k_ricci,
                             e_poinc, n_components, n_components_p)
        self.conv2 = GATConv(num_hidden * heads, num_classes, 1, k_ricci,
                             e_poinc, n_components, n_components_p,
                             concat=False)

    def forward(self, data, alpha):
        feats, edges = data.x, data.edge_index
        feats = F.dropout(feats, p=0.6, training=self.training)
        feats = F.relu(self.conv1(feats, edges, alpha))
        # second dropout keeps F.dropout's default rate (p=0.5)
        feats = F.dropout(feats, training=self.training)
        feats = self.conv2(feats, edges, alpha)
        return F.log_softmax(feats, dim=1)
def num(strings):
    """Parse a numeric token as ``int`` when possible, else ``float``."""
    try:
        result = int(strings)
    except ValueError:
        result = float(strings)
    return result
def call(data, name, num_features, num_classes, num_hidden):
    """Attach HyperIMBA geometry to ``data`` and build a GAT model.

    Reads the pre-computed Ricci-curvature edge list and Poincare
    embedding files from ``hyperemb/`` (produced by calculator.py),
    stores them on ``data``, moves everything to the available device
    and returns ``(model, data)``.  The hidden width is split across
    8 attention heads (``num_hidden // 8`` channels per head).
    """
    # --- per-edge Ricci curvature ---
    filename = 'hyperemb/' + name + '.edge_list'
    # BUGFIX: use a context manager; the file handle was previously leaked
    with open(filename) as f:
        cur_list = list(f)
    if name in ('Cora', 'Actor', 'chameleon', 'squirrel'):
        # file already stores both directions of every edge
        ricci_cur = [[] for i in range(len(cur_list))]
        for i in range(len(cur_list)):
            ricci_cur[i] = [num(s) for s in cur_list[i].split(' ', 2)]
    else:
        # mirror each stored edge so both directions carry the curvature
        ricci_cur = [[] for i in range(2 * len(cur_list))]
        for i in range(len(cur_list)):
            ricci_cur[i] = [num(s) for s in cur_list[i].split(' ', 2)]
            ricci_cur[i + len(cur_list)] = [ricci_cur[i][1], ricci_cur[i][0], ricci_cur[i][2]]
    ricci_cur = sorted(ricci_cur)
    k_ricci = [i[2] for i in ricci_cur]
    # zero curvature for the self-loops appended below
    k_ricci = k_ricci + [0 for i in range(data.x.size(0))]
    k_ricci = torch.tensor(k_ricci, dtype=torch.float)
    data.k_ricci = k_ricci.view(-1, 1)
    data.n_components = 1
    # --- per-node Poincare embedding ---
    data.edge_index, _ = remove_self_loops(data.edge_index)
    keys = np.load('hyperemb/' + name + '_keys.npy')
    values = np.load('hyperemb/' + name + '_values.npy')
    e_poinc = dict(zip(keys, values))
    data.n_components_p = values.shape[1]
    # nodes missing from the embedding file fall back to all-ones vectors
    alls = dict(enumerate(np.ones((data.num_nodes, data.n_components_p)), 0))
    alls.update(e_poinc)
    e_poinc = torch.tensor(np.array([alls[i] for i in alls]))
    data.e_poinc = e_poinc.to(torch.float32)
    data.edge_index, _ = add_self_loops(data.edge_index, num_nodes=data.x.size(0))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    data.k_ricci = data.k_ricci.to(device)
    data.e_poinc = data.e_poinc.to(device)
    data = data.to(device)
    model = Net(data, num_features, num_hidden // 8, 8, num_classes,
                data.k_ricci, data.e_poinc, data.n_components,
                data.n_components_p).to(device)
    return model, data
# | 12,092 | 39.043046 | 136 | py |
# larq | larq-main/larq/optimizers_test.py
import numpy as np
import pytest
import tensorflow as tf
from packaging import version
from tensorflow import keras
from tensorflow.python.keras import testing_utils
import larq as lq
from larq import testing_utils as lq_testing_utils
if version.parse(tf.__version__) >= version.parse("2.11"):
from tensorflow.keras.optimizers import legacy as optimizers # type: ignore
else:
from tensorflow.keras import optimizers # type: ignore
def _test_optimizer(
    optimizer, target=0.75, test_kernels_are_binary=True, trainable_bn=True
):
    """Train a small binarized model for two epochs with ``optimizer`` and
    check that all trainable variables move, that quantized kernels stay
    binary (when requested), and that accuracy reaches ``target``."""
    np.random.seed(1337)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=1000, test_samples=0, input_shape=(10,), num_classes=2
    )
    y_train = keras.utils.to_categorical(y_train)

    model = lq_testing_utils.get_small_bnn_model(
        x_train.shape[1], 20, y_train.shape[1], trainable_bn=trainable_bn
    )
    model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["acc"])

    before = [tf.keras.backend.get_value(w) for w in model.trainable_weights]
    history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
    after = [tf.keras.backend.get_value(w) for w in model.trainable_weights]

    # every trainable variable must actually have been updated
    for old, new in zip(before, after):
        assert not np.all(old == new)

    # Note that when kernels are treated as latent weights they need not be
    # binary (see https://arxiv.org/abs/1906.02107 for further discussion)
    if test_kernels_are_binary:
        for layer in (l for l in model.layers if "quant" in l.name):
            for weight in layer.trainable_weights:
                assert np.all(np.isin(tf.keras.backend.get_value(weight), [-1, 1]))

    assert history.history["acc"][-1] >= target
def _test_serialization(optimizer):
    """Check that serialize -> deserialize -> serialize is a fixed point."""
    config = keras.optimizers.serialize(optimizer)
    restored = keras.optimizers.deserialize(config)
    assert keras.optimizers.serialize(restored) == config
class TestCaseOptimizer:
    """Tests for `lq.optimizers.CaseOptimizer` argument validation and behaviour."""

    def test_type_check_predicate(self):
        """A non-callable predicate must be rejected with a TypeError."""
        with pytest.raises(TypeError):
            # pytype: disable=wrong-arg-types
            lq.optimizers.CaseOptimizer((False, lq.optimizers.Bop()))
            # pytype: enable=wrong-arg-types

    def test_type_check_optimizer(self):
        """A non-optimizer second tuple element must be rejected with a TypeError."""
        with pytest.raises(TypeError):
            lq.optimizers.CaseOptimizer((lq.optimizers.Bop.is_binary_variable, False))

    def test_type_check_default(self):
        """A non-optimizer `default_optimizer` must be rejected with a TypeError."""
        with pytest.raises(TypeError):
            lq.optimizers.CaseOptimizer(
                (lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
                default_optimizer=False,
            )

    def test_overlapping_predicates(self):
        """Two predicates claiming the same variable must raise a ValueError."""
        with pytest.raises(ValueError):
            naughty_case_opt = lq.optimizers.CaseOptimizer(
                (lambda var: True, lq.optimizers.Bop()),
                (lambda var: True, lq.optimizers.Bop()),
            )
            _test_optimizer(naughty_case_opt)

    def test_missing_default(self):
        """Unclaimed variables with no default optimizer must emit a warning."""
        with pytest.warns(Warning):
            naughty_case_opt = lq.optimizers.CaseOptimizer(
                (lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
            )
            # Simple MNIST model
            mnist = tf.keras.datasets.mnist
            (train_images, train_labels), _ = mnist.load_data()
            model = tf.keras.Sequential(
                [
                    tf.keras.layers.Flatten(input_shape=(28, 28)),
                    lq.layers.QuantDense(
                        64,
                        input_quantizer="ste_sign",
                        kernel_quantizer=lq.quantizers.NoOp(precision=1),
                        activation="relu",
                    ),
                    tf.keras.layers.Dense(10, activation="softmax"),
                ]
            )
            model.compile(
                loss="sparse_categorical_crossentropy",
                optimizer=naughty_case_opt,
                metrics=["acc"],
            )
            # Should raise on first call to apply_gradients()
            model.fit(train_images[:1], train_labels[:1], epochs=1)

    def test_wrong_predicate(self):
        """Make sure we throw when an optimizer does not claim variables."""
        with pytest.raises(ValueError):
            naughty_case_opt = lq.optimizers.CaseOptimizer(
                (lambda var: False, lq.optimizers.Bop()),
                default_optimizer=optimizers.Adam(0.01),
            )
            # Simple MNIST model
            mnist = tf.keras.datasets.mnist
            (train_images, train_labels), _ = mnist.load_data()
            model = tf.keras.Sequential(
                [
                    tf.keras.layers.Flatten(input_shape=(28, 28)),
                    tf.keras.layers.Dense(128, activation="relu"),
                    tf.keras.layers.Dense(10, activation="softmax"),
                ]
            )
            model.compile(
                loss="sparse_categorical_crossentropy",
                optimizer=naughty_case_opt,
                metrics=["acc"],
            )
            # Should raise on first call to apply_gradients()
            model.fit(train_images[:1], train_labels[:1], epochs=1)

    def test_weights(self):
        """The case optimizer must expose the concatenated slot variables of its children."""
        (train_images, train_labels), _ = tf.keras.datasets.mnist.load_data()
        model = tf.keras.Sequential(
            [
                tf.keras.layers.Flatten(input_shape=(28, 28)),
                lq.layers.QuantDense(
                    64,
                    input_quantizer="ste_sign",
                    kernel_quantizer=lq.quantizers.NoOp(precision=1),
                    activation="relu",
                ),
                tf.keras.layers.Dense(10, activation="softmax"),
            ]
        )
        model.compile(
            loss="sparse_categorical_crossentropy",
            optimizer=lq.optimizers.CaseOptimizer(
                (lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
                default_optimizer=optimizers.SGD(0.1, momentum=0.9),
            ),
        )
        model.fit(train_images[:1], train_labels[:1], epochs=1)
        opt_weights = model.optimizer.weights
        # SGD with momentum and Bop both create a single momentum variable per weight
        # and one variable each to keep track of iterations
        assert len(opt_weights) == len(model.weights) + 2
        # Check the aggregated list is exactly the child optimizers' weights, in order.
        checked_weights = 0
        for opt in model.optimizer.optimizers:
            for weight in opt.weights:
                assert weight is opt_weights[checked_weights]
                checked_weights += 1
        assert checked_weights == len(opt_weights)

    @pytest.mark.usefixtures("eager_mode")
    def test_checkpoint(self, tmp_path):
        """Slot variables of wrapped optimizers must survive a checkpoint round-trip."""
        # Build and run a simple model.
        var = tf.Variable([2.0])
        opt = optimizers.SGD(1.0, momentum=1.0)
        opt = lq.optimizers.CaseOptimizer((lambda var: True, opt))
        opt.minimize(lambda: var + 1.0, var_list=[var])
        slot_var = opt.optimizers[0].get_slot(var, "momentum")
        slot_value = slot_var.numpy().item()
        # Save a checkpoint.
        checkpoint = tf.train.Checkpoint(optimizer=opt, var=var)
        save_path = checkpoint.save(tmp_path / "ckpt")
        # Run model again.
        opt.minimize(lambda: var + 1.0, var_list=[var])
        assert slot_var.numpy().item() != slot_value
        # Load checkpoint and ensure loss scale is back to its original value.
        status = checkpoint.restore(save_path)
        status.assert_consumed()
        status.run_restore_ops()
        assert slot_var.numpy().item() == slot_value
class TestBopOptimizer:
    """Training, mixed-precision, schedule and serialization tests for `lq.optimizers.Bop`."""

    def test_bop_accuracy(self):
        """Bop (with Adam fallback) must train a small BNN to the accuracy target."""
        _test_optimizer(
            lq.optimizers.CaseOptimizer(
                (lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
                default_optimizer=optimizers.Adam(0.01),
            ),
            test_kernels_are_binary=True,
        )
        # test optimizer on model with only binary trainable vars (low accuracy)
        _test_optimizer(
            lq.optimizers.CaseOptimizer(
                (lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
                default_optimizer=optimizers.Adam(0.01),
            ),
            test_kernels_are_binary=True,
            trainable_bn=False,
            target=0,
        )

    @pytest.mark.usefixtures("distribute_scope")
    def test_mixed_precision(self):
        """Bop must work when wrapped in a Keras loss-scale optimizer."""
        opt = lq.optimizers.CaseOptimizer(
            (lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
            default_optimizer=optimizers.Adam(0.01),
        )
        # The LossScaleOptimizer API moved between TF versions; fall back to
        # the experimental namespace on older releases.
        try:
            opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
        except AttributeError:
            opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
                opt, "dynamic"
            )
        _test_optimizer(opt, test_kernels_are_binary=True)

    def test_bop_tf_1_14_schedules(self):
        """Bop must accept learning-rate-schedule objects for `threshold` and `gamma`."""
        _test_optimizer(
            lq.optimizers.CaseOptimizer(
                (
                    lq.optimizers.Bop.is_binary_variable,
                    lq.optimizers.Bop(
                        threshold=tf.keras.optimizers.schedules.InverseTimeDecay(
                            3.0, decay_steps=1.0, decay_rate=0.5
                        ),
                        gamma=tf.keras.optimizers.schedules.InverseTimeDecay(
                            3.0, decay_steps=1.0, decay_rate=0.5
                        ),
                    ),
                ),
                default_optimizer=optimizers.Adam(0.01),
            ),
            test_kernels_are_binary=True,
        )

    def test_bop_serialization(self):
        """A case optimizer wrapping Bop must round-trip through serialization."""
        _test_serialization(
            lq.optimizers.CaseOptimizer(
                (lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
                default_optimizer=optimizers.Adam(0.01),
            ),
        )

    @pytest.mark.parametrize(
        "hyper",
        [5e-4, tf.keras.optimizers.schedules.PolynomialDecay(5e-4, 100)],
    )
    def test_bop_serialization_schedule(self, hyper):
        """Hyperparameter types (scalar or schedule) must be preserved by get_config/from_config."""
        bop = lq.optimizers.Bop(
            gamma=hyper,
            threshold=hyper,
        )
        new_bop = lq.optimizers.Bop.from_config(bop.get_config())
        assert isinstance(new_bop._get_hyper("gamma"), type(bop._get_hyper("gamma")))
        assert isinstance(
            new_bop._get_hyper("threshold"), type(bop._get_hyper("threshold"))
        )
| 10,528 | 37.01083 | 88 | py |
larq | larq-main/larq/callbacks.py | from typing import Any, Callable, MutableMapping, Optional
from tensorflow import keras
class HyperparameterScheduler(keras.callbacks.Callback):
    """Generic hyperparameter scheduler.
    !!! example
        ```python
        bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)
        adam = tf.keras.optimizers.Adam(0.01)
        optimizer = lq.optimizers.CaseOptimizer(
            (lq.optimizers.Bop.is_binary_variable, bop), default_optimizer=adam,
        )
        callbacks = [
            HyperparameterScheduler(lambda x: 0.001 * (0.1 ** (x // 30)), "gamma", bop)
        ]
        ```
    # Arguments
    schedule: a function that takes an epoch index as input
        (integer, indexed from 0) and returns a new hyperparameter as output.
    hyperparameter: str. the name of the hyperparameter to be scheduled.
    optimizer: the optimizer that contains the hyperparameter that will be scheduled.
        Defaults to `self.model.optimizer` if `optimizer == None`.
    update_freq: str (optional), denotes on what update_freq to change the
        hyperparameter. Can be either "epoch" (default) or "step".
    verbose: int. 0: quiet, 1: update messages.
    log_name: str (optional), under which name to log this hyperparameter to
        Tensorboard. If `None`, defaults to `hyperparameter`. Use this if you have
        several schedules for the same hyperparameter on different optimizers.
    """

    def __init__(
        self,
        schedule: Callable,
        hyperparameter: str,
        optimizer: Optional[keras.optimizers.Optimizer] = None,
        update_freq: str = "epoch",
        verbose: int = 0,
        log_name: Optional[str] = None,
    ):
        super().__init__()
        self.optimizer = optimizer
        self.schedule = schedule
        self.hyperparameter = hyperparameter
        self.log_name = log_name or hyperparameter
        self.verbose = verbose
        # Validate eagerly so a typo fails at construction time, not mid-training.
        if update_freq not in ["epoch", "step"]:
            raise ValueError(
                "HyperparameterScheduler.update_freq can only be 'step' or 'epoch'."
                f" Received value '{update_freq}'"
            )
        self.update_freq = update_freq

    def set_model(self, model: keras.models.Model) -> None:
        """Bind the callback to `model`, defaulting to the model's own optimizer."""
        super().set_model(model)
        if self.optimizer is None:
            # It is not possible for a model to reach this state and not have
            # an optimizer, so we can safely access it here.
            self.optimizer = model.optimizer
        if not hasattr(self.optimizer, self.hyperparameter):
            raise ValueError(
                f'Optimizer must have a "{self.hyperparameter}" attribute.'
            )

    def set_hyperparameter(self, t: int) -> Any:
        """Evaluate the schedule at step/epoch `t` and write the result into the optimizer."""
        hp = getattr(self.optimizer, self.hyperparameter)
        try:  # new API
            hyperparameter_val = keras.backend.get_value(hp)
            hyperparameter_val = self.schedule(t, hyperparameter_val)
        except TypeError:  # Support for old API for backward compatibility
            # NOTE(review): a TypeError raised *inside* a two-argument schedule
            # is also caught here and silently retried with one argument —
            # confirm this is acceptable for user schedules.
            hyperparameter_val = self.schedule(t)
        keras.backend.set_value(hp, hyperparameter_val)
        return hp

    def on_batch_begin(
        self, batch: int, logs: Optional[MutableMapping[str, Any]] = None
    ) -> None:
        """Apply the schedule once per training step when `update_freq == "step"`."""
        if not self.update_freq == "step":
            return
        # We use optimizer.iterations (i.e. global step), since batch only
        # reflects the batch index in the current epoch.
        batch = keras.backend.get_value(self.optimizer.iterations)
        hp = self.set_hyperparameter(batch)
        if self.verbose > 0:
            print(
                f"Batch {batch}: {self.log_name} is now {keras.backend.get_value(hp)}."
            )

    def on_epoch_begin(
        self, epoch: int, logs: Optional[MutableMapping[str, Any]] = None
    ) -> None:
        """Apply the schedule once per epoch when `update_freq == "epoch"`."""
        if not self.update_freq == "epoch":
            return
        hp = self.set_hyperparameter(epoch)
        if self.verbose > 0:
            print(
                f"Epoch {epoch}: {self.log_name} is now {keras.backend.get_value(hp)}."
            )

    def on_epoch_end(
        self, epoch: int, logs: Optional[MutableMapping[str, Any]] = None
    ) -> None:
        """Record the current hyperparameter value in `logs` (e.g. for TensorBoard)."""
        logs = logs or {}
        hp = getattr(self.optimizer, self.hyperparameter)
        logs[self.log_name] = keras.backend.get_value(hp)
| 4,375 | 36.724138 | 89 | py |
larq | larq-main/larq/quantizers.py | """A Quantizer defines the way of transforming a full precision input to a
quantized output and the pseudo-gradient method used for the backwards pass.
Quantizers can either be used through quantizer arguments that are supported
for Larq layers, such as `input_quantizer` and `kernel_quantizer`; or they
can be used similar to activations, i.e. either through an `Activation` layer,
or through the `activation` argument supported by all forward layers:
```python
import tensorflow as tf
import larq as lq
...
x = lq.layers.QuantDense(64, activation=None)(x)
x = lq.layers.QuantDense(64, input_quantizer="ste_sign")(x)
```
is equivalent to:
```python
x = lq.layers.QuantDense(64)(x)
x = tf.keras.layers.Activation("ste_sign")(x)
x = lq.layers.QuantDense(64)(x)
```
as well as:
```python
x = lq.layers.QuantDense(64, activation="ste_sign")(x)
x = lq.layers.QuantDense(64)(x)
```
We highly recommend using the first of these formulations: for the
other two formulations, intermediate layers - like batch normalization or
average pooling - and shortcut connections may result in non-binary input
to the convolutions.
Quantizers can either be referenced by string or called directly.
The following usages are equivalent:
```python
lq.layers.QuantDense(64, kernel_quantizer="ste_sign")
```
```python
lq.layers.QuantDense(64, kernel_quantizer=lq.quantizers.SteSign(clip_value=1.0))
```
"""
from typing import Callable, Union
import tensorflow as tf
from packaging import version
from larq import context, math
from larq import metrics as lq_metrics
from larq import utils
__all__ = [
"ApproxSign",
"DoReFa",
"DoReFaQuantizer",
"MagnitudeAwareSign",
"NoOp",
"NoOpQuantizer",
"Quantizer",
"SteHeaviside",
"SteSign",
"SteTern",
"SwishSign",
]
def _clipped_gradient(x, dy, clip_value):
    """Return `dy` zeroed wherever `|x| > clip_value` (pass-through if unclipped)."""
    if clip_value is None:
        return dy
    keep = tf.math.less_equal(tf.math.abs(x), clip_value)
    return tf.where(keep, dy, tf.zeros_like(dy))
def ste_sign(x: tf.Tensor, clip_value: float = 1.0) -> tf.Tensor:
    """Binary sign in the forward pass with a clipped-identity (STE) gradient."""

    @tf.custom_gradient
    def _sign_with_ste(inputs):
        def _grad(dy):
            # Straight-through estimator: identity gradient inside the clip range.
            return _clipped_gradient(inputs, dy, clip_value)

        return math.sign(inputs), _grad

    return _sign_with_ste(x)
def _scaled_sign(x):  # pragma: no cover
    # Only used to render the `MagnitudeAwareSign` activation plot in the docs
    # (see the `plot-activation` directive in that class's docstring).
    return 1.3 * ste_sign(x)
@tf.custom_gradient
def approx_sign(x: tf.Tensor) -> tf.Tensor:
    """Binary sign in the forward pass with the ApproxSign gradient.

    Backward pass uses the piecewise-linear surrogate `2 * (1 - |x|)` inside
    [-1, 1] and zero outside (Bi-Real Net, https://arxiv.org/abs/1808.00278).
    """
    def grad(dy):
        abs_x = tf.math.abs(x)
        zeros = tf.zeros_like(dy)
        mask = tf.math.less_equal(abs_x, 1.0)
        return tf.where(mask, (1 - abs_x) * 2 * dy, zeros)
    return math.sign(x), grad
def swish_sign(x: tf.Tensor, beta: float = 5.0) -> tf.Tensor:
    """Binary sign in the forward pass with the SignSwish gradient approximation."""

    @tf.custom_gradient
    def _sign_with_swish_grad(inputs):
        def _grad(dy):
            # Derivative of the SignSwish surrogate, scaled by the incoming gradient.
            scaled = beta * inputs
            return dy * beta * (2 - scaled * tf.tanh(scaled * 0.5)) / (1 + tf.cosh(scaled))

        return math.sign(inputs), _grad

    return _sign_with_swish_grad(x)
def ste_tern(
    x: tf.Tensor,
    threshold_value: float = 0.05,
    ternary_weight_networks: bool = False,
    clip_value: float = 1.0,
) -> tf.Tensor:
    """Ternarize `x` to {-1, 0, +1} with a clipped-identity (STE) gradient.

    If `ternary_weight_networks` is True the threshold is computed as
    `0.7 * mean(|x|)` (Ternary Weight Networks); otherwise the fixed
    `threshold_value` is used.
    """
    @tf.custom_gradient
    def _call(x):
        if ternary_weight_networks:
            threshold = 0.7 * tf.reduce_sum(tf.abs(x)) / tf.cast(tf.size(x), x.dtype)
        else:
            threshold = threshold_value
        def grad(dy):
            return _clipped_gradient(x, dy, clip_value)
        # sign(x + t) + sign(x - t) is -2, 0, or +2; the outer sign maps
        # that to the ternary set {-1, 0, +1}.
        return tf.sign(tf.sign(x + threshold) + tf.sign(x - threshold)), grad
    return _call(x)
def ste_heaviside(x: tf.Tensor, clip_value: float = 1.0) -> tf.Tensor:
    """Heaviside step in the forward pass with a clipped-identity (STE) gradient."""

    @tf.custom_gradient
    def _heaviside_with_ste(inputs):
        def _grad(dy):
            # Pass the gradient through unchanged wherever |x| <= clip_value.
            return _clipped_gradient(inputs, dy, clip_value)

        return math.heaviside(inputs), _grad

    return _heaviside_with_ste(x)
class Quantizer(tf.keras.layers.Layer):
    """Common base class for defining quantizers.
    # Attributes
    precision: An integer defining the precision of the output. This value will be
        used by `lq.models.summary()` for improved logging.
    """
    precision = None
    def compute_output_shape(self, input_shape):
        # Quantization is shape-preserving: the output has the input's shape.
        return input_shape
class _BaseQuantizer(Quantizer):
    """Private base class for defining quantizers with Larq metrics."""

    def __init__(self, *args, metrics=None, **kwargs):
        # `metrics` lists the Larq training metrics to attach; currently only
        # "flip_ratio" is acted upon (see `build`).
        self._custom_metrics = metrics
        super().__init__(*args, **kwargs)

    def build(self, input_shape):
        if self._custom_metrics and "flip_ratio" in self._custom_metrics:
            self.flip_ratio = lq_metrics.FlipRatio(name=f"flip_ratio/{self.name}")
            self.flip_ratio.build(input_shape)
        super().build(input_shape)

    def call(self, inputs):
        # Track the metric as a side effect; the quantized values are produced
        # by subclasses before they delegate to this method.
        if hasattr(self, "flip_ratio"):
            self.add_metric(self.flip_ratio(inputs))
        return inputs

    @property
    def non_trainable_weights(self):
        # Hide the metric's internal state variables from the layer's weights.
        return []
@utils.register_keras_custom_object
class NoOp(_BaseQuantizer):
    r"""Instantiates a serializable no-op quantizer.
    \\[
    q(x) = x
    \\]
    !!! warning
        This quantizer will not change the input variable. It is only intended to mark
        variables with a desired precision that will be recognized by optimizers like
        `Bop` and add training metrics to track variable changes.
    !!! example
        ```python
        layer = lq.layers.QuantDense(
            16, kernel_quantizer=lq.quantizers.NoOp(precision=1),
        )
        layer.build((32,))
        assert layer.kernel.precision == 1
        ```
    # Arguments
    precision: Set the desired precision of the variable. This can be used to tag
        variables with a precision without changing their values.
    metrics: An array of metrics to add to the layer. If `None` the metrics set in
        `larq.context.metrics_scope` are used. Currently only the `flip_ratio`
        metric is available.
    """
    precision = None

    def __init__(self, precision: int, **kwargs):
        self.precision = precision
        super().__init__(**kwargs)

    def get_config(self):
        return {**super().get_config(), "precision": self.precision}
# `NoOp` used to be called `NoOpQuantizer`; this alias is for
# backwards-compatibility. Do not remove: external code may still reference
# the old name.
NoOpQuantizer = NoOp
@utils.register_alias("ste_sign")
@utils.register_keras_custom_object
class SteSign(_BaseQuantizer):
    r"""Instantiates a serializable binary quantizer.
    \\[
    q(x) = \begin{cases}
      -1 & x < 0 \\\
      1 & x \geq 0
    \end{cases}
    \\]
    The gradient is estimated using the Straight-Through Estimator
    (essentially the binarization is replaced by a clipped identity on the
    backward pass).
    \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
      1 & \left|x\right| \leq \texttt{clip_value} \\\
      0 & \left|x\right| > \texttt{clip_value}
    \end{cases}\\]
    ```plot-activation
    quantizers.SteSign
    ```
    # Arguments
    clip_value: Threshold for clipping gradients. If `None` gradients are not
        clipped.
    metrics: An array of metrics to add to the layer. If `None` the metrics set in
        `larq.context.metrics_scope` are used. Currently only the `flip_ratio`
        metric is available.
    # References
    - [Binarized Neural Networks: Training Deep Neural Networks with Weights and
      Activations Constrained to +1 or -1](https://arxiv.org/abs/1602.02830)
    """
    precision = 1

    def __init__(self, clip_value: float = 1.0, **kwargs):
        self.clip_value = clip_value
        super().__init__(**kwargs)

    def call(self, inputs):
        outputs = ste_sign(inputs, clip_value=self.clip_value)
        # Delegate to the base class so training metrics are recorded.
        return super().call(outputs)

    def get_config(self):
        return {**super().get_config(), "clip_value": self.clip_value}
@utils.register_alias("approx_sign")
@utils.register_keras_custom_object
class ApproxSign(_BaseQuantizer):
    r"""Instantiates a serializable binary quantizer.
    \\[
    q(x) = \begin{cases}
      -1 & x < 0 \\\
      1 & x \geq 0
    \end{cases}
    \\]
    The gradient is estimated using the ApproxSign method.
    \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
      (2 - 2 \left|x\right|) & \left|x\right| \leq 1 \\\
      0 & \left|x\right| > 1
    \end{cases}
    \\]
    ```plot-activation
    quantizers.ApproxSign
    ```
    # Arguments
    metrics: An array of metrics to add to the layer. If `None` the metrics set in
        `larq.context.metrics_scope` are used. Currently only the `flip_ratio`
        metric is available.
    # References
    - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
      Representational Capability and Advanced Training
      Algorithm](https://arxiv.org/abs/1808.00278)
    """
    precision = 1

    def call(self, inputs):
        outputs = approx_sign(inputs)
        # Delegate to the base class so training metrics are recorded.
        return super().call(outputs)
@utils.register_alias("ste_heaviside")
@utils.register_keras_custom_object
class SteHeaviside(_BaseQuantizer):
    r"""
    Instantiates a binarization quantizer with output values 0 and 1.
    \\[
    q(x) = \begin{cases}
    +1 & x > 0 \\\
    0 & x \leq 0
    \end{cases}
    \\]
    The gradient is estimated using the Straight-Through Estimator
    (essentially the binarization is replaced by a clipped identity on the
    backward pass).
    \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
    1 & \left|x\right| \leq 1 \\\
    0 & \left|x\right| > 1
    \end{cases}\\]
    ```plot-activation
    quantizers.SteHeaviside
    ```
    # Arguments
    clip_value: Threshold for clipping gradients. If `None` gradients are not
        clipped.
    metrics: An array of metrics to add to the layer. If `None` the metrics set in
        `larq.context.metrics_scope` are used. Currently only the `flip_ratio`
        metric is available.
    # Returns
    SteHeaviside binarization function
    """
    precision = 1

    def __init__(self, clip_value: float = 1.0, **kwargs):
        self.clip_value = clip_value
        super().__init__(**kwargs)

    def call(self, inputs):
        outputs = ste_heaviside(inputs, clip_value=self.clip_value)
        # Delegate to the base class so training metrics are recorded.
        return super().call(outputs)

    def get_config(self):
        return {**super().get_config(), "clip_value": self.clip_value}
@utils.register_alias("swish_sign")
@utils.register_keras_custom_object
class SwishSign(_BaseQuantizer):
    r"""Sign binarization function.
    \\[
    q(x) = \begin{cases}
      -1 & x < 0 \\\
      1 & x \geq 0
    \end{cases}
    \\]
    The gradient is estimated using the SignSwish method.
    \\[
    \frac{\partial q_{\beta}(x)}{\partial x} = \frac{\beta\left\\{2-\beta x \tanh \left(\frac{\beta x}{2}\right)\right\\}}{1+\cosh (\beta x)}
    \\]
    ```plot-activation
    quantizers.SwishSign
    ```
    # Arguments
    beta: Larger values result in a closer approximation to the derivative of the
        sign.
    metrics: An array of metrics to add to the layer. If `None` the metrics set in
        `larq.context.metrics_scope` are used. Currently only the `flip_ratio`
        metric is available.
    # Returns
    SwishSign quantization function
    # References
    - [BNN+: Improved Binary Network Training](https://arxiv.org/abs/1812.11800)
    """
    precision = 1

    def __init__(self, beta: float = 5.0, **kwargs):
        self.beta = beta
        super().__init__(**kwargs)

    def call(self, inputs):
        outputs = swish_sign(inputs, beta=self.beta)
        # Delegate to the base class so training metrics are recorded.
        return super().call(outputs)

    def get_config(self):
        return {**super().get_config(), "beta": self.beta}
@utils.register_alias("magnitude_aware_sign")
@utils.register_keras_custom_object
class MagnitudeAwareSign(_BaseQuantizer):
    r"""Instantiates a serializable magnitude-aware sign quantizer for Bi-Real Net.
    A scaled sign function computed according to Section 3.3 in
    [Zechun Liu et al](https://arxiv.org/abs/1808.00278).
    ```plot-activation
    quantizers._scaled_sign
    ```
    # Arguments
    clip_value: Threshold for clipping gradients. If `None` gradients are not
        clipped.
    metrics: An array of metrics to add to the layer. If `None` the metrics set in
        `larq.context.metrics_scope` are used. Currently only the `flip_ratio`
        metric is available.
    # References
    - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
      Representational Capability and Advanced Training
      Algorithm](https://arxiv.org/abs/1808.00278)
    """
    precision = 1

    def __init__(self, clip_value: float = 1.0, **kwargs):
        self.clip_value = clip_value
        super().__init__(**kwargs)

    def call(self, inputs):
        # Mean absolute value reduced over all axes except the last, i.e. one
        # scale per slice along the final axis. `stop_gradient` keeps the
        # scale out of the backward pass.
        scale_factor = tf.stop_gradient(
            tf.reduce_mean(tf.abs(inputs), axis=list(range(len(inputs.shape) - 1)))
        )
        outputs = scale_factor * ste_sign(inputs, clip_value=self.clip_value)
        return super().call(outputs)

    def get_config(self):
        return {**super().get_config(), "clip_value": self.clip_value}
@utils.register_alias("ste_tern")
@utils.register_keras_custom_object
class SteTern(_BaseQuantizer):
    r"""Instantiates a serializable ternarization quantizer.
    \\[
    q(x) = \begin{cases}
    +1 & x > \Delta \\\
    0 & |x| < \Delta \\\
     -1 & x < - \Delta
    \end{cases}
    \\]
    where \\(\Delta\\) is defined as the threshold and can be passed as an argument,
    or can be calculated as per the Ternary Weight Networks original paper, such that
    \\[
    \Delta = \frac{0.7}{n} \sum_{i=1}^{n} |W_i|
    \\]
    where we assume that \\(W_i\\) is generated from a normal distribution.
    The gradient is estimated using the Straight-Through Estimator
    (essentially the Ternarization is replaced by a clipped identity on the
    backward pass).
    \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
    1 & \left|x\right| \leq \texttt{clip_value} \\\
    0 & \left|x\right| > \texttt{clip_value}
    \end{cases}\\]
    ```plot-activation
    quantizers.SteTern
    ```
    # Arguments
    threshold_value: The value for the threshold, \\(\Delta\\).
    ternary_weight_networks: Boolean of whether to use the
        Ternary Weight Networks threshold calculation.
    clip_value: Threshold for clipping gradients. If `None` gradients are not
        clipped.
    metrics: An array of metrics to add to the layer. If `None` the metrics set in
        `larq.context.metrics_scope` are used. Currently only the `flip_ratio`
        metric is available.
    # References
    - [Ternary Weight Networks](https://arxiv.org/abs/1605.04711)
    """
    precision = 2

    def __init__(
        self,
        threshold_value: float = 0.05,
        ternary_weight_networks: bool = False,
        clip_value: float = 1.0,
        **kwargs,
    ):
        self.threshold_value = threshold_value
        self.ternary_weight_networks = ternary_weight_networks
        self.clip_value = clip_value
        super().__init__(**kwargs)

    def call(self, inputs):
        outputs = ste_tern(
            inputs,
            threshold_value=self.threshold_value,
            ternary_weight_networks=self.ternary_weight_networks,
            clip_value=self.clip_value,
        )
        # Delegate to the base class so training metrics are recorded.
        return super().call(outputs)

    def get_config(self):
        return {
            **super().get_config(),
            "threshold_value": self.threshold_value,
            "ternary_weight_networks": self.ternary_weight_networks,
            "clip_value": self.clip_value,
        }
@utils.register_alias("dorefa_quantizer")
@utils.register_keras_custom_object
class DoReFa(_BaseQuantizer):
    r"""Instantiates a serializable k_bit quantizer as in the DoReFa paper.
    \\[
    q(x) = \begin{cases}
    0 & x < \frac{1}{2n} \\\
    \frac{i}{n} & \frac{2i-1}{2n} < x < \frac{2i+1}{2n} \text{ for } i \in \\{1,n-1\\}\\\
    1 & \frac{2n-1}{2n} < x
    \end{cases}
    \\]
    where \\(n = 2^{\text{k_bit}} - 1\\). The number of bits, k_bit, needs to be passed
    as an argument.
    The gradient is estimated using the Straight-Through Estimator
    (essentially the binarization is replaced by a clipped identity on the
    backward pass).
    \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
    1 & 0 \leq x \leq 1 \\\
    0 & \text{else}
    \end{cases}\\]
    The behavior for quantizing weights should be different in comparison to
    the quantization of activations:
    instead of limiting input operands (or in this case: weights) using a hard
    limiter, a tangens hyperbolicus is applied to achieve a softer limiting
    with a gradient, which is continuously differentiable itself.
    \\[
    w_{lim}(w) = \tanh(w)
    \\]
    Furthermore, the weights of each layer are normed, such that the weight with
    the largest magnitude gets the largest or smallest (depending on its sign)
    quantizable value. That way, the full quantizable numeric range is utilized.
    \\[
    w_{norm}(w) = \frac{w}{\max(|w|)}
    \\]
    The formulas can be found in the paper in section 2.3. Please note, that
    the paper refers to weights being quantized on a numeric range of [-1, 1], while
    activations are quantized on the numeric range [0, 1]. This implementation
    uses the same ranges as specified in the paper.
    The activation quantizer defines the function quantizek() from the paper with
    the correct numeric range of [0, 1]. The weight quantization mode adds
    pre- and post-processing for numeric range adaptions, soft limiting and
    norming. The full quantization function including the adaption of numeric ranges is
    \\[
    q(w) = 2 \, quantize_{k}(\frac{w_{norm}\left(w_{lim}\left(w\right)\right)}{2} + \frac{1}{2}) - 1
    \\]
    !!! warning
        The weight mode works for weights on the range [-1, 1], which matches the
        default setting of `constraints.weight_clip`. Do not use this quantizer
        with a different constraint `clip_value` than the default one.
    __`mode == "activations"`__
    ```plot-activation
    quantizers.DoReFa
    ```
    __`mode == "weights"`__
    ```plot-activation
    quantizers.DoReFa(mode='weights')
    ```
    # Arguments
    k_bit: number of bits for the quantization.
    mode: `"activations"` for clipping inputs on [0, 1] range or `"weights"` for
        soft-clipping and norming weights on [-1, 1] range before applying
        quantization.
    metrics: An array of metrics to add to the layer. If `None` the metrics set in
        `larq.context.metrics_scope` are used. Currently only the `flip_ratio`
        metric is available.
    # Returns
    Quantization function
    # Raises
    ValueError for bad value of `mode`.
    # References
    - [DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low
      Bitwidth Gradients](https://arxiv.org/abs/1606.06160)
    """
    precision = None

    def __init__(self, k_bit: int = 2, mode: str = "activations", **kwargs):
        # `precision` doubles as the bit width `k_bit` for this quantizer.
        self.precision = k_bit
        if mode not in ("activations", "weights"):
            raise ValueError(
                f"Invalid DoReFa quantizer mode {mode}. "
                "Valid values are 'activations' and 'weights'."
            )
        self.mode = mode
        super().__init__(**kwargs)

    def weight_preprocess(self, inputs):
        """Soft-limit, norm and shift weights from [-1, 1] into the [0, 1] range."""
        # Limit inputs to [-1, 1] range
        limited = tf.math.tanh(inputs)
        # Divider for max-value norm.
        dividend = tf.math.reduce_max(tf.math.abs(limited))
        # Need to stop the gradient here. Otherwise, for the maximum element,
        # which gives the dividend, normed is limited/limited (for this one
        # maximum digit). The derivative of y = x/x, dy/dx is just zero, when
        # one does the simplification y = x/x = 1. But TF does NOT do this
        # simplification when computing the gradient for the
        # normed = limited/dividend operation. As a result, this gradient
        # becomes complicated, because during the computation, "dividend" is
        # not just a constant, but depends on "limited" instead. Here,
        # tf.stop_gradient is used to mark "dividend" as a constant explicitly.
        dividend = tf.stop_gradient(dividend)
        # Norm and then scale from value range [-1,1] to [0,1] (the range
        # expected by the core quantization operation).
        # If the dividend used for the norm operation is 0, all elements of
        # the weight tensor are 0 and divide_no_nan returns 0 for all weights.
        # So if all elements of the weight tensor are zero, nothing is normed.
        return tf.math.divide_no_nan(limited, 2.0 * dividend) + 0.5

    def call(self, inputs):
        # Depending on quantizer mode (activation or weight) just clip inputs
        # on [0, 1] range or use weight preprocessing method.
        if self.mode == "activations":
            inputs = tf.clip_by_value(inputs, 0.0, 1.0)
        elif self.mode == "weights":
            inputs = self.weight_preprocess(inputs)
        else:
            # Defensive: `__init__` already validates `mode`, so this branch
            # is only reachable if the attribute was mutated afterwards.
            raise ValueError(
                f"Invalid DoReFa quantizer mode {self.mode}. "
                "Valid values are 'activations' and 'weights'."
            )

        @tf.custom_gradient
        def _k_bit_with_identity_grad(x):
            # quantize_k from the paper: round onto n = 2^k - 1 levels in
            # [0, 1], with an identity gradient (STE) on the backward pass.
            n = 2**self.precision - 1
            return tf.round(x * n) / n, lambda dy: dy

        outputs = _k_bit_with_identity_grad(inputs)
        # Scale weights from [0, 1] quantization range back to [-1,1] range
        if self.mode == "weights":
            outputs = 2.0 * outputs - 1.0
        return super().call(outputs)

    def get_config(self):
        return {**super().get_config(), "k_bit": self.precision, "mode": self.mode}
# `DoReFa` used to be called `DoReFaQuantizer`; this alias is for
# backwards-compatibility.
DoReFaQuantizer = DoReFa
# Anything accepted where a quantizer is expected: a `Quantizer` layer or a
# plain tensor-to-tensor callable.
QuantizerType = Union[Quantizer, Callable[[tf.Tensor], tf.Tensor]]
def serialize(quantizer: tf.keras.layers.Layer, use_legacy_format=False):
    """Serialize `quantizer`, optionally via the legacy Keras format on TF >= 2.13."""
    wants_legacy = use_legacy_format and version.parse(
        tf.__version__
    ) >= version.parse("2.13")
    if wants_legacy:
        return tf.keras.utils.legacy.serialize_keras_object(quantizer)
    return tf.keras.utils.serialize_keras_object(quantizer)
def deserialize(name, custom_objects=None, use_legacy_format=False):
    """Deserialize a quantizer config, optionally via the legacy Keras path on TF >= 2.13."""
    # Both code paths share the same keyword arguments; build them once.
    shared_kwargs = dict(
        module_objects=globals(),
        custom_objects=custom_objects,
        printable_module_name="quantization function",
    )
    if use_legacy_format and version.parse(tf.__version__) >= version.parse("2.13"):
        return tf.keras.utils.legacy.deserialize_keras_object(name, **shared_kwargs)
    return tf.keras.utils.deserialize_keras_object(name, **shared_kwargs)
def get(identifier):
    """Resolve a quantizer from `None`, a config dict, a string alias, or a callable."""
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        # Configs produced by the modern Keras serializer carry a "module" key.
        return deserialize(identifier, use_legacy_format="module" not in identifier)
    if isinstance(identifier, str):
        # A bare name is treated as a class name with an empty config.
        return get({"class_name": str(identifier), "config": {}})
    if callable(identifier):
        return identifier
    raise ValueError(
        f"Could not interpret quantization function identifier: {identifier}"
    )
def get_kernel_quantizer(identifier):
    """Returns a quantizer from identifier and adds default kernel quantizer metrics.
    # Arguments
    identifier: Function or string
    # Returns
    `Quantizer` or `None`
    """
    quantizer = get(identifier)
    # Only Larq quantizers carry metrics; fill them from the active
    # `metrics_scope` when the user did not set any explicitly.
    needs_default_metrics = (
        isinstance(quantizer, _BaseQuantizer) and not quantizer._custom_metrics
    )
    if needs_default_metrics:
        quantizer._custom_metrics = list(context.get_training_metrics())
    return quantizer
| 23,775 | 30.449735 | 141 | py |
larq | larq-main/larq/context.py | """Context managers that configure global behaviour of Larq."""
import contextlib
import threading
__all__ = [
"metrics_scope",
"quantized_scope",
"get_training_metrics",
"should_quantize",
]
# Thread-local flag backing `quantized_scope`/`should_quantize`. Note the
# attribute assignment below only applies to the thread that imports this
# module; other threads rely on the `getattr` default in `should_quantize`.
_quantized_scope = threading.local()
_quantized_scope.should_quantize = False
@contextlib.contextmanager
def quantized_scope(quantize):
    """A context manager to define the behaviour of `QuantizedVariable`.
    !!! example
        ```python
        model.save("full_precision_model.h5")  # save full precision latent weights
        fp_weights = model.get_weights()  # get latent weights
        with larq.context.quantized_scope(True):
            model.save("binary_model.h5")  # save binarized weights
            weights = model.get_weights()  # get binarized weights
        ```
    # Arguments
    quantize: If `should_quantize` is `True`, `QuantizedVariable` will return their
        quantized value in the forward pass. If `False`, `QuantizedVariable` will
        act as a latent variable.
    """
    backup = should_quantize()
    _quantized_scope.should_quantize = quantize
    try:
        yield quantize
    finally:
        # Restore the previous scope even when the `with` body raises;
        # without the `finally`, an exception would permanently leak the
        # quantized state into subsequent, unrelated code.
        _quantized_scope.should_quantize = backup
def should_quantize():
    """Returns the current quantized scope."""
    # EAFP: the attribute is absent on threads that never entered a scope.
    try:
        return _quantized_scope.should_quantize
    except AttributeError:
        return False
# Metrics currently active via `metrics_scope`; mutated in place so that
# `get_training_metrics` can hand out a live reference.
_global_training_metrics = set()
# The set of metric names `metrics_scope` accepts.
_available_metrics = {"flip_ratio"}
@contextlib.contextmanager
def metrics_scope(metrics=()):
    """A context manager to set the training metrics to be used in quantizers.
    !!! example
        ```python
        with larq.context.metrics_scope(["flip_ratio"]):
            model = tf.keras.models.Sequential(
                [larq.layers.QuantDense(3, kernel_quantizer="ste_sign", input_shape=(32,))]
            )
        model.compile(loss="mse", optimizer="sgd")
        ```
    # Arguments
    metrics: Iterable of metrics to add to quantizers defined inside this context.
        Currently only the `flip_ratio` metric is available.
    """
    # Validate before touching global state so a bad name leaves it untouched.
    for metric in metrics:
        if metric not in _available_metrics:
            raise ValueError(
                f"Unknown training metric '{metric}'. Available metrics: {_available_metrics}."
            )
    backup = _global_training_metrics.copy()
    _global_training_metrics.update(metrics)
    try:
        yield _global_training_metrics
    finally:
        # Restore the previous metrics even when the `with` body raises;
        # without the `finally`, an exception would leak this scope's
        # metrics into all subsequently created quantizers.
        _global_training_metrics.clear()
        _global_training_metrics.update(backup)
def get_training_metrics():
"""Retrieves a live reference to the training metrics in the current scope.
Updating and clearing training metrics using `larq.context.metrics_scope` is
preferred, but `get_training_metrics` can be used to directly access them.
!!! example
```python
get_training_metrics().clear()
get_training_metrics().add("flip_ratio")
```
# Returns
A set of training metrics in the current scope.
"""
return _global_training_metrics
# ===== larq-main/larq/conftest.py =====
import pytest
import tensorflow as tf
from packaging import version
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from larq import context as lq_context
# TF >= 1.15 can expose multiple virtual CPU devices, letting the distributed
# tests exercise a real multi-device MirroredStrategy on a single host.
# On older TF we fall back to a single-device strategy.
if version.parse(tf.__version__) >= version.parse("1.15"):
    strategy_combinations.set_virtual_cpus_to_at_least(3)
    distributed_devices = ["/cpu:1", "/cpu:2"]
else:
    distributed_devices = ["/cpu:0"]
@pytest.fixture
def eager_mode():
    """pytest fixture for running test in eager mode"""
    # Uses TF's internal context manager rather than the public enable/disable
    # APIs so the mode is restored automatically after the test.
    with context.eager_mode():
        yield
@pytest.fixture
def graph_mode():
    """pytest fixture for running test in graph mode"""
    with context.graph_mode():
        # A default session is required for graph-mode ops to be evaluated.
        with tf.compat.v1.Session().as_default():
            yield
    # Drop any graph state the test created so later tests start clean.
    tf.keras.backend.clear_session()
@pytest.fixture(params=["eager", "graph"])
def eager_and_graph_mode(request):
    """pytest fixture for running test in eager and graph mode"""
    if request.param == "graph":
        with context.graph_mode():
            # Graph mode needs a default session; clear Keras state afterwards.
            with tf.compat.v1.Session().as_default():
                yield request.param
        tf.keras.backend.clear_session()
    else:
        with context.eager_mode():
            yield request.param
@pytest.fixture(params=["graph", "tf_eager", "tf_keras_eager"])
def keras_should_run_eagerly(request):
    """Fixture to run in graph and two eager modes.

    The modes are:
    - Graph mode
    - TensorFlow eager and Keras eager
    - TensorFlow eager and Keras not eager

    The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras
    should run eagerly.
    """
    if request.param == "graph":
        # TF 2 removed classic graph-mode execution, so skip this combination.
        if version.parse(tf.__version__) >= version.parse("2"):
            pytest.skip("Skipping graph mode for TensorFlow 2+.")
        with context.graph_mode():
            yield
    else:
        with context.eager_mode():
            # Only "tf_keras_eager" asks Keras itself to run eagerly.
            yield request.param == "tf_keras_eager"
@pytest.fixture(params=[False, True])
def distribute_scope(request):
    """Yield `True` inside a MirroredStrategy scope, `False` outside of one."""
    use_strategy = request.param
    if not use_strategy:
        yield use_strategy
    else:
        strategy = tf.distribute.MirroredStrategy(distributed_devices)
        with strategy.scope():
            yield use_strategy
@pytest.fixture(params=[True, False])
def quantized(request):
    """pytest fixture for running test quantized and non-quantized"""
    # Wraps the test body in larq's quantized_scope so QuantizedVariables
    # return quantized (True) or latent (False) values.
    with lq_context.quantized_scope(request.param):
        yield request.param
@pytest.fixture(params=["channels_last", "channels_first"])
def data_format(request):
    """Parametrize tests over both Keras image data formats."""
    return request.param
# ===== larq-main/larq/testing_utils.py =====
import numpy as np
import tensorflow as tf
import larq as lq
def _eval_tensor(tensor):
if tensor is None:
return None
elif callable(tensor):
return _eval_helper(tensor())
else:
return tensor.numpy()
def _eval_helper(tensors):
    """Evaluate a (possibly nested) structure of tensors leaf-by-leaf."""
    if tensors is None:
        return None
    # tf.nest walks arbitrary nested lists/tuples/dicts of tensors.
    return tf.nest.map_structure(_eval_tensor, tensors)
def evaluate(tensors):
    """Evaluate tensors to numpy values in both eager and graph mode.

    In eager mode the structure is walked and `.numpy()` is called on each
    leaf; in graph mode the default session runs the whole structure at once.
    """
    if tf.executing_eagerly():
        return _eval_helper(tensors)
    else:
        sess = tf.compat.v1.get_default_session()
        return sess.run(tensors)
def generate_real_values_with_zeros(low=-2, high=2, shape=(4, 10)):
    """Draw uniform values in `[low, high)` and splice a zero column at index 1.

    The inserted column makes the output one column wider than `shape`,
    guaranteeing exact zeros are present (useful for testing sign functions).
    """
    samples = np.random.uniform(low, high, shape)
    return np.insert(samples, 1, 0, axis=1)
def get_small_bnn_model(input_dim, num_hidden, output_dim, trainable_bn=True):
    """Build a small two-layer binarized Keras model for tests.

    Both dense layers binarize their kernels with `ste_sign` and clip latent
    weights via `weight_clip`; a BatchNormalization layer (optionally frozen
    through `trainable_bn`) sits between them. The second layer also
    binarizes its input.
    """
    model = tf.keras.models.Sequential()
    model.add(
        lq.layers.QuantDense(
            units=num_hidden,
            kernel_quantizer="ste_sign",
            kernel_constraint="weight_clip",
            activation="relu",
            input_shape=(input_dim,),
            use_bias=False,
        )
    )
    model.add(tf.keras.layers.BatchNormalization(trainable=trainable_bn))
    model.add(
        lq.layers.QuantDense(
            units=output_dim,
            kernel_quantizer="ste_sign",
            kernel_constraint="weight_clip",
            input_quantizer="ste_sign",
            activation="softmax",
            use_bias=False,
        )
    )
    return model
def random_input(shape):
    """Generate random float32 test data for the given shape.

    Any `None` dimension is replaced with a random size in `[1, 4)`. Values
    lie in `[-0.5, 9.5)`.

    Note: the original implementation overwrote `None` entries in the
    caller's `shape` list in place; this version builds a new list so the
    argument is never mutated.
    """
    resolved = [np.random.randint(1, 4) if dim is None else dim for dim in shape]
    data = 10 * np.random.random(resolved) - 0.5
    return data.astype("float32")
# This is a fork of https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/testing_utils.py#L72
# as recommended in https://github.com/tensorflow/tensorflow/issues/28601#issuecomment-492810252
def layer_test(
    layer_cls,
    kwargs=None,
    input_shape=None,
    input_dtype=None,
    input_data=None,
    expected_output=None,
    expected_output_dtype=None,
    should_run_eagerly=False,
):
    """Test routine for a layer with a single input and single output.

    Exercises the layer through the functional and Sequential APIs, checks
    dtype and shape inference, serialization round-trips, weight get/set, and
    one training step.

    Arguments:
      layer_cls: Layer class object.
      kwargs: Optional dictionary of keyword arguments for instantiating the
        layer.
      input_shape: Input shape tuple.
      input_dtype: Data type of the input data.
      input_data: Numpy array of input data.
      expected_output: Shape tuple for the expected shape of the output.
      expected_output_dtype: Data type expected for the output.
      should_run_eagerly: Whether Keras should execute the train step eagerly.
    Returns:
      The output data (Numpy array) returned by the layer, for additional
      checks to be done by the calling code.
    Raises:
      ValueError: if `input_shape is None`.
    """
    # Synthesize random input data if none was provided; `None` dims are
    # replaced with random sizes so partially-specified shapes are exercised.
    if input_data is None:
        if input_shape is None:
            raise ValueError("input_shape is None")
        if not input_dtype:
            input_dtype = "float32"
        input_data_shape = list(input_shape)
        for i, e in enumerate(input_data_shape):
            if e is None:
                input_data_shape[i] = np.random.randint(1, 4)
        input_data = 10 * np.random.random(input_data_shape)
        if input_dtype[:5] == "float":
            input_data -= 0.5
        input_data = input_data.astype(input_dtype)
    elif input_shape is None:
        input_shape = input_data.shape
    if input_dtype is None:
        input_dtype = input_data.dtype
    if expected_output_dtype is None:
        expected_output_dtype = input_dtype
    # instantiation
    kwargs = kwargs or {}
    layer = layer_cls(**kwargs)
    # test get_weights , set_weights at layer level
    weights = layer.get_weights()
    layer.set_weights(weights)
    # test in functional API
    x = tf.keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
    y = layer(x)
    if tf.keras.backend.dtype(y) != expected_output_dtype:
        raise AssertionError(
            "When testing layer %s, for input %s, found output "
            "dtype=%s but expected to find %s.\nFull kwargs: %s"
            % (
                layer_cls.__name__,
                x,
                tf.keras.backend.dtype(y),
                expected_output_dtype,
                kwargs,
            )
        )
    # check shape inference: compute_output_shape must agree with the shape
    # actually produced by predict().
    model = tf.keras.models.Model(x, y)
    expected_output_shape = tuple(
        layer.compute_output_shape(tf.TensorShape(input_shape)).as_list()
    )
    actual_output = model.predict(input_data)
    actual_output_shape = actual_output.shape
    for expected_dim, actual_dim in zip(expected_output_shape, actual_output_shape):
        if expected_dim is not None:
            if expected_dim != actual_dim:
                raise AssertionError(
                    "When testing layer %s, for input %s, found output_shape="
                    "%s but expected to find %s.\nFull kwargs: %s"
                    % (
                        layer_cls.__name__,
                        x,
                        actual_output_shape,
                        expected_output_shape,
                        kwargs,
                    )
                )
    if expected_output is not None:
        np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)
    # test serialization, weight setting at model level
    model_config = model.get_config()
    recovered_model = tf.keras.models.Model.from_config(model_config)
    if model.weights:
        weights = model.get_weights()
        recovered_model.set_weights(weights)
        output = recovered_model.predict(input_data)
        np.testing.assert_allclose(output, actual_output, rtol=2e-3)
    # Recreate layer to prevent layer metrics from being configured multiple times.
    layer = layer_cls(**kwargs)
    # test training mode (e.g. useful for dropout tests)
    # Rebuild the model to avoid the graph being reused between predict() and
    # train(). This was causing some error for layer with Defun as it body.
    # See b/120160788 for more details. This should be mitigated after 2.0.
    model = tf.keras.models.Model(x, layer(x))
    model.compile(
        "rmsprop",
        "mse",
        weighted_metrics=["acc"],
        run_eagerly=should_run_eagerly,
    )
    model.train_on_batch(input_data, actual_output)
    # test as first layer in Sequential API
    layer_config = layer.get_config()
    layer_config["batch_input_shape"] = input_shape
    layer = layer.__class__.from_config(layer_config)
    model = tf.keras.models.Sequential()
    model.add(layer)
    actual_output = model.predict(input_data)
    actual_output_shape = actual_output.shape
    for expected_dim, actual_dim in zip(expected_output_shape, actual_output_shape):
        if expected_dim is not None:
            if expected_dim != actual_dim:
                raise AssertionError(
                    "When testing layer %s **after deserialization**, "
                    "for input %s, found output_shape="
                    "%s but expected to find inferred shape %s.\nFull kwargs: %s"
                    % (
                        layer_cls.__name__,
                        x,
                        actual_output_shape,
                        expected_output_shape,
                        kwargs,
                    )
                )
    if expected_output is not None:
        np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)
    # test serialization, weight setting at model level
    model_config = model.get_config()
    recovered_model = tf.keras.models.Sequential.from_config(model_config)
    if model.weights:
        weights = model.get_weights()
        recovered_model.set_weights(weights)
        output = recovered_model.predict(input_data)
        np.testing.assert_allclose(output, actual_output, rtol=2e-3)
    # for further checks in the caller function
    return actual_output
# ===== larq-main/larq/quantized_variable.py =====
"""Contains QuantizedVariable, a variable that can be quantized in the forward pass."""
from typing import Optional
import tensorflow as tf
from packaging import version
from tensorflow.python.distribute.values import DistributedVariable
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from larq import context
from larq.quantizers import QuantizerType
# pytype: disable=import-error
try:
from tensorflow.python.distribute.ps_values import AggregatingVariable
from tensorflow.python.types.core import Tensor as TensorType
except ModuleNotFoundError:
TensorType = object
from tensorflow.python.distribute.values import AggregatingVariable
# pytype: enable=import-error
# Sentinel distinguishing "no op argument passed" from an explicit `op=None`.
UNSPECIFIED = object()
# TF >= 2.8 provides the TraceType API used for tf.function tracing below.
_SUPPORTS_TRACE_TYPE = version.parse(tf.__version__) >= version.parse("2.8")
if _SUPPORTS_TRACE_TYPE:
    # The public location moved between TF releases; fall back to the
    # private module on older 2.8-era versions.
    try:
        from tensorflow.types.experimental import TraceType
    except ImportError:
        from tensorflow.python.types.trace import TraceType

    class QuantizedVariableSpec(TraceType):
        """TraceType for QuantizedVariableSpec for tracing with tf.function.

        This class implements the Type for QuantizedVariable used in tracing.
        Identity-based: two specs are equal only if they wrap the very same
        variable object, so each QuantizedVariable gets its own trace.
        """

        def __init__(self, value):
            # The wrapped QuantizedVariable this spec stands for.
            self.latent_variable = value

        def is_subtype_of(self, other) -> bool:
            """If the other spec is the same as `self`, return True."""
            return self == other

        def most_specific_common_supertype(self, others):
            """`self` is the common supertype if all input types match it."""
            return self if all(self == other for other in others) else None

        def placeholder_value(self, placeholder_context=None):
            """Use the QuantizedVariable value itself as a placeholder."""
            return self.latent_variable

        def _cast(self, value, _):
            # No conversion needed; the variable is used as-is.
            return value

        def _to_tensors(self, value):
            # The variable is captured, not flattened into tensors.
            return []

        def __hash__(self) -> int:
            # Hash by object identity of the wrapped variable.
            return hash(id(self.latent_variable))

        def __eq__(self, other) -> bool:
            # Identity comparison keeps traces per-variable.
            return self is other
class QuantizedVariable(tf.Variable, TensorType):
    """A Variable that can be quantized in the forward pass in applicable contexts.

    Wraps a resource variable (`latent_variable`). Whenever
    `larq.context.should_quantize()` is `True` and a `quantizer` is set, reads
    return the quantized value; otherwise the latent value is returned.
    Assignments and most attributes delegate directly to the wrapped variable.
    """

    def __init__(
        self,
        variable: tf.Variable,
        quantizer: Optional[QuantizerType] = None,
        precision: Optional[int] = None,
        op: Optional[tf.Operation] = UNSPECIFIED,
    ):
        """Creates an QuantizedVariable instance.

        # Arguments
            variable: A floating-point resource variable to wrap.
            quantizer: An optional quantizer to transform the floating-point
                variable to a fake quantized variable.
            precision: An optional integer defining the precision of the quantized
                variable. If `None`, `quantizer.precision` is used.
            op: An optional operation of this variable.
        """
        if not resource_variable_ops.is_resource_variable(variable):
            raise ValueError(
                "`variable` must be of type `tf.ResourceVariable`, "
                f"but got `{type(variable)}`."
            )
        if not (quantizer is None or callable(quantizer)):
            raise ValueError(
                "`quantizer` must be `callable` or `None`, "
                f"but got `{type(quantizer)}`."
            )
        if not (precision is None or type(precision) == int):
            raise ValueError(
                "`precision` must be of type `int` or `None`, "
                f"but got `{type(precision)}`."
            )
        self.latent_variable = variable
        self.quantizer = quantizer
        # Fall back to the quantizer's own precision when none is given.
        self.precision = precision or getattr(quantizer, "precision", None)
        self._op = op

    @classmethod
    def from_variable(
        cls,
        variable: tf.Variable,
        quantizer: Optional[QuantizerType] = None,
        precision: Optional[int] = None,
        op: Optional[tf.Operation] = UNSPECIFIED,
    ):
        """Creates a QuantizedVariable that wraps another variable.

        This typically just returns `QuantizedVariable(variable)`. But, if the variable
        is a DistributedVariable or one of its subclasses, we instead dynamically
        create a class that subclasses from both QuantizedVariable and
        variable.__class__. This is so the returned variable will still pass
        `isinstance(variable, variable.__class__)`, which is required for
        DistributedVariables and its subclasses to work properly.

        # Arguments
            variable: A floating-point resource variable to wrap.
            quantizer: An optional quantizer to transform the floating-point variable to
                a fake quantized variable.
            precision: An optional integer defining the precision of the quantized
                variable. If `None`, `quantizer.precision` is used.
            op: An optional operation of this variable.

        # Returns
            A QuantizedVariable that wraps the variable.
        """
        if not isinstance(variable, (DistributedVariable, AggregatingVariable)):
            return cls(variable, quantizer, precision, op=op)

        class QuantizedDistributedVariable(cls, variable.__class__):
            """A QuantizedVariable that also subclasses from `variable.__class__`.

            `variable.__class__` is either a `DistributedVariable` or an
            `AggregatingVariable`.
            """

            def get(self, *args, **kwargs):
                # For some reason this is needed to make unit `x + x` pass on TF 1.14
                return self._quantize(self.latent_variable.get(*args, **kwargs))

        return QuantizedDistributedVariable(variable, quantizer, precision, op=op)

    def _quantize(self, value):
        # Core dispatch: quantize only when inside a quantized_scope(True).
        if self.quantizer and context.should_quantize():
            return self.quantizer(value)
        return value

    # --- Read paths: every way of reading the variable goes through _quantize.

    def value(self):
        return self._quantize(self.latent_variable.value())

    def read_value(self):
        return self._quantize(self.latent_variable.read_value())

    def numpy(self):
        return self._quantize(self.latent_variable).numpy()

    def sparse_read(self, *args, **kwargs):
        return self._quantize(self.latent_variable.sparse_read(*args, **kwargs))

    def gather_nd(self, *args, **kwargs):
        return self._quantize(self.latent_variable.gather_nd(*args, **kwargs))

    def __getattr__(self, name):
        # Any attribute not defined here falls through to the wrapped variable.
        return getattr(self.latent_variable, name)

    def _dense_var_to_tensor(self, *args, **kwargs):
        return self._quantize(
            self.latent_variable._dense_var_to_tensor(*args, **kwargs)
        )

    def eval(self, session=None):
        return self._quantize(self.latent_variable).eval(session=session)

    def initialized_value(self):
        return self._quantize(self.latent_variable.initialized_value())

    @property
    def initial_value(self):
        return self._quantize(self.latent_variable.initial_value)

    def __tf_tensor__(
        self, dtype: Optional[tf.dtypes.DType] = None, name: Optional[str] = None
    ) -> tf.Tensor:
        return self._dense_var_to_tensor(dtype=dtype, name=name)

    def _should_act_as_resource_variable(self):
        """Pass resource_variable_ops.is_resource_variable check."""
        pass

    @staticmethod
    def _get_name(obj) -> str:
        # Functions have __name__; class instances fall back to the class name.
        try:
            return obj.__name__
        except AttributeError:
            return obj.__class__.__name__

    def __repr__(self) -> str:
        repr_ = (
            f"<{self.__class__.__name__} '{self.name}' "
            f"shape={self.shape} dtype={self.dtype.name}"
        )
        if self.quantizer is not None:
            repr_ += f" quantizer={self._get_name(self.quantizer)}"
        if self.precision is not None:
            repr_ += f" precision={self.precision}"
        if tf.executing_eagerly() and not self._in_graph_mode:
            return f"{repr_} numpy={ops.numpy_text(self.read_value(), is_repr=True)}>"
        return f"{repr_}>"

    # Method delegations: We delegate the following methods to self.latent_variable.
    # Each of these methods simply calls the same method on self.latent_variable. The
    # base Variable raises NotImplementedError for most of these, so we must
    # override them.
    #
    # We do not define the following methods from Variable for the following
    # reasons:
    #   * 'ref': Instead we inherit the definition from Variable.
    #     If we defined and delegated to Variable, the ref of an QuantizedVariable
    #     would be the same as the ref of the underlying variable, which would be
    #     strange as they are different Python objects.

    def set_shape(self, *args, **kwargs):
        return self.latent_variable.set_shape(*args, **kwargs)

    @property
    def trainable(self):
        return self.latent_variable.trainable

    @property
    def synchronization(self):
        return self.latent_variable.synchronization

    @property
    def aggregation(self):
        return self.latent_variable.aggregation

    @property
    def constraint(self):
        return self.latent_variable.constraint

    def _apply_assign_update(
        self, update_fn, value, use_locking=None, name=None, read_value=True
    ):
        # In eager mode, perform the assign and (if requested) rewrap the
        # latent variable so the returned value is still a QuantizedVariable.
        if ops.executing_eagerly_outside_functions():
            assign_op = update_fn(value, use_locking, name, False)
            if read_value:
                return QuantizedVariable.from_variable(
                    self.latent_variable, self.quantizer, self.precision, op=assign_op
                )
            return assign_op

        # Fallback to wrapping the returned variable in graph mode if possible
        assign_var = update_fn(value, use_locking, name, read_value)
        if read_value and resource_variable_ops.is_resource_variable(assign_var):
            return QuantizedVariable.from_variable(
                assign_var, self.quantizer, self.precision
            )
        return assign_var

    def _apply_update(self, update_fn, *args, **kwargs):
        update_var = update_fn(*args, **kwargs)
        if ops.executing_eagerly_outside_functions():
            return self

        # Fallback to wrapping the returned variable in graph mode if possible
        if resource_variable_ops.is_resource_variable(update_var):
            return QuantizedVariable.from_variable(
                update_var, self.quantizer, self.precision
            )
        return update_var

    def assign(self, value, use_locking=None, name=None, read_value=True):
        return self._apply_assign_update(
            self.latent_variable.assign, value, use_locking, name, read_value
        )

    def assign_add(self, delta, use_locking=None, name=None, read_value=True):
        return self._apply_assign_update(
            self.latent_variable.assign_add, delta, use_locking, name, read_value
        )

    def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
        return self._apply_assign_update(
            self.latent_variable.assign_sub, delta, use_locking, name, read_value
        )

    def scatter_sub(self, *args, **kwargs):
        return self._apply_update(self.latent_variable.scatter_sub, *args, **kwargs)

    def scatter_add(self, *args, **kwargs):
        return self._apply_update(self.latent_variable.scatter_add, *args, **kwargs)

    def scatter_max(self, *args, **kwargs):
        return self._apply_update(self.latent_variable.scatter_max, *args, **kwargs)

    def scatter_min(self, *args, **kwargs):
        return self._apply_update(self.latent_variable.scatter_min, *args, **kwargs)

    def scatter_mul(self, *args, **kwargs):
        return self._apply_update(self.latent_variable.scatter_mul, *args, **kwargs)

    def scatter_div(self, *args, **kwargs):
        return self._apply_update(self.latent_variable.scatter_div, *args, **kwargs)

    def scatter_update(self, *args, **kwargs):
        return self._apply_update(self.latent_variable.scatter_update, *args, **kwargs)

    def batch_scatter_update(self, *args, **kwargs):
        return self._apply_update(
            self.latent_variable.batch_scatter_update, *args, **kwargs
        )

    def scatter_nd_sub(self, *args, **kwargs):
        return self._apply_update(self.latent_variable.scatter_nd_sub, *args, **kwargs)

    def scatter_nd_add(self, *args, **kwargs):
        return self._apply_update(self.latent_variable.scatter_nd_add, *args, **kwargs)

    def scatter_nd_update(self, *args, **kwargs):
        return self._apply_update(
            self.latent_variable.scatter_nd_update, *args, **kwargs
        )

    def count_up_to(self, *args, **kwargs):
        return self.latent_variable.count_up_to(*args, **kwargs)

    def load(self, *args, **kwargs):
        return self.latent_variable.load(*args, **kwargs)

    @property
    def dtype(self):
        return self.latent_variable.dtype

    @property
    def name(self):
        return self.latent_variable.name

    @property
    def _shared_name(self):
        return self.latent_variable._shared_name

    @property
    def initializer(self):
        return self.latent_variable.initializer

    @property
    def device(self):
        return self.latent_variable.device

    @property
    def op(self):
        # Prefer an explicitly supplied op (e.g. an assign op) over the
        # wrapped variable's own op.
        if self._op is not UNSPECIFIED:
            return self._op
        return self.latent_variable.op

    @property
    def graph(self):
        return self.latent_variable.graph

    @property
    def shape(self):
        return self.latent_variable.shape

    def get_shape(self):
        return self.latent_variable.get_shape()

    def __tf_tracing_type__(self, context):
        if _SUPPORTS_TRACE_TYPE:
            return QuantizedVariableSpec(self)
        return NotImplemented

    def _gather_saveables_for_checkpoint(self):
        # By delegating this method to the wrapped variable, checkpoints with
        # QuantizedVariables are identical to checkpoints with normal variables.
        # Therefore models checkpointed with QuantizedVariables can be restored on
        # models with normal variables, and vice versa.
        return self.latent_variable._gather_saveables_for_checkpoint()

    def _map_resources(self, *args):
        # By delegating this method to the wrapped variable, SavedModel with
        # QuantizedVariables are identical to SavedModel with normal variables.
        obj_map, resource_map = self.latent_variable._map_resources(*args)
        obj_map[self] = obj_map[self.latent_variable]
        return obj_map, resource_map

    def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs):
        # By delegating this method to the wrapped variable, SavedModel with
        # QuantizedVariables are identical to SavedModel with normal variables.
        resource_list = self.latent_variable._export_to_saved_model_graph(
            object_map, tensor_map, options, **kwargs
        )
        object_map[self] = object_map[self.latent_variable]
        return resource_list

    # TODO: Maybe encode the fact the variable is an QuantizedVariable in to_proto().
    def to_proto(self, *args, **kwargs):
        return self.latent_variable.to_proto(*args, **kwargs)

    def from_proto(self, *args, **kwargs):
        return self.latent_variable.from_proto(*args, **kwargs)

    # Delegate the private attributes _handle_name and _initializer_op to
    # self.latent_variable. SavedModel sets these attributes when loading a model. For
    # example, it sets _handle_name here:
    # https://github.com/tensorflow/tensorflow/blob/db26bd574fa95b5bdd53c08463dd19407cc0297e/tensorflow/python/keras/saving/saved_model/load.py#L211
    # We need to expose these attributes on AutoCastVariable as well for
    # SavedModel to work properly.
    # TODO: Find a better way to support SavedModel. Exposing private attributes is
    # hacky and difficult to maintain.
    # For more info see https://github.com/tensorflow/tensorflow/commit/1fcda57f37c2ac854cabf1c3462eb14e39d36c60

    @property
    def _handle_name(self):
        return self.latent_variable._handle_name

    @_handle_name.setter
    def _handle_name(self, handle_name):
        self.latent_variable._handle_name = handle_name

    @property
    def _initializer_op(self):
        return self.latent_variable._initializer_op

    @_initializer_op.setter
    def _initializer_op(self, initializer_op):
        self.latent_variable._initializer_op = initializer_op

    def _as_graph_element(self):
        # In a quantized scope, expose the quantized tensor as the graph element.
        if self.quantizer and context.should_quantize():
            return self.quantizer(self.latent_variable)
        graph_element = self.latent_variable._as_graph_element()
        if graph_element is None:
            return self._op
        return graph_element
# Overload arithmetic/comparison operators so `var + x` etc. work and
# register the wrapper so TF converts it to a (quantized) tensor implicitly.
QuantizedVariable._OverloadAllOperators()
tf.register_tensor_conversion_function(
    QuantizedVariable, QuantizedVariable._dense_var_to_tensor
)
try:
    # Removed in newer TF versions; safe to skip there.
    ops.register_dense_tensor_like_type(QuantizedVariable)
except AttributeError:
    pass
# ===== larq-main/larq/optimizers.py =====
"""Neural networks with extremely low-precision weights and activations, such as
Binarized Neural Networks (BNNs), usually contain a mix of low-precision weights (e.g.
1-bit) and higher-precision weights (e.g. 8-bit, 16-bit, or 32-bit). Examples of this
include the first and last layers of image classificiation models, which have
higher-precision weights in most BNN architectures from the literature.
Training a BNN, then, consists of optimizing both low-precision and higher-precision
weights. In `larq`, we provide a mechanism to target different bit-precision variables
with different optimizers using the `CaseOptimizer` class. Modeled after the
[`tf.case`](https://www.tensorflow.org/api_docs/python/tf/case) signature,
`CaseOptimizer` accepts pairs of predicates and optimizers. A predicate, given a
variable, decides whether its optimizer should train that variable.
A `CaseOptimizer` behaves much like any other
[Keras optimizer](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers), and
once you instantiate it you can pass it to your `model.compile()` as usual. To
instantiate a `CaseOptimzer`, pass one or a list of `(predicate, optimizer)` tuples,
along with a `default` optimizer which trains any variables not claimed by another
optimizer. A variable may not be claimed by more than one optimizer's predicate.
!!! example
```python
no_op_quantizer = lq.quantizers.NoOp(precision=1)
layer = lq.layers.QuantDense(16, kernel_quantizer=no_op_quantizer)
case_optimizer = lq.optimizers.CaseOptimizer(
(
lq.optimizers.Bop.is_binary_variable, # predicate
lq.optimizers.Bop(threshold=1e-6, gamma=1e-3), # optimizer
),
default_optimizer=tf.keras.optimizers.Adam(0.01),
)
```
"""
import warnings
from copy import deepcopy
from typing import Callable, Optional, Tuple
import tensorflow as tf
from packaging import version
import larq as lq
from larq import utils
__all__ = ["Bop", "CaseOptimizer"]
if version.parse(tf.__version__) >= version.parse("2.11"):
from tensorflow.keras.optimizers.legacy import Optimizer # type: ignore
else:
from tensorflow.keras.optimizers import Optimizer # type: ignore
# From https://github.com/keras-team/keras/blob/a8606fd45b760cce3e65727e9d62cae796c45930/keras/optimizer_v2/optimizer_v2.py#L1430-L1450
def _var_key(var):
    """Key for representing a primary variable, for looking up slots.

    In graph mode the name is derived from the var shared name.
    In eager mode the name is derived from the var unique id.
    If distribution strategy exists, get the primary variable first.

    Args:
      var: the variable.

    Returns:
      the unique name of the variable.
    """
    # Get the distributed variable if it exists.
    if hasattr(var, "_distributed_container"):
        var = var._distributed_container()
    if var._in_graph_mode:
        return var._shared_name
    return var._unique_id
@utils.register_keras_custom_object
class CaseOptimizer(Optimizer):
    """An optimizer wrapper that applies different optimizers to a subset of variables.

    An optimizer is used to train a variable iff its accompanying predicate evaluates to
    `True`.

    For each variable, at most one optimizer's predicate may evaluate to `True`. If no
    optimizer's predicate evaluates to `True` for a variable, it is trained with the
    `default_optimizer`. If a variable is claimed by no optimizers and
    `default_optimizer == None`, the variable is not trained.

    # Arguments
        predicate_optimizer_pairs: One or more `(pred, tf.keras.optimizers.legacy.Optimizer)`
            pairs, where `pred` takes one `tf.Variable` as argument and returns `True`
            if the optimizer should be used for that variable, e.g. `pred(var) == True`.
        default_optimizer: A `tf.keras.optimizers.legacy.Optimizer` to be applied to any
            variable not claimed by any other optimizer. (Must be passed as keyword
            argument.)
    """

    # Signals Keras that this optimizer handles gradient aggregation itself.
    _HAS_AGGREGATE_GRAD = True

    def __init__(
        self,
        *predicate_optimizer_pairs: Tuple[Callable[[tf.Variable], bool], Optimizer],
        default_optimizer: Optional[Optimizer] = None,
        name: str = "optimizer_case",
    ):
        super().__init__(name=name)

        # Type checks for (predicate, optimizer) pairs
        for i, (predicate, optimizer) in enumerate(predicate_optimizer_pairs):
            if not callable(predicate):
                raise TypeError(
                    f"Expected callable predicate at `predicate_optimizer_pairs[{i}][0]` but got `{type(predicate)}`."
                )
            if not isinstance(optimizer, Optimizer):
                raise TypeError(
                    f"Expected `tf.keras.optimizers.legacy.Optimizer` at `predicate_optimizer_pairs[{i}][1]` but got `{type(optimizer)}`."
                )

        # Type check for default optimizers
        if default_optimizer is not None and not isinstance(
            default_optimizer, Optimizer
        ):
            raise TypeError(
                f"Expected `Optimizer` for `default_optimizer` but got `{type(default_optimizer)}`."
            )

        self.pred_opt_pairs = predicate_optimizer_pairs
        self.default = default_optimizer

        # Computed lazily on the first `apply_gradients()` call.
        self.var_opt_mapping = None

        # List of optimizers ending in `default_optimizer`, for easier internal access
        self.optimizers = [opt for (_, opt) in self.pred_opt_pairs]
        if self.default:
            self.optimizers.append(self.default)
            self.DEFAULT_OPT_INDEX = len(self.pred_opt_pairs)

        # Track optimizers to support reloading via tf.train.Checkpoint
        for i, optimizer in enumerate(self.optimizers):
            self._track_trackable(optimizer, name=f"optimizer_{i}")

    @property
    def weights(self):
        # Concatenation of all sub-optimizer slot weights.
        weights = []
        for optimizer in self.optimizers:
            weights.extend(optimizer.weights)
        return weights

    @Optimizer.iterations.setter
    def iterations(self, variable):
        raise NotImplementedError("CaseOptimzer does not support setting iterations.")

    def apply_gradients(self, grads_and_vars, name: Optional[str] = None, **kwargs):
        """Apply gradients to variables for each optimizer.

        On the first call to `apply_gradients()`, compute the mapping from variables to
        optimizers and cache it in the `self.var_opt_mapping` dict for serialization and
        faster access.
        """
        if self.var_opt_mapping is None:
            # Convert `grads_and_vars` to list so we can iterate multiple times over it
            grads_and_vars = list(grads_and_vars)
            self._compute_var_opt_mapping(grads_and_vars)

        # Split gradients and variables into a separate list for each optimizer
        grad_var_lists = [[] for _ in range(len(self.pred_opt_pairs) + 1)]
        for grad, var in grads_and_vars:
            var_key = _var_key(var)
            if var_key in self.var_opt_mapping:
                grad_var_lists[self.var_opt_mapping[var_key]].append((grad, var))

        with tf.init_scope():
            _ = self.iterations
            # This is only necessary in TF 2.0 and older, but doesn't hurt on newer versions
            for optimizer, opt_grads_and_vars in zip(self.optimizers, grad_var_lists):
                optimizer._create_slots([v for (_, v) in opt_grads_and_vars])

        # merge_call runs the per-optimizer updates inside the cross-replica
        # context required by distribution strategies.
        return tf.distribute.get_replica_context().merge_call(
            self._apply_gradients, args=(grad_var_lists, name), kwargs=kwargs
        )

    def _apply_gradients(self, distribution, grad_var_lists, name, **kwargs):
        # Apply gradients to each optimizer
        with tf.name_scope(self._name):
            train_ops = [
                distribution.extended.call_for_each_replica(
                    optimizer.apply_gradients, args=(opt_grads_and_vars,), kwargs=kwargs
                )
                for optimizer, opt_grads_and_vars in zip(
                    self.optimizers, grad_var_lists
                )
            ]
            return tf.group(*train_ops, name=name or "train_with_group")

    def get_config(self):
        optimizer_configs = [opt.get_config() for (_, opt) in self.pred_opt_pairs]
        default_config = self.default.get_config()

        config = {
            "optimizer_configs": [
                {"class_name": optimizer_config["name"], "config": optimizer_config}
                for optimizer_config in optimizer_configs
            ],
            "default_config": {
                "class_name": default_config["name"],
                "config": default_config,
            },
            "var_opt_mapping": self.var_opt_mapping,  # serialized instead of `pred`s
        }
        return {**super().get_config(), **config}

    @classmethod
    def from_config(cls, original_config, custom_objects=None):
        config = deepcopy(original_config)

        case_optimizer = cls(
            *[  # `(pred, opt)` tuples
                (
                    lambda _: False,  # placeholder callable (`pred` is not serialized)
                    tf.keras.optimizers.deserialize(  # optimizer `opt`
                        opt_config, custom_objects=custom_objects
                    ),
                )
                for opt_config in config["optimizer_configs"]
            ],
            default_optimizer=tf.keras.optimizers.deserialize(
                config["default_config"], custom_objects=custom_objects
            ),
        )

        # Since we no longer have the `pred`s, we set the mapping explicitly
        case_optimizer.var_opt_mapping = config["var_opt_mapping"]

        return case_optimizer

    def _compute_var_opt_mapping(self, grads_and_vars):
        """Compute a unique mapping from variables to optimizer indices."""
        self.var_opt_mapping = {}

        for _, var in grads_and_vars:
            num_optimizers = 0
            var_key = _var_key(var)

            # Find the optimizer(s) that want to claim this variable
            for optimizer_index, (predicate, _) in enumerate(self.pred_opt_pairs):
                if predicate(var):
                    self.var_opt_mapping[var_key] = optimizer_index
                    num_optimizers += 1

            if num_optimizers > 1:
                raise ValueError(f"Variable `{var}` claimed by multiple optimizers.")
            if num_optimizers == 0:
                if self.default is not None:
                    self.var_opt_mapping[var_key] = self.DEFAULT_OPT_INDEX
                else:
                    warnings.warn(
                        f"No `default_optimizer` provided to train variable `{var}`."
                    )

        # Make sure that each optimizer touches at least one variable
        for optimizer_index, (_, optimizer) in enumerate(self.pred_opt_pairs):
            if optimizer_index not in self.var_opt_mapping.values():
                raise ValueError(
                    f"Optimizer `{optimizer}` did not claim any variables."
                )
@utils.register_keras_custom_object
class Bop(Optimizer):
    """Binary optimizer (Bop).

    Bop is a latent-free optimizer for Binarized Neural Networks (BNNs) and
    Binary Weight Networks (BWN).

    Bop maintains an exponential moving average of the gradients controlled by
    `gamma`. If this average exceeds the `threshold`, a weight is flipped.

    The hyperparameter `gamma` is somewhat analogous to the learning rate in
    SGD methods: a high `gamma` results in rapid convergence but also makes
    training more noisy.

    Note that the default `threshold` is not optimal for all situations.
    Setting the threshold too high results in little learning, while setting it
    too low results in overly noisy behaviour.

    !!! warning
        The `is_binary_variable` check of this optimizer will only target variables that
        have been explicitly marked as being binary using `NoOp(precision=1)`.

    !!! example
        ```python
        no_op_quantizer = lq.quantizers.NoOp(precision=1)
        layer = lq.layers.QuantDense(16, kernel_quantizer=no_op_quantizer)
        optimizer = lq.optimizers.CaseOptimizer(
            (lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
            default_optimizer=tf.keras.optimizers.Adam(0.01),  # for FP weights
        )
        ```

    # Arguments
        threshold: magnitude of average gradient signal required to flip a weight.
        gamma: the adaptivity rate.
        name: name of the optimizer.

    # References
        - [Latent Weights Do Not Exist: Rethinking Binarized Neural Network Optimization](https://papers.nips.cc/paper/8971-latent-weights-do-not-exist-rethinking-binarized-neural-network-optimization)
    """

    _HAS_AGGREGATE_GRAD = True

    def __init__(
        self, threshold: float = 1e-8, gamma: float = 1e-4, name: str = "Bop", **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self._set_hyper("threshold", threshold)
        self._set_hyper("gamma", gamma)

    def _create_slots(self, var_list):
        # One slot per variable: the exponential moving average of gradients.
        for var in var_list:
            self.add_slot(var, "m")

    def _get_decayed_hyper(self, name: str, var_dtype):
        # Resolve a hyperparameter, evaluating it at the current step when a
        # `LearningRateSchedule` was passed instead of a constant.
        hyper = self._get_hyper(name, var_dtype)
        if isinstance(hyper, tf.keras.optimizers.schedules.LearningRateSchedule):
            local_step = tf.cast(self.iterations, var_dtype)
            hyper = tf.cast(hyper(local_step), var_dtype)
        return hyper

    def _resource_apply_dense(self, grad, var):
        var_dtype = var.dtype.base_dtype
        gamma = self._get_decayed_hyper("gamma", var_dtype)
        threshold = self._get_decayed_hyper("threshold", var_dtype)
        m = self.get_slot(var, "m")
        # Update the gradient moving average: m_t = m + gamma * (grad - m).
        m_t = m.assign_add(gamma * (grad - m))
        # Flip the weight's sign where the moving average agrees with the
        # weight's sign beyond `threshold`; otherwise keep it unchanged.
        # `lq.math.sign` maps exact zeros to +1.
        var_t = lq.math.sign(-tf.sign(var * m_t - threshold) * var)
        return var.assign(var_t).op

    def get_config(self):
        config = {
            "threshold": self._serialize_hyperparameter("threshold"),
            "gamma": self._serialize_hyperparameter("gamma"),
        }
        return {**super().get_config(), **config}

    @classmethod
    def from_config(cls, config, custom_objects=None):
        # Hypers serialized as dicts are schedules; deserialize them first.
        for hyper in ("gamma", "threshold"):
            if hyper in config and isinstance(config[hyper], dict):
                config[hyper] = tf.keras.optimizers.schedules.deserialize(
                    config[hyper], custom_objects=custom_objects
                )
        return cls(**config)

    @staticmethod
    def is_binary_variable(var: tf.Variable) -> bool:
        """Returns `True` for variables with `var.precision == 1`.

        This is an example of a predicate that can be used by the `CaseOptimizer`.

        # Arguments
            var: a `tf.Variable`.
        """
        return getattr(var, "precision", 32) == 1
| 14,848 | 39.350543 | 201 | py |
larq | larq-main/larq/math_test.py | import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq.testing_utils import generate_real_values_with_zeros
@pytest.mark.parametrize("fn", [lq.math.sign])
def test_sign(fn):
    """`sign` must return only -1/+1 and map exact zeros to +1."""
    # Run the op through a backend function so it executes in graph mode.
    x = tf.keras.backend.placeholder(ndim=2)
    f = tf.keras.backend.function([x], [fn(x)])
    # Already-binarized inputs pass through unchanged.
    binarized_values = np.random.choice([-1, 1], size=(2, 5)).astype(np.float32)
    result = f(binarized_values)[0]
    np.testing.assert_allclose(result, binarized_values)
    # Real values (including zeros) are mapped onto {-1, +1} with 0 -> +1.
    real_values = generate_real_values_with_zeros()
    result = f(real_values)[0]
    assert not np.any(result == 0)
    assert np.all(result[real_values < 0] == -1)
    assert np.all(result[real_values >= 0] == 1)
    # An all-zero input maps entirely to +1.
    zero_values = np.zeros((2, 5))
    result = f(zero_values)[0]
    assert np.all(result == 1)
@pytest.mark.parametrize("fn", [lq.math.heaviside])
def test_heaviside(fn):
    """`heaviside` must return 0 for x <= 0 and 1 for x > 0."""
    x = tf.keras.backend.placeholder(ndim=2)
    f = tf.keras.backend.function([x], [fn(x)])
    # Inputs already in {0, 1} pass through unchanged.
    binarized_values = np.random.choice([0, 1], size=(2, 5))
    result = f([binarized_values])[0]
    np.testing.assert_allclose(result, binarized_values)
    # Real values are thresholded at zero (zero itself maps to 0).
    real_values = generate_real_values_with_zeros()
    result = f([real_values])[0]
    assert np.all(result[real_values <= 0] == 0)
    assert np.all(result[real_values > 0] == 1)
| 1,299 | 31.5 | 80 | py |
larq | larq-main/larq/layers_base.py | import logging
from typing import Optional
import tensorflow as tf
from larq import context, quantizers, utils
from larq.quantized_variable import QuantizedVariable
from larq.quantizers import NoOp, QuantizerType
log = logging.getLogger(__name__)
def _is_binary(quantizer):
    """Whether `quantizer` is a genuine 1-bit quantizer.

    `NoOp` instances are excluded: they only mark a variable as binary without
    actually quantizing it.
    """
    if isinstance(quantizer, NoOp):
        return False
    return getattr(quantizer, "precision", None) == 1
def _compute_padded_size(stride, dilation_rate, input_size, filter_size):
    """Total input extent (input plus 'same' padding) along one spatial dim.

    Returns `None` when the dimension is unknown. Works for both static ints
    and scalar tensors.
    """
    if input_size is None:
        return None
    kernel_extent = (filter_size - 1) * dilation_rate + 1
    num_windows = (input_size + stride - 1) // stride  # ceil(input / stride)
    padded_size = (num_windows - 1) * stride + kernel_extent
    # Never report a padded size smaller than the input itself.
    clamp = tf.math.maximum if tf.is_tensor(input_size) else max
    return clamp(padded_size, input_size)
def _compute_padding(stride, dilation_rate, input_size, filter_size):
    """(before, after) 'same' padding amounts for one spatial dimension.

    When the total padding is odd, the extra element goes on the `after` side.
    """
    padded_size = _compute_padded_size(stride, dilation_rate, input_size, filter_size)
    total_padding = padded_size - input_size
    before = total_padding // 2
    after = total_padding - before
    return before, after
class BaseLayer(tf.keras.layers.Layer):
    """Base class for defining quantized layers.

    `input_quantizer` is the element-wise quantization functions to use.
    If `input_quantizer=None` this layer is equivalent to `tf.keras.layers.Layer`.
    """

    def __init__(self, *args, input_quantizer=None, **kwargs):
        # Resolve string identifiers / serialized configs into a callable.
        self.input_quantizer = quantizers.get(input_quantizer)
        super().__init__(*args, **kwargs)

    def call(self, inputs):
        if self.input_quantizer:
            inputs = self.input_quantizer(inputs)
        # Run the wrapped layer in a quantized scope so `QuantizedVariable`s
        # return their quantized values during the forward pass.
        with context.quantized_scope(True):
            return super().call(inputs)

    def get_config(self):
        return {
            **super().get_config(),
            "input_quantizer": quantizers.serialize(self.input_quantizer),
        }

    def _get_quantizer(self, name) -> Optional[QuantizerType]:
        """Get quantizer for given kernel name"""
        # Subclasses override this to map weight names to their quantizers.
        return None

    def _add_variable_with_custom_getter(self, name: str, **kwargs):
        quantizer = self._get_quantizer(name)
        if quantizer is None:
            # No quantizer for this weight: create a plain variable.
            return super()._add_variable_with_custom_getter(name, **kwargs)
        old_getter = kwargs.pop("getter")

        # Wrap `getter` with a version that returns a `QuantizedVariable`.
        def getter(*args, **kwargs):
            variable = old_getter(*args, **kwargs)
            return QuantizedVariable.from_variable(variable, quantizer)

        return super()._add_variable_with_custom_getter(name, getter=getter, **kwargs)
class QuantizerBase(BaseLayer):
    """Base class for defining quantized layers with a single kernel.

    `kernel_quantizer` is the element-wise quantization functions to use.
    If `kernel_quantizer=None` this layer is equivalent to `BaseLayer`.
    """

    def __init__(self, *args, kernel_quantizer=None, **kwargs):
        self.kernel_quantizer = quantizers.get_kernel_quantizer(kernel_quantizer)
        super().__init__(*args, **kwargs)
        # Binary quantizers have zero gradient outside a clipped range; warn
        # when no constraint keeps the latent weights inside that range.
        if _is_binary(self.kernel_quantizer) and not self.kernel_constraint:
            log.warning(
                "Using a binary weight quantizer without setting `kernel_constraint` "
                "may result in starved weights (where the gradient is always zero)."
            )

    def _get_quantizer(self, name: str) -> Optional[QuantizerType]:
        # Only the `kernel` weight is quantized by this base class.
        return self.kernel_quantizer if name == "kernel" else None

    def get_config(self):
        return {
            **super().get_config(),
            "kernel_quantizer": quantizers.serialize(self.kernel_quantizer),
        }
class QuantizerBaseConv(tf.keras.layers.Layer):
    """Base class for defining quantized conv layers.

    Adds support for `pad_values`: Keras' native padding always pads with
    zeros, so for non-zero `pad_values` with `padding="same"` the padding is
    applied explicitly via `tf.pad` and the convolution runs as "valid".
    """

    def __init__(self, *args, pad_values=0.0, **kwargs):
        self.pad_values = pad_values
        super().__init__(*args, **kwargs)
        # Native (Keras) padding suffices unless we need non-zero "same" padding.
        is_zero_padding = not tf.is_tensor(self.pad_values) and self.pad_values == 0.0
        self._is_native_padding = self.padding != "same" or is_zero_padding
        if self.padding == "causal" and not is_zero_padding:
            raise ValueError("Causal padding with `pad_values != 0` is not supported.")

    def _get_spatial_padding_same(self, shape):
        # Per-spatial-dimension (before, after) padding amounts for "same".
        return [
            _compute_padding(stride, dilation_rate, shape[i], filter_size)
            for i, (stride, dilation_rate, filter_size) in enumerate(
                zip(self.strides, self.dilation_rate, self.kernel_size)
            )
        ]

    def _get_spatial_shape(self, input_shape):
        # Strip the batch and channel dimensions, keeping only spatial dims.
        return (
            input_shape[1:-1]
            if self.data_format == "channels_last"
            else input_shape[2:]
        )

    def _get_padding_same(self, inputs):
        # Full `tf.pad` spec including zero padding on batch/channel axes.
        input_shape = inputs.shape
        if not input_shape[1:].is_fully_defined():
            # Fall back to the dynamic shape when static dims are unknown.
            input_shape = tf.shape(inputs)
        padding = self._get_spatial_padding_same(self._get_spatial_shape(input_shape))
        return (
            [[0, 0], *padding, [0, 0]]
            if self.data_format == "channels_last"
            else [[0, 0], [0, 0], *padding]
        )

    def _get_padding_same_shape(self, input_shape):
        # Static shape of the input after explicit "same" padding.
        spatial_input_shape = self._get_spatial_shape(input_shape)
        spatial_shape = [
            _compute_padded_size(stride, dilation, size, filter_size)
            for size, stride, dilation, filter_size in zip(
                spatial_input_shape,
                self.strides,
                self.dilation_rate,
                self.kernel_size,
            )
        ]
        if self.data_format == "channels_last":
            return tf.TensorShape([input_shape[0], *spatial_shape, input_shape[-1]])
        return tf.TensorShape([*input_shape[:2], *spatial_shape])

    def build(self, input_shape):
        if self._is_native_padding:
            super().build(input_shape)
        else:
            # Build as a "valid" convolution over the explicitly padded input.
            with utils.patch_object(self, "padding", "valid"):
                super().build(self._get_padding_same_shape(input_shape))

    def call(self, inputs):
        if self._is_native_padding:
            return super().call(inputs)
        # Pad explicitly with `pad_values`, then convolve without padding.
        inputs = tf.pad(
            inputs, self._get_padding_same(inputs), constant_values=self.pad_values
        )
        with utils.patch_object(self, "padding", "valid"):
            return super().call(inputs)

    def get_config(self):
        return {
            **super().get_config(),
            "pad_values": tf.keras.backend.get_value(self.pad_values),
        }
class QuantizerDepthwiseBase(BaseLayer):
    """Base class for defining depthwise quantized layers

    `depthwise_quantizer` is the element-wise quantization functions to use.
    If `depthwise_quantizer=None` this layer is equivalent to `BaseLayer`.
    """

    def __init__(
        self,
        *args,
        depthwise_quantizer: Optional[QuantizerType] = None,
        **kwargs,
    ):
        self.depthwise_quantizer = quantizers.get_kernel_quantizer(depthwise_quantizer)
        super().__init__(*args, **kwargs)
        # Same starved-weights caveat as for `QuantizerBase.kernel_quantizer`.
        if _is_binary(self.depthwise_quantizer) and not self.depthwise_constraint:
            log.warning(
                "Using a binary weight quantizer without setting `depthwise_constraint` "
                "may result in starved weights (where the gradient is always zero)."
            )

    def _get_quantizer(self, name: str) -> Optional[QuantizerType]:
        # Only the depthwise kernel is quantized here.
        return self.depthwise_quantizer if name == "depthwise_kernel" else None

    def get_config(self):
        return {
            **super().get_config(),
            "depthwise_quantizer": quantizers.serialize(self.depthwise_quantizer),
        }
class QuantizerSeparableBase(BaseLayer):
    """Base class for defining separable quantized layers.

    `depthwise_quantizer` and `pointwise_quantizer` are the element-wise quantization
    functions to use. If all quantization functions are `None` this layer is equivalent
    to `BaseLayer`.
    """

    def __init__(
        self,
        *args,
        depthwise_quantizer: Optional[QuantizerType] = None,
        pointwise_quantizer: Optional[QuantizerType] = None,
        **kwargs,
    ):
        self.depthwise_quantizer = quantizers.get_kernel_quantizer(depthwise_quantizer)
        self.pointwise_quantizer = quantizers.get_kernel_quantizer(pointwise_quantizer)
        super().__init__(*args, **kwargs)
        # Warn separately per kernel: either may starve without a constraint.
        if _is_binary(self.depthwise_quantizer) and not self.depthwise_constraint:
            log.warning(
                "Using a binary `depthwise_quantizer` without setting `depthwise_constraint` "
                "may result in starved weights (where the gradient is always zero)."
            )
        if _is_binary(self.pointwise_quantizer) and not self.pointwise_constraint:
            log.warning(
                "Using a binary `pointwise_quantizer` without setting `pointwise_constraint` "
                "may result in starved weights (where the gradient is always zero)."
            )

    def _get_quantizer(self, name: str) -> Optional[QuantizerType]:
        # Separable convs own two kernels; pick the quantizer by weight name.
        if name == "depthwise_kernel":
            return self.depthwise_quantizer
        if name == "pointwise_kernel":
            return self.pointwise_quantizer
        return None

    def get_config(self):
        return {
            **super().get_config(),
            "depthwise_quantizer": quantizers.serialize(self.depthwise_quantizer),
            "pointwise_quantizer": quantizers.serialize(self.pointwise_quantizer),
        }
| 9,491 | 35.933852 | 94 | py |
larq | larq-main/larq/utils.py | from contextlib import contextmanager
import tensorflow as tf
def memory_as_readable_str(num_bits: int) -> str:
    """Generate a human-readable string for the memory size.

    1 KiB = 1024 B; we use the binary prefix (KiB) [1,2] instead of the decimal prefix
    (KB) to avoid any confusion with multiplying by 1000 instead of 1024.

    [1] https://en.wikipedia.org/wiki/Binary_prefix
    [2] https://physics.nist.gov/cuu/Units/binary.html
    """
    units = ["B", "KiB", "MiB", "GiB"]
    value = num_bits / 8
    unit = units[-1]
    for candidate in units[:-1]:
        if value < 1024:
            unit = candidate
            break
        value /= 1024
    # Sizes of 1024 GiB and up are still reported in GiB (the largest unit).
    return f"{value:,.2f} {unit}"
def register_keras_custom_object(cls):
    """Class decorator registering `cls` in Keras' custom-object registry.

    See https://github.com/tensorflow/addons/blob/master/tensorflow_addons/utils/keras_utils.py#L25
    """
    tf.keras.utils.get_custom_objects().update({cls.__name__: cls})
    return cls
def register_alias(name: str):
    """A decorator to register a custom keras object under a given alias.

    !!! example
        ```python
        @utils.register_alias("degeneration")
        class Degeneration(tf.keras.metrics.Metric):
            pass
        ```
    """

    def register_func(cls):
        tf.keras.utils.get_custom_objects().update({name: cls})
        return cls

    return register_func
def set_precision(precision: int = 32):
    """A decorator to set the precision of a quantizer function.

    # Arguments
        precision: An integer defining the precision of the output.
    """

    def decorator(function):
        # Attach the precision as a plain attribute on the function object.
        function.precision = precision
        return function

    return decorator
@contextmanager
def patch_object(object, name, value):
    """Temporarily overwrite attribute on object.

    The original value is restored when the `with` block exits. The restore is
    wrapped in `try`/`finally` so that it also happens when the block raises;
    previously an exception inside the block left the patched value in place.
    """
    old_value = getattr(object, name)
    setattr(object, name, value)
    try:
        yield
    finally:
        setattr(object, name, old_value)
| 1,874 | 25.408451 | 105 | py |
larq | larq-main/larq/models_test.py | import numpy as np
import pytest
import tensorflow as tf
from packaging import version
import larq as lq
from larq.models import ModelProfile
class ToyModel(tf.keras.Model):
    """Minimal subclassed (non-Sequential, non-functional) model for summary tests."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.conv = lq.layers.QuantConv2D(
            filters=32,
            kernel_size=(3, 3),
            kernel_quantizer="ste_sign",
            input_shape=(64, 64, 1),
            padding="same",
        )
        self.pool = tf.keras.layers.GlobalAvgPool2D()
        self.dense = tf.keras.layers.Dense(10, activation="softmax")

    def call(self, inputs):
        # conv -> global average pool -> softmax classifier
        return self.dense(self.pool(self.conv(inputs)))
def get_functional_model():
    """Build a small functional model that includes raw TF ops (split/concat)."""
    input = tf.keras.Input((32, 32, 3))
    x = lq.layers.QuantConv2D(
        filters=32,
        kernel_size=(3, 3),
        kernel_quantizer="ste_sign",
        padding="same",
    )(input)
    # Non-layer ops exercise the summary code path for TF-op nodes.
    y, z = tf.split(x, 2, axis=-1)
    x = tf.concat([y, z], axis=-1)
    return tf.keras.Model(input, x, name="toy_model")
def get_profile_model():
    """Sequential fixture covering binary, ternary, separable and frozen layers."""
    return tf.keras.models.Sequential(
        [
            # Binary kernel, full-precision input.
            lq.layers.QuantConv2D(
                filters=32,
                kernel_size=(3, 3),
                kernel_quantizer="ste_sign",
                input_shape=(64, 64, 1),
                padding="same",
            ),
            tf.keras.layers.MaxPooling2D((2, 2)),
            # Ternary depthwise conv with non-zero "same" padding.
            lq.layers.QuantDepthwiseConv2D(
                kernel_size=3,
                strides=(3, 3),
                input_quantizer=lq.quantizers.SteTern(),
                depthwise_quantizer=lq.quantizers.SteTern(),
                padding="same",
                pad_values=1.0,
                use_bias=False,
            ),
            tf.keras.layers.BatchNormalization(scale=False),
            # Fully binarized separable conv.
            lq.layers.QuantSeparableConv2D(
                32,
                (3, 3),
                input_quantizer="ste_sign",
                depthwise_quantizer="ste_sign",
                pointwise_quantizer="ste_sign",
                padding="same",
            ),
            tf.keras.layers.Flatten(),
            # Frozen full-precision head.
            tf.keras.layers.Dense(10, trainable=False),
        ],
    )
def get_submodel_profile_model(start_index=2, end_index=5):
    """Variant of `get_profile_model` with layers [start, end) nested in a submodel."""
    # Same as above, but with a submodel as a layer
    model = get_profile_model()
    # Create submodel from e.g. the middle three layers
    submodel = tf.keras.models.Sequential(
        [layer for layer in model.layers[start_index:end_index]],
    )
    return tf.keras.models.Sequential(
        [*model.layers[:start_index], submodel, *model.layers[end_index:]]
    )
def test_model_profile():
    # One profile entry per layer: the fixture model has 7 layers.
    profile = ModelProfile(get_profile_model())
    assert len(profile.layer_profiles) == 7
def test_layer_profile():
    """Check per-layer statistics against hand-computed expected values."""
    profile = ModelProfile(get_profile_model())
    # Expected kernel parameter counts, one entry per layer of the fixture.
    kernel_count = [
        32 * 3 * 3 * 1,
        0,
        32 * 3 * 3,
        0,
        32 * 3 * 3 * 1 + 32 * 1 * 1 * 32,
        0,
        32 * 11 * 11 * 10,
    ]
    bias_count = [32, 0, 0, 64, 32, 0, 10]
    param_count = [k + b for k, b in zip(kernel_count, bias_count)]
    memory = [  # bits * (c * w * h * b) + bits * bias
        1 * (32 * 3 * 3 * 1) + 32 * 32,
        0,
        2 * (32 * 3 * 3),
        32 * (2 * 32),
        1 * (32 * 3 * 3 * 1 + 32 * 1 * 1 * 32) + 32 * 32,
        0,
        32 * (32 * 11 * 11 * 10 + 10),
    ]
    # As `memory`, but with full-precision weights stored as int8 (8 bits).
    int8_fp_weights_mem = [
        1 * (32 * 3 * 3 * 1) + 8 * 32,
        0,
        2 * (32 * 3 * 3),
        8 * (32 * 2),
        1 * (32 * 3 * 3 * 1 + 32 * 1 * 1 * 32) + 8 * 32,
        0,
        8 * (32 * 11 * 11 * 10 + 10),
    ]
    fp_equiv_mem = [32 * n for n in param_count]
    input_precision = [None, None, 2, None, 1, None, None]
    output_shape = [
        (-1, 64, 64, 32),
        (-1, 32, 32, 32),
        (-1, 11, 11, 32),
        (-1, 11, 11, 32),
        (-1, 11, 11, 32),
        (-1, 11 * 11 * 32),
        (-1, 10),
    ]
    output_pixels = [int(np.prod(os[1:-1])) for os in output_shape]
    unique_param_bidtwidths = [[1, 32], [], [2], [32], [1, 32], [], [32]]
    unique_op_precisions = [[32], [], [2], [], [1], [], [32]]
    # MACs = kernel parameters applied at every output pixel.
    mac_count = [params * pixels for params, pixels in zip(kernel_count, output_pixels)]
    # A MAC is binary only when both weights and input are 1-bit.
    bin_mac_count = [
        mc if (1 in pb and ip == 1) else 0
        for mc, pb, ip in zip(mac_count, unique_param_bidtwidths, input_precision)
    ]
    profiles = profile.layer_profiles
    for i in range(len(profiles)):
        print(f"Testing layer {i}...")
        assert profiles[i].input_precision == input_precision[i]
        assert profiles[i].output_shape == output_shape[i]
        assert profiles[i].output_pixels == output_pixels[i]
        assert profiles[i].weight_count() == param_count[i]
        assert profiles[i].unique_param_bidtwidths == unique_param_bidtwidths[i]
        assert profiles[i].unique_op_precisions == unique_op_precisions[i]
        assert profiles[i].memory == memory[i]
        assert profiles[i].fp_equivalent_memory == fp_equiv_mem[i]
        assert profiles[i].int8_fp_weights_memory == int8_fp_weights_mem[i]
        assert profiles[i].op_count("mac") == mac_count[i]
        assert profiles[i].op_count("mac", 1) == bin_mac_count[i]
def test_layer_profile_1d():
    """Same per-layer statistics check as `test_layer_profile`, for 1D convs."""
    model = tf.keras.models.Sequential(
        [
            lq.layers.QuantConv1D(
                filters=32,
                kernel_size=3,
                input_shape=(64, 6),
                kernel_quantizer="ste_sign",
                padding="same",
            ),
            tf.keras.layers.MaxPooling1D(2),
            lq.layers.QuantSeparableConv1D(
                filters=16,
                kernel_size=3,
                input_quantizer="ste_sign",
                depthwise_quantizer="ste_sign",
                pointwise_quantizer="ste_sign",
                padding="same",
            ),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(10, trainable=False),
        ]
    )
    profile = ModelProfile(model)
    # Hand-computed expected values, one entry per layer.
    kernel_count = [(32 * 3 * 6), 0, (32 * 3 + 16 * 32), 0, (16 * 32 * 10)]
    bias_count = [32, 0, 16, 0, 10]
    param_count = [k + b for k, b in zip(kernel_count, bias_count)]
    memory = [  # bits * (c * w * d) + bits * bias
        1 * (32 * 3 * 6) + 32 * 32,
        0,
        1 * (32 * 3 + 16 * 32) + 32 * 16,
        0,
        32 * (32 * 16 * 10 + 10),
    ]
    int8_fp_weights_mem = [
        1 * (32 * 3 * 6) + 8 * 32,
        0,
        1 * (32 * 3 + 16 * 32) + 8 * 16,
        0,
        8 * (32 * 16 * 10 + 10),
    ]
    fp_equiv_mem = [32 * n for n in param_count]
    input_precision = [None, None, 1, None, None]
    output_shape = [
        (-1, 64, 32),
        (-1, 32, 32),
        (-1, 32, 16),
        (-1, 32 * 16),
        (-1, 10),
    ]
    output_pixels = [int(np.prod(os[1:-1])) for os in output_shape]
    unique_param_bidtwidths = [[1, 32], [], [1, 32], [], [32]]
    unique_op_precisions = [[32], [], [1], [], [32]]
    mac_count = [params * pixels for params, pixels in zip(kernel_count, output_pixels)]
    bin_mac_count = [
        mc if (1 in pb and ip == 1) else 0
        for mc, pb, ip in zip(mac_count, unique_param_bidtwidths, input_precision)
    ]
    profiles = profile.layer_profiles
    for i in range(len(profiles)):
        print(f"Testing layer {i}...")
        assert profiles[i].input_precision == input_precision[i]
        assert profiles[i].output_shape == output_shape[i]
        assert profiles[i].output_pixels == output_pixels[i]
        assert profiles[i].weight_count() == param_count[i]
        assert profiles[i].unique_param_bidtwidths == unique_param_bidtwidths[i]
        assert profiles[i].unique_op_precisions == unique_op_precisions[i]
        assert profiles[i].memory == memory[i]
        assert profiles[i].fp_equivalent_memory == fp_equiv_mem[i]
        assert profiles[i].int8_fp_weights_memory == int8_fp_weights_mem[i]
        assert profiles[i].op_count("mac") == mac_count[i]
        assert profiles[i].op_count("mac", 1) == bin_mac_count[i]
def test_summary(snapshot, capsys):
    """Snapshot the printed summary for the fixture model and a weightless model."""
    model = get_profile_model()
    lq.models.summary(model)
    captured = capsys.readouterr()
    snapshot.assert_match(captured.out)
    # A model with no weights
    model = tf.keras.models.Sequential(
        [tf.keras.layers.Lambda(lambda x: tf.zeros(2), input_shape=(32, 32))]
    )
    lq.models.summary(model)
    captured = capsys.readouterr()
    snapshot.assert_match(captured.out)
def test_submodel_summary(capsys, snapshot):
    """Profiling a nested submodel must aggregate its inner layers' statistics."""
    default_profile = ModelProfile(get_profile_model())
    submodel = get_submodel_profile_model(start_index=2, end_index=5)
    submodel_profile = ModelProfile(submodel)
    submodel_layer_profile = submodel_profile.layer_profiles[2]
    # Assert that layer profile of the submodel "layer" matches the original layers
    profiles = default_profile.layer_profiles[2:5]
    assert submodel_layer_profile.input_precision == profiles[0].input_precision
    assert submodel_layer_profile.output_shape == profiles[-1].output_shape
    assert submodel_layer_profile.output_pixels == profiles[-1].output_pixels
    assert submodel_layer_profile.weight_count() == sum(
        (p.weight_count() for p in profiles)
    )
    bitwidths = []
    op_precisions = []
    for p in profiles:
        bitwidths.extend(p.unique_param_bidtwidths)
        op_precisions.extend(p.unique_op_precisions)
    assert set(submodel_layer_profile.unique_param_bidtwidths) == set(bitwidths)
    assert set(submodel_layer_profile.unique_op_precisions) == set(op_precisions)
    assert submodel_layer_profile.memory == sum((p.memory for p in profiles))
    assert submodel_layer_profile.fp_equivalent_memory == sum(
        (p.fp_equivalent_memory for p in profiles)
    )
    assert submodel_layer_profile.int8_fp_weights_memory == sum(
        (p.int8_fp_weights_memory for p in profiles)
    )
    assert submodel_layer_profile.op_count("mac") == sum(
        (p.op_count("mac") for p in profiles)
    )
    assert submodel_layer_profile.op_count("mac", 1) == sum(
        (p.op_count("mac", 1) for p in profiles)
    )
    # Assert that the total profile summary matches
    assert submodel_profile.generate_summary() == default_profile.generate_summary()
    # Snapshot the submodel profile itself to make sure it remains correct
    lq.models.summary(get_submodel_profile_model())
    snapshot.assert_match(capsys.readouterr().out)
def test_subclass_model_summary(snapshot, capsys):
    # Subclassed models must be built before their summary can be printed.
    model = ToyModel()
    model.build((None, 32, 32, 3))
    lq.models.summary(model)
    captured = capsys.readouterr()
    snapshot.assert_match(captured.out)


def test_functional_model_summary(snapshot, capsys):
    lq.models.summary(get_functional_model())
    captured = capsys.readouterr()
    # Summary formatting differs between TF versions; snapshot per version range.
    key = "2.4+" if version.parse(tf.__version__) >= version.parse("2.3.9") else "<2.4"
    snapshot.assert_match(captured.out.lower(), key)


def test_summary_invalid_model():
    # An empty, unbuilt `tf.keras.Model` is rejected.
    with pytest.raises(ValueError):
        lq.models.summary(tf.keras.Model())


def test_bitsize_invalid_key():
    # Negative bit widths have no string representation.
    with pytest.raises(NotImplementedError):
        lq.models._bitsize_as_str(-1)


def test_number_as_readable_str_large():
    # Very large counts fall back to scientific notation.
    assert lq.models._number_as_readable_str(1e16) == "1.00E+16"


@pytest.fixture(autouse=True)
def run_around_tests():
    # Reset global Keras state between tests.
    tf.keras.backend.clear_session()
    yield
| 11,312 | 33.281818 | 88 | py |
larq | larq-main/larq/layers.py | """Each Quantized Layer requires a `input_quantizer` and `kernel_quantizer` that
describes the way of quantizing the activation of the previous layer and the weights
respectively.
If both `input_quantizer` and `kernel_quantizer` are `None` the layer
is equivalent to a full precision layer.
"""
import tensorflow as tf
from packaging import version
from larq import utils
from larq.layers_base import (
QuantizerBase,
QuantizerBaseConv,
QuantizerDepthwiseBase,
QuantizerSeparableBase,
)
@utils.register_keras_custom_object
class QuantDense(QuantizerBase, tf.keras.layers.Dense):
    """Just your regular densely-connected quantized NN layer.

    `QuantDense` implements the operation:
    `output = activation(dot(input_quantizer(input), kernel_quantizer(kernel)) + bias)`,
    where `activation` is the element-wise activation function passed as the
    `activation` argument, `kernel` is a weights matrix created by the layer, and `bias`
    is a bias vector created by the layer (only applicable if `use_bias` is `True`).

    `input_quantizer` and `kernel_quantizer` are the element-wise quantization
    functions to use. If both quantization functions are `None` this layer is
    equivalent to `Dense`.

    !!! note ""
        If the input to the layer has a rank greater than 2, then it is flattened
        prior to the initial dot product with `kernel`.

    !!! example
        ```python
        # as first layer in a sequential model:
        model = Sequential()
        model.add(
            QuantDense(
                32,
                input_quantizer="ste_sign",
                kernel_quantizer="ste_sign",
                kernel_constraint="weight_clip",
                input_shape=(16,),
            )
        )
        # now the model will take as input arrays of shape (*, 16)
        # and output arrays of shape (*, 32)

        # after the first layer, you don't need to specify
        # the size of the input anymore:
        model.add(
            QuantDense(
                32,
                input_quantizer="ste_sign",
                kernel_quantizer="ste_sign",
                kernel_constraint="weight_clip",
            )
        )
        ```

    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use. If you don't specify anything,
            no activation is applied (`a(x) = x`).
        use_bias: Boolean, whether the layer uses a bias vector.
        input_quantizer: Quantization function applied to the input of the layer.
        kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
        kernel_initializer: Initializer for the `kernel` weights matrix.
        bias_initializer: Initializer for the bias vector.
        kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
        kernel_constraint: Constraint function applied to the `kernel` weights matrix.
        bias_constraint: Constraint function applied to the bias vector.

    # Input shape
        N-D tensor with shape: `(batch_size, ..., input_dim)`. The most common situation
        would be a 2D input with shape `(batch_size, input_dim)`.

    # Output shape
        N-D tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D input
        with shape `(batch_size, input_dim)`, the output would have shape
        `(batch_size, units)`.
    """

    def __init__(
        self,
        units,
        activation=None,
        use_bias=True,
        input_quantizer=None,
        kernel_quantizer=None,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Pure pass-through: quantizer resolution happens in `QuantizerBase`,
        # everything else is handled by `tf.keras.layers.Dense`.
        super().__init__(
            units,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            kernel_quantizer=kernel_quantizer,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
@utils.register_keras_custom_object
class QuantConv1D(QuantizerBase, QuantizerBaseConv, tf.keras.layers.Conv1D):
    """1D quantized convolution layer (e.g. temporal convolution).

    This layer creates a convolution kernel that is convolved with the layer input
    over a single spatial (or temporal) dimension to produce a tensor of outputs.
    `input_quantizer` and `kernel_quantizer` are the element-wise quantization
    functions to use. If both quantization functions are `None` this layer is
    equivalent to `Conv1D`.

    If `use_bias` is True, a bias vector is created and added to the outputs.
    Finally, if `activation` is not `None`, it is applied to the outputs as well.

    When using this layer as the first layer in a model, provide an `input_shape`
    argument (tuple of integers or `None`, e.g. `(10, 128)` for sequences of
    10 vectors of 128-dimensional vectors, or `(None, 128)` for variable-length
    sequences of 128-dimensional vectors.

    # Arguments
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        kernel_size: An integer or tuple/list of a single integer,
            specifying the length of the 1D convolution window.
        strides: An integer or tuple/list of a single integer, specifying the stride
            length of the convolution. Specifying any stride value != 1 is incompatible
            with specifying any `dilation_rate` value != 1.
        padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive). `"causal"`
            results in causal (dilated) convolutions, e.g. output[t] does not depend on
            input[t+1:]. Useful when modeling temporal data where the model should not
            violate the temporal order. See [WaveNet: A Generative Model for Raw Audio,
            section 2.1](https://arxiv.org/abs/1609.03499).
        pad_values: The pad value to use when `padding="same"`.
        data_format: A string, one of `channels_last` (default) or `channels_first`.
        dilation_rate: an integer or tuple/list of a single integer, specifying the
            dilation rate to use for dilated convolution. Currently, specifying any
            `dilation_rate` value != 1 is incompatible with specifying any `strides`
            value != 1.
        groups: A positive integer specifying the number of groups in which the input
            is split along the channel axis. Each group is convolved separately with
            `filters / groups` filters. The output is the concatenation of all the
            `groups` results along the channel axis. Input channels and `filters`
            must both be divisible by `groups`.
        activation: Activation function to use. If you don't specify anything, no
            activation is applied (`a(x) = x`).
        use_bias: Boolean, whether the layer uses a bias vector.
        input_quantizer: Quantization function applied to the input of the layer.
        kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
        kernel_initializer: Initializer for the `kernel` weights matrix.
        bias_initializer: Initializer for the bias vector.
        kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
        kernel_constraint: Constraint function applied to the kernel matrix.
        bias_constraint: Constraint function applied to the bias vector.

    # Input shape
        3D tensor with shape: `(batch_size, steps, input_dim)`

    # Output shape
        3D tensor with shape: `(batch_size, new_steps, filters)`.
        `steps` value might have changed due to padding or strides.
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        pad_values=0.0,
        data_format="channels_last",
        dilation_rate=1,
        groups=1,
        activation=None,
        use_bias=True,
        input_quantizer=None,
        kernel_quantizer=None,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        if groups != 1:
            if version.parse(tf.__version__) >= version.parse("2.3"):
                # Keras' `Conv1D` only accepts `groups` from TF 2.3 onwards, so
                # the argument is forwarded via **kwargs only when supported.
                kwargs = {**kwargs, "groups": groups}
            else:
                raise ValueError(
                    "`groups` != 1 requires TensorFlow version 2.3 or newer."
                )
        super().__init__(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            pad_values=pad_values,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            kernel_quantizer=kernel_quantizer,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
@utils.register_keras_custom_object
class QuantConv2D(QuantizerBase, QuantizerBaseConv, tf.keras.layers.Conv2D):
    """2D quantized convolution layer (e.g. spatial convolution over images).

    This layer creates a convolution kernel that is convolved
    with the layer input to produce a tensor of outputs.
    `input_quantizer` and `kernel_quantizer` are the element-wise quantization
    functions to use. If both quantization functions are `None` this layer is
    equivalent to `Conv2D`. If `use_bias` is True, a bias vector is created
    and added to the outputs. Finally, if `activation` is not `None`,
    it is applied to the outputs as well.
    When using this layer as the first layer in a model, provide the keyword argument
    `input_shape` (tuple of integers, does not include the sample axis),
    e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in
    `data_format="channels_last"`.

    # Arguments
    filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
        height and width of the 2D convolution window. Can be a single integer
        to specify the same value for all spatial dimensions.
    strides: An integer or tuple/list of 2 integers, specifying the strides of
        the convolution along the height and width. Can be a single integer to
        specify the same value for all spatial dimensions. Specifying any stride
        value != 1 is incompatible with specifying any `dilation_rate` value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    pad_values: The pad value to use when `padding="same"`.
    data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs. `channels_last` corresponds to
        inputs with shape `(batch, height, width, channels)` while `channels_first`
        corresponds to inputs with shape `(batch, channels, height, width)`. It
        defaults to the `image_data_format` value found in your Keras config file at
        `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
    dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation
        rate to use for dilated convolution. Can be a single integer to specify the
        same value for all spatial dimensions. Currently, specifying any
        `dilation_rate` value != 1 is incompatible with specifying any stride value
        != 1.
    groups: A positive integer specifying the number of groups in which the input
        is split along the channel axis. Each group is convolved separately with
        `filters / groups` filters. The output is the concatenation of all the
        `groups` results along the channel axis. Input channels and `filters` must
        both be divisible by `groups`.
    activation: Activation function to use. If you don't specify anything,
        no activation is applied (`a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    input_quantizer: Quantization function applied to the input of the layer.
    kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation").
    kernel_constraint: Constraint function applied to the kernel matrix.
    bias_constraint: Constraint function applied to the bias vector.

    # Input shape
    4D tensor with shape:
    `(samples, channels, rows, cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(samples, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
    4D tensor with shape:
    `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
    `rows` and `cols` values might have changed due to padding.
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        pad_values=0.0,
        data_format=None,
        dilation_rate=(1, 1),
        groups=1,
        activation=None,
        use_bias=True,
        input_quantizer=None,
        kernel_quantizer=None,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # `groups` is only accepted by the Keras Conv2D base layer on TF >= 2.3,
        # so it is forwarded through `kwargs` there and rejected on older versions.
        if groups != 1:
            if version.parse(tf.__version__) >= version.parse("2.3"):
                kwargs = {**kwargs, "groups": groups}
            else:
                raise ValueError(
                    "`groups` != 1 requires TensorFlow version 2.3 or newer."
                )
        # The quantizer arguments are consumed by the Quantizer* mixin bases in
        # the MRO; the remaining arguments go to the Keras Conv2D base class.
        super().__init__(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            pad_values=pad_values,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            kernel_quantizer=kernel_quantizer,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
@utils.register_keras_custom_object
class QuantConv3D(QuantizerBase, QuantizerBaseConv, tf.keras.layers.Conv3D):
    """3D quantized convolution layer (e.g. spatial convolution over volumes).

    This layer creates a convolution kernel that is convolved
    with the layer input to produce a tensor of
    outputs. `input_quantizer` and `kernel_quantizer` are the element-wise quantization
    functions to use. If both quantization functions are `None` this layer is
    equivalent to `Conv3D`. If `use_bias` is True, a bias vector is created and
    added to the outputs. Finally, if `activation` is not `None`,
    it is applied to the outputs as well.
    When using this layer as the first layer in a model, provide the keyword argument
    `input_shape` (tuple of integers, does not include the sample axis),
    e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
    with a single channel, in `data_format="channels_last"`.

    # Arguments
    filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 3 integers, specifying the
        depth, height and width of the 3D convolution window. Can be a single
        integer to specify the same value for all spatial dimensions.
    strides: An integer or tuple/list of 3 integers, specifying the strides of the
        convolution along each spatial dimension. Can be a single integer to specify
        the same value for all spatial dimensions. Specifying any stride value != 1
        is incompatible with specifying any `dilation_rate` value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    pad_values: The pad value to use when `padding="same"`.
    data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs. `channels_last` corresponds to
        inputs with shape
        `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
        `channels_first` corresponds to inputs with shape
        `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults
        to the `image_data_format` value found in your Keras config file at
        `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
    dilation_rate: an integer or tuple/list of 3 integers, specifying the dilation
        rate to use for dilated convolution. Can be a single integer to specify the
        same value for all spatial dimensions. Currently, specifying any
        `dilation_rate` value != 1 is incompatible with specifying any stride value
        != 1.
    groups: A positive integer specifying the number of groups in which the input
        is split along the channel axis. Each group is convolved separately with
        `filters / groups` filters. The output is the concatenation of all the
        `groups` results along the channel axis. Input channels and `filters` must
        both be divisible by `groups`.
    activation: Activation function to use. If you don't specify anything,
        no activation is applied (`a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    input_quantizer: Quantization function applied to the input of the layer.
    kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation").
    kernel_constraint: Constraint function applied to the kernel matrix.
    bias_constraint: Constraint function applied to the bias vector.

    # Input shape
    5D tensor with shape:
    `(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if
    data_format='channels_first'
    or 5D tensor with shape:
    `(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if
    data_format='channels_last'.

    # Output shape
    5D tensor with shape:
    `(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if
    data_format='channels_first'
    or 5D tensor with shape:
    `(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
    data_format='channels_last'.
    `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
    changed due to padding.
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1, 1),
        padding="valid",
        pad_values=0.0,
        data_format=None,
        dilation_rate=(1, 1, 1),
        groups=1,
        activation=None,
        use_bias=True,
        input_quantizer=None,
        kernel_quantizer=None,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # `groups` is only accepted by the Keras Conv3D base layer on TF >= 2.3,
        # so it is forwarded through `kwargs` there and rejected on older versions.
        if groups != 1:
            if version.parse(tf.__version__) >= version.parse("2.3"):
                kwargs = {**kwargs, "groups": groups}
            else:
                raise ValueError(
                    "`groups` != 1 requires TensorFlow version 2.3 or newer."
                )
        # The quantizer arguments are consumed by the Quantizer* mixin bases in
        # the MRO; the remaining arguments go to the Keras Conv3D base class.
        super().__init__(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            pad_values=pad_values,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            kernel_quantizer=kernel_quantizer,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
@utils.register_keras_custom_object
class QuantDepthwiseConv2D(
    QuantizerDepthwiseBase, QuantizerBaseConv, tf.keras.layers.DepthwiseConv2D
):
    """Quantized depthwise separable 2D convolution.

    Depthwise Separable convolutions consists in performing just the first step in a
    depthwise spatial convolution (which acts on each input channel separately).
    The `depth_multiplier` argument controls how many output channels are generated per
    input channel in the depthwise step.

    # Arguments
    kernel_size: An integer or tuple/list of 2 integers, specifying the height and
        width of the 2D convolution window. Can be a single integer to specify the
        same value for all spatial dimensions.
    strides: An integer or tuple/list of 2 integers, specifying the strides of the
        convolution along the height and width. Can be a single integer to specify
        the same value for all spatial dimensions. Specifying any stride value != 1
        is incompatible with specifying any `dilation_rate` value != 1.
    padding: one of `'valid'` or `'same'` (case-insensitive).
    pad_values: The pad value to use when `padding="same"`.
    depth_multiplier: The number of depthwise convolution output channels for each
        input channel. The total number of depthwise convolution output channels
        will be equal to `filters_in * depth_multiplier`.
    data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs. `channels_last` corresponds to
        inputs with shape `(batch, height, width, channels)` while `channels_first`
        corresponds to inputs with shape `(batch, channels, height, width)`.
        It defaults to the `image_data_format` value found in your
        Keras config file at `~/.keras/keras.json`.
        If you never set it, then it will be 'channels_last'.
    dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation
        rate to use for dilated convolution. Can be a single integer to specify the
        same value for all spatial dimensions. Currently, specifying any
        `dilation_rate` value != 1 is incompatible with specifying any stride value
        != 1.
    activation: Activation function to use.
        If you don't specify anything, no activation is applied (ie. `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    input_quantizer: Quantization function applied to the input of the layer.
    depthwise_quantizer: Quantization function applied to the `depthwise_kernel`
        weights matrix.
    depthwise_initializer: Initializer for the depthwise kernel matrix.
    bias_initializer: Initializer for the bias vector.
    depthwise_regularizer: Regularizer function applied to the depthwise kernel
        matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
        the output of the layer (its 'activation').
    depthwise_constraint: Constraint function applied to the depthwise kernel
        matrix.
    bias_constraint: Constraint function applied to the bias vector.

    # Input shape
    4D tensor with shape:
    `[batch, channels, rows, cols]` if data_format='channels_first'
    or 4D tensor with shape:
    `[batch, rows, cols, channels]` if data_format='channels_last'.

    # Output shape
    4D tensor with shape:
    `[batch, filters, new_rows, new_cols]` if data_format='channels_first'
    or 4D tensor with shape:
    `[batch, new_rows, new_cols, filters]` if data_format='channels_last'.
    `rows` and `cols` values might have changed due to padding.
    """

    def __init__(
        self,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        pad_values=0.0,
        depth_multiplier=1,
        data_format=None,
        dilation_rate=(1, 1),
        activation=None,
        use_bias=True,
        input_quantizer=None,
        depthwise_quantizer=None,
        depthwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Pure pass-through: the quantizer arguments are consumed by the
        # Quantizer* mixin bases in the MRO, the rest by the Keras
        # DepthwiseConv2D base class. (DepthwiseConv2D has no `groups`
        # argument, so no TF-version check is needed here.)
        super().__init__(
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            pad_values=pad_values,
            depth_multiplier=depth_multiplier,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            depthwise_quantizer=depthwise_quantizer,
            depthwise_initializer=depthwise_initializer,
            bias_initializer=bias_initializer,
            depthwise_regularizer=depthwise_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            depthwise_constraint=depthwise_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
@utils.register_keras_custom_object
class QuantSeparableConv1D(
    QuantizerSeparableBase, QuantizerBaseConv, tf.keras.layers.SeparableConv1D
):
    """Depthwise separable 1D quantized convolution.

    This layer performs a depthwise convolution that acts separately on channels,
    followed by a pointwise convolution that mixes channels.
    `input_quantizer`, `depthwise_quantizer` and `pointwise_quantizer` are the
    element-wise quantization functions to use. If all quantization functions are `None`
    this layer is equivalent to `SeparableConv1D`. If `use_bias` is True and
    a bias initializer is provided, it adds a bias vector to the output.
    It then optionally applies an activation function to produce the final output.

    # Arguments
    filters: Integer, the dimensionality of the output space (i.e. the number
        of filters in the convolution).
    kernel_size: A single integer specifying the spatial dimensions of the filters.
    strides: A single integer specifying the strides of the convolution.
        Specifying any `stride` value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
    padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
    pad_values: The pad value to use when `padding="same"`.
    data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs. `channels_last` corresponds
        to inputs with shape `(batch, length, channels)` while `channels_first`
        corresponds to inputs with shape `(batch, channels, length)`.
    dilation_rate: A single integer, specifying the dilation rate to use for dilated
        convolution. Currently, specifying any `dilation_rate` value != 1 is
        incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
        each input channel. The total number of depthwise convolution output
        channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function. Set it to None to maintain a linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    input_quantizer: Quantization function applied to the input of the layer.
    depthwise_quantizer: Quantization function applied to the depthwise kernel.
    pointwise_quantizer: Quantization function applied to the pointwise kernel.
    depthwise_initializer: An initializer for the depthwise convolution kernel.
    pointwise_initializer: An initializer for the pointwise convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
        initializer will be used.
    depthwise_regularizer: Optional regularizer for the depthwise convolution
        kernel.
    pointwise_regularizer: Optional regularizer for the pointwise convolution
        kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
        depthwise kernel after being updated by an `Optimizer`
        (e.g. used for norm constraints or value constraints for layer weights).
        The function must take as input the unprojected variable and must return
        the projected variable (which must have the same shape). Constraints are
        not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
        pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
        bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` the weights of this layer will be marked as
        trainable (and listed in `layer.trainable_weights`).
    name: A string, the name of the layer.
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        pad_values=0.0,
        data_format=None,
        dilation_rate=1,
        depth_multiplier=1,
        activation=None,
        use_bias=True,
        input_quantizer=None,
        depthwise_quantizer=None,
        pointwise_quantizer=None,
        depthwise_initializer="glorot_uniform",
        pointwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        pointwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        pointwise_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Pure pass-through: the three quantizer arguments are consumed by the
        # QuantizerSeparableBase mixin, the rest by the Keras SeparableConv1D
        # base class.
        super().__init__(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            pad_values=pad_values,
            data_format=data_format,
            dilation_rate=dilation_rate,
            depth_multiplier=depth_multiplier,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            depthwise_quantizer=depthwise_quantizer,
            pointwise_quantizer=pointwise_quantizer,
            depthwise_initializer=depthwise_initializer,
            pointwise_initializer=pointwise_initializer,
            bias_initializer=bias_initializer,
            depthwise_regularizer=depthwise_regularizer,
            pointwise_regularizer=pointwise_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            depthwise_constraint=depthwise_constraint,
            pointwise_constraint=pointwise_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
@utils.register_keras_custom_object
class QuantSeparableConv2D(
    QuantizerSeparableBase, QuantizerBaseConv, tf.keras.layers.SeparableConv2D
):
    """Depthwise separable 2D convolution.

    Separable convolutions consist in first performing a depthwise spatial convolution
    (which acts on each input channel separately) followed by a pointwise convolution
    which mixes together the resulting output channels. The `depth_multiplier` argument
    controls how many output channels are generated per input channel
    in the depthwise step.
    `input_quantizer`, `depthwise_quantizer` and `pointwise_quantizer` are the
    element-wise quantization functions to use. If all quantization functions are `None`
    this layer is equivalent to `SeparableConv2D`. If `use_bias` is True and
    a bias initializer is provided, it adds a bias vector to the output.
    It then optionally applies an activation function to produce the final output.
    Intuitively, separable convolutions can be understood as a way to factorize a
    convolution kernel into two smaller kernels,
    or as an extreme version of an Inception block.

    # Arguments
    filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the height and
        width of the 2D convolution window. Can be a single integer to specify the
        same value for all spatial dimensions.
    strides: An integer or tuple/list of 2 integers, specifying the strides of the
        convolution along the height and width. Can be a single integer to specify
        the same value for all spatial dimensions. Specifying any stride value != 1
        is incompatible with specifying any `dilation_rate` value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    pad_values: The pad value to use when `padding="same"`.
    data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs. `channels_last` corresponds to
        inputs with shape `(batch, height, width, channels)` while `channels_first`
        corresponds to inputs with shape `(batch, channels, height, width)`. It
        defaults to the `image_data_format` value found in your Keras config file at
        `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
    dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation
        rate to use for dilated convolution. Can be a single integer to specify the
        same value for all spatial dimensions. Currently, specifying any
        `dilation_rate` value != 1 is incompatible with specifying any stride value
        != 1.
    depth_multiplier: The number of depthwise convolution output channels for each
        input channel. The total number of depthwise convolution output channels
        will be equal to `filters_in * depth_multiplier`.
    activation: Activation function to use. If you don't specify anything,
        no activation is applied (`a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    input_quantizer: Quantization function applied to the input of the layer.
    depthwise_quantizer: Quantization function applied to the depthwise kernel
        matrix.
    pointwise_quantizer: Quantization function applied to the pointwise kernel
        matrix.
    depthwise_initializer: Initializer for the depthwise kernel matrix.
    pointwise_initializer: Initializer for the pointwise kernel matrix.
    bias_initializer: Initializer for the bias vector.
    depthwise_regularizer: Regularizer function applied to the depthwise kernel
        matrix.
    pointwise_regularizer: Regularizer function applied to the pointwise kernel
        matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation").
    depthwise_constraint: Constraint function applied to the depthwise kernel
        matrix.
    pointwise_constraint: Constraint function applied to the pointwise kernel
        matrix.
    bias_constraint: Constraint function applied to the bias vector.

    # Input shape
    4D tensor with shape:
    `(batch, channels, rows, cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(batch, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
    4D tensor with shape:
    `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
    `rows` and `cols` values might have changed due to padding.
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        pad_values=0.0,
        data_format=None,
        dilation_rate=(1, 1),
        depth_multiplier=1,
        activation=None,
        use_bias=True,
        input_quantizer=None,
        depthwise_quantizer=None,
        pointwise_quantizer=None,
        depthwise_initializer="glorot_uniform",
        pointwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        pointwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        pointwise_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Pure pass-through: the three quantizer arguments are consumed by the
        # QuantizerSeparableBase mixin, the rest by the Keras SeparableConv2D
        # base class.
        super().__init__(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            pad_values=pad_values,
            data_format=data_format,
            dilation_rate=dilation_rate,
            depth_multiplier=depth_multiplier,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            depthwise_quantizer=depthwise_quantizer,
            pointwise_quantizer=pointwise_quantizer,
            depthwise_initializer=depthwise_initializer,
            pointwise_initializer=pointwise_initializer,
            bias_initializer=bias_initializer,
            depthwise_regularizer=depthwise_regularizer,
            pointwise_regularizer=pointwise_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            depthwise_constraint=depthwise_constraint,
            pointwise_constraint=pointwise_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
@utils.register_keras_custom_object
class QuantConv2DTranspose(QuantizerBase, tf.keras.layers.Conv2DTranspose):
    """Transposed quantized convolution layer (sometimes called Deconvolution).

    The need for transposed convolutions generally arises from the desire to use a
    transformation going in the opposite direction of a normal convolution, i.e.,
    from something that has the shape of the output of some convolution to something
    that has the shape of its input while maintaining a connectivity pattern
    that is compatible with said convolution. `input_quantizer` and `kernel_quantizer`
    are the element-wise quantization functions to use. If both quantization functions
    are `None` this layer is equivalent to `Conv2DTranspose`.
    When using this layer as the first layer in a model, provide the keyword argument
    `input_shape` (tuple of integers, does not include the sample axis), e.g.
    `input_shape=(128, 128, 3)` for 128x128 RGB pictures in
    `data_format="channels_last"`.

    # Arguments
    filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
        height and width of the 2D convolution window. Can be a single integer
        to specify the same value for all spatial dimensions.
    strides: An integer or tuple/list of 2 integers, specifying the strides of
        the convolution along the height and width. Can be a single integer to
        specify the same value for all spatial dimensions. Specifying any stride
        value != 1 is incompatible with specifying any `dilation_rate` value != 1.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    output_padding: An integer or tuple/list of 2 integers, specifying the amount
        of padding along the height and width of the output tensor. Can be a single
        integer to specify the same value for all spatial dimensions. The amount of
        output padding along a given dimension must be lower than the stride along
        that same dimension.
        If set to `None` (default), the output shape is inferred.
    data_format: A string, one of `channels_last` (default) or `channels_first`. The
        ordering of the dimensions in the inputs. `channels_last` corresponds to
        inputs with shape `(batch, height, width, channels)` while `channels_first`
        corresponds to inputs with shape `(batch, channels, height, width)`. It
        defaults to the `image_data_format` value found in your Keras config file at
        `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
    dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation
        rate to use for dilated convolution. Can be a single integer to specify the
        same value for all spatial dimensions. Currently, specifying any
        `dilation_rate` value != 1 is incompatible with specifying any stride value
        != 1.
    activation: Activation function to use. If you don't specify anything,
        no activation is applied (`a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    input_quantizer: Quantization function applied to the input of the layer.
    kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation").
    kernel_constraint: Constraint function applied to the kernel matrix.
    bias_constraint: Constraint function applied to the bias vector.

    # Input shape
    4D tensor with shape:
    `(batch, channels, rows, cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(batch, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
    4D tensor with shape:
    `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
    or 4D tensor with shape:
    `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
    `rows` and `cols` values might have changed due to padding.

    # References
    - [A guide to convolution arithmetic for deep
      learning](https://arxiv.org/abs/1603.07285v1)
    - [Deconvolutional
      Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        output_padding=None,
        data_format=None,
        dilation_rate=(1, 1),
        activation=None,
        use_bias=True,
        input_quantizer=None,
        kernel_quantizer=None,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Pure pass-through: the quantizer arguments are consumed by the
        # QuantizerBase mixin, the rest by the Keras Conv2DTranspose base class.
        # (Conv2DTranspose takes `output_padding` instead of `pad_values`, and
        # has no `groups` argument, so no TF-version check is needed here.)
        super().__init__(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            kernel_quantizer=kernel_quantizer,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
@utils.register_keras_custom_object
class QuantConv3DTranspose(QuantizerBase, tf.keras.layers.Conv3DTranspose):
    """Transposed quantized convolution layer (sometimes called Deconvolution).

    The need for transposed convolutions generally arises
    from the desire to use a transformation going in the opposite direction
    of a normal convolution, i.e., from something that has the shape of the
    output of some convolution to something that has the shape of its input
    while maintaining a connectivity pattern that is compatible with
    said convolution. `input_quantizer` and `kernel_quantizer`
    are the element-wise quantization functions to use. If both quantization functions
    are `None` this layer is equivalent to `Conv3DTranspose`.

    When using this layer as the first layer in a model, provide the keyword argument
    `input_shape` (tuple of integers, does not include the sample axis),
    e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
    if `data_format="channels_last"`.

    # Arguments
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height
            and width of the 3D convolution window. Can be a single integer to specify the
            same value for all spatial dimensions.
        strides: An integer or tuple/list of 3 integers, specifying the strides of the
            convolution along the depth, height and width. Can be a single integer to
            specify the same value for all spatial dimensions. Specifying any stride
            value != 1 is incompatible with specifying any `dilation_rate` value != 1.
        padding: one of `"valid"` or `"same"` (case-insensitive).
        output_padding: An integer or tuple/list of 3 integers, specifying the amount
            of padding along the depth, height, and width. Can be a single integer to
            specify the same value for all spatial dimensions. The amount of output
            padding along a given dimension must be lower than the stride along that
            same dimension. If set to `None` (default), the output shape is inferred.
        data_format: A string, one of `channels_last` (default) or `channels_first`. The
            ordering of the dimensions in the inputs. `channels_last` corresponds to
            inputs with shape `(batch, depth, height, width, channels)` while
            `channels_first` corresponds to inputs with shape
            `(batch, channels, depth, height, width)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
        dilation_rate: an integer or tuple/list of 3 integers, specifying the dilation
            rate to use for dilated convolution. Can be a single integer to specify the
            same value for all spatial dimensions. Currently, specifying any
            `dilation_rate` value != 1 is incompatible with specifying any stride value
            != 1.
        activation: Activation function to use. If you don't specify anything,
            no activation is applied (`a(x) = x`).
        use_bias: Boolean, whether the layer uses a bias vector.
        input_quantizer: Quantization function applied to the input of the layer.
        kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
        kernel_initializer: Initializer for the `kernel` weights matrix.
        bias_initializer: Initializer for the bias vector.
        kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
        kernel_constraint: Constraint function applied to the kernel matrix.
        bias_constraint: Constraint function applied to the bias vector.

    # Input shape
        5D tensor with shape:
        `(batch, channels, depth, rows, cols)` if data_format='channels_first'
        or 5D tensor with shape:
        `(batch, depth, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
        5D tensor with shape:
        `(batch, filters, new_depth, new_rows, new_cols)` if data_format='channels_first'
        or 5D tensor with shape:
        `(batch, new_depth, new_rows, new_cols, filters)` if data_format='channels_last'.
        `depth` and `rows` and `cols` values might have changed due to padding.

    # References
        - [A guide to convolution arithmetic for deep
          learning](https://arxiv.org/abs/1603.07285v1)
        - [Deconvolutional
          Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1, 1),
        padding="valid",
        output_padding=None,
        data_format=None,
        dilation_rate=(1, 1, 1),
        activation=None,
        use_bias=True,
        input_quantizer=None,
        kernel_quantizer=None,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Pure pass-through: the quantizer arguments are handled further up the
        # MRO; everything else matches `tf.keras.layers.Conv3DTranspose`.
        super().__init__(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            kernel_quantizer=kernel_quantizer,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
@utils.register_keras_custom_object
class QuantLocallyConnected1D(QuantizerBase, tf.keras.layers.LocallyConnected1D):
    """Locally-connected quantized layer for 1D inputs.

    Behaves like `QuantConv1D` except that weights are unshared: a separate
    set of filters is learned for every patch of the input. The element-wise
    quantization functions `input_quantizer` and `kernel_quantizer` control
    quantization; with both set to `None` this layer reduces to a plain
    `LocallyConnected1D`.

    !!! example
        ```python
        # apply a unshared weight convolution 1d of length 3 to a sequence with
        # 10 timesteps, with 64 output filters
        model = Sequential()
        model.add(QuantLocallyConnected1D(64, 3, input_shape=(10, 32)))
        # now model.output_shape == (None, 8, 64)
        # add a new conv1d on top
        model.add(QuantLocallyConnected1D(32, 3))
        # now model.output_shape == (None, 6, 32)
        ```

    # Arguments
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        kernel_size: An integer or tuple/list of a single integer giving the
            length of the 1D convolution window.
        strides: An integer or tuple/list of a single integer giving the stride
            of the convolution. Any stride value != 1 is incompatible with any
            `dilation_rate` value != 1.
        padding: Currently only supports `"valid"` (case-insensitive);
            `"same"` may be supported in the future.
        data_format: A string, one of `channels_last` (default) or
            `channels_first`. `channels_last` corresponds to inputs with shape
            `(batch, length, channels)` and `channels_first` to
            `(batch, channels, length)`. Defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`;
            if you never set it, it will be "channels_last".
        activation: Activation function to use. If unspecified,
            no activation is applied (`a(x) = x`).
        use_bias: Boolean, whether the layer uses a bias vector.
        input_quantizer: Quantization function applied to the input of the layer.
        kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
        kernel_initializer: Initializer for the `kernel` weights matrix.
        bias_initializer: Initializer for the bias vector.
        kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
        kernel_constraint: Constraint function applied to the kernel matrix.
        bias_constraint: Constraint function applied to the bias vector.
        implementation: implementation mode, either `1` or `2`.
            `1` loops over the input spatial locations to perform the forward
            pass — memory-efficient but many (small) ops. `2` stores the layer
            weights in a dense but sparsely-populated 2D matrix and implements
            the forward pass as a single matrix-multiply — a lot of RAM but
            few (large) ops. Which is faster depends dramatically on inputs,
            layer parameters, hardware, and `tf.executing_eagerly()`, so
            benchmark both. Scenarios that could benefit from
            `implementation=2`: eager execution; inference; running on CPU;
            large amount of RAM available; small models (few filters, small
            kernel); using `padding=same` (only possible with
            `implementation=2`).

    # Input shape
        3D tensor with shape: `(batch_size, steps, input_dim)`

    # Output shape
        3D tensor with shape: `(batch_size, new_steps, filters)`
        `steps` value might have changed due to padding or strides.
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        activation=None,
        use_bias=True,
        input_quantizer=None,
        kernel_quantizer=None,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        implementation=1,
        **kwargs,
    ):
        # Forward everything unchanged; the quantizer kwargs are consumed by
        # `QuantizerBase` further up the MRO.
        super().__init__(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            kernel_quantizer=kernel_quantizer,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            implementation=implementation,
            **kwargs,
        )
@utils.register_keras_custom_object
class QuantLocallyConnected2D(QuantizerBase, tf.keras.layers.LocallyConnected2D):
    """Locally-connected quantized layer for 2D inputs.

    Behaves like `QuantConv2D` except that weights are unshared: a separate
    set of filters is learned for every patch of the input. The element-wise
    quantization functions `input_quantizer` and `kernel_quantizer` control
    quantization; with both set to `None` this layer reduces to a plain
    `LocallyConnected2D`.

    !!! example
        ```python
        # apply a 3x3 unshared weights convolution with 64 output filters on a
        # 32x32 image with `data_format="channels_last"`:
        model = Sequential()
        model.add(QuantLocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
        # now model.output_shape == (None, 30, 30, 64)
        # notice that this layer will consume
        # (30*30)*(3*3*3*64) + (30*30)*64 parameters
        # add a 3x3 unshared weights convolution on top, with 32 output filters:
        model.add(QuantLocallyConnected2D(32, (3, 3)))
        # now model.output_shape == (None, 28, 28, 32)
        ```

    # Arguments
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        kernel_size: An integer or tuple/list of 2 integers giving the width
            and height of the 2D convolution window. Can be a single integer
            to specify the same value for all spatial dimensions.
        strides: An integer or tuple/list of 2 integers giving the strides of
            the convolution along the width and height. Can be a single
            integer to specify the same value for all spatial dimensions.
        padding: Currently only supports `"valid"` (case-insensitive);
            `"same"` may be supported in the future.
        data_format: A string, one of `channels_last` (default) or
            `channels_first`. `channels_last` corresponds to inputs with shape
            `(batch, height, width, channels)` and `channels_first` to
            `(batch, channels, height, width)`. Defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`; if you never set it, it will be
            "channels_last".
        activation: Activation function to use. If unspecified,
            no activation is applied (`a(x) = x`).
        use_bias: Boolean, whether the layer uses a bias vector.
        input_quantizer: Quantization function applied to the input of the layer.
        kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
        kernel_initializer: Initializer for the `kernel` weights matrix.
        bias_initializer: Initializer for the bias vector.
        kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
        kernel_constraint: Constraint function applied to the kernel matrix.
        bias_constraint: Constraint function applied to the bias vector.
        implementation: implementation mode, either `1` or `2`.
            `1` loops over the input spatial locations to perform the forward
            pass — memory-efficient but many (small) ops. `2` stores the layer
            weights in a dense but sparsely-populated 2D matrix and implements
            the forward pass as a single matrix-multiply — a lot of RAM but
            few (large) ops. Which is faster depends dramatically on inputs,
            layer parameters, hardware, and `tf.executing_eagerly()`, so
            benchmark both. Scenarios that could benefit from
            `implementation=2`: eager execution; inference; running on CPU;
            large amount of RAM available; small models (few filters, small
            kernel); using `padding=same` (only possible with
            `implementation=2`).

    # Input shape
        4D tensor with shape:
        `(samples, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
        4D tensor with shape:
        `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
        `rows` and `cols` values might have changed due to padding.
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        data_format=None,
        activation=None,
        use_bias=True,
        input_quantizer=None,
        kernel_quantizer=None,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        implementation=1,
        **kwargs,
    ):
        # Forward everything unchanged; the quantizer kwargs are consumed by
        # `QuantizerBase` further up the MRO.
        super().__init__(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            activation=activation,
            use_bias=use_bias,
            input_quantizer=input_quantizer,
            kernel_quantizer=kernel_quantizer,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            implementation=implementation,
            **kwargs,
        )
| 65,598 | 46.535507 | 91 | py |
larq | larq-main/larq/callbacks_test.py | import math
import numpy as np
import pytest
import tensorflow as tf
from packaging import version
from tensorflow.python.keras import testing_utils
import larq as lq
from larq import testing_utils as lq_testing_utils
from larq.callbacks import HyperparameterScheduler
if version.parse(tf.__version__) >= version.parse("2.11"):
from tensorflow.keras.optimizers import legacy as optimizers # type: ignore
else:
from tensorflow.keras import optimizers # type: ignore
class TestHyperparameterScheduler:
    """Tests for `HyperparameterScheduler` with plain and case optimizers."""

    def _create_data_and_model(self, train_samples=1000):
        """Build a small two-class dataset and a small BNN model for it."""
        np.random.seed(1337)
        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
            train_samples=train_samples,
            test_samples=0,
            input_shape=(10,),
            num_classes=2,
        )
        y_train = tf.keras.utils.to_categorical(y_train)
        model = lq_testing_utils.get_small_bnn_model(
            x_train.shape[1], 20, y_train.shape[1]
        )
        return x_train, y_train, model

    def test_normal_optimizer(self):
        """The scheduler finds the optimizer itself when none is passed."""
        x_train, y_train, model = self._create_data_and_model()
        model.compile(
            loss="categorical_crossentropy",
            optimizer=tf.keras.optimizers.Adam(0.01),
            metrics=["accuracy"],
        )

        def scheduler(x):
            return 1.0 / (1.0 + x)

        # We shouldn't need to specify the optimizer.
        test_scheduler = HyperparameterScheduler(
            schedule=scheduler,
            hyperparameter="lr",
            verbose=1,
        )
        num_epochs = 2
        model.fit(
            x_train,
            y_train,
            epochs=num_epochs,
            batch_size=16,
            callbacks=[test_scheduler],
            verbose=0,
        )
        # After training, lr must equal the schedule at the last epoch index.
        np.testing.assert_almost_equal(
            tf.keras.backend.get_value(model.optimizer.lr),
            scheduler(num_epochs - 1),
            decimal=8,
        )

    def test_per_step(self):
        """`update_freq="step"` applies the schedule per batch, not per epoch."""
        train_samples = 20
        x_train, y_train, model = self._create_data_and_model(train_samples)
        model.compile(
            loss="categorical_crossentropy",
            optimizer=tf.keras.optimizers.Adam(0.01),
            metrics=["accuracy"],
        )

        def scheduler(x):
            return 1.0 / (1.0 + x)

        # Test that we don't accept incorrect `update_freq`
        with pytest.raises(ValueError):
            HyperparameterScheduler(
                schedule=scheduler,
                hyperparameter="lr",
                update_freq="wrong",
            )

        # The actual scheduler we'll use
        test_scheduler = HyperparameterScheduler(
            schedule=scheduler,
            hyperparameter="lr",
            update_freq="step",
            verbose=1,
        )
        num_epochs = 1
        batch_size = 10
        model.fit(
            x_train,
            y_train,
            epochs=num_epochs,
            # BUGFIX: was hard-coded to 16 while the assertion below computed
            # the expected step count from `batch_size` (10); the test only
            # passed because ceil(20/16) happens to equal ceil(20/10).
            batch_size=batch_size,
            callbacks=[test_scheduler],
            verbose=0,
        )
        # lr must equal the schedule at the last step index of the run.
        np.testing.assert_almost_equal(
            tf.keras.backend.get_value(model.optimizer.lr),
            scheduler(math.ceil(train_samples / batch_size) - 1),
            decimal=8,
        )

    def test_case_optimizer(self):
        """Each sub-optimizer of a `CaseOptimizer` can be scheduled separately."""
        x_train, y_train, model = self._create_data_and_model()
        bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)
        adam = optimizers.Adam(0.01)
        case_optimizer = lq.optimizers.CaseOptimizer(
            (lq.optimizers.Bop.is_binary_variable, bop),
            default_optimizer=adam,
        )
        model.compile(
            loss="categorical_crossentropy",
            optimizer=case_optimizer,
            metrics=["accuracy"],
        )

        def scheduler(x):
            return 1.0 / (1.0 + x)

        # Schedule Bop's hyperparameters (optimizers[0]) ...
        cbk_gamma_scheduler = HyperparameterScheduler(
            schedule=scheduler,
            optimizer=model.optimizer.optimizers[0],
            hyperparameter="gamma",
            verbose=1,
        )
        cbk_threshold_scheduler = HyperparameterScheduler(
            schedule=scheduler,
            optimizer=model.optimizer.optimizers[0],
            hyperparameter="threshold",
            verbose=1,
        )
        # ... and Adam's learning rate (optimizers[1]) independently.
        cbk_lr_scheduler = HyperparameterScheduler(
            schedule=scheduler,
            optimizer=model.optimizer.optimizers[1],
            hyperparameter="lr",
            verbose=1,
        )
        num_epochs = 3
        model.fit(
            x_train,
            y_train,
            epochs=num_epochs,
            batch_size=16,
            callbacks=[cbk_gamma_scheduler, cbk_lr_scheduler, cbk_threshold_scheduler],
            verbose=0,
        )
        np.testing.assert_almost_equal(
            tf.keras.backend.get_value(model.optimizer.optimizers[0].gamma),
            scheduler(num_epochs - 1),
            decimal=8,
        )
        np.testing.assert_almost_equal(
            tf.keras.backend.get_value(model.optimizer.optimizers[0].threshold),
            scheduler(num_epochs - 1),
            decimal=8,
        )
        np.testing.assert_almost_equal(
            tf.keras.backend.get_value(model.optimizer.optimizers[1].lr),
            scheduler(num_epochs - 1),
            decimal=8,
        )

    def test_wrong_param(self):
        """Scheduling a hyperparameter the optimizer lacks raises at fit time."""
        x_train, y_train, model = self._create_data_and_model()
        model.compile(
            loss="categorical_crossentropy",
            optimizer=tf.keras.optimizers.Adam(0.01),
            metrics=["accuracy"],
        )

        def scheduler(x):
            return 1.0 / (1.0 + x)

        wrong_scheduler = HyperparameterScheduler(
            schedule=scheduler,
            hyperparameter="invalid_param",
            verbose=1,
        )
        with pytest.raises(ValueError):
            model.fit(
                x_train,
                y_train,
                epochs=1,
                batch_size=16,
                callbacks=[wrong_scheduler],
                verbose=0,
            )
| 6,015 | 27.647619 | 87 | py |
larq | larq-main/larq/constraints_test.py | import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq.testing_utils import generate_real_values_with_zeros
@pytest.mark.parametrize("name", ["weight_clip"])
def test_serialization(name):
    """Constraints resolve by name and survive a serialize/deserialize cycle."""
    reference = getattr(lq.constraints, name)()
    resolved = tf.keras.constraints.get(name)
    assert resolved.__class__ == reference.__class__
    # Round-trip through the Keras config machinery.
    round_tripped = tf.keras.constraints.deserialize(
        tf.keras.constraints.serialize(resolved)
    )
    assert round_tripped.__class__ == reference.__class__
def test_clip():
    """`weight_clip` clamps values into `[-clip_value, clip_value]`."""
    values = generate_real_values_with_zeros()
    constraint = lq.constraints.weight_clip(clip_value=0.75)
    clipped = tf.keras.backend.eval(
        constraint(tf.keras.backend.variable(values))
    )
    np.testing.assert_allclose(clipped, np.clip(values, -0.75, 0.75))
| 811 | 31.48 | 73 | py |
larq | larq-main/larq/layers_test.py | import inspect
import numpy as np
import pytest
import tensorflow as tf
from packaging import version
import larq as lq
from larq import testing_utils
# Test cases for layers with a single kernel weight tensor, as used by
# `TestLayers.test_binarization`:
# (quantized layer class, float reference layer class, input shape, layer kwargs).
PARAMS_ALL_LAYERS = [
    (lq.layers.QuantDense, tf.keras.layers.Dense, (3, 2), dict(units=3)),
    (
        lq.layers.QuantConv1D,
        tf.keras.layers.Conv1D,
        (2, 3, 7),
        dict(filters=2, kernel_size=3),
    ),
    (
        lq.layers.QuantConv2D,
        tf.keras.layers.Conv2D,
        (2, 3, 7, 6),
        dict(filters=2, kernel_size=3),
    ),
    (
        lq.layers.QuantConv3D,
        tf.keras.layers.Conv3D,
        (2, 3, 7, 6, 5),
        dict(filters=2, kernel_size=3),
    ),
    (
        lq.layers.QuantConv2DTranspose,
        tf.keras.layers.Conv2DTranspose,
        (2, 3, 7, 6),
        dict(filters=2, kernel_size=3),
    ),
    (
        lq.layers.QuantConv3DTranspose,
        tf.keras.layers.Conv3DTranspose,
        (2, 3, 7, 6, 5),
        dict(filters=2, kernel_size=3),
    ),
    (
        lq.layers.QuantLocallyConnected1D,
        tf.keras.layers.LocallyConnected1D,
        (2, 8, 5),
        dict(filters=4, kernel_size=3),
    ),
    (
        lq.layers.QuantLocallyConnected2D,
        tf.keras.layers.LocallyConnected2D,
        (8, 6, 10, 4),
        dict(filters=3, kernel_size=3),
    ),
]
# Test cases for separable convolutions, which carry separate depthwise and
# pointwise kernels: (quantized layer class, float reference layer class, input shape).
PARAMS_SEP_LAYERS = [
    (lq.layers.QuantSeparableConv1D, tf.keras.layers.SeparableConv1D, (2, 3, 7)),
    (lq.layers.QuantSeparableConv2D, tf.keras.layers.SeparableConv2D, (2, 3, 7, 6)),
]
class TestLayers:
    """Checks that quantized layers match their sign-binarized float twins.

    The common pattern: run a quantized layer with `ste_sign` quantizers on
    real-valued inputs/weights, then compare against the plain Keras layer fed
    with `np.sign` of the same inputs and weights — the outputs must agree.

    NOTE(review): `keras_should_run_eagerly` and `data_format` are pytest
    fixtures — presumably defined in a conftest not visible here; confirm.
    """

    @pytest.mark.parametrize(
        "quantized_layer, layer, input_shape, kwargs", PARAMS_ALL_LAYERS
    )
    def test_binarization(
        self, quantized_layer, layer, input_shape, kwargs, keras_should_run_eagerly
    ):
        """Quantized layer on real values == float layer on sign(values)."""
        input_data = testing_utils.random_input(input_shape)
        # A single random scalar weight in (-0.5, 0.5), shared by both models.
        random_weight = np.random.random() - 0.5
        with lq.context.metrics_scope(["flip_ratio"]):
            quant_output = testing_utils.layer_test(
                quantized_layer,
                kwargs=dict(
                    **kwargs,
                    kernel_quantizer="ste_sign",
                    input_quantizer="ste_sign",
                    kernel_initializer=tf.keras.initializers.constant(random_weight),
                ),
                input_data=input_data,
                should_run_eagerly=keras_should_run_eagerly,
            )
        # Reference model: plain Keras layer with the pre-binarized weight.
        fp_model = tf.keras.models.Sequential(
            [
                layer(
                    **kwargs,
                    kernel_initializer=tf.keras.initializers.constant(
                        np.sign(random_weight)
                    ),
                    input_shape=input_shape[1:],
                )
            ]
        )
        np.testing.assert_allclose(quant_output, fp_model.predict(np.sign(input_data)))

    @pytest.mark.parametrize("quantized_layer, layer, input_shape", PARAMS_SEP_LAYERS)
    def test_separable_layers(
        self, quantized_layer, layer, input_shape, keras_should_run_eagerly
    ):
        """Same binarization equivalence for separable (depthwise+pointwise) convs."""
        input_data = testing_utils.random_input(input_shape)
        random_d_kernel = np.random.random() - 0.5
        random_p_kernel = np.random.random() - 0.5
        with lq.context.metrics_scope(["flip_ratio"]):
            quant_output = testing_utils.layer_test(
                quantized_layer,
                kwargs=dict(
                    filters=3,
                    kernel_size=3,
                    depthwise_quantizer="ste_sign",
                    pointwise_quantizer="ste_sign",
                    input_quantizer="ste_sign",
                    depthwise_initializer=tf.keras.initializers.constant(
                        random_d_kernel
                    ),
                    pointwise_initializer=tf.keras.initializers.constant(
                        random_p_kernel
                    ),
                ),
                input_data=input_data,
                should_run_eagerly=keras_should_run_eagerly,
            )
        fp_model = tf.keras.models.Sequential(
            [
                layer(
                    filters=3,
                    kernel_size=3,
                    depthwise_initializer=tf.keras.initializers.constant(
                        np.sign(random_d_kernel)
                    ),
                    pointwise_initializer=tf.keras.initializers.constant(
                        np.sign(random_p_kernel)
                    ),
                    input_shape=input_shape[1:],
                )
            ]
        )
        np.testing.assert_allclose(quant_output, fp_model.predict(np.sign(input_data)))

    def test_depthwise_layers(self, keras_should_run_eagerly):
        """Same binarization equivalence for `QuantDepthwiseConv2D`."""
        input_data = testing_utils.random_input((2, 3, 7, 6))
        random_weight = np.random.random() - 0.5
        with lq.context.metrics_scope(["flip_ratio"]):
            quant_output = testing_utils.layer_test(
                lq.layers.QuantDepthwiseConv2D,
                kwargs=dict(
                    kernel_size=3,
                    depthwise_quantizer="ste_sign",
                    input_quantizer="ste_sign",
                    depthwise_initializer=tf.keras.initializers.constant(random_weight),
                ),
                input_data=input_data,
                should_run_eagerly=keras_should_run_eagerly,
            )
        fp_model = tf.keras.models.Sequential(
            [
                tf.keras.layers.DepthwiseConv2D(
                    kernel_size=3,
                    depthwise_initializer=tf.keras.initializers.constant(
                        np.sign(random_weight)
                    ),
                    input_shape=input_data.shape[1:],
                )
            ]
        )
        np.testing.assert_allclose(quant_output, fp_model.predict(np.sign(input_data)))

    @pytest.mark.parametrize(
        "layer_cls, input_dim",
        [
            (lq.layers.QuantConv1D, 3),
            (lq.layers.QuantConv2D, 4),
            (lq.layers.QuantConv3D, 5),
            (lq.layers.QuantSeparableConv1D, 3),
            (lq.layers.QuantSeparableConv2D, 4),
            (lq.layers.QuantDepthwiseConv2D, 4),
        ],
    )
    @pytest.mark.parametrize("dilation", [True, False])
    def test_non_zero_padding_layers(
        self, mocker, layer_cls, input_dim, data_format, dilation
    ):
        """`pad_values=1.0` pads with ones via exactly one `tf.pad` call and
        keeps the output shape identical to the default zero-padded layer."""
        inputs = np.zeros(np.random.randint(5, 20, size=input_dim), np.float32)
        kernel = tuple(np.random.randint(3, 7, size=input_dim - 2))
        rand_tuple = tuple(np.random.randint(1, 4, size=input_dim - 2))
        # These two layer types take a scalar stride, not a tuple.
        if not dilation and layer_cls in (
            lq.layers.QuantSeparableConv2D,
            lq.layers.QuantDepthwiseConv2D,
        ):
            rand_tuple = int(rand_tuple[0])
        kwargs = {"dilation_rate": rand_tuple} if dilation else {"strides": rand_tuple}
        # DepthwiseConv2D has no `filters` argument — only a kernel size.
        args = (kernel,) if layer_cls == lq.layers.QuantDepthwiseConv2D else (2, kernel)
        ref_layer = layer_cls(*args, padding="same", **kwargs)
        spy = mocker.spy(tf, "pad")
        layer = layer_cls(*args, padding="same", pad_values=1.0, **kwargs)
        layer.build(inputs.shape)
        # Snapshot the convolution op to verify the call does not rebuild it.
        conv_op = getattr(layer, "_convolution_op", None)
        assert layer(inputs).shape == ref_layer(inputs).shape
        spy.assert_called_once_with(mocker.ANY, mocker.ANY, constant_values=1.0)
        assert conv_op == getattr(layer, "_convolution_op", None)

    @pytest.mark.parametrize(
        "layer_cls",
        [
            lq.layers.QuantConv1D,
            lq.layers.QuantConv2D,
            lq.layers.QuantConv3D,
            lq.layers.QuantSeparableConv1D,
            lq.layers.QuantSeparableConv2D,
            lq.layers.QuantDepthwiseConv2D,
        ],
    )
    @pytest.mark.parametrize("static", [True, False])
    def test_non_zero_padding_shapes(self, layer_cls, data_format, static):
        """Non-zero padding must still produce fully-defined static shapes."""
        layer = layer_cls(
            16, 3, padding="same", pad_values=1.0, data_format=data_format
        )
        # Static: all spatial dims known (32); dynamic: spatial dims are None.
        input_shape = [32 if static else None] * layer.rank + [3]
        if data_format == "channels_first":
            input_shape = reversed(input_shape)
        input = tf.keras.layers.Input(shape=input_shape)
        layer(input)
        if static:
            for dim in layer.output_shape[1:]:
                assert dim is not None
class TestLayerWarns:
    """Warning/error behavior when quantizers are used without constraints."""

    def test_layer_warns(self, caplog):
        """A kernel quantizer without a kernel constraint logs a warning."""
        lq.layers.QuantDense(5, kernel_quantizer="ste_sign")
        assert caplog.records
        assert "kernel_constraint" in caplog.text

    def test_layer_does_not_warn(self, caplog):
        """Supplying a kernel constraint silences the warning."""
        lq.layers.QuantDense(
            5, kernel_quantizer="ste_sign", kernel_constraint="weight_clip"
        )
        assert "kernel_constraint" not in caplog.text

    def test_depthwise_layer_warns(self, caplog):
        """A depthwise quantizer without a constraint logs a warning."""
        lq.layers.QuantDepthwiseConv2D(5, depthwise_quantizer="ste_sign")
        assert caplog.records
        assert "depthwise_constraint" in caplog.text

    def test_depthwise_layer_does_not_warn(self, caplog):
        """Supplying a depthwise constraint silences the warning."""
        lq.layers.QuantDepthwiseConv2D(
            5, depthwise_quantizer="ste_sign", depthwise_constraint="weight_clip"
        )
        assert "depthwise_constraint" not in caplog.text

    def test_separable_layer_warns(self, caplog):
        """Separable layers warn for both missing constraints."""
        lq.layers.QuantSeparableConv2D(
            3, 3, depthwise_quantizer="ste_sign", pointwise_quantizer="ste_sign"
        )
        assert "depthwise_constraint" in caplog.text
        assert "pointwise_constraint" in caplog.text

    def test_separable_layer_does_not_warn(self, caplog):
        """Supplying both constraints produces no log records at all."""
        lq.layers.QuantSeparableConv2D(
            3,
            3,
            depthwise_quantizer="ste_sign",
            pointwise_quantizer="ste_sign",
            depthwise_constraint="weight_clip",
            pointwise_constraint="weight_clip",
        )
        assert not caplog.records

    def test_conv1d_non_zero_padding_raises(self):
        """`pad_values` is incompatible with causal padding."""
        with pytest.raises(ValueError, match=r".*pad_values.*"):
            lq.layers.QuantConv1D(24, 3, padding="causal", pad_values=1.0)

    @pytest.mark.parametrize(
        "layer", [lq.layers.QuantConv1D, lq.layers.QuantConv2D, lq.layers.QuantConv3D]
    )
    def test_groups(self, layer):
        """Grouped convolutions require TF >= 2.3; older versions must raise."""
        if version.parse(tf.__version__) < version.parse("2.3"):
            with pytest.raises(ValueError, match=r".*groups.*"):
                layer(24, 3, groups=2)
        else:
            assert layer(24, 3, groups=2).groups == 2
@pytest.mark.parametrize(
    "quant_layer,layer",
    [
        (lq.layers.QuantDense, tf.keras.layers.Dense),
        (lq.layers.QuantConv1D, tf.keras.layers.Conv1D),
        (lq.layers.QuantConv2D, tf.keras.layers.Conv2D),
        (lq.layers.QuantConv3D, tf.keras.layers.Conv3D),
        (lq.layers.QuantConv2DTranspose, tf.keras.layers.Conv2DTranspose),
        (lq.layers.QuantConv3DTranspose, tf.keras.layers.Conv3DTranspose),
        (lq.layers.QuantLocallyConnected1D, tf.keras.layers.LocallyConnected1D),
        (lq.layers.QuantLocallyConnected2D, tf.keras.layers.LocallyConnected2D),
        (lq.layers.QuantDepthwiseConv2D, tf.keras.layers.DepthwiseConv2D),
    ],
)
def test_layer_kwargs(quant_layer, layer):
    """Quantized layers must mirror their Keras counterpart's signature.

    After discarding larq-only arguments, the parameter names, order, and
    default values must match the corresponding Keras layer exactly.
    """
    quant_params = inspect.signature(quant_layer).parameters
    params = inspect.signature(layer).parameters

    # Arguments that only exist on the quantized variants.
    ignored_params = {
        "input_quantizer",
        "kernel_quantizer",
        "depthwise_quantizer",
        "pointwise_quantizer",
        "pad_values",
    }
    if version.parse(tf.__version__) < version.parse("2.3"):
        # `groups` only exists on the Keras layers from TF 2.3 onwards.
        ignored_params.add("groups")
    if layer in (tf.keras.layers.DepthwiseConv2D, tf.keras.layers.Conv3DTranspose):
        # These Keras layers do not expose `dilation_rate`.
        ignored_params.add("dilation_rate")

    filtered = [name for name in quant_params if name not in ignored_params]
    assert filtered == list(params)

    for name in params:
        assert quant_params[name].default == params[name].default  # type: ignore
| 12,025 | 34.68546 | 91 | py |
larq | larq-main/larq/constraints.py | """Functions from the `constraints` module allow setting constraints
(eg. weight clipping) on network parameters during optimization.
The penalties are applied on a per-layer basis. The exact API will depend on the layer,
but the layers `QuantDense`, `QuantConv1D`, `QuantConv2D` and `QuantConv3D` have a
unified API.
These layers expose 2 keyword arguments:
- `kernel_constraint` for the main weights matrix
- `bias_constraint` for the bias.
```python
import larq as lq
lq.layers.QuantDense(64, kernel_constraint="weight_clip")
lq.layers.QuantDense(64, kernel_constraint=lq.constraints.WeightClip(2.))
```
"""
from typing import Any, Mapping
import tensorflow as tf
from larq import utils
@utils.register_keras_custom_object
class WeightClip(tf.keras.constraints.Constraint):
    """Weight Clip constraint.

    Restricts each incoming weight to the closed interval
    `[-clip_value, clip_value]`.

    # Arguments
        clip_value: The value to clip incoming weights.
    """

    def __init__(self, clip_value: float = 1):
        self.clip_value = clip_value

    def __call__(self, x: tf.Tensor) -> tf.Tensor:
        limit = self.clip_value
        return tf.clip_by_value(x, -limit, limit)

    def get_config(self) -> Mapping[str, Any]:
        # Everything needed to reconstruct the constraint via Keras serialization.
        return dict(clip_value=self.clip_value)
# Aliases
@utils.register_keras_custom_object
class weight_clip(WeightClip):
    # Lowercase alias — presumably registered under the class name so the
    # constraint can be referenced by the Keras string "weight_clip"; confirm
    # against the `register_keras_custom_object` implementation.
    pass
| 1,392 | 25.283019 | 87 | py |
larq | larq-main/larq/models.py | import itertools
from dataclasses import dataclass
from typing import Any, Callable, Iterator, Mapping, Optional, Sequence, TypeVar, Union
import numpy as np
import tensorflow as tf
from terminaltables import AsciiTable
from larq import layers as lq_layers
from larq.utils import memory_as_readable_str
__all__ = ["summary"]
# Layer types whose forward pass is built from multiply-accumulate operations;
# presumably used to attribute MAC counts in the summary table — confirm
# against `LayerProfile` (defined below, outside this excerpt).
mac_containing_layers = (
    lq_layers.QuantConv2D,
    lq_layers.QuantSeparableConv2D,
    lq_layers.QuantDepthwiseConv2D,
    lq_layers.QuantDense,
    tf.keras.layers.Conv2D,
    tf.keras.layers.SeparableConv2D,
    tf.keras.layers.DepthwiseConv2D,
    tf.keras.layers.Dense,
    lq_layers.QuantConv1D,
    lq_layers.QuantSeparableConv1D,
    tf.keras.layers.Conv1D,
    tf.keras.layers.SeparableConv1D,
)
# Layer types for which op counting is supported at all: the MAC layers above
# plus reshaping/normalization/pooling layers that contribute no MACs.
op_count_supported_layer_types = (
    tf.keras.layers.Flatten,
    tf.keras.layers.BatchNormalization,
    tf.keras.layers.MaxPool2D,
    tf.keras.layers.AveragePooling2D,
    tf.keras.layers.MaxPool1D,
    tf.keras.layers.AveragePooling1D,
    *mac_containing_layers,
)
T = TypeVar("T")
def _flatten(lst: Iterator[Iterator[T]]) -> Sequence[T]:
return list(itertools.chain.from_iterable(lst))
def _bitsize_as_str(bitsize: int) -> str:
bitsize_names = {8: "byte", 8 * 1024: "kB"}
try:
return bitsize_names[bitsize]
except KeyError:
raise NotImplementedError()
def _number_as_readable_str(num: float) -> str:
# The initial rounding here is necessary so that e.g. `999000` gets
# formatted as `1.000 M` rather than `1000 k`
num = float(f"{num:.3g}")
# For numbers less than 1000, output them directly, stripping any trailing
# zeros and decimal places.
if num < 1000:
return str(num).rstrip("0").rstrip(".")
# For numbers that are at least 1000 trillion (1 quadrillion) format with
# scientific notation (3 s.f. = 2 d.p. in scientific notation).
if num >= 1e15:
return f"{num:.2E}"
# Count the magnitude.
magnitude = 0
while abs(num) >= 1000 and magnitude < 4:
magnitude += 1
num /= 1000.0
# ':.3g' formats the number with 3 significant figures, without stripping trailing
# zeros.
num = f"{num:.3g}".rstrip(".")
unit = ["", " k", " M", " B", " T"][magnitude]
return num + unit
def _format_table_entry(x: float, units: int = 1) -> Union[float, str]:
try:
assert not np.isnan(x)
if type(x) == str or x == 0 or units == 1:
return x
return x / units
except Exception:
return "?"
def _normalize_shape(shape):
return tuple(dim if dim else -1 for dim in shape)
class WeightProfile:
    """Size, precision, and memory statistics for a single model weight."""

    def __init__(self, weight, trainable: bool = True):
        self._weight = weight
        # Quantized weights carry a `precision` attribute; anything else is
        # treated as a full-precision (32-bit) weight.
        self.bitwidth = getattr(weight, "precision", 32)
        self.trainable = trainable

    @property
    def count(self) -> int:
        """Number of scalar elements in the weight tensor."""
        num_elements = np.prod(self._weight.shape.as_list())
        return int(num_elements)

    @property
    def memory(self) -> int:
        """Memory footprint in bits at the weight's own precision."""
        return self.count * self.bitwidth

    @property
    def fp_equivalent_memory(self) -> int:
        """Memory footprint in bits if the weight were stored as float32."""
        return self.count * 32

    @property
    def int8_fp_weights_memory(self) -> int:
        """Memory in bits, counting any weight wider than 8 bits as 8 bits."""
        return self.count * min(self.bitwidth, 8)

    def is_bias(self) -> bool:
        """Whether this weight is a bias term, judged by its variable name."""
        return "bias" in self._weight.name
@dataclass
class OperationProfile:
    """Profile of a group of identical operations in a layer's forward pass."""

    n: int  # Number of operations of this kind.
    precision: int  # Operation precision in bits (max of its input precisions).
    op_type: str  # Kind of operation, e.g. "mac".
class LayerProfile:
    """Weight and MAC-operation statistics for a single Keras layer."""

    def __init__(self, layer: tf.keras.layers.Layer):
        self._layer = layer
        self.name = layer.name
        weights = layer.weights
        if isinstance(layer, tf.keras.layers.BatchNormalization):
            # Batch norm can be folded into one scale and one shift at
            # inference time, so drop one weight of each fused pair from the
            # profile to avoid double counting.
            fused_pairs = [("beta", "moving_mean"), ("gamma", "moving_variance")]
            for pair in fused_pairs:
                names = [w.name.split("/")[-1].replace(":0", "") for w in weights]
                if pair[0] in names and pair[1] in names:
                    weights.pop(names.index(pair[0]))
        self.weight_profiles = [
            WeightProfile(
                weight,
                trainable=any(weight is w for w in layer.trainable_weights),
            )
            for weight in weights
        ]
        self.op_profiles = []
        if isinstance(layer, mac_containing_layers) and self.output_pixels:
            # Every non-bias weight takes part in one MAC per output pixel;
            # a MAC's precision is the wider of input and weight precision.
            for p in self.weight_profiles:
                if not p.is_bias():
                    self.op_profiles.append(
                        OperationProfile(
                            n=p.count * self.output_pixels,
                            precision=max(self.input_precision or 32, p.bitwidth),
                            op_type="mac",
                        )
                    )
    @property
    def memory(self) -> int:
        """Total weight memory of the layer in bits."""
        return sum(p.memory for p in self.weight_profiles)
    @property
    def int8_fp_weights_memory(self) -> int:
        """Weight memory in bits, counting weights wider than 8 bits as 8-bit."""
        return sum(p.int8_fp_weights_memory for p in self.weight_profiles)
    @property
    def fp_equivalent_memory(self) -> int:
        """Weight memory in bits if all weights were stored as float32."""
        return sum(p.fp_equivalent_memory for p in self.weight_profiles)
    def weight_count(
        self, bitwidth: Optional[int] = None, trainable: Optional[bool] = None
    ) -> int:
        """Number of weights, optionally filtered by bitwidth and trainability."""
        count = 0
        for p in self.weight_profiles:
            if (bitwidth is None or p.bitwidth == bitwidth) and (
                trainable is None or p.trainable == trainable
            ):
                count += p.count
        return count
    def op_count(
        self, op_type: Optional[str] = None, precision: Optional[int] = None
    ) -> Optional[int]:
        """Number of ops, or `None` when the count is unknown for this layer."""
        # NOTE(review): despite the Optional annotation, any `op_type` other
        # than "mac" — including None — raises here; confirm this is intended.
        if op_type != "mac":
            raise ValueError("Currently only counting of MAC-operations is supported.")
        if (
            isinstance(self._layer, op_count_supported_layer_types)
            and self.output_pixels
        ):
            count = 0
            for op in self.op_profiles:
                if (precision is None or op.precision == precision) and (
                    op_type is None or op.op_type == op_type
                ):
                    count += op.n
            return count
        return None
    @property
    def input_precision(self) -> Optional[int]:
        """Precision of the layer's input quantizer, or `None` if it has none."""
        try:
            return self._layer.input_quantizer.precision
        except AttributeError:
            return None
    @property
    def output_shape(self) -> Optional[Sequence[int]]:
        """Output shape with unknown dims normalized to -1, or `None`."""
        try:
            output_shape = self._layer.output_shape
            if isinstance(output_shape, list):
                # Multi-output layers: normalize each shape individually.
                if len(output_shape) == 1:
                    return _normalize_shape(output_shape[0])
                return [_normalize_shape(shape) for shape in output_shape]
            return _normalize_shape(output_shape)
        except AttributeError:
            return None
    @property
    def output_shape_str(self) -> str:
        """Printable output shape; "multiple" or "?" when not determinable."""
        try:
            return str(self.output_shape or "multiple")
        except RuntimeError:
            return "?"
    @property
    def output_pixels(self) -> Optional[int]:
        """Number of pixels for a single feature map (1 for fully connected layers)."""
        if not self.output_shape:
            return None
        if len(self.output_shape) == 4:
            # 2D feature maps: height * width (skip batch and channel dims).
            return int(np.prod(self.output_shape[1:3]))
        if len(self.output_shape) == 3:
            # 1D feature maps: the temporal/spatial dimension.
            return self.output_shape[1]
        if len(self.output_shape) == 2:
            return 1
        raise NotImplementedError()
    @property
    def unique_param_bidtwidths(self) -> Sequence[int]:
        # Sorted set of distinct weight bitwidths present in this layer.
        return sorted(set([p.bitwidth for p in self.weight_profiles]))
    @property
    def unique_op_precisions(self) -> Sequence[int]:
        # Sorted set of distinct MAC precisions present in this layer.
        return sorted(set([op.precision for op in self.op_profiles]))
    def generate_table_row(
        self, table_config: Mapping[str, Any]
    ) -> Sequence[Union[str, float]]:
        """Build one table row: name, input precision, output shape, counts."""
        row = [self.name, self.input_precision or "-", self.output_shape_str]
        for i in table_config["param_bidtwidths"]:
            n = self.weight_count(i)
            n = _format_table_entry(n, table_config["param_units"])
            row.append(n)
        row.append(_format_table_entry(self.memory, table_config["memory_units"]))
        for i in table_config["mac_precisions"]:
            n = self.op_count("mac", i)
            n = _format_table_entry(n, table_config["mac_units"])
            row.append(n)
        return row
class ModelProfile(LayerProfile):
    """Aggregates `LayerProfile` statistics over all layers of a model."""

    def __init__(self, model: tf.keras.models.Model):
        # Deliberately does not call `LayerProfile.__init__`: a model profile
        # is built from per-layer profiles rather than from a single layer.
        self.name = model.name
        def get_profile(layer):
            # Nested sub-models are profiled recursively.
            return (
                LayerProfile(layer)
                if not isinstance(layer, tf.keras.models.Model)
                else ModelProfile(layer)
            )
        self.layer_profiles = [get_profile(layer) for layer in model.layers]
    @property
    def memory(self) -> int:
        """Total weight memory of the model in bits."""
        return sum(lp.memory for lp in self.layer_profiles)
    @property
    def int8_fp_weights_memory(self) -> int:
        """Weight memory in bits, counting weights wider than 8 bits as 8-bit."""
        return sum(lp.int8_fp_weights_memory for lp in self.layer_profiles)
    @property
    def fp_equivalent_memory(self) -> int:
        """Weight memory in bits if all weights were stored as float32."""
        return sum(lp.fp_equivalent_memory for lp in self.layer_profiles)
    def weight_count(
        self, bitwidth: Optional[int] = None, trainable: Optional[bool] = None
    ) -> int:
        """Total weight count, optionally filtered by bitwidth/trainability."""
        return sum(lp.weight_count(bitwidth, trainable) for lp in self.layer_profiles)
    def op_count(
        self, op_type: Optional[str] = None, bitwidth: Optional[int] = None
    ) -> int:
        """Total op count over all layers; layers with unknown counts add 0."""
        # NOTE(review): this parameter is named `bitwidth` while
        # `LayerProfile.op_count` calls it `precision` — same meaning,
        # inconsistent naming; confirm before unifying.
        return sum(lp.op_count(op_type, bitwidth) or 0 for lp in self.layer_profiles)
    @property
    def unique_param_bidtwidths(self) -> Sequence[int]:
        """Sorted set of distinct weight bitwidths across all layers."""
        return sorted(
            set(_flatten(lp.unique_param_bidtwidths for lp in self.layer_profiles))
        )
    @property
    def unique_op_precisions(self) -> Sequence[int]:
        """Sorted set of distinct MAC precisions across all layers."""
        return sorted(
            set(_flatten(lp.unique_op_precisions for lp in self.layer_profiles))
        )
    @property
    def input_precision(self) -> Optional[int]:
        # The model's input precision is that of its first layer.
        return self.layer_profiles[0].input_precision
    @property
    def output_shape(self) -> Optional[Sequence[int]]:
        # The model's output shape is that of its last layer.
        return self.layer_profiles[-1].output_shape
    def _generate_table_header(self, table_config: Mapping[str, Any]) -> Sequence[str]:
        """Header row matching the columns of `LayerProfile.generate_table_row`."""
        return [
            "Layer",
            "Input prec.\n(bit)",
            "Outputs",
            *(
                f"# {i}-bit\nx {table_config['param_units']}"
                for i in table_config["param_bidtwidths"]
            ),
            f"Memory\n({_bitsize_as_str(table_config['memory_units'])})",
            *(f"{i}-bit MACs" for i in table_config["mac_precisions"]),
        ]
    def _generate_table_total(
        self, table_config: Mapping[str, Any]
    ) -> Sequence[Union[float, str]]:
        """Footer row with model-wide totals in the same column layout."""
        row = ["Total", "", ""]
        for i in table_config["param_bidtwidths"]:
            row.append(
                _format_table_entry(self.weight_count(i), table_config["param_units"])
            )
        row.append(_format_table_entry(self.memory, table_config["memory_units"]))
        for i in table_config["mac_precisions"]:
            row.append(
                _format_table_entry(self.op_count("mac", i), table_config["mac_units"])
            )
        return row
    def generate_table(
        self, include_macs: bool = True
    ) -> Sequence[Sequence[Union[float, str]]]:
        """Build the per-layer table: header, one row per layer, totals row."""
        table_config = {
            "param_bidtwidths": self.unique_param_bidtwidths,
            "mac_precisions": self.unique_op_precisions if include_macs else [],
            "param_units": 1,
            "memory_units": 8 * 1024,  # report memory in kB
            "mac_units": 1,
        }
        table = []
        table.append(self._generate_table_header(table_config))
        for lp in self.layer_profiles:
            table.append(lp.generate_table_row(table_config))
        table.append(self._generate_table_total(table_config))
        return table
    def generate_summary(
        self, include_macs: bool = True
    ) -> Sequence[Sequence[Union[str, float]]]:
        """Build the model-wide summary as a list of (label, value) rows."""
        summary = [
            ["Total params", _number_as_readable_str(self.weight_count())],
            [
                "Trainable params",
                _number_as_readable_str(self.weight_count(trainable=True)),
            ],
            [
                "Non-trainable params",
                _number_as_readable_str(self.weight_count(trainable=False)),
            ],
            ["Model size", memory_as_readable_str(self.memory)],
            [
                "Model size (8-bit FP weights)",
                memory_as_readable_str(self.int8_fp_weights_memory),
            ],
            ["Float-32 Equivalent", memory_as_readable_str(self.fp_equivalent_memory)],
            [
                "Compression Ratio of Memory",
                # max() guards against division by zero for weightless models.
                self.memory / max(1e-8, self.fp_equivalent_memory),
            ],
        ]
        if include_macs:
            # Fraction of MACs at 1-bit (binary) and 2-bit (ternary) precision.
            binarization_ratio = self.op_count("mac", 1) / max(
                1, self.op_count(op_type="mac")
            )
            ternarization_ratio = self.op_count("mac", 2) / max(
                1, self.op_count(op_type="mac")
            )
            summary.append(
                [
                    "Number of MACs",
                    _number_as_readable_str(self.op_count(op_type="mac")),
                ]
            )
            if binarization_ratio > 0:
                summary.append(
                    ["Ratio of MACs that are binarized", f"{binarization_ratio:.4f}"]
                )
            if ternarization_ratio > 0:
                summary.append(
                    ["Ratio of MACs that are ternarized", f"{ternarization_ratio:.4f}"]
                )
        return summary
def sanitize_table(table_data: Sequence[Sequence[Any]]) -> Sequence[Sequence[str]]:
    """Convert every table cell to a string; floats get 2 decimal places."""

    def stringify(value):
        # Note: exact-type check, so numpy floats / bools fall through to str().
        if type(value) == float:
            return f"{value:.2f}"
        return str(value)

    return [[stringify(value) for value in row] for row in table_data]
class LayersTable(AsciiTable):
    """Ascii table of per-layer statistics with a separated totals footer."""

    def __init__(self, table_data, title=None):
        super().__init__(sanitize_table(table_data), title=title)
        self.inner_column_border = False
        # Left-align the layer-name column; right-align all numeric columns.
        alignments = {}
        for column in range(len(table_data[0])):
            alignments[column] = "left" if column == 0 else "right"
        self.justify_columns = alignments
        self.inner_footing_row_border = True
        self.inner_heading_row_border = True
class SummaryTable(AsciiTable):
    """Borderless two-column Ascii table for overall model statistics."""
    def __init__(self, table_data, title=None):
        super().__init__(sanitize_table(table_data), title=title)
        self.inner_column_border = False
        self.inner_heading_row_border = False
def summary(
    model: tf.keras.models.Model,
    print_fn: Optional[Callable[[str], Any]] = None,
    include_macs: bool = True,
) -> None:
    """Prints a string summary of the network.

    The per-layer table reports:

    - input precision,
    - output dimension,
    - weight count (broken down by bitwidth),
    - memory footprint in kilobytes (`8*1024` 1-bit weights = 1 kB),
    - number of multiply-accumulate (MAC) operations broken down by precision
      (*optional & experimental*).

    A single MAC operation contains both a multiplication and an addition. The
    precision of a MAC operation is defined as the maximum bitwidth of its
    inputs.

    The model-wide summary additionally reports:

    - total number of weights,
    - total number of trainable weights,
    - total number of non-trainable weights,
    - model size,
    - model size (8-bit FP weights): memory footprint if FP weights were 8 bit,
    - float-32 equivalent size: memory footprint if all weights were 32 bit,
    - compression ratio achieved by quantizing weights,
    - total number of MAC operations,
    - ratio of MAC operations that is binarized and can be accelated with
      XNOR-gates.

    # Arguments
        model: model instance.
        print_fn: Print function to use. Defaults to `print`. You can set it to
            a custom function in order to capture the string summary.
        include_macs: whether or not to include the number of MAC-operations in
            the summary.

    # Raises
        ValueError: if called before the model is built.
    """
    if not model.built:
        raise ValueError(
            "This model has not yet been built. Build the model first by calling "
            "`model.build()` or calling `model.fit()` with some data, or specify an "
            "`input_shape` argument in the first layer(s) for automatic build."
        )
    print_fn = print_fn or print
    profile = ModelProfile(model)
    layers_table = LayersTable(profile.generate_table(), title=f"{model.name} stats")
    print_fn(layers_table.table)
    summary_table = SummaryTable(
        profile.generate_summary(include_macs), title=f"{model.name} summary"
    )
    print_fn(summary_table.table)
| 16,824 | 31.861328 | 105 | py |
larq | larq-main/larq/metrics.py | """We add metrics specific to extremely quantized networks using a
`larq.context.metrics_scope` rather than through the `metrics` parameter of
`model.compile()`, where most common metrics reside. This is because, to calculate
metrics like the `flip_ratio`, we need a layer's kernel or activation and not just the
`y_true` and `y_pred` that Keras passes to metrics defined in the usual way.
"""
import numpy as np
import tensorflow as tf
from larq import utils
@utils.register_alias("flip_ratio")
@utils.register_keras_custom_object
class FlipRatio(tf.keras.metrics.Metric):
    """Computes the mean ratio of changed values in a given tensor.
    !!! example
        ```python
        m = metrics.FlipRatio()
        m.update_state((1, 1))  # result: 0
        m.update_state((2, 2))  # result: 1
        m.update_state((1, 2))  # result: 0.75
        print('Final result: ', m.result().numpy())  # Final result: 0.75
        ```
    # Arguments
        name: Name of the metric.
        values_dtype: Data type of the tensor for which to track changes.
        dtype: Data type of the moving mean.
    """
    def __init__(self, values_dtype="int8", name="flip_ratio", dtype=None):
        super().__init__(name=name, dtype=dtype)
        # State is created lazily in `build` because the tracked tensor's
        # shape is only known at the first `update_state` call.
        self.built = False
        self.values_dtype = tf.as_dtype(values_dtype)
    def build(self, input_shape):
        """Create metric state for a tracked tensor of shape `input_shape`."""
        # Snapshot of the tensor from the previous update, used to count flips.
        self._previous_values = self.add_weight(
            "previous_values",
            shape=input_shape,
            dtype=self.values_dtype,
            initializer=tf.keras.initializers.zeros,
            aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
        )
        self.total = self.add_weight(
            "total",
            initializer=tf.keras.initializers.zeros,
            aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
        )
        self.count = self.add_weight(
            "count",
            initializer=tf.keras.initializers.zeros,
            aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
        )
        self._size = tf.cast(np.prod(input_shape), self.dtype)
        self.built = True
    def update_state(self, values, sample_weight=None):
        """Accumulate the flip ratio between `values` and the previous update."""
        values = tf.cast(values, self.values_dtype)
        if not self.built:
            with tf.name_scope(self.name), tf.init_scope():
                self.build(values.shape)
        unchanged_values = tf.math.count_nonzero(
            tf.equal(self._previous_values, values)
        )
        flip_ratio = 1 - (
            tf.cast(unchanged_values, self.dtype) / tf.cast(self._size, self.dtype)
        )
        # `tf.sign(self.count)` is 0 on the very first update, so the first
        # (meaningless) ratio against the zero-initialized snapshot is dropped.
        update_total_op = self.total.assign_add(flip_ratio * tf.sign(self.count))
        with tf.control_dependencies([update_total_op]):
            update_count_op = self.count.assign_add(1)
        with tf.control_dependencies([update_count_op]):
            return self._previous_values.assign(values)
    def result(self):
        # Divide by `count - 1` since the first update does not contribute
        # to `total`; div_no_nan returns 0 before two updates have happened.
        return tf.compat.v1.div_no_nan(self.total, self.count - 1)
    def reset_state(self):
        # Keep `_previous_values` so ratios stay meaningful across epochs.
        tf.keras.backend.batch_set_value(
            [(v, 0) for v in self.variables if v is not self._previous_values]
        )
    def reset_states(self):
        self.reset_state()  # For backwards compatibility with < 2.5
    def get_config(self):
        return {**super().get_config(), "values_dtype": self.values_dtype.name}
| 3,341 | 34.935484 | 86 | py |
larq | larq-main/larq/activations.py | """Activations can either be used through an `Activation` layer, or through the
`activation` argument supported by all forward layers:
```python
import tensorflow as tf
import larq as lq
model.add(lq.layers.QuantDense(64))
model.add(tf.keras.layers.Activation('hard_tanh'))
```
This is equivalent to:
```python
model.add(lq.layers.QuantDense(64, activation='hard_tanh'))
```
You can also pass an element-wise TensorFlow function as an activation:
```python
model.add(lq.layers.QuantDense(64, activation=lq.activations.hard_tanh))
```
"""
import tensorflow as tf
from larq import utils
@utils.register_keras_custom_object
def hard_tanh(x: tf.Tensor) -> tf.Tensor:
    """Hard tanh activation function.

    Acts as the identity on [-1, 1] and saturates outside of that interval.

    ```plot-activation
    activations.hard_tanh
    ```

    # Arguments
        x: Input tensor.

    # Returns
        Hard tanh activation.
    """
    lower, upper = -1, 1
    return tf.clip_by_value(x, lower, upper)
@utils.register_keras_custom_object
def leaky_tanh(x: tf.Tensor, alpha: float = 0.2) -> tf.Tensor:
    r"""Leaky tanh activation function.

    Similar to hard tanh, but with non-zero slopes as in leaky ReLU.

    ```plot-activation
    activations.leaky_tanh
    ```

    # Arguments
        x: Input tensor.
        alpha: Slope of the activation function outside of [-1, 1].

    # Returns
        Leaky tanh activation.
    """
    # Identity on [-1, 1]; outside that interval exactly one of the two leak
    # terms is non-zero and contributes a slope of `alpha`.
    clipped = tf.clip_by_value(x, -1, 1)
    upper_leak = (tf.math.maximum(x, 1) - 1) * alpha
    lower_leak = (tf.math.minimum(x, -1) + 1) * alpha
    return clipped + upper_leak + lower_leak
| 1,481 | 21.119403 | 79 | py |
larq | larq-main/larq/quantizers_test.py | import functools
import numpy as np
import pytest
import tensorflow as tf
from packaging import version
import larq as lq
from larq import testing_utils
class DummyTrainableQuantizer(tf.keras.layers.Layer):
    """Used to test whether we can set layers as quantizers without any throws."""
    # NOTE(review): presumably mirrors the `_custom_metrics` attribute that
    # larq quantizers expose for metric tracking — confirm against lq.quantizers.
    _custom_metrics = None
    def build(self, input_shape):
        # A single trainable scalar so the optimizer has something to update.
        self.dummy_weight = self.add_weight("dummy_weight", trainable=True)
        super().build(input_shape)
    def call(self, inputs):
        return self.dummy_weight * inputs
class TestCommonFunctionality:
    """Test functionality common to all quantizers, like serialization and usage."""

    @pytest.mark.parametrize("module", [lq.quantizers, tf.keras.activations])
    @pytest.mark.parametrize(
        "name,ref_cls",
        [
            ("ste_sign", lq.quantizers.SteSign),
            ("approx_sign", lq.quantizers.ApproxSign),
            ("ste_heaviside", lq.quantizers.SteHeaviside),
            ("magnitude_aware_sign", lq.quantizers.MagnitudeAwareSign),
            ("swish_sign", lq.quantizers.SwishSign),
            ("ste_tern", lq.quantizers.SteTern),
        ],
    )
    def test_serialization(self, module, name, ref_cls):
        """Quantizers round-trip through `get`/`serialize`/`deserialize`."""
        if module == tf.keras.activations and (
            version.parse(tf.__version__) < version.parse("2.13")
        ):
            # NOTE(review): this `if` had a comment as its only body (a syntax
            # error) — evidently a lost `pytest.skip(...)`. Restored with the
            # comment text as the reason; confirm the version-bound direction
            # against upstream.
            pytest.skip(
                "New serialisation in Keras doesn't support using quantizers "
                "strings as activations"
            )
        fn = module.get(name)
        assert fn.__class__ == ref_cls
        fn = module.get(ref_cls())
        assert fn.__class__ == ref_cls
        assert type(fn.precision) == int
        if module == tf.keras.activations and (
            version.parse(tf.__version__) < version.parse("1.15")
        ):
            pytest.skip(
                "TensorFlow < 1.15 does not support Quantizer classes as activations"
            )
        config = module.serialize(fn)
        fn = module.deserialize(config)
        assert fn.__class__ == ref_cls
        assert type(fn.precision) == int

    def test_noop_serialization(self):
        """NoOp keeps its explicit precision through a serialization round-trip."""
        fn = lq.quantizers.get(lq.quantizers.NoOp(precision=1))
        assert fn.__class__ == lq.quantizers.NoOp
        assert fn.precision == 1
        config = lq.quantizers.serialize(fn)
        fn = lq.quantizers.deserialize(config)
        assert fn.__class__ == lq.quantizers.NoOp
        assert fn.precision == 1

    def test_invalid_usage(self):
        """Unknown identifiers and invalid DoReFa modes raise ValueError."""
        with pytest.raises(ValueError):
            lq.quantizers.get(42)
        with pytest.raises(ValueError):
            lq.quantizers.get("unknown")
        with pytest.raises(ValueError):
            lq.quantizers.DoReFa(k_bit=2, mode="unknown")
        # Mode validation also happens at call time, not only in __init__.
        f = lq.quantizers.DoReFa(k_bit=2, mode="activations")
        f.mode = "unknown"
        with pytest.raises(ValueError):
            f.call([0.0])

    @pytest.mark.parametrize("quantizer", ["input_quantizer", "kernel_quantizer"])
    def test_layer_as_quantizer(self, quantizer, keras_should_run_eagerly):
        """Test whether a keras.layers.Layer can be used as quantizer."""
        input_data = testing_utils.random_input((1, 10))
        model = tf.keras.Sequential(
            [lq.layers.QuantDense(1, **{quantizer: DummyTrainableQuantizer()})]
        )
        model.compile(optimizer="sgd", loss="mse", run_eagerly=keras_should_run_eagerly)
        model.fit(input_data, np.ones((1,)), epochs=1)
        assert any(["dummy_weight" in var.name for var in model.trainable_variables])
class TestQuantization:
    """Test binarization and ternarization."""
    @pytest.mark.parametrize(
        "fn",
        [
            "ste_sign",
            lq.quantizers.SteSign(),
            "approx_sign",
            lq.quantizers.ApproxSign(),
            "swish_sign",
            lq.quantizers.SwishSign(),
        ],
    )
    def test_xnor_binarization(self, fn):
        # XNOR-style binarization maps every value to {-1, +1}; zero maps to +1.
        x = tf.keras.backend.placeholder(ndim=2)
        f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])
        # Already-binarized inputs must pass through unchanged (idempotence).
        binarized_values = np.random.choice([-1, 1], size=(2, 5))
        result = f([binarized_values])[0]
        np.testing.assert_allclose(result, binarized_values)
        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        assert not np.any(result == 0)
        assert np.all(result[real_values < 0] == -1)
        assert np.all(result[real_values >= 0] == 1)
        zero_values = np.zeros((2, 5))
        result = f([zero_values])[0]
        assert np.all(result == 1)
    @pytest.mark.parametrize("fn", ["ste_heaviside", lq.quantizers.SteHeaviside()])
    def test_and_binarization(self, fn):
        # Heaviside binarization maps to {0, 1}; zero and negatives map to 0.
        x = tf.keras.backend.placeholder(ndim=2)
        f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])
        binarized_values = np.random.choice([0, 1], size=(2, 5))
        result = f([binarized_values])[0]
        np.testing.assert_allclose(result, binarized_values)
        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        assert np.all(result[real_values <= 0] == 0)
        assert np.all(result[real_values > 0] == 1)
    @pytest.mark.usefixtures("eager_mode")
    def test_magnitude_aware_sign_binarization(self):
        a = np.random.uniform(-2, 2, (3, 2, 2, 3))
        x = tf.Variable(a)
        y = lq.quantizers.MagnitudeAwareSign()(x)
        assert y.shape == x.shape
        # check sign
        np.testing.assert_allclose(tf.sign(y).numpy(), np.sign(a))
        # check magnitude: each channel is scaled by its mean absolute value
        np.testing.assert_allclose(
            tf.reduce_mean(tf.abs(y), axis=[0, 1, 2]).numpy(),
            [np.mean(np.reshape(np.abs(a[:, :, :, i]), [-1])) for i in range(3)],
        )
    @pytest.mark.parametrize(
        "fn",
        [
            "ste_tern",
            lq.quantizers.SteTern(),
            lq.quantizers.SteTern(ternary_weight_networks=True),
            lq.quantizers.SteTern(threshold_value=np.random.uniform(0.01, 0.8)),
        ],
    )
    def test_ternarization_basic(self, fn):
        # Ternarization maps to {-1, 0, +1} and is idempotent on such values.
        x = tf.keras.backend.placeholder(ndim=2)
        f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])
        ternarized_values = np.random.choice([-1, 0, 1], size=(4, 10))
        result = f([ternarized_values])[0]
        np.testing.assert_allclose(result, ternarized_values)
        assert not np.any(result > 1)
        assert not np.any(result < -1)
        assert np.any(result == -1)
        assert np.any(result == 1)
        assert np.any(result == 0)
        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        assert not np.any(result > 1)
        assert not np.any(result < -1)
        assert np.any(result == -1)
        assert np.any(result == 1)
        assert np.any(result == 0)
    @pytest.mark.parametrize("fn", ["ste_tern", lq.quantizers.SteTern()])
    def test_ternarization_with_default_threshold(self, fn):
        x = tf.keras.backend.placeholder(ndim=2)
        test_threshold = 0.05  # This is the default
        f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])
        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        assert np.all(result[real_values > test_threshold] == 1)
        assert np.all(result[real_values < -test_threshold] == -1)
        assert np.all(result[np.abs(real_values) < test_threshold] == 0)
        assert not np.any(result > 1)
        assert not np.any(result < -1)
    def test_ternarization_with_custom_threshold(self):
        x = tf.keras.backend.placeholder(ndim=2)
        test_threshold = np.random.uniform(0.01, 0.8)
        fn = lq.quantizers.SteTern(threshold_value=test_threshold)
        f = tf.keras.backend.function([x], [fn(x)])
        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        assert np.all(result[real_values > test_threshold] == 1)
        assert np.all(result[real_values < -test_threshold] == -1)
        assert np.all(result[np.abs(real_values) < test_threshold] == 0)
        assert not np.any(result > 1)
        assert not np.any(result < -1)
    def test_ternarization_with_ternary_weight_networks(self):
        x = tf.keras.backend.placeholder(ndim=2)
        real_values = testing_utils.generate_real_values_with_zeros()
        # TWN threshold: 0.7 * mean(|x|), recomputed here as the reference.
        test_threshold = 0.7 * np.sum(np.abs(real_values)) / np.size(real_values)
        fn = lq.quantizers.SteTern(ternary_weight_networks=True)
        f = tf.keras.backend.function([x], [fn(x)])
        result = f([real_values])[0]
        assert np.all(result[real_values > test_threshold] == 1)
        assert np.all(result[real_values < -test_threshold] == -1)
        assert np.all(result[np.abs(real_values) < test_threshold] == 0)
        assert not np.any(result > 1)
        assert not np.any(result < -1)
    @pytest.mark.parametrize("k_bit", [1, 2, 4, 6, 8])
    @pytest.mark.parametrize("mode", ["activations", "weights"])
    def test_dorefa_quantize(self, k_bit, mode):
        x = tf.keras.backend.placeholder(ndim=2)
        f = tf.keras.backend.function([x], [lq.quantizers.DoReFa(k_bit, mode)(x)])
        real_values = testing_utils.generate_real_values_with_zeros()
        result = f([real_values])[0]
        n = 2**k_bit - 1
        if mode == "weights":
            # Create the preprocessed and scaled stimulus, which is then ready to
            # go through the same test like for the activation quantizer
            divider = np.amax(np.abs(np.tanh(real_values)))
            real_values = np.tanh(real_values) / divider
            real_values = (real_values / 2.0) + 0.5
            # The results, which are currently on [-1, 1] range get the same
            # scaling, so they behave like they were created on the activation
            # range and can be tested like that
            result = result / 2.0 + 0.5
        assert not np.any(result > 1)
        assert not np.any(result < 0)
        # Each input must quantize to the nearest of the n+1 uniform levels i/n.
        for i in range(n + 1):
            np.testing.assert_allclose(
                result[
                    (real_values > (2 * i - 1) / (2 * n))
                    & (real_values < (2 * i + 1) / (2 * n))
                ],
                i / n,
                atol=1e-6,
            )
@pytest.mark.usefixtures("eager_mode")
class TestGradients:
    """Test gradients for different quantizers."""
    @pytest.mark.parametrize(
        "fn",
        [
            lq.quantizers.SteSign(clip_value=None),
            lq.quantizers.SteTern(clip_value=None),
            lq.quantizers.SteHeaviside(clip_value=None),
        ],
    )
    def test_identity_ste_grad(self, fn):
        # Without clipping, the straight-through estimator passes gradients
        # through unchanged (identity everywhere).
        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = fn(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), np.ones_like(x))
    @pytest.mark.parametrize(
        "fn",
        [
            lq.quantizers.SteSign(),
            lq.quantizers.SteTern(),
            lq.quantizers.SteHeaviside(),
        ],
    )
    def test_ste_grad(self, fn):
        # Default STE: gradient 1 inside [-1, 1] and 0 outside.
        @np.vectorize
        def ste_grad(x):
            if np.abs(x) <= 1:
                return 1.0
            return 0.0
        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = fn(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), ste_grad(x))
    # Test with and without default threshold
    def test_swish_grad(self):
        # Closed-form derivative of the sign-swish surrogate gradient.
        def swish_grad(x, beta):
            return (
                beta * (2 - beta * x * np.tanh(beta * x / 2)) / (1 + np.cosh(beta * x))
            )
        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = lq.quantizers.SwishSign()(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), swish_grad(x, beta=5.0))
        with tf.GradientTape() as tape:
            activation = lq.quantizers.SwishSign(beta=10.0)(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), swish_grad(x, beta=10.0))
    def test_approx_sign_grad(self):
        # ApproxSign: triangular gradient 2 - 2|x| inside [-1, 1], 0 outside.
        @np.vectorize
        def approx_sign_grad(x):
            if np.abs(x) <= 1:
                return 2 - 2 * np.abs(x)
            return 0.0
        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = lq.quantizers.ApproxSign()(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), approx_sign_grad(x))
    def test_magnitude_aware_sign_grad(self):
        # Gradient is scaled by the per-channel mean absolute value and
        # clipped to zero outside [-1, 1].
        a = np.random.uniform(-2, 2, (3, 2, 2, 3))
        x = tf.Variable(a)
        with tf.GradientTape() as tape:
            y = lq.quantizers.MagnitudeAwareSign()(x)
        grad = tape.gradient(y, x)
        scale_vector = [
            np.mean(np.reshape(np.abs(a[:, :, :, i]), [-1])) for i in range(3)
        ]
        np.testing.assert_allclose(
            grad.numpy(), np.where(abs(a) < 1, np.ones(a.shape) * scale_vector, 0)
        )
    @pytest.mark.parametrize("mode", ["activations", "weights"])
    def test_dorefa_ste_grad(self, mode):
        # Activation mode: STE gradient 1 on [0, 1], 0 elsewhere.
        @np.vectorize
        def ste_grad(x):
            if x <= 1 and x >= 0:
                return 1.0
            return 0.0
        def tanh_grad(x):
            # 1/(cosh**2) is the derivative of tanh. The gradients of the
            # scaling operations cancel each other and the gradient of the
            # quantize function is supposed to be 1 everywhere, because it
            # is used on its linear region only. tanh does all the limiting.
            dividend = np.amax(np.abs(np.tanh(x)))
            return 1 / (np.cosh(x) ** 2.0) / dividend
        expected_gradient = ste_grad if mode == "activations" else tanh_grad
        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = lq.quantizers.DoReFa(2, mode)(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), expected_gradient(x))
@pytest.mark.parametrize(
    "quantizer",
    [
        ("ste_sign", lq.quantizers.SteSign),
        ("approx_sign", lq.quantizers.ApproxSign),
        ("ste_heaviside", lq.quantizers.SteHeaviside),
        ("swish_sign", lq.quantizers.SwishSign),
        ("magnitude_aware_sign", lq.quantizers.MagnitudeAwareSign),
        ("ste_tern", lq.quantizers.SteTern),
        ("dorefa_quantizer", lq.quantizers.DoReFa),
        ("dorefa_quantizer", functools.partial(lq.quantizers.DoReFa, mode="weights")),
    ],
)
def test_metrics(quantizer):
    """Metrics attach via `metrics_scope` or explicitly; none by default."""
    quantizer_str, quantizer_cls = quantizer
    # No metric
    model = tf.keras.models.Sequential(
        [lq.layers.QuantDense(3, kernel_quantizer=quantizer_str, input_shape=(32,))]
    )
    model.compile(loss="mse", optimizer="sgd")
    assert len(model.layers[0]._metrics) == 0
    # Metric added using scope
    with lq.context.metrics_scope(["flip_ratio"]):
        model = tf.keras.models.Sequential(
            [lq.layers.QuantDense(3, kernel_quantizer=quantizer_str, input_shape=(32,))]
        )
    model.compile(loss="mse", optimizer="sgd")
    if version.parse(tf.__version__) > version.parse("1.14"):
        assert len(model.layers[0].kernel_quantizer._metrics) == 1
    else:
        # In TF1.14, call() gets called twice, resulting in having an extra initial
        # metrics copy.
        assert len(model.layers[0].kernel_quantizer._metrics) == 2
    # Metric added explicitly to quantizer
    model = tf.keras.models.Sequential(
        [
            lq.layers.QuantDense(
                3,
                kernel_quantizer=quantizer_cls(metrics=["flip_ratio"]),
                input_shape=(32,),
            )
        ]
    )
    model.compile(loss="mse", optimizer="sgd")
    if version.parse(tf.__version__) > version.parse("1.14"):
        assert len(model.layers[0].kernel_quantizer._metrics) == 1
    else:
        # In TF1.14, call() gets called twice, resulting in having an extra initial
        # metrics copy.
        assert len(model.layers[0].kernel_quantizer._metrics) == 2
def test_get_kernel_quantizer_assigns_metrics():
    """Scope-declared metrics are copied onto quantizers built by string name."""
    with lq.context.metrics_scope(["flip_ratio"]):
        ste_sign = lq.quantizers.get_kernel_quantizer("ste_sign")
        assert "flip_ratio" in lq.context.get_training_metrics()
    assert isinstance(ste_sign, lq.quantizers.SteSign)
    assert "flip_ratio" in ste_sign._custom_metrics
def test_get_kernel_quantizer_accepts_function():
    """Plain callables pass through unchanged and gain no metric attribute."""
    custom_quantizer = lq.quantizers.get_kernel_quantizer(lambda x: x)
    assert callable(custom_quantizer)
    assert not hasattr(custom_quantizer, "_custom_metrics")
def test_backwards_compat_aliases():
    """Old quantizer class names remain importable as aliases."""
    assert lq.quantizers.DoReFaQuantizer == lq.quantizers.DoReFa
    assert lq.quantizers.NoOpQuantizer == lq.quantizers.NoOp
| 17,283 | 37.238938 | 96 | py |
larq | larq-main/larq/activations_test.py | import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq.testing_utils import generate_real_values_with_zeros
@pytest.mark.parametrize("name", ["hard_tanh", "leaky_tanh"])
def test_serialization(name):
    """Activations resolve by name and round-trip through Keras serialization."""
    fn = tf.keras.activations.get(name)
    ref_fn = getattr(lq.activations, name)
    assert fn == ref_fn
    config = tf.keras.activations.serialize(fn)
    fn = tf.keras.activations.deserialize(config)
    assert fn == ref_fn
def test_hard_tanh():
    """hard_tanh must equal an element-wise clip to [-1, 1]."""
    real_values = generate_real_values_with_zeros()
    x = tf.keras.backend.placeholder(ndim=2)
    f = tf.keras.backend.function([x], [lq.activations.hard_tanh(x)])
    result = f([real_values])[0]
    np.testing.assert_allclose(result, np.clip(real_values, -1, 1))
def test_leaky_tanh():
    """leaky_tanh must match a piecewise reference: identity on [-1, 1],
    slope `alpha` outside."""
    @np.vectorize
    def leaky_tanh(x, alpha):
        if x <= -1:
            return -1 + alpha * (x + 1)
        elif x <= 1:
            return x
        else:
            return 1 + alpha * (x - 1)
    real_values = generate_real_values_with_zeros()
    x = tf.keras.backend.placeholder(ndim=2)
    f = tf.keras.backend.function([x], [lq.activations.leaky_tanh(x)])
    result = f([real_values])[0]
    # Compare against the reference at the default alpha of 0.2.
    np.testing.assert_allclose(result, leaky_tanh(real_values, alpha=0.2))
| 1,259 | 29 | 74 | py |
larq | larq-main/larq/quantized_variable_test.py | import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_almost_equal, assert_array_equal
from packaging import version
from tensorflow.python.distribute.values import DistributedVariable
from larq import context, testing_utils
from larq.quantized_variable import QuantizedVariable
from larq.testing_utils import evaluate
def get_var(val, dtype=None, name=None):
    """Create a TF1-compatible resource variable used as the latent variable in tests."""
    variable = tf.compat.v1.Variable(val, use_resource=True, dtype=dtype, name=name)
    return variable
def test_inheritance(distribute_scope):
    """QuantizedVariable is a tf.Variable; it is distributed iff created under a strategy."""
    wrapped = QuantizedVariable.from_variable(get_var(3.0))
    assert isinstance(wrapped, QuantizedVariable)
    assert isinstance(wrapped, tf.Variable)
    assert isinstance(wrapped, DistributedVariable) is distribute_scope
@pytest.mark.usefixtures("eager_and_graph_mode", "distribute_scope")
def test_read():
    """All read forms return the latent value normally, the quantized value inside a scope."""
    x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda v: 2 * v)
    evaluate(x.initializer)
    for read in (x, x.value(), x.read_value(), tf.identity(x)):
        assert evaluate(read) == 3.5
    with context.quantized_scope(True):
        for read in (x, x.value(), x.read_value(), tf.identity(x)):
            assert evaluate(read) == 7
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_sparse_reads():
    """sparse_read and gather_nd are quantized only inside a quantized scope."""
    x = QuantizedVariable.from_variable(get_var([1.0, 2.0]), quantizer=lambda v: 2 * v)
    evaluate(x.initializer)
    for reader in (x.sparse_read, x.gather_nd):
        assert evaluate(reader([0])) == 1
    with context.quantized_scope(True):
        for reader in (x.sparse_read, x.gather_nd):
            assert evaluate(reader([0])) == 2
@pytest.mark.usefixtures("eager_and_graph_mode", "distribute_scope")
def test_read_nested_scopes():
    """An inner quantized_scope(False) temporarily restores latent reads."""
    x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda v: 2 * v)
    evaluate(x.initializer)
    with context.quantized_scope(True):
        assert evaluate(x.read_value()) == 7
        with context.quantized_scope(False):
            # The inner scope disables quantization again.
            assert evaluate(x.read_value()) == 3.5
        assert evaluate(x.read_value()) == 7
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_method_delegations(distribute_scope):
    """tf.Variable methods/properties delegate to the latent variable; reads are quantized.

    NOTE(review): leading whitespace was lost in this copy of the file; the
    nesting below is reconstructed — confirm against the original source.
    """
    x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
    with context.quantized_scope(True):
        evaluate(x.initializer)
        assert evaluate(x.value()) == 7
        assert evaluate(x.read_value()) == 7
        assert x.trainable
        # synchronization/aggregation delegation is only checked on TF > 1.14.
        if version.parse(tf.__version__) > version.parse("1.14"):
            assert x.synchronization == x.latent_variable.synchronization
            assert x.aggregation == x.latent_variable.aggregation
        # initialized_value is only exercised on TF < 2.13.
        if version.parse(tf.__version__) < version.parse("2.13"):
            assert evaluate(x.initialized_value()) == 7
        if not tf.executing_eagerly():
            if not distribute_scope:
                # These functions are not supported for DistributedVariables
                x.load(4.5)
                assert x.eval() == 9
            assert evaluate(x.initial_value) == 7
            # Graph/op handles only exist in graph mode.
            assert x.op == x.latent_variable.op
            assert x.graph == x.latent_variable.graph
        if not distribute_scope:
            # These attributes are not supported for DistributedVariables
            assert x.constraint is None
            assert x.initializer == x.latent_variable.initializer
        def apply_and_read(x, fn, args):
            # Run an assignment op, then read the (quantized) value back.
            evaluate(fn(*args))
            return evaluate(x)
        assert apply_and_read(x, x.assign, [4]) == 8
        assert apply_and_read(x, x.assign_add, [1]) == 10
        assert apply_and_read(x, x.assign_sub, [1.5]) == 7
        assert x.name == x.latent_variable.name
        assert x.device == x.latent_variable.device
        assert x.shape == ()
        assert x.get_shape() == ()
        try:
            # set_shape may be unimplemented for some variable types.
            x.set_shape(())
            assert x.shape == ()
        except NotImplementedError:
            pass
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_scatter_method_delegations():
    """scatter_* methods mutate the latent value; reads return 2x inside the scope.

    NOTE(review): indentation was lost in this copy and has been reconstructed.
    Each expected value below is 2 * the latent value after the mutation.
    """
    x = QuantizedVariable.from_variable(get_var([3.5, 4]), quantizer=lambda x: 2 * x)
    evaluate(x.initializer)
    with context.quantized_scope(True):
        assert_array_equal(evaluate(x.value()), [7, 8])
        def slices(val, index):
            # Helper: wrap a single update into tf.IndexedSlices.
            return tf.IndexedSlices(values=val, indices=index)
        assert_array_equal(evaluate(x.scatter_sub(slices(0.5, 0))), [6, 8])
        assert_array_equal(evaluate(x.scatter_add(slices(0.5, 0))), [7, 8])
        # scatter_max/min only exist on TF > 1.14; they leave the latent value
        # back at [3.5, 4.0] so the following expectations hold either way.
        if version.parse(tf.__version__) > version.parse("1.14"):
            assert_array_equal(evaluate(x.scatter_max(slices(4.5, 1))), [7, 9])
            assert_array_equal(evaluate(x.scatter_min(slices(4.0, 1))), [7, 8])
        assert_array_equal(evaluate(x.scatter_mul(slices(2.0, 1))), [7, 16])
        assert_array_equal(evaluate(x.scatter_div(slices(2.0, 1))), [7, 8])
        assert_array_equal(evaluate(x.scatter_update(slices(2.0, 1))), [7, 4])
        assert_array_equal(evaluate(x.scatter_nd_sub([[0], [1]], [0.5, 1.0])), [6, 2])
        assert_array_equal(evaluate(x.scatter_nd_add([[0], [1]], [0.5, 1.0])), [7, 4])
        assert_array_equal(
            evaluate(x.scatter_nd_update([[0], [1]], [0.5, 1.0])), [1, 2]
        )
        assert_array_equal(evaluate(x.batch_scatter_update(slices([2.0], [1]))), [1, 4])
@pytest.mark.usefixtures("eager_and_graph_mode", "distribute_scope")
def test_overloads(quantized):
    """Operator overloads act on the read value, which is 7 in both parametrizations.

    Either latent 3.5 with a 2x quantizer, or latent 7.0 without one — the
    ``quantized`` fixture presumably toggles the quantized scope (confirm in
    conftest), so every read below evaluates to 7.
    """
    if quantized:
        x = QuantizedVariable.from_variable(get_var(3.5), quantizer=lambda x: 2 * x)
    else:
        x = QuantizedVariable.from_variable(get_var(7.0))
    evaluate(x.initializer)
    # Arithmetic operators, both operand orders, including variable-with-itself.
    assert_almost_equal(8, evaluate(x + 1))
    assert_almost_equal(10, evaluate(3 + x))
    assert_almost_equal(14, evaluate(x + x))
    assert_almost_equal(5, evaluate(x - 2))
    assert_almost_equal(6, evaluate(13 - x))
    assert_almost_equal(0, evaluate(x - x))
    assert_almost_equal(14, evaluate(x * 2))
    assert_almost_equal(21, evaluate(3 * x))
    assert_almost_equal(49, evaluate(x * x))
    assert_almost_equal(3.5, evaluate(x / 2))
    assert_almost_equal(1.5, evaluate(10.5 / x))
    assert_almost_equal(3, evaluate(x // 2))
    assert_almost_equal(2, evaluate(15 // x))
    assert_almost_equal(1, evaluate(x % 2))
    assert_almost_equal(2, evaluate(16 % x))
    # Comparison operators in both operand orders.
    assert evaluate(x < 12)
    assert evaluate(x <= 12)
    assert not evaluate(x > 12)
    assert not evaluate(x >= 12)
    assert not evaluate(12 < x)
    assert not evaluate(12 <= x)
    assert evaluate(12 > x)
    assert evaluate(12 >= x)
    # pow in both positions, unary minus, abs.
    assert_almost_equal(343, evaluate(pow(x, 3)))
    assert_almost_equal(128, evaluate(pow(2, x)))
    assert_almost_equal(-7, evaluate(-x))
    assert_almost_equal(7, evaluate(abs(x)))
@pytest.mark.usefixtures("eager_mode")
def test_tensor_equality(quantized):
    """== and != compare elementwise against the (possibly quantized) read value."""
    if quantized:
        x = QuantizedVariable.from_variable(
            get_var([3.5, 4.0, 4.5]), quantizer=lambda v: 2 * v
        )
    else:
        x = QuantizedVariable.from_variable(get_var([7.0, 8.0, 9.0]))
    evaluate(x.initializer)
    assert_array_equal(evaluate(x), [7.0, 8.0, 9.0])
    if version.parse(tf.__version__) >= version.parse("2"):
        probe = [7.0, 8.0, 10.0]
        assert_array_equal(x == probe, [True, True, False])
        assert_array_equal(x != probe, [False, False, True])
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_assign(quantized, distribute_scope):
    """assign/assign_add/assign_sub write latent values; reads reflect quantization.

    ``value`` is what a read should return: 2x the latent value when the
    ``quantized`` fixture is active, the latent value otherwise.
    NOTE(review): indentation was lost in this copy and has been reconstructed.
    """
    x = QuantizedVariable.from_variable(
        get_var(0.0, tf.float64), quantizer=lambda x: 2 * x
    )
    evaluate(x.initializer)
    latent_value = 3.14
    value = latent_value * 2 if quantized else latent_value
    # Assign doesn't correctly return a quantized variable in graph mode if a strategy is used
    if tf.executing_eagerly() or not distribute_scope or not quantized:
        # Assign float32 values
        lv = tf.constant(latent_value, dtype=tf.float64)
        assert_almost_equal(evaluate(x.assign(lv)), value)
        assert_almost_equal(evaluate(x.assign_add(lv)), value * 2)
        assert_almost_equal(evaluate(x.assign_sub(lv)), value)
        # Assign Python floats
        assert_almost_equal(evaluate(x.assign(0.0)), 0.0)
        assert_almost_equal(evaluate(x.assign(latent_value)), value)
        assert_almost_equal(evaluate(x.assign_add(latent_value)), value * 2)
        assert_almost_equal(evaluate(x.assign_sub(latent_value)), value)
        # Use the tf.assign functions instead of the var.assign methods.
        assert_almost_equal(evaluate(tf.compat.v1.assign(x, 0.0)), 0.0)
        assert_almost_equal(evaluate(tf.compat.v1.assign(x, latent_value)), value)
        assert_almost_equal(
            evaluate(tf.compat.v1.assign_add(x, latent_value)), value * 2
        )
        assert_almost_equal(evaluate(tf.compat.v1.assign_sub(x, latent_value)), value)
    # Assign multiple times
    if version.parse(tf.__version__) >= version.parse("2.2") and (
        tf.executing_eagerly() or not distribute_scope
    ):
        assign = x.assign(0.0)
        assert_almost_equal(evaluate(assign), 0.0)
        assert_almost_equal(evaluate(assign.assign(latent_value)), value)
        assert_almost_equal(
            evaluate(x.assign_add(latent_value).assign_add(latent_value)), value * 3
        )
        assert_almost_equal(evaluate(x), value * 3)
        assert_almost_equal(
            evaluate(x.assign_sub(latent_value).assign_sub(latent_value)), value
        )
        assert_almost_equal(evaluate(x), value)
    # Assign with read_value=False
    assert_almost_equal(evaluate(x.assign(0.0)), 0.0)
    assert evaluate(x.assign(latent_value, read_value=False)) is None
    assert_almost_equal(evaluate(x), value)
    assert evaluate(x.assign_add(latent_value, read_value=False)) is None
    assert_almost_equal(evaluate(x), 2 * value)
    assert evaluate(x.assign_sub(latent_value, read_value=False)) is None
    assert_almost_equal(evaluate(x), value)
@pytest.mark.usefixtures("eager_mode", "distribute_scope")
def test_assign_tf_function(quantized):
    """Chained assignments inside a tf.function yield the (possibly quantized) final value."""
    x = QuantizedVariable.from_variable(get_var(0.0), quantizer=lambda v: 2 * v)

    @tf.function
    def run_assign():
        # Latent value after the chain: 1 + 3 + 3 - 2 = 5.
        return x.assign(1.0).assign_add(3.0).assign_add(3.0).assign_sub(2.0)

    expected = 10.0 if quantized else 5.0
    assert_almost_equal(evaluate(run_assign()), expected)
@pytest.mark.usefixtures("eager_and_graph_mode", "distribute_scope")
def test_assign_op():
    """Assignment results expose a non-None .op when traced inside a tf.function."""
    x = QuantizedVariable.from_variable(get_var(0.0), quantizer=lambda v: 2 * v)

    @tf.function
    def check_assign_ops():
        for update in (x.assign(1.0), x.assign_add(1.0), x.assign_sub(1.0)):
            assert update.op is not None

    check_assign_ops()
@pytest.mark.usefixtures("eager_mode", "distribute_scope")
def test_tf_function_control_dependencies(quantized):
    """Control dependencies sequence two increments inside a tf.function."""
    x = QuantizedVariable.from_variable(get_var(0.0), quantizer=lambda v: 2 * v)

    @tf.function
    def increment_twice():
        first = x.assign_add(1.0)
        with tf.control_dependencies([first]):
            x.assign_add(1.0)

    increment_twice()
    # Latent value is 2.0; the quantized read doubles it.
    assert_almost_equal(evaluate(x), 4.0 if quantized else 2.0)
def test_tf_function_with_variable_and_quantized_variable():
    """The same tf.function can be traced with a plain and a quantized variable."""
    latent = get_var(tf.ones(2, 2))
    wrapped = QuantizedVariable.from_variable(latent)

    @tf.function
    def add_one(t):
        return t + 1

    add_one(latent)
    add_one(wrapped)
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_checkpoint(tmp_path):
    """Checkpoints store the latent value and restore it into the wrapper."""
    var = QuantizedVariable.from_variable(get_var(0.0), quantizer=lambda v: 2 * v)
    evaluate(var.initializer)
    evaluate(var.assign(123.0))
    ckpt = tf.train.Checkpoint(x=var)
    save_path = ckpt.save(tmp_path)
    # Clobber the value, then restore the saved one from disk.
    evaluate(var.assign(234.0))
    ckpt.restore(save_path).assert_consumed().run_restore_ops()
    assert isinstance(var, QuantizedVariable)
    assert evaluate(var) == 123.0
    with context.quantized_scope(True):
        assert evaluate(var) == 123.0 * 2
@pytest.mark.usefixtures("distribute_scope")
def test_invalid_wrapped_usage():
    """Constructor arguments are validated with descriptive ValueErrors."""
    with pytest.raises(ValueError, match="`precision` must be of type `int` or `None`"):
        QuantizedVariable.from_variable(get_var([1.0]), precision=1.0)  # type: ignore
    with pytest.raises(ValueError, match="`quantizer` must be `callable` or `None`"):
        QuantizedVariable.from_variable(get_var([1.0]), 1)  # type: ignore
    with pytest.raises(ValueError, match="`variable` must be of type"):
        QuantizedVariable.from_variable(tf.constant([1.0]))
@pytest.mark.usefixtures("eager_and_graph_mode")
def test_repr(snapshot):
    """repr output is pinned via snapshots for lambda, object and precision quantizers."""
    latent = get_var(0.0, name="x")

    class Quantizer:
        def __call__(self, x):
            return x

    variants = (
        QuantizedVariable.from_variable(latent, quantizer=lambda x: 2 * x),
        QuantizedVariable.from_variable(latent, quantizer=Quantizer()),
        QuantizedVariable.from_variable(latent, precision=1),
    )
    for variant in variants:
        snapshot.assert_match(repr(variant))
@pytest.mark.usefixtures("eager_mode")
@pytest.mark.parametrize("should_quantize", [True, False])
def test_optimizer(should_quantize):
    """One SGD step on loss x + 1, optionally with the -x quantizer active in the loss.

    With quantization the loss is (-x) + 1, so the gradient w.r.t. the latent
    variable is -1 and SGD(lr=1) moves it from 1 to 2 (quantized read: -2).
    Without quantization the gradient is +1 and the latent value moves to 0.
    """
    x = QuantizedVariable.from_variable(get_var(1.0), quantizer=lambda x: -x)
    opt = tf.keras.optimizers.SGD(1.0)
    def loss():
        # The scope decides whether the loss sees the quantized (-x) value.
        with context.quantized_scope(should_quantize):
            return x + 1.0
    @tf.function
    def f():
        opt.minimize(loss, var_list=[x])
    f()
    if should_quantize:
        assert evaluate(x) == 2.0
        with context.quantized_scope(should_quantize):
            assert evaluate(x) == -2.0
    else:
        assert evaluate(x) == 0.0
@pytest.mark.skipif(
    version.parse(tf.__version__) < version.parse("2"),
    reason="Requires TensorFlow 2",
)
def test_saved_model(tmp_path):
    """Saving and reloading a small BNN preserves both predictions and weights."""
    model_path = str(tmp_path / "model")
    inputs = np.random.normal(size=(4, 32))
    model = testing_utils.get_small_bnn_model(inputs.shape[1], 16, 10)
    model.save(model_path, save_format="tf")
    reloaded = tf.keras.models.load_model(model_path)
    assert_almost_equal(reloaded.predict(inputs), model.predict(inputs))
    original_weights = model.get_weights()
    reloaded_weights = reloaded.get_weights()
    assert len(reloaded_weights) == len(original_weights)
    for actual, expected in zip(reloaded_weights, original_weights):
        assert_almost_equal(actual, expected)
| 14,405 | 37.31383 | 94 | py |
DAC2018 | DAC2018-master/setup.py | from distutils.core import setup, Extension
# Build the `mypack` extension: detector bindings compiled from Detector.cpp,
# linked against prebuilt plugin/kernel objects and the TensorRT/cuDNN libraries.
detector_extension = Extension(
    'mypack',
    sources=['Detector.cpp'],
    extra_compile_args=['-std=c++11'],
    include_dirs=['/usr/local/cuda/include'],
    extra_objects=['./plugin.o', './kernel.o'],
    extra_link_args=['-lnvinfer', '-lnvcaffe_parser', '-lcudnn'],
)
setup(
    name='mypack',
    version='1.0',
    description="Detector package",
    ext_modules=[detector_extension],
)
| 388 | 54.571429 | 142 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_category_model/capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import ScaledDotAttention
from src.module.attention.bilinear_attention import BilinearAttention
from src.module.attention.tanh_bilinear_attention import TanhBilinearAttention
from src.module.attention.concat_attention import ConcatAttention
from src.module.attention.tanh_concat_attention import TanhConcatAttention
from src.module.attention.mlp_attention import MlpAttention
import numpy as np
from src.module.utils.squash import squash
class CapsuleNetwork(nn.Module):
    """Base capsule network for aspect-category sentiment analysis.

    Embeds a sentence and an aspect, encodes the sentence conditioned on the
    aspect (subclass hook ``_sentence_encode``), turns the encoded tokens into
    primary capsules, and routes them into one capsule per sentiment category
    using a learned, optionally sentiment-initialized, guide matrix.
    NOTE(review): indentation was lost in this copy and has been reconstructed.
    """
    def __init__(self, embedding, aspect_embedding, hidden_size, capsule_size, dropout, num_categories):
        super(CapsuleNetwork, self).__init__()
        self.embedding = embedding  # token embedding layer
        # NOTE(review): aspect_embedding is stored but forward() embeds the
        # aspect with self.embedding — confirm whether this is intentional.
        self.aspect_embedding = aspect_embedding
        embed_size = embedding.embedding_dim
        self.capsule_size = capsule_size
        # Project aspect embedding / encoded sentence into capsule space.
        self.aspect_transform = nn.Sequential(
            nn.Linear(embed_size, capsule_size),
            nn.Dropout(dropout)
        )
        self.sentence_transform = nn.Sequential(
            nn.Linear(hidden_size, capsule_size),
            nn.Dropout(dropout)
        )
        # Aspect-vs-token attention used to weight the routing.
        self.norm_attention = BilinearAttention(capsule_size, capsule_size)
        # One guide capsule per category; bilinear form weight for routing.
        self.guide_capsule = nn.Parameter(
            torch.Tensor(num_categories, capsule_size)
        )
        self.guide_weight = nn.Parameter(
            torch.Tensor(capsule_size, capsule_size)
        )
        # Learnable temperature applied to the routing weights.
        self.scale = nn.Parameter(torch.tensor(5.0))
        # NOTE(review): capsule_projection appears unused in this class — confirm.
        self.capsule_projection = nn.Linear(capsule_size, capsule_size * num_categories)
        self.dropout = dropout
        self.num_categories = num_categories
        self._reset_parameters()
    def _reset_parameters(self):
        # Xavier initialization for the routing parameters.
        init.xavier_uniform_(self.guide_capsule)
        init.xavier_uniform_(self.guide_weight)
    def load_sentiment(self, path):
        """Initialize guide capsules from pretrained sentiment vectors stored at ``path``.

        The vectors are standardized (zero mean, unit std) and rescaled to a
        Xavier-like variance before being copied into ``guide_capsule``.
        """
        sentiment = np.load(path)
        e1 = np.mean(sentiment)
        d1 = np.std(sentiment)
        e2 = 0
        d2 = np.sqrt(2.0 / (sentiment.shape[0] + sentiment.shape[1]))
        sentiment = (sentiment - e1) / d1 * d2 + e2
        self.guide_capsule.data.copy_(torch.tensor(sentiment))
    def forward(self, sentence, aspect):
        """Return per-category capsule norms (batch, num_categories) for the inputs."""
        # get lengths and masks
        sentence = sentence_clip(sentence)
        sentence_mask = (sentence != PAD_INDEX)
        # embedding
        sentence = self.embedding(sentence)
        sentence = F.dropout(sentence, p=self.dropout, training=self.training)
        aspect = self.embedding(aspect)
        aspect = F.dropout(aspect, p=self.dropout, training=self.training)
        # sentence encode layer
        sentence = self._sentence_encode(sentence, aspect)
        # primary capsule layer
        sentence = self.sentence_transform(sentence)
        primary_capsule = squash(sentence, dim=-1)
        # aspect capsule layer
        aspect = self.aspect_transform(aspect)
        aspect_capsule = squash(aspect, dim=-1)
        # aspect aware normalization
        norm_weight = self.norm_attention.get_attention_weights(aspect_capsule, primary_capsule, sentence_mask)
        # capsule guided routing
        category_capsule = self._capsule_guided_routing(primary_capsule, norm_weight)
        category_capsule_norm = torch.sqrt(torch.sum(category_capsule * category_capsule, dim=-1, keepdim=False))
        return category_capsule_norm
    def _sentence_encode(self, sentence, aspect, mask=None):
        # Subclass hook: encode the embedded sentence conditioned on the aspect vector.
        raise NotImplementedError('_sentence_encode method is not implemented.')
    def _capsule_guided_routing(self, primary_capsule, norm_weight):
        # Route primary capsules toward category capsules by their softmaxed
        # similarity to the (squashed) guide capsules, scaled by the
        # attention-derived per-token weights.
        guide_capsule = squash(self.guide_capsule)
        guide_matrix = primary_capsule.matmul(self.guide_weight).matmul(guide_capsule.transpose(0, 1))
        guide_matrix = F.softmax(guide_matrix, dim=-1)
        guide_matrix = guide_matrix * norm_weight.unsqueeze(-1) * self.scale # (batch_size, time_step, num_categories)
        category_capsule = guide_matrix.transpose(1, 2).matmul(primary_capsule)
        category_capsule = F.dropout(category_capsule, p=self.dropout, training=self.training)
        category_capsule = squash(category_capsule)
        return category_capsule
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_category_model/recurrent_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from src.aspect_category_model.capsnet import CapsuleNetwork
class RecurrentCapsuleNetwork(CapsuleNetwork):
    """Capsule network whose sentence encoder is an aspect-conditioned (bi)GRU.

    NOTE(review): indentation was lost in this copy and has been reconstructed;
    in particular the placement of the residual add below (inside vs. after the
    ``bidirectional`` branch) should be confirmed against the original source.
    """
    def __init__(self, embedding, aspect_embedding, num_layers, bidirectional, capsule_size, dropout, num_categories):
        super(RecurrentCapsuleNetwork, self).__init__(
            embedding=embedding,
            aspect_embedding=aspect_embedding,
            hidden_size=embedding.embedding_dim * (2 if bidirectional else 1),
            capsule_size=capsule_size,
            dropout=dropout,
            num_categories=num_categories
        )
        embed_size = embedding.embedding_dim
        # The GRU consumes [token embedding ; aspect embedding] at every step.
        self.rnn = nn.GRU(
            input_size=embed_size * 2,
            hidden_size=embed_size,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True
        )
        self.bidirectional = bidirectional
    def _sentence_encode(self, sentence, aspect, mask=None):
        """Encode the sentence with the GRU and add a residual embedding connection."""
        batch_size, time_step, embed_size = sentence.size()
        aspect_aware_sentence = torch.cat((
            sentence, aspect.unsqueeze(1).expand(batch_size, time_step, embed_size)
        ), dim=-1)
        output, _ = self.rnn(aspect_aware_sentence)
        if self.bidirectional:
            # Duplicate embeddings along the feature axis so the residual
            # matches the 2*embed_size bidirectional GRU output.
            sentence = sentence.unsqueeze(-1).expand(batch_size, time_step, embed_size, 2)
            sentence = sentence.contiguous().view(batch_size, time_step, embed_size * 2)
        output = output + sentence
        output = F.dropout(output, p=self.dropout, training=self.training)
        return output
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_category_model/bert_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import ScaledDotAttention
from src.module.attention.bilinear_attention import BilinearAttention
from src.module.attention.tanh_bilinear_attention import TanhBilinearAttention
from src.module.attention.concat_attention import ConcatAttention
from src.module.attention.tanh_concat_attention import TanhConcatAttention
from src.module.attention.mlp_attention import MlpAttention
from src.aspect_term_model.capsnet import squash
import numpy as np
class BertCapsuleNetwork(nn.Module):
    """Capsule network for aspect-category sentiment analysis on BERT encodings.

    Takes a packed BERT input (sentence tokens with segment id 0, aspect tokens
    with segment id 1), average-pools the aspect tokens, and routes the sentence
    tokens into per-category capsules as in :class:`CapsuleNetwork`.
    NOTE(review): indentation was lost in this copy and has been reconstructed.
    """
    def __init__(self, bert, bert_size, capsule_size, dropout, num_categories):
        super(BertCapsuleNetwork, self).__init__()
        self.bert = bert  # pretrained BERT encoder
        self.bert_size = bert_size
        self.capsule_size = capsule_size
        # Project pooled aspect / token encodings into capsule space.
        self.aspect_transform = nn.Sequential(
            nn.Linear(bert_size, capsule_size),
            nn.Dropout(dropout)
        )
        self.sentence_transform = nn.Sequential(
            nn.Linear(bert_size, capsule_size),
            nn.Dropout(dropout)
        )
        self.norm_attention = BilinearAttention(capsule_size, capsule_size)
        self.guide_capsule = nn.Parameter(
            torch.Tensor(num_categories, capsule_size)
        )
        self.guide_weight = nn.Parameter(
            torch.Tensor(capsule_size, capsule_size)
        )
        # Learnable temperature for the routing weights.
        self.scale = nn.Parameter(torch.tensor(5.0))
        # NOTE(review): capsule_projection appears unused in this class — confirm.
        self.capsule_projection = nn.Linear(bert_size, bert_size * num_categories)
        self.dropout = dropout
        self.num_categories = num_categories
        self._reset_parameters()
    def _reset_parameters(self):
        # Xavier initialization for the routing parameters.
        init.xavier_uniform_(self.guide_capsule)
        init.xavier_uniform_(self.guide_weight)
    def load_sentiment(self, path):
        """Initialize guide capsules from pretrained sentiment vectors stored at ``path``."""
        sentiment = np.load(path)
        e1 = np.mean(sentiment)
        d1 = np.std(sentiment)
        e2 = 0
        d2 = np.sqrt(2.0 / (sentiment.shape[0] + sentiment.shape[1]))
        sentiment = (sentiment - e1) / d1 * d2 + e2
        self.guide_capsule.data.copy_(torch.tensor(sentiment))
    def forward(self, bert_token, bert_segment):
        """Return per-category capsule norms for a packed (tokens, segment-ids) input."""
        # BERT encoding
        encoder_layer, _ = self.bert(bert_token, bert_segment, output_all_encoded_layers=False)
        batch_size, segment_len = bert_segment.size()
        # argmax finds the first aspect position; everything at or before it,
        # with segment id 0, is treated as sentence (padding falls after).
        max_segment_len = bert_segment.argmax(dim=-1, keepdim=True)
        batch_arrange = torch.arange(segment_len).unsqueeze(0).expand(batch_size, segment_len).to(bert_segment.device)
        segment_mask = batch_arrange <= max_segment_len
        sentence_mask = segment_mask & (1 - bert_segment).byte()
        aspect_mask = bert_segment
        sentence_lens = sentence_mask.long().sum(dim=1, keepdim=True)
        aspect_lens = aspect_mask.long().sum(dim=1, keepdim=True)
        # Average-pool the aspect token encodings.
        aspect = encoder_layer.masked_fill(aspect_mask.unsqueeze(-1) == 0, 0)
        aspect = aspect.sum(dim=1, keepdim=False) / aspect_lens.float()
        # sentence encode layer
        max_len = sentence_lens.max().item()
        sentence = encoder_layer[:, 0: max_len].contiguous()
        sentence_mask = sentence_mask[:, 0: max_len].contiguous()
        sentence = sentence.masked_fill(sentence_mask.unsqueeze(-1) == 0, 0)
        # primary capsule layer
        sentence = self.sentence_transform(sentence)
        primary_capsule = squash(sentence, dim=-1)
        # aspect capsule layer
        aspect = self.aspect_transform(aspect)
        aspect_capsule = squash(aspect, dim=-1)
        # aspect aware normalization
        norm_weight = self.norm_attention.get_attention_weights(aspect_capsule, primary_capsule, sentence_mask)
        # capsule guided routing
        category_capsule = self._capsule_guided_routing(primary_capsule, norm_weight)
        category_capsule_norm = torch.sqrt(torch.sum(category_capsule * category_capsule, dim=-1, keepdim=False))
        return category_capsule_norm
    def _capsule_guided_routing(self, primary_capsule, norm_weight):
        # Route primary capsules toward category capsules guided by similarity
        # to the (squashed) guide capsules, scaled by per-token weights.
        guide_capsule = squash(self.guide_capsule)
        guide_matrix = primary_capsule.matmul(self.guide_weight).matmul(guide_capsule.transpose(0, 1))
        guide_matrix = F.softmax(guide_matrix, dim=-1)
        guide_matrix = guide_matrix * norm_weight.unsqueeze(-1) * self.scale # (batch_size, time_step, num_categories)
        category_capsule = guide_matrix.transpose(1, 2).matmul(primary_capsule)
        category_capsule = F.dropout(category_capsule, p=self.dropout, training=self.training)
        category_capsule = squash(category_capsule)
        return category_capsule
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_term_model/capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import ScaledDotAttention
from src.module.attention.bilinear_attention import BilinearAttention
from src.module.attention.tanh_bilinear_attention import TanhBilinearAttention
from src.module.attention.concat_attention import ConcatAttention
from src.module.attention.tanh_concat_attention import TanhConcatAttention
from src.module.attention.mlp_attention import MlpAttention
import numpy as np
from src.module.utils.squash import squash
class CapsuleNetwork(nn.Module):
    """Base capsule network for aspect-term sentiment analysis.

    Unlike the aspect-category variant, the aspect here is a token sequence:
    it is embedded and average-pooled into a single vector before conditioning
    the sentence encoder (subclass hook ``_sentence_encode``).
    NOTE(review): indentation was lost in this copy and has been reconstructed.
    """
    def __init__(self, embedding, hidden_size, capsule_size, dropout, num_categories):
        super(CapsuleNetwork, self).__init__()
        self.embedding = embedding  # token embedding layer
        embed_size = embedding.embedding_dim
        self.capsule_size = capsule_size
        # Project pooled aspect / encoded sentence into capsule space.
        self.aspect_transform = nn.Sequential(
            nn.Linear(embed_size, capsule_size),
            nn.Dropout(dropout)
        )
        self.sentence_transform = nn.Sequential(
            nn.Linear(hidden_size, capsule_size),
            nn.Dropout(dropout)
        )
        self.norm_attention = BilinearAttention(capsule_size, capsule_size)
        self.guide_capsule = nn.Parameter(
            torch.Tensor(num_categories, capsule_size)
        )
        self.guide_weight = nn.Parameter(
            torch.Tensor(capsule_size, capsule_size)
        )
        # Learnable temperature for the routing weights (4.0 here vs. 5.0 in
        # the aspect-category model).
        self.scale = nn.Parameter(torch.tensor(4.0))
        # NOTE(review): capsule_projection appears unused in this class — confirm.
        self.capsule_projection = nn.Linear(capsule_size, capsule_size * num_categories)
        self.dropout = dropout
        self.num_categories = num_categories
        self._reset_parameters()
    def _reset_parameters(self):
        # Xavier initialization for the routing parameters.
        init.xavier_uniform_(self.guide_capsule)
        init.xavier_uniform_(self.guide_weight)
    def load_sentiment(self, path):
        """Initialize guide capsules from pretrained sentiment vectors stored at ``path``."""
        sentiment = np.load(path)
        e1 = np.mean(sentiment)
        d1 = np.std(sentiment)
        e2 = 0
        d2 = np.sqrt(2.0 / (sentiment.shape[0] + sentiment.shape[1]))
        sentiment = (sentiment - e1) / d1 * d2 + e2
        self.guide_capsule.data.copy_(torch.tensor(sentiment))
    def forward(self, sentence, aspect):
        """Return per-category capsule norms for ``sentence`` given aspect token ids."""
        # get lengths and masks
        sentence = sentence_clip(sentence)
        aspect = sentence_clip(aspect)
        sentence_mask = (sentence != PAD_INDEX)
        aspect_mask = (aspect != PAD_INDEX)
        # sentence_lens = sentence_mask.long().sum(dim=1, keepdim=True)
        aspect_lens = aspect_mask.long().sum(dim=1, keepdim=True)
        # embedding
        sentence = self.embedding(sentence)
        sentence = F.dropout(sentence, p=self.dropout, training=self.training)
        aspect = self.embedding(aspect)
        aspect = F.dropout(aspect, p=self.dropout, training=self.training)
        # aspect average pooling
        aspect = aspect.masked_fill(aspect_mask.unsqueeze(-1) == 0, 0)
        aspect = aspect.sum(dim=1, keepdim=False) / aspect_lens.float()
        # sentence encode layer
        sentence = self._sentence_encode(sentence, aspect)
        # primary capsule layer
        sentence = self.sentence_transform(sentence)
        primary_capsule = squash(sentence, dim=-1)
        # aspect capsule layer
        aspect = self.aspect_transform(aspect)
        aspect_capsule = squash(aspect, dim=-1)
        # aspect aware normalization
        norm_weight = self.norm_attention.get_attention_weights(aspect_capsule, primary_capsule, sentence_mask)
        # capsule guided routing
        category_capsule = self._capsule_guided_routing(primary_capsule, norm_weight)
        category_capsule_norm = torch.sqrt(torch.sum(category_capsule * category_capsule, dim=-1, keepdim=False))
        return category_capsule_norm
    def _sentence_encode(self, sentence, aspect, mask=None):
        # Subclass hook: encode the embedded sentence conditioned on the aspect vector.
        raise NotImplementedError('_sentence_encode method is not implemented.')
    def _capsule_guided_routing(self, primary_capsule, norm_weight):
        # Route primary capsules toward category capsules guided by similarity
        # to the (squashed) guide capsules, scaled by per-token weights.
        guide_capsule = squash(self.guide_capsule)
        guide_matrix = primary_capsule.matmul(self.guide_weight).matmul(guide_capsule.transpose(0, 1))
        guide_matrix = F.softmax(guide_matrix, dim=-1)
        guide_matrix = guide_matrix * norm_weight.unsqueeze(-1) * self.scale # (batch_size, time_step, num_categories)
        category_capsule = guide_matrix.transpose(1, 2).matmul(primary_capsule)
        category_capsule = F.dropout(category_capsule, p=self.dropout, training=self.training)
        category_capsule = squash(category_capsule)
        return category_capsule
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_term_model/recurrent_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from src.aspect_term_model.capsnet import CapsuleNetwork
class RecurrentCapsuleNetwork(CapsuleNetwork):
    """Aspect-term capsule network whose sentence encoder is an aspect-conditioned (bi)GRU.

    NOTE(review): indentation was lost in this copy and has been reconstructed;
    in particular the placement of the residual add below (inside vs. after the
    ``bidirectional`` branch) should be confirmed against the original source.
    """
    def __init__(self, embedding, num_layers, bidirectional, capsule_size, dropout, num_categories):
        super(RecurrentCapsuleNetwork, self).__init__(
            embedding=embedding,
            hidden_size=embedding.embedding_dim * (2 if bidirectional else 1),
            capsule_size=capsule_size,
            dropout=dropout,
            num_categories=num_categories
        )
        embed_size = embedding.embedding_dim
        # The GRU consumes [token embedding ; aspect embedding] at every step.
        self.rnn = nn.GRU(
            input_size=embed_size * 2,
            hidden_size=embed_size,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True
        )
        self.bidirectional = bidirectional
    def _sentence_encode(self, sentence, aspect, mask=None):
        """Encode the sentence with the GRU and add a residual embedding connection."""
        batch_size, time_step, embed_size = sentence.size()
        aspect_aware_sentence = torch.cat((
            sentence, aspect.unsqueeze(1).expand(batch_size, time_step, embed_size)
        ), dim=-1)
        output, _ = self.rnn(aspect_aware_sentence)
        if self.bidirectional:
            # Duplicate embeddings along the feature axis so the residual
            # matches the 2*embed_size bidirectional GRU output.
            sentence = sentence.unsqueeze(-1).expand(batch_size, time_step, embed_size, 2)
            sentence = sentence.contiguous().view(batch_size, time_step, embed_size * 2)
        output = output + sentence
        output = F.dropout(output, p=self.dropout, training=self.training)
        return output
MAMS-for-ABSA | MAMS-for-ABSA-master/src/aspect_term_model/bert_capsnet.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
from src.module.utils.constants import PAD_INDEX, INF
from src.module.utils.sentence_clip import sentence_clip
from src.module.attention.dot_attention import DotAttention
from src.module.attention.scaled_dot_attention import ScaledDotAttention
from src.module.attention.bilinear_attention import BilinearAttention
from src.module.attention.tanh_bilinear_attention import TanhBilinearAttention
from src.module.attention.concat_attention import ConcatAttention
from src.module.attention.tanh_concat_attention import TanhConcatAttention
from src.module.attention.mlp_attention import MlpAttention
from src.module.utils.squash import squash
import numpy as np
import time
class BertCapsuleNetwork(nn.Module):
def __init__(self, bert, bert_size, capsule_size, dropout, num_categories):
super(BertCapsuleNetwork, self).__init__()
self.bert = bert
self.bert_size = bert_size
self.capsule_size = capsule_size
self.aspect_transform = nn.Sequential(
nn.Linear(bert_size, capsule_size),
nn.Dropout(dropout)
)
self.sentence_transform = nn.Sequential(
nn.Linear(bert_size, capsule_size),
nn.Dropout(dropout)
)
self.norm_attention = BilinearAttention(capsule_size, capsule_size)
self.guide_capsule = nn.Parameter(
torch.Tensor(num_categories, capsule_size)
)
self.guide_weight = nn.Parameter(
torch.Tensor(capsule_size, capsule_size)
)
self.scale = nn.Parameter(torch.tensor(5.0))
self.capsule_projection = nn.Linear(bert_size, bert_size * num_categories)
self.dropout = dropout
self.num_categories = num_categories
self._reset_parameters()
def _reset_parameters(self):
init.xavier_uniform_(self.guide_capsule)
init.xavier_uniform_(self.guide_weight)
def load_sentiment(self, path):
sentiment = np.load(path)
e1 = np.mean(sentiment)
d1 = np.std(sentiment)
e2 = 0
d2 = np.sqrt(2.0 / (sentiment.shape[0] + sentiment.shape[1]))
sentiment = (sentiment - e1) / d1 * d2 + e2
self.guide_capsule.data.copy_(torch.tensor(sentiment))
def forward(self, bert_token, bert_segment):
# BERT encoding
encoder_layer, _ = self.bert(bert_token, bert_segment, output_all_encoded_layers=False)
batch_size, segment_len = bert_segment.size()
max_segment_len = bert_segment.argmax(dim=-1, keepdim=True)
batch_arrange = torch.arange(segment_len).unsqueeze(0).expand(batch_size, segment_len).to(bert_segment.device)
segment_mask = batch_arrange <= max_segment_len
sentence_mask = segment_mask & (1 - bert_segment).byte()
aspect_mask = bert_segment
sentence_lens = sentence_mask.long().sum(dim=1, keepdim=True)
# aspect average pooling
aspect_lens = aspect_mask.long().sum(dim=1, keepdim=True)
aspect = encoder_layer.masked_fill(aspect_mask.unsqueeze(-1) == 0, 0)
aspect = aspect.sum(dim=1, keepdim=False) / aspect_lens.float()
# sentence encode layer
max_len = sentence_lens.max().item()
sentence = encoder_layer[:, 0: max_len].contiguous()
sentence_mask = sentence_mask[:, 0: max_len].contiguous()
sentence = sentence.masked_fill(sentence_mask.unsqueeze(-1) == 0, 0)
# primary capsule layer
sentence = self.sentence_transform(sentence)
primary_capsule = squash(sentence, dim=-1)
aspect = self.aspect_transform(aspect)
aspect_capsule = squash(aspect, dim=-1)
# aspect aware normalization
norm_weight = self.norm_attention.get_attention_weights(aspect_capsule, primary_capsule, sentence_mask)
# capsule guided routing
category_capsule = self._capsule_guided_routing(primary_capsule, norm_weight)
category_capsule_norm = torch.sqrt(torch.sum(category_capsule * category_capsule, dim=-1, keepdim=False))
return category_capsule_norm
def _capsule_guided_routing(self, primary_capsule, norm_weight):
    """Route primary capsules into category capsules, guided by the
    (sentiment-initialized) guide capsules.

    primary_capsule: FloatTensor (batch_size, time_step, capsule_size).
    norm_weight: FloatTensor (batch_size, time_step) — per-position weights
        from the aspect-aware attention.
    Returns: squashed FloatTensor of category capsules.

    Fix: the return line carried trailing dataset-metadata corruption
    (`| 4,780 | 47.785714 | 119 | py |`) that made it a syntax error; restored.
    """
    guide_capsule = squash(self.guide_capsule)
    # Affinity of every primary capsule to every guide capsule.
    guide_matrix = primary_capsule.matmul(self.guide_weight).matmul(guide_capsule.transpose(0, 1))
    guide_matrix = F.softmax(guide_matrix, dim=-1)
    # Reweight by the aspect-aware attention and a fixed scale.
    guide_matrix = guide_matrix * norm_weight.unsqueeze(-1) * self.scale  # (batch_size, time_step, num_categories)
    # Weighted sum of primary capsules per category.
    category_capsule = guide_matrix.transpose(1, 2).matmul(primary_capsule)
    category_capsule = F.dropout(category_capsule, p=self.dropout, training=self.training)
    category_capsule = squash(category_capsule)
    return category_capsule
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/concat_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class ConcatAttention(Attention):
    """Additive ("concat") attention: score(q, k) = w_q . q + w_k . k.

    Each query and key is projected to a scalar with a learned weight
    vector; the two scalars are summed to form the attention score.

    Fix: the final `return score` line carried trailing dataset-metadata
    corruption (`| 1,007 | 41 | 101 | py |`) that broke the syntax; restored.
    """

    def __init__(self, query_size, key_size, dropout=0):
        super(ConcatAttention, self).__init__(dropout)
        # One scalar projection per side, Xavier-initialized.
        self.query_weights = nn.Parameter(torch.Tensor(query_size, 1))
        self.key_weights = nn.Parameter(torch.Tensor(key_size, 1))
        init.xavier_uniform_(self.query_weights)
        init.xavier_uniform_(self.key_weights)

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        batch_size, num_queries, time_step = query.size(0), query.size(1), key.size(1)
        # Per-query scalar, broadcast across time steps.
        query = query.matmul(self.query_weights).expand(batch_size, num_queries, time_step)
        # Per-key scalar, broadcast across queries.
        key = key.matmul(self.key_weights).transpose(1, 2).expand(batch_size, num_queries, time_step)
        score = query + key
        return score
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/bilinear_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class BilinearAttention(Attention):
    """Bilinear attention: score(q, k) = q W k^T with a learned matrix W.

    Fix: the final `return score` line carried trailing dataset-metadata
    corruption (`| 659 | 33.736842 | 76 | py |`) that broke the syntax; restored.
    """

    def __init__(self, query_size, key_size, dropout=0):
        super(BilinearAttention, self).__init__(dropout)
        self.weights = nn.Parameter(torch.FloatTensor(query_size, key_size))
        init.xavier_uniform_(self.weights)

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        score = query.matmul(self.weights).matmul(key.transpose(1, 2))
        return score
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/tanh_bilinear_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class TanhBilinearAttention(Attention):
    """Bilinear attention with bias and tanh: score(q, k) = tanh(q W k^T + b).

    Fix: the final `return score` line carried trailing dataset-metadata
    corruption (`| 740 | 36.05 | 94 | py |`) that broke the syntax; restored.
    """

    def __init__(self, query_size, key_size, dropout=0):
        super(TanhBilinearAttention, self).__init__(dropout)
        self.weights = nn.Parameter(torch.FloatTensor(query_size, key_size))
        init.xavier_uniform_(self.weights)
        # Scalar bias added to every score before the tanh.
        self.bias = nn.Parameter(torch.zeros(1))

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        score = torch.tanh(query.matmul(self.weights).matmul(key.transpose(1, 2)) + self.bias)
        return score
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/tanh_concat_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class TanhConcatAttention(Attention):
    """Additive attention with tanh squashing: score(q, k) = tanh(w_q . q + w_k . k).

    Fix: the final `return score` line carried trailing dataset-metadata
    corruption (`| 1,049 | 41 | 101 | py |`) that broke the syntax; restored.
    """

    def __init__(self, query_size, key_size, dropout=0):
        super(TanhConcatAttention, self).__init__(dropout)
        # One scalar projection per side, Xavier-initialized.
        self.query_weights = nn.Parameter(torch.Tensor(query_size, 1))
        self.key_weights = nn.Parameter(torch.Tensor(key_size, 1))
        init.xavier_uniform_(self.query_weights)
        init.xavier_uniform_(self.key_weights)

    def _score(self, query, key):
        """
        query: FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        returns: FloatTensor (batch_size, num_queries, time_step)
        """
        batch_size, num_queries, time_step = query.size(0), query.size(1), key.size(1)
        # Per-query scalar, broadcast across time steps.
        query = query.matmul(self.query_weights).expand(batch_size, num_queries, time_step)
        # Per-key scalar, broadcast across queries.
        key = key.matmul(self.key_weights).transpose(1, 2).expand(batch_size, num_queries, time_step)
        score = query + key
        score = torch.tanh(score)
        return score
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/multi_head_attention.py | from torch import nn
from torch.nn import init
import math
class MultiHeadAttention(nn.Module):
    """Multi-head wrapper around an arbitrary attention module.

    Queries/keys/values are linearly projected into ``num_heads`` subspaces,
    the wrapped ``attention`` is applied to all heads in parallel (heads are
    folded into the batch dimension), and the per-head outputs are
    concatenated and projected back to ``out_size``.

    Fix: the final `return output` line carried trailing dataset-metadata
    corruption (`| 3,451 | 55.590164 | 120 | py |`) that broke the syntax; restored.
    """

    def __init__(self, attention, num_heads, hidden_size, key_size='default', value_size='default', out_size='default'):
        # 'default' splits hidden_size evenly across heads and keeps the
        # output width equal to hidden_size.
        key_size = hidden_size // num_heads if key_size == 'default' else key_size
        value_size = hidden_size // num_heads if value_size == 'default' else value_size
        out_size = hidden_size if out_size == 'default' else out_size
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.key_size = key_size
        self.value_size = value_size
        self.query_projection = nn.Linear(hidden_size, num_heads * key_size)
        self.key_projection = nn.Linear(hidden_size, num_heads * key_size)
        self.value_projection = nn.Linear(hidden_size, num_heads * value_size)
        # Glorot-style init: std = sqrt(2 / (fan_in + fan_out)).
        init.normal_(self.query_projection.weight, mean=0, std=math.sqrt(2.0 / (hidden_size + key_size)))
        init.normal_(self.key_projection.weight, mean=0, std=math.sqrt(2.0 / (hidden_size + key_size)))
        init.normal_(self.value_projection.weight, mean=0, std=math.sqrt(2.0 / (hidden_size + value_size)))
        self.output_projection = nn.Linear(num_heads * value_size, out_size)
        init.xavier_normal_(self.output_projection.weight)
        self.attention = attention

    def forward(self, query, key, value, mask=None):
        """
        query: FloatTensor (batch_size, hidden_size) or (batch_size, num_queries, hidden_size)
        key: FloatTensor (batch_size, time_step, hidden_size)
        value: FloatTensor (batch_size, time_step, hidden_size)
        mask: ByteTensor (batch_size, time_step) or ByteTensor (batch_size, num_queries, time_step)
        returns: FloatTensor (batch_size, out_size) or (batch_size, num_queries, out_size)
        """
        # Promote a single-query batch to (batch, 1, hidden); remember to
        # squeeze the query axis back out on return.
        single_query = False
        if len(query.size()) == 2:
            query = query.unsqueeze(1)
            single_query = True
        if mask is not None:
            if len(mask.size()) == 2:
                mask = mask.unsqueeze(1)
            else:
                assert mask.size(1) == query.size(1)
        num_heads, key_size, value_size = self.num_heads, self.key_size, self.value_size
        batch_size, num_queries, time_step = query.size(0), query.size(1), key.size(1)
        # Project and split into heads: (batch, len, heads, size).
        query = self.query_projection(query).view(batch_size, num_queries, num_heads, key_size)
        key = self.key_projection(key).view(batch_size, time_step, num_heads, key_size)
        value = self.value_projection(value).view(batch_size, time_step, num_heads, value_size)
        if mask is not None:
            # Replicate the mask once per head to line up with the
            # head-folded batch dimension below.
            # NOTE(review): mask was already unsqueezed to 3-D above, so this
            # 2-D branch appears unreachable; kept for behavioral parity.
            if len(mask.size()) == 2:
                mask = mask.unsqueeze(0).repeat(num_heads, 1, 1).view(-1, time_step)
            else:
                mask = mask.unsqueeze(0).repeat(num_heads, 1, 1, 1).view(-1, num_queries, time_step)
        # Fold heads into the batch dimension: (heads * batch, len, size).
        query = query.permute(2, 0, 1, 3).contiguous().view(-1, num_queries, key_size)
        key = key.permute(2, 0, 1, 3).contiguous().view(-1, time_step, key_size)
        value = value.permute(2, 0, 1, 3).contiguous().view(-1, time_step, value_size)
        output = self.attention(query, key, value, mask)
        # Unfold heads and concatenate them along the feature dimension.
        output = output.view(num_heads, batch_size, num_queries, value_size)
        output = output.permute(1, 2, 0, 3).contiguous().view(batch_size, num_queries, -1)
        output = self.output_projection(output)
        if single_query:
            output = output.squeeze(1)
        return output
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/attention.py | from torch import nn
import torch.nn.functional as F
from src.module.utils import constants
class Attention(nn.Module):
    """
    The base class of attention.

    Subclasses implement ``_score``; this class handles single-query
    promotion, masking, softmax normalization, dropout, and the weighted
    sum over values.

    Fixes: the final `return weights` line carried trailing dataset-metadata
    corruption (`| 2,355 | 37 | 104 | py |`) that broke the syntax (restored);
    the duplicated pipeline shared by ``forward`` and ``get_attention_weights``
    is factored into private helpers; `not mask is None` -> `mask is not None`.
    """

    def __init__(self, dropout):
        super(Attention, self).__init__()
        self.dropout = dropout

    def forward(self, query, key, value, mask=None):
        """
        query: FloatTensor (batch_size, query_size) or FloatTensor (batch_size, num_queries, query_size)
        key: FloatTensor (batch_size, time_step, key_size)
        value: FloatTensor (batch_size, time_step, hidden_size)
        mask: ByteTensor (batch_size, time_step) or ByteTensor (batch_size, num_queries, time_step)
        """
        query, mask, single_query = self._promote(query, mask)
        weights = self._compute_weights(query, key, mask)  # (batch_size, num_queries, time_step)
        output = weights.matmul(value)
        if single_query:
            output = output.squeeze(1)
        return output

    def _score(self, query, key):
        raise NotImplementedError('Attention score method is not implemented.')

    def _weights_normalize(self, score, mask):
        # Masked positions receive -INF so softmax gives them zero weight.
        if mask is not None:
            score = score.masked_fill(mask == 0, -constants.INF)
        weights = F.softmax(score, dim=-1)
        return weights

    def get_attention_weights(self, query, key, mask=None):
        """Return the (dropout-applied) attention weights without applying them to values."""
        query, mask, single_query = self._promote(query, mask)
        weights = self._compute_weights(query, key, mask)
        if single_query:
            weights = weights.squeeze(1)
        return weights

    def _promote(self, query, mask):
        # Promote a single-query batch (batch, size) to (batch, 1, size)
        # and align the mask's rank with the query's.
        single_query = False
        if len(query.size()) == 2:
            query = query.unsqueeze(1)
            single_query = True
        if mask is not None:
            if len(mask.size()) == 2:
                mask = mask.unsqueeze(1)
            else:
                assert mask.size(1) == query.size(1)
        return query, mask, single_query

    def _compute_weights(self, query, key, mask):
        # Score -> masked softmax -> dropout.
        score = self._score(query, key)  # FloatTensor (batch_size, num_queries, time_step)
        weights = self._weights_normalize(score, mask)
        return F.dropout(weights, p=self.dropout, training=self.training)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.