repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
import caffe
import numpy as np


def main():
    # This example shows how to use the pycaffe API to predict PHOCs
    # from a trained PHOCNet for your own word images.
    #
    # First, load the trained PHOCNet. We use the pretrained net supplied at
    # http://patrec.cs.tu-dortmund.de/files/cnns/phocnet_gw_cv1.binaryproto
    deploy_path = 'deploy_phocnet.prototxt'
    trained_net_path = 'phocnet_gw_cv1.binaryproto'
    phocnet = caffe.Net(deploy_path, caffe.TEST, weights=trained_net_path)

    # Next, supply your own images. For the sake of example we use random
    # arrays: 4 single-channel images of shape 60 x 160 with pixels in 0-255.
    images = []
    for _ in xrange(4):
        images.append(np.around(np.random.rand(60, 160, 1) * 255))

    # The ndarrays above have the typical shape and pixel range you would get
    # from loaders such as OpenCV or skimage. Caffe instead expects a 4D
    # tensor of shape (num. images, channels, height, width).
    for idx in xrange(4):
        img = np.transpose(images[idx], (2, 0, 1))
        img = np.reshape(img, (1, 1, 60, 160))
        # The PHOCNet accepts images in a pixel range of 0 (white) to
        # 1 (black). Typical images are inverted (white = 255, black = 0),
        # so map them into the expected range. Skip this step if your
        # images are already 0 (white) to 1 (black).
        img -= 255.0
        img /= -255.0
        images[idx] = img

    # Now shove the images through the PHOCNet. As word images usually have
    # different sizes, they are predicted one by one: reshape the input
    # layer blob (word_images) to the current image shape first.
    phocs = []
    for image in images:
        phocnet.blobs['word_images'].reshape(*image.shape)
        phocnet.reshape()
        # Put the current image into the input layer...
        phocnet.blobs['word_images'].data[...] = image
        # ... and predict the PHOC (flatten automatically returns a copy).
        phoc = phocnet.forward()['sigmoid'].flatten()
        phocs.append(phoc)
    # Congrats, you have a set of PHOCs for your word images.
    # If you run into errors with the code above, make sure your word images
    # are shaped (num. images, channels, height, width). Only when all images
    # have the exact same size should num. images differ from 1.


if __name__ == '__main__':
    main()
phocnet | phocnet-master/src/phocnet/evaluation/cnn.py | '''
Created on Jul 10, 2016
@author: ssudholt
'''
import logging
import numpy as np
from skimage.transform import resize
from phocnet.evaluation.retrieval import map_from_feature_matrix
def net_output_for_word_image_list(phocnet, word_img_list,
                                   min_img_width_height=-1, input_layer='word_images',
                                   output_layer='sigmoid', print_frequency=1000):
    '''
    Predict PHOCs from the given PHOCNet for a list of word images.

    @param phocnet: caffe.Net
        A pretrained PHOCNet. The first layer of the PHOCNet must be an InputLayer
        (no LMDB or MemoryDataLayers)
    @param word_img_list: list of ndarrays
        A list of word images for which to predict the PHOCs.
        Every image in the list has to be a single channel gray-scale or binary
        ndarray in the range from 0 (black) to 255 (white).
    @param min_img_width_height: int
        The minimum height or width of an image to be passed to the PHOCNet.
        If an image in the word_img_list is smaller than the supplied number
        it is automatically resized before processed by the CNN. Default: -1
        (with -1 no image is ever resized).
    @param input_layer: str
        The name of the input layer blob. Default: word_images
    @param output_layer: str
        The name of the output layer blob. Default: sigmoid
    @param print_frequency: int
        Output is generated after this amount of images has been processed by
        the PHOCNet.
    @return: 2D ndarray with one flattened net output (PHOC estimate) per row.
    '''
    output = []
    logger = logging.getLogger('NetOutput')
    logger.info('Evaluating net...')
    for idx, word_img in enumerate(word_img_list):
        # scale to correct pixel values (0 = background, 1 = text):
        # invert and normalize the 0..255 input range
        word_img = word_img.astype(np.float32)
        word_img -= 255.0
        word_img /= -255.0
        # check size; upscale whenever the smaller side is below the minimum
        # (the +1 guarantees the result ends up strictly above the threshold)
        if np.amin(word_img.shape[:2]) < min_img_width_height:
            scale = float(min_img_width_height+1)/float(np.amin(word_img.shape[:2]))
            new_shape = (int(scale*word_img.shape[0]), int(scale*word_img.shape[1]))
            word_img = resize(image=word_img, output_shape=new_shape)
        # convert to the 4D (1, 1, height, width) tensor layout Caffe expects
        word_img = word_img.reshape((1,1,) + word_img.shape).astype(np.float32)
        # reshape the PHOCNet input blob to the current image size
        phocnet.blobs[input_layer].reshape(*word_img.shape)
        phocnet.reshape()
        # forward the word image through the PHOCNet
        phocnet.blobs[input_layer].data[...] = word_img
        output.append(phocnet.forward()[output_layer].flatten())
        # progress output every print_frequency images and at the very end
        if ((idx+1)%print_frequency == 0 or (idx+1) == len(word_img_list)):
            logger.debug(' [ %*d / %d ]', len(str(len(word_img_list))), idx+1, len(word_img_list))
    return np.vstack(output)
def calc_map_from_cnn_features(solver, test_iterations, metric):
    '''
    Run the solver's test net for test_iterations forward passes (one word
    image per pass -- assumes test batch size 1) and compute the mean Average
    Precision of a Query-by-Example retrieval on the resulting net outputs.

    @param solver: caffe solver whose test_nets[0] has 'sigmoid' and 'label' blobs
    @param test_iterations: int
        Number of forward passes to run; also sizes the feature/label matrices.
    @param metric: str
        Distance metric handed through to map_from_feature_matrix.
    @return: tuple (mean_ap, ave_precs) -- mAP over queries with at least one
        relevant item, and the per-query average precisions.
    '''
    net_output = np.zeros((test_iterations, solver.test_nets[0].blobs['sigmoid'].data.flatten().shape[0]))
    labels = np.zeros(test_iterations)
    # BUGFIX: iterate exactly test_iterations times. The previous code looped
    # over solver.param.test_iter[0], which can disagree with test_iterations
    # and then overrun (IndexError) or underfill the arrays allocated above.
    for idx in xrange(test_iterations):
        # calculate the net output for the next test sample
        solver.test_nets[0].forward()
        net_output[idx] = solver.test_nets[0].blobs['sigmoid'].data.flatten()
        # assumes a single scalar label per test iteration -- TODO confirm
        labels[idx] = solver.test_nets[0].blobs['label'].data.flatten()
    # calculate mAP; drop_first removes the query itself from its result list
    _, ave_precs = map_from_feature_matrix(features=net_output, labels=labels,
                                           metric=metric, drop_first=True)
    # some queries might not have a relevant sample in the test set
    # (AP == 0) -> exclude them from the mean
    mean_ap = np.mean(ave_precs[ave_precs > 0])
    return mean_ap, ave_precs
phocnet | phocnet-master/src/phocnet/evaluation/phocnet_evaluator.py | '''
Created on Aug 29, 2016
@author: ssudholt
'''
import logging
import os
import caffe
import numpy as np
from skimage.transform import resize
from phocnet.caffe.model_proto_generator import ModelProtoGenerator
from phocnet.io.xml_io import XMLReader
from phocnet.io.context_manager import Suppressor
from phocnet.attributes.phoc import unigrams_from_word_list, build_phoc,\
get_most_common_n_grams
from phocnet.io.files import write_list
from phocnet.evaluation.retrieval import map_from_feature_matrix,\
map_from_query_test_feature_matrices
from phocnet.io import word_list
class PHOCNetEvaluation(object):
    '''
    Driver class for evaluating trained PHOCNets.

    Loads word images and annotations from READ-style XML files, predicts
    PHOC representations with a pretrained Caffe PHOCNet and runs
    Query-by-Example (QbE) and Query-by-String (QbS) retrieval evaluations.
    '''

    def __init__(self):
        # global logging setup shared by all evaluation methods
        logging_format = '[%(asctime)-19s, %(name)s] %(message)s'
        logging.basicConfig(level=logging.INFO, format=logging_format)
        self.logger = logging.getLogger(self.__class__.__name__)

    def predict_and_save_phocs(self,phocnet_bin_path, train_xml_file, test_xml_file,
                               gpu_id, debug_mode, doc_img_dir, phoc_unigram_levels,
                               deploy_proto_path, phoc_size, output_dir, no_bigrams,
                               annotation_delimiter):
        '''
        Predicts PHOCs for all words in the test XML file with a pretrained
        PHOCNet and saves them as phocs.npy in output_dir.

        NOTE(review): the phoc_size parameter is recomputed from the training
        annotation below and therefore effectively ignored. Also, unlike
        eval_qbs/eval_qbe, this method never sets the Caffe CPU/GPU mode --
        confirm whether _setup_caffe should be called here.
        '''
        self.logger.info('--- Predict and save PHOCS ---')
        train_list = self._load_word_list_from_xml(train_xml_file, doc_img_dir)
        test_list = self._load_word_list_from_xml(test_xml_file, doc_img_dir)
        # the attribute alphabet is always derived from the training annotation
        phoc_unigrams = unigrams_from_word_list(word_list=train_list, split_character=annotation_delimiter)
        phoc_size = np.sum(phoc_unigram_levels)*len(phoc_unigrams)
        if not no_bigrams:
            # 50 bigrams at 2 levels -> 100 additional attributes
            phoc_size += 100
        phocnet = self._load_pretrained_phocnet(phocnet_bin_path, gpu_id, debug_mode,
                                                deploy_proto_path, phoc_size)
        self.logger.info('Predicting PHOCs for %d test words', len(test_list))
        phocs = self._net_output_for_word_list(word_list=test_list, cnn=phocnet,
                                               suppress_caffe_output=not debug_mode)
        self._save_phocs(phocs, output_dir)

    def extract_unigrams(self, word_xml_file, doc_img_dir, annotation_delimiter):
        '''
        Extracts the PHOC unigrams from the given word XML file and writes
        them -- one "index: unigram" line each -- to phoc_unigrams.txt in the
        current working directory.
        '''
        self.logger.info('--- Extract Unigrams ---')
        self.logger.info('Loading XML file from: %s...', word_xml_file)
        xml_reader = XMLReader(make_lower_case=True)
        # NOTE: the local name word_list shadows the phocnet.io.word_list
        # module imported at file level (harmless inside this method)
        dataset_name, word_list = xml_reader.load_word_list_from_READ_xml(xml_filename=word_xml_file, img_dir=doc_img_dir)
        self.logger.info('Found dataset: %s', dataset_name)
        self.logger.info('Saving unigrams to current working directory...')
        phoc_unigrams = unigrams_from_word_list(word_list=word_list, split_character=annotation_delimiter)
        idx_list = ['%d: %s' % elem for elem in enumerate(phoc_unigrams)]
        write_list(file_path='phoc_unigrams.txt', line_list=idx_list)

    def eval_qbs(self, phocnet_bin_path, train_xml_file, test_xml_file, phoc_unigram_levels,
                 gpu_id, debug_mode, doc_img_dir, deploy_proto_path, metric,
                 annotation_delimiter, no_bigrams):
        '''
        Query-by-String evaluation: query PHOCs are synthesized directly from
        the unique, sorted test transcriptions while the test features are
        predicted by the PHOCNet. Logs the resulting mAP (in percent).
        '''
        self.logger.info('--- Query-by-String Evaluation ---')
        train_list = self._load_word_list_from_xml(train_xml_file, doc_img_dir)
        test_list = self._load_word_list_from_xml(test_xml_file, doc_img_dir)
        phoc_unigrams = unigrams_from_word_list(word_list=train_list, split_character=annotation_delimiter)
        phoc_size = np.sum(phoc_unigram_levels)*len(phoc_unigrams)
        if no_bigrams:
            n_bigrams = 0
            bigrams = None
            bigram_levels = None
        else:
            # use the 50 most frequent bigrams of the training transcriptions
            n_bigrams = 50
            bigrams = get_most_common_n_grams(words=[word.get_transcription()
                                                     for word in train_list],
                                              num_results=n_bigrams, n=2)
            bigram_levels = [2]
            phoc_size += 100
        # Set CPU/GPU mode
        if gpu_id != None:
            self.logger.info('Setting Caffe to GPU mode using device %d', gpu_id)
            caffe.set_mode_gpu()
            caffe.set_device(gpu_id)
        else:
            self.logger.info('Setting Caffe to CPU mode')
            caffe.set_mode_cpu()
        phocnet = self._load_pretrained_phocnet(phocnet_bin_path, gpu_id, debug_mode,
                                                deploy_proto_path, phoc_size)
        self.logger.info('Predicting PHOCs for %d test words', len(test_list))
        test_phocs = self._net_output_for_word_list(word_list=test_list, cnn=phocnet,
                                                    suppress_caffe_output=not debug_mode)
        test_strings = [word.get_transcription() for word in test_list]
        # one query per distinct transcription
        qry_strings = list(sorted(set(test_strings)))
        qry_phocs = build_phoc(words=qry_strings, phoc_unigrams=phoc_unigrams, unigram_levels=phoc_unigram_levels,
                               split_character=annotation_delimiter, phoc_bigrams=bigrams, bigram_levels=bigram_levels)
        self.logger.info('Calculating mAP...')
        # drop_first=False: the synthetic query is not an element of the test set
        mean_ap, _ = map_from_query_test_feature_matrices(query_features=qry_phocs, test_features=test_phocs, query_labels=qry_strings,
                                                          test_labels=test_strings, metric=metric, drop_first=False)
        self.logger.info('mAP: %f', mean_ap*100)

    def eval_qbe(self, phocnet_bin_path, train_xml_file, test_xml_file,
                 gpu_id, debug_mode, doc_img_dir, annotation_delimiter,
                 deploy_proto_path, metric, phoc_unigram_levels, no_bigrams):
        '''
        Query-by-Example evaluation: every predicted test PHOC is used as a
        query against all other test PHOCs. Logs the resulting mAP (percent),
        excluding queries without any relevant item.
        '''
        self.logger.info('--- Query-by-Example Evaluation ---')
        train_list = self._load_word_list_from_xml(train_xml_file, doc_img_dir)
        test_list = self._load_word_list_from_xml(test_xml_file, doc_img_dir)
        phoc_unigrams = unigrams_from_word_list(word_list=train_list, split_character=annotation_delimiter)
        phoc_size = np.sum(phoc_unigram_levels)*len(phoc_unigrams)
        if not no_bigrams:
            # 50 bigrams at 2 levels -> 100 additional attributes
            phoc_size += 100
        # Set CPU/GPU mode
        if gpu_id != None:
            self.logger.info('Setting Caffe to GPU mode using device %d', gpu_id)
            caffe.set_mode_gpu()
            caffe.set_device(gpu_id)
        else:
            self.logger.info('Setting Caffe to CPU mode')
            caffe.set_mode_cpu()
        phocnet = self._load_pretrained_phocnet(phocnet_bin_path, gpu_id, debug_mode,
                                                deploy_proto_path, phoc_size)
        self.logger.info('Predicting PHOCs for %d test words', len(test_list))
        phocs = self._net_output_for_word_list(word_list=test_list, cnn=phocnet,
                                               suppress_caffe_output=not debug_mode)
        self.logger.info('Calculating mAP...')
        # drop_first removes the query itself from its own retrieval list
        _, avg_precs = map_from_feature_matrix(features=phocs, labels=[word.get_transcription() for word in test_list],
                                               metric=metric, drop_first=True)
        self.logger.info('mAP: %f', np.mean(avg_precs[avg_precs > 0])*100)

    def _net_output_for_word_list(self, word_list, cnn,
                                  min_img_width_height=26, input_layer='word_images',
                                  output_layer='sigmoid', suppress_caffe_output=False):
        '''
        Forwards every word image of word_list through the given CNN one by
        one and returns the stacked, flattened output_layer activations
        (one row per word image).
        '''
        output = []
        for idx, word in enumerate(word_list):
            # scale to correct pixel values (0 = background, 1 = text)
            word_img = word.get_word_image().astype(np.float32)
            word_img -= 255.0
            word_img /= -255.0
            # check size; upscale when the smaller side is below the minimum
            if np.amin(word_img.shape[:2]) < min_img_width_height:
                scale = float(min_img_width_height+1)/float(np.amin(word_img.shape[:2]))
                new_shape = (int(scale*word_img.shape[0]), int(scale*word_img.shape[1]))
                word_img = resize(image=word_img, output_shape=new_shape)
            # 4D (1, 1, height, width) tensor layout expected by Caffe
            word_img = word_img.reshape((1,1,) + word_img.shape).astype(np.float32)
            # reshape the PHOCNet input blob to the current image size
            cnn.blobs[input_layer].reshape(*word_img.shape)
            cnn.reshape()
            # forward the word image through the PHOCNet
            cnn.blobs[input_layer].data[...] = word_img
            if suppress_caffe_output:
                # swallow Caffe's stderr chatter during the forward pass
                with Suppressor():
                    output.append(cnn.forward()[output_layer].flatten())
            else:
                output.append(cnn.forward()[output_layer].flatten())
            # progress output every 100 images and at the very end
            if ((idx+1)%100 == 0 or (idx+1) == len(word_list)):
                self.logger.info(' [ %*d / %d ]', len(str(len(word_list))), idx+1, len(word_list))
        return np.vstack(output)

    def _load_pretrained_phocnet(self, phocnet_bin_path, gpu_id, debug_mode, deploy_proto_path, phoc_size):
        '''
        Generates a deploy prototxt for the given PHOC size, writes it to
        deploy_proto_path and loads the pretrained weights from
        phocnet_bin_path into a caffe.Net in TEST phase.
        '''
        # create a deploy proto file
        self.logger.info('Saving PHOCNet deploy proto file to %s...', deploy_proto_path)
        mpg = ModelProtoGenerator(initialization='msra', use_cudnn_engine=gpu_id is not None)
        proto = mpg.get_phocnet(word_image_lmdb_path=None, phoc_lmdb_path=None, phoc_size=phoc_size, generate_deploy=True)
        with open(deploy_proto_path, 'w') as proto_file:
            proto_file.write(str(proto))
        # create the Caffe PHOCNet object
        self.logger.info('Creating PHOCNet...')
        if debug_mode:
            phocnet = caffe.Net(deploy_proto_path, phocnet_bin_path, caffe.TEST)
        else:
            # suppress Caffe's verbose net-construction output
            with Suppressor():
                phocnet = caffe.Net(deploy_proto_path, phocnet_bin_path, caffe.TEST)
        return phocnet

    def _load_word_list_from_xml(self, word_xml_file, doc_img_dir):
        '''
        Loads a READ-style XML annotation file and returns its word list
        (the detected dataset name is only logged).
        '''
        self.logger.info('Loading XML file from: %s...', word_xml_file)
        dataset_name, word_list = XMLReader().load_word_list_from_READ_xml(xml_filename=word_xml_file, img_dir=doc_img_dir)
        self.logger.info('Found dataset: %s', dataset_name)
        return word_list

    def _setup_caffe(self, gpu_id):
        '''
        Puts Caffe into GPU mode on the given device, or CPU mode when
        gpu_id is None.
        '''
        if gpu_id != None:
            self.logger.info('Setting Caffe to GPU mode using device %d', gpu_id)
            caffe.set_mode_gpu()
            caffe.set_device(gpu_id)
        else:
            self.logger.info('Setting Caffe to CPU mode')
            caffe.set_mode_cpu()

    def _predict_phocs(self, phocnet_bin_path, word_xml_file, gpu_id, debug_mode, doc_img_dir,
                       deploy_proto_path, phoc_size):
        '''
        Convenience helper: sets up Caffe, loads the word list and the
        pretrained PHOCNet, and returns the predicted PHOCs for all words.
        '''
        self._setup_caffe(gpu_id)
        # load everything
        word_list = self._load_word_list_from_xml(word_xml_file, doc_img_dir)
        phocnet = self._load_pretrained_phocnet(phocnet_bin_path, gpu_id, debug_mode,
                                                deploy_proto_path, phoc_size)
        # compute the PHOCs
        self.logger.info('Predicting PHOCs...')
        phocs = self._net_output_for_word_list(word_list=word_list, cnn=phocnet,
                                               suppress_caffe_output=not debug_mode)
        return phocs

    def _predict_phocs_for_sliding_window(self, net, word, frame_width, step_size, phoc_size,
                                          padding=True, input_layer_name='word_images', output_layer_name='sigmoid'):
        '''
        Slides a frame of frame_width pixels in steps of step_size over the
        given word image and predicts one PHOC per frame. Returns a matrix
        of shape (num. frames, phoc_size).
        '''
        # load and transform image for PHOCNet
        img = word.get_word_image().astype(np.float32)
        img -= 255
        img /= -255
        # pad if requested, half a frame on the left and right
        # NOTE(review): frame_width/2 relies on Python 2 integer division --
        # under Python 3 this yields a float and np.pad would reject it
        if padding:
            img = np.pad(array=img, pad_width=((0,0), (frame_width/2,frame_width/2)), mode='constant')
        # determine the output mat shape and init the mat
        phoc_mat = np.zeros((len(xrange(0, img.shape[1]-frame_width, step_size)), phoc_size), dtype=np.float32)
        # push every frame through the net
        for idx, offset in enumerate(xrange(0, img.shape[1]-frame_width, step_size)):
            frame = img[:, offset:offset+frame_width]
            # convert to 4D array for Caffe
            frame = frame.reshape((1,1,) + frame.shape)
            # push the frame through the net
            net.blobs[input_layer_name].reshape(*frame.shape)
            net.reshape()
            net.blobs[input_layer_name].data[...] = frame
            phoc = net.forward()[output_layer_name].flatten()
            phoc_mat[idx] = phoc
        return phoc_mat

    def _save_phocs(self, phocs, output_dir):
        '''
        Persists the given PHOC matrix as phocs.npy in output_dir.
        '''
        self.logger.info('Saving PHOCs as .npy-file...')
        np.save(os.path.join(output_dir, 'phocs.npy'), phocs)
        self.logger.info('Finished')
| 12,724 | 51.80083 | 136 | py |
phocnet | phocnet-master/src/phocnet/training/phocnet_trainer.py | '''
Created on Aug 29, 2016
@author: ssudholt
'''
import logging
import os
import time
import caffe
import numpy as np
from skimage.transform import resize
from phocnet.attributes.phoc import build_phoc, unigrams_from_word_list,\
get_most_common_n_grams
from phocnet.caffe.model_proto_generator import ModelProtoGenerator
from phocnet.caffe.solver_proto_generator import generate_solver_proto
from phocnet.caffe.lmdb_creator import CaffeLMDBCreator
from phocnet.caffe.augmentation import AugmentationCreator
from phocnet.evaluation.time import convert_secs2HHMMSS
from phocnet.evaluation.cnn import calc_map_from_cnn_features
from phocnet.io.xml_io import XMLReader
from phocnet.io.files import save_prototxt, write_list
from phocnet.io.context_manager import Suppressor
from phocnet.numpy.numpy_helper import NumpyHelper
class PHOCNetTrainer(object):
'''
Driver class for all PHOCNet experiments
'''
def __init__(self, doc_img_dir, train_annotation_file, test_annotation_file,
             proto_dir, n_train_images, lmdb_dir, save_net_dir,
             phoc_unigram_levels, recreate_lmdbs, gpu_id, learning_rate, momentum,
             weight_decay, batch_size, test_interval, display, max_iter, step_size,
             gamma, debug_mode, metric, annotation_delimiter, use_lower_case_only,
             use_bigrams):
    '''
    The constructor

    Args:
        doc_img_dir (str): the location of the document images for the given dataset
        train_annotation_file (str): the absolute path to the READ-style annotation file for the training samples
        test_annotation_file (str): the absolute path to the READ-style annotation file for the test samples
        proto_dir (str): absolute path where to save the Caffe protobuffer files
        n_train_images (int): the total number of training images to be used
        lmdb_dir (str): directory to save the LMDB files into
        save_net_dir (str): directory where to save the trained PHOCNet
        phoc_unigram_levels (list of int): the list of unigram levels
        recreate_lmdbs (bool): whether to delete and recompute existing LMDBs
        debug_mode (bool): flag indicating to run this experiment in debug mode
        metric (str): metric for comparing the PHOCNet output during test
        annotation_delimiter (str): delimiter for the annotation in the XML files
        use_lower_case_only (bool): convert annotation to lower case before creating LMDBs
        use_bigrams (bool): if true, the PHOC predicted from the net contains bigrams
        gpu_id (int): the ID of the GPU to use (None for CPU mode)
        learning_rate (float): the learning rate to be used in training
        momentum (float): the SGD momentum to be used in training
        weight_decay (float): the SGD weight decay to be used in training
        batch_size (int): the number of images to be used in a mini batch
        test_interval (int): the number of steps after which to evaluate the PHOCNet during training
        display (int): the number of iterations after which to show the training net loss
        max_iter (int): the maximum number of SGD iterations
        step_size (int): the number of iterations after which to reduce the learning rate
        gamma (float): the factor to multiply the step size with after step_size iterations
    '''
    # store the experiment parameters
    self.doc_img_dir = doc_img_dir
    self.train_annotation_file = train_annotation_file
    self.test_annotation_file = test_annotation_file
    self.proto_dir = proto_dir
    self.n_train_images = n_train_images
    self.lmdb_dir = lmdb_dir
    self.save_net_dir = save_net_dir
    self.phoc_unigram_levels = phoc_unigram_levels
    self.recreate_lmdbs = recreate_lmdbs
    self.debug_mode = debug_mode
    self.metric = metric
    self.annotation_delimiter = annotation_delimiter
    self.use_lower_case_only = use_lower_case_only
    self.use_bigrams = use_bigrams
    # store the Caffe parameters
    self.gpu_id = gpu_id
    self.learning_rate = learning_rate
    self.momentum = momentum
    self.weight_decay = weight_decay
    self.batch_size = batch_size
    self.test_interval = test_interval
    self.display = display
    self.max_iter = max_iter
    self.step_size = step_size
    self.gamma = gamma
    # misc members for training/evaluation
    # solver_mode string is consumed by generate_solver_proto later on
    if self.gpu_id is not None:
        self.solver_mode = 'GPU'
    else:
        self.solver_mode = 'CPU'
    # minimum width/height an image may have before being upscaled
    self.min_image_width_height = 26
    # filled in once training starts (see train_phocnet / pretrain_callback)
    self.epoch_map = None
    self.test_iter = None
    self.dataset_name = None
    # set up the logging
    logging_format = '[%(asctime)-19s, %(name)s] %(message)s'
    if self.debug_mode:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO
    logging.basicConfig(level=logging_level, format=logging_format)
    self.logger = logging.getLogger(self.__class__.__name__)
def train_phocnet(self):
    '''
    Runs the complete PHOCNet training pipeline:
    1. loads the annotation, builds PHOC labels and (re)creates the
       word-image/PHOC LMDBs if required,
    2. generates and saves the train/test/solver prototxt files,
    3. runs SGD training (see _run_sgd).
    '''
    self.logger.info('--- Running PHOCNet Training ---')
    # --- Step 1: check if we need to create the LMDBs
    # load the word lists
    xml_reader = XMLReader(make_lower_case=self.use_lower_case_only)
    self.dataset_name, train_list, test_list = xml_reader.load_train_test_xml(train_xml_path=self.train_annotation_file,
                                                                              test_xml_path=self.test_annotation_file,
                                                                              img_dir=self.doc_img_dir)
    phoc_unigrams = unigrams_from_word_list(word_list=train_list, split_character=self.annotation_delimiter)
    self.logger.info('PHOC unigrams: %s', ' '.join(phoc_unigrams))
    # during testing every word image is evaluated once per epoch
    self.test_iter = len(test_list)
    self.logger.info('Using dataset \'%s\'', self.dataset_name)
    # check if we need to create LMDBs; the prefix encodes dataset,
    # number of training images and unigram levels
    lmdb_prefix = '%s_nti%d_pul%s' % (self.dataset_name, self.n_train_images,
                                      '-'.join([str(elem) for elem in self.phoc_unigram_levels]))
    train_word_images_lmdb_path = os.path.join(self.lmdb_dir, '%s_train_word_images_lmdb' % lmdb_prefix)
    train_phoc_lmdb_path = os.path.join(self.lmdb_dir, '%s_train_phocs_lmdb' % lmdb_prefix)
    test_word_images_lmdb_path = os.path.join(self.lmdb_dir, '%s_test_word_images_lmdb' % lmdb_prefix)
    test_phoc_lmdb_path = os.path.join(self.lmdb_dir, '%s_test_phocs_lmdb' % lmdb_prefix)
    lmdbs_exist = (os.path.exists(train_word_images_lmdb_path),
                   os.path.exists(train_phoc_lmdb_path),
                   os.path.exists(test_word_images_lmdb_path),
                   os.path.exists(test_phoc_lmdb_path))
    if self.use_bigrams:
        # the 50 most frequent bigrams of the training transcriptions
        n_bigrams = 50
        bigrams = get_most_common_n_grams(words=[word.get_transcription()
                                                 for word in train_list],
                                          num_results=n_bigrams, n=2)
        bigram_levels = [2]
    else:
        n_bigrams = 0
        bigrams = None
        bigram_levels = None
    # only build PHOCs and LMDBs when missing or a rebuild was requested
    if not np.all(lmdbs_exist) or self.recreate_lmdbs:
        self.logger.info('Creating LMDBs...')
        train_phocs = build_phoc(words=[word.get_transcription() for word in train_list],
                                 phoc_unigrams=phoc_unigrams, unigram_levels=self.phoc_unigram_levels,
                                 phoc_bigrams=bigrams, bigram_levels=bigram_levels,
                                 split_character=self.annotation_delimiter,
                                 on_unknown_unigram='warn')
        test_phocs = build_phoc(words=[word.get_transcription() for word in test_list],
                                phoc_unigrams=phoc_unigrams, unigram_levels=self.phoc_unigram_levels,
                                phoc_bigrams=bigrams, bigram_levels=bigram_levels,
                                split_character=self.annotation_delimiter,
                                on_unknown_unigram='warn')
        self._create_train_test_phocs_lmdbs(train_list=train_list, train_phocs=train_phocs,
                                            test_list=test_list, test_phocs=test_phocs,
                                            train_word_images_lmdb_path=train_word_images_lmdb_path,
                                            train_phoc_lmdb_path=train_phoc_lmdb_path,
                                            test_word_images_lmdb_path=test_word_images_lmdb_path,
                                            test_phoc_lmdb_path=test_phoc_lmdb_path)
    else:
        self.logger.info('Found LMDBs...')
    # --- Step 2: create the proto files
    self.logger.info('Saving proto files...')
    # prepare the output paths
    train_proto_path = os.path.join(self.proto_dir, 'train_phocnet_%s.prototxt' % self.dataset_name)
    test_proto_path = os.path.join(self.proto_dir, 'test_phocnet_%s.prototxt' % self.dataset_name)
    solver_proto_path = os.path.join(self.proto_dir, 'solver_phocnet_%s.prototxt' % self.dataset_name)
    # generate the proto files; the output layer size is the total number
    # of PHOC attributes (unigram levels plus optional bigram levels)
    n_attributes = np.sum(self.phoc_unigram_levels)*len(phoc_unigrams)
    if self.use_bigrams:
        n_attributes += np.sum(bigram_levels)*n_bigrams
    mpg = ModelProtoGenerator(initialization='msra', use_cudnn_engine=self.gpu_id is not None)
    train_proto = mpg.get_phocnet(word_image_lmdb_path=train_word_images_lmdb_path, phoc_lmdb_path=train_phoc_lmdb_path,
                                  phoc_size=n_attributes,
                                  generate_deploy=False)
    test_proto = mpg.get_phocnet(word_image_lmdb_path=test_word_images_lmdb_path, phoc_lmdb_path=test_phoc_lmdb_path,
                                 phoc_size=n_attributes, generate_deploy=False)
    solver_proto = generate_solver_proto(train_net=train_proto_path, test_net=test_proto_path,
                                         base_lr=self.learning_rate, momentum=self.momentum, display=self.display,
                                         lr_policy='step', gamma=self.gamma, stepsize=self.step_size,
                                         solver_mode=self.solver_mode, iter_size=self.batch_size, max_iter=self.max_iter,
                                         average_loss=self.display, test_iter=self.test_iter, test_interval=self.test_interval,
                                         weight_decay=self.weight_decay)
    # save the proto files
    save_prototxt(file_path=train_proto_path, proto_object=train_proto, header_comment='Train PHOCNet %s' % self.dataset_name)
    save_prototxt(file_path=test_proto_path, proto_object=test_proto, header_comment='Test PHOCNet %s' % self.dataset_name)
    save_prototxt(file_path=solver_proto_path, proto_object=solver_proto, header_comment='Solver PHOCNet %s' % self.dataset_name)
    # --- Step 3: train the PHOCNet
    self.logger.info('Starting SGD...')
    self._run_sgd(solver_proto_path=solver_proto_path)
def pretrain_callback(self, solver):
    '''
    Method called before starting the training.

    Allocates the per-epoch mAP array and evaluates the untrained net
    once, storing the result in self.epoch_map[0].

    Args:
        solver: the Caffe solver whose test net is evaluated
    '''
    # init numpy arrays for mAP results; use floor division so the count
    # stays an int under Python 3 as well (np.zeros requires an int shape,
    # identical to Python 2's '/' on positive ints)
    epochs = self.max_iter // self.test_interval
    self.epoch_map = np.zeros(epochs + 1)
    self.epoch_map[0], _ = calc_map_from_cnn_features(solver=solver,
                                                      test_iterations=self.test_iter,
                                                      metric=self.metric)
    self.logger.info('mAP: %f', self.epoch_map[0])
def test_callback(self, solver, epoch):
    '''
    Method called every self.test_interval iterations during training:
    evaluates the current net state and records the mAP for this epoch
    in self.epoch_map.
    '''
    steps_done = epoch*solver.param.test_interval
    self.logger.info('Evaluating CNN after %d steps:', steps_done)
    mean_ap, _ = calc_map_from_cnn_features(solver=solver,
                                            test_iterations=self.test_iter,
                                            metric=self.metric)
    # slot 0 holds the pre-training mAP, hence the +1 offset
    self.epoch_map[epoch+1] = mean_ap
    self.logger.info('mAP: %f', self.epoch_map[epoch+1])
def posttrain_callback(self, solver):
'''
Method called after finishing the training
'''
# if self.save_net is not None, save the PHOCNet to the desired location
if self.save_net_dir is not None:
filename = 'phocnet_%s_nti%d_pul%s.binaryproto' % (self.dataset_name, self.n_train_images,
'-'.join([str(elem) for elem in self.phoc_unigram_levels]))
solver.net.save(os.path.join(self.save_net_dir, filename))
def _create_train_test_phocs_lmdbs(self, train_list, train_phocs, test_list, test_phocs,
                                   train_word_images_lmdb_path, train_phoc_lmdb_path,
                                   test_word_images_lmdb_path, test_phoc_lmdb_path):
    '''
    Creates the four LMDBs used for training and testing: paired word-image
    and PHOC databases for the training set (balanced to roughly
    self.n_train_images entries via affine-transform augmentation) and for
    the test set (one entry per test word).
    '''
    start_time = time.time()
    # --- TRAIN IMAGES
    # find all unique transcriptions and the label map...
    _, transcription_map = self.__get_unique_transcriptions_and_labelmap(train_list, test_list)
    # get the numeric training labels plus a random order to insert them into
    # create the numeric labels and counts
    train_labels = np.array([transcription_map[word.get_transcription()] for word in train_list])
    unique_train_labels, counts = np.unique(train_labels, return_counts=True)
    # find the number of images that should be present for training per class
    # NOTE(review): relies on Python 2 integer division
    n_images_per_class = self.n_train_images/unique_train_labels.shape[0] + 1
    # create randomly shuffled numbers for later use as keys; popping them
    # one by one yields a random insertion order into the LMDB
    random_indices = list(xrange(n_images_per_class*unique_train_labels.shape[0]))
    np.random.shuffle(random_indices)
    # set random limits for affine transform (max. percentage a destination
    # point may deviate from its reference point during augmentation)
    random_limits = (0.8, 1.1)
    n_rescales = 0
    # loading should be done in gray scale
    load_grayscale = True
    # create train LMDB
    self.logger.info('Creating Training LMDB (%d total word images)', len(random_indices))
    lmdb_creator = CaffeLMDBCreator()
    lmdb_creator.open_dual_lmdb_for_write(image_lmdb_path=train_word_images_lmdb_path,
                                          additional_lmdb_path=train_phoc_lmdb_path,
                                          create=True)
    for cur_label, count in zip(unique_train_labels, counts):
        # find the words for the current class label and the
        # corresponding PHOC
        cur_word_indices = np.where(train_labels == cur_label)[0]
        cur_transcription = train_list[cur_word_indices[0]].get_transcription()
        cur_phoc = NumpyHelper.get_unique_rows(train_phocs[cur_word_indices])
        # unique rows should only return one specific PHOC
        if cur_phoc.shape[0] != 1:
            raise ValueError('Extracted more than one PHOC for label %d' % cur_label)
        cur_phoc = np.atleast_3d(cur_phoc).transpose((2,0,1)).astype(np.uint8)
        # if there are too many images for the current word image class,
        # draw from them and cut the rest off
        if count > n_images_per_class:
            np.random.shuffle(cur_word_indices)
            cur_word_indices = cur_word_indices[:n_images_per_class]
        # load the word images
        cur_word_images = []
        for idx in cur_word_indices:
            img = train_list[idx].get_word_image(gray_scale=load_grayscale)
            # check image size (upscale images below the minimum size)
            img, resized = self.__check_size(img)
            n_rescales += int(resized)
            # append to the current word images and
            # put into LMDB; the key combines a random running number
            # (for shuffled order) with the ASCII-safe transcription
            cur_word_images.append(img)
            key = '%s_%s' % (str(random_indices.pop()).zfill(8), cur_transcription.encode('ascii', 'ignore'))
            lmdb_creator.put_dual(img_mat=np.atleast_3d(img).transpose((2,0,1)).astype(np.uint8),
                                  additional_mat=cur_phoc, label=cur_label, key=key)
        # extract the extra augmented images
        # the random limits are the maximum percentage
        # that the destination point may deviate from the reference point
        # in the affine transform
        if len(cur_word_images) < n_images_per_class:
            # create the warped images from randomly drawn originals
            inds = np.random.randint(len(cur_word_images), size=n_images_per_class - len(cur_word_images))
            for ind in inds:
                aug_img = AugmentationCreator.create_affine_transform_augmentation(img=cur_word_images[ind], random_limits=random_limits)
                aug_img = np.atleast_3d(aug_img).transpose((2,0,1)).astype(np.uint8)
                key = '%s_%s' % (str(random_indices.pop()).zfill(8), cur_transcription.encode('ascii', 'ignore'))
                lmdb_creator.put_dual(img_mat=aug_img, additional_mat=cur_phoc, label=cur_label, key=key)
    # wrap up training LMDB creation; every random key must have been used
    if len(random_indices) != 0:
        raise ValueError('Random Indices are not empty, something went wrong during training LMDB creation')
    lmdb_creator.finish_creation()
    # write the label map to the LMDBs as well
    write_list(file_path=train_word_images_lmdb_path + '/label_map.txt',
               line_list=['%s %s' % elem for elem in transcription_map.items()])
    write_list(file_path=train_phoc_lmdb_path + '/label_map.txt',
               line_list=['%s %s' % elem for elem in transcription_map.items()])
    self.logger.info('Finished processing train words (took %s, %d rescales)', convert_secs2HHMMSS(time.time() - start_time), n_rescales)
    # --- TEST IMAGES
    self.logger.info('Creating Test LMDB (%d total word images)', len(test_list))
    n_rescales = 0
    start_time = time.time()
    lmdb_creator.open_dual_lmdb_for_write(image_lmdb_path=test_word_images_lmdb_path, additional_lmdb_path=test_phoc_lmdb_path,
                                          create=True, label_map=transcription_map)
    for word, phoc in zip(test_list, test_phocs):
        # extend the label map for transcriptions unseen so far
        if word.get_transcription() not in transcription_map:
            transcription_map[word.get_transcription()] = len(transcription_map)
        img = word.get_word_image(gray_scale=load_grayscale)
        img, resized = self.__check_size(img)
        if img is None:
            # degenerate images (zero width or height) are skipped
            self.logger.warning('!WARNING! Found image with 0 width or height!')
        else:
            n_rescales += int(resized)
            img = np.atleast_3d(img).transpose((2,0,1)).astype(np.uint8)
            phoc_3d = np.atleast_3d(phoc).transpose((2,0,1)).astype(np.uint8)
            lmdb_creator.put_dual(img_mat=img, additional_mat=phoc_3d, label=transcription_map[word.get_transcription()])
    lmdb_creator.finish_creation()
    write_list(file_path=test_word_images_lmdb_path + '/label_map.txt',
               line_list=['%s %s' % elem for elem in transcription_map.items()])
    write_list(file_path=test_phoc_lmdb_path + '/label_map.txt',
               line_list=['%s %s' % elem for elem in transcription_map.items()])
    self.logger.info('Finished processing test words (took %s, %d rescales)', convert_secs2HHMMSS(time.time() - start_time), n_rescales)
def __check_size(self, img):
'''
checks if the image accords to the minimum size requirements
Returns:
tuple (img, bool):
img: the original image if the image size was ok, a resized image otherwise
bool: flag indicating whether the image was resized
'''
if np.amin(img.shape[:2]) < self.min_image_width_height:
if np.amin(img.shape[:2]) == 0:
return None, False
scale = float(self.min_image_width_height+1)/float(np.amin(img.shape[:2]))
new_shape = (int(scale*img.shape[0]), int(scale*img.shape[1]))
new_img = resize(image=img, output_shape=new_shape)
return new_img, True
else:
return img, False
def __get_unique_transcriptions_and_labelmap(self, train_list, test_list):
'''
Returns a list of unique transcriptions for the given train and test lists
and creates a dictionary mapping transcriptions to numeric class labels.
'''
unique_transcriptions = [word.get_transcription() for word in train_list]
unique_transcriptions.extend([word.get_transcription() for word in test_list])
unique_transcriptions = list(set(unique_transcriptions))
transcription_map = dict((k,v) for v,k in enumerate(unique_transcriptions))
return unique_transcriptions, transcription_map
    def _run_sgd(self, solver_proto_path):
        '''
        Starts the SGD training of the PHOCNet

        Args:
            solver_proto_path (str): the absolute path to the solver protobuffer file to use

        Returns:
            the caffe.SGDSolver instance after training has finished
        '''
        # Set CPU/GPU mode for solver training
        if self.gpu_id != None:
            self.logger.info('Setting Caffe to GPU mode using device %d', self.gpu_id)
            caffe.set_mode_gpu()
            caffe.set_device(self.gpu_id)
        else:
            self.logger.info('Setting Caffe to CPU mode')
            caffe.set_mode_cpu()
        # Create SGD solver
        self.logger.info('Using solver protofile at %s', solver_proto_path)
        solver = self.__get_solver(solver_proto_path)
        # number of train/test cycles; integer division is intended here,
        # remaining iterations are handled after the loop below
        epochs = self.max_iter/self.test_interval
        # run test on the net before training
        self.logger.info('Running pre-train evaluation')
        self.pretrain_callback(solver=solver)
        # run the training
        self.logger.info('Finished Setup, running SGD')
        for epoch in xrange(epochs):
            # run training until we want to test
            self.__solver_step(solver, self.test_interval)
            # run test callback after test_interval iterations
            self.logger.debug('Running test evaluation')
            self.test_callback(solver=solver, epoch=epoch)
        # if we have iterations left to compute, do so
        iters_left = self.max_iter % self.test_interval
        if iters_left > 0:
            self.__solver_step(solver, iters_left)
        # run post train callback
        self.logger.info('Running post-train evaluation')
        self.posttrain_callback(solver=solver)
        # return the solver
        return solver
def __solver_step(self, solver, steps):
'''
Runs Caffe solver suppressing Caffe output if necessary
'''
if not self.debug_mode:
with Suppressor():
solver.step(steps)
else:
solver.step(steps)
def __get_solver(self, solver_proto_path):
'''
Returns a caffe.SGDSolver for the given protofile path,
ignoring Caffe command line chatter if debug mode is not set
to True.
'''
if not self.debug_mode:
# disable Caffe init chatter when not in debug
with Suppressor():
return caffe.SGDSolver(solver_proto_path)
else:
return caffe.SGDSolver(solver_proto_path) | 24,351 | 53.970655 | 141 | py |
'''
Created on Jul 9, 2016
@author: ssudholt
'''
from caffe.proto import caffe_pb2
from google.protobuf.internal.containers import RepeatedScalarFieldContainer
def generate_solver_proto(**kwargs):
    '''
    Creates a caffe_pb2.SolverParameter message from the supplied keyword
    arguments. Keyword arguments whose value is None are ignored.

    Returns:
        caffe_pb2.SolverParameter: the filled solver parameter message

    Raises:
        ValueError: if a keyword does not name a valid SolverParameter field
    '''
    sp = caffe_pb2.SolverParameter()
    for k,v in kwargs.iteritems():
        if not hasattr(sp, k):
            # bug fix: the offending argument name was never substituted into
            # the message (missing '% k'), yielding a literal '%s' in the error
            raise ValueError('The argument \'%s\' is not part of the Caffe solver parameters!' % k)
        elif v is not None:
            elem = getattr(sp, k)
            if type(elem) == RepeatedScalarFieldContainer:
                # repeated protobuf fields cannot be assigned, only appended to
                elem.append(v)
            elif k == 'solver_mode':
                # translate the mode string (e.g. 'GPU') into its enum number
                setattr(sp, k, sp.SolverMode.DESCRIPTOR.values_by_name[v].number)
            else:
                setattr(sp, k, v)
    return sp
# pylint: disable=too-many-arguments
'''
Created on Jul 8, 2016
@author: ssudholt
'''
import logging
from caffe import NetSpec
from caffe import layers as L
from caffe import params as P
from caffe.io import caffe_pb2
import argparse
class ModelProtoGenerator(object):
    '''
    Class for generating Caffe CNN models through protobuffer files.
    '''
    def __init__(self, initialization='msra', use_cudnn_engine=False):
        '''
        Args:
            initialization (str): the Caffe weight filler type to use (e.g. 'msra')
            use_cudnn_engine (bool): whether convolution/SPP layers use the CuDNN engine
        '''
        # set up the engines
        self.conv_engine = None
        self.spp_engine = None
        if use_cudnn_engine:
            self.conv_engine = P.Convolution.CUDNN
            self.spp_engine = P.SPP.CUDNN
        else:
            self.conv_engine = P.Convolution.CAFFE
            self.spp_engine = P.SPP.CAFFE
        # numeric ids of the TRAIN/TEST phases for layer include-rules
        self.phase_train = caffe_pb2.Phase.DESCRIPTOR.values_by_name['TRAIN'].number
        self.phase_test = caffe_pb2.Phase.DESCRIPTOR.values_by_name['TEST'].number
        self.initialization = initialization
        self.logger = logging.getLogger(self.__class__.__name__)

    def set_phocnet_data(self, n, generate_deploy, word_image_lmdb_path, phoc_lmdb_path):
        '''
        Attaches the input layers to the given NetSpec: a plain Input layer for
        deploy nets, or LMDB data layers for word images and PHOC labels otherwise.
        '''
        if generate_deploy:
            n.word_images = L.Input(shape=dict(dim=[1, 1, 100, 250]))
        else:
            # images are inverted and scaled to [0, 1] (black = 1) by the transform
            n.word_images, n.label = L.Data(batch_size=1, backend=P.Data.LMDB, source=word_image_lmdb_path, prefetch=20,
                                            transform_param=dict(mean_value=255, scale=-1. / 255,), ntop=2)
            n.phocs, n.label_phocs = L.Data(batch_size=1, backend=P.Data.LMDB, source=phoc_lmdb_path, prefetch=20,
                                            ntop=2)

    def set_phocnet_conv_body(self, n, relu_in_place):
        '''
        Attaches the PHOCNet convolutional body (conv1_1 through conv4_3, with
        two max pooling stages) to the given NetSpec.
        '''
        n.conv1_1, n.relu1_1 = self.conv_relu(n.word_images, nout=64, relu_in_place=relu_in_place)
        n.conv1_2, n.relu1_2 = self.conv_relu(n.relu1_1, nout=64, relu_in_place=relu_in_place)
        n.pool1 = L.Pooling(n.relu1_2, pooling_param=dict(pool=P.Pooling.MAX, kernel_size=2, stride=2))

        n.conv2_1, n.relu2_1 = self.conv_relu(n.pool1, nout=128, relu_in_place=relu_in_place)
        n.conv2_2, n.relu2_2 = self.conv_relu(n.relu2_1, nout=128, relu_in_place=relu_in_place)
        n.pool2 = L.Pooling(n.relu2_2, pooling_param=dict(pool=P.Pooling.MAX, kernel_size=2, stride=2))

        n.conv3_1, n.relu3_1 = self.conv_relu(n.pool2, nout=256, relu_in_place=relu_in_place)
        n.conv3_2, n.relu3_2 = self.conv_relu(n.relu3_1, nout=256, relu_in_place=relu_in_place)
        n.conv3_3, n.relu3_3 = self.conv_relu(n.relu3_2, nout=256, relu_in_place=relu_in_place)
        n.conv3_4, n.relu3_4 = self.conv_relu(n.relu3_3, nout=256, relu_in_place=relu_in_place)
        n.conv3_5, n.relu3_5 = self.conv_relu(n.relu3_4, nout=256, relu_in_place=relu_in_place)
        n.conv3_6, n.relu3_6 = self.conv_relu(n.relu3_5, nout=256, relu_in_place=relu_in_place)

        n.conv4_1, n.relu4_1 = self.conv_relu(n.relu3_6, nout=512, relu_in_place=relu_in_place)
        n.conv4_2, n.relu4_2 = self.conv_relu(n.relu4_1, nout=512, relu_in_place=relu_in_place)
        n.conv4_3, n.relu4_3 = self.conv_relu(n.relu4_2, nout=512, relu_in_place=relu_in_place)

    def conv_relu(self, bottom, nout, kernel_size=3, stride=1, pad=1, relu_in_place=True):
        '''
        Helper method for returning a ReLU activated Conv layer
        '''
        conv = L.Convolution(bottom, kernel_size=kernel_size, stride=stride,
                             num_output=nout, pad=pad, engine=self.conv_engine,
                             weight_filler=dict(type=self.initialization),
                             bias_filler=dict(type='constant'))
        return conv, L.ReLU(conv, in_place=relu_in_place)

    def fc_relu(self, bottom, layer_size, dropout_ratio=0.0, relu_in_place=True):
        '''
        Helper method for returning a ReLU activated Fully Connected layer. It can be specified also
        if the layer should make use of Dropout as well.
        '''
        fc = L.InnerProduct(bottom, num_output=layer_size,
                            weight_filler=dict(type=self.initialization),
                            bias_filler=dict(type='constant'))
        relu = L.ReLU(fc, in_place=relu_in_place)
        if dropout_ratio == 0.0:
            return fc, relu
        else:
            # bug fix: the dropout ratio was previously hard-coded to 0.5,
            # silently ignoring the dropout_ratio parameter
            return fc, relu, L.Dropout(relu, dropout_ratio=dropout_ratio, in_place=True, include=dict(phase=self.phase_train))

    def get_phocnet(self, word_image_lmdb_path, phoc_lmdb_path,
                    phoc_size=604, generate_deploy=False):
        '''
        Returns a NetSpec definition of the PHOCNet. The definition can then be transformed
        into a protobuffer message by casting it into a str.
        '''
        n = NetSpec()
        # Data
        self.set_phocnet_data(n=n, generate_deploy=generate_deploy,
                              word_image_lmdb_path=word_image_lmdb_path,
                              phoc_lmdb_path=phoc_lmdb_path)

        # Conv Part
        self.set_phocnet_conv_body(n=n, relu_in_place=True)

        # FC Part (SPP pooling bridges the variable-sized conv output to the FC layers)
        n.spp5 = L.SPP(n.relu4_3, spp_param=dict(pool=P.SPP.MAX, pyramid_height=3, engine=self.spp_engine))
        n.fc6, n.relu6, n.drop6 = self.fc_relu(bottom=n.spp5, layer_size=4096,
                                               dropout_ratio=0.5, relu_in_place=True)
        n.fc7, n.relu7, n.drop7 = self.fc_relu(bottom=n.drop6, layer_size=4096,
                                               dropout_ratio=0.5, relu_in_place=True)
        n.fc8 = L.InnerProduct(n.drop7, num_output=phoc_size,
                               weight_filler=dict(type=self.initialization),
                               bias_filler=dict(type='constant'))
        n.sigmoid = L.Sigmoid(n.fc8, include=dict(phase=self.phase_test))

        # output part
        if not generate_deploy:
            n.silence = L.Silence(n.sigmoid, ntop=0, include=dict(phase=self.phase_test))
            n.loss = L.SigmoidCrossEntropyLoss(n.fc8, n.phocs)

        return n.to_proto()

    def get_tpp_phocnet(self, word_image_lmdb_path, phoc_lmdb_path, phoc_size, tpp_levels=5,
                        generate_deploy=False):
        '''
        Returns a NetSpec definition of the TPP-PHOCNet. The definition can then be transformed
        into a protobuffer message by casting it into a str.
        '''
        n = NetSpec()
        # Data
        self.set_phocnet_data(n=n, generate_deploy=generate_deploy,
                              word_image_lmdb_path=word_image_lmdb_path,
                              phoc_lmdb_path=phoc_lmdb_path)

        # Conv Part
        self.set_phocnet_conv_body(n=n, relu_in_place=True)

        # FC Part (TPP pooling instead of SPP)
        n.tpp5 = L.TPP(n.relu4_3, tpp_param=dict(pool=P.TPP.MAX, pyramid_layer=range(1, tpp_levels + 1), engine=self.spp_engine))
        n.fc6, n.relu6, n.drop6 = self.fc_relu(bottom=n.tpp5, layer_size=4096,
                                               dropout_ratio=0.5, relu_in_place=True)
        n.fc7, n.relu7, n.drop7 = self.fc_relu(bottom=n.drop6, layer_size=4096,
                                               dropout_ratio=0.5, relu_in_place=True)
        n.fc8 = L.InnerProduct(n.drop7, num_output=phoc_size,
                               weight_filler=dict(type=self.initialization),
                               bias_filler=dict(type='constant'))
        n.sigmoid = L.Sigmoid(n.fc8, include=dict(phase=self.phase_test))

        # output part
        if not generate_deploy:
            n.silence = L.Silence(n.sigmoid, ntop=0, include=dict(phase=self.phase_test))
            n.loss = L.SigmoidCrossEntropyLoss(n.fc8, n.phocs)

        return n.to_proto()
def main():
'''
this module can be called as main function in which case it prints the
prototxt definition for the given net
'''
parser = argparse.ArgumentParser()
parser.add_argument('--cnn_architecture', '-ca', choices=['phocnet', 'tpp-phocnet'], default='phocnet',
help='The CNN architecture to print to standard out')
parser.add_argument('--word_image_lmdb_path', '-wilp', default='./word_images_lmdb')
parser.add_argument('--phoc_lmdb_path', '-plp', default='./phoc_lmdb')
parser.add_argument('--phoc_size', type=int, default=604)
args = parser.parse_args()
if args.cnn_architecture == 'phocnet':
print str(ModelProtoGenerator().get_phocnet(word_image_lmdb_path=args.word_image_lmdb_path,
phoc_lmdb_path=args.phoc_lmdb_path,
phoc_size=args.phoc_size,
generate_deploy=args.generate_deploy))
elif args.cnn_architecture == 'tpp-phocnet':
print str(ModelProtoGenerator().get_tpp_phocnet(word_image_lmdb_path=args.word_image_lmdb_path,
phoc_lmdb_path=args.phoc_lmdb_path,
phoc_size=args.phoc_size,
generate_deploy=args.generate_deploy))
if __name__ == '__main__':
main()
| 9,101 | 49.566667 | 129 | py |
'''
Created on Feb 18, 2016
@author: ssudholt
'''
import os
import shutil
import logging
import numpy as np
import lmdb
import caffe.io
# from patrec.serialization.list_io import LineListIO
class CaffeLMDBCreator(object):
    def __init__(self):
        '''
        LMDB creator can create a single LMDB for single label classification
        or two LMDBs where each element in the database_images has a corresponding
        counterpart in database_additional with the same key. This is useful for creating
        for example LMDBs for PHOCs, attributes or segmentation.
        '''
        self.logger = logging.getLogger('CaffeLMDBCreator')
        # handles to the open LMDB environments and their write transactions
        self.database_images = None
        self.database_additional = None
        self.txn_images = None
        self.txn_additional = None
        # optional mapping from string labels to numeric class indices
        self.label_map = None
        # number of ndarrays written so far (also used for key generation)
        self.internal_counter = 0
        self.logger.debug('Using LMDB version %d.%d.%d' % lmdb.version())

    def open_single_lmdb_for_write(self, lmdb_path, max_lmdb_size=1024**4, create=True, label_map=None):
        '''
        Opens a single LMDB for inserting ndarrays (i.e. images)

        Args:
            lmdb_path (str): Where to save the LMDB
            max_lmdb_size (int): The maximum size in bytes of the LMDB (default: 1TB)
            create (bool): If this flag is set, a potentially previously created LMDB at lmdb_path
                           is deleted and overwritten by this new LMDB
            label_map (dictionary): If you supply a dictionary mapping string labels to integer indices, you can later
                                    call put_single with string labels instead of int labels
        '''
        # delete existing LMDB if necessary
        if os.path.exists(lmdb_path) and create:
            self.logger.debug('Erasing previously created LMDB at %s', lmdb_path)
            shutil.rmtree(lmdb_path)

        self.logger.info('Opening single LMDB at %s for writing', lmdb_path)
        self.database_images = lmdb.open(path=lmdb_path, map_size=max_lmdb_size)
        self.txn_images = self.database_images.begin(write=True)
        self.label_map = label_map

    def open_dual_lmdb_for_write(self, image_lmdb_path, additional_lmdb_path, max_lmdb_size=1024**4, create=True, label_map=None):
        '''
        Opens two LMDBs where each element in the first has a counterpart in the second

        Args:
            image_lmdb_path (str): Where to save the image LMDB
            additional_lmdb_path (str): Where to save the additional LMDB
            max_lmdb_size (int): The maximum size in bytes of each LMDB (default: 1TB)
            create (bool): If this flag is set, potentially previously created LMDBs at lmdb_path
                           and additional_lmdb_path are deleted and overwritten by new LMDBs
            label_map (dictionary): If you supply a dictionary mapping string labels to integer indices, you can later
                                    call put_dual with string labels instead of int labels
        '''
        # delete existing LMDBs if necessary
        if os.path.exists(image_lmdb_path) and create:
            self.logger.debug('Erasing previously created LMDB at %s', image_lmdb_path)
            shutil.rmtree(image_lmdb_path)
        if os.path.exists(additional_lmdb_path) and create:
            self.logger.debug('Erasing previously created LMDB at %s', additional_lmdb_path)
            shutil.rmtree(additional_lmdb_path)

        self.logger.info('Opening LMDBs at %s and %s for writing', image_lmdb_path, additional_lmdb_path)
        self.database_images = lmdb.open(path=image_lmdb_path, map_size=max_lmdb_size)
        self.txn_images = self.database_images.begin(write=True)
        self.database_additional = lmdb.open(path=additional_lmdb_path, map_size=max_lmdb_size)
        self.txn_additional = self.database_additional.begin(write=True)
        self.label_map = label_map

    def put_single(self, img_mat, label, key=None):
        '''
        Puts an ndarray into the previously opened single LMDB

        Args:
            img_mat (3d-ndarray): The image data to be inserted in the LMDB
            label (str or int): The label for the image
            key (str): The key under which to save the data in the LMDB
                       If key is None, a generic key is generated
        '''
        # some checks
        if self.database_images is None:
            raise ValueError('No LMDB to write to. Have you called open_single_lmdb_for_write?')
        if self.database_additional is not None:
            raise ValueError('Cannot execute put_single as open_dual_lmdb_for_write has been chosen for LMDB creation')
        if img_mat.dtype != np.uint8 or img_mat.ndim != 3:
            raise ValueError('img_mat must be a 3d-ndarray of type np.uint8')
        # label may be a string if a label map was supplied
        datum_label = None
        if type(label) == str:
            if self.label_map is None:
                raise ValueError('You may only supply a label of type str if you called open_single_lmdb_for_write with a valid label_map')
            else:
                datum_label = self.label_map[label]
        elif type(label) == int:
            datum_label = label
        else:
            raise ValueError('label must be of type str or int')

        # convert img_mat to Caffe Datum
        datum = caffe.io.array_to_datum(arr=img_mat, label=datum_label)
        if key is None:
            key = '%s_%s' % (str(self.internal_counter).zfill(8), str(label))
        # push Datum into the LMDB
        self.txn_images.put(key=key, value=datum.SerializeToString())
        self.internal_counter += 1
        # commit periodically to keep the write transaction small
        if self.internal_counter % 1000 == 0:
            self.txn_images.commit()
            self.logger.info('   Finished %*d ndarrays', 8, self.internal_counter)
            # after a commit the txn object becomes invalid, so we need to get a new one
            self.txn_images = self.database_images.begin(write=True)

    def put_dual(self, img_mat, additional_mat, label, key=None):
        '''
        Puts an image and its corresponding additional information ndarray into the
        previously opened LMDBs

        Args:
            img_mat (3d-ndarray): The image data to be inserted in the LMDB
            additional_mat (3d-ndarray): The label matrix (attributes, PHOC, ...) to be inserted
            label (str or int): The label for the image
            key (str): The key under which to save the data in the LMDB
                       If key is None, a generic key is generated
        '''
        # some checks
        if self.database_images is None:
            raise ValueError('No LMDB to write to. Have you called open_dual_lmdb_for_write?')
        if self.database_additional is None:
            raise ValueError('Cannot execute put_dual as open_single_lmdb_for_write has been chosen for LMDB creation')
        if img_mat.dtype != np.uint8 or img_mat.ndim != 3:
            raise TypeError('img_mat must be a 3d-ndarray of type np.uint8')
        if additional_mat.dtype != np.uint8 or additional_mat.ndim != 3:
            raise TypeError('additional_mat must be a 3d-ndarray of type np.uint8')
        # label may be a string if a label map was supplied
        datum_label = None
        if type(label) == str:
            if self.label_map is None:
                # bug fix: this error message previously referred to
                # open_single_lmdb_for_write although put_dual requires
                # open_dual_lmdb_for_write
                raise ValueError('You may only supply a label of type str if you called open_dual_lmdb_for_write with a valid label_map')
            elif not label in self.label_map.keys():
                self.logger.warn('Warning, unknown key - skipping this entry')
                return
            else:
                datum_label = self.label_map[label]
        elif type(label) in [int, np.int32, np.int64]:
            datum_label = int(label)
        else:
            raise TypeError('label must be of type str or int')

        # convert img_mat and additional_mat to Caffe Data
        datum_img = caffe.io.array_to_datum(arr=img_mat, label=datum_label)
        datum_additional = caffe.io.array_to_datum(arr=additional_mat, label=datum_label)
        if key is None:
            key = '%s_%s' % (str(self.internal_counter).zfill(8), str(label))
        # push Data in the current LMDBs
        self.txn_images.put(key=key, value=datum_img.SerializeToString())
        self.txn_additional.put(key=key, value=datum_additional.SerializeToString())
        self.internal_counter += 1
        # commit periodically to keep the write transactions small
        if self.internal_counter % 10000 == 0:
            self.txn_images.commit()
            self.txn_additional.commit()
            self.logger.info('   Finished %*d ndarrays', 8, self.internal_counter)
            # after a commit the txn objects becomes invalid, so we need to get new ones
            self.txn_images = self.database_images.begin(write=True)
            self.txn_additional = self.database_additional.begin(write=True)
        return

    def finish_creation(self):
        '''
        Wraps up LMDB creation and resets all internal variables
        '''
        self.txn_images.commit()
        self.database_images.sync()
        self.database_images.close()
        if self.database_additional is not None:
            self.txn_additional.commit()
            self.database_additional.sync()
            self.database_additional.close()

        self.logger.info('Finished after writing %d ndarrays', self.internal_counter)
        # reset state so the creator can be reused for another LMDB
        self.database_images = None
        self.database_additional = None
        self.txn_images = None
        self.txn_additional = None
        self.label_map = None
        self.internal_counter = 0
| 9,744 | 48.217172 | 139 | py |
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
from torch.utils.data import Dataset
class TrainDataset(Dataset):
    '''
    Dataset yielding a positive triple together with negative samples that
    were obtained by corrupting either the head or the tail entity.
    '''
    def __init__(self, triples, nentity, nrelation, negative_sample_size, mode):
        # triples: list of (head, relation, tail) id triples
        # mode: 'head-batch' or 'tail-batch' (which entity gets corrupted)
        self.len = len(triples)
        self.triples = triples
        self.triple_set = set(triples)
        self.nentity = nentity
        self.nrelation = nrelation
        self.negative_sample_size = negative_sample_size
        self.mode = mode
        # frequency counts for word2vec-style subsampling weights
        self.count = self.count_frequency(triples)
        # lookup tables used to filter out false negatives during sampling
        self.true_head, self.true_tail = self.get_true_head_and_tail(self.triples)

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        '''
        Returns (positive_sample, negative_sample, subsampling_weight, mode)
        for the triple at the given index.
        '''
        positive_sample = self.triples[idx]

        head, relation, tail = positive_sample

        # rarer (head, relation)/(tail, relation) patterns get larger weights
        subsampling_weight = self.count[(head, relation)] + self.count[(tail, -relation-1)]
        subsampling_weight = torch.sqrt(1 / torch.Tensor([subsampling_weight]))

        negative_sample_list = []
        negative_sample_size = 0

        # draw random entities until enough true negatives are collected;
        # candidates forming a known true triple are masked out
        while negative_sample_size < self.negative_sample_size:
            negative_sample = np.random.randint(self.nentity, size=self.negative_sample_size*2)
            if self.mode == 'head-batch':
                mask = np.in1d(
                    negative_sample,
                    self.true_head[(relation, tail)],
                    assume_unique=True,
                    invert=True
                )
            elif self.mode == 'tail-batch':
                mask = np.in1d(
                    negative_sample,
                    self.true_tail[(head, relation)],
                    assume_unique=True,
                    invert=True
                )
            else:
                raise ValueError('Training batch mode %s not supported' % self.mode)
            negative_sample = negative_sample[mask]
            negative_sample_list.append(negative_sample)
            negative_sample_size += negative_sample.size

        # truncate to exactly negative_sample_size candidates
        negative_sample = np.concatenate(negative_sample_list)[:self.negative_sample_size]

        negative_sample = torch.from_numpy(negative_sample)

        positive_sample = torch.LongTensor(positive_sample)

        return positive_sample, negative_sample, subsampling_weight, self.mode

    @staticmethod
    def collate_fn(data):
        # stack per-sample tensors into batch tensors; all entries of a batch
        # share the same sampling mode
        positive_sample = torch.stack([_[0] for _ in data], dim=0)
        negative_sample = torch.stack([_[1] for _ in data], dim=0)
        subsample_weight = torch.cat([_[2] for _ in data], dim=0)
        mode = data[0][3]
        return positive_sample, negative_sample, subsample_weight, mode

    @staticmethod
    def count_frequency(triples, start=4):
        '''
        Get frequency of a partial triple like (head, relation) or (relation, tail)
        The frequency will be used for subsampling like word2vec
        '''
        count = {}
        for head, relation, tail in triples:
            if (head, relation) not in count:
                count[(head, relation)] = start
            else:
                count[(head, relation)] += 1

            # tails are keyed with the negated (shifted by one) relation id so
            # the two directions do not collide in the same dictionary
            if (tail, -relation-1) not in count:
                count[(tail, -relation-1)] = start
            else:
                count[(tail, -relation-1)] += 1
        return count

    @staticmethod
    def get_true_head_and_tail(triples):
        '''
        Build a dictionary of true triples that will
        be used to filter these true triples for negative sampling
        '''

        true_head = {}
        true_tail = {}

        for head, relation, tail in triples:
            if (head, relation) not in true_tail:
                true_tail[(head, relation)] = []
            true_tail[(head, relation)].append(tail)
            if (relation, tail) not in true_head:
                true_head[(relation, tail)] = []
            true_head[(relation, tail)].append(head)

        # deduplicate and convert to ndarrays for fast np.in1d filtering
        for relation, tail in true_head:
            true_head[(relation, tail)] = np.array(list(set(true_head[(relation, tail)])))
        for head, relation in true_tail:
            true_tail[(head, relation)] = np.array(list(set(true_tail[(head, relation)])))

        return true_head, true_tail
class TestDataset(Dataset):
    '''
    Ranking-evaluation dataset: for each test triple all possible corruptions
    of the head (or tail) entity are produced, together with a filter bias that
    marks corruptions which are themselves known true triples with -1.
    '''
    def __init__(self, triples, all_true_triples, nentity, nrelation, mode):
        self.len = len(triples)
        self.triple_set = set(all_true_triples)
        self.triples = triples
        self.nentity = nentity
        self.nrelation = nrelation
        self.mode = mode

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        head, relation, tail = self.triples[idx]

        if self.mode == 'head-batch':
            tmp = []
            for rand_head in range(self.nentity):
                if (rand_head, relation, tail) in self.triple_set:
                    # known true triple: remember the positive head, bias -1
                    tmp.append((-1, head))
                else:
                    tmp.append((0, rand_head))
            # the positive head itself must never be filtered out
            tmp[head] = (0, head)
        elif self.mode == 'tail-batch':
            tmp = []
            for rand_tail in range(self.nentity):
                if (head, relation, rand_tail) in self.triple_set:
                    # known true triple: remember the positive tail, bias -1
                    tmp.append((-1, tail))
                else:
                    tmp.append((0, rand_tail))
            # the positive tail itself must never be filtered out
            tmp[tail] = (0, tail)
        else:
            raise ValueError('negative batch mode %s not supported' % self.mode)

        tmp = torch.LongTensor(tmp)
        filter_bias = tmp[:, 0].float()
        negative_sample = tmp[:, 1]

        positive_sample = torch.LongTensor((head, relation, tail))

        return positive_sample, negative_sample, filter_bias, self.mode

    @staticmethod
    def collate_fn(data):
        mode = data[0][3]
        positive_sample = torch.stack([item[0] for item in data], dim=0)
        negative_sample = torch.stack([item[1] for item in data], dim=0)
        filter_bias = torch.stack([item[2] for item in data], dim=0)
        return positive_sample, negative_sample, filter_bias, mode
class BidirectionalOneShotIterator(object):
    '''
    Endless iterator that alternates between a head-batch and a tail-batch
    dataloader, restarting each dataloader whenever it is exhausted.
    '''
    def __init__(self, dataloader_head, dataloader_tail):
        self.iterator_head = self.one_shot_iterator(dataloader_head)
        self.iterator_tail = self.one_shot_iterator(dataloader_tail)
        self.step = 0

    def __next__(self):
        self.step += 1
        # odd steps draw from the tail iterator, even steps from the head one
        source = self.iterator_head if self.step % 2 == 0 else self.iterator_tail
        return next(source)

    @staticmethod
    def one_shot_iterator(dataloader):
        '''
        Transform a PyTorch Dataloader into python iterator
        '''
        while True:
            for batch in dataloader:
                yield batch
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import average_precision_score
from torch.utils.data import DataLoader
from dataloader import TestDataset
class KGEModel(nn.Module):
    def __init__(self, model_name, nentity, nrelation, hidden_dim, gamma,
                 double_entity_embedding=False, double_relation_embedding=False):
        '''
        Knowledge graph embedding model supporting the TransE, DistMult,
        ComplEx, RotatE and pRotatE scoring functions.

        Args:
            model_name (str): one of 'TransE', 'DistMult', 'ComplEx', 'RotatE', 'pRotatE'
            nentity (int): number of entities in the knowledge graph
            nrelation (int): number of relations in the knowledge graph
            hidden_dim (int): base embedding dimensionality
            gamma (float): fixed margin used by the distance-based scores
            double_entity_embedding (bool): use 2*hidden_dim for entities (complex-valued models)
            double_relation_embedding (bool): use 2*hidden_dim for relations (ComplEx)

        Raises:
            ValueError: for unsupported model names or inconsistent embedding flags
        '''
        super(KGEModel, self).__init__()
        self.model_name = model_name
        self.nentity = nentity
        self.nrelation = nrelation
        self.hidden_dim = hidden_dim
        self.epsilon = 2.0

        # stored as non-trainable parameters so they are saved with the model
        self.gamma = nn.Parameter(
            torch.Tensor([gamma]),
            requires_grad=False
        )

        # uniform initialization range derived from gamma
        self.embedding_range = nn.Parameter(
            torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]),
            requires_grad=False
        )

        self.entity_dim = hidden_dim*2 if double_entity_embedding else hidden_dim
        self.relation_dim = hidden_dim*2 if double_relation_embedding else hidden_dim

        self.entity_embedding = nn.Parameter(torch.zeros(nentity, self.entity_dim))
        nn.init.uniform_(
            tensor=self.entity_embedding,
            a=-self.embedding_range.item(),
            b=self.embedding_range.item()
        )

        self.relation_embedding = nn.Parameter(torch.zeros(nrelation, self.relation_dim))
        nn.init.uniform_(
            tensor=self.relation_embedding,
            a=-self.embedding_range.item(),
            b=self.embedding_range.item()
        )

        if model_name == 'pRotatE':
            # pRotatE additionally learns a modulus that scales the sine distance
            self.modulus = nn.Parameter(torch.Tensor([[0.5 * self.embedding_range.item()]]))

        #Do not forget to modify this line when you add a new model in the "forward" function
        if model_name not in ['TransE', 'DistMult', 'ComplEx', 'RotatE', 'pRotatE']:
            raise ValueError('model %s not supported' % model_name)

        if model_name == 'RotatE' and (not double_entity_embedding or double_relation_embedding):
            raise ValueError('RotatE should use --double_entity_embedding')

        if model_name == 'ComplEx' and (not double_entity_embedding or not double_relation_embedding):
            raise ValueError('ComplEx should use --double_entity_embedding and --double_relation_embedding')
    def forward(self, sample, mode='single'):
        '''
        Forward function that calculate the score of a batch of triples.
        In the 'single' mode, sample is a batch of triple.
        In the 'head-batch' or 'tail-batch' mode, sample consists two part.
        The first part is usually the positive sample.
        And the second part is the entities in the negative samples.
        Because negative samples and positive samples usually share two elements
        in their triple ((head, relation) or (relation, tail)).

        Returns a (batch_size, negative_sample_size) tensor of scores.
        '''

        if mode == 'single':
            # sample: (batch, 3) tensor of (head, relation, tail) ids
            batch_size, negative_sample_size = sample.size(0), 1

            head = torch.index_select(
                self.entity_embedding,
                dim=0,
                index=sample[:,0]
            ).unsqueeze(1)

            relation = torch.index_select(
                self.relation_embedding,
                dim=0,
                index=sample[:,1]
            ).unsqueeze(1)

            tail = torch.index_select(
                self.entity_embedding,
                dim=0,
                index=sample[:,2]
            ).unsqueeze(1)

        elif mode == 'head-batch':
            # head_part holds the candidate heads, tail_part the positive triples
            tail_part, head_part = sample
            batch_size, negative_sample_size = head_part.size(0), head_part.size(1)

            head = torch.index_select(
                self.entity_embedding,
                dim=0,
                index=head_part.view(-1)
            ).view(batch_size, negative_sample_size, -1)

            relation = torch.index_select(
                self.relation_embedding,
                dim=0,
                index=tail_part[:, 1]
            ).unsqueeze(1)

            tail = torch.index_select(
                self.entity_embedding,
                dim=0,
                index=tail_part[:, 2]
            ).unsqueeze(1)

        elif mode == 'tail-batch':
            # tail_part holds the candidate tails, head_part the positive triples
            head_part, tail_part = sample
            batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)

            head = torch.index_select(
                self.entity_embedding,
                dim=0,
                index=head_part[:, 0]
            ).unsqueeze(1)

            relation = torch.index_select(
                self.relation_embedding,
                dim=0,
                index=head_part[:, 1]
            ).unsqueeze(1)

            tail = torch.index_select(
                self.entity_embedding,
                dim=0,
                index=tail_part.view(-1)
            ).view(batch_size, negative_sample_size, -1)

        else:
            raise ValueError('mode %s not supported' % mode)

        # dispatch to the score function selected at construction time
        model_func = {
            'TransE': self.TransE,
            'DistMult': self.DistMult,
            'ComplEx': self.ComplEx,
            'RotatE': self.RotatE,
            'pRotatE': self.pRotatE
        }

        if self.model_name in model_func:
            score = model_func[self.model_name](head, relation, tail, mode)
        else:
            raise ValueError('model %s not supported' % self.model_name)

        return score
def TransE(self, head, relation, tail, mode):
if mode == 'head-batch':
score = head + (relation - tail)
else:
score = (head + relation) - tail
score = self.gamma.item() - torch.norm(score, p=1, dim=2)
return score
def DistMult(self, head, relation, tail, mode):
if mode == 'head-batch':
score = head * (relation * tail)
else:
score = (head * relation) * tail
score = score.sum(dim = 2)
return score
def ComplEx(self, head, relation, tail, mode):
re_head, im_head = torch.chunk(head, 2, dim=2)
re_relation, im_relation = torch.chunk(relation, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
score = re_head * re_score + im_head * im_score
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
score = re_score * re_tail + im_score * im_tail
score = score.sum(dim = 2)
return score
def RotatE(self, head, relation, tail, mode):
pi = 3.14159265358979323846
re_head, im_head = torch.chunk(head, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
#Make phases of relations uniformly distributed in [-pi, pi]
phase_relation = relation/(self.embedding_range.item()/pi)
re_relation = torch.cos(phase_relation)
im_relation = torch.sin(phase_relation)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
re_score = re_score - re_head
im_score = im_score - im_head
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_tail
im_score = im_score - im_tail
score = torch.stack([re_score, im_score], dim = 0)
score = score.norm(dim = 0)
score = self.gamma.item() - score.sum(dim = 2)
return score
def pRotatE(self, head, relation, tail, mode):
pi = 3.14159262358979323846
#Make phases of entities and relations uniformly distributed in [-pi, pi]
phase_head = head/(self.embedding_range.item()/pi)
phase_relation = relation/(self.embedding_range.item()/pi)
phase_tail = tail/(self.embedding_range.item()/pi)
if mode == 'head-batch':
score = phase_head + (phase_relation - phase_tail)
else:
score = (phase_head + phase_relation) - phase_tail
score = torch.sin(score)
score = torch.abs(score)
score = self.gamma.item() - score.sum(dim = 2) * self.modulus
return score
    @staticmethod
    def train_step(model, optimizer, train_iterator, args):
        '''
        A single train step. Apply back-propation and return the loss

        Args:
            model: the KGEModel being trained
            optimizer: the torch optimizer for the model parameters
            train_iterator: iterator yielding (positive, negative, weight, mode) batches
            args: namespace of training options (cuda, uni_weight, regularization, ...)

        Returns:
            dict: loss components of this step for logging
        '''

        model.train()

        optimizer.zero_grad()

        positive_sample, negative_sample, subsampling_weight, mode = next(train_iterator)

        if args.cuda:
            positive_sample = positive_sample.cuda()
            negative_sample = negative_sample.cuda()
            subsampling_weight = subsampling_weight.cuda()

        # score all negative candidates for the drawn batch
        negative_score = model((positive_sample, negative_sample), mode=mode)

        if args.negative_adversarial_sampling:
            #In self-adversarial sampling, we do not apply back-propagation on the sampling weight
            negative_score = (F.softmax(negative_score * args.adversarial_temperature, dim = 1).detach()
                              * F.logsigmoid(-negative_score)).sum(dim = 1)
        else:
            negative_score = F.logsigmoid(-negative_score).mean(dim = 1)

        positive_score = model(positive_sample)

        positive_score = F.logsigmoid(positive_score).squeeze(dim = 1)

        if args.uni_weight:
            # uniform weighting over all samples
            positive_sample_loss = - positive_score.mean()
            negative_sample_loss = - negative_score.mean()
        else:
            # word2vec-style subsampling weights emphasize rare patterns
            positive_sample_loss = - (subsampling_weight * positive_score).sum()/subsampling_weight.sum()
            negative_sample_loss = - (subsampling_weight * negative_score).sum()/subsampling_weight.sum()

        loss = (positive_sample_loss + negative_sample_loss)/2

        if args.regularization != 0.0:
            #Use L3 regularization for ComplEx and DistMult
            # NOTE(review): the second .norm(p=3) acts on a scalar and is a
            # no-op; likely a single norm was intended — confirm upstream
            regularization = args.regularization * (
                model.entity_embedding.norm(p = 3)**3 +
                model.relation_embedding.norm(p = 3).norm(p = 3)**3
            )
            loss = loss + regularization
            regularization_log = {'regularization': regularization.item()}
        else:
            regularization_log = {}

        loss.backward()

        optimizer.step()

        log = {
            **regularization_log,
            'positive_sample_loss': positive_sample_loss.item(),
            'negative_sample_loss': negative_sample_loss.item(),
            'loss': loss.item()
        }

        return log
@staticmethod
def test_step(model, test_triples, all_true_triples, args):
'''
Evaluate the model on test or valid datasets
'''
model.eval()
if args.countries:
#Countries S* datasets are evaluated on AUC-PR
#Process test data for AUC-PR evaluation
sample = list()
y_true = list()
for head, relation, tail in test_triples:
for candidate_region in args.regions:
y_true.append(1 if candidate_region == tail else 0)
sample.append((head, relation, candidate_region))
sample = torch.LongTensor(sample)
if args.cuda:
sample = sample.cuda()
with torch.no_grad():
y_score = model(sample).squeeze(1).cpu().numpy()
y_true = np.array(y_true)
#average_precision_score is the same as auc_pr
auc_pr = average_precision_score(y_true, y_score)
metrics = {'auc_pr': auc_pr}
else:
#Otherwise use standard (filtered) MRR, MR, HITS@1, HITS@3, and HITS@10 metrics
#Prepare dataloader for evaluation
test_dataloader_head = DataLoader(
TestDataset(
test_triples,
all_true_triples,
args.nentity,
args.nrelation,
'head-batch'
),
batch_size=args.test_batch_size,
num_workers=max(1, args.cpu_num//2),
collate_fn=TestDataset.collate_fn
)
test_dataloader_tail = DataLoader(
TestDataset(
test_triples,
all_true_triples,
args.nentity,
args.nrelation,
'tail-batch'
),
batch_size=args.test_batch_size,
num_workers=max(1, args.cpu_num//2),
collate_fn=TestDataset.collate_fn
)
test_dataset_list = [test_dataloader_head, test_dataloader_tail]
logs = []
step = 0
total_steps = sum([len(dataset) for dataset in test_dataset_list])
# --------------------------------------------------
# Comments by Meng:
# Here we slightly modify the codes to save the intermediate prediction results of KGE models, so that we can combine the predictions from KGE and MLN to improve the results.
# --------------------------------------------------
predictions = []
with torch.no_grad():
for test_dataset in test_dataset_list:
for positive_sample, negative_sample, filter_bias, mode in test_dataset:
if args.cuda:
positive_sample = positive_sample.cuda()
negative_sample = negative_sample.cuda()
filter_bias = filter_bias.cuda()
# Save prediction results
prediction = positive_sample.data.cpu().numpy().tolist()
batch_size = positive_sample.size(0)
score = torch.sigmoid(model((positive_sample, negative_sample), mode))
score += filter_bias
#Explicitly sort all the entities to ensure that there is no test exposure bias
valsort, argsort = torch.sort(score, dim = 1, descending=True)
if mode == 'head-batch':
positive_arg = positive_sample[:, 0]
elif mode == 'tail-batch':
positive_arg = positive_sample[:, 2]
else:
raise ValueError('mode %s not supported' % mode)
for i in range(batch_size):
#Notice that argsort is not ranking
ranking = (argsort[i, :] == positive_arg[i]).nonzero()
assert ranking.size(0) == 1
# For each test triplet, save the ranked list (h, r, [ts]) and ([hs], r, t)
if mode == 'head-batch':
prediction[i].append('h')
prediction[i].append(ranking.item() + 1)
ls = zip(argsort[i, 0:args.topk].data.cpu().numpy().tolist(), valsort[i, 0:args.topk].data.cpu().numpy().tolist())
prediction[i].append(ls)
elif mode == 'tail-batch':
prediction[i].append('t')
prediction[i].append(ranking.item() + 1)
ls = zip(argsort[i, 0:args.topk].data.cpu().numpy().tolist(), valsort[i, 0:args.topk].data.cpu().numpy().tolist())
prediction[i].append(ls)
#ranking + 1 is the true ranking used in evaluation metrics
ranking = 1 + ranking.item()
logs.append({
'MR': float(ranking),
'MRR': 1.0/ranking,
'HITS@1': 1.0 if ranking <= 1 else 0.0,
'HITS@3': 1.0 if ranking <= 3 else 0.0,
'HITS@10': 1.0 if ranking <= 10 else 0.0,
})
predictions += prediction
if step % args.test_log_steps == 0:
logging.info('Evaluating the model... (%d/%d)' % (step, total_steps))
step += 1
metrics = {}
for metric in logs[0].keys():
metrics[metric] = sum([log[metric] for log in logs])/len(logs)
return metrics, predictions
# --------------------------------------------------
# Comments by Meng:
# Here we add a new function, which will predict the probability of each hidden triplet being true.
# The results will be used by MLN.
# --------------------------------------------------
@staticmethod
def infer_step(model, infer_triples, args):
batch_size = args.batch_size
scores = []
model.eval()
for k in range(0, len(infer_triples), batch_size):
bg = k
ed = min(k + batch_size, len(infer_triples))
batch = infer_triples[bg:ed]
batch = torch.LongTensor(batch)
if args.cuda:
batch = batch.cuda()
score = torch.sigmoid(model(batch)).squeeze(1)
scores += score.data.cpu().numpy().tolist()
return scores
| 18,193 | 37.222689 | 186 | py |
pLogicNet | pLogicNet-master/kge/run.py | #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from model import KGEModel
from dataloader import TrainDataset
from dataloader import BidirectionalOneShotIterator
def parse_args(args=None):
    '''
    Build the CLI argument parser for training/testing KGE models and parse
    ``args`` (defaults to sys.argv[1:]).
    '''
    parser = argparse.ArgumentParser(
        description='Training and Testing Knowledge Graph Embedding Models',
        usage='train.py [<args>] [-h | --help]'
    )
    # Run-mode flags.
    parser.add_argument('--cuda', action='store_true', help='use GPU')
    parser.add_argument('--do_train', action='store_true')
    parser.add_argument('--do_valid', action='store_true')
    parser.add_argument('--do_test', action='store_true')
    parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')
    parser.add_argument('--countries', action='store_true', help='Use Countries S1/S2/S3 datasets')
    parser.add_argument('--regions', type=int, nargs='+', default=None,
                        help='Region Id for Countries S1/S2/S3 datasets, DO NOT MANUALLY SET')
    # Data / model locations.
    parser.add_argument('--data_path', type=str, default=None)
    parser.add_argument('--workspace_path', type=str, default=None)
    # Model architecture and loss hyper-parameters.
    parser.add_argument('--model', default='TransE', type=str)
    parser.add_argument('-de', '--double_entity_embedding', action='store_true')
    parser.add_argument('-dr', '--double_relation_embedding', action='store_true')
    parser.add_argument('-n', '--negative_sample_size', default=128, type=int)
    parser.add_argument('-d', '--hidden_dim', default=500, type=int)
    parser.add_argument('-g', '--gamma', default=12.0, type=float)
    parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
    parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
    # Optimisation settings.
    parser.add_argument('-b', '--batch_size', default=1024, type=int)
    parser.add_argument('-r', '--regularization', default=0.0, type=float)
    parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
    parser.add_argument('--uni_weight', action='store_true',
                        help='Otherwise use subsampling weighting like in word2vec')
    parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
    parser.add_argument('-cpu', '--cpu_num', default=10, type=int)
    parser.add_argument('-init', '--init_checkpoint', default=None, type=str)
    parser.add_argument('-save', '--save_path', default=None, type=str)
    parser.add_argument('--max_steps', default=100000, type=int)
    parser.add_argument('--warm_up_steps', default=None, type=int)
    # Logging / checkpointing cadence.
    parser.add_argument('--save_checkpoint_steps', default=10000, type=int)
    parser.add_argument('--valid_steps', default=10000, type=int)
    parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')
    parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')
    # Filled in automatically from the data files at startup.
    parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
    parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
    # pLogicNet-specific: record intermediate predictions for the MLN.
    parser.add_argument('--record', action='store_true')
    parser.add_argument('--topk', default=100, type=int)
    return parser.parse_args(args)
def override_config(args):
    '''
    Overwrite the model/data configuration stored in ``args`` with the
    values saved in the checkpoint's config.json (written by save_model).
    ``data_path`` is only filled in when the caller did not supply one.
    '''
    config_file = os.path.join(args.init_checkpoint, 'config.json')
    with open(config_file, 'r') as fjson:
        saved = json.load(fjson)
    args.countries = saved['countries']
    if args.data_path is None:
        args.data_path = saved['data_path']
    for key in ('model', 'double_entity_embedding',
                'double_relation_embedding', 'hidden_dim',
                'test_batch_size'):
        setattr(args, key, saved[key])
def save_model(model, optimizer, save_variable_list, args):
    '''
    Save the parameters of the model and the optimizer, as well as some
    other variables such as step and learning_rate.

    Writes four artifacts under args.save_path: config.json (the full CLI
    configuration), checkpoint (torch state dicts plus the extra
    variables), and the entity/relation embeddings as numpy arrays.
    '''
    save_dir = args.save_path
    # Persist the full CLI configuration next to the weights.
    with open(os.path.join(save_dir, 'config.json'), 'w') as fjson:
        json.dump(vars(args), fjson)
    state = dict(save_variable_list)
    state['model_state_dict'] = model.state_dict()
    state['optimizer_state_dict'] = optimizer.state_dict()
    torch.save(state, os.path.join(save_dir, 'checkpoint'))
    # Embeddings are additionally dumped as plain numpy arrays so that
    # downstream tools can read them without torch.
    for name in ('entity_embedding', 'relation_embedding'):
        values = getattr(model, name).detach().cpu().numpy()
        np.save(os.path.join(save_dir, name), values)
def read_triple(file_path, entity2id, relation2id):
    '''
    Read tab-separated (head, relation, tail) triples from ``file_path``
    and map each element to its integer id.

    :return: list of (head_id, relation_id, tail_id) tuples
    '''
    with open(file_path) as fin:
        rows = [line.strip().split('\t') for line in fin]
    return [(entity2id[h], relation2id[r], entity2id[t]) for h, r, t in rows]
def set_logger(args):
    '''
    Write logs to checkpoint and console.

    The log file lives in save_path when available, otherwise in
    init_checkpoint; training runs log to train.log, others to test.log.
    '''
    log_name = 'train.log' if args.do_train else 'test.log'
    log_file = os.path.join(args.save_path or args.init_checkpoint, log_name)
    log_format = '%(asctime)s %(levelname)-8s %(message)s'
    # File handler via basicConfig (truncates any previous log).
    logging.basicConfig(
        format=log_format,
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S',
        filename=log_file,
        filemode='w'
    )
    # Mirror every record to the console as well.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter(log_format))
    logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics):
    '''
    Print the evaluation logs: one INFO line per metric in ``metrics``.
    '''
    for name, value in metrics.items():
        logging.info('%s %s at step %d: %f' % (mode, name, step, value))
def ensure_dir(d):
    '''Create directory ``d`` (including parents) if it does not exist.'''
    if os.path.exists(d):
        return
    os.makedirs(d)
def main(args):
    '''
    End-to-end pLogicNet KGE pipeline: load dictionaries and triples,
    build the KGE model, optionally train, evaluate on valid/test, and
    (with --record) dump predictions and hidden-triplet annotations for
    the MLN component.
    '''
    if (not args.do_train) and (not args.do_valid) and (not args.do_test):
        raise ValueError('one of train/val/test mode must be choosed.')
    if args.init_checkpoint:
        override_config(args)
    elif args.data_path is None:
        raise ValueError('one of init_checkpoint/data_path must be choosed.')
    if args.do_train and args.save_path is None:
        raise ValueError('Where do you want to save your trained model?')
    if args.save_path and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    # Write logs to checkpoint and console
    set_logger(args)
    # entities.dict / relations.dict map "<id>\t<name>" per line; build
    # both directions of the mapping.
    with open(os.path.join(args.data_path, 'entities.dict')) as fin:
        entity2id = dict()
        id2entity = dict()
        for line in fin:
            eid, entity = line.strip().split('\t')
            entity2id[entity] = int(eid)
            id2entity[int(eid)] = entity
    with open(os.path.join(args.data_path, 'relations.dict')) as fin:
        relation2id = dict()
        id2relation = dict()
        for line in fin:
            rid, relation = line.strip().split('\t')
            relation2id[relation] = int(rid)
            id2relation[int(rid)] = relation
    # Read regions for Countries S* datasets
    if args.countries:
        regions = list()
        with open(os.path.join(args.data_path, 'regions.list')) as fin:
            for line in fin:
                region = line.strip()
                regions.append(entity2id[region])
        args.regions = regions
    nentity = len(entity2id)
    nrelation = len(relation2id)
    args.nentity = nentity
    args.nrelation = nrelation
    logging.info('Model: %s' % args.model)
    logging.info('Data Path: %s' % args.data_path)
    logging.info('#entity: %d' % nentity)
    logging.info('#relation: %d' % nrelation)
    # --------------------------------------------------
    # Comments by Meng:
    # During training, pLogicNet will augment the training triplets,
    # so here we load both the augmented triplets (train.txt) for training and
    # the original triplets (train_kge.txt) for evaluation.
    # Also, the hidden triplets (hidden.txt) are also loaded for annotation.
    # --------------------------------------------------
    train_triples = read_triple(os.path.join(args.workspace_path, 'train_kge.txt'), entity2id, relation2id)
    logging.info('#train: %d' % len(train_triples))
    train_original_triples = read_triple(os.path.join(args.data_path, 'train.txt'), entity2id, relation2id)
    logging.info('#train original: %d' % len(train_original_triples))
    valid_triples = read_triple(os.path.join(args.data_path, 'valid.txt'), entity2id, relation2id)
    logging.info('#valid: %d' % len(valid_triples))
    test_triples = read_triple(os.path.join(args.data_path, 'test.txt'), entity2id, relation2id)
    logging.info('#test: %d' % len(test_triples))
    hidden_triples = read_triple(os.path.join(args.workspace_path, 'hidden.txt'), entity2id, relation2id)
    logging.info('#hidden: %d' % len(hidden_triples))
    #All true triples
    all_true_triples = train_original_triples + valid_triples + test_triples
    kge_model = KGEModel(
        model_name=args.model,
        nentity=nentity,
        nrelation=nrelation,
        hidden_dim=args.hidden_dim,
        gamma=args.gamma,
        double_entity_embedding=args.double_entity_embedding,
        double_relation_embedding=args.double_relation_embedding
    )
    logging.info('Model Parameter Configuration:')
    for name, param in kge_model.named_parameters():
        logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))
    if args.cuda:
        kge_model = kge_model.cuda()
    if args.do_train:
        # Set training dataloader iterator
        train_dataloader_head = DataLoader(
            TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, 'head-batch'),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=max(1, args.cpu_num//2),
            collate_fn=TrainDataset.collate_fn
        )
        train_dataloader_tail = DataLoader(
            TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, 'tail-batch'),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=max(1, args.cpu_num//2),
            collate_fn=TrainDataset.collate_fn
        )
        train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)
        # Set training configuration
        current_learning_rate = args.learning_rate
        optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, kge_model.parameters()),
            lr=current_learning_rate
        )
        if args.warm_up_steps:
            warm_up_steps = args.warm_up_steps
        else:
            warm_up_steps = args.max_steps // 2
    if args.init_checkpoint:
        # Restore model from checkpoint directory
        logging.info('Loading checkpoint %s...' % args.init_checkpoint)
        checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
        init_step = checkpoint['step']
        kge_model.load_state_dict(checkpoint['model_state_dict'])
        if args.do_train:
            current_learning_rate = checkpoint['current_learning_rate']
            warm_up_steps = checkpoint['warm_up_steps']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    else:
        # (note: "Ramdomly" typo in the log message is left as-is)
        logging.info('Ramdomly Initializing %s Model...' % args.model)
        init_step = 0
    step = init_step
    # NOTE(review): the block below runs even for evaluation-only
    # invocations; current_learning_rate is only defined when do_train or
    # a checkpoint is loaded with do_train — otherwise the next line
    # raises NameError. Also '%d' truncates the float learning rate.
    logging.info('Start Training...')
    logging.info('init_step = %d' % init_step)
    logging.info('learning_rate = %d' % current_learning_rate)
    logging.info('batch_size = %d' % args.batch_size)
    logging.info('negative_adversarial_sampling = %d' % args.negative_adversarial_sampling)
    logging.info('hidden_dim = %d' % args.hidden_dim)
    logging.info('gamma = %f' % args.gamma)
    logging.info('negative_adversarial_sampling = %s' % str(args.negative_adversarial_sampling))
    if args.negative_adversarial_sampling:
        logging.info('adversarial_temperature = %f' % args.adversarial_temperature)
    if args.record:
        # Record the full option set in the MLN workspace directory.
        local_path = args.workspace_path
        ensure_dir(local_path)
        opt = vars(args)
        with open(local_path + '/opt.txt', 'w') as fo:
            for key, val in opt.items():
                fo.write('{} {}\n'.format(key, val))
    # Set valid dataloader as it would be evaluated during training
    if args.do_train:
        training_logs = []
        #Training Loop
        for step in range(init_step, args.max_steps):
            log = kge_model.train_step(kge_model, optimizer, train_iterator, args)
            training_logs.append(log)
            # Learning-rate decay: divide by 10 at warm_up_steps, then
            # triple the next decay horizon.
            if step >= warm_up_steps:
                current_learning_rate = current_learning_rate / 10
                logging.info('Change learning_rate to %f at step %d' % (current_learning_rate, step))
                optimizer = torch.optim.Adam(
                    filter(lambda p: p.requires_grad, kge_model.parameters()),
                    lr=current_learning_rate
                )
                warm_up_steps = warm_up_steps * 3
            if step % args.save_checkpoint_steps == 0:
                save_variable_list = {
                    'step': step,
                    'current_learning_rate': current_learning_rate,
                    'warm_up_steps': warm_up_steps
                }
                save_model(kge_model, optimizer, save_variable_list, args)
            if step % args.log_steps == 0:
                metrics = {}
                for metric in training_logs[0].keys():
                    metrics[metric] = sum([log[metric] for log in training_logs])/len(training_logs)
                log_metrics('Training average', step, metrics)
                training_logs = []
            if args.do_valid and (step + 1) % args.valid_steps == 0:
                logging.info('Evaluating on Valid Dataset...')
                metrics, preds = kge_model.test_step(kge_model, valid_triples, all_true_triples, args)
                log_metrics('Valid', step, metrics)
        # Always checkpoint once more after the final training step.
        save_variable_list = {
            'step': step,
            'current_learning_rate': current_learning_rate,
            'warm_up_steps': warm_up_steps
        }
        save_model(kge_model, optimizer, save_variable_list, args)
    if args.do_valid:
        logging.info('Evaluating on Valid Dataset...')
        metrics, preds = kge_model.test_step(kge_model, valid_triples, all_true_triples, args)
        log_metrics('Valid', step, metrics)
        # --------------------------------------------------
        # Comments by Meng:
        # Save the prediction results of KGE on validation set.
        # --------------------------------------------------
        # NOTE(review): local_path is only assigned under args.record
        # above, so --record is effectively required with --do_valid here.
        if args.record:
            # Save the final results
            with open(local_path + '/result_kge_valid.txt', 'w') as fo:
                for metric in metrics:
                    fo.write('{} : {}\n'.format(metric, metrics[metric]))
            # Save the predictions on test data
            with open(local_path + '/pred_kge_valid.txt', 'w') as fo:
                for h, r, t, f, rk, l in preds:
                    fo.write('{}\t{}\t{}\t{}\t{}\n'.format(id2entity[h], id2relation[r], id2entity[t], f, rk))
                    for e, val in l:
                        fo.write('{}:{:.4f} '.format(id2entity[e], val))
                    fo.write('\n')
    if args.do_test:
        logging.info('Evaluating on Test Dataset...')
        metrics, preds = kge_model.test_step(kge_model, test_triples, all_true_triples, args)
        log_metrics('Test', step, metrics)
        # --------------------------------------------------
        # Comments by Meng:
        # Save the prediction results of KGE on test set.
        # --------------------------------------------------
        if args.record:
            # Save the final results
            with open(local_path + '/result_kge.txt', 'w') as fo:
                for metric in metrics:
                    fo.write('{} : {}\n'.format(metric, metrics[metric]))
            # Save the predictions on test data
            with open(local_path + '/pred_kge.txt', 'w') as fo:
                for h, r, t, f, rk, l in preds:
                    fo.write('{}\t{}\t{}\t{}\t{}\n'.format(id2entity[h], id2relation[r], id2entity[t], f, rk))
                    for e, val in l:
                        fo.write('{}:{:.4f} '.format(id2entity[e], val))
                    fo.write('\n')
    # --------------------------------------------------
    # Comments by Meng:
    # Save the annotations on hidden triplets.
    # --------------------------------------------------
    if args.record:
        # Annotate hidden triplets
        scores = kge_model.infer_step(kge_model, hidden_triples, args)
        with open(local_path + '/annotation.txt', 'w') as fo:
            for (h, r, t), s in zip(hidden_triples, scores):
                fo.write('{}\t{}\t{}\t{}\n'.format(id2entity[h], id2relation[r], id2entity[t], s))
    if args.evaluate_train:
        logging.info('Evaluating on Training Dataset...')
        metrics, preds = kge_model.test_step(kge_model, train_triples, all_true_triples, args)
        log_metrics('Test', step, metrics)
# Script entry point: parse CLI arguments and run the full pipeline.
if __name__ == '__main__':
    main(parse_args())
| 18,047 | 39.832579 | 113 | py |
GigaSpeech | GigaSpeech-main/utils/extract_subset_segments.py | #!/usr/bin/env python3
# coding=utf8
# Copyright 2022 Jiayu DU
'''
This tool is used to extract supervised segments from GigaSpeech;
segments are saved in .wav format, supervisions are saved in a simple .tsv file:
--- example tsv begin ---
ID	AUDIO	DURATION	TEXT
POD1000000004_S0000017	audio/POD1000000004/POD1000000004_S0000017.wav	3.163	YOU KNOW TO PUT THIS STUFF TOGETHER
...
...
--- example tsv end ---
It can, but should not, be used to extract large subsets such as L, XL (because it would be extremely slow).
'''
import os, sys
import argparse
import csv
from speechcolab.datasets.gigaspeech import GigaSpeech
import torchaudio
# Text-normalisation tags used by GigaSpeech transcripts: punctuation tags
# are stripped; utterances consisting only of a garbage tag are skipped.
gigaspeech_punctuations = ['<COMMA>', '<PERIOD>', '<QUESTIONMARK>', '<EXCLAMATIONPOINT>']
gigaspeech_garbage_utterance_tags = ['<SIL>', '<NOISE>', '<MUSIC>', '<OTHER>']
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Save the audio segments into wav, and meta into tsv.')
    parser.add_argument('--subset', choices = ['XS', 'S', 'M', 'L', 'XL', 'DEV', 'TEST'], default='XS', help='The subset name')
    parser.add_argument('gigaspeech_dataset_dir', help='The GigaSpeech corpus directory')
    parser.add_argument('dst_dir', help='Ouput subset directory')
    args = parser.parse_args()
    os.makedirs(args.dst_dir, exist_ok = True)
    gigaspeech = GigaSpeech(args.gigaspeech_dataset_dir)
    # GigaSpeech metadata stores subset membership as '{XS}'-style tags.
    subset = '{' + args.subset + '}'
    with open(os.path.join(args.dst_dir, 'metadata.tsv'), 'w+', encoding='utf8') as fo:
        csv_header_fields = ['ID', 'AUDIO', 'DURATION', 'TEXT']
        csv_writer = csv.DictWriter(fo, delimiter='\t', fieldnames=csv_header_fields, lineterminator='\n')
        csv_writer.writeheader()
        for audio in gigaspeech.audios(subset):
            aid = audio['aid']
            audio_path = os.path.join(args.gigaspeech_dataset_dir, audio["path"])
            audio_info = torchaudio.info(audio_path)
            opus_sample_rate = audio_info.sample_rate
            # Source opus files are mono 48 kHz; fail loudly otherwise.
            assert opus_sample_rate == 48000
            nc = audio_info.num_channels
            assert nc == 1
            sample_rate = 16000
            # Decode the whole long recording once and resample to 16 kHz,
            # then cut out each supervised segment below.
            long_waveform, _ = torchaudio.load(audio_path)
            long_waveform = torchaudio.transforms.Resample(opus_sample_rate, sample_rate)(long_waveform)
            for segment in audio['segments']:
                sid = segment['sid']
                if subset not in segment['subsets']:
                    continue
                text = segment['text_tn']
                for punctuation in gigaspeech_punctuations:
                    text = text.replace(punctuation, '').strip()
                text = ' '.join(text.split())
                if text in gigaspeech_garbage_utterance_tags:
                    continue
                begin = segment['begin_time']
                duration = segment['end_time'] - segment['begin_time']
                frame_offset = int(begin * sample_rate)
                num_frames = int(duration * sample_rate)
                waveform = long_waveform[0][frame_offset : frame_offset + num_frames] # mono
                segment_path = os.path.join('audio', aid, f'{sid}.wav')
                os.makedirs(os.path.join(args.dst_dir, os.path.dirname(segment_path)), exist_ok = True)
                # Write 16-bit PCM wav for maximal downstream compatibility.
                torchaudio.save(
                    os.path.join(args.dst_dir, segment_path),
                    waveform.unsqueeze(0),
                    sample_rate = sample_rate,
                    format = 'wav',
                    encoding = 'PCM_S',
                    bits_per_sample = 16,
                )
                utt = {'ID': segment['sid'], 'AUDIO': segment_path, 'DURATION': f'{duration:.4f}', 'TEXT': text }
                csv_writer.writerow(utt)
| 3,742 | 39.247312 | 127 | py |
MIED | MIED-main/mied/validators/metrics.py | '''
Metrics to evaluate samples:
1. With ground truth samples, we can compute
a. Wasserstein distance between mu and mu^*
b. KL(mu^* || mu)
2. With access to ground truth density (which is always the case), we can compute
a. KSD, directly applicable
b. KL(mu || mu^*)
'''
import torch
import numpy as np
import ot
from scipy import stats
from mied.solvers.ksdd import compute_ksd
from mied.utils.kernels import GaussianKernel
from mied.utils.batch_jacobian import compute_jacobian
def estimate_log_p(X, P):
    '''
    Estimate the log-density of the empirical distribution of ``X`` at the
    points ``P`` using a Gaussian kernel density estimate.

    :param X: (B, D), samples used to build the KDE
    :param P: (B', D), samples to evaluate at
    :return: (B',) log-density values, on the same device as P
    '''
    kde = stats.gaussian_kde(X.detach().cpu().numpy().T)
    log_p = kde.logpdf(P.detach().cpu().numpy().T)
    return torch.from_numpy(log_p).to(P.device)
def filter_samples(samples, filter_range):
    '''
    Keep only samples whose every coordinate lies strictly inside
    (-filter_range, filter_range). A non-positive filter_range disables
    filtering and returns ``samples`` unchanged.
    '''
    if filter_range <= 0:
        return samples
    inside = ((samples < filter_range) & (samples > -filter_range)).all(-1)
    return samples[inside]
def batch_expected_diff_norm(X, Y, batch_size=1000):
    '''
    Compute E[||X - Y||] over all pairs (used for the energy distance),
    batching over ``Y`` to bound peak memory.

    :param X: (N, D)
    :param Y: (M, D)
    :return: float, mean pairwise Euclidean distance
    '''
    num_y = Y.shape[0]
    acc = 0.0
    for start in range(0, num_y, batch_size):
        chunk = Y[start:start + batch_size]
        diff = X.unsqueeze(1) - chunk.unsqueeze(0)
        acc += diff.square().sum(-1).sqrt().sum().item() / X.shape[0]
    return acc / num_y
def compute_metric(source_samples, target_problem, *,
                   metric,
                   refresh,
                   gt_samples,
                   gt_multiplier,
                   ot_lib='pol',
                   ksd_sigma=1.0,
                   filter_range=-1,
                   strip_last_n=-1):
    '''
    Evaluate how well ``source_samples`` approximate the target distribution.

    :param source_samples: (B, D) particle positions, any device
    :param target_problem: an instance of ProblemBase (must provide
        eval_log_p and, for sample-based metrics, sample_gt)
    :param metric: one of 'sinkhorn', 'energy_dist', 'KL_st', 'KL_ts',
        'chi2_st', 'chi2_ts', 'KL_sym', 'chi2_sym', 'ksd'
    :param refresh: whether sample_gt should draw fresh ground-truth samples
    :param gt_samples: optional pre-drawn ground-truth samples; drawn from
        target_problem when None
    :param gt_multiplier: number of ground-truth samples per source sample
    :param ot_lib: 'pol' (Python Optimal Transport) or 'geomloss'
    :param ksd_sigma: Gaussian-kernel bandwidth for the 'ksd' metric
    :param filter_range: if > 0, drop samples with coordinates outside
        (-filter_range, filter_range)
    :param strip_last_n: if > 0, drop the last n coordinates of each sample
    :return: a scalar of the computed metric
    '''
    # Fix: the symmetrized metrics previously recursed with only
    # ``metric=...`` although refresh/gt_samples/gt_multiplier are required
    # keyword-only arguments, raising TypeError. They are now handled up
    # front — before filtering/stripping — so the preprocessing is applied
    # exactly once inside each directed call.
    if metric in ('KL_sym', 'chi2_sym'):
        directed = 'KL' if metric == 'KL_sym' else 'chi2'
        common = dict(refresh=refresh,
                      gt_samples=gt_samples,
                      gt_multiplier=gt_multiplier,
                      ot_lib=ot_lib,
                      ksd_sigma=ksd_sigma,
                      filter_range=filter_range,
                      strip_last_n=strip_last_n)
        return (compute_metric(source_samples, target_problem,
                               metric=directed + '_st', **common) +
                compute_metric(source_samples, target_problem,
                               metric=directed + '_ts', **common))
    source_samples = filter_samples(source_samples, filter_range)
    if strip_last_n > 0:
        source_samples = source_samples[:, :-strip_last_n]
    if metric in ['sinkhorn', 'KL_st', 'chi2_st', 'energy_dist']:
        # These metrics compare against samples drawn from the target.
        if gt_samples is None:
            target_samples = target_problem.sample_gt(
                gt_multiplier * source_samples.shape[0],
                refresh=refresh
            )
            target_samples = filter_samples(target_samples, filter_range)
            assert(target_samples is not None)
        else:
            target_samples = gt_samples
        if strip_last_n > 0:
            target_samples = target_samples[:, :-strip_last_n]
    if metric == 'sinkhorn':
        if ot_lib == 'pol':
            import ot
            source_weights = (np.ones(source_samples.shape[0]) /
                              source_samples.shape[0])
            target_weights = (np.ones(target_samples.shape[0]) /
                              target_samples.shape[0])
            M = ot.dist(source_samples.cpu().detach().numpy(),
                        target_samples.cpu().detach().numpy())
            # emd2 returns the (squared-Euclidean-cost) OT objective value.
            W = ot.emd2(source_weights, target_weights, M)
            return W
        else:
            assert(ot_lib == 'geomloss')
            import geomloss
            loss = geomloss.SamplesLoss('sinkhorn', blur=0.0)
            return loss(source_samples, target_samples)
    if metric == 'energy_dist':
        SS = batch_expected_diff_norm(source_samples, source_samples)
        ST = batch_expected_diff_norm(source_samples, target_samples)
        TT = batch_expected_diff_norm(target_samples, target_samples)
        return (2 * ST - SS - TT)
    if metric in ['KL_ts', 'chi2_ts']:
        # Source density via KDE, evaluated at the source samples.
        source_log_p = estimate_log_p(source_samples,
                                      source_samples)
        target_log_p = target_problem.eval_log_p(source_samples)
        if metric == 'KL_ts':
            return (source_log_p - target_log_p).mean().item()
        else:
            return ((target_log_p - source_log_p).exp() - 1).square().mean().item()
    if metric in ['KL_st', 'chi2_st']:
        # Densities evaluated at ground-truth samples.
        target_log_p = target_problem.eval_log_p(target_samples)
        source_log_p = estimate_log_p(source_samples,
                                      target_samples)
        if metric == 'KL_st':
            return (target_log_p - source_log_p).mean().item()
        else:
            return ((source_log_p - target_log_p).exp() - 1).square().mean().item()
    if metric == 'ksd':
        kernel = GaussianKernel(sigma=ksd_sigma)
        X = source_samples.detach().clone()
        X.requires_grad_(True)
        log_p = target_problem.eval_log_p(X)
        grad_log_p = compute_jacobian(log_p.unsqueeze(-1),
                                      X,
                                      create_graph=False,
                                      retain_graph=False)
        grad_log_p = grad_log_p.squeeze(-2)
        return compute_ksd(X, grad_log_p, kernel).item()
| 5,968 | 36.074534 | 83 | py |
MIED | MIED-main/mied/validators/particle.py | import torch
import numpy as np
import math
from mied.utils.h5_helpers import save_dict_h5
from mied.utils.batch_eval import batch_eval_index
from mied.validators.metrics import compute_metric
class ParticleValidator:
    # Produces evaluation artifacts for a particle set: optional 2D density
    # grids for visualisation, problem-specific custom evaluations, and
    # quantitative metrics against the target distribution.
    def __init__(self, *,
                 problem):
        # problem: the sampling problem instance; must expose eval_log_p,
        # sample_gt, custom_eval and a .device attribute.
        self.problem = problem
        self.device = problem.device
    def generate_density_grid(self, *,
                              density_bbox,
                              density_grid_len=500):
        # Evaluate the target log-density on a regular 2D grid covering
        # density_bbox (shape (2, 2): per-axis (min, max)); evaluation is
        # batched to bound memory. Returns CPU tensors ready for plotting.
        # Note: 'grid_density' holds log-density values (eval_log_p),
        # despite the name.
        assert(density_bbox.shape[0] == 2)
        x_linspace = torch.linspace(
            density_bbox[0, 0],
            density_bbox[0, 1],
            density_grid_len, device=self.device)
        y_linspace = torch.linspace(
            density_bbox[1, 0],
            density_bbox[1, 1],
            density_grid_len, device=self.device)
        grid_x, grid_y = torch.meshgrid(x_linspace, y_linspace, indexing='ij') # (L, L) x 2
        grid = torch.stack([grid_x, grid_y], -1) # (L, L, 2)
        grid_flat = grid.reshape(-1, 2) # (L*L, 2)
        density = batch_eval_index(
            lambda inds: self.problem.eval_log_p(
                grid_flat[inds, :]),
            grid_flat.shape[0],
            no_tqdm=True,
            batch_size=10000
        )
        density = torch.cat(density, 0) # (L*L)
        density = density.reshape(grid.shape[:2]) # (L, L)
        return {
            'density_bbox': density_bbox.detach().cpu(),
            'grid_x': grid_x.detach().cpu(),
            'grid_y': grid_y.detach().cpu(),
            'grid_density': density.detach().cpu()
        }
    def run(self, *,
            samples,
            updates=None,
            save_path=None,
            include_density=False,
            metrics=[],
            num_trial=1,
            gt_samples=None,
            gt_multiplier=10,
            filter_range=-1,
            strip_last_n=-1,
            include_gt=False,
            **kwargs):
        # Assemble a result dict with the samples, optional updates /
        # ground-truth samples / density grid, and the requested metrics
        # (averaged over num_trial trials, with std when num_trial > 1).
        # NOTE(review): the mutable default ``metrics=[]`` is only read,
        # never mutated, so it is safe in practice.
        result_dict = {}
        result_dict.update({
            'samples': samples.detach().cpu(),
        })
        if updates is not None:
            result_dict.update({
                'updates': updates.detach().cpu()
            })
        if include_gt:
            # Draw fresh ground-truth samples; incompatible with a
            # user-supplied gt_samples.
            assert(gt_samples is None)
            target_samples = self.problem.sample_gt(
                gt_multiplier * samples.shape[0],
                refresh=False
            )
            result_dict['target_samples'] = target_samples.detach().cpu()
        result_dict.update(
            self.problem.custom_eval(samples)
        )
        if include_density:
            result_dict.update(self.generate_density_grid(
                density_bbox=kwargs['density_bbox']
            ))
        for metric in metrics:
            result_list = []
            for trial in range(num_trial):
                # refresh fresh ground-truth samples only when averaging
                # over multiple trials.
                tmp = compute_metric(samples,
                                     self.problem,
                                     metric=metric,
                                     gt_samples=gt_samples,
                                     refresh=(num_trial > 1),
                                     gt_multiplier=gt_multiplier,
                                     filter_range=filter_range,
                                     strip_last_n=strip_last_n)
                result_list.append(tmp)
            result_list = np.array(result_list)
            # if metric in ['KL_st', 'KL_ts', 'chi2_st', 'chi2_ts']:
            #     tmp = math.log(abs(tmp))
            result_dict[metric] = np.mean(result_list)
            if num_trial > 1:
                result_dict[metric + '_std'] = np.std(result_list)
        if save_path is not None:
            save_dict_h5(result_dict, save_path, create_dir=True)
        return result_dict
| 3,751 | 32.20354 | 91 | py |
MIED | MIED-main/mied/solvers/ksdd.py | import torch
import numpy as np
from mied.utils.batch_jacobian import compute_jacobian
from mied.utils.kernels import GaussianKernel
from mied.solvers.particle_base import ParticleBasedSolver
def compute_ksd(X, grad_log_p, kernel):
    '''
    Kernel Stein discrepancy (V-statistic) of the particle set ``X`` with
    respect to the target whose score at X is ``grad_log_p``.

    :param X: (B, D)
    :param grad_log_p: (B, D), gradient of the target log-density at X
    :param kernel: an instance of KernelBase, assumed to be symmetric
    :return: a scalar tensor, the kernel Stein discrepancy
    '''
    B, D = X.shape
    def pairs(T):
        # Flatten all ordered pairs (t_i, t_j) into two (B*B, D) tensors.
        left = T.unsqueeze(1).expand(-1, B, -1).reshape(-1, D)
        right = T.unsqueeze(0).expand(B, -1, -1).reshape(-1, D)
        return left, right
    x_i, x_j = pairs(X)
    s_i, s_j = pairs(grad_log_p)
    k_val = kernel.eval(x_i, x_j)  # (B*B,)
    grad_i_k = kernel.grad_1(x_i, x_j)  # (B*B, D)
    # Symmetric kernel: the gradient w.r.t. the second argument equals
    # grad_1 with the arguments swapped.
    grad_j_k = kernel.grad_1(x_j, x_i)  # (B*B, D)
    trace_term = kernel.div_2_grad_1(x_i, x_j)  # (B*B,)
    stein = ((s_i * s_j).sum(-1) * k_val
             + (s_i * grad_j_k).sum(-1)
             + (s_j * grad_i_k).sum(-1)
             + trace_term)
    return stein.mean()
class KSDD(ParticleBasedSolver):
    # Kernel Stein Discrepancy Descent: at every step the particles move
    # along the negative gradient of the (V-statistic) KSD estimate.
    def __init__(self,
                 sigma=1.0,
                 **kwargs):
        # sigma: bandwidth of the Gaussian kernel used in the KSD estimate;
        # remaining kwargs are forwarded to ParticleBasedSolver.
        super().__init__(**kwargs)
        self.kernel = GaussianKernel(sigma=sigma)
    def compute_update(self, i, X):
        '''
        Compute the particle update (negative KSD gradient) at iteration i.

        :param i: iteration index (unused here)
        :param X: (B, D) particle positions with requires_grad set
        :return: (B, D)
        '''
        log_p = self.problem.eval_log_p(X) # (B,)
        # Note: KSDD requires second-order derivatives of log_p.
        grad_log_p = compute_jacobian(log_p.unsqueeze(-1), X,
                                      create_graph=True, retain_graph=True)
        grad_log_p = grad_log_p.squeeze(-2) # (B, D)
        F = compute_ksd(X, grad_log_p, self.kernel)
        # Cache the scalar KSD value for logging in custom_post_step.
        self.last_F = F.item()
        grad_F = torch.autograd.grad(F, X)[0] # (B, D)
        return -grad_F
    def custom_post_step(self, i):
        # Report the most recent KSD value for logging/validation.
        return {
            'KSD': self.last_F
        }
    def get_progress_msg(self):
        # Progress string: current KSD and constraint violation.
        return 'KSD: {:6f}, G_vio: {:6f}'.format(
            self.last_F, self.projector.get_violation())
| 2,133 | 30.382353 | 75 | py |
MIED | MIED-main/mied/solvers/lmc.py | import torch
import numpy as np
from mied.solvers.particle_base import ParticleBasedSolver
from mied.utils.batch_jacobian import compute_jacobian
from mied.solvers.mirror_maps import BoxMap, BoxEntropicMap
class LMC(ParticleBasedSolver):
    '''(Mirror) Langevin Monte Carlo with a fixed step size.'''

    def __init__(self,
                 lmc_lr,
                 mirror_map,
                 **kwargs):
        # Updates replace the particles directly; the base-class optimizer
        # must therefore be plain SGD with unit learning rate so it does
        # not rescale anything.
        super().__init__(direct_update=True,
                         **kwargs)
        assert(self.optimizer_conf['cls'] == 'SGD' and
               self.optimizer_conf['lr'] == 1.0)
        self.lr = lmc_lr
        if mirror_map == 'box':
            self.mirror_map = BoxMap()
        elif mirror_map == 'box_entropic':
            self.mirror_map = BoxEntropicMap()
        else:
            raise Exception(f'Unknown mirror map: {mirror_map}')

    def compute_update(self, i, X):
        '''One mirror-Langevin step; returns the new particle positions.'''
        B, D = X.shape
        log_p = self.problem.eval_log_p(X)  # (B,)
        score = compute_jacobian(log_p.unsqueeze(-1), X,
                                 create_graph=False,
                                 retain_graph=False).squeeze(-2)  # (B, D)
        # Step in the dual (mirror) space, then map back to the primal.
        dual = self.mirror_map.nabla_phi(X)  # (B, D)
        noise = torch.randn([B, D], device=X.device)  # (B, D)
        diffusion = np.sqrt(2 * self.lr) * \
            self.mirror_map.nabla2_phi_sqrt_mul(X, noise)  # (B, D)
        dual = dual + self.lr * score + diffusion
        return self.mirror_map.nabla_phi_star(dual)
| 1,405 | 32.47619 | 90 | py |
MIED | MIED-main/mied/solvers/no_op_projector.py | import torch
from mied.solvers.projector_base import ProjectorBase
'''
A projector that performs no constraint handling: updates pass through
unchanged (useful for unconstrained problems or as a baseline).
'''
class NoOpProjector(ProjectorBase):
    '''Projector that leaves every update untouched.'''

    def __init__(self):
        pass

    def step(self, X, update, problem):
        # Pass the proposed update through unchanged.
        return update

    def get_violation(self):
        # Nothing is constrained, so nothing can be violated.
        return 0.0
| 343 | 17.105263 | 61 | py |
MIED | MIED-main/mied/solvers/particle_base.py | from abc import ABC, abstractmethod
import torch
import numpy as np
from pathlib import Path
from tqdm import trange
from mied.utils.batch_hessian import compute_hessian
class ParticleBasedSolver(ABC):
    '''
    Abstract base class for particle-based sampling solvers. Subclasses
    implement compute_update; the optimizer, checkpointing, logging and
    constraint projection all live here.

    Fixes vs. the original version:
    * post_step was defined twice; the earlier no-op definition was dead
      code (silently shadowed) and has been removed.
    * load_ckpt discarded the result of Tensor.to (which is NOT in-place),
      so restored particles never actually moved to the target device.
    '''

    def __init__(self, *,
                 problem,
                 projector,
                 num_particle,
                 precondition,
                 optimizer_conf,
                 direct_update=False,
                 val_freq,
                 ckpt_path,
                 logger_fn):
        '''
        The default parameters are set in ExperimentCoordinator class.
        :param problem: the constrained sampling problem
        :param projector: handler of the constraints
        :param num_particle: number of particles
        :param precondition: if True, precondition updates with the
            negated Hessian of log_p
        :param optimizer_conf: dict with keys 'cls', 'lr', 'beta1', 'beta2'
        :param direct_update: if True, compute_update returns the new
            particle positions directly (bypassing the optimizer)
        :param val_freq: logging frequency (in steps)
        :param ckpt_path: checkpoint path; falsy disables checkpointing
        :param logger_fn: callable receiving a dict of logged values
        '''
        self.problem = problem
        self.projector = projector
        self.precondition = precondition
        self.direct_update = direct_update
        self.val_freq = val_freq
        self.ckpt_path = ckpt_path
        self.logger_fn = logger_fn
        self.particles = self.problem.sample_prior(num_particle)
        self.particles.requires_grad_(True)
        self.optimizer_conf = optimizer_conf
        self.create_optimizer()
        self.init_global_step = 0

    def create_optimizer(self):
        '''(Re)create self.optimizer over self.particles from optimizer_conf.'''
        conf = self.optimizer_conf
        if conf['cls'] == 'Adam':
            self.optimizer = torch.optim.Adam(
                [self.particles], lr=conf['lr'],
                betas=(conf['beta1'], conf['beta2']),
            )
        elif conf['cls'] == 'LBFGS':
            self.optimizer = torch.optim.LBFGS(
                [self.particles], lr=conf['lr'])
        elif conf['cls'] == 'SGD':
            self.optimizer = torch.optim.SGD(
                [self.particles], lr=conf['lr'],
                momentum=conf['beta1']
            )
        elif conf['cls'] == 'RMSprop':
            self.optimizer = torch.optim.RMSprop(
                [self.particles], lr=conf['lr'],
                alpha=conf['beta1']
            )
        else:
            raise Exception(f'Unknown optimizer class {conf["cls"]}')

    def load_ckpt(self):
        '''
        Restore particles, optimizer and RNG state from self.ckpt_path;
        sets self.init_global_step to the restored global step.
        '''
        p = Path(self.ckpt_path)
        if not p.exists():
            print('No checkpoint file found. Use default initialization.')
            self.init_global_step = 0
            return
        ckpt = torch.load(self.ckpt_path)
        global_step = ckpt['global_step']
        # BUG FIX: Tensor.to returns a new tensor; the original discarded it.
        self.particles = ckpt['particles'].to(self.problem.device)
        self.particles.requires_grad_(True)
        # The optimizer must be rebuilt to reference the restored tensor.
        self.create_optimizer()
        self.optimizer.load_state_dict(ckpt['optimizer_state_dict'])
        print('Loading solver from {} at step {}...'.format(p, global_step))
        np.random.set_state(ckpt['np_rng_state'])
        torch.set_rng_state(ckpt['torch_rng_state'])
        self.init_global_step = global_step

    def save_ckpt(self, global_step):
        '''Serialize particles, optimizer and RNG state to self.ckpt_path.'''
        print('Saving solver at global step {}...'.format(global_step))
        p = Path(self.ckpt_path)
        p.parent.mkdir(parents=True, exist_ok=True)
        all_dict = {
            'optimizer_state_dict': self.optimizer.state_dict(),
            'particles': self.particles,
            'global_step': global_step,
            'np_rng_state': np.random.get_state(),
            'torch_rng_state': torch.get_rng_state()
        }
        torch.save(all_dict, self.ckpt_path)

    @abstractmethod
    def compute_update(self, i, X):
        '''
        :param i: current step
        :param X: particles (after reparameterization)
        :return: (B, in_dim),
            * if direct_update = False, update directions so that
              x_new = x_old + eta * x_update, where eta is modulated by
              the optimizer and the projector.
            * if direct_update = True, then x_new = x_update
        '''

    def step(self, i):
        '''Perform one solver iteration.'''
        if self.direct_update:
            # Skip using optimizer (e.g. in LMC).
            self.particles = self.compute_update(i, self.particles).detach()
            self.particles.requires_grad_(True)
            return
        self.optimizer.zero_grad()
        X = self.problem.reparametrize(self.particles)
        # Compute update w.r.t. X.
        update = self.compute_update(i, X).detach()  # (B, D)
        # Optional Newton-style preconditioning with the negated Hessian.
        if self.precondition:
            log_p_fn = lambda X: self.problem.eval_log_p(X)
            hess = compute_hessian(log_p_fn, X)  # (B, D, D)
            update = torch.linalg.lstsq(-hess, update.unsqueeze(-1)).solution
            update = update.squeeze(-1)
        # The projector may modify update (constraint handling).
        update = self.projector.step(X,
                                     update=update,
                                     problem=self.problem)
        # Manual chain rule through the reparameterization.
        if self.particles.grad is not None:
            self.particles.grad.zero_()
        X.backward(gradient=update, inputs=self.particles)
        update = self.particles.grad.detach()
        # Ascent direction -> descent gradient for the optimizer.
        self.particles.grad = -update.detach()
        self.optimizer.step()

    def get_samples(self):
        '''Obtain the resulting samples.'''
        return self.problem.reparametrize(self.particles)

    def run(self, *,
            num_itr,
            ckpt_save_freq=-1,
            post_step_fn=None):
        '''
        Main loop: step, hooks, progress message, periodic checkpoints.
        :param ckpt_save_freq: -1 means "save only at the very end"
        '''
        if ckpt_save_freq == -1:
            ckpt_save_freq = num_itr
        loop_range = trange(self.init_global_step, num_itr)
        for i in loop_range:
            self.step(i)
            if post_step_fn is not None:
                post_step_fn(i)
            self.post_step(i)
            loop_range.set_description(self.get_progress_msg())
            global_step = i + 1
            if self.ckpt_path and ckpt_save_freq:
                if global_step % ckpt_save_freq == 0:
                    self.save_ckpt(global_step)

    def post_step(self, i):
        '''Log a summary dict every val_freq steps.'''
        if self.logger_fn is not None:
            if (i + 1) % self.val_freq == 0:
                log_dict = {
                    'step': i + 1,
                    'violation': self.projector.get_violation(),
                }
                log_dict.update(self.problem.custom_post_step(self.particles))
                log_dict.update(self.custom_post_step(i))
                self.logger_fn(log_dict)

    def compute_variance(self):
        '''Mean squared distance of the samples to their mean.'''
        samples = self.get_samples()  # (B, D)
        mean = samples.mean(0)  # (D,)
        dist = (samples - mean).square().sum(-1)  # (B,)
        return dist.mean()

    def compute_min_separation(self):
        '''Smallest pairwise distance between distinct samples.'''
        samples = self.get_samples()  # (B, D)
        dist = (samples.unsqueeze(1) - samples.unsqueeze(0)).square().sum(-1)
        val, _ = torch.topk(dist, 2, largest=False, dim=-1)  # (B, 2)
        return (val[:, 1].min() + 1e-8).sqrt()

    def get_progress_msg(self):
        return 'G_vio: {:6f}'.format(self.projector.get_violation())

    def custom_post_step(self, i):
        '''Subclass hook: extra key/value pairs to log.'''
        return {}
| 7,284 | 31.092511 | 86 | py |
MIED | MIED-main/mied/solvers/dynamic_barrier.py | import torch
from mied.solvers.projector_base import ProjectorBase
from mied.utils.batch_jacobian import compute_jacobian
from mied.utils.proj_polyhedra import proj_polyhedra
'''
Handle multiple constraints by projecting the gradients using
Dykstra's algorithm.
'''
class DynamicBarrier(ProjectorBase):
    '''
    Dynamic-barrier constraint handler: equality constraints are squared
    (and optionally merged into a single term) to become inequality
    constraints, and the proposed update is projected onto the polyhedron
    of directions that decrease each constraint fast enough.
    '''

    def __init__(self, *,
                 alpha_db=1.0,
                 merge_eq=True,
                 max_proj_itr=20):
        self.alpha = alpha_db
        self.merge_eq = merge_eq
        self.max_proj_itr = max_proj_itr
        self.violation = 0.0

    def step(self, X, update, problem):
        ineq = problem.eval_ineq(X)  # (B, N_ineq) or None
        eq = problem.eval_eq(X)  # (B, N_eq) or None
        if ineq is None and eq is None:
            # Unconstrained problem: nothing to project.
            return update
        num = X.shape[0]
        if ineq is None:
            ineq = torch.zeros([num, 0]).to(X)
        if eq is None:
            eq = torch.zeros([num, 0]).to(X)
        # Turn h(x) = 0 into h(x)^2 <= 0, optionally merged into one row.
        if self.merge_eq:
            ineq = torch.cat([ineq, eq.square().sum(-1, keepdim=True)], -1)
        else:
            ineq = torch.cat([ineq, eq.square()], -1)
        self.violation = ineq.relu().sum()
        grad_ineq = compute_jacobian(ineq, X, create_graph=True,
                                     retain_graph=True)  # (B, N_ineq, D)
        bound = -self.alpha * ineq  # (B, N_ineq)
        # Constraints are grad_ineq^T v <= bound.
        return proj_polyhedra(update, grad_ineq, bound,
                              max_num_itr=self.max_proj_itr)  # (B, D)

    def get_violation(self):
        return self.violation
| 1,597 | 29.150943 | 73 | py |
MIED | MIED-main/mied/solvers/mirror_maps.py | from abc import ABC, abstractmethod
import torch
def safe_log(x):
    '''Numerically safe log: offsets x by a tiny constant so log(0)
    stays finite instead of producing -inf.'''
    return (x + 1e-32).log()
class MirrorMapBase(ABC):
    '''Interface for mirror maps phi used by mirror descent / mirror LMC.'''

    @abstractmethod
    def phi(self, theta):
        '''Mirror potential evaluated at theta.'''

    @abstractmethod
    def nabla_phi(self, theta):
        '''Gradient of phi (primal -> dual map).'''

    @abstractmethod
    def nabla_phi_star(self, eta):
        '''Gradient of the convex conjugate of phi (dual -> primal map).'''

    @abstractmethod
    def nabla2_phi_sqrt_mul(self, theta, rhs):
        '''Multiply rhs by the square root of the Hessian of phi at theta.'''
class BoxMap(MirrorMapBase):
    '''Log-barrier mirror map for the box [-1, 1]^d (applied per-coordinate).'''

    def phi(self, theta):
        return (-safe_log(1-theta) - safe_log(1+theta)).sum(-1)

    def nabla_phi(self, theta):
        return 1 / (1 - theta) - 1 / (1 + theta)

    def nabla_phi_star(self, eta):
        return ((1 + eta.square()).sqrt() - 1) / eta

    def nabla2_phi_sqrt_mul(self, theta, rhs):
        # Hessian is diagonal, so multiply by the sqrt of its diagonal.
        hess_diag = 1 / (1-theta).square() + 1 / (1+theta).square()
        return hess_diag.sqrt() * rhs
class BoxEntropicMap(MirrorMapBase):
    '''Entropic mirror map for the box [-1, 1]^d (applied per-coordinate).'''

    def phi(self, theta):
        return (1 + theta) * safe_log(1 + theta) + (1 - theta) * safe_log(1 - theta)

    def nabla_phi(self, theta):
        return safe_log(1 + theta) - safe_log(1 - theta)

    def nabla_phi_star(self, eta):
        return 1 - 2 / (eta.exp() + 1)

    def nabla2_phi_sqrt_mul(self, theta, rhs):
        # Hessian is diagonal, so multiply by the sqrt of its diagonal.
        hess_diag = 1 / (1-theta) + 1 / (1+theta)
        return hess_diag.sqrt() * rhs
| 1,416 | 22.616667 | 84 | py |
MIED | MIED-main/mied/solvers/svgd.py | import torch
import numpy as np
from mied.solvers.particle_base import ParticleBasedSolver
from mied.utils.batch_jacobian import compute_jacobian
def svgd_update(P, grad_log_p, kernel='gaussian', kernel_h=-1,
                riesz_s=-1, riesz_eps=1e-4):
    '''
    SVGD update direction.
    :param P: (B, D) particle positions
    :param grad_log_p: (B, D) score evaluated at each particle
    :param kernel: 'gaussian' or 'riesz'
    :param kernel_h: Gaussian bandwidth; < 0 uses the median heuristic
    :param riesz_s: Riesz exponent; < 0 defaults to D + 1
    :param riesz_eps: regularizer inside the Riesz kernel
    :return: update direction, (B, D)
    '''
    assert(not P.isnan().any())
    assert(not grad_log_p.isnan().any())
    num = P.shape[0]
    pairwise = P.unsqueeze(1) - P.unsqueeze(0)  # (B, B, D)
    sq_dist = pairwise.square().sum(-1)  # (B, B)
    if kernel == 'gaussian':
        if kernel_h < 0:
            # Median heuristic (median of the squared distances).
            med = sq_dist.reshape(-1).median()
            bandwidth = med / (np.log(num) + 1e-6)
        else:
            bandwidth = kernel_h
        K = torch.exp(- sq_dist / (bandwidth + 1e-8))  # (B, B)
        grad_K = 2 * K.unsqueeze(-1) * pairwise / (bandwidth + 1e-8)  # (B, B, D)
    else:
        assert(kernel == 'riesz')
        if riesz_s < 0:
            riesz_s = P.shape[-1] + 1.0
        K = torch.pow(sq_dist + riesz_eps, -riesz_s / 2)
        grad_K = (riesz_s/2) * torch.pow(
            sq_dist + riesz_eps, -riesz_s / 2 - 1).unsqueeze(-1) * 2 * pairwise
    # phi(x_i) = 1/n * \sum_j k(x_i, x_j) grad_log_p(x_j) + grad_K_x_j(x_i, x_j)
    drift = K.unsqueeze(-1) * grad_log_p.unsqueeze(0) + grad_K  # (B, B, D)
    drift = drift.mean(1)  # (B, D)
    assert(not drift.isnan().any())
    return drift
class SVGD(ParticleBasedSolver):
    '''Stein variational gradient descent.'''

    def __init__(self,
                 kernel_h=-1,
                 kernel='gaussian',
                 **kwargs):
        super().__init__(**kwargs)
        self.kernel = kernel
        self.kernel_h = kernel_h

    def compute_update(self, i, X):
        log_p = self.problem.eval_log_p(X)  # (B,)
        score = compute_jacobian(log_p.unsqueeze(-1), X,
                                 create_graph=False,
                                 retain_graph=False).squeeze(-2)
        direction = svgd_update(X, score, kernel=self.kernel,
                                kernel_h=self.kernel_h)
        # Bookkeeping for logging / progress display.
        self.last_update_norm = direction.square().sum().item()
        pairwise = X.unsqueeze(1) - X.unsqueeze(0)  # (B, B, D)
        sq_dist = pairwise.square().sum(-1)  # (B, B)
        self.last_med_dist = sq_dist.reshape(-1).median().sqrt().item()
        return direction

    def custom_post_step(self, i):
        return {
            'Update norm': self.last_update_norm,
            'Median': self.last_med_dist,
        }

    def get_progress_msg(self):
        return 'Norm: {:6f}, G_vio: {:6f}'.format(
            self.last_update_norm, self.projector.get_violation())
| 2,656 | 29.193182 | 78 | py |
MIED | MIED-main/mied/solvers/projector_base.py | from abc import ABC, abstractmethod
import torch
class ProjectorBase(ABC):
    '''
    Interface for constraint handlers ("projectors").

    BUG FIX: the class now inherits from ABC. Without the ABCMeta
    metaclass, @abstractmethod decorators have no effect and the base
    class could be instantiated silently.
    '''

    def __init__(self):
        pass

    @abstractmethod
    def step(self, particles, update_grad, problem, optimizer):
        '''
        Update particles given update directions update_grad while
        projecting to the constraints given by the problem.
        NOTE(review): concrete subclasses implement the signature
        (X, update, problem); Python does not enforce abstract
        signatures, but the two should eventually be unified.
        '''

    @abstractmethod
    def get_violation(self):
        '''Return the most recently computed total constraint violation.'''
| 422 | 19.142857 | 77 | py |
MIED | MIED-main/mied/solvers/mied.py | import torch
import numpy as np
import math
from mied.solvers.particle_base import ParticleBasedSolver
def log_exp_diff(a, b):
    '''
    Compute log|e^a - e^b| * sign(a-b) stably by factoring out the
    larger exponent.
    :param a, b: torch scalars
    '''
    if a > b:
        # log(e^a - e^b) = a + log(1 - e^{b-a})
        return a + torch.log(1 - torch.exp(b - a))
    # a <= b: magnitude is log(e^b - e^a); sign flips.
    return -(b + torch.log(1 - torch.exp(a - b)))
class MIED(ParticleBasedSolver):
    '''
    Mollified interaction energy descent on
    E(\mu) = \iint \phi_\eps(x-y) (p(x)p(y))^{-\alpha} \dd\mu(x)\dd\mu(y).

    BUG FIX: the pair-selection mask in compute_energy was inverted for
    include_diag == 'ignore' (it kept ONLY the diagonal) and the
    documented 'diag_only' option was never handled.
    '''

    def __init__(self, *,
                 kernel,
                 eps,
                 riesz_s,
                 alpha_mied,
                 include_diag,
                 diag_mul=1.3,
                 **kwargs):
        '''
        :param kernel: ['riesz', 'gaussian', 'laplace']
        :param eps: epsilon in the kernel
        :param riesz_s: Riesz exponent; < 0 picks the hypersingular default
        :param alpha_mied: density exponent alpha
        :param include_diag: how diagonal (i == j) pairs enter the energy:
            'ignore' (off-diagonal only), 'diag_only' (diagonal only),
            'nnd_scale' (all pairs, diagonal replaced by a scaled
            nearest-neighbor distance), anything else: all pairs.
        :param diag_mul: scale factor used by the 'nnd_scale' diagonal term
        '''
        super().__init__(**kwargs)
        self.device = self.problem.device
        if kernel in ['gaussian', 'laplace']:
            assert(eps >= 1e-6)
        embed_dim = self.problem.get_embed_dim()
        self.embed_dim = embed_dim
        self.alpha = alpha_mied
        if kernel == 'riesz' and riesz_s < 0:
            # Requirement for the hypersingular Riesz energy.
            assert(self.alpha >= 0.5)
            riesz_s = 2 * self.alpha * embed_dim + 1e-4
        self.kernel = kernel
        self.eps = eps
        self.riesz_s = riesz_s
        self.include_diag = include_diag
        self.diag_mul = diag_mul

    def compute_energy(self, X):
        '''
        :param X: (B, D)
        :return: a scalar, the weighted interaction energy (in log domain)
        '''
        log_p = self.problem.eval_log_p(X)  # (B,)
        B = X.shape[0]
        diff = X.unsqueeze(1) - X.unsqueeze(0)  # (B, B, D)
        diff_norm_sqr = diff.square().sum(-1)  # (B, B)
        if self.include_diag == 'nnd_scale':
            # Replace the zero diagonal by the detached squared
            # nearest-neighbor distance, scaled so the diagonal term is
            # \phi(h_i / (1.3 d)^{1/d}).
            vals, _ = torch.topk(diff_norm_sqr, 2, dim=-1, largest=False)
            vals = vals.detach()[:, 1]
            vals = vals / math.pow(self.diag_mul * self.embed_dim,
                                   2.0 / self.embed_dim)
            diff_norm_sqr = diff_norm_sqr + torch.diag(vals)
        if self.kernel == 'gaussian':
            # \phi(x-y) = \exp(-||x-y||^2/(2 * eps))
            tmp = -diff_norm_sqr / (2 * self.eps)
        elif self.kernel == 'laplace':
            tmp = -(diff_norm_sqr + 1e-10).sqrt() / self.eps
        else:
            assert(self.kernel == 'riesz')
            log_dist_sqr = (diff_norm_sqr + self.eps).log()  # (B, B)
            tmp = log_dist_sqr * -self.riesz_s / 2
        tmp2 = (log_p.unsqueeze(1) + log_p.unsqueeze(0))  # (B, B)
        tmp2 = tmp2 * -self.alpha  # (B, B)
        tmp = tmp + tmp2
        # Select which (i, j) pairs contribute to the logsumexp.
        diag_mask = torch.eye(B, device=X.device, dtype=torch.bool)  # (B, B)
        if self.include_diag == 'ignore':
            mask = torch.logical_not(diag_mask)  # off-diagonal pairs only
        elif self.include_diag == 'diag_only':
            mask = diag_mask  # diagonal terms only
        else:
            mask = torch.ones_like(diag_mask)  # all pairs
        mask = mask.reshape(-1)
        tmp = tmp.reshape(-1)
        tmp = torch.masked_select(tmp, mask)
        energy = torch.logsumexp(tmp, 0)  # scalar
        return energy

    def step(self, i):
        if self.optimizer_conf['cls'] == 'LBFGS':
            # LBFGS needs a closure that re-evaluates the loss.
            def closure():
                self.optimizer.zero_grad()
                X = self.problem.reparametrize(self.particles)
                F = self.compute_energy(X)
                self.last_F = F.item()
                F.backward()
                return F
            self.optimizer.step(closure)
        else:
            super().step(i)

    def compute_update(self, i, X):
        '''
        :return: (B, D) descent direction (negative energy gradient)
        '''
        F = self.compute_energy(X)  # scalar
        self.last_F = F.item()
        grad_F = torch.autograd.grad(F, X)[0]  # (B, D)
        return -grad_F

    def custom_post_step(self, i):
        return {
            'Riesz energy': self.last_F
        }

    def get_progress_msg(self):
        return 'E: {:6f}, G_vio: {:6f}'.format(self.last_F,
                                               self.projector.get_violation())
| 4,561 | 29.824324 | 88 | py |
MIED | MIED-main/mied/solvers/ipd.py | import torch
import numpy as np
from mied.solvers.particle_base import ParticleBasedSolver
from mied.utils.batch_jacobian import compute_jacobian
'''
Independent particle descent, a dumb baseline.
'''
class IPD(ParticleBasedSolver):
    '''
    Independent particle descent, a dumb baseline: each particle ascends
    its own log-density with no interaction between particles.
    '''

    def __init__(self,
                 **kwargs):
        super().__init__(**kwargs)

    def compute_update(self, i, X):
        log_p = self.problem.eval_log_p(X)  # (B,)
        self.last_log_p = log_p.mean()
        score = compute_jacobian(log_p.unsqueeze(-1), X,
                                 create_graph=False,
                                 retain_graph=False)
        return score.squeeze(1)

    def get_progress_msg(self):
        return 'log_p: {:6f}, G_vio: {:6f}'.format(
            self.last_log_p, self.projector.get_violation())
| 798 | 25.633333 | 77 | py |
MIED | MIED-main/mied/problems/problem_base.py | from abc import ABC, abstractmethod
import torch
from mied.utils.batch_jacobian import compute_jacobian
class ProblemBase(ABC):
    '''
    A (possibly constrained) sampling problem with unnormalized density
    p(x) and constraints g(x) <= 0, h(x) = 0.
    '''

    def __init__(self, *,
                 device,
                 in_dim):
        '''
        :param device: used to generate ambient samples
        :param in_dim: ambient dimension
        '''
        self.device = device
        self.in_dim = in_dim

    @abstractmethod
    def sample_prior(self, batch_size):
        '''Sample prior particles (before applying reparameterization).'''

    @abstractmethod
    def get_embed_dim(self):
        '''Intrinsic (embedded) dimension of the support.'''

    @abstractmethod
    def eval_log_p(self, P):
        '''
        Evaluate the log density at P of shape (batch_size, in_dim).
        - For sampling, the normalizing constant can be ignored.
        - For evaluation, it should be included if possible.
        :return: (batch_size,)
        '''

    def sample_gt(self, batch_size, refresh):
        '''Override when the problem can be sampled (for evaluation).'''
        return None

    def reparametrize(self, Z):
        '''
        Map latent particles to ambient positions.
        :param Z: (B, Z)
        :return: (B, D)
        '''
        return Z

    def eval_eq(self, P):
        '''
        Equality constraints h(P) for P of shape (batch_size, in_dim).
        :return: (batch_size, num_eq) or None
        '''
        return None

    def eval_ineq(self, P):
        '''
        Inequality constraints g(P) for P of shape (batch_size, in_dim).
        :return: (batch_size, num_ineq) or None
        '''
        return None

    def custom_eval(self, samples):
        '''Subclass hook: extra evaluation metrics.'''
        return {}

    def custom_post_step(self, samples):
        '''Subclass hook: extra values to log each step.'''
        return {}
| 1,956 | 20.988764 | 82 | py |
MIED | MIED-main/mied/problems/logistics.py | import torch
import torch.distributions
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
from tqdm import tqdm
import scipy.io
from mied.problems.problem_base import ProblemBase
class BayesianLogistics(ProblemBase):
    '''
    Bayesian logistic regression posterior. Each particle is
    [W, log(alpha)]: weights W with a Gaussian prior whose precision
    alpha has an exponential hyperprior.

    BUG FIX: the covtype mini-batch path called the Python-2-style
    iterator method ``.next()``; Python 3 iterators implement
    ``__next__`` and must be advanced with the ``next()`` builtin.
    '''

    def __init__(self, *,
                 device,
                 data_path,
                 data_name='banana',
                 exp_lambda=0.01,
                 split_seed=42,
                 batch_size=50):
        '''
        :param data_path: .mat file containing the dataset
        :param data_name: dataset key inside the .mat file; 'covtype'
            switches to mini-batched training
        :param exp_lambda: rate of the exponential hyperprior on alpha
        :param split_seed: seed for the 80/20 train/test split
        :param batch_size: mini-batch size (train for covtype; always test)
        '''
        self.exp_lambda = exp_lambda
        self.exp_dist = torch.distributions.Exponential(exp_lambda)
        data = scipy.io.loadmat(data_path)
        if data_name == 'covtype':
            X = torch.from_numpy(data['covtype'][:, 1:])
            Y = torch.from_numpy(data['covtype'][:, 0])
            Y[Y == 2] = 0
            self.use_batch = True
        else:
            X = torch.from_numpy(data[data_name]['x'][0][0])  # NxM
            Y = torch.from_numpy(data[data_name]['t'][0][0])  # Nx1
            Y = Y.squeeze(-1)  # N
            Y[Y == -1] = 0
            self.use_batch = False
        dataset = TensorDataset(X, Y)
        N, self.M = X.shape
        N_train = int(N * 0.8)
        N_test = N - N_train
        self.train_dset, self.test_dset = torch.utils.data.random_split(
            dataset, [N_train, N_test],
            generator=torch.Generator().manual_seed(split_seed))
        # Always use batch for test.
        self.test_dl = DataLoader(self.test_dset,
                                  batch_size=batch_size,
                                  shuffle=False)
        if self.use_batch:
            self.train_dl = DataLoader(self.train_dset,
                                       batch_size=batch_size,
                                       shuffle=True)
            self.train_dl_itr = iter(self.train_dl)
        else:
            # Otherwise put everything onto device.
            self.train_X, self.train_Y = self.train_dset[:]
            self.train_X = self.train_X.to(device)
            self.train_Y = self.train_Y.to(device)
        self.dim = self.M + 1
        super().__init__(device=device,
                         in_dim=self.dim)

    def mcmc(self, num_warmup, num_sample, *,
             log_file=None):
        '''
        Generate posterior samples using MCMC (NUTS via numpyro) to serve
        as ground truth; returns particles of shape (num_sample, M + 1).
        '''
        import jax
        import jax.numpy as jnp
        import numpyro
        import numpyro.distributions as dist
        from numpyro.infer import MCMC, NUTS

        def model(data, labels):
            alpha = numpyro.sample('alpha', dist.Exponential(self.exp_lambda))
            W = numpyro.sample('W', dist.Normal(jnp.zeros(self.M), 1.0 / alpha))
            logits = jnp.sum(W * data, axis=-1)
            return numpyro.sample('obs', dist.Bernoulli(logits=logits), obs=labels)

        data, labels = self.train_dset[:]
        data = data.numpy()
        labels = labels.numpy()
        mcmc = MCMC(NUTS(model=model), num_warmup=num_warmup,
                    num_samples=num_sample)
        mcmc.run(jax.random.PRNGKey(0), data, labels)
        from contextlib import ExitStack, redirect_stdout
        samples = mcmc.get_samples()
        W = torch.from_numpy(np.array(samples['W']))
        alpha = torch.from_numpy(np.array(samples['alpha'])).log()
        P = torch.cat([W, alpha.unsqueeze(-1)], -1)
        test_acc = self.eval_test_accurarcy(P.to(self.device))
        with ExitStack() as stack:
            if log_file is not None:
                f = stack.enter_context(open(log_file, 'w'))
                stack.enter_context(redirect_stdout(f))
            mcmc.print_summary()
            print('MCMC test accuracy: {}'.format(test_acc))
        return P

    def sample_prior(self, batch_size):
        '''Sample [W, log(alpha)] from the hierarchical prior.'''
        alpha = self.exp_dist.sample([batch_size, 1]).to(self.device)
        W = torch.randn([batch_size, self.M],
                        device=self.device) / alpha.sqrt()
        return torch.cat([W, alpha.log()], -1)

    def get_embed_dim(self):
        return self.dim

    def _next_train_batch(self):
        '''Fetch the next training mini-batch, restarting the DataLoader
        iterator when it is exhausted.'''
        try:
            return next(self.train_dl_itr)
        except StopIteration:
            self.train_dl_itr = iter(self.train_dl)
            return next(self.train_dl_itr)

    def eval_log_p(self, P):
        '''
        Unnormalized log posterior at particles P = [W, log(alpha)].
        :param P: (B, M + 1)
        :return: (B,)
        '''
        if self.use_batch:
            X, Y = self._next_train_batch()
            X = X.to(self.device)
            Y = Y.to(self.device)
        else:
            X, Y = (self.train_X, self.train_Y)
        W = P[:, :-1]  # BxM
        alpha = P[:, -1].exp()  # B
        # Gaussian prior on W (precision alpha) + exponential hyperprior.
        # NOTE(review): the (M/2) log(alpha) normalizer of p(W | alpha) and
        # the log-Jacobian of the log(alpha) parameterization are omitted
        # here — confirm this matches the intended target density.
        log_p = -(W.square().sum(-1) * alpha / 2)
        log_p = log_p - self.exp_lambda * alpha
        out_logit = (W.unsqueeze(1) * X.unsqueeze(0)).sum(-1)  # BxN
        log_p_data = -F.binary_cross_entropy_with_logits(
            out_logit,
            Y.unsqueeze(0).expand(W.shape[0], -1).float(),
            reduction='none')
        log_p_data = log_p_data.sum(-1)  # B
        log_p = log_p + log_p_data
        return log_p

    def eval_test_accurarcy(self, P):
        # NOTE: the historical misspelling ("accurarcy") is kept for
        # backward compatibility with existing callers.
        W = P[:, :-1]  # BxM
        total_correct = 0
        total_test = 0
        for test_batch in self.test_dl:
            X, Y = test_batch
            X = X.to(self.device)  # NxM
            Y = Y.to(self.device)  # N
            pred = (W.unsqueeze(1) * X.unsqueeze(0)).sum(-1)  # BxN
            pred = pred.sigmoid()  # BxN
            # We average pred from all particles before computing accuracy.
            total_correct += (Y == (pred.mean(0) > 0.5)).sum()
            total_test += X.shape[0]
        return total_correct / total_test

    def custom_post_step(self, P):
        return {
            'train_log_p': self.eval_log_p(P).mean(),
            'test_acc': self.eval_test_accurarcy(P)
        }
| 5,753 | 32.649123 | 83 | py |
MIED | MIED-main/mied/problems/analytical_problems.py | import torch
import numpy as np
import math
from abc import ABC, abstractmethod
'''
bbox holds one [low, high] range per ambient variable (i.e. dim rows);
note that dim is the ambient dimension, which is not necessarily the
same as the intrinsic (embedded) dimension.
'''
def sample_simplex(dim, batch_size, device):
    '''Draw batch_size points from a Dirichlet(5, ..., 5) distribution on
    the (dim-1)-simplex and move them to device.'''
    conc = torch.ones([dim]) * 5
    draws = np.random.dirichlet(conc, size=batch_size)
    return torch.from_numpy(draws).float().to(device)
'''
Below are reparameterization options.
reparam_fn always come together with eq_fn and ineq_fn,
and prior_sample_fn.
'''
def id_reparam(dim):
    '''Identity reparameterization on R^dim with a standard normal prior.'''
    def sample_prior(B, device):
        return torch.randn([B, dim], device=device)
    return {
        'eq_fn': None,
        'ineq_fn': None,
        'reparam_fn': None,
        'prior_sample_fn': sample_prior,
    }
def box_id_reparam(dim):
    '''Box [-1, 1]^dim described by explicit inequality constraints.'''
    def box_ineq(X):
        return torch.cat([-1 - X, X - 1], -1)
    def sample_prior(B, device):
        return torch.rand([B, dim], device=device) - 0.5
    return {
        'eq_fn': None,
        'ineq_fn': box_ineq,
        'reparam_fn': None,
        'prior_sample_fn': sample_prior,
    }
def box_tanh_reparam(dim):
    '''Box [-1, 1]^dim via a tanh reparameterization of an unconstrained
    latent variable.'''
    def reparam(Z):
        return torch.tanh(Z)
    def sample_prior(B, device):
        return torch.atanh(torch.rand([B, dim], device=device) - 0.5)
    return {
        'eq_fn': None,
        'ineq_fn': None,
        'reparam_fn': reparam,
        'prior_sample_fn': sample_prior,
    }
def box_mirror_reparam(dim, entropic=False):
    '''Box [-1, 1]^dim via a mirror map: latent particles live in the dual
    space and are pulled back through the conjugate gradient map.'''
    if entropic:
        def to_dual(X):
            return torch.log(1 + X) - torch.log(1 - X)
        def to_primal(Z):
            return 1 - 2 / (Z.exp() + 1)
    else:
        def to_dual(X):
            return (1 / (1 - X)) - (1 / (1 + X))
        def to_primal(Z):
            return ((1 + Z**2).sqrt() - 1) / Z
    def sample_prior(B, device):
        return to_dual(torch.rand([B, dim], device=device) - 0.5)
    return {
        'eq_fn': None,
        'ineq_fn': None,
        'reparam_fn': to_primal,
        'prior_sample_fn': sample_prior,
    }
def sphere_reparam(dim):
    '''Unit sphere in R^dim via an equality constraint.'''
    def sphere_eq(X):
        return X.square().sum(-1, keepdim=True) - 1
    def sample_prior(B, device):
        return torch.randn([B, dim], device=device)
    return {
        'eq_fn': sphere_eq,
        'ineq_fn': None,
        'reparam_fn': None,
        'prior_sample_fn': sample_prior,
    }
def heart_id_reparam():
    '''Interior of the heart curve (x^2 + y^2 - 1)^3 - x^2 y^3 <= 0 in 2D.'''
    def heart_ineq(X):
        x = X[:, 0]
        y = X[:, 1]
        return ((x**2 + y**2 - 1)**3 - (x**2) * (y**3)).unsqueeze(-1)
    def sample_prior(B, device):
        return torch.rand([B, 2], device=device)
    return {
        'eq_fn': None,
        'ineq_fn': heart_ineq,
        'reparam_fn': None,
        'prior_sample_fn': sample_prior,
    }
def period_id_reparam():
    '''Periodic region {(cos(3*pi*x) + cos(3*pi*y))^2 <= 0.3} intersected
    with the box [-1, 1]^2.'''
    def period_ineq(X):
        wave = torch.cos(3 * np.pi * X[:, 0]) + torch.cos(3 * np.pi * X[:, 1])
        return torch.cat([
            (wave.square() - 0.3).unsqueeze(-1),
            -1 - X,
            X - 1], -1)
    def sample_prior(B, device):
        return 0.5 + 0.5 * torch.rand([B, 2], device=device)
    return {
        'eq_fn': None,
        'ineq_fn': period_ineq,
        'reparam_fn': None,
        'prior_sample_fn': sample_prior,
    }
def simplex_id_reparam(dim):
    '''Probability simplex via an explicit sum-to-one equality and
    nonnegativity inequalities.'''
    def simplex_eq(X):
        return X.sum(-1, keepdim=True) - 1
    def simplex_ineq(X):
        return -X
    def sample_prior(B, device):
        return sample_simplex(dim, B, device=device)
    return {
        'eq_fn': simplex_eq,
        'ineq_fn': simplex_ineq,
        'reparam_fn': None,
        'prior_sample_fn': sample_prior,
    }
def simplex_pos_sum_one_reparam(dim):
    '''Simplex via squaring the latent (ensures nonnegativity) combined
    with a sum-to-one equality constraint.'''
    def simplex_eq(X):
        return X.sum(-1, keepdim=True) - 1
    def reparam(Z):
        return Z.square()
    def sample_prior(b, device):
        return sample_simplex(dim, b, device=device).sqrt()
    return {
        'eq_fn': simplex_eq,
        'ineq_fn': None,
        'reparam_fn': reparam,
        'prior_sample_fn': sample_prior,
    }
def simplex_pos_sum_one_ineq_reparam(dim):
    '''Simplex via squaring the latent, with the sum-to-one condition
    expressed as a pair of opposing inequalities.'''
    def simplex_ineq(X):
        total = X.sum(-1)
        return torch.stack([total - 1, 1 - total], -1)
    def reparam(Z):
        return Z.square()
    def sample_prior(B, device):
        return sample_simplex(dim, B, device=device).sqrt()
    return {
        'eq_fn': None,
        'ineq_fn': simplex_ineq,
        'reparam_fn': reparam,
        'prior_sample_fn': sample_prior,
    }
def simplex_softmax_reparam(dim):
    '''Simplex via a softmax reparameterization of an unconstrained latent.'''
    def reparam(Z):
        return torch.nn.functional.softmax(Z, dim=-1)
    def sample_prior(B, device):
        return sample_simplex(dim, B, device=device).log()
    return {
        'eq_fn': None,
        'ineq_fn': None,
        'reparam_fn': reparam,
        'prior_sample_fn': sample_prior,
    }
# def cube_constraint(dim, bound=[0, 1]):
# return {
# 'ineq_fn': lambda X: torch.cat([bound[0] - X, X - bound[1]], -1),
# 'bbox': torch.tensor([bound] * dim),
# 'embed_dim': dim,
# # 'reparam_dict': {
# # 'reparam_fn': lambda Z: torch.sigmoid(Z) * (bound[1] - bound[0]) + bound[0],
# # 'prior_sample_fn': lambda B, device: torch.randn(
# # [B, dim], device=device),
# # }
# }
# def sphere_constraint(dim):
# return {
# 'eq_fn': lambda X: X.square().sum(-1, keepdim=True) - 1,
# 'bbox': torch.tensor([[-1, 1]] * dim),
# 'embed_dim': dim - 1,
# }
# def free_constraint(dim, r=3):
# return {
# 'bbox': torch.tensor([[-r, r]] * dim),
# 'embed_dim': dim,
# # 'prior_sample_fn': prior_sample_fn,
# }
# def ellipse_constraint(dim):
# assert(dim == 2)
# return {
# 'eq_fn': lambda X: (X[..., 0].square() / 9 +
# X[..., 1].square() / 1).unsqueeze(-1) - 1,
# 'bbox': torch.tensor([[-4, 4], [-2, 2]]),
# 'embed_dim': dim - 1,
# }
class DistributionBase(ABC):
    '''A target distribution with a known (unnormalized) log density, an
    optional rejection region, and optional ground-truth sampling.'''

    def __init__(self, *, dim, embed_dim, bbox, device):
        '''
        :param dim: ambient dimension
        :param embed_dim: intrinsic dimension of the support
        :param bbox: per-dimension bounding box
        :param device: torch device for generated samples
        '''
        self.dim = dim
        self.embed_dim = embed_dim
        self.bbox = bbox
        self.device = device
        self.gt_samples = None  # cache used by sample_gt

    @abstractmethod
    def log_p(self, X):
        '''
        :param X: (B, D)
        :return: (B,)
        '''

    def get_reject_ineq_fn(self):
        '''Optional rejection constraint: a function (B, D) -> (B, K).'''
        return None

    def sample_gt(self, B, refresh):
        '''Return B ground-truth samples, rejection-sampling against the
        inequality constraints; cached unless refresh is True.'''
        if not refresh and self.gt_samples is not None \
                and self.gt_samples.shape[0] == B:
            return self.gt_samples
        # Repeatedly oversample and reject until B samples satisfy the
        # inequality constraints.
        ineq_fn = self.get_reject_ineq_fn()
        collected = []
        needed = B
        while needed > 0:
            batch = self.sample_gt_impl(2 * needed)  # (2*needed, D)
            if batch is None:
                return None
            if ineq_fn is not None:
                keep = (ineq_fn(batch) <= 0).all(-1)  # (2*needed,)
            else:
                keep = torch.ones([batch.shape[0]],
                                  device=batch.device, dtype=torch.bool)
            batch = batch[keep, :]
            take = min(needed, batch.shape[0])
            needed -= take
            collected.append(batch[:take])
        self.gt_samples = torch.cat(collected, 0)  # (B, D)
        assert(self.gt_samples.shape[0] == B)
        assert(self.gt_samples is not None)
        return self.gt_samples

    def sample_gt_impl(self, B):
        '''
        Override when the ground truth can be sampled (for evaluation).
        :return: (B, D) or None
        '''
        return None
class Dirichlet(DistributionBase):
    '''A sparse Dirichlet target on the simplex, with most mass on the
    first three coordinates when dim >= 3.'''

    def __init__(self, dim, *, device):
        super().__init__(dim=dim, embed_dim=dim - 1,
                         bbox=torch.tensor([[0, 1]] * dim),
                         device=device)
        conc = np.ones([dim], dtype=np.float64) * 0.1
        if conc.shape[0] >= 3:
            # Concentrate mass on the first three coordinates.
            conc[:3] += np.array([90., 5., 5.])
        self.alpha = torch.from_numpy(conc).float().to(device)

    def log_p(self, X):
        assert(self.alpha.shape[0] == X.shape[-1])
        # NOTE(review): (alpha - 1)/2 * log(X^2) corresponds to the
        # Dirichlet density evaluated through a squared parameterization;
        # confirm against the reparameterization in use.
        return ((self.alpha - 1) / 2 * (X.square() + 1e-6).log()).sum(-1)

    def sample_gt_impl(self, B):
        draws = np.random.dirichlet(self.alpha.cpu().detach(), size=B)
        return torch.from_numpy(draws).float().to(self.device)
class QuadraticFullDim(DistributionBase):
    '''A full-rank Gaussian with a random (seeded) covariance normalized
    to unit determinant; seed=None gives the standard Gaussian.'''

    def __init__(self, dim, *,
                 device,
                 ineq_fn=None,
                 seed=123):
        super().__init__(dim=dim, embed_dim=dim,
                         bbox=torch.tensor([[-2, 2]] * dim, device=device),
                         device=device)
        self.ineq_fn = ineq_fn
        if seed is None:
            cov = np.eye(dim)  # standard Gaussian
        else:
            rng = np.random.RandomState(seed)
            root = rng.uniform(-1.0, 1.0, size=(dim, dim))
            cov = np.linalg.inv(root @ root.T)
            cov /= np.linalg.det(cov)  # normalize to unit determinant
        self.A = torch.from_numpy(cov).float().to(device)
        from torch.distributions.multivariate_normal import MultivariateNormal
        self.dist = MultivariateNormal(loc=torch.zeros([dim], device=device),
                                       covariance_matrix=self.A)

    def log_p(self, X):
        assert(self.A.shape[0] == X.shape[-1])
        return self.dist.log_prob(X)

    def sample_gt_impl(self, B):
        return self.dist.sample([B])

    def get_reject_ineq_fn(self):
        return self.ineq_fn
class StudentTFullDim(DistributionBase):
    '''A multivariate Student-t target: i.i.d. Student-t coordinates
    transformed by a seeded unit-determinant matrix A.'''

    def __init__(self, dim, *,
                 device,
                 ineq_fn=None,
                 df=2.0,
                 seed=50):
        super().__init__(dim=dim, embed_dim=dim,
                         bbox=torch.tensor([[-5, 5]] * dim, device=device),
                         device=device)
        self.ineq_fn = ineq_fn
        if seed is None:
            mat = np.eye(dim)  # standard Student-t coordinates
        else:
            rng = np.random.RandomState(seed)
            root = rng.uniform(-1.0, 1.0, size=(dim, dim))
            mat = np.linalg.inv(root @ root.T)
            mat /= np.linalg.det(mat)  # normalize to unit determinant
        self.A = torch.from_numpy(mat).float().to(device)
        self.A_inv = torch.from_numpy(np.linalg.inv(mat)).float().to(device)
        from torch.distributions.studentT import StudentT
        self.dist = StudentT(df=df)

    def log_p(self, X):
        # X = A Z with |det A| = 1, so no Jacobian correction is required.
        assert(self.A.shape[0] == X.shape[-1])
        B = X.shape[0]
        Z = torch.bmm(self.A_inv.unsqueeze(0).expand(B, -1, -1),
                      X.unsqueeze(-1)).squeeze(-1)  # (B, D)
        return self.dist.log_prob(Z).sum(-1)  # (B,)

    def sample_gt_impl(self, B):
        Z = self.dist.sample([B, self.dim]).to(self.device)
        return torch.bmm(self.A.unsqueeze(0).expand(B, -1, -1),
                         Z.unsqueeze(-1)).squeeze(-1)  # (B, D)

    def get_reject_ineq_fn(self):
        return self.ineq_fn
class UniformBox(DistributionBase):
    '''Uniform (constant-log_p) density with a box bounding region.'''

    def __init__(self, dim, *, device):
        super().__init__(dim=dim, embed_dim=dim,
                         bbox=torch.tensor([[-1.3, 1.3]] * dim, device=device),
                         device=device)

    def log_p(self, X):
        # HACK: X - X keeps X in the autograd graph even though the
        # density is constant (zero log-probability everywhere).
        return (X - X).sum(-1)

    def sample_gt_impl(self, B):
        return 2 * torch.rand([B, self.dim], device=self.device) - 1
class UniformHeart(DistributionBase):
    '''Uniform density on the heart-shaped region (via rejection).'''

    def __init__(self, *, device):
        super().__init__(dim=2, embed_dim=2,
                         bbox=torch.tensor([[-1.3, 1.3]] * 2, device=device),
                         device=device)

    def log_p(self, X):
        # HACK: X - X keeps X in the autograd graph (constant density).
        return (X - X).sum(-1)

    def sample_gt_impl(self, B):
        return 3 * (torch.rand([B, 2], device=self.device) - 0.5)

    def get_reject_ineq_fn(self):
        return heart_id_reparam()['ineq_fn']
class UniformSimplex3D(DistributionBase):
    '''Uniform density on the standard 2-simplex embedded in R^3.'''

    def __init__(self, dim, *, device):
        assert(dim == 3)
        super().__init__(dim=dim, embed_dim=dim - 1,
                         bbox=torch.tensor([[-0.3, 1.3]] * dim, device=device),
                         device=device)

    def log_p(self, X):
        # HACK: X - X keeps X in the autograd graph (constant density).
        return (X - X).sum(-1)

    def sample_gt_impl(self, batch_size):
        # Uniform sampling on a triangle via the sqrt trick.
        v0 = np.array([1, 0, 0], dtype=np.float64)
        v1 = np.array([0, 1, 0], dtype=np.float64)
        v2 = np.array([0, 0, 1], dtype=np.float64)
        r1 = np.expand_dims(np.random.random([batch_size]), -1)
        r2 = np.expand_dims(np.random.random([batch_size]), -1)
        pts = ((1 - np.sqrt(r1)) * v0 +
               (np.sqrt(r1) * (1 - r2)) * v1 +
               (r2 * np.sqrt(r1)) * v2)
        return torch.from_numpy(pts).to(self.device).float()
class GaussianMixtureBox(DistributionBase):
    """Isotropic Gaussian mixture restricted to a box (via rejection)."""
    def __init__(self, *, dim,
                 ineq_fn=None,
                 centers, variances, weights, bbox,
                 device):
        '''
        :param centers: (M, D)
        :param variances: (M,), per-component isotropic variances
        :param weights: (M,), normalized to sum to one below
        '''
        super().__init__(dim=dim, embed_dim=dim, bbox=bbox, device=device)
        assert(dim == centers.shape[1])
        self.centers = centers.to(device)
        self.variances = variances.to(device)
        self.weights = weights.to(device)
        self.weights /= self.weights.sum()
        self.ineq_fn = ineq_fn
    def log_p(self, X):
        '''
        Mixture log-density.
        :param X: (B, D)
        :return: (B,)
        '''
        tmp = X.unsqueeze(1) - self.centers.unsqueeze(0) # (B, M, D)
        tmp = -tmp.square().sum(-1) / (2 * self.variances.unsqueeze(0)) # (B, M)
        # Per-component normalization (2 * pi * sigma^2)^(D / 2).
        coef = torch.pow(2 * np.pi * self.variances, self.dim / 2) # (M,)
        tmp = tmp.exp() / coef # (B, M)
        log_p = (tmp * self.weights).sum(-1).log()
        return log_p
    def sample_gt_impl(self, B):
        # Draw a sample from every component, then select one component per
        # batch item according to the mixture weights.
        tmp = torch.randn([B, self.dim], device=self.device).unsqueeze(1) # (B, 1, D)
        tmp = tmp * self.variances.unsqueeze(-1).sqrt() # (B, M, D)
        tmp = tmp + self.centers # (B, M, D)
        inds = torch.multinomial(self.weights, B,
                                 replacement=True).to(self.device) # (B,)
        M = self.centers.shape[0]
        D = self.dim
        flatten_idx = ((torch.arange(B, device=self.device) * M * D +
                        inds * D).unsqueeze(-1) +
                       torch.arange(D, device=self.device)) # (B, D)
        # Want: out[i, j] = tmp[i, inds[i], j]
        out = tmp.reshape(-1)[flatten_idx.reshape(-1)].reshape(B, D) # (B, D)
        return out
    def get_reject_ineq_fn(self):
        # Fix: use the actual dimension instead of a hard-coded 2 so the
        # class works for any dim (previously box_id_reparam(2)).
        return box_id_reparam(self.dim)['ineq_fn']
def create_problem(device, prob, reparam_name):
    """Build an Analytical problem from a spec string like 'uniform_box_2d'.

    The trailing '<dim>d' of *prob* encodes the ambient dimension; the rest
    selects the target distribution. *reparam_name* picks the domain
    reparametrization.
    """
    from mied.problems.analytical import Analytical
    import re
    m = re.search('([0-9]+)d$', prob)
    dim_group = m.group(0)
    dim = int(dim_group[:-1])
    prob_name = prob[:-len(dim_group)-1]
    # Lazy factories so only the requested distribution is constructed.
    dist_factories = {
        'dirichlet': lambda: Dirichlet(dim, device=device),
        'quadratic_uc': lambda: QuadraticFullDim(dim, device=device,
                                                 ineq_fn=None),
        'quadratic_2_uc': lambda: QuadraticFullDim(dim, device=device,
                                                   ineq_fn=None, seed=40),
        'std_gaussian_uc': lambda: QuadraticFullDim(dim, device=device,
                                                    ineq_fn=None, seed=None),
        'student_uc': lambda: StudentTFullDim(dim, device=device,
                                              ineq_fn=None),
        'uniform_box': lambda: UniformBox(dim, device=device),
        'uniform_heart': lambda: UniformHeart(device=device),
        'uniform_simplex': lambda: UniformSimplex3D(dim, device=device),
        'mog_box': lambda: GaussianMixtureBox(
            dim=2,
            bbox=torch.tensor([[-1.3, 1.3]]*2, device=device),
            centers=torch.tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]],
                                 dtype=torch.float32),
            variances=torch.tensor([0.3, 0.3, 0.3, 0.3]),
            weights=torch.tensor([0.25, 0.25, 0.25, 0.25]),
            device=device),
        'vmf': lambda: QuadraticFullDim(dim, device=device, ineq_fn=None),
    }
    if prob_name not in dist_factories:
        raise Exception(f'Unknown problem name: {prob_name}')
    if prob_name == 'mog_box':
        # The mixture above is defined for 2-D only.
        assert(dim == 2)
    dist = dist_factories[prob_name]()
    reparam_factories = {
        'id': lambda: id_reparam(dim),
        'box_id': lambda: box_id_reparam(dim),
        'box_tanh': lambda: box_tanh_reparam(dim),
        'box_mirror': lambda: box_mirror_reparam(dim),
        'box_mirror_entropic': lambda: box_mirror_reparam(dim, True),
        'sphere': lambda: sphere_reparam(dim),
        'heart_id': lambda: heart_id_reparam(),
        'period_id': lambda: period_id_reparam(),
        'simplex_pos_sum_one': lambda: simplex_pos_sum_one_reparam(dim),
        'simplex_pos_sum_one_ineq':
            lambda: simplex_pos_sum_one_ineq_reparam(dim),
        'simplex_id': lambda: simplex_id_reparam(dim),
        'simplex_softmax': lambda: simplex_softmax_reparam(dim),
    }
    if reparam_name not in reparam_factories:
        raise Exception(f'Unknown reparametrization name: {reparam_name}')
    reparam = reparam_factories[reparam_name]()
    return Analytical(device=device,
                      bbox=dist.bbox,
                      in_dim=dist.dim,
                      embed_dim=dist.embed_dim,
                      log_p_fn=lambda X: dist.log_p(X),
                      gt_sample_fn=lambda B, refresh: dist.sample_gt(B, refresh),
                      **reparam)
| 17,849 | 31.046679 | 109 | py |
MIED | MIED-main/mied/problems/analytical.py | import torch
from mied.problems.problem_base import ProblemBase
class Analytical(ProblemBase):
    """Problem defined directly by analytical callbacks.

    All behavior (density, prior/ground-truth sampling, constraints,
    reparametrization) is supplied as functions, making this class a thin
    adapter around them.
    """
    def __init__(self, *,
                 bbox,
                 embed_dim,
                 log_p_fn,
                 prior_sample_fn,
                 eq_fn=None,
                 ineq_fn=None,
                 reparam_fn=None,
                 gt_sample_fn=None,
                 **kwargs):
        '''
        A base class for simple analytical functions.
        :param bbox: a (D, 2) tensor, used only for evaluation.
        '''
        super().__init__(**kwargs)
        self.bbox = bbox
        self.embed_dim = embed_dim
        self.log_p_fn = log_p_fn
        self.prior_sample_fn = prior_sample_fn
        self.gt_sample_fn = gt_sample_fn
        self.eq_fn = eq_fn
        self.ineq_fn = ineq_fn
        self.reparam_fn = reparam_fn
    def get_embed_dim(self):
        return self.embed_dim
    def eval_log_p(self, P):
        # Delegate straight to the user-supplied density.
        return self.log_p_fn(P)
    def sample_prior(self, batch_size):
        return self.prior_sample_fn(batch_size, self.device)
    def reparametrize(self, Z):
        if self.reparam_fn is not None:
            return self.reparam_fn(Z)
        # Fall back to the base-class reparametrization when none is given.
        return super().reparametrize(Z)
    def sample_gt(self, batch_size, refresh):
        if self.gt_sample_fn is None:
            return None
        return self.gt_sample_fn(batch_size, refresh)
    def eval_eq(self, P):
        return None if self.eq_fn is None else self.eq_fn(P)
    def eval_ineq(self, P):
        return None if self.ineq_fn is None else self.ineq_fn(P)
| 1,621 | 23.208955 | 63 | py |
MIED | MIED-main/mied/problems/fairness_bnn.py | import torch
from torch.distributions import Normal
import torch.nn.functional as F
import numpy as np
import random
from mied.problems.problem_base import ProblemBase
from mied.utils.adult_loader import load_data
# Using the same setup as https://proceedings.neurips.cc/paper/2021/hash/c61aed648da48aa3893fb3eaadd88a7f-Abstract.html
class BayesianNN:
    """One-hidden-layer Bayesian neural net whose weights are particles.

    Each row of ``theta`` packs one network: w1 (n_features x hidden_dim),
    b1 (hidden_dim), w2 (hidden_dim), b2 (scalar).
    """
    def __init__(self, idx, X_train, y_train, batch_size, hidden_dim, thres):
        # idx: column indices of the non-sensitive features fed to the net.
        self.idx = idx
        self.X_train = X_train
        self.y_train = y_train
        self.batch_size = batch_size
        # One column (the sensitive attribute) is excluded from the input.
        self.n_features = X_train.shape[1] - 1
        self.hidden_dim = hidden_dim
        # Tolerance threshold for the fairness constraint.
        self.thres = thres
    def forward(self, inputs, theta):
        """Evaluate all particle networks on a batch of inputs.

        :param inputs: (batch_size, n_features)
        :param theta: (num_particles, (n_features + 2) * hidden_dim + 1)
        :return: logits of shape (num_particles, batch_size) after squeeze
        """
        assert(theta.shape[1] == (self.n_features + 2) * self.hidden_dim + 1)
        # Unpack theta into the per-particle network parameters.
        w1 = theta[:, 0:self.n_features * self.hidden_dim].reshape(-1, self.n_features, self.hidden_dim)
        b1 = theta[:, self.n_features * self.hidden_dim:(self.n_features + 1) * self.hidden_dim].unsqueeze(1)
        w2 = theta[:, (self.n_features + 1) * self.hidden_dim:(self.n_features + 2) * self.hidden_dim].unsqueeze(2)
        b2 = theta[:, -1].reshape(-1, 1, 1)
        inputs = inputs.unsqueeze(0).repeat(w1.shape[0], 1, 1)
        inter = (torch.bmm(inputs, w1) + b1).relu()
        out_logit = torch.bmm(inter, w2) + b2
        # NOTE(review): squeeze() drops *all* singleton dims, so a single
        # particle or a batch of one collapses an extra axis — confirm
        # callers rely on this.
        out = out_logit.squeeze()
        return out
    def get_log_prob_and_constraint(self, theta):
        """Return (log posterior, fairness constraint value) per particle."""
        model_w = theta[:, :]
        w_prior = Normal(0., 1.)
        # Subsample a minibatch of training rows. Sampling directly from the
        # range avoids materializing an index list (same draws as before).
        random_idx = random.sample(range(self.X_train.shape[0]), self.batch_size)
        X_batch = self.X_train[random_idx]
        y_batch = self.y_train[random_idx]
        out_logit = self.forward(X_batch[:, self.idx], theta) # [num_particle, batch_size]
        y_batch_repeat = y_batch.unsqueeze(0).repeat(out_logit.shape[0], 1)
        log_p_data = F.binary_cross_entropy_with_logits(out_logit, y_batch_repeat, reduction='none')
        log_p_data = (-1.)*log_p_data.sum(dim=1)
        log_p0 = w_prior.log_prob(model_w.t()).sum(dim=0)
        # Rescale the minibatch likelihood to the full dataset size.
        log_p = log_p0 + log_p_data * (self.X_train.shape[0] / self.batch_size) # (8) in paper
        ### NOTE: compute fairness loss
        mean_sense = X_batch[:, 45].mean()
        weight_sense = X_batch[:, 45] - mean_sense # [batch_size]
        #weight_sense = weight_sense.view(1, -1).repeat(self.num_particles, 1)
        # Modify here as well.
        out = out_logit.sigmoid()
        out = out - out.mean(dim=1, keepdim=True) # [num_particle, batch_size]
        # constrain = ((weight_sense.unsqueeze(0) * out_logit).mean(-1))**2 - self.thres
        # Squared covariance between the sensitive attribute (column 45) and
        # the centered predictions, shifted by the tolerance threshold.
        constrain = ((weight_sense.unsqueeze(0) * out).mean(-1))**2 - self.thres
        return log_p, constrain
class FairnessBNN(ProblemBase):
    """Fairness-constrained Bayesian NN classification on the Adult dataset.

    The posterior over network weights is the target density; a fairness
    term (squared covariance between the sensitive attribute in column 45
    and the predictions) enters as an inequality constraint.
    """
    def __init__(self, data_dir,
                 thres,
                 ineq_scale,
                 device=torch.device('cpu')):
        # Multiplier applied to the inequality constraint value.
        self.ineq_scale = ineq_scale
        # Feature columns excluding the sensitive attribute (column 45).
        idx = [i for i in range(87)]
        del idx[45]
        X_train, y_train, X_test, y_test, start_index, cat_length = load_data(
            data_dir, get_categorical_info=True)
        # Cap the training set, then keep 99% of it.
        X_train = X_train[:20000]
        y_train = y_train[:20000]
        n = X_train.shape[0]
        n = int(0.99 * n)
        # Note: X_val is not used.
        X_train = X_train[:n, :]
        y_train = y_train[:n]
        # Remove column 46 from the raw features before building tensors.
        X_train = np.delete(X_train, 46, axis=1)
        X_test = np.delete(X_test, 46, axis=1)
        X_train = torch.tensor(X_train).float().to(device)
        X_test = torch.tensor(X_test).float().to(device)
        y_train = torch.tensor(y_train).float().to(device)
        y_test = torch.tensor(y_test).float().to(device)
        # Standardize non-sensitive columns using training statistics only.
        X_train_mean, X_train_std = torch.mean(X_train[:, idx], dim=0), torch.std(X_train[:, idx], dim=0)
        X_train[:, idx] = (X_train [:, idx]- X_train_mean) / X_train_std
        X_test[:, idx] = (X_test[:, idx] - X_train_mean) / X_train_std
        batch_size, hidden_dim = 19800, 50
        # Flattened parameter count of the one-hidden-layer network.
        in_dim = (X_train.shape[1] - 1 + 2) * hidden_dim + 1
        super().__init__(device=device,
                         in_dim=in_dim)
        self.bnn = BayesianNN(idx,
                              X_train, y_train, batch_size, hidden_dim, thres)
        self.X_train, self.y_train = X_train, y_train
        self.X_test, self.y_test = X_test, y_test
        self.idx = idx
    def sample_prior(self, batch_size):
        # Small-variance Gaussian initialization over network weights.
        return 0.1 * torch.randn([batch_size, self.in_dim], device=self.device)
    def eval_log_p(self, theta):
        log_p, constraint = self.bnn.get_log_prob_and_constraint(theta)
        return log_p
    def eval_ineq(self, theta):
        # Scaled fairness constraint value, shaped (num_particles, 1).
        log_p, constraint = self.bnn.get_log_prob_and_constraint(theta)
        return self.ineq_scale * constraint.unsqueeze(-1)
    def get_embed_dim(self):
        return self.in_dim # full dimension
    def custom_eval(self, theta):
        """Compute test accuracy and fairness (CV) for the averaged ensemble
        prediction and for each particle individually."""
        X_test = self.X_test
        y_test = self.y_test
        with torch.no_grad():
            prob = self.bnn.forward(X_test[:, self.idx], theta)
            y_pred = torch.sigmoid(prob).mean(dim=0) # Average among outputs from different network parameters(particles)
            y_pred = y_pred.cpu().numpy()
            sum_positive = np.zeros(2).astype(float)
            count_group = np.zeros(2).astype(float)
            # Positive-prediction rate per sensitive group (column 45).
            for j in range(2):
                A = y_pred[X_test.cpu().numpy()[:,45]==j]
                count_group[j] = A.shape[0]
                sum_positive[j] = np.sum(A >= 0.5)
            ratio = sum_positive/count_group
            # CV: spread of positive-prediction rates across the two groups.
            CV = np.max(ratio) - np.min(ratio)
            y_pred[y_pred>= 0.5] = 1
            y_pred[y_pred<0.5] = 0
            acc_bnn = np.sum(y_pred==y_test.cpu().numpy())/float(y_test.shape[0])
            cv_bnn = CV
            print('acc: ', np.sum(y_pred==y_test.cpu().numpy())/float(y_test.shape[0]), 'fairness:', CV)
            acc_cllt = []
            cv_cllt = []
            # Same metrics, now per individual particle.
            for i in range(prob.shape[0]):
                y_pred = torch.sigmoid(prob[i, :])
                y_pred = y_pred.cpu().numpy()
                sum_positive = np.zeros(2).astype(float)
                count_group = np.zeros(2).astype(float)
                for j in range(2):
                    A = y_pred[X_test.cpu().numpy()[:,45]==j]
                    count_group[j] = A.shape[0]
                    sum_positive[j] = np.sum(A >= 0.5)
                ratio = sum_positive/count_group
                CV = np.max(ratio) - np.min(ratio)
                y_pred[y_pred>= 0.5] = 1
                y_pred[y_pred<0.5] = 0
                acc_cllt.append(np.sum(y_pred==y_test.cpu().numpy())/float(y_test.shape[0]))
                cv_cllt.append(CV)
            # print('mean CV {}, best CV {}, worst CV {}'.format(
            #     np.mean(np.array(cv_cllt)),
            #     np.min(np.array(cv_cllt)),
            #     np.max(np.array(cv_cllt))))
            return {
                'acc_all': np.stack(acc_cllt, 0),
                'cv_all': np.stack(cv_cllt, 0),
                'acc_bnn': acc_bnn,
                'cv_bnn': cv_bnn,
            }
    def custom_post_step(self, theta):
        """Per-step logging: scalar metrics only (per-particle arrays dropped)."""
        eval_dict = self.custom_eval(theta)
        del eval_dict['acc_all']
        del eval_dict['cv_all']
        log_p, constraint = self.bnn.get_log_prob_and_constraint(theta)
        # Undo the threshold shift to report the raw constraint value.
        constraint = constraint + self.bnn.thres
        # Average across all particles.
        eval_dict['log_p'] = log_p.sum(-1).mean()
        eval_dict['constraint_mean'] = constraint.mean()
        eval_dict['constraint_max'] = constraint.max()
        return eval_dict
| 7,599 | 38.175258 | 122 | py |
MIED | MIED-main/mied/utils/kernels.py | from abc import ABC, abstractmethod
import torch
from mied.utils.batch_jacobian import compute_jacobian
class KernelBase(ABC):
    """Interface for kernels k(x, y) together with the derivative
    quantities needed by kernelized particle methods."""
    @abstractmethod
    def eval(self, X, Y):
        '''
        Evaluate k(X_b, Y_b) for each pair in the batch.
        :param X: (B, D)
        :param Y: (B, D)
        :return: (B,)
        '''
        pass
    @abstractmethod
    def grad_1(self, X, Y):
        '''
        Gradient of k with respect to its first argument.
        :param X: (B, D)
        :param Y: (B, D)
        :return: (B, D)
        '''
        pass
    @abstractmethod
    def div_2_grad_1(self, X, Y):
        '''
        Divergence w.r.t. the second argument of grad_1.
        :param X: (B, D)
        :param Y: (B, D)
        :return: (B,)
        '''
        pass
class GaussianKernel(KernelBase):
    """RBF kernel k(x, y) = exp(-||x - y||^2 / (2 sigma)).

    Note ``sigma`` plays the role of the squared bandwidth here.
    """
    def __init__(self, sigma):
        '''
        k(x, y) = exp(-||x-y||^2/(2 sigma))
        :param sigma: squared bandwidth
        '''
        self.sigma = sigma
    def eval(self, X, Y):
        sq_dist = (X - Y).square().sum(-1)
        return torch.exp(-sq_dist / (2 * self.sigma))
    def grad_1(self, X, Y):
        # d/dx k(x, y) = -(x - y) / sigma * k(x, y)
        k_val = self.eval(X, Y).unsqueeze(-1)
        return (Y - X) / self.sigma * k_val
    def div_2_grad_1(self, X, Y):
        # div_y grad_x k(x, y) = k * (D / sigma - ||x - y||^2 / sigma^2)
        dim = X.shape[-1]
        sq_dist = (X - Y).square().sum(-1)
        return self.eval(X, Y) * (dim / self.sigma - sq_dist / self.sigma ** 2)
| 1,184 | 19.431034 | 79 | py |
MIED | MIED-main/mied/utils/batch_jacobian.py | import torch
def compute_jacobian(outputs, inputs,
                     create_graph=True, retain_graph=True):
    '''
    Compute Jacobian matrices in batch.
    :param outputs: (..., D1)
    :param inputs: (..., D2)
    :returns: (..., D1, D2), computed Jacobian
    '''
    ones = torch.ones(inputs.size()[:-1], device=inputs.device)
    # One backward pass per output coordinate; each yields a row of the
    # Jacobian, which we then stack along the second-to-last axis.
    rows = [
        torch.autograd.grad(
            outputs=outputs[..., d], inputs=inputs,
            create_graph=create_graph, retain_graph=retain_graph,
            grad_outputs=ones
        )[0]
        for d in range(outputs.shape[-1])
    ]
    return torch.stack(rows, dim=-2)  # (..., D1, D2)
| 631 | 27.727273 | 77 | py |
MIED | MIED-main/mied/utils/batch_hessian.py | import torch
from mied.utils.batch_jacobian import compute_jacobian
def compute_hessian(func, inputs):
    '''
    Compute Hessian matrices in batch.
    :param func: (B, D) -> (B,)
    :param inputs: (B, D)
    :returns: (B, D, D)
    '''
    values = func(inputs)  # (B,)
    # Gradient of the scalar outputs w.r.t. the inputs ...
    grad = compute_jacobian(values.unsqueeze(-1), inputs).squeeze(-2)  # (B, D)
    # ... differentiated once more gives the Hessian.
    return compute_jacobian(grad, inputs)
| 420 | 21.157895 | 79 | py |
MIED | MIED-main/mied/utils/random.py | import torch
import numpy as np
import random
def seed_all(seed):
    """Seed every RNG in use (stdlib random, numpy, torch CPU/CUDA) and
    make cuDNN deterministic for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade speed for reproducibility in cuDNN kernel selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
| 280 | 20.615385 | 42 | py |
MIED | MIED-main/mied/utils/proj_polyhedra.py | import torch
def proj_halfspace(p, c, y):
    '''
    Project p to halfspace defined by {x: c^T x <= y}.
    :param p: (B, D)
    :param c: (B, D)
    :param y: (B,)
    :return: (B, D), projected points
    '''
    # Normalize the constraint so c has (nearly) unit norm; the epsilon
    # guards against division by zero.
    scale = torch.norm(c, dim=-1) + 1e-8  # (B,)
    unit_c = c / scale.unsqueeze(-1)  # (B, D)
    rhs = y / scale  # (B,)
    # Positive part of the signed constraint violation along unit_c
    # (zero for feasible points, which are returned unchanged).
    violation = ((p * unit_c).sum(-1) - rhs).relu()  # (B,)
    return p - violation.unsqueeze(-1) * unit_c
def calc_suboptimality(X, C, Y):
    '''
    Calculate the suboptimality for projecting X onto the polyhedral
    defined by C and Y (average violation of the constraints C_i^T x <= Y_i).
    :param X: (B, D)
    :param C: (B, K, D)
    :param Y: (B, K)
    :return: scalar, representing average suboptimality
    '''
    slack = torch.matmul(C, X.unsqueeze(-1)).squeeze(-1) - Y  # (B, K)
    # Only positive slack (i.e. an actual violation) contributes.
    return slack.relu().mean()
def proj_polyhedra(X, C, Y,
                   parallel=False,
                   max_num_itr=50, logging=False, early_stop_eps=1e-6):
    '''
    Project each X to the intersection of {C_i^T x <= Y_i, for all i < K}
    using repeated halfspace projections with correction terms z.
    :param X: (B, D)
    :param C: (B, K, D)
    :param Y: (B, K)
    :param parallel: if True, project onto all K halfspaces simultaneously
        and average; otherwise sweep through them sequentially.
    :param max_num_itr: maximum number of outer iterations
    :param logging: if True, also return the history of suboptimality
    :param early_stop_eps: stop once suboptimality drops below this
        (only checked when logging is True; see NOTE below)
    :return: (B, D), projected points (plus loss history when logging)
    '''
    if logging:
        hist_loss = [calc_suboptimality(X, C, Y)]
    if C.shape[1] == 1:
        # Single constraint.
        sol = proj_halfspace(X, C[:, 0, :], Y[:, 0])
    else:
        with torch.no_grad():
            K = C.shape[1]
            D = C.shape[2]
            if parallel:
                # One copy of the iterate (and its correction) per constraint.
                u_prev_stack = X.unsqueeze(1).expand(-1, K, -1) # (B, K, D)
                z_prev_stack = torch.zeros_like(u_prev_stack) # (B, K, D)
            else:
                u_prev_list = []
                z_prev_list = []
                for _ in range(K + 1):
                    u_prev_list.append(X.clone().detach())
                    z_prev_list.append(torch.zeros_like(X))
            for _ in range(max_num_itr):
                if parallel:
                    # Average the per-constraint iterates, then re-project
                    # each (average + correction) onto its halfspace.
                    u0 = u_prev_stack.mean(1) # (B, D)
                    tmp = u0.unsqueeze(1) + z_prev_stack # (B, K, D)
                    u_next_stack = proj_halfspace(tmp.reshape(-1, D),
                                                  C.reshape(-1, D),
                                                  Y.reshape(-1)).reshape(-1, K, D) # (B, K, D)
                    z_next_stack = tmp - u_next_stack
                    u_prev_stack = u_next_stack
                    z_prev_stack = z_next_stack
                else:
                    # Sweep through the K halfspaces one at a time, carrying
                    # a correction term z per constraint.
                    u_next_list = []
                    u_next_list.append(u_prev_list[K])
                    z_next_list = [None]
                    for i in range(K):
                        tmp = u_next_list[i] + z_prev_list[i + 1]
                        u_next_list.append(proj_halfspace(tmp,
                                                          C[:, i, :], Y[:, i]))
                        z_next_list.append(tmp - u_next_list[-1])
                    u_prev_list = u_next_list
                    z_prev_list = z_next_list
                sol = u_prev_stack.mean(1) if parallel else u_prev_list[-1]
                # NOTE(review): early stopping is only reachable when
                # logging=True; without logging the loop always runs
                # max_num_itr iterations — confirm this is intended.
                if logging:
                    subopt = calc_suboptimality(
                        sol,
                        C, Y)
                    hist_loss.append(subopt)
                    if subopt < early_stop_eps:
                        break
    if logging:
        return sol, hist_loss
    return sol
| 3,305 | 32.393939 | 94 | py |
MIED | MIED-main/mied/utils/h5_helpers.py | import torch
import numpy as np
import h5py
from pathlib import Path
def save_dict_h5(save_dict, h5_path, create_dir=False):
    """Save a (possibly nested) dict into an HDF5 file.

    Tensors and ndarrays become datasets; nested dicts become groups; any
    other value is stored as an HDF5 attribute on its parent group.

    :param save_dict: nested dict of tensors / ndarrays / scalar values
    :param h5_path: destination path of the .h5 file
    :param create_dir: create the parent directory if it is missing
    """
    def recurse(remain_dict, parent_handle):
        for k, v in remain_dict.items():
            if isinstance(v, dict):
                child_handle = parent_handle.create_group(k)
                recurse(v, child_handle)
            else:
                if torch.is_tensor(v):
                    arr = v.cpu().detach().numpy()
                elif isinstance(v, np.ndarray):
                    arr = v
                else:
                    # Save as attributes.
                    parent_handle.attrs[k] = v
                    continue
                parent_handle.create_dataset(k, data=arr)
    if create_dir:
        Path(h5_path).parent.mkdir(parents=True, exist_ok=True)
    # Fix: close the file deterministically — the original leaked the
    # handle and relied on garbage collection to flush/close it.
    with h5py.File(h5_path, 'w') as root_handle:
        recurse(save_dict, root_handle)
| 893 | 33.384615 | 63 | py |
MIED | MIED-main/mied/utils/ec.py | import torch
import argparse
import yaml
import copy
from pathlib import Path
import shutil
from datetime import datetime
from uuid import uuid4
from collections import namedtuple
import wandb
from mied.utils.shortname import \
convert_method_cls_to_str, convert_method_str_to_cls, \
convert_projector_cls_to_str, convert_projector_str_to_cls
class Config:
    """Wrapper around a parameter dict with YAML load/save helpers."""
    def __init__(self, param_dict):
        # Shallow-copy so later mutation of the caller's dict has no effect.
        self.param_dict = copy.copy(param_dict)
    def has_same_params(self, other):
        return self.param_dict == other.param_dict
    def __getitem__(self, k):
        return self.param_dict[k]
    def get(self, k, default_v):
        return self.param_dict.get(k, default_v)
    def __repr__(self):
        return str(self.param_dict)
    @staticmethod
    def from_yaml(yaml_path):
        # Fix: close the file handle deterministically (the original
        # passed an open() result directly and leaked the handle).
        with open(yaml_path, 'r') as f:
            return Config(yaml.safe_load(f))
    def save_yaml(self, yaml_path):
        # Fix: same handle-leak fix as from_yaml.
        with open(yaml_path, 'w') as f:
            f.write(yaml.dump(self.param_dict))
class ConfigBlueprint:
    """Declares a group of CLI arguments from a dict of default values."""
    def __init__(self, default_param_dict):
        '''
        :param default_param_dict: a dict, where the values have to be one of
            [string, int, float]
        '''
        self.default_param_dict = default_param_dict
    def prepare_parser(self, parser):
        """Register one '--<key>' option per default, typed like its value."""
        def str2bool(v):
            # Booleans need explicit parsing: argparse's bool() would treat
            # any non-empty string (including 'false') as True.
            if isinstance(v, bool):
                return v
            lowered = v.lower()
            if lowered in ('yes', 'true', 't', 'y', '1'):
                return True
            if lowered in ('no', 'false', 'f', 'n', '0'):
                return False
            raise argparse.ArgumentTypeError('Boolean value expected.')
        for key, default in self.default_param_dict.items():
            arg_type = str2bool if isinstance(default, bool) else type(default)
            parser.add_argument('--{}'.format(key), type=arg_type,
                                default=default)
# Bundle returned by ExperimentCoordinator.parse_args(): the temporary
# (non-persistent) CLI args, the resolved Config, and the experiment dir.
ECParseResult = namedtuple('ECParseResult',
                           ['tmp_args', 'config', 'exp_dir'])
class ExperimentCoordinator:
    """Manages experiment directories, CLI parsing, and solver creation.

    Each experiment lives in root_dir/exps/<exp_name>/ with a conf.yml
    describing its persistent configuration.
    """
    def __init__(self, root_dir):
        '''
        We assume the following hierarchy of directories:
            root_dir/exps/exp_name/...
                - conf.yml: the configuration corresponding to an instance of
                    Config class
                Then arbitrary files and subfolders can be placed here, e.g.,
                - result.h5: result in hdf5 format
                - log/: tensorboard log
                - ckpt.tar: checkpoint
            When possible, we assume each conf.yml corresponds to a
                unique exp_name.
        :param root_dir: root directory of the experiments
        '''
        self.root_path = Path(root_dir)
        # Temporary blueprints are non-persistent.
        self.temporary_blueprints = [ConfigBlueprint({
            'device': 'cuda',
            'val_freq': 100,
        })]
        self.common_blueprints = [ConfigBlueprint({
            'project': 'uncategorized',
            'wandb': True,
            'seed': 42,
            'optimizer': 'Adam',
            'lr': 1e-2,
            'beta1': 0.9,
            'beta2': 0.999,
            # Every method is particle-based.
            'num_particle': 50,
            'precondition': False,
        })]
        self.method_blueprint_dict = {}
        self.projector_blueprint_dict = {}
    def add_temporary_arguments(self, param_dict):
        # Arguments that are parsed but never persisted into conf.yml.
        self.temporary_blueprints.append(ConfigBlueprint(param_dict))
    def add_common_arguments(self, param_dict):
        self.common_blueprints.append(ConfigBlueprint(param_dict))
    def add_method_arguments(self, method_cls, param_dict):
        self.method_blueprint_dict[method_cls] = ConfigBlueprint(param_dict)
    def add_projector_arguments(self, projector_cls, param_dict):
        self.projector_blueprint_dict[projector_cls] = ConfigBlueprint(param_dict)
    def parse_args(self):
        """Parse CLI args, resolve/persist the config, and return an
        ECParseResult(tmp_args, config, exp_dir)."""
        tmp_parser = argparse.ArgumentParser()
        '''
        * --resume: continue an experiment (the corresponding folder
            must have a conf.yml file)
        * --exp_name: name of the experiment which is the same as the
            folder name containing this experiment's related files. If not
            provided, a random unique name will be generated (which can later
            be changed).
        '''
        tmp_parser.add_argument('--resume', type=str)
        tmp_parser.add_argument('--override', action='store_true', default=False)
        tmp_parser.add_argument('--restart', action='store_true', default=False)
        tmp_parser.add_argument('--exp_name', type=str)
        for b in self.temporary_blueprints:
            b.prepare_parser(tmp_parser)
        tmp_args, _ = tmp_parser.parse_known_args()
        if tmp_args.resume:
            assert(tmp_args.exp_name is None)
            exp_dir = self.get_exps_path() / Path(tmp_args.resume)
            config = Config.from_yaml(exp_dir / 'conf.yml')
            print('Resuming experiment {}...'.format(exp_dir))
        else:
            common_parser = argparse.ArgumentParser()
            common_parser.add_argument('--method', type=str, default='RED')
            common_parser.add_argument('--projector', type=str, default='DB')
            for b in self.common_blueprints:
                b.prepare_parser(common_parser)
            common_args, _ = common_parser.parse_known_args()
            method_cls = convert_method_str_to_cls(common_args.method)
            projector_cls = convert_projector_str_to_cls(common_args.projector)
            if method_cls not in self.method_blueprint_dict:
                raise Exception('Cannot find blueprint for '
                                f'method {method_cls}!')
            if projector_cls not in self.projector_blueprint_dict:
                # Fix: report the projector class (the original message
                # mistakenly interpolated method_cls here).
                raise Exception('Cannot find blueprint for '
                                f'projector {projector_cls}!')
            method_parser = argparse.ArgumentParser()
            self.method_blueprint_dict[method_cls].prepare_parser(
                method_parser
            )
            method_args, _ = method_parser.parse_known_args()
            projector_parser = argparse.ArgumentParser()
            self.projector_blueprint_dict[projector_cls].prepare_parser(
                projector_parser
            )
            projector_args, _ = projector_parser.parse_known_args()
            config_dict = vars(common_args)
            config_dict['method_config'] = vars(method_args)
            config_dict['projector_config'] = vars(projector_args)
            config_dict['wandb_id'] = wandb.util.generate_id()
            config = Config(config_dict)
            exp_dir = self.make_persistent(config, tmp_args.exp_name,
                                           override=tmp_args.override)
        self.parse_result = ECParseResult(
            tmp_args=tmp_args,
            config=config,
            exp_dir=exp_dir
        )
        return self.parse_result
    def create_solver(self, problem):
        """Instantiate the configured solver (and wandb run) for *problem*."""
        config = self.parse_result.config
        exp_dir = self.parse_result.exp_dir
        tmp_args = self.parse_result.tmp_args
        wandb.init(
            project=config['project'],
            mode='online' if config['wandb'] else 'offline',
            config={
                'exp_dir': exp_dir,
                **config.param_dict
            },
            name=('' if tmp_args.exp_name is None else f'{tmp_args.exp_name}'),
            id=config['wandb_id'],
            resume='allow'
        )
        projector_cls = convert_projector_str_to_cls(config['projector'])
        projector = projector_cls(**config['projector_config'])
        method_cls = convert_method_str_to_cls(config['method'])
        solver = method_cls(problem=problem,
                            projector=projector,
                            num_particle=config['num_particle'],
                            precondition=config['precondition'],
                            val_freq=self.parse_result.tmp_args.val_freq,
                            ckpt_path=exp_dir / 'ckpt.tar',
                            logger_fn=lambda d: wandb.log(d),
                            optimizer_conf={
                                'cls': config['optimizer'],
                                'lr': config['lr'],
                                'beta1': config['beta1'],
                                'beta2': config['beta2']
                            },
                            **config['method_config'])
        if not self.parse_result.tmp_args.restart:
            solver.load_ckpt()
        return solver
    def get_exps_path(self):
        # Ensure root/exps exists before any use.
        path = self.root_path / 'exps/'
        path.mkdir(exist_ok=True)
        return path
    def make_persistent(self, config, exp_name, override):
        """Create (or reuse) the experiment directory for *config*.

        Raises if an experiment with the same name but different parameters
        exists, unless the user chooses to override (which deletes it).
        """
        exist = False
        # Check if params match any existing conf.yml.
        for p in self.get_exps_path().iterdir():
            if p.is_dir():
                another_exp_name = p.stem
                config_path = p / 'conf.yml'
                if not config_path.exists():
                    continue
                if exp_name == another_exp_name:
                    another_config = Config.from_yaml(config_path)
                    print(f'Found existing experiment {exp_name}!')
                    diff = False
                    for k, v in config.param_dict.items():
                        if k not in another_config.param_dict:
                            print(f'Existing config missing {k}!')
                            diff = True
                        elif another_config[k] != v:
                            print(f'Existing config has {k}={another_config[k]}'
                                  f' whereas new config has {k}={v}!')
                            diff = True
                    for k in another_config.param_dict:
                        if k not in config.param_dict:
                            print(f'New config missing {k}!')
                            diff = True
                    if not override:
                        # Note: override becomes the raw input string here.
                        override = input("Override? [Y/N]")
                    if override == True or override == 'Y':
                        shutil.rmtree(p)
                        exist = False
                        break
                    if diff:
                        raise Exception('Found config with same name'
                                        ' but different parameters! Abort.')
                    print('Resuming experiment {} with '.format(p) +
                          'identical config...')
                    exist = True
                    config = another_config
                    exp_dir = p
        if not exist:
            # Save config
            if exp_name is None:
                exp_name = config['wandb_id']
            exp_dir = self.get_exps_path() / exp_name
            exp_dir.mkdir(parents=True, exist_ok=True)
            config.save_yaml(exp_dir / 'conf.yml')
            print('Saved a new config to {}.'.format(exp_dir))
        return exp_dir
| 11,026 | 35.392739 | 82 | py |
MIED | MIED-main/mied/utils/batch_eval.py | import torch
from tqdm import tqdm
def batch_eval_index(f, total_count, batch_size=1024,
                     result_device=torch.device('cpu'),
                     detach=True,
                     no_tqdm=False):
    '''
    Batch evaluate f over index ranges.
    :param f: function to be evaluated. It takes a slice of indices and
        returns a tensor of results for those indices.
    :param total_count: total number of indices
    :param batch_size: batch size in each invocation of f
    :param result_device: device the per-batch results are moved to
    :param detach: detach each batch result from the autograd graph
    :param no_tqdm: disable the progress bar
    :return: a list of results. You might want to call torch.cat afterwards.
    '''
    result = []
    current_count = 0
    with tqdm(total=total_count, disable=no_tqdm) as pbar:
        while current_count < total_count:
            # Last batch may be smaller than batch_size.
            count = min(batch_size, total_count - current_count)
            inds = slice(current_count, current_count + count)
            cur_result = f(inds)
            if detach:
                cur_result = cur_result.detach()
            result.append(cur_result.to(result_device))
            current_count += count
            pbar.update(count)
        # Fix: removed the redundant pbar.close() — the context manager
        # already closes the bar on exit.
    return result
| 1,050 | 32.903226 | 76 | py |
MIED | MIED-main/tests/analytical/run.py | import torch
import argparse
from pathlib import Path
import math
import wandb
import matplotlib.pyplot as plt
from mied.validators.particle import ParticleValidator
from mied.utils.random import seed_all
from mied.utils.ec import ExperimentCoordinator
from mied.problems.analytical_problems import create_problem
from mied.solvers.mied import MIED
from mied.solvers.svgd import SVGD
from mied.solvers.ksdd import KSDD
from mied.solvers.ipd import IPD
from mied.solvers.lmc import LMC
from mied.solvers.dynamic_barrier import DynamicBarrier
from mied.solvers.no_op_projector import NoOpProjector
if __name__ == '__main__':
    root_dir = Path(__file__).resolve().parent
    ec = ExperimentCoordinator(root_dir)
    # Non-persistent arguments (not written into conf.yml).
    ec.add_temporary_arguments({
        'num_itr': 2000,
        'traj_freq': 10,
        'plot_update': False,
        'num_trial': 10,
        'gt_multiplier': 10,
    })
    ec.add_common_arguments({
        'prob': 'uniform_box_2d',
        'reparam': 'box_tanh',
        'filter_range': -1,
    })
    # Per-method and per-projector hyperparameter blueprints.
    ec.add_method_arguments(MIED, {
        'kernel': 'riesz',
        'eps': 1e-8,
        'riesz_s': -1.0,
        'alpha_mied': 0.5,
        'include_diag': 'nnd_scale',
        'diag_mul': 1.3,
    })
    ec.add_method_arguments(KSDD, {
        'sigma': 1.0,
    })
    ec.add_method_arguments(IPD, {
    })
    ec.add_method_arguments(SVGD, {
        'kernel_h': -1.0,
    })
    ec.add_method_arguments(LMC, {
        'lmc_lr': 1e-3,
        'mirror_map': 'box_entropic'
    })
    ec.add_projector_arguments(DynamicBarrier, {
        'alpha_db': 1.0,
        'merge_eq': True,
        'max_proj_itr': 20
    })
    ec.add_projector_arguments(NoOpProjector, {
    })
    ec_result = ec.parse_args()
    tmp_args = ec_result.tmp_args
    config = ec_result.config
    seed_all(config['seed'])
    problem = create_problem(ec_result.tmp_args.device,
                             config['prob'],
                             config['reparam'])
    solver = ec.create_solver(problem)
    validator = ParticleValidator(problem=problem)
    def post_step_fn(i):
        # Periodic validation + plotting every val_freq * traj_freq steps.
        if tmp_args.traj_freq <= 0:
            return
        if (i + 1) % (tmp_args.val_freq * tmp_args.traj_freq) == 0:
            metrics = ['sinkhorn', 'energy_dist']
            # NOTE(review): kwarg is spelled 'gt_multipler' — confirm it
            # matches ParticleValidator.run's signature.
            result = validator.run(samples=solver.get_samples(),
                                   updates=solver.compute_update(
                                       i, solver.get_samples()),
                                   include_density=False,
                                   metrics=metrics,
                                   num_trial=tmp_args.num_trial,
                                   gt_multipler=tmp_args.gt_multiplier,
                                   filter_range=config['filter_range'],
                                   save_path=(ec_result.exp_dir /
                                              'step-{:05}.h5'.format(i + 1)))
            samples = result['samples']
            bbox = problem.bbox.cpu().detach()
            # Scatter plot of the current particles (first two coords).
            fig, ax = plt.subplots()
            ax.scatter(samples[:, 0], samples[:, 1], s=5, alpha=0.6)
            if tmp_args.plot_update:
                updates = result['updates']
                ax.quiver(samples[:, 0], samples[:, 1],
                          updates[:, 0], updates[:, 1],
                          angles='xy', scale_units='xy', scale=1)
            ax.set_xlim(bbox[0, :])
            ax.set_ylim(bbox[1, :])
            ax.set_aspect('equal')
            log_dict = {
                'metrics': {m: result[m] for m in metrics},
                'samples': wandb.Image(fig),
            }
            if tmp_args.num_trial > 1:
                log_dict['metrics_std'] = {m: result[m + '_std']
                                           for m in metrics}
            plt.close(fig)
            # NOTE(review): .cpu() is called before the None check below —
            # this would raise if sample_gt ever returned None here.
            gt_samples = problem.sample_gt(
                samples.shape[0], refresh=True).cpu().detach()
            if gt_samples is not None:
                fig, ax = plt.subplots()
                ax.scatter(gt_samples[:, 0], gt_samples[:, 1], s=5)
                ax.set_xlim(bbox[0, :])
                ax.set_ylim(bbox[1, :])
                ax.set_aspect('equal')
                log_dict['gt_samples'] = wandb.Image(fig)
                plt.close(fig)
            wandb.log(log_dict, commit=False)
    solver.run(num_itr=tmp_args.num_itr,
               post_step_fn=post_step_fn)
    # Final validation, including ground truth and (in 2-D) a density grid.
    validator.run(samples=solver.get_samples(),
                  include_gt=True,
                  include_density=problem.in_dim == 2,
                  density_bbox=problem.bbox,
                  save_path=ec_result.exp_dir / 'result.h5')
| 4,651 | 33.977444 | 77 | py |
MIED | MIED-main/tests/fairness_bnn/run.py | import torch
import argparse
from pathlib import Path
import math
import h5py
import wandb
from mied.validators.particle import ParticleValidator
from mied.utils.random import seed_all
from mied.utils.ec import ExperimentCoordinator
from mied.utils.h5_helpers import save_dict_h5
from mied.problems.fairness_bnn import FairnessBNN
from mied.solvers.mied import MIED
from mied.solvers.svgd import SVGD
from mied.solvers.ksdd import KSDD
from mied.solvers.ipd import IPD
from mied.solvers.dynamic_barrier import DynamicBarrier
if __name__ == '__main__':
    root_dir = Path(__file__).resolve().parent
    ec = ExperimentCoordinator(root_dir)
    # Non-persistent arguments (not written into conf.yml).
    ec.add_temporary_arguments({
        'num_itr': 1000,
        'traj_freq': 10,
    })
    ec.add_common_arguments({
        'thres': 0.01,
        'ineq_scale': 1.0,
    })
    # Per-method and per-projector hyperparameter blueprints.
    ec.add_method_arguments(MIED, {
        'kernel': 'riesz',
        'eps': 1e-8,
        'riesz_s': -1.0,
        'alpha_mied': 0.5,
        'include_diag': 'nnd_scale',
        'diag_mul': 1.3,
    })
    ec.add_method_arguments(KSDD, {
        'sigma': 1.0,
    })
    ec.add_method_arguments(SVGD, {
    })
    ec.add_method_arguments(IPD, {
    })
    ec.add_projector_arguments(DynamicBarrier, {
        'alpha_db': 1.0,
        'merge_eq': True,
        'max_proj_itr': 20
    })
    ec_result = ec.parse_args()
    tmp_args, config = ec_result.tmp_args, ec_result.config
    seed_all(config['seed'])
    problem = FairnessBNN(device=tmp_args.device,
                          data_dir='data/',
                          thres=config['thres'],
                          ineq_scale=config['ineq_scale'])
    validator = ParticleValidator(problem=problem)
    def post_step_fn(i):
        # No per-step logging for this experiment.
        pass
    solver = ec.create_solver(problem)
    solver.run(num_itr=tmp_args.num_itr,
               post_step_fn=post_step_fn)
    print('Validating ...')
    validator.run(samples=solver.get_samples(),
                  include_density=False,
                  save_path=ec_result.exp_dir / 'result.h5')
| 2,025 | 27.138889 | 60 | py |
MIED | MIED-main/tests/logistics/run.py | import torch
import argparse
from pathlib import Path
import math
import h5py
import wandb
from mied.validators.particle import ParticleValidator
from mied.utils.random import seed_all
from mied.utils.ec import ExperimentCoordinator
from mied.utils.h5_helpers import save_dict_h5
from mied.problems.logistics import BayesianLogistics
from mied.solvers.mied import MIED
from mied.solvers.svgd import SVGD
from mied.solvers.ksdd import KSDD
from mied.solvers.ipd import IPD
from mied.solvers.dynamic_barrier import DynamicBarrier
# Binary-classification benchmark datasets supported by this script.
g_data_names = ['banana', 'breast_cancer', 'diabetis', 'flare_solar',
                'german', 'heart', 'image', 'ringnorm', 'splice',
                'thyroid', 'titanic', 'twonorm', 'waveform', 'covtype']
if __name__ == '__main__':
    # Entry point: Bayesian logistic regression on a benchmark dataset,
    # with an MCMC run used as ground truth for sample-quality metrics.
    root_dir = Path(__file__).resolve().parent
    ec = ExperimentCoordinator(root_dir)
    # Run-control knobs (not part of the persisted experiment config).
    ec.add_temporary_arguments({
        'num_itr': 1000,
        'traj_freq': 10,
        'mcmc_only': False,
    })
    ec.add_common_arguments({
        'data_name': 'banana',
    })
    # Default hyperparameters for each selectable solver method.
    ec.add_method_arguments(MIED, {
        'kernel': 'riesz',
        'eps': 1e-8,
        'riesz_s': -1.0,
        'alpha_mied': 0.5,
        'include_diag': 'nnd_scale',
        'diag_mul': 1.3,
    })
    ec.add_method_arguments(KSDD, {
        'sigma': 1.0,
    })
    ec.add_method_arguments(SVGD, {
        'kernel_h': -1.0,
    })
    ec.add_method_arguments(IPD, {
    })
    # Constraint-handling projector options (dynamic-barrier method).
    ec.add_projector_arguments(DynamicBarrier, {
        'alpha_db': 1.0,
        'merge_eq': True,
        'max_proj_itr': 20
    })
    ec_result = ec.parse_args()
    tmp_args, config = ec_result.tmp_args, ec_result.config
    seed_all(config['seed'])
    data_name = config['data_name']
    if data_name in g_data_names:
        # All benchmarks except covtype live in one .mat bundle.
        if data_name != 'covtype':
            data_path = 'data/benchmarks.mat'
        else:
            data_path = 'data/covertype.mat'
    else:
        raise Exception(f'Unknown dataset name: {data_name}!')
    problem = BayesianLogistics(
        device=tmp_args.device,
        data_path=data_path,
        data_name=data_name)
    # Generate ground truth using mcmc.
    (root_dir / 'mcmc').mkdir(parents=True, exist_ok=True)
    mcmc_file = root_dir / 'mcmc' / '{}.h5'.format(data_name)
    mcmc_log_file = root_dir / 'mcmc' / '{}.log'.format(data_name)
    if data_name != 'covtype':
        # MCMC for covtype is just too slow.
        if not mcmc_file.exists():
            # First run for this dataset: sample ground truth and cache it.
            samples = problem.mcmc(num_warmup=10000,
                                   num_sample=10000,
                                   log_file=mcmc_log_file)
            save_dict_h5({'samples': samples},
                         mcmc_file, create_dir=True)
        h5_handle = h5py.File(mcmc_file, 'r')
        mcmc_samples = torch.from_numpy(h5_handle['samples'][:]).to(problem.device)
        h5_handle.close()
    if not tmp_args.mcmc_only:
        validator = ParticleValidator(problem=problem)
        # Periodically compare current particles against the MCMC ground
        # truth and log the metrics to wandb.
        def post_step_fn(i):
            if tmp_args.traj_freq <= 0:
                return
            # NOTE(review): tmp_args.val_freq is not declared above —
            # presumably an ExperimentCoordinator default; verify.
            if (i + 1) % (tmp_args.val_freq * tmp_args.traj_freq) == 0:
                metrics = ['sinkhorn', 'energy_dist']
                samples = solver.get_samples()
                result = validator.run(samples=samples,
                                       metrics=metrics,
                                       gt_samples=mcmc_samples,
                                       strip_last_n=1,
                                       save_path=(ec_result.exp_dir /
                                                  'step-{:05}.h5'.format(i + 1)))
                log_dict = {
                    'metrics': {m: result[m] for m in metrics},
                }
                wandb.log(log_dict, commit=False)
        solver = ec.create_solver(problem)
        # covtype has no MCMC ground truth, so skip the per-step callback.
        solver.run(num_itr=tmp_args.num_itr,
                   post_step_fn=post_step_fn if data_name != 'covtype' else None)
        print('Validating ...')
        validator.run(samples=solver.get_samples(),
                      include_density=False,
                      save_path=ec_result.exp_dir / 'result.h5')
| 4,112 | 32.991736 | 83 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/experiment.py | import time
import torch
import math
import os
import random
import datetime
from pathlib import Path
import numpy as np
from explib import config
from explib.expmaker.experiment_defs import make_wuuid, exp_dict_to_str
from . import logging, problem
class Experiment:
    """One training run: seeding, problem/model setup, the per-epoch
    train/eval loop, metric logging, and a single model checkpoint."""
    def __init__(
        self,
        exp_dict,
        slug,
        exp_uuid,
        disable_wandb,
        gpu,
        dummy_run=False,
    ):
        """Create an experiment.

        Args:
            exp_dict: experiment configuration; requires "seed", "max_epoch",
                "dataset", and reads optional flags (cpu_only, drop_last,
                trained_norms, init_noise_norm, fake_full_batch_mode, ...).
                Mutated in place: device/trained_norms/exp_uuid/dummy_run
                keys are added before problem.init consumes it.
            slug: short human-readable run name used by the logger.
            exp_uuid: unique run id; also names the on-disk save directory.
            disable_wandb: if True, skip Weights & Biases logging.
            gpu: CUDA device string, used when CUDA is available and
                cpu_only is not set.
            dummy_run: forwarded to the problem setup (smoke-test mode).
        """
        self.seed = exp_dict["seed"]
        self.apply_seed()
        cpu_only = exp_dict.get("cpu_only", False)
        self.device = gpu if torch.cuda.is_available() and not cpu_only else "cpu"
        self.data_logger = logging.init_logging_for_exp(
            slug,
            exp_uuid,
            exp_dict,
            disable_wandb,
            additional_config={
                "device": self.device,
                "uuid": exp_uuid,
                "wuuid": make_wuuid(exp_dict),
                "exp_dict_str": exp_dict_to_str(exp_dict),
            },
        )
        logging.info(f"Creating experiment. Received experiment dictionnary {exp_dict}")
        self.max_epoch = exp_dict["max_epoch"]
        self.fake_full_batch_mode = exp_dict.get("fake_full_batch_mode", False)
        self.drop_last = exp_dict.get("drop_last", False)
        self.trained_norms = exp_dict.get("trained_norms", False)
        self.init_noise_norm = exp_dict.get("init_noise_norm", False)
        # The problem factory reads these extra keys from exp_dict.
        exp_dict["device"] = self.device
        exp_dict["trained_norms"] = self.trained_norms
        exp_dict["exp_uuid"] = exp_uuid
        exp_dict["dummy_run"] = dummy_run
        self.problem = problem.init(exp_dict)
        self.save_path = os.path.join(
            config.get_workspace(), exp_dict["dataset"], exp_uuid
        )
        self.model_dir = os.path.join(self.save_path, "model")
        Path(self.model_dir).mkdir(parents=True, exist_ok=True)
        self.model_file = os.path.join(self.model_dir, "model.pt")
        if not os.path.isfile(self.model_file):
            # No plain model.pt yet: target an epoch-stamped filename instead.
            self.model_file = os.path.join(
                self.model_dir, "model_{}.pt".format(self.max_epoch)
            )
    def apply_seed(self):
        """Seed numpy, random and torch (all CUDA devices) with self.seed."""
        np.random.seed(self.seed)
        random.seed(self.seed)
        torch.manual_seed(self.seed)
        torch.cuda.manual_seed_all(self.seed)
    def run(self):
        """Run the experiment: optional noise-norm passes, initial evaluation,
        the train/eval epochs with metric logging, and the final runtime log.
        Aborts early (after logging) if the training loss diverges."""
        start_time = time.time()
        # TODO: Allow to continue training from a nonzero epoch?
        logging.info("Starting experiment run")
        starting_epoch = 0
        if self.init_noise_norm or self.trained_norms:
            # Measure gradient-noise norms at initialization.
            # NOTE(review): this calls problem.calc_norms(...) while the
            # per-epoch path below calls problem.calculate_noise_norm(...);
            # confirm both methods exist on the problem object.
            logging.info("Initial run to compute noise norms")
            r_start = time.time()
            self.problem.calc_norms(norm_epoch=0)
            r_end = time.time()
            logging.info(f"Norm computation time: {r_end - r_start}")
        logging.info("Initial evaluation")
        r_start = time.time()
        initial_train_metrics = self.problem.eval_loop(is_validation=False)
        print(initial_train_metrics)
        initial_valid_metrics = self.problem.eval_loop(is_validation=True)
        self.data_logger(initial_valid_metrics, commit=False)
        self.data_logger(initial_train_metrics)
        r_end = time.time()
        logging.info(f"Initial evaluation time: {r_end - r_start}")
        # Epochs at which noise norms are recomputed when trained_norms is on.
        epochs_to_compute_noise_norm = [
            1,
            int(self.max_epoch * 0.1),
            int(self.max_epoch * 0.25),
            int(self.max_epoch * 0.5),
            int(self.max_epoch * 0.75),
        ]
        for epoch in range(starting_epoch, self.max_epoch):
            logging.info(f"Epoch {epoch}/{self.max_epoch}")
            if self.trained_norms and epoch in epochs_to_compute_noise_norm:
                logging.info(f"Computing noise norms at epoch {epoch}")
                self.problem.calculate_noise_norm(epoch=epoch)
            # run training loop
            epoch_begin_time = time.time()
            train_loss, func_vals, gnorms_1, gnorms_2 = self.problem.train_loop()
            epoch_end_time = time.time()
            epoch_training_time = epoch_end_time - epoch_begin_time
            # Divergence guard: record the failure mode and stop training.
            if math.isnan(train_loss) or math.isinf(train_loss):
                if math.isnan(train_loss):
                    self.data_logger({"training_error": "nan"})
                else:
                    self.data_logger({"training_error": "inf"})
                break
            # run eval loop
            logging.info(f"Running evaluation")
            train_metrics = self.problem.eval_loop(is_validation=False)
            self.data_logger(train_metrics, commit=False)
            valid_metrics = self.problem.eval_loop(is_validation=True)
            self.data_logger(valid_metrics, commit=False)
            self.data_logger(
                {
                    "epoch": epoch,
                    "average_training_loss": train_loss,
                    "function_values": func_vals,
                    "norm_squared_gradients": gnorms_2,
                    "norm_squared_gradients_l1": gnorms_1,
                    "epoch_training_time": epoch_training_time,
                }
            )
            # save model
            # NOTE(review): this sits inside the epoch loop, so the model is
            # written once — the first epoch at which model_file is missing —
            # and never overwritten afterwards; confirm this is intended.
            if not os.path.isfile(self.model_file):
                with open(self.model_file, "wb") as f:
                    torch.save(self.problem.model.state_dict(), f)
        if self.trained_norms:
            self.problem.calculate_noise_norm(epoch=self.max_epoch)
        end_time = time.time()
        self.data_logger(
            {"exp_runtime": str(datetime.timedelta(seconds=end_time - start_time))}
        )
| 5,622 | 34.815287 | 88 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/util.py | import torch
import torch.nn as nn
def get_grads(model):
    """Return all trainable-parameter gradients flattened into one 1-D tensor.

    Assumes ``backward()`` has already populated ``p.grad`` for every
    parameter with ``requires_grad=True``; gradients are concatenated in
    ``model.parameters()`` order.
    """
    pieces = [p.grad.view(-1) for p in model.parameters() if p.requires_grad]
    return torch.cat(pieces)
# Standard deviations for normal-distributed parameter initialization
# (Transformer-XL defaults).
INIT_STD = 0.02
PROJ_INIT_STD = 0.01
def init_weight(weight):
    """Fill ``weight`` in place with samples from N(0, INIT_STD**2)."""
    nn.init.normal_(weight, mean=0.0, std=INIT_STD)
def init_bias(bias):
    """Zero out ``bias`` in place."""
    nn.init.constant_(bias, val=0.0)
def weights_init(m):
    """Transformer-XL style initializer; use via ``model.apply(weights_init)``.

    Dispatch is by substring match on the module's class name, and branch
    order matters: "AdaptiveEmbedding" must be tested before the generic
    "Embedding" branch, which its name also matches.
    """
    name = m.__class__.__name__
    if "Linear" in name:
        if hasattr(m, "weight") and m.weight is not None:
            init_weight(m.weight)
        if hasattr(m, "bias") and m.bias is not None:
            init_bias(m.bias)
    elif "AdaptiveEmbedding" in name:
        if hasattr(m, "emb_projs"):
            for proj in m.emb_projs:
                if proj is not None:
                    nn.init.normal_(proj, 0.0, PROJ_INIT_STD)
    elif "Embedding" in name:
        if hasattr(m, "weight"):
            init_weight(m.weight)
    elif "ProjectedAdaptiveLogSoftmax" in name:
        if hasattr(m, "cluster_weight") and m.cluster_weight is not None:
            init_weight(m.cluster_weight)
        if hasattr(m, "cluster_bias") and m.cluster_bias is not None:
            init_bias(m.cluster_bias)
        if hasattr(m, "out_projs"):
            for proj in m.out_projs:
                if proj is not None:
                    nn.init.normal_(proj, 0.0, PROJ_INIT_STD)
    elif "LayerNorm" in name:
        # LayerNorm scale is centered at 1.0 rather than 0.0.
        if hasattr(m, "weight"):
            nn.init.normal_(m.weight, 1.0, INIT_STD)
        if hasattr(m, "bias") and m.bias is not None:
            init_bias(m.bias)
    elif "TransformerLM" in name:
        # Relative-attention position parameters on the top-level model.
        for attr in ("r_emb", "r_w_bias", "r_r_bias"):
            if hasattr(m, attr):
                init_weight(getattr(m, attr))
        if hasattr(m, "r_bias"):
            init_bias(m.r_bias)
def disable_running_stats(m):
    """Turn off running-statistics tracking on batch-norm layers.

    Intended for ``model.apply(disable_running_stats)``; every module that
    is not a batch-norm subclass is left untouched.
    """
    if not isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
        return
    m.track_running_stats = False
def enable_running_stats(m):
    """Re-enable running-statistics tracking on batch-norm layers.

    Counterpart of ``disable_running_stats``; no-op for other module types.
    """
    if not isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
        return
    m.track_running_stats = True
| 2,310 | 30.22973 | 73 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/language_loader.py | import os, sys
import glob
from collections import Counter, OrderedDict
import numpy as np
import torch
import subprocess
# Code copied from https://github.com/kimiyoung/transformer-xl
from explib import config
class Vocab(object):
    """Token vocabulary (from the Transformer-XL code base).

    Workflow: accumulate token frequencies with ``count_file`` /
    ``count_sents``, call ``build_vocab`` to freeze the index, then use
    ``encode_file`` / ``encode_sents`` to map text to LongTensors.
    """
    def __init__(
        self,
        special=[],
        min_freq=0,
        max_size=None,
        lower_case=True,
        delimiter=None,
        vocab_file=None,
    ):
        # NOTE(review): mutable default `special=[]` is shared across
        # instances; harmless only while it is never mutated in place.
        self.counter = Counter()
        self.special = special
        self.min_freq = min_freq
        self.max_size = max_size
        self.lower_case = lower_case
        self.delimiter = delimiter
        self.vocab_file = vocab_file
    def tokenize(self, line, add_eos=False, add_double_eos=False):
        """Split one text line into symbols, optionally adding EOS markers."""
        line = line.strip()
        # convert to lower case
        if self.lower_case:
            line = line.lower()
        # empty delimiter '' will evaluate False
        if self.delimiter == "":
            symbols = line
        else:
            symbols = line.split(self.delimiter)
        if add_double_eos: # lm1b
            return ["<S>"] + symbols + ["<S>"]
        elif add_eos:
            return symbols + ["<eos>"]
        else:
            return symbols
    def count_file(self, path, verbose=False, add_eos=False):
        """Tokenize a text file, update the frequency counter, and return
        the list of tokenized sentences."""
        if verbose:
            print("counting file {} ...".format(path))
        assert os.path.exists(path)
        sents = []
        with open(path, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print("    line {}".format(idx))
                symbols = self.tokenize(line, add_eos=add_eos)
                self.counter.update(symbols)
                sents.append(symbols)
        return sents
    def count_sents(self, sents, verbose=False):
        """
        sents : a list of sentences, each a list of tokenized symbols
        """
        if verbose:
            print("counting {} sents ...".format(len(sents)))
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                print("    line {}".format(idx))
            self.counter.update(symbols)
    def _build_from_file(self, vocab_file):
        """Build the index from a fixed vocab file (one symbol per line);
        the file must define <UNK>."""
        self.idx2sym = []
        self.sym2idx = OrderedDict()
        with open(vocab_file, "r", encoding="utf-8") as f:
            for line in f:
                symb = line.strip().split()[0]
                self.add_symbol(symb)
        self.unk_idx = self.sym2idx["<UNK>"]
    def build_vocab(self):
        """Finalize idx2sym/sym2idx, either from self.vocab_file or from the
        accumulated counter (respecting special tokens, min_freq, max_size)."""
        if self.vocab_file:
            print("building vocab from {}".format(self.vocab_file))
            self._build_from_file(self.vocab_file)
            print("final vocab size {}".format(len(self)))
        else:
            print(
                "building vocab with min_freq={}, max_size={}".format(
                    self.min_freq, self.max_size
                )
            )
            self.idx2sym = []
            self.sym2idx = OrderedDict()
            for sym in self.special:
                self.add_special(sym)
            # most_common(None) returns all symbols ordered by frequency.
            for sym, cnt in self.counter.most_common(self.max_size):
                if cnt < self.min_freq:
                    break
                self.add_symbol(sym)
            print(
                "final vocab size {} from {} unique tokens".format(
                    len(self), len(self.counter)
                )
            )
    def encode_file(
        self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False
    ):
        """Encode a text file as index tensors: one tensor per line, or a
        single concatenated tensor when ordered=True."""
        if verbose:
            print("encoding file {} ...".format(path))
        assert os.path.exists(path)
        encoded = []
        with open(path, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print("    line {}".format(idx))
                symbols = self.tokenize(
                    line, add_eos=add_eos, add_double_eos=add_double_eos
                )
                encoded.append(self.convert_to_tensor(symbols))
        if ordered:
            encoded = torch.cat(encoded)
        return encoded
    def encode_sents(self, sents, ordered=False, verbose=False):
        """Encode pre-tokenized sentences; concatenated when ordered=True."""
        if verbose:
            print("encoding {} sents ...".format(len(sents)))
        encoded = []
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                print("    line {}".format(idx))
            encoded.append(self.convert_to_tensor(symbols))
        if ordered:
            encoded = torch.cat(encoded)
        return encoded
    def add_special(self, sym):
        """Register a special token and expose its index as an attribute,
        e.g. "<eos>" -> self.eos_idx."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1
            setattr(self, "{}_idx".format(sym.strip("<>")), self.sym2idx[sym])
    def add_symbol(self, sym):
        """Append a symbol to the vocabulary if unseen."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1
    def get_sym(self, idx):
        """Index -> symbol; asserts the index is in range."""
        assert 0 <= idx < len(self), "Index {} out of range".format(idx)
        return self.idx2sym[idx]
    def get_idx(self, sym):
        """Symbol -> index, falling back to <UNK> for unknown symbols."""
        if sym in self.sym2idx:
            return self.sym2idx[sym]
        else:
            # print('encounter unk {}'.format(sym))
            # Unknown symbol: requires an unk token (vocab-file mode); the
            # .get() below therefore always resolves to self.unk_idx here.
            assert "<eos>" not in sym
            assert hasattr(self, "unk_idx")
            return self.sym2idx.get(sym, self.unk_idx)
    def get_symbols(self, indices):
        """Map a sequence of indices to symbols."""
        return [self.get_sym(idx) for idx in indices]
    def get_indices(self, symbols):
        """Map a sequence of symbols to indices."""
        return [self.get_idx(sym) for sym in symbols]
    def convert_to_tensor(self, symbols):
        """Encode a symbol sequence as a 1-D LongTensor of indices."""
        return torch.LongTensor(self.get_indices(symbols))
    def convert_to_sent(self, indices, exclude=None):
        """Decode indices into a space-joined sentence, optionally skipping
        indices contained in `exclude`."""
        if exclude is None:
            return " ".join([self.get_sym(idx) for idx in indices])
        else:
            return " ".join(
                [self.get_sym(idx) for idx in indices if idx not in exclude]
            )
    def convert_to_sent_from_tensor(self, indices):
        """Decode a 2-D index tensor into one sentence string per row."""
        sents = []
        for sent in indices:
            sents.append(" ".join([self.get_sym(int(idx)) for idx in sent]))
        return sents
    def __len__(self):
        """Number of symbols in the vocabulary."""
        return len(self.idx2sym)
class LMOrderedIterator(object):
    """Iterator over contiguous language-modeling batches.

    The token stream is reshaped to (n_step, bsz); successive batches of
    length ``bptt`` slide down the time axis, so hidden state can be carried
    across batches.
    """
    def __init__(
        self,
        data,
        bsz,
        bptt,
        device="cpu",
        ext_len=None,
        drop_last=False,
        outliers_filename=None,
    ):
        """
        data -- LongTensor -- the LongTensor is strictly ordered
        """
        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = ext_len if ext_len is not None else 0
        self.device = device
        # Work out how cleanly we can divide the dataset into bsz parts.
        self.n_step = data.size(0) // bsz
        # Trim off any extra elements that wouldn't cleanly fit (remainders).
        data = data.narrow(0, 0, self.n_step * bsz)
        if outliers_filename is not None:
            # Remove precomputed outlier segments (bptt-sized chunks) from
            # the stream; indices are deleted back-to-front so earlier
            # positions stay valid while slicing.
            outlier_indices = np.load(
                os.path.join(
                    os.path.dirname(os.path.abspath(__file__)),
                    "outliers",
                    outliers_filename,
                )
            )
            outlier_indices = sorted(
                list(map(int, np.ndarray.tolist(outlier_indices))), reverse=True
            )
            for idx in outlier_indices:
                data = torch.cat(
                    [data[0 : idx * self.bptt], data[(idx + 1) * self.bptt :]]
                )
            self.n_step = data.size(0) // bsz
        # Evenly divide the data across the bsz batches.
        self.data = data.view(bsz, -1).t().contiguous().to(device)
        # Number of mini-batches
        self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
        self.drop_last = drop_last
        if self.drop_last and (self.n_step + self.bptt - 1) % self.bptt != 0:
            self.n_batch = self.n_batch - 1
    def __len__(self):
        """Number of mini-batches per epoch."""
        return self.n_batch
    def get_batch(self, i, bptt=None):
        """Return (data, target, seq_len) for the batch starting at time i;
        target is the input shifted one step ahead, and data may include up
        to ext_len extra context tokens before position i."""
        if bptt is None:
            bptt = self.bptt
        seq_len = min(bptt, self.data.size(0) - 1 - i)
        end_idx = i + seq_len
        beg_idx = max(0, i - self.ext_len)
        data = self.data[beg_idx:end_idx]
        target = self.data[i + 1 : i + 1 + seq_len]
        return data, target, seq_len
    def get_fixlen_iter(self, start=0):
        """Yield fixed-length (bptt) batches; honors drop_last."""
        end = self.data.size(0) - 1
        if self.drop_last:
            end = self.data.size(0) - 1 - ((self.data.size(0) - 1) % self.bptt)
        for i in range(start, end, self.bptt):
            yield self.get_batch(i)
    def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
        """Yield batches with randomly jittered lengths (a training-time
        augmentation); lengths are clamped to [min_len, bptt + max_deviation*std]."""
        max_len = self.bptt + max_deviation * std
        i = start
        while True:
            # Occasionally halve the target length, then add Gaussian jitter.
            bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0
            bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
            data, target, seq_len = self.get_batch(i, bptt)
            i += seq_len
            yield data, target, seq_len
            if i >= self.data.size(0) - 2:
                break
    def __iter__(self):
        return self.get_fixlen_iter()
class LMShuffledIterator(object):
    """Iterator over variable-length sentences packed into fixed-size batches.

    Sentences are (optionally shuffled and) streamed into ``bsz`` parallel
    columns; each yielded batch is (data, target, bptt) with unfilled
    positions set to -1.
    """
    def __init__(self, data, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
        """
        data -- list[LongTensor] -- there is no order among the LongTensors
        """
        self.data = data
        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = ext_len if ext_len is not None else 0
        self.device = device
        self.shuffle = shuffle
    def get_sent_stream(self):
        """Yield sentences one by one, in shuffled order when self.shuffle."""
        # index iterator
        epoch_indices = (
            np.random.permutation(len(self.data))
            if self.shuffle
            else np.array(range(len(self.data)))
        )
        # sentence iterator
        for idx in epoch_indices:
            yield self.data[idx]
    def stream_iterator(self, sent_stream):
        """Pack sentences from sent_stream into (bptt x bsz) batches; stops
        as soon as any column runs out of sentences."""
        # streams for each data in the batch
        streams = [None] * self.bsz
        data = torch.LongTensor(self.bptt, self.bsz)
        target = torch.LongTensor(self.bptt, self.bsz)
        n_retain = 0
        while True:
            # data : [n_retain+bptt x bsz]
            # target : [bptt x bsz]
            data[n_retain:].fill_(-1)
            target.fill_(-1)
            valid_batch = True
            for i in range(self.bsz):
                n_filled = 0
                try:
                    while n_filled < self.bptt:
                        # Pull the next sentence when the current one is
                        # exhausted (one token must remain as target input).
                        if streams[i] is None or len(streams[i]) <= 1:
                            streams[i] = next(sent_stream)
                        # number of new tokens to fill in
                        n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
                        # first n_retain tokens are retained from last batch
                        data[
                            n_retain + n_filled : n_retain + n_filled + n_new, i
                        ] = streams[i][:n_new]
                        target[n_filled : n_filled + n_new, i] = streams[i][
                            1 : n_new + 1
                        ]
                        streams[i] = streams[i][n_new:]
                        n_filled += n_new
                except StopIteration:
                    valid_batch = False
                    break
            if not valid_batch:
                return
            data = data.to(self.device)
            target = target.to(self.device)
            yield data, target, self.bptt
            # Carry the last ext_len tokens forward as extended context for
            # the next batch.
            n_retain = min(data.size(0), self.ext_len)
            if n_retain > 0:
                data[:n_retain] = data[-n_retain:]
            data.resize_(n_retain + self.bptt, data.size(1))
    def __iter__(self):
        # sent_stream is an iterator
        sent_stream = self.get_sent_stream()
        for batch in self.stream_iterator(sent_stream):
            yield batch
class Corpus(object):
    """Per-dataset vocabulary plus encoded train/valid/test splits."""
    def __init__(self, path, dataset, *args, **kwargs):
        """Build the vocabulary for `dataset` under `path` and encode all
        splits; extra args are forwarded to Vocab."""
        self.dataset = dataset
        self.vocab = Vocab(*args, **kwargs)
        # First pass: count tokens (policy differs per dataset), then freeze
        # the vocabulary before encoding.
        if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
            self.vocab.count_file(os.path.join(path, "train.txt"))
            self.vocab.count_file(os.path.join(path, "valid.txt"))
            self.vocab.count_file(os.path.join(path, "test.txt"))
        elif self.dataset == "wt103":
            self.vocab.count_file(os.path.join(path, "train.txt"))
        elif self.dataset == "lm1b":
            train_path_pattern = os.path.join(
                path,
                "1-billion-word-language-modeling-benchmark-r13output",
                "training-monolingual.tokenized.shuffled",
                "news.en-*",
            )
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called
        self.vocab.build_vocab()
        if self.dataset in ["ptb", "wt2", "wt103"]:
            self.train = self.vocab.encode_file(
                os.path.join(path, "train.txt"), ordered=True
            )
            self.valid = self.vocab.encode_file(
                os.path.join(path, "valid.txt"), ordered=True
            )
            self.test = self.vocab.encode_file(
                os.path.join(path, "test.txt"), ordered=True
            )
        elif self.dataset in ["enwik8", "text8"]:
            # Character-level datasets: no <eos> tokens appended.
            self.train = self.vocab.encode_file(
                os.path.join(path, "train.txt"), ordered=True, add_eos=False
            )
            self.valid = self.vocab.encode_file(
                os.path.join(path, "valid.txt"), ordered=True, add_eos=False
            )
            self.test = self.vocab.encode_file(
                os.path.join(path, "test.txt"), ordered=True, add_eos=False
            )
        elif self.dataset == "lm1b":
            # lm1b training shards are kept as file paths, not pre-encoded.
            self.train = train_paths
            self.valid = self.vocab.encode_file(
                os.path.join(path, "valid.txt"), ordered=False, add_double_eos=True
            )
            self.test = self.vocab.encode_file(
                os.path.join(path, "test.txt"), ordered=False, add_double_eos=True
            )
    def get_iterator(self, split, *args, **kwargs):
        """Return an LMOrderedIterator for "train", "valid" or "test";
        extra args go to the iterator constructor.

        NOTE(review): `data_iter` is left unbound for lm1b or an unknown
        split, which would raise UnboundLocalError — confirm callers only
        use the ordered datasets handled here.
        """
        if split == "train":
            if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
        elif split in ["valid", "test"]:
            data = self.valid if split == "valid" else self.test
            if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
        return data_iter
def get_lm_corpus(datadir, dataset):
    """Return the Corpus for `dataset` under `datadir`.

    Loads `datadir/cache.pt` when present; otherwise builds the corpus with
    dataset-specific Vocab options and writes the cache for next time.
    """
    cache_path = os.path.join(datadir, "cache.pt")
    if os.path.exists(cache_path):
        print("Loading cached dataset...")
        return torch.load(cache_path)
    print("Producing dataset {}...".format(dataset))
    # Dataset-specific vocabulary options forwarded to Vocab via Corpus.
    vocab_opts = {}
    if dataset in ("wt103", "wt2"):
        vocab_opts["special"] = ["<eos>"]
        vocab_opts["lower_case"] = False
    elif dataset == "ptb":
        vocab_opts["special"] = ["<eos>"]
        vocab_opts["lower_case"] = True
    elif dataset == "lm1b":
        vocab_opts["special"] = []
        vocab_opts["lower_case"] = False
        vocab_opts["vocab_file"] = os.path.join(datadir, "1b_word_vocab.txt")
    elif dataset in ("enwik8", "text8"):
        # Character-level corpora use Vocab defaults.
        pass
    corpus = Corpus(datadir, dataset, **vocab_opts)
    torch.save(corpus, cache_path)
    return corpus
def ptb_loader(
    batch_size,
    device,
    tgt_len,
    drop_last=False,
    outliers_filename=None,
):
    """Build Penn Treebank train/test iterators.

    Downloads the dataset with get_ptb.sh on first use (side effect:
    creates <workspace>/datasets/penn).

    Args:
        batch_size: number of parallel token streams per batch.
        device: device the batch tensors are placed on.
        tgt_len: sequence length (bptt) of each batch.
        drop_last: drop the final short batch of the training split.
        outliers_filename: optional .npy file of segment indices to remove
            from the training stream.

    Returns:
        (train_iter, test_iter, ntokens) where ntokens is the vocab size.
    """
    datadir = os.path.join(config.get_workspace(), "datasets", "penn")
    cwd = os.path.dirname(os.path.realpath(__file__))
    if not os.path.isdir(datadir):
        # First run: fetch and unpack the dataset with the helper script.
        result = subprocess.run(
            [
                "sh",
                "./get_ptb.sh",
                os.path.abspath(os.path.join(config.get_workspace(), "datasets")),
            ],
            check=True,
            cwd=cwd,
            capture_output=True,
            text=True,
        )
        print("Shell get_ptb.sh: stdout")
        print(result.stdout)
        print("Shell get_ptb.sh: stderr")
        print(result.stderr)
    corpus = get_lm_corpus(datadir, "ptb")
    ntokens = len(corpus.vocab)
    tr_iter = corpus.get_iterator(
        "train",
        batch_size,
        tgt_len,
        device=device,
        ext_len=0,
        drop_last=drop_last,
        outliers_filename=outliers_filename,
    )
    te_iter = corpus.get_iterator("test", batch_size, tgt_len, device=device, ext_len=0)
    return tr_iter, te_iter, ntokens
def wikitext2_loader(
    batch_size,
    device,
    tgt_len,
    drop_last=False,
):
    """Build WikiText-2 train/test iterators.

    Downloads the dataset with get_wikitext2.sh on first use (side effect:
    creates <workspace>/datasets/wikitext-2).

    Args:
        batch_size: number of parallel token streams per batch.
        device: device the batch tensors are placed on.
        tgt_len: sequence length (bptt) of each batch.
        drop_last: drop the final short batch of the training split.

    Returns:
        (train_iter, test_iter, ntokens, corpus); unlike ptb_loader, the
        Corpus object itself is also returned.
    """
    datadir = os.path.join(config.get_workspace(), "datasets", "wikitext-2")
    cwd = os.path.dirname(os.path.realpath(__file__))
    if not os.path.isdir(datadir):
        # First run: fetch and unpack the dataset with the helper script.
        result = subprocess.run(
            [
                "sh",
                "./get_wikitext2.sh",
                os.path.abspath(os.path.join(config.get_workspace(), "datasets")),
            ],
            check=True,
            cwd=cwd,
            capture_output=True,
            text=True,
        )
        print("Shell get_wikitext2.sh: stdout")
        print(result.stdout)
        print("Shell get_wikitext2.sh: stderr")
        print(result.stderr)
    corpus = get_lm_corpus(datadir, "wt2")
    ntokens = len(corpus.vocab)
    tr_iter = corpus.get_iterator(
        "train",
        batch_size,
        tgt_len,
        device=device,
        ext_len=0,
        drop_last=drop_last,
    )
    te_iter = corpus.get_iterator("test", batch_size, tgt_len, device=device, ext_len=0)
    return tr_iter, te_iter, ntokens, corpus
| 17,819 | 31.459016 | 88 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/glue_loader.py | import os
import random
from explib import config
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
)
# Tokenizer truncation length for GLUE inputs.
MAX_LENGTH = 128
# Lower bound on the batch size used by the evaluation dataloaders.
EVAL_BASE_BATCH_SIZE = 64
# Hugging Face checkpoint fine-tuned by glue_loader.
model_name = "bert-base-cased"
# For each GLUE task: the dataset column(s) holding the input text
# (second entry is None for single-sentence tasks).
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}
def glue_loader(task_name, accelerator, batch_size):
    """Build tokenized train/eval DataLoaders for a GLUE task.

    Args:
        task_name: GLUE task key (see task_to_keys), e.g. "mrpc"; "stsb" is
            treated as regression.
        accelerator: `accelerate` Accelerator; used for fp16 padding and to
            serialize dataset mapping to the main process.
        batch_size: training batch size; evaluation loaders use
            max(EVAL_BASE_BATCH_SIZE, batch_size).

    Returns:
        (train_dataloader, train_eval_dataloader, eval_dataloader,
         num_labels, task_name, is_regression)

    NOTE(review): the model instantiated below is used only to reconcile its
    label2id mapping with the dataset; it is not returned from this function.
    """
    if task_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            "glue",
            task_name,
            cache_dir=os.path.join(config.get_workspace(), "datasets"),
        )
    # Labels
    if task_name is not None:
        is_regression = task_name == "stsb"
        if not is_regression:
            label_list = raw_datasets["train"].features["label"].names
            num_labels = len(label_list)
        else:
            num_labels = 1
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    autoconfig = AutoConfig.from_pretrained(
        model_name, num_labels=num_labels, finetuning_task=task_name
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
    model = AutoModelForSequenceClassification.from_pretrained(
        model_name,
        from_tf=False,
        config=autoconfig,
    )
    # Preprocessing the datasets
    if task_name is not None:
        sentence1_key, sentence2_key = task_to_keys[task_name]
    else:
        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
        non_label_column_names = [
            name for name in raw_datasets["train"].column_names if name != "label"
        ]
        if (
            "sentence1" in non_label_column_names
            and "sentence2" in non_label_column_names
        ):
            sentence1_key, sentence2_key = "sentence1", "sentence2"
        else:
            if len(non_label_column_names) >= 2:
                sentence1_key, sentence2_key = non_label_column_names[:2]
            else:
                sentence1_key, sentence2_key = non_label_column_names[0], None
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = None
    if (
        model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
        and task_name is not None
        and not is_regression
    ):
        # Some have all caps in their config, some don't.
        label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
        if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
            label_to_id = {
                i: label_name_to_id[label_list[i]] for i in range(num_labels)
            }
        else:
            print(
                "Your model seems to have been trained with labels, but they don't match the dataset: ",
                f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                "\nIgnoring the model labels as a result.",
            )
    elif task_name is None:
        label_to_id = {v: i for i, v in enumerate(label_list)}
    if label_to_id is not None:
        model.config.label2id = label_to_id
        model.config.id2label = {id: label for label, id in autoconfig.label2id.items()}
    elif task_name is not None and not is_regression:
        model.config.label2id = {l: i for i, l in enumerate(label_list)}
        model.config.id2label = {id: label for label, id in autoconfig.label2id.items()}
    # No padding at tokenization time; DataCollatorWithPadding pads per batch.
    padding = False
    def preprocess_function(examples):
        # Tokenize the texts
        texts = (
            (examples[sentence1_key],)
            if sentence2_key is None
            else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(
            *texts, padding=padding, max_length=MAX_LENGTH, truncation=True
        )
        if "label" in examples:
            if label_to_id is not None:
                # Map labels to IDs (not necessary for GLUE tasks)
                result["labels"] = [label_to_id[l] for l in examples["label"]]
            else:
                # In all cases, rename the column to labels because the model will expect that.
                result["labels"] = examples["label"]
        return result
    with accelerator.main_process_first():
        processed_datasets = raw_datasets.map(
            preprocess_function,
            batched=True,
            remove_columns=raw_datasets["train"].column_names,
            desc="Running tokenizer on dataset",
        )
    train_dataset = processed_datasets["train"]
    eval_dataset = processed_datasets[
        "validation_matched" if task_name == "mnli" else "validation"
    ]
    # for index in random.sample(range(len(train_dataset)), 3):
    #     print(f"Sample {index} of the training set: {train_dataset[index]}.")
    # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
    # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
    # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
    data_collator = DataCollatorWithPadding(
        tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)
    )
    train_dataloader = DataLoader(
        train_dataset, shuffle=True, collate_fn=data_collator, batch_size=batch_size
    )
    train_eval_dataloader = DataLoader(
        train_dataset,
        collate_fn=data_collator,
        batch_size=max(EVAL_BASE_BATCH_SIZE, batch_size),
    )
    eval_dataloader = DataLoader(
        eval_dataset,
        collate_fn=data_collator,
        batch_size=max(EVAL_BASE_BATCH_SIZE, batch_size),
    )
    return (
        train_dataloader,
        train_eval_dataloader,
        eval_dataloader,
        num_labels,
        task_name,
        is_regression,
    )
| 6,484 | 34.828729 | 117 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/torchvision_loader.py | import os
import torch
import torchvision
from explib import config
from torchvision import transforms
from torchvision.datasets import MNIST, USPS
def torchvision_loader(dataset_name, batch_size, drop_last=False, shuffle=True):
    """Build train/validation DataLoaders for a torchvision digit dataset.

    Args:
        dataset_name: "mnist" or "usps".
        batch_size: mini-batch size for both loaders.
        drop_last: drop the final incomplete training batch.
        shuffle: shuffle the training split each epoch.

    Returns:
        (train_dataloader, valid_dataloader) tuple.

    Raises:
        Exception: if `dataset_name` is not supported.
    """
    if dataset_name == "mnist":
        loader = MNIST
    elif dataset_name == "usps":
        loader = USPS
    else:
        raise Exception("Dataset {} not available".format(dataset_name))
    root = os.path.join(config.get_workspace(), "datasets")
    # Identical preprocessing for both splits (previously duplicated inline):
    # upscale to 32x32 and normalize pixels into [-1, 1].
    transform = transforms.Compose(
        [
            transforms.Resize(32),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ]
    )
    def _make_dataloader(train, shuffle_split, drop_last_split):
        # Build a DataLoader over one split (downloads the data on first use).
        return torch.utils.data.DataLoader(
            loader(root, train=train, download=True, transform=transform),
            batch_size=batch_size,
            shuffle=shuffle_split,
            drop_last=drop_last_split,
        )
    train_dataloader = _make_dataloader(True, shuffle, drop_last)
    # Validation never shuffles and keeps the last partial batch (matching
    # the original behavior, where drop_last was not forwarded).
    valid_dataloader = _make_dataloader(False, False, False)
    return train_dataloader, valid_dataloader
| 1,511 | 26.490909 | 80 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/squad_loader.py | import tokenize
import datasets
import os
from datasets import load_dataset
from accelerate import Accelerator
from explib import config
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoTokenizer,
DataCollatorWithPadding,
)
from torch.utils.data.dataloader import DataLoader
import numpy as np
# Hugging Face model identifiers selectable via the `model_name` argument.
BERT_BASE_PRETRAINED = "bert-base-uncased"
DISTILBERT = "distilbert-base-uncased"
def squad_loader(
    dataset_name,
    batch_size,
    tgt_len,
    doc_stride,
    model_name,
    drop_last=False,
    fake_full_batch_mode=False,
    shuffle=True,
    outliers_filename=None,
):
    """Build train/eval dataloaders for SQuAD or adversarial_qa question answering.

    Downloads the dataset (HuggingFace `datasets`), tokenizes it for either
    BERT-base or DistilBERT, and returns an 8-tuple:
    (train_dataloader, train_dataloader_for_eval, eval_dataloader,
     eval_dataset_valid, eval_examples, train_dataset_for_eval,
     train_examples, tokenizer).

    NOTE(review): `tgt_len` is accepted but never used in this body — the max
    sequence length comes from `tokenizer.model_max_length` instead; confirm
    whether it was meant to cap `max_seq_length`.

    Args:
        dataset_name: "squad" or anything else (treated as adversarial_qa/dbert).
        batch_size: per-step batch size for all dataloaders.
        tgt_len: unused here (see note above).
        doc_stride: overlap stride when splitting long contexts into features.
        model_name: "bert_base_pretrained" selects BERT-base; any other value
            selects DistilBERT.
        drop_last: drop the final incomplete training batch.
        fake_full_batch_mode: restrict training to a seed-dependent 6144-example
            slice (used to emulate full-batch training).
        shuffle: whether to shuffle the training dataloader.
        outliers_filename: optional .npy file of feature indices to exclude
            from training.
    """
    split = "train"
    if fake_full_batch_mode:
        # Derive a slice offset from numpy's RNG state so each seed sees a
        # different fixed 6144-example window of the training split.
        seed = np.random.get_state()[1][0] % 13
        start = seed * 6144
        end = (seed + 1) * 6144
        split = "train[{}:{}]".format(start, end)
    if dataset_name == "squad":
        raw_datasets = load_dataset(
            "squad",
            cache_dir=os.path.join(config.get_workspace(), "datasets"),
            split=split,
        )
    else:
        # Any non-"squad" name falls back to the adversarial_qa/dbert subset.
        raw_datasets = load_dataset(
            "adversarial_qa",
            "dbert",
            cache_dir=os.path.join(config.get_workspace(), "datasets"),
            split=split,
        )
    column_names = raw_datasets.column_names
    question_column_name = "question" if "question" in column_names else column_names[0]
    context_column_name = "context" if "context" in column_names else column_names[1]
    answer_column_name = "answers" if "answers" in column_names else column_names[2]
    # Map the experiment-level model name onto a HuggingFace checkpoint id.
    if model_name == "bert_base_pretrained":
        model_name = BERT_BASE_PRETRAINED
    else:
        model_name = DISTILBERT
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
    # Padding side determines if we do (question|context) or (context|question).
    pad_on_right = tokenizer.padding_side == "right"
    max_seq_length = tokenizer.model_max_length
    # Training preprocessing
    def prepare_train_features(examples):
        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possible giving several features when a context is long, each of those features having a
        # context that overlaps a bit the context of the previous feature.
        tokenized_examples = tokenizer(
            examples[question_column_name if pad_on_right else context_column_name],
            examples[context_column_name if pad_on_right else question_column_name],
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            padding=False,
        )
        # Since one example might give us several features if it has a long context, we need a map from a feature to
        # its corresponding example. This key gives us just that.
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
        # The offset mappings will give us a map from token to character position in the original context. This will
        # help us compute the start_positions and end_positions.
        offset_mapping = tokenized_examples.pop("offset_mapping")
        # Let's label those examples!
        tokenized_examples["start_positions"] = []
        tokenized_examples["end_positions"] = []
        for i, offsets in enumerate(offset_mapping):
            # We will label impossible answers with the index of the CLS token.
            input_ids = tokenized_examples["input_ids"][i]
            cls_index = input_ids.index(tokenizer.cls_token_id)
            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            sequence_ids = tokenized_examples.sequence_ids(i)
            # One example can give several spans, this is the index of the example containing this span of text.
            sample_index = sample_mapping[i]
            answers = examples[answer_column_name][sample_index]
            # If no answers are given, set the cls_index as answer.
            if len(answers["answer_start"]) == 0:
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
            else:
                # Start/end character index of the answer in the text.
                start_char = answers["answer_start"][0]
                end_char = start_char + len(answers["text"][0])
                # Start token index of the current span in the text.
                token_start_index = 0
                while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
                    token_start_index += 1
                # End token index of the current span in the text.
                token_end_index = len(input_ids) - 1
                while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
                    token_end_index -= 1
                # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
                if not (
                    offsets[token_start_index][0] <= start_char
                    and offsets[token_end_index][1] >= end_char
                ):
                    tokenized_examples["start_positions"].append(cls_index)
                    tokenized_examples["end_positions"].append(cls_index)
                else:
                    # Otherwise move the token_start_index and token_end_index to the two ends of the answer.
                    # Note: we could go after the last offset if the answer is the last word (edge case).
                    while (
                        token_start_index < len(offsets)
                        and offsets[token_start_index][0] <= start_char
                    ):
                        token_start_index += 1
                    tokenized_examples["start_positions"].append(token_start_index - 1)
                    while offsets[token_end_index][1] >= end_char:
                        token_end_index -= 1
                    tokenized_examples["end_positions"].append(token_end_index + 1)
        return tokenized_examples
    # if "train" not in raw_datasets:
    #     raise ValueError("--do_train requires a train dataset")
    train_examples = raw_datasets
    # Create train feature from dataset
    train_dataset = train_examples.map(
        prepare_train_features,
        batched=True,
        num_proc=4,
        remove_columns=column_names,
        load_from_cache_file=True,
        desc="Running tokenizer on train dataset",
    )
    # Validation preprocessing
    def prepare_validation_features(examples):
        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possible giving several features when a context is long, each of those features having a
        # context that overlaps a bit the context of the previous feature.
        tokenized_examples = tokenizer(
            examples[question_column_name if pad_on_right else context_column_name],
            examples[context_column_name if pad_on_right else question_column_name],
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            padding=False,
        )
        # Since one example might give us several features if it has a long context, we need a map from a feature to
        # its corresponding example. This key gives us just that.
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
        # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
        # corresponding example_id and we will store the offset mappings.
        tokenized_examples["example_id"] = []
        for i in range(len(tokenized_examples["input_ids"])):
            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            sequence_ids = tokenized_examples.sequence_ids(i)
            context_index = 1 if pad_on_right else 0
            # One example can give several spans, this is the index of the example containing this span of text.
            sample_index = sample_mapping[i]
            tokenized_examples["example_id"].append(examples["id"][sample_index])
            # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
            # position is part of the context or not.
            tokenized_examples["offset_mapping"][i] = [
                (o if sequence_ids[k] == context_index else None)
                for k, o in enumerate(tokenized_examples["offset_mapping"][i])
            ]
        return tokenized_examples
    if dataset_name == "squad":
        eval_examples = load_dataset(
            "squad",
            cache_dir=os.path.join(config.get_workspace(), "datasets"),
            split="validation",
        )
    else:
        eval_examples = load_dataset(
            "adversarial_qa",
            "dbert",
            cache_dir=os.path.join(config.get_workspace(), "datasets"),
            split="validation",
        )
    # Validation Feature Creation
    # `eval_dataset` uses the *train* featurizer so it carries start/end labels
    # and can be fed through the model to compute a validation loss;
    # `eval_dataset_valid` keeps example ids/offsets for metric computation.
    eval_dataset = eval_examples.map(
        prepare_train_features,
        batched=True,
        num_proc=4,
        remove_columns=column_names,
        load_from_cache_file=True,
        desc="Running tokenizer on validation dataset",
    )
    eval_dataset_valid = eval_examples.map(
        prepare_validation_features,
        batched=True,
        num_proc=4,
        remove_columns=column_names,
        load_from_cache_file=True,
        desc="Running tokenizer on validation dataset",
    )
    train_dataset_for_eval = train_examples.map(
        prepare_validation_features,
        batched=True,
        num_proc=4,
        remove_columns=column_names,
        load_from_cache_file=True,
        desc="Running tokenizer on train dataset for eval",
    )
    # Dynamic padding to the longest feature in each batch.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=None)
    if outliers_filename is not None:
        # Drop the features whose indices are listed in the outliers file.
        outlier_indices = np.load(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "outliers",
                outliers_filename,
            )
        )
        outlier_indices = np.ndarray.tolist(outlier_indices)
        indices = [
            i for i in range(len(train_dataset)) if str(i) not in outlier_indices
        ]
        train_dataset = train_dataset.select(indices)
        train_dataset_for_eval = train_dataset_for_eval.select(indices)
    train_dataloader = DataLoader(
        train_dataset,
        shuffle=shuffle,
        collate_fn=data_collator,
        batch_size=batch_size,
        drop_last=drop_last,
    )
    # eval_dataset_for_model = eval_dataset_prepared.remove_columns(
    #     ["example_id", "offset_mapping"]
    # )
    eval_dataloader = DataLoader(
        eval_dataset, collate_fn=data_collator, batch_size=batch_size
    )
    # train_dataset_eval_for_model = train_dataset_for_eval.remove_columns(
    #     ["example_id", "offset_mapping"]
    # )
    # NOTE(review): this wraps `train_dataset` (train features), not
    # `train_dataset_for_eval` — confirm that is intended.
    train_dataloader_for_eval = DataLoader(
        train_dataset,
        shuffle=False,
        collate_fn=data_collator,
        batch_size=batch_size,
        drop_last=drop_last,
    )
    return (
        train_dataloader,
        train_dataloader_for_eval,
        eval_dataloader,
        eval_dataset_valid,
        eval_examples,
        train_dataset_for_eval,
        train_examples,
        tokenizer,
    )
| 11,801 | 38.209302 | 118 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/cifar_loader.py | import os
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from explib import config
def cifar_loader(
    batch_size,
    load_100=False,
    drop_last=False,
    fake_full_batch_mode=False,
    shuffle=True,
):
    """Return (train_loader, val_loader) for CIFAR-10 or CIFAR-100.

    Images are converted to tensors, cast to the default dtype, and
    normalized with per-dataset channel statistics. When
    `fake_full_batch_mode` is set, training is restricted to a single
    random batch (and shuffling is disabled) to emulate full-batch steps.
    """
    if load_100:
        dataset_cls = datasets.CIFAR100
        stats = {"mean": [0.5071, 0.4867, 0.4408], "std": [0.2675, 0.2565, 0.2761]}
    else:
        dataset_cls = datasets.CIFAR10
        stats = {"mean": [0.491, 0.482, 0.447], "std": [0.247, 0.243, 0.262]}
    preprocess = transforms.Compose(
        [
            transforms.ToTensor(),
            lambda t: t.type(torch.get_default_dtype()),
            transforms.Normalize(**stats),
        ]
    )
    root = os.path.join(config.get_workspace(), "datasets")
    train_set = dataset_cls(
        root=root,
        train=True,
        download=True,
        transform=preprocess,
    )
    test_set = dataset_cls(
        root=root,
        train=False,
        download=True,
        transform=preprocess,
    )
    if fake_full_batch_mode:
        # One fixed random batch, visited in the same order every epoch.
        train_set = torch.utils.data.Subset(train_set, torch.randperm(batch_size))
        shuffle = False
    train_loader = torch.utils.data.DataLoader(
        dataset=train_set,
        batch_size=batch_size,
        shuffle=shuffle,
        drop_last=drop_last,
    )
    val_loader = torch.utils.data.DataLoader(
        dataset=test_set,
        batch_size=batch_size,
        shuffle=False,
    )
    return train_loader, val_loader
| 1,561 | 23.40625 | 78 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/dataset/__init__.py | """Datasets.
General interface to load a dataset
"""
import os
from pathlib import Path
from explib import config
from .cifar_loader import cifar_loader
from .glue_loader import glue_loader
from .language_loader import ptb_loader, wikitext2_loader
from .squad_loader import squad_loader
from .torchvision_loader import torchvision_loader
MNIST = "mnist"
WIKITEXT2 = "wikitext2"
CIFAR10 = "cifar10"
CIFAR100 = "cifar100"
PTB = "ptb"
SQUAD = "squad"
ADVERSARIAL_QA = "adversarial_qa"
GLUE = "glue"
AVAILABLE_DATASET = [
MNIST,
WIKITEXT2,
CIFAR10,
CIFAR100,
PTB,
SQUAD,
# GLUE,
]
def init(
    dataset_name,
    batch_size,
    device,
    extra_params=None,
    drop_last=False,
    fake_full_batch_mode=False,
    accelerator=None,
    shuffle=True,
    outliers_filename=None,
):
    """Dispatch to the loader for `dataset_name` and return its dataloaders.

    Creates the workspace dataset directory if needed, then delegates to the
    dataset-specific loader. The return shape depends on the dataset (two
    dataloaders for vision/language datasets, an 8-tuple for SQuAD).

    Raises:
        NotImplementedError: fake_full_batch_mode on an unsupported dataset.
        Exception: unknown dataset name.
    """
    extra_params = extra_params if extra_params is not None else {}
    dataset_path = os.path.join(config.get_workspace(), "datasets")
    Path(dataset_path).mkdir(parents=True, exist_ok=True)
    if fake_full_batch_mode and dataset_name not in [CIFAR10, CIFAR100, SQUAD]:
        # Bug fix: the message lacked the f-prefix, so "{dataset_name}" was
        # emitted literally instead of the actual dataset name.
        raise NotImplementedError(
            f"Fake full batch mode not implemented for {dataset_name}"
        )
    if dataset_name == MNIST:
        return torchvision_loader(
            dataset_name, batch_size, drop_last=drop_last, shuffle=shuffle
        )
    elif dataset_name == WIKITEXT2:
        return wikitext2_loader(
            batch_size,
            device,
            extra_params.get("tgt_len", 128),
            drop_last=drop_last,
        )
    elif dataset_name == CIFAR10:
        return cifar_loader(
            batch_size,
            drop_last=drop_last,
            fake_full_batch_mode=fake_full_batch_mode,
            shuffle=shuffle,
        )
    elif dataset_name == CIFAR100:
        return cifar_loader(
            batch_size,
            load_100=True,
            drop_last=drop_last,
            fake_full_batch_mode=fake_full_batch_mode,
            # Consistency fix: shuffle was silently ignored for CIFAR100 only.
            shuffle=shuffle,
        )
    elif dataset_name == PTB:
        return ptb_loader(
            batch_size,
            device,
            extra_params.get("tgt_len", 128),
            drop_last=drop_last,
            outliers_filename=outliers_filename,
        )
    elif dataset_name == SQUAD or dataset_name == ADVERSARIAL_QA:
        return squad_loader(
            dataset_name,
            batch_size,
            extra_params.get("tgt_len", 384),
            extra_params.get("doc_stride", 128),
            model_name=extra_params.get("model_name", "bert_base_pretrained"),
            drop_last=drop_last,
            fake_full_batch_mode=fake_full_batch_mode,
            shuffle=shuffle,
            outliers_filename=outliers_filename,
        )
    else:
        raise Exception("Dataset {} not available".format(dataset_name))
| 2,805 | 26.242718 | 79 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/optim/signum.py | import torch
from torch.optim import Optimizer
class Signum(Optimizer):
    r"""Signum optimizer: steps in the sign of the gradient (or its momentum).

    Adapted from
    https://github.com/jiaweizzhao/Signum_pytorch/blob/master/Example/signum.py
    See details in the original paper at: https://arxiv.org/abs/1711.05101

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (default: 0)

    With momentum (Signum):
        m_t = beta * m_{t-1} + (1 - beta) * g_t
        w_t = w_{t-1} - lr * sign(m_t)
    Without momentum (SignSGD):
        w_t = w_{t-1} - lr * sign(g_t)
    """

    def __init__(self, params, lr=0.01, momentum=0, weight_decay=0, **kwargs):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= momentum:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
        super(Signum, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Signum, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and returns
                the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            for p in group["params"]:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    # Bug fix: `add_(scalar, tensor)` is a deprecated overload
                    # that was removed in recent PyTorch; the keyword form
                    # performs the same in-place update d_p += wd * p.
                    d_p.add_(p.data, alpha=weight_decay)
                if momentum != 0:
                    # Signum: sign of the exponential moving average.
                    param_state = self.state[p]
                    if "momentum_buffer" not in param_state:
                        buf = param_state["momentum_buffer"] = torch.zeros_like(p.data)
                    else:
                        buf = param_state["momentum_buffer"]
                    # Bug fix: same deprecated-overload replacement as above.
                    buf.mul_(momentum).add_(d_p, alpha=1 - momentum)
                    d_p = torch.sign(buf)
                else:  # SignSGD: sign of the raw gradient.
                    d_p = torch.sign(d_p)
                p.data.add_(d_p, alpha=-group["lr"])
        return loss
| 3,282 | 36.735632 | 95 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/optim/normalized_gd.py | import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer, required
from torch.nn.utils import parameters_to_vector as p2v
from typing import List, Optional
class CopyOfSGD(Optimizer):
    """Re-implementation of torch.optim.SGD exposing a per-gradient hook.

    `_step_with_direction` applies `direction_func` to each parameter's
    gradient before the SGD update, which subclasses use to implement
    normalized / sign-based descent variants.
    """

    def __init__(
        self,
        params,
        lr=required,
        momentum=0,
        dampening=0,
        weight_decay=0,
        nesterov=False,
        *,
        maximize=False,
    ):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(
            lr=lr,
            momentum=momentum,
            dampening=dampening,
            weight_decay=weight_decay,
            nesterov=nesterov,
            maximize=maximize,
        )
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(CopyOfSGD, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Backfill options missing from checkpoints saved by older versions.
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("nesterov", False)
            group.setdefault("maximize", False)

    @torch.no_grad()
    def _step_with_direction(self, closure=None, direction_func=None):
        """Performs a single optimization step.

        Each gradient is first transformed by `direction_func` (e.g. sign or
        normalization), then the usual SGD update (weight decay, momentum,
        nesterov) is applied by the module-level `sgd` helper.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        # grad norm comp
        for group in self.param_groups:
            params_with_grad = []
            d_p_list = []
            momentum_buffer_list = []
            has_sparse_grad = False
            for p in group["params"]:
                if p.grad is not None:
                    params_with_grad.append(p)
                    # norm decent
                    d_p_list.append(direction_func(p.grad))
                    if p.grad.is_sparse:
                        has_sparse_grad = True
                    state = self.state[p]
                    if "momentum_buffer" not in state:
                        momentum_buffer_list.append(None)
                    else:
                        momentum_buffer_list.append(state["momentum_buffer"])
            sgd(
                params_with_grad,
                d_p_list,
                momentum_buffer_list,
                weight_decay=group["weight_decay"],
                momentum=group["momentum"],
                lr=group["lr"],
                dampening=group["dampening"],
                nesterov=group["nesterov"],
                maximize=group["maximize"],
                has_sparse_grad=has_sparse_grad,
            )
            # update momentum_buffers in state
            for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
                state = self.state[p]
                state["momentum_buffer"] = momentum_buffer
        return loss

    def _eval_closure(self, closure=None):
        # Evaluate the closure under grad mode without taking a step.
        # NOTE: subclasses call this AND pass the closure on to
        # `_step_with_direction`, so a non-None closure runs twice per step.
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        return loss

    def _total_grad_norm(self):
        # Norm of all gradients flattened into one vector; missing gradients
        # contribute zeros. NOTE(review): `self.norm` is only defined by
        # subclasses (e.g. NormalizedSGD) — calling this on CopyOfSGD itself
        # would raise AttributeError.
        total_grad = p2v(
            [
                p.grad if p.grad is not None else torch.zeros_like(p)
                for group in self.param_groups
                for p in group["params"]
            ]
        )
        return total_grad.norm(self.norm)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = self._eval_closure(closure)
        # Identity direction: plain SGD behavior.
        self._step_with_direction(closure, lambda g: g)
        return loss
def sgd(
    params: List[Tensor],
    d_p_list: List[Tensor],
    momentum_buffer_list: List[Optional[Tensor]],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    has_sparse_grad: bool = None,
    *,
    weight_decay: float,
    momentum: float,
    lr: float,
    dampening: float,
    nesterov: bool,
    maximize: bool,
):
    """Functional SGD update applied in place to `params`.

    For each parameter: optionally add weight decay to its update direction,
    fold the direction into the (possibly new) momentum buffer, then take a
    step of size `lr` (ascending when `maximize`). Newly created momentum
    buffers are written back into `momentum_buffer_list`.
    """
    for idx, (param, update) in enumerate(zip(params, d_p_list)):
        if weight_decay != 0:
            # Out-of-place: the caller's direction tensor is left untouched.
            update = update.add(param, alpha=weight_decay)
        if momentum != 0:
            buf = momentum_buffer_list[idx]
            if buf is None:
                # First step: initialize the buffer from the current direction.
                buf = torch.clone(update).detach()
                momentum_buffer_list[idx] = buf
            else:
                buf.mul_(momentum).add_(update, alpha=1 - dampening)
            update = update.add(buf, alpha=momentum) if nesterov else buf
        step_size = lr if maximize else -lr
        param.add_(update, alpha=step_size)
class PlainSGD(CopyOfSGD):
    """CopyOfSGD restricted to the vanilla knobs: learning rate and momentum.

    Dampening, weight decay, and Nesterov are all pinned to their defaults.
    """

    def __init__(self, params, lr=required, momentum=0):
        super().__init__(
            params=params,
            lr=lr,
            momentum=momentum,
            dampening=0,
            weight_decay=0,
            nesterov=False,
        )
class BlockNormalizedSGD(PlainSGD):
    """Normalize each parameter block's gradient by its own norm before stepping.

    Unlike NormalizedSGD, the scaling is per-parameter-tensor, so both the
    magnitude and the overall direction of the full update change.
    """

    def __init__(self, params, lr=required, momentum=0, norm=1):
        assert norm > 0
        self.norm = norm
        super(BlockNormalizedSGD, self).__init__(params, momentum=momentum, lr=lr)

    @torch.no_grad()
    def step(self, closure=None):
        loss = self._eval_closure(closure)

        def blockwise_normalize(grad):
            # Each block is divided by its own p-norm.
            return grad / grad.norm(self.norm)

        self._step_with_direction(closure, blockwise_normalize)
        return loss
class RescaledSignDescent(PlainSGD):
    """Sign descent rescaled by the total gradient norm: step ∝ sign(g) * ||g||.

    Changes each coordinate's direction to its sign while keeping the overall
    gradient magnitude.
    """

    def __init__(self, params, lr=required, momentum=0, norm=1):
        assert norm > 0
        self.norm = norm
        super(RescaledSignDescent, self).__init__(params, lr=lr, momentum=momentum)

    @torch.no_grad()
    def step(self, closure=None):
        loss = self._eval_closure(closure)
        scale = self._total_grad_norm()
        self._step_with_direction(closure, lambda grad: grad.sign() * scale)
        return loss
class NormalizedSGD(PlainSGD):
    """Normalized gradient descent: keep the direction, unit-normalize the size.

    Every gradient is divided by the norm of the full (flattened) gradient.
    """

    def __init__(self, params, lr=required, momentum=0, norm=2):
        assert norm > 0
        self.norm = norm
        super(NormalizedSGD, self).__init__(params, lr=lr, momentum=momentum)

    @torch.no_grad()
    def step(self, closure=None):
        loss = self._eval_closure(closure)
        scale = self._total_grad_norm()
        self._step_with_direction(closure, lambda grad: grad.div(scale))
        return loss
class SignSGD(PlainSGD):
    """Pure sign descent: every coordinate moves by exactly ±lr.

    Discards both the magnitude and (coordinate-wise) scale of the gradient.
    """

    def __init__(self, params, lr=required, momentum=0):
        super(SignSGD, self).__init__(params, lr=lr, momentum=momentum)

    @torch.no_grad()
    def step(self, closure=None):
        loss = self._eval_closure(closure)
        self._step_with_direction(closure, lambda grad: grad.sign())
        return loss
| 7,502 | 30.004132 | 101 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/optim/modified_adam.py | from torch.optim import Optimizer
import math
import torch
from torch import Tensor
from typing import List, Optional
def f_modifiedadam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[int],
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    use_bias_correction: bool,
    eps: float
):
    r"""Functional Adam update with switchable bias correction (for ablation).

    Mutates `params`, `exp_avgs`, `exp_avg_sqs` (and `max_exp_avg_sqs` when
    `amsgrad` is set) in place. When `use_bias_correction` is False, both
    correction factors are fixed to 1 — the ablation this "modified" variant
    exists for.

    Bug fix: removed a leftover debug `print(...)` that ran once per parameter
    per step, spamming stdout and slowing every update.
    """
    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step = state_steps[i]
        bias_correction1 = 1 - beta1**step if use_bias_correction else 1
        bias_correction2 = 1 - beta2**step if use_bias_correction else 1
        if weight_decay != 0:
            # Out-of-place add keeps the caller's gradient tensor intact.
            grad = grad.add(param, alpha=weight_decay)
        # Decay the first and second moment running average coefficient
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
            # Use the max. for normalizing running avg. of gradient
            denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
        else:
            denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
        step_size = lr / bias_correction1
        param.addcdiv_(exp_avg, denom, value=-step_size)
class ModifiedAdam(Optimizer):
    r"""Modified Adam Implementation for ablation.

    Identical to torch.optim.Adam except for the extra
    `use_bias_correction` flag: when False, the bias-correction factors in
    the update are fixed to 1 (see `f_modifiedadam`).
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        use_bias_correction=True,
        amsgrad=False,
    ):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
            use_bias_correction=use_bias_correction,
        )
        super(ModifiedAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Backfill options missing from checkpoints saved by older versions.
        super(ModifiedAdam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault("amsgrad", False)
            group.setdefault("use_bias_correction", True)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Gathers per-parameter state (moments, step counts) for each group and
        delegates the actual in-place update to `f_modifiedadam`.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            max_exp_avg_sqs = []
            state_steps = []
            beta1, beta2 = group["betas"]
            for p in group["params"]:
                if p.grad is not None:
                    params_with_grad.append(p)
                    if p.grad.is_sparse:
                        raise RuntimeError(
                            "Adam does not support sparse gradients, please consider SparseAdam instead"
                        )
                    grads.append(p.grad)
                    state = self.state[p]
                    # Lazy state initialization
                    if len(state) == 0:
                        state["step"] = 0
                        # Exponential moving average of gradient values
                        state["exp_avg"] = torch.zeros_like(
                            p, memory_format=torch.preserve_format
                        )
                        # Exponential moving average of squared gradient values
                        state["exp_avg_sq"] = torch.zeros_like(
                            p, memory_format=torch.preserve_format
                        )
                        if group["amsgrad"]:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state["max_exp_avg_sq"] = torch.zeros_like(
                                p, memory_format=torch.preserve_format
                            )
                    exp_avgs.append(state["exp_avg"])
                    exp_avg_sqs.append(state["exp_avg_sq"])
                    if group["amsgrad"]:
                        max_exp_avg_sqs.append(state["max_exp_avg_sq"])
                    # update the steps for each param group update
                    state["step"] += 1
                    # record the step after step update
                    state_steps.append(state["step"])
            f_modifiedadam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                amsgrad=group["amsgrad"],
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                use_bias_correction=group["use_bias_correction"],
                eps=group["eps"],
            )
        return loss
| 6,017 | 34.192982 | 104 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/optim/__init__.py | """Optimizers
Generic interface to build optimizers by name,
possibly interfacing with pytorch
"""
import json
import torch
from .signum import Signum
from .modified_adam import ModifiedAdam
from .normalized_gd import (
PlainSGD,
NormalizedSGD,
BlockNormalizedSGD,
SignSGD,
RescaledSignDescent,
)
from .clipped_sgd import ClippedGD
# Canonical optimizer names, as used in experiment configs.
SGD = "SGD"
ADAM = "Adam"
ADAM_ABLATION = "AdamAblation"
SIGNUM = "Signum"
PLAIN_SGD = "PlainSGD"
NORMALIZED_GD = "NormalizedGD"
BLOCK_NORMALIZED_GD = "BlockNormalizedGD"
SIGN_D = "SignDescent"
RESCALED_SIGN_D = "RescaledSignDescent"
CLIPPED_SGD = "ClippedGD"
# Names accepted by init().
AVAILABLE_OPTIMIZERS = [
    SGD,
    ADAM,
    SIGNUM,
    ADAM_ABLATION,
    # Bug fix: PLAIN_SGD was missing here, so init()'s PlainSGD branch was
    # unreachable (the membership check raised first).
    PLAIN_SGD,
    NORMALIZED_GD,
    BLOCK_NORMALIZED_GD,
    SIGN_D,
    RESCALED_SIGN_D,
    CLIPPED_SGD,
]
def init(params, model):
    """Instantiate the optimizer described by `params` for `model`.

    `params` is a dict with at least "name" and "alpha" (learning rate);
    optimizer-specific keys ("momentum", "b1"/"b2", "clipat", ...) are read
    per branch. Returns a torch Optimizer instance.

    Bug fix: the old implementation raised on `name not in
    AVAILABLE_OPTIMIZERS` *before* dispatching, which made the PlainSGD
    branch unreachable (PLAIN_SGD was absent from the list) and silently
    returned None for any listed-but-unhandled name. Dispatch now happens
    first, and unknown names fail loudly at the end.

    Raises:
        Exception: if the optimizer name is not recognized.
    """
    name = params["name"]
    momentum = params.get("momentum", 0)
    if name == SGD:
        return torch.optim.SGD(
            model.parameters(), lr=params["alpha"], momentum=momentum
        )
    if name == ADAM:
        return torch.optim.Adam(
            model.parameters(),
            lr=params["alpha"],
            betas=(params["b1"], params["b2"]),
        )
    if name == ADAM_ABLATION:
        # Deep-copy via JSON so we can strip the consumed keys without
        # mutating the caller's config dict.
        params_ = json.loads(json.dumps(params))
        lr = params_.get("alpha")
        betas = (params_.get("b1"), params_.get("b2"))
        params_.pop("name")
        params_.pop("alpha")
        params_.pop("b1")
        params_.pop("b2")
        return ModifiedAdam(model.parameters(), lr=lr, betas=betas, **params_)
    if name == SIGNUM:
        return Signum(model.parameters(), lr=params["alpha"], momentum=momentum)
    if name == PLAIN_SGD:
        return PlainSGD(model.parameters(), lr=params["alpha"], momentum=momentum)
    if name == NORMALIZED_GD:
        return NormalizedSGD(model.parameters(), lr=params["alpha"], momentum=momentum)
    if name == BLOCK_NORMALIZED_GD:
        return BlockNormalizedSGD(
            model.parameters(), lr=params["alpha"], momentum=momentum
        )
    if name == SIGN_D:
        return SignSGD(model.parameters(), lr=params["alpha"], momentum=momentum)
    if name == RESCALED_SIGN_D:
        return RescaledSignDescent(
            model.parameters(), lr=params["alpha"], momentum=momentum
        )
    if name == CLIPPED_SGD:
        return ClippedGD(
            model.parameters(),
            lr=params["alpha"],
            momentum=momentum,
            clipat=params.get("clipat", 0.5),
        )
    raise Exception("Optimizer {} not available".format(name))
| 2,628 | 24.77451 | 87 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/optim/clipped_sgd.py | import itertools
import torch
from torch import Tensor
from torch.optim import SGD
from torch.optim.optimizer import Optimizer, required
from torch.nn.utils import parameters_to_vector as p2v
from typing import List, Optional
class ClippedGD(SGD):
    """SGD with global gradient-norm clipping applied before every step.

    Gradients across all parameter groups are clipped (in place) so their
    total norm is at most `clipat`, then a regular SGD step is taken.
    """

    def __init__(
        self,
        params,
        lr=required,
        clipat=0.5,
        momentum=0,
        dampening=0,
        weight_decay=0,
        nesterov=False,
    ):
        if clipat < 0.0:
            raise ValueError("Invalid clipat value: {}".format(clipat))
        self._clipat = clipat
        self.params = params
        super().__init__(
            params,
            lr,
            momentum,
            dampening,
            weight_decay,
            nesterov,
        )

    def step(self, closure=None):
        """Clip the gradients (mutating them) and take a step of GD.

        Returns the loss from `closure`, if one is given.
        NOTE: clipping happens before `closure` runs; a closure that
        recomputes gradients will overwrite the clipped values.
        """
        torch.nn.utils.clip_grad_norm_(
            itertools.chain(*[group["params"] for group in self.param_groups]),
            max_norm=self._clipat,
        )
        # Bug fix: the loss returned by SGD.step(closure) was silently
        # dropped, so callers always saw None.
        return super().step(closure=closure)
| 1,091 | 24.395349 | 94 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/problem/problem.py | import torch
from torch.nn.utils import parameters_to_vector as p2v
from abc import ABCMeta, abstractmethod
from explib import config
from ..util import get_grads, enable_running_stats, disable_running_stats
import os
import numpy as np
from pathlib import Path
import csv
from ..dataset import *
class Problem(metaclass=ABCMeta):
    """Abstract training problem: owns the model, optimizer, and dataloaders.

    Subclasses (one per task family) must provide `calculate_loss` and
    `eval_loop`; this base class implements the shared training loop and the
    gradient-noise-norm measurement machinery. Subclasses are expected to set
    `self.model`, `self.optim`, and `self.train_dataloader` in their own
    `__init__` — this base reads but does not create them.
    """

    def __init__(self, exp_dict):
        # Experiment configuration; most keys are required, boolean flags
        # default to False when absent.
        self.model_name = exp_dict["model"]
        self.batch_size = exp_dict["batch_size"]
        self.seed = exp_dict["seed"]
        self.fake_full_batch_mode = (
            "fake_full_batch_mode" in exp_dict and exp_dict["fake_full_batch_mode"]
        )
        self.drop_last = "drop_last" in exp_dict and exp_dict["drop_last"]
        self.device = exp_dict["device"]
        self.dataset_name = exp_dict["dataset"]
        self.optim_name = exp_dict["opt"]["name"]
        self.init_noise_norm = (
            "init_noise_norm" in exp_dict and exp_dict["init_noise_norm"]
        )
        # Per-experiment output directory: <workspace>/<dataset>/<exp_uuid>.
        self.save_path = os.path.join(
            config.get_workspace(), exp_dict["dataset"], exp_dict["exp_uuid"]
        )
        self.trained_norms = exp_dict["trained_norms"]
        self.save_norm_samples = (
            "save_norm_samples" in exp_dict and exp_dict["save_norm_samples"]
        )
        self.dummy_run = exp_dict["dummy_run"]
        if "loss_func" in exp_dict:
            self.loss_func = self.get_loss_function(exp_dict["loss_func"])
        # Gradient accumulation for noise norm calculation
        if "accumulate_steps" in exp_dict:
            self.accumulate_steps = exp_dict["accumulate_steps"]
            self.grad_accumulate = True
        else:
            self.accumulate_steps = 1
            self.grad_accumulate = False
        self.exp_uuid = exp_dict["exp_uuid"]

    @abstractmethod
    def calculate_loss(self, data):
        # data is the list of per-step items yielded by the dataloader
        # (see train_loop's `(step, *data)` unpacking).
        pass

    @abstractmethod
    def eval_loop(self, is_validation=False):
        pass

    def train_loop(self):
        """Train for one epoch.

        Supports gradient accumulation: the optimizer steps only every
        `accumulate_steps` backward passes. Returns
        (mean epoch loss, per-step losses, squared grad 1-norms, squared
        grad 2-norms), where the norm lists are sampled at each optimizer
        step.
        """
        self.model.train()
        self.model.to(self.device)
        self.optim.zero_grad()
        epoch_loss = 0.0
        iteration_counter = 0
        accumulation_counter = 0
        fvals, gnorms_1, gnorms_2 = [], [], []
        # enumerate yields (index, batch); `*data` packs the batch into a
        # one-element list passed straight to calculate_loss.
        for (step, *data) in enumerate(self.train_dataloader):
            loss = self.calculate_loss(data)
            if self.grad_accumulate:
                # Scale so the accumulated gradient matches the big-batch mean.
                loss = loss / self.accumulate_steps
            loss.backward()
            iteration_counter += 1
            if (
                not self.grad_accumulate
                or iteration_counter % self.accumulate_steps == 0
            ):
                fvals.append(loss.item())
                gnorms_1.append(grad_norm_squared(self.optim, p=1).item())
                gnorms_2.append(grad_norm_squared(self.optim, p=2).item())
                self.optim.step()
                self.optim.zero_grad()
                accumulation_counter += 1
                epoch_loss += loss.item()
                if self.fake_full_batch_mode and accumulation_counter == 1:
                    # Full-batch emulation: one effective step per epoch.
                    break
            if self.dummy_run:
                # Smoke-test mode: bail out after a single iteration.
                accumulation_counter = 1
                break
        epoch_loss = epoch_loss / accumulation_counter
        return epoch_loss, fvals, gnorms_1, gnorms_2

    def calc_norms(self, norm_epoch, mean_grad=None):
        """
        Calculate noise norms. If mean_grad is None, will calculate
        the gradient mean first. If not None, will calculate the norms and save them

        Two-pass procedure: the first call (mean_grad=None) sums per-step
        gradients on CPU, saves the total under <save_path>/noise/, then
        recurses with the mean; the second pass saves the per-step squared
        deviations ||g_i - mean||^2 as a .npy file. No optimizer step is
        taken; batch-norm running stats are frozen during measurement.
        """
        self.model.train()
        self.model.to(self.device)
        self.optim.zero_grad()
        iteration_counter = 0
        accumulation_counter = 0
        calc_total_grad = mean_grad is None
        # Freeze batch-norm running statistics so measurement has no side effects.
        self.model.apply(disable_running_stats)
        if calc_total_grad:
            logs_path = os.path.join(self.save_path, "noise")
            Path(logs_path).mkdir(parents=True, exist_ok=True)
            grads = None
        else:
            # calc norms
            noise_norms = []
        for (step, *data) in enumerate(self.train_dataloader):
            loss = self.calculate_loss(data)
            if self.grad_accumulate:
                loss = loss / self.accumulate_steps
            loss.backward()
            iteration_counter += 1
            if (
                not self.grad_accumulate
                or iteration_counter % self.accumulate_steps == 0
            ):
                if calc_total_grad:
                    # Accumulate the flattened gradient on CPU.
                    grad = get_grads(self.model).cpu()
                    grads = grad if grads is None else grads + grad
                else:
                    # calc norms
                    grad = get_grads(self.model).cpu()
                    noise_norm = (grad - mean_grad).norm().item() ** 2
                    noise_norms.append(noise_norm)
                self.optim.zero_grad()
                accumulation_counter += 1
                if self.fake_full_batch_mode and accumulation_counter == 1:
                    break
            if self.dummy_run:
                break
        if calc_total_grad:
            torch.save(
                grads,
                self.save_path
                + "/noise/grad_{}_{}".format(accumulation_counter, norm_epoch),
            )
            # Second pass: compute deviations from the just-computed mean.
            self.calc_norms(
                norm_epoch=norm_epoch, mean_grad=grads / accumulation_counter
            )
            self.model.apply(enable_running_stats)
            return
        else:
            # calc norms
            final_noise_norms = np.asarray(noise_norms)
            np.save(
                self.save_path
                + "/noise/norm_{}_{}_{}_{}_{}_{}".format(
                    self.model_name,
                    self.dataset_name,
                    self.batch_size * self.accumulate_steps,
                    self.seed,
                    self.optim_name,
                    norm_epoch,
                ),
                final_noise_norms,
            )
            if self.save_norm_samples:
                # NOTE(review): get_outliers_helper is not defined in this
                # class — presumably provided by a subclass or mixin; confirm.
                if self.dataset_name in [PTB, WIKITEXT2, SQUAD]:
                    self.get_outliers_helper(final_noise_norms)

    def logLoss(self, predicted, actual):
        # Cross-entropy on logits; targets are cast to long for the criterion.
        criterion = torch.nn.CrossEntropyLoss()
        return criterion(predicted, actual.long())

    def get_loss_function(self, function_name):
        """Map a config loss name ("logloss" or "mse") to a callable criterion."""
        if function_name == "logloss":
            criterion = self.logLoss
        elif function_name == "mse":
            criterion = torch.nn.MSELoss()
        else:
            raise Exception("unsupported loss function: " + function_name)
        return criterion
@torch.no_grad()
def grad_norm_squared(optim, p=2):
v = p2v(
[
p.grad
for group in optim.param_groups
for p in group["params"]
if p.grad is not None
]
)
return v.norm(p=p) ** 2
| 6,790 | 32.78607 | 84 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/problem/bert_squad_prob.py | import csv
import torch
from accelerate import Accelerator
from datasets import load_metric
from .. import dataset, model, optim
from .problem import Problem
class BertSquadProb(Problem):
    """SQuAD question-answering problem backed by a BERT model.

    Wires together the tokenized SQuAD dataloaders, the model, the optimizer
    and HuggingFace Accelerate, and exposes the loss/eval hooks used by the
    generic training loop.
    """
    def __init__(self, exp_dict):
        super().__init__(exp_dict)
        # dataset.init returns train/eval/valid loaders plus the raw datasets,
        # examples and tokenizer needed for SQuAD post-processing.
        (
            self.train_dataloader,
            self.train_dataloader_for_eval,
            self.valid_dataloader,
            self.valid_dataset,
            self.valid_examples,
            self.train_dataset,
            self.train_examples,
            self.tokenizer,
        ) = dataset.init(
            exp_dict["dataset"],
            self.batch_size,
            self.device,
            extra_params={**exp_dict["model_args"], "model_name": self.model_name},
            drop_last=self.drop_last,
            fake_full_batch_mode=self.fake_full_batch_mode,
            # Keep a fixed sample order when noise samples are being saved.
            shuffle=False if self.save_norm_samples else exp_dict.get("shuffle", True),
            outliers_filename=exp_dict.get("outliers_filename", None),
        )
        self.model = model.init(
            exp_dict["model"],
            model_args=exp_dict["model_args"],
        )
        self.model.to(self.device)
        self.optim = optim.init(
            exp_dict["opt"],
            self.model,
        )
        # Accelerate wraps model/optimizer/loaders for device placement.
        self.accelerator = Accelerator()
        (
            self.model,
            self.optim,
            self.train_dataloader,
            self.valid_dataloader,
        ) = self.accelerator.prepare(
            self.model, self.optim, self.train_dataloader, self.valid_dataloader
        )
        self.train_dataloader_for_eval = self.accelerator.prepare(
            self.train_dataloader_for_eval
        )
        self.metric = load_metric("squad")
    def calculate_loss(self, data):
        """Forward one batch (``data[0]`` is the kwargs dict) and return the model loss."""
        return self.model(**data[0]).loss
    @torch.no_grad()
    def eval_loop(self, is_validation=False):
        """Evaluate exact-match/F1 (and loss) on the train or validation split.

        Returns a dict keyed by split, e.g. ``train_exact_f1`` / ``valid_exact_f1``.
        In dummy-run mode returns NaN placeholders without touching the data.
        """
        if self.dummy_run:
            results = {}
            if not is_validation:
                results["training_loss"] = float("nan")
                results["train_exact_f1"] = float("nan")
                results["train_exact_match"] = float("nan")
            else:
                results["valid_exact_match"] = float("nan")
                results["valid_exact_f1"] = float("nan")
            return results
        if is_validation:
            dataloader = self.valid_dataloader
            dataset = self.valid_dataset
            examples = self.valid_examples
        else:
            dataloader = self.train_dataloader_for_eval
            dataset = self.train_dataset
            examples = self.train_examples
        # TODO: merge the loss and metrics calculations here into one loop
        # loss = model.bert_base_pretrained.eval_loss(
        #     self, self.model, self.train_dataloader
        # )
        metrics, loss = model.bert_base_pretrained.evaluate(
            self,
            self.model,
            dataloader,
            self.accelerator,
            dataset,
            examples,
            self.metric,
        )
        results = {}
        if not is_validation:
            results["training_loss"] = loss
            results["train_exact_f1"] = metrics["f1"]
            results["train_exact_match"] = metrics["exact_match"]
        else:
            results["valid_loss"] = loss
            results["valid_exact_match"] = metrics["exact_match"]
            results["valid_exact_f1"] = metrics["f1"]
        return results
    def get_outliers_helper(self, final_noise_norms):
        """Write a CSV of training samples sorted by descending noise norm."""
        with open(
            self.save_path + "/noise/outliers_{}.csv".format(self.exp_uuid),
            "w",
        ) as fw:
            writer = csv.writer(fw, delimiter=",")
            writer.writerow(["index", "norm", "question", "context"])
            rows = []
            # Relies on the dataloader iterating in the same (unshuffled) order
            # that produced final_noise_norms.
            for (step, *data) in enumerate(self.train_dataloader):
                noise = final_noise_norms[step]
                input_ids = data[0]["input_ids"].tolist()
                questions, contexts = self.norm_helper(input_ids)
                row = [step, noise, questions, contexts]
                rows.append(row)
            rows = sorted(rows, key=lambda x: x[1], reverse=True)
            writer.writerows(rows)
    def norm_helper(self, input_ids):
        """Decode token ids and split each sequence at [SEP] into (question, context)."""
        decoded = self.tokenizer.batch_decode(input_ids)
        questions, contexts = [], []
        for x in decoded:
            x = x.split("[SEP]")
            questions.append(x[0])
            contexts.append(x[1])
        return questions, contexts
| 4,505 | 31.185714 | 87 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/problem/image_prob.py | import torch
import torch.nn.functional as F
from .. import dataset, model, optim
from .problem import Problem
class ImageProb(Problem):
    """Image-classification problem wrapper.

    Builds dataloaders, model and optimizer from the experiment dict and
    provides the loss/evaluation hooks used by the generic training loop.
    """
    def __init__(self, exp_dict):
        super().__init__(exp_dict)
        self.train_dataloader, self.valid_dataloader = dataset.init(
            self.dataset_name,
            self.batch_size,
            self.device,
            drop_last=self.drop_last,
            # Fake-full-batch mode needs a deterministic sample order.
            shuffle=(
                False if self.fake_full_batch_mode else exp_dict.get("shuffle", True)
            ),
            fake_full_batch_mode=self.fake_full_batch_mode,
        )
        # MNIST is single-channel; supply that default when no model args given.
        if "model_args" not in exp_dict and exp_dict["dataset"] == "mnist":
            exp_dict["model_args"] = {}
            exp_dict["model_args"]["in_channels"] = 1
        self.model = model.init(
            exp_dict["model"],
            model_args=exp_dict["model_args"] if "model_args" in exp_dict else None,
        )
        self.model.to(self.device)
        self.optim = optim.init(
            exp_dict["opt"],
            self.model,
        )
    def calculate_loss(self, data):
        """Compute the loss for one batch; ``data`` is ``[(X, labels)]``."""
        labels = data[0][1:][0].to(self.device).float()
        X = data[0][0]
        X = X.to(self.device)
        y = self.model(X.float())
        return self.loss_func(y, labels)
    @torch.no_grad()
    def eval_loop(self, is_validation=False):
        """Full-pass evaluation: accuracy plus accumulated loss.

        Returns ``{"valid_accuracy" | "train_accuracy": ..., "training_loss": ...}``.
        """
        dataloader = self.valid_dataloader if is_validation else self.train_dataloader
        correct = torch.zeros(1).to(self.device)
        epoch_loss = 0
        images_counter = 0
        accumulation_counter = 0
        iteration_counter = 0
        self.model.eval()
        self.model.to(self.device)
        for (X, labels) in dataloader:
            X = X.to(self.device)
            labels = labels.to(self.device).float()
            y = self.model(X)
            # Accuracy via arg-max over class probabilities.
            predicted = F.softmax(y, dim=1)
            _, predicted_labels = torch.max(predicted, 1)
            images_counter += labels.size(0)
            correct += (predicted_labels == labels).sum()
            loss = self.loss_func(y, labels)
            if self.grad_accumulate:
                # Mirror training-time scaling so losses are comparable.
                loss = loss / self.accumulate_steps
            epoch_loss += loss.item()
            iteration_counter += 1
            if (
                not self.grad_accumulate
                or iteration_counter % self.accumulate_steps == 0
            ):
                accumulation_counter += 1
            if self.dummy_run:
                accumulation_counter = 1
                break
        results = {}
        accuracy = correct.item() / images_counter
        if is_validation:
            results["valid_accuracy"] = accuracy
        else:
            results["train_accuracy"] = accuracy
        results["training_loss"] = epoch_loss / max(accumulation_counter, 1)
        return results
| 2,823 | 29.042553 | 86 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/problem/simple_prob.py | from .problem import Problem
from .. import dataset, model, optim
import torch
class SimpleProb(Problem):
    """Simple regression/feature-vector problem wrapper.

    Infers the feature dimension from the first training batch and builds
    the model/optimizer from the experiment dict.
    """
    def __init__(self, exp_dict):
        super().__init__(exp_dict)
        self.train_dataloader, self.valid_dataloader = dataset.init(
            self.dataset_name,
            self.batch_size,
            self.device,
            drop_last=self.drop_last,
            shuffle=exp_dict.get("shuffle", True),
        )
        # Peek at one batch to discover the input feature dimensionality.
        features_dim = next(iter(self.train_dataloader))[0].shape[1]
        self.model = model.init(
            exp_dict["model"],
            features_dim=features_dim,
        )
        self.model.to(self.device)
        self.optim = optim.init(
            exp_dict["opt"],
            self.model,
        )
    def calculate_loss(self, data):
        """Compute the loss for one batch; ``data`` is ``[(X, labels)]``."""
        labels = data[0][1:][0].to(self.device).float()
        X = data[0][0]
        X = X.to(self.device)
        y = self.model(X.float())
        return self.loss_func(y, labels)
    @torch.no_grad()
    def eval_loss(self, is_validation=False):
        """Average the loss over a full pass of the chosen dataloader.

        NOTE(review): sibling problems name this hook ``eval_loop``; confirm
        which name the training loop dispatches on before renaming.
        """
        dataloader = self.valid_dataloader if is_validation else self.train_dataloader
        self.model.eval()
        self.model.to(self.device)
        epoch_loss = 0.0
        iteration_counter = 0
        accumulation_counter = 0
        for (X, labels) in dataloader:
            labels = labels.to(self.device).float()
            y = self.model(X.float())
            loss = self.loss_func(y, labels)
            if self.grad_accumulate:
                # Mirror training-time scaling so losses are comparable.
                loss = loss / self.accumulate_steps
            iteration_counter += 1
            if (
                not self.grad_accumulate
                or iteration_counter % self.accumulate_steps == 0
            ):
                accumulation_counter += 1
            epoch_loss += loss.item()
            if self.fake_full_batch_mode and accumulation_counter == 1:
                break
            if self.dummy_run:
                accumulation_counter = 1
                break
        epoch_loss = epoch_loss / max(accumulation_counter, 1)
        results = {}
        if is_validation:
            results["valid_mse"] = epoch_loss
        else:
            results["train_mse"] = epoch_loss
            results["training_loss"] = epoch_loss
        return results
| 2,272 | 28.141026 | 86 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/problem/transformer_prob.py | import csv
import math
import torch
from .. import dataset, model, optim
from .problem import Problem
class TransformerProb(Problem):
    """Language-modeling problem wrapper for Transformer-XL / encoder models.

    Dispatches the loss computation on the configured model type and reports
    perplexity during evaluation.
    """
    def __init__(self, exp_dict):
        super().__init__(exp_dict)
        init_outputs = dataset.init(
            self.dataset_name,
            self.batch_size,
            self.device,
            extra_params=exp_dict.get("model_args", None),
            drop_last=self.drop_last,
            shuffle=exp_dict.get("shuffle", False),
            outliers_filename=exp_dict.get("outliers_filename", None),
        )
        # Some datasets also return the corpus (needed for outlier dumps).
        if len(init_outputs) == 3:
            (
                self.train_dataloader,
                self.valid_dataloader,
                transformer_len,
            ) = init_outputs
        elif len(init_outputs) == 4:
            (
                self.train_dataloader,
                self.valid_dataloader,
                transformer_len,
                self.corpus,
            ) = init_outputs
        else:
            raise ValueError(
                "Don't know how to process this number of dataset.init output values"
            )
        self.model = model.init(
            exp_dict["model"],
            model_args=exp_dict["model_args"],
            transformer_len=transformer_len,
        )
        self.model.to(self.device)
        self.optim = optim.init(
            exp_dict["opt"],
            self.model,
        )
    def calculate_loss(self, data):
        """Unpack one batch ``(X, labels, seq_len)`` and dispatch to loss_helper."""
        labels_seq_len = data[0][1:]
        X = data[0][0]
        X = X.to(self.device)
        labels, seq_len = labels_seq_len[0], labels_seq_len[1]
        return self.loss_helper(X, labels, seq_len)
    def transformer_xl_loss(self, data, target):
        """Transformer-XL loss: run without memory and average the token losses."""
        mems = tuple()
        ret = self.model(data, target, *mems)
        loss, mems = ret[0], ret[1:]
        return loss.float().mean().type_as(loss)
    def transformer_encoder_loss(self, data, target, seq_len):
        """Encoder LM loss with a causal (square subsequent) attention mask."""
        src_mask = self.model.generate_square_subsequent_mask(seq_len).to(self.device)
        output = self.model(data, src_mask)
        output_flat = output.view(-1, self.model.ntoken)
        return self.loss_func(output_flat, target.view(-1))
    @torch.no_grad()
    def eval_loop(self, is_validation=False):
        """Full-pass evaluation returning perplexity and accumulated loss.

        Perplexity is length-weighted: exp(sum(seq_len * loss) / total_len).
        """
        dataloader = self.valid_dataloader if is_validation else self.train_dataloader
        self.model.eval()
        self.model.to(self.device)
        self.optim.zero_grad()
        epoch_loss = 0.0
        ppl_loss = 0.0
        total_len = 0
        iteration_counter = 0
        accumulation_counter = 0
        for (X, labels, seq_len) in dataloader:
            loss = self.loss_helper(X, labels, seq_len)
            ppl_loss += seq_len * loss
            total_len += seq_len
            if self.grad_accumulate:
                loss = loss / self.accumulate_steps
            epoch_loss += loss
            iteration_counter += 1
            if (
                not self.grad_accumulate
                or iteration_counter % self.accumulate_steps == 0
            ):
                accumulation_counter += 1
                if (
                    self.fake_full_batch_mode
                    and accumulation_counter == 1
                    and not is_validation
                ):
                    break
            if self.dummy_run:
                accumulation_counter = 1
                break
        results = {}
        ppl_loss = ppl_loss / total_len
        try:
            ppl = math.exp(ppl_loss)
        except OverflowError:
            # Divergent models overflow exp(); report infinite perplexity.
            ppl = float("inf")
        if is_validation:
            results["valid_ppl"] = ppl
        else:
            results["train_ppl"] = ppl
        results["training_loss"] = epoch_loss / max(accumulation_counter, 1)
        return results
    def loss_helper(self, X, labels, seq_len):
        """Dispatch on model type to the matching loss implementation."""
        if self.model_name in [model.TRANSFORMER_XL, model.TRANSFORMER_XL_DET]:
            loss = self.transformer_xl_loss(X, labels)
        elif self.model_name in [
            model.TRANSFORMER_ENCODER,
            model.TRANSFORMER_ENCODER_DET,
        ]:
            loss = self.transformer_encoder_loss(X, labels, seq_len)
        else:
            raise Exception("Transformer not supported!")
        return loss
    def get_outliers_helper(self, final_noise_norms):
        """Write a CSV of training batches sorted by descending noise norm."""
        with open(
            self.save_path + "/noise/outliers_{}.csv".format(self.exp_uuid),
            "w",
            encoding="utf-8",
        ) as fw:
            writer = csv.writer(fw, delimiter=",")
            writer.writerow(["index", "norm", "text"])
            rows = []
            # Relies on the dataloader iterating in the same (unshuffled) order
            # that produced final_noise_norms.
            for (step, *data) in enumerate(self.train_dataloader):
                noise = final_noise_norms[step]
                X = data[0][0]
                X = X.to(self.device)
                sentences = self.corpus.vocab.convert_to_sent_from_tensor(X)
                row = [step, noise, sentences]
                rows.append(row)
            rows = sorted(rows, key=lambda x: x[1], reverse=True)
            writer.writerows(rows)
| 4,995 | 31.025641 | 86 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/full_connected.py | import torch
from torch import nn
import copy
class FullyConnected(nn.Module):
    """Plain MLP classifier: bias-free Linear/ReLU blocks ending in a Linear head.

    Inputs of any shape are flattened to ``(batch, input_dim)`` before the
    forward pass.
    """

    def __init__(self, input_dim=3 * 32 * 32, width=100, depth=3, num_classes=10):
        super(FullyConnected, self).__init__()
        self.input_dim = input_dim
        self.width = width
        self.depth = depth
        self.num_classes = num_classes
        hidden = self.get_layers()
        self.fc = nn.Sequential(
            nn.Linear(self.input_dim, self.width, bias=False),
            nn.ReLU(inplace=True),
            *hidden,
            nn.Linear(self.width, self.num_classes, bias=False),
        )

    def get_layers(self):
        """Build the (depth - 2) hidden Linear/ReLU pairs as a flat list."""
        stack = []
        for _ in range(self.depth - 2):
            stack.extend((nn.Linear(self.width, self.width, bias=False), nn.ReLU()))
        return stack

    def forward(self, x):
        # Flatten everything except the batch dimension, then run the MLP.
        flat = x.view(x.size(0), self.input_dim)
        return self.fc(flat)
| 935 | 26.529412 | 82 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/transformer_xl.py | import math
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding (Transformer-XL style).

    For each position ``p`` produces ``[sin(p * f_i) .. , cos(p * f_i) ..]``
    of size ``demb``, optionally expanded along a batch dimension.

    Fix: ``torch.ger`` is deprecated (it is an alias of ``torch.outer``);
    use ``torch.outer`` directly — identical semantics.
    """

    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        # Inverse frequencies 1 / 10000^(2i/demb). Registered as a buffer so it
        # follows the module across devices without being a learnable parameter.
        inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, pos_seq, bsz=None):
        """Return embeddings of shape [len, 1, demb] (or [len, bsz, demb] if bsz given)."""
        sinusoid_inp = torch.outer(pos_seq, self.inv_freq)
        pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
        if bsz is not None:
            return pos_emb[:, None, :].expand(-1, bsz, -1)
        else:
            return pos_emb[:, None, :]
class PositionwiseFF(nn.Module):
    """Position-wise feed-forward sublayer with residual connection + LayerNorm.

    ``pre_lnorm`` selects pre-norm (normalize, FF, add residual) versus the
    default post-norm (FF, add residual, normalize).
    """

    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
        super(PositionwiseFF, self).__init__()
        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout
        # Two-layer MLP applied independently at every position.
        self.CoreNet = nn.Sequential(
            nn.Linear(d_model, d_inner),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(d_inner, d_model),
            nn.Dropout(dropout),
        )
        self.layer_norm = nn.LayerNorm(d_model)
        self.pre_lnorm = pre_lnorm

    def forward(self, inp):
        """Apply the FF sublayer; output has the same shape as ``inp``."""
        if self.pre_lnorm:
            # Pre-norm: normalize first, then residual add.
            return self.CoreNet(self.layer_norm(inp)) + inp
        # Post-norm: residual add, then normalize.
        return self.layer_norm(inp + self.CoreNet(inp))
class MultiHeadAttn(nn.Module):
    """Standard (absolute-position) multi-head attention with residual + LayerNorm.

    Queries come from the current segment ``h``; keys/values may additionally
    attend over a cached memory ``mems`` prepended along the time axis.
    """
    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False):
        super(MultiHeadAttn, self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Separate projections: queries from h only, keys/values from mems + h.
        self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
        self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model)
        # Scaled dot-product attention factor 1/sqrt(d_head).
        self.scale = 1 / (d_head**0.5)
        self.pre_lnorm = pre_lnorm
    def forward(self, h, attn_mask=None, mems=None):
        """Attend over (mems + h); returns a tensor shaped like ``h``."""
        ##### multihead attention
        # [hlen x bsz x n_head x d_head]
        if mems is not None:
            c = torch.cat([mems, h], 0)
        else:
            c = h
        if self.pre_lnorm:
            ##### layer normalization
            c = self.layer_norm(c)
        head_q = self.q_net(h)
        head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
        head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
        head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
        head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
        # [qlen x klen x bsz x n_head]
        attn_score = torch.einsum("ibnd,jbnd->ijbn", (head_q, head_k))
        attn_score.mul_(self.scale)
        if attn_mask is not None and attn_mask.any().item():
            attn_mask = attn_mask.bool()
            # Masked positions get -inf so softmax assigns them zero weight.
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None, :, :, None], -float("inf"))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:, :, :, None], -float("inf"))
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        # [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
        attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, head_v))
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head
        )
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = h + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(h + attn_out)
        return output
class RelMultiHeadAttn(nn.Module):
    """Base class for relative-position multi-head attention variants.

    Holds shared projections and the relative-shift helpers; subclasses must
    implement ``forward``.
    """
    def __init__(
        self,
        n_head,
        d_model,
        d_head,
        dropout,
        dropatt=0,
        tgt_len=None,
        ext_len=None,
        mem_len=None,
        pre_lnorm=False,
    ):
        super(RelMultiHeadAttn, self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Single fused projection producing queries, keys and values.
        self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model)
        # Scaled dot-product attention factor 1/sqrt(d_head).
        self.scale = 1 / (d_head**0.5)
        self.pre_lnorm = pre_lnorm
    def _parallelogram_mask(self, h, w, left=False):
        """Build an h x w byte mask shaped like a parallelogram band."""
        mask = torch.ones((h, w)).byte()
        m = min(h, w)
        mask[:m, :m] = torch.triu(mask[:m, :m])
        mask[-m:, -m:] = torch.tril(mask[-m:, -m:])
        if left:
            return mask
        else:
            return mask.flip(0)
    def _shift(self, x, qlen, klen, mask, left=False):
        """Zero-pad along dim 1 and gather via ``mask`` into a [qlen, klen, ...] layout."""
        if qlen > 1:
            zero_pad = torch.zeros(
                (x.size(0), qlen - 1, x.size(2), x.size(3)),
                device=x.device,
                dtype=x.dtype,
            )
        else:
            zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
        if left:
            mask = mask.flip(1)
            x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
        else:
            x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
        x = x_padded.masked_select(mask[:, :, None, None]).view(
            qlen, klen, x.size(2), x.size(3)
        )
        return x
    def _rel_shift(self, x, zero_triu=False):
        """Relative-shift trick: pad one column of zeros, reshape, and drop the
        first row so scores indexed by absolute position become indexed by
        relative distance. Optionally zero the upper triangle."""
        zero_pad = torch.zeros(
            (x.size(0), 1, *x.size()[2:]), device=x.device, dtype=x.dtype
        )
        x_padded = torch.cat([zero_pad, x], dim=1)
        x_padded = x_padded.view(x.size(1) + 1, x.size(0), *x.size()[2:])
        x = x_padded[1:].view_as(x)
        if zero_triu:
            ones = torch.ones((x.size(0), x.size(1)))
            x = x * torch.tril(ones, x.size(1) - x.size(0))[:, :, None, None]
        return x
    def forward(self, w, r, attn_mask=None, mems=None):
        # Abstract: subclasses implement the concrete attention computation.
        raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Transformer-XL attention with sinusoidal relative positions plus the
    learnable global biases ``r_w_bias`` (content) and ``r_r_bias`` (position).
    """
    def __init__(self, *args, **kwargs):
        super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
        # Projection applied to the relative positional embeddings ``r``.
        self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
    def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None):
        """Attend over (mems + w) using relative position embeddings ``r``.

        Shapes: ``w`` is [qlen x bsz x d_model], ``r`` is [rlen x 1 x d_model];
        returns a tensor shaped like ``w``.
        """
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
        if mems is not None:
            # Keys/values span the cached memory plus the current segment.
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            # Queries only come from the current segment.
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(
            qlen, bsz, self.n_head, self.d_head
        )  # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(
            klen, bsz, self.n_head, self.d_head
        )  # qlen x bsz x n_head x d_head
        w_head_v = w_head_v.view(
            klen, bsz, self.n_head, self.d_head
        )  # qlen x bsz x n_head x d_head
        r_head_k = r_head_k.view(
            rlen, self.n_head, self.d_head
        )  # qlen x n_head x d_head
        #### compute attention score
        rw_head_q = w_head_q + r_w_bias  # qlen x bsz x n_head x d_head
        AC = torch.einsum(
            "ibnd,jbnd->ijbn", (rw_head_q, w_head_k)
        )  # qlen x klen x bsz x n_head
        rr_head_q = w_head_q + r_r_bias
        BD = torch.einsum(
            "ibnd,jnd->ijbn", (rr_head_q, r_head_k)
        )  # qlen x klen x bsz x n_head
        # Re-index the positional term from absolute to relative offsets.
        BD = self._rel_shift(BD)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        #### compute attention probability
        if attn_mask is not None and attn_mask.any().item():
            attn_mask = attn_mask.bool()
            if attn_mask.dim() == 2:
                attn_score = (
                    attn_score.float()
                    .masked_fill(attn_mask[None, :, :, None], -float("inf"))
                    .type_as(attn_score)
                )
            elif attn_mask.dim() == 3:
                attn_score = (
                    attn_score.float()
                    .masked_fill(attn_mask[:, :, :, None], -float("inf"))
                    .type_as(attn_score)
                )
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        #### compute attention vector
        attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head
        )
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = w + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(w + attn_out)
        return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Relative attention where positional embeddings and biases are fully
    learnable per layer (``r_emb``, ``r_w_bias``, ``r_bias``)."""
    def __init__(self, *args, **kwargs):
        super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
    def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
        # r_emb: [klen, n_head, d_head], used for term B
        # r_w_bias: [n_head, d_head], used for term C
        # r_bias: [klen, n_head], used for term D
        qlen, bsz = w.size(0), w.size(1)
        if mems is not None:
            # Keys/values span the cached memory plus the current segment.
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            # Queries only come from the current segment.
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
        if klen > r_emb.size(0):
            # Pad positional tables by repeating the earliest entry when the
            # key length exceeds the learned table size.
            r_emb_pad = r_emb[0:1].expand(klen - r_emb.size(0), -1, -1)
            r_emb = torch.cat([r_emb_pad, r_emb], 0)
            r_bias_pad = r_bias[0:1].expand(klen - r_bias.size(0), -1)
            r_bias = torch.cat([r_bias_pad, r_bias], 0)
        else:
            r_emb = r_emb[-klen:]
            r_bias = r_bias[-klen:]
        #### compute attention score
        rw_head_q = w_head_q + r_w_bias[None]  # qlen x bsz x n_head x d_head
        AC = torch.einsum(
            "ibnd,jbnd->ijbn", (rw_head_q, w_head_k)
        )  # qlen x klen x bsz x n_head
        B_ = torch.einsum(
            "ibnd,jnd->ijbn", (w_head_q, r_emb)
        )  # qlen x klen x bsz x n_head
        D_ = r_bias[None, :, None]  # 1 x klen x 1 x n_head
        # Re-index the positional terms from absolute to relative offsets.
        BD = self._rel_shift(B_ + D_)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        #### compute attention probability
        if attn_mask is not None and attn_mask.any().item():
            attn_mask = attn_mask.bool()
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None, :, :, None], -float("inf"))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:, :, :, None], -float("inf"))
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        #### compute attention vector
        attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head
        )
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = w + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(w + attn_out)
        return output
class DecoderLayer(nn.Module):
    """Decoder block with absolute-position attention followed by a
    position-wise feed-forward sublayer."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
        super(DecoderLayer, self).__init__()
        self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
        self.pos_ff = PositionwiseFF(
            d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm")
        )

    def forward(self, dec_inp, dec_attn_mask=None, mems=None):
        """Run masked self-attention (optionally over memory), then the FF sublayer."""
        attn_out = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attn_out)
class RelLearnableDecoderLayer(nn.Module):
    """Decoder block using fully-learnable relative-position attention followed
    by a position-wise feed-forward sublayer."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
        super(RelLearnableDecoderLayer, self).__init__()
        self.dec_attn = RelLearnableMultiHeadAttn(
            n_head, d_model, d_head, dropout, **kwargs
        )
        self.pos_ff = PositionwiseFF(
            d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm")
        )

    def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
        """Attention with learnable relative embeddings/biases, then the FF sublayer."""
        attn_out = self.dec_attn(
            dec_inp, r_emb, r_w_bias, r_bias, attn_mask=dec_attn_mask, mems=mems
        )
        return self.pos_ff(attn_out)
class RelPartialLearnableDecoderLayer(nn.Module):
    """Transformer-XL decoder block: partial-learnable relative attention
    followed by a position-wise feed-forward sublayer."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
        super(RelPartialLearnableDecoderLayer, self).__init__()
        self.dec_attn = RelPartialLearnableMultiHeadAttn(
            n_head, d_model, d_head, dropout, **kwargs
        )
        self.pos_ff = PositionwiseFF(
            d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm")
        )

    def forward(self, dec_inp, r, r_w_bias, r_r_bias, dec_attn_mask=None, mems=None):
        """Relative attention with positional embeddings ``r``, then the FF sublayer."""
        attn_out = self.dec_attn(
            dec_inp, r, r_w_bias, r_r_bias, attn_mask=dec_attn_mask, mems=mems
        )
        return self.pos_ff(attn_out)
class AdaptiveEmbedding(nn.Module):
    """Adaptive input embedding: vocabulary is split at ``cutoffs`` into
    clusters; with ``div_val > 1`` rarer clusters get smaller embedding sizes
    that are projected up to ``d_proj``."""
    def __init__(
        self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False
    ):
        super(AdaptiveEmbedding, self).__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        # Append n_token so the last cluster always ends at the vocab boundary.
        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj
        # Output embeddings are scaled by sqrt(d_proj).
        self.emb_scale = d_proj**0.5
        self.cutoff_ends = [0] + self.cutoffs
        self.emb_layers = nn.ModuleList()
        self.emb_projs = nn.ParameterList()
        if div_val == 1:
            # Single full-size embedding table (optionally projected to d_proj).
            self.emb_layers.append(
                nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)
            )
            if d_proj != d_embed:
                self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
        else:
            # One table per cluster; embedding width shrinks by div_val per cluster.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
                self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
    def forward(self, inp):
        """Look up embeddings for token ids ``inp``; returns [..., d_proj]."""
        if self.div_val == 1:
            embed = self.emb_layers[0](inp)
            if self.d_proj != self.d_embed:
                embed = F.linear(embed, self.emb_projs[0])
        else:
            param = next(self.parameters())
            # Flatten so each cluster's tokens can be gathered, embedded with
            # its own table, projected, and scattered back in place.
            inp_flat = inp.view(-1)
            emb_flat = torch.zeros(
                [inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device
            )
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
                indices_i = mask_i.nonzero().squeeze()
                if indices_i.numel() == 0:
                    continue
                # Shift ids to be local to this cluster's table.
                inp_i = inp_flat.index_select(0, indices_i) - l_idx
                emb_i = self.emb_layers[i](inp_i)
                emb_i = F.linear(emb_i, self.emb_projs[i])
                emb_flat.index_copy_(0, indices_i, emb_i)
            embed = emb_flat.view(*inp.size(), self.d_proj)
        embed.mul_(self.emb_scale)
        return embed
class MemTransformerLM(nn.Module):
    def __init__(
        self,
        n_token,
        n_layer,
        n_head,
        d_model,
        d_head,
        d_inner,
        dropout,
        dropatt,
        tie_weight=True,
        d_embed=None,
        div_val=1,
        tie_projs=[False],
        pre_lnorm=False,
        tgt_len=None,
        ext_len=None,
        mem_len=None,
        cutoffs=[],
        adapt_inp=False,
        same_length=False,
        attn_type=0,
        clamp_len=-1,
        sample_softmax=-1,
    ):
        """Build the memory transformer LM.

        Key knobs: ``attn_type`` selects the attention flavor (0 = Transformer-XL
        relative, 1 = fully learnable relative, 2/3 = absolute); ``tgt_len`` /
        ``ext_len`` / ``mem_len`` set target, extended-context and memory lengths;
        ``tie_weight``/``tie_projs`` share input and output embedding parameters.

        NOTE(review): ``tie_projs=[False]`` and ``cutoffs=[]`` are mutable default
        arguments. They appear to be read-only here (iterated / concatenated into
        new lists), so this is safe but worth keeping in mind.
        """
        super(MemTransformerLM, self).__init__()
        self.n_token = n_token
        d_embed = d_model if d_embed is None else d_embed
        self.d_embed = d_embed
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.word_emb = AdaptiveEmbedding(
            n_token, d_embed, d_model, cutoffs, div_val=div_val
        )
        self.drop = nn.Dropout(dropout)
        self.n_layer = n_layer
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.ext_len = ext_len
        # Longest key length any position can attend over.
        self.max_klen = tgt_len + ext_len + mem_len
        self.attn_type = attn_type
        self.layers = nn.ModuleList()
        if attn_type == 0:  # the default attention
            for i in range(n_layer):
                self.layers.append(
                    RelPartialLearnableDecoderLayer(
                        n_head,
                        d_model,
                        d_head,
                        d_inner,
                        dropout,
                        tgt_len=tgt_len,
                        ext_len=ext_len,
                        mem_len=mem_len,
                        dropatt=dropatt,
                        pre_lnorm=pre_lnorm,
                    )
                )
        elif attn_type == 1:  # learnable embeddings
            for i in range(n_layer):
                self.layers.append(
                    RelLearnableDecoderLayer(
                        n_head,
                        d_model,
                        d_head,
                        d_inner,
                        dropout,
                        tgt_len=tgt_len,
                        ext_len=ext_len,
                        mem_len=mem_len,
                        dropatt=dropatt,
                        pre_lnorm=pre_lnorm,
                    )
                )
        elif attn_type in [2, 3]:  # absolute embeddings
            for i in range(n_layer):
                self.layers.append(
                    DecoderLayer(
                        n_head,
                        d_model,
                        d_head,
                        d_inner,
                        dropout,
                        dropatt=dropatt,
                        pre_lnorm=pre_lnorm,
                    )
                )
        self.sample_softmax = sample_softmax
        # use sampled softmax
        if sample_softmax > 0:
            self.out_layer = nn.Linear(d_model, n_token)
            if tie_weight:
                self.out_layer.weight = self.word_emb.weight
            self.tie_weight = tie_weight
            self.sampler = LogUniformSampler(n_token, sample_softmax)
        # use adaptive softmax (including standard softmax)
        else:
            self.crit = ProjectedAdaptiveLogSoftmax(
                n_token, d_embed, d_model, cutoffs, div_val=div_val
            )
            if tie_weight:
                # Share output classifier weights with the input embeddings.
                for i in range(len(self.crit.out_layers)):
                    self.crit.out_layers[i].weight = self.word_emb.emb_layers[i].weight
            if tie_projs:
                for i, tie_proj in enumerate(tie_projs):
                    if tie_proj and div_val == 1 and d_model != d_embed:
                        self.crit.out_projs[i] = self.word_emb.emb_projs[0]
                    elif tie_proj and div_val != 1:
                        self.crit.out_projs[i] = self.word_emb.emb_projs[i]
        self.same_length = same_length
        self.clamp_len = clamp_len
        self._create_params()
    def backward_compatible(self):
        """Disable sampled softmax when loading checkpoints from older code."""
        self.sample_softmax = -1
    def _create_params(self):
        """Allocate the positional parameters required by the chosen attn_type.

        NOTE(review): parameters are created uninitialized via ``torch.Tensor``;
        presumably a weight-init routine elsewhere fills them — confirm.
        """
        if self.attn_type == 0:  # default attention
            self.pos_emb = PositionalEmbedding(self.d_model)
            self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
            self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        elif self.attn_type == 1:  # learnable
            self.r_emb = nn.Parameter(
                torch.Tensor(self.n_layer, self.max_klen, self.n_head, self.d_head)
            )
            self.r_w_bias = nn.Parameter(
                torch.Tensor(self.n_layer, self.n_head, self.d_head)
            )
            self.r_bias = nn.Parameter(
                torch.Tensor(self.n_layer, self.max_klen, self.n_head)
            )
        elif self.attn_type == 2:  # absolute standard
            self.pos_emb = PositionalEmbedding(self.d_model)
        elif self.attn_type == 3:  # absolute deeper SA
            self.r_emb = nn.Parameter(
                torch.Tensor(self.n_layer, self.max_klen, self.n_head, self.d_head)
            )
    def reset_length(self, tgt_len, ext_len, mem_len):
        """Reconfigure target/extended/memory lengths (e.g. for evaluation)."""
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.ext_len = ext_len
    def init_mems(self):
        """Create an empty memory tensor per layer (n_layer + 1 entries, one for
        the embedding output), or None when memory is disabled."""
        if self.mem_len > 0:
            mems = []
            param = next(self.parameters())
            for i in range(self.n_layer + 1):
                # Empty tensors on the model's device/dtype; filled by _update_mems.
                empty = torch.empty(0, dtype=param.dtype, device=param.device)
                mems.append(empty)
            return mems
        else:
            return None
    def _update_mems(self, hids, mems, qlen, mlen):
        """Append the new hidden states to memory and keep the last mem_len steps.

        Memory is detached from the graph, so gradients never flow into cached
        segments.
        """
        # does not deal with None
        if mems is None:
            return None
        # mems is not None
        assert len(hids) == len(mems), "len(hids) != len(mems)"
        # There are `mlen + qlen` steps that can be cached into mems
        # For the next step, the last `ext_len` of the `qlen` tokens
        # will be used as the extended context. Hence, we only cache
        # the tokens from `mlen + qlen - self.ext_len - self.mem_len`
        # to `mlen + qlen - self.ext_len`.
        with torch.no_grad():
            new_mems = []
            # The "- 0" is a no-op kept from the reference implementation.
            end_idx = mlen + max(0, qlen - 0 - self.ext_len)
            beg_idx = max(0, end_idx - self.mem_len)
            for i in range(len(hids)):
                cat = torch.cat([mems[i], hids[i]], dim=0)
                new_mems.append(cat[beg_idx:end_idx].detach())
        return new_mems
def _forward(self, dec_inp, mems=None):
qlen, bsz = dec_inp.size()
word_emb = self.word_emb(dec_inp)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = word_emb.new_ones(qlen, klen)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (
torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len)
).byte()[
:, :, None
] # -1
else:
dec_attn_mask = torch.triu(
word_emb.new_ones(qlen, klen), diagonal=1 + mlen
).byte()[:, :, None]
hids = []
if self.attn_type == 0: # default
pos_seq = torch.arange(
klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype
)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
core_out = layer(
core_out,
pos_emb,
self.r_w_bias,
self.r_r_bias,
dec_attn_mask=dec_attn_mask,
mems=mems_i,
)
hids.append(core_out)
elif self.attn_type == 1: # learnable
core_out = self.drop(word_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
if self.clamp_len > 0:
r_emb = self.r_emb[i][-self.clamp_len :]
r_bias = self.r_bias[i][-self.clamp_len :]
else:
r_emb, r_bias = self.r_emb[i], self.r_bias[i]
mems_i = None if mems is None else mems[i]
core_out = layer(
core_out,
r_emb,
self.r_w_bias[i],
r_bias,
dec_attn_mask=dec_attn_mask,
mems=mems_i,
)
hids.append(core_out)
elif self.attn_type == 2: # absolute
pos_seq = torch.arange(
klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype
)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb + pos_emb[-qlen:])
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
if mems_i is not None and i == 0:
mems_i += pos_emb[:mlen]
core_out = layer(core_out, dec_attn_mask=dec_attn_mask, mems=mems_i)
hids.append(core_out)
elif self.attn_type == 3:
core_out = self.drop(word_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
if mems_i is not None and mlen > 0:
cur_emb = self.r_emb[i][:-qlen]
cur_size = cur_emb.size(0)
if cur_size < mlen:
cur_emb_pad = cur_emb[0:1].expand(mlen - cur_size, -1, -1)
cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
else:
cur_emb = cur_emb[-mlen:]
mems_i += cur_emb.view(mlen, 1, -1)
core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
core_out = layer(core_out, dec_attn_mask=dec_attn_mask, mems=mems_i)
hids.append(core_out)
core_out = self.drop(core_out)
new_mems = self._update_mems(hids, mems, mlen, qlen)
return core_out, new_mems
def forward(self, data, target, *mems):
# nn.DataParallel does not allow size(0) tensors to be broadcasted.
# So, have to initialize size(0) mems inside the model forward.
# Moreover, have to return new_mems to allow nn.DataParallel to piece
# them together.
if not mems:
mems = self.init_mems()
tgt_len = target.size(0)
hidden, new_mems = self._forward(data, mems=mems)
pred_hid = hidden[-tgt_len:]
if self.sample_softmax > 0 and self.training:
assert self.tie_weight
logit = sample_logits(
self.word_emb, self.out_layer.bias, target, pred_hid, self.sampler
)
loss = -F.log_softmax(logit, -1)[:, :, 0]
else:
loss = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target.view(-1))
loss = loss.view(tgt_len, -1)
if new_mems is None:
return [loss]
else:
return [loss] + new_mems
# CUDA toolkit version the installed torch was built against. On CPU-only
# builds torch.version.cuda is None (the original code crashed there with an
# AttributeError); fall back to "0.0" so the comparisons still work.
_torch_cuda_version = torch.version.cuda or "0.0"
CUDA_MAJOR = int(_torch_cuda_version.split(".")[0])
CUDA_MINOR = int(_torch_cuda_version.split(".")[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with optional output projections (Transformer-XL).

    The vocabulary is partitioned at `cutoffs` into a frequent-token
    "shortlist" plus tail clusters. The head softmax scores the shortlist and
    one pseudo-token per tail cluster; each tail cluster has its own
    (optionally narrower, controlled by `div_val`) softmax.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        """
        Args:
            n_token: vocabulary size.
            d_embed: embedding width of the head cluster.
            d_proj: width of the incoming hidden states.
            cutoffs: ascending tail-cluster boundaries (excluding n_token).
            div_val: per-cluster width divisor (1 = one shared output layer).
            keep_order: if True, forward returns losses in input order even
                when gathering cluster by cluster.
        """
        super(ProjectedAdaptiveLogSoftmax, self).__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # The head scores shortlist tokens plus one pseudo-token per cluster.
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(
                torch.zeros(self.n_clusters, self.d_embed)
            )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            # One shared output layer; per-cluster slices are taken in forward.
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            # Each cluster gets its own, progressively narrower, output layer.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Optionally project `hidden` down to the cluster width, then apply
        the linear output layer."""
        if proj is None:
            logit = F.linear(hidden, weight, bias=bias)
        else:
            proj_hid = F.linear(hidden, proj.t().contiguous())
            logit = F.linear(proj_hid, weight, bias=bias)
        return logit

    def forward(self, hidden, target, keep_order=False):
        """
        hidden :: [len*bsz x d_proj]
        target :: [len*bsz]
        Returns the per-position negative log-likelihood, shape [len*bsz].
        """
        if hidden.size(0) != target.size(0):
            raise RuntimeError(
                "Input and target should have the same size " "in the batch dimension."
            )
        if self.n_clusters == 0:
            # Plain (non-adaptive) softmax path.
            logit = self._compute_logit(
                hidden,
                self.out_layers[0].weight,
                self.out_layers[0].bias,
                self.out_projs[0],
            )
            nll = (
                -F.log_softmax(logit, dim=-1).gather(1, target.unsqueeze(1)).squeeze(1)
            )
        else:
            # Construct the weights/biases for the head and each tail cluster.
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    # Head additionally scores one pseudo-token per cluster,
                    # appended after the shortlist columns (in cluster order).
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = F.log_softmax(head_logit, dim=1)
            nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                mask_i = (target >= l_idx) & (target < r_idx)
                # BUG FIX: squeeze(-1) instead of squeeze() so a cluster with
                # exactly one matching target keeps a 1-D index tensor
                # (a 0-D index would break index_select below).
                indices_i = mask_i.nonzero().squeeze(-1)
                if indices_i.numel() == 0:
                    continue
                target_i = target.index_select(0, indices_i) - l_idx
                head_logprob_i = head_logprob.index_select(0, indices_i)
                if i == 0:
                    logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    hidden_i = hidden.index_select(0, indices_i)
                    tail_logit_i = self._compute_logit(
                        hidden_i, weight_i, bias_i, proj_i
                    )
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    # BUG FIX: the pseudo-token of tail cluster i sits at head
                    # column shortlist_size + i - 1 (clusters were appended in
                    # order). The original `[:, -i]` indexed the clusters in
                    # REVERSE, which is wrong whenever n_clusters > 1.
                    cluster_col = self.shortlist_size + i - 1
                    logprob_i = head_logprob_i[:, cluster_col] + tail_logprob_i.gather(
                        1, target_i[:, None]
                    ).squeeze(1)
                if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                    nll.index_copy_(0, indices_i, -logprob_i)
                else:
                    # NOTE: without keep_order losses are packed cluster by
                    # cluster, not in input order.
                    nll[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                offset += logprob_i.size(0)
        return nll
class LogUniformSampler(object):
    """Log-uniform (Zipfian) negative sampler.

    P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1), as in
    tensorflow's candidate_sampling_ops
    (tensorflow/python/ops/candidate_sampling_ops.py, r1.10).
    The expected count 1 - (1 - p)^num_tries is computed stably as
    -expm1(num_tries * log1p(-p)) with num_tries fixed at 2 * n_sample, so the
    actual number of distinct negatives varies from call to call.
    """

    def __init__(self, range_max, n_sample):
        with torch.no_grad():
            self.range_max = range_max
            # Per-class probability under the log-uniform distribution.
            edges = torch.arange(1.0, range_max + 2.0, 1.0).log_()
            self.dist = (edges[1:] - edges[:-1]) / edges[-1]
            # log expected count of each class among 2 * n_sample draws.
            num_tries = 2 * n_sample
            self.log_q = (
                (-(-self.dist.double().log1p_() * num_tries).expm1_()).log_().float()
            )
        self.n_sample = n_sample

    def sample(self, labels):
        """Draw negatives for `labels` [b1, b2].

        Returns:
            true_log_probs: [b1, b2] log expected counts of the true labels.
            samp_log_probs: [k] log expected counts of the drawn negatives.
            neg_samples: [k] unique negative class ids.
        """
        with torch.no_grad():
            draws = torch.multinomial(
                self.dist, 2 * self.n_sample, replacement=True
            ).unique()
            device = labels.device
            draws = draws.to(device)
            true_log_probs = self.log_q[labels].to(device)
            samp_log_probs = self.log_q[draws].to(device)
            return true_log_probs, samp_log_probs, draws
def sample_logits(embedding, bias, labels, inputs, sampler):
    """Compute logits over the true class plus sampled negatives.

    embedding: an nn.Embedding layer
    bias: [n_vocab]
    labels: [b1, b2]
    inputs: [b1, b2, n_emb]
    sampler: e.g. a LogUniformSampler
    Returns logits of shape [b1, b2, 1 + n_sample]; column 0 is the true class.
    """
    true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
    n_sample = neg_samples.size(0)
    b1, b2 = labels.size(0), labels.size(1)
    # One combined embedding/bias lookup for true ids and negatives.
    flat_labels = labels.view(-1)
    n_true = flat_labels.size(0)
    all_ids = torch.cat([flat_labels, neg_samples])
    all_w = embedding(all_ids)
    all_b = bias[all_ids]
    true_w = all_w[:n_true].view(b1, b2, -1)
    sample_w = all_w[n_true:].view(n_sample, -1)
    true_b = all_b[:n_true].view(b1, b2)
    sample_b = all_b[n_true:]
    # Negatives that collide with the true label get masked out below.
    hit = (labels[:, :, None] == neg_samples).detach().bool()
    true_logits = (
        torch.einsum("ijk,ijk->ij", [true_w, inputs]) + true_b - true_log_probs
    )
    neg_logits = (
        torch.einsum("lk,ijk->ijl", [sample_w, inputs]) + sample_b - samp_log_probs
    )
    neg_logits.masked_fill_(hit, -1e30)
    return torch.cat([true_logits[:, :, None], neg_logits], -1)
| 38,100 | 33.356177 | 119 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/resnet.py | import torchvision.models as models
def getResNet(size, pretrained=False):
    """Return a torchvision ResNet of the requested depth.

    Args:
        size: network depth; one of 18, 34, 50, 101.
        pretrained: forwarded to torchvision (load ImageNet weights).

    Raises:
        ValueError: for an unsupported depth (the original code silently
            returned None here, which surfaced later as a confusing error).
    """
    if size == 50:
        return models.resnet50(pretrained=pretrained)
    elif size == 34:
        return models.resnet34(pretrained=pretrained)
    elif size == 101:
        return models.resnet101(pretrained=pretrained)
    elif size == 18:
        return models.resnet18(pretrained=pretrained)
    raise ValueError(
        "Unsupported ResNet size: %r (expected 18, 34, 50, or 101)" % (size,)
    )
| 377 | 28.076923 | 54 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/bert_glue.py | import os
import random
from explib import config
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
)
def get_bert_glue(model_args):
    """Build a bert-base-cased sequence classifier for a GLUE task.

    `model_args` must provide "num_labels" and "task_name". The optional
    "freeze_embedding" flag and "num_encoder_layers_to_freeze" count disable
    gradients for the embedding table and the first N encoder layers.
    """
    autoconfig = AutoConfig.from_pretrained(
        "bert-base-cased",
        num_labels=model_args["num_labels"],
        finetuning_task=model_args["task_name"],
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        "bert-base-cased",
        from_tf=False,
        config=autoconfig,
    )
    if model_args.get("freeze_embedding"):
        for emb_param in model.bert.embeddings.parameters():
            emb_param.requires_grad = False
    if "num_encoder_layers_to_freeze" in model_args:
        frozen = model.bert.encoder.layer[: model_args["num_encoder_layers_to_freeze"]]
        for enc_layer in frozen:
            for enc_param in enc_layer.parameters():
                enc_param.requires_grad = False
    return model
| 1,138 | 28.205128 | 77 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/transformer_encoder.py | """
Simple transformer architecture used as introduction by the pytorch team
https://pytorch.org/tutorials/beginner/transformer_tutorial.html
Version used
https://github.com/pytorch/tutorials/blob/a981886fd8f1793ac5808b26e75dd50b788eb4e5/beginner_source/transformer_tutorial.py
Code covered by
See pytorch_
Copyright (c) 2017-2021, Pytorch contributors
"""
import math
import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer
class TransformerEncoderModel(nn.Module):
    """Transformer-encoder language model: token embedding -> positional
    encoding -> TransformerEncoder stack -> linear decoder over the vocab."""

    def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
        super(TransformerEncoderModel, self).__init__()
        self.model_type = "Transformer"
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        layer = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(layer, nlayers)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.ninp = ninp
        self.decoder = nn.Linear(ninp, ntoken)
        self.ntoken = ntoken
        self.init_weights()

    def generate_square_subsequent_mask(self, sz):
        """Additive causal mask: 0.0 on/below the diagonal, -inf above it."""
        visible = torch.tril(torch.ones(sz, sz)) == 1
        mask = torch.full((sz, sz), float("-inf"))
        return mask.masked_fill(visible, 0.0)

    def init_weights(self):
        """Uniform(-0.1, 0.1) init for embedding/decoder weights, zero decoder bias."""
        bound = 0.1
        self.encoder.weight.data.uniform_(-bound, bound)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-bound, bound)

    def forward(self, src, src_mask):
        scaled = self.encoder(src) * math.sqrt(self.ninp)
        hidden = self.transformer_encoder(self.pos_encoder(scaled), src_mask)
        return self.decoder(hidden)
class PositionalEncoding(nn.Module):
    """Fixed sine/cosine positional encodings (Vaswani et al.) plus dropout."""

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Shape [max_len, 1, d_model] so it broadcasts over the batch dim.
        self.register_buffer("pe", table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """x: [seq_len, batch, d_model]; returns dropout(x + pe[:seq_len])."""
        return self.dropout(x + self.pe[: x.size(0), :])
| 2,568 | 33.253333 | 122 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/letnet5.py | import torch
from torch import nn
class LeNet5(nn.Module):
    """Classic LeNet-5: three tanh conv stages (the first two followed by
    average pooling) and a two-layer fully-connected classifier head."""

    def __init__(self, n_classes, in_channels=3):
        super(LeNet5, self).__init__()
        # C1 -> S2 -> C3 -> S4 -> C5 of the original architecture.
        self.feature_extractor = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=6, kernel_size=5, stride=1),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),
            nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1),
            nn.Tanh(),
        )
        # F6 -> output layer.
        self.classifier = nn.Sequential(
            nn.Linear(in_features=120, out_features=84),
            nn.Tanh(),
            nn.Linear(in_features=84, out_features=n_classes),
        )

    def forward(self, x):
        features = self.feature_extractor(x)
        flat = torch.flatten(features, 1)
        return self.classifier(flat)
| 961 | 30.032258 | 88 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/bert_base_pretrained.py | from datasets import load_metric
import numpy as np
from typing import Optional, Tuple
import json
import collections
import os
import torch
from transformers import (
AutoConfig,
AutoModelForQuestionAnswering,
EvalPrediction,
)
from .. import logging
def get_bert_base_pretrained():
    """Load bert-base-uncased with a question-answering head."""
    return AutoModelForQuestionAnswering.from_pretrained(
        "bert-base-uncased",
        from_tf=False,
        config=AutoConfig.from_pretrained("bert-base-uncased"),
    )
def get_distilbert_base_pretrained():
    """Load distilbert-base-uncased with a question-answering head."""
    return AutoModelForQuestionAnswering.from_pretrained(
        "distilbert-base-uncased",
        from_tf=False,
        config=AutoConfig.from_pretrained("distilbert-base-uncased"),
    )
def postprocess_qa_predictions(
    examples,
    features,
    predictions: Tuple[np.ndarray, np.ndarray],
    version_2_with_negative: bool = False,
    n_best_size: int = 20,
    max_answer_length: int = 30,
    null_score_diff_threshold: float = 0.0,
    output_dir: Optional[str] = None,
    prefix: Optional[str] = None,
    log_level: Optional[int] = None,
):
    """
    Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
    original contexts. This is the base postprocessing functions for models that only return start and end logits.
    Args:
        examples: The non-preprocessed dataset (see the main script for more information).
        features: The processed dataset (see the main script for more information).
        predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
            The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
            first dimension must match the number of elements of :obj:`features`.
        version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not the underlying dataset contains examples with no answers.
        n_best_size (:obj:`int`, `optional`, defaults to 20):
            The total number of n-best predictions to generate when looking for an answer.
        max_answer_length (:obj:`int`, `optional`, defaults to 30):
            The maximum length of an answer that can be generated. This is needed because the start and end predictions
            are not conditioned on one another.
        null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
            The threshold used to select the null answer: if the best answer has a score that is less than the score of
            the null answer minus this threshold, the null answer is selected for this example (note that the score of
            the null answer for an example giving several features is the minimum of the scores for the null answer on
            each feature: all features must be aligned on the fact they `want` to predict a null answer).
            Only useful when :obj:`version_2_with_negative` is :obj:`True`.
        output_dir (:obj:`str`, `optional`):
            If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
            :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
            answers, are saved in `output_dir`.
        prefix (:obj:`str`, `optional`):
            If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
        log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
            ``logging`` log level (e.g., ``logging.WARNING``)
    """
    assert (
        len(predictions) == 2
    ), "`predictions` should be a tuple with two elements (start_logits, end_logits)."
    all_start_logits, all_end_logits = predictions
    assert len(predictions[0]) == len(
        features
    ), f"Got {len(predictions[0])} predictions and {len(features)} features."
    # Build a map example to its corresponding features.
    # (One long example may be split into several overlapping features.)
    example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
    features_per_example = collections.defaultdict(list)
    for i, feature in enumerate(features):
        features_per_example[example_id_to_index[feature["example_id"]]].append(i)
    # The dictionaries we have to fill.
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    if version_2_with_negative:
        scores_diff_json = collections.OrderedDict()
    # Logging.
    # logger.setLevel(log_level)
    # logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
    # Let's loop over all the examples!
    for example_index, example in enumerate(examples):
        # Those are the indices of the features associated to the current example.
        feature_indices = features_per_example[example_index]
        min_null_prediction = None
        prelim_predictions = []
        # Looping through all the features associated to the current example.
        for feature_index in feature_indices:
            # We grab the predictions of the model for this feature.
            start_logits = all_start_logits[feature_index]
            end_logits = all_end_logits[feature_index]
            # This is what will allow us to map some the positions in our logits to span of texts in the original
            # context.
            offset_mapping = features[feature_index]["offset_mapping"]
            # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
            # available in the current feature.
            token_is_max_context = features[feature_index].get(
                "token_is_max_context", None
            )
            # Update minimum null prediction.
            # (Position 0 is the [CLS] token; its score stands for "no answer".)
            feature_null_score = start_logits[0] + end_logits[0]
            if (
                min_null_prediction is None
                or min_null_prediction["score"] > feature_null_score
            ):
                min_null_prediction = {
                    "offsets": (0, 0),
                    "score": feature_null_score,
                    "start_logit": start_logits[0],
                    "end_logit": end_logits[0],
                }
            # Go through all possibilities for the `n_best_size` greater start and end logits.
            start_indexes = np.argsort(start_logits)[
                -1 : -n_best_size - 1 : -1
            ].tolist()
            end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
                    # to part of the input_ids that are not in the context.
                    if (
                        start_index >= len(offset_mapping)
                        or end_index >= len(offset_mapping)
                        or offset_mapping[start_index] is None
                        or offset_mapping[end_index] is None
                    ):
                        continue
                    # Don't consider answers with a length that is either < 0 or > max_answer_length.
                    if (
                        end_index < start_index
                        or end_index - start_index + 1 > max_answer_length
                    ):
                        continue
                    # Don't consider answer that don't have the maximum context available (if such information is
                    # provided).
                    if (
                        token_is_max_context is not None
                        and not token_is_max_context.get(str(start_index), False)
                    ):
                        continue
                    prelim_predictions.append(
                        {
                            "offsets": (
                                offset_mapping[start_index][0],
                                offset_mapping[end_index][1],
                            ),
                            "score": start_logits[start_index] + end_logits[end_index],
                            "start_logit": start_logits[start_index],
                            "end_logit": end_logits[end_index],
                        }
                    )
        if version_2_with_negative:
            # Add the minimum null prediction
            prelim_predictions.append(min_null_prediction)
            null_score = min_null_prediction["score"]
        # Only keep the best `n_best_size` predictions.
        # NOTE: `predictions` deliberately shadows the function parameter here
        # (the original tuple is no longer needed at this point).
        predictions = sorted(
            prelim_predictions, key=lambda x: x["score"], reverse=True
        )[:n_best_size]
        # Add back the minimum null prediction if it was removed because of its low score.
        if version_2_with_negative and not any(
            p["offsets"] == (0, 0) for p in predictions
        ):
            predictions.append(min_null_prediction)
        # Use the offsets to gather the answer text in the original context.
        context = example["context"]
        for pred in predictions:
            offsets = pred.pop("offsets")
            pred["text"] = context[offsets[0] : offsets[1]]
        # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
        # failure.
        if len(predictions) == 0 or (
            len(predictions) == 1 and predictions[0]["text"] == ""
        ):
            predictions.insert(
                0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0}
            )
        # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
        # the LogSumExp trick).
        scores = np.array([pred.pop("score") for pred in predictions])
        exp_scores = np.exp(scores - np.max(scores))
        probs = exp_scores / exp_scores.sum()
        # Include the probabilities in our predictions.
        for prob, pred in zip(probs, predictions):
            pred["probability"] = prob
        # Pick the best prediction. If the null answer is not possible, this is easy.
        if not version_2_with_negative:
            all_predictions[example["id"]] = predictions[0]["text"]
        else:
            # Otherwise we first need to find the best non-empty prediction.
            i = 0
            while predictions[i]["text"] == "":
                i += 1
            best_non_null_pred = predictions[i]
            # Then we compare to the null prediction using the threshold.
            score_diff = (
                null_score
                - best_non_null_pred["start_logit"]
                - best_non_null_pred["end_logit"]
            )
            scores_diff_json[example["id"]] = float(
                score_diff
            )  # To be JSON-serializable.
            if score_diff > null_score_diff_threshold:
                all_predictions[example["id"]] = ""
            else:
                all_predictions[example["id"]] = best_non_null_pred["text"]
        # Make `predictions` JSON-serializable by casting np.float back to float.
        all_nbest_json[example["id"]] = [
            {
                k: (
                    float(v)
                    if isinstance(v, (np.float16, np.float32, np.float64))
                    else v
                )
                for k, v in pred.items()
            }
            for pred in predictions
        ]
    # If we have an output_dir, let's save all those dicts.
    if output_dir is not None:
        assert os.path.isdir(output_dir), f"{output_dir} is not a directory."
        prediction_file = os.path.join(
            output_dir,
            "predictions.json" if prefix is None else f"{prefix}_predictions.json",
        )
        nbest_file = os.path.join(
            output_dir,
            "nbest_predictions.json"
            if prefix is None
            else f"{prefix}_nbest_predictions.json",
        )
        if version_2_with_negative:
            null_odds_file = os.path.join(
                output_dir,
                "null_odds.json" if prefix is None else f"{prefix}_null_odds.json",
            )
        # logger.info(f"Saving predictions to {prediction_file}.")
        with open(prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=4) + "\n")
        # logger.info(f"Saving nbest_preds to {nbest_file}.")
        with open(nbest_file, "w") as writer:
            writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
        if version_2_with_negative:
            # logger.info(f"Saving null_odds to {null_odds_file}.")
            with open(null_odds_file, "w") as writer:
                writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
    return all_predictions
# Post-processing:
def post_processing_function(examples, features, predictions, stage="eval"):
    """Match start/end logits back to answer spans in the original contexts
    and shape predictions/references for the SQuAD metric."""
    answers = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=False,
        n_best_size=20,
        max_answer_length=30,
        null_score_diff_threshold=0.0,
        output_dir=None,
        prefix=stage,
    )
    # SQuAD v1 format (no no-answer probability field).
    formatted_predictions = [
        {"id": example_id, "prediction_text": text}
        for example_id, text in answers.items()
    ]
    references = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
# Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor
def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
    """Stack per-batch logit arrays into one [len(dataset), max_len] array.

    Rows are written batch by batch; columns beyond a batch's width keep the
    -100 fill value (matching the pad_index used when gathering across
    processes).

    Args:
        start_or_end_logits: list of 2-D numpy arrays, one per batch.
        dataset: the evaluation dataset; only its length is used.
        max_len: width of the output (longest padded sequence observed).
    """
    total = len(dataset)
    logits_concat = np.full((total, max_len), -100, dtype=np.float64)
    row = 0
    for batch_logits in start_or_end_logits:
        batch_rows, batch_cols = batch_logits.shape
        # The final gather may return more rows than the dataset has; clip.
        take = min(batch_rows, total - row)
        logits_concat[row : row + take, :batch_cols] = batch_logits[:take]
        row += batch_rows
    return logits_concat
@torch.no_grad()
def evaluate(problem, model, dataloader, accelerator, dataset, examples, metric):
    """Run QA evaluation: collect start/end logits over `dataloader`, match
    them back to answer spans, and score with `metric` (SQuAD-style).

    Returns (eval_metric_dict, mean loss per accumulated batch).
    """
    all_start_logits = []
    all_end_logits = []
    model.eval()
    epoch_loss = 0
    iteration_counter = 0
    accumulation_counter = 0
    logger = logging.logging.getLogger(__name__)
    logger.debug("Starting evaluate loop")
    for step, batch in enumerate(dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        iteration_counter += 1
        epoch_loss += loss.item()
        # Mirror the training-side gradient-accumulation bookkeeping so the
        # reported loss is averaged over the same effective batch size.
        if (
            not problem.grad_accumulate
            or iteration_counter % problem.accumulate_steps == 0
        ):
            # self.optim.step()
            # self.optim.zero_grad()
            accumulation_counter += 1
        # NOTE(review): this break fires BEFORE the current batch's logits are
        # appended below — presumably intentional for fake-full-batch runs,
        # but worth confirming.
        if problem.fake_full_batch_mode and accumulation_counter == 1:
            break
        start_logits = outputs.start_logits
        end_logits = outputs.end_logits
        # Pad to a common width so gather() across processes lines up;
        # -100 matches the fill value used by create_and_fill_np_array.
        start_logits = accelerator.pad_across_processes(
            start_logits, dim=1, pad_index=-100
        )
        end_logits = accelerator.pad_across_processes(
            end_logits, dim=1, pad_index=-100
        )
        all_start_logits.append(accelerator.gather(start_logits).cpu().numpy())
        all_end_logits.append(accelerator.gather(end_logits).cpu().numpy())
        if problem.dummy_run:
            logger.debug("Breaking eval loop due to dummy run")
            break
    logger.debug("End evaluate loop")
    max_len = max(
        [x.shape[1] for x in all_start_logits]
    )  # Get the max_length of the tensor
    logger.debug("Concatenate results")
    # concatenate the numpy array
    start_logits_concat = create_and_fill_np_array(all_start_logits, dataset, max_len)
    end_logits_concat = create_and_fill_np_array(all_end_logits, dataset, max_len)
    # delete the list of numpy arrays
    del all_start_logits
    del all_end_logits
    logger.debug("Loading Squad Metric")
    logger.debug("Post processing function")
    outputs_numpy = (start_logits_concat, end_logits_concat)
    prediction = post_processing_function(examples, dataset, outputs_numpy)
    logger.debug("Computing metric")
    eval_metric = metric.compute(
        predictions=prediction.predictions, references=prediction.label_ids
    )
    # max(..., 1) guards against division by zero when the loop broke early.
    epoch_loss = epoch_loss / max(accumulation_counter, 1)
    return eval_metric, epoch_loss
# deprecated
# @torch.no_grad()
# def eval_loss(problem, model, dataloader):
# model.eval()
# epoch_loss = 0
# iteration_counter = 0
# accumulation_counter = 0
# for X in dataloader:
# if iteration_counter % 1000 == 0:
# print(iteration_counter)
# loss = model(**X).loss
# if problem.grad_accumulate:
# loss = loss / problem.accumulate_steps
# # print(loss)
# iteration_counter += 1
# epoch_loss += loss.item()
# if (
# not problem.grad_accumulate
# or iteration_counter % problem.accumulate_steps == 0
# ):
# # self.optim.step()
# # self.optim.zero_grad()
# accumulation_counter += 1
# # if n % 500 == 0:
# # self.logging({"eval_loss_iter": n})
# if problem.fake_full_batch_mode and accumulation_counter == 1:
# break
# if problem.dummy_run:
# break
# epoch_loss = epoch_loss / accumulation_counter
# return epoch_loss
| 18,940 | 39.997835 | 119 | py |
noise-sgd-adam-sign | noise-sgd-adam-sign-main/explib/explib/model/linear_model.py | import torch
class LinearModel(torch.nn.Module):
    """A single fully-connected layer: y = X @ W.T + b."""

    def __init__(self, inputSize, outputSize):
        super(LinearModel, self).__init__()
        self.linear = torch.nn.Linear(inputSize, outputSize)

    def forward(self, X):
        return self.linear(X)
| 278 | 22.25 | 60 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/naf_pendulum.py | import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import NAFAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
from rl.core import Processor
from noise_estimator import *
# Command-line configuration: reward mode (clean vs. perturbed), confusion
# matrix weight, and the structure of the injected reward noise.
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', default='logs',
                    help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
                    help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--weight', type=float, default=0.6,
                    help='Weight of random confusion matrix [default: 0.6]')
parser.add_argument('--noise_type', type=str, default='norm_all',
                    help='Type of noise added: norm_all/norm_one/anti_iden/max_one [default: norm_all]')
FLAGS = parser.parse_args()
REWARD = FLAGS.reward
WEIGHT = FLAGS.weight
NOISE_TYPE = FLAGS.noise_type
assert (NOISE_TYPE in ["norm_all", "norm_one", "anti_iden", "max_one"])
# Clean runs log under <log_dir>/naf_pendulum/normal; perturbed runs under
# <log_dir>/naf_pendulum/<noise_type>/<weight>.
if REWARD == "normal":
    LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "naf_pendulum"), "normal")
else:
    LOG_DIR = os.path.join(os.path.join(os.path.join(FLAGS.log_dir, "naf_pendulum"), NOISE_TYPE), str(WEIGHT))
ENV_NAME = 'Pendulum-v0'
# gym.undo_logger_setup()
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)
os.system('cp naf_pendulum.py %s' % (LOG_DIR))  # back up this training script alongside its logs
def train():
    """Train a NAF agent on Pendulum-v0 under the reward mode selected by FLAGS.

    The three reward modes (normal / noisy / surrogate) share the entire
    model-building and training pipeline; only the reward processor and the
    output file names differ, so the previously triplicated branches are
    collapsed into a single parameterized path.
    """
    # Environment and reproducible seeding.
    env = gym.make(ENV_NAME)
    np.random.seed(123)
    env.seed(123)
    assert len(env.action_space.shape) == 1
    nb_actions = env.action_space.shape[0]

    # Let TensorFlow grow GPU memory on demand instead of grabbing it all.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    from keras import backend as K
    K.set_session(sess)

    # V network: state-value estimate.
    V_model = Sequential()
    V_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    for _ in range(3):
        V_model.add(Dense(16))
        V_model.add(Activation('relu'))
    V_model.add(Dense(1))
    V_model.add(Activation('linear'))
    V_model.summary()

    # mu network: deterministic action output.
    mu_model = Sequential()
    mu_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    for _ in range(3):
        mu_model.add(Dense(16))
        mu_model.add(Activation('relu'))
    mu_model.add(Dense(nb_actions))
    mu_model.add(Activation('linear'))
    mu_model.summary()

    # L network: lower-triangular advantage term, fed by (action, observation);
    # output size is the number of entries of an n x n lower-triangular matrix.
    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    for _ in range(3):
        x = Dense(32)(x)
        x = Activation('relu')(x)
    x = Dense((nb_actions * nb_actions + nb_actions) // 2)(x)
    x = Activation('linear')(x)
    L_model = Model(inputs=[action_input, observation_input], outputs=x)
    L_model.summary()

    memory = SequentialMemory(limit=100000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3, size=nb_actions)

    # Only the reward processor differs between modes.
    if REWARD == "normal":
        processor = NAFPendulumProcessor()
    elif REWARD in ("noisy", "surrogate"):
        processor = PendulumProcessor(weight=WEIGHT, surrogate=(REWARD == "surrogate"),
                                      noise_type=NOISE_TYPE)
    else:
        raise NotImplementedError

    agent = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
                     memory=memory, nb_steps_warmup=100, random_process=random_process,
                     gamma=.99, target_model_update=1e-3, processor=processor)
    agent.compile(Adam(lr=.00025, clipnorm=1.), metrics=['mae'])
    history = agent.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
    agent.save_weights(os.path.join(LOG_DIR, 'naf_{}_{}_weights.h5f'.format(REWARD, ENV_NAME)),
                       overwrite=True)
    agent.test(env, nb_episodes=10, visualize=False, nb_max_episode_steps=200)
    pandas.DataFrame(history.history).to_csv(os.path.join(LOG_DIR, '{}.csv'.format(REWARD)))


if __name__ == "__main__":
    train()
| 6,696 | 43.059211 | 122 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/dqn_cartpole.py | import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents.dqn import DQNAgent
from rl.core import Processor
from rl.memory import SequentialMemory
from rl.policy import BoltzmannQPolicy
from noise_estimator import *
from utils import *
# ----- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
                    help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.0,
                    help='Error negative rate [default: 0.0]')
parser.add_argument('--log_dir', default='logs',
                    help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
                    help='Reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
                    help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()

ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth

# Perturbed-reward runs are kept in a subfolder keyed by the flip rate.
if REWARD == "normal":
    LOG_DIR = os.path.join(FLAGS.log_dir, "dqn_cartpole")
else:
    LOG_DIR = os.path.join(FLAGS.log_dir, "dqn_cartpole", str(ERR_P))

ENV_NAME = 'CartPole-v0'

if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)
os.system('cp dqn_cartpole.py %s' % (LOG_DIR))  # keep a backup of the training script
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS) + '\n')
def train():
    """Train a DQN on CartPole-v0 with normal, noisy, or surrogate rewards.

    The normal/noisy/surrogate branches were near-identical copies; they are
    merged into one path parameterized by the reward processor and a file-name
    tag.  The noisy branch previously relied on CartpoleProcessor's implicit
    smooth default while its siblings passed it explicitly; `smooth=SMOOTH` is
    now always passed.
    """
    env = gym.make(ENV_NAME)
    np.random.seed(123)
    env.seed(123)
    nb_actions = env.action_space.n

    # Grow GPU memory on demand.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    from keras import backend as K
    K.set_session(sess)

    # Small fully-connected Q-network.
    model = Sequential()
    model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    for _ in range(3):
        model.add(Dense(16))
        model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))
    model.summary()

    memory = SequentialMemory(limit=50000, window_length=1)
    policy = BoltzmannQPolicy()

    # Reward mode selects the processor and the output file names.
    if REWARD == "normal":
        processor = None
        tag = "normal"
    elif REWARD in ("noisy", "surrogate"):
        processor = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=SMOOTH,
                                      surrogate=(REWARD == "surrogate"))
        tag = REWARD + ("_smooth" if SMOOTH else "")
    else:
        raise NotImplementedError

    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
                   target_model_update=1e-2, policy=policy, processor=processor)
    dqn.compile(Adam(lr=1e-3), metrics=['mae'])
    history = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2)
    dqn.save_weights(os.path.join(LOG_DIR, 'dqn_{}_{}_weights.h5f'.format(tag, ENV_NAME)),
                     overwrite=True)
    pandas.DataFrame(history.history).to_csv(os.path.join(LOG_DIR, '{}.csv'.format(tag)))
    dqn.test(env, nb_episodes=10, visualize=False, verbose=2)


if __name__ == "__main__":
    train()
| 6,113 | 42.056338 | 133 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/duel_dqn_cartpole.py | import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents.dqn import DQNAgent
from rl.core import Processor
from rl.memory import SequentialMemory
from rl.policy import BoltzmannQPolicy
from noise_estimator import *
from utils import *
# ----- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
                    help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.0,
                    help='Error negative rate [default: 0.0]')
parser.add_argument('--log_dir', default='logs',
                    help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
                    help='Reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
                    help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()

ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth

# Perturbed-reward runs are kept in a subfolder keyed by the flip rate.
if REWARD == "normal":
    LOG_DIR = os.path.join(FLAGS.log_dir, "duel_dqn_cartpole")
else:
    LOG_DIR = os.path.join(FLAGS.log_dir, "duel_dqn_cartpole", str(ERR_P))

ENV_NAME = 'CartPole-v0'

if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)
os.system('cp duel_dqn_cartpole.py %s' % (LOG_DIR))  # keep a backup of the training script
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS) + '\n')
def train():
    """Train a dueling DQN on CartPole-v0 with normal/noisy/surrogate rewards.

    Identical to the plain-DQN script except for `enable_dueling_network`.
    The three copy-pasted reward branches are merged into one parameterized
    pipeline (processor + file-name tag are the only differences).
    """
    env = gym.make(ENV_NAME)
    np.random.seed(123)
    env.seed(123)
    nb_actions = env.action_space.n

    # Grow GPU memory on demand.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    from keras import backend as K
    K.set_session(sess)

    # Small fully-connected Q-network.
    model = Sequential()
    model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    for _ in range(3):
        model.add(Dense(16))
        model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))
    model.summary()

    memory = SequentialMemory(limit=50000, window_length=1)
    policy = BoltzmannQPolicy()

    # Reward mode selects the processor and the output file names.
    if REWARD == "normal":
        processor = None
        tag = "normal"
    elif REWARD in ("noisy", "surrogate"):
        processor = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=SMOOTH,
                                      surrogate=(REWARD == "surrogate"))
        tag = REWARD + ("_smooth" if SMOOTH else "")
    else:
        raise NotImplementedError

    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
                   enable_dueling_network=True, dueling_type='avg',
                   target_model_update=1e-2, policy=policy, processor=processor)
    dqn.compile(Adam(lr=1e-3), metrics=['mae'])
    history = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2)
    dqn.save_weights(os.path.join(LOG_DIR, 'duel_dqn_{}_{}_weights.h5f'.format(tag, ENV_NAME)),
                     overwrite=True)
    pandas.DataFrame(history.history).to_csv(os.path.join(LOG_DIR, '{}.csv'.format(tag)))
    dqn.test(env, nb_episodes=10, visualize=False, verbose=2)


if __name__ == "__main__":
    train()
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/sarsa_cartpole.py | import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import SARSAAgent
from rl.core import Processor
from rl.policy import BoltzmannQPolicy
from noise_estimator import *
from utils import *
# ----- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
                    help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.0,
                    help='Error negative rate [default: 0.0]')
parser.add_argument('--log_dir', default='logs',
                    help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
                    help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
                    help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()

ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth

# Perturbed-reward runs are kept in a subfolder keyed by the flip rate.
if REWARD == "normal":
    LOG_DIR = os.path.join(FLAGS.log_dir, "sarsa_cartpole")
else:
    LOG_DIR = os.path.join(FLAGS.log_dir, "sarsa_cartpole", str(ERR_P))

ENV_NAME = 'CartPole-v0'

if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)
os.system('cp sarsa_cartpole.py %s' % (LOG_DIR))  # keep a backup of the training script
print ('cp sarsa_cartpole.py %s' % (LOG_DIR))
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS) + '\n')
def log_string(out_str):
    """Append one line to the experiment log file and echo it to stdout."""
    LOG_FOUT.write('%s\n' % out_str)
    LOG_FOUT.flush()
    print(out_str)
def build_state(features):
    """Encode a feature vector as one integer by concatenating the truncated
    integer part of each feature (used as a discrete state id)."""
    digits = [str(int(feature)) for feature in features]
    return int("".join(digits))
def to_bin(value, bins):
    """Return the histogram-bin index of a scalar, per np.digitize semantics."""
    indices = np.digitize(x=[value], bins=bins)
    return indices[0]
def train():
    """Train a SARSA agent on CartPole-v0 with normal/noisy/surrogate rewards.

    Improvements over the original: the three copy-pasted reward branches are
    merged into one parameterized pipeline; only the processor that is actually
    needed is constructed (the original always built both the noisy and the
    surrogate processor); an unknown reward mode now fails loudly instead of
    silently doing nothing.
    """
    env = gym.make(ENV_NAME)
    np.random.seed(123)
    env.seed(123)
    nb_actions = env.action_space.n

    # Grow GPU memory on demand.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    from keras import backend as K
    K.set_session(sess)

    # Small fully-connected Q-network.
    model = Sequential()
    model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    for _ in range(3):
        model.add(Dense(16))
        model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))
    print(model.summary())

    # SARSA does not require a replay memory.
    policy = BoltzmannQPolicy()

    # Reward mode selects the processor and the output file names.
    if REWARD == "normal":
        processor = None
        tag = "normal"
    elif REWARD in ("noisy", "surrogate"):
        processor = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=SMOOTH,
                                      surrogate=(REWARD == "surrogate"))
        tag = REWARD + ("_smooth" if SMOOTH else "")
    else:
        # The original fell through silently on an unknown mode; fail loudly.
        raise NotImplementedError(REWARD)

    sarsa = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=10,
                       policy=policy, processor=processor)
    sarsa.compile(Adam(lr=1e-3), metrics=['mae'])
    history = sarsa.fit(env, nb_steps=50000, visualize=False, verbose=2)
    sarsa.save_weights(os.path.join(LOG_DIR, 'sarsa_{}_{}_weights.h5f'.format(tag, ENV_NAME)),
                       overwrite=True)
    pandas.DataFrame(history.history).to_csv(os.path.join(LOG_DIR, '{}.csv'.format(tag)))
    sarsa.test(env, nb_episodes=10, visualize=False, verbose=2)


if __name__ == "__main__":
    train()
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/ddpg_pendulum.py | import argparse
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import DDPGAgent
from rl.core import Processor
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
# ----- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', default='logs',
                    help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
                    help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--weight', type=float, default=0.6,
                    help='Weight of random confusion matrix [default: 0.6]')
parser.add_argument('--noise_type', type=str, default='norm_all',
                    help='Type of noise added: norm_all/norm_one/anti_iden/max_one [default: norm_all]')
FLAGS = parser.parse_args()

REWARD = FLAGS.reward
WEIGHT = FLAGS.weight
NOISE_TYPE = FLAGS.noise_type
assert NOISE_TYPE in ("norm_all", "norm_one", "anti_iden", "max_one")

# FIX: normal-reward runs used to be logged under the (irrelevant) NOISE_TYPE
# folder; use a fixed "normal" folder, consistent with naf_pendulum.py.
if REWARD == "normal":
    LOG_DIR = os.path.join(FLAGS.log_dir, "ddpg_pendulum", "normal")
else:
    LOG_DIR = os.path.join(FLAGS.log_dir, "ddpg_pendulum", NOISE_TYPE, str(WEIGHT))

ENV_NAME = 'Pendulum-v0'
# gym.undo_logger_setup()

if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)
os.system('cp ddpg_pendulum.py %s' % (LOG_DIR))  # keep a backup of the training script
def train():
    """Train a DDPG agent on Pendulum-v0 under the selected reward mode.

    Fixes a latent crash: this module never imported `noise_estimator`, so the
    noisy/surrogate branches raised NameError on `PendulumSurrogateProcessor`;
    the import is now done locally where it is needed.  The three duplicated
    branches are also merged into one parameterized pipeline.
    """
    env = gym.make(ENV_NAME)
    np.random.seed(123)
    env.seed(123)
    assert len(env.action_space.shape) == 1
    nb_actions = env.action_space.shape[0]

    # Grow GPU memory on demand.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    from keras import backend as K
    K.set_session(sess)

    # Actor network: state -> action.
    actor = Sequential()
    actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    for _ in range(3):
        actor.add(Dense(16))
        actor.add(Activation('relu'))
    actor.add(Dense(nb_actions))
    actor.add(Activation('linear'))

    # Critic network: (action, state) -> Q-value.
    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    for _ in range(3):
        x = Dense(32)(x)
        x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=100000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)

    if REWARD == "normal":
        processor = None
    elif REWARD in ("noisy", "surrogate"):
        # BUG FIX: noise_estimator was never imported at module level, which
        # made these branches crash with NameError; import it here instead.
        from noise_estimator import PendulumSurrogateProcessor
        processor = PendulumSurrogateProcessor(weight=WEIGHT,
                                               surrogate=(REWARD == "surrogate"),
                                               noise_type=NOISE_TYPE)
    else:
        raise NotImplementedError

    ddpg = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic,
                     critic_action_input=action_input, memory=memory,
                     nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
                     random_process=random_process, gamma=.99, target_model_update=1e-3,
                     processor=processor)
    ddpg.compile(Adam(lr=.0005, clipnorm=1.), metrics=['mae'])
    history = ddpg.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
    ddpg.save_weights(os.path.join(LOG_DIR, 'ddpg_{}_{}_weights.h5f'.format(REWARD, ENV_NAME)),
                      overwrite=True)
    pandas.DataFrame(history.history).to_csv(os.path.join(LOG_DIR, '{}.csv'.format(REWARD)))
    ddpg.test(env, nb_episodes=5, visualize=False, verbose=2, nb_max_episode_steps=200)


if __name__ == "__main__":
    train()
| 6,589 | 44.763889 | 124 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/cem_cartpole.py | import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
import tensorflow as tf
from rl.agents.cem import CEMAgent
from rl.memory import EpisodeParameterMemory
from noise_estimator import CartpoleProcessor, CartpoleSurrogateProcessor
from utils import *
# ----- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
                    help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.0,
                    help='Error negative rate [default: 0.0]')
parser.add_argument('--log_dir', default='logs',
                    help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
                    help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
                    help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()

ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth

# Perturbed-reward runs are kept in a subfolder keyed by the flip rate.
if REWARD == "normal":
    LOG_DIR = os.path.join(FLAGS.log_dir, "cem_cartpole")
else:
    LOG_DIR = os.path.join(FLAGS.log_dir, "cem_cartpole", str(ERR_P))

ENV_NAME = 'CartPole-v0'

if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)
os.system('cp cem_cartpole.py %s' % (LOG_DIR))  # keep a backup of the training script
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS) + '\n')
def train():
    """Train a CEM agent on CartPole-v0 with normal/noisy/surrogate rewards.

    Improvements over the original: the unused `obs_dim` local is removed and
    the three copy-pasted reward branches are merged into one pipeline
    parameterized by the reward processor and a file-name tag.
    """
    env = gym.make(ENV_NAME)
    np.random.seed(123)
    env.seed(123)
    nb_actions = env.action_space.n

    # Grow GPU memory on demand.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    from keras import backend as K
    K.set_session(sess)

    # Deep softmax policy network (a single softmax layer also works here).
    model = Sequential()
    model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    for _ in range(3):
        model.add(Dense(16))
        model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('softmax'))
    model.summary()

    memory = EpisodeParameterMemory(limit=1000, window_length=1)

    # Reward mode selects the processor and the output file names.
    if REWARD == "normal":
        processor = None
        tag = "normal"
    elif REWARD in ("noisy", "surrogate"):
        processor = CartpoleProcessor(e_=ERR_N, e=ERR_P, smooth=SMOOTH,
                                      surrogate=(REWARD == "surrogate"))
        tag = REWARD + ("_smooth" if SMOOTH else "")
    else:
        raise NotImplementedError

    cem = CEMAgent(model=model, nb_actions=nb_actions, memory=memory,
                   batch_size=50, nb_steps_warmup=2000, train_interval=50, elite_frac=0.05,
                   processor=processor)
    cem.compile()
    history = cem.fit(env, nb_steps=100000, visualize=False, verbose=2)
    cem.save_weights(os.path.join(LOG_DIR, 'cem_{}_{}_params.h5f'.format(tag, ENV_NAME)),
                     overwrite=True)
    pandas.DataFrame(history.history).to_csv(os.path.join(LOG_DIR, '{}.csv'.format(tag)))
    cem.test(env, nb_episodes=5, visualize=False)


if __name__ == "__main__":
    train()
| 5,860 | 39.42069 | 122 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/callbacks.py | from __future__ import division
from __future__ import print_function
import warnings
import timeit
import json
from tempfile import mkdtemp
import numpy as np
from keras import __version__ as KERAS_VERSION
from keras.callbacks import Callback as KerasCallback, CallbackList as KerasCallbackList
from keras.utils.generic_utils import Progbar
class Callback(KerasCallback):
    """RL-aware callback base class.

    Extends the Keras callback interface with episode/step/action hooks.
    Every hook is a no-op by default; subclasses override only what they
    need.
    """

    def _set_env(self, env):
        # Remember the environment so hooks (e.g. rendering) can reach it.
        self.env = env

    def on_episode_begin(self, episode, logs={}):
        """Hook invoked when an episode starts."""

    def on_episode_end(self, episode, logs={}):
        """Hook invoked when an episode finishes."""

    def on_step_begin(self, step, logs={}):
        """Hook invoked before every environment step."""

    def on_step_end(self, step, logs={}):
        """Hook invoked after every environment step."""

    def on_action_begin(self, action, logs={}):
        """Hook invoked before an action is executed."""

    def on_action_end(self, action, logs={}):
        """Hook invoked after an action has been executed."""
class CallbackList(KerasCallbackList):
    """Callback container that understands the RL-specific hook names.

    Callbacks implementing the RL hooks receive them directly; plain Keras
    callbacks are driven through the closest built-in hook instead
    (`on_epoch_*` for episodes, `on_batch_*` for steps).
    """

    def _set_env(self, env):
        """Hand the environment to every callback that can accept one."""
        for cb in self.callbacks:
            if callable(getattr(cb, '_set_env', None)):
                cb._set_env(env)

    def on_episode_begin(self, episode, logs={}):
        """Dispatch the start-of-episode event to all callbacks."""
        for cb in self.callbacks:
            # Prefer the RL-specific hook; fall back to the Keras epoch
            # hook so built-in callbacks keep working.
            hook = getattr(cb, 'on_episode_begin', None)
            if callable(hook):
                hook(episode, logs=logs)
            else:
                cb.on_epoch_begin(episode, logs=logs)

    def on_episode_end(self, episode, logs={}):
        """Dispatch the end-of-episode event to all callbacks."""
        for cb in self.callbacks:
            hook = getattr(cb, 'on_episode_end', None)
            if callable(hook):
                hook(episode, logs=logs)
            else:
                cb.on_epoch_end(episode, logs=logs)

    def on_step_begin(self, step, logs={}):
        """Dispatch the start-of-step event to all callbacks."""
        for cb in self.callbacks:
            # Plain Keras callbacks are served via their batch hook.
            hook = getattr(cb, 'on_step_begin', None)
            if callable(hook):
                hook(step, logs=logs)
            else:
                cb.on_batch_begin(step, logs=logs)

    def on_step_end(self, step, logs={}):
        """Dispatch the end-of-step event to all callbacks."""
        for cb in self.callbacks:
            hook = getattr(cb, 'on_step_end', None)
            if callable(hook):
                hook(step, logs=logs)
            else:
                cb.on_batch_end(step, logs=logs)

    def on_action_begin(self, action, logs={}):
        """Notify callbacks that an action is about to be executed."""
        for cb in self.callbacks:
            if callable(getattr(cb, 'on_action_begin', None)):
                cb.on_action_begin(action, logs=logs)

    def on_action_end(self, action, logs={}):
        """Notify callbacks that an action has been executed."""
        for cb in self.callbacks:
            if callable(getattr(cb, 'on_action_end', None)):
                cb.on_action_end(action, logs=logs)
class TestLogger(Callback):
    """Console logger used while evaluating (testing) an agent."""

    def on_train_begin(self, logs):
        """Announce how many evaluation episodes will run."""
        print('Testing for {} episodes ...'.format(self.params['nb_episodes']))

    def on_episode_end(self, episode, logs):
        """Report reward and step count of the finished episode."""
        msg = 'Episode {0}: reward: {1:.3f}, steps: {2}'.format(
            episode + 1,
            logs['episode_reward'],
            logs['nb_steps'],
        )
        print(msg)
class TrainEpisodeLogger(Callback):
    """Console logger that prints one detailed summary line per episode."""

    def __init__(self):
        # Some algorithms compute multiple episodes at once since they are multi-threaded.
        # We therefore use a dictionary that is indexed by the episode to separate episodes
        # from each other.
        self.episode_start = {}
        self.observations = {}
        self.rewards = {}
        self.actions = {}
        self.metrics = {}
        # Global step counter across all episodes.
        self.step = 0

    def on_train_begin(self, logs):
        """ Print training values at beginning of training """
        self.train_start = timeit.default_timer()
        self.metrics_names = self.model.metrics_names
        print('Training for {} steps ...'.format(self.params['nb_steps']))

    def on_train_end(self, logs):
        """ Print training time at end of training """
        duration = timeit.default_timer() - self.train_start
        print('done, took {:.3f} seconds'.format(duration))

    def on_episode_begin(self, episode, logs):
        """ Reset environment variables at beginning of each episode """
        self.episode_start[episode] = timeit.default_timer()
        self.observations[episode] = []
        self.rewards[episode] = []
        self.actions[episode] = []
        self.metrics[episode] = []

    def on_episode_end(self, episode, logs):
        """ Compute and print training statistics of the episode when done """
        duration = timeit.default_timer() - self.episode_start[episode]
        episode_steps = len(self.observations[episode])

        # Format all metrics.
        metrics = np.array(self.metrics[episode])
        metrics_template = ''
        metrics_variables = []
        with warnings.catch_warnings():
            # np.nanmean warns on all-NaN input (e.g. during warm-up, before
            # any metric is produced); promote the warning to an exception so
            # those columns render as '--' instead of 'nan'.
            warnings.filterwarnings('error')
            for idx, name in enumerate(self.metrics_names):
                if idx > 0:
                    metrics_template += ', '
                try:
                    value = np.nanmean(metrics[:, idx])
                    metrics_template += '{}: {:f}'
                except Warning:
                    value = '--'
                    metrics_template += '{}: {}'
                metrics_variables += [name, value]
        metrics_text = metrics_template.format(*metrics_variables)

        # Pad the step counter so the progress column lines up across lines.
        nb_step_digits = str(int(np.ceil(np.log10(self.params['nb_steps']))) + 1)
        template = '{step: ' + nb_step_digits + 'd}/{nb_steps}: episode: {episode}, duration: {duration:.3f}s, episode steps: {episode_steps}, steps per second: {sps:.0f}, episode reward: {episode_reward:.3f}, mean reward: {reward_mean:.3f} [{reward_min:.3f}, {reward_max:.3f}], mean action: {action_mean:.3f} [{action_min:.3f}, {action_max:.3f}], mean observation: {obs_mean:.3f} [{obs_min:.3f}, {obs_max:.3f}], {metrics}'
        variables = {
            'step': self.step,
            'nb_steps': self.params['nb_steps'],
            'episode': episode + 1,
            'duration': duration,
            'episode_steps': episode_steps,
            'sps': float(episode_steps) / duration,
            'episode_reward': np.sum(self.rewards[episode]),
            'reward_mean': np.mean(self.rewards[episode]),
            'reward_min': np.min(self.rewards[episode]),
            'reward_max': np.max(self.rewards[episode]),
            'action_mean': np.mean(self.actions[episode]),
            'action_min': np.min(self.actions[episode]),
            'action_max': np.max(self.actions[episode]),
            'obs_mean': np.mean(self.observations[episode]),
            'obs_min': np.min(self.observations[episode]),
            'obs_max': np.max(self.observations[episode]),
            'metrics': metrics_text,
        }
        print(template.format(**variables))

        # Free up resources.
        del self.episode_start[episode]
        del self.observations[episode]
        del self.rewards[episode]
        del self.actions[episode]
        del self.metrics[episode]

    def on_step_end(self, step, logs):
        """ Update statistics of episode after each step """
        episode = logs['episode']
        self.observations[episode].append(logs['observation'])
        self.rewards[episode].append(logs['reward'])
        self.actions[episode].append(logs['action'])
        self.metrics[episode].append(logs['metrics'])
        self.step += 1
class TrainIntervalLogger(Callback):
    """Console logger that reports aggregated statistics every `interval` steps.

    Shows a Keras progress bar within the interval, and prints mean metrics,
    mean info values, and episode-reward statistics at interval boundaries.
    """

    def __init__(self, interval=10000):
        self.interval = interval  # number of steps between summary printouts
        self.step = 0             # global step counter
        self.reset()

    def reset(self):
        """ Reset statistics """
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []
        self.infos = []
        self.info_names = None
        self.episode_rewards = []

    def on_train_begin(self, logs):
        """ Initialize training statistics at beginning of training """
        self.train_start = timeit.default_timer()
        self.metrics_names = self.model.metrics_names
        print('Training for {} steps ...'.format(self.params['nb_steps']))

    def on_train_end(self, logs):
        """ Print training duration at end of training """
        duration = timeit.default_timer() - self.train_start
        print('done, took {:.3f} seconds'.format(duration))

    def on_step_begin(self, step, logs):
        """ Print metrics if interval is over """
        if self.step % self.interval == 0:
            if len(self.episode_rewards) > 0:
                metrics = np.array(self.metrics)
                assert metrics.shape == (self.interval, len(self.metrics_names))
                formatted_metrics = ''
                if not np.isnan(metrics).all():  # not all values are means
                    means = np.nanmean(self.metrics, axis=0)
                    assert means.shape == (len(self.metrics_names),)
                    for name, mean in zip(self.metrics_names, means):
                        formatted_metrics += ' - {}: {:.3f}'.format(name, mean)

                formatted_infos = ''
                if len(self.infos) > 0:
                    infos = np.array(self.infos)
                    if not np.isnan(infos).all():  # not all values are means
                        means = np.nanmean(self.infos, axis=0)
                        assert means.shape == (len(self.info_names),)
                        for name, mean in zip(self.info_names, means):
                            formatted_infos += ' - {}: {:.3f}'.format(name, mean)
                print('{} episodes - episode_reward: {:.3f} [{:.3f}, {:.3f}]{}{}'.format(len(self.episode_rewards), np.mean(self.episode_rewards), np.min(self.episode_rewards), np.max(self.episode_rewards), formatted_metrics, formatted_infos))
                print('')
            self.reset()
            print('Interval {} ({} steps performed)'.format(self.step // self.interval + 1, self.step))

    def on_step_end(self, step, logs):
        """ Update progression bar at the end of each step """
        if self.info_names is None:
            # Fix: snapshot the key order into a list. The previous code kept
            # the live view returned by `.keys()` (Python 3), which stays bound
            # to the *first* step's info dict; a plain list is a stable,
            # reusable ordering for all subsequent steps.
            self.info_names = list(logs['info'].keys())
        values = [('reward', logs['reward'])]
        if KERAS_VERSION > '2.1.3':
            self.progbar.update((self.step % self.interval) + 1, values=values)
        else:
            self.progbar.update((self.step % self.interval) + 1, values=values, force=True)
        self.step += 1
        self.metrics.append(logs['metrics'])
        if len(self.info_names) > 0:
            self.infos.append([logs['info'][k] for k in self.info_names])

    def on_episode_end(self, episode, logs):
        """ Update reward value at the end of each episode """
        self.episode_rewards.append(logs['episode_reward'])
class FileLogger(Callback):
    """Logs per-episode training metrics to a JSON file."""

    def __init__(self, filepath, interval=None):
        # filepath: destination JSON file.
        # interval: save every `interval` episodes; None = save only once,
        # when training ends.
        self.filepath = filepath
        self.interval = interval

        # Some algorithms compute multiple episodes at once since they are multi-threaded.
        # We therefore use a dict that maps from episode to metrics array.
        self.metrics = {}
        self.starts = {}
        self.data = {}

    def on_train_begin(self, logs):
        """ Initialize model metrics before training """
        self.metrics_names = self.model.metrics_names

    def on_train_end(self, logs):
        """ Save model at the end of training """
        self.save_data()

    def on_episode_begin(self, episode, logs):
        """ Initialize metrics at the beginning of each episode """
        # Guard against an episode index being started twice.
        assert episode not in self.metrics
        assert episode not in self.starts
        self.metrics[episode] = []
        self.starts[episode] = timeit.default_timer()

    def on_episode_end(self, episode, logs):
        """ Compute and print metrics at the end of each episode """
        duration = timeit.default_timer() - self.starts[episode]

        metrics = self.metrics[episode]
        if np.isnan(metrics).all():
            # No metric was ever produced this episode (e.g. warm-up phase).
            mean_metrics = np.array([np.nan for _ in self.metrics_names])
        else:
            mean_metrics = np.nanmean(metrics, axis=0)
        assert len(mean_metrics) == len(self.metrics_names)

        # Accumulate one row per episode: mean metrics + episode logs + meta.
        data = list(zip(self.metrics_names, mean_metrics))
        data += list(logs.items())
        data += [('episode', episode), ('duration', duration)]
        for key, value in data:
            if key not in self.data:
                self.data[key] = []
            self.data[key].append(value)

        if self.interval is not None and episode % self.interval == 0:
            self.save_data()

        # Clean up.
        del self.metrics[episode]
        del self.starts[episode]

    def on_step_end(self, step, logs):
        """ Append metric at the end of each step """
        self.metrics[logs['episode']].append(logs['metrics'])

    def save_data(self):
        """ Save metrics in a json file """
        if len(self.data.keys()) == 0:
            return

        # Sort everything by episode.
        assert 'episode' in self.data
        sorted_indexes = np.argsort(self.data['episode'])
        sorted_data = {}
        for key, values in self.data.items():
            assert len(self.data[key]) == len(sorted_indexes)
            # We convert to np.array() and then to list to convert from np datatypes to native datatypes.
            # This is necessary because json.dump cannot handle np.float32, for example.
            sorted_data[key] = np.array([self.data[key][idx] for idx in sorted_indexes]).tolist()

        # Overwrite already open file. We can simply seek to the beginning since the file will
        # grow strictly monotonously.
        with open(self.filepath, 'w') as f:
            json.dump(sorted_data, f)
class Visualizer(Callback):
    """Renders the attached environment after every executed action."""

    def on_action_end(self, action, logs):
        """Draw the current state of the environment on screen."""
        self.env.render(mode='human')
class ModelIntervalCheckpoint(Callback):
    """Periodically writes the agent's weights to disk during training."""

    def __init__(self, filepath, interval, verbose=0):
        super(ModelIntervalCheckpoint, self).__init__()
        # `filepath` is a format template; it may reference {step} and any
        # key present in the step logs.
        self.filepath = filepath
        self.interval = interval
        self.verbose = verbose
        self.total_steps = 0

    def on_step_end(self, step, logs={}):
        """ Save weights at interval steps during training """
        self.total_steps += 1
        if self.total_steps % self.interval != 0:
            # Not at a checkpoint boundary yet.
            return

        path = self.filepath.format(step=self.total_steps, **logs)
        if self.verbose > 0:
            print('Step {}: saving model to {}'.format(self.total_steps, path))
        self.model.save_weights(path, overwrite=True)
| 16,229 | 40.829897 | 423 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/core.py | # -*- coding: utf-8 -*-
import warnings
from copy import deepcopy
import numpy as np
from keras.callbacks import History
from rl.callbacks import (
CallbackList,
TestLogger,
TrainEpisodeLogger,
TrainIntervalLogger,
Visualizer
)
class Agent(object):
    """Abstract base class for all implemented agents.

    Each agent interacts with the environment (as defined by the `Env` class) by first observing the
    state of the environment. Based on this observation the agent changes the environment by performing
    an action.

    Do not use this abstract base class directly but instead use one of the concrete agents implemented.
    Each agent realizes a reinforcement learning algorithm. Since all agents conform to the same
    interface, you can use them interchangeably.

    To implement your own agent, you have to implement the following methods:

    - `forward`
    - `backward`
    - `compile`
    - `load_weights`
    - `save_weights`
    - `layers`

    # Arguments
        processor (`Processor` instance): See [Processor](#processor) for details.
    """
    def __init__(self, processor=None):
        self.processor = processor
        # Toggled by fit()/test() so forward()/backward() can adapt behavior.
        self.training = False
        # Global step counter; reset by fit()/test().
        self.step = 0

    def get_config(self):
        """Configuration of the agent for serialization.

        # Returns
            Dictionnary with agent configuration
        """
        return {}

    def fit(self, env, nb_steps, action_repetition=1, callbacks=None, verbose=1,
            visualize=False, nb_max_start_steps=0, start_step_policy=None, log_interval=10000,
            nb_max_episode_steps=None):
        """Trains the agent on the given environment.

        # Arguments
            env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
            nb_steps (integer): Number of training steps to be performed.
            action_repetition (integer): Number of times the agent repeats the same action without
                observing the environment again. Setting this to a value > 1 can be useful
                if a single action only has a very small effect on the environment.
            callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
                List of callbacks to apply during training. See [callbacks](/callbacks) for details.
            verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
            visualize (boolean): If `True`, the environment is visualized during training. However,
                this is likely going to slow down training significantly and is thus intended to be
                a debugging instrument.
            nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
                of each episode using `start_step_policy`. Notice that this is an upper limit since
                the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
                at the beginning of each episode.
            start_step_policy (`lambda observation: action`): The policy
                to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
            log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
            nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
                automatically resetting the environment. Set to `None` if each episode should run
                (potentially indefinitely) until the environment signals a terminal state.

        # Returns
            A `keras.callbacks.History` instance that recorded the entire training process.
        """
        if not self.compiled:
            raise RuntimeError('Your tried to fit your agent but it hasn\'t been compiled yet. Please call `compile()` before `fit()`.')
        if action_repetition < 1:
            raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))

        self.training = True

        # Assemble the callback list: user callbacks + loggers + history.
        callbacks = [] if not callbacks else callbacks[:]

        if verbose == 1:
            callbacks += [TrainIntervalLogger(interval=log_interval)]
        elif verbose > 1:
            callbacks += [TrainEpisodeLogger()]
        if visualize:
            callbacks += [Visualizer()]
        history = History()
        callbacks += [history]
        callbacks = CallbackList(callbacks)
        # Newer Keras versions expose set_model/set_params; older ones use
        # the underscore-prefixed variants.
        if hasattr(callbacks, 'set_model'):
            callbacks.set_model(self)
        else:
            callbacks._set_model(self)
        callbacks._set_env(env)
        params = {
            'nb_steps': nb_steps,
        }
        if hasattr(callbacks, 'set_params'):
            callbacks.set_params(params)
        else:
            callbacks._set_params(params)
        self._on_train_begin()
        callbacks.on_train_begin()

        # NOTE(review): np.int16 wraps around at 32768. Callers in this repo
        # train with nb_steps=100000, so `self.step` silently overflows —
        # consider a plain Python int. Same concern for `episode` below.
        episode = np.int16(0)
        self.step = np.int16(0)
        observation = None
        episode_reward = None
        episode_step = None
        did_abort = False
        try:
            while self.step < nb_steps:
                if observation is None:  # start of a new episode
                    callbacks.on_episode_begin(episode)
                    episode_step = np.int16(0)
                    episode_reward = np.float32(0)

                    # Obtain the initial observation by resetting the environment.
                    self.reset_states()
                    observation = deepcopy(env.reset())
                    if self.processor is not None:
                        observation = self.processor.process_observation(observation)
                    assert observation is not None

                    # Perform random starts at beginning of episode and do not record them into the experience.
                    # This slightly changes the start position between games.
                    nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
                    for _ in range(nb_random_start_steps):
                        if start_step_policy is None:
                            action = env.action_space.sample()
                        else:
                            action = start_step_policy(observation)
                        if self.processor is not None:
                            action = self.processor.process_action(action)
                        callbacks.on_action_begin(action)
                        observation, reward, done, info = env.step(action)
                        observation = deepcopy(observation)
                        if self.processor is not None:
                            observation, reward, done, info = self.processor.process_step(observation, reward, done, info)
                        callbacks.on_action_end(action)
                        if done:
                            warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
                            observation = deepcopy(env.reset())
                            if self.processor is not None:
                                observation = self.processor.process_observation(observation)
                            break

                # At this point, we expect to be fully initialized.
                assert episode_reward is not None
                assert episode_step is not None
                assert observation is not None

                # Run a single step.
                callbacks.on_step_begin(episode_step)
                # This is where all of the work happens. We first perceive and compute the action
                # (forward step) and then use the reward to improve (backward step).
                action = self.forward(observation)
                if self.processor is not None:
                    action = self.processor.process_action(action)
                reward = np.float32(0)
                accumulated_info = {}
                done = False
                for _ in range(action_repetition):
                    callbacks.on_action_begin(action)
                    observation, r, done, info = env.step(action)
                    # print (r, done)
                    observation = deepcopy(observation)
                    if self.processor is not None:
                        observation, r, done, info = self.processor.process_step(observation, r, done, info)
                    # Sum numeric info values over repeated actions; skip
                    # anything non-numeric.
                    for key, value in info.items():
                        if not np.isreal(value):
                            continue
                        if key not in accumulated_info:
                            accumulated_info[key] = np.zeros_like(value)
                        accumulated_info[key] += value
                    callbacks.on_action_end(action)
                    reward += r
                    if done:
                        break
                if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
                    # Force a terminal state.
                    done = True
                metrics = self.backward(reward, terminal=done)
                episode_reward += reward

                step_logs = {
                    'action': action,
                    'observation': observation,
                    'reward': reward,
                    'metrics': metrics,
                    'episode': episode,
                    'info': accumulated_info,
                }
                callbacks.on_step_end(episode_step, step_logs)
                episode_step += 1
                self.step += 1

                if done:
                    # We are in a terminal state but the agent hasn't yet seen it. We therefore
                    # perform one more forward-backward call and simply ignore the action before
                    # resetting the environment. We need to pass in `terminal=False` here since
                    # the *next* state, that is the state of the newly reset environment, is
                    # always non-terminal by convention.
                    self.forward(observation)
                    self.backward(0., terminal=False)

                    # This episode is finished, report and reset.
                    episode_logs = {
                        'episode_reward': episode_reward,
                        'nb_episode_steps': episode_step,
                        'nb_steps': self.step,
                    }
                    callbacks.on_episode_end(episode, episode_logs)

                    episode += 1
                    observation = None
                    episode_step = None
                    episode_reward = None
        except KeyboardInterrupt:
            # We catch keyboard interrupts here so that training can be be safely aborted.
            # This is so common that we've built this right into this function, which ensures that
            # the `on_train_end` method is properly called.
            did_abort = True
        callbacks.on_train_end(logs={'did_abort': did_abort})
        self._on_train_end()

        return history

    def test(self, env, nb_episodes=1, action_repetition=1, callbacks=None, visualize=True,
             nb_max_episode_steps=None, nb_max_start_steps=0, start_step_policy=None, verbose=1):
        """Runs the agent on the given environment for evaluation (no learning updates).

        # Arguments
            env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
            nb_episodes (integer): Number of episodes to perform.
            action_repetition (integer): Number of times the agent repeats the same action without
                observing the environment again. Setting this to a value > 1 can be useful
                if a single action only has a very small effect on the environment.
            callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
                List of callbacks to apply during training. See [callbacks](/callbacks) for details.
            verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
            visualize (boolean): If `True`, the environment is visualized during training. However,
                this is likely going to slow down training significantly and is thus intended to be
                a debugging instrument.
            nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
                of each episode using `start_step_policy`. Notice that this is an upper limit since
                the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
                at the beginning of each episode.
            start_step_policy (`lambda observation: action`): The policy
                to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
            nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
                automatically resetting the environment. Set to `None` if each episode should run
                (potentially indefinitely) until the environment signals a terminal state.

        # Returns
            A `keras.callbacks.History` instance that recorded the entire testing process.
        """
        if not self.compiled:
            raise RuntimeError('Your tried to test your agent but it hasn\'t been compiled yet. Please call `compile()` before `test()`.')
        if action_repetition < 1:
            raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))

        self.training = False
        self.step = 0

        callbacks = [] if not callbacks else callbacks[:]

        if verbose >= 1:
            callbacks += [TestLogger()]
        if visualize:
            callbacks += [Visualizer()]
        history = History()
        callbacks += [history]
        callbacks = CallbackList(callbacks)
        if hasattr(callbacks, 'set_model'):
            callbacks.set_model(self)
        else:
            callbacks._set_model(self)
        callbacks._set_env(env)
        params = {
            'nb_episodes': nb_episodes,
        }
        if hasattr(callbacks, 'set_params'):
            callbacks.set_params(params)
        else:
            callbacks._set_params(params)

        self._on_test_begin()
        callbacks.on_train_begin()
        for episode in range(nb_episodes):
            callbacks.on_episode_begin(episode)
            episode_reward = 0.
            episode_step = 0

            # Obtain the initial observation by resetting the environment.
            self.reset_states()
            observation = deepcopy(env.reset())
            if self.processor is not None:
                observation = self.processor.process_observation(observation)
            assert observation is not None

            # Perform random starts at beginning of episode and do not record them into the experience.
            # This slightly changes the start position between games.
            nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
            for _ in range(nb_random_start_steps):
                if start_step_policy is None:
                    action = env.action_space.sample()
                else:
                    action = start_step_policy(observation)
                if self.processor is not None:
                    action = self.processor.process_action(action)
                callbacks.on_action_begin(action)
                observation, r, done, info = env.step(action)
                observation = deepcopy(observation)
                if self.processor is not None:
                    observation, r, done, info = self.processor.process_step(observation, r, done, info)
                callbacks.on_action_end(action)
                if done:
                    warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
                    observation = deepcopy(env.reset())
                    if self.processor is not None:
                        observation = self.processor.process_observation(observation)
                    break

            # Run the episode until we're done.
            done = False
            while not done:
                callbacks.on_step_begin(episode_step)

                action = self.forward(observation)
                if self.processor is not None:
                    action = self.processor.process_action(action)
                reward = 0.
                accumulated_info = {}
                for _ in range(action_repetition):
                    callbacks.on_action_begin(action)
                    observation, r, d, info = env.step(action)
                    observation = deepcopy(observation)
                    if self.processor is not None:
                        observation, r, d, info = self.processor.process_step(observation, r, d, info)
                    callbacks.on_action_end(action)
                    reward += r
                    for key, value in info.items():
                        if not np.isreal(value):
                            continue
                        if key not in accumulated_info:
                            accumulated_info[key] = np.zeros_like(value)
                        accumulated_info[key] += value
                    if d:
                        done = True
                        break
                if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
                    done = True
                # backward() is still called so stateful agents can update
                # internal bookkeeping, but self.training is False.
                self.backward(reward, terminal=done)
                episode_reward += reward

                step_logs = {
                    'action': action,
                    'observation': observation,
                    'reward': reward,
                    'episode': episode,
                    'info': accumulated_info,
                }
                callbacks.on_step_end(episode_step, step_logs)
                episode_step += 1
                self.step += 1

            # We are in a terminal state but the agent hasn't yet seen it. We therefore
            # perform one more forward-backward call and simply ignore the action before
            # resetting the environment. We need to pass in `terminal=False` here since
            # the *next* state, that is the state of the newly reset environment, is
            # always non-terminal by convention.
            self.forward(observation)
            self.backward(0., terminal=False)

            # Report end of episode.
            episode_logs = {
                'episode_reward': episode_reward,
                'nb_steps': episode_step,
            }
            callbacks.on_episode_end(episode, episode_logs)
        callbacks.on_train_end()
        self._on_test_end()

        return history

    def reset_states(self):
        """Resets all internally kept states after an episode is completed.
        """
        pass

    def forward(self, observation):
        """Takes the an observation from the environment and returns the action to be taken next.
        If the policy is implemented by a neural network, this corresponds to a forward (inference) pass.

        # Argument
            observation (object): The current observation from the environment.

        # Returns
            The next action to be executed in the environment.
        """
        raise NotImplementedError()

    def backward(self, reward, terminal):
        """Updates the agent after having executed the action returned by `forward`.
        If the policy is implemented by a neural network, this corresponds to a weight update using back-prop.

        # Argument
            reward (float): The observed reward after executing the action returned by `forward`.
            terminal (boolean): `True` if the new state of the environment is terminal.

        # Returns
            List of metrics values
        """
        raise NotImplementedError()

    def compile(self, optimizer, metrics=[]):
        """Compiles an agent and the underlaying models to be used for training and testing.

        # Arguments
            optimizer (`keras.optimizers.Optimizer` instance): The optimizer to be used during training.
            metrics (list of functions `lambda y_true, y_pred: metric`): The metrics to run during training.
        """
        raise NotImplementedError()

    def load_weights(self, filepath):
        """Loads the weights of an agent from an HDF5 file.

        # Arguments
            filepath (str): The path to the HDF5 file.
        """
        raise NotImplementedError()

    def save_weights(self, filepath, overwrite=False):
        """Saves the weights of an agent as an HDF5 file.

        # Arguments
            filepath (str): The path to where the weights should be saved.
            overwrite (boolean): If `False` and `filepath` already exists, raises an error.
        """
        raise NotImplementedError()

    @property
    def layers(self):
        """Returns all layers of the underlying model(s).

        If the concrete implementation uses multiple internal models,
        this method returns them in a concatenated list.

        # Returns
            A list of the model's layers
        """
        raise NotImplementedError()

    @property
    def metrics_names(self):
        """The human-readable names of the agent's metrics. Must return as many names as there
        are metrics (see also `compile`).

        # Returns
            A list of metric's names (string)
        """
        return []

    def _on_train_begin(self):
        """Callback that is called before training begins."""
        pass

    def _on_train_end(self):
        """Callback that is called after training ends."""
        pass

    def _on_test_begin(self):
        """Callback that is called before testing begins."""
        pass

    def _on_test_end(self):
        """Callback that is called after testing ends."""
        pass
class Processor(object):
"""Abstract base class for implementing processors.
A processor acts as a coupling mechanism between an `Agent` and its `Env`. This can
be necessary if your agent has different requirements with respect to the form of the
observations, actions, and rewards of the environment. By implementing a custom processor,
you can effectively translate between the two without having to change the underlaying
implementation of the agent or environment.
Do not use this abstract base class directly but instead use one of the concrete implementations
or write your own.
"""
def process_step(self, observation, reward, done, info):
"""Processes an entire step by applying the processor to the observation, reward, and info arguments.
# Arguments
observation (object): An observation as obtained by the environment.
reward (float): A reward as obtained by the environment.
done (boolean): `True` if the environment is in a terminal state, `False` otherwise.
info (dict): The debug info dictionary as obtained by the environment.
# Returns
The tupel (observation, reward, done, reward) with with all elements after being processed.
"""
observation = self.process_observation(observation)
reward = self.process_reward(reward)
info = self.process_info(info)
return observation, reward, done, info
def process_observation(self, observation):
"""Processes the observation as obtained from the environment for use in an agent and
returns it.
# Arguments
observation (object): An observation as obtained by the environment
# Returns
Observation obtained by the environment processed
"""
return observation
def process_reward(self, reward):
"""Processes the reward as obtained from the environment for use in an agent and
returns it.
# Arguments
reward (float): A reward as obtained by the environment
# Returns
Reward obtained by the environment processed
"""
return reward
def process_info(self, info):
"""Processes the info as obtained from the environment for use in an agent and
returns it.
# Arguments
info (dict): An info as obtained by the environment
# Returns
Info obtained by the environment processed
"""
return info
def process_action(self, action):
    """Processes an action predicted by an agent but before execution in an environment.

    # Arguments
        action (int): Action given to the environment

    # Returns
        Processed action given to the environment
    """
    # Identity by default; subclasses override to map agent actions to env actions.
    return action
def process_state_batch(self, batch):
    """Processes an entire batch of states and returns it.

    # Arguments
        batch (list): List of states

    # Returns
        Processed list of states
    """
    # Identity by default; subclasses override to e.g. normalize or stack states.
    return batch
@property
def metrics(self):
    """The metrics of the processor, which will be reported during training.

    # Returns
        List of `lambda y_true, y_pred: metric` functions.
    """
    # No extra metrics by default.
    return []
@property
def metrics_names(self):
    """The human-readable names of the agent's metrics. Must return as many names as there
    are metrics (see also `compile`).
    """
    # Must stay in sync with `metrics` above; empty by default.
    return []
# Note: the API of the `Env` and `Space` classes are taken from the OpenAI Gym implementation.
# https://github.com/openai/gym/blob/master/gym/core.py
class Env(object):
    """Abstract environment base class used by all agents.

    The interface is deliberately identical to OpenAI Gym's `Env`, so a Gym
    environment can be plugged in directly. Unlike the Gym version, every
    method here is abstract — a subclass must implement:

    - `step`
    - `reset`
    - `render`
    - `close`

    Refer to the [Gym documentation](https://gym.openai.com/docs/#environments).
    """
    reward_range = (-np.inf, np.inf)
    action_space = None
    observation_space = None

    def step(self, action):
        """Run one timestep of the environment's dynamics.

        # Arguments
            action (object): An action provided by the environment.

        # Returns
            A tuple `(observation, reward, done, info)`:
            observation (object): Agent's observation of the current environment.
            reward (float): Amount of reward returned after previous action.
            done (boolean): Whether the episode has ended; further `step()` calls
                return undefined results once it is `True`.
            info (dict): Auxiliary diagnostic information.
        """
        raise NotImplementedError()

    def reset(self):
        """Reset the environment and return the initial observation.

        # Returns
            observation (object): The initial observation of the space.
                Initial reward is assumed to be 0.
        """
        raise NotImplementedError()

    def render(self, mode='human', close=False):
        """Render the environment.

        The set of supported modes varies per environment; some environments do
        not support rendering at all.

        # Arguments
            mode (str): The mode to render with.
            close (bool): Close all open renderings.
        """
        raise NotImplementedError()

    def close(self):
        """Perform any necessary cleanup.

        Environments automatically `close()` themselves when garbage collected
        or when the program exits.
        """
        raise NotImplementedError()

    def seed(self, seed=None):
        """Set the seed for this env's random number generator(s).

        # Returns
            The list of seeds used in this env's random number generators.
        """
        raise NotImplementedError()

    def configure(self, *args, **kwargs):
        """Provide runtime configuration to the environment.

        Configuration should only describe how to run (e.g. a remote server
        address or a data path), never alter the environment's semantics.
        """
        raise NotImplementedError()

    def __del__(self):
        self.close()

    def __str__(self):
        return '<{} instance>'.format(self.__class__.__name__)
class Space(object):
    """Abstract model of a state or action space.

    The API is identical to OpenAI Gym's `Space`, so integrating with Gym is
    trivial. See the [Gym documentation](https://gym.openai.com/docs/#spaces).
    """

    def sample(self, seed=None):
        """Draw a uniformly random element of this space."""
        raise NotImplementedError()

    def contains(self, x):
        """Return a boolean specifying whether `x` is a valid member of this space."""
        raise NotImplementedError()
| 29,790 | 41.018336 | 202 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/util.py | import numpy as np
from keras.models import model_from_config, Sequential, Model, model_from_config
import keras.optimizers as optimizers
import keras.backend as K
def clone_model(model, custom_objects=None):
    """Return a fresh copy of a Keras `model` with freshly copied weights.

    # Arguments
        model: The Keras model to clone.
        custom_objects (dict): Optional mapping of names to custom classes or
            functions required to deserialize the model config. Defaults to
            an empty mapping.

    # Returns
        A new model with the same architecture and the same weight values.
    """
    # FIX: the default was a mutable dict literal ({}), which is shared across
    # calls; use the None sentinel instead.
    if custom_objects is None:
        custom_objects = {}
    # Requires Keras 1.0.7 since get_config has breaking changes.
    config = {
        'class_name': model.__class__.__name__,
        'config': model.get_config(),
    }
    clone = model_from_config(config, custom_objects=custom_objects)
    clone.set_weights(model.get_weights())
    return clone
def clone_optimizer(optimizer):
    """Return a fresh copy of a Keras optimizer.

    # Arguments
        optimizer: Either an optimizer identifier string (resolved via
            `keras.optimizers.get`) or an optimizer instance to clone from
            its config.

    # Returns
        A new optimizer instance equivalent to the input.
    """
    if type(optimizer) is str:
        return optimizers.get(optimizer)
    # Requires Keras 1.0.7 since get_config has breaking changes.
    # FIX: `dict([(k, v) for k, v in ....items()])` was an unnecessary
    # comprehension; `dict(...)` copies the config directly.
    params = dict(optimizer.get_config())
    config = {
        'class_name': optimizer.__class__.__name__,
        'config': params,
    }
    if hasattr(optimizers, 'optimizer_from_config'):
        # COMPATIBILITY: Keras < 2.0
        clone = optimizers.optimizer_from_config(config)
    else:
        clone = optimizers.deserialize(config)
    return clone
def get_soft_target_model_updates(target, source, tau):
    """Build soft-update pairs `(target_w, tau * source_w + (1 - tau) * target_w)`.

    # Arguments
        target: Model whose weights are pulled towards `source`.
        source: Model providing the new weight values.
        tau (float): Interpolation coefficient; 1.0 copies `source` outright.

    # Returns
        List of `(variable, new_value)` update tuples covering both trainable
        and non-trainable weights of all layers.
    """
    tgt_weights = target.trainable_weights + sum([l.non_trainable_weights for l in target.layers], [])
    src_weights = source.trainable_weights + sum([l.non_trainable_weights for l in source.layers], [])
    assert len(tgt_weights) == len(src_weights)
    return [(tw, tau * sw + (1. - tau) * tw) for tw, sw in zip(tgt_weights, src_weights)]
def get_object_config(o):
    """Serialize `o` into a `{'class_name', 'config'}` dict.

    # Arguments
        o: An object exposing `get_config()` (Keras convention), or None.

    # Returns
        A dict holding the object's class name and its config, or None when
        `o` is None.
    """
    if o is None:
        return None
    return {
        'class_name': o.__class__.__name__,
        'config': o.get_config(),
    }
def huber_loss(y_true, y_pred, clip_value):
    """Return the elementwise Huber loss between `y_true` and `y_pred`.

    Quadratic for residuals smaller than `clip_value`, linear beyond it.
    Dispatches on the active Keras backend (TensorFlow or Theano).

    # Arguments
        y_true: Ground-truth tensor.
        y_pred: Prediction tensor of the same shape.
        clip_value (float): Positive threshold between the quadratic and
            linear regimes; `np.inf` degenerates to plain squared error.

    # Raises
        RuntimeError: If the Keras backend is neither TensorFlow nor Theano.
    """
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.
    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity since Tensorflow does have problems
        # if we compare `K.abs(x) < np.inf`.
        return .5 * K.square(x)
    condition = K.abs(x) < clip_value
    squared_loss = .5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        # `tf.select` was renamed to `tf.where` in newer TensorFlow versions.
        if hasattr(tf, 'select'):
            return tf.select(condition, squared_loss, linear_loss)  # condition, true, false
        else:
            return tf.where(condition, squared_loss, linear_loss)  # condition, true, false
    elif K.backend() == 'theano':
        from theano import tensor as T
        return T.switch(condition, squared_loss, linear_loss)
    else:
        raise RuntimeError('Unknown backend "{}".'.format(K.backend()))
class AdditionalUpdatesOptimizer(optimizers.Optimizer):
    """Wraps a Keras optimizer so extra update ops run alongside each step.

    Used e.g. to piggyback soft target-model updates (see
    `get_soft_target_model_updates`) onto the optimizer's own parameter
    updates.
    """
    def __init__(self, optimizer, additional_updates):
        super(AdditionalUpdatesOptimizer, self).__init__()
        self.optimizer = optimizer
        self.additional_updates = additional_updates

    def get_updates(self, params, loss):
        # Delegate to the wrapped optimizer, then append the extra update ops.
        updates = self.optimizer.get_updates(params=params, loss=loss)
        updates += self.additional_updates
        self.updates = updates
        return self.updates

    def get_config(self):
        # NOTE(review): this returns the wrapped optimizer's config only; the
        # wrapper (and its additional updates) cannot be reconstructed from it.
        return self.optimizer.get_config()
# Based on https://github.com/openai/baselines/blob/master/baselines/common/mpi_running_mean_std.py
class WhiteningNormalizer(object):
    """Tracks a running mean/std over observed samples and whitens data with them."""

    def __init__(self, shape, eps=1e-2, dtype=np.float64):
        self.eps = eps
        self.shape = shape
        self.dtype = dtype

        # Running accumulators over every sample seen so far.
        self._sum = np.zeros(shape, dtype=dtype)
        self._sumsq = np.zeros(shape, dtype=dtype)
        self._count = 0

        self.mean = np.zeros(shape, dtype=dtype)
        self.std = np.ones(shape, dtype=dtype)

    def normalize(self, x):
        """Map `x` to zero mean / unit std under the current statistics."""
        return (x - self.mean) / self.std

    def denormalize(self, x):
        """Inverse of `normalize`."""
        return self.std * x + self.mean

    def update(self, x):
        """Fold a batch of samples into the running statistics."""
        if x.ndim == len(self.shape):
            # A single sample was given; promote it to a batch of one.
            x = x.reshape(-1, *self.shape)
        assert x.shape[1:] == self.shape

        self._count += x.shape[0]
        self._sum += np.sum(x, axis=0)
        self._sumsq += np.sum(np.square(x), axis=0)

        n = float(self._count)
        self.mean = self._sum / n
        variance = self._sumsq / n - np.square(self.mean)
        # Clamp the variance at eps**2 so the std never collapses to zero.
        self.std = np.sqrt(np.maximum(np.square(self.eps), variance))
| 4,476 | 32.410448 | 116 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/agents/ddpg.py | from __future__ import division
from collections import deque
import os
import warnings
import numpy as np
import keras.backend as K
import keras.optimizers as optimizers
from rl.core import Agent
from rl.random import OrnsteinUhlenbeckProcess
from rl.util import *
def mean_q(y_true, y_pred):
    """Keras metric: mean over the batch of the maximum predicted Q-value."""
    return K.mean(K.max(y_pred, axis=-1))
# Deep DPG as described by Lillicrap et al. (2015)
# http://arxiv.org/pdf/1509.02971v2.pdf
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.646.4324&rep=rep1&type=pdf
class DDPGAgent(Agent):
    """Deep Deterministic Policy Gradient agent (Lillicrap et al., 2015).

    Maintains an actor network (observations -> continuous action) and a
    critic network (observation/action -> Q-value) plus target copies of both
    that are updated either softly (coefficient < 1) or in hard intervals.

    # Arguments
        nb_actions (int): Dimensionality of the continuous action space.
        actor: Keras model with a single output producing the action.
        critic: Keras model with exactly one output and at least two inputs,
            one of which must be `critic_action_input`.
        critic_action_input: The critic input tensor that receives the action.
        memory: Replay memory providing `append`/`sample`/`get_recent_state`.
        gamma (float): Discount factor.
        batch_size (int): Size of sampled replay batches.
        nb_steps_warmup_critic (int): Steps before the critic starts training.
        nb_steps_warmup_actor (int): Steps before the actor starts training.
        train_interval (int): Train every `train_interval` steps.
        memory_interval (int): Store an experience every `memory_interval` steps.
        delta_range: Deprecated; use `delta_clip`.
        delta_clip (float): Huber-loss clipping value for the critic.
        random_process: Optional exploration-noise process (e.g. Ornstein-Uhlenbeck).
        custom_model_objects (dict): Custom objects needed to clone the models.
        target_model_update (float): >= 1 for a hard update every that many
            steps, < 1 for a soft update with that coefficient.
    """
    def __init__(self, nb_actions, actor, critic, critic_action_input, memory,
                 gamma=.99, batch_size=32, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,
                 train_interval=1, memory_interval=1, delta_range=None, delta_clip=np.inf,
                 random_process=None, custom_model_objects=None, target_model_update=.001, **kwargs):
        if hasattr(actor.output, '__len__') and len(actor.output) > 1:
            raise ValueError('Actor "{}" has more than one output. DDPG expects an actor that has a single output.'.format(actor))
        if hasattr(critic.output, '__len__') and len(critic.output) > 1:
            raise ValueError('Critic "{}" has more than one output. DDPG expects a critic that has a single output.'.format(critic))
        if critic_action_input not in critic.input:
            raise ValueError('Critic "{}" does not have designated action input "{}".'.format(critic, critic_action_input))
        if not hasattr(critic.input, '__len__') or len(critic.input) < 2:
            raise ValueError('Critic "{}" does not have enough inputs. The critic must have at exactly two inputs, one for the action and one for the observation.'.format(critic))
        super(DDPGAgent, self).__init__(**kwargs)

        # Soft vs hard target model updates.
        if target_model_update < 0:
            raise ValueError('`target_model_update` must be >= 0.')
        elif target_model_update >= 1:
            # Hard update every `target_model_update` steps.
            target_model_update = int(target_model_update)
        else:
            # Soft update with `(1 - target_model_update) * old + target_model_update * new`.
            target_model_update = float(target_model_update)

        if delta_range is not None:
            warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we\'re falling back to `delta_range[1] = {}`'.format(delta_range[1]))
            delta_clip = delta_range[1]

        # Parameters.
        self.nb_actions = nb_actions
        self.nb_steps_warmup_actor = nb_steps_warmup_actor
        self.nb_steps_warmup_critic = nb_steps_warmup_critic
        self.random_process = random_process
        self.delta_clip = delta_clip
        self.gamma = gamma
        self.target_model_update = target_model_update
        self.batch_size = batch_size
        self.train_interval = train_interval
        self.memory_interval = memory_interval
        # FIX: the default was a shared mutable dict literal ({}); `None` now
        # stands in for "no custom objects".
        self.custom_model_objects = {} if custom_model_objects is None else custom_model_objects

        # Related objects.
        self.actor = actor
        self.critic = critic
        self.critic_action_input = critic_action_input
        self.critic_action_input_idx = self.critic.input.index(critic_action_input)
        self.memory = memory

        # State.
        self.compiled = False
        self.reset_states()

    @property
    def uses_learning_phase(self):
        """Whether either network behaves differently at train vs. test time."""
        return self.actor.uses_learning_phase or self.critic.uses_learning_phase

    def compile(self, optimizer, metrics=[]):
        """Compile actor, critic and their target copies for training.

        # Arguments
            optimizer: A single optimizer (cloned for the critic) or a
                list/tuple `[actor_optimizer, critic_optimizer]`.
            metrics: Extra Keras metrics, applied to the critic.
        """
        # FIX: rebind instead of `+=` so the (mutable) default argument list is
        # never mutated across calls.
        metrics = metrics + [mean_q]

        if type(optimizer) in (list, tuple):
            if len(optimizer) != 2:
                raise ValueError('More than two optimizers provided. Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.')
            actor_optimizer, critic_optimizer = optimizer
        else:
            actor_optimizer = optimizer
            critic_optimizer = clone_optimizer(optimizer)
        if type(actor_optimizer) is str:
            actor_optimizer = optimizers.get(actor_optimizer)
        if type(critic_optimizer) is str:
            critic_optimizer = optimizers.get(critic_optimizer)
        assert actor_optimizer != critic_optimizer

        if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(metrics[1], '__len__'):
            actor_metrics, critic_metrics = metrics
        else:
            actor_metrics = critic_metrics = metrics

        def clipped_error(y_true, y_pred):
            # Huber loss keeps critic gradients bounded for large TD errors.
            return K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)

        # Compile target networks. We only use them in feed-forward mode, hence we can pass any
        # optimizer and loss since we never use it anyway.
        self.target_actor = clone_model(self.actor, self.custom_model_objects)
        self.target_actor.compile(optimizer='sgd', loss='mse')
        self.target_critic = clone_model(self.critic, self.custom_model_objects)
        self.target_critic.compile(optimizer='sgd', loss='mse')

        # We also compile the actor. We never optimize the actor using Keras but instead compute
        # the policy gradient ourselves. However, we need the actor in feed-forward mode, hence
        # we also compile it with any optimizer and loss.
        self.actor.compile(optimizer='sgd', loss='mse')

        # Compile the critic.
        if self.target_model_update < 1.:
            # We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
            critic_updates = get_soft_target_model_updates(self.target_critic, self.critic, self.target_model_update)
            critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer, critic_updates)
        self.critic.compile(optimizer=critic_optimizer, loss=clipped_error, metrics=critic_metrics)

        # Combine actor and critic so that we can get the policy gradient.
        # Assuming critic's state inputs are the same as actor's.
        combined_inputs = []
        critic_inputs = []
        for i in self.critic.input:
            if i == self.critic_action_input:
                combined_inputs.append([])
            else:
                combined_inputs.append(i)
                critic_inputs.append(i)
        combined_inputs[self.critic_action_input_idx] = self.actor(critic_inputs)
        combined_output = self.critic(combined_inputs)

        # Maximize Q(s, actor(s)) by minimizing its negation.
        updates = actor_optimizer.get_updates(
            params=self.actor.trainable_weights, loss=-K.mean(combined_output))
        if self.target_model_update < 1.:
            # Include soft target model updates.
            updates += get_soft_target_model_updates(self.target_actor, self.actor, self.target_model_update)
        updates += self.actor.updates  # include other updates of the actor, e.g. for BN

        # Finally, combine it all into a callable function.
        if K.backend() == 'tensorflow':
            self.actor_train_fn = K.function(critic_inputs + [K.learning_phase()],
                                             [self.actor(critic_inputs)], updates=updates)
        else:
            if self.uses_learning_phase:
                critic_inputs += [K.learning_phase()]
            self.actor_train_fn = K.function(critic_inputs, [self.actor(critic_inputs)], updates=updates)
        self.actor_optimizer = actor_optimizer

        self.compiled = True

    def load_weights(self, filepath):
        """Load actor/critic weights from `<name>_actor<ext>` / `<name>_critic<ext>`."""
        filename, extension = os.path.splitext(filepath)
        actor_filepath = filename + '_actor' + extension
        critic_filepath = filename + '_critic' + extension
        self.actor.load_weights(actor_filepath)
        self.critic.load_weights(critic_filepath)
        self.update_target_models_hard()

    def save_weights(self, filepath, overwrite=False):
        """Save actor/critic weights to `<name>_actor<ext>` / `<name>_critic<ext>`."""
        filename, extension = os.path.splitext(filepath)
        actor_filepath = filename + '_actor' + extension
        critic_filepath = filename + '_critic' + extension
        self.actor.save_weights(actor_filepath, overwrite=overwrite)
        self.critic.save_weights(critic_filepath, overwrite=overwrite)

    def update_target_models_hard(self):
        """Copy the online networks' weights into the target networks verbatim."""
        self.target_critic.set_weights(self.critic.get_weights())
        self.target_actor.set_weights(self.actor.get_weights())

    # TODO: implement pickle

    def reset_states(self):
        """Reset noise process, episode bookkeeping, and (if compiled) model states."""
        if self.random_process is not None:
            self.random_process.reset_states()
        self.recent_action = None
        self.recent_observation = None
        if self.compiled:
            self.actor.reset_states()
            self.critic.reset_states()
            self.target_actor.reset_states()
            self.target_critic.reset_states()

    def process_state_batch(self, batch):
        """Convert a list of states to an array, delegating to the processor if set."""
        batch = np.array(batch)
        if self.processor is None:
            return batch
        return self.processor.process_state_batch(batch)

    def select_action(self, state):
        """Run the actor on `state` and (during training) add exploration noise."""
        batch = self.process_state_batch([state])
        action = self.actor.predict_on_batch(batch).flatten()
        assert action.shape == (self.nb_actions,)

        # Apply noise, if a random process is set.
        if self.training and self.random_process is not None:
            noise = self.random_process.sample()
            assert noise.shape == action.shape
            action += noise

        return action

    def forward(self, observation):
        """Select an action for `observation` and remember the pair for `backward`."""
        # Select an action.
        state = self.memory.get_recent_state(observation)
        action = self.select_action(state)  # TODO: move this into policy

        # Book-keeping.
        self.recent_observation = observation
        self.recent_action = action

        return action

    @property
    def layers(self):
        return self.actor.layers[:] + self.critic.layers[:]

    @property
    def metrics_names(self):
        names = self.critic.metrics_names[:]
        if self.processor is not None:
            names += self.processor.metrics_names[:]
        return names

    def backward(self, reward, terminal=False):
        """Store the latest experience and perform one training step if due.

        # Returns
            The critic's training metrics (NaNs while not training).
        """
        # Store most recent experience in memory.
        if self.step % self.memory_interval == 0:
            self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
                               training=self.training)

        metrics = [np.nan for _ in self.metrics_names]
        if not self.training:
            # We're done here. No need to update the experience memory since we only use the working
            # memory to obtain the state over the most recent observations.
            return metrics

        # Train the network on a single stochastic batch.
        can_train_either = self.step > self.nb_steps_warmup_critic or self.step > self.nb_steps_warmup_actor
        if can_train_either and self.step % self.train_interval == 0:
            experiences = self.memory.sample(self.batch_size)
            assert len(experiences) == self.batch_size

            # Start by extracting the necessary parameters (we use a vectorized implementation).
            state0_batch = []
            reward_batch = []
            action_batch = []
            terminal1_batch = []
            state1_batch = []
            for e in experiences:
                state0_batch.append(e.state0)
                state1_batch.append(e.state1)
                reward_batch.append(e.reward)
                action_batch.append(e.action)
                terminal1_batch.append(0. if e.terminal1 else 1.)

            # Prepare and validate parameters.
            state0_batch = self.process_state_batch(state0_batch)
            state1_batch = self.process_state_batch(state1_batch)
            terminal1_batch = np.array(terminal1_batch)
            reward_batch = np.array(reward_batch)
            action_batch = np.array(action_batch)
            assert reward_batch.shape == (self.batch_size,)
            assert terminal1_batch.shape == reward_batch.shape
            assert action_batch.shape == (self.batch_size, self.nb_actions)

            # Update critic, if warm up is over.
            if self.step > self.nb_steps_warmup_critic:
                target_actions = self.target_actor.predict_on_batch(state1_batch)
                assert target_actions.shape == (self.batch_size, self.nb_actions)
                if len(self.critic.inputs) >= 3:
                    state1_batch_with_action = state1_batch[:]
                else:
                    state1_batch_with_action = [state1_batch]
                state1_batch_with_action.insert(self.critic_action_input_idx, target_actions)
                target_q_values = self.target_critic.predict_on_batch(state1_batch_with_action).flatten()
                assert target_q_values.shape == (self.batch_size,)

                # Compute r_t + gamma * max_a Q(s_t+1, a) and update the target ys accordingly,
                # but only for the affected output units (as given by action_batch).
                discounted_reward_batch = self.gamma * target_q_values
                discounted_reward_batch *= terminal1_batch
                assert discounted_reward_batch.shape == reward_batch.shape
                targets = (reward_batch + discounted_reward_batch).reshape(self.batch_size, 1)

                # Perform a single batch update on the critic network.
                if len(self.critic.inputs) >= 3:
                    state0_batch_with_action = state0_batch[:]
                else:
                    state0_batch_with_action = [state0_batch]
                state0_batch_with_action.insert(self.critic_action_input_idx, action_batch)
                metrics = self.critic.train_on_batch(state0_batch_with_action, targets)
                if self.processor is not None:
                    metrics += self.processor.metrics

            # Update actor, if warm up is over.
            if self.step > self.nb_steps_warmup_actor:
                # TODO: implement metrics for actor
                if len(self.actor.inputs) >= 2:
                    inputs = state0_batch[:]
                else:
                    inputs = [state0_batch]
                if self.uses_learning_phase:
                    inputs += [self.training]
                action_values = self.actor_train_fn(inputs)[0]
                assert action_values.shape == (self.batch_size, self.nb_actions)

        if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
            self.update_target_models_hard()

        return metrics
| 14,524 | 44.820189 | 195 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/agents/sarsa.py | import collections
import numpy as np
from keras.callbacks import History
from keras.models import Model
from keras.layers import Input, Lambda
import keras.backend as K
from rl.core import Agent
from rl.agents.dqn import mean_q
from rl.util import huber_loss
from rl.policy import EpsGreedyQPolicy, GreedyQPolicy
from rl.util import get_object_config
class SARSAAgent(Agent):
    """On-policy SARSA agent with a Keras Q-network.

    Trains the Q-network towards `r_t + gamma * Q(s_{t+1}, a_{t+1})`, where
    `a_{t+1}` is the action actually chosen by the behavior policy.

    # Arguments
        model: Keras model mapping an observation to `nb_actions` Q-values.
        nb_actions (int): Number of discrete actions.
        policy: Behavior policy used during training (default: eps-greedy).
        test_policy: Policy used during testing (default: greedy).
        gamma (float): Discount factor.
        nb_steps_warmup (int): Steps before training starts.
        train_interval (int): Train every `train_interval` steps.
        delta_clip (float): Huber-loss clipping value.
    """
    def __init__(self, model, nb_actions, policy=None, test_policy=None, gamma=.99, nb_steps_warmup=10,
                 train_interval=1, delta_clip=np.inf, *args, **kwargs):
        # FIX: use the class's own name here instead of the `SarsaAgent` alias,
        # which is only bound after this class statement finishes executing.
        super(SARSAAgent, self).__init__(*args, **kwargs)

        # Do not use defaults in constructor because that would mean that each instance shares the same
        # policy.
        if policy is None:
            policy = EpsGreedyQPolicy()
        if test_policy is None:
            test_policy = GreedyQPolicy()

        self.model = model
        self.nb_actions = nb_actions
        self.policy = policy
        self.test_policy = test_policy
        self.gamma = gamma
        self.nb_steps_warmup = nb_steps_warmup
        self.train_interval = train_interval
        self.delta_clip = delta_clip
        self.compiled = False
        self.actions = None
        self.observations = None
        self.rewards = None

    def compute_batch_q_values(self, state_batch):
        """Return Q-values for a batch of states, shape (len(batch), nb_actions)."""
        batch = self.process_state_batch(state_batch)
        q_values = self.model.predict_on_batch(batch)
        assert q_values.shape == (len(state_batch), self.nb_actions)
        return q_values

    def compute_q_values(self, state):
        """Return the flat Q-value vector for a single state."""
        q_values = self.compute_batch_q_values([state]).flatten()
        assert q_values.shape == (self.nb_actions,)
        return q_values

    def process_state_batch(self, batch):
        """Convert a list of states to an array, delegating to the processor if set."""
        batch = np.array(batch)
        if self.processor is None:
            return batch
        return self.processor.process_state_batch(batch)

    def get_config(self):
        """Return a JSON-serializable configuration of the agent."""
        config = super(SARSAAgent, self).get_config()
        config['nb_actions'] = self.nb_actions
        config['gamma'] = self.gamma
        config['nb_steps_warmup'] = self.nb_steps_warmup
        config['train_interval'] = self.train_interval
        config['delta_clip'] = self.delta_clip
        config['model'] = get_object_config(self.model)
        config['policy'] = get_object_config(self.policy)
        config['test_policy'] = get_object_config(self.test_policy)
        return config

    def compile(self, optimizer, metrics=[]):
        """Build the trainable model with a masked Huber loss and compile it."""
        # FIX: rebind instead of `+=` so the (mutable) default argument list is
        # never mutated across calls.
        metrics = metrics + [mean_q]  # register default metrics

        def clipped_masked_error(args):
            y_true, y_pred, mask = args
            loss = huber_loss(y_true, y_pred, self.delta_clip)
            loss *= mask  # apply element-wise mask
            return K.sum(loss, axis=-1)

        # Create trainable model. The problem is that we need to mask the output since we only
        # ever want to update the Q values for a certain action. The way we achieve this is by
        # using a custom Lambda layer that computes the loss. This gives us the necessary flexibility
        # to mask out certain parameters by passing in multiple inputs to the Lambda layer.
        y_pred = self.model.output
        y_true = Input(name='y_true', shape=(self.nb_actions,))
        mask = Input(name='mask', shape=(self.nb_actions,))
        loss_out = Lambda(clipped_masked_error, output_shape=(1,), name='loss')([y_pred, y_true, mask])
        ins = [self.model.input] if type(self.model.input) is not list else self.model.input
        trainable_model = Model(inputs=ins + [y_true, mask], outputs=[loss_out, y_pred])
        assert len(trainable_model.output_names) == 2
        combined_metrics = {trainable_model.output_names[1]: metrics}
        losses = [
            lambda y_true, y_pred: y_pred,  # loss is computed in Lambda layer
            lambda y_true, y_pred: K.zeros_like(y_pred),  # we only include this for the metrics
        ]
        trainable_model.compile(optimizer=optimizer, loss=losses, metrics=combined_metrics)
        self.trainable_model = trainable_model

        self.compiled = True

    def load_weights(self, filepath):
        self.model.load_weights(filepath)

    def save_weights(self, filepath, overwrite=False):
        self.model.save_weights(filepath, overwrite=overwrite)

    def reset_states(self):
        """Clear the two-step (s, a, r) working memory and any model state."""
        self.actions = collections.deque(maxlen=2)
        self.observations = collections.deque(maxlen=2)
        self.rewards = collections.deque(maxlen=2)
        if self.compiled:
            self.model.reset_states()

    def forward(self, observation):
        """Select an action for `observation` and remember it for `backward`."""
        # Select an action.
        q_values = self.compute_q_values([observation])
        if self.training:
            action = self.policy.select_action(q_values=q_values)
        else:
            action = self.test_policy.select_action(q_values=q_values)

        # Book-keeping.
        self.observations.append(observation)
        self.actions.append(action)

        return action

    def backward(self, reward, terminal):
        """Perform one SARSA update from the stored (s, a, r, s', a') tuple.

        # Returns
            The trainable model's metrics (NaNs while not training).
        """
        metrics = [np.nan for _ in self.metrics_names]
        if not self.training:
            # We're done here. No need to update the experience memory since we only use the working
            # memory to obtain the state over the most recent observations.
            return metrics

        # Train the network on a single stochastic batch.
        if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
            # Start by extracting the necessary parameters (we use a vectorized implementation).
            self.rewards.append(reward)
            if len(self.observations) < 2:
                return metrics  # not enough data yet

            state0_batch = [self.observations[0]]
            reward_batch = [self.rewards[0]]
            action_batch = [self.actions[0]]
            terminal1_batch = [0.] if terminal else [1.]
            state1_batch = [self.observations[1]]
            action1_batch = [self.actions[1]]

            # Prepare and validate parameters.
            state0_batch = self.process_state_batch(state0_batch)
            state1_batch = self.process_state_batch(state1_batch)
            terminal1_batch = np.array(terminal1_batch)
            reward_batch = np.array(reward_batch)
            assert reward_batch.shape == (1,)
            assert terminal1_batch.shape == reward_batch.shape
            assert len(action_batch) == len(reward_batch)

            batch = self.process_state_batch(state1_batch)
            q_values = self.compute_q_values(batch)
            q_values = q_values.reshape((1, self.nb_actions))

            q_batch = q_values[0, action1_batch]
            assert q_batch.shape == (1,)

            targets = np.zeros((1, self.nb_actions))
            dummy_targets = np.zeros((1,))
            masks = np.zeros((1, self.nb_actions))

            # Compute r_t + gamma * Q(s_t+1, a_t+1)
            discounted_reward_batch = self.gamma * q_batch
            # Set discounted reward to zero for all states that were terminal.
            discounted_reward_batch *= terminal1_batch
            assert discounted_reward_batch.shape == reward_batch.shape
            Rs = reward_batch + discounted_reward_batch
            for idx, (target, mask, R, action) in enumerate(zip(targets, masks, Rs, action_batch)):
                target[action] = R  # update action with estimated accumulated reward
                dummy_targets[idx] = R
                mask[action] = 1.  # enable loss for this specific action
            targets = np.array(targets).astype('float32')
            masks = np.array(masks).astype('float32')

            # Finally, perform a single update on the entire batch. We use a dummy target since
            # the actual loss is computed in a Lambda layer that needs more complex input. However,
            # it is still useful to know the actual target to compute metrics properly.
            state0_batch = state0_batch.reshape((1,) + state0_batch.shape)
            ins = [state0_batch] if type(self.model.input) is not list else state0_batch
            metrics = self.trainable_model.train_on_batch(ins + [targets, masks], [dummy_targets, targets])
            metrics = [metric for idx, metric in enumerate(metrics) if idx not in (1, 2)]  # throw away individual losses
            metrics += self.policy.metrics
            if self.processor is not None:
                metrics += self.processor.metrics

        return metrics

    @property
    def layers(self):
        return self.model.layers[:]

    @property
    def metrics_names(self):
        # Throw away individual losses and replace output name since this is hidden from the user.
        assert len(self.trainable_model.output_names) == 2
        dummy_output_name = self.trainable_model.output_names[1]
        model_metrics = [name for idx, name in enumerate(self.trainable_model.metrics_names) if idx not in (1, 2)]
        model_metrics = [name.replace(dummy_output_name + '_', '') for name in model_metrics]

        names = model_metrics + self.policy.metrics_names[:]
        if self.processor is not None:
            names += self.processor.metrics_names[:]
        return names

    @property
    def policy(self):
        return self.__policy

    @policy.setter
    def policy(self, policy):
        self.__policy = policy
        self.__policy._set_agent(self)

    @property
    def test_policy(self):
        return self.__test_policy

    @test_policy.setter
    def test_policy(self, policy):
        self.__test_policy = policy
        self.__test_policy._set_agent(self)
# Aliases
SarsaAgent = SARSAAgent  # legacy capitalization, kept for backwards compatibility
| 9,668 | 40.320513 | 121 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/agents/dqn.py | from __future__ import division
import warnings
import keras.backend as K
from keras.models import Model
from keras.layers import Lambda, Input, Layer, Dense
from rl.core import Agent
from rl.policy import EpsGreedyQPolicy, GreedyQPolicy
from rl.util import *
def mean_q(y_true, y_pred):
    """Keras metric: mean over the batch of the maximum predicted Q-value."""
    return K.mean(K.max(y_pred, axis=-1))
class AbstractDQNAgent(Agent):
    """Common base for DQN-family agents: hyperparameters plus Q-value helpers.

    # Arguments
        nb_actions (int): Number of discrete actions.
        memory: Replay memory.
        gamma (float): Discount factor.
        batch_size (int): Size of sampled replay batches.
        nb_steps_warmup (int): Steps before training starts.
        train_interval (int): Train every `train_interval` steps.
        memory_interval (int): Store an experience every `memory_interval` steps.
        target_model_update: >= 1 for a hard target update every that many
            steps, < 1 for a soft update with that coefficient.
        delta_range: Deprecated; use `delta_clip`.
        delta_clip (float): Huber-loss clipping value.
        custom_model_objects (dict): Custom objects needed to clone the model.
    """
    def __init__(self, nb_actions, memory, gamma=.99, batch_size=32, nb_steps_warmup=1000,
                 train_interval=1, memory_interval=1, target_model_update=10000,
                 delta_range=None, delta_clip=np.inf, custom_model_objects=None, **kwargs):
        super(AbstractDQNAgent, self).__init__(**kwargs)

        # Soft vs hard target model updates.
        if target_model_update < 0:
            raise ValueError('`target_model_update` must be >= 0.')
        elif target_model_update >= 1:
            # Hard update every `target_model_update` steps.
            target_model_update = int(target_model_update)
        else:
            # Soft update with `(1 - target_model_update) * old + target_model_update * new`.
            target_model_update = float(target_model_update)

        if delta_range is not None:
            warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we\'re falling back to `delta_range[1] = {}`'.format(delta_range[1]))
            delta_clip = delta_range[1]

        # Parameters.
        self.nb_actions = nb_actions
        self.gamma = gamma
        self.batch_size = batch_size
        self.nb_steps_warmup = nb_steps_warmup
        self.train_interval = train_interval
        self.memory_interval = memory_interval
        self.target_model_update = target_model_update
        self.delta_clip = delta_clip
        # FIX: the default was a shared mutable dict literal ({}); `None` now
        # stands in for "no custom objects".
        self.custom_model_objects = {} if custom_model_objects is None else custom_model_objects

        # Related objects.
        self.memory = memory

        # State.
        self.compiled = False

    def process_state_batch(self, batch):
        """Convert a list of states to an array, delegating to the processor if set."""
        batch = np.array(batch)
        if self.processor is None:
            return batch
        return self.processor.process_state_batch(batch)

    def compute_batch_q_values(self, state_batch):
        """Return Q-values for a batch of states, shape (len(batch), nb_actions)."""
        batch = self.process_state_batch(state_batch)
        q_values = self.model.predict_on_batch(batch)
        assert q_values.shape == (len(state_batch), self.nb_actions)
        return q_values

    def compute_q_values(self, state):
        """Return the flat Q-value vector for a single state."""
        q_values = self.compute_batch_q_values([state]).flatten()
        assert q_values.shape == (self.nb_actions,)
        return q_values

    def get_config(self):
        """Return a JSON-serializable configuration of the agent."""
        return {
            'nb_actions': self.nb_actions,
            'gamma': self.gamma,
            'batch_size': self.batch_size,
            'nb_steps_warmup': self.nb_steps_warmup,
            'train_interval': self.train_interval,
            'memory_interval': self.memory_interval,
            'target_model_update': self.target_model_update,
            'delta_clip': self.delta_clip,
            'memory': get_object_config(self.memory),
        }
# An implementation of the DQN agent as described in Mnih (2013) and Mnih (2015).
# http://arxiv.org/pdf/1312.5602.pdf
# http://arxiv.org/abs/1509.06461
class DQNAgent(AbstractDQNAgent):
    """Deep Q-Network agent as described in Mnih (2013) and Mnih (2015).

    http://arxiv.org/pdf/1312.5602.pdf
    http://arxiv.org/abs/1509.06461

    # Arguments
        model__: A Keras model.
        policy__: A Keras-rl policy that are defined in [policy](https://github.com/keras-rl/keras-rl/blob/master/rl/policy.py).
        test_policy__: A Keras-rl policy.
        enable_double_dqn__: A boolean which enables the target network as a second network proposed by van Hasselt et al. to decrease overfitting.
        enable_dueling_dqn__: A boolean which enables the dueling architecture proposed by Mnih et al.
        dueling_type__: If `enable_dueling_dqn` is set to `True`, a type of dueling architecture must be chosen which calculates Q(s,a) from V(s) and A(s,a) differently. Note that `avg` is recommended in the [paper](https://arxiv.org/abs/1511.06581).
            `avg`: Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta)))
            `max`: Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta)))
            `naive`: Q(s,a;theta) = V(s;theta) + A(s,a;theta)
    """
    def __init__(self, model, policy=None, test_policy=None, enable_double_dqn=True, enable_dueling_network=False,
                 dueling_type='avg', *args, **kwargs):
        super(DQNAgent, self).__init__(*args, **kwargs)

        # Validate (important) input.
        if hasattr(model.output, '__len__') and len(model.output) > 1:
            raise ValueError('Model "{}" has more than one output. DQN expects a model that has a single output.'.format(model))
        if model.output._keras_shape != (None, self.nb_actions):
            raise ValueError('Model output "{}" has invalid shape. DQN expects a model that has one dimension for each action, in this case {}.'.format(model.output, self.nb_actions))

        # Parameters.
        self.enable_double_dqn = enable_double_dqn
        self.enable_dueling_network = enable_dueling_network
        self.dueling_type = dueling_type
        if self.enable_dueling_network:
            # Rebuild the head of the network into a dueling head:
            # get the second last layer of the model, abandon the last layer.
            layer = model.layers[-2]
            nb_action = model.output._keras_shape[-1]
            # Layer y has a shape (nb_action+1,):
            #   y[:,0] represents V(s;theta)
            #   y[:,1:] represents A(s,a;theta)
            y = Dense(nb_action + 1, activation='linear')(layer.output)
            # Calculate Q(s,a;theta) according to the chosen dueling type:
            #   'avg':   Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta)))
            #   'max':   Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta)))
            #   'naive': Q(s,a;theta) = V(s;theta) + A(s,a;theta)
            if self.dueling_type == 'avg':
                outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True), output_shape=(nb_action,))(y)
            elif self.dueling_type == 'max':
                outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True), output_shape=(nb_action,))(y)
            elif self.dueling_type == 'naive':
                outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:], output_shape=(nb_action,))(y)
            else:
                assert False, "dueling_type must be one of {'avg','max','naive'}"
            model = Model(inputs=model.input, outputs=outputlayer)

        # Related objects.
        self.model = model
        if policy is None:
            policy = EpsGreedyQPolicy()
        if test_policy is None:
            test_policy = GreedyQPolicy()
        self.policy = policy
        self.test_policy = test_policy

        # State.
        self.reset_states()

    def get_config(self):
        """Return a serializable configuration dict, including the target model once compiled."""
        config = super(DQNAgent, self).get_config()
        config['enable_double_dqn'] = self.enable_double_dqn
        config['dueling_type'] = self.dueling_type
        config['enable_dueling_network'] = self.enable_dueling_network
        config['model'] = get_object_config(self.model)
        config['policy'] = get_object_config(self.policy)
        config['test_policy'] = get_object_config(self.test_policy)
        if self.compiled:
            config['target_model'] = get_object_config(self.target_model)
        return config

    def compile(self, optimizer, metrics=None):
        """Build the target model and the masked-loss trainable model, then compile them.

        BUGFIX: `metrics` used to default to a mutable `[]` that was then
        mutated in place via `metrics += [mean_q]`, so every call to
        `compile()` (across all agent instances) appended another `mean_q`
        to the shared default list — and also mutated any caller-supplied
        list. We now copy the list instead.
        """
        metrics = [] if metrics is None else list(metrics)
        metrics += [mean_q]  # register default metrics

        # We never train the target model, hence we can set the optimizer and loss arbitrarily.
        self.target_model = clone_model(self.model, self.custom_model_objects)
        self.target_model.compile(optimizer='sgd', loss='mse')
        self.model.compile(optimizer='sgd', loss='mse')

        # Compile model.
        if self.target_model_update < 1.:
            # We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
            updates = get_soft_target_model_updates(self.target_model, self.model, self.target_model_update)
            optimizer = AdditionalUpdatesOptimizer(optimizer, updates)

        def clipped_masked_error(args):
            # Huber loss restricted (via the mask) to the action that was actually taken.
            y_true, y_pred, mask = args
            loss = huber_loss(y_true, y_pred, self.delta_clip)
            loss *= mask  # apply element-wise mask
            return K.sum(loss, axis=-1)

        # Create trainable model. The problem is that we need to mask the output since we only
        # ever want to update the Q values for a certain action. The way we achieve this is by
        # using a custom Lambda layer that computes the loss. This gives us the necessary flexibility
        # to mask out certain parameters by passing in multiple inputs to the Lambda layer.
        y_pred = self.model.output
        y_true = Input(name='y_true', shape=(self.nb_actions,))
        mask = Input(name='mask', shape=(self.nb_actions,))
        loss_out = Lambda(clipped_masked_error, output_shape=(1,), name='loss')([y_true, y_pred, mask])
        ins = [self.model.input] if type(self.model.input) is not list else self.model.input
        trainable_model = Model(inputs=ins + [y_true, mask], outputs=[loss_out, y_pred])
        assert len(trainable_model.output_names) == 2
        combined_metrics = {trainable_model.output_names[1]: metrics}
        losses = [
            lambda y_true, y_pred: y_pred,  # loss is computed in Lambda layer
            lambda y_true, y_pred: K.zeros_like(y_pred),  # we only include this for the metrics
        ]
        trainable_model.compile(optimizer=optimizer, loss=losses, metrics=combined_metrics)
        self.trainable_model = trainable_model

        self.compiled = True

    def load_weights(self, filepath):
        """Load online-model weights and copy them into the target model."""
        self.model.load_weights(filepath)
        self.update_target_model_hard()

    def save_weights(self, filepath, overwrite=False):
        """Save the online-model weights (the target model is derived from them)."""
        self.model.save_weights(filepath, overwrite=overwrite)

    def reset_states(self):
        """Clear the most recent observation/action and any recurrent model state."""
        self.recent_action = None
        self.recent_observation = None
        if self.compiled:
            self.model.reset_states()
            self.target_model.reset_states()

    def update_target_model_hard(self):
        """Copy the online-model weights into the target model verbatim."""
        self.target_model.set_weights(self.model.get_weights())

    def forward(self, observation):
        """Select an action for the given observation using the (test) policy."""
        # Select an action.
        state = self.memory.get_recent_state(observation)
        q_values = self.compute_q_values(state)
        if self.training:
            action = self.policy.select_action(q_values=q_values)
        else:
            action = self.test_policy.select_action(q_values=q_values)

        # Book-keeping.
        self.recent_observation = observation
        self.recent_action = action

        return action

    def backward(self, reward, terminal):
        """Store the latest experience and, if due, train on one sampled mini-batch.

        Returns the list of metric values (NaN outside of training steps).
        """
        # Store most recent experience in memory.
        if self.step % self.memory_interval == 0:
            self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
                               training=self.training)

        metrics = [np.nan for _ in self.metrics_names]
        if not self.training:
            # We're done here. No need to update the experience memory since we only use the working
            # memory to obtain the state over the most recent observations.
            return metrics

        # Train the network on a single stochastic batch.
        if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
            experiences = self.memory.sample(self.batch_size)
            assert len(experiences) == self.batch_size

            # Start by extracting the necessary parameters (we use a vectorized implementation).
            state0_batch = []
            reward_batch = []
            action_batch = []
            terminal1_batch = []
            state1_batch = []
            for e in experiences:
                state0_batch.append(e.state0)
                state1_batch.append(e.state1)
                reward_batch.append(e.reward)
                action_batch.append(e.action)
                terminal1_batch.append(0. if e.terminal1 else 1.)

            # Prepare and validate parameters.
            state0_batch = self.process_state_batch(state0_batch)
            state1_batch = self.process_state_batch(state1_batch)
            terminal1_batch = np.array(terminal1_batch)
            reward_batch = np.array(reward_batch)
            assert reward_batch.shape == (self.batch_size,)
            assert terminal1_batch.shape == reward_batch.shape
            assert len(action_batch) == len(reward_batch)

            # Compute Q values for mini-batch update.
            if self.enable_double_dqn:
                # According to the paper "Deep Reinforcement Learning with Double Q-learning"
                # (van Hasselt et al., 2015), in Double DQN, the online network predicts the actions
                # while the target network is used to estimate the Q value.
                q_values = self.model.predict_on_batch(state1_batch)
                assert q_values.shape == (self.batch_size, self.nb_actions)
                actions = np.argmax(q_values, axis=1)
                assert actions.shape == (self.batch_size,)
                # Now, estimate Q values using the target network but select the values with the
                # highest Q value wrt to the online model (as computed above).
                target_q_values = self.target_model.predict_on_batch(state1_batch)
                assert target_q_values.shape == (self.batch_size, self.nb_actions)
                q_batch = target_q_values[range(self.batch_size), actions]
            else:
                # Compute the q_values given state1, and extract the maximum for each sample in the batch.
                # We perform this prediction on the target_model instead of the model for reasons
                # outlined in Mnih (2015). In short: it makes the algorithm more stable.
                target_q_values = self.target_model.predict_on_batch(state1_batch)
                assert target_q_values.shape == (self.batch_size, self.nb_actions)
                q_batch = np.max(target_q_values, axis=1).flatten()
            assert q_batch.shape == (self.batch_size,)

            targets = np.zeros((self.batch_size, self.nb_actions))
            dummy_targets = np.zeros((self.batch_size,))
            masks = np.zeros((self.batch_size, self.nb_actions))

            # Compute r_t + gamma * max_a Q(s_t+1, a) and update the target targets accordingly,
            # but only for the affected output units (as given by action_batch).
            discounted_reward_batch = self.gamma * q_batch
            # Set discounted reward to zero for all states that were terminal.
            discounted_reward_batch *= terminal1_batch
            assert discounted_reward_batch.shape == reward_batch.shape
            Rs = reward_batch + discounted_reward_batch
            for idx, (target, mask, R, action) in enumerate(zip(targets, masks, Rs, action_batch)):
                target[action] = R  # update action with estimated accumulated reward
                dummy_targets[idx] = R
                mask[action] = 1.  # enable loss for this specific action
            targets = np.array(targets).astype('float32')
            masks = np.array(masks).astype('float32')

            # Finally, perform a single update on the entire batch. We use a dummy target since
            # the actual loss is computed in a Lambda layer that needs more complex input. However,
            # it is still useful to know the actual target to compute metrics properly.
            ins = [state0_batch] if type(self.model.input) is not list else state0_batch
            metrics = self.trainable_model.train_on_batch(ins + [targets, masks], [dummy_targets, targets])
            metrics = [metric for idx, metric in enumerate(metrics) if idx not in (1, 2)]  # throw away individual losses
            metrics += self.policy.metrics
            if self.processor is not None:
                metrics += self.processor.metrics

        if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
            self.update_target_model_hard()

        return metrics

    @property
    def layers(self):
        return self.model.layers[:]

    @property
    def metrics_names(self):
        # Throw away individual losses and replace output name since this is hidden from the user.
        assert len(self.trainable_model.output_names) == 2
        dummy_output_name = self.trainable_model.output_names[1]
        model_metrics = [name for idx, name in enumerate(self.trainable_model.metrics_names) if idx not in (1, 2)]
        model_metrics = [name.replace(dummy_output_name + '_', '') for name in model_metrics]

        names = model_metrics + self.policy.metrics_names[:]
        if self.processor is not None:
            names += self.processor.metrics_names[:]
        return names

    @property
    def policy(self):
        return self.__policy

    @policy.setter
    def policy(self, policy):
        self.__policy = policy
        self.__policy._set_agent(self)

    @property
    def test_policy(self):
        return self.__test_policy

    @test_policy.setter
    def test_policy(self, policy):
        self.__test_policy = policy
        self.__test_policy._set_agent(self)
class NAFLayer(Layer):
    """Keras layer computing the NAF advantage term A(s, a).

    Given the flattened lower-triangular entries `L_flat`, the greedy action
    `mu` and the taken action `a`, it builds the positive-definite matrix
    P = L * L^T (or a diagonal P in 'diag' mode) and returns
    A = -0.5 * (a - mu)^T * P * (a - mu) per batch element.
    """
    def __init__(self, nb_actions, mode='full', **kwargs):
        if mode not in ('full', 'diag'):
            # BUGFIX: the original formatted `self.mode`, which is not assigned
            # yet at this point, so an AttributeError masked the intended error.
            raise RuntimeError('Unknown mode "{}" in NAFLayer.'.format(mode))

        self.nb_actions = nb_actions
        self.mode = mode
        super(NAFLayer, self).__init__(**kwargs)

    def call(self, x, mask=None):
        # TODO: validate input shape
        assert (len(x) == 3)
        L_flat = x[0]
        mu = x[1]
        a = x[2]

        if self.mode == 'full':
            # Create L and L^T matrix, which we use to construct the positive-definite matrix P.
            L = None
            LT = None
            if K.backend() == 'theano':
                import theano.tensor as T
                import theano

                def fn(x, L_acc, LT_acc):
                    # Scatter the flat entries into a lower-triangular matrix and
                    # exponentiate the diagonal to keep it strictly positive.
                    x_ = K.zeros((self.nb_actions, self.nb_actions))
                    x_ = T.set_subtensor(x_[np.tril_indices(self.nb_actions)], x)
                    diag = K.exp(T.diag(x_)) + K.epsilon()
                    x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], diag)
                    return x_, x_.T

                outputs_info = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]
                results, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
                L, LT = results
            elif K.backend() == 'tensorflow':
                import tensorflow as tf

                # Number of elements in a triangular matrix.
                nb_elems = (self.nb_actions * self.nb_actions + self.nb_actions) // 2

                # Create mask for the diagonal elements in L_flat. This is used to exponentiate
                # only the diagonal elements, which is done before gathering.
                diag_indices = [0]
                for row in range(1, self.nb_actions):
                    diag_indices.append(diag_indices[-1] + (row + 1))
                diag_mask = np.zeros(1 + nb_elems)  # +1 for the leading zero
                diag_mask[np.array(diag_indices) + 1] = 1
                diag_mask = K.variable(diag_mask)

                # Add leading zero element to each element in the L_flat. We use this zero
                # element when gathering L_flat into a lower triangular matrix L.
                nb_rows = tf.shape(L_flat)[0]
                zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
                try:
                    # Old TF behavior.
                    L_flat = tf.concat(1, [zeros, L_flat])
                except TypeError:
                    # New TF behavior
                    L_flat = tf.concat([zeros, L_flat], 1)

                # Create mask that can be used to gather elements from L_flat and put them
                # into a lower triangular matrix.
                tril_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
                tril_mask[np.tril_indices(self.nb_actions)] = range(1, nb_elems + 1)

                # Finally, process each element of the batch.
                init = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]

                def fn(a, x):
                    # Exponentiate everything. This is much easier than only exponentiating
                    # the diagonal elements, and, usually, the action space is relatively low.
                    x_ = K.exp(x) + K.epsilon()
                    # Only keep the diagonal elements.
                    x_ *= diag_mask
                    # Add the original, non-diagonal elements.
                    x_ += x * (1. - diag_mask)
                    # Finally, gather everything into a lower triangular matrix.
                    L_ = tf.gather(x_, tril_mask)
                    return [L_, tf.transpose(L_)]

                tmp = tf.scan(fn, L_flat, initializer=init)
                if isinstance(tmp, (list, tuple)):
                    # TensorFlow 0.10 now returns a tuple of tensors.
                    L, LT = tmp
                else:
                    # Old TensorFlow < 0.10 returns a shared tensor.
                    L = tmp[:, 0, :, :]
                    LT = tmp[:, 1, :, :]
            else:
                raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
            assert L is not None
            assert LT is not None
            P = K.batch_dot(L, LT)
        elif self.mode == 'diag':
            if K.backend() == 'theano':
                import theano.tensor as T
                import theano

                def fn(x, P_acc):
                    x_ = K.zeros((self.nb_actions, self.nb_actions))
                    x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], x)
                    return x_

                outputs_info = [
                    K.zeros((self.nb_actions, self.nb_actions)),
                ]
                P, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
            elif K.backend() == 'tensorflow':
                import tensorflow as tf

                # Create mask that can be used to gather elements from L_flat and put them
                # into a diagonal matrix.
                diag_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
                diag_mask[np.diag_indices(self.nb_actions)] = range(1, self.nb_actions + 1)

                # Add leading zero element to each element in the L_flat. We use this zero
                # element when gathering L_flat into a lower triangular matrix L.
                nb_rows = tf.shape(L_flat)[0]
                zeros = tf.expand_dims(tf.tile(K.zeros((1,)), [nb_rows]), 1)
                try:
                    # Old TF behavior.
                    L_flat = tf.concat(1, [zeros, L_flat])
                except TypeError:
                    # New TF behavior
                    L_flat = tf.concat([zeros, L_flat], 1)

                # Finally, process each element of the batch.
                def fn(a, x):
                    x_ = tf.gather(x, diag_mask)
                    return x_

                P = tf.scan(fn, L_flat, initializer=K.zeros((self.nb_actions, self.nb_actions)))
            else:
                raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
        assert P is not None
        assert K.ndim(P) == 3

        # Combine a, mu and P into a scalar (over the batches). What we compute here is
        # -.5 * (a - mu)^T * P * (a - mu), where * denotes the dot-product. Unfortunately
        # TensorFlow handles vector * P slightly suboptimal, hence we convert the vectors to
        # 1xd/dx1 matrices and finally flatten the resulting 1x1 matrix into a scalar. All
        # operations happen over the batch size, which is dimension 0.
        prod = K.batch_dot(K.expand_dims(a - mu, 1), P)
        prod = K.batch_dot(prod, K.expand_dims(a - mu, -1))
        A = -.5 * K.batch_flatten(prod)
        assert K.ndim(A) == 2
        return A

    def get_output_shape_for(self, input_shape):
        # Keras 1 compatibility shim; delegates to the Keras 2 name.
        return self.compute_output_shape(input_shape)

    def compute_output_shape(self, input_shape):
        """Validate the three input shapes (L, mu, a) and return (batch, 1)."""
        if len(input_shape) != 3:
            raise RuntimeError("Expects 3 inputs: L, mu, a")
        for i, shape in enumerate(input_shape):
            if len(shape) != 2:
                raise RuntimeError("Input {} has {} dimensions but should have 2".format(i, len(shape)))
        assert self.mode in ('full', 'diag')
        if self.mode == 'full':
            expected_elements = (self.nb_actions * self.nb_actions + self.nb_actions) // 2
        elif self.mode == 'diag':
            expected_elements = self.nb_actions
        else:
            expected_elements = None
        assert expected_elements is not None
        if input_shape[0][1] != expected_elements:
            # BUGFIX: the original format string had two placeholders but was
            # given only one argument, raising IndexError instead of the error.
            raise RuntimeError(
                "Input 0 (L) should have {} elements but has {}".format(expected_elements, input_shape[0][1]))
        if input_shape[1][1] != self.nb_actions:
            raise RuntimeError(
                "Input 1 (mu) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1]))
        if input_shape[2][1] != self.nb_actions:
            # BUGFIX: the original reported input_shape[1][1] (mu's size) here
            # instead of the offending input_shape[2][1].
            raise RuntimeError(
                "Input 2 (action) should have {} elements but has {}".format(self.nb_actions, input_shape[2][1]))
        return input_shape[0][0], 1
class NAFAgent(AbstractDQNAgent):
    """Normalized Advantage Function (NAF) agent for continuous action spaces.

    Combines three networks — V_model (state value), mu_model (greedy action)
    and L_model (lower-triangular parameters of the advantage quadratic) —
    into a single Q model via `NAFLayer`.
    """
    def __init__(self, V_model, L_model, mu_model, random_process=None,
                 covariance_mode='full', *args, **kwargs):
        super(NAFAgent, self).__init__(*args, **kwargs)

        # TODO: Validate (important) input.

        # Parameters.
        self.random_process = random_process
        self.covariance_mode = covariance_mode

        # Related objects.
        self.V_model = V_model
        self.L_model = L_model
        self.mu_model = mu_model

        # State.
        self.reset_states()

    def update_target_model_hard(self):
        """Copy the online V-model weights into the target V-model verbatim."""
        self.target_V_model.set_weights(self.V_model.get_weights())

    def load_weights(self, filepath):
        self.combined_model.load_weights(filepath)  # updates V, L and mu model since the weights are shared
        self.update_target_model_hard()

    def save_weights(self, filepath, overwrite=False):
        self.combined_model.save_weights(filepath, overwrite=overwrite)

    def reset_states(self):
        """Reset exploration noise, recent bookkeeping and recurrent model state."""
        if self.random_process is not None:
            self.random_process.reset_states()
        self.recent_action = None
        self.recent_observation = None
        if self.compiled:
            self.combined_model.reset_states()
            self.target_V_model.reset_states()

    def compile(self, optimizer, metrics=None):
        """Build and compile the combined Q model and the target V model.

        BUGFIX: `metrics` used to default to a mutable `[]` that was mutated
        in place via `+=`, so every call to `compile()` appended another
        `mean_q` to the shared default list (and to caller-supplied lists).
        We now copy the list instead.
        """
        metrics = [] if metrics is None else list(metrics)
        metrics += [mean_q]  # register default metrics

        # Create target V model. We don't need targets for mu or L.
        self.target_V_model = clone_model(self.V_model, self.custom_model_objects)
        self.target_V_model.compile(optimizer='sgd', loss='mse')

        # Build combined model.
        a_in = Input(shape=(self.nb_actions,), name='action_input')
        if type(self.V_model.input) is list:
            observation_shapes = [i._keras_shape[1:] for i in self.V_model.input]
        else:
            observation_shapes = [self.V_model.input._keras_shape[1:]]
        os_in = [Input(shape=shape, name='observation_input_{}'.format(idx)) for idx, shape in enumerate(observation_shapes)]
        L_out = self.L_model([a_in] + os_in)
        V_out = self.V_model(os_in)

        mu_out = self.mu_model(os_in)
        A_out = NAFLayer(self.nb_actions, mode=self.covariance_mode)([L_out, mu_out, a_in])
        # Q(s, a) = A(s, a) + V(s)
        combined_out = Lambda(lambda x: x[0] + x[1], output_shape=lambda x: x[0])([A_out, V_out])
        combined = Model(inputs=[a_in] + os_in, outputs=[combined_out])

        # Compile combined model.
        if self.target_model_update < 1.:
            # We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
            updates = get_soft_target_model_updates(self.target_V_model, self.V_model, self.target_model_update)
            optimizer = AdditionalUpdatesOptimizer(optimizer, updates)

        def clipped_error(y_true, y_pred):
            return K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)

        combined.compile(loss=clipped_error, optimizer=optimizer, metrics=metrics)
        self.combined_model = combined

        self.compiled = True

    def select_action(self, state):
        """Return the greedy action for `state`, plus exploration noise while training."""
        batch = self.process_state_batch([state])
        action = self.mu_model.predict_on_batch(batch).flatten()
        assert action.shape == (self.nb_actions,)

        # Apply noise, if a random process is set.
        if self.training and self.random_process is not None:
            noise = self.random_process.sample()
            assert noise.shape == action.shape
            action += noise

        return action

    def forward(self, observation):
        # Select an action.
        state = self.memory.get_recent_state(observation)
        action = self.select_action(state)

        # Book-keeping.
        self.recent_observation = observation
        self.recent_action = action

        return action

    def backward(self, reward, terminal):
        """Store the latest experience and, if due, train on one sampled mini-batch.

        Returns the list of metric values (NaN outside of training steps).
        """
        # Store most recent experience in memory.
        if self.step % self.memory_interval == 0:
            self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
                               training=self.training)

        metrics = [np.nan for _ in self.metrics_names]
        if not self.training:
            # We're done here. No need to update the experience memory since we only use the working
            # memory to obtain the state over the most recent observations.
            return metrics

        # Train the network on a single stochastic batch.
        if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
            experiences = self.memory.sample(self.batch_size)
            assert len(experiences) == self.batch_size

            # Start by extracting the necessary parameters (we use a vectorized implementation).
            state0_batch = []
            reward_batch = []
            action_batch = []
            terminal1_batch = []
            state1_batch = []
            for e in experiences:
                state0_batch.append(e.state0)
                state1_batch.append(e.state1)
                reward_batch.append(e.reward)
                action_batch.append(e.action)
                terminal1_batch.append(0. if e.terminal1 else 1.)

            # Prepare and validate parameters.
            state0_batch = self.process_state_batch(state0_batch)
            state1_batch = self.process_state_batch(state1_batch)
            terminal1_batch = np.array(terminal1_batch)
            reward_batch = np.array(reward_batch)
            action_batch = np.array(action_batch)
            assert reward_batch.shape == (self.batch_size,)
            assert terminal1_batch.shape == reward_batch.shape
            assert action_batch.shape == (self.batch_size, self.nb_actions)

            # Compute Q values for mini-batch update.
            q_batch = self.target_V_model.predict_on_batch(state1_batch).flatten()
            assert q_batch.shape == (self.batch_size,)

            # Compute discounted reward.
            discounted_reward_batch = self.gamma * q_batch
            # Set discounted reward to zero for all states that were terminal.
            discounted_reward_batch *= terminal1_batch
            assert discounted_reward_batch.shape == reward_batch.shape
            Rs = reward_batch + discounted_reward_batch
            assert Rs.shape == (self.batch_size,)

            # Finally, perform a single update on the entire batch.
            if len(self.combined_model.input) == 2:
                metrics = self.combined_model.train_on_batch([action_batch, state0_batch], Rs)
            else:
                metrics = self.combined_model.train_on_batch([action_batch] + state0_batch, Rs)
            if self.processor is not None:
                metrics += self.processor.metrics

        if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
            self.update_target_model_hard()

        return metrics

    @property
    def layers(self):
        return self.combined_model.layers[:]

    def get_config(self):
        """Return a serializable configuration dict, including the target V model once compiled."""
        config = super(NAFAgent, self).get_config()
        config['V_model'] = get_object_config(self.V_model)
        config['mu_model'] = get_object_config(self.mu_model)
        config['L_model'] = get_object_config(self.L_model)
        if self.compiled:
            config['target_V_model'] = get_object_config(self.target_V_model)
        return config

    @property
    def metrics_names(self):
        names = self.combined_model.metrics_names[:]
        if self.processor is not None:
            names += self.processor.metrics_names[:]
        return names
# Aliases
# Backwards-compatible alias: the NAF agent is also exposed under the name
# "continuous DQN agent".
ContinuousDQNAgent = NAFAgent
| 33,631 | 44.204301 | 250 | py |
rl-perturbed-reward | rl-perturbed-reward-master/gym-control/rl/agents/cem.py | from __future__ import division
from collections import deque
from copy import deepcopy
import numpy as np
import keras.backend as K
from keras.models import Model
from rl.core import Agent
from rl.util import *
class CEMAgent(Agent):
    """Cross-entropy method (CEM) agent.

    Maintains a Gaussian distribution over the flattened weights of `model`
    (`theta` holds the per-weight means followed by the per-weight standard
    deviations). Each episode samples a weight vector from this distribution;
    periodically the distribution is refit to the elite fraction of sampled
    weight vectors with the highest episode rewards. Optional decaying noise
    (`noise_ampl`, `noise_decay_const`) keeps a minimum standard deviation to
    avoid premature convergence.
    """
    def __init__(self, model, nb_actions, memory, batch_size=50, nb_steps_warmup=1000,
                 train_interval=50, elite_frac=0.05, memory_interval=1, theta_init=None,
                 noise_decay_const=0.0, noise_ampl=0.0, **kwargs):
        super(CEMAgent, self).__init__(**kwargs)

        # Parameters.
        self.nb_actions = nb_actions
        self.batch_size = batch_size
        self.elite_frac = elite_frac
        # Number of elite samples kept when refitting the distribution.
        self.num_best = int(self.batch_size * self.elite_frac)
        self.nb_steps_warmup = nb_steps_warmup
        self.train_interval = train_interval
        self.memory_interval = memory_interval

        # if using noisy CEM, the minimum standard deviation will be ampl * exp (- decay_const * step )
        self.noise_decay_const = noise_decay_const
        self.noise_ampl = noise_ampl

        # default initial mean & cov, override this by passing an theta_init argument
        self.init_mean = 0.0
        self.init_stdev = 1.0

        # Related objects.
        self.memory = memory
        self.model = model
        # Shapes/sizes of each weight tensor, used to flatten/unflatten weights.
        self.shapes = [w.shape for w in model.get_weights()]
        self.sizes = [w.size for w in model.get_weights()]
        self.num_weights = sum(self.sizes)

        # store the best result seen during training, as a tuple (reward, flat_weights)
        self.best_seen = (-np.inf, np.zeros(self.num_weights))

        # theta = concat(means, stdevs) over the flattened weights.
        self.theta = np.zeros(self.num_weights*2)
        self.update_theta(theta_init)

        # State.
        self.episode = 0
        self.compiled = False
        self.reset_states()

    def compile(self):
        # Optimizer/loss are irrelevant: CEM never backpropagates, it only
        # needs a compiled model for prediction.
        self.model.compile(optimizer='sgd', loss='mse')
        self.compiled = True

    def load_weights(self, filepath):
        self.model.load_weights(filepath)

    def save_weights(self, filepath, overwrite=False):
        self.model.save_weights(filepath, overwrite=overwrite)

    def get_weights_flat(self, weights):
        """Flatten a list of weight tensors into a single 1-D vector."""
        weights_flat = np.zeros(self.num_weights)

        pos = 0
        for i_layer, size in enumerate(self.sizes):
            weights_flat[pos:pos+size] = weights[i_layer].flatten()
            pos += size
        return weights_flat

    def get_weights_list(self, weights_flat):
        """Unflatten a 1-D weight vector back into per-layer tensors."""
        weights = []
        pos = 0
        for i_layer, size in enumerate(self.sizes):
            arr = weights_flat[pos:pos+size].reshape(self.shapes[i_layer])
            weights.append(arr)
            pos += size
        return weights

    def reset_states(self):
        self.recent_observation = None
        self.recent_action = None

    def select_action(self, state, stochastic=False):
        """Pick an action; sample from the softmax over outputs while training."""
        batch = np.array([state])
        if self.processor is not None:
            batch = self.processor.process_state_batch(batch)

        action = self.model.predict_on_batch(batch).flatten()
        if stochastic or self.training:
            return np.random.choice(np.arange(self.nb_actions), p=np.exp(action) / np.sum(np.exp(action)))
        return np.argmax(action)

    def update_theta(self, theta):
        """Replace the sampling distribution; fall back to the defaults if theta is None."""
        if (theta is not None):
            assert theta.shape == self.theta.shape, "Invalid theta, shape is {0} but should be {1}".format(theta.shape, self.theta.shape)
            assert (not np.isnan(theta).any()), "Invalid theta, NaN encountered"
            assert (theta[self.num_weights:] >= 0.).all(), "Invalid theta, standard deviations must be nonnegative"
            self.theta = theta
        else:
            means = np.ones(self.num_weights) * self.init_mean
            stdevs = np.ones(self.num_weights) * self.init_stdev
            self.theta = np.hstack((means, stdevs))

    def choose_weights(self):
        """Sample a new flat weight vector from theta and install it in the model."""
        mean = self.theta[:self.num_weights]
        std = self.theta[self.num_weights:]
        weights_flat = std * np.random.randn(self.num_weights) + mean

        sampled_weights = self.get_weights_list(weights_flat)
        self.model.set_weights(sampled_weights)

    def forward(self, observation):
        # Select an action.
        state = self.memory.get_recent_state(observation)
        action = self.select_action(state)

        # Book-keeping.
        self.recent_observation = observation
        self.recent_action = action

        return action

    @property
    def layers(self):
        return self.model.layers[:]

    def backward(self, reward, terminal):
        """Record the reward; at episode ends, periodically refit theta to the elites."""
        # Store most recent experience in memory.
        if self.step % self.memory_interval == 0:
            self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
                               training=self.training)

        metrics = [np.nan for _ in self.metrics_names]
        if not self.training:
            # We're done here. No need to update the experience memory since we only use the working
            # memory to obtain the state over the most recent observations.
            return metrics

        if terminal:
            params = self.get_weights_flat(self.model.get_weights())
            self.memory.finalize_episode(params)

            if self.step > self.nb_steps_warmup and self.episode % self.train_interval == 0:
                params, reward_totals = self.memory.sample(self.batch_size)
                # Indices of the num_best highest-reward samples (ascending order).
                best_idx = np.argsort(np.array(reward_totals))[-self.num_best:]
                best = np.vstack([params[i] for i in best_idx])

                # Track the single best sample ever seen (restored in _on_train_end).
                if reward_totals[best_idx[-1]] > self.best_seen[0]:
                    self.best_seen = (reward_totals[best_idx[-1]], params[best_idx[-1]])

                metrics = [np.mean(np.array(reward_totals)[best_idx])]
                if self.processor is not None:
                    metrics += self.processor.metrics

                # Refit the Gaussian to the elites, with a decaying noise floor
                # on the standard deviation.
                min_std = self.noise_ampl * np.exp(-self.step * self.noise_decay_const)

                mean = np.mean(best, axis=0)
                std = np.std(best, axis=0) + min_std
                new_theta = np.hstack((mean, std))
                self.update_theta(new_theta)
            self.choose_weights()
            self.episode += 1
        return metrics

    def _on_train_end(self):
        # Install the best weight vector observed during the whole run.
        self.model.set_weights(self.get_weights_list(self.best_seen[1]))

    @property
    def metrics_names(self):
        names = ['mean_best_reward']
        if self.processor is not None:
            names += self.processor.metrics_names[:]
        return names
| 6,679 | 36.740113 | 136 | py |
text-classification-cnn-rnn | text-classification-cnn-rnn-master/predict.py | # coding: utf-8
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow.contrib.keras as kr
from cnn_model import TCNNConfig, TextCNN
from data.cnews_loader import read_category, read_vocab
# On Python 2 `unicode` is a builtin; on Python 3 it is absent, so alias it
# to `str` to keep the code below version-agnostic.
try:
    bool(type(unicode))
except NameError:
    unicode = str

base_dir = 'data/cnews'
vocab_dir = os.path.join(base_dir, 'cnews.vocab.txt')

save_dir = 'checkpoints/textcnn'
save_path = os.path.join(save_dir, 'best_validation')  # path of the best validation checkpoint
class CnnModel:
    """Wraps a trained TextCNN model for single-text category prediction.

    Loads the vocabulary, builds the graph and restores the best saved
    checkpoint in a dedicated TensorFlow session.
    """
    def __init__(self):
        self.config = TCNNConfig()
        self.categories, self.cat_to_id = read_category()
        self.words, self.word_to_id = read_vocab(vocab_dir)
        self.config.vocab_size = len(self.words)
        self.model = TextCNN(self.config)

        self.session = tf.Session()
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess=self.session, save_path=save_path)  # restore the saved model

    def predict(self, message):
        """Return the predicted category name for a piece of text."""
        # Converting through `unicode` lets a model trained under either
        # Python 2 or Python 3 be used from either environment.
        content = unicode(message)
        # Characters missing from the vocabulary are silently dropped.
        data = [self.word_to_id[x] for x in content if x in self.word_to_id]

        feed_dict = {
            self.model.input_x: kr.preprocessing.sequence.pad_sequences([data], self.config.seq_length),
            self.model.keep_prob: 1.0
        }

        y_pred_cls = self.session.run(self.model.y_pred_cls, feed_dict=feed_dict)
        return self.categories[y_pred_cls[0]]
if __name__ == '__main__':
    # Smoke test: classify two sample news snippets and print the predictions.
    cnn_model = CnnModel()
    test_demo = ['三星ST550以全新的拍摄方式超越了以往任何一款数码相机',
                 '热火vs骑士前瞻:皇帝回乡二番战 东部次席唾手可得新浪体育讯北京时间3月30日7:00']
    for i in test_demo:
        print(cnn_model.predict(i))
| 1,694 | 28.736842 | 104 | py |
text-classification-cnn-rnn | text-classification-cnn-rnn-master/data/cnews_loader.py | # coding: utf-8
import sys
from collections import Counter
import numpy as np
import tensorflow.keras as kr
# Python 2/3 compatibility flag. On Python 2, force UTF-8 as the default
# encoding (reloading `sys` restores the `setdefaultencoding` attribute
# that site.py normally removes).
if sys.version_info[0] > 2:
    is_py3 = True
else:
    reload(sys)
    sys.setdefaultencoding("utf-8")
    is_py3 = False
def native_word(word, encoding='utf-8'):
    """Encode *word* to bytes on Python 2; return it unchanged on Python 3.

    Useful when running a model trained under Python 3 from Python 2.
    """
    return word if is_py3 else word.encode(encoding)
def native_content(content):
    """Decode raw content from UTF-8 on Python 2; pass it through on Python 3."""
    return content if is_py3 else content.decode('utf-8')
def open_file(filename, mode='r'):
    """Open *filename* for text I/O, handling the Python 2/3 encoding split.

    mode: 'r' or 'w' for read or write.
    """
    if not is_py3:
        # Python 2's builtin open has no `encoding` parameter.
        return open(filename, mode)
    return open(filename, mode, encoding='utf-8', errors='ignore')
def read_file(filename):
    """Read a dataset file of '<label>\\t<content>' lines.

    Returns (contents, labels) where each content is a list of characters.
    Malformed lines (missing/extra tab or empty content) are skipped.
    """
    contents, labels = [], []
    with open_file(filename) as f:
        for line in f:
            # A well-formed line is "<label>\t<content>". Skip anything else
            # instead of aborting the whole load. The original bare `except`
            # also swallowed KeyboardInterrupt/SystemExit and hid unrelated
            # bugs; catch only the unpack failure.
            try:
                label, content = line.strip().split('\t')
            except ValueError:
                continue
            if content:
                contents.append(list(native_content(content)))
                labels.append(native_content(label))
    return contents, labels
def build_vocab(train_dir, vocab_dir, vocab_size=5000):
    """Build a character vocabulary from the training set and store it.

    The (vocab_size - 1) most frequent characters are kept and '<PAD>' is
    prepended (id 0) so texts can later be padded to a common length.
    """
    data_train, _ = read_file(train_dir)

    all_data = []
    for content in data_train:
        all_data.extend(content)

    counter = Counter(all_data)
    count_pairs = counter.most_common(vocab_size - 1)
    words, _ = list(zip(*count_pairs))
    # Add a <PAD> token used to pad all texts to the same length.
    words = ['<PAD>'] + list(words)
    # BUGFIX: the original wrote via an unclosed `open_file(...).write(...)`,
    # leaking the handle and relying on GC to flush; `with` guarantees both.
    with open_file(vocab_dir, mode='w') as f:
        f.write('\n'.join(words) + '\n')
def read_vocab(vocab_dir):
    """Load the vocabulary file; return (words, word_to_id)."""
    # words = open_file(vocab_dir).read().strip().split('\n')
    with open_file(vocab_dir) as fp:
        # On Python 2 every entry is converted to unicode.
        words = [native_content(line.strip()) for line in fp.readlines()]
    word_to_id = {word: idx for idx, word in enumerate(words)}
    return words, word_to_id
def read_category():
    """Return the fixed list of news categories and a category -> id mapping."""
    categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
    categories = [native_content(x) for x in categories]
    cat_to_id = {cat: idx for idx, cat in enumerate(categories)}
    return categories, cat_to_id
def to_words(content, words):
    """Translate a sequence of token ids back into the corresponding text."""
    chars = (words[token_id] for token_id in content)
    return ''.join(chars)
def process_file(filename, word_to_id, cat_to_id, max_length=600):
    """Convert a labelled text file into padded id sequences and one-hot labels."""
    contents, labels = read_file(filename)
    # Map each character to its vocabulary id, dropping OOV characters.
    data_id = [[word_to_id[ch] for ch in content if ch in word_to_id]
               for content in contents]
    label_id = [cat_to_id[label] for label in labels]
    # 使用keras提供的pad_sequences来将文本pad为固定长度
    x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)
    y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))  # 将标签转换为one-hot表示
    return x_pad, y_pad
def batch_iter(x, y, batch_size=64):
    """Yield mini-batches of (x, y) in a freshly shuffled order."""
    n_samples = len(x)
    n_batches = int((n_samples - 1) / batch_size) + 1
    # A single shared permutation keeps x and y aligned after shuffling.
    perm = np.random.permutation(np.arange(n_samples))
    x_shuf = x[perm]
    y_shuf = y[perm]
    for batch_idx in range(n_batches):
        lo = batch_idx * batch_size
        hi = min(lo + batch_size, n_samples)
        yield x_shuf[lo:hi], y_shuf[lo:hi]
| 3,386 | 25.255814 | 92 | py |
alibi-detect | alibi-detect-master/setup.py | from setuptools import find_packages, setup
def readme():
    """Return the contents of README.md, used as the package long description."""
    with open("README.md", encoding="utf-8") as readme_file:
        contents = readme_file.read()
    return contents
# read version file (defines __version__ at module scope); a context manager
# closes the file handle, which `exec(open(...).read())` left to the GC
with open("alibi_detect/version.py") as version_file:
    exec(version_file.read())
# Optional dependency groups, installable via e.g. `pip install alibi-detect[torch]`.
extras_require = {
    "prophet": [
        "prophet>=1.1.0, <2.0.0",
    ],
    "torch": [
        "torch>=1.7.0, <1.14.0"
    ],
    # https://github.com/SeldonIO/alibi-detect/issues/375 and 387
    "tensorflow": [
        "tensorflow_probability>=0.8.0, <0.21.0",
        "tensorflow>=2.2.0, !=2.6.0, !=2.6.1, <2.13.0",  # https://github.com/SeldonIO/alibi-detect/issues/375 and 387
    ],
    "keops": [
        "pykeops>=2.0.0, <2.2.0",
        "torch>=1.7.0, <1.14.0"
    ],
    # "all" must be kept in sync as the union of the groups above.
    "all": [
        "prophet>=1.1.0, <2.0.0",
        "tensorflow_probability>=0.8.0, <0.21.0",
        "tensorflow>=2.2.0, !=2.6.0, !=2.6.1, <2.13.0",  # https://github.com/SeldonIO/alibi-detect/issues/375 and 387
        "pykeops>=2.0.0, <2.2.0",
        "torch>=1.7.0, <1.14.0"
    ],
}
setup(
    name="alibi-detect",
    author="Seldon Technologies Ltd.",
    author_email="hello@seldon.io",
    version=__version__,  # type: ignore # noqa F821
    description="Algorithms for outlier detection, concept drift and metrics.",
    long_description=readme(),
    long_description_content_type="text/markdown",
    url="https://github.com/SeldonIO/alibi-detect",
    license="Apache 2.0",
    packages=find_packages(),
    include_package_data=True,
    python_requires=">=3.8",
    # Core runtime dependencies; deep-learning backends live in extras_require.
    # lower bounds based on Debian Stable versions where available
    install_requires=[
        "matplotlib>=3.0.0, <4.0.0",
        "numpy>=1.16.2, <2.0.0",
        "pandas>=1.0.0, <3.0.0",
        "Pillow>=5.4.1, <11.0.0",
        "opencv-python>=3.2.0, <5.0.0",
        "scipy>=1.3.0, <2.0.0",
        'scikit-image>=0.14.2, !=0.17.1, <0.22',  # https://github.com/SeldonIO/alibi/issues/215
        "scikit-learn>=0.20.2, <2.0.0",
        "transformers>=4.0.0, <5.0.0",
        "dill>=0.3.0, <0.4.0",
        "tqdm>=4.28.1, <5.0.0",
        "requests>=2.21.0, <3.0.0",
        "pydantic>=1.8.0, <2.0.0",
        "toml>=0.10.1, <1.0.0",  # STC, see https://discuss.python.org/t/adopting-recommending-a-toml-parser/4068
        "catalogue>=2.0.0, <3.0.0",
        "numba>=0.50.0, !=0.54.0, <0.58.0",  # Avoid 0.54 due to: https://github.com/SeldonIO/alibi/issues/466
        "typing-extensions>=3.7.4.3"
    ],
    extras_require=extras_require,
    test_suite="tests",
    zip_safe=False,
    classifiers=[
        "Intended Audience :: Science/Research",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Scientific/Engineering",
    ],
)
| 2,931 | 33.494118 | 118 | py |
alibi-detect | alibi-detect-master/testing/test_notebooks.py | """
This script is an example of using `jupytext` to execute notebooks for testing instead of relying on `nbmake`
plugin. This approach may be more flexible if our requirements change in the future.
"""
import glob
from pathlib import Path
import shutil
import pytest
from jupytext.cli import jupytext
# prophet is an optional dependency (unavailable e.g. on Windows CI); the
# prophet example notebook is excluded below when the import fails.
try:
    from fbprophet import Prophet  # noqa F401
    PROPHET_INSTALLED = True
except ImportError:
    PROPHET_INSTALLED = False
# Set of all example notebooks
# NOTE: we specifically get only the name of the notebook not the full path as we want to
# use these as variables on the command line for `pytest` for the workflow executing only
# changed notebooks. `pytest` does not allow `/` as part of the test name for the -k argument.
# This also means that the approach is limited to all notebooks being in the `NOTEBOOK_DIR`
# top-level path.
NOTEBOOK_DIR = 'doc/source/examples'
ALL_NOTEBOOKS = {Path(x).name for x in glob.glob(str(Path(NOTEBOOK_DIR).joinpath('*.ipynb')))}
# The following set includes notebooks which are not to be executed during notebook tests.
# These are typically those that would take too long to run in a CI environment or impractical
# due to other dependencies (e.g. downloading large datasets
EXCLUDE_NOTEBOOKS = {
    # the following are all long-running
    'cd_distillation_cifar10.ipynb',
    'cd_ks_cifar10.ipynb',
    'cd_mmd_cifar10.ipynb',
    'od_llr_genome.ipynb',
    'od_llr_mnist.ipynb',
    'od_seq2seq_synth.ipynb',
    'cd_context_20newsgroup.ipynb',
    'cd_context_ecg.ipynb',
    'cd_text_imdb.ipynb',
    'cd_mmd_keops.ipynb',
    # the following requires a k8s cluster
    'alibi_detect_deploy.ipynb',
    # the following require downloading large datasets
    'cd_online_camelyon.ipynb',
    'cd_text_amazon.ipynb',
    # the following require complex dependencies
    'cd_mol.ipynb',  # complex to install pytorch-geometric
    # the following require remote artefacts to be updated
    'ad_ae_cifar10.ipynb',  # bad marshal data error when fetching cifar10-resnet56 model
}
if not PROPHET_INSTALLED:
    EXCLUDE_NOTEBOOKS.add('od_prophet_weather.ipynb')  # Exclude if fbprophet not installed i.e. on Windows
# Notebooks actually executed: everything discovered minus the exclusions above.
EXECUTE_NOTEBOOKS = ALL_NOTEBOOKS - EXCLUDE_NOTEBOOKS
@pytest.mark.timeout(600)
@pytest.mark.parametrize("notebook", EXECUTE_NOTEBOOKS)
def test_notebook_execution(notebook, tmp_path):
    """Copy a notebook into a scratch directory and execute it via jupytext."""
    source_path = Path(NOTEBOOK_DIR, notebook)
    # Execute from a clean temp directory so any artefacts the notebook
    # saves or loads do not pollute the repository checkout.
    scratch_path = tmp_path.joinpath(notebook)
    shutil.copy(source_path, scratch_path)
    jupytext(args=[str(scratch_path), "--execute"])
| 2,694 | 38.632353 | 109 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/base.py | from alibi_detect.utils.missing_optional_dependency import import_optional
from typing import Union
from typing_extensions import Literal, Protocol, runtime_checkable
# Use Protocols instead of base classes for the backend associated objects. This is a bit more flexible and allows us to
# avoid the torch/tensorflow imports in the base class.
@runtime_checkable
class TransformProtocol(Protocol):
    """Protocol for transformer objects.

    The :py:obj:`~alibi_detect.od.pytorch.ensemble.BaseTransformTorch` object provides abstract methods for
    objects that map between `torch` tensors. This protocol models the interface of the `BaseTransformTorch`
    class.
    """
    def transform(self, x):
        """Map the input `x` to its transformed representation."""
        pass
@runtime_checkable
class FittedTransformProtocol(TransformProtocol, Protocol):
    """Protocol for fitted transformer objects.

    This protocol models the joint interface of the :py:obj:`~alibi_detect.od.pytorch.ensemble.BaseTransformTorch`
    class and the :py:obj:`~alibi_detect.od.pytorch.ensemble.FitMixinTorch` class. These objects are transforms that
    require to be fit."""
    def fit(self, x_ref):
        """Fit the transform on reference data `x_ref`."""
        pass

    def set_fitted(self):
        """Mark the transform as fitted."""
        pass

    def check_fitted(self):
        """Raise if the transform has not been fitted."""
        pass
# Type aliases for the arguments accepted by get_normalizer/get_aggregator below.
TransformProtocolType = Union[TransformProtocol, FittedTransformProtocol]
NormalizerLiterals = Literal['PValNormalizer', 'ShiftAndScaleNormalizer']
AggregatorLiterals = Literal['TopKAggregator', 'AverageAggregator',
                             'MaxAggregator', 'MinAggregator']
# The torch-backed ensemble classes are imported lazily via import_optional so
# that torch remains an optional dependency of this module.
PValNormalizer, ShiftAndScaleNormalizer, TopKAggregator, AverageAggregator, \
    MaxAggregator, MinAggregator = import_optional(
        'alibi_detect.od.pytorch.ensemble',
        ['PValNormalizer', 'ShiftAndScaleNormalizer', 'TopKAggregator',
         'AverageAggregator', 'MaxAggregator', 'MinAggregator']
    )
def get_normalizer(normalizer: Union[TransformProtocolType, NormalizerLiterals]) -> TransformProtocol:
    """Resolve `normalizer` to a transform instance.

    Parameters
    ----------
    normalizer
        Either an already-constructed transform (returned unchanged) or the
        name of a known normalizer, which is instantiated with no arguments.

    Raises
    ------
    NotImplementedError
        If `normalizer` is a string that does not name a known normalizer.
    """
    if isinstance(normalizer, str):
        try:
            # Index with [] rather than .get(): .get returns None for unknown
            # names (dict.get never raises KeyError), so the except clause was
            # unreachable and unknown names crashed later with
            # "TypeError: 'NoneType' object is not callable".
            return {
                'PValNormalizer': PValNormalizer,
                'ShiftAndScaleNormalizer': ShiftAndScaleNormalizer,
            }[normalizer]()
        except KeyError:
            raise NotImplementedError(f'Normalizer {normalizer} not implemented.')
    return normalizer
def get_aggregator(aggregator: Union[TransformProtocol, AggregatorLiterals]) -> TransformProtocol:
    """Resolve `aggregator` to a transform instance.

    Parameters
    ----------
    aggregator
        Either an already-constructed transform (returned unchanged) or the
        name of a known aggregator, which is instantiated with no arguments.

    Raises
    ------
    NotImplementedError
        If `aggregator` is a string that does not name a known aggregator.
    """
    if isinstance(aggregator, str):
        try:
            # Index with [] rather than .get(): .get returns None for unknown
            # names (dict.get never raises KeyError), so the except clause was
            # unreachable and unknown names crashed later with
            # "TypeError: 'NoneType' object is not callable".
            return {
                'TopKAggregator': TopKAggregator,
                'AverageAggregator': AverageAggregator,
                'MaxAggregator': MaxAggregator,
                'MinAggregator': MinAggregator,
            }[aggregator]()
        except KeyError:
            raise NotImplementedError(f'Aggregator {aggregator} not implemented.')
    return aggregator
| 2,818 | 36.092105 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/vaegmm.py | import logging
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Callable, Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import VAEGMM, eucl_cosim_features
from alibi_detect.models.tensorflow.gmm import gmm_energy, gmm_params
from alibi_detect.models.tensorflow.losses import loss_vaegmm
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
logger = logging.getLogger(__name__)
class OutlierVAEGMM(BaseDetector, FitMixin, ThresholdMixin):
    """Outlier detector combining a variational autoencoder with a Gaussian
    mixture density network fit on the latent space (VAEGMM)."""
    def __init__(self,
                 threshold: float = None,
                 vaegmm: tf.keras.Model = None,
                 encoder_net: tf.keras.Model = None,
                 decoder_net: tf.keras.Model = None,
                 gmm_density_net: tf.keras.Model = None,
                 n_gmm: int = None,
                 latent_dim: int = None,
                 samples: int = 10,
                 beta: float = 1.,
                 recon_features: Callable = eucl_cosim_features,
                 data_type: str = None
                 ) -> None:
        """
        VAEGMM-based outlier detector.

        Parameters
        ----------
        threshold
            Threshold used for outlier score to determine outliers.
        vaegmm
            A trained tf.keras model if available.
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class if no 'vaegmm' is specified.
        decoder_net
            Layers for the decoder wrapped in a tf.keras.Sequential class if no 'vaegmm' is specified.
        gmm_density_net
            Layers for the GMM network wrapped in a tf.keras.Sequential class.
        n_gmm
            Number of components in GMM.
        latent_dim
            Dimensionality of the latent space.
        samples
            Number of samples sampled to evaluate each instance.
        beta
            Beta parameter for KL-divergence loss term.
        recon_features
            Function to extract features from the reconstructed instance by the decoder.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()
        if threshold is None:
            logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
        self.threshold = threshold
        self.samples = samples
        # check if model can be loaded, otherwise initialize VAEGMM model
        if isinstance(vaegmm, tf.keras.Model):
            self.vaegmm = vaegmm
        elif (isinstance(encoder_net, tf.keras.Sequential) and
                isinstance(decoder_net, tf.keras.Sequential) and
                isinstance(gmm_density_net, tf.keras.Sequential)):
            self.vaegmm = VAEGMM(encoder_net, decoder_net, gmm_density_net, n_gmm,
                                 latent_dim, recon_features=recon_features, beta=beta)
        else:
            raise TypeError('No valid format detected for `vaegmm` (tf.keras.Model) '
                            'or `encoder_net`, `decoder_net` and `gmm_density_net` (tf.keras.Sequential).')
        # set metadata
        self.meta['detector_type'] = 'outlier'
        self.meta['data_type'] = data_type
        self.meta['online'] = False
        # GMM parameters; populated at the end of fit()
        self.phi, self.mu, self.cov, self.L, self.log_det_cov = None, None, None, None, None

    def fit(self,
            X: np.ndarray,
            loss_fn: tf.keras.losses = loss_vaegmm,
            w_recon: float = 1e-7,
            w_energy: float = .1,
            w_cov_diag: float = .005,
            optimizer: OptimizerTF = tf.keras.optimizers.Adam,
            cov_elbo: dict = dict(sim=.05),
            epochs: int = 20,
            batch_size: int = 64,
            verbose: bool = True,
            log_metric: Tuple[str, "tf.keras.metrics"] = None,
            callbacks: tf.keras.callbacks = None,
            ) -> None:
        """
        Train VAEGMM model.

        Parameters
        ----------
        X
            Training batch.
        loss_fn
            Loss function used for training.
        w_recon
            Weight on elbo loss term if default `loss_vaegmm`.
        w_energy
            Weight on sample energy loss term if default `loss_vaegmm` loss fn is used.
        w_cov_diag
            Weight on covariance regularizing loss term if default `loss_vaegmm` loss fn is used.
        optimizer
            Optimizer used for training.
        cov_elbo
            Dictionary with covariance matrix options in case the elbo loss function is used.
            Either use the full covariance matrix inferred from X (dict(cov_full=None)),
            only the variance (dict(cov_diag=None)) or a float representing the same standard deviation
            for each feature (e.g. dict(sim=.05)).
        epochs
            Number of training epochs.
        batch_size
            Batch size used for training.
        verbose
            Whether to print training progress.
        log_metric
            Additional metrics whose progress will be displayed if verbose equals True.
        callbacks
            Callbacks used during training.
        """
        # train arguments
        args = [self.vaegmm, loss_fn, X]
        optimizer = optimizer() if isinstance(optimizer, type) else optimizer
        kwargs = {'optimizer': optimizer,
                  'epochs': epochs,
                  'batch_size': batch_size,
                  'verbose': verbose,
                  'log_metric': log_metric,
                  'callbacks': callbacks,
                  'loss_fn_kwargs': {'w_recon': w_recon,
                                     'w_energy': w_energy,
                                     'w_cov_diag': w_cov_diag}
                  }
        # initialize covariance matrix if default vaegmm loss fn is used
        use_elbo = loss_fn.__name__ == 'loss_vaegmm'
        # cov_elbo holds a single key/value pair; split it into option name and value
        cov_elbo_type, cov = [*cov_elbo][0], [*cov_elbo.values()][0]
        if use_elbo and cov_elbo_type in ['cov_full', 'cov_diag']:
            cov = tfp.stats.covariance(X.reshape(X.shape[0], -1))
            if cov_elbo_type == 'cov_diag':  # infer standard deviation from covariance matrix
                cov = tf.math.sqrt(tf.linalg.diag_part(cov))
        if use_elbo:
            kwargs['loss_fn_kwargs'][cov_elbo_type] = tf.dtypes.cast(cov, tf.float32)
        # train
        trainer(*args, **kwargs)
        # set GMM parameters
        x_recon, z, gamma = self.vaegmm(X)
        self.phi, self.mu, self.cov, self.L, self.log_det_cov = gmm_params(z, gamma)

    def infer_threshold(self,
                        X: np.ndarray,
                        threshold_perc: float = 95.,
                        batch_size: int = int(1e10)
                        ) -> None:
        """
        Update threshold by a value inferred from the percentage of instances considered to be
        outliers in a sample of the dataset.

        Parameters
        ----------
        X
            Batch of instances.
        threshold_perc
            Percentage of X considered to be normal based on the outlier score.
        batch_size
            Batch size used when making predictions with the VAEGMM.
        """
        # compute outlier scores
        iscore = self.score(X, batch_size=batch_size)
        # update threshold
        self.threshold = np.percentile(iscore, threshold_perc)

    def score(self, X: np.ndarray, batch_size: int = int(1e10)) -> np.ndarray:
        """
        Compute outlier scores.

        Parameters
        ----------
        X
            Batch of instances to analyze.
        batch_size
            Batch size used when making predictions with the VAEGMM.

        Returns
        -------
        Array with outlier scores for each instance in the batch.
        """
        # draw samples from latent space: each instance is repeated `self.samples`
        # times so the stochastic encoder is evaluated multiple times per instance
        X_samples = np.repeat(X, self.samples, axis=0)
        _, z, _ = predict_batch(X_samples, self.vaegmm, batch_size=batch_size)
        # compute average energy for samples
        energy, _ = gmm_energy(z, self.phi, self.mu, self.cov, self.L, self.log_det_cov, return_mean=False)
        energy_samples = energy.numpy().reshape((-1, self.samples))
        iscore = np.mean(energy_samples, axis=-1)
        return iscore

    def predict(self,
                X: np.ndarray,
                batch_size: int = int(1e10),
                return_instance_score: bool = True) \
            -> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
        """
        Compute outlier scores and transform into outlier predictions.

        Parameters
        ----------
        X
            Batch of instances.
        batch_size
            Batch size used when making predictions with the VAEGMM.
        return_instance_score
            Whether to return instance level outlier scores.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the outlier predictions and instance level outlier scores.
        """
        # compute outlier scores
        iscore = self.score(X, batch_size=batch_size)
        # values above threshold are outliers
        outlier_pred = (iscore > self.threshold).astype(int)
        # populate output dict
        od = outlier_prediction_dict()
        od['meta'] = self.meta
        od['data']['is_outlier'] = outlier_pred
        if return_instance_score:
            od['data']['instance_score'] = iscore
        return od
| 9,744 | 37.98 | 107 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_svm.py | from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import numpy as np
from alibi_detect.base import (BaseDetector, FitMixin, ThresholdMixin,
outlier_prediction_dict)
from alibi_detect.exceptions import _catch_error as catch_error
from alibi_detect.od.pytorch import SgdSVMTorch, BgdSVMTorch
from alibi_detect.utils._types import Literal
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
if TYPE_CHECKING:
import torch
# Mapping: backend name -> optimization method -> backend implementation class.
backends = {
    'pytorch': {
        'sgd': SgdSVMTorch,
        'bgd': BgdSVMTorch
    }
}
class SVM(BaseDetector, ThresholdMixin, FitMixin):
    """One-Class Support Vector Machine outlier detector (see ``__init__``)."""
    def __init__(
        self,
        nu: float,
        n_components: Optional[int] = None,
        kernel: 'torch.nn.Module' = None,
        optimization: Literal['sgd', 'bgd'] = 'sgd',
        backend: Literal['pytorch'] = 'pytorch',
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ) -> None:
        """One-Class Support vector machine (OCSVM) outlier detector.

        The one-class Support vector machine outlier detector fits a one-class SVM to the reference data.

        Rather than the typical approach of optimizing the exact kernel OCSVM objective through a dual formulation,
        here we instead map the data into the kernel's RKHS and then solve the linear optimization problem
        directly through its primal formulation. The Nystroem approximation is used to speed up training and inference
        by approximating the kernel's RKHS.

        We provide two options, specified by the `optimization` parameter, for optimizing the one-class svm. `''sgd''`
        wraps the `SGDOneClassSVM` class from the sklearn package and the other, `''bgd''` uses a custom implementation
        in PyTorch. The PyTorch approach is tailored for operation on GPUs. Instead of applying stochastic gradient
        descent (one data point at a time) with a fixed learning rate schedule it performs full gradient descent with
        step size chosen at each iteration via line search. Note that on a CPU this would not necessarily be preferable
        to SGD as we would have to iterate through both data points and candidate step sizes, however on GPU all of the
        operations are vectorized/parallelized. Moreover, the Nystroem approximation has complexity `O(n^2m)` where
        `n` is the number of reference instances and `m` defines the number of inducing points. This can therefore be
        expensive for large reference sets and benefits from implementation on the GPU.

        In general if using a small dataset then using the `''cpu''` with the optimization `''sgd''` is the best choice.
        Whereas if using a large dataset then using the `''gpu''` with the optimization `''bgd''` is the best choice.

        Parameters
        ----------
        nu
            The proportion of the training data that should be considered outliers. Note that this does not necessarily
            correspond to the false positive rate on test data, which is still defined when calling the
            `infer_threshold` method. `nu` should be thought of as a regularization parameter that affects how smooth
            the svm decision boundary is.
        n_components
            Number of components in the Nystroem approximation, By default uses all of them.
        kernel
            Kernel function to use for outlier detection. Should be an instance of a subclass of `torch.nn.Module`. If
            not specified then defaults to the `GaussianRBF`.
        optimization
            Optimization method to use. Choose from ``'sgd'`` or ``'bgd'``. Defaults to ``'sgd'``.
        backend
            Backend used for outlier detection. Defaults to ``'pytorch'``. Options are ``'pytorch'``.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
            passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.

        Raises
        ------
        NotImplementedError
            If choice of `backend` is not implemented.
        ValueError
            If choice of `optimization` is not valid.
        ValueError
            If `n_components` is not a positive integer.
        """
        super().__init__()

        # Validate arguments before constructing the backend.
        if optimization not in ('sgd', 'bgd'):
            raise ValueError(f'Optimization {optimization} not recognized. Choose from `sgd` or `bgd`.')

        if n_components is not None and n_components <= 0:
            raise ValueError(f'n_components must be a positive integer, got {n_components}.')

        backend_str: str = backend.lower()
        BackendValidator(
            backend_options={'pytorch': ['pytorch']},
            construct_name=self.__class__.__name__
        ).verify_backend(backend_str)

        backend_cls = backends[backend][optimization]
        args: Dict[str, Any] = {
            'n_components': n_components,
            'kernel': kernel,
            'nu': nu
        }
        args['device'] = device
        self.backend = backend_cls(**args)

        # set metadata
        self.meta['detector_type'] = 'outlier'
        self.meta['data_type'] = 'numeric'
        self.meta['online'] = False

    def fit(
        self,
        x_ref: np.ndarray,
        tol: float = 1e-6,
        max_iter: int = 1000,
        step_size_range: Tuple[float, float] = (1e-8, 1.0),
        n_step_sizes: int = 16,
        n_iter_no_change: int = 25,
        verbose: int = 0,
    ) -> Dict[str, Any]:
        """Fit the detector on reference data.

        Uses the choice of optimization method to fit the svm model to the data.

        Parameters
        ----------
        x_ref
            Reference data used to fit the detector.
        tol
            Convergence threshold used to fit the detector. Used for both ``'sgd'`` and ``'bgd'`` optimizations.
            Defaults to ``1e-6``.
        max_iter
            The maximum number of optimization steps. Used for both ``'sgd'`` and ``'bgd'`` optimizations.
        step_size_range
            The range of values to be considered for the gradient descent step size at each iteration. This is specified
            as a tuple of the form `(min_eta, max_eta)` and only used for the ``'bgd'`` optimization.
        n_step_sizes
            The number of step sizes in the defined range to be tested for loss reduction. This many points are spaced
            evenly along the range in log space. This is only used for the ``'bgd'`` optimization.
        n_iter_no_change
            The number of iterations over which the loss must decrease by `tol` in order for optimization to continue.
            This is only used for the ``'bgd'`` optimization.
        verbose
            Verbosity level during training. ``0`` is silent, ``1`` prints fit status. If using `bgd`, fit displays a
            progress bar. Otherwise, if using `sgd` then we output the Sklearn `SGDOneClassSVM.fit()` logs.

        Returns
        -------
        Dictionary with fit results. The dictionary contains the following keys depending on the optimization used:
            - converged: `bool` indicating whether training converged.
            - n_iter: number of iterations performed.
            - lower_bound: loss lower bound. Only returned for the `bgd`.
        """
        return self.backend.fit(
            self.backend._to_backend_dtype(x_ref),
            **self.backend.format_fit_kwargs(locals())
        )

    @catch_error('NotFittedError')
    def score(self, x: np.ndarray) -> np.ndarray:
        """Score `x` instances using the detector.

        Scores the data using the fitted svm model. The higher the score, the more anomalous the instance.

        Parameters
        ----------
        x
            Data to score. The shape of `x` should be `(n_instances, n_features)`.

        Returns
        -------
        Outlier scores. The shape of the scores is `(n_instances,)`. The higher the score, the more anomalous the \
        instance.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        score = self.backend.score(self.backend._to_backend_dtype(x))
        return self.backend._to_frontend_dtype(score)

    @catch_error('NotFittedError')
    def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
        """Infer the threshold for the SVM detector.

        The threshold is computed so that the outlier detector would incorrectly classify `fpr` proportion of the
        reference data as outliers.

        Parameters
        ----------
        x
            Reference data used to infer the threshold.
        fpr
            False positive rate used to infer the threshold. The false positive rate is the proportion of
            instances in `x` that are incorrectly classified as outliers. The false positive rate should
            be in the range ``(0, 1)``.

        Raises
        ------
        ValueError
            Raised if `fpr` is not in ``(0, 1)``.
        NotFittedError
            If called before detector has been fit.
        """
        self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)

    @catch_error('NotFittedError')
    def predict(self, x: np.ndarray) -> Dict[str, Any]:
        """Predict whether the instances in `x` are outliers or not.

        Scores the instances in `x` and if the threshold was inferred, returns the outlier labels and p-values as well.

        Parameters
        ----------
        x
            Data to predict. The shape of `x` should be `(n_instances, n_features)`.

        Returns
        -------
        Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores. If threshold inference was \
        performed, 'data' also contains the threshold value, outlier labels and p-vals . The shape of the scores is \
        `(n_instances,)`. The higher the score, the more anomalous the instance. 'meta' contains information about \
        the detector.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        outputs = self.backend.predict(self.backend._to_backend_dtype(x))
        output = outlier_prediction_dict()
        output['data'] = {
            **output['data'],
            **self.backend._to_frontend_dtype(outputs)
        }
        output['meta'] = {
            **output['meta'],
            'name': self.__class__.__name__,
            'detector_type': 'outlier',
            'online': False,
            'version': __version__,
        }
        return output
| 10,687 | 41.923695 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_lof.py | from typing import Callable, Union, Optional, Dict, Any, List, Tuple
from typing import TYPE_CHECKING
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.exceptions import _catch_error as catch_error
from alibi_detect.od.base import TransformProtocol, TransformProtocolType
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin
from alibi_detect.od.pytorch import LOFTorch, Ensembler
from alibi_detect.od.base import get_aggregator, get_normalizer, NormalizerLiterals, AggregatorLiterals
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
if TYPE_CHECKING:
import torch
# Mapping: backend name -> (detector backend class, ensembler class).
backends = {
    'pytorch': (LOFTorch, Ensembler)
}
class LOF(BaseDetector, FitMixin, ThresholdMixin):
    """Local Outlier Factor outlier detector (see ``__init__``)."""
    def __init__(
        self,
        k: Union[int, np.ndarray, List[int], Tuple[int]],
        kernel: Optional[Callable] = None,
        normalizer: Optional[Union[TransformProtocolType, NormalizerLiterals]] = 'PValNormalizer',
        aggregator: Union[TransformProtocol, AggregatorLiterals] = 'AverageAggregator',
        backend: Literal['pytorch'] = 'pytorch',
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ) -> None:
        """
        Local Outlier Factor (LOF) outlier detector.

        The LOF detector is a non-parametric method for outlier detection. It computes the local density
        deviation of a given data point with respect to its neighbors. It considers as outliers the
        samples that have a substantially lower density than their neighbors.

        The detector can be initialized with `k` a single value or an array of values. If `k` is a single value then
        the score method uses the distance/kernel similarity to the k-th nearest neighbor. If `k` is an array of
        values then the score method uses the distance/kernel similarity to each of the specified `k` neighbors.
        In the latter case, an `aggregator` must be specified to aggregate the scores.

        Note that, in the multiple k case, a normalizer can be provided. If a normalizer is passed then it is fit in
        the `infer_threshold` method and so this method must be called before the `predict` method. If this is not
        done an exception is raised. If `k` is a single value then the predict method can be called without first
        calling `infer_threshold` but only scores will be returned and not outlier predictions.

        Parameters
        ----------
        k
            Number of nearest neighbors to compute distance to. `k` can be a single value or
            an array of integers. If an array is passed, an aggregator is required to aggregate
            the scores. If `k` is a single value we compute the local outlier factor for that `k`.
            Otherwise if `k` is a list then we compute and aggregate the local outlier factor for each
            value in `k`.
        kernel
            Kernel function to use for outlier detection. If ``None``, `torch.cdist` is used.
            Otherwise if a kernel is specified then instead of using `torch.cdist` the kernel
            defines the k nearest neighbor distance.
        normalizer
            Normalizer to use for outlier detection. If ``None``, no normalization is applied.
            For a list of available normalizers, see :mod:`alibi_detect.od.pytorch.ensemble`.
        aggregator
            Aggregator to use for outlier detection. Can be set to ``None`` if `k` is a single
            value. For a list of available aggregators, see :mod:`alibi_detect.od.pytorch.ensemble`.
        backend
            Backend used for outlier detection. Defaults to ``'pytorch'``. Options are ``'pytorch'``.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
            ``torch.device``.

        Raises
        ------
        ValueError
            If `k` is an array and `aggregator` is None.
        NotImplementedError
            If choice of `backend` is not implemented.
        """
        super().__init__()

        backend_str: str = backend.lower()
        BackendValidator(
            backend_options={'pytorch': ['pytorch']},
            construct_name=self.__class__.__name__
        ).verify_backend(backend_str)

        backend_cls, ensembler_cls = backends[backend]
        ensembler = None

        if aggregator is None and isinstance(k, (list, np.ndarray, tuple)):
            raise ValueError('If `k` is a `np.ndarray`, `list` or `tuple`, '
                             'the `aggregator` argument cannot be ``None``.')

        # Multiple-k case: build an ensembler to normalize and aggregate the
        # per-k scores into a single outlier score.
        if isinstance(k, (list, np.ndarray, tuple)):
            ensembler = ensembler_cls(
                normalizer=get_normalizer(normalizer),
                aggregator=get_aggregator(aggregator)
            )

        self.backend = backend_cls(k, kernel=kernel, ensembler=ensembler, device=device)

        # set metadata
        self.meta['detector_type'] = 'outlier'
        self.meta['data_type'] = 'numeric'
        self.meta['online'] = False

    def fit(self, x_ref: np.ndarray) -> None:
        """Fit the detector on reference data.

        Parameters
        ----------
        x_ref
            Reference data used to fit the detector.
        """
        self.backend.fit(self.backend._to_backend_dtype(x_ref))

    @catch_error('NotFittedError')
    @catch_error('ThresholdNotInferredError')
    def score(self, x: np.ndarray) -> np.ndarray:
        """Score `x` instances using the detector.

        Computes the local outlier factor for each point in `x`. This is the density of each point `x`
        relative to those of its neighbors in `x_ref`. If `k` is an array of values then the score for
        each `k` is aggregated using the ensembler.

        Parameters
        ----------
        x
            Data to score. The shape of `x` should be `(n_instances, n_features)`.

        Returns
        -------
        Outlier scores. The shape of the scores is `(n_instances,)`. The higher the score, the more anomalous the \
        instance.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        ThresholdNotInferredError
            If k is a list and a threshold was not inferred.
        """
        score = self.backend.score(self.backend._to_backend_dtype(x))
        score = self.backend._ensembler(score)
        return self.backend._to_frontend_dtype(score)

    @catch_error('NotFittedError')
    def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
        """Infer the threshold for the LOF detector.

        The threshold is computed so that the outlier detector would incorrectly classify `fpr` proportion of the
        reference data as outliers.

        Parameters
        ----------
        x
            Reference data used to infer the threshold.
        fpr
            False positive rate used to infer the threshold. The false positive rate is the proportion of
            instances in `x` that are incorrectly classified as outliers. The false positive rate should
            be in the range ``(0, 1)``.

        Raises
        ------
        ValueError
            Raised if `fpr` is not in ``(0, 1)``.
        NotFittedError
            If called before detector has been fit.
        """
        self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)

    @catch_error('NotFittedError')
    @catch_error('ThresholdNotInferredError')
    def predict(self, x: np.ndarray) -> Dict[str, Any]:
        """Predict whether the instances in `x` are outliers or not.

        Scores the instances in `x` and if the threshold was inferred, returns the outlier labels and p-values as well.

        Parameters
        ----------
        x
            Data to predict. The shape of `x` should be `(n_instances, n_features)`.

        Returns
        -------
        Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores. If threshold inference was \
        performed, 'data' also contains the threshold value, outlier labels and p-vals . The shape of the scores is \
        `(n_instances,)`. The higher the score, the more anomalous the instance. 'meta' contains information about \
        the detector.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        ThresholdNotInferredError
            If k is a list and a threshold was not inferred.
        """
        outputs = self.backend.predict(self.backend._to_backend_dtype(x))
        output = outlier_prediction_dict()
        output['data'] = {
            **output['data'],
            **self.backend._to_frontend_dtype(outputs)
        }
        output['meta'] = {
            **output['meta'],
            'name': self.__class__.__name__,
            'detector_type': 'outlier',
            'online': False,
            'version': __version__,
        }
        return output
| 9,133 | 40.899083 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/seq2seq.py | import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from typing import Dict, Tuple, Union
from alibi_detect.models.tensorflow.autoencoder import Seq2Seq, EncoderLSTM, DecoderLSTM
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class OutlierSeq2Seq(BaseDetector, FitMixin, ThresholdMixin):
    def __init__(self,
                 n_features: int,
                 seq_len: int,
                 threshold: Union[float, np.ndarray] = None,
                 seq2seq: tf.keras.Model = None,
                 threshold_net: tf.keras.Model = None,
                 latent_dim: int = None,
                 output_activation: str = None,
                 beta: float = 1.
                 ) -> None:
        """
        Seq2Seq-based outlier detector.

        Parameters
        ----------
        n_features
            Number of features in the time series.
        seq_len
            Sequence length fed into the Seq2Seq model.
        threshold
            Threshold used for outlier detection. Can be a float or feature-wise array.
        seq2seq
            A trained seq2seq model if available.
        threshold_net
            Layers for the threshold estimation network wrapped in a
            tf.keras.Sequential class if no 'seq2seq' is specified.
        latent_dim
            Latent dimension of the encoder and decoder. Only required when no trained
            `seq2seq` model is passed.
        output_activation
            Activation used in the Dense output layer of the decoder.
        beta
            Weight on the threshold estimation loss term.
        """
        super().__init__()

        if threshold is None:
            threshold = 0.
            logger.warning('No explicit threshold level set. Threshold defaults to 0. '
                           'A threshold can be inferred using `infer_threshold`.')
        self.threshold = threshold
        self.shape = (-1, seq_len, n_features)

        # Only derive the (even) latent dimensionality when one is given. A fully
        # trained `seq2seq` model may be supplied without `latent_dim`; previously
        # `(None // 2) * 2` raised an opaque `TypeError` in that case.
        if latent_dim is not None:
            self.latent_dim = (latent_dim // 2) * 2
            if self.latent_dim != latent_dim:
                logger.warning('Odd values for `latent_dim` are not supported, because '
                               'of Bidirectional(LSTM(latent_dim // 2,...) in the encoder. '
                               f'{self.latent_dim} is used instead of {latent_dim}.)')
        else:
            self.latent_dim = None

        self.output_activation = output_activation

        # Default threshold network; only needed (and only constructible) when a
        # Seq2Seq model is initialized from scratch with a valid `latent_dim`.
        if threshold_net is None and seq2seq is None and isinstance(latent_dim, int):
            threshold_net = tf.keras.Sequential(
                [
                    InputLayer(input_shape=(seq_len, self.latent_dim)),
                    Dense(64, activation=tf.nn.relu),
                    Dense(64, activation=tf.nn.relu),
                ])

        # check if model can be loaded, otherwise initialize a Seq2Seq model
        if isinstance(seq2seq, tf.keras.Model):
            self.seq2seq = seq2seq
        elif isinstance(latent_dim, int) and isinstance(threshold_net, tf.keras.Sequential):
            encoder_net = EncoderLSTM(self.latent_dim)
            decoder_net = DecoderLSTM(self.latent_dim, n_features, output_activation)
            self.seq2seq = Seq2Seq(encoder_net, decoder_net, threshold_net, n_features, beta=beta)
        else:
            raise TypeError('No valid format detected for `seq2seq` (tf.keras.Model), '
                            '`latent_dim` (int) or `threshold_net` (tf.keras.Sequential)')

        # set metadata
        self.meta['detector_type'] = 'outlier'
        self.meta['data_type'] = 'time-series'
        self.meta['online'] = False

    def fit(self,
            X: np.ndarray,
            loss_fn: tf.keras.losses = tf.keras.losses.mse,
            optimizer: OptimizerTF = tf.keras.optimizers.Adam,
            epochs: int = 20,
            batch_size: int = 64,
            verbose: bool = True,
            log_metric: Tuple[str, "tf.keras.metrics"] = None,
            callbacks: tf.keras.callbacks = None,
            ) -> None:
        """
        Train Seq2Seq model.

        Parameters
        ----------
        X
            Univariate or multivariate time series.
            Shape equals (batch, features) or (batch, sequence length, features).
        loss_fn
            Loss function used for training.
        optimizer
            Optimizer used for training.
        epochs
            Number of training epochs.
        batch_size
            Batch size used for training.
        verbose
            Whether to print training progress.
        log_metric
            Additional metrics whose progress will be displayed if verbose equals True.
        callbacks
            Callbacks used during training.
        """
        # targets for teacher-forcing: the target sequence is the input shifted by one step
        if len(X.shape) == 2:
            y = np.roll(X, -1, axis=0).reshape(self.shape)
            X = X.reshape(self.shape)
        else:
            y = np.roll(X.reshape((-1, self.shape[-1])), -1, axis=0).reshape(self.shape)

        # train arguments
        args = [self.seq2seq, loss_fn, X]
        # allow both an optimizer class and an already-instantiated optimizer
        optimizer = optimizer() if isinstance(optimizer, type) else optimizer
        kwargs = {'y_train': y,
                  'optimizer': optimizer,
                  'epochs': epochs,
                  'batch_size': batch_size,
                  'verbose': verbose,
                  'log_metric': log_metric,
                  'callbacks': callbacks}

        # train
        trainer(*args, **kwargs)

    def infer_threshold(self,
                        X: np.ndarray,
                        outlier_perc: Union[int, float] = 100.,
                        threshold_perc: Union[int, float, np.ndarray, list] = 95.,
                        batch_size: int = int(1e10)
                        ) -> None:
        """
        Update the outlier threshold by using a sequence of instances from the dataset
        of which the fraction of features which are outliers are known. This fraction can be across
        all features or per feature.

        Parameters
        ----------
        X
            Univariate or multivariate time series.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.
        threshold_perc
            Percentage of X considered to be normal based on the outlier score.
            Overall (float) or feature-wise (array or list).
        batch_size
            Batch size used when making predictions with the seq2seq model.
        """
        orig_shape = X.shape
        threshold_shape = (1, orig_shape[-1])
        if len(orig_shape) == 3:  # (batch_size, seq_len, n_features)
            threshold_shape = (1,) + threshold_shape  # type: ignore

        # compute outlier scores
        fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
        if outlier_perc == 100.:
            fscore = fscore.reshape((-1, self.shape[-1]))

        # update threshold
        if isinstance(threshold_perc, (int, float)) and outlier_perc == 100.:
            self.threshold += np.percentile(fscore, threshold_perc)
        elif isinstance(threshold_perc, (int, float)) and outlier_perc < 100.:
            self.threshold += np.percentile(iscore, threshold_perc)
        elif isinstance(threshold_perc, (list, np.ndarray)) and outlier_perc == 100.:
            self.threshold += np.diag(np.percentile(fscore, threshold_perc, axis=0)).reshape(threshold_shape)
        elif isinstance(threshold_perc, (list, np.ndarray)) and outlier_perc < 100.:
            # number feature scores used for outlier score
            n_score = int(np.ceil(.01 * outlier_perc * fscore.shape[1]))
            # compute threshold level by feature
            sorted_fscore = np.sort(fscore, axis=1)
            if len(orig_shape) == 3:  # (batch_size, seq_len, n_features)
                sorted_fscore_perc = sorted_fscore[:, -n_score:, :]  # (batch_size, n_score, n_features)
                self.threshold += np.mean(sorted_fscore_perc, axis=(0, 1)).reshape(threshold_shape)  # (1,1,n_features)
            else:  # (batch_size, n_features)
                sorted_fscore_perc = sorted_fscore[:, -n_score:]  # (batch_size, n_score)
                self.threshold += np.mean(sorted_fscore_perc, axis=0)  # float
        else:
            raise TypeError('Incorrect type for `threshold` and/or `threshold_perc`.')

    def feature_score(self, X_orig: np.ndarray, X_recon: np.ndarray, threshold_est: np.ndarray) -> np.ndarray:
        """
        Compute feature level outlier scores.

        Parameters
        ----------
        X_orig
            Original time series.
        X_recon
            Reconstructed time series.
        threshold_est
            Estimated threshold from the decoder's latent space.

        Returns
        -------
        Feature level outlier scores. Scores above 0 are outliers.
        """
        fscore = (X_orig - X_recon) ** 2
        # TODO: check casting if nb of features equals time dimension
        fscore_adj = fscore - threshold_est - self.threshold
        return fscore_adj

    def instance_score(self, fscore: np.ndarray, outlier_perc: float = 100.) -> np.ndarray:
        """
        Compute instance level outlier scores. `instance` in this case means the data along the
        first axis of the original time series passed to the predictor.

        Parameters
        ----------
        fscore
            Feature level outlier scores.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.

        Returns
        -------
        Instance level outlier scores.
        """
        fscore_flat = fscore.reshape(fscore.shape[0], -1).copy()
        # only the top `outlier_perc` percent of feature scores contribute
        n_score_features = int(np.ceil(.01 * outlier_perc * fscore_flat.shape[1]))
        sorted_fscore = np.sort(fscore_flat, axis=1)
        sorted_fscore_perc = sorted_fscore[:, -n_score_features:]
        iscore = np.mean(sorted_fscore_perc, axis=1)
        return iscore

    def score(self, X: np.ndarray, outlier_perc: float = 100., batch_size: int = int(1e10)) \
            -> Tuple[np.ndarray, np.ndarray]:
        """
        Compute feature and instance level outlier scores.

        Parameters
        ----------
        X
            Univariate or multivariate time series.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.
        batch_size
            Batch size used when making predictions with the seq2seq model.

        Returns
        -------
        Feature and instance level outlier scores.
        """
        # use the seq2seq model to reconstruct instances
        orig_shape = X.shape
        if len(orig_shape) == 2:
            X = X.reshape(self.shape)
        X_recon, threshold_est = predict_batch(X, self.seq2seq.decode_seq, batch_size=batch_size)
        if len(orig_shape) == 2:  # reshape back to original shape
            X = X.reshape(orig_shape)
            X_recon = X_recon.reshape(orig_shape)
            threshold_est = threshold_est.reshape(orig_shape)

        # compute feature and instance level scores
        fscore = self.feature_score(X, X_recon, threshold_est)
        iscore = self.instance_score(fscore, outlier_perc=outlier_perc)
        return fscore, iscore

    def predict(self,
                X: np.ndarray,
                outlier_type: str = 'instance',
                outlier_perc: float = 100.,
                batch_size: int = int(1e10),
                return_feature_score: bool = True,
                return_instance_score: bool = True) \
            -> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
        """
        Compute outlier scores and transform into outlier predictions.

        Parameters
        ----------
        X
            Univariate or multivariate time series.
        outlier_type
            Predict outliers at the 'feature' or 'instance' level.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.
        batch_size
            Batch size used when making predictions with the seq2seq model.
        return_feature_score
            Whether to return feature level outlier scores.
        return_instance_score
            Whether to return instance level outlier scores.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the outlier predictions and both feature and instance level outlier scores.
        """
        # compute outlier scores
        fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
        if outlier_type == 'feature':
            outlier_score = fscore
        elif outlier_type == 'instance':
            outlier_score = iscore
        else:
            raise ValueError('`outlier_score` needs to be either `feature` or `instance`.')

        # values above threshold are outliers
        outlier_pred = (outlier_score > 0).astype(int)

        # populate output dict
        od = outlier_prediction_dict()
        od['meta'] = self.meta
        od['data']['is_outlier'] = outlier_pred
        if return_feature_score:
            od['data']['feature_score'] = fscore
        if return_instance_score:
            od['data']['instance_score'] = iscore
        return od
| 13,595 | 40.075529 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_mahalanobis.py | from typing import Union, Optional, Dict, Any
from typing import TYPE_CHECKING
from alibi_detect.exceptions import _catch_error as catch_error
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.od.pytorch import MahalanobisTorch
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
if TYPE_CHECKING:
import torch
# Mapping from supported backend name to its detector backend implementation.
backends = {
    'pytorch': MahalanobisTorch
}
class Mahalanobis(BaseDetector, FitMixin, ThresholdMixin):
    def __init__(
        self,
        min_eigenvalue: float = 1e-6,
        backend: Literal['pytorch'] = 'pytorch',
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ) -> None:
        """
        The Mahalanobis outlier detection method.

        Fitting computes the covariance matrix of the reference data together with its
        eigendecomposition. Eigenvalues below `min_eigenvalue` are filtered out for
        numerical stability and each remaining eigenvector is inversely weighted by its
        eigenvalue. Scoring projects a test point onto those eigenvectors and takes the
        l2-norm, so a deviation along a low-variance direction contributes more to the
        score than the same deviation along a high-variance direction.

        Parameters
        ----------
        min_eigenvalue
            Eigenvectors with eigenvalues below this value will be discarded. This is to
            ensure numerical stability.
        backend
            Backend used for outlier detection. Defaults to ``'pytorch'``. Options are
            ``'pytorch'``.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if
            needed. Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'``
            or an instance of ``torch.device``.

        Raises
        ------
        NotImplementedError
            If choice of `backend` is not implemented.
        """
        super().__init__()

        # Reject unsupported backends before constructing anything.
        BackendValidator(
            backend_options={'pytorch': ['pytorch']},
            construct_name=self.__class__.__name__
        ).verify_backend(backend.lower())

        self.backend = backends[backend](min_eigenvalue, device=device)

        # detector metadata
        self.meta.update(detector_type='outlier', data_type='numeric', online=False)

    def fit(self, x_ref: np.ndarray) -> None:
        """Fit the detector on reference data.

        Computes the covariance matrix of `x_ref` and its eigenvectors, filters out
        very small eigenvalues via `min_eigenvalue` and scales the eigenvectors so that
        the projected data has mean ``0`` and std ``1``.

        Parameters
        ----------
        x_ref
            Reference data used to fit the detector.
        """
        x_ref_backend = self.backend._to_backend_dtype(x_ref)
        self.backend.fit(x_ref_backend)

    @catch_error('NotFittedError')
    def score(self, x: np.ndarray) -> np.ndarray:
        """Score `x` instances using the detector.

        Projects `x` onto the scaled eigenvectors and returns the l2-norm of the
        projection. The higher the score, the more outlying the instance.

        Parameters
        ----------
        x
            Data to score. The shape of `x` should be `(n_instances, n_features)`.

        Returns
        -------
        Outlier scores with shape `(n_instances,)`. The higher the score, the more \
        outlying the instance.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        x_backend = self.backend._to_backend_dtype(x)
        scores = self.backend.score(x_backend)
        return self.backend._to_frontend_dtype(scores)

    @catch_error('NotFittedError')
    def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
        """Infer the threshold for the Mahalanobis detector.

        The threshold is chosen so that `fpr` proportion of the reference data `x`
        would be incorrectly classified as outliers.

        Parameters
        ----------
        x
            Reference data used to infer the threshold.
        fpr
            False positive rate used to infer the threshold. The proportion of
            instances in `x` incorrectly classified as outliers; must lie in
            ``(0, 1)``.

        Raises
        ------
        ValueError
            Raised if `fpr` is not in ``(0, 1)``.
        NotFittedError
            If called before detector has been fit.
        """
        x_backend = self.backend._to_backend_dtype(x)
        self.backend.infer_threshold(x_backend, fpr)

    @catch_error('NotFittedError')
    def predict(self, x: np.ndarray) -> Dict[str, Any]:
        """Predict whether the instances in `x` are outliers or not.

        Scores the instances in `x`; if a threshold has been inferred the outlier
        labels and p-values are returned as well.

        Parameters
        ----------
        x
            Data to predict. The shape of `x` should be `(n_instances, n_features)`.

        Returns
        -------
        Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores \
        and, if threshold inference was performed, the threshold value, outlier \
        labels and p-vals. 'meta' contains information about the detector.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        backend_outputs = self.backend.predict(self.backend._to_backend_dtype(x))
        result = outlier_prediction_dict()
        result['data'].update(self.backend._to_frontend_dtype(backend_outputs))
        result['meta'].update(
            name=self.__class__.__name__,
            detector_type='outlier',
            online=False,
            version=__version__,
        )
        return result
| 6,935 | 37.966292 | 120 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/vae.py | import logging
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import VAE
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.models.tensorflow.losses import elbo
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class OutlierVAE(BaseDetector, FitMixin, ThresholdMixin):
    def __init__(self,
                 threshold: float = None,
                 score_type: str = 'mse',  # TODO: reconstruction proba; make sure to infer correct distribution
                 vae: tf.keras.Model = None,
                 encoder_net: tf.keras.Model = None,
                 decoder_net: tf.keras.Model = None,
                 latent_dim: int = None,
                 samples: int = 10,
                 beta: float = 1.,
                 data_type: str = None
                 ) -> None:
        """
        VAE-based outlier detector.

        Parameters
        ----------
        threshold
            Threshold used for outlier score to determine outliers.
        score_type
            Metric used for outlier scores. Only 'mse' (mean squared error) is currently
            implemented; 'proba' (reconstruction probabilities) is reserved but not yet
            supported and will raise `NotImplementedError` at scoring time.
        vae
            A trained tf.keras model if available.
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class if no 'vae' is specified.
        decoder_net
            Layers for the decoder wrapped in a tf.keras.Sequential class if no 'vae' is specified.
        latent_dim
            Dimensionality of the latent space.
        samples
            Number of samples sampled to evaluate each instance.
        beta
            Beta parameter for KL-divergence loss term.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()

        if threshold is None:
            logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
        self.threshold = threshold
        self.score_type = score_type
        self.samples = samples

        # check if model can be loaded, otherwise initialize VAE model
        if isinstance(vae, tf.keras.Model):
            self.vae = vae
        elif isinstance(encoder_net, tf.keras.Sequential) and isinstance(decoder_net, tf.keras.Sequential):
            self.vae = VAE(encoder_net, decoder_net, latent_dim, beta=beta)  # define VAE model
        else:
            raise TypeError('No valid format detected for `vae` (tf.keras.Model) '
                            'or `encoder_net` and `decoder_net` (tf.keras.Sequential).')

        # set metadata
        self.meta['detector_type'] = 'outlier'
        self.meta['data_type'] = data_type
        self.meta['online'] = False

    def fit(self,
            X: np.ndarray,
            loss_fn: tf.keras.losses = elbo,
            optimizer: OptimizerTF = tf.keras.optimizers.Adam,
            cov_elbo: dict = dict(sim=.05),
            epochs: int = 20,
            batch_size: int = 64,
            verbose: bool = True,
            log_metric: Tuple[str, "tf.keras.metrics"] = None,
            callbacks: tf.keras.callbacks = None,
            ) -> None:
        """
        Train VAE model.

        Parameters
        ----------
        X
            Training batch.
        loss_fn
            Loss function used for training.
        optimizer
            Optimizer used for training.
        cov_elbo
            Dictionary with covariance matrix options in case the elbo loss function is used.
            Either use the full covariance matrix inferred from X (dict(cov_full=None)),
            only the variance (dict(cov_diag=None)) or a float representing the same standard deviation
            for each feature (e.g. dict(sim=.05)).
        epochs
            Number of training epochs.
        batch_size
            Batch size used for training.
        verbose
            Whether to print training progress.
        log_metric
            Additional metrics whose progress will be displayed if verbose equals True.
        callbacks
            Callbacks used during training.
        """
        # train arguments
        args = [self.vae, loss_fn, X]
        # allow both an optimizer class and an already-instantiated optimizer
        optimizer = optimizer() if isinstance(optimizer, type) else optimizer
        kwargs = {'optimizer': optimizer,
                  'epochs': epochs,
                  'batch_size': batch_size,
                  'verbose': verbose,
                  'log_metric': log_metric,
                  'callbacks': callbacks}

        # initialize covariance matrix if elbo loss fn is used
        use_elbo = loss_fn.__name__ == 'elbo'
        cov_elbo_type, cov = [*cov_elbo][0], [*cov_elbo.values()][0]
        if use_elbo and cov_elbo_type in ['cov_full', 'cov_diag']:
            cov = tfp.stats.covariance(X.reshape(X.shape[0], -1))
            if cov_elbo_type == 'cov_diag':  # infer standard deviation from covariance matrix
                cov = tf.math.sqrt(tf.linalg.diag_part(cov))
        if use_elbo:
            kwargs['loss_fn_kwargs'] = {cov_elbo_type: tf.dtypes.cast(cov, tf.float32)}

        # train
        trainer(*args, **kwargs)

    def infer_threshold(self,
                        X: np.ndarray,
                        outlier_type: str = 'instance',
                        outlier_perc: float = 100.,
                        threshold_perc: float = 95.,
                        batch_size: int = int(1e10)
                        ) -> None:
        """
        Update threshold by a value inferred from the percentage of instances considered to be
        outliers in a sample of the dataset.

        Parameters
        ----------
        X
            Batch of instances.
        outlier_type
            Predict outliers at the 'feature' or 'instance' level.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.
        threshold_perc
            Percentage of X considered to be normal based on the outlier score.
        batch_size
            Batch size used when making predictions with the VAE.
        """
        # compute outlier scores
        fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
        if outlier_type == 'feature':
            outlier_score = fscore
        elif outlier_type == 'instance':
            outlier_score = iscore
        else:
            raise ValueError('`outlier_score` needs to be either `feature` or `instance`.')

        # update threshold
        self.threshold = np.percentile(outlier_score, threshold_perc)

    def feature_score(self, X_orig: np.ndarray, X_recon: np.ndarray) -> np.ndarray:
        """
        Compute feature level outlier scores.

        Parameters
        ----------
        X_orig
            Batch of original instances.
        X_recon
            Batch of reconstructed instances.

        Returns
        -------
        Feature level outlier scores.

        Raises
        ------
        NotImplementedError
            If `score_type` equals 'proba' (reserved but not implemented yet).
        ValueError
            If `score_type` is not a recognised metric.
        """
        if self.score_type == 'mse':
            fscore = np.power(X_orig - X_recon, 2)
            # average the squared errors over the `samples` reconstructions per instance
            fscore = fscore.reshape((-1, self.samples) + X_orig.shape[1:])
            return np.mean(fscore, axis=1)
        elif self.score_type == 'proba':
            # Previously this branch fell through to `return fscore` with `fscore`
            # undefined, raising an opaque NameError. Fail explicitly instead.
            raise NotImplementedError('`score_type` `proba` is not implemented yet.')
        else:
            raise ValueError('`score_type` needs to be either `mse` or `proba`.')

    def instance_score(self, fscore: np.ndarray, outlier_perc: float = 100.) -> np.ndarray:
        """
        Compute instance level outlier scores.

        Parameters
        ----------
        fscore
            Feature level outlier scores.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.

        Returns
        -------
        Instance level outlier scores.
        """
        fscore_flat = fscore.reshape(fscore.shape[0], -1).copy()
        # only the top `outlier_perc` percent of feature scores contribute
        n_score_features = int(np.ceil(.01 * outlier_perc * fscore_flat.shape[1]))
        sorted_fscore = np.sort(fscore_flat, axis=1)
        sorted_fscore_perc = sorted_fscore[:, -n_score_features:]
        iscore = np.mean(sorted_fscore_perc, axis=1)
        return iscore

    def score(self, X: np.ndarray, outlier_perc: float = 100., batch_size: int = int(1e10)) \
            -> Tuple[np.ndarray, np.ndarray]:
        """
        Compute feature and instance level outlier scores.

        Parameters
        ----------
        X
            Batch of instances.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.
        batch_size
            Batch size used when making predictions with the VAE.

        Returns
        -------
        Feature and instance level outlier scores.
        """
        # sample reconstructed instances: each instance is repeated `samples` times
        X_samples = np.repeat(X, self.samples, axis=0)
        X_recon = predict_batch(X_samples, self.vae, batch_size=batch_size)

        # compute feature and instance level scores
        fscore = self.feature_score(X_samples, X_recon)  # type: ignore[arg-type]
        iscore = self.instance_score(fscore, outlier_perc=outlier_perc)
        return fscore, iscore

    def predict(self,
                X: np.ndarray,
                outlier_type: str = 'instance',
                outlier_perc: float = 100.,
                batch_size: int = int(1e10),
                return_feature_score: bool = True,
                return_instance_score: bool = True) \
            -> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
        """
        Predict whether instances are outliers or not.

        Parameters
        ----------
        X
            Batch of instances.
        outlier_type
            Predict outliers at the 'feature' or 'instance' level.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.
        batch_size
            Batch size used when making predictions with the VAE.
        return_feature_score
            Whether to return feature level outlier scores.
        return_instance_score
            Whether to return instance level outlier scores.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the outlier predictions and both feature and instance level outlier scores.
        """
        # compute outlier scores
        fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
        if outlier_type == 'feature':
            outlier_score = fscore
        elif outlier_type == 'instance':
            outlier_score = iscore
        else:
            raise ValueError('`outlier_score` needs to be either `feature` or `instance`.')

        # values above threshold are outliers
        outlier_pred = (outlier_score > self.threshold).astype(int)

        # populate output dict
        od = outlier_prediction_dict()
        od['meta'] = self.meta
        od['data']['is_outlier'] = outlier_pred
        if return_feature_score:
            od['data']['feature_score'] = fscore
        if return_instance_score:
            od['data']['instance_score'] = iscore
        return od
| 11,444 | 37.15 | 112 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_pca.py | from typing import Union, Optional, Callable, Dict, Any
from typing import TYPE_CHECKING
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.base import BaseDetector, ThresholdMixin, FitMixin
from alibi_detect.od.pytorch import KernelPCATorch, LinearPCATorch
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
from alibi_detect.exceptions import _catch_error as catch_error
if TYPE_CHECKING:
import torch
# Mapping from supported backend name to its (kernel, linear) backend implementations.
backends = {
    'pytorch': (KernelPCATorch, LinearPCATorch)
}
class PCA(BaseDetector, ThresholdMixin, FitMixin):
    def __init__(
        self,
        n_components: int,
        kernel: Optional[Callable] = None,
        backend: Literal['pytorch'] = 'pytorch',
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ) -> None:
        """Principal Component Analysis (PCA) outlier detector.

        Two variants are supported. Linear PCA works with the eigenvectors of the
        covariance matrix of the data; a test instance is scored by its distance to
        the principal subspace spanned by the first `n_components` eigenvectors.
        Kernel PCA works with the eigenvectors of the kernel matrix; a test instance
        is projected onto the largest eigenvectors and scored via the L2 norm. When a
        threshold has been fitted it determines whether an instance is an outlier.

        Parameters
        ----------
        n_components:
            The number of dimensions in the principal subspace. For linear pca should have
            ``1 <= n_components < dim(data)``. For kernel pca should have
            ``1 <= n_components < len(data)``.
        kernel
            Kernel function to use for outlier detection. If ``None``, linear PCA is used
            instead of the kernel variant.
        backend
            Backend used for outlier detection. Defaults to ``'pytorch'``. Options are
            ``'pytorch'``.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if
            needed. Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'``
            or an instance of ``torch.device``.

        Raises
        ------
        NotImplementedError
            If choice of `backend` is not implemented.
        ValueError
            If `n_components` is less than 1.
        """
        super().__init__()

        # Reject unsupported backends before constructing anything.
        BackendValidator(
            backend_options={'pytorch': ['pytorch']},
            construct_name=self.__class__.__name__
        ).verify_backend(backend.lower())

        # Pick the kernel or linear variant depending on whether a kernel was given.
        kernel_cls, linear_cls = backends[backend]
        self.backend: Union[KernelPCATorch, LinearPCATorch]
        if kernel is None:
            self.backend = linear_cls(
                n_components=n_components,
                device=device,
            )
        else:
            self.backend = kernel_cls(
                n_components=n_components,
                device=device,
                kernel=kernel
            )

        # detector metadata
        self.meta.update(detector_type='outlier', data_type='numeric', online=False)

    def fit(self, x_ref: np.ndarray) -> None:
        """Fit the detector on reference data.

        Linear variant: computes the principal components of `x_ref` from the covariance
        matrix and removes the largest `n_components` eigenvectors; the remaining
        eigenvectors span the invariant dimensions used for scoring. Kernel variant:
        computes the principal components from the kernel matrix and keeps the largest
        `n_components` eigenvectors, each normalized to length `1/eigenvalue`. In both
        cases the components are stored for scoring test instances later.

        Parameters
        ----------
        x_ref
            Reference data used to fit the detector.

        Raises
        ------
        ValueError
            If using linear pca variant and `n_components` is greater than or equal to
            number of features or if using kernel pca variant and `n_components` is
            greater than or equal to number of instances.
        """
        x_ref_backend = self.backend._to_backend_dtype(x_ref)
        self.backend.fit(x_ref_backend)

    @catch_error('NotFittedError')
    def score(self, x: np.ndarray) -> np.ndarray:
        """Score `x` instances using the detector.

        Projects `x` onto the stored eigenvectors and computes the score using the
        L2 norm.

        Parameters
        ----------
        x
            Data to score. The shape of `x` should be `(n_instances, n_features)`.

        Returns
        -------
        Outlier scores with shape `(n_instances,)`. The higher the score, the more \
        anomalous the instance.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        x_backend = self.backend._to_backend_dtype(x)
        scores = self.backend.score(x_backend)
        return self.backend._to_frontend_dtype(scores)

    @catch_error('NotFittedError')
    def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
        """Infer the threshold for the PCA detector.

        The threshold is chosen so that `fpr` proportion of the reference data `x`
        would be incorrectly classified as outliers.

        Parameters
        ----------
        x
            Reference data used to infer the threshold.
        fpr
            False positive rate used to infer the threshold. The proportion of
            instances in `x` incorrectly classified as outliers; must lie in
            ``(0, 1)``.

        Raises
        ------
        ValueError
            Raised if `fpr` is not in ``(0, 1)``.
        NotFittedError
            If called before detector has been fit.
        """
        x_backend = self.backend._to_backend_dtype(x)
        self.backend.infer_threshold(x_backend, fpr)

    @catch_error('NotFittedError')
    def predict(self, x: np.ndarray) -> Dict[str, Any]:
        """Predict whether the instances in `x` are outliers or not.

        Scores the instances in `x`; if a threshold has been inferred the outlier
        labels and p-values are returned as well.

        Parameters
        ----------
        x
            Data to predict. The shape of `x` should be `(n_instances, n_features)`.

        Returns
        -------
        Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores \
        and, if threshold inference was performed, the threshold value, outlier \
        labels and p-vals. 'meta' contains information about the detector.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        backend_outputs = self.backend.predict(self.backend._to_backend_dtype(x))
        result = outlier_prediction_dict()
        result['data'].update(self.backend._to_frontend_dtype(backend_outputs))
        result['meta'].update(
            name=self.__class__.__name__,
            detector_type='outlier',
            online=False,
            version=__version__,
        )
        return result
| 8,089 | 37.160377 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_knn.py | from typing import Callable, Union, Optional, Dict, Any, List, Tuple
from typing import TYPE_CHECKING
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.exceptions import _catch_error as catch_error
from alibi_detect.od.base import TransformProtocol, TransformProtocolType
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin
from alibi_detect.od.pytorch import KNNTorch, Ensembler
from alibi_detect.od.base import get_aggregator, get_normalizer, NormalizerLiterals, AggregatorLiterals
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
if TYPE_CHECKING:
import torch
# Registry of available compute backends: maps the (lower-cased) backend name
# to the (detector implementation, ensembler implementation) class pair.
backends = {
    'pytorch': (KNNTorch, Ensembler)
}
class KNN(BaseDetector, FitMixin, ThresholdMixin):
    def __init__(
        self,
        k: Union[int, np.ndarray, List[int], Tuple[int]],
        kernel: Optional[Callable] = None,
        normalizer: Optional[Union[TransformProtocolType, NormalizerLiterals]] = 'PValNormalizer',
        aggregator: Union[TransformProtocol, AggregatorLiterals] = 'AverageAggregator',
        backend: Literal['pytorch'] = 'pytorch',
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ) -> None:
        """
        k-Nearest Neighbors (kNN) outlier detector.

        The kNN detector is a non-parametric method for outlier detection. The detector scores each instance
        based on the distance to its neighbors. Instances with a large distance to their neighbors are more
        likely to be outliers.

        The detector can be initialized with `k` a single value or an array of values. If `k` is a single value then
        the outlier score is the distance/kernel similarity to the k-th nearest neighbor. If `k` is an array of
        values then the outlier score is the distance/kernel similarity to each of the specified `k` neighbors.
        In the latter case, an `aggregator` must be specified to aggregate the scores.

        Note that, in the multiple k case, a normalizer can be provided. If a normalizer is passed then it is fit in
        the `infer_threshold` method and so this method must be called before the `predict` method. If this is not
        done an exception is raised. If `k` is a single value then the predict method can be called without first
        calling `infer_threshold` but only scores will be returned and not outlier predictions.

        Parameters
        ----------
        k
            Number of nearest neighbors to compute distance to. `k` can be a single value or
            an array of integers. If an array is passed, an aggregator is required to aggregate
            the scores. If `k` is a single value the outlier score is the distance/kernel
            similarity to the `k`-th nearest neighbor. If `k` is a list then it returns the
            distance/kernel similarity to each of the specified `k` neighbors.
        kernel
            Kernel function to use for outlier detection. If ``None``, `torch.cdist` is used.
            Otherwise if a kernel is specified then instead of using `torch.cdist` the kernel
            defines the k nearest neighbor distance.
        normalizer
            Normalizer to use for outlier detection. If ``None``, no normalization is applied.
            For a list of available normalizers, see :mod:`alibi_detect.od.pytorch.ensemble`.
        aggregator
            Aggregator to use for outlier detection. Can be set to ``None`` if `k` is a single
            value. For a list of available aggregators, see :mod:`alibi_detect.od.pytorch.ensemble`.
        backend
            Backend used for outlier detection. Defaults to ``'pytorch'``. Options are ``'pytorch'``.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
            ``torch.device``.

        Raises
        ------
        ValueError
            If `k` is an array and `aggregator` is None.
        NotImplementedError
            If choice of `backend` is not implemented.
        """
        super().__init__()

        backend_str: str = backend.lower()
        BackendValidator(
            backend_options={'pytorch': ['pytorch']},
            construct_name=self.__class__.__name__
        ).verify_backend(backend_str)

        # Bug fix: index the registry with the normalized (lower-cased) name.
        # The validator above accepts case variants such as 'PyTorch', which
        # previously passed validation but raised a KeyError on this lookup
        # because the original (non-lowered) `backend` string was used.
        backend_cls, ensembler_cls = backends[backend_str]
        ensembler = None

        # Multiple-k mode requires an aggregator to combine per-k scores.
        multi_k = isinstance(k, (list, np.ndarray, tuple))
        if aggregator is None and multi_k:
            raise ValueError('If `k` is a `np.ndarray`, `list` or `tuple`, '
                             'the `aggregator` argument cannot be ``None``.')

        if multi_k:
            ensembler = ensembler_cls(
                normalizer=get_normalizer(normalizer),
                aggregator=get_aggregator(aggregator)
            )

        self.backend = backend_cls(k, kernel=kernel, ensembler=ensembler, device=device)

        # set metadata
        self.meta['detector_type'] = 'outlier'
        self.meta['data_type'] = 'numeric'
        self.meta['online'] = False

    def fit(self, x_ref: np.ndarray) -> None:
        """Fit the detector on reference data.

        Parameters
        ----------
        x_ref
            Reference data used to fit the detector.
        """
        self.backend.fit(self.backend._to_backend_dtype(x_ref))

    @catch_error('NotFittedError')
    @catch_error('ThresholdNotInferredError')
    def score(self, x: np.ndarray) -> np.ndarray:
        """Score `x` instances using the detector.

        Computes the k nearest neighbor distance/kernel similarity for each instance in `x`. If `k` is a single
        value then this is the score otherwise if `k` is an array of values then the score is aggregated using
        the ensembler.

        Parameters
        ----------
        x
            Data to score. The shape of `x` should be `(n_instances, n_features)`.

        Returns
        -------
        Outlier scores. The shape of the scores is `(n_instances,)`. The higher the score, the more anomalous the \
        instance.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        ThresholdNotInferredError
            If k is a list and a threshold was not inferred.
        """
        score = self.backend.score(self.backend._to_backend_dtype(x))
        # In multiple-k mode the ensembler normalizes/aggregates the per-k scores.
        score = self.backend._ensembler(score)
        return self.backend._to_frontend_dtype(score)

    @catch_error('NotFittedError')
    def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
        """Infer the threshold for the kNN detector.

        The threshold is computed so that the outlier detector would incorrectly classify `fpr` proportion of the
        reference data as outliers.

        Parameters
        ----------
        x
            Reference data used to infer the threshold.
        fpr
            False positive rate used to infer the threshold. The false positive rate is the proportion of
            instances in `x` that are incorrectly classified as outliers. The false positive rate should
            be in the range ``(0, 1)``.

        Raises
        ------
        ValueError
            Raised if `fpr` is not in ``(0, 1)``.
        NotFittedError
            If called before detector has been fit.
        """
        self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)

    @catch_error('NotFittedError')
    @catch_error('ThresholdNotInferredError')
    def predict(self, x: np.ndarray) -> Dict[str, Any]:
        """Predict whether the instances in `x` are outliers or not.

        Scores the instances in `x` and if the threshold was inferred, returns the outlier labels and p-values as well.

        Parameters
        ----------
        x
            Data to predict. The shape of `x` should be `(n_instances, n_features)`.

        Returns
        -------
        Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores. If threshold inference was \
        performed, 'data' also contains the threshold value, outlier labels and p-vals . The shape of the scores is \
        `(n_instances,)`. The higher the score, the more anomalous the instance. 'meta' contains information about \
        the detector.

        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        ThresholdNotInferredError
            If k is a list and a threshold was not inferred.
        """
        outputs = self.backend.predict(self.backend._to_backend_dtype(x))
        output = outlier_prediction_dict()
        output['data'] = {
            **output['data'],
            **self.backend._to_frontend_dtype(outputs)
        }
        output['meta'] = {
            **output['meta'],
            'name': self.__class__.__name__,
            'detector_type': 'outlier',
            'online': False,
            'version': __version__,
        }
        return output
| 9,117 | 40.634703 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/llr.py | from functools import partial
import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow_probability.python.distributions.distribution import Distribution
from typing import Callable, Dict, Tuple, Union
from alibi_detect.models.tensorflow.pixelcnn import PixelCNN
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils.tensorflow.perturbation import mutate_categorical
from alibi_detect.utils._types import OptimizerTF
logger = logging.getLogger(__name__)
def build_model(dist: Union[Distribution, PixelCNN], input_shape: tuple = None, filepath: str = None) \
        -> Tuple[tf.keras.Model, Union[Distribution, PixelCNN]]:
    """
    Create a tf.keras.Model from a TF distribution.

    The resulting model maps an input batch to its log probability under `dist`
    and registers the negative mean log probability as the training loss, so the
    model can be compiled and fit directly as a density estimator.

    Parameters
    ----------
    dist
        TensorFlow distribution.
    input_shape
        Input shape of the model.
    filepath
        Optional file to load pretrained model weights from.

    Returns
    -------
    Tuple of the constructed tf.keras.Model and the distribution it wraps.
    """
    x_in = Input(shape=input_shape)
    log_prob = dist.log_prob(x_in)
    model = Model(inputs=x_in, outputs=log_prob)
    # Maximum likelihood training: minimise the negative mean log probability.
    model.add_loss(-tf.reduce_mean(log_prob))
    if isinstance(filepath, str):
        model.load_weights(filepath)
    return model, dist
class LLR(BaseDetector, FitMixin, ThresholdMixin):
    def __init__(self,
                 threshold: float = None,
                 model: Union[tf.keras.Model, Distribution, PixelCNN] = None,
                 model_background: Union[tf.keras.Model, Distribution, PixelCNN] = None,
                 log_prob: Callable = None,
                 sequential: bool = False,
                 data_type: str = None
                 ) -> None:
        """
        Likelihood Ratios for Out-of-Distribution Detection. Ren, J. et al. NeurIPS 2019.
        https://arxiv.org/abs/1906.02845

        Parameters
        ----------
        threshold
            Threshold used for the likelihood ratio (LLR) to determine outliers.
        model
            Generative model, defaults to PixelCNN.
        model_background
            Optional model for the background. Only needed if it is different from `model`.
        log_prob
            Function used to evaluate log probabilities under the model
            if the model does not have a `log_prob` function.
        sequential
            Whether the data is sequential. Used to create targets during training.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()

        if threshold is None:
            logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')

        self.has_log_prob = True if hasattr(model, 'log_prob') else False
        self.sequential = sequential
        self.log_prob = log_prob
        self.threshold = threshold

        # semantic model trained on original data
        self.dist_s = model
        # background model trained on perturbed data; if no explicit background
        # model is given, clone the semantic model (distributions expose
        # `.copy()`, plain Keras models need `clone_model`).
        if model_background is None:
            try:
                self.dist_b = model.copy()
            except AttributeError:
                self.dist_b = tf.keras.models.clone_model(model)
        else:
            self.dist_b = model_background

        # set metadata
        self.meta['detector_type'] = 'outlier'
        self.meta['data_type'] = data_type
        self.meta['online'] = False

    def fit(self,
            X: np.ndarray,
            mutate_fn: Callable = mutate_categorical,
            mutate_fn_kwargs: dict = {'rate': .2, 'seed': 0, 'feature_range': (0, 255)},
            mutate_batch_size: int = int(1e10),
            loss_fn: tf.keras.losses = None,
            loss_fn_kwargs: dict = None,
            optimizer: OptimizerTF = tf.keras.optimizers.Adam,
            epochs: int = 20,
            batch_size: int = 64,
            verbose: bool = True,
            log_metric: Tuple[str, "tf.keras.metrics"] = None,
            callbacks: tf.keras.callbacks = None
            ) -> None:
        """
        Train semantic and background generative models.

        Parameters
        ----------
        X
            Training batch.
        mutate_fn
            Mutation function used to generate the background dataset.
        mutate_fn_kwargs
            Kwargs for the mutation function used to generate the background dataset.
            Default values set for an image dataset.
        mutate_batch_size
            Batch size used to generate the mutations for the background dataset.
        loss_fn
            Loss function used for training.
        loss_fn_kwargs
            Kwargs for loss function.
        optimizer
            Optimizer used for training.
        epochs
            Number of training epochs.
        batch_size
            Batch size used for training.
        verbose
            Whether to print training progress.
        log_metric
            Additional metrics whose progress will be displayed if verbose equals True.
        callbacks
            Callbacks used during training.
        """
        input_shape = X.shape[1:]
        optimizer = optimizer() if isinstance(optimizer, type) else optimizer
        # Separate into two separate optimizers, one for semantic model and one for background model
        optimizer_s = optimizer
        optimizer_b = optimizer.__class__.from_config(optimizer.get_config())

        # training arguments
        kwargs = {'epochs': epochs,
                  'batch_size': batch_size,
                  'verbose': verbose,
                  'callbacks': callbacks}

        # create background data by perturbing the training data
        mutate_fn = partial(mutate_fn, **mutate_fn_kwargs)
        X_back = predict_batch(X, mutate_fn, batch_size=mutate_batch_size, dtype=X.dtype)

        # prepare sequential data: targets are the inputs shifted by one step
        if self.sequential and not self.has_log_prob:
            y, y_back = X[:, 1:], X_back[:, 1:]  # type: ignore
            X, X_back = X[:, :-1], X_back[:, :-1]  # type: ignore
        else:
            y, y_back = None, None

        # check if model needs to be built (distribution without a Keras wrapper)
        use_build = True if self.has_log_prob and not isinstance(self.dist_s, tf.keras.Model) else False

        if use_build:
            # build and train semantic model
            self.model_s = build_model(self.dist_s, input_shape)[0]
            self.model_s.compile(optimizer=optimizer_s)
            self.model_s.fit(X, **kwargs)
            # build and train background model
            self.model_b = build_model(self.dist_b, input_shape)[0]
            self.model_b.compile(optimizer=optimizer_b)
            self.model_b.fit(X_back, **kwargs)
        else:
            # update training arguments
            kwargs.update({
                'loss_fn_kwargs': loss_fn_kwargs,
                'log_metric': log_metric
            })

            # train semantic model
            args = [self.dist_s, loss_fn, X]
            kwargs.update({'y_train': y, 'optimizer': optimizer_s})
            trainer(*args, **kwargs)

            # train background model
            args = [self.dist_b, loss_fn, X_back]
            kwargs.update({'y_train': y_back, 'optimizer': optimizer_b})
            trainer(*args, **kwargs)

    def infer_threshold(self,
                        X: np.ndarray,
                        outlier_type: str = 'instance',
                        threshold_perc: float = 95.,
                        batch_size: int = int(1e10)
                        ) -> None:
        """
        Update LLR threshold by a value inferred from the percentage of instances
        considered to be outliers in a sample of the dataset.

        Parameters
        ----------
        X
            Batch of instances.
        outlier_type
            Predict outliers at the 'feature' or 'instance' level.
        threshold_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.
        batch_size
            Batch size for the generative model evaluations.
        """
        # compute outlier scores
        fscore, iscore = self.score(X, batch_size=batch_size)
        if outlier_type == 'feature':
            outlier_score = fscore
        elif outlier_type == 'instance':
            outlier_score = iscore
        else:
            raise ValueError('`outlier_score` needs to be either `feature` or `instance`.')

        # update threshold
        self.threshold = np.percentile(outlier_score, threshold_perc)

    def logp(self, dist, X: np.ndarray, return_per_feature: bool = False, batch_size: int = int(1e10)) \
            -> np.ndarray:
        """
        Compute log probability of a batch of instances under the generative model.

        Used when the model exposes its own `log_prob` function.

        Parameters
        ----------
        dist
            Distribution of the model.
        X
            Batch of instances.
        return_per_feature
            Return log probability per feature.
        batch_size
            Batch size for the generative model evaluations.

        Returns
        -------
        Log probabilities.
        """
        logp_fn = partial(dist.log_prob, return_per_feature=return_per_feature)
        # TODO: TBD: can this be any of the other types from predict_batch? i.e. tf.Tensor or tuple
        return predict_batch(X, logp_fn, batch_size=batch_size)  # type: ignore[return-value]

    def logp_alt(self, model: tf.keras.Model, X: np.ndarray, return_per_feature: bool = False,
                 batch_size: int = int(1e10)) -> np.ndarray:
        """
        Compute log probability of a batch of instances using the log_prob function
        defined by the user.

        Parameters
        ----------
        model
            Trained model.
        X
            Batch of instances.
        return_per_feature
            Return log probability per feature.
        batch_size
            Batch size for the generative model evaluations.

        Returns
        -------
        Log probabilities.
        """
        # For sequential data the target is the input shifted by one step.
        if self.sequential:
            y, X = X[:, 1:], X[:, :-1]
        else:
            y = X.copy()
        y_preds = predict_batch(X, model, batch_size=batch_size)
        logp = self.log_prob(y, y_preds).numpy()
        if return_per_feature:
            return logp
        else:
            # average over all non-batch axes to obtain instance-level values
            axis = tuple(np.arange(len(logp.shape))[1:])
            return np.mean(logp, axis=axis)

    def llr(self, X: np.ndarray, return_per_feature: bool, batch_size: int = int(1e10)) -> np.ndarray:
        """
        Compute likelihood ratios.

        The ratio is the difference of log probabilities under the semantic and
        the background models.

        Parameters
        ----------
        X
            Batch of instances.
        return_per_feature
            Return likelihood ratio per feature.
        batch_size
            Batch size for the generative model evaluations.

        Returns
        -------
        Likelihood ratios.
        """
        logp_fn = self.logp if not isinstance(self.log_prob, Callable) else self.logp_alt  # type: ignore
        logp_s = logp_fn(self.dist_s, X, return_per_feature=return_per_feature, batch_size=batch_size)
        logp_b = logp_fn(self.dist_b, X, return_per_feature=return_per_feature, batch_size=batch_size)
        return logp_s - logp_b

    def feature_score(self, X: np.ndarray, batch_size: int = int(1e10)) -> np.ndarray:
        """ Feature-level negative likelihood ratios. """
        return - self.llr(X, True, batch_size=batch_size)

    def instance_score(self, X: np.ndarray, batch_size: int = int(1e10)) -> np.ndarray:
        """ Instance-level negative likelihood ratios. """
        return - self.llr(X, False, batch_size=batch_size)

    def score(self, X: np.ndarray, batch_size: int = int(1e10)) -> Tuple[np.ndarray, np.ndarray]:
        """
        Feature-level and instance-level outlier scores.
        The scores are equal to the negative likelihood ratios.
        """
        fscore = self.feature_score(X, batch_size=batch_size)
        iscore = self.instance_score(X, batch_size=batch_size)
        return fscore, iscore

    def predict(self,
                X: np.ndarray,
                outlier_type: str = 'instance',
                batch_size: int = int(1e10),
                return_feature_score: bool = True,
                return_instance_score: bool = True) \
            -> Dict[str, Dict]:
        """
        Predict whether instances are outliers or not.

        Parameters
        ----------
        X
            Batch of instances.
        outlier_type
            Predict outliers at the 'feature' or 'instance' level.
        batch_size
            Batch size used when making predictions with the generative model.
        return_feature_score
            Whether to return feature level outlier scores.
        return_instance_score
            Whether to return instance level outlier scores.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the outlier predictions and both feature and instance level outlier scores.
        """
        # compute outlier scores
        fscore, iscore = self.score(X, batch_size=batch_size)
        if outlier_type == 'feature':
            outlier_score = fscore
        elif outlier_type == 'instance':
            outlier_score = iscore
        else:
            raise ValueError('`outlier_score` needs to be either `feature` or `instance`.')

        # values above threshold are outliers
        outlier_pred = (outlier_score > self.threshold).astype(int)

        # populate output dict
        od = outlier_prediction_dict()
        od['meta'] = self.meta
        od['data']['is_outlier'] = outlier_pred
        if return_feature_score:
            od['data']['feature_score'] = fscore
        if return_instance_score:
            od['data']['instance_score'] = iscore
        return od
| 14,091 | 36.280423 | 109 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/aegmm.py | import logging
import numpy as np
import tensorflow as tf
from typing import Callable, Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import AEGMM, eucl_cosim_features
from alibi_detect.models.tensorflow.gmm import gmm_energy, gmm_params
from alibi_detect.models.tensorflow.losses import loss_aegmm
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
logger = logging.getLogger(__name__)
class OutlierAEGMM(BaseDetector, FitMixin, ThresholdMixin):
    def __init__(self,
                 threshold: float = None,
                 aegmm: tf.keras.Model = None,
                 encoder_net: tf.keras.Model = None,
                 decoder_net: tf.keras.Model = None,
                 gmm_density_net: tf.keras.Model = None,
                 n_gmm: int = None,
                 recon_features: Callable = eucl_cosim_features,
                 data_type: str = None
                 ) -> None:
        """
        AEGMM-based outlier detector.

        Parameters
        ----------
        threshold
            Threshold used for outlier score to determine outliers.
        aegmm
            A trained tf.keras model if available.
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class if no 'aegmm' is specified.
        decoder_net
            Layers for the decoder wrapped in a tf.keras.Sequential class if no 'aegmm' is specified.
        gmm_density_net
            Layers for the GMM network wrapped in a tf.keras.Sequential class.
        n_gmm
            Number of components in GMM.
        recon_features
            Function to extract features from the reconstructed instance by the decoder.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()

        if threshold is None:
            logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')

        self.threshold = threshold

        # check if model can be loaded, otherwise initialize AEGMM model
        if isinstance(aegmm, tf.keras.Model):
            self.aegmm = aegmm
        elif (isinstance(encoder_net, tf.keras.Sequential) and
              isinstance(decoder_net, tf.keras.Sequential) and
              isinstance(gmm_density_net, tf.keras.Sequential)):
            self.aegmm = AEGMM(encoder_net, decoder_net, gmm_density_net, n_gmm, recon_features)
        else:
            raise TypeError('No valid format detected for `aegmm` (tf.keras.Model) '
                            'or `encoder_net`, `decoder_net` and `gmm_density_net` (tf.keras.Sequential).')

        # set metadata
        self.meta['detector_type'] = 'outlier'
        self.meta['data_type'] = data_type
        self.meta['online'] = False

        # GMM parameters; set after training in `fit`.
        self.phi, self.mu, self.cov, self.L, self.log_det_cov = None, None, None, None, None

    def fit(self,
            X: np.ndarray,
            loss_fn: tf.keras.losses = loss_aegmm,
            w_energy: float = .1,
            w_cov_diag: float = .005,
            optimizer: OptimizerTF = tf.keras.optimizers.Adam,
            epochs: int = 20,
            batch_size: int = 64,
            verbose: bool = True,
            log_metric: Tuple[str, "tf.keras.metrics"] = None,
            callbacks: tf.keras.callbacks = None,
            ) -> None:
        """
        Train AEGMM model.

        Parameters
        ----------
        X
            Training batch.
        loss_fn
            Loss function used for training.
        w_energy
            Weight on sample energy loss term if default `loss_aegmm` loss fn is used.
        w_cov_diag
            Weight on covariance regularizing loss term if default `loss_aegmm` loss fn is used.
        optimizer
            Optimizer used for training.
        epochs
            Number of training epochs.
        batch_size
            Batch size used for training.
        verbose
            Whether to print training progress.
        log_metric
            Additional metrics whose progress will be displayed if verbose equals True.
        callbacks
            Callbacks used during training.
        """
        # train arguments
        args = [self.aegmm, loss_fn, X]
        optimizer = optimizer() if isinstance(optimizer, type) else optimizer
        kwargs = {'optimizer': optimizer,
                  'epochs': epochs,
                  'batch_size': batch_size,
                  'verbose': verbose,
                  'log_metric': log_metric,
                  'callbacks': callbacks,
                  'loss_fn_kwargs': {'w_energy': w_energy,
                                     'w_cov_diag': w_cov_diag}
                  }

        # train
        trainer(*args, **kwargs)

        # set GMM parameters from the trained encodings and mixture assignments
        x_recon, z, gamma = self.aegmm(X)
        self.phi, self.mu, self.cov, self.L, self.log_det_cov = gmm_params(z, gamma)

    def infer_threshold(self,
                        X: np.ndarray,
                        threshold_perc: float = 95.,
                        batch_size: int = int(1e10)
                        ) -> None:
        """
        Update threshold by a value inferred from the percentage of instances considered to be
        outliers in a sample of the dataset.

        Parameters
        ----------
        X
            Batch of instances.
        threshold_perc
            Percentage of X considered to be normal based on the outlier score.
        batch_size
            Batch size used when making predictions with the AEGMM.
        """
        # compute outlier scores
        iscore = self.score(X, batch_size=batch_size)

        # update threshold
        self.threshold = np.percentile(iscore, threshold_perc)

    def score(self, X: np.ndarray, batch_size: int = int(1e10)) -> np.ndarray:
        """
        Compute outlier scores.

        The score is the sample energy of the instance's encoding under the
        fitted Gaussian mixture.

        Parameters
        ----------
        X
            Batch of instances to analyze.
        batch_size
            Batch size used when making predictions with the AEGMM.

        Returns
        -------
        Array with outlier scores for each instance in the batch.
        """
        _, z, _ = predict_batch(X, self.aegmm, batch_size=batch_size)
        energy, _ = gmm_energy(z, self.phi, self.mu, self.cov, self.L, self.log_det_cov, return_mean=False)
        return energy.numpy()

    def predict(self,
                X: np.ndarray,
                batch_size: int = int(1e10),
                return_instance_score: bool = True) \
            -> Dict[str, Dict]:
        """
        Compute outlier scores and transform into outlier predictions.

        Parameters
        ----------
        X
            Batch of instances.
        batch_size
            Batch size used when making predictions with the AEGMM.
        return_instance_score
            Whether to return instance level outlier scores.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the outlier predictions and instance level outlier scores.
        """
        # compute outlier scores
        iscore = self.score(X, batch_size=batch_size)

        # values above threshold are outliers
        outlier_pred = (iscore > self.threshold).astype(int)

        # populate output dict
        od = outlier_prediction_dict()
        od['meta'] = self.meta
        od['data']['is_outlier'] = outlier_pred
        if return_instance_score:
            od['data']['instance_score'] = iscore
        return od
| 7,829 | 35.933962 | 107 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/ae.py | import logging
import numpy as np
import tensorflow as tf
from typing import Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import AE
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
logger = logging.getLogger(__name__)
class OutlierAE(BaseDetector, FitMixin, ThresholdMixin):
    def __init__(self,
                 threshold: float = None,
                 ae: tf.keras.Model = None,
                 encoder_net: tf.keras.Model = None,
                 decoder_net: tf.keras.Model = None,
                 data_type: str = None
                 ) -> None:
        """
        AE-based outlier detector.

        Parameters
        ----------
        threshold
            Threshold used for outlier score to determine outliers.
        ae
            A trained tf.keras model if available.
        encoder_net
            Layers for the encoder wrapped in a tf.keras.Sequential class if no 'ae' is specified.
        decoder_net
            Layers for the decoder wrapped in a tf.keras.Sequential class if no 'ae' is specified.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()

        if threshold is None:
            logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')

        self.threshold = threshold

        # check if model can be loaded, otherwise initialize AE model
        if isinstance(ae, tf.keras.Model):
            self.ae = ae
        elif isinstance(encoder_net, tf.keras.Sequential) and isinstance(decoder_net, tf.keras.Sequential):
            self.ae = AE(encoder_net, decoder_net)
        else:
            raise TypeError('No valid format detected for `ae` (tf.keras.Model) '
                            'or `encoder_net`, `decoder_net` (tf.keras.Sequential).')

        # set metadata
        self.meta['detector_type'] = 'outlier'
        self.meta['data_type'] = data_type
        self.meta['online'] = False

    def fit(self,
            X: np.ndarray,
            loss_fn: tf.keras.losses = tf.keras.losses.MeanSquaredError(),
            optimizer: OptimizerTF = tf.keras.optimizers.Adam,
            epochs: int = 20,
            batch_size: int = 64,
            verbose: bool = True,
            log_metric: Tuple[str, "tf.keras.metrics"] = None,
            callbacks: tf.keras.callbacks = None,
            ) -> None:
        """
        Train AE model.

        Parameters
        ----------
        X
            Training batch.
        loss_fn
            Loss function used for training.
        optimizer
            Optimizer used for training.
        epochs
            Number of training epochs.
        batch_size
            Batch size used for training.
        verbose
            Whether to print training progress.
        log_metric
            Additional metrics whose progress will be displayed if verbose equals True.
        callbacks
            Callbacks used during training.
        """
        # train arguments
        args = [self.ae, loss_fn, X]
        optimizer = optimizer() if isinstance(optimizer, type) else optimizer
        kwargs = {'optimizer': optimizer,
                  'epochs': epochs,
                  'batch_size': batch_size,
                  'verbose': verbose,
                  'log_metric': log_metric,
                  'callbacks': callbacks}

        # train
        trainer(*args, **kwargs)

    def infer_threshold(self,
                        X: np.ndarray,
                        outlier_type: str = 'instance',
                        outlier_perc: float = 100.,
                        threshold_perc: float = 95.,
                        batch_size: int = int(1e10)
                        ) -> None:
        """
        Update threshold by a value inferred from the percentage of instances considered to be
        outliers in a sample of the dataset.

        Parameters
        ----------
        X
            Batch of instances.
        outlier_type
            Predict outliers at the 'feature' or 'instance' level.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.
        threshold_perc
            Percentage of X considered to be normal based on the outlier score.
        batch_size
            Batch size used when making predictions with the autoencoder.
        """
        # compute outlier scores
        fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
        if outlier_type == 'feature':
            outlier_score = fscore
        elif outlier_type == 'instance':
            outlier_score = iscore
        else:
            raise ValueError('`outlier_score` needs to be either `feature` or `instance`.')

        # update threshold
        self.threshold = np.percentile(outlier_score, threshold_perc)

    def feature_score(self, X_orig: np.ndarray, X_recon: np.ndarray) -> np.ndarray:
        """
        Compute feature level outlier scores.

        The score is the squared reconstruction error per feature.

        Parameters
        ----------
        X_orig
            Batch of original instances.
        X_recon
            Batch of reconstructed instances.

        Returns
        -------
        Feature level outlier scores.
        """
        fscore = np.power(X_orig - X_recon, 2)
        return fscore

    def instance_score(self, fscore: np.ndarray, outlier_perc: float = 100.) -> np.ndarray:
        """
        Compute instance level outlier scores.

        The instance score is the mean of the top `outlier_perc` percent of the
        (sorted) feature level scores for that instance.

        Parameters
        ----------
        fscore
            Feature level outlier scores.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.

        Returns
        -------
        Instance level outlier scores.
        """
        fscore_flat = fscore.reshape(fscore.shape[0], -1).copy()
        # number of highest-scoring features kept per instance
        n_score_features = int(np.ceil(.01 * outlier_perc * fscore_flat.shape[1]))
        sorted_fscore = np.sort(fscore_flat, axis=1)
        sorted_fscore_perc = sorted_fscore[:, -n_score_features:]
        iscore = np.mean(sorted_fscore_perc, axis=1)
        return iscore

    def score(self, X: np.ndarray, outlier_perc: float = 100., batch_size: int = int(1e10)) \
            -> Tuple[np.ndarray, np.ndarray]:
        """
        Compute feature and instance level outlier scores.

        Parameters
        ----------
        X
            Batch of instances.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.
        batch_size
            Batch size used when making predictions with the autoencoder.

        Returns
        -------
        Feature and instance level outlier scores.
        """
        # reconstruct instances
        X_recon = predict_batch(X, self.ae, batch_size=batch_size)

        # compute feature and instance level scores
        fscore = self.feature_score(X, X_recon)  # type: ignore[arg-type]
        iscore = self.instance_score(fscore, outlier_perc=outlier_perc)

        return fscore, iscore

    def predict(self,
                X: np.ndarray,
                outlier_type: str = 'instance',
                outlier_perc: float = 100.,
                batch_size: int = int(1e10),
                return_feature_score: bool = True,
                return_instance_score: bool = True) \
            -> Dict[str, Dict]:
        """
        Predict whether instances are outliers or not.

        Parameters
        ----------
        X
            Batch of instances.
        outlier_type
            Predict outliers at the 'feature' or 'instance' level.
        outlier_perc
            Percentage of sorted feature level outlier scores used to predict instance level outlier.
        batch_size
            Batch size used when making predictions with the autoencoder.
        return_feature_score
            Whether to return feature level outlier scores.
        return_instance_score
            Whether to return instance level outlier scores.

        Returns
        -------
        Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the model's metadata.
        - ``'data'`` contains the outlier predictions and both feature and instance level outlier scores.
        """
        # compute outlier scores
        fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
        if outlier_type == 'feature':
            outlier_score = fscore
        elif outlier_type == 'instance':
            outlier_score = iscore
        else:
            raise ValueError('`outlier_score` needs to be either `feature` or `instance`.')

        # values above threshold are outliers
        outlier_pred = (outlier_score > self.threshold).astype(int)

        # populate output dict
        od = outlier_prediction_dict()
        od['meta'] = self.meta
        od['data']['is_outlier'] = outlier_pred
        if return_feature_score:
            od['data']['feature_score'] = fscore
        if return_instance_score:
            od['data']['instance_score'] = iscore
        return od
| 9,396 | 35.003831 | 109 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/_gmm.py | from typing import Union, Optional, Dict, Any, TYPE_CHECKING
import numpy as np
from alibi_detect.utils._types import Literal
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.base import BaseDetector, ThresholdMixin, FitMixin
from alibi_detect.od.pytorch import GMMTorch
from alibi_detect.od.sklearn import GMMSklearn
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
from alibi_detect.exceptions import _catch_error as catch_error
if TYPE_CHECKING:
import torch
# registry mapping the user-facing backend name (lower-case) to the
# backend implementation class used by the GMM detector below
backends = {
    'pytorch': GMMTorch,
    'sklearn': GMMSklearn
}
class GMM(BaseDetector, ThresholdMixin, FitMixin):
    def __init__(
        self,
        n_components: int = 1,
        backend: Literal['pytorch', 'sklearn'] = 'sklearn',
        device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
    ) -> None:
        """Gaussian Mixture Model (GMM) outlier detector.
        The gaussian mixture model outlier detector fits a mixture of gaussian distributions to the reference data.
        Test points are scored via the negative log-likelihood under the corresponding density function.
        We support two backends: ``'pytorch'`` and ``'sklearn'``. The ``'pytorch'`` backend allows for GPU acceleration
        and uses gradient descent to fit the GMM. We recommend using the ``'pytorch'`` backend for large datasets. The
        ``'sklearn'`` backend is a pure python implementation and is recommended for smaller datasets.
        Parameters
        ----------
        n_components:
            The number of mixture components. Defaults to ``1``.
        backend
            Backend used for outlier detection. Defaults to ``'sklearn'``. Options are ``'pytorch'`` and ``'sklearn'``.
        device
            Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
            passing either ``'cuda'``, ``'gpu'`` or ``'cpu'``. The device is only used if the ``'pytorch'`` backend is
            used. Defaults to ``None``.
        Raises
        ------
        NotImplementedError
            If choice of `backend` is not implemented.
        """
        super().__init__()
        backend_str: str = backend.lower()
        BackendValidator(
            backend_options={'pytorch': ['pytorch'], 'sklearn': ['sklearn']},
            construct_name=self.__class__.__name__
        ).verify_backend(backend_str)
        # Look up the backend class via the lower-cased name: the validator accepts
        # mixed-case values (e.g. 'PyTorch'), so indexing `backends` with the raw
        # `backend` value would raise a KeyError for those inputs.
        backend_cls = backends[backend_str]
        args: Dict[str, Any] = {'n_components': n_components}
        if backend_str == 'pytorch':
            args['device'] = device
        self.backend = backend_cls(**args)
        # set metadata
        self.meta['detector_type'] = 'outlier'
        self.meta['data_type'] = 'numeric'
        self.meta['online'] = False
    def fit(
        self,
        x_ref: np.ndarray,
        optimizer: Optional[str] = 'Adam',
        learning_rate: float = 0.1,
        max_epochs: Optional[int] = None,
        batch_size: Optional[int] = None,
        tol: float = 1e-3,
        n_iter_no_change: int = 25,
        n_init: int = 1,
        init_params: str = 'kmeans',
        verbose: int = 0,
    ) -> None:
        """Fit the detector on reference data.
        If the ``'pytorch'`` backend is used, the detector is fitted using gradient descent. This is the recommended
        backend for larger datasets.
        If the ``'sklearn'`` backend is used, the detector is fitted using the EM algorithm. The ``'sklearn'``
        backend is recommended for smaller datasets. For more information on the EM algorithm and the sklearn Gaussian
        Mixture Model, see `here <https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture>`_.  # noqa: E501
        Parameters
        ----------
        x_ref
            Reference data used to fit the detector.
        optimizer
            Optimizer used to fit the detector. Only used if the ``'pytorch'`` backend is used. Defaults to ``'Adam'``.
        learning_rate
            Learning rate used to fit the detector. Only used if the ``'pytorch'`` backend is used. Defaults to ``0.1``.
        max_epochs
            Maximum number of training epochs used to fit the detector. Used for both the ``'pytorch'`` and ``'sklearn'``
            backends. If the backend is ``'sklearn'``, the detector is fit using the EM algorithm and `max_epochs`
            defaults to ``100``. If the backend is ``'pytorch'``, the detector is fitted using gradient descent and
            `max_epochs` defaults to ``10``.
        batch_size
            Batch size used to fit the detector. Only used if the ``'pytorch'`` backend is used. Defaults to ``None``.
            If ``None``, the entire dataset is used for each gradient update.
        tol
            Convergence threshold used to fit the detector. Used for both ``'sklearn'`` and ``'pytorch'`` backends.
            Defaults to ``1e-3``.
        n_iter_no_change
            The number of iterations over which the loss must decrease by `tol` in order for optimization to continue.
            Only used if the ``'pytorch'`` backend is used.
        n_init
            Number of initializations used to fit the detector. Only used if the ``'sklearn'`` backend is used.
            Defaults to ``1``.
        init_params
            Initialization method used to fit the detector. Only used if the ``'sklearn'`` backend is used. Must be
            one of:
                'kmeans' : responsibilities are initialized using kmeans.
                'kmeans++' : responsibilities are initialized using kmeans++.
                'random' : responsibilities are initialized randomly.
                'random_from_data' : responsibilities are initialized randomly from the data.
            Defaults to ``'kmeans'``.
        verbose
            Verbosity level used to fit the detector. Used for both ``'sklearn'`` and ``'pytorch'`` backends. Defaults to ``0``.
        Returns
        -------
        Dictionary with fit results. The dictionary contains the following keys depending on the backend used:
            - converged: bool indicating whether EM algorithm converged.
            - n_iter: number of EM iterations performed. Only returned if `backend` is ``'sklearn'``.
            - n_epochs: number of gradient descent iterations performed. Only returned if `backend` is ``'pytorch'``.
            - lower_bound: log-likelihood lower bound.
        """
        # NOTE(review): the docstring documents a fit-results dict being returned
        # while the annotation says ``None`` -- confirm against the backends' ``fit``.
        # `locals()` is forwarded to the backend, which selects the kwargs it needs.
        return self.backend.fit(
            self.backend._to_backend_dtype(x_ref),
            **self.backend.format_fit_kwargs(locals())
        )
    @catch_error('NotFittedError')
    def score(self, x: np.ndarray) -> np.ndarray:
        """Score `x` instances using the detector.
        To score an instance, we compute the negative log-likelihood under the corresponding density function of
        the fitted gaussian mixture model.
        Parameters
        ----------
        x
            Data to score. The shape of `x` should be `(n_instances, n_features)`.
        Returns
        -------
        Outlier scores. The shape of the scores is `(n_instances,)`. The higher the score, the more anomalous the \
        instance.
        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        score = self.backend.score(self.backend._to_backend_dtype(x))
        return self.backend._to_frontend_dtype(score)
    @catch_error('NotFittedError')
    def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
        """Infer the threshold for the GMM detector.
        The threshold is computed so that the outlier detector would incorrectly classify `fpr` proportion of the
        reference data as outliers.
        Parameters
        ----------
        x
            Reference data used to infer the threshold.
        fpr
            False positive rate used to infer the threshold. The false positive rate is the proportion of
            instances in `x` that are incorrectly classified as outliers. The false positive rate should
            be in the range ``(0, 1)``.
        Raises
        ------
        ValueError
            Raised if `fpr` is not in ``(0, 1)``.
        NotFittedError
            If called before detector has been fit.
        """
        self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)
    @catch_error('NotFittedError')
    def predict(self, x: np.ndarray) -> Dict[str, Any]:
        """Predict whether the instances in `x` are outliers or not.
        Scores the instances in `x` and if the threshold was inferred, returns the outlier labels and p-values as well.
        Parameters
        ----------
        x
            Data to predict. The shape of `x` should be `(n_instances, n_features)`.
        Returns
        -------
        Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores. If threshold inference was \
        performed, 'data' also contains the threshold value, outlier labels and p-vals . The shape of the scores is \
        `(n_instances,)`. The higher the score, the more anomalous the instance. 'meta' contains information about \
        the detector.
        Raises
        ------
        NotFittedError
            If called before detector has been fit.
        """
        outputs = self.backend.predict(self.backend._to_backend_dtype(x))
        output = outlier_prediction_dict()
        output['data'] = {
            **output['data'],
            **self.backend._to_frontend_dtype(outputs)
        }
        output['meta'] = {
            **output['meta'],
            'name': self.__class__.__name__,
            'detector_type': 'outlier',
            'online': False,
            'version': __version__,
        }
        return output
| 9,837 | 41.042735 | 170 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_aegmm.py | from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierAEGMM
from alibi_detect.version import __version__
# parameter grid for OutlierAEGMM tests: one test case per combination
threshold = [None, 5.]
n_gmm = [1, 2]
w_energy = [.1, .5]
threshold_perc = [90.]
return_instance_score = [True, False]
tests = list(product(threshold, n_gmm, w_energy, threshold_perc, return_instance_score))
n_tests = len(tests)
# load and preprocess MNIST data
(X_train, _), (X_test, _) = tf.keras.datasets.mnist.load_data()
X = X_train.reshape(X_train.shape[0], -1)[:1000]  # only train on 1000 instances
X = X.astype(np.float32)
X /= 255
# flattened image dimensionality and latent size for the autoencoder
input_dim = X.shape[1]
latent_dim = 2
@pytest.fixture
def aegmm_params(request):
    """Resolve an indirect parametrize index to its parameter tuple."""
    idx = request.param
    return tests[idx]
@pytest.mark.parametrize('aegmm_params', list(range(n_tests)), indirect=True)
def test_aegmm(aegmm_params):
    """End-to-end check of OutlierAEGMM: init, fit, threshold inference, scoring and prediction."""
    # OutlierAEGMM parameters
    threshold, n_gmm, w_energy, threshold_perc, return_instance_score = aegmm_params
    # define encoder, decoder and GMM density net
    encoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(input_dim,)),
            Dense(128, activation=tf.nn.relu),
            Dense(latent_dim, activation=None)
        ]
    )
    decoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(latent_dim,)),
            Dense(128, activation=tf.nn.relu),
            Dense(input_dim, activation=tf.nn.sigmoid)
        ]
    )
    gmm_density_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(latent_dim + 2,)),
            Dense(10, activation=tf.nn.relu),
            Dense(n_gmm, activation=tf.nn.softmax)
        ]
    )
    # init OutlierAEGMM
    aegmm = OutlierAEGMM(
        threshold=threshold,
        encoder_net=encoder_net,
        decoder_net=decoder_net,
        gmm_density_net=gmm_density_net,
        n_gmm=n_gmm
    )
    assert aegmm.threshold == threshold
    assert aegmm.meta == {'name': 'OutlierAEGMM', 'detector_type': 'outlier', 'data_type': None,
                          'online': False, 'version': __version__}
    # fit OutlierAEGMM, infer threshold and compute scores
    aegmm.fit(X, w_energy=w_energy, epochs=5, batch_size=1000, verbose=False)
    aegmm.infer_threshold(X, threshold_perc=threshold_perc)
    energy = aegmm.score(X)
    # the inferred threshold should put roughly threshold_perc% of instances below it (+/- 5%)
    perc_score = 100 * (energy < aegmm.threshold).astype(int).sum() / energy.shape[0]
    assert threshold_perc + 5 > perc_score > threshold_perc - 5
    # make and check predictions
    od_preds = aegmm.predict(X, return_instance_score=return_instance_score)
    assert od_preds['meta'] == aegmm.meta
    assert od_preds['data']['is_outlier'].shape == (X.shape[0],)
    # outlier labels must agree with instance scores exceeding the threshold
    if return_instance_score:
        assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
                                                        > aegmm.threshold).astype(int).sum()
    else:
        assert od_preds['data']['instance_score'] is None
| 3,007 | 31.695652 | 96 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_llr.py | from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, LSTM
from alibi_detect.od import LLR
from alibi_detect.version import __version__
# model dimensions and synthetic data: train sequences use symbol 1 on even
# positions, test sequences use the out-of-distribution symbol 2
input_dim = 5
hidden_dim = 20
shape = (1000, 6)
X_train = np.zeros(shape, dtype=np.int32)
X_train[:, ::2] = 1
X_test = np.zeros(shape, dtype=np.int32)
X_test[:, ::2] = 2
# validation set mixes in- and out-of-distribution sequences for threshold inference
X_val = np.concatenate([X_train[:50], X_test[:50]])
def loss_fn(y: tf.Tensor, x: tf.Tensor) -> tf.Tensor:
    """Categorical cross-entropy between one-hot encoded targets `y` and logits `x`."""
    targets = tf.one_hot(tf.cast(y, tf.int32), input_dim)
    return tf.nn.softmax_cross_entropy_with_logits(targets, x, axis=-1)
def likelihood_fn(y: tf.Tensor, x: tf.Tensor) -> tf.Tensor:
    """Log-likelihood, defined as the negated cross-entropy loss."""
    loss = loss_fn(y, x)
    return -loss
# parameter grid for the LLR tests: one test case per combination
threshold = [None]
threshold_perc = [50.]
return_instance_score = [True, False]
return_feature_score = [True, False]
outlier_type = ['instance', 'feature']
tests = list(product(threshold, threshold_perc, return_instance_score,
                     return_feature_score, outlier_type))
n_tests = len(tests)
@pytest.fixture
def llr_params(request):
    """Resolve an indirect parametrize index to its parameter tuple."""
    idx = request.param
    return tests[idx]
@pytest.mark.parametrize('llr_params', list(range(n_tests)), indirect=True)
def test_llr(llr_params):
    """End-to-end check of the LLR detector: init, fit, threshold inference and prediction."""
    # LLR parameters
    threshold, threshold_perc, return_instance_score, return_feature_score, outlier_type = llr_params
    # define model and detector
    inputs = Input(shape=(shape[-1] - 1,), dtype=tf.int32)
    x = tf.one_hot(tf.cast(inputs, tf.int32), input_dim)
    x = LSTM(hidden_dim, return_sequences=True)(x)
    logits = Dense(input_dim, activation=None)(x)
    model = tf.keras.Model(inputs=inputs, outputs=logits)
    od = LLR(threshold=threshold, sequential=True, model=model, log_prob=likelihood_fn)
    assert od.threshold == threshold
    assert od.meta == {'name': 'LLR', 'detector_type': 'outlier', 'data_type': None,
                       'online': False, 'version': __version__}
    od.fit(
        X_train,
        loss_fn=loss_fn,
        mutate_fn_kwargs={'rate': .5, 'feature_range': (0, input_dim)},
        epochs=1,
        verbose=False
    )
    od.infer_threshold(X_val, threshold_perc=threshold_perc)
    # iscore_test = od.score(X_test)[1]
    # iscore_train = od.score(X_train)[1]
    # assert (iscore_test > iscore_train).all()
    od_preds = od.predict(X_test,
                          return_instance_score=return_instance_score,
                          return_feature_score=return_feature_score,
                          outlier_type=outlier_type)
    assert od_preds['meta'] == od.meta
    # outlier labels must be shape-consistent and agree with scores exceeding the threshold
    if outlier_type == 'instance':
        assert od_preds['data']['is_outlier'].shape == (X_test.shape[0],)
        if return_instance_score:
            assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
                                                            > od.threshold).astype(int).sum()
    elif outlier_type == 'feature':
        assert od_preds['data']['is_outlier'].shape == (X_test.shape[0], X_test.shape[1] - 1)
        if return_feature_score:
            assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['feature_score']
                                                            > od.threshold).astype(int).sum()
    # requested scores are returned; the others are None
    if return_feature_score:
        assert od_preds['data']['feature_score'].shape == (X_test.shape[0], X_test.shape[1] - 1)
    else:
        assert od_preds['data']['feature_score'] is None
    if return_instance_score:
        assert od_preds['data']['instance_score'].shape == (X_test.shape[0],)
    else:
        assert od_preds['data']['instance_score'] is None
| 3,587 | 34.524752 | 101 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_ae.py | from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierAE
from alibi_detect.version import __version__
# parameter grid for the OutlierAE tests: one test case per combination
threshold = [None, 5.]
threshold_perc = [90.]
return_instance_score = [True, False]
return_feature_score = [True, False]
outlier_perc = [50, 100]
outlier_type = ['instance', 'feature']
tests = list(product(threshold, threshold_perc, return_instance_score,
                     return_feature_score, outlier_perc, outlier_type))
n_tests = len(tests)
# load iris data
X, y = load_iris(return_X_y=True)
X = X.astype(np.float32)
# feature dimensionality and bottleneck size for the autoencoder
input_dim = X.shape[1]
encoding_dim = 1
@pytest.fixture
def ae_params(request):
    """Resolve an indirect parametrize index to its parameter tuple."""
    idx = request.param
    return tests[idx]
@pytest.mark.parametrize('ae_params', list(range(n_tests)), indirect=True)
def test_ae(ae_params):
    """End-to-end check of OutlierAE: init, fit, threshold inference, scoring and prediction."""
    # OutlierAE parameters
    threshold, threshold_perc, return_instance_score, return_feature_score, outlier_perc, outlier_type = ae_params
    # define encoder and decoder
    encoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(input_dim,)),
            Dense(5, activation=tf.nn.relu),
            Dense(encoding_dim, activation=None)
        ]
    )
    decoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(encoding_dim,)),
            Dense(5, activation=tf.nn.relu),
            Dense(input_dim, activation=tf.nn.sigmoid)
        ]
    )
    # init OutlierAE
    ae = OutlierAE(
        threshold=threshold,
        encoder_net=encoder_net,
        decoder_net=decoder_net
    )
    assert ae.threshold == threshold
    assert ae.meta == {'name': 'OutlierAE', 'detector_type': 'outlier', 'data_type': None,
                       'online': False, 'version': __version__}
    # fit OutlierAE, infer threshold and compute scores
    ae.fit(X, epochs=5, verbose=False)
    ae.infer_threshold(X, threshold_perc=threshold_perc)
    fscore, iscore = ae.score(X)
    # the inferred threshold should put roughly threshold_perc% of instances below it (+/- 5%)
    perc_score = 100 * (iscore < ae.threshold).astype(int).sum() / iscore.shape[0]
    assert threshold_perc + 5 > perc_score > threshold_perc - 5
    # make and check predictions
    od_preds = ae.predict(X,
                          return_instance_score=return_instance_score,
                          return_feature_score=return_feature_score,
                          outlier_type=outlier_type,
                          outlier_perc=outlier_perc
                          )
    assert od_preds['meta'] == ae.meta
    # outlier labels must be shape-consistent and agree with scores exceeding the threshold
    if outlier_type == 'instance':
        assert od_preds['data']['is_outlier'].shape == (X.shape[0],)
        if return_instance_score:
            assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
                                                            > ae.threshold).astype(int).sum()
    elif outlier_type == 'feature':
        assert od_preds['data']['is_outlier'].shape == X.shape
        if return_feature_score:
            assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['feature_score']
                                                            > ae.threshold).astype(int).sum()
    # requested scores are returned; the others are None
    if return_feature_score:
        assert od_preds['data']['feature_score'].shape == X.shape
    else:
        assert od_preds['data']['feature_score'] is None
    if return_instance_score:
        assert od_preds['data']['instance_score'].shape == (X.shape[0],)
    else:
        assert od_preds['data']['instance_score'] is None
| 3,537 | 33.349515 | 114 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_vae.py | from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierVAE
from alibi_detect.models.tensorflow.losses import elbo
from alibi_detect.version import __version__
# parameter grid for the OutlierVAE tests: one test case per combination
threshold = [None, 5.]
score_type = ['mse']
samples = [10]
loss_fn = [elbo, tf.keras.losses.mse]
threshold_perc = [90.]
return_instance_score = [True, False]
return_feature_score = [True, False]
outlier_perc = [50, 100]
outlier_type = ['instance', 'feature']
tests = list(product(threshold, score_type, samples, loss_fn, threshold_perc,
                     return_instance_score, return_feature_score, outlier_perc, outlier_type))
n_tests = len(tests)
# load iris data
X, y = load_iris(return_X_y=True)
X = X.astype(np.float32)
# feature dimensionality and latent size for the VAE
input_dim = X.shape[1]
latent_dim = 2
@pytest.fixture
def vae_params(request):
    """Resolve an indirect parametrize index to its parameter tuple."""
    idx = request.param
    return tests[idx]
@pytest.mark.parametrize('vae_params', list(range(n_tests)), indirect=True)
def test_vae(vae_params):
    """End-to-end check of OutlierVAE: init, fit, threshold inference, scoring and prediction."""
    # OutlierVAE parameters
    threshold, score_type, samples, loss_fn, threshold_perc, return_instance_score, \
        return_feature_score, outlier_perc, outlier_type = vae_params
    # define encoder and decoder
    encoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(input_dim,)),
            Dense(5, activation=tf.nn.relu),
            Dense(latent_dim, activation=None)
        ]
    )
    decoder_net = tf.keras.Sequential(
        [
            InputLayer(input_shape=(latent_dim,)),
            Dense(5, activation=tf.nn.relu),
            Dense(input_dim, activation=tf.nn.sigmoid)
        ]
    )
    # init OutlierVAE
    vae = OutlierVAE(
        threshold=threshold,
        score_type=score_type,
        encoder_net=encoder_net,
        decoder_net=decoder_net,
        latent_dim=latent_dim,
        samples=samples
    )
    assert vae.threshold == threshold
    assert vae.meta == {'name': 'OutlierVAE', 'detector_type': 'outlier', 'data_type': None,
                        'online': False, 'version': __version__}
    # fit OutlierVAE, infer threshold and compute scores
    vae.fit(X, loss_fn=loss_fn, epochs=5, verbose=False)
    vae.infer_threshold(X, threshold_perc=threshold_perc)
    fscore, iscore = vae.score(X)
    # the inferred threshold should put roughly threshold_perc% of instances below it (+/- 5%)
    perc_score = 100 * (iscore < vae.threshold).astype(int).sum() / iscore.shape[0]
    assert threshold_perc + 5 > perc_score > threshold_perc - 5
    # make and check predictions
    od_preds = vae.predict(X,
                           return_instance_score=return_instance_score,
                           return_feature_score=return_feature_score,
                           outlier_type=outlier_type,
                           outlier_perc=outlier_perc
                           )
    assert od_preds['meta'] == vae.meta
    # outlier labels must be shape-consistent and agree with scores exceeding the threshold
    if outlier_type == 'instance':
        assert od_preds['data']['is_outlier'].shape == (X.shape[0],)
        if return_instance_score:
            assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
                                                            > vae.threshold).astype(int).sum()
    elif outlier_type == 'feature':
        assert od_preds['data']['is_outlier'].shape == X.shape
        if return_feature_score:
            assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['feature_score']
                                                            > vae.threshold).astype(int).sum()
    # requested scores are returned; the others are None
    if return_feature_score:
        assert od_preds['data']['feature_score'].shape == X.shape
    else:
        assert od_preds['data']['feature_score'] is None
    if return_instance_score:
        assert od_preds['data']['instance_score'].shape == (X.shape[0],)
    else:
        assert od_preds['data']['instance_score'] is None
| 3,862 | 33.801802 | 94 | py |
alibi-detect | alibi-detect-master/alibi_detect/od/tests/test_ensemble.py | import pytest
import torch
from alibi_detect.od.pytorch import ensemble
from alibi_detect.exceptions import NotFittedError
def test_pval_normalizer():
    """Test the PValNormalizer
    - Test the PValNormalizer correctly normalizes data passed to it
    - Test the PValNormalizer throws the correct errors if not fit
    """
    normalizer = ensemble.PValNormalizer()
    x = torch.randn(3, 10)
    x_ref = torch.randn(64, 10)
    # unfit normalizer raises exception
    with pytest.raises(NotFittedError) as err:
        normalizer(x)
    assert err.value.args[0] == 'PValNormalizer has not been fit!'
    normalizer.fit(x_ref)
    x_norm = normalizer(x)
    # compute the p-values explicitly and compare to the normalizer
    # output.
    assert torch.all(0 < x_norm)
    assert torch.all(x_norm < 1)
    for i in range(3):
        for j in range(10):
            # add-one smoothed p-value: (#ref scores above x + 1) / (n_ref + 1)
            comp_pval = ((x_ref[:, j] > x[i][j]).to(torch.float32)).sum() + 1
            comp_pval /= (x_ref.shape[0] + 1)
            normalizer_pval = x_norm[i][j].to(torch.float32)
            assert torch.isclose(1 - comp_pval, normalizer_pval, atol=1e-4)
    # Test the scriptability of the normalizer
    normalizer = torch.jit.script(normalizer)
    x_norm_2 = normalizer(x)
    assert torch.all(x_norm_2 == x_norm)
def test_shift_and_scale_normalizer():
    """Test the ShiftAndScaleNormalizer
    - Test the ShiftAndScaleNormalizer correctly normalizes data passed to it
    - Test the ShiftAndScaleNormalizer throws the correct errors if not fit.
    """
    normalizer = ensemble.ShiftAndScaleNormalizer()
    # reference and test data drawn from the same N(2, 3^2) distribution
    x = torch.randn(3, 10) * 3 + 2
    x_ref = torch.randn(5000, 10) * 3 + 2
    # unfit normalizer raises exception
    with pytest.raises(NotFittedError) as err:
        normalizer(x)
    assert err.value.args[0] == 'ShiftAndScaleNormalizer has not been fit!'
    # test the normalizer correctly shifts and scales the data
    normalizer.fit(x_ref)
    x_norm = normalizer(x)
    # normalized scores should be approximately zero-mean, unit-variance
    assert torch.isclose(x_norm.mean(), torch.tensor(0.), atol=0.1)
    assert torch.isclose(x_norm.std(), torch.tensor(1.), atol=0.1)
    # Test the scriptability of the normalizer
    normalizer = torch.jit.script(normalizer)
    x_norm_2 = normalizer(x)
    assert torch.all(x_norm_2 == x_norm)
def test_average_aggregator():
    """Test the AverageAggregator
    - Test the AverageAggregator correctly aggregates data passed to it.
    - Test the AverageAggregator can be torch scripted
    """
    aggregator = ensemble.AverageAggregator()
    scores = torch.randn((3, 10))
    # test the aggregator correctly averages the scores across detectors (dim 1)
    aggregated_scores = aggregator(scores)
    assert torch.all(torch.isclose(aggregated_scores, scores.mean(dim=1)))
    assert aggregated_scores.shape == (3, )
    # test the scriptability of the aggregator
    aggregator = torch.jit.script(aggregator)
    aggregated_scores_2 = aggregator(scores)
    assert torch.all(aggregated_scores_2 == aggregated_scores)
def test_weighted_average_aggregator():
    """Test the AverageAggregator with weights
    - Test the AverageAggregator correctly aggregates data passed to it
    - Test the AverageAggregator throws an error if the weights are not valid
    - Test the AverageAggregator can be torch scripted
    """
    weights = abs(torch.randn((10)))
    with pytest.raises(ValueError) as err:
        aggregator = ensemble.AverageAggregator(weights=weights)
    assert err.value.args[0] == 'Weights must sum to 1.'
    # test the aggregator correctly weights the scores when computing the
    # average
    weights /= weights.sum()
    aggregator = ensemble.AverageAggregator(weights=weights)
    scores = torch.randn((3, 10))
    aggregated_scores = aggregator(scores)
    # `torch.allclose` returns a bool -- without `assert` the check silently
    # did nothing
    assert torch.allclose(aggregated_scores, weights @ scores.T)
    assert aggregated_scores.shape == (3, )
    # test the scriptability of the aggregator
    aggregator = torch.jit.script(aggregator)
    aggregated_scores_2 = aggregator(scores)
    assert torch.all(aggregated_scores_2 == aggregated_scores)
def test_topk_aggregator():
    """Test the TopKAggregator
    - Test the TopKAggregator correctly aggregates data passed to it
    - Test the TopKAggregator can be torch scripted
    """
    aggregator = ensemble.TopKAggregator(k=4)
    scores = torch.randn((3, 10))
    # test the aggregator correctly computes the top k scores
    aggregated_scores = aggregator(scores)
    assert aggregated_scores.shape == (3, )
    scores_sorted, _ = torch.sort(scores)
    # mean of the k=4 largest scores per row; `assert` was previously missing,
    # so this comparison never actually checked anything
    assert torch.allclose(scores_sorted[:, -4:].mean(dim=1), aggregated_scores)
    # test the scriptability of the aggregator
    aggregator = torch.jit.script(aggregator)
    aggregated_scores_2 = aggregator(scores)
    assert torch.all(aggregated_scores_2 == aggregated_scores)
def test_max_aggregator():
    """Test the MaxAggregator
    - Test the MaxAggregator correctly aggregates data passed to it
    - Test the MaxAggregator can be torch scripted
    """
    aggregator = ensemble.MaxAggregator()
    scores = torch.randn((3, 10))
    # test the aggregator correctly computes the max scores
    aggregated_scores = aggregator(scores)
    assert aggregated_scores.shape == (3, )
    max_vals, _ = scores.max(dim=1)
    # `assert` was previously missing, so this comparison never checked anything
    assert torch.all(max_vals == aggregated_scores)
    # test the scriptability of the aggregator
    aggregator = torch.jit.script(aggregator)
    aggregated_scores_2 = aggregator(scores)
    assert torch.all(aggregated_scores_2 == aggregated_scores)
def test_min_aggregator():
    """Test the MinAggregator
    - Test the MinAggregator correctly aggregates data passed to it
    - Test the MinAggregator can be torch scripted
    """
    aggregator = ensemble.MinAggregator()
    scores = torch.randn((3, 10))
    # test the aggregator correctly computes the min scores
    aggregated_scores = aggregator(scores)
    assert aggregated_scores.shape == (3, )
    min_vals, _ = scores.min(dim=1)
    # `assert` was previously missing, so this comparison never checked anything
    assert torch.all(min_vals == aggregated_scores)
    # test the scriptability of the aggregator
    aggregator = torch.jit.script(aggregator)
    aggregated_scores_2 = aggregator(scores)
    assert torch.all(aggregated_scores_2 == aggregated_scores)
@pytest.mark.parametrize('aggregator', ['AverageAggregator', 'MaxAggregator', 'MinAggregator', 'TopKAggregator'])
@pytest.mark.parametrize('normalizer', ['PValNormalizer', 'ShiftAndScaleNormalizer'])
def test_ensembler(aggregator, normalizer):
    """Test the Ensembler for each combination of aggregator and normalizer
    - Test the ensembler correctly aggregates and normalizes the scores
    - Test the ensembler can be torch scripted
    """
    # instantiate the components by name from the ensemble module
    aggregator = getattr(ensemble, aggregator)()
    normalizer = getattr(ensemble, normalizer)()
    ensembler = ensemble.Ensembler(aggregator=aggregator, normalizer=normalizer)
    x = torch.randn(3, 10)
    x_ref = torch.randn(64, 10)
    # test the ensembler correctly aggregates and normalizes the scores
    ensembler.fit(x_ref)
    x_norm = ensembler(x)
    # test the scriptability of the ensembler
    ensembler = torch.jit.script(ensembler)
    x_norm_2 = ensembler(x)
    assert torch.all(x_norm_2 == x_norm)
| 7,122 | 34.08867 | 113 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.