code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import ClientSide2 #custom package
import numpy as np
import argparse
import json
import os
import ClassifierFunctions2 as cf
from matplotlib import pyplot as plt
from builtins import input
# Initialize essential global variables
#URL = "" #you'll need me to send you the link
# Crystal-family labels, ordered from lowest to highest symmetry.
FAMILIES = ["triclinic","monoclinic","orthorhombic","tetragonal",
            "trigonal","hexagonal","cubic"]
# Fallback config locations used when the user does not supply their own.
DEFAULT_SESSION = os.path.join ("Sessions","session.json")
DEFAULT_USER = "user_profile.json"
SERVER_INFO = "server_gen2.json"
# list of three, one per level: how many candidate predictions to request
# at each level of the classification hierarchy.
prediction_per_level = [2, 3, 3]
# Keyword arguments forwarded to ClientSide2.Find_Peaks as peak-filtering
# settings.
FILTER_SETTINGS = { "max_numpeaks": 20,
                    "dspace_range" : [0.1,6],
                    "peak_threshold": 1,
                    "filter_size" : 15,
                    "passes" : 2
                  }
def build_parser():
    """Construct the command-line parser for the classification client.

    Returns:
        argparse.ArgumentParser exposing the optional ``--apikey`` and
        ``--session`` flags.
    """
    arg_parser = argparse.ArgumentParser()
    # This will be implemented as rollout broadens
    arg_parser.add_argument(
        '--apikey',
        type=str,
        dest='key',
        metavar='KEY',
        required=False,
        help='api key to securely access service')
    arg_parser.add_argument(
        '--session',
        dest='session',
        metavar='SESSION',
        required=False,
        default=None,
        help='Keep user preferences for multirun sessions')
    return arg_parser
def main():
    """Run one classification session: load session/user/server config,
    gather input profile files, extract diffraction peaks, and send each
    file to the remote classification service, appending results to CSV.
    """
    parser = build_parser()
    options = parser.parse_args()
    #print(options.session)

    # opens the user specified session
    if options.session:
        with open(os.path.join("Sessions",options.session),'r') as f:
            session = json.load(f)
    # opens the default session
    else:
        with open(DEFAULT_SESSION,'r') as f:
            session = json.load(f)

    # set variables from loaded session data
    # print(session)
    file_path = session["file_path"]
    output_file = session["output_file"]
    manual_peak_selection = session["manual_peak_selection"]
    known_family = session["known_family"]
    chemistry = session["chemistry"]
    diffraction = session["diffraction"]

    # Pick the prediction mode; only diffraction-based modes are supported.
    mode = ""
    if diffraction:
        if chemistry:
            mode="DiffChem"
        else:
            mode="DiffOnly"
    else:
        if chemistry:
            raise ValueError('Running chemistry only predictions is currently not implemented')
        else:
            raise ValueError('Invalid prediction type. Either diffraction or chemistry must be enabled')

    # When the crystal family is already known, only one top-level
    # prediction is requested (mutates the module-level list in place).
    if known_family and known_family=='yes':
        print('known family')
        crystal_family = session["crystal_family"]
        prediction_per_level[0] = 1
    else:
        crystal_family = None

    # Load user from provided path, [IN PROGRESS]
    if session["user_info"]:
        with open(session["user_info"],'r') as f:
            user_info = json.load(f)
    else:
        with open(DEFAULT_USER,'r') as f:
            user_info = json.load(f)
    with open(session["server_info"],'r') as f:
        server_info = json.load(f)
    if server_info['URL']:
        url = server_info['URL']
    else:
        raise ValueError('you need to have the server URL provided to you')

    chem_vec = cf.check_for_chemistry(session)

    # Determine if the path is a directory or a file
    if os.path.isdir(file_path):
        print("loading files from directory")
        file_paths = []
        # Walk the tree, skipping hidden files (names starting with '.').
        for dirpath,dirnames,fpath in os.walk(file_path):
            for path in fpath:
                if not path[0] == '.':
                    file_paths.append(os.path.join(dirpath,path))
        print("found {} files to load.".format(len(file_paths)))
    else:
        file_paths = [file_path]

    for f_path in file_paths:
        # Load Data from specified file (DM3, TIFF, CSV etc....)
        print("loading data from {}".format(f_path))
        image_data,scale = ClientSide2.Load_Profile(f_path)
        print("I successfully loaded the data")
        # print(scale)
        print("length",len(image_data))
        print("max",np.max(image_data))

        if diffraction:
            peak_locs,peaks_h = ClientSide2.Find_Peaks(image_data,scale, **FILTER_SETTINGS)
            # Choose which peaks to classify on
            if manual_peak_selection:
                peak_locs = cf.choose_peaks(peak_locs,peaks_h)
                #raise NotImplementedError
        else:
            peak_locs = []
            peaks_h = []

        #
        # print(peak_locs)
        # print(chem_vec)
        classificated = ClientSide2.Send_For_Classification(peak_locs, chem_vec, mode, crystal_family, user_info, url, prediction_per_level)
        classificated["file_name"] = f_path

        # update the user on the results before saving
        print(classificated)

        # write results out to the specified file
        if not os.path.exists("Results"):
            os.makedirs("Results")
        cf.write_to_csv(os.path.join("Results",output_file), classificated, prediction_per_level)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"json.load",
"argparse.ArgumentParser",
"ClassifierFunctions2.check_for_chemistry",
"os.makedirs",
"os.path.isdir",
"os.walk",
"os.path.exists",
"ClassifierFunctions2.choose_peaks",
"ClientSide2.Load_Profile",
"numpy.max",
"ClientSide2.Send_For_Classification",
"ClientSide2.Find_Peaks",
"os.... | [((412, 452), 'os.path.join', 'os.path.join', (['"""Sessions"""', '"""session.json"""'], {}), "('Sessions', 'session.json')\n", (424, 452), False, 'import os\n'), ((844, 869), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (867, 869), False, 'import argparse\n'), ((3159, 3190), 'ClassifierFunctions2.check_for_chemistry', 'cf.check_for_chemistry', (['session'], {}), '(session)\n', (3181, 3190), True, 'import ClassifierFunctions2 as cf\n'), ((3270, 3294), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (3283, 3294), False, 'import os\n'), ((2971, 2983), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2980, 2983), False, 'import json\n'), ((3404, 3422), 'os.walk', 'os.walk', (['file_path'], {}), '(file_path)\n', (3411, 3422), False, 'import os\n'), ((3860, 3892), 'ClientSide2.Load_Profile', 'ClientSide2.Load_Profile', (['f_path'], {}), '(f_path)\n', (3884, 3892), False, 'import ClientSide2\n'), ((4538, 4658), 'ClientSide2.Send_For_Classification', 'ClientSide2.Send_For_Classification', (['peak_locs', 'chem_vec', 'mode', 'crystal_family', 'user_info', 'url', 'prediction_per_level'], {}), '(peak_locs, chem_vec, mode,\n crystal_family, user_info, url, prediction_per_level)\n', (4573, 4658), False, 'import ClientSide2\n'), ((1584, 1596), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1593, 1596), False, 'import json\n'), ((1711, 1723), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1720, 1723), False, 'import json\n'), ((2794, 2806), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2803, 2806), False, 'import json\n'), ((2883, 2895), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2892, 2895), False, 'import json\n'), ((4032, 4050), 'numpy.max', 'np.max', (['image_data'], {}), '(image_data)\n', (4038, 4050), True, 'import numpy as np\n'), ((4121, 4181), 'ClientSide2.Find_Peaks', 'ClientSide2.Find_Peaks', (['image_data', 'scale'], {}), '(image_data, scale, **FILTER_SETTINGS)\n', (4143, 4181), False, 'import 
ClientSide2\n'), ((4851, 4876), 'os.path.exists', 'os.path.exists', (['"""Results"""'], {}), "('Results')\n", (4865, 4876), False, 'import os\n'), ((4890, 4912), 'os.makedirs', 'os.makedirs', (['"""Results"""'], {}), "('Results')\n", (4901, 4912), False, 'import os\n'), ((4950, 4986), 'os.path.join', 'os.path.join', (['"""Results"""', 'output_file'], {}), "('Results', output_file)\n", (4962, 4986), False, 'import os\n'), ((1510, 1551), 'os.path.join', 'os.path.join', (['"""Sessions"""', 'options.session'], {}), "('Sessions', options.session)\n", (1522, 1551), False, 'import os\n'), ((4295, 4330), 'ClassifierFunctions2.choose_peaks', 'cf.choose_peaks', (['peak_locs', 'peaks_h'], {}), '(peak_locs, peaks_h)\n', (4310, 4330), True, 'import ClassifierFunctions2 as cf\n'), ((3532, 3559), 'os.path.join', 'os.path.join', (['dirpath', 'path'], {}), '(dirpath, path)\n', (3544, 3559), False, 'import os\n')] |
"""
generator.py
Generator for batch training on keras models
"""
import pandas as pd
import os
import numpy as np
import boto3
import tensorflow as tf
# print(tf.__version__)
import keras
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D, Bidirectional
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.layers import Dropout
from sklearn.model_selection import train_test_split
from botocore.client import ClientError
# from smart_open import smart_open
import csv
# config
# S3 bucket that holds the preprocessed dataset.
BUCKET_NAME = 'sagemaker-cs281'
# AWS/S3 settings exported into the environment for the S3 I/O layer.
config = {'AWS_REGION':'us-east-2',
          'S3_ENDPOINT':'s3.us-east-2.amazonaws.com',
          'S3_USE_HTTPS':'1',
          'S3_VERIFY_SSL':'1', }
os.environ.update(config)
# Number of examples in each split (used to report the epoch size).
line_counts = {'train':376968, 'test':122928, 'valid':104054}
class Keras_DataGenerator(keras.utils.Sequence):
    """ Generates data for Keras
    Usage:
        training_generator = generator.My_DataGenerator(dataset='train')
        validation_generator = generator.My_DataGenerator(dataset='valid')
        history = model.fit_generator(generator=training_generator,
                                      validation_data=validation_generator,
                                      verbose=1, use_multiprocessing=False,
                                      epochs=n_epochs)
    Data is stored in three folders in S3 key 'deephol-data-processed/proofs/human'
        * /train
        * /valid
        * /test
    Files are have three path format. For all three folders, we keep the name X or Y_train
        * /X_train_{}.csv
        * /X_train_hyp_{}.csv
        * /Y_train.csv
    """
    def __init__(self, dataset='train',batch_size=64,
                 w_hyp=False, n_channels=1, n_classes=41, shuffle=False):
        # Whether to use the hypothesis feature files (wider input rows).
        self.w_hyp = w_hyp
        # Feature width per example: 3000 with hypotheses, 1000 without.
        self.dim = 3000 if self.w_hyp else 1000
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.dataset = dataset
        # paths
        X_paths, Y_path = self.get_partition_and_labels()
        self.features_keys_lst = X_paths
        self.label_key = Y_path[0]
        # Total number of examples available in the chosen split.
        self.n = line_counts[self.dataset]
        # Index of the feature file currently being streamed.
        self.partition_index = 0
        # initialize readers
        self.on_epoch_end()
        print('Generating examples from a set of {} examples'.format(self.n))

    def __len__(self):
        """ Denotes the number of batches per epoch
        subtract 1 unfull batch per partition """
        # NOTE(review): batch count is hard-coded to 50; the commented-out
        # expression below is the intended formula -- confirm before relying
        # on full-epoch coverage.
        return 50 #int(np.floor(self.n / self.batch_size)) - len(self.features_keys_lst) - 1

    def __getitem__(self, index):
        'Generate one batch of data'
        # Wrap around once (almost) all feature partitions are consumed.
        if self.partition_index >= len(self.features_keys_lst) - 1:
            # pass #if you put this pass on the
            self.on_epoch_end()
        try:
            X, y = next(self.reader_X_lst[self.partition_index]), next(self.reader_Y)
        except Exception as e:
            # Current feature file exhausted: advance to the next one.
            self.partition_index += 1
            X, y = next(self.reader_X_lst[self.partition_index]), next(self.reader_Y)
        else:
            # Partial chunk at a partition boundary: skip to the next file.
            # NOTE(review): 64 is a hard-coded batch size; presumably this
            # should be self.batch_size -- confirm.
            if len(X) < 64:
                self.partition_index += 1
                X, y = next(self.reader_X_lst[self.partition_index]), next(self.reader_Y)
        return X.values, y.values

    def _initialize_readers(self):
        # One chunked CSV reader per feature file, plus one for the labels;
        # each next() on a reader yields `batch_size` rows.
        paths_X = [os.path.join('s3://', BUCKET_NAME, x) for x in self.features_keys_lst]
        path_Y = os.path.join('s3://', BUCKET_NAME, self.label_key)
        self.reader_X_lst = [pd.read_csv(path, chunksize=self.batch_size, header=None, engine='python')
                             for path in paths_X]
        self.reader_Y = pd.read_csv(path_Y, chunksize=self.batch_size, header=None, engine='python')

    def on_epoch_end(self):
        """Updates indexes after each epoch"""
        # re initialize readers
        self._initialize_readers()
        self.list_partitions = self.features_keys_lst
        if self.shuffle == True:
            np.random.shuffle(self.list_partitions)
        # start from begining
        self.partition_index = 0

    def get_partition_and_labels(self):
        """ Create a dictionary called partition where:
        - in partition['train']: a list of training IDs
        - in partition['validation']: a list of validation IDs
        """
        s3_r = boto3.resource('s3')
        my_bucket = s3_r.Bucket(BUCKET_NAME)
        full_dataset_key = 'deephol-data-processed/proofs/human'
        # paths as strings
        dataset_keys = {s: '{}/{}/'.format(full_dataset_key, s)
                        for s in ['train', 'test', 'valid']}
        # Only self.dataset's prefix is actually listed; the other two
        # entries of `partition` are copies of the same listing.
        partition = {dataset: [x.key for x in my_bucket.objects.filter(Prefix=dataset_keys[self.dataset])]
                     for dataset in ['train', 'test', 'valid']}
        print('Retrieving data from {}'.format(dataset_keys[self.dataset]))
        # get each file key
        y_file = [x for x in partition[self.dataset] if x.find('/Y_train') != (-1)]
        X_files_hyp = [x for x in partition[self.dataset] if x.find('/X_train_hyp_') != (-1)]
        X_files = X_files_hyp if self.w_hyp else set(partition[self.dataset]) - set(y_file) - set(X_files_hyp)
        # sort (will be shuffled if shuffle=True)
        X_files = sorted(X_files, key=lambda x: (len(x), x))
        return X_files, y_file
# tests
| [
"pandas.read_csv",
"os.environ.update",
"boto3.resource",
"os.path.join",
"numpy.random.shuffle"
] | [((798, 823), 'os.environ.update', 'os.environ.update', (['config'], {}), '(config)\n', (815, 823), False, 'import os\n'), ((3566, 3616), 'os.path.join', 'os.path.join', (['"""s3://"""', 'BUCKET_NAME', 'self.label_key'], {}), "('s3://', BUCKET_NAME, self.label_key)\n", (3578, 3616), False, 'import os\n'), ((3795, 3871), 'pandas.read_csv', 'pd.read_csv', (['path_Y'], {'chunksize': 'self.batch_size', 'header': 'None', 'engine': '"""python"""'}), "(path_Y, chunksize=self.batch_size, header=None, engine='python')\n", (3806, 3871), True, 'import pandas as pd\n'), ((4481, 4501), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (4495, 4501), False, 'import boto3\n'), ((3478, 3515), 'os.path.join', 'os.path.join', (['"""s3://"""', 'BUCKET_NAME', 'x'], {}), "('s3://', BUCKET_NAME, x)\n", (3490, 3515), False, 'import os\n'), ((3646, 3720), 'pandas.read_csv', 'pd.read_csv', (['path'], {'chunksize': 'self.batch_size', 'header': 'None', 'engine': '"""python"""'}), "(path, chunksize=self.batch_size, header=None, engine='python')\n", (3657, 3720), True, 'import pandas as pd\n'), ((4114, 4153), 'numpy.random.shuffle', 'np.random.shuffle', (['self.list_partitions'], {}), '(self.list_partitions)\n', (4131, 4153), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import numpy as np
import tensorflow as tf
from niftynet.layer import layer_util
from niftynet.layer.base_layer import TrainableLayer
from niftynet.layer.deconvolution import DeconvLayer
from niftynet.utilities.util_common import look_up_operations
SUPPORTED_OP = set(['REPLICATE', 'CHANNELWISE_DECONV'])
class UpSampleLayer(TrainableLayer):
    """
    This class defines channel-wise upsampling operations.
    Different from ``DeconvLayer``,
    the elements are not mixed in the channel dim.
    ``REPLICATE`` mode replicates each spatial_dim into
    ``spatial_dim*kernel_size``
    `CHANNELWISE_DECONV`` mode makes a projection using a kernel.
    e.g., With 2D input (without loss of generality), given input
    ``[N, X, Y, C]``, the output is ``[N, X*kernel_size, Y*kernel_size, C]``.
    """

    def __init__(self,
                 func,
                 kernel_size=3,
                 stride=2,
                 w_initializer=None,
                 w_regularizer=None,
                 with_bias=False,
                 b_initializer=None,
                 b_regularizer=None,
                 name='upsample'):
        # Validate the requested mode against SUPPORTED_OP
        # ('REPLICATE' / 'CHANNELWISE_DECONV'); case-insensitive.
        self.func = look_up_operations(func.upper(), SUPPORTED_OP)
        self.layer_name = '{}_{}'.format(self.func.lower(), name)
        super(UpSampleLayer, self).__init__(name=self.layer_name)

        self.kernel_size = kernel_size
        self.stride = stride
        self.with_bias = with_bias
        # Initializers/regularizers forwarded to each per-channel DeconvLayer.
        self.initializers = {'w': w_initializer, 'b': b_initializer}
        self.regularizers = {'w': w_regularizer, 'b': b_regularizer}

    def layer_op(self, input_tensor):
        spatial_rank = layer_util.infer_spatial_rank(input_tensor)
        output_tensor = input_tensor
        if self.func == 'REPLICATE':
            if self.kernel_size != self.stride:
                # NOTE(review): adjacent string literals concatenate without
                # spaces, so the message reads "...notsupported..."; behavior
                # is otherwise correct.
                raise ValueError(
                    "`kernel_size` != `stride` currently not"
                    "supported in `REPLICATE` mode. Please"
                    "consider using `CHANNELWISE_DECONV` operation.")
            # simply replicate input values to
            # local regions of (kernel_size ** spatial_rank) element
            kernel_size_all_dims = layer_util.expand_spatial_params(
                self.kernel_size, spatial_rank)
            pixel_num = np.prod(kernel_size_all_dims)
            repmat = np.hstack((pixel_num, [1] * spatial_rank, 1)).flatten()
            # Tile along the batch axis, then redistribute the tiled copies
            # into the spatial dims via batch_to_space.
            output_tensor = tf.tile(input=input_tensor, multiples=repmat)
            output_tensor = tf.batch_to_space(
                input=output_tensor,
                block_shape=kernel_size_all_dims,
                crops=[[0, 0]] * spatial_rank)
        elif self.func == 'CHANNELWISE_DECONV':
            # Split channels, deconvolve each one independently, then
            # re-stack them, so channels are never mixed.
            output_tensor = [tf.expand_dims(x, -1)
                             for x in tf.unstack(input_tensor, axis=-1)]
            output_tensor = [DeconvLayer(n_output_chns=1,
                                         kernel_size=self.kernel_size,
                                         stride=self.stride,
                                         padding='SAME',
                                         with_bias=self.with_bias,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         b_initializer=self.initializers['b'],
                                         b_regularizer=self.regularizers['b'],
                                         name='deconv_{}'.format(i))(x)
                             for (i, x) in enumerate(output_tensor)]
            output_tensor = tf.concat(output_tensor, axis=-1)
        return output_tensor
| [
"tensorflow.batch_to_space",
"tensorflow.unstack",
"tensorflow.concat",
"numpy.hstack",
"niftynet.layer.layer_util.infer_spatial_rank",
"tensorflow.tile",
"tensorflow.expand_dims",
"niftynet.layer.layer_util.expand_spatial_params",
"numpy.prod"
] | [((1714, 1757), 'niftynet.layer.layer_util.infer_spatial_rank', 'layer_util.infer_spatial_rank', (['input_tensor'], {}), '(input_tensor)\n', (1743, 1757), False, 'from niftynet.layer import layer_util\n'), ((2257, 2321), 'niftynet.layer.layer_util.expand_spatial_params', 'layer_util.expand_spatial_params', (['self.kernel_size', 'spatial_rank'], {}), '(self.kernel_size, spatial_rank)\n', (2289, 2321), False, 'from niftynet.layer import layer_util\n'), ((2363, 2392), 'numpy.prod', 'np.prod', (['kernel_size_all_dims'], {}), '(kernel_size_all_dims)\n', (2370, 2392), True, 'import numpy as np\n'), ((2498, 2543), 'tensorflow.tile', 'tf.tile', ([], {'input': 'input_tensor', 'multiples': 'repmat'}), '(input=input_tensor, multiples=repmat)\n', (2505, 2543), True, 'import tensorflow as tf\n'), ((2572, 2679), 'tensorflow.batch_to_space', 'tf.batch_to_space', ([], {'input': 'output_tensor', 'block_shape': 'kernel_size_all_dims', 'crops': '([[0, 0]] * spatial_rank)'}), '(input=output_tensor, block_shape=kernel_size_all_dims,\n crops=[[0, 0]] * spatial_rank)\n', (2589, 2679), True, 'import tensorflow as tf\n'), ((3697, 3730), 'tensorflow.concat', 'tf.concat', (['output_tensor'], {'axis': '(-1)'}), '(output_tensor, axis=-1)\n', (3706, 3730), True, 'import tensorflow as tf\n'), ((2414, 2459), 'numpy.hstack', 'np.hstack', (['(pixel_num, [1] * spatial_rank, 1)'], {}), '((pixel_num, [1] * spatial_rank, 1))\n', (2423, 2459), True, 'import numpy as np\n'), ((2803, 2824), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(-1)'], {}), '(x, -1)\n', (2817, 2824), True, 'import tensorflow as tf\n'), ((2863, 2896), 'tensorflow.unstack', 'tf.unstack', (['input_tensor'], {'axis': '(-1)'}), '(input_tensor, axis=-1)\n', (2873, 2896), True, 'import tensorflow as tf\n')] |
import torch
import matplotlib.pyplot as plt
import numpy as np
import argparse
import pickle
import os
from torch.autograd import Variable
from torchvision import transforms
from build_vocab import Vocabulary
from model import EncoderCNN, DecoderRNN
from PIL import Image
def main(args):
    """Generate and print a caption for a single image.

    Loads the vocabulary and the trained encoder/decoder weights, runs the
    CNN encoder on the preprocessed image, samples a word-id sequence from
    the RNN decoder, and prints the decoded sentence.
    """
    # Image preprocessing
    # NOTE(review): transforms.Scale is the legacy name (renamed Resize in
    # later torchvision) -- this file targets an older API throughout.
    transform = transforms.Compose([
        transforms.Scale(args.crop_size),
        transforms.CenterCrop(args.crop_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # Load vocabulary wrapper
    with open(args.vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    # Build Models
    encoder = EncoderCNN(args.embed_size)
    encoder.eval()  # evaluation mode (BN uses moving mean/variance)
    decoder = DecoderRNN(args.embed_size, args.hidden_size,
                         len(vocab), args.num_layers)

    # Load the trained model parameters
    encoder.load_state_dict(torch.load(args.encoder_path))
    decoder.load_state_dict(torch.load(args.decoder_path))

    # Prepare Image
    image = Image.open(args.image)
    image_tensor = Variable(transform(image).unsqueeze(0))

    # Set initial states (h0, c0) for the LSTM decoder.
    state = (Variable(torch.zeros(args.num_layers, 1, args.hidden_size)),
             Variable(torch.zeros(args.num_layers, 1, args.hidden_size)))

    # If use gpu
    if torch.cuda.is_available():
        encoder.cuda()
        decoder.cuda()
        state = [s.cuda() for s in state]
        image_tensor = image_tensor.cuda()

    # Generate caption from image
    feature = encoder(image_tensor)
    sampled_ids = decoder.sample(feature, state)
    sampled_ids = sampled_ids.cpu().data.numpy()

    # Decode word_ids to words, stopping at the '<end>' token (inclusive).
    sampled_caption = []
    for word_id in sampled_ids:
        word = vocab.idx2word[word_id]
        sampled_caption.append(word)
        if word == '<end>':
            break
    sentence = ' '.join(sampled_caption)

    # Print out image and generated caption.
    print (sentence)
    plt.imshow(np.asarray(image))
# Command-line entry point: parse paths/hyperparameters and run inference.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', type=str, required=True,
                        help='input image for generating caption')
    parser.add_argument('--encoder_path', type=str, default='./models/encoder-5-3000.pkl',
                        help='path for trained encoder')
    parser.add_argument('--decoder_path', type=str, default='./models/decoder-5-3000.pkl',
                        help='path for trained decoder')
    parser.add_argument('--vocab_path', type=str, default='./data/vocab.pkl',
                        help='path for vocabulary wrapper')
    parser.add_argument('--crop_size', type=int, default=224,
                        help='size for center cropping images')

    # Model parameters (should be same as paramters in train.py)
    parser.add_argument('--embed_size', type=int , default=256,
                        help='dimension of word embedding vectors')
    parser.add_argument('--hidden_size', type=int , default=512,
                        help='dimension of lstm hidden states')
    parser.add_argument('--num_layers', type=int , default=1 ,
                        help='number of layers in lstm')
    args = parser.parse_args()
    main(args)
"argparse.ArgumentParser",
"torchvision.transforms.Scale",
"torch.load",
"numpy.asarray",
"PIL.Image.open",
"torchvision.transforms.ToTensor",
"pickle.load",
"torch.cuda.is_available",
"torch.zeros",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Normalize",
"model.EncoderCNN"
] | [((690, 717), 'model.EncoderCNN', 'EncoderCNN', (['args.embed_size'], {}), '(args.embed_size)\n', (700, 717), False, 'from model import EncoderCNN, DecoderRNN\n'), ((1106, 1128), 'PIL.Image.open', 'Image.open', (['args.image'], {}), '(args.image)\n', (1116, 1128), False, 'from PIL import Image\n'), ((1395, 1420), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1418, 1420), False, 'import torch\n'), ((2132, 2157), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2155, 2157), False, 'import argparse\n'), ((641, 655), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (652, 655), False, 'import pickle\n'), ((976, 1005), 'torch.load', 'torch.load', (['args.encoder_path'], {}), '(args.encoder_path)\n', (986, 1005), False, 'import torch\n'), ((1035, 1064), 'torch.load', 'torch.load', (['args.decoder_path'], {}), '(args.decoder_path)\n', (1045, 1064), False, 'import torch\n'), ((2068, 2085), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2078, 2085), True, 'import numpy as np\n'), ((367, 399), 'torchvision.transforms.Scale', 'transforms.Scale', (['args.crop_size'], {}), '(args.crop_size)\n', (383, 399), False, 'from torchvision import transforms\n'), ((411, 448), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['args.crop_size'], {}), '(args.crop_size)\n', (432, 448), False, 'from torchvision import transforms\n'), ((458, 479), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (477, 479), False, 'from torchvision import transforms\n'), ((490, 544), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (510, 544), False, 'from torchvision import transforms\n'), ((1240, 1289), 'torch.zeros', 'torch.zeros', (['args.num_layers', '(1)', 'args.hidden_size'], {}), '(args.num_layers, 1, args.hidden_size)\n', (1251, 1289), False, 'import torch\n'), ((1314, 1363), 'torch.zeros', 
'torch.zeros', (['args.num_layers', '(1)', 'args.hidden_size'], {}), '(args.num_layers, 1, args.hidden_size)\n', (1325, 1363), False, 'import torch\n')] |
from __future__ import print_function
import os
import numpy as np
import lasagne
import gdal
import random
import sys
def build_cnn(input_var=None, bands=None):
    """Assemble the three-layer fully convolutional regression network.

    Spatial dimensions of the input layer are left unspecified because
    patch sizes vary between train-val and test.
    """
    # Every conv layer starts from zero-mean, small-std Gaussian weights.
    def _weights():
        return lasagne.init.Normal(std=0.001, mean=0)

    layer = lasagne.layers.InputLayer(
        shape=(None, bands, None, None), input_var=input_var)
    layer = lasagne.layers.Conv2DLayer(
        layer, num_filters=48, filter_size=(9, 9), W=_weights(),
        nonlinearity=lasagne.nonlinearities.rectify)
    layer = lasagne.layers.Conv2DLayer(
        layer, num_filters=32, filter_size=(5, 5), W=_weights(),
        nonlinearity=lasagne.nonlinearities.rectify)
    # Single-filter output layer; nonlinearity left at the layer default.
    layer = lasagne.layers.Conv2DLayer(
        layer, num_filters=1, filter_size=(5, 5), W=_weights())
    return layer
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield aligned (inputs, targets) minibatches of exactly ``batchsize``.

    Any trailing partial batch is dropped.  With ``shuffle`` enabled the
    samples are visited in a random order, with inputs and targets kept
    in lockstep.
    """
    assert len(inputs) == len(targets)
    n_samples = len(inputs)
    order = None
    if shuffle:
        order = np.arange(n_samples)
        np.random.shuffle(order)
    for start in range(0, n_samples - batchsize + 1, batchsize):
        if order is None:
            sel = slice(start, start + batchsize)
        else:
            sel = order[start:start + batchsize]
        yield inputs[sel], targets[sel]
def load_dataset(dataset_folder, patch_side, border_width, identity, num):
    """Build train/validation patch arrays for one triplet of dates.

    Finds the VH raster of date index ``num`` in ``dataset_folder``, loads
    the co-registered VH/VV/NDVI/MASK rasters for dates num, num+1, num+2
    and a DEM, samples mask-free patches of side ``patch_side`` on a stride
    of ``border_width``, and splits them 80/20 into train/validation.  The
    regression target is the middle-date (num+1) NDVI cropped by
    ``border_width`` on each side.  ``identity`` selects which of the 9
    candidate input bands are kept.  Returns
    (x_train, y_train, x_val, y_val).
    """
    #############
    # short names
    path, ps, r = dataset_folder, patch_side, border_width
    dir_list = os.listdir(path)
    dir_list.sort()
    # Full input stack is 9 bands:
    #   0:vh0 1:vv0 2:vh1 3:vv1 4:vv2 5:vh2 6:ndvi0 7:ndvi2 8:dem
    B = 9
    x_train = np.ndarray(shape=(0, B, ps, ps), dtype='float32')
    y_train = np.ndarray(shape=(0, 1, ps-2*r, ps-2*r), dtype='float32')
    x_val = np.ndarray(shape=(0, B, ps, ps), dtype='float32')
    y_val = np.ndarray(shape=(0, 1, ps-2*r, ps-2*r), dtype='float32')
    for file in dir_list:
        # File names look like '00<d>_VH...'; match the VH file of date `num`
        # and derive every sibling file name from its suffix.
        if file[4:6] == 'VH' and int(file[2:3])==num:
            vh0_file =file
            vv0_file = '00' + str(num) + '_VV'+ vh0_file[6:]
            ndvi0_file = '00' + str(num) + '_NDVI'+ vh0_file[6:]
            mask0_file = '00' + str(num) + '_MASK'+ vh0_file[6:]
            vh_file = '00' + str(num+1) + '_VH'+ vh0_file[6:]
            vv_file = '00' + str(num+1) + '_VV' + vh0_file[6:]
            ndvi_file = '00' + str(num+1) + '_NDVI'+ vh0_file[6:]
            mask_file = '00' + str(num+1) + '_MASK'+ vh0_file[6:]
            vh2_file = '00' + str(num+2) + '_VH'+ vh0_file[6:]
            vv2_file = '00' + str(num+2) + '_VV' + vh0_file[6:]
            ndvi2_file = '00' + str(num+2) + '_NDVI'+ vh0_file[6:]
            mask2_file = '00' + str(num+2) + '_MASK'+ vh0_file[6:]
            dem_file = 'DEM' + vh0_file[6:]
            # Load each raster with GDAL; the handle is released (set to
            # None) immediately after reading.
            dataset = gdal.Open(path + vh0_file, gdal.GA_ReadOnly)
            vh0 = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path+vv0_file, gdal.GA_ReadOnly)
            vv0 = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path+ndvi0_file, gdal.GA_ReadOnly)
            ndvi0 = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path+mask0_file, gdal.GA_ReadOnly)
            mask0 = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path + vh_file, gdal.GA_ReadOnly)
            vh = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path+vv_file, gdal.GA_ReadOnly)
            vv = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path+ndvi_file, gdal.GA_ReadOnly)
            ndvi = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path+mask_file, gdal.GA_ReadOnly)
            mask = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path + vh2_file, gdal.GA_ReadOnly)
            vh2 = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path+vv2_file, gdal.GA_ReadOnly)
            vv2 = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path+ndvi2_file, gdal.GA_ReadOnly)
            ndvi2 = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path+mask2_file, gdal.GA_ReadOnly)
            mask2 = dataset.ReadAsArray()
            dataset = None
            dataset = gdal.Open(path + dem_file, gdal.GA_ReadOnly)
            dem = dataset.ReadAsArray()
            dataset = None
            # NOTE(review): this fixed pixel region is forcibly marked as
            # masked in all three dates -- presumably a known-bad area of
            # the scene; confirm the coordinates against the imagery.
            mask0[530:1000,4300:4750]=1
            mask[530:1000,4300:4750]=1
            mask2[530:1000,4300:4750]=1
            [s1, s2] = ndvi0.shape
            # Collect top-left corners of candidate patches whose three
            # masks are completely clear (sum == 0).
            p = []
            for y in range(1,s1-ps+1,r):
                for x in range(1,s2-ps+1,r):
                    Mk = mask[y:y+ps, x:x+ps]
                    Mk0 = mask0[y:y+ps,x:x+ps]
                    Mk2 = mask2[y:y+ps,x:x+ps]
                    if Mk0.sum() == 0 and Mk.sum()== 0 and Mk2.sum()== 0:
                        p.append([y,x])
            random.shuffle(p)
            # Cap at P patches: first 80% train, remaining 20% validation.
            P = 19000
            p_train, p_val = p[:int(0.8*P)], p[int(0.8*P):int(P)]
            x_train_k = np.ndarray(shape=(len(p_train), B, ps, ps), dtype='float32')
            y_train_k = np.ndarray(shape=(len(p_train), 1, ps-2*r, ps-2*r), dtype='float32')
            n = 0
            for patch in p_train:
                y0, x0 = patch[0], patch[1]
                x_train_k[n,0,:,:] = vh0[y0:y0+ps,x0:x0+ps]
                x_train_k[n,1,:,:] = vv0[y0:y0+ps,x0:x0+ps]
                x_train_k[n,2,:,:] = vh[y0:y0+ps,x0:x0+ps]
                x_train_k[n,3,:,:] = vv[y0:y0+ps,x0:x0+ps]
                x_train_k[n,4,:,:] = vv2[y0:y0+ps,x0:x0+ps]
                x_train_k[n,5,:,:] = vh2[y0:y0+ps,x0:x0+ps]
                x_train_k[n,6,:,:] = ndvi0[y0:y0+ps,x0:x0+ps]
                x_train_k[n,7,:,:] = ndvi2[y0:y0+ps,x0:x0+ps]
                x_train_k[n,8,:,:] = dem[y0:y0+ps,x0:x0+ps]
                # Target: middle-date NDVI, cropped by r on each side.
                y_train_k[n, 0, :, :] = ndvi[y0+r:y0+ps-r, x0+r:x0+ps-r]
                n = n + 1
            x_train = np.concatenate((x_train, x_train_k))
            y_train = np.concatenate((y_train, y_train_k))
            x_val_k = np.ndarray(shape=(len(p_val), B, ps, ps), dtype='float32')
            y_val_k = np.ndarray(shape=(len(p_val), 1, ps-2*r, ps-2*r), dtype='float32')
            n = 0
            for patch in p_val:
                y0, x0 = patch[0], patch[1]
                x_val_k[n,0,:,:] = vh0[y0:y0+ps,x0:x0+ps]
                x_val_k[n,1,:,:] = vv0[y0:y0+ps,x0:x0+ps]
                x_val_k[n,2,:,:] = vh[y0:y0+ps,x0:x0+ps]
                x_val_k[n,3,:,:] = vv[y0:y0+ps,x0:x0+ps]
                x_val_k[n,4,:,:] = vv2[y0:y0+ps,x0:x0+ps]
                x_val_k[n,5,:,:] = vh2[y0:y0+ps,x0:x0+ps]
                x_val_k[n,6,:,:] = ndvi0[y0:y0+ps,x0:x0+ps]
                x_val_k[n,7,:,:] = ndvi2[y0:y0+ps,x0:x0+ps]
                x_val_k[n,8,:,:] = dem[y0:y0+ps,x0:x0+ps]
                y_val_k[n, 0, :, :] = ndvi[y0+r:y0+ps-r, x0+r:x0+ps-r]
                n = n + 1
            x_val = np.concatenate((x_val, x_val_k))
            y_val = np.concatenate((y_val, y_val_k))
    # Select the requested band subset (see band index comment above).
    if identity == 'SOPTII':
        B1 = 8
        x_val_temp = np.ndarray(shape=(len(p_val), B1, ps, ps), dtype='float32')
        x_train_temp = np.ndarray(shape=(len(p_train), B1, ps, ps), dtype='float32')
        x_train_temp = x_train[:,:8,:,:]
        x_val_temp = x_val[:,:8,:,:]
    elif identity == 'SOPTIIp':
        B1 = 9
        x_val_temp = np.ndarray(shape=(len(p_val), B1, ps, ps), dtype='float32')
        x_train_temp = np.ndarray(shape=(len(p_train), B1, ps, ps), dtype='float32')
        x_train_temp = x_train[:,:,:,:]
        x_val_temp = x_val[:,:,:,:]
    elif identity == 'SOPTI':
        B1 = 5
        x_val_temp = np.ndarray(shape=(len(p_val), B1, ps, ps), dtype='float32')
        x_train_temp = np.ndarray(shape=(len(p_train), B1, ps, ps), dtype='float32')
        x_train_temp[:,:4,:,:] = x_train[:,:4,:,:]
        x_val_temp[:,:4,:,:] = x_val[:,:4,:,:]
        x_train_temp[:,4,:,:] = x_train[:,6,:,:]
        x_val_temp[:,4,:,:] = x_val[:,6,:,:]
    elif identity == 'SOPTIp':
        B1 = 6
        x_val_temp = np.ndarray(shape=(len(p_val), B1, ps, ps), dtype='float32')
        x_train_temp = np.ndarray(shape=(len(p_train), B1, ps, ps), dtype='float32')
        x_train_temp[:,:4,:,:] = x_train[:,:4,:,:]
        x_val_temp[:,:4,:,:] = x_val[:,:4,:,:]
        x_train_temp[:,4,:,:] = x_train[:,6,:,:]
        x_val_temp[:,4,:,:] = x_val[:,6,:,:]
        x_train_temp[:,5,:,:] = x_train[:,8,:,:]
        x_val_temp[:,5,:,:] = x_val[:,8,:,:]
    elif identity == 'SAR':
        B1 = 2
        x_val_temp = np.ndarray(shape=(len(p_val), B1, ps, ps), dtype='float32')
        x_train_temp = np.ndarray(shape=(len(p_train), B1, ps, ps), dtype='float32')
        x_train_temp[:,:,:,:] = x_train[:,2:4,:,:]
        x_val_temp[:,:,:,:] = x_val[:,2:4,:,:]
    elif identity == 'SARp':
        B1 = 3
        x_val_temp = np.ndarray(shape=(len(p_val), B1, ps, ps), dtype='float32')
        x_train_temp = np.ndarray(shape=(len(p_train), B1, ps, ps), dtype='float32')
        x_train_temp[:,:2,:,:] = x_train[:,2:4,:,:]
        x_val_temp[:,:2,:,:] = x_val[:,2:4,:,:]
        x_train_temp[:,2,:,:] = x_train[:,8,:,:]
        x_val_temp[:,2,:,:] = x_val[:,8,:,:]
    elif identity == 'OPTI':
        B1 = 1
        x_val_temp = np.ndarray(shape=(len(p_val), B1, ps, ps), dtype='float32')
        x_train_temp = np.ndarray(shape=(len(p_train), B1, ps, ps), dtype='float32')
        x_train_temp[:,0,:,:] = x_train[:,6,:,:]
        x_val_temp[:,0,:,:] = x_val[:,6,:,:]
    elif identity == 'OPTII':
        B1 = 2
        x_val_temp = np.ndarray(shape=(len(p_val), B1, ps, ps), dtype='float32')
        x_train_temp = np.ndarray(shape=(len(p_train), B1, ps, ps), dtype='float32')
        x_train_temp[:,:,:,:] = x_train[:,6:8,:,:]
        x_val_temp[:,:,:,:] = x_val[:,6:8,:,:]
    else:
        print('Insert the correct identifier. You must choose among these: \n - SOPTIIp \n - SOPTII \n - SOPTIp \n - SOPTI \n - OPTII \n - OPTI \n - SARp \n - SAR')
        quit()
    # Drop references to the full rasters before returning.
    vh0, vv0, ndvi0,vh, vv, ndvi,vh2, vv2, ndvi2,dem = None, None, None, None, None, None, None, None, None, None
    return x_train_temp, y_train, x_val_temp, y_val
| [
"lasagne.layers.InputLayer",
"numpy.concatenate",
"random.shuffle",
"gdal.Open",
"lasagne.init.Normal",
"numpy.ndarray",
"os.listdir",
"numpy.random.shuffle"
] | [((180, 259), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', ([], {'shape': '(None, bands, None, None)', 'input_var': 'input_var'}), '(shape=(None, bands, None, None), input_var=input_var)\n', (205, 259), False, 'import lasagne\n'), ((1476, 1492), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1486, 1492), False, 'import os\n'), ((1537, 1586), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0, B, ps, ps)', 'dtype': '"""float32"""'}), "(shape=(0, B, ps, ps), dtype='float32')\n", (1547, 1586), True, 'import numpy as np\n'), ((1601, 1666), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0, 1, ps - 2 * r, ps - 2 * r)', 'dtype': '"""float32"""'}), "(shape=(0, 1, ps - 2 * r, ps - 2 * r), dtype='float32')\n", (1611, 1666), True, 'import numpy as np\n'), ((1671, 1720), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0, B, ps, ps)', 'dtype': '"""float32"""'}), "(shape=(0, B, ps, ps), dtype='float32')\n", (1681, 1720), True, 'import numpy as np\n'), ((1733, 1798), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0, 1, ps - 2 * r, ps - 2 * r)', 'dtype': '"""float32"""'}), "(shape=(0, 1, ps - 2 * r, ps - 2 * r), dtype='float32')\n", (1743, 1798), True, 'import numpy as np\n'), ((981, 1007), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (998, 1007), True, 'import numpy as np\n'), ((397, 435), 'lasagne.init.Normal', 'lasagne.init.Normal', ([], {'std': '(0.001)', 'mean': '(0)'}), '(std=0.001, mean=0)\n', (416, 435), False, 'import lasagne\n'), ((571, 609), 'lasagne.init.Normal', 'lasagne.init.Normal', ([], {'std': '(0.001)', 'mean': '(0)'}), '(std=0.001, mean=0)\n', (590, 609), False, 'import lasagne\n'), ((744, 782), 'lasagne.init.Normal', 'lasagne.init.Normal', ([], {'std': '(0.001)', 'mean': '(0)'}), '(std=0.001, mean=0)\n', (763, 782), False, 'import lasagne\n'), ((2699, 2743), 'gdal.Open', 'gdal.Open', (['(path + vh0_file)', 'gdal.GA_ReadOnly'], {}), '(path + vh0_file, gdal.GA_ReadOnly)\n', (2708, 2743), False, 'import 
gdal\n'), ((2833, 2877), 'gdal.Open', 'gdal.Open', (['(path + vv0_file)', 'gdal.GA_ReadOnly'], {}), '(path + vv0_file, gdal.GA_ReadOnly)\n', (2842, 2877), False, 'import gdal\n'), ((2965, 3011), 'gdal.Open', 'gdal.Open', (['(path + ndvi0_file)', 'gdal.GA_ReadOnly'], {}), '(path + ndvi0_file, gdal.GA_ReadOnly)\n', (2974, 3011), False, 'import gdal\n'), ((3101, 3147), 'gdal.Open', 'gdal.Open', (['(path + mask0_file)', 'gdal.GA_ReadOnly'], {}), '(path + mask0_file, gdal.GA_ReadOnly)\n', (3110, 3147), False, 'import gdal\n'), ((3249, 3292), 'gdal.Open', 'gdal.Open', (['(path + vh_file)', 'gdal.GA_ReadOnly'], {}), '(path + vh_file, gdal.GA_ReadOnly)\n', (3258, 3292), False, 'import gdal\n'), ((3381, 3424), 'gdal.Open', 'gdal.Open', (['(path + vv_file)', 'gdal.GA_ReadOnly'], {}), '(path + vv_file, gdal.GA_ReadOnly)\n', (3390, 3424), False, 'import gdal\n'), ((3511, 3556), 'gdal.Open', 'gdal.Open', (['(path + ndvi_file)', 'gdal.GA_ReadOnly'], {}), '(path + ndvi_file, gdal.GA_ReadOnly)\n', (3520, 3556), False, 'import gdal\n'), ((3645, 3690), 'gdal.Open', 'gdal.Open', (['(path + mask_file)', 'gdal.GA_ReadOnly'], {}), '(path + mask_file, gdal.GA_ReadOnly)\n', (3654, 3690), False, 'import gdal\n'), ((3803, 3847), 'gdal.Open', 'gdal.Open', (['(path + vh2_file)', 'gdal.GA_ReadOnly'], {}), '(path + vh2_file, gdal.GA_ReadOnly)\n', (3812, 3847), False, 'import gdal\n'), ((3937, 3981), 'gdal.Open', 'gdal.Open', (['(path + vv2_file)', 'gdal.GA_ReadOnly'], {}), '(path + vv2_file, gdal.GA_ReadOnly)\n', (3946, 3981), False, 'import gdal\n'), ((4069, 4115), 'gdal.Open', 'gdal.Open', (['(path + ndvi2_file)', 'gdal.GA_ReadOnly'], {}), '(path + ndvi2_file, gdal.GA_ReadOnly)\n', (4078, 4115), False, 'import gdal\n'), ((4205, 4251), 'gdal.Open', 'gdal.Open', (['(path + mask2_file)', 'gdal.GA_ReadOnly'], {}), '(path + mask2_file, gdal.GA_ReadOnly)\n', (4214, 4251), False, 'import gdal\n'), ((4348, 4392), 'gdal.Open', 'gdal.Open', (['(path + dem_file)', 'gdal.GA_ReadOnly'], {}), '(path + 
dem_file, gdal.GA_ReadOnly)\n', (4357, 4392), False, 'import gdal\n'), ((5018, 5035), 'random.shuffle', 'random.shuffle', (['p'], {}), '(p)\n', (5032, 5035), False, 'import random\n'), ((6093, 6129), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_train_k)'], {}), '((x_train, x_train_k))\n', (6107, 6129), True, 'import numpy as np\n'), ((6152, 6188), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_train_k)'], {}), '((y_train, y_train_k))\n', (6166, 6188), True, 'import numpy as np\n'), ((7096, 7128), 'numpy.concatenate', 'np.concatenate', (['(x_val, x_val_k)'], {}), '((x_val, x_val_k))\n', (7110, 7128), True, 'import numpy as np\n'), ((7149, 7181), 'numpy.concatenate', 'np.concatenate', (['(y_val, y_val_k)'], {}), '((y_val, y_val_k))\n', (7163, 7181), True, 'import numpy as np\n')] |
import os
import torch
import shutil
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import math
cmap = plt.cm.viridis
def parse_command():
    """Build and run the FastDepth evaluation argument parser.

    Returns the parsed ``argparse.Namespace`` (with ``cuda`` defaulted to True).
    """
    import argparse
    from dataloaders.dataloader import MyDataloader

    data_names = ['nyudepthv2']
    modality_names = MyDataloader.modality_names

    parser = argparse.ArgumentParser(description='FastDepth')
    parser.add_argument(
        '--data', metavar='DATA', default='nyudepthv2', choices=data_names,
        help='dataset: ' + ' | '.join(data_names) + ' (default: nyudepthv2)')
    parser.add_argument(
        '--modality', '-m', metavar='MODALITY', default='rgb',
        choices=modality_names,
        help='modality: ' + ' | '.join(modality_names) + ' (default: rgb)')
    parser.add_argument(
        '-j', '--workers', default=16, type=int, metavar='N',
        help='number of data loading workers (default: 16)')
    parser.add_argument(
        '--print-freq', '-p', default=50, type=int, metavar='N',
        help='print frequency (default: 50)')
    parser.add_argument('-e', '--evaluate', default='', type=str, metavar='PATH')
    parser.add_argument('--gpu', default='0', type=str, metavar='N', help="gpu id")
    parser.set_defaults(cuda=True)
    return parser.parse_args()
def colored_depthmap(depth, d_min=None, d_max=None):
    """Colorize a 2-D depth map with the module-level ``cmap``.

    Parameters
    ----------
    depth : np.ndarray
        2-D array of depth values.
    d_min, d_max : float, optional
        Range mapped onto the colormap ends; computed from ``depth`` when
        omitted.

    Returns
    -------
    np.ndarray
        H x W x 3 float array with values in [0, 255].
    """
    if d_min is None:
        d_min = np.min(depth)
    if d_max is None:
        d_max = np.max(depth)
    span = d_max - d_min
    if span == 0:
        # Constant-depth input previously divided by zero (producing a NaN
        # image); map everything to the low end of the colormap instead.
        span = 1.0
    depth_relative = (depth - d_min) / span
    return 255 * cmap(depth_relative)[:, :, :3]  # H, W, C
def merge_into_row(input, depth_target, depth_pred):
    """Stack the RGB input, ground-truth depth and predicted depth side by side.

    Both depth images are colorized over a shared value range so they are
    visually comparable.
    """
    rgb = 255 * np.transpose(np.squeeze(input.cpu().numpy()), (1, 2, 0))  # H, W, C
    target = np.squeeze(depth_target.cpu().numpy())
    pred = np.squeeze(depth_pred.data.cpu().numpy())
    # Shared color scale across target and prediction.
    d_min = min(np.min(target), np.min(pred))
    d_max = max(np.max(target), np.max(pred))
    return np.hstack([
        rgb,
        colored_depthmap(target, d_min, d_max),
        colored_depthmap(pred, d_min, d_max),
    ])
def merge_into_row_with_gt(input, depth_input, depth_target, depth_pred):
    """Stack RGB input, sparse depth input, ground truth and prediction in a row.

    All three depth images share one color scale for visual comparability.
    """
    rgb = 255 * np.transpose(np.squeeze(input.cpu().numpy()), (1, 2, 0))  # H, W, C
    sparse = np.squeeze(depth_input.cpu().numpy())
    target = np.squeeze(depth_target.cpu().numpy())
    pred = np.squeeze(depth_pred.data.cpu().numpy())
    # Shared color scale across all depth panels.
    d_min = min(np.min(sparse), np.min(target), np.min(pred))
    d_max = max(np.max(sparse), np.max(target), np.max(pred))
    return np.hstack([
        rgb,
        colored_depthmap(sparse, d_min, d_max),
        colored_depthmap(target, d_min, d_max),
        colored_depthmap(pred, d_min, d_max),
    ])
def add_row(img_merge, row):
    """Append *row* below *img_merge* (vertical stack)."""
    return np.vstack((img_merge, row))
def save_image(img_merge, filename):
    """Write a float image array to *filename* as an 8-bit image."""
    Image.fromarray(img_merge.astype('uint8')).save(filename)
| [
"argparse.ArgumentParser",
"numpy.hstack",
"numpy.min",
"numpy.max",
"numpy.vstack"
] | [((336, 384), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""FastDepth"""'}), "(description='FastDepth')\n", (359, 384), False, 'import argparse\n'), ((2158, 2208), 'numpy.hstack', 'np.hstack', (['[rgb, depth_target_col, depth_pred_col]'], {}), '([rgb, depth_target_col, depth_pred_col])\n', (2167, 2208), True, 'import numpy as np\n'), ((2987, 3054), 'numpy.hstack', 'np.hstack', (['[rgb, depth_input_col, depth_target_col, depth_pred_col]'], {}), '([rgb, depth_input_col, depth_target_col, depth_pred_col])\n', (2996, 3054), True, 'import numpy as np\n'), ((3119, 3146), 'numpy.vstack', 'np.vstack', (['[img_merge, row]'], {}), '([img_merge, row])\n', (3128, 3146), True, 'import numpy as np\n'), ((1431, 1444), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (1437, 1444), True, 'import numpy as np\n'), ((1483, 1496), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (1489, 1496), True, 'import numpy as np\n'), ((1886, 1910), 'numpy.min', 'np.min', (['depth_target_cpu'], {}), '(depth_target_cpu)\n', (1892, 1910), True, 'import numpy as np\n'), ((1912, 1934), 'numpy.min', 'np.min', (['depth_pred_cpu'], {}), '(depth_pred_cpu)\n', (1918, 1934), True, 'import numpy as np\n'), ((1952, 1976), 'numpy.max', 'np.max', (['depth_target_cpu'], {}), '(depth_target_cpu)\n', (1958, 1976), True, 'import numpy as np\n'), ((1978, 2000), 'numpy.max', 'np.max', (['depth_pred_cpu'], {}), '(depth_pred_cpu)\n', (1984, 2000), True, 'import numpy as np\n'), ((2594, 2617), 'numpy.min', 'np.min', (['depth_input_cpu'], {}), '(depth_input_cpu)\n', (2600, 2617), True, 'import numpy as np\n'), ((2619, 2643), 'numpy.min', 'np.min', (['depth_target_cpu'], {}), '(depth_target_cpu)\n', (2625, 2643), True, 'import numpy as np\n'), ((2645, 2667), 'numpy.min', 'np.min', (['depth_pred_cpu'], {}), '(depth_pred_cpu)\n', (2651, 2667), True, 'import numpy as np\n'), ((2685, 2708), 'numpy.max', 'np.max', (['depth_input_cpu'], {}), '(depth_input_cpu)\n', (2691, 2708), True, 
'import numpy as np\n'), ((2710, 2734), 'numpy.max', 'np.max', (['depth_target_cpu'], {}), '(depth_target_cpu)\n', (2716, 2734), True, 'import numpy as np\n'), ((2736, 2758), 'numpy.max', 'np.max', (['depth_pred_cpu'], {}), '(depth_pred_cpu)\n', (2742, 2758), True, 'import numpy as np\n')] |
"""Functions for ISCE software.
This module has functions that facilitate running `ISCE software` (v2.1.0).
Currently extensively uses external calls to GDAL command line scripts. Some
functions borrow from example applications distributed with ISCE.
.. _`ISCE software`: https://winsar.unavco.org/software/isce
"""
# from lxml import objectify, etree
# import os
# import matplotlib
# matplotlib.use("Agg") # Necessary for basic OS (e.g. minimal docker images)
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import numpy as np
import yaml
import os
def read_yaml_template(template=None):
    """Load a YAML settings file.

    Falls back to the ``topsApp-template.yml`` shipped next to this module
    when *template* is None.
    """
    path = template
    if path is None:
        here = os.path.dirname(__file__)
        path = os.path.join(here, "topsApp-template.yml")
    with open(path, "r") as fh:
        return yaml.load(fh, Loader=yaml.FullLoader)
def dict2xml(dictionary, root="topsApp", topcomp="topsinsar"):
    """Render a shallowly nested settings dict as ISCE topsApp XML."""

    def prop_line(name, value):
        # One leaf <property> entry.
        return f"        <property name='{name}'>{value}</property>\n"

    def component_lines(name, properties):
        # A nested <component> wrapping its leaf properties.
        body = "".join(prop_line(k, v) for k, v in properties.items())
        return f"    <component name='{name}'>\n" + body + "    </component>\n"

    pieces = [f'<{root}>\n  <component name="{topcomp}">\n']
    for key, val in dictionary[topcomp].items():
        if isinstance(val, dict):
            pieces.append(component_lines(key, val))
        else:
            pieces.append(prop_line(key, val))
    pieces.append(f"  </component>\n</{root}>\n")
    return "".join(pieces)
def write_xml(xml, outname="topsApp.xml"):
    """Dump an XML string to *outname*, announcing the write on stdout."""
    destination = outname
    print(f"writing {destination}")
    with open(destination, "w") as fh:
        fh.write(xml)
def load_defaultDict(template):
    """Return run settings: parsed from *template* when given, else defaults."""
    if not template:
        # No template supplied: built-in Sentinel-1 defaults.
        return {
            "topsinsar": {
                "sensorname": "SENTINEL1",
                "reference": {"safe": ""},
                "secondary": {"safe": ""},
            }
        }
    print(f"Reading from template file: {template}...")
    return read_yaml_template(template)
def write_cmap(outname, vals, scalarMap):
    """Write a GMT-style .cpt colour table mapping *vals* through *scalarMap*.

    Parameters
    ----------
    outname : str
        output file name (e.g. amplitude-cog.cpt)
    vals : iterable of float
        data values to tabulate, one line each
    scalarMap : ScalarMappable
        maps a data value to an RGBA tuple with components in [0, 1]
    """
    with open(outname, "w") as fid:
        for val in vals:
            red, green, blue = (int(c * 255) for c in scalarMap.to_rgba(val)[:3])
            fid.write("{0} {1} {2} {3} \n".format(val, red, green, blue))
        # nodata entry: fully transparent black
        fid.write("nv 0 0 0 0 \n")
def make_amplitude_cmap(
    mapname="gray", vmin=1, vmax=1e5, ncolors=64, outname="amplitude-cog.cpt"
):
    """Write the default amplitude colormap file and return its name.

    A logarithmic normalisation is used because amplitude returns typically
    span several orders of magnitude.

    Parameters
    ----------
    mapname : str
        matplotlib colormap name
    vmin, vmax : float
        data values mapped to the colormap ends
    ncolors : int
        number of discrete entries between vmin and vmax
    """
    # NOTE: for stronger contrast try colors.Normalize(vmin=1e3, vmax=1e4).
    mapper = cmx.ScalarMappable(
        norm=colors.LogNorm(vmin=vmin, vmax=vmax), cmap=plt.get_cmap(mapname)
    )
    write_cmap(outname, np.linspace(vmin, vmax, ncolors, endpoint=True), mapper)
    return outname
def make_wrapped_phase_cmap(
    mapname="plasma",
    vmin=-50,
    vmax=50,
    ncolors=64,
    wrapRate=6.28,
    outname="unwrapped-phase-cog.cpt",
):
    """Write a re-wrapped phase colormap file and return its name.

    Unwrapped phase values are folded modulo *wrapRate* so each colour cycle
    represents one phase cycle.

    Parameters
    ----------
    mapname : str
        matplotlib colormap name
    vmin, vmax : float
        data values mapped to the colormap ends
    ncolors : int
        number of discrete entries between vmin and vmax
    wrapRate : float
        number of radians per phase cycle
    """
    mapper = cmx.ScalarMappable(
        norm=colors.Normalize(vmin=0, vmax=1),  # normalization over one wrap
        cmap=plt.get_cmap(mapname),
    )
    sample_vals = np.linspace(vmin, vmax, ncolors, endpoint=True)
    with open(outname, "w") as fid:
        for val in sample_vals:
            # Fold the value into [0, 1) within its phase cycle.
            frac = np.remainder(val, wrapRate) / wrapRate
            red, green, blue = (int(c * 255) for c in mapper.to_rgba(frac)[:3])
            fid.write("{0} {1} {2} {3} \n".format(val, red, green, blue))
        # nodata entry: fully transparent black
        fid.write("nv 0 0 0 0 \n")
    return outname
def make_coherence_cmap(
    mapname="inferno", vmin=1e-5, vmax=1, ncolors=64, outname="coherence-cog.cpt"
):
    """Write the default coherence colormap file and return its name.

    Parameters
    ----------
    mapname : str
        matplotlib colormap name
    vmin, vmax : float
        data values mapped to the colormap ends
    ncolors : int
        number of discrete entries between vmin and vmax
    """
    mapper = cmx.ScalarMappable(
        norm=colors.Normalize(vmin=vmin, vmax=vmax), cmap=plt.get_cmap(mapname)
    )
    write_cmap(outname, np.linspace(vmin, vmax, ncolors, endpoint=True), mapper)
    return outname
def make_cmap(infile):
    """Pick the colormap writer matching *infile* and return the .cpt path."""
    coherence_files = {"coherence-cog.tif", "phsig.cor.geo.vrt", "topophase.cor.geo.vrt"}
    phase_files = {"unwrapped-phase-cog.tif", "filt_topophase.unw.geo.vrt"}
    if infile in coherence_files:
        return make_coherence_cmap()
    if infile in phase_files:
        return make_wrapped_phase_cmap()
    # Anything else is treated as an amplitude image.
    return make_amplitude_cmap()
| [
"yaml.load",
"matplotlib.pyplot.get_cmap",
"matplotlib.colors.Normalize",
"numpy.remainder",
"matplotlib.cm.ScalarMappable",
"os.path.dirname",
"matplotlib.colors.LogNorm",
"numpy.linspace"
] | [((3719, 3740), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['mapname'], {}), '(mapname)\n', (3731, 3740), True, 'import matplotlib.pyplot as plt\n'), ((3847, 3883), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (3861, 3883), True, 'import matplotlib.colors as colors\n'), ((3900, 3941), 'matplotlib.cm.ScalarMappable', 'cmx.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'cmap'}), '(norm=cNorm, cmap=cmap)\n', (3918, 3941), True, 'import matplotlib.cm as cmx\n'), ((3953, 4000), 'numpy.linspace', 'np.linspace', (['vmin', 'vmax', 'ncolors'], {'endpoint': '(True)'}), '(vmin, vmax, ncolors, endpoint=True)\n', (3964, 4000), True, 'import numpy as np\n'), ((4766, 4787), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['mapname'], {}), '(mapname)\n', (4778, 4787), True, 'import matplotlib.pyplot as plt\n'), ((4800, 4832), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(1)'}), '(vmin=0, vmax=1)\n', (4816, 4832), True, 'import matplotlib.colors as colors\n'), ((4878, 4919), 'matplotlib.cm.ScalarMappable', 'cmx.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'cmap'}), '(norm=cNorm, cmap=cmap)\n', (4896, 4919), True, 'import matplotlib.cm as cmx\n'), ((4931, 4978), 'numpy.linspace', 'np.linspace', (['vmin', 'vmax', 'ncolors'], {'endpoint': '(True)'}), '(vmin, vmax, ncolors, endpoint=True)\n', (4942, 4978), True, 'import numpy as np\n'), ((6016, 6037), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['mapname'], {}), '(mapname)\n', (6028, 6037), True, 'import matplotlib.pyplot as plt\n'), ((6050, 6088), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (6066, 6088), True, 'import matplotlib.colors as colors\n'), ((6105, 6146), 'matplotlib.cm.ScalarMappable', 'cmx.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'cmap'}), '(norm=cNorm, cmap=cmap)\n', (6123, 6146), True, 'import matplotlib.cm as cmx\n'), 
((6158, 6205), 'numpy.linspace', 'np.linspace', (['vmin', 'vmax', 'ncolors'], {'endpoint': '(True)'}), '(vmin, vmax, ncolors, endpoint=True)\n', (6169, 6205), True, 'import numpy as np\n'), ((838, 880), 'yaml.load', 'yaml.load', (['outfile'], {'Loader': 'yaml.FullLoader'}), '(outfile, Loader=yaml.FullLoader)\n', (847, 880), False, 'import yaml\n'), ((4998, 5026), 'numpy.remainder', 'np.remainder', (['vals', 'wrapRate'], {}), '(vals, wrapRate)\n', (5010, 5026), True, 'import numpy as np\n'), ((727, 752), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (742, 752), False, 'import os\n')] |
import os
import torch
import numpy as np
import imageio as m
import cv2
import pandas as pd
from torch.utils import data
import json
def recursive_glob(rootdir=".", suffix=""):
    """Walk *rootdir* recursively and collect file paths ending in *suffix*.

    :param rootdir: directory to search from
    :param suffix: required filename ending (empty string matches everything)
    """
    matches = []
    for dirpath, _, names in os.walk(rootdir):
        matches.extend(
            os.path.join(dirpath, name) for name in names if name.endswith(suffix)
        )
    return matches
class SegmentationLoader(data.Dataset):
    """Semantic-segmentation dataset of JPEG images and PNG colour masks.

    Expected layout under ``root`` (presumably; verify against the dataset)::

        <images_folder>/<split>/<id>.jpg
        <annotations_folder>/<split>/<id>_mask.png
        dataset.csv              # per-image table; last column drives class flag 2
        annotations-info.txt     # JSON with "colors" and "class_names"

    ``__getitem__`` returns ``(image, label_mask, class_vector)``.
    """

    # Per-"version" channel means subtracted from each image in transform().
    mean_rgb = {
        "standard": [0.0, 0.0, 0.0],
    }
    def __init__(
        self,
        root='dataset',
        images_folder='images',
        annotations_folder='annotations',
        split="train",
        is_transform=True,
        img_size=(512, 256),
        augmentations=None,
        img_norm=True,
        test_mode=False,
        version="standard"
    ):
        """__init__

        :param root: dataset root directory
        :param images_folder: sub-directory holding the input images
        :param annotations_folder: sub-directory holding the mask images
        :param split: which split sub-directory to load ("train", ...)
        :param is_transform: if True, resize/normalise samples in __getitem__
        :param img_size: int or 2-tuple passed to cv2.resize as dsize
        :param augmentations: optional callable applied to (img, lbl) pairs
        :param img_norm: if True, scale image values into [0, 1]
        :param test_mode: accepted but unused  # NOTE(review): dead parameter
        :param version: key into ``mean_rgb`` selecting the channel means
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        # Accept a single int as shorthand for a square size.
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.mean = np.array(self.mean_rgb[version])
        self.files = {}
        self.images_base = os.path.join(self.root, images_folder, self.split)
        self.annotations_base = os.path.join(self.root, annotations_folder, self.split)
        self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".jpg")
        # Reading dataset info
        self.data = pd.read_csv(os.path.join(self.root, 'dataset.csv'))
        with open(os.path.join(self.root, 'annotations-info.txt')) as json_file:
            annotations_info = json.load(json_file)
        # colors: one RGB triple per class, used by encode/decode_segmap
        self.colors = annotations_info['colors']
        # NOTE(review): hard-codes 3 classes; presumably should be
        # len(self.colors) -- confirm against the annotation file.
        self.label_colours = dict(zip(range(3), self.colors))
        # class names
        self.class_names = annotations_info['class_names']
        self.n_classes = len(self.class_names)
        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
        print("Found %d %s images" % (len(self.files[split]), split))
    def __len__(self):
        """Number of images in the selected split."""
        return len(self.files[self.split])
    def __getitem__(self, index):
        """Load one sample.

        :param index: position within the split's file list
        :returns: (img, lbl, cls) -- image, encoded label mask and a
            3-element class-presence tensor
        """
        img_path = self.files[self.split][index].rstrip()
        # Mask path: "<image stem>_mask.png" inside annotations_base.
        lbl_path = os.path.join(
            self.annotations_base,
            os.path.basename(img_path)[:-4] + "_mask.png",
        )
        img = m.imread(img_path)
        img = np.array(img, dtype=np.uint8)
        lbl = m.imread(lbl_path)
        # Filename stem is assumed to be a 1-based integer row index into
        # dataset.csv -- TODO(review) confirm the naming convention.
        idx = int(os.path.basename(img_path)[:-4]) - 1
        cls = torch.tensor([1., 1., 0])
        # Third class flag comes from the last column of dataset.csv.
        cls[2] = torch.tensor(self.data.iloc[idx, -1]) > 0
        if self.augmentations is not None:
            img, lbl = self.augmentations(img, lbl)
        lbl = self.encode_segmap(np.array(lbl, dtype=np.uint8))
        if self.is_transform:
            img, lbl = self.transform(img, lbl)
        return img, lbl, cls
    def transform(self, img, lbl):
        """Resize, normalise and convert a sample to torch tensors.

        :param img: H x W x C uint8 RGB image
        :param lbl: H x W integer label mask
        """
        img = cv2.resize(img, (self.img_size[0], self.img_size[1]))  # cv2 dsize is (width, height)
        img = img[:, :, ::-1]  # RGB -> BGR
        img = img.astype(np.float64)
        img -= self.mean
        if self.img_norm:
            # Resize scales images from 0 to 255, thus we need
            # to divide by 255.0
            img = img.astype(float) / 255.0
        # NHWC -> NCHW
        img = img.transpose(2, 0, 1)
        classes = np.unique(lbl)
        lbl = lbl.astype(float)
        # Nearest-neighbour interpolation keeps label values discrete.
        lbl = cv2.resize(lbl, (self.img_size[0], self.img_size[1]), interpolation=cv2.INTER_NEAREST)
        lbl = lbl.astype(int)
        if not np.all(classes == np.unique(lbl)):
            print("WARN: resizing labels yielded fewer classes")
        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
    def decode_segmap(self, temp):
        """Convert a class-index mask to an RGB float image in [0, 1]."""
        r = temp.copy()
        g = temp.copy()
        b = temp.copy()
        for l in range(0, self.n_classes):
            r[temp == l] = self.label_colours[l][0]
            g[temp == l] = self.label_colours[l][1]
            b[temp == l] = self.label_colours[l][2]
        rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        return rgb
    def encode_segmap(self, mask):
        """Encode segmentation label images as pascal classes

        Args:
            mask (np.ndarray): raw segmentation label image of dimension
              (M, N, 3), in which the Pascal classes are encoded as colours.
        Returns:
            (np.ndarray): class map with dimensions (M,N), where the value at
            a given location is the integer denoting the class index.
        """
        mask = mask.astype(int)
        label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
        for ii, label in enumerate(self.colors):
            # Pixels whose RGB triple matches this class colour get index ii.
            label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii
        label_mask = label_mask.astype(int)
        return label_mask
# Leave code for debugging purposes
# import ptsemseg.augmentations as aug
if __name__ == '__main__':
    # Visual smoke test: load a few augmented batches and plot each image
    # next to its decoded segmentation mask.
    from augmentations import Compose, RandomHorizontallyFlip, RandomVerticallyFlip, RandomRotate, Scale
    from augmentations import AdjustContrast, AdjustBrightness, AdjustSaturation
    import matplotlib.pyplot as plt
    bs = 4  # batch size for the preview grid
    augmentations = Compose([Scale(512),
                             RandomRotate(10),
                             RandomHorizontallyFlip(0.5),
                             RandomVerticallyFlip(0.5),
                             AdjustContrast(0.25),
                             AdjustBrightness(0.25),
                             AdjustSaturation(0.25)])
    dst = SegmentationLoader(root='../dataset/', is_transform=True, augmentations=augmentations)
    trainloader = data.DataLoader(dst, batch_size=bs)
    for i, data_samples in enumerate(trainloader):
        imgs, labels, cls = data_samples
        imgs = imgs.numpy()[:, ::-1, :, :]  # undo the RGB -> BGR flip from transform()
        imgs = np.transpose(imgs, [0,2,3,1])  # NCHW -> NHWC for imshow
        f, axarr = plt.subplots(bs, 2)
        for j in range(bs):
            print(imgs[j].shape)
            axarr[j][0].imshow(imgs[j])
            axarr[j][1].imshow(dst.decode_segmap(labels.numpy()[j]))
plt.show() | [
"augmentations.RandomRotate",
"os.walk",
"augmentations.RandomHorizontallyFlip",
"augmentations.AdjustContrast",
"os.path.join",
"numpy.unique",
"torch.utils.data.DataLoader",
"numpy.transpose",
"augmentations.AdjustBrightness",
"augmentations.Scale",
"matplotlib.pyplot.subplots",
"cv2.resize"... | [((6369, 6404), 'torch.utils.data.DataLoader', 'data.DataLoader', (['dst'], {'batch_size': 'bs'}), '(dst, batch_size=bs)\n', (6384, 6404), False, 'from torch.utils import data\n'), ((366, 398), 'os.path.join', 'os.path.join', (['looproot', 'filename'], {}), '(looproot, filename)\n', (378, 398), False, 'import os\n'), ((1390, 1422), 'numpy.array', 'np.array', (['self.mean_rgb[version]'], {}), '(self.mean_rgb[version])\n', (1398, 1422), True, 'import numpy as np\n'), ((1475, 1525), 'os.path.join', 'os.path.join', (['self.root', 'images_folder', 'self.split'], {}), '(self.root, images_folder, self.split)\n', (1487, 1525), False, 'import os\n'), ((1558, 1613), 'os.path.join', 'os.path.join', (['self.root', 'annotations_folder', 'self.split'], {}), '(self.root, annotations_folder, self.split)\n', (1570, 1613), False, 'import os\n'), ((2810, 2828), 'imageio.imread', 'm.imread', (['img_path'], {}), '(img_path)\n', (2818, 2828), True, 'import imageio as m\n'), ((2843, 2872), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (2851, 2872), True, 'import numpy as np\n'), ((2888, 2906), 'imageio.imread', 'm.imread', (['lbl_path'], {}), '(lbl_path)\n', (2896, 2906), True, 'import imageio as m\n'), ((2985, 3012), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 0]'], {}), '([1.0, 1.0, 0])\n', (2997, 3012), False, 'import torch\n'), ((3479, 3532), 'cv2.resize', 'cv2.resize', (['img', '(self.img_size[0], self.img_size[1])'], {}), '(img, (self.img_size[0], self.img_size[1]))\n', (3489, 3532), False, 'import cv2\n'), ((3884, 3898), 'numpy.unique', 'np.unique', (['lbl'], {}), '(lbl)\n', (3893, 3898), True, 'import numpy as np\n'), ((3954, 4045), 'cv2.resize', 'cv2.resize', (['lbl', '(self.img_size[0], self.img_size[1])'], {'interpolation': 'cv2.INTER_NEAREST'}), '(lbl, (self.img_size[0], self.img_size[1]), interpolation=cv2.\n INTER_NEAREST)\n', (3964, 4045), False, 'import cv2\n'), ((4622, 4665), 'numpy.zeros', 'np.zeros', 
(['(temp.shape[0], temp.shape[1], 3)'], {}), '((temp.shape[0], temp.shape[1], 3))\n', (4630, 4665), True, 'import numpy as np\n'), ((5276, 5332), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1])'], {'dtype': 'np.int16'}), '((mask.shape[0], mask.shape[1]), dtype=np.int16)\n', (5284, 5332), True, 'import numpy as np\n'), ((6560, 6592), 'numpy.transpose', 'np.transpose', (['imgs', '[0, 2, 3, 1]'], {}), '(imgs, [0, 2, 3, 1])\n', (6572, 6592), True, 'import numpy as np\n'), ((6609, 6628), 'matplotlib.pyplot.subplots', 'plt.subplots', (['bs', '(2)'], {}), '(bs, 2)\n', (6621, 6628), True, 'import matplotlib.pyplot as plt\n'), ((6825, 6835), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6833, 6835), True, 'import matplotlib.pyplot as plt\n'), ((437, 453), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (444, 453), False, 'import os\n'), ((1771, 1809), 'os.path.join', 'os.path.join', (['self.root', '"""dataset.csv"""'], {}), "(self.root, 'dataset.csv')\n", (1783, 1809), False, 'import os\n'), ((1923, 1943), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1932, 1943), False, 'import json\n'), ((3028, 3065), 'torch.tensor', 'torch.tensor', (['self.data.iloc[idx, -1]'], {}), '(self.data.iloc[idx, -1])\n', (3040, 3065), False, 'import torch\n'), ((3216, 3245), 'numpy.array', 'np.array', (['lbl'], {'dtype': 'np.uint8'}), '(lbl, dtype=np.uint8)\n', (3224, 3245), True, 'import numpy as np\n'), ((5910, 5920), 'augmentations.Scale', 'Scale', (['(512)'], {}), '(512)\n', (5915, 5920), False, 'from augmentations import Compose, RandomHorizontallyFlip, RandomVerticallyFlip, RandomRotate, Scale\n'), ((5951, 5967), 'augmentations.RandomRotate', 'RandomRotate', (['(10)'], {}), '(10)\n', (5963, 5967), False, 'from augmentations import Compose, RandomHorizontallyFlip, RandomVerticallyFlip, RandomRotate, Scale\n'), ((5998, 6025), 'augmentations.RandomHorizontallyFlip', 'RandomHorizontallyFlip', (['(0.5)'], {}), '(0.5)\n', (6020, 6025), False, 
'from augmentations import Compose, RandomHorizontallyFlip, RandomVerticallyFlip, RandomRotate, Scale\n'), ((6056, 6081), 'augmentations.RandomVerticallyFlip', 'RandomVerticallyFlip', (['(0.5)'], {}), '(0.5)\n', (6076, 6081), False, 'from augmentations import Compose, RandomHorizontallyFlip, RandomVerticallyFlip, RandomRotate, Scale\n'), ((6112, 6132), 'augmentations.AdjustContrast', 'AdjustContrast', (['(0.25)'], {}), '(0.25)\n', (6126, 6132), False, 'from augmentations import AdjustContrast, AdjustBrightness, AdjustSaturation\n'), ((6163, 6185), 'augmentations.AdjustBrightness', 'AdjustBrightness', (['(0.25)'], {}), '(0.25)\n', (6179, 6185), False, 'from augmentations import AdjustContrast, AdjustBrightness, AdjustSaturation\n'), ((6216, 6238), 'augmentations.AdjustSaturation', 'AdjustSaturation', (['(0.25)'], {}), '(0.25)\n', (6232, 6238), False, 'from augmentations import AdjustContrast, AdjustBrightness, AdjustSaturation\n'), ((1829, 1876), 'os.path.join', 'os.path.join', (['self.root', '"""annotations-info.txt"""'], {}), "(self.root, 'annotations-info.txt')\n", (1841, 1876), False, 'import os\n'), ((4202, 4223), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (4218, 4223), False, 'import torch\n'), ((4246, 4267), 'torch.from_numpy', 'torch.from_numpy', (['lbl'], {}), '(lbl)\n', (4262, 4267), False, 'import torch\n'), ((2738, 2764), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (2754, 2764), False, 'import os\n'), ((2934, 2960), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (2950, 2960), False, 'import os\n'), ((4105, 4119), 'numpy.unique', 'np.unique', (['lbl'], {}), '(lbl)\n', (4114, 4119), True, 'import numpy as np\n'), ((5414, 5444), 'numpy.all', 'np.all', (['(mask == label)'], {'axis': '(-1)'}), '(mask == label, axis=-1)\n', (5420, 5444), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import unittest
import copy
import logging
import shutil
import random
import importlib
import pandas as pd
import json
import pytest
from unittest.mock import patch
from nibabel import Nifti1Image
import numpy as np
import nilearn.image
import ciftify.bin.ciftify_clean_img as ciftify_clean_img
logging.disable(logging.CRITICAL)
def _check_input_readble_side_effect(path):
'''just returns the path'''
return(path)
def _pandas_read_side_effect(path):
'''return and empty data frame'''
return(pd.DataFrame())
class TestUserSettings(unittest.TestCase):
    """Tests for ciftify_clean_img.UserSettings argument handling.

    File-readability/writability checks and pandas/json reads are patched out
    so only the docopt-argument parsing and validation logic runs.

    NOTE(review): @patch decorators inject mocks bottom-up, so in these
    signatures the first mock arg is actually the check_output_writable patch
    (the names 'mock_readable'/'mock_writable' are swapped); harmless here
    since the mocks themselves are never inspected.
    """

    # Baseline docopt-style argument dict; tests deep-copy and tweak it.
    docopt_args = {
        '<func_input>': '/path/to/func/file.nii.gz',
        '--output-file': None,
        '--clean-config': None,
        '--drop-dummy-TRs': None,
        '--no-cleaning': False,
        '--detrend': False,
        '--standardize': False,
        '--confounds-tsv': None,
        '--cf-cols': None,
        '--cf-sq-cols': None,
        '--cf-td-cols': None,
        '--cf-sqtd-cols': None,
        '--low-pass': None,
        '--high-pass': None,
        '--tr': '2.0',
        '--smooth-fwhm': None,
        '--left-surface': None,
        '--right-surface': None }

    # Minimal clean-config JSON consumed via the patched load_json_file.
    json_config = '''
    {
      "--detrend": true,
      "--standardize": true,
      "--low-pass": 0.1,
      "--high-pass": 0.01
    }
    '''

    @patch('ciftify.bin.ciftify_clean_img.load_json_file', side_effect = json.loads)
    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_that_updated_arg_is_present(self, mock_readable, mock_writable, mock_json):
        """Values from --clean-config should override the docopt defaults."""
        arguments = copy.deepcopy(self.docopt_args)
        arguments['--clean-config'] = self.json_config
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.high_pass == 0.01, "high_pass not set to config val"
        assert settings.detrend == True, "detrend not set to config val"

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_exist_gracefully_if_json_not_readable(self, mock_readable, mock_writable):
        """An unreadable --clean-config file should SystemExit, not crash."""
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/func.nii.gz'
        missing_json = '/wrong/path/missing.json'
        arguments['--clean-config'] = missing_json
        with pytest.raises(SystemExit):
            settings = ciftify_clean_img.UserSettings(arguments)

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_exists_gracefully_if_input_is_gifti(self, mock_readable, mock_writable):
        """GIFTI input (.gii) is unsupported and should SystemExit."""
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/func.L.func.gii'
        with pytest.raises(SystemExit):
            settings = ciftify_clean_img.UserSettings(arguments)

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_dtseries_input_returned(self, mock_readable, mock_writable):
        """A .dtseries.nii input should be typed as cifti, path untouched."""
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/myfunc.dtseries.nii'
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.func.type == "cifti"
        assert settings.func.path == '/path/to/input/myfunc.dtseries.nii'

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_nifti_input_returned_correctly(self, mock_readable, mock_writable):
        """A .nii.gz input should be typed as nifti, path untouched."""
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/myfunc.nii.gz'
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.func.type == "nifti"
        assert settings.func.path == '/path/to/input/myfunc.nii.gz'

    def test_exists_gracefully_if_output_not_writable(self):
        """Unpatched (unwritable) output location should SystemExit."""
        wrong_func = '/wrong/path/to/input/myfunc.nii.gz'
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = wrong_func
        with pytest.raises(SystemExit):
            settings = ciftify_clean_img.UserSettings(arguments)

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_proper_output_returned_for_nifti(self, mock_readable, mock_writable):
        """Output name should embed the smoothing FWHM for nifti input."""
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/myfunc.nii.gz'
        arguments['--smooth-fwhm'] = 8
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.output_func == '/path/to/input/myfunc_clean_s8.nii.gz'

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_proper_output_returned_for_cifti(self, mock_readable, mock_writable):
        """Default (no smoothing) cifti output name carries the s0 suffix."""
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/myfunc.dtseries.nii'
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.output_func == '/path/to/input/myfunc_clean_s0.dtseries.nii'

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_exits_when_confounds_tsv_not_given(self, mock_readable, mock_writable):
        """--cf-cols without --confounds-tsv is invalid and should SystemExit."""
        arguments = copy.deepcopy(self.docopt_args)
        arguments['--cf-cols'] = 'one,two,three'
        arguments['--confounds-tsv'] = None
        with pytest.raises(SystemExit):
            settings = ciftify_clean_img.UserSettings(arguments)

    @patch('pandas.read_csv', return_value = pd.DataFrame(columns = ['one', 'two', 'three']))
    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_list_arg_returns_list_for_multi(self, mock_readable, mock_writable, mock_pdread):
        """Comma-separated --cf-cols should be split into a list."""
        arguments = copy.deepcopy(self.docopt_args)
        arguments['--cf-cols'] = 'one,two,three'
        arguments['--confounds-tsv'] = '/path/to/confounds.tsv'
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.cf_cols == ['one','two','three']
@patch('pandas.read_csv', return_value = pd.DataFrame(columns = ['one', 'two', 'three']))
@patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
@patch('ciftify.utils.check_output_writable', return_value = True)
def test_list_arg_returns_list_for_one_item(self, mock_readable, mock_writable, mock_pdread):
arguments = copy.deepcopy(self.docopt_args)
arguments['--cf-cols'] = 'one'
arguments['--confounds-tsv'] = '/path/to/confounds.tsv'
settings = ciftify_clean_img.UserSettings(arguments)
assert settings.cf_cols == ['one']
@patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
@patch('ciftify.utils.check_output_writable', return_value = True)
def test_list_arg_returns_empty_list_for_none(self, mock_readable, mock_writable):
arguments = copy.deepcopy(self.docopt_args)
settings = ciftify_clean_img.UserSettings(arguments)
assert settings.cf_cols == []
@patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
@patch('ciftify.utils.check_output_writable', return_value = True)
def test_bandpass_filter_returns_none_if_none(self, mock_readable, mock_writable):
arguments = copy.deepcopy(self.docopt_args)
settings = ciftify_clean_img.UserSettings(arguments)
assert settings.high_pass == None
@patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
@patch('ciftify.utils.check_output_writable', return_value = True)
def test_bandpass_filter_returns_float_if_float(self, mock_readable, mock_writable):
arguments = copy.deepcopy(self.docopt_args)
arguments['--high-pass'] = '3.14'
settings = ciftify_clean_img.UserSettings(arguments)
assert settings.high_pass == 3.14
@patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
@patch('ciftify.utils.check_output_writable', return_value = True)
def test_exists_gracefully_if_filter_not_float(self, mock_readable, mock_writable):
arguments = copy.deepcopy(self.docopt_args)
arguments['--high-pass'] = 'three'
with pytest.raises(SystemExit):
settings = ciftify_clean_img.UserSettings(arguments)
@patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
@patch('ciftify.utils.check_output_writable', return_value = True)
def test_exists_gracefully_if_surfaces_not_present(self, mock_readable, mock_writable):
arguments = copy.deepcopy(self.docopt_args)
arguments['<func_input>'] = '/path/to/input/myfunc.dtseries.nii'
arguments['--smooth-fwhm'] = 8
arguments['--left-surface'] = None
with pytest.raises(SystemExit):
settings = ciftify_clean_img.UserSettings(arguments)
@patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
@patch('ciftify.utils.check_output_writable', return_value = True)
def test_fwhm_is_0_if_not_smoothing(self, mock_readable, mock_writable):
arguments = copy.deepcopy(self.docopt_args)
arguments['<func_input>'] = '/path/to/input/myfunc.nii.gz'
arguments['--smooth-fwhm'] = None
settings = ciftify_clean_img.UserSettings(arguments)
assert settings.smooth.fwhm == 0
class TestMangleConfounds(unittest.TestCase):
    """Unit tests for ciftify_clean_img.mangle_confounds.

    The toy confounds table has three columns; every stub requests 'x' and 'y'
    (plus 'y' squared), so 'z' must never appear in the output. The lag and
    squared-lag columns are opted into per test via _stub_settings.
    """
    input_signals = pd.DataFrame(data = {'x': [1,2,3,4,5],
                                        'y': [0,0,1,2,4],
                                        'z': [8,8,8,8,8]})
    class SettingsStub(object):
        # Minimal stand-in exposing only the attributes mangle_confounds reads.
        def __init__(self, start_from, confounds,
                cf_cols, cf_sq_cols, cf_td_cols, cf_sqtd_cols):
            self.start_from_tr = start_from
            self.confounds = confounds
            self.cf_cols = cf_cols
            self.cf_sq_cols = cf_sq_cols
            self.cf_td_cols = cf_td_cols
            self.cf_sqtd_cols = cf_sqtd_cols
    def _stub_settings(self, cf_td_cols=(), cf_sqtd_cols=()):
        """Build the SettingsStub shared by these tests.

        Fixed choices: start_from_tr=2, cf_cols=['x', 'y'], cf_sq_cols=['y'].
        Tuples (not lists) as defaults to avoid the mutable-default pitfall.
        """
        return self.SettingsStub(start_from = 2,
                                 confounds = self.input_signals,
                                 cf_cols = ['x', 'y'],
                                 cf_sq_cols = ['y'],
                                 cf_td_cols = list(cf_td_cols),
                                 cf_sqtd_cols = list(cf_sqtd_cols))
    def test_starts_from_correct_row(self):
        # start_from_tr=2 drops the first two rows of the confounds table.
        confound_signals = ciftify_clean_img.mangle_confounds(self._stub_settings())
        test_output = confound_signals['y'].values
        expected_output = np.array([1,2,4])
        assert np.allclose(test_output, expected_output, equal_nan = True), \
            "{} not equal to {}".format(test_output, expected_output)
    def test_that_omitted_cols_not_output(self):
        confound_signals = ciftify_clean_img.mangle_confounds(self._stub_settings())
        assert 'z' not in list(confound_signals.columns.values)
    def test_td_col_is_returned(self):
        # 'y_lag' holds y shifted by one TR (after the start-from trim).
        confound_signals = ciftify_clean_img.mangle_confounds(
            self._stub_settings(cf_td_cols = ['y']))
        test_output = confound_signals['y_lag'].values
        expected_output = np.array([0,1,2])
        assert np.allclose(test_output, expected_output, equal_nan = True), \
            "{} not equal to {}".format(test_output, expected_output)
    def test_sq_is_returned(self):
        # 'y_sq' holds the element-wise square of y.
        confound_signals = ciftify_clean_img.mangle_confounds(
            self._stub_settings(cf_td_cols = ['y']))
        test_output = confound_signals['y_sq'].values
        expected_output = np.array([1,4,16])
        assert np.allclose(test_output, expected_output, equal_nan = True), \
            "{} not equal to {}".format(test_output, expected_output)
    def test_sqtd_col_is_returned(self):
        # 'y_sqlag' holds the squared lagged regressor.
        confound_signals = ciftify_clean_img.mangle_confounds(
            self._stub_settings(cf_td_cols = ['y'], cf_sqtd_cols = ['y']))
        test_output = confound_signals['y_sqlag'].values
        expected_output = np.array([0,1,4])
        assert np.allclose(test_output, expected_output, equal_nan = True), \
            "{} not equal to {}".format(test_output, expected_output)
    def test_all_cols_named_as_expected(self):
        confound_signals = ciftify_clean_img.mangle_confounds(
            self._stub_settings(cf_td_cols = ['y'], cf_sqtd_cols = ['y']))
        for coln in ['x', 'y', 'y_sq', 'y_lag', 'y_sqlag']:
            assert coln in list(confound_signals.columns.values)
@patch('nilearn.image.clean_img')
class TestCleanImage(unittest.TestCase):
    """clean_image_with_nilearn should skip nilearn when no cleaning is requested."""
    def test_nilearn_not_called_not_indicated(self, nilearn_clean):
        # Stub with every cleaning option switched off.
        class SettingsStub(object):
            def __init__(self):
                self.detrend = False
                self.standardize = False
                self.high_pass = None
                self.low_pass = None
        ciftify_clean_img.clean_image_with_nilearn(
            'fake_img.nii.gz', None, SettingsStub())
        nilearn_clean.assert_not_called()
def test_drop_image():
    """image_drop_dummy_trs(img, 2) should drop the first two time volumes."""
    # Five single-volume 2x2x2 images with constant values 1..5, stacked in time.
    vols = [Nifti1Image(np.ones((2, 2, 2, 1)) + i, affine=np.eye(4))
            for i in range(5)]
    img_1to5 = nilearn.image.concat_imgs(vols)
    img_trim = ciftify_clean_img.image_drop_dummy_trs(img_1to5, 2)
    # get_fdata() replaces the nibabel get_data() API (deprecated in nibabel
    # 3.0, removed in 5.0); for this integer-valued data allclose still holds.
    assert np.allclose(img_trim.get_fdata()[1, 1, 1, :], np.array([3, 4, 5]))
    assert img_trim.header.get_data_shape() == (2, 2, 2, 3)
| [
"pandas.DataFrame",
"copy.deepcopy",
"ciftify.bin.ciftify_clean_img.UserSettings",
"numpy.allclose",
"numpy.ones",
"unittest.mock.patch",
"logging.disable",
"pytest.raises",
"numpy.array",
"ciftify.bin.ciftify_clean_img.clean_image_with_nilearn",
"numpy.eye",
"ciftify.bin.ciftify_clean_img.ima... | [((333, 366), 'logging.disable', 'logging.disable', (['logging.CRITICAL'], {}), '(logging.CRITICAL)\n', (348, 366), False, 'import logging\n'), ((14741, 14773), 'unittest.mock.patch', 'patch', (['"""nilearn.image.clean_img"""'], {}), "('nilearn.image.clean_img')\n", (14746, 14773), False, 'from unittest.mock import patch\n'), ((547, 561), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (559, 561), True, 'import pandas as pd\n'), ((1314, 1391), 'unittest.mock.patch', 'patch', (['"""ciftify.bin.ciftify_clean_img.load_json_file"""'], {'side_effect': 'json.loads'}), "('ciftify.bin.ciftify_clean_img.load_json_file', side_effect=json.loads)\n", (1319, 1391), False, 'from unittest.mock import patch\n'), ((1399, 1493), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (1404, 1493), False, 'from unittest.mock import patch\n'), ((1496, 1559), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (1501, 1559), False, 'from unittest.mock import patch\n'), ((1978, 2072), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (1983, 2072), False, 'from unittest.mock import patch\n'), ((2075, 2138), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (2080, 2138), False, 'from unittest.mock import patch\n'), ((2561, 2655), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), 
"('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (2566, 2655), False, 'from unittest.mock import patch\n'), ((2658, 2721), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (2663, 2721), False, 'from unittest.mock import patch\n'), ((3046, 3140), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (3051, 3140), False, 'from unittest.mock import patch\n'), ((3143, 3206), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (3148, 3206), False, 'from unittest.mock import patch\n'), ((3596, 3690), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (3601, 3690), False, 'from unittest.mock import patch\n'), ((3693, 3756), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (3698, 3756), False, 'from unittest.mock import patch\n'), ((4467, 4561), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (4472, 4561), False, 'from unittest.mock import patch\n'), ((4564, 4627), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (4569, 4627), 
False, 'from unittest.mock import patch\n'), ((5019, 5113), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (5024, 5113), False, 'from unittest.mock import patch\n'), ((5116, 5179), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (5121, 5179), False, 'from unittest.mock import patch\n'), ((5543, 5637), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (5548, 5637), False, 'from unittest.mock import patch\n'), ((5640, 5703), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (5645, 5703), False, 'from unittest.mock import patch\n'), ((6142, 6236), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (6147, 6236), False, 'from unittest.mock import patch\n'), ((6239, 6302), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (6244, 6302), False, 'from unittest.mock import patch\n'), ((6784, 6878), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (6789, 6878), False, 'from unittest.mock import patch\n'), ((6881, 
6944), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (6886, 6944), False, 'from unittest.mock import patch\n'), ((7311, 7405), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (7316, 7405), False, 'from unittest.mock import patch\n'), ((7408, 7471), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (7413, 7471), False, 'from unittest.mock import patch\n'), ((7720, 7814), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (7725, 7814), False, 'from unittest.mock import patch\n'), ((7817, 7880), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (7822, 7880), False, 'from unittest.mock import patch\n'), ((8133, 8227), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (8138, 8227), False, 'from unittest.mock import patch\n'), ((8230, 8293), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (8235, 8293), False, 'from unittest.mock import patch\n'), ((8591, 8685), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': 
'_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (8596, 8685), False, 'from unittest.mock import patch\n'), ((8688, 8751), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (8693, 8751), False, 'from unittest.mock import patch\n'), ((9051, 9145), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (9056, 9145), False, 'from unittest.mock import patch\n'), ((9148, 9211), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (9153, 9211), False, 'from unittest.mock import patch\n'), ((9626, 9720), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_input_readable"""'], {'side_effect': '_check_input_readble_side_effect'}), "('ciftify.utils.check_input_readable', side_effect=\n _check_input_readble_side_effect)\n", (9631, 9720), False, 'from unittest.mock import patch\n'), ((9723, 9786), 'unittest.mock.patch', 'patch', (['"""ciftify.utils.check_output_writable"""'], {'return_value': '(True)'}), "('ciftify.utils.check_output_writable', return_value=True)\n", (9728, 9786), False, 'from unittest.mock import patch\n'), ((10199, 10289), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'x': [1, 2, 3, 4, 5], 'y': [0, 0, 1, 2, 4], 'z': [8, 8, 8, 8, 8]}"}), "(data={'x': [1, 2, 3, 4, 5], 'y': [0, 0, 1, 2, 4], 'z': [8, 8, \n 8, 8, 8]})\n", (10211, 10289), True, 'import pandas as pd\n'), ((15918, 15969), 'ciftify.bin.ciftify_clean_img.image_drop_dummy_trs', 'ciftify_clean_img.image_drop_dummy_trs', (['img_1to5', '(2)'], {}), '(img_1to5, 2)\n', (15956, 15969), True, 'import 
ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((1672, 1703), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (1685, 1703), False, 'import copy\n'), ((1778, 1819), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (1808, 1819), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((2250, 2281), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (2263, 2281), False, 'import copy\n'), ((2832, 2863), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (2845, 2863), False, 'import copy\n'), ((3304, 3335), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (3317, 3335), False, 'import copy\n'), ((3428, 3469), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (3458, 3469), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((3861, 3892), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (3874, 3892), False, 'import copy\n'), ((3979, 4020), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (4009, 4020), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((4276, 4307), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (4289, 4307), False, 'import copy\n'), ((4735, 4766), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (4748, 4766), False, 'import copy\n'), ((4892, 4933), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (4922, 4933), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((5286, 5317), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), 
'(self.docopt_args)\n', (5299, 5317), False, 'import copy\n'), ((5410, 5451), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (5440, 5451), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((5812, 5843), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (5825, 5843), False, 'import copy\n'), ((6421, 6452), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (6434, 6452), False, 'import copy\n'), ((6585, 6626), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (6615, 6626), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((7066, 7097), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (7079, 7097), False, 'import copy\n'), ((7220, 7261), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (7250, 7261), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((7582, 7613), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (7595, 7613), False, 'import copy\n'), ((7633, 7674), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (7663, 7674), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((7991, 8022), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (8004, 8022), False, 'import copy\n'), ((8042, 8083), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (8072, 8083), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((8406, 8437), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (8419, 8437), False, 'import copy\n'), ((8499, 
8540), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (8529, 8540), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((8863, 8894), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (8876, 8894), False, 'import copy\n'), ((9327, 9358), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (9340, 9358), False, 'import copy\n'), ((9887, 9918), 'copy.deepcopy', 'copy.deepcopy', (['self.docopt_args'], {}), '(self.docopt_args)\n', (9900, 9918), False, 'import copy\n'), ((10048, 10089), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (10078, 10089), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((11167, 11211), 'ciftify.bin.ciftify_clean_img.mangle_confounds', 'ciftify_clean_img.mangle_confounds', (['settings'], {}), '(settings)\n', (11201, 11211), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((11289, 11308), 'numpy.array', 'np.array', (['[1, 2, 4]'], {}), '([1, 2, 4])\n', (11297, 11308), True, 'import numpy as np\n'), ((11322, 11379), 'numpy.allclose', 'np.allclose', (['test_output', 'expected_output'], {'equal_nan': '(True)'}), '(test_output, expected_output, equal_nan=True)\n', (11333, 11379), True, 'import numpy as np\n'), ((11883, 11927), 'ciftify.bin.ciftify_clean_img.mangle_confounds', 'ciftify_clean_img.mangle_confounds', (['settings'], {}), '(settings)\n', (11917, 11927), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((12412, 12456), 'ciftify.bin.ciftify_clean_img.mangle_confounds', 'ciftify_clean_img.mangle_confounds', (['settings'], {}), '(settings)\n', (12446, 12456), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((12538, 12557), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (12546, 12557), True, 'import numpy as 
np\n'), ((12571, 12628), 'numpy.allclose', 'np.allclose', (['test_output', 'expected_output'], {'equal_nan': '(True)'}), '(test_output, expected_output, equal_nan=True)\n', (12582, 12628), True, 'import numpy as np\n'), ((13122, 13166), 'ciftify.bin.ciftify_clean_img.mangle_confounds', 'ciftify_clean_img.mangle_confounds', (['settings'], {}), '(settings)\n', (13156, 13166), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((13247, 13267), 'numpy.array', 'np.array', (['[1, 4, 16]'], {}), '([1, 4, 16])\n', (13255, 13267), True, 'import numpy as np\n'), ((13281, 13338), 'numpy.allclose', 'np.allclose', (['test_output', 'expected_output'], {'equal_nan': '(True)'}), '(test_output, expected_output, equal_nan=True)\n', (13292, 13338), True, 'import numpy as np\n'), ((13841, 13885), 'ciftify.bin.ciftify_clean_img.mangle_confounds', 'ciftify_clean_img.mangle_confounds', (['settings'], {}), '(settings)\n', (13875, 13885), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((13970, 13989), 'numpy.array', 'np.array', (['[0, 1, 4]'], {}), '([0, 1, 4])\n', (13978, 13989), True, 'import numpy as np\n'), ((14003, 14060), 'numpy.allclose', 'np.allclose', (['test_output', 'expected_output'], {'equal_nan': '(True)'}), '(test_output, expected_output, equal_nan=True)\n', (14014, 14060), True, 'import numpy as np\n'), ((14569, 14613), 'ciftify.bin.ciftify_clean_img.mangle_confounds', 'ciftify_clean_img.mangle_confounds', (['settings'], {}), '(settings)\n', (14603, 14613), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((15330, 15415), 'ciftify.bin.ciftify_clean_img.clean_image_with_nilearn', 'ciftify_clean_img.clean_image_with_nilearn', (['input_img', 'confound_signals', 'settings'], {}), '(input_img, confound_signals,\n settings)\n', (15372, 15415), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((15515, 15536), 'numpy.ones', 'np.ones', (['(2, 2, 2, 1)'], {}), '((2, 2, 2, 1))\n', (15522, 15536), True, 
'import numpy as np\n'), ((16024, 16043), 'numpy.array', 'np.array', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (16032, 16043), True, 'import numpy as np\n'), ((2462, 2487), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (2475, 2487), False, 'import pytest\n'), ((2512, 2553), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (2542, 2553), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((2946, 2971), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (2959, 2971), False, 'import pytest\n'), ((2996, 3037), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (3026, 3037), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((4368, 4393), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (4381, 4393), False, 'import pytest\n'), ((4418, 4459), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (4448, 4459), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((5950, 5975), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (5963, 5975), False, 'import pytest\n'), ((6000, 6041), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (6030, 6041), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((6088, 6133), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['one', 'two', 'three']"}), "(columns=['one', 'two', 'three'])\n", (6100, 6133), True, 'import pandas as pd\n'), ((6730, 6775), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['one', 'two', 'three']"}), "(columns=['one', 'two', 'three'])\n", (6742, 6775), True, 'import pandas as pd\n'), ((8951, 8976), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', 
(8964, 8976), False, 'import pytest\n'), ((9001, 9042), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (9031, 9042), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((9527, 9552), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (9540, 9552), False, 'import pytest\n'), ((9577, 9618), 'ciftify.bin.ciftify_clean_img.UserSettings', 'ciftify_clean_img.UserSettings', (['arguments'], {}), '(arguments)\n', (9607, 9618), True, 'import ciftify.bin.ciftify_clean_img as ciftify_clean_img\n'), ((15545, 15554), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (15551, 15554), True, 'import numpy as np\n'), ((15579, 15600), 'numpy.ones', 'np.ones', (['(2, 2, 2, 1)'], {}), '((2, 2, 2, 1))\n', (15586, 15600), True, 'import numpy as np\n'), ((15613, 15622), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (15619, 15622), True, 'import numpy as np\n'), ((15647, 15668), 'numpy.ones', 'np.ones', (['(2, 2, 2, 1)'], {}), '((2, 2, 2, 1))\n', (15654, 15668), True, 'import numpy as np\n'), ((15681, 15690), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (15687, 15690), True, 'import numpy as np\n'), ((15715, 15736), 'numpy.ones', 'np.ones', (['(2, 2, 2, 1)'], {}), '((2, 2, 2, 1))\n', (15722, 15736), True, 'import numpy as np\n'), ((15749, 15758), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (15755, 15758), True, 'import numpy as np\n'), ((15783, 15804), 'numpy.ones', 'np.ones', (['(2, 2, 2, 1)'], {}), '((2, 2, 2, 1))\n', (15790, 15804), True, 'import numpy as np\n'), ((15817, 15826), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (15823, 15826), True, 'import numpy as np\n')] |
import gym
from gym import error, spaces, utils
from gym.utils import seeding
#
import cityflow
import pandas as pd
import os
import numpy as np
import json
import math
from gym.spaces import Discrete, Box
class CityflowGymEnv(gym.Env):
    """Gym wrapper around a single-intersection CityFlow traffic simulation.

    Actions are the 8 signal phases (``Discrete(8)``); observations are a
    9-vector of 8 aggregated waiting counts plus the current phase index.
    """
    # metadata = {'render.modes': ['human']}
    def __init__(self, config):
        """Build the CityFlow engine and signal-phase bookkeeping from `config`.

        `config` must provide: 'cityflow_config_file', 'thread_num',
        'num_step', 'lane_phase_info', 'time_span', 'state_time_span'.
        """
        self.config = config
        self.eng = cityflow.Engine(self.config['cityflow_config_file'], thread_num=self.config['thread_num'])
        self.num_step = self.config['num_step']
        # 8 aggregated waiting counts + current phase (see get_state).
        self.state_size = 9
        self.lane_phase_info = self.config['lane_phase_info'] # e.g. keyed by "intersection_1_1"
        # Only the first intersection in the mapping is controlled.
        self.intersection_id = list(self.lane_phase_info.keys())[0]
        self.start_lane = self.lane_phase_info[self.intersection_id]['start_lane']
        self.phase_list = self.lane_phase_info[self.intersection_id]["phase"]
        self.phase_startLane_mapping = self.lane_phase_info[self.intersection_id]["phase_startLane_mapping"]
        self.current_phase = self.phase_list[0]
        self.current_phase_time = 0
        self.yellow_time = 5
        self.state_store_i = 0
        self.time_span = self.config['time_span']
        self.state_time_span = self.config['state_time_span']
        self.num_span_1 = 0
        self.num_span_2 = 0
        self.state_size_single = len(self.start_lane)
        self.phase_log = []
        # Rolling buffers (currently unused by step/get_state).
        self.count = np.zeros([8, self.time_span])
        self.accum_s = np.zeros([self.state_size_single, self.state_time_span])
        # Observation bounds are nominal; waiting counts may exceed 100.
        self.observation_space = Box(-1 * np.ones(9), 100 * np.ones(9))
        self.action_space = Discrete(8)
        self.step_count = 1
        self.avg_reward = 0
    def step(self, next_phase):
        """Apply `next_phase` to the traffic light and advance one sim step.

        Returns (observation, reward, done, info); done is 1 after ~1000
        reward evaluations (step_count is incremented inside get_reward).
        """
        if self.current_phase == next_phase:
            self.current_phase_time += 1
        else:
            self.current_phase = next_phase
            self.current_phase_time = 1
        self.eng.set_tl_phase(self.intersection_id, self.current_phase) # set phase of traffic light
        self.eng.next_step()
        self.phase_log.append(self.current_phase)
        done = 0
        if self.step_count > 999:
            done = 1
        return self.get_state(), self.get_reward(), done, {} # return next_state and reward, whether done and info
    def get_state(self):
        """Return the flat 9-element observation vector.

        The intermediate `state` dict gathers raw engine readouts; only the
        aggregated waiting counts and the current phase end up in the
        returned array.
        """
        state = {}
        state['lane_vehicle_count'] = self.eng.get_lane_vehicle_count() # {lane_id: lane_count, ...}
        state['start_lane_vehicle_count'] = {lane: self.eng.get_lane_vehicle_count()[lane] for lane in self.start_lane}
        state[
            'lane_waiting_vehicle_count'] = self.eng.get_lane_waiting_vehicle_count() # {lane_id: lane_waiting_count, ...}
        state['lane_vehicles'] = self.eng.get_lane_vehicles() # {lane_id: [vehicle1_id, vehicle2_id, ...], ...}
        state['vehicle_speed'] = self.eng.get_vehicle_speed() # {vehicle_id: vehicle_speed, ...}
        state['vehicle_distance'] = self.eng.get_vehicle_distance() # {vehicle_id: distance, ...}
        state['current_time'] = self.eng.get_current_time()
        state['current_phase'] = self.current_phase
        state['current_phase_time'] = self.current_phase_time
        state_pre = self.waiting_count_pre_1()
        return_state = np.array(list(state_pre) + [state['current_phase']])
        return_state = np.reshape(return_state, [1, self.state_size]).flatten()
        return return_state
    def waiting_count_pre_1(self):
        """Aggregate per-lane waiting counts into 8 movement groups.

        NOTE(review): indices 0-3 and 12-15 assume a fixed ordering of the
        engine's lane dict — verify against the road-network definition.
        """
        state_pre = list(self.eng.get_lane_waiting_vehicle_count().values())
        state = np.zeros(8)
        state[0] = state_pre[1] + state_pre[15]
        state[1] = state_pre[3] + state_pre[13]
        state[2] = state_pre[0] + state_pre[14]
        state[3] = state_pre[2] + state_pre[12]
        state[4] = state_pre[1] + state_pre[0]
        state[5] = state_pre[14] + state_pre[15]
        state[6] = state_pre[3] + state_pre[2]
        state[7] = state_pre[12] + state_pre[13]
        return state
    def get_reward(self):
        """Reward = negative of the worst (largest) aggregated waiting count.

        Side effect: increments step_count, which drives episode termination
        in step().
        """
        mystate = self.get_state()
        # reward function
        lane_vehicle_count = mystate[0:8]
        vehicle_velocity = self.eng.get_vehicle_speed()
        # reward = sum(list(vehicle_velocity.values())) / sum(lane_vehicle_count)
        reward = float(-max(list(lane_vehicle_count)))
        # reward_sig = 2 / ((1 + math.exp(-1 * reward)))
        self.step_count += 1
        # self.avg_reward += reward
        # if self.step_count is 1000:
        #     print("!!!!" + str(self.avg_reward) + "!!!!!")
        if np.isnan(reward):
            reward = 1
        return reward
    def get_score(self):
        """Return the maximum per-lane waiting count.

        NOTE(review): `metric` is computed but never returned/used.
        """
        lane_waiting_vehicle_count = self.eng.get_lane_waiting_vehicle_count()
        reward = max(list(lane_waiting_vehicle_count.values()))
        metric = 1 / ((1 + math.exp(-1 * reward)) * self.config["num_step"])
        return reward
    def reset(self):
        """Reset the engine and step counter; return the initial observation."""
        self.eng.reset()
        self.step_count=0
        return self.get_state()
# def render(self, mode='human', close=False):
# ...
| [
"math.exp",
"gym.spaces.Discrete",
"numpy.zeros",
"numpy.ones",
"numpy.isnan",
"numpy.reshape",
"cityflow.Engine"
] | [((383, 478), 'cityflow.Engine', 'cityflow.Engine', (["self.config['cityflow_config_file']"], {'thread_num': "self.config['thread_num']"}), "(self.config['cityflow_config_file'], thread_num=self.config\n ['thread_num'])\n", (398, 478), False, 'import cityflow\n'), ((1415, 1444), 'numpy.zeros', 'np.zeros', (['[8, self.time_span]'], {}), '([8, self.time_span])\n', (1423, 1444), True, 'import numpy as np\n'), ((1469, 1525), 'numpy.zeros', 'np.zeros', (['[self.state_size_single, self.state_time_span]'], {}), '([self.state_size_single, self.state_time_span])\n', (1477, 1525), True, 'import numpy as np\n'), ((1630, 1641), 'gym.spaces.Discrete', 'Discrete', (['(8)'], {}), '(8)\n', (1638, 1641), False, 'from gym.spaces import Discrete, Box\n'), ((3579, 3590), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (3587, 3590), True, 'import numpy as np\n'), ((4576, 4592), 'numpy.isnan', 'np.isnan', (['reward'], {}), '(reward)\n', (4584, 4592), True, 'import numpy as np\n'), ((1571, 1581), 'numpy.ones', 'np.ones', (['(9)'], {}), '(9)\n', (1578, 1581), True, 'import numpy as np\n'), ((1589, 1599), 'numpy.ones', 'np.ones', (['(9)'], {}), '(9)\n', (1596, 1599), True, 'import numpy as np\n'), ((3358, 3404), 'numpy.reshape', 'np.reshape', (['return_state', '[1, self.state_size]'], {}), '(return_state, [1, self.state_size])\n', (3368, 3404), True, 'import numpy as np\n'), ((4842, 4863), 'math.exp', 'math.exp', (['(-1 * reward)'], {}), '(-1 * reward)\n', (4850, 4863), False, 'import math\n')] |
#!/usr/bin/python
import sys
import numpy as np
from .ComponentBase import Component
def Calzetti_ext(spectrum=None, parameters=None):
    '''Calzetti starburst attenuation curve.

    ``spectrum.spectral_axis`` holds wavelengths in Angstrom and
    ``parameters[0]`` is E(B-V).  Returns a list of multiplicative
    attenuation factors; wavelengths outside 0.12-2.2 um are left
    unattenuated (factor 1.0).
    '''
    if spectrum is None:
        raise Exception("Need a data spectrum")
    ebv = parameters[0]
    Rv = 4.05
    factors = [1.] * len(spectrum.spectral_axis)
    for idx, wavelength in enumerate(spectrum.spectral_axis):
        w_um = wavelength / 10000.
        if (w_um >= 0.12) and (w_um < 0.63):
            # UV/blue branch of the piecewise Calzetti law.
            k = 2.659 * (-1.857 + 1.040 / w_um) + 4.05
            factors[idx] = 10 ** (-0.4 * ebv * k)
        if (w_um >= 0.63) and (w_um <= 2.2):
            # Optical/near-IR branch.
            k = 2.659 * (-2.156 + 1.509 / w_um - 0.198 / w_um ** 2 + 0.11 / w_um ** 3) + 4.05
            factors[idx] = 10 ** (-0.4 * ebv * k)
    return factors
def LMC_Fitzpatrick_ext(spectrum=None, parameters=None): #Fitzpatrick 1986
    '''Large Magellanic Cloud extinction curve defined in Fitzpatrick 1986.

    ``spectrum.spectral_axis`` holds wavelengths in Angstrom and
    ``parameters[0]`` is E(B-V).  Returns a list of multiplicative
    attenuation factors.
    '''
    if spectrum is None:
        raise Exception("Need a data spectrum")
    # Fitzpatrick (1986) LMC coefficients.
    c1, c2, c3 = -0.69, 0.89, 2.55
    x_peak = 4.608      # 2175 A bump position, micrometres^-1
    gamma = 0.994       # bump width, micrometres^-1
    r_v = 3.1
    factors = [1.] * len(spectrum.spectral_axis)
    for idx, wavelength in enumerate(spectrum.spectral_axis):
        x = 1.0 / (wavelength / 10000.)   # inverse wavelength, um^-1
        x_sq = x * x
        drude = x_sq / ((x_sq - x_peak ** 2) ** 2 + x_sq * gamma ** 2)
        offset = x - 5.9
        fuv_curv = 0.5392 * offset ** 2 + 0.05644 * offset ** 3
        # The far-UV curvature term only switches on shortward of x = 5.9.
        c4 = 0.50 if x >= 5.9 else 0.0
        k = c1 + r_v + c2 * x + c3 * x * drude + c4 * fuv_curv
        factors[idx] = 10 ** (-0.4 * parameters[0] * k)
    return factors
def MW_Seaton_ext(spectrum=None, parameters=None): #Seaton 1979
    '''Milky Way extinction curve defined in Seaton 1979.

    ``spectrum.spectral_axis`` holds wavelengths in Angstrom and
    ``parameters[0]`` is E(B-V).  Returns a list of multiplicative
    attenuation factors.
    '''
    if spectrum is None:
        raise Exception("Need a data spectrum")
    # Seaton (1979) Galactic coefficients.
    c1, c2, c3 = -0.38, 0.74, 3.96
    x_peak = 4.595      # 2175 A bump position, micrometres^-1
    gamma = 1.051       # bump width, micrometres^-1
    r_v = 3.1
    factors = [1.] * len(spectrum.spectral_axis)
    for idx, wavelength in enumerate(spectrum.spectral_axis):
        x = 1.0 / (wavelength / 10000.)   # inverse wavelength, um^-1
        x_sq = x * x
        drude = x_sq / ((x_sq - x_peak ** 2) ** 2 + x_sq * gamma ** 2)
        offset = x - 5.9
        fuv_curv = 0.5392 * offset ** 2 + 0.05644 * offset ** 3
        # The far-UV curvature term only switches on shortward of x = 5.9.
        c4 = 0.26 if x >= 5.9 else 0.0
        k = c1 + r_v + c2 * x + c3 * x * drude + c4 * fuv_curv
        factors[idx] = 10 ** (-0.4 * parameters[0] * k)
    return factors
def SMC_Gordon_ext(spectrum=None, parameters=None): #Gordon 2003
    '''Small Magellanic Cloud (bar) extinction curve defined in Gordon 2003.

    ``spectrum.spectral_axis`` holds wavelengths in Angstrom and
    ``parameters[0]`` is E(B-V).  Returns a numpy array of multiplicative
    attenuation factors.

    Bug fix: the far-UV (x >= 5.9 um^-1) branch previously hard-coded the
    Milky-Way curvature value 0.26, silently overriding the SMC value
    C4 = 0.46 declared above it (Gordon et al. 2003).  The declared SMC
    value is now used; wavelengths longward of ~1695 A are unaffected.
    '''
    if spectrum is None:
        raise Exception("Need a data spectrum")
    ext = [1.]*len(spectrum.spectral_axis)
    C1 = -4.96
    C2 = 2.26 #micrometres
    C3 = 0.39 #micrometres^-2
    C4 = 0.46 #micrometres^-1 (FUV curvature; applied only for x >= 5.9)
    x_0 = 4.6 #micrometres^-1
    gamma = 1.0 #micrometres^-1
    Rv = 2.74
    for j in range(len(spectrum.spectral_axis)):
        wavelengths_um = spectrum.spectral_axis[j]/10000.
        x = pow(wavelengths_um,-1)
        x2 = pow(x,2)
        D = x2/(pow(x2-pow(x_0,2),2)+x2*pow(gamma,2))
        F = 0.5392*pow((x-5.9),2)+0.05644*pow((x-5.9),3)
        # Far-UV curvature contributes only shortward of x = 5.9 um^-1.
        C4_eff = C4 if (x >= 5.9) else 0.0
        k = C1+Rv+C2*x+C3*x*D+C4_eff*F
        ext[j] = pow(10,-0.4*parameters[0]*k)
    return np.array(ext)
def AGN_Gaskell_ext(spectrum=None, parameters=None): #Gaskell and Benker 2007
    '''Active galactic nuclei extinction curve defined in Gaskell and Benker 2007.

    Much flatter than galactic extinction curves.  ``spectrum.spectral_axis``
    holds wavelengths in Angstrom (array-like, so the vectorised arithmetic
    below applies element-wise) and ``parameters[0]`` is E(B-V).  Returns a
    numpy array of multiplicative attenuation factors.

    Cleanup: removed the unused list pre-allocation and the dead,
    commented-out scalar loop that the vectorised expression replaced.
    '''
    if spectrum is None:
        raise Exception("Need a data spectrum")
    # Fifth-order polynomial fit coefficients in x = 1/lambda (um^-1).
    A = 0.000843
    B = -0.02496
    C = 0.2919
    D = -1.815
    E = 6.83
    F = -7.92
    Rv = 5.0
    wavelengths_um = np.array(spectrum.spectral_axis/10000.)
    x = pow(wavelengths_um,-1)
    k = A*x**5+B*x**4+C*x**3+D*x**2+E*x+F+Rv
    ext = pow(10,-0.4*parameters[0]*k)
    return ext
class Extinction(Component):
    '''
    Spectral component applying one of several dust-extinction laws.

    Exactly one of the boolean flags (MW, AGN, LMC, SMC, Calzetti) should be
    set; if several are set, the last matching branch in extinction() wins,
    and if none is set the spectrum is left unattenuated.  The single fitted
    parameter is E(B-V).

    Fixes: ``== None`` comparisons replaced with ``is None``; removed the
    unreachable ``sys.exit()`` that followed a ``raise``.
    '''
    def __init__(self, MW=False, AGN=False, LMC=False, SMC=False, Calzetti=False):
        super(Extinction, self).__init__()
        self.model_parameter_names = list()
        self.model_parameter_names.append("E(B-V)")
        # Prior bounds for E(B-V); filled with defaults in initial_values()
        # if not set by the caller.
        self.EBV_min = None
        self.EBV_max = None
        self.name = "Extinction"
        self.MW = MW
        self.AGN = AGN
        self.LMC = LMC
        self.SMC = SMC
        self.Calzetti = Calzetti
        self._k = None
    @property
    def is_analytic(self):
        '''Extinction is applied analytically (no template interpolation).'''
        return True
    def initial_values(self, spectrum=None):
        '''
        Sample an initial E(B-V) from the (flat) prior.

        These are the first guess for the parameters to be fit for in emcee,
        unless specified elsewhere.  Returns a one-element list.
        '''
        # Default prior bounds if the caller did not set them.
        if self.EBV_min is None or self.EBV_max is None:
            self.EBV_min = 0
            self.EBV_max = 2.
        EBV_init = np.random.uniform(low=self.EBV_min,
                                     high=self.EBV_max)
        return [EBV_init]
    def ln_priors(self, params):
        '''
        Return a list of the ln of all of the priors (flat in E(B-V)).

        @param params  full parameter vector; E(B-V) is looked up by name.
        '''
        ln_priors = list()
        EBV = params[self.parameter_index("E(B-V)")]
        # Flat prior: 0 inside the bounds, -inf outside.
        if self.EBV_min < EBV < self.EBV_max:
            ln_priors.append(0)
        else:
            ln_priors.append(-np.inf)
        return ln_priors
    def flux(self, spectrum=None):
        '''
        Extinction adds no flux of its own; return zeros on the data grid.
        '''
        flux = [0.] * len(spectrum.spectral_axis)
        return flux
    def extinction(self, spectrum=None, params=None):
        '''
        Evaluate the selected extinction law on the spectrum's wavelength grid.

        Returns per-pixel multiplicative attenuation factors (all 1.0 when no
        law flag is set).  Raises if no data spectrum is supplied.
        '''
        EBV = params[self.parameter_index("E(B-V)")]
        parameters = [EBV]
        if spectrum is None:
            raise Exception("Need a data spectrum")
        # Later flags deliberately override earlier ones (original behaviour).
        if self.MW:
            ext = MW_Seaton_ext(spectrum=spectrum, parameters=parameters)
        if self.AGN:
            ext = AGN_Gaskell_ext(spectrum=spectrum, parameters=parameters)
        if self.LMC:
            ext = LMC_Fitzpatrick_ext(spectrum=spectrum, parameters=parameters)
        if self.SMC:
            ext = SMC_Gordon_ext(spectrum=spectrum, parameters=parameters)
        if self.Calzetti:
            ext = Calzetti_ext(spectrum=spectrum, parameters=parameters)
        if (not self.MW) & (not self.AGN) & (not self.LMC) & (not self.SMC) & (not self.Calzetti):
            ext = [1.] * len(spectrum.spectral_axis)
        return ext
| [
"numpy.random.uniform",
"numpy.array",
"sys.exit"
] | [((3470, 3483), 'numpy.array', 'np.array', (['ext'], {}), '(ext)\n', (3478, 3483), True, 'import numpy as np\n'), ((4248, 4290), 'numpy.array', 'np.array', (['(spectrum.spectral_axis / 10000.0)'], {}), '(spectrum.spectral_axis / 10000.0)\n', (4256, 4290), True, 'import numpy as np\n'), ((5533, 5587), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.EBV_min', 'high': 'self.EBV_max'}), '(low=self.EBV_min, high=self.EBV_max)\n', (5550, 5587), True, 'import numpy as np\n'), ((6723, 6733), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6731, 6733), False, 'import sys\n')] |
"""
Plot the posterior of mu vs g1 with outliers
--------------------------------------------
Figure 5.17
The marginal joint distribution between mu and g_i, as given by eq. 5.100.
The left panel shows a point identified as bad (:math:`\hat{g}_i = 0`),
while the right panel shows a point identified as good (:math:`\hat{g}_i = 1`).
"""
# Author: <NAME>
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
from astroML.plotting.mcmc import convert_to_stdev
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def p(mu, g1, xi, sigma1, sigma2):
    """Equation 5.97: marginalized likelihood over outliers.

    The first datum xi[0] keeps an explicit "good point" probability grid
    g1; every remaining datum is marginalized over both the good (sigma1)
    and bad (sigma2) components.
    """
    good0 = norm.pdf(xi[0], mu, sigma1)
    bad0 = norm.pdf(xi[0], mu, sigma2)
    first_term = g1 * good0 + (1 - g1) * bad0
    # Add a trailing axis so the remaining data broadcast against the grids.
    mu = mu.reshape(mu.shape + (1,))
    g1 = g1.reshape(g1.shape + (1,))
    rest = norm.pdf(xi[1:], mu, sigma1) + norm.pdf(xi[1:], mu, sigma2)
    return first_term * np.prod(rest, -1)
#------------------------------------------------------------
# Sample the points: N1 good points (sigma=1) and N2 outliers (sigma=3),
# all drawn from a zero-mean Gaussian.  The seed fixes the figure.
np.random.seed(138)
N1 = 8
N2 = 2
sigma1 = 1
sigma2 = 3
sigmai = np.zeros(N1 + N2)
sigmai[N2:] = sigma1
sigmai[:N2] = sigma2
xi = np.random.normal(0, sigmai)
#------------------------------------------------------------
# Compute the marginalized posterior for the first and last point.
# Reversing xi makes xi[0] an outlier in L1 and a good point in L2.
mu = np.linspace(-5, 5, 71)
g1 = np.linspace(0, 1, 11)
L1 = p(mu[:, None], g1, xi, 1, 10)
L1 /= np.max(L1)
L2 = p(mu[:, None], g1, xi[::-1], 1, 10)
L2 /= np.max(L2)
#------------------------------------------------------------
# Plot the results: grayscale posterior images with 1/2/3-sigma contours.
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.05,
                    bottom=0.15, top=0.9)
ax1 = fig.add_subplot(121)
ax1.imshow(L1.T, origin='lower', aspect='auto', cmap=plt.cm.binary,
           extent=[mu[0], mu[-1], g1[0], g1[-1]])
ax1.contour(mu, g1, convert_to_stdev(np.log(L1).T),
            levels=(0.683, 0.955, 0.997),
            colors='k')
ax1.set_xlabel(r'$\mu$')
ax1.set_ylabel(r'$g_1$')
ax2 = fig.add_subplot(122)
ax2.imshow(L2.T, origin='lower', aspect='auto', cmap=plt.cm.binary,
           extent=[mu[0], mu[-1], g1[0], g1[-1]])
ax2.contour(mu, g1, convert_to_stdev(np.log(L2).T),
            levels=(0.683, 0.955, 0.997),
            colors='k')
ax2.set_xlabel(r'$\mu$')
# Right panel shares the y-axis; hide its tick labels.
ax2.yaxis.set_major_locator(plt.NullLocator())
plt.show()
| [
"matplotlib.pyplot.NullLocator",
"numpy.random.seed",
"matplotlib.pyplot.show",
"numpy.log",
"numpy.zeros",
"scipy.stats.norm.pdf",
"numpy.max",
"astroML.plotting.setup_text_plots",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.random.normal"
] | [((1196, 1237), 'astroML.plotting.setup_text_plots', 'setup_text_plots', ([], {'fontsize': '(8)', 'usetex': '(True)'}), '(fontsize=8, usetex=True)\n', (1212, 1237), False, 'from astroML.plotting import setup_text_plots\n'), ((1701, 1720), 'numpy.random.seed', 'np.random.seed', (['(138)'], {}), '(138)\n', (1715, 1720), True, 'import numpy as np\n'), ((1768, 1785), 'numpy.zeros', 'np.zeros', (['(N1 + N2)'], {}), '(N1 + N2)\n', (1776, 1785), True, 'import numpy as np\n'), ((1834, 1861), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigmai'], {}), '(0, sigmai)\n', (1850, 1861), True, 'import numpy as np\n'), ((1996, 2018), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(71)'], {}), '(-5, 5, 71)\n', (2007, 2018), True, 'import numpy as np\n'), ((2024, 2045), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (2035, 2045), True, 'import numpy as np\n'), ((2088, 2098), 'numpy.max', 'np.max', (['L1'], {}), '(L1)\n', (2094, 2098), True, 'import numpy as np\n'), ((2147, 2157), 'numpy.max', 'np.max', (['L2'], {}), '(L2)\n', (2153, 2157), True, 'import numpy as np\n'), ((2246, 2274), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 2.5)'}), '(figsize=(5, 2.5))\n', (2256, 2274), True, 'from matplotlib import pyplot as plt\n'), ((3023, 3033), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3031, 3033), True, 'from matplotlib import pyplot as plt\n'), ((3003, 3020), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (3018, 3020), True, 'from matplotlib import pyplot as plt\n'), ((1352, 1379), 'scipy.stats.norm.pdf', 'norm.pdf', (['xi[0]', 'mu', 'sigma1'], {}), '(xi[0], mu, sigma1)\n', (1360, 1379), False, 'from scipy.stats import norm\n'), ((1402, 1429), 'scipy.stats.norm.pdf', 'norm.pdf', (['xi[0]', 'mu', 'sigma2'], {}), '(xi[0], mu, sigma2)\n', (1410, 1429), False, 'from scipy.stats import norm\n'), ((2555, 2565), 'numpy.log', 'np.log', (['L1'], {}), '(L1)\n', (2561, 2565), True, 'import numpy 
as np\n'), ((2869, 2879), 'numpy.log', 'np.log', (['L2'], {}), '(L2)\n', (2875, 2879), True, 'import numpy as np\n'), ((1530, 1558), 'scipy.stats.norm.pdf', 'norm.pdf', (['xi[1:]', 'mu', 'sigma1'], {}), '(xi[1:], mu, sigma1)\n', (1538, 1558), False, 'from scipy.stats import norm\n'), ((1584, 1612), 'scipy.stats.norm.pdf', 'norm.pdf', (['xi[1:]', 'mu', 'sigma2'], {}), '(xi[1:], mu, sigma2)\n', (1592, 1612), False, 'from scipy.stats import norm\n')] |
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
from typing import Dict, Tuple
import numpy as np
import pytest
from image_tf_encoder import ImageTFEncoder
from jina import Document, DocumentArray, Executor
from PIL import Image
_INPUT_DIM = 336
_EMBEDDING_DIM = 1280
@pytest.fixture(scope="module")
def encoder() -> ImageTFEncoder:
    """Module-scoped default-configured encoder, shared across tests."""
    return ImageTFEncoder()
@pytest.fixture(scope="function")
def nested_docs() -> DocumentArray:
    """Root document with three chunks; the first chunk has two sub-chunks.

    All documents carry the same all-ones uint8 image blob so every
    traversal level is encodable.
    """
    blob = np.ones((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8)
    docs = DocumentArray([Document(id="root1", blob=blob)])
    docs[0].chunks = [
        Document(id="chunk11", blob=blob),
        Document(id="chunk12", blob=blob),
        Document(id="chunk13", blob=blob),
    ]
    docs[0].chunks[0].chunks = [
        Document(id="chunk111", blob=blob),
        Document(id="chunk112", blob=blob),
    ]
    return docs
@pytest.fixture(scope='function')
def test_images(test_dir: str) -> Dict[str, np.ndarray]:
    """Load the sample PNGs as float32 RGB arrays scaled to [0, 1].

    Any alpha channel is dropped via the ``0:3`` slice.
    NOTE(review): ``test_dir`` is presumably a conftest fixture — not
    defined in this file.
    """
    def get_path(file_name_no_suffix: str) -> str:
        return os.path.join(test_dir, 'test_data', file_name_no_suffix + '.png')
    return {
        file_name: np.array(Image.open(get_path(file_name)), dtype=np.float32)[
            :, :, 0:3
        ]
        / 255
        for file_name in ['airplane', 'banana1', 'banana2', 'satellite', 'studio']
    }
def test_config():
    """The packaged config.yml loads with the expected default model."""
    ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
    assert ex.model_name == 'MobileNetV2'
def test_no_documents(encoder: ImageTFEncoder):
    """Encoding an empty DocumentArray is a no-op and must not raise."""
    docs = DocumentArray()
    encoder.encode(docs=docs, parameters={})
    assert len(docs) == 0  # SUCCESS
def test_none_docs(encoder: ImageTFEncoder):
    """Passing docs=None must be tolerated without raising."""
    encoder.encode(docs=None, parameters={})
def test_docs_no_blobs(encoder: ImageTFEncoder):
    """A document without a blob passes through un-embedded.

    Bug fix: the encoder was previously called with a fresh empty
    ``DocumentArray()`` instead of ``docs``, so the blob-less document was
    never actually given to the encoder and the assertions were vacuous.
    """
    docs = DocumentArray([Document()])
    encoder.encode(docs=docs, parameters={})
    assert len(docs) == 1
    assert docs[0].embedding is None
def test_single_image(encoder: ImageTFEncoder):
    """A single correctly-shaped image gets a float32 embedding."""
    docs = DocumentArray(
        [Document(blob=np.ones((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8))]
    )
    encoder.encode(docs, {})
    assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
    assert docs[0].embedding.dtype == np.float32
def test_encoding_cpu():
    """Encoding works with an explicitly CPU-pinned encoder."""
    encoder = ImageTFEncoder(device='cpu')
    input_data = DocumentArray(
        [Document(blob=np.ones((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8))]
    )
    encoder.encode(docs=input_data, parameters={})
    assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
    """Encoding works on the first GPU (only run where a GPU is available)."""
    encoder = ImageTFEncoder(device='/GPU:0')
    input_data = DocumentArray(
        [Document(blob=np.ones((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8))]
    )
    encoder.encode(docs=input_data, parameters={})
    assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
    'img_shape',
    [224, 512],
)
def test_encode_any_image_shape(img_shape):
    """Encoder configured for `img_shape` accepts matching square images."""
    encoder = ImageTFEncoder(img_shape=img_shape)
    docs = DocumentArray(
        [Document(blob=np.ones((img_shape, img_shape, 3), dtype=np.uint8))]
    )
    encoder.encode(docs=docs, parameters={})
    assert len(docs.get_attributes('embedding')) == 1
def test_encode_any_image_shape_mismatch():
    """Blobs whose shape differs from the configured img_shape raise."""
    encoder = ImageTFEncoder(img_shape=224)
    docs = DocumentArray(
        [Document(blob=np.ones((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8))]
    )
    with pytest.raises(ValueError):
        encoder.encode(docs=docs, parameters={})
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(encoder: ImageTFEncoder, batch_size: int):
    """All 32 documents get embeddings regardless of the batch size used."""
    blob = np.ones((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8)
    docs = DocumentArray([Document(blob=blob) for _ in range(32)])
    encoder.encode(docs, parameters={'batch_size': batch_size})
    for doc in docs:
        assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_image_results(test_images: Dict[str, np.array]):
    """Semantic sanity check: the two banana images embed closer to each
    other than any same/different-class cross pair."""
    embeddings = {}
    encoder = ImageTFEncoder()
    for name, image_arr in test_images.items():
        docs = DocumentArray([Document(blob=image_arr)])
        encoder.encode(docs, parameters={})
        embeddings[name] = docs[0].embedding
        assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
    def dist(a, b):
        # Euclidean distance between two stored embeddings.
        a_embedding = embeddings[a]
        b_embedding = embeddings[b]
        return np.linalg.norm(a_embedding - b_embedding)
    small_distance = dist('banana1', 'banana2')
    assert small_distance < dist('banana1', 'airplane')
    assert small_distance < dist('banana1', 'satellite')
    assert small_distance < dist('banana1', 'studio')
    assert small_distance < dist('banana2', 'airplane')
    assert small_distance < dist('banana2', 'satellite')
    assert small_distance < dist('banana2', 'studio')
    assert small_distance < dist('airplane', 'studio')
    assert small_distance < dist('airplane', 'satellite')
@pytest.mark.gpu
def test_image_results_gpu():
    """GPU encoding embeds every document with the expected shape."""
    num_doc = 2
    test_data = np.random.rand(num_doc, _INPUT_DIM, _INPUT_DIM, 3)
    doc = DocumentArray()
    for i in range(num_doc):
        doc.append(Document(blob=test_data[i]))
    encoder = ImageTFEncoder(device='/GPU:0')
    encoder.encode(doc, parameters={})
    assert len(doc) == num_doc
    for i in range(num_doc):
        assert doc[i].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
    "traversal_paths, counts",
    [
        [('c',), (('r', 0), ('c', 3), ('cc', 0))],
        [('cc',), (("r", 0), ('c', 0), ('cc', 2))],
        [('r',), (('r', 1), ('c', 0), ('cc', 0))],
        [('cc', 'r'), (('r', 1), ('c', 0), ('cc', 2))],
    ],
)
def test_traversal_path(
    traversal_paths: Tuple[str],
    counts: Tuple[str, int],
    nested_docs: DocumentArray,
    encoder: ImageTFEncoder,
):
    """Only documents on the requested traversal paths get embeddings.

    `counts` lists, per path, how many documents at that level must end up
    with a non-None embedding.
    """
    encoder.encode(nested_docs, parameters={"traversal_paths": traversal_paths})
    for path, count in counts:
        embeddings = nested_docs.traverse_flat([path]).get_attributes('embedding')
        assert len([em for em in embeddings if em is not None]) == count
| [
"jina.DocumentArray",
"image_tf_encoder.ImageTFEncoder",
"pytest.fixture",
"numpy.ones",
"pytest.raises",
"pathlib.Path",
"numpy.linalg.norm",
"jina.Document",
"numpy.random.rand",
"pytest.mark.parametrize",
"os.path.join"
] | [((368, 398), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (382, 398), False, 'import pytest\n'), ((463, 495), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (477, 495), False, 'import pytest\n'), ((961, 993), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (975, 993), False, 'import pytest\n'), ((2932, 2980), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""img_shape"""', '[224, 512]'], {}), "('img_shape', [224, 512])\n", (2955, 2980), False, 'import pytest\n'), ((3583, 3634), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '[1, 2, 4, 8]'], {}), "('batch_size', [1, 2, 4, 8])\n", (3606, 3634), False, 'import pytest\n'), ((5425, 5669), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""traversal_paths, counts"""', "[[('c',), (('r', 0), ('c', 3), ('cc', 0))], [('cc',), (('r', 0), ('c', 0),\n ('cc', 2))], [('r',), (('r', 1), ('c', 0), ('cc', 0))], [('cc', 'r'), (\n ('r', 1), ('c', 0), ('cc', 2))]]"], {}), "('traversal_paths, counts', [[('c',), (('r', 0), (\n 'c', 3), ('cc', 0))], [('cc',), (('r', 0), ('c', 0), ('cc', 2))], [('r'\n ,), (('r', 1), ('c', 0), ('cc', 0))], [('cc', 'r'), (('r', 1), ('c', 0),\n ('cc', 2))]])\n", (5448, 5669), False, 'import pytest\n'), ((443, 459), 'image_tf_encoder.ImageTFEncoder', 'ImageTFEncoder', ([], {}), '()\n', (457, 459), False, 'from image_tf_encoder import ImageTFEncoder\n'), ((543, 595), 'numpy.ones', 'np.ones', (['(_INPUT_DIM, _INPUT_DIM, 3)'], {'dtype': 'np.uint8'}), '((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8)\n', (550, 595), True, 'import numpy as np\n'), ((1613, 1628), 'jina.DocumentArray', 'DocumentArray', ([], {}), '()\n', (1626, 1628), False, 'from jina import Document, DocumentArray, Executor\n'), ((2348, 2376), 'image_tf_encoder.ImageTFEncoder', 'ImageTFEncoder', ([], {'device': '"""cpu"""'}), "(device='cpu')\n", (2362, 2376), 
False, 'from image_tf_encoder import ImageTFEncoder\n'), ((2666, 2697), 'image_tf_encoder.ImageTFEncoder', 'ImageTFEncoder', ([], {'device': '"""/GPU:0"""'}), "(device='/GPU:0')\n", (2680, 2697), False, 'from image_tf_encoder import ImageTFEncoder\n'), ((3050, 3085), 'image_tf_encoder.ImageTFEncoder', 'ImageTFEncoder', ([], {'img_shape': 'img_shape'}), '(img_shape=img_shape)\n', (3064, 3085), False, 'from image_tf_encoder import ImageTFEncoder\n'), ((3354, 3383), 'image_tf_encoder.ImageTFEncoder', 'ImageTFEncoder', ([], {'img_shape': '(224)'}), '(img_shape=224)\n', (3368, 3383), False, 'from image_tf_encoder import ImageTFEncoder\n'), ((3709, 3761), 'numpy.ones', 'np.ones', (['(_INPUT_DIM, _INPUT_DIM, 3)'], {'dtype': 'np.uint8'}), '((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8)\n', (3716, 3761), True, 'import numpy as np\n'), ((4065, 4081), 'image_tf_encoder.ImageTFEncoder', 'ImageTFEncoder', ([], {}), '()\n', (4079, 4081), False, 'from image_tf_encoder import ImageTFEncoder\n'), ((5063, 5113), 'numpy.random.rand', 'np.random.rand', (['num_doc', '_INPUT_DIM', '_INPUT_DIM', '(3)'], {}), '(num_doc, _INPUT_DIM, _INPUT_DIM, 3)\n', (5077, 5113), True, 'import numpy as np\n'), ((5124, 5139), 'jina.DocumentArray', 'DocumentArray', ([], {}), '()\n', (5137, 5139), False, 'from jina import Document, DocumentArray, Executor\n'), ((5232, 5263), 'image_tf_encoder.ImageTFEncoder', 'ImageTFEncoder', ([], {'device': '"""/GPU:0"""'}), "(device='/GPU:0')\n", (5246, 5263), False, 'from image_tf_encoder import ImageTFEncoder\n'), ((687, 720), 'jina.Document', 'Document', ([], {'id': '"""chunk11"""', 'blob': 'blob'}), "(id='chunk11', blob=blob)\n", (695, 720), False, 'from jina import Document, DocumentArray, Executor\n'), ((730, 763), 'jina.Document', 'Document', ([], {'id': '"""chunk12"""', 'blob': 'blob'}), "(id='chunk12', blob=blob)\n", (738, 763), False, 'from jina import Document, DocumentArray, Executor\n'), ((773, 806), 'jina.Document', 'Document', ([], {'id': '"""chunk13"""', 
'blob': 'blob'}), "(id='chunk13', blob=blob)\n", (781, 806), False, 'from jina import Document, DocumentArray, Executor\n'), ((855, 889), 'jina.Document', 'Document', ([], {'id': '"""chunk111"""', 'blob': 'blob'}), "(id='chunk111', blob=blob)\n", (863, 889), False, 'from jina import Document, DocumentArray, Executor\n'), ((899, 933), 'jina.Document', 'Document', ([], {'id': '"""chunk112"""', 'blob': 'blob'}), "(id='chunk112', blob=blob)\n", (907, 933), False, 'from jina import Document, DocumentArray, Executor\n'), ((1117, 1182), 'os.path.join', 'os.path.join', (['test_dir', '"""test_data"""', "(file_name_no_suffix + '.png')"], {}), "(test_dir, 'test_data', file_name_no_suffix + '.png')\n", (1129, 1182), False, 'import os\n'), ((3504, 3529), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3517, 3529), False, 'import pytest\n'), ((4444, 4485), 'numpy.linalg.norm', 'np.linalg.norm', (['(a_embedding - b_embedding)'], {}), '(a_embedding - b_embedding)\n', (4458, 4485), True, 'import numpy as np\n'), ((622, 653), 'jina.Document', 'Document', ([], {'id': '"""root1"""', 'blob': 'blob'}), "(id='root1', blob=blob)\n", (630, 653), False, 'from jina import Document, DocumentArray, Executor\n'), ((1880, 1890), 'jina.Document', 'Document', ([], {}), '()\n', (1888, 1890), False, 'from jina import Document, DocumentArray, Executor\n'), ((1917, 1932), 'jina.DocumentArray', 'DocumentArray', ([], {}), '()\n', (1930, 1932), False, 'from jina import Document, DocumentArray, Executor\n'), ((3788, 3807), 'jina.Document', 'Document', ([], {'blob': 'blob'}), '(blob=blob)\n', (3796, 3807), False, 'from jina import Document, DocumentArray, Executor\n'), ((5188, 5215), 'jina.Document', 'Document', ([], {'blob': 'test_data[i]'}), '(blob=test_data[i])\n', (5196, 5215), False, 'from jina import Document, DocumentArray, Executor\n'), ((4160, 4184), 'jina.Document', 'Document', ([], {'blob': 'image_arr'}), '(blob=image_arr)\n', (4168, 4184), False, 'from jina import 
Document, DocumentArray, Executor\n'), ((2111, 2163), 'numpy.ones', 'np.ones', (['(_INPUT_DIM, _INPUT_DIM, 3)'], {'dtype': 'np.uint8'}), '((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8)\n', (2118, 2163), True, 'import numpy as np\n'), ((2432, 2484), 'numpy.ones', 'np.ones', (['(_INPUT_DIM, _INPUT_DIM, 3)'], {'dtype': 'np.uint8'}), '((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8)\n', (2439, 2484), True, 'import numpy as np\n'), ((2753, 2805), 'numpy.ones', 'np.ones', (['(_INPUT_DIM, _INPUT_DIM, 3)'], {'dtype': 'np.uint8'}), '((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8)\n', (2760, 2805), True, 'import numpy as np\n'), ((3135, 3185), 'numpy.ones', 'np.ones', (['(img_shape, img_shape, 3)'], {'dtype': 'np.uint8'}), '((img_shape, img_shape, 3), dtype=np.uint8)\n', (3142, 3185), True, 'import numpy as np\n'), ((3433, 3485), 'numpy.ones', 'np.ones', (['(_INPUT_DIM, _INPUT_DIM, 3)'], {'dtype': 'np.uint8'}), '((_INPUT_DIM, _INPUT_DIM, 3), dtype=np.uint8)\n', (3440, 3485), True, 'import numpy as np\n'), ((1467, 1481), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1471, 1481), False, 'from pathlib import Path\n')] |
# Theory: Intro to NumPy
import numpy as np
# NumPy (short for Numerical Python) is a Python library
# fundamental for scientific computing. It supports a variety of
# high-level mathematical functions and is broadly used in data
# science, machine learning, and big data applications. With its
# help, you will be able to perform linear algebra calculations
# easily, as well as statistical, logical, and other operations,
# making use of numerous built-in functions.
# Most parts of NumPy that require high execution speed are
# written in C, which makes the operations much faster than the
# corresponding ones in Python itself. Owing to its efficiency and
# convenience, the library has gained vast popularity among data
# scientists who work with large datasets and need to perform
# speed computations.
# 1. Installation
# Firstly, to start working with NumPy, we need to install it, which
# can be easily done with pip:
# pip install numpy
# You can read more about the installation on the official page of
# the scientific python distribution.
# Then, import the library before starting your work:
# import numpy as np
# 2. NumPy arrays
# The core NumPy object is an n-dimensional array, also known
# as ndarray. The simplest way to create a NumPy array is to
# convert a Python list:
# A one-dimensional NumPy array created from a Python list of numbers.
first = np.array([1, 2, 3, 4, 5])
print(first) # [1 2 3 4 5]
print(type(first)) # <class 'numpy.ndarray'>
# In the example above, first is one-dimensional array that is
# treated as a vector. As you can see, when printed, it is rendered
# without commas, as opposed to Python lists.
# You can also use not only integers in the list but any objects
# (strings, lists, etc.). Note: NumPy 1.24+ raises a ValueError for
# ragged/mixed-object input unless dtype=object is given explicitly.
first_modified = np.array(['1', 2, [3], 4, [5]], dtype=object)
print(first_modified) # ['1' 2 list([3]) 4 list([5])]
# Similarly, we can create a two-dimensional Numpy array from
# the corresponding Python list. Two- and more-dimensional
# arrays are treated as matrices.
second = np.array([[1, 1, 1],
                   [2, 2, 2]])
print(second) # [[1 1 1]
              #  [2 2 2]]
# If you try to create a two-dimensional Numpy array from a list
# with sublists of two different lengths, you will obtain a one-
# dimensional array (of list objects; dtype=object is again required
# on modern NumPy).
second_modified = np.array([[1, 2, 3],
                            [4, 5]], dtype=object)
print(second_modified) # [list([1, 2, 3]) list([4, 5])]
# 3. NumPy arrays vs Python lists
# As you can see, NumPy arrays resemble a Python built-in list
# data type. However, there are a few crucial differences:
# - Unlike Python lists, NumPy arrays can only contain
# elements of the same type, usually numbers, due to the
# specifics of application fields.
# - NumPy arrays are much more memory-efficient and much
# faster than Python lists when working with large datasets.
# - Arithmetic operations differ when executed on Python
# lists or NumPy arrays.
# Let's take a look at arithmetic operations that can be applied
# both to arrays and to lists. All differences in them can be
# explained by the fact that NumPy arrays are created for
# computing and treated as vectors and matrices, while Python
# lists are a datatype made just to store data.
# To illustrate it, we'll create two lists and two arrays containing
# the same elements:
# Build matching Python lists and NumPy arrays to compare behaviors.
list_a = [1, 2, 3, 4]
array_a = np.array(list_a)
list_b = [11, 22, 33, 44]
array_b = np.array(list_b)
# First, let's find their sum. The addition of two arrays returns
# their sum as when we add two vectors.
print(array_a + array_b) # [12 24 36 48]
# For this reason, we can't add up arrays of different lengths;
# a ValueError will appear.
array_c = np.array([111, 222])
# print(array_a + array_c) # ValueError
# When we try to add a list and an array, the former is converted
# to an array, so the result is also a sum of vectors.
print(list_a + array_a) # [2 4 6 8]
# However, when applied to lists, addition just merges them
# together.
print(list_a + list_b) # [1, 2, 3, 4, 11, 22, 33, 44]
# Similarly, if we try to multiply a list by n, we'll get the list
# repeated n times, while with an array, each element will be
# multiplied by n:
print(list_a * 3) # [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
print(array_a * 3) # [3 6 9 12]
# 4. Learning sizes
# There's a number of ways to learn more about an array without
# printing it.
# Re-create the sample arrays used for the size inspections below.
first = np.array([1, 2, 3, 4, 5])
second = np.array([[1, 1, 1],
                   [2, 2, 2]])
# To find out the dimensions' sizes, use shape. The first number of
# the output indicates the number of rows and the second - the
# number of columns.
# rows columns
print(second.shape) # (2, 3)
# If the NumPy array has only one dimension, the result will be a
# bit different:
print(first.shape) # (5,)
# In this case, the first number is not the number of rows but
# rather the number of elements in the only dimension, and the
# empty place after the comma means that there's no second
# dimension.
# The length of the shape tuple is the number of axes, ndim.
print(first.ndim) # 1
print(second.ndim) # 2
# The function len() returns the array's length, and size gives
# us the number of elements in the array.
print(len(first), first.size) # 5 5
print(len(second), second.size) # 2 6
# Note that in the first case they return the same value, while in
# the second case the numbers differ. The thing is, len() works
# as it would work with regular lists, so if we regard the two-
# dimensional array above as a list containing two nested lists, it
# becomes clear that for finding its length only the nested lists
# are counted. Size, on the contrary, counts each single element
# in all nested lists.
# Another thing to point out is that both length and size can also
# be got from its shape: length is actually the length of the first
# dimension, so it equals shape[0], and size is the total number
# of elements, which equals the product of all elements in the
# shape tuple.
# 5. Recap
# In this topic, we've learned the basics of NumPy:
# - what is NumPy and what it can be used for,
# - how to install and import the library,
# - arrays, the basic datatype of Numpy,
# - the difference between NumPy arrays and Python lists,
# - ways to get information about an array's content.
# Now, let's practice the acquired knowledge so that you'll be able
# to use it in the future.
| [
"numpy.array"
] | [((1307, 1332), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1315, 1332), True, 'import numpy as np\n'), ((1749, 1780), 'numpy.array', 'np.array', (["['1', 2, [3], 4, [5]]"], {}), "(['1', 2, [3], 4, [5]])\n", (1757, 1780), True, 'import numpy as np\n'), ((2001, 2033), 'numpy.array', 'np.array', (['[[1, 1, 1], [2, 2, 2]]'], {}), '([[1, 1, 1], [2, 2, 2]])\n', (2009, 2033), True, 'import numpy as np\n'), ((2282, 2311), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5]]'], {}), '([[1, 2, 3], [4, 5]])\n', (2290, 2311), True, 'import numpy as np\n'), ((3329, 3345), 'numpy.array', 'np.array', (['list_a'], {}), '(list_a)\n', (3337, 3345), True, 'import numpy as np\n'), ((3383, 3399), 'numpy.array', 'np.array', (['list_b'], {}), '(list_b)\n', (3391, 3399), True, 'import numpy as np\n'), ((3654, 3674), 'numpy.array', 'np.array', (['[111, 222]'], {}), '([111, 222])\n', (3662, 3674), True, 'import numpy as np\n'), ((4373, 4398), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (4381, 4398), True, 'import numpy as np\n'), ((4408, 4440), 'numpy.array', 'np.array', (['[[1, 1, 1], [2, 2, 2]]'], {}), '([[1, 1, 1], [2, 2, 2]])\n', (4416, 4440), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from audiomentations import Normalize, Compose
class TestNormalize(unittest.TestCase):
    """Tests for the Normalize augmentation applied through a Compose
    pipeline."""

    @staticmethod
    def _run_normalize(signal):
        # build a single-transform pipeline with p=1.0 so the transform
        # always fires, then apply it at a fixed sample rate
        pipeline = Compose([Normalize(p=1.0)])
        return pipeline(samples=signal, sample_rate=16000)

    def test_normalize_positive_peak(self):
        signal = np.array([0.5, 0.6, -0.2, 0.0], dtype=np.float32)
        result = self._run_normalize(signal)
        self.assertEqual(np.amax(result), 1.0)
        self.assertEqual(result.dtype, np.float32)
        self.assertEqual(len(result), 4)

    def test_normalize_negative_peak(self):
        signal = np.array([0.5, 0.6, -0.8, 0.0], dtype=np.float32)
        result = self._run_normalize(signal)
        self.assertEqual(np.amin(result), -1.0)
        self.assertEqual(result[-1], 0.0)
        self.assertEqual(result.dtype, np.float32)
        self.assertEqual(len(result), 4)

    def test_normalize_all_zeros(self):
        signal = np.array([0.0, 0.0, 0.0, 0.0], dtype=np.float32)
        result = self._run_normalize(signal)
        self.assertEqual(np.amin(result), 0.0)
        self.assertEqual(result[-1], 0.0)
        self.assertEqual(result.dtype, np.float32)
        self.assertEqual(len(result), 4)

    def test_normalize_multichannel(self):
        signal = np.array(
            [[0.9, 0.5, -0.25, -0.125, 0.0], [0.95, 0.5, -0.25, -0.125, 0.0]],
            dtype=np.float32,
        )
        result = self._run_normalize(signal)
        # peak magnitude across both channels is 0.95
        assert_array_equal(result, signal / 0.95)
        self.assertEqual(result.dtype, np.float32)
| [
"numpy.amin",
"numpy.testing.assert_array_equal",
"numpy.amax",
"numpy.array",
"audiomentations.Normalize"
] | [((233, 282), 'numpy.array', 'np.array', (['[0.5, 0.6, -0.2, 0.0]'], {'dtype': 'np.float32'}), '([0.5, 0.6, -0.2, 0.0], dtype=np.float32)\n', (241, 282), True, 'import numpy as np\n'), ((635, 684), 'numpy.array', 'np.array', (['[0.5, 0.6, -0.8, 0.0]'], {'dtype': 'np.float32'}), '([0.5, 0.6, -0.8, 0.0], dtype=np.float32)\n', (643, 684), True, 'import numpy as np\n'), ((1077, 1125), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {'dtype': 'np.float32'}), '([0.0, 0.0, 0.0, 0.0], dtype=np.float32)\n', (1085, 1125), True, 'import numpy as np\n'), ((1520, 1617), 'numpy.array', 'np.array', (['[[0.9, 0.5, -0.25, -0.125, 0.0], [0.95, 0.5, -0.25, -0.125, 0.0]]'], {'dtype': 'np.float32'}), '([[0.9, 0.5, -0.25, -0.125, 0.0], [0.95, 0.5, -0.25, -0.125, 0.0]],\n dtype=np.float32)\n', (1528, 1617), True, 'import numpy as np\n'), ((1814, 1867), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['processed_samples', '(samples / 0.95)'], {}), '(processed_samples, samples / 0.95)\n', (1832, 1867), False, 'from numpy.testing import assert_array_equal\n'), ((455, 471), 'numpy.amax', 'np.amax', (['samples'], {}), '(samples)\n', (462, 471), True, 'import numpy as np\n'), ((857, 873), 'numpy.amin', 'np.amin', (['samples'], {}), '(samples)\n', (864, 873), True, 'import numpy as np\n'), ((1298, 1314), 'numpy.amin', 'np.amin', (['samples'], {}), '(samples)\n', (1305, 1314), True, 'import numpy as np\n'), ((340, 356), 'audiomentations.Normalize', 'Normalize', ([], {'p': '(1.0)'}), '(p=1.0)\n', (349, 356), False, 'from audiomentations import Normalize, Compose\n'), ((742, 758), 'audiomentations.Normalize', 'Normalize', ([], {'p': '(1.0)'}), '(p=1.0)\n', (751, 758), False, 'from audiomentations import Normalize, Compose\n'), ((1183, 1199), 'audiomentations.Normalize', 'Normalize', ([], {'p': '(1.0)'}), '(p=1.0)\n', (1192, 1199), False, 'from audiomentations import Normalize, Compose\n'), ((1706, 1722), 'audiomentations.Normalize', 'Normalize', ([], {'p': '(1.0)'}), 
'(p=1.0)\n', (1715, 1722), False, 'from audiomentations import Normalize, Compose\n')] |
# Atlas measurements and statistics
# Author: <NAME>, 2019
"""Low-level measurement of atlases and statistics generation.
Typically applied to specific types of atlases and less generalizable
than measurements in :module:`vols`.
"""
import os
import numpy as np
import pandas as pd
from magmap.plot import colormaps
from magmap.settings import config
from magmap.io import export_stack
from magmap.io import libmag
from magmap.io import np_io
from magmap.atlas import ontology
from magmap.plot import plot_2d
from magmap.plot import plot_support
from magmap.io import df_io
from magmap.stats import vols
def plot_region_development(metric, size=None, show=True):
    """Plot regions across development for the given metric.

    Reads the volume stats file (``config.filename``) and region ID file
    (``config.filenames[1]``), normalizes the metric to whole brain
    tissue, and plots raw and normalized lines per ontology level.

    Args:
        metric (str): Column name of metric to track.
        size (List[int]): Sequence of ``width, height`` to size the figure;
            defaults to None.
        show (bool): True to display the image; defaults to True.
    """
    # set up access to data frame columns
    id_cols = ["Age", "Condition"]
    extra_cols = ["RegionName"]
    cond_col = "Region"
    # assume that vol stats file is given first, then region IDs;
    # merge in region names and levels
    df_regions = pd.read_csv(config.filenames[1])
    df = pd.read_csv(config.filename).merge(
        df_regions[["Region", "RegionName", "Level"]], on="Region",
        how="left")
    # convert sample names to ages
    ages = ontology.rel_to_abs_ages(df["Sample"].unique())
    df["Age"] = df["Sample"].map(ages)
    # get large super-structures for normalization to brain tissue, where
    # "non-brain" are spinal cord and ventricles, which are variably labeled
    # NOTE(review): 15564, 17651, and 126651558 are hard-coded label IDs,
    # presumably from the loaded labels ontology -- confirm against
    # config.load_labels
    df_base = df[df["Region"] == 15564]
    ids_nonbr_large = (17651, 126651558)
    dfs_nonbr_large = [df[df["Region"] == n] for n in ids_nonbr_large]
    # get data frame with region IDs of all non-brain structures removed
    labels_ref_lookup = ontology.create_aba_reverse_lookup(
        ontology.load_labels_ref(config.load_labels))
    ids_nonbr = []
    for n in ids_nonbr_large:
        ids_nonbr.extend(
            ontology.get_children_from_id(labels_ref_lookup, n))
    label_id = config.atlas_labels[config.AtlasLabels.ID]
    if label_id is not None:
        # show only selected region and its children
        ids = ontology.get_children_from_id(labels_ref_lookup, label_id)
        df = df[np.isin(df["Region"], ids)]
    df_brain = df.loc[~df["Region"].isin(ids_nonbr)]
    levels = np.sort(df["Level"].unique())
    conds = df["Condition"].unique()
    # get aggregated whole brain tissue for normalization
    cols_show = (*id_cols, cond_col, *extra_cols, metric)
    if dfs_nonbr_large:
        # add all large non-brain structures
        df_nonbr = dfs_nonbr_large[0]
        for df_out in dfs_nonbr_large[1:]:
            df_nonbr = df_io.normalize_df(
                df_nonbr, id_cols, cond_col, None, [metric],
                extra_cols, df_out, df_io.df_add)
        # subtract them from whole organism to get brain tissue alone,
        # updating given metric in df_base
        df_base = df_io.normalize_df(
            df_base, id_cols, cond_col, None, [metric], extra_cols,
            df_nonbr, df_io.df_subtract)
    df_base.loc[:, "RegionName"] = "Brain tissue"
    print("Brain {}:".format(metric))
    df_io.print_data_frame(df_base.loc[:, cols_show], "\t")
    df_base_piv, regions = df_io.pivot_with_conditions(
        df_base, id_cols, "RegionName", metric)
    # plot lines with separate styles for each condition and colors for
    # each region name
    linestyles = ("--", "-.", ":", "-")
    num_conds = len(conds)
    # tile the styles so there is at least one per condition
    linestyles = linestyles * (num_conds // (len(linestyles) + 1) + 1)
    if num_conds < len(linestyles):
        # truncate to the number of conditions while keeping the final
        # solid style as the last entry
        linestyles = (*linestyles[:num_conds-1], linestyles[-1])
    lines_params = {
        "labels": (metric, "Post-Conceptional Age"),
        "linestyles": linestyles,
        "size": size,
        "show": show,
        "ignore_invis": True,
        "groups": conds,
        "marker": ".",
    }
    # normalized plots reuse the same params except for the y-axis label
    line_params_norm = lines_params.copy()
    line_params_norm["labels"] = ("Fraction", "Post-Conceptional Age")
    # whole-brain line plot across development
    plot_2d.plot_lines(
        config.filename, "Age", regions,
        title="Whole Brain Development ({})".format(metric),
        suffix="_dev_{}_brain".format(metric),
        df=df_base_piv, **lines_params)
    for level in levels:
        # plot raw metric at given level
        df_level = df.loc[df["Level"] == level]
        print("Raw {}:".format(metric))
        df_io.print_data_frame(df_level.loc[:, cols_show], "\t")
        df_level_piv, regions = df_io.pivot_with_conditions(
            df_level, id_cols, "RegionName", metric)
        plot_2d.plot_lines(
            config.filename, "Age", regions,
            title="Structure Development ({}, Level {})".format(
                metric, level),
            suffix="_dev_{}_level{}".format(metric, level),
            df=df_level_piv, **lines_params)
        # plot metric normalized to whole brain tissue; structures
        # above removed regions will still contain them
        df_brain_level = df_brain.loc[df_brain["Level"] == level]
        df_norm = df_io.normalize_df(
            df_brain_level, id_cols, cond_col, None, [metric],
            extra_cols, df_base)
        print("{} normalized to whole brain:".format(metric))
        df_io.print_data_frame(df_norm.loc[:, cols_show], "\t")
        df_norm_piv, regions = df_io.pivot_with_conditions(
            df_norm, id_cols, "RegionName", metric)
        plot_2d.plot_lines(
            config.filename, "Age", regions,
            units=(None,
                   config.plot_labels[config.PlotLabels.X_UNIT]),
            title=("Structure Development Normalized to Whole "
                   "Brain ({}, Level {})".format(metric, level)),
            suffix="_dev_{}_level{}_norm".format(metric, level),
            df=df_norm_piv, **line_params_norm)
def plot_unlabeled_hemisphere(path, cols, size=None, show=True):
    """Generate line and bar plots of unlabeled hemisphere fractions.

    Args:
        path (str): Path to data frame.
        cols (List[str]): Sequence of columns to plot.
        size (List[int]): Sequence of ``width, height`` to size the figure;
            defaults to None.
        show (bool): True to display the image; defaults to True.
    """
    # load the stats and map each sample name to its absolute age
    df = pd.read_csv(path)
    age_map = ontology.rel_to_abs_ages(df["Sample"].unique())
    df["Age"] = df["Sample"].map(age_map)
    # loop-invariant values shared by every metric's plots
    conds = df["Condition"].unique()
    y_label = "Fraction of hemisphere unlabeled"
    # one line plot and one bar plot per requested metric column
    for col in cols:
        title = str(col).replace("_", " ")
        # lines: fraction vs. age, one line per region, styled by condition
        df_lines, regions = df_io.pivot_with_conditions(
            df, ["Age", "Condition"], "Region", col)
        plot_2d.plot_lines(
            config.filename, "Age", regions, linestyles=("--", "-"),
            labels=(y_label, "Post-Conceptional Age"), title=title,
            size=size, show=show, ignore_invis=True,
            suffix=f"_{col}", df=df_lines, groups=conds)
        # bars: pivot each condition's values into its own column
        df_bars = df.pivot(
            index="Sample", columns="Condition", values=col).reset_index()
        plot_2d.plot_bars(
            config.filename, conds, col_groups="Sample", y_label=y_label,
            title=title, size=None, show=show, df=df_bars,
            prefix=f"{os.path.splitext(config.filename)[0]}_{col}")
def meas_plot_zscores(path, metric_cols, extra_cols, composites, size=None,
                      show=True):
    """Compute z-scores for the given data frame columns and plot them.

    Args:
        path (str): Path to data frame.
        metric_cols (List[str]): Sequence of column names for which to
            compute z-scores.
        extra_cols (List[str]): Additional columns to include in the
            output data frame.
        composites (List[Enum]): Sequence of enums specifying the
            combination, typically from :class:`vols.MetricCombos`.
        size (List[int]): Sequence of ``width, height`` to size the figure;
            defaults to None.
        show (bool): True to display the image; defaults to True.
    """
    # z-score each metric column by region
    df_z = df_io.zscore_df(
        pd.read_csv(path), "Region", metric_cols, extra_cols, True)
    # build the composite score column and save it to its own file
    df_io.data_frames_to_csv(
        df_io.combine_cols(df_z, composites),
        libmag.insert_before_ext(config.filename, "_zhomogeneity"))
    # move each condition's metrics into separate columns
    conditions = np.unique(df_z["Condition"])
    df_z = df_io.cond_to_cols_df(
        df_z, ["Sample", "Region"], "Condition", "original", metric_cols)
    out_path = libmag.insert_before_ext(config.filename, "_zscore")
    df_io.data_frames_to_csv(df_z, out_path)
    # display the shifted z-scores as a probability plot
    bounds = (-3, 3)
    plot_2d.plot_probability(
        out_path, conditions, metric_cols, "Volume",
        xlim=bounds, ylim=bounds, title="Region Match Z-Scores",
        fig_size=size, show=show, suffix=None, df=df_z)
def meas_plot_coefvar(path, id_cols, cond_col, cond_base, metric_cols,
                      composites, size_col=None, size=None, show=True):
    """Measure coefficient of variation (CV) and plot it as a scatter plot.

    CV is computed in two ways:

    - From the columns and equation given in ``composites``, applied
      across all samples without regard to group
    - Per metric in ``metric_cols``, separated by group

    Args:
        path (str): Path to data frame.
        id_cols (List[str]): Sequence of columns to serve as index/indices.
        cond_col (str): Name of the condition column.
        cond_base (str): Name of the condition to which all other conditions
            will be normalized.
        metric_cols (List[str]): Sequence of column names for which to
            compute CV.
        composites (List[Enum]): Sequence of enums specifying the
            combination, typically from :class:`vols.MetricCombos`.
        size_col (str): Name of weighting column for the CV measurement;
            defaults to None.
        size (List[int]): Sequence of ``width, height`` to size the figure;
            defaults to None.
        show (bool): True to display the image; defaults to True.
    """
    # CV per sample-region regardless of group via the composite equations
    df_cv = df_io.combine_cols(pd.read_csv(path), composites)
    df_io.data_frames_to_csv(
        df_cv, libmag.insert_before_ext(config.filename, "_coefvar"))
    # CV within each condition, then shift each condition's metrics into
    # its own set of columns
    df_cv = df_io.coefvar_df(
        df_cv, [*id_cols, cond_col], metric_cols, size_col)
    conditions = np.unique(df_cv[cond_col])
    df_cv = df_io.cond_to_cols_df(
        df_cv, id_cols, cond_col, cond_base, metric_cols)
    out_path = libmag.insert_before_ext(config.filename, "_coefvartransp")
    df_io.data_frames_to_csv(df_cv, out_path)
    # display per-condition CV as a probability plot
    bounds = (0, 0.7)
    plot_2d.plot_probability(
        out_path, conditions, metric_cols, "Volume",
        xlim=bounds, ylim=bounds, title="Coefficient of Variation",
        fig_size=size, show=show, suffix=None, df=df_cv)
def smoothing_peak(df, thresh_label_loss=None, filter_size=None):
    """Select the baseline and peak smoothing-quality rows.

    Args:
        df: Data frame from which to extract.
        thresh_label_loss: Only consider rows at or below this fraction
            of label loss; defaults to None to ignore.
        filter_size: Only consider rows with the given filter size;
            defaults to None to ignore.

    Returns:
        New data frame containing the baseline row (filter size of 0)
        and the row with the peak smoothing quality that meets the
        given criteria.
    """
    filtered = df
    if thresh_label_loss is not None:
        # drop rows that lose more labels than allowed
        loss_col = config.SmoothingMetrics.LABEL_LOSS.value
        filtered = filtered.loc[filtered[loss_col] <= thresh_label_loss]
    filt_col = config.SmoothingMetrics.FILTER_SIZE.value
    if filter_size is not None:
        # keep only the requested filter size plus the baseline (size 0)
        filtered = filtered.loc[
            np.isin(filtered[filt_col], (filter_size, 0))]
    quality = filtered[config.SmoothingMetrics.SM_QUALITY.value]
    # peak-quality row(s) together with the unsmoothed baseline row
    return filtered.loc[
        (quality == quality.max()) | (filtered[filt_col] == 0)]
def plot_intensity_nuclei(paths, labels, size=None, show=True, unit=None):
    """Plot nuclei vs. intensity as a scatter plot.

    Args:
        paths (List[str]): Sequence of paths to CSV files.
        labels (List[str]): Sequence of label metrics corresponding to
            ``paths``.
        size (List[int]): Sequence of ``width, height`` to size the figure;
            defaults to None.
        show (bool): True to display the image; defaults to True.
        unit (str): Denominator unit for density plot; defaults to None.

    Returns:
        :obj:`pd.DataFrame`: Data frame with columns matching ``labels``
        for the given ``paths`` concatenated, or None if fewer than two
        paths or labels are given.
    """
    def plot(lbls, suffix=None, unit=None):
        # scatter the first label's columns against the second's
        cols_xy = []
        for label in lbls:
            # get columns for the given label to plot on a given axis; assume
            # same order of labels for each group of columns so they correspond
            cols_xy.append([
                c for c in df.columns if c.split(".")[0] == label])
        names_group = None
        if cols_xy:
            # extract legend names assuming label.cond format
            names_group = np.unique([c.split(".")[1] for c in cols_xy[0]])
        units = (["{}/{}".format(l.split("_")[0], unit) for l in lbls]
                 if unit else (None, None))
        lbls = [l.replace("_", " ") for l in lbls]
        title = "{} Vs. {} By Region".format(*lbls)
        plot_2d.plot_scatter(
            "vols_stats_intensVnuc", cols_xy[0], cols_xy[1], units=units,
            # col_annot=config.AtlasMetrics.REGION_ABBR.value,
            names_group=names_group, labels=lbls, title=title,
            fig_size=size, show=show, suffix=suffix, df=df)
    if len(paths) < 2 or len(labels) < 2: return
    dfs = [pd.read_csv(path) for path in paths]
    # merge data frames with all columns ending with .mean, prepending labels
    extra_cols = [
        config.AtlasMetrics.REGION.value,
        config.AtlasMetrics.REGION_ABBR.value,
        vols.LabelMetrics.Volume.name,
    ]
    tag = ".mean"
    df = df_io.append_cols(
        dfs[:2], labels, lambda x: x.lower().endswith(tag), extra_cols)
    # add a density column for each label column by dividing by volume;
    # str.startswith requires a str or tuple of str, so convert the
    # label sequence to a tuple (a list would raise TypeError)
    labels_tup = tuple(labels)
    dens = "{}_density"
    for col in df.columns:
        if col.startswith(labels_tup):
            col_split = col.split(".")
            col_split[0] = dens.format(col_split[0])
            df.loc[:, ".".join(col_split)] = (
                df[col] / df[vols.LabelMetrics.Volume.name])
    # strip the tag from column names
    names = {col: col.rsplit(tag)[0] for col in df.columns}
    df = df.rename(columns=names)
    df_io.data_frames_to_csv(df, "vols_stats_intensVnuc.csv")
    # plot labels and density labels
    plot(labels)
    plot([dens.format(l) for l in labels], "_density", unit)
    return df
def meas_improvement(path, col_effect, col_p, thresh_impr=0, thresh_p=0.05,
                     col_wt=None, suffix=None, df=None):
    """Measure overall improvement and worsening for a column in a data frame.

    Args:
        path (str): Path of file to load into data frame.
        col_effect (str): Name of column with metric to measure.
        col_p (str): Name of column with p-values.
        thresh_impr (float): Threshold of effects above which are considered
            improved; effects below it are considered worsened, and effects
            exactly equal to it fall in neither group.
        thresh_p (float): Threshold of p-values below which are considered
            statistically significant.
        col_wt (str): Name of column for weighting.
        suffix (str): Output path suffix; defaults to None.
        df (:obj:`pd.DataFrame`): Data frame to use instead of loading from
            ``path``; defaults to None.
    Returns:
        :obj:`pd.DataFrame`: Data frame with improvement measurements.
        The data frame will be saved to a filename based on ``path``.
    """
    def add_wt(mask_cond, mask_cond_ss, name):
        # add weighted metrics for the given condition, such as improved
        # vs. worsened; total weight across all rows is rewritten with the
        # same value on each call
        metrics[col_wt] = [np.sum(df[col_wt])]
        wt_cond = df.loc[mask_cond, col_wt]
        wt_cond_ss = df.loc[mask_cond_ss, col_wt]
        # sum of weighting column fitting the condition (all and statistically
        # significant)
        metrics["{}_{}".format(col_wt, name)] = [np.sum(wt_cond)]
        metrics["{}_{}_ss".format(col_wt, name)] = [np.sum(wt_cond_ss)]
        # sum of filtered effect multiplied by weighting
        metrics["{}_{}_by_{}".format(col_effect, name, col_wt)] = [np.sum(
            wt_cond.multiply(df.loc[mask_cond, col_effect]))]
        metrics["{}_{}_by_{}_ss".format(col_effect, name, col_wt)] = [np.sum(
            wt_cond_ss.multiply(df.loc[mask_cond_ss, col_effect]))]
    if df is None:
        df = pd.read_csv(path)
    # masks of improved and worsened, all and statistically significant
    # for each, where improvement is above the given threshold
    effects = df[col_effect]
    mask_impr = effects > thresh_impr
    mask_ss = df[col_p] < thresh_p
    mask_impr_ss = mask_impr & mask_ss
    mask_wors = effects < thresh_impr
    mask_wors_ss = mask_wors & mask_ss
    # counts (np.sum over a boolean mask counts True values) and summed
    # effect sizes for each group; each value is a 1-element list for
    # conversion into a single-row data frame
    metrics = {
        "n": [len(effects)],
        "n_impr": [np.sum(mask_impr)],
        "n_impr_ss": [np.sum(mask_impr_ss)],
        "n_wors": [np.sum(mask_wors)],
        "n_wors_ss": [np.sum(mask_wors_ss)],
        col_effect: [np.sum(effects)],
        "{}_impr".format(col_effect): [np.sum(effects[mask_impr])],
        "{}_impr_ss".format(col_effect): [np.sum(effects[mask_impr_ss])],
        "{}_wors".format(col_effect): [np.sum(effects[mask_wors])],
        "{}_wors_ss".format(col_effect): [np.sum(effects[mask_wors_ss])],
    }
    if col_wt:
        # add columns based on weighting column
        add_wt(mask_impr, mask_impr_ss, "impr")
        add_wt(mask_wors, mask_wors_ss, "wors")
    out_path = libmag.insert_before_ext(path, "_impr")
    if suffix:
        out_path = libmag.insert_before_ext(out_path, suffix)
    df_impr = df_io.dict_to_data_frame(metrics, out_path)
    # display transposed version for more compact view given large number
    # of columns, but save un-transposed to preserve data types
    df_io.print_data_frame(df_impr.T, index=True, header=False)
    return df_impr
def plot_clusters_by_label(path, z, suffix=None, show=True, scaling=None):
    """Plot separate sets of clusters for each label.

    Args:
        path (str): Base path to blobs file with clusters.
        z (int): z-plane to plot.
        suffix (str): Suffix for ``path``; defaults to None.
        show (bool): True to show; defaults to True.
        scaling (List): Sequence of scaling from blobs' coordinate space
            to that of :attr:`config.labels_img`.
    """
    mod_path = path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(path, suffix)
    # blob columns as used below: 0 = z, 1 = y, 2 = x, 3 = label ID,
    # 4 = cluster ID
    blobs = np.load(libmag.combine_paths(
        mod_path, config.SUFFIX_BLOB_CLUSTERS))
    label_ids = np.unique(blobs[:, 3])
    fig, gs = plot_support.setup_fig(
        1, 1, config.plot_labels[config.PlotLabels.SIZE])
    ax = fig.add_subplot(gs[0, 0])
    plot_support.hide_axes(ax)
    # plot underlying atlas
    np_io.setup_images(mod_path)
    if config.reg_suffixes[config.RegSuffixes.ATLAS]:
        # use atlas if explicitly set
        img = config.image5d
    else:
        # default to black background
        img = np.zeros_like(config.labels_img)[None]
    export_stack.stack_to_ax_imgs(
        ax, img, mod_path, slice_vals=(z, ),
        labels_imgs=(config.labels_img, config.borders_img),
        fit=False)
    # export_stack.reg_planes_to_img(
    #     (np.zeros(config.labels_img.shape[1:], dtype=int),
    #      config.labels_img[z]), ax=ax)
    if scaling is not None:
        print("scaling blobs cluster coordinates by", scaling)
        blobs = blobs.astype(float)
        blobs[:, :3] = np.multiply(blobs[:, :3], scaling)
        # floor z so the scaled coordinate can match the integer plane below
        blobs[:, 0] = np.floor(blobs[:, 0])
    # plot nuclei by label, colored based on cluster size within each label;
    # one color per unique cluster ID across the whole blobs array
    colors = colormaps.discrete_colormap(
        len(np.unique(blobs[:, 4])), prioritize_default="cn") / 255.
    # noise blobs (cluster ID of -1) are all drawn in opaque white
    col_noise = (1, 1, 1, 1)
    for label_id in label_ids:
        if label_id == 0:
            # skip blobs in background
            continue
        # sort blobs within label by cluster size (descending order),
        # including clusters within all z-planes to keep same order across zs
        blobs_lbl = blobs[blobs[:, 3] == label_id]
        clus_lbls, clus_lbls_counts = np.unique(
            blobs_lbl[:, 4], return_counts=True)
        clus_lbls = clus_lbls[np.argsort(clus_lbls_counts)][::-1]
        blobs_lbl = blobs_lbl[blobs_lbl[:, 0] == z]
        # NOTE(review): zip stops at the shorter sequence, so clusters beyond
        # the number of generated colors are silently skipped
        for i, (clus_lbl, color) in enumerate(zip(clus_lbls, colors)):
            blobs_clus = blobs_lbl[blobs_lbl[:, 4] == clus_lbl]
            if len(blobs_clus) < 1: continue
            # default to small, translucent dominant cluster points
            size = 0.1
            alpha = 0.5
            if clus_lbl == -1:
                # color all noise points the same and emphasize points
                color = col_noise
                size = 0.5
                alpha = 1
            print(label_id, clus_lbl, color, len(blobs_clus))
            # scatter in image coordinates: column 2 as x, column 1 as y
            ax.scatter(
                blobs_clus[:, 2], blobs_clus[:, 1], color=color, s=size,
                alpha=alpha)
    plot_support.save_fig(mod_path, config.savefig, "_clusplot")
    if show: plot_support.show()
| [
"numpy.isin",
"numpy.sum",
"magmap.io.df_io.print_data_frame",
"pandas.read_csv",
"magmap.atlas.ontology.load_labels_ref",
"numpy.floor",
"numpy.argsort",
"magmap.io.df_io.normalize_df",
"magmap.io.df_io.zscore_df",
"numpy.unique",
"numpy.zeros_like",
"numpy.multiply",
"magmap.io.export_stac... | [((1236, 1268), 'pandas.read_csv', 'pd.read_csv', (['config.filenames[1]'], {}), '(config.filenames[1])\n', (1247, 1268), True, 'import pandas as pd\n'), ((3359, 3414), 'magmap.io.df_io.print_data_frame', 'df_io.print_data_frame', (['df_base.loc[:, cols_show]', '"""\t"""'], {}), "(df_base.loc[:, cols_show], '\\t')\n", (3381, 3414), False, 'from magmap.io import df_io\n'), ((3442, 3509), 'magmap.io.df_io.pivot_with_conditions', 'df_io.pivot_with_conditions', (['df_base', 'id_cols', '"""RegionName"""', 'metric'], {}), "(df_base, id_cols, 'RegionName', metric)\n", (3469, 3509), False, 'from magmap.io import df_io\n'), ((6589, 6606), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (6600, 6606), True, 'import pandas as pd\n'), ((8580, 8597), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (8591, 8597), True, 'import pandas as pd\n'), ((8607, 8667), 'magmap.io.df_io.zscore_df', 'df_io.zscore_df', (['df', '"""Region"""', 'metric_cols', 'extra_cols', '(True)'], {}), "(df, 'Region', metric_cols, extra_cols, True)\n", (8622, 8667), False, 'from magmap.io import df_io\n'), ((8725, 8759), 'magmap.io.df_io.combine_cols', 'df_io.combine_cols', (['df', 'composites'], {}), '(df, composites)\n', (8743, 8759), False, 'from magmap.io import df_io\n'), ((8953, 8979), 'numpy.unique', 'np.unique', (["df['Condition']"], {}), "(df['Condition'])\n", (8962, 8979), True, 'import numpy as np\n'), ((8989, 9078), 'magmap.io.df_io.cond_to_cols_df', 'df_io.cond_to_cols_df', (['df', "['Sample', 'Region']", '"""Condition"""', '"""original"""', 'metric_cols'], {}), "(df, ['Sample', 'Region'], 'Condition', 'original',\n metric_cols)\n", (9010, 9078), False, 'from magmap.io import df_io\n'), ((9095, 9147), 'magmap.io.libmag.insert_before_ext', 'libmag.insert_before_ext', (['config.filename', '"""_zscore"""'], {}), "(config.filename, '_zscore')\n", (9119, 9147), False, 'from magmap.io import libmag\n'), ((9152, 9186), 
'magmap.io.df_io.data_frames_to_csv', 'df_io.data_frames_to_csv', (['df', 'path'], {}), '(df, path)\n', (9176, 9186), False, 'from magmap.io import df_io\n'), ((9249, 9416), 'magmap.plot.plot_2d.plot_probability', 'plot_2d.plot_probability', (['path', 'conds', 'metric_cols', '"""Volume"""'], {'xlim': 'lims', 'ylim': 'lims', 'title': '"""Region Match Z-Scores"""', 'fig_size': 'size', 'show': 'show', 'suffix': 'None', 'df': 'df'}), "(path, conds, metric_cols, 'Volume', xlim=lims,\n ylim=lims, title='Region Match Z-Scores', fig_size=size, show=show,\n suffix=None, df=df)\n", (9273, 9416), False, 'from magmap.plot import plot_2d\n'), ((10797, 10814), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (10808, 10814), True, 'import pandas as pd\n'), ((10824, 10858), 'magmap.io.df_io.combine_cols', 'df_io.combine_cols', (['df', 'composites'], {}), '(df, composites)\n', (10842, 10858), False, 'from magmap.io import df_io\n'), ((11074, 11139), 'magmap.io.df_io.coefvar_df', 'df_io.coefvar_df', (['df', '[*id_cols, cond_col]', 'metric_cols', 'size_col'], {}), '(df, [*id_cols, cond_col], metric_cols, size_col)\n', (11090, 11139), False, 'from magmap.io import df_io\n'), ((11152, 11175), 'numpy.unique', 'np.unique', (['df[cond_col]'], {}), '(df[cond_col])\n', (11161, 11175), True, 'import numpy as np\n'), ((11185, 11253), 'magmap.io.df_io.cond_to_cols_df', 'df_io.cond_to_cols_df', (['df', 'id_cols', 'cond_col', 'cond_base', 'metric_cols'], {}), '(df, id_cols, cond_col, cond_base, metric_cols)\n', (11206, 11253), False, 'from magmap.io import df_io\n'), ((11265, 11324), 'magmap.io.libmag.insert_before_ext', 'libmag.insert_before_ext', (['config.filename', '"""_coefvartransp"""'], {}), "(config.filename, '_coefvartransp')\n", (11289, 11324), False, 'from magmap.io import libmag\n'), ((11329, 11363), 'magmap.io.df_io.data_frames_to_csv', 'df_io.data_frames_to_csv', (['df', 'path'], {}), '(df, path)\n', (11353, 11363), False, 'from magmap.io import df_io\n'), ((11452, 
11622), 'magmap.plot.plot_2d.plot_probability', 'plot_2d.plot_probability', (['path', 'conds', 'metric_cols', '"""Volume"""'], {'xlim': 'lims', 'ylim': 'lims', 'title': '"""Coefficient of Variation"""', 'fig_size': 'size', 'show': 'show', 'suffix': 'None', 'df': 'df'}), "(path, conds, metric_cols, 'Volume', xlim=lims,\n ylim=lims, title='Coefficient of Variation', fig_size=size, show=show,\n suffix=None, df=df)\n", (11476, 11622), False, 'from magmap.plot import plot_2d\n'), ((15382, 15439), 'magmap.io.df_io.data_frames_to_csv', 'df_io.data_frames_to_csv', (['df', '"""vols_stats_intensVnuc.csv"""'], {}), "(df, 'vols_stats_intensVnuc.csv')\n", (15406, 15439), False, 'from magmap.io import df_io\n'), ((18582, 18621), 'magmap.io.libmag.insert_before_ext', 'libmag.insert_before_ext', (['path', '"""_impr"""'], {}), "(path, '_impr')\n", (18606, 18621), False, 'from magmap.io import libmag\n'), ((18713, 18756), 'magmap.io.df_io.dict_to_data_frame', 'df_io.dict_to_data_frame', (['metrics', 'out_path'], {}), '(metrics, out_path)\n', (18737, 18756), False, 'from magmap.io import df_io\n'), ((18899, 18958), 'magmap.io.df_io.print_data_frame', 'df_io.print_data_frame', (['df_impr.T'], {'index': '(True)', 'header': '(False)'}), '(df_impr.T, index=True, header=False)\n', (18921, 18958), False, 'from magmap.io import df_io\n'), ((19675, 19697), 'numpy.unique', 'np.unique', (['blobs[:, 3]'], {}), '(blobs[:, 3])\n', (19684, 19697), True, 'import numpy as np\n'), ((19712, 19784), 'magmap.plot.plot_support.setup_fig', 'plot_support.setup_fig', (['(1)', '(1)', 'config.plot_labels[config.PlotLabels.SIZE]'], {}), '(1, 1, config.plot_labels[config.PlotLabels.SIZE])\n', (19734, 19784), False, 'from magmap.plot import plot_support\n'), ((19833, 19859), 'magmap.plot.plot_support.hide_axes', 'plot_support.hide_axes', (['ax'], {}), '(ax)\n', (19855, 19859), False, 'from magmap.plot import plot_support\n'), ((19897, 19925), 'magmap.io.np_io.setup_images', 'np_io.setup_images', (['mod_path'], 
{}), '(mod_path)\n', (19915, 19925), False, 'from magmap.io import np_io\n'), ((20152, 20285), 'magmap.io.export_stack.stack_to_ax_imgs', 'export_stack.stack_to_ax_imgs', (['ax', 'img', 'mod_path'], {'slice_vals': '(z,)', 'labels_imgs': '(config.labels_img, config.borders_img)', 'fit': '(False)'}), '(ax, img, mod_path, slice_vals=(z,),\n labels_imgs=(config.labels_img, config.borders_img), fit=False)\n', (20181, 20285), False, 'from magmap.io import export_stack\n'), ((22114, 22174), 'magmap.plot.plot_support.save_fig', 'plot_support.save_fig', (['mod_path', 'config.savefig', '"""_clusplot"""'], {}), "(mod_path, config.savefig, '_clusplot')\n", (22135, 22174), False, 'from magmap.plot import plot_support\n'), ((1995, 2039), 'magmap.atlas.ontology.load_labels_ref', 'ontology.load_labels_ref', (['config.load_labels'], {}), '(config.load_labels)\n', (2019, 2039), False, 'from magmap.atlas import ontology\n'), ((2336, 2394), 'magmap.atlas.ontology.get_children_from_id', 'ontology.get_children_from_id', (['labels_ref_lookup', 'label_id'], {}), '(labels_ref_lookup, label_id)\n', (2365, 2394), False, 'from magmap.atlas import ontology\n'), ((3137, 3244), 'magmap.io.df_io.normalize_df', 'df_io.normalize_df', (['df_base', 'id_cols', 'cond_col', 'None', '[metric]', 'extra_cols', 'df_nonbr', 'df_io.df_subtract'], {}), '(df_base, id_cols, cond_col, None, [metric], extra_cols,\n df_nonbr, df_io.df_subtract)\n', (3155, 3244), False, 'from magmap.io import df_io\n'), ((4669, 4725), 'magmap.io.df_io.print_data_frame', 'df_io.print_data_frame', (['df_level.loc[:, cols_show]', '"""\t"""'], {}), "(df_level.loc[:, cols_show], '\\t')\n", (4691, 4725), False, 'from magmap.io import df_io\n'), ((4758, 4826), 'magmap.io.df_io.pivot_with_conditions', 'df_io.pivot_with_conditions', (['df_level', 'id_cols', '"""RegionName"""', 'metric'], {}), "(df_level, id_cols, 'RegionName', metric)\n", (4785, 4826), False, 'from magmap.io import df_io\n'), ((5335, 5429), 'magmap.io.df_io.normalize_df', 
'df_io.normalize_df', (['df_brain_level', 'id_cols', 'cond_col', 'None', '[metric]', 'extra_cols', 'df_base'], {}), '(df_brain_level, id_cols, cond_col, None, [metric],\n extra_cols, df_base)\n', (5353, 5429), False, 'from magmap.io import df_io\n'), ((5522, 5577), 'magmap.io.df_io.print_data_frame', 'df_io.print_data_frame', (['df_norm.loc[:, cols_show]', '"""\t"""'], {}), "(df_norm.loc[:, cols_show], '\\t')\n", (5544, 5577), False, 'from magmap.io import df_io\n'), ((5609, 5676), 'magmap.io.df_io.pivot_with_conditions', 'df_io.pivot_with_conditions', (['df_norm', 'id_cols', '"""RegionName"""', 'metric'], {}), "(df_norm, id_cols, 'RegionName', metric)\n", (5636, 5676), False, 'from magmap.io import df_io\n'), ((6981, 7049), 'magmap.io.df_io.pivot_with_conditions', 'df_io.pivot_with_conditions', (['df', "['Age', 'Condition']", '"""Region"""', 'col'], {}), "(df, ['Age', 'Condition'], 'Region', col)\n", (7008, 7049), False, 'from magmap.io import df_io\n'), ((8816, 8874), 'magmap.io.libmag.insert_before_ext', 'libmag.insert_before_ext', (['config.filename', '"""_zhomogeneity"""'], {}), "(config.filename, '_zhomogeneity')\n", (8840, 8874), False, 'from magmap.io import libmag\n'), ((10901, 10954), 'magmap.io.libmag.insert_before_ext', 'libmag.insert_before_ext', (['config.filename', '"""_coefvar"""'], {}), "(config.filename, '_coefvar')\n", (10925, 10954), False, 'from magmap.io import libmag\n'), ((14223, 14414), 'magmap.plot.plot_2d.plot_scatter', 'plot_2d.plot_scatter', (['"""vols_stats_intensVnuc"""', 'cols_xy[0]', 'cols_xy[1]'], {'units': 'units', 'names_group': 'names_group', 'labels': 'lbls', 'title': 'title', 'fig_size': 'size', 'show': 'show', 'suffix': 'suffix', 'df': 'df'}), "('vols_stats_intensVnuc', cols_xy[0], cols_xy[1], units\n =units, names_group=names_group, labels=lbls, title=title, fig_size=\n size, show=show, suffix=suffix, df=df)\n", (14243, 14414), False, 'from magmap.plot import plot_2d\n'), ((14570, 14587), 'pandas.read_csv', 'pd.read_csv', 
(['path'], {}), '(path)\n', (14581, 14587), True, 'import pandas as pd\n'), ((17488, 17505), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (17499, 17505), True, 'import pandas as pd\n'), ((18656, 18698), 'magmap.io.libmag.insert_before_ext', 'libmag.insert_before_ext', (['out_path', 'suffix'], {}), '(out_path, suffix)\n', (18680, 18698), False, 'from magmap.io import libmag\n'), ((19530, 19568), 'magmap.io.libmag.insert_before_ext', 'libmag.insert_before_ext', (['path', 'suffix'], {}), '(path, suffix)\n', (19554, 19568), False, 'from magmap.io import libmag\n'), ((19589, 19648), 'magmap.io.libmag.combine_paths', 'libmag.combine_paths', (['mod_path', 'config.SUFFIX_BLOB_CLUSTERS'], {}), '(mod_path, config.SUFFIX_BLOB_CLUSTERS)\n', (19609, 19648), False, 'from magmap.io import libmag\n'), ((20605, 20639), 'numpy.multiply', 'np.multiply', (['blobs[:, :3]', 'scaling'], {}), '(blobs[:, :3], scaling)\n', (20616, 20639), True, 'import numpy as np\n'), ((20662, 20683), 'numpy.floor', 'np.floor', (['blobs[:, 0]'], {}), '(blobs[:, 0])\n', (20670, 20683), True, 'import numpy as np\n'), ((21259, 21305), 'numpy.unique', 'np.unique', (['blobs_lbl[:, 4]'], {'return_counts': '(True)'}), '(blobs_lbl[:, 4], return_counts=True)\n', (21268, 21305), True, 'import numpy as np\n'), ((22188, 22207), 'magmap.plot.plot_support.show', 'plot_support.show', ([], {}), '()\n', (22205, 22207), False, 'from magmap.plot import plot_support\n'), ((1278, 1306), 'pandas.read_csv', 'pd.read_csv', (['config.filename'], {}), '(config.filename)\n', (1289, 1306), True, 'import pandas as pd\n'), ((2128, 2179), 'magmap.atlas.ontology.get_children_from_id', 'ontology.get_children_from_id', (['labels_ref_lookup', 'n'], {}), '(labels_ref_lookup, n)\n', (2157, 2179), False, 'from magmap.atlas import ontology\n'), ((2411, 2437), 'numpy.isin', 'np.isin', (["df['Region']", 'ids'], {}), "(df['Region'], ids)\n", (2418, 2437), True, 'import numpy as np\n'), ((2872, 2973), 
'magmap.io.df_io.normalize_df', 'df_io.normalize_df', (['df_nonbr', 'id_cols', 'cond_col', 'None', '[metric]', 'extra_cols', 'df_out', 'df_io.df_add'], {}), '(df_nonbr, id_cols, cond_col, None, [metric], extra_cols,\n df_out, df_io.df_add)\n', (2890, 2973), False, 'from magmap.io import df_io\n'), ((12474, 12546), 'numpy.isin', 'np.isin', (['df[config.SmoothingMetrics.FILTER_SIZE.value]', '(filter_size, 0)'], {}), '(df[config.SmoothingMetrics.FILTER_SIZE.value], (filter_size, 0))\n', (12481, 12546), True, 'import numpy as np\n'), ((16757, 16775), 'numpy.sum', 'np.sum', (['df[col_wt]'], {}), '(df[col_wt])\n', (16763, 16775), True, 'import numpy as np\n'), ((17022, 17037), 'numpy.sum', 'np.sum', (['wt_cond'], {}), '(wt_cond)\n', (17028, 17037), True, 'import numpy as np\n'), ((17091, 17109), 'numpy.sum', 'np.sum', (['wt_cond_ss'], {}), '(wt_cond_ss)\n', (17097, 17109), True, 'import numpy as np\n'), ((17925, 17942), 'numpy.sum', 'np.sum', (['mask_impr'], {}), '(mask_impr)\n', (17931, 17942), True, 'import numpy as np\n'), ((17967, 17987), 'numpy.sum', 'np.sum', (['mask_impr_ss'], {}), '(mask_impr_ss)\n', (17973, 17987), True, 'import numpy as np\n'), ((18009, 18026), 'numpy.sum', 'np.sum', (['mask_wors'], {}), '(mask_wors)\n', (18015, 18026), True, 'import numpy as np\n'), ((18051, 18071), 'numpy.sum', 'np.sum', (['mask_wors_ss'], {}), '(mask_wors_ss)\n', (18057, 18071), True, 'import numpy as np\n'), ((18095, 18110), 'numpy.sum', 'np.sum', (['effects'], {}), '(effects)\n', (18101, 18110), True, 'import numpy as np\n'), ((18152, 18178), 'numpy.sum', 'np.sum', (['effects[mask_impr]'], {}), '(effects[mask_impr])\n', (18158, 18178), True, 'import numpy as np\n'), ((18223, 18252), 'numpy.sum', 'np.sum', (['effects[mask_impr_ss]'], {}), '(effects[mask_impr_ss])\n', (18229, 18252), True, 'import numpy as np\n'), ((18294, 18320), 'numpy.sum', 'np.sum', (['effects[mask_wors]'], {}), '(effects[mask_wors])\n', (18300, 18320), True, 'import numpy as np\n'), ((18365, 18394), 
'numpy.sum', 'np.sum', (['effects[mask_wors_ss]'], {}), '(effects[mask_wors_ss])\n', (18371, 18394), True, 'import numpy as np\n'), ((20109, 20141), 'numpy.zeros_like', 'np.zeros_like', (['config.labels_img'], {}), '(config.labels_img)\n', (20122, 20141), True, 'import numpy as np\n'), ((20819, 20841), 'numpy.unique', 'np.unique', (['blobs[:, 4]'], {}), '(blobs[:, 4])\n', (20828, 20841), True, 'import numpy as np\n'), ((21349, 21377), 'numpy.argsort', 'np.argsort', (['clus_lbls_counts'], {}), '(clus_lbls_counts)\n', (21359, 21377), True, 'import numpy as np\n'), ((7747, 7780), 'os.path.splitext', 'os.path.splitext', (['config.filename'], {}), '(config.filename)\n', (7763, 7780), False, 'import os\n')] |
#!/usr/bin/env python
from math import pi, cos, sin
import numpy as np
import diagnostic_msgs
import diagnostic_updater
import roboclaw_driver.roboclaw_driver_new as roboclaw
import rospy
import tf
from geometry_msgs.msg import Quaternion, Twist
from nav_msgs.msg import Odometry
import sys
__author__ = "<EMAIL> (<NAME>)"
# Last raw (status, count, crc) triple read from each of the four wheel
# encoders; populated in Node.run(), None until the first successful read.
status1, enc1, crc1 = None, None, None
status2, enc2, crc2 = None, None, None
status3, enc3, crc3 = None, None, None
status4, enc4, crc4 = None, None, None
# Commanded body-frame velocity [vx, vy, wz]; filled in Node.cmd_vel_callback.
X = np.array([0.00,0.00,0.00])
# Flag set to 1 when a fresh cmd_vel has been converted to wheel speeds and
# is waiting to be sent to the motor controllers by Node.run().
get_cmd_vel=0
# Per-wheel speed commands, one row per wheel, produced by the mecanum
# inverse kinematics (B_inv . X) in Node.cmd_vel_callback.
U =np.array([[0],[0],[0],[0]])
# version = 0x80
# TODO need to find some better was of handling OSerror 11 or preventing it, any ideas?
class EncoderOdom:
    """Integrate the four mecanum-wheel encoder counts into a 2-D pose and
    publish it as nav_msgs/Odometry on the ``/odom`` topic.
    """
    def __init__(self, ticks_per_meter, base_width, base_length, wheel_radius):
        """Store the kinematic parameters and start the pose at the origin.

        :param ticks_per_meter: encoder counts per meter of wheel travel
        :param base_width: lateral term ``ly`` of the mecanum kinematics
        :param base_length: longitudinal term ``lx`` of the kinematics
            (only ever used as the sum ``lx + ly``)
        :param wheel_radius: wheel radius in meters
        """
        # rospy.logwarn(sys.version)
        self.TICKS_PER_METER = ticks_per_meter
        self.BASE_WIDTH = base_width
        self.BASE_LENGTH = base_length
        self.WHEEL_RADIUS = wheel_radius
        self.odom_pub = rospy.Publisher('/odom', Odometry, queue_size=10)
        # Integrated pose estimate in the odom frame.
        self.cur_x = 0.0
        self.cur_y = 0.0
        self.cur_theta = 0.0
        # Previous absolute encoder counts, used to form per-cycle deltas.
        self.last_enc_front_left = 0
        self.last_enc_front_right = 0
        self.last_enc_back_left = 0
        self.last_enc_back_right = 0
        self.last_enc_time = rospy.Time.now()
    @staticmethod
    def normalize_angle(angle):
        """Wrap *angle* (radians) into the interval [-pi, pi]."""
        while angle > pi:
            angle -= 2.0 * pi
        while angle < -pi:
            angle += 2.0 * pi
        return angle
    def update(self, enc_front_left, enc_front_right, enc_back_left, enc_back_right):
        """Turn the latest absolute encoder counts into body velocities,
        integrate them into the pose, and return (vel_x, vel_y, vel_theta).

        NOTE(review): vel_x/vel_y/vel_theta come out as 1-element numpy
        arrays (rows of V), not scalars -- downstream message fields
        accept them, but confirm this is intended.
        """
        lx = self.BASE_LENGTH
        ly = self.BASE_WIDTH
        r = self.WHEEL_RADIUS
        # rospy.logwarn("before - before cur_th: %f" %self.cur_theta)
        # rospy.logwarn("enc_front_left %f"%enc_front_left)
        # rospy.logwarn("self.last_enc_front_left %f"%self.last_enc_front_left)
        # Per-cycle tick deltas; the stored counts must be refreshed
        # immediately afterwards so the next cycle diffs correctly.
        front_left_ticks = enc_front_left - self.last_enc_front_left
        front_right_ticks = enc_front_right - self.last_enc_front_right
        back_left_ticks = enc_back_left - self.last_enc_back_left
        back_right_ticks = enc_back_right - self.last_enc_back_right
        self.last_enc_front_left = enc_front_left
        self.last_enc_front_right = enc_front_right
        self.last_enc_back_left = enc_back_left
        self.last_enc_back_right = enc_back_right
        # 6.28 ~= 2*pi.  NOTE(review): dividing by TICKS_PER_METER suggests
        # the result should be meters of travel; confirm whether the 2*pi
        # factor is a calibration fudge or a units conversion.
        dist_front_left = front_left_ticks*6.28 / self.TICKS_PER_METER
        dist_front_right = front_right_ticks*6.28 / self.TICKS_PER_METER
        dist_back_left = back_left_ticks*6.28 / self.TICKS_PER_METER
        dist_back_right = back_right_ticks*6.28 / self.TICKS_PER_METER
        # rospy.logwarn("front_left_ticks %f"%front_left_ticks)
        # rospy.logwarn("dist_front_left %f"%dist_front_left)
        # rospy.logwarn("dist_front_right %f"%dist_front_right)
        # rospy.logwarn("dist_back_left %f"%dist_back_left)
        # rospy.logwarn("dist_back_right %f"%dist_back_right)
        # rospy.logwarn("self.TICKS_PER_METER %d"%self.TICKS_PER_METER)
        # rospy.logwarn("enc_front_left %d"%enc_front_left)
        # rospy.logwarn("enc_front_right %d"%enc_front_right)
        # rospy.logwarn("enc_back_left %d"%enc_back_left)
        # rospy.logwarn("enc_back_right %d"%enc_back_right)
        # dist = (dist_right + dist_left) / 2.0
        current_time = rospy.Time.now()
        d_time = (current_time - self.last_enc_time).to_sec()
        self.last_enc_time = current_time
        # W: per-wheel speeds (distance / elapsed time), one row per wheel.
        W = np.array([[dist_front_left/d_time],[dist_front_right/d_time],[dist_back_left/d_time],[dist_back_right/d_time]])
        # B: mecanum forward-kinematics matrix mapping wheel speeds to
        # body velocities [vx, vy, wz].
        B = np.array([[1.00, 1.00, 1.00, 1.00],[-1.00, 1.00, 1.00, -1.00 ],[1.00/(lx+ly), -1.00/(lx+ly), 1.00/(lx+ly), -1.00/(lx+ly)]])
        V = np.dot(B*(r/4), W)  # NOTE(review): original author doubted the r factor here ("no r i think"); confirm against calibration
        rospy.logwarn("curent wheel velocities %f %f %f %f " %(W[0],W[1],W[2],W[3]))
        # Dead-reckon the pose by Euler integration of the body velocity.
        self.cur_x += V[0]*d_time
        self.cur_y += V[1]*d_time
        self.cur_theta += V[2]*d_time
        vel_x = V[0]
        vel_y = V[1]
        vel_theta = V[2]
        rospy.logwarn("robot vel_x %f"%vel_x)
        return vel_x, vel_y, vel_theta
    def publish_odom(self, cur_x, cur_y, cur_theta, vx,vy, vth):
        """Publish an Odometry message for the given pose and twist.

        The orientation quaternion is computed by a hand-rolled
        Euler-to-quaternion conversion; roll and pitch are fixed to 0, so
        only yaw (cur_theta) is encoded.
        """
        # quat = tf.transformations.quaternion_from_euler(0, 0, cur_theta)
        roll=0
        pitch =0
        yaw = cur_theta
        cy = cos(yaw * 0.5)
        sy = sin(yaw * 0.5)
        cp = cos(pitch * 0.5)
        sp = sin(pitch * 0.5)
        cr = cos(roll * 0.5)
        sr = sin(roll * 0.5)
        quat = Quaternion()
        quat.w = cy * cp * cr + sy * sp * sr
        quat.x = cy * cp * sr - sy * sp * cr
        quat.y = sy * cp * sr + cy * sp * cr
        quat.z = sy * cp * cr - cy * sp * sr
        current_time = rospy.Time.now()
        # br = tf.TransformBroadcaster()
        # br.sendTransform((cur_x, cur_y, 0),
        #                  tf.transformations.quaternion_from_euler(0, 0, -cur_theta),
        #                  current_time,
        #                  "summit_base_footprint",
        #                  "odom")
        odom = Odometry()
        odom.header.stamp = current_time
        odom.header.frame_id = 'odom'
        odom.pose.pose.position.x = cur_x
        odom.pose.pose.position.y = cur_y
        odom.pose.pose.position.z = 0.0
        odom.pose.pose.orientation = quat
        # Small variance on x/y/yaw (trusted); huge variance on z/roll/pitch
        # since a planar base cannot observe them.
        odom.pose.covariance[0] = 0.01
        odom.pose.covariance[7] = 0.01
        odom.pose.covariance[14] = 99999
        odom.pose.covariance[21] = 99999
        odom.pose.covariance[28] = 99999
        odom.pose.covariance[35] = 0.01
        # odom.child_frame_id = 'summit_base_footprint'
        odom.twist.twist.linear.x = vx
        odom.twist.twist.linear.y = vy
        odom.twist.twist.angular.z = vth
        odom.twist.covariance = odom.pose.covariance
        self.odom_pub.publish(odom)
    def update_publish(self, enc_front_left, enc_front_right, enc_back_left, enc_back_right):
        """Sanity-check the raw encoder counts, then update and publish.

        A count that jumps by more than 700000 since the previous cycle is
        treated as a serial glitch / rollover and the whole cycle is
        dropped (no pose update, no publish).
        """
        # 2106 per 0.1 seconds is max speed, error in the 16th bit is 32768
        # TODO lets find a better way to deal with this error
        if abs(enc_front_left - self.last_enc_front_left) > 700000:
            rospy.logerr("Ignoring front left encoder jump: cur %d, last %d" % (enc_front_left, self.last_enc_front_left))
        elif abs(enc_back_right - self.last_enc_back_right) > 700000:
            rospy.logerr("Ignoring back right encoder jump: cur %d, last %d" % (enc_back_right, self.last_enc_back_right))
        elif abs(enc_front_right - self.last_enc_front_right) > 700000:
            rospy.logerr("Ignoring front right encoder jump: cur %d, last %d" % (enc_front_right, self.last_enc_front_right))
        elif abs(enc_back_left - self.last_enc_back_left) > 700000:
            rospy.logerr("Ignoring back left encoder jump: cur %d, last %d" % (enc_back_left, self.last_enc_back_left))
        else:
            vel_x, vel_y, vel_theta = self.update(enc_front_left, enc_front_right, enc_back_left, enc_back_right)
            self.publish_odom(self.cur_x, self.cur_y, self.cur_theta, vel_x,vel_y, vel_theta)
class Node:
    """ROS node driving a 4-wheel mecanum base through two Roboclaw motor
    controllers (one for the front wheel pair, one for the back pair).

    Subscribes to ``cmd_vel`` (geometry_msgs/Twist), converts the body
    velocity into per-wheel commands via the mecanum inverse kinematics,
    and publishes odometry computed from the wheel encoders through
    EncoderOdom.  The two serial ports are held in the module globals
    p1 (front) and p2 (back).
    """
    def __init__(self):
        global p1,p2
        # Roboclaw unit-status bits -> (diagnostic level, message),
        # consumed by check_vitals().
        self.ERRORS = {0x0000: (diagnostic_msgs.msg.DiagnosticStatus.OK, "Normal"),
                       0x0001: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "M1 over current"),
                       0x0002: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "M2 over current"),
                       0x0004: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Emergency Stop"),
                       0x0008: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Temperature1"),
                       0x0010: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Temperature2"),
                       0x0020: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Main batt voltage high"),
                       0x0040: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Logic batt voltage high"),
                       0x0080: (diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Logic batt voltage low"),
                       0x0100: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "M1 driver fault"),
                       0x0200: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "M2 driver fault"),
                       0x0400: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "Main batt voltage high"),
                       0x0800: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "Main batt voltage low"),
                       0x1000: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "Temperature1"),
                       0x2000: (diagnostic_msgs.msg.DiagnosticStatus.WARN, "Temperature2"),
                       0x4000: (diagnostic_msgs.msg.DiagnosticStatus.OK, "M1 home"),
                       0x8000: (diagnostic_msgs.msg.DiagnosticStatus.OK, "M2 home")}
        rospy.init_node("roboclaw_node_f")
        rospy.on_shutdown(self.shutdown)
        rospy.loginfo("Connecting to roboclaw")
        dev_name_front = rospy.get_param("~dev_front", "/dev/ttyACM0")
        dev_name_back = rospy.get_param("~dev_back", "/dev/ttyACM1")
        baud_rate = int(rospy.get_param("~baud", "38400"))
        rospy.logwarn("after dev name")
        self.address_front = int(rospy.get_param("~address_front", "128"))
        self.address_back = int(rospy.get_param("~address_back", "129"))
        # Valid Roboclaw packet-serial addresses are 0x80-0x87.
        if self.address_front > 0x87 or self.address_front < 0x80 or self.address_back > 0x87 or self.address_back < 0x80:
            rospy.logfatal("Address out of range")
            rospy.signal_shutdown("Address out of range")
        # TODO need someway to check if address is correct
        try:
            p1 = roboclaw.Open(dev_name_front, baud_rate)
        except Exception as e:
            rospy.logfatal("Could not connect to front Roboclaw")
            rospy.logdebug(e)
            rospy.signal_shutdown("Could not connect to front Roboclaw")
        try:
            p2 = roboclaw.Open(dev_name_back, baud_rate)
        except Exception as e:
            rospy.logfatal("Could not connect to back Roboclaw")
            rospy.logdebug(e)
            rospy.signal_shutdown("Could not connect to back Roboclaw")
        # Firmware-version read is informational only; failures are logged
        # but not fatal.
        # BUGFIX: 'version' was previously left unbound (NameError) when
        # ReadVersion raised, because the original fell through to
        # 'if not version[0]'.  Initialise it and guard for None.
        version = None
        try:
            version = roboclaw.ReadVersion(self.address_front,p1)
        except Exception as e:
            rospy.logwarn("Problem getting front roboclaw version")
            rospy.logdebug(e)
        if version is None or not version[0]:
            rospy.logwarn("Could not get version from front roboclaw")
        else:
            rospy.logdebug(repr(version[1]))
        version = None
        try:
            version = roboclaw.ReadVersion(self.address_back,p2)
        except Exception as e:
            rospy.logwarn("Problem getting back roboclaw version")
            rospy.logdebug(e)
        if version is None or not version[0]:
            rospy.logwarn("Could not get version from back roboclaw")
        else:
            rospy.logdebug(repr(version[1]))
        # Zero the encoders so odometry starts from the origin.
        roboclaw.ResetEncoders(self.address_front,p1)
        roboclaw.ResetEncoders(self.address_back,p2)
        self.MAX_SPEED = float(rospy.get_param("~max_speed", "1.1"))
        self.TICKS_PER_METER = float(rospy.get_param("~ticks_per_meter", "35818"))
        self.BASE_WIDTH = float(rospy.get_param("~base_width", "0.1762"))
        self.BASE_LENGTH = float(rospy.get_param("~base_length", "0.2485"))
        self.WHEEL_RADIUS = float(rospy.get_param("~wheel_radius", "0.0635"))
        self.encodm = EncoderOdom(self.TICKS_PER_METER, self.BASE_WIDTH,self.BASE_LENGTH,self.WHEEL_RADIUS)
        self.last_set_speed_time = rospy.get_rostime()
        rospy.Subscriber("cmd_vel", Twist, self.cmd_vel_callback)
        rospy.logdebug("dev_front %s dev_back %s", dev_name_front, dev_name_back)
        rospy.logdebug("baud %d", baud_rate)
        rospy.logdebug("address_front %d address_back %d", self.address_front, self.address_back)
        rospy.logdebug("max_speed %f", self.MAX_SPEED)
        rospy.logdebug("ticks_per_meter %f", self.TICKS_PER_METER)
        rospy.logdebug("base_width %f base_length %f", self.BASE_WIDTH, self.BASE_LENGTH)

    def run(self):
        """Main 100 Hz loop: read the four wheel encoders, publish
        odometry, and forward the latest cmd_vel wheel speeds to the
        controllers.  A 1 s watchdog stops the motors when no command
        arrives.
        """
        global p1,p2,get_cmd_vel,U
        rospy.loginfo("Starting motor drive")
        r_time = rospy.Rate(100)
        max_possible_speed=1.79
        while not rospy.is_shutdown():
            # Watchdog: stop all motors if no cmd_vel for over 1 second.
            if (rospy.get_rostime() - self.last_set_speed_time).to_sec() > 1:
                rospy.loginfo("Did not get command for 1 second, stopping")
                try:
                    roboclaw.ForwardM1(self.address_front, 0,p1)
                    roboclaw.ForwardM2(self.address_front, 0,p1)
                    roboclaw.ForwardM1(self.address_back, 0,p2)
                    roboclaw.ForwardM2(self.address_back, 0,p2)
                except OSError as e:
                    rospy.logerr("Could not stop")
                    rospy.logdebug(e)
            # TODO need find solution to the OSError11 looks like sync problem with serial
            try:
                status1, enc1, crc1 = roboclaw.ReadEncM1(self.address_front,p1)
            except ValueError:
                pass
            except OSError as e:
                rospy.logwarn("ReadEncM1 OSError: %d", e.errno)
                rospy.logdebug(e)
            try:
                status2, enc2, crc2 = roboclaw.ReadEncM2(self.address_front,p1)
            except ValueError:
                pass
            except OSError as e:
                rospy.logwarn("ReadEncM2 OSError: %d", e.errno)
                rospy.logdebug(e)
            try:
                status3, enc3, crc3 = roboclaw.ReadEncM1(self.address_back,p2)
            except ValueError:
                pass
            except OSError as e:
                rospy.logwarn("ReadEncM3 OSError: %d", e.errno)
                rospy.logdebug(e)
            try:
                status4, enc4, crc4 = roboclaw.ReadEncM2(self.address_back,p2)
            except ValueError:
                pass
            except OSError as e:
                rospy.logwarn("ReadEncM4 OSError: %d", e.errno)
                rospy.logdebug(e)
            # Only publish odometry once every encoder has been read at
            # least once this loop iteration (the names are absent from
            # the local scope until their read succeeds).
            if ('enc1' in vars()) and ('enc2' in vars()) and ('enc3' in vars()) and ('enc4' in vars()):
                rospy.logdebug(" Encoders %d %d %d %d" % (enc1, enc2,enc3,enc4))
                self.encodm.update_publish(enc1, enc2, enc3, enc4)
            if(get_cmd_vel==1):
                try:
                    # BUGFIX: originally compared with 'is 0' (identity).
                    # U holds numpy values, which are never the same object
                    # as the int literal 0, so the stop branch could never
                    # fire.  Use value equality instead.
                    if U[0] == 0 and U[1] == 0 and U[2] == 0 and U[3] == 0:
                        # 63/64 is the neutral (stop) duty for
                        # ForwardBackward commands.
                        roboclaw.ForwardBackwardM1(self.address_front, 63,p1)
                        roboclaw.ForwardBackwardM2(self.address_front, 63,p1)
                        roboclaw.ForwardBackwardM1(self.address_back, 63,p2)
                        roboclaw.ForwardBackwardM2(self.address_back, 63,p2)
                    else:
                        rospy.logwarn("wheel velocities %f %f %f %f " %(U[0],U[1],U[2],U[3]))
                        # Map wheel speed from [-max_possible_speed,
                        # +max_possible_speed] onto the Roboclaw 0..127
                        # duty range, with 64 as stop.
                        real_sp_m1 = ((U[0]/max_possible_speed)*63)+64
                        real_sp_m2 = ((U[1]/max_possible_speed)*63)+64
                        real_sp_m3 = ((U[2]/max_possible_speed)*63)+64
                        real_sp_m4 = ((U[3]/max_possible_speed)*63)+64
                        roboclaw.ForwardBackwardM1(self.address_front, int(real_sp_m1),p1)
                        roboclaw.ForwardBackwardM2(self.address_front, int(real_sp_m2),p1)
                        roboclaw.ForwardBackwardM1(self.address_back, int(real_sp_m3),p2)
                        roboclaw.ForwardBackwardM2(self.address_back, int(real_sp_m4),p2)
                except OSError as e:
                    rospy.logwarn("SpeedM1M2 OSError: %d", e.errno)
                    rospy.logdebug(e)
                get_cmd_vel=0
            r_time.sleep()

    def cmd_vel_callback(self, twist):
        """cmd_vel subscriber: clamp the incoming Twist, run the mecanum
        inverse kinematics, and stash the four wheel speeds in the global
        U for run() to send out.
        """
        global p1,p2, U, get_cmd_vel
        self.last_set_speed_time = rospy.get_rostime()
        max_linear_speed = 0.5
        max_angular_speed = 0.7
        lx = self.BASE_LENGTH
        ly = self.BASE_WIDTH
        # NOTE(review): the wheel radius is not used below -- the usual
        # mecanum inverse kinematics divides by r.  Confirm whether U is
        # meant to be wheel linear speed rather than angular speed.
        r = self.WHEEL_RADIUS
        linear_x = twist.linear.x
        linear_y = twist.linear.y
        angular_z = twist.angular.z
        # Clamp each component to the configured limits.
        if linear_x > max_linear_speed:
            linear_x = max_linear_speed
        if linear_x < -max_linear_speed:
            linear_x = -max_linear_speed
        if linear_y > max_linear_speed:
            linear_y = max_linear_speed
        if linear_y < -max_linear_speed:
            linear_y = -max_linear_speed
        if angular_z > max_angular_speed:
            angular_z = max_angular_speed
        if angular_z < -max_angular_speed:
            angular_z = -max_angular_speed
        # Mecanum inverse kinematics: body [vx, vy, wz] -> four wheel speeds.
        B_inv = np.array([[1.00, -1.00, -(lx+ly)],[1.00, 1.00, (lx+ly)] ,[1.00, 1.00, -(lx+ly)], [1.00, -1.00, (lx+ly)]])
        X[0]=linear_x
        X[1]=linear_y
        X[2]=angular_z
        U_inv =X
        # Wheel velocities, consumed asynchronously by run().
        U = np.dot(B_inv,U_inv)
        get_cmd_vel=1

    # TODO: Need to make this work when more than one error is raised
    def check_vitals(self, stat):
        """diagnostic_updater callback: report the status bits, battery
        voltages and temperatures of both Roboclaw units.

        NOTE(review): the second summary() call overwrites the first, so
        only the back unit's status is actually reported.
        """
        global p1,p2
        try:
            status_front = roboclaw.ReadError(self.address_front,p1)[1]
            status_back = roboclaw.ReadError(self.address_back,p2)[1]
        except OSError as e:
            rospy.logwarn("Diagnostics OSError: %d", e.errno)
            rospy.logdebug(e)
            return
        state, message = self.ERRORS[status_front]
        stat.summary(state, message)
        state, message = self.ERRORS[status_back]
        stat.summary(state, message)
        try:
            # Raw readings are in tenths (volts / degrees C).
            stat.add("front Main Batt V:", float(roboclaw.ReadMainBatteryVoltage(self.address_front,p1)[1] / 10))
            stat.add("front Logic Batt V:", float(roboclaw.ReadLogicBatteryVoltage(self.address_front,p1)[1] / 10))
            stat.add("front Temp1 C:", float(roboclaw.ReadTemp(self.address_front,p1)[1] / 10))
            stat.add("front Temp2 C:", float(roboclaw.ReadTemp2(self.address_front,p1)[1] / 10))
            stat.add("back Main Batt V:", float(roboclaw.ReadMainBatteryVoltage(self.address_back,p2)[1] / 10))
            stat.add("back Logic Batt V:", float(roboclaw.ReadLogicBatteryVoltage(self.address_back,p2)[1] / 10))
            stat.add("back Temp1 C:", float(roboclaw.ReadTemp(self.address_back,p2)[1] / 10))
            stat.add("back Temp2 C:", float(roboclaw.ReadTemp2(self.address_back,p2)[1] / 10))
        except OSError as e:
            rospy.logwarn("Diagnostics OSError: %d", e.errno)
            rospy.logdebug(e)
        return stat

    def _stop_motors(self):
        """Command all four motors to the neutral (63 = stop) duty value.

        Raises OSError if the serial write fails.
        """
        roboclaw.ForwardBackwardM1(self.address_front, 63,p1)
        roboclaw.ForwardBackwardM2(self.address_front, 63,p1)
        roboclaw.ForwardBackwardM1(self.address_back, 63,p2)
        roboclaw.ForwardBackwardM2(self.address_back, 63,p2)

    # TODO: need clean shutdown so motors stop even if new msgs are arriving
    def shutdown(self):
        """rospy shutdown hook: stop all motors, retrying once on serial
        failure before giving up."""
        global p1,p2
        rospy.loginfo("Shutting down")
        try:
            self._stop_motors()
        except OSError:
            rospy.logerr("Shutdown did not work trying again")
            try:
                self._stop_motors()
            except OSError as e:
                rospy.logerr("Could not shutdown motors!!!!")
                rospy.logdebug(e)
# Script entry point: build the driver node and spin its main loop until
# ROS shuts down.
if __name__ == "__main__":
    try:
        driver = Node()
        driver.run()
    except rospy.ROSInterruptException:
        # Ctrl-C / node shutdown interrupts the loop; exit quietly.
        pass
    rospy.loginfo("Exiting")
| [
"rospy.logerr",
"rospy.Subscriber",
"roboclaw_driver.roboclaw_driver_new.Open",
"roboclaw_driver.roboclaw_driver_new.ReadError",
"rospy.logwarn",
"roboclaw_driver.roboclaw_driver_new.ReadMainBatteryVoltage",
"roboclaw_driver.roboclaw_driver_new.ReadTemp2",
"rospy.Time.now",
"rospy.Rate",
"rospy.si... | [((487, 512), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (495, 512), True, 'import numpy as np\n'), ((533, 563), 'numpy.array', 'np.array', (['[[0], [0], [0], [0]]'], {}), '([[0], [0], [0], [0]])\n', (541, 563), True, 'import numpy as np\n'), ((21544, 21568), 'rospy.loginfo', 'rospy.loginfo', (['"""Exiting"""'], {}), "('Exiting')\n", (21557, 21568), False, 'import rospy\n'), ((991, 1040), 'rospy.Publisher', 'rospy.Publisher', (['"""/odom"""', 'Odometry'], {'queue_size': '(10)'}), "('/odom', Odometry, queue_size=10)\n", (1006, 1040), False, 'import rospy\n'), ((1297, 1313), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (1311, 1313), False, 'import rospy\n'), ((3428, 3444), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3442, 3444), False, 'import rospy\n'), ((3562, 3689), 'numpy.array', 'np.array', (['[[dist_front_left / d_time], [dist_front_right / d_time], [dist_back_left /\n d_time], [dist_back_right / d_time]]'], {}), '([[dist_front_left / d_time], [dist_front_right / d_time], [\n dist_back_left / d_time], [dist_back_right / d_time]])\n', (3570, 3689), True, 'import numpy as np\n'), ((3686, 3819), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 1.0], [-1.0, 1.0, 1.0, -1.0], [1.0 / (lx + ly), -1.0 / (lx +\n ly), 1.0 / (lx + ly), -1.0 / (lx + ly)]]'], {}), '([[1.0, 1.0, 1.0, 1.0], [-1.0, 1.0, 1.0, -1.0], [1.0 / (lx + ly), -\n 1.0 / (lx + ly), 1.0 / (lx + ly), -1.0 / (lx + ly)]])\n', (3694, 3819), True, 'import numpy as np\n'), ((3823, 3845), 'numpy.dot', 'np.dot', (['(B * (r / 4))', 'W'], {}), '(B * (r / 4), W)\n', (3829, 3845), True, 'import numpy as np\n'), ((3869, 3954), 'rospy.logwarn', 'rospy.logwarn', (["('curent wheel velocities %f %f %f %f ' % (W[0], W[1], W[2], W[3]))"], {}), "('curent wheel velocities %f %f %f %f ' % (W[0], W[1], W[2], W[3])\n )\n", (3882, 3954), False, 'import rospy\n'), ((4130, 4169), 'rospy.logwarn', 'rospy.logwarn', (["('robot vel_x %f' % vel_x)"], {}), "('robot vel_x 
%f' % vel_x)\n", (4143, 4169), False, 'import rospy\n'), ((4418, 4432), 'math.cos', 'cos', (['(yaw * 0.5)'], {}), '(yaw * 0.5)\n', (4421, 4432), False, 'from math import pi, cos, sin\n'), ((4446, 4460), 'math.sin', 'sin', (['(yaw * 0.5)'], {}), '(yaw * 0.5)\n', (4449, 4460), False, 'from math import pi, cos, sin\n'), ((4474, 4490), 'math.cos', 'cos', (['(pitch * 0.5)'], {}), '(pitch * 0.5)\n', (4477, 4490), False, 'from math import pi, cos, sin\n'), ((4504, 4520), 'math.sin', 'sin', (['(pitch * 0.5)'], {}), '(pitch * 0.5)\n', (4507, 4520), False, 'from math import pi, cos, sin\n'), ((4534, 4549), 'math.cos', 'cos', (['(roll * 0.5)'], {}), '(roll * 0.5)\n', (4537, 4549), False, 'from math import pi, cos, sin\n'), ((4563, 4578), 'math.sin', 'sin', (['(roll * 0.5)'], {}), '(roll * 0.5)\n', (4566, 4578), False, 'from math import pi, cos, sin\n'), ((4595, 4607), 'geometry_msgs.msg.Quaternion', 'Quaternion', ([], {}), '()\n', (4605, 4607), False, 'from geometry_msgs.msg import Quaternion, Twist\n'), ((4820, 4836), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (4834, 4836), False, 'import rospy\n'), ((5155, 5165), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (5163, 5165), False, 'from nav_msgs.msg import Odometry\n'), ((8834, 8868), 'rospy.init_node', 'rospy.init_node', (['"""roboclaw_node_f"""'], {}), "('roboclaw_node_f')\n", (8849, 8868), False, 'import rospy\n'), ((8877, 8909), 'rospy.on_shutdown', 'rospy.on_shutdown', (['self.shutdown'], {}), '(self.shutdown)\n', (8894, 8909), False, 'import rospy\n'), ((8918, 8957), 'rospy.loginfo', 'rospy.loginfo', (['"""Connecting to roboclaw"""'], {}), "('Connecting to roboclaw')\n", (8931, 8957), False, 'import rospy\n'), ((8983, 9028), 'rospy.get_param', 'rospy.get_param', (['"""~dev_front"""', '"""/dev/ttyACM0"""'], {}), "('~dev_front', '/dev/ttyACM0')\n", (8998, 9028), False, 'import rospy\n'), ((9053, 9097), 'rospy.get_param', 'rospy.get_param', (['"""~dev_back"""', '"""/dev/ttyACM1"""'], {}), "('~dev_back', 
'/dev/ttyACM1')\n", (9068, 9097), False, 'import rospy\n'), ((9174, 9205), 'rospy.logwarn', 'rospy.logwarn', (['"""after dev name"""'], {}), "('after dev name')\n", (9187, 9205), False, 'import rospy\n'), ((11461, 11507), 'roboclaw_driver.roboclaw_driver_new.ResetEncoders', 'roboclaw.ResetEncoders', (['self.address_front', 'p1'], {}), '(self.address_front, p1)\n', (11483, 11507), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((11573, 11618), 'roboclaw_driver.roboclaw_driver_new.ResetEncoders', 'roboclaw.ResetEncoders', (['self.address_back', 'p2'], {}), '(self.address_back, p2)\n', (11595, 11618), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((12143, 12162), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (12160, 12162), False, 'import rospy\n'), ((12172, 12229), 'rospy.Subscriber', 'rospy.Subscriber', (['"""cmd_vel"""', 'Twist', 'self.cmd_vel_callback'], {}), "('cmd_vel', Twist, self.cmd_vel_callback)\n", (12188, 12229), False, 'import rospy\n'), ((12265, 12338), 'rospy.logdebug', 'rospy.logdebug', (['"""dev_front %s dev_back %s"""', 'dev_name_front', 'dev_name_back'], {}), "('dev_front %s dev_back %s', dev_name_front, dev_name_back)\n", (12279, 12338), False, 'import rospy\n'), ((12347, 12383), 'rospy.logdebug', 'rospy.logdebug', (['"""baud %d"""', 'baud_rate'], {}), "('baud %d', baud_rate)\n", (12361, 12383), False, 'import rospy\n'), ((12392, 12486), 'rospy.logdebug', 'rospy.logdebug', (['"""address_front %d address_back %d"""', 'self.address_front', 'self.address_back'], {}), "('address_front %d address_back %d', self.address_front, self\n .address_back)\n", (12406, 12486), False, 'import rospy\n'), ((12490, 12536), 'rospy.logdebug', 'rospy.logdebug', (['"""max_speed %f"""', 'self.MAX_SPEED'], {}), "('max_speed %f', self.MAX_SPEED)\n", (12504, 12536), False, 'import rospy\n'), ((12545, 12603), 'rospy.logdebug', 'rospy.logdebug', (['"""ticks_per_meter %f"""', 'self.TICKS_PER_METER'], {}), "('ticks_per_meter 
%f', self.TICKS_PER_METER)\n", (12559, 12603), False, 'import rospy\n'), ((12612, 12698), 'rospy.logdebug', 'rospy.logdebug', (['"""base_width %f base_length %f"""', 'self.BASE_WIDTH', 'self.BASE_LENGTH'], {}), "('base_width %f base_length %f', self.BASE_WIDTH, self.\n BASE_LENGTH)\n", (12626, 12698), False, 'import rospy\n'), ((12757, 12794), 'rospy.loginfo', 'rospy.loginfo', (['"""Starting motor drive"""'], {}), "('Starting motor drive')\n", (12770, 12794), False, 'import rospy\n'), ((12812, 12827), 'rospy.Rate', 'rospy.Rate', (['(100)'], {}), '(100)\n', (12822, 12827), False, 'import rospy\n'), ((16528, 16547), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (16545, 16547), False, 'import rospy\n'), ((17440, 17546), 'numpy.array', 'np.array', (['[[1.0, -1.0, -(lx + ly)], [1.0, 1.0, lx + ly], [1.0, 1.0, -(lx + ly)], [1.0,\n -1.0, lx + ly]]'], {}), '([[1.0, -1.0, -(lx + ly)], [1.0, 1.0, lx + ly], [1.0, 1.0, -(lx +\n ly)], [1.0, -1.0, lx + ly]])\n', (17448, 17546), True, 'import numpy as np\n'), ((17696, 17716), 'numpy.dot', 'np.dot', (['B_inv', 'U_inv'], {}), '(B_inv, U_inv)\n', (17702, 17716), True, 'import numpy as np\n'), ((20591, 20621), 'rospy.loginfo', 'rospy.loginfo', (['"""Shutting down"""'], {}), "('Shutting down')\n", (20604, 20621), False, 'import rospy\n'), ((6232, 6347), 'rospy.logerr', 'rospy.logerr', (["('Ignoring front left encoder jump: cur %d, last %d' % (enc_front_left,\n self.last_enc_front_left))"], {}), "('Ignoring front left encoder jump: cur %d, last %d' % (\n enc_front_left, self.last_enc_front_left))\n", (6244, 6347), False, 'import rospy\n'), ((9122, 9155), 'rospy.get_param', 'rospy.get_param', (['"""~baud"""', '"""38400"""'], {}), "('~baud', '38400')\n", (9137, 9155), False, 'import rospy\n'), ((9239, 9279), 'rospy.get_param', 'rospy.get_param', (['"""~address_front"""', '"""128"""'], {}), "('~address_front', '128')\n", (9254, 9279), False, 'import rospy\n'), ((9313, 9352), 'rospy.get_param', 'rospy.get_param', 
(['"""~address_back"""', '"""129"""'], {}), "('~address_back', '129')\n", (9328, 9352), False, 'import rospy\n'), ((9542, 9580), 'rospy.logfatal', 'rospy.logfatal', (['"""Address out of range"""'], {}), "('Address out of range')\n", (9556, 9580), False, 'import rospy\n'), ((9593, 9638), 'rospy.signal_shutdown', 'rospy.signal_shutdown', (['"""Address out of range"""'], {}), "('Address out of range')\n", (9614, 9638), False, 'import rospy\n'), ((9737, 9777), 'roboclaw_driver.roboclaw_driver_new.Open', 'roboclaw.Open', (['dev_name_front', 'baud_rate'], {}), '(dev_name_front, baud_rate)\n', (9750, 9777), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((10009, 10048), 'roboclaw_driver.roboclaw_driver_new.Open', 'roboclaw.Open', (['dev_name_back', 'baud_rate'], {}), '(dev_name_back, baud_rate)\n', (10022, 10048), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((10663, 10707), 'roboclaw_driver.roboclaw_driver_new.ReadVersion', 'roboclaw.ReadVersion', (['self.address_front', 'p1'], {}), '(self.address_front, p1)\n', (10683, 10707), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((10893, 10951), 'rospy.logwarn', 'rospy.logwarn', (['"""Could not get version from front roboclaw"""'], {}), "('Could not get version from front roboclaw')\n", (10906, 10951), False, 'import rospy\n'), ((11047, 11090), 'roboclaw_driver.roboclaw_driver_new.ReadVersion', 'roboclaw.ReadVersion', (['self.address_back', 'p2'], {}), '(self.address_back, p2)\n', (11067, 11090), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((11275, 11332), 'rospy.logwarn', 'rospy.logwarn', (['"""Could not get version from back roboclaw"""'], {}), "('Could not get version from back roboclaw')\n", (11288, 11332), False, 'import rospy\n'), ((11650, 11686), 'rospy.get_param', 'rospy.get_param', (['"""~max_speed"""', '"""1.1"""'], {}), "('~max_speed', '1.1')\n", (11665, 11686), False, 'import rospy\n'), ((11725, 11769), 'rospy.get_param', 
'rospy.get_param', (['"""~ticks_per_meter"""', '"""35818"""'], {}), "('~ticks_per_meter', '35818')\n", (11740, 11769), False, 'import rospy\n'), ((11803, 11843), 'rospy.get_param', 'rospy.get_param', (['"""~base_width"""', '"""0.1762"""'], {}), "('~base_width', '0.1762')\n", (11818, 11843), False, 'import rospy\n'), ((11878, 11919), 'rospy.get_param', 'rospy.get_param', (['"""~base_length"""', '"""0.2485"""'], {}), "('~base_length', '0.2485')\n", (11893, 11919), False, 'import rospy\n'), ((11955, 11997), 'rospy.get_param', 'rospy.get_param', (['"""~wheel_radius"""', '"""0.0635"""'], {}), "('~wheel_radius', '0.0635')\n", (11970, 11997), False, 'import rospy\n'), ((12878, 12897), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (12895, 12897), False, 'import rospy\n'), ((20647, 20701), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM1', 'roboclaw.ForwardBackwardM1', (['self.address_front', '(63)', 'p1'], {}), '(self.address_front, 63, p1)\n', (20673, 20701), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((20713, 20767), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM2', 'roboclaw.ForwardBackwardM2', (['self.address_front', '(63)', 'p1'], {}), '(self.address_front, 63, p1)\n', (20739, 20767), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((20779, 20832), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM1', 'roboclaw.ForwardBackwardM1', (['self.address_back', '(63)', 'p2'], {}), '(self.address_back, 63, p2)\n', (20805, 20832), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((20844, 20897), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM2', 'roboclaw.ForwardBackwardM2', (['self.address_back', '(63)', 'p2'], {}), '(self.address_back, 63, p2)\n', (20870, 20897), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((6425, 6540), 'rospy.logerr', 'rospy.logerr', (["('Ignoring back right encoder jump: cur %d, last %d' % (enc_back_right,\n self.last_enc_back_right))"], 
{}), "('Ignoring back right encoder jump: cur %d, last %d' % (\n enc_back_right, self.last_enc_back_right))\n", (6437, 6540), False, 'import rospy\n'), ((9821, 9874), 'rospy.logfatal', 'rospy.logfatal', (['"""Could not connect to front Roboclaw"""'], {}), "('Could not connect to front Roboclaw')\n", (9835, 9874), False, 'import rospy\n'), ((9887, 9904), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (9901, 9904), False, 'import rospy\n'), ((9917, 9977), 'rospy.signal_shutdown', 'rospy.signal_shutdown', (['"""Could not connect to front Roboclaw"""'], {}), "('Could not connect to front Roboclaw')\n", (9938, 9977), False, 'import rospy\n'), ((10092, 10144), 'rospy.logfatal', 'rospy.logfatal', (['"""Could not connect to back Roboclaw"""'], {}), "('Could not connect to back Roboclaw')\n", (10106, 10144), False, 'import rospy\n'), ((10157, 10174), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (10171, 10174), False, 'import rospy\n'), ((10187, 10246), 'rospy.signal_shutdown', 'rospy.signal_shutdown', (['"""Could not connect to back Roboclaw"""'], {}), "('Could not connect to back Roboclaw')\n", (10208, 10246), False, 'import rospy\n'), ((10750, 10805), 'rospy.logwarn', 'rospy.logwarn', (['"""Problem getting front roboclaw version"""'], {}), "('Problem getting front roboclaw version')\n", (10763, 10805), False, 'import rospy\n'), ((10818, 10835), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (10832, 10835), False, 'import rospy\n'), ((11133, 11187), 'rospy.logwarn', 'rospy.logwarn', (['"""Problem getting back roboclaw version"""'], {}), "('Problem getting back roboclaw version')\n", (11146, 11187), False, 'import rospy\n'), ((11200, 11217), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (11214, 11217), False, 'import rospy\n'), ((12994, 13053), 'rospy.loginfo', 'rospy.loginfo', (['"""Did not get command for 1 second, stopping"""'], {}), "('Did not get command for 1 second, stopping')\n", (13007, 13053), False, 'import 
rospy\n'), ((13620, 13662), 'roboclaw_driver.roboclaw_driver_new.ReadEncM1', 'roboclaw.ReadEncM1', (['self.address_front', 'p1'], {}), '(self.address_front, p1)\n', (13638, 13662), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((13915, 13957), 'roboclaw_driver.roboclaw_driver_new.ReadEncM2', 'roboclaw.ReadEncM2', (['self.address_front', 'p1'], {}), '(self.address_front, p1)\n', (13933, 13957), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((14196, 14237), 'roboclaw_driver.roboclaw_driver_new.ReadEncM1', 'roboclaw.ReadEncM1', (['self.address_back', 'p2'], {}), '(self.address_back, p2)\n', (14214, 14237), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((14490, 14531), 'roboclaw_driver.roboclaw_driver_new.ReadEncM2', 'roboclaw.ReadEncM2', (['self.address_back', 'p2'], {}), '(self.address_back, p2)\n', (14508, 14531), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((14835, 14901), 'rospy.logdebug', 'rospy.logdebug', (["(' Encoders %d %d %d %d' % (enc1, enc2, enc3, enc4))"], {}), "(' Encoders %d %d %d %d' % (enc1, enc2, enc3, enc4))\n", (14849, 14901), False, 'import rospy\n'), ((19037, 19079), 'roboclaw_driver.roboclaw_driver_new.ReadError', 'roboclaw.ReadError', (['self.address_front', 'p1'], {}), '(self.address_front, p1)\n', (19055, 19079), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((19108, 19149), 'roboclaw_driver.roboclaw_driver_new.ReadError', 'roboclaw.ReadError', (['self.address_back', 'p2'], {}), '(self.address_back, p2)\n', (19126, 19149), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((19193, 19242), 'rospy.logwarn', 'rospy.logwarn', (['"""Diagnostics OSError: %d"""', 'e.errno'], {}), "('Diagnostics OSError: %d', e.errno)\n", (19206, 19242), False, 'import rospy\n'), ((19255, 19272), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (19269, 19272), False, 'import rospy\n'), ((20360, 20409), 'rospy.logwarn', 'rospy.logwarn', 
(['"""Diagnostics OSError: %d"""', 'e.errno'], {}), "('Diagnostics OSError: %d', e.errno)\n", (20373, 20409), False, 'import rospy\n'), ((20422, 20439), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (20436, 20439), False, 'import rospy\n'), ((20933, 20983), 'rospy.logerr', 'rospy.logerr', (['"""Shutdown did not work trying again"""'], {}), "('Shutdown did not work trying again')\n", (20945, 20983), False, 'import rospy\n'), ((6620, 6738), 'rospy.logerr', 'rospy.logerr', (["('Ignoring front right encoder jump: cur %d, last %d' % (enc_front_right,\n self.last_enc_front_right))"], {}), "('Ignoring front right encoder jump: cur %d, last %d' % (\n enc_front_right, self.last_enc_front_right))\n", (6632, 6738), False, 'import rospy\n'), ((13095, 13140), 'roboclaw_driver.roboclaw_driver_new.ForwardM1', 'roboclaw.ForwardM1', (['self.address_front', '(0)', 'p1'], {}), '(self.address_front, 0, p1)\n', (13113, 13140), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((13160, 13205), 'roboclaw_driver.roboclaw_driver_new.ForwardM2', 'roboclaw.ForwardM2', (['self.address_front', '(0)', 'p1'], {}), '(self.address_front, 0, p1)\n', (13178, 13205), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((13225, 13269), 'roboclaw_driver.roboclaw_driver_new.ForwardM1', 'roboclaw.ForwardM1', (['self.address_back', '(0)', 'p2'], {}), '(self.address_back, 0, p2)\n', (13243, 13269), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((13289, 13333), 'roboclaw_driver.roboclaw_driver_new.ForwardM2', 'roboclaw.ForwardM2', (['self.address_back', '(0)', 'p2'], {}), '(self.address_back, 0, p2)\n', (13307, 13333), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((13763, 13810), 'rospy.logwarn', 'rospy.logwarn', (['"""ReadEncM1 OSError: %d"""', 'e.errno'], {}), "('ReadEncM1 OSError: %d', e.errno)\n", (13776, 13810), False, 'import rospy\n'), ((13827, 13844), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', 
(13841, 13844), False, 'import rospy\n'), ((14058, 14105), 'rospy.logwarn', 'rospy.logwarn', (['"""ReadEncM2 OSError: %d"""', 'e.errno'], {}), "('ReadEncM2 OSError: %d', e.errno)\n", (14071, 14105), False, 'import rospy\n'), ((14122, 14139), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (14136, 14139), False, 'import rospy\n'), ((14338, 14385), 'rospy.logwarn', 'rospy.logwarn', (['"""ReadEncM3 OSError: %d"""', 'e.errno'], {}), "('ReadEncM3 OSError: %d', e.errno)\n", (14351, 14385), False, 'import rospy\n'), ((14402, 14419), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (14416, 14419), False, 'import rospy\n'), ((14632, 14679), 'rospy.logwarn', 'rospy.logwarn', (['"""ReadEncM4 OSError: %d"""', 'e.errno'], {}), "('ReadEncM4 OSError: %d', e.errno)\n", (14645, 14679), False, 'import rospy\n'), ((14696, 14713), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (14710, 14713), False, 'import rospy\n'), ((21017, 21071), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM1', 'roboclaw.ForwardBackwardM1', (['self.address_front', '(63)', 'p1'], {}), '(self.address_front, 63, p1)\n', (21043, 21071), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((21087, 21141), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM2', 'roboclaw.ForwardBackwardM2', (['self.address_front', '(63)', 'p1'], {}), '(self.address_front, 63, p1)\n', (21113, 21141), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((21157, 21210), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM1', 'roboclaw.ForwardBackwardM1', (['self.address_back', '(63)', 'p2'], {}), '(self.address_back, 63, p2)\n', (21183, 21210), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((21226, 21279), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM2', 'roboclaw.ForwardBackwardM2', (['self.address_back', '(63)', 'p2'], {}), '(self.address_back, 63, p2)\n', (21252, 21279), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), 
((6814, 6926), 'rospy.logerr', 'rospy.logerr', (["('Ignoring back left encoder jump: cur %d, last %d' % (enc_back_left, self.\n last_enc_back_left))"], {}), "('Ignoring back left encoder jump: cur %d, last %d' % (\n enc_back_left, self.last_enc_back_left))\n", (6826, 6926), False, 'import rospy\n'), ((13390, 13420), 'rospy.logerr', 'rospy.logerr', (['"""Could not stop"""'], {}), "('Could not stop')\n", (13402, 13420), False, 'import rospy\n'), ((13441, 13458), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (13455, 13458), False, 'import rospy\n'), ((15162, 15216), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM1', 'roboclaw.ForwardBackwardM1', (['self.address_front', '(63)', 'p1'], {}), '(self.address_front, 63, p1)\n', (15188, 15216), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((15240, 15294), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM2', 'roboclaw.ForwardBackwardM2', (['self.address_front', '(63)', 'p1'], {}), '(self.address_front, 63, p1)\n', (15266, 15294), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((15318, 15371), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM1', 'roboclaw.ForwardBackwardM1', (['self.address_back', '(63)', 'p2'], {}), '(self.address_back, 63, p2)\n', (15344, 15371), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((15395, 15448), 'roboclaw_driver.roboclaw_driver_new.ForwardBackwardM2', 'roboclaw.ForwardBackwardM2', (['self.address_back', '(63)', 'p2'], {}), '(self.address_back, 63, p2)\n', (15421, 15448), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((15498, 15571), 'rospy.logwarn', 'rospy.logwarn', (["('wheel velocities %f %f %f %f ' % (U[0], U[1], U[2], U[3]))"], {}), "('wheel velocities %f %f %f %f ' % (U[0], U[1], U[2], U[3]))\n", (15511, 15571), False, 'import rospy\n'), ((16271, 16318), 'rospy.logwarn', 'rospy.logwarn', (['"""SpeedM1M2 OSError: %d"""', 'e.errno'], {}), "('SpeedM1M2 OSError: %d', e.errno)\n", 
(16284, 16318), False, 'import rospy\n'), ((16339, 16356), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (16353, 16356), False, 'import rospy\n'), ((21328, 21373), 'rospy.logerr', 'rospy.logerr', (['"""Could not shutdown motors!!!!"""'], {}), "('Could not shutdown motors!!!!')\n", (21340, 21373), False, 'import rospy\n'), ((21390, 21407), 'rospy.logdebug', 'rospy.logdebug', (['e'], {}), '(e)\n', (21404, 21407), False, 'import rospy\n'), ((12916, 12935), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (12933, 12935), False, 'import rospy\n'), ((19529, 19584), 'roboclaw_driver.roboclaw_driver_new.ReadMainBatteryVoltage', 'roboclaw.ReadMainBatteryVoltage', (['self.address_front', 'p1'], {}), '(self.address_front, p1)\n', (19560, 19584), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((19644, 19700), 'roboclaw_driver.roboclaw_driver_new.ReadLogicBatteryVoltage', 'roboclaw.ReadLogicBatteryVoltage', (['self.address_front', 'p1'], {}), '(self.address_front, p1)\n', (19676, 19700), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((19755, 19796), 'roboclaw_driver.roboclaw_driver_new.ReadTemp', 'roboclaw.ReadTemp', (['self.address_front', 'p1'], {}), '(self.address_front, p1)\n', (19772, 19796), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((19851, 19893), 'roboclaw_driver.roboclaw_driver_new.ReadTemp2', 'roboclaw.ReadTemp2', (['self.address_front', 'p1'], {}), '(self.address_front, p1)\n', (19869, 19893), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((19952, 20006), 'roboclaw_driver.roboclaw_driver_new.ReadMainBatteryVoltage', 'roboclaw.ReadMainBatteryVoltage', (['self.address_back', 'p2'], {}), '(self.address_back, p2)\n', (19983, 20006), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((20065, 20120), 'roboclaw_driver.roboclaw_driver_new.ReadLogicBatteryVoltage', 'roboclaw.ReadLogicBatteryVoltage', (['self.address_back', 'p2'], {}), 
'(self.address_back, p2)\n', (20097, 20120), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((20174, 20214), 'roboclaw_driver.roboclaw_driver_new.ReadTemp', 'roboclaw.ReadTemp', (['self.address_back', 'p2'], {}), '(self.address_back, p2)\n', (20191, 20214), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n'), ((20268, 20309), 'roboclaw_driver.roboclaw_driver_new.ReadTemp2', 'roboclaw.ReadTemp2', (['self.address_back', 'p2'], {}), '(self.address_back, p2)\n', (20286, 20309), True, 'import roboclaw_driver.roboclaw_driver_new as roboclaw\n')] |
import numpy as np
from scipy.integrate import odeint
from bokeh.plotting import figure, show, output_file
# Classic chaotic parameter choices for the Lorenz system.
sigma = 10  # Prandtl number
rho = 28  # Rayleigh number
beta = 8.0/3  # geometric factor
theta = 3 * np.pi / 4  # rotation angle used to project (x, y) onto xprime below
def lorenz(xyz, t, sigma=10, rho=28, beta=8.0 / 3):
    """Right-hand side of the Lorenz system of ODEs.

    Parameters
    ----------
    xyz : sequence of 3 floats
        Current state (x, y, z).
    t : float
        Time; unused, but required by scipy.integrate.odeint's signature.
    sigma, rho, beta : float, optional
        Lorenz parameters. Defaults are the classic chaotic values used by
        the module-level constants, so existing two-argument calls behave
        exactly as before.

    Returns
    -------
    list of 3 floats
        Time derivatives [dx/dt, dy/dt, dz/dt].
    """
    x, y, z = xyz
    x_dot = sigma * (y - x)
    y_dot = x * rho - x * z - y
    z_dot = x * y - beta * z
    return [x_dot, y_dot, z_dot]
# Initial condition and time grid for the integration.
initial = (-10, -7, 35)
t = np.arange(0, 100, 0.006)
# Integrate the Lorenz ODEs over the time grid; rows are (x, y, z) states.
solution = odeint(lorenz, initial, t)
x = solution[:, 0]
y = solution[:, 1]
z = solution[:, 2]
# Rotate the trajectory in the x-y plane by `theta` for a nicer 2-D projection.
xprime = np.cos(theta) * x - np.sin(theta) * y
colors = ["#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5", "#08519C", "#08306B",]
output_file("lorenz.html", title="lorenz.py example")
p = figure(title="lorenz example")
# Split the trajectory into 7 segments, one per color, and draw them together.
p.multi_line(np.array_split(xprime, 7), np.array_split(z, 7),
             line_color=colors, line_alpha=0.8, line_width=1.5)
show(p) # open a browser
| [
"bokeh.plotting.figure",
"scipy.integrate.odeint",
"bokeh.plotting.output_file",
"numpy.sin",
"numpy.arange",
"bokeh.plotting.show",
"numpy.cos",
"numpy.array_split"
] | [((353, 377), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(0.006)'], {}), '(0, 100, 0.006)\n', (362, 377), True, 'import numpy as np\n'), ((390, 416), 'scipy.integrate.odeint', 'odeint', (['lorenz', 'initial', 't'], {}), '(lorenz, initial, t)\n', (396, 416), False, 'from scipy.integrate import odeint\n'), ((612, 665), 'bokeh.plotting.output_file', 'output_file', (['"""lorenz.html"""'], {'title': '"""lorenz.py example"""'}), "('lorenz.html', title='lorenz.py example')\n", (623, 665), False, 'from bokeh.plotting import figure, show, output_file\n'), ((671, 701), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""lorenz example"""'}), "(title='lorenz example')\n", (677, 701), False, 'from bokeh.plotting import figure, show, output_file\n'), ((828, 835), 'bokeh.plotting.show', 'show', (['p'], {}), '(p)\n', (832, 835), False, 'from bokeh.plotting import figure, show, output_file\n'), ((716, 741), 'numpy.array_split', 'np.array_split', (['xprime', '(7)'], {}), '(xprime, 7)\n', (730, 741), True, 'import numpy as np\n'), ((743, 763), 'numpy.array_split', 'np.array_split', (['z', '(7)'], {}), '(z, 7)\n', (757, 763), True, 'import numpy as np\n'), ((484, 497), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (490, 497), True, 'import numpy as np\n'), ((504, 517), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (510, 517), True, 'import numpy as np\n')] |
import numpy as np
from active_reward_learning.util.helpers import (
argmax_over_index_set,
get_deterministic_policy_matrix,
jaccard_index,
mean_jaccard_distance,
)
def test_argmax_over_index_set():
    """The argmax is restricted to the given index subset (list or set)."""
    values = [0, 1, 1.5, 2, 3, 3.5, 4]
    for subset, expected in (([0, 1, 2], [2]), ([1, 2, 4, 6], [6])):
        assert argmax_over_index_set(values, subset) == expected
        assert argmax_over_index_set(values, set(subset)) == expected
def test_get_deterministic_policy_matrix():
    """Per-state action indices are one-hot encoded into a policy matrix."""
    for actions in ([0, 0, 3, 4, 2], np.array([0, 1, 1, 4, 2])):
        policy = get_deterministic_policy_matrix(actions, 5)
        # Row i of the expected matrix is one-hot at column actions[i].
        expected = np.eye(5)[np.asarray(actions)]
        assert np.all(policy == expected)
def test_jaccard_index():
    """Jaccard index is |A & B| / |A | B|."""
    left = {1, 2, 3, 4, 7}
    right = {1, 4, 5, 7, 9}
    # Intersection {1, 4, 7} has 3 elements; the union has 7.
    assert jaccard_index(left, right) == 3 / 7
def test_mean_jaccard_distance():
    """Mean Jaccard distance over all pairs; a single set yields 0."""
    a = {1, 2, 3, 4, 7}
    b = {1, 4, 5, 7, 9}
    c = {1, 3, 7, 8}
    expected = ((1 - 3 / 7) + (1 - 2 / 7) + (1 - 3 / 6)) / 3
    assert mean_jaccard_distance([a, b, c]) == expected
    assert mean_jaccard_distance([a]) == 0
| [
"active_reward_learning.util.helpers.get_deterministic_policy_matrix",
"active_reward_learning.util.helpers.jaccard_index",
"active_reward_learning.util.helpers.argmax_over_index_set",
"active_reward_learning.util.helpers.mean_jaccard_distance",
"numpy.array",
"numpy.all"
] | [((556, 607), 'active_reward_learning.util.helpers.get_deterministic_policy_matrix', 'get_deterministic_policy_matrix', (['[0, 0, 3, 4, 2]', '(5)'], {}), '([0, 0, 3, 4, 2], 5)\n', (587, 607), False, 'from active_reward_learning.util.helpers import argmax_over_index_set, get_deterministic_policy_matrix, jaccard_index, mean_jaccard_distance\n'), ((628, 728), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0,\n 1, 0, 0]]'], {}), '([[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1\n ], [0, 0, 1, 0, 0]])\n', (636, 728), True, 'import numpy as np\n'), ((820, 851), 'numpy.all', 'np.all', (['(policy == target_policy)'], {}), '(policy == target_policy)\n', (826, 851), True, 'import numpy as np\n'), ((947, 1047), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1], [0, 0,\n 1, 0, 0]]'], {}), '([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1\n ], [0, 0, 1, 0, 0]])\n', (955, 1047), True, 'import numpy as np\n'), ((1139, 1170), 'numpy.all', 'np.all', (['(policy == target_policy)'], {}), '(policy == target_policy)\n', (1145, 1170), True, 'import numpy as np\n'), ((283, 312), 'active_reward_learning.util.helpers.argmax_over_index_set', 'argmax_over_index_set', (['l', 'idx'], {}), '(l, idx)\n', (304, 312), False, 'from active_reward_learning.util.helpers import argmax_over_index_set, get_deterministic_policy_matrix, jaccard_index, mean_jaccard_distance\n'), ((407, 436), 'active_reward_learning.util.helpers.argmax_over_index_set', 'argmax_over_index_set', (['l', 'idx'], {}), '(l, idx)\n', (428, 436), False, 'from active_reward_learning.util.helpers import argmax_over_index_set, get_deterministic_policy_matrix, jaccard_index, mean_jaccard_distance\n'), ((897, 922), 'numpy.array', 'np.array', (['[0, 1, 1, 4, 2]'], {}), '([0, 1, 1, 4, 2])\n', (905, 922), True, 'import numpy as np\n'), ((1258, 1277), 
'active_reward_learning.util.helpers.jaccard_index', 'jaccard_index', (['A', 'B'], {}), '(A, B)\n', (1271, 1277), False, 'from active_reward_learning.util.helpers import argmax_over_index_set, get_deterministic_policy_matrix, jaccard_index, mean_jaccard_distance\n'), ((1413, 1445), 'active_reward_learning.util.helpers.mean_jaccard_distance', 'mean_jaccard_distance', (['[A, B, C]'], {}), '([A, B, C])\n', (1434, 1445), False, 'from active_reward_learning.util.helpers import argmax_over_index_set, get_deterministic_policy_matrix, jaccard_index, mean_jaccard_distance\n'), ((1521, 1547), 'active_reward_learning.util.helpers.mean_jaccard_distance', 'mean_jaccard_distance', (['[A]'], {}), '([A])\n', (1542, 1547), False, 'from active_reward_learning.util.helpers import argmax_over_index_set, get_deterministic_policy_matrix, jaccard_index, mean_jaccard_distance\n')] |
import numpy as np
import random
import copy
# Adding Hardcoding of the Geometric Plotting Order, Identity, and Axis Location
# Order to Plot (Probe Channel Name), keyed by bird ID.
all_chan_map = {
    'z007': [2, 1, 10, 9, 18, 17, 4, 3, 12, 11, 20, 19, 5, 6, 13, 14, 21,
             22, 7, 8, 15, 16, 23, 24, 29, 30, 31, 32, 25, 26, 27, 28],
    'z020': [6, 5, 9, 15, 3, 4, 10, 16, 1, 7, 13, 14, 2, 8, 12, 11],
    'z017': [1, 7, 13, 14, 3, 4, 10, 16, 2, 8, 12, 11, 6, 5, 9, 15],
}

# Order to Plot on a 4x4 (OpenEphys Designation), keyed by bird ID.
all_plot_maps = {
    'z007': [1, 0, 9, 8, 17, 16, 3, 2, 11, 10, 19, 18, 4, 5, 12, 13, 20,
             21, 6, 7, 14, 15, 22, 23, 28, 29, 30, 31, 24, 25, 26, 27],
    'z020': [13, 12, 6, 5, 11, 15, 1, 7, 8, 14, 0, 4, 10, 9, 3, 2],
    'z017': [8, 14, 0, 4, 11, 15, 1, 7, 10, 9, 3, 2, 13, 12, 6, 5],
}

# Location of Axis in plotting order, keyed by bird ID.
all_axis_orders = {
    'z007': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
             17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 30, 31, 32, 33],
    'z020': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
    'z017': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
}

# Channels to be Excluded from all Analysis, keyed by bird ID.
all_bad_channels = {
    'z007': [24, 28],
    'z020': [1],
    'z017': [],
}

# Bird's Default Class Labels, keyed by bird ID.
all_label_instructions = {
    'z007': [1, 2, 3, 4, 5, 'I'],
    'z020': [1, 2, 3, 4, 'I'],
    'z017': [1, 2, 3, 4, 5, 6, 7],
}

# Bird's Default drop temps, keyed by bird ID.
all_drop_temps = {
    'z007': [6],
    'z020': [5],
    'z017': [7],
}
def balance_classes(neural_data, safe=True, seed=False):
    """Randomly downsample every class to the size of the smallest class.

    (Approach 1: all classes set to equal size.)

    Parameters
    ----------
    neural_data : list | (classes, instances, channels, samples)
        Neural data to be used in PCA-PSD analysis. Each class entry must
        support fancy indexing with a list of instance indices
        (e.g. a numpy array).
    safe : bool, optional
        If True (default), operate on a deep copy so the caller's data is
        left untouched; if False, rebalance ``neural_data`` in place.
    seed : int, optional
        Seed for the pseudo-random module, re-applied before each class is
        sampled; defaults to not setting the seed.

    Returns
    -------
    balanced_data : list | (classes, instances, channels, samples)
        Neural data with every class reduced to the same, smallest number
        of randomly selected instances.
    """
    if safe:
        balanced_data = copy.deepcopy(neural_data)  # protect the caller's data
    else:
        balanced_data = neural_data  # plain alias: caller's data is modified
    group_sizes = [len(events) for events in neural_data]  # instances per class
    minimum = min(group_sizes)  # size of the smallest class
    # Indices of every class larger than the smallest one.
    focus_mask = [index for index, value in enumerate(group_sizes) if value > minimum]
    for needs_help in focus_mask:
        big = len(neural_data[needs_help])
        if seed:
            random.seed(seed)
        selected = random.sample(range(0, big), minimum)  # instances to keep
        balanced_data[needs_help] = neural_data[needs_help][selected]
    return balanced_data
def get_priors(num_labels):
    """Return a uniform prior distribution over ``num_labels`` classes."""
    uniform = np.full((num_labels,), 1 / num_labels)
    return uniform
| [
"copy.deepcopy",
"numpy.zeros",
"numpy.unique",
"random.seed"
] | [((3612, 3635), 'numpy.zeros', 'np.zeros', (['(num_labels,)'], {}), '((num_labels,))\n', (3620, 3635), True, 'import numpy as np\n'), ((2812, 2838), 'copy.deepcopy', 'copy.deepcopy', (['neural_data'], {}), '(neural_data)\n', (2825, 2838), False, 'import copy\n'), ((3056, 3078), 'numpy.unique', 'np.unique', (['group_sizes'], {}), '(group_sizes)\n', (3065, 3078), True, 'import numpy as np\n'), ((3327, 3344), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3338, 3344), False, 'import random\n')] |
import os
import time
import random
import numpy as np
import os.path as osp
import tensorflow as tf
from baselines import logger
from collections import deque
from baselines.common import explained_variance
from baselines.common.runners import AbstractEnvRunner
from copy import deepcopy
from ppo2ttifrutti_policies import filmInit
from ppo2ttifrutti_agent import nenvs
def disarrange(a, axis=-1):
    """
    Shuffle `a` in-place along the given axis.

    Every one-dimensional slice taken along `axis` is permuted
    independently with numpy.random.shuffle. `a` itself is modified;
    the returned array is a swapped-axes view of it.
    """
    view = a.swapaxes(axis, -1)
    # `view` shares memory with `a`, so shuffling its last axis shuffles `a`.
    for idx in np.ndindex(view.shape[:-1]):
        np.random.shuffle(view[idx])
    return view
class Model(object):
    """Builds the PPO act/train graph pair and exposes train/step/value/save/load.

    Constructs two policy instances sharing variables (reuse=True for the
    training model), the PPO clipped surrogate loss, and an Adam update op.
    All ops live in the default TF session captured at construction time.
    """
    def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
                nsteps, ent_coef, vf_coef, max_grad_norm):
        sess = tf.get_default_session()
        # FiLM conditioning object shared between the act and train models.
        filmObj = filmInit(sess, nenvs)
        # Acting model: batch of nbatch_act observations, 1 step.
        act_model = policy(sess, ob_space, ac_space, nbatch_act, 1, filmObj, reuse=False,st = "act")
        # print('Shape of obs in the model is ',ob_space.shape)
        # Training model reuses the act model's variables (reuse=True).
        train_model = policy(sess, ob_space, ac_space, nbatch_train, nsteps, filmObj, reuse=True,st = "train")
        self.filmObj = filmObj
        # Placeholders: actions, advantages, returns, old log-probs/value
        # predictions (for the PPO ratio and value clipping), LR, clip range.
        A = train_model.pdtype.sample_placeholder([None])
        ADV = tf.placeholder(tf.float32, [None])
        R = tf.placeholder(tf.float32, [None])
        OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
        OLDVPRED = tf.placeholder(tf.float32, [None])
        LR = tf.placeholder(tf.float32, [])
        CLIPRANGE = tf.placeholder(tf.float32, [])
        neglogpac = train_model.pd.neglogp(A)
        entropy = tf.reduce_mean(train_model.pd.entropy())
        # Clipped value loss: penalize value updates that move further than
        # CLIPRANGE from the old prediction, taking the max of both losses.
        vpred = train_model.vf
        vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
        vf_losses1 = tf.square(vpred - R)
        vf_losses2 = tf.square(vpredclipped - R)
        vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
        # PPO clipped surrogate objective on the probability ratio.
        ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
        pg_losses = -ADV * ratio
        pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
        pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
        # Diagnostics: approximate KL to old policy, fraction of clipped ratios.
        approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
        clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
        loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
        with tf.variable_scope('model'):
            params = tf.trainable_variables()
        grads = tf.gradients(loss, params)
        if max_grad_norm is not None:
            # Global-norm gradient clipping, as in standard PPO2.
            grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
        grads = list(zip(grads, params))
        print('print this before using the optimizer')
        trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
        _train = trainer.apply_gradients(grads)
        # NOTE(review): reinit is defined here but never assigned to self or
        # called inside this class — apparently dead; confirm before removing.
        def reinit():
            filmObj.reinit()
        def train(idx,lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
            """Run one optimization step for env index `idx`; returns the loss stats."""
            # Normalize advantages per minibatch (standard PPO trick).
            advs = returns - values
            advs = (advs - advs.mean()) / (advs.std() + 1e-8)
            td_map = {train_model.X:obs,train_model.index :[idx], A:actions, ADV:advs, R:returns, LR:lr,
                    CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values}
            if states is not None:
                # Recurrent policies also need initial states and done masks.
                td_map[train_model.S] = states
                td_map[train_model.M] = masks
            # [:-1] drops the _train op's (None) result, keeping only the stats.
            return sess.run(
                [pg_loss, vf_loss, entropy, approxkl, clipfrac, _train],
                td_map
            )[:-1]
        self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
        def save(save_path):
            # Saves all graph variables to '<save_path>_tf'.
            saver = tf.train.Saver()
            saver.save(sess, save_path + '_tf')
        def load(load_path):
            # Restores variables previously written by save().
            saver = tf.train.Saver()
            print('Loading ' + load_path + '_tf')
            saver.restore(sess, load_path + '_tf')
        self.train = train
        self.train_model = train_model
        self.act_model = act_model
        self.step = act_model.step
        self.value = act_model.value
        self.initial_state = act_model.initial_state
        self.save = save
        self.load = load
        tf.global_variables_initializer().run(session=sess) #pylint: disable=E1101
class Runner(AbstractEnvRunner):
    """Collects nsteps-long rollouts from a vectorized env and computes GAE returns."""
    def __init__(self, *, env, model, nsteps, total_timesteps, gamma, lam):
        super().__init__(env=env, model=model, nsteps=nsteps)
        self.lam = lam        # GAE lambda
        self.gamma = gamma    # discount factor
        self.total_timesteps = total_timesteps
        self.current_timestep = 0
    def run(self, update):
        """Collect one rollout; `update` seeds the per-update data augmentation.

        Returns (obs, returns, dones, actions, values, neglogpacs, states,
        epinfos) as stacked per-step arrays (not flattened across envs).
        """
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
        mb_states = self.states
        epinfos = []
        # Data augmentation:
        # * Grayscale mode just shifts all the input values.
        # * Color chooses an order to apply the different operations and randomly remove some of them.
        # Inspired from: https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py
        use_data_augmentation = True
        if use_data_augmentation:
            nbatch = self.env.num_envs
            nh, nw, nc = self.env.observation_space.shape
            ob_shape = (nbatch, nh, nw, nc)
            # Strategies 2-5 (saturation/brightness/contrast/hue) only make
            # sense for 3-channel input; grayscale keeps only strategy 1.
            ordered_strategies = np.arange(1, 6 if nc == 3 else 2)
            np.random.shuffle(ordered_strategies)
            # Randomly drop some strategies (keep each with prob 1/3, or 1/2 for grayscale).
            ordered_strategies = [i for i in ordered_strategies if random.randint(0, 2 if nc == 3 else 1) == 0]
            # Build a small TF graph that maps raw uint8 obs -> augmented uint8 obs.
            X = tf.placeholder(dtype = tf.uint8, shape = ob_shape)
            augment = tf.cast(X, tf.float32) / 255.
            # Constant brightness shift in [-10, 10]/255, fixed for this rollout.
            c = (random.randint(0, 20) - 10) / 255.
            #print('Data augmentation: %d' % c)
            C = tf.constant(c)
            for i in ordered_strategies:
                if i == 1:
                    augment = tf.clip_by_value(tf.add(augment, C), 0., 1.)
                elif i == 2:
                    augment = tf.image.random_saturation(augment, lower=0.5, upper=1.5, seed=update)
                elif i == 3:
                    augment = tf.image.random_brightness(augment, max_delta=32./255., seed=update)
                elif i == 4:
                    augment = tf.image.random_contrast(augment, lower=0.5, upper=1.5, seed=update)
                elif i == 5:
                    augment = tf.image.random_hue(augment, max_delta=0.2, seed=update)
            augment = tf.cast(augment * 255., tf.uint8)
        for s in range(self.nsteps):
            self.current_timestep = self.current_timestep + 1
            # print('observations',self.obs.shape)
            # print('states',self.states)
            # print('dones',self.dones)
            # print (self.obs.shape)
            # Per-env stepping: the model is queried once per env with a
            # batch of 1 so each env can carry its own FiLM/state index.
            # NOTE(review): np.ndarray(..., dtype=np.int) is uninitialized and
            # integer-typed — storing `val` here truncates the critic's float
            # value prediction to an int; confirm this is intended.
            actions = np.ndarray(self.env.num_envs,dtype = np.int)
            values = np.ndarray(self.env.num_envs,dtype = np.int)
            states_mine = []
            neglogpacs = np.ndarray(self.env.num_envs,dtype = np.int)
            for j in range(self.env.num_envs):
                if (self.states is not None):
                    # ob1 = deepcopy(self.obs)
                    ob1 = self.obs
                    ob1 = np.expand_dims(ob1[j,:,:,:], axis=0)
                    act,val,sta,neg = self.model.step(ob1,j,self.states[j],self.dones[j])
                else:
                    # ob1 = deepcopy(self.obs)
                    ob1 = self.obs
                    ob1 = np.expand_dims(ob1[j,:,:,:], axis=0)
                    act,val,sta,neg = self.model.step(ob1,j,self.states,self.dones[j])
                actions[j] = act
                values[j] = val
                if (self.states is not None):
                    # NOTE(review): this appends to self.states every step for
                    # every env, so the list grows nsteps*nenvs entries per
                    # rollout while indexing above uses self.states[j] — verify.
                    self.states.append(sta)
                neglogpacs[j] = neg
            # actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
            # print(actions)
            # print(type(actions))
            # print('shape of actions in runner.run is',actions.shape)
            # print('shape of values in runner.run is',values.shape)
            # print('shape of states is',self.states.shape)
            # print('shape of neg is',neglogpacs.shape)
            mb_obs.append(self.obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogpacs.append(neglogpacs)
            mb_dones.append(self.dones)
            self.obs[:], rewards, self.dones, infos = self.env.step(actions)
            if use_data_augmentation:
                # Replace the stored observations with their augmented version.
                self.obs = tf.get_default_session().run(augment, {X:self.obs})
            # Attempt to incentivize good behavior in Sonic, like catching rings, killing ennemies, jumping on TVs.
            # But not too much since that's only for meta-learning. Follow a polynomial decay so that at the end
            # the model is trained for the reward function that is used for learning. Hopefully that can speed up
            # convergence and increase total reward (agent it less likely to die with more rings, fewer ennemies).
            #if infos is not None and 'rings' in infos[0]:# and 'score' in infos[0]: # Metalearning only.
            #    poly_decay_rate = 1.5 * (1. - self.current_timestep / self.total_timesteps)**0.9
            #    for i in range(len(infos)):
            #        rewards[i] += infos[i]['rings'] * 0.001 * poly_decay_rate
            #        #rewards[i] += infos[i]['score'] * 0.001 * poly_decay_rate
            for info in infos:
                maybeepinfo = info.get('episode')
                if maybeepinfo: epinfos.append(maybeepinfo)
            mb_rewards.append(rewards)
        #batch of steps to batch of rollouts
        mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
        # print('the shape of mb_obs in runner is',mb_obs.shape)
        # print('the shape of flattened mb_obs in runner is',sf01(mb_obs).shape)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        mb_dones = np.asarray(mb_dones, dtype=np.bool)
        # Bootstrap value for the state following the last collected step,
        # again queried per env (same int-truncation caveat as above).
        last_values = np.ndarray(self.env.num_envs,dtype = np.int)
        for j in range(self.env.num_envs):
            if (self.states is not None):
                ob1 = deepcopy(self.obs)
                ob1 = np.expand_dims(ob1[j,:,:,:], axis=0)
                val = self.model.value(ob1,j,self.states[j],self.dones[j])
            else:
                ob1 = deepcopy(self.obs)
                ob1 = np.expand_dims(ob1[j,:,:,:], axis=0)
                val = self.model.value(ob1,j,self.states,self.dones[j])
            last_values[j] = val
        # last_values = self.model.value(self.obs, self.states, self.dones)
        # print('the shape of values is ',last_values.shape)
        # print('the values are',last_values)
        #discount/bootstrap off value fn
        # Generalized Advantage Estimation, computed backwards in time.
        mb_returns = np.zeros_like(mb_rewards)
        mb_advs = np.zeros_like(mb_rewards)
        lastgaelam = 0
        for t in reversed(range(self.nsteps)):
            if t == self.nsteps - 1:
                nextnonterminal = 1.0 - self.dones
                nextvalues = last_values
            else:
                nextnonterminal = 1.0 - mb_dones[t+1]
                nextvalues = mb_values[t+1]
            # TD residual, zeroed across episode boundaries by nextnonterminal.
            delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
            mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
        mb_returns = mb_advs + mb_values
        # return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)), mb_states, epinfos)
        return (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs,mb_states, epinfos)
def sf01(arr):
    """
    Swap axes 0 and 1 of *arr*, then collapse them into a single
    leading axis (steps x envs -> one batch dimension).
    """
    swapped = arr.swapaxes(0, 1)
    first, second, *trailing = arr.shape
    return swapped.reshape(first * second, *trailing)
def constfn(val):
    """Return a function that ignores its argument and always yields *val*.

    Used so a fixed hyperparameter and a schedule share the same call shape.
    """
    return lambda _: val
def learn(*, policy, env, nsteps, total_timesteps, ent_coef, lr,
            vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
            log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
            save_interval=0, load_path=None, log_path = '', train=True):
    """Main PPO training loop: alternate rollout collection and minibatch SGD.

    `lr` and `cliprange` may be floats (held constant) or callables of the
    remaining-progress fraction. `train=False` switches the per-env train
    call to pass `nenvs` instead of the env index `j` (see below).
    Checkpoints go under `log_path + 'model/'`.
    """
    # logger.configure('/scratch/msy290/RL/aborg/retro_contest_agent/metalearner_for_expt/model/')
    logger.configure(log_path+'model/')
    # Wrap constant hyperparameters so they can be called like schedules.
    if isinstance(lr, float): lr = constfn(lr)
    else: assert callable(lr)
    if isinstance(cliprange, float): cliprange = constfn(cliprange)
    else: assert callable(cliprange)
    total_timesteps = int(total_timesteps)
    nenvs = env.num_envs
    ob_space = env.observation_space
    ac_space = env.action_space
    nbatch = nenvs * nsteps
    nbatch_train = nbatch // nminibatches
    assert nbatch % nminibatches == 0
    make_model = lambda : Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
                    nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
                    max_grad_norm=max_grad_norm)
    model = make_model()
    if load_path is not None:
        model.load(load_path)
    runner = Runner(env=env, model=model, nsteps=nsteps, total_timesteps=total_timesteps, gamma=gamma, lam=lam)
    epinfobuf = deque(maxlen=100)
    tfirststart = time.time()
    # Experience replay a la PPO-ER with L=2: https://arxiv.org/abs/1710.04423
    use_experience_replay = False
    nupdates = total_timesteps//nbatch
    for update in range(1, nupdates+1):
        tstart = time.time()
        # Anneal lr/cliprange linearly with remaining progress.
        frac = 1.0 - (update - 1.0) / nupdates
        lrnow = lr(frac)
        cliprangenow = cliprange(frac)
        if not use_experience_replay or update % 2 == 1:
            obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run(update) #pylint: disable=E0632
        else:
            # Replay variant: every other update collects into the *2 buffers
            # so odd+even rollouts can be trained on together below.
            obs2, returns2, masks2, actions2, values2, neglogpacs2, states, epinfos = runner.run(update) #pylint: disable=E0632
        epinfobuf.extend(epinfos)
        mblossvals = []
        if states is None: # nonrecurrent version
            if use_experience_replay and update != 1:
                # Train on the concatenation of the two most recent rollouts.
                inds = list(np.arange(nbatch * 2))
                for _ in range(noptepochs):
                    # NOTE(review): random.sample returns a new list and its
                    # result is discarded here, so minibatches are NOT
                    # reshuffled — likely meant random.shuffle(inds). Also
                    # model.train is called without the idx argument used in
                    # the per-env branch below; confirm before enabling replay.
                    random.sample(inds, nbatch)
                    for start in range(0, nbatch, nbatch_train):
                        end = start + nbatch_train
                        mbinds = inds[start:end]
                        slices = (arr[mbinds] for arr in (np.concatenate((obs, obs2)), np.concatenate((returns, returns2)), np.concatenate((masks, masks2)), np.concatenate((actions, actions2)), np.concatenate((values, values2)), np.concatenate((neglogpacs, neglogpacs2))))
                        mblossvals.append(model.train(lrnow, cliprangenow, *slices))
            else:
                # Per-env minibatching: shuffle step indices independently
                # per env (disarrange shuffles each row in place), then train
                # each env's slice separately so it keeps its own index.
                inds = np.arange(int(nbatch/obs.shape[1]))
                inds = np.tile(inds, ( obs.shape[1],1))
                inds = disarrange(inds)
                for _ in range(noptepochs):
                    for start in range(0, nsteps, int(nbatch_train/nenvs)):
                        end = start + int(nbatch_train/nenvs)
                        n_env = obs.shape[1]
                        for j in range(n_env):
                            mbinds = inds[j][start:end]
                            slices = (arr[mbinds] for arr in (obs[:,j,:,:,:], returns[:,j], masks[:,j], actions[:,j], values[:,j], neglogpacs[:,j]))
                            if train:
                                mblossvals.append(model.train(j,lrnow, cliprangenow, *slices))
                            else:
                                # Evaluation mode passes nenvs as the index
                                # instead of the real env id.
                                mblossvals.append(model.train(nenvs,lrnow, cliprangenow, *slices))
        else: # recurrent version
            assert nenvs % nminibatches == 0
            assert use_experience_replay == False
            # Minibatch over whole envs so recurrent state stays contiguous.
            envsperbatch = nenvs // nminibatches
            envinds = np.arange(nenvs)
            flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
            envsperbatch = nbatch_train // nsteps
            for _ in range(noptepochs):
                np.random.shuffle(envinds)
                for start in range(0, nenvs, envsperbatch):
                    end = start + envsperbatch
                    mbenvinds = envinds[start:end]
                    mbflatinds = flatinds[mbenvinds].ravel()
                    slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                    mbstates = states[mbenvinds]
                    # NOTE(review): unlike the non-recurrent branch, no env
                    # index is passed as the first argument — confirm this
                    # matches model.train's signature.
                    mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
        lossvals = np.mean(mblossvals, axis=0)
        tnow = time.time()
        fps = int(nbatch / (tnow - tstart))
        # Flatten (nsteps, nenvs) -> (nsteps*nenvs,) for explained_variance.
        values = sf01(values)
        returns = sf01(returns)
        if update % log_interval == 0 or update == 1:
            ev = explained_variance(values, returns)
            logger.logkv("serial_timesteps", update*nsteps)
            logger.logkv("nupdates", update)
            logger.logkv("total_timesteps", update*nbatch)
            logger.logkv("fps", fps)
            logger.logkv("explained_variance", float(ev))
            logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
            logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
            logger.logkv('time_elapsed', tnow - tfirststart)
            for (lossval, lossname) in zip(lossvals, model.loss_names):
                logger.logkv(lossname, lossval)
            logger.dumpkvs()
        if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():
            checkdir = osp.join(logger.get_dir(), 'checkpoints')
            os.makedirs(checkdir, exist_ok=True)
            savepath = osp.join(checkdir, '%.5i'%update)
            print('Saving to', savepath)
            model.save(savepath)
        # Re-initialize the FiLM conditioning after every update.
        model.filmObj.reinit()
    env.close()
def safemean(xs):
    """Mean of *xs*, or NaN when the sequence is empty (avoids a warning)."""
    if len(xs) == 0:
        return np.nan
    return np.mean(xs)
| [
"tensorflow.get_default_session",
"tensorflow.clip_by_value",
"tensorflow.trainable_variables",
"ppo2ttifrutti_policies.filmInit",
"tensorflow.maximum",
"random.sample",
"numpy.mean",
"numpy.arange",
"numpy.tile",
"numpy.ndarray",
"tensorflow.clip_by_global_norm",
"collections.deque",
"os.pa... | [((757, 772), 'numpy.ndindex', 'np.ndindex', (['shp'], {}), '(shp)\n', (767, 772), True, 'import numpy as np\n'), ((10871, 10908), 'baselines.logger.configure', 'logger.configure', (["(log_path + 'model/')"], {}), "(log_path + 'model/')\n", (10887, 10908), False, 'from baselines import logger\n'), ((11716, 11733), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (11721, 11733), False, 'from collections import deque\n'), ((11749, 11760), 'time.time', 'time.time', ([], {}), '()\n', (11758, 11760), False, 'import time\n'), ((782, 807), 'numpy.random.shuffle', 'np.random.shuffle', (['b[ndx]'], {}), '(b[ndx])\n', (799, 807), True, 'import numpy as np\n'), ((977, 1001), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (999, 1001), True, 'import tensorflow as tf\n'), ((1014, 1035), 'ppo2ttifrutti_policies.filmInit', 'filmInit', (['sess', 'nenvs'], {}), '(sess, nenvs)\n', (1022, 1035), False, 'from ppo2ttifrutti_policies import filmInit\n'), ((1379, 1413), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {}), '(tf.float32, [None])\n', (1393, 1413), True, 'import tensorflow as tf\n'), ((1420, 1454), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {}), '(tf.float32, [None])\n', (1434, 1454), True, 'import tensorflow as tf\n'), ((1472, 1506), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {}), '(tf.float32, [None])\n', (1486, 1506), True, 'import tensorflow as tf\n'), ((1520, 1554), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {}), '(tf.float32, [None])\n', (1534, 1554), True, 'import tensorflow as tf\n'), ((1562, 1592), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (1576, 1592), True, 'import tensorflow as tf\n'), ((1607, 1637), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (1621, 1637), True, 'import tensorflow as 
tf\n'), ((1869, 1889), 'tensorflow.square', 'tf.square', (['(vpred - R)'], {}), '(vpred - R)\n', (1878, 1889), True, 'import tensorflow as tf\n'), ((1905, 1932), 'tensorflow.square', 'tf.square', (['(vpredclipped - R)'], {}), '(vpredclipped - R)\n', (1914, 1932), True, 'import tensorflow as tf\n'), ((2011, 2043), 'tensorflow.exp', 'tf.exp', (['(OLDNEGLOGPAC - neglogpac)'], {}), '(OLDNEGLOGPAC - neglogpac)\n', (2017, 2043), True, 'import tensorflow as tf\n'), ((2508, 2534), 'tensorflow.gradients', 'tf.gradients', (['loss', 'params'], {}), '(loss, params)\n', (2520, 2534), True, 'import tensorflow as tf\n'), ((2731, 2786), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'LR', 'epsilon': '(1e-05)'}), '(learning_rate=LR, epsilon=1e-05)\n', (2753, 2786), True, 'import tensorflow as tf\n'), ((8521, 8561), 'numpy.asarray', 'np.asarray', (['mb_obs'], {'dtype': 'self.obs.dtype'}), '(mb_obs, dtype=self.obs.dtype)\n', (8531, 8561), True, 'import numpy as np\n'), ((8711, 8751), 'numpy.asarray', 'np.asarray', (['mb_rewards'], {'dtype': 'np.float32'}), '(mb_rewards, dtype=np.float32)\n', (8721, 8751), True, 'import numpy as np\n'), ((8767, 8789), 'numpy.asarray', 'np.asarray', (['mb_actions'], {}), '(mb_actions)\n', (8777, 8789), True, 'import numpy as np\n'), ((8804, 8843), 'numpy.asarray', 'np.asarray', (['mb_values'], {'dtype': 'np.float32'}), '(mb_values, dtype=np.float32)\n', (8814, 8843), True, 'import numpy as np\n'), ((8862, 8905), 'numpy.asarray', 'np.asarray', (['mb_neglogpacs'], {'dtype': 'np.float32'}), '(mb_neglogpacs, dtype=np.float32)\n', (8872, 8905), True, 'import numpy as np\n'), ((8919, 8954), 'numpy.asarray', 'np.asarray', (['mb_dones'], {'dtype': 'np.bool'}), '(mb_dones, dtype=np.bool)\n', (8929, 8954), True, 'import numpy as np\n'), ((8973, 9016), 'numpy.ndarray', 'np.ndarray', (['self.env.num_envs'], {'dtype': 'np.int'}), '(self.env.num_envs, dtype=np.int)\n', (8983, 9016), True, 'import numpy as np\n'), ((9613, 9638), 
'numpy.zeros_like', 'np.zeros_like', (['mb_rewards'], {}), '(mb_rewards)\n', (9626, 9638), True, 'import numpy as np\n'), ((9651, 9676), 'numpy.zeros_like', 'np.zeros_like', (['mb_rewards'], {}), '(mb_rewards)\n', (9664, 9676), True, 'import numpy as np\n'), ((11954, 11965), 'time.time', 'time.time', ([], {}), '()\n', (11963, 11965), False, 'import time\n'), ((14437, 14464), 'numpy.mean', 'np.mean', (['mblossvals'], {'axis': '(0)'}), '(mblossvals, axis=0)\n', (14444, 14464), True, 'import numpy as np\n'), ((14474, 14485), 'time.time', 'time.time', ([], {}), '()\n', (14483, 14485), False, 'import time\n'), ((15592, 15603), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (15599, 15603), True, 'import numpy as np\n'), ((1786, 1852), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(train_model.vf - OLDVPRED)', '(-CLIPRANGE)', 'CLIPRANGE'], {}), '(train_model.vf - OLDVPRED, -CLIPRANGE, CLIPRANGE)\n', (1802, 1852), True, 'import tensorflow as tf\n'), ((2093, 2150), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio', '(1.0 - CLIPRANGE)', '(1.0 + CLIPRANGE)'], {}), '(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)\n', (2109, 2150), True, 'import tensorflow as tf\n'), ((2178, 2211), 'tensorflow.maximum', 'tf.maximum', (['pg_losses', 'pg_losses2'], {}), '(pg_losses, pg_losses2)\n', (2188, 2211), True, 'import tensorflow as tf\n'), ((2433, 2459), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {}), "('model')\n", (2450, 2459), True, 'import tensorflow as tf\n'), ((2473, 2497), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2495, 2497), True, 'import tensorflow as tf\n'), ((2590, 2634), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', 'max_grad_norm'], {}), '(grads, max_grad_norm)\n', (2612, 2634), True, 'import tensorflow as tf\n'), ((3531, 3547), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3545, 3547), True, 'import tensorflow as tf\n'), ((3622, 3638), 'tensorflow.train.Saver', 
'tf.train.Saver', ([], {}), '()\n', (3636, 3638), True, 'import tensorflow as tf\n'), ((4952, 4985), 'numpy.arange', 'np.arange', (['(1)', '(6 if nc == 3 else 2)'], {}), '(1, 6 if nc == 3 else 2)\n', (4961, 4985), True, 'import numpy as np\n'), ((4989, 5026), 'numpy.random.shuffle', 'np.random.shuffle', (['ordered_strategies'], {}), '(ordered_strategies)\n', (5006, 5026), True, 'import numpy as np\n'), ((5137, 5183), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.uint8', 'shape': 'ob_shape'}), '(dtype=tf.uint8, shape=ob_shape)\n', (5151, 5183), True, 'import tensorflow as tf\n'), ((5320, 5334), 'tensorflow.constant', 'tf.constant', (['c'], {}), '(c)\n', (5331, 5334), True, 'import tensorflow as tf\n'), ((5849, 5883), 'tensorflow.cast', 'tf.cast', (['(augment * 255.0)', 'tf.uint8'], {}), '(augment * 255.0, tf.uint8)\n', (5856, 5883), True, 'import tensorflow as tf\n'), ((6116, 6159), 'numpy.ndarray', 'np.ndarray', (['self.env.num_envs'], {'dtype': 'np.int'}), '(self.env.num_envs, dtype=np.int)\n', (6126, 6159), True, 'import numpy as np\n'), ((6173, 6216), 'numpy.ndarray', 'np.ndarray', (['self.env.num_envs'], {'dtype': 'np.int'}), '(self.env.num_envs, dtype=np.int)\n', (6183, 6216), True, 'import numpy as np\n'), ((6254, 6297), 'numpy.ndarray', 'np.ndarray', (['self.env.num_envs'], {'dtype': 'np.int'}), '(self.env.num_envs, dtype=np.int)\n', (6264, 6297), True, 'import numpy as np\n'), ((13874, 13890), 'numpy.arange', 'np.arange', (['nenvs'], {}), '(nenvs)\n', (13883, 13890), True, 'import numpy as np\n'), ((14632, 14667), 'baselines.common.explained_variance', 'explained_variance', (['values', 'returns'], {}), '(values, returns)\n', (14650, 14667), False, 'from baselines.common import explained_variance\n'), ((14671, 14720), 'baselines.logger.logkv', 'logger.logkv', (['"""serial_timesteps"""', '(update * nsteps)'], {}), "('serial_timesteps', update * nsteps)\n", (14683, 14720), False, 'from baselines import logger\n'), ((14722, 14754), 
'baselines.logger.logkv', 'logger.logkv', (['"""nupdates"""', 'update'], {}), "('nupdates', update)\n", (14734, 14754), False, 'from baselines import logger\n'), ((14758, 14806), 'baselines.logger.logkv', 'logger.logkv', (['"""total_timesteps"""', '(update * nbatch)'], {}), "('total_timesteps', update * nbatch)\n", (14770, 14806), False, 'from baselines import logger\n'), ((14808, 14832), 'baselines.logger.logkv', 'logger.logkv', (['"""fps"""', 'fps'], {}), "('fps', fps)\n", (14820, 14832), False, 'from baselines import logger\n'), ((15041, 15089), 'baselines.logger.logkv', 'logger.logkv', (['"""time_elapsed"""', '(tnow - tfirststart)'], {}), "('time_elapsed', tnow - tfirststart)\n", (15053, 15089), False, 'from baselines import logger\n'), ((15192, 15208), 'baselines.logger.dumpkvs', 'logger.dumpkvs', ([], {}), '()\n', (15206, 15208), False, 'from baselines import logger\n'), ((15281, 15297), 'baselines.logger.get_dir', 'logger.get_dir', ([], {}), '()\n', (15295, 15297), False, 'from baselines import logger\n'), ((15358, 15394), 'os.makedirs', 'os.makedirs', (['checkdir'], {'exist_ok': '(True)'}), '(checkdir, exist_ok=True)\n', (15369, 15394), False, 'import os\n'), ((15409, 15444), 'os.path.join', 'osp.join', (['checkdir', "('%.5i' % update)"], {}), "(checkdir, '%.5i' % update)\n", (15417, 15444), True, 'import os.path as osp\n'), ((1965, 1999), 'tensorflow.maximum', 'tf.maximum', (['vf_losses1', 'vf_losses2'], {}), '(vf_losses1, vf_losses2)\n', (1975, 1999), True, 'import tensorflow as tf\n'), ((2246, 2281), 'tensorflow.square', 'tf.square', (['(neglogpac - OLDNEGLOGPAC)'], {}), '(neglogpac - OLDNEGLOGPAC)\n', (2255, 2281), True, 'import tensorflow as tf\n'), ((3953, 3986), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3984, 3986), True, 'import tensorflow as tf\n'), ((5201, 5223), 'tensorflow.cast', 'tf.cast', (['X', 'tf.float32'], {}), '(X, tf.float32)\n', (5208, 5223), True, 'import tensorflow as tf\n'), 
((9098, 9116), 'copy.deepcopy', 'deepcopy', (['self.obs'], {}), '(self.obs)\n', (9106, 9116), False, 'from copy import deepcopy\n'), ((9127, 9166), 'numpy.expand_dims', 'np.expand_dims', (['ob1[j, :, :, :]'], {'axis': '(0)'}), '(ob1[j, :, :, :], axis=0)\n', (9141, 9166), True, 'import numpy as np\n'), ((9246, 9264), 'copy.deepcopy', 'deepcopy', (['self.obs'], {}), '(self.obs)\n', (9254, 9264), False, 'from copy import deepcopy\n'), ((9275, 9314), 'numpy.expand_dims', 'np.expand_dims', (['ob1[j, :, :, :]'], {'axis': '(0)'}), '(ob1[j, :, :, :], axis=0)\n', (9289, 9314), True, 'import numpy as np\n'), ((13103, 13135), 'numpy.tile', 'np.tile', (['inds', '(obs.shape[1], 1)'], {}), '(inds, (obs.shape[1], 1))\n', (13110, 13135), True, 'import numpy as np\n'), ((14030, 14056), 'numpy.random.shuffle', 'np.random.shuffle', (['envinds'], {}), '(envinds)\n', (14047, 14056), True, 'import numpy as np\n'), ((15157, 15188), 'baselines.logger.logkv', 'logger.logkv', (['lossname', 'lossval'], {}), '(lossname, lossval)\n', (15169, 15188), False, 'from baselines import logger\n'), ((15322, 15338), 'baselines.logger.get_dir', 'logger.get_dir', ([], {}), '()\n', (15336, 15338), False, 'from baselines import logger\n'), ((2334, 2353), 'tensorflow.abs', 'tf.abs', (['(ratio - 1.0)'], {}), '(ratio - 1.0)\n', (2340, 2353), True, 'import tensorflow as tf\n'), ((5239, 5260), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (5253, 5260), False, 'import random\n'), ((6434, 6473), 'numpy.expand_dims', 'np.expand_dims', (['ob1[j, :, :, :]'], {'axis': '(0)'}), '(ob1[j, :, :, :], axis=0)\n', (6448, 6473), True, 'import numpy as np\n'), ((6619, 6658), 'numpy.expand_dims', 'np.expand_dims', (['ob1[j, :, :, :]'], {'axis': '(0)'}), '(ob1[j, :, :, :], axis=0)\n', (6633, 6658), True, 'import numpy as np\n'), ((12504, 12525), 'numpy.arange', 'np.arange', (['(nbatch * 2)'], {}), '(nbatch * 2)\n', (12513, 12525), True, 'import numpy as np\n'), ((12564, 12591), 'random.sample', 
'random.sample', (['inds', 'nbatch'], {}), '(inds, nbatch)\n', (12577, 12591), False, 'import random\n'), ((13905, 13930), 'numpy.arange', 'np.arange', (['(nenvs * nsteps)'], {}), '(nenvs * nsteps)\n', (13914, 13930), True, 'import numpy as np\n'), ((5085, 5123), 'random.randint', 'random.randint', (['(0)', '(2 if nc == 3 else 1)'], {}), '(0, 2 if nc == 3 else 1)\n', (5099, 5123), False, 'import random\n'), ((5414, 5432), 'tensorflow.add', 'tf.add', (['augment', 'C'], {}), '(augment, C)\n', (5420, 5432), True, 'import tensorflow as tf\n'), ((5474, 5544), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['augment'], {'lower': '(0.5)', 'upper': '(1.5)', 'seed': 'update'}), '(augment, lower=0.5, upper=1.5, seed=update)\n', (5500, 5544), True, 'import tensorflow as tf\n'), ((7495, 7519), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (7517, 7519), True, 'import tensorflow as tf\n'), ((5577, 5649), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['augment'], {'max_delta': '(32.0 / 255.0)', 'seed': 'update'}), '(augment, max_delta=32.0 / 255.0, seed=update)\n', (5603, 5649), True, 'import tensorflow as tf\n'), ((5678, 5746), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['augment'], {'lower': '(0.5)', 'upper': '(1.5)', 'seed': 'update'}), '(augment, lower=0.5, upper=1.5, seed=update)\n', (5702, 5746), True, 'import tensorflow as tf\n'), ((5779, 5835), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['augment'], {'max_delta': '(0.2)', 'seed': 'update'}), '(augment, max_delta=0.2, seed=update)\n', (5798, 5835), True, 'import tensorflow as tf\n'), ((12746, 12773), 'numpy.concatenate', 'np.concatenate', (['(obs, obs2)'], {}), '((obs, obs2))\n', (12760, 12773), True, 'import numpy as np\n'), ((12775, 12810), 'numpy.concatenate', 'np.concatenate', (['(returns, returns2)'], {}), '((returns, returns2))\n', (12789, 12810), True, 'import numpy as np\n'), ((12812, 12843), 
'numpy.concatenate', 'np.concatenate', (['(masks, masks2)'], {}), '((masks, masks2))\n', (12826, 12843), True, 'import numpy as np\n'), ((12845, 12880), 'numpy.concatenate', 'np.concatenate', (['(actions, actions2)'], {}), '((actions, actions2))\n', (12859, 12880), True, 'import numpy as np\n'), ((12882, 12915), 'numpy.concatenate', 'np.concatenate', (['(values, values2)'], {}), '((values, values2))\n', (12896, 12915), True, 'import numpy as np\n'), ((12917, 12958), 'numpy.concatenate', 'np.concatenate', (['(neglogpacs, neglogpacs2)'], {}), '((neglogpacs, neglogpacs2))\n', (12931, 12958), True, 'import numpy as np\n')] |
import numpy as np
from scipy.signal import freqz
import spectrum
from koe_acoustic_features.extractor import Extractor
from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments
class LinearPredictionExtractor(Extractor):
    """Framewise linear-prediction features of the signal described by self.args.

    All three public features share the same preamble (read the signal,
    build the analysis window, split into frames), which previously was
    triplicated verbatim; it now lives in the private helper below.
    """

    def _windowed_frames(self):
        """Shared setup: signal, analysis window and frame boundaries.

        :return: tuple (sig, window, nsegs, segs, nfft, order) where `segs`
            is the list of (begin, end) sample indices of each frame.
        """
        sig = get_sig(self.args)
        # fs is unrolled for interface parity but not used by these features.
        nfft, fs, noverlap, win_length, order = unroll_args(self.args, ['nfft', 'fs', 'noverlap', 'win_length', 'order'])
        hann_window = _cached_get_window('hanning', nfft)
        # Allow the caller to override the window; default to Hann.
        window = unroll_args(self.args, [('window', hann_window)])
        siglen = len(sig)
        nsegs, segs = split_segments(siglen, win_length, noverlap, incltail=False)
        return sig, window, nsegs, segs, nfft, order

    def lpc_spectrum(self):
        """Log-magnitude LPC spectrum: one nfft-bin column per frame."""
        sig, window, nsegs, segs, nfft, order = self._windowed_frames()
        lpcs = np.zeros((nfft, nsegs), dtype=np.complex64)
        for i in range(nsegs):
            seg_beg, seg_end = segs[i]
            frame = sig[seg_beg:seg_end]
            lpcs[:, i] = lpc_spectrum_frame(frame * window, order, nfft)
        return np.log10(abs(lpcs))

    def lpc_cepstrum(self):
        """LPC cepstral coefficients: one order-length column per frame."""
        sig, window, nsegs, segs, nfft, order = self._windowed_frames()
        cepstra = np.zeros((order, nsegs), dtype=np.float32)
        for i in range(nsegs):
            seg_beg, seg_end = segs[i]
            frame = sig[seg_beg:seg_end]
            cepstra[:, i] = lpc_cepstrum_frame(frame * window, order)
        return cepstra

    def lp_coefficients(self):
        """Raw linear-prediction coefficients: one order-length column per frame."""
        sig, window, nsegs, segs, nfft, order = self._windowed_frames()
        lp_coeffs = np.zeros((order, nsegs), dtype=np.float32)
        for i in range(nsegs):
            seg_beg, seg_end = segs[i]
            frame = sig[seg_beg:seg_end]
            lp_coeffs[:, i] = lp_coefficients_frame(frame * window, order)
        return lp_coeffs
def lpc_spectrum_frame(sig, order, nfft):
    """Complex frequency response of the all-pole LPC model of one frame."""
    coeffs, _err = spectrum.lpc(sig, order)
    # Prepend 1 to the coefficients so the denominator polynomial matches
    # the Matlab convention for LPC filters.
    denom = np.concatenate((np.array([1]), coeffs))
    _freqs, response = freqz(1, denom, nfft)
    return response
def lp_coefficients_frame(sig, order):
    """Linear-prediction coefficients of a single frame, cast to float32."""
    coeffs, _prediction_error = spectrum.lpc(sig, order)
    return coeffs.astype(np.float32)
def lpc_cepstrum_frame(sig, order=None):
    """LPC cepstral coefficients of a single frame.

    Fits an LPC model of the given order to `sig`, then converts the
    prediction coefficients to cepstral coefficients via the standard
    recursion (each coefficient depends on all previous ones).

    :param sig: one windowed frame of the signal
    :param order: LPC model order; the returned array has this length
    :return: float32 array of `order` cepstral coefficients
    """
    coeffs, _gain = spectrum.lpc(sig, order)
    cepst = np.zeros((order,), dtype=np.float32)
    for n in range(0, order):
        acc = 0
        for k in range(0, n):
            acc += (k - n) * coeffs[k] * cepst[n - k - 1]
        cepst[n] = -coeffs[n] + acc / (n + 1)
    return cepst
| [
"koe_acoustic_features.utils.split_segments",
"numpy.zeros",
"koe_acoustic_features.utils._cached_get_window",
"numpy.array",
"koe_acoustic_features.utils.get_sig",
"koe_acoustic_features.utils.unroll_args",
"spectrum.lpc"
] | [((2398, 2422), 'spectrum.lpc', 'spectrum.lpc', (['sig', 'order'], {}), '(sig, order)\n', (2410, 2422), False, 'import spectrum\n'), ((2637, 2661), 'spectrum.lpc', 'spectrum.lpc', (['sig', 'order'], {}), '(sig, order)\n', (2649, 2661), False, 'import spectrum\n'), ((2996, 3020), 'spectrum.lpc', 'spectrum.lpc', (['sig', 'order'], {}), '(sig, order)\n', (3008, 3020), False, 'import spectrum\n'), ((3033, 3069), 'numpy.zeros', 'np.zeros', (['(order,)'], {'dtype': 'np.float32'}), '((order,), dtype=np.float32)\n', (3041, 3069), True, 'import numpy as np\n'), ((307, 325), 'koe_acoustic_features.utils.get_sig', 'get_sig', (['self.args'], {}), '(self.args)\n', (314, 325), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((374, 447), 'koe_acoustic_features.utils.unroll_args', 'unroll_args', (['self.args', "['nfft', 'fs', 'noverlap', 'win_length', 'order']"], {}), "(self.args, ['nfft', 'fs', 'noverlap', 'win_length', 'order'])\n", (385, 447), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((470, 505), 'koe_acoustic_features.utils._cached_get_window', '_cached_get_window', (['"""hanning"""', 'nfft'], {}), "('hanning', nfft)\n", (488, 505), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((523, 572), 'koe_acoustic_features.utils.unroll_args', 'unroll_args', (['self.args', "[('window', hann_window)]"], {}), "(self.args, [('window', hann_window)])\n", (534, 572), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((622, 682), 'koe_acoustic_features.utils.split_segments', 'split_segments', (['siglen', 'win_length', 'noverlap'], {'incltail': '(False)'}), '(siglen, win_length, noverlap, incltail=False)\n', (636, 682), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((699, 742), 
'numpy.zeros', 'np.zeros', (['(nfft, nsegs)'], {'dtype': 'np.complex64'}), '((nfft, nsegs), dtype=np.complex64)\n', (707, 742), True, 'import numpy as np\n'), ((1006, 1024), 'koe_acoustic_features.utils.get_sig', 'get_sig', (['self.args'], {}), '(self.args)\n', (1013, 1024), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((1073, 1146), 'koe_acoustic_features.utils.unroll_args', 'unroll_args', (['self.args', "['nfft', 'fs', 'noverlap', 'win_length', 'order']"], {}), "(self.args, ['nfft', 'fs', 'noverlap', 'win_length', 'order'])\n", (1084, 1146), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((1169, 1204), 'koe_acoustic_features.utils._cached_get_window', '_cached_get_window', (['"""hanning"""', 'nfft'], {}), "('hanning', nfft)\n", (1187, 1204), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((1222, 1271), 'koe_acoustic_features.utils.unroll_args', 'unroll_args', (['self.args', "[('window', hann_window)]"], {}), "(self.args, [('window', hann_window)])\n", (1233, 1271), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((1321, 1381), 'koe_acoustic_features.utils.split_segments', 'split_segments', (['siglen', 'win_length', 'noverlap'], {'incltail': '(False)'}), '(siglen, win_length, noverlap, incltail=False)\n', (1335, 1381), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((1398, 1440), 'numpy.zeros', 'np.zeros', (['(order, nsegs)'], {'dtype': 'np.float32'}), '((order, nsegs), dtype=np.float32)\n', (1406, 1440), True, 'import numpy as np\n'), ((1690, 1708), 'koe_acoustic_features.utils.get_sig', 'get_sig', (['self.args'], {}), '(self.args)\n', (1697, 1708), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, 
split_segments\n'), ((1757, 1830), 'koe_acoustic_features.utils.unroll_args', 'unroll_args', (['self.args', "['nfft', 'fs', 'noverlap', 'win_length', 'order']"], {}), "(self.args, ['nfft', 'fs', 'noverlap', 'win_length', 'order'])\n", (1768, 1830), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((1853, 1888), 'koe_acoustic_features.utils._cached_get_window', '_cached_get_window', (['"""hanning"""', 'nfft'], {}), "('hanning', nfft)\n", (1871, 1888), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((1906, 1955), 'koe_acoustic_features.utils.unroll_args', 'unroll_args', (['self.args', "[('window', hann_window)]"], {}), "(self.args, [('window', hann_window)])\n", (1917, 1955), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((2005, 2065), 'koe_acoustic_features.utils.split_segments', 'split_segments', (['siglen', 'win_length', 'noverlap'], {'incltail': '(False)'}), '(siglen, win_length, noverlap, incltail=False)\n', (2019, 2065), False, 'from koe_acoustic_features.utils import get_sig, unroll_args, _cached_get_window, split_segments\n'), ((2087, 2129), 'numpy.zeros', 'np.zeros', (['(order, nsegs)'], {'dtype': 'np.float32'}), '((order, nsegs), dtype=np.float32)\n', (2095, 2129), True, 'import numpy as np\n'), ((2544, 2557), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2552, 2557), True, 'import numpy as np\n')] |
# coding:utf-8
# This file is part of Alkemiems.
#
# Alkemiems is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
__author__ = '<NAME>'
__version__ = 1.0
__maintainer__ = '<NAME>'
__email__ = "<EMAIL>"
__date__ = '2021/05/25 09:01:54'
import numpy as np
import matplotlib.pyplot as plt
import os
import scipy.stats
def plt_mse(data, outfn):
    """Plot train/test MSE curves (full view plus zoomed inset) and save to *outfn*.

    ``data`` is a 2-D array whose first row is skipped; column 0 holds the
    step index, the second-to-last column the training MSE and the last
    column the test MSE.
    """
    curves = data[1:, :]
    steps = curves[:, 0]
    train_mse = curves[:, -2]
    test_mse = curves[:, -1]

    fig = plt.figure()

    # Main axes: early-training view of both curves.
    main_ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    main_ax.plot(steps, train_mse, c='#347FE2', linewidth=3.2)
    main_ax.plot(steps, test_mse, c='#F37878', linewidth=3.2)
    main_ax.set_xlim(-1, 300)
    main_ax.set_xlabel('Steps')
    main_ax.set_ylabel("Mean Square Error (MSE)")

    # Inset axes: zoom on the converged tail of training.
    inset_ax = fig.add_axes([0.4, 0.4, 0.35, 0.35])
    inset_ax.plot(steps, train_mse, c='#347FE2', linewidth=2.2)
    inset_ax.plot(steps, test_mse, c='#F37878', linewidth=2.2)
    train_final_mean = np.mean(train_mse[3000:])
    test_final_mean = np.mean(test_mse[3000:])
    # Dashed reference line at the converged training MSE.
    inset_ax.plot(range(300, 5000), [train_final_mean] * (5000 - 300), 'r',
                  linestyle='--', linewidth=2.2)
    inset_ax.text(2000, 0.004, 'MSE=%.5f' % train_final_mean)
    inset_ax.set_xlabel('Steps')
    inset_ax.set_ylabel('MSE')
    inset_ax.set_xlim(300, 5000)
    inset_ax.set_ylim(0, 0.01)
    inset_ax.set_xticks([300, 1000, 2000, 3000, 4000, 5000])

    plt.savefig(outfn)
def read_mse_2data(fn):
    """Parse a training log into a 2-D float array.

    Each line of *fn* contains '|'-separated fields of the form
    ``label: v1 v2 ...``; for every field, the whitespace-separated values
    after its last ':' are collected, so each line yields one flat row.

    Returns
    -------
    np.ndarray
        Shape ``(n_lines, n_values_per_line)``, dtype float.
    """
    with open(fn, 'r') as f:
        rows = [
            [value
             for field in line.split('|')
             for value in field.split(':')[-1].split()]
            for line in f.readlines()
        ]
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    return np.array(rows, dtype=float)
def plt_result(data, save_fn=None, show=False):
    """Plot the two training-loss series stored in *data* side by side.

    Parameters
    ----------
    data : np.ndarray
        Log array; column 0 is the step index, columns 2 and 3 hold the
        two training-loss series that are plotted.
    save_fn : str or None
        When truthy, the figure is written to this path at 600 dpi.
    show : bool
        When True, display the figure interactively.
    """
    # Removed a large block of commented-out plotting code and the local
    # style dictionaries it referenced; behaviour of the live path is unchanged.
    x = data[:, 0]
    zttrain = data[:, 2]
    nttrain = data[:, 3]

    fig, axes = plt.subplots(1, 2, figsize=(14, 8))
    plt.rc('font', family='Times New Roman', weight='normal')
    axes = axes.flatten()
    ax1, ax2 = axes[0], axes[1]

    ax1.plot(x, zttrain)
    ax2.plot(x, nttrain)
    ax1.set_ylabel('MSE')
    ax1.legend(fontsize=8)
    ax2.set_ylabel('MSE')
    ax2.legend(fontsize=8)

    plt.tight_layout()
    if save_fn:
        plt.savefig(save_fn, dpi=600)
    if show:
        plt.show()
if __name__ == '__main__':
    # Training log from the 3-layer / 100-neuron run (Windows-style relative path).
    fn = os.path.join(r"..\rtrain", "2training_module_2output", "running_3layer_100.log")
    data = read_mse_2data(fn)
    # save_fn=False skips saving; the figure is only shown on screen.
    plt_result(data, save_fn=False, show=True)
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"os.path.join",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((446, 458), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (456, 458), True, 'import matplotlib.pyplot as plt\n'), ((1119, 1141), 'numpy.mean', 'np.mean', (['ytrain[3000:]'], {}), '(ytrain[3000:])\n', (1126, 1141), True, 'import numpy as np\n'), ((1165, 1186), 'numpy.mean', 'np.mean', (['ytest[3000:]'], {}), '(ytest[3000:])\n', (1172, 1186), True, 'import numpy as np\n'), ((1524, 1542), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfn'], {}), '(outfn)\n', (1535, 1542), True, 'import matplotlib.pyplot as plt\n'), ((1884, 1912), 'numpy.array', 'np.array', (['dd'], {'dtype': 'np.float'}), '(dd, dtype=np.float)\n', (1892, 1912), True, 'import numpy as np\n'), ((2898, 2939), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrow', 'ncol'], {'figsize': '(14, 8)'}), '(nrow, ncol, figsize=(14, 8))\n', (2910, 2939), True, 'import matplotlib.pyplot as plt\n'), ((2945, 3002), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Times New Roman"""', 'weight': '"""normal"""'}), "('font', family='Times New Roman', weight='normal')\n", (2951, 3002), True, 'import matplotlib.pyplot as plt\n'), ((3302, 3320), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3318, 3320), True, 'import matplotlib.pyplot as plt\n'), ((6481, 6566), 'os.path.join', 'os.path.join', (['"""..\\\\rtrain"""', '"""2training_module_2output"""', '"""running_3layer_100.log"""'], {}), "('..\\\\rtrain', '2training_module_2output', 'running_3layer_100.log'\n )\n", (6493, 6566), False, 'import os\n'), ((3349, 3378), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_fn'], {'dpi': '(600)'}), '(save_fn, dpi=600)\n', (3360, 3378), True, 'import matplotlib.pyplot as plt\n'), ((3408, 3418), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3416, 3418), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
# Output directory for every figure saved by this script.
fig_path = "/Users/gabriel/Documents/Research/USGS_Work/gmprocess/figs/spectra/"
#%%
def get_fft(series, fs, nfft):
    """Return ``(freq, ft)``: FFT bin frequencies and the DFT of *series*.

    NOTE(review): *fs* is passed to ``np.fft.fft`` as the transform length
    ``n`` (not as a sampling rate), and the frequency axis is built from
    *nfft* with the default unit sample spacing — confirm callers intend
    ``fs == nfft``.
    """
    spectrum = np.fft.fft(series, fs)
    bin_freqs = np.fft.fftfreq(nfft)
    return bin_freqs, spectrum
def get_ifft(series, fs, nfft, real=True):
    """Inverse DFT of *series* over *fs* points.

    When *real* is true only the real part is returned.  *nfft* is accepted
    for symmetry with ``get_fft`` but is not used.
    """
    ift = np.fft.ifft(series, fs)
    return ift.real if real else ift
def ps_psd_difference(ps, psd):
    """Element-wise ratio ``psd[i] / ps[i]`` of two spectra.

    Returns an ndarray of ratios when the inputs have equal length;
    otherwise prints a warning and returns an empty list.
    """
    if len(ps) != len(psd):
        print("Spectra must be of the same size")
        return []
    ratios = np.zeros(len(ps))
    for idx, (power, density) in enumerate(zip(ps, psd)):
        ratios[idx] = density / power
    return ratios
#%%
# Make a test signal
np.random.seed(0)
delta = .01          # sample spacing
fs = 1/ delta        # sampling frequency (100 Hz)
time_vec = np.arange(0, 70, delta)  # NOTE(review): unused below
#%% Sine wave
delta = .01
fs = 1/ delta
t = np.arange(0,256,delta)  # time axis: 25600 samples over [0, 256)
# Welch parameters: segment length = N / wndw_factor, with 1/overlap_factor overlap.
wndw_factor=500
overlap_factor=2
nfft = len(t)
# NOTE(review): nperseg/noverlap are floats here — confirm the installed
# scipy version accepts non-integer segment sizes.
nperseg=len(t)/wndw_factor
noverlap=nperseg/overlap_factor
# filename = "sine_wave_input-del_.01-nfft_256k"
# plt.plot(t,np.sin(t))
# plt.title("Test sine wave, ∆=0.01, N=256000")
# plt.savefig(fname=fig_path + filename + ".png", dpi=500)
# plt.show()
#%% Calculate PSD of test signal with Welch's Method
# Same Welch estimate twice: once as a power spectral *density*, once as a
# power *spectrum*; the commented variants exercise the parameter defaults.
freqs_psd, psd = signal.welch(np.sin(t),fs=fs, nfft=nfft, nperseg=nperseg, noverlap=noverlap, scaling="density")
# freqs_psd, psd = signal.welch(np.sin(t),fs=fs, nfft=nfft, nperseg=nperseg, scaling="density")
# freqs_psd, psd = signal.welch(np.sin(t),fs=fs, nfft=nfft, scaling="density")
# freqs_psd, psd = signal.welch(np.sin(t),fs=fs, scaling="density")
# freqs_psd, psd = signal.welch(np.sin(t), scaling="density")
freqs_ps, ps = signal.welch(np.sin(t),fs=fs, nfft=nfft, nperseg=nperseg, noverlap=noverlap, scaling="spectrum")
# freqs_ps, ps = signal.welch(np.sin(t),fs=fs, nfft=nfft, nperseg=nperseg, scaling="spectrum")
# freqs_ps, ps = signal.welch(np.sin(t),fs=fs, nfft=nfft, scaling="spectrum")
# freqs_ps, ps = signal.welch(np.sin(t),fs=fs,scaling="spectrum")
# freqs_ps, ps = signal.welch(np.sin(t),scaling="spectrum")
# Per-bin ratio PSD/PS, used in the "Scaling differences" plot below.
diff = ps_psd_difference(ps,psd)
# Note: freqs are the same for both psd and ps
# filename = "sine_wave-default_fs_and_nfft"
# filename = "sine_wave-default_fs_and_nfft_nperseg"
# filename = "sine_wave-set_fs_and_default_nfft_nperseg"
filename = "sine_wave-using_fs_and_nfft_nperseg->" + str(wndw_factor) + "->" + str(100/overlap_factor) + "perc_overlap"
# Three views of the same PSD-vs-PS comparison: linear, log-x, log-y.
# No Scaling
plt.plot(freqs_psd, psd, label="PSD")
plt.plot(freqs_ps, ps, "--", label="PS")
plt.title("Parameter Testing for Welch's Method")
plt.legend()
plt.savefig(fname=fig_path + filename + "-no_scaling.png", dpi=500)
plt.show()
# Log x
plt.semilogx(freqs_psd, psd, label="PSD")
plt.semilogx(freqs_ps, ps, "--", label="PS")
plt.title("Parameter Testing for Welch's Method")
plt.legend()
plt.savefig(fname=fig_path + filename + "-logx.png", dpi=500)
plt.show()
# Log Y
plt.semilogy(freqs_psd, psd, label="PSD")
plt.semilogy(freqs_ps, ps, "--", label="PS")
plt.title("Parameter Testing for Welch's Method")
plt.legend()
plt.savefig(fname=fig_path + filename + "-logy.png", dpi=500)
plt.show()
#%% Scaling differences
# PSD / PS scaling ratios
# plt.figure(figsize=(4, 6))
plt.semilogy(freqs_psd, diff, label="PSD/PS ratio")
ax = plt.gca()
# plt.annotate("Variation due to rounding error", xy=(2, 1), xytext=(3, 1.5))
plt.title("Differencing PSD and PS")
plt.legend()
plt.savefig(fname=fig_path + "diff-" + filename + "-logy.png", dpi=500)
plt.show()
#%% Calculate PSD of test signal with just a periodogram
# freqs_ps, ps = signal.periodogram(np.sin(t),fs)
# freqs_psd, psd = signal.periodogram(np.sin(t),fs,scaling="density")
# # No Scaling
# plt.plot(freqs_psd, psd, label="PSD")
# plt.plot(freqs_ps, ps, "--", label="PS")
# plt.legend()
# plt.show()
# # Log x
# plt.semilogx(freqs_psd, psd, label="PSD")
# plt.semilogx(freqs_ps, ps, "--", label="PS")
# plt.legend()
# plt.show()
# # Log Y
# plt.semilogy(freqs_psd, psd, label="PSD")
# plt.semilogy(freqs_ps, ps, "--", label="PS")
# plt.legend()
# plt.show()
#%% Forward FFT
# # Forward transform
# t = np.arange(256)
# sp = np.fft.fft(np.sin(t))
# freq = np.fft.fftfreq(t.shape[-1])
# # plt.plot(freq, sp.real, freq, sp.imag)
# plt.plot(freq, sp.real)
# plt.show()
# # Forward transform, ortho normalization
# t = np.arange(256)
# sp = np.fft.fft(np.sin(t),norm='ortho')
# freq = np.fft.fftfreq(t.shape[-1])
# # plt.plot(freq, sp.real, freq, sp.imag)
# plt.plot(freq, sp.real)
# plt.show()
# Forward transform finer spacing (larger NFFT)
# Live path: FFT of sin(t) sampled every 0.1 over [0, 256).
t = np.arange(0,256,0.1)
sp = np.fft.fft(np.sin(t))
freq = np.fft.fftfreq(t.shape[-1],0.1)
# plt.plot(freq, sp.real, freq, sp.imag)
# plt.plot(freq, sp.real)
# plt.show()
# # Forward transform, finer spacing, ortho normalization
# t = np.arange(0,256,0.1)
# sp = np.fft.fft(np.sin(t),norm='ortho')
# freq = np.fft.fftfreq(t.shape[-1],0.1)
# # plt.plot(freq, sp.real, freq, sp.imag)
# plt.plot(freq, sp.real)
# plt.show()
#
# Inverse FFT
#
# # Inverse transform
# t = np.arange(256)
# sig = np.fft.ifft(sp)
# plt.plot(t, sig)
# # plt.show()
# # Inverse transform, ortho normalization
# t = np.arange(256)
# sig = np.fft.ifft(sp, norm='ortho')
# plt.plot(t, sig)
# plt.show()
# Inverse transform, finer spacing (larger NFFT)
# Round-trip check: ifft(sp) should reproduce sin(t) up to numerical error.
t = np.arange(0,256,0.1)
sig = np.fft.ifft(sp)
plt.plot(t, sig)
plt.show()
# Inverse transform, finer spacing, ortho normalization
# t = np.arange(0,256,0.1)
# sig = np.fft.ifft(sp, norm='ortho')
# plt.plot(t, sig)
# plt.show()
| [
"matplotlib.pyplot.title",
"numpy.fft.ifft",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"matplotlib.pyplot.legend",
"numpy.fft.fftfreq",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.semil... | [((746, 763), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (760, 763), True, 'import numpy as np\n'), ((801, 824), 'numpy.arange', 'np.arange', (['(0)', '(70)', 'delta'], {}), '(0, 70, delta)\n', (810, 824), True, 'import numpy as np\n'), ((871, 895), 'numpy.arange', 'np.arange', (['(0)', '(256)', 'delta'], {}), '(0, 256, delta)\n', (880, 895), True, 'import numpy as np\n'), ((2450, 2487), 'matplotlib.pyplot.plot', 'plt.plot', (['freqs_psd', 'psd'], {'label': '"""PSD"""'}), "(freqs_psd, psd, label='PSD')\n", (2458, 2487), True, 'import matplotlib.pyplot as plt\n'), ((2488, 2528), 'matplotlib.pyplot.plot', 'plt.plot', (['freqs_ps', 'ps', '"""--"""'], {'label': '"""PS"""'}), "(freqs_ps, ps, '--', label='PS')\n", (2496, 2528), True, 'import matplotlib.pyplot as plt\n'), ((2529, 2578), 'matplotlib.pyplot.title', 'plt.title', (['"""Parameter Testing for Welch\'s Method"""'], {}), '("Parameter Testing for Welch\'s Method")\n', (2538, 2578), True, 'import matplotlib.pyplot as plt\n'), ((2579, 2591), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2589, 2591), True, 'import matplotlib.pyplot as plt\n'), ((2592, 2659), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': "(fig_path + filename + '-no_scaling.png')", 'dpi': '(500)'}), "(fname=fig_path + filename + '-no_scaling.png', dpi=500)\n", (2603, 2659), True, 'import matplotlib.pyplot as plt\n'), ((2660, 2670), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2668, 2670), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2721), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['freqs_psd', 'psd'], {'label': '"""PSD"""'}), "(freqs_psd, psd, label='PSD')\n", (2692, 2721), True, 'import matplotlib.pyplot as plt\n'), ((2722, 2766), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['freqs_ps', 'ps', '"""--"""'], {'label': '"""PS"""'}), "(freqs_ps, ps, '--', label='PS')\n", (2734, 2766), True, 'import matplotlib.pyplot as plt\n'), ((2767, 2816), 
'matplotlib.pyplot.title', 'plt.title', (['"""Parameter Testing for Welch\'s Method"""'], {}), '("Parameter Testing for Welch\'s Method")\n', (2776, 2816), True, 'import matplotlib.pyplot as plt\n'), ((2817, 2829), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2827, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2830, 2891), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': "(fig_path + filename + '-logx.png')", 'dpi': '(500)'}), "(fname=fig_path + filename + '-logx.png', dpi=500)\n", (2841, 2891), True, 'import matplotlib.pyplot as plt\n'), ((2892, 2902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2900, 2902), True, 'import matplotlib.pyplot as plt\n'), ((2912, 2953), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['freqs_psd', 'psd'], {'label': '"""PSD"""'}), "(freqs_psd, psd, label='PSD')\n", (2924, 2953), True, 'import matplotlib.pyplot as plt\n'), ((2954, 2998), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['freqs_ps', 'ps', '"""--"""'], {'label': '"""PS"""'}), "(freqs_ps, ps, '--', label='PS')\n", (2966, 2998), True, 'import matplotlib.pyplot as plt\n'), ((2999, 3048), 'matplotlib.pyplot.title', 'plt.title', (['"""Parameter Testing for Welch\'s Method"""'], {}), '("Parameter Testing for Welch\'s Method")\n', (3008, 3048), True, 'import matplotlib.pyplot as plt\n'), ((3049, 3061), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3059, 3061), True, 'import matplotlib.pyplot as plt\n'), ((3062, 3123), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': "(fig_path + filename + '-logy.png')", 'dpi': '(500)'}), "(fname=fig_path + filename + '-logy.png', dpi=500)\n", (3073, 3123), True, 'import matplotlib.pyplot as plt\n'), ((3124, 3134), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3132, 3134), True, 'import matplotlib.pyplot as plt\n'), ((3217, 3268), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['freqs_psd', 'diff'], {'label': '"""PSD/PS ratio"""'}), "(freqs_psd, diff, label='PSD/PS 
ratio')\n", (3229, 3268), True, 'import matplotlib.pyplot as plt\n'), ((3274, 3283), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3281, 3283), True, 'import matplotlib.pyplot as plt\n'), ((3362, 3398), 'matplotlib.pyplot.title', 'plt.title', (['"""Differencing PSD and PS"""'], {}), "('Differencing PSD and PS')\n", (3371, 3398), True, 'import matplotlib.pyplot as plt\n'), ((3399, 3411), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3409, 3411), True, 'import matplotlib.pyplot as plt\n'), ((3412, 3483), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': "(fig_path + 'diff-' + filename + '-logy.png')", 'dpi': '(500)'}), "(fname=fig_path + 'diff-' + filename + '-logy.png', dpi=500)\n", (3423, 3483), True, 'import matplotlib.pyplot as plt\n'), ((3484, 3494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3492, 3494), True, 'import matplotlib.pyplot as plt\n'), ((4549, 4571), 'numpy.arange', 'np.arange', (['(0)', '(256)', '(0.1)'], {}), '(0, 256, 0.1)\n', (4558, 4571), True, 'import numpy as np\n'), ((4604, 4636), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['t.shape[-1]', '(0.1)'], {}), '(t.shape[-1], 0.1)\n', (4618, 4636), True, 'import numpy as np\n'), ((5279, 5301), 'numpy.arange', 'np.arange', (['(0)', '(256)', '(0.1)'], {}), '(0, 256, 0.1)\n', (5288, 5301), True, 'import numpy as np\n'), ((5306, 5321), 'numpy.fft.ifft', 'np.fft.ifft', (['sp'], {}), '(sp)\n', (5317, 5321), True, 'import numpy as np\n'), ((5322, 5338), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sig'], {}), '(t, sig)\n', (5330, 5338), True, 'import matplotlib.pyplot as plt\n'), ((5339, 5349), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5347, 5349), True, 'import matplotlib.pyplot as plt\n'), ((229, 251), 'numpy.fft.fft', 'np.fft.fft', (['series', 'fs'], {}), '(series, fs)\n', (239, 251), True, 'import numpy as np\n'), ((262, 282), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['nfft'], {}), '(nfft)\n', (276, 282), True, 'import numpy as np\n'), ((1278, 
1287), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1284, 1287), True, 'import numpy as np\n'), ((1695, 1704), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1701, 1704), True, 'import numpy as np\n'), ((4586, 4595), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (4592, 4595), True, 'import numpy as np\n'), ((427, 450), 'numpy.fft.ifft', 'np.fft.ifft', (['series', 'fs'], {}), '(series, fs)\n', (438, 450), True, 'import numpy as np\n'), ((375, 398), 'numpy.fft.ifft', 'np.fft.ifft', (['series', 'fs'], {}), '(series, fs)\n', (386, 398), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
from glob import glob
from models.regressors import VGGToHist
from utilities.images import load_images
from tqdm import tqdm
from models.latent_optimizer import VGGFaceProcessing
from models.vgg_face2 import resnet50_scratch_dag
def generate_vgg_descriptors(filenames):
    """Run every image through the pretrained VGGFace2 ResNet-50 and return
    the stacked feature vectors as one numpy array (one row per image)."""
    model = resnet50_scratch_dag('./Trained_model/resnet50_scratch_dag.pth').cuda().eval()
    preprocess = VGGFaceProcessing()

    features = []
    for path in filenames:
        # Load a single-image batch and move it to the GPU.
        batch = torch.from_numpy(load_images([path])).cuda()
        batch = preprocess(batch)
        features.append(model(batch).cpu().detach().numpy())
    return np.concatenate(features, axis=0)
def plot_hist(hist_soft, histc, filenames):
    """Save a two-panel comparison of a soft histogram vs the torch.histc output.

    *filenames* is a single image path; its final path component is appended
    to the output file name under ./diagram/.
    """
    soft_vals = list(hist_soft)
    hard_vals = list(histc)

    n_rows = 2
    # Top panel: the torch.histc reference histogram.
    plt.subplot(n_rows, 1, 1)
    plt.title('torch.histc')
    plt.xlabel('bin_num')
    plt.ylabel('pixel_num')
    plt.plot(hard_vals, color="green", linestyle='-', label='histc')

    # Bottom panel: the differentiable soft histogram.
    plt.subplot(n_rows, 1, 2)
    plt.subplots_adjust(wspace=0, hspace=0.5)
    plt.title('hist_soft')
    plt.xlabel('bin_num')
    plt.ylabel('pixel_num')
    plt.plot(soft_vals, color="red", linestyle='-', label='hist_soft')

    # NOTE(review): splits on backslash only, so this assumes Windows-style paths.
    img_name = filenames.split('\\')[-1]
    plt.savefig('./diagram/plot_histc_' + img_name)
    plt.close('all')
def generate_image_hist(filenames, bins=20):
    """Compute a normalised per-channel colour histogram for every image.

    For each file the image is loaded (pixel values expected in [0, 255])
    and a *bins*-bin histogram is computed independently for the R, G and B
    channels with ``torch.histc``.  Each histogram is divided by the pixel
    count so its bins sum to 1.

    Returns
    -------
    np.ndarray
        Shape ``(len(filenames), 3, bins)``.
    """
    hist_list = []
    for path in filenames:
        image = load_images([path])          # pixel values in [0, 255]
        image = torch.from_numpy(image).cuda()
        r = image[:, 0, :]
        g = image[:, 1, :]
        b = image[:, 2, :]
        # Normalise by the actual pixel count instead of a hard-coded
        # 224 * 224; identical for 224x224 VGG inputs, correct for any size.
        num_pix = r.numel()
        hist = []
        for channel in (r, g, b):
            channel_hist = torch.histc(channel.float(), bins, min=0, max=255).cpu().detach().numpy()
            hist.append(channel_hist / num_pix)
        hist_list.append(hist)
    return np.asarray(hist_list)
def EMDLoss(output, target):
    """Earth Mover's Distance between batched histograms.

    *output* and *target* are ``[batch, channels, bins]`` histograms.  The
    loop accumulates the running difference between the two distributions,
    i.e. the L1 distance between their cumulative sums (CDFs), which is then
    summed over bins and channels and averaged over the batch.
    """
    # Cumulative-difference buffer with one extra leading-zero slot.
    loss = torch.zeros(output.shape[0], output.shape[1], output.shape[2] + 1).cuda()  # [batch, channels, bins + 1]
    # loss[:, :, i] = cumsum(output)[i-1] - cumsum(target)[i-1], built incrementally.
    for i in range(1, output.shape[2] + 1):
        loss[:, :, i] = output[:, :, i - 1] + loss[:, :, i - 1] - target[:, :, i - 1]
    # L1 distance between CDFs: sum of |cumulative differences| over bins.
    loss = loss.abs().sum(dim=2)
    # Sum over histograms
    loss = loss.sum(dim=1)
    # Average over batch
    loss = loss.mean()
    return loss
class hist_Dataset(torch.utils.data.Dataset):
    """Paired dataset of VGG descriptors/histograms and their companions.

    Indexing yields ``(hist, dlatent)`` tuples; the length follows the
    first collection.
    """

    def __init__(self, hists, dlatents):
        self.hists = hists
        self.dlatents = dlatents

    def __len__(self):
        return len(self.hists)

    def __getitem__(self, index):
        return self.hists[index], self.dlatents[index]
# ---- Hyper-parameters (some overridable via environment variables) ----
loss_type = 'MSE'
num_trainsets = 47800  # first N samples train, remainder validate
# NOTE(review): bool() of any non-empty string is True, so IN_SERVER=0 still
# yields True — confirm intended behaviour.
Inserver = bool(os.environ['IN_SERVER']) if ('IN_SERVER' in os.environ) else False
epochs = int(os.environ['EPOCHES']) if ('EPOCHES' in os.environ) else 5000
validation_loss = 0.0  # shown in the progress bar before the first validation pass
bins_num = 10          # histogram bins per colour channel
N_HIDDENS = int(os.environ['N_HIDDENS']) if ('N_HIDDENS' in os.environ) else 2
N_NEURONS = int(os.environ['N_NEURONS']) if ('N_NEURONS' in os.environ) else 8
lr = 0.000001
# ---- Load images and their latent codes ----
directory = "./datasets/"
filenames = sorted(glob(directory + "*.jpg"))
dlatents = np.load(directory + "wp.npy")
# Re-order latents to line up with the sorted file list: each file name
# (without extension) is the integer index of its latent code.
final_dlatents = []
for i in filenames:
    name = int(os.path.splitext(os.path.basename(i))[0])
    final_dlatents.append(dlatents[name])
dlatents = np.array(final_dlatents)
train_dlatents = dlatents[0:num_trainsets]
validation_dlatents = dlatents[num_trainsets:]
# ---- Histograms and VGG descriptors are cached on disk and only
# ---- regenerated when the cache file is missing.
hist_file = directory + 'images_hist_bins=' + str(bins_num) + '.npy'
descriptor_file = directory + 'descriptors.npy'
if os.path.isfile(hist_file):
    hists = np.load(directory + 'images_hist_bins=' + str(bins_num) + '.npy')
else:
    hists = generate_image_hist(filenames, bins_num)
    np.save(hist_file, hists)
if os.path.isfile(descriptor_file):
    descriptor = np.load(directory + "descriptors.npy")
else:
    descriptor = generate_vgg_descriptors(filenames)
    np.save(descriptor_file, descriptor)
# ---- Train / validation split (descriptor -> histogram pairs) ----
train_descriptors = descriptor[0:num_trainsets]
validation_descriptors = descriptor[num_trainsets:]
train_hist = hists[0:num_trainsets]
validation_hist = hists[num_trainsets:]
train_dataset = hist_Dataset(train_descriptors, train_hist)
validation_dataset = hist_Dataset(validation_descriptors, validation_hist)
train_generator = torch.utils.data.DataLoader(train_dataset, batch_size=32)
validation_generator = torch.utils.data.DataLoader(validation_dataset, batch_size=32)
# Regressor mapping a VGG face descriptor to a colour histogram.
vgg_to_hist = VGGToHist(bins_num, BN=True, N_HIDDENS=N_HIDDENS, N_NEURONS=N_NEURONS).cuda()
optimizer = torch.optim.Adam(vgg_to_hist.parameters(), lr)
if loss_type == 'MSE':
    criterion = torch.nn.MSELoss()
progress_bar = tqdm(range(epochs))
Loss_list = []  # NOTE(review): appears unused below
Traning_loss = []
Validation_loss = []
for epoch in progress_bar:
    # ---- Training pass ----
    running_loss = 0.0
    vgg_to_hist.train()
    for i, (vgg_descriptors, hists) in enumerate(train_generator, 1):
        optimizer.zero_grad()
        vgg_descriptors, hists = vgg_descriptors.cuda(), hists.cuda()
        pred_hist = vgg_to_hist(vgg_descriptors)
        if loss_type == 'MSE':
            loss = criterion(pred_hist, hists)
        elif loss_type == 'EMD':
            loss = EMDLoss(pred_hist, hists)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        progress_bar.set_description(
            "Step: {0}, Loss: {1:4f}, Validation Loss: {2:4f}".format(i, running_loss / i, validation_loss))
    traning_loss = running_loss / i
    Traning_loss.append(traning_loss)
    # ---- Validation pass (gradients disabled) ----
    validation_loss = 0.0
    vgg_to_hist.eval()
    for i, (vgg_descriptors, hists) in enumerate(validation_generator, 1):
        with torch.no_grad():
            vgg_descriptors, hists = vgg_descriptors.cuda(), hists.cuda()
            pred_hist = vgg_to_hist(vgg_descriptors)
            if loss_type == 'MSE':
                loss = criterion(pred_hist, hists)
            elif loss_type == 'EMD':
                loss = EMDLoss(pred_hist, hists)
            validation_loss += loss.item()
    validation_loss = validation_loss / i
    Validation_loss.append(validation_loss)
    progress_bar.set_description(
        "Step: {0}, Loss: {1:4f}, Validation Loss: {2:4f}".format(i, running_loss / i, validation_loss))
# ---- Plot the per-epoch loss curves and persist the trained model ----
y1 = Traning_loss
y2 = Validation_loss
plt.subplot(2, 1, 1)
plt.plot(y1, 'o-')
plt.title('training loss vs. epoches')
plt.ylabel('training loss')
plt.subplot(2, 1, 2)
plt.plot(y2, '.-')
plt.xlabel('validation loss vs. epoches')
plt.ylabel('validation loss')
save_dir = "./diagram/vgg_to_Histogram_accuracy_loss_lr=" + str(lr) + str(bins_num) + '_N_HIDDENS=' + str(
    N_HIDDENS) + '_NEURONS=' + str(N_NEURONS) + ".jpg"
plt.savefig(save_dir)
# Weights file name encodes the architecture so runs don't overwrite each other.
torch.save(vgg_to_hist.state_dict(),
           './Trained_model/vgg_to_hist_bins=' + str(bins_num) + '_HIDDENS=' + str(N_HIDDENS) + '_NEURONS=' + str(
               N_NEURONS) + '.pt')
| [
"matplotlib.pyplot.title",
"numpy.load",
"models.regressors.VGGToHist",
"os.path.isfile",
"glob.glob",
"torch.no_grad",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.close",
"torch.zeros",
"numpy.save",
"models.latent_optimizer.VGGFaceProcessing",
"os.path.basename",
... | [((3610, 3639), 'numpy.load', 'np.load', (["(directory + 'wp.npy')"], {}), "(directory + 'wp.npy')\n", (3617, 3639), True, 'import numpy as np\n'), ((3790, 3814), 'numpy.array', 'np.array', (['final_dlatents'], {}), '(final_dlatents)\n', (3798, 3814), True, 'import numpy as np\n'), ((4026, 4051), 'os.path.isfile', 'os.path.isfile', (['hist_file'], {}), '(hist_file)\n', (4040, 4051), False, 'import os\n'), ((4225, 4256), 'os.path.isfile', 'os.path.isfile', (['descriptor_file'], {}), '(descriptor_file)\n', (4239, 4256), False, 'import os\n'), ((4744, 4801), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': '(32)'}), '(train_dataset, batch_size=32)\n', (4771, 4801), False, 'import torch\n'), ((4825, 4887), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validation_dataset'], {'batch_size': '(32)'}), '(validation_dataset, batch_size=32)\n', (4852, 4887), False, 'import torch\n'), ((6706, 6726), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (6717, 6726), True, 'import matplotlib.pyplot as plt\n'), ((6727, 6745), 'matplotlib.pyplot.plot', 'plt.plot', (['y1', '"""o-"""'], {}), "(y1, 'o-')\n", (6735, 6745), True, 'import matplotlib.pyplot as plt\n'), ((6746, 6784), 'matplotlib.pyplot.title', 'plt.title', (['"""training loss vs. epoches"""'], {}), "('training loss vs. 
epoches')\n", (6755, 6784), True, 'import matplotlib.pyplot as plt\n'), ((6785, 6812), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""training loss"""'], {}), "('training loss')\n", (6795, 6812), True, 'import matplotlib.pyplot as plt\n'), ((6813, 6833), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (6824, 6833), True, 'import matplotlib.pyplot as plt\n'), ((6834, 6852), 'matplotlib.pyplot.plot', 'plt.plot', (['y2', '""".-"""'], {}), "(y2, '.-')\n", (6842, 6852), True, 'import matplotlib.pyplot as plt\n'), ((6853, 6894), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""validation loss vs. epoches"""'], {}), "('validation loss vs. epoches')\n", (6863, 6894), True, 'import matplotlib.pyplot as plt\n'), ((6895, 6924), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""validation loss"""'], {}), "('validation loss')\n", (6905, 6924), True, 'import matplotlib.pyplot as plt\n'), ((7087, 7108), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_dir'], {}), '(save_dir)\n', (7098, 7108), True, 'import matplotlib.pyplot as plt\n'), ((486, 505), 'models.latent_optimizer.VGGFaceProcessing', 'VGGFaceProcessing', ([], {}), '()\n', (503, 505), False, 'from models.latent_optimizer import VGGFaceProcessing\n'), ((756, 791), 'numpy.concatenate', 'np.concatenate', (['descriptors'], {'axis': '(0)'}), '(descriptors, axis=0)\n', (770, 791), True, 'import numpy as np\n'), ((918, 947), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subdiagram', '(1)', '(1)'], {}), '(subdiagram, 1, 1)\n', (929, 947), True, 'import matplotlib.pyplot as plt\n'), ((952, 976), 'matplotlib.pyplot.title', 'plt.title', (['"""torch.histc"""'], {}), "('torch.histc')\n", (961, 976), True, 'import matplotlib.pyplot as plt\n'), ((981, 1002), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""bin_num"""'], {}), "('bin_num')\n", (991, 1002), True, 'import matplotlib.pyplot as plt\n'), ((1007, 1030), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pixel_num"""'], {}), 
"('pixel_num')\n", (1017, 1030), True, 'import matplotlib.pyplot as plt\n'), ((1035, 1095), 'matplotlib.pyplot.plot', 'plt.plot', (['histc'], {'color': '"""green"""', 'linestyle': '"""-"""', 'label': '"""histc"""'}), "(histc, color='green', linestyle='-', label='histc')\n", (1043, 1095), True, 'import matplotlib.pyplot as plt\n'), ((1100, 1129), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subdiagram', '(1)', '(2)'], {}), '(subdiagram, 1, 2)\n', (1111, 1129), True, 'import matplotlib.pyplot as plt\n'), ((1134, 1175), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)', 'hspace': '(0.5)'}), '(wspace=0, hspace=0.5)\n', (1153, 1175), True, 'import matplotlib.pyplot as plt\n'), ((1180, 1202), 'matplotlib.pyplot.title', 'plt.title', (['"""hist_soft"""'], {}), "('hist_soft')\n", (1189, 1202), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""bin_num"""'], {}), "('bin_num')\n", (1217, 1228), True, 'import matplotlib.pyplot as plt\n'), ((1233, 1256), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pixel_num"""'], {}), "('pixel_num')\n", (1243, 1256), True, 'import matplotlib.pyplot as plt\n'), ((1261, 1327), 'matplotlib.pyplot.plot', 'plt.plot', (['hist_soft'], {'color': '"""red"""', 'linestyle': '"""-"""', 'label': '"""hist_soft"""'}), "(hist_soft, color='red', linestyle='-', label='hist_soft')\n", (1269, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1465), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_path'], {}), '(plot_path)\n', (1454, 1465), True, 'import matplotlib.pyplot as plt\n'), ((1470, 1486), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1479, 1486), True, 'import matplotlib.pyplot as plt\n'), ((2251, 2272), 'numpy.asarray', 'np.asarray', (['hist_list'], {}), '(hist_list)\n', (2261, 2272), True, 'import numpy as np\n'), ((3572, 3597), 'glob.glob', 'glob', (["(directory + '*.jpg')"], {}), "(directory + '*.jpg')\n", (3576, 
3597), False, 'from glob import glob\n'), ((4195, 4220), 'numpy.save', 'np.save', (['hist_file', 'hists'], {}), '(hist_file, hists)\n', (4202, 4220), True, 'import numpy as np\n'), ((4275, 4313), 'numpy.load', 'np.load', (["(directory + 'descriptors.npy')"], {}), "(directory + 'descriptors.npy')\n", (4282, 4313), True, 'import numpy as np\n'), ((4377, 4413), 'numpy.save', 'np.save', (['descriptor_file', 'descriptor'], {}), '(descriptor_file, descriptor)\n', (4384, 4413), True, 'import numpy as np\n'), ((5078, 5096), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (5094, 5096), False, 'import torch\n'), ((546, 562), 'utilities.images.load_images', 'load_images', (['[i]'], {}), '([i])\n', (557, 562), False, 'from utilities.images import load_images\n'), ((1593, 1609), 'utilities.images.load_images', 'load_images', (['[i]'], {}), '([i])\n', (1604, 1609), False, 'from utilities.images import load_images\n'), ((4902, 4972), 'models.regressors.VGGToHist', 'VGGToHist', (['bins_num'], {'BN': '(True)', 'N_HIDDENS': 'N_HIDDENS', 'N_NEURONS': 'N_NEURONS'}), '(bins_num, BN=True, N_HIDDENS=N_HIDDENS, N_NEURONS=N_NEURONS)\n', (4911, 4972), False, 'from models.regressors import VGGToHist\n'), ((2336, 2402), 'torch.zeros', 'torch.zeros', (['output.shape[0]', 'output.shape[1]', '(output.shape[2] + 1)'], {}), '(output.shape[0], output.shape[1], output.shape[2] + 1)\n', (2347, 2402), False, 'import torch\n'), ((6081, 6096), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6094, 6096), False, 'import torch\n'), ((579, 602), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (595, 602), False, 'import torch\n'), ((1650, 1673), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1666, 1673), False, 'import torch\n'), ((3712, 3731), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (3728, 3731), False, 'import os\n'), ((365, 429), 'models.vgg_face2.resnet50_scratch_dag', 'resnet50_scratch_dag', 
(['"""./Trained_model/resnet50_scratch_dag.pth"""'], {}), "('./Trained_model/resnet50_scratch_dag.pth')\n", (385, 429), False, 'from models.vgg_face2 import resnet50_scratch_dag\n')] |
#!/usr/bin/env python3
#
# collect_accuracies_with_sdev.py: collects accuracies and standard
# deviations of all graph kernels from a large CSV file, and stores
# them in single files (one per data set).

import os
import argparse

import numpy as np
import pandas as pd

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('FILE', type=str, help='Input file')
    args = parser.parse_args()

    df = pd.read_csv(args.FILE, header=0, index_col=0)

    for column in df.columns:
        values = df[column].values

        # Will contain the accuracies (first column), followed by the
        # standard deviations (second column). Entries are of the form
        # "<accuracy> +- <sdev>"; missing cells are skipped.
        data = []
        for value in values:
            # `pd.isna` reliably detects missing values; the previous
            # identity check (`value is not np.nan`) only works when the
            # cell holds the exact `np.nan` singleton object.
            if not pd.isna(value):
                x, y = value.split('+-')
                data.append((float(x.strip()), float(y.strip())))

        data = np.array(data)
        print(column)

        # Ensure the output directory exists; `exist_ok=True` avoids the
        # check-then-create race of exists() followed by makedirs().
        os.makedirs('../output/sdev/', exist_ok=True)
        np.savetxt(f'../output/sdev/{column}.txt', data, fmt='%2.2f')
| [
"argparse.ArgumentParser",
"os.makedirs",
"pandas.read_csv",
"numpy.savetxt",
"os.path.exists",
"numpy.array"
] | [((313, 338), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (336, 338), False, 'import argparse\n'), ((442, 487), 'pandas.read_csv', 'pd.read_csv', (['args.FILE'], {'header': '(0)', 'index_col': '(0)'}), '(args.FILE, header=0, index_col=0)\n', (453, 487), True, 'import pandas as pd\n'), ((925, 939), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (933, 939), True, 'import numpy as np\n'), ((1136, 1197), 'numpy.savetxt', 'np.savetxt', (['f"""../output/sdev/{column}.txt"""', 'data'], {'fmt': '"""%2.2f"""'}), "(f'../output/sdev/{column}.txt', data, fmt='%2.2f')\n", (1146, 1197), True, 'import numpy as np\n'), ((1048, 1081), 'os.path.exists', 'os.path.exists', (['"""../output/sdev/"""'], {}), "('../output/sdev/')\n", (1062, 1081), False, 'import os\n'), ((1095, 1125), 'os.makedirs', 'os.makedirs', (['"""../output/sdev/"""'], {}), "('../output/sdev/')\n", (1106, 1125), False, 'import os\n')] |
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.program import memory_release_manager
from tensorflow_federated.python.program import test_utils
class MemoryReleaseManagerTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for `memory_release_manager.MemoryReleaseManager`."""

  # pyformat: disable
  @parameterized.named_parameters(
      ('none', None, None),
      ('bool', True, True),
      ('int', 1, 1),
      ('str', 'a', 'a'),
      ('list', [True, 1, 'a'], [True, 1, 'a']),
      ('list_empty', [], []),
      ('list_nested', [[True, 1], ['a']], [[True, 1], ['a']]),
      ('dict', {'a': True, 'b': 1, 'c': 'a'}, {'a': True, 'b': 1, 'c': 'a'}),
      ('dict_empty', {}, {}),
      ('dict_nested',
       {'x': {'a': True, 'b': 1}, 'y': {'c': 'a'}},
       {'x': {'a': True, 'b': 1}, 'y': {'c': 'a'}}),
      ('attr',
       test_utils.TestAttrObject1(True, 1),
       test_utils.TestAttrObject1(True, 1)),
      ('attr_nested',
       {'a': [test_utils.TestAttrObject1(True, 1)],
        'b': test_utils.TestAttrObject2('a')},
       {'a': [test_utils.TestAttrObject1(True, 1)],
        'b': test_utils.TestAttrObject2('a')}),
      ('tensor_int', tf.constant(1), tf.constant(1)),
      ('tensor_str', tf.constant('a'), tf.constant('a')),
      ('tensor_2d', tf.ones((2, 3)), tf.ones((2, 3))),
      ('tensor_nested',
       {'a': [tf.constant(True), tf.constant(1)], 'b': [tf.constant('a')]},
       {'a': [tf.constant(True), tf.constant(1)], 'b': [tf.constant('a')]}),
      ('numpy_int', np.int32(1), np.int32(1)),
      ('numpy_2d', np.ones((2, 3)), np.ones((2, 3))),
      # `np.bool` was a deprecated alias removed in NumPy 1.24; use the
      # scalar type `np.bool_` instead.
      ('numpy_nested',
       {'a': [np.bool_(True), np.int32(1)], 'b': [np.str_('a')]},
       {'a': [np.bool_(True), np.int32(1)], 'b': [np.str_('a')]}),
      ('materializable_value_reference_tensor',
       test_utils.TestMaterializableValueReference(1), 1),
      ('materializable_value_reference_sequence',
       test_utils.TestMaterializableValueReference(
           tf.data.Dataset.from_tensor_slices([1, 2, 3])),
       tf.data.Dataset.from_tensor_slices([1, 2, 3])),
      ('materializable_value_reference_nested',
       {'a': [test_utils.TestMaterializableValueReference(True),
              test_utils.TestMaterializableValueReference(1)],
        'b': [test_utils.TestMaterializableValueReference('a')]},
       {'a': [True, 1], 'b': ['a']}),
      ('materializable_value_reference_and_materialized_value',
       [1, test_utils.TestMaterializableValueReference(2)],
       [1, 2]),
  )
  # pyformat: enable
  def test_release_saves_value(self, value, expected_value):
    """Releasing `value` stores the materialized value under the key."""
    release_mngr = memory_release_manager.MemoryReleaseManager()

    release_mngr.release(value, 1)

    self.assertLen(release_mngr._values, 1)
    actual_value = release_mngr._values[1]
    # Datasets do not support element-wise equality directly; compare
    # their materialized contents instead.
    if (isinstance(actual_value, tf.data.Dataset) and
        isinstance(expected_value, tf.data.Dataset)):
      self.assertEqual(list(actual_value), list(expected_value))
    else:
      self.assertAllEqual(actual_value, expected_value)

  @parameterized.named_parameters(
      ('none', None),
      ('bool', True),
      ('int', 1),
      ('str', 'a'),
  )
  def test_release_saves_key(self, key):
    """Any hashable key is accepted and stored verbatim."""
    release_mngr = memory_release_manager.MemoryReleaseManager()

    release_mngr.release(1, key)

    self.assertLen(release_mngr._values, 1)
    self.assertIn(key, release_mngr._values)

  @parameterized.named_parameters(
      ('list', []),
      ('dict', {}),
      ('orderd_dict', collections.OrderedDict()),
  )
  def test_release_raises_type_error_with_key(self, key):
    """Unhashable keys are rejected with a `TypeError`."""
    release_mngr = memory_release_manager.MemoryReleaseManager()

    with self.assertRaises(TypeError):
      release_mngr.release(1, key)

  @parameterized.named_parameters(
      ('0', 0),
      ('1', 1),
      ('10', 10),
  )
  def test_values_returns_values(self, count):
    """`values()` reflects every value previously released."""
    release_mngr = memory_release_manager.MemoryReleaseManager()
    for i in range(count):
      release_mngr._values[i] = i * 10

    values = release_mngr.values()

    self.assertEqual(values, {i: i * 10 for i in range(count)})

  def test_values_returns_copy(self):
    """`values()` returns a fresh copy on each call, not internal state."""
    release_mngr = memory_release_manager.MemoryReleaseManager()

    values_1 = release_mngr.values()
    values_2 = release_mngr.values()
    self.assertIsNot(values_1, values_2)
# Run the absl test runner when this file is executed as a script.
if __name__ == '__main__':
  absltest.main()
| [
"absl.testing.absltest.main",
"tensorflow.ones",
"tensorflow_federated.python.program.test_utils.TestMaterializableValueReference",
"numpy.str_",
"numpy.ones",
"tensorflow.constant",
"tensorflow_federated.python.program.memory_release_manager.MemoryReleaseManager",
"tensorflow.data.Dataset.from_tensor... | [((3664, 3756), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('none', None)", "('bool', True)", "('int', 1)", "('str', 'a')"], {}), "(('none', None), ('bool', True), ('int', 1),\n ('str', 'a'))\n", (3694, 3756), False, 'from absl.testing import parameterized\n'), ((4344, 4406), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('0', 0)", "('1', 1)", "('10', 10)"], {}), "(('0', 0), ('1', 1), ('10', 10))\n", (4374, 4406), False, 'from absl.testing import parameterized\n'), ((4960, 4975), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4973, 4975), False, 'from absl.testing import absltest\n'), ((3251, 3296), 'tensorflow_federated.python.program.memory_release_manager.MemoryReleaseManager', 'memory_release_manager.MemoryReleaseManager', ([], {}), '()\n', (3294, 3296), False, 'from tensorflow_federated.python.program import memory_release_manager\n'), ((3842, 3887), 'tensorflow_federated.python.program.memory_release_manager.MemoryReleaseManager', 'memory_release_manager.MemoryReleaseManager', ([], {}), '()\n', (3885, 3887), False, 'from tensorflow_federated.python.program import memory_release_manager\n'), ((4219, 4264), 'tensorflow_federated.python.program.memory_release_manager.MemoryReleaseManager', 'memory_release_manager.MemoryReleaseManager', ([], {}), '()\n', (4262, 4264), False, 'from tensorflow_federated.python.program import memory_release_manager\n'), ((4496, 4541), 'tensorflow_federated.python.program.memory_release_manager.MemoryReleaseManager', 'memory_release_manager.MemoryReleaseManager', ([], {}), '()\n', (4539, 4541), False, 'from tensorflow_federated.python.program import memory_release_manager\n'), ((4767, 4812), 'tensorflow_federated.python.program.memory_release_manager.MemoryReleaseManager', 'memory_release_manager.MemoryReleaseManager', ([], {}), '()\n', (4810, 4812), False, 'from 
tensorflow_federated.python.program import memory_release_manager\n'), ((1501, 1536), 'tensorflow_federated.python.program.test_utils.TestAttrObject1', 'test_utils.TestAttrObject1', (['(True)', '(1)'], {}), '(True, 1)\n', (1527, 1536), False, 'from tensorflow_federated.python.program import test_utils\n'), ((1545, 1580), 'tensorflow_federated.python.program.test_utils.TestAttrObject1', 'test_utils.TestAttrObject1', (['(True)', '(1)'], {}), '(True, 1)\n', (1571, 1580), False, 'from tensorflow_federated.python.program import test_utils\n'), ((1825, 1839), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (1836, 1839), True, 'import tensorflow as tf\n'), ((1841, 1855), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (1852, 1855), True, 'import tensorflow as tf\n'), ((1879, 1895), 'tensorflow.constant', 'tf.constant', (['"""a"""'], {}), "('a')\n", (1890, 1895), True, 'import tensorflow as tf\n'), ((1897, 1913), 'tensorflow.constant', 'tf.constant', (['"""a"""'], {}), "('a')\n", (1908, 1913), True, 'import tensorflow as tf\n'), ((1936, 1951), 'tensorflow.ones', 'tf.ones', (['(2, 3)'], {}), '((2, 3))\n', (1943, 1951), True, 'import tensorflow as tf\n'), ((1953, 1968), 'tensorflow.ones', 'tf.ones', (['(2, 3)'], {}), '((2, 3))\n', (1960, 1968), True, 'import tensorflow as tf\n'), ((2168, 2179), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (2176, 2179), True, 'import numpy as np\n'), ((2181, 2192), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (2189, 2192), True, 'import numpy as np\n'), ((2214, 2229), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (2221, 2229), True, 'import numpy as np\n'), ((2231, 2246), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (2238, 2246), True, 'import numpy as np\n'), ((2458, 2504), 'tensorflow_federated.python.program.test_utils.TestMaterializableValueReference', 'test_utils.TestMaterializableValueReference', (['(1)'], {}), '(1)\n', (2501, 2504), False, 'from 
tensorflow_federated.python.program import test_utils\n'), ((2678, 2723), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2712, 2723), True, 'import tensorflow as tf\n'), ((4110, 4135), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4133, 4135), False, 'import collections\n'), ((1670, 1701), 'tensorflow_federated.python.program.test_utils.TestAttrObject2', 'test_utils.TestAttrObject2', (['"""a"""'], {}), "('a')\n", (1696, 1701), False, 'from tensorflow_federated.python.program import test_utils\n'), ((1769, 1800), 'tensorflow_federated.python.program.test_utils.TestAttrObject2', 'test_utils.TestAttrObject2', (['"""a"""'], {}), "('a')\n", (1795, 1800), False, 'from tensorflow_federated.python.program import test_utils\n'), ((2623, 2668), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2657, 2668), True, 'import tensorflow as tf\n'), ((3081, 3127), 'tensorflow_federated.python.program.test_utils.TestMaterializableValueReference', 'test_utils.TestMaterializableValueReference', (['(2)'], {}), '(2)\n', (3124, 3127), False, 'from tensorflow_federated.python.program import test_utils\n'), ((1619, 1654), 'tensorflow_federated.python.program.test_utils.TestAttrObject1', 'test_utils.TestAttrObject1', (['(True)', '(1)'], {}), '(True, 1)\n', (1645, 1654), False, 'from tensorflow_federated.python.program import test_utils\n'), ((1718, 1753), 'tensorflow_federated.python.program.test_utils.TestAttrObject1', 'test_utils.TestAttrObject1', (['(True)', '(1)'], {}), '(True, 1)\n', (1744, 1753), False, 'from tensorflow_federated.python.program import test_utils\n'), ((2009, 2026), 'tensorflow.constant', 'tf.constant', (['(True)'], {}), '(True)\n', (2020, 2026), True, 'import tensorflow as tf\n'), ((2028, 2042), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (2039, 2042), True, 'import tensorflow 
as tf\n'), ((2051, 2067), 'tensorflow.constant', 'tf.constant', (['"""a"""'], {}), "('a')\n", (2062, 2067), True, 'import tensorflow as tf\n'), ((2085, 2102), 'tensorflow.constant', 'tf.constant', (['(True)'], {}), '(True)\n', (2096, 2102), True, 'import tensorflow as tf\n'), ((2104, 2118), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (2115, 2118), True, 'import tensorflow as tf\n'), ((2127, 2143), 'tensorflow.constant', 'tf.constant', (['"""a"""'], {}), "('a')\n", (2138, 2143), True, 'import tensorflow as tf\n'), ((2286, 2299), 'numpy.bool', 'np.bool', (['(True)'], {}), '(True)\n', (2293, 2299), True, 'import numpy as np\n'), ((2301, 2312), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (2309, 2312), True, 'import numpy as np\n'), ((2321, 2333), 'numpy.str_', 'np.str_', (['"""a"""'], {}), "('a')\n", (2328, 2333), True, 'import numpy as np\n'), ((2351, 2364), 'numpy.bool', 'np.bool', (['(True)'], {}), '(True)\n', (2358, 2364), True, 'import numpy as np\n'), ((2366, 2377), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (2374, 2377), True, 'import numpy as np\n'), ((2386, 2398), 'numpy.str_', 'np.str_', (['"""a"""'], {}), "('a')\n", (2393, 2398), True, 'import numpy as np\n'), ((2788, 2837), 'tensorflow_federated.python.program.test_utils.TestMaterializableValueReference', 'test_utils.TestMaterializableValueReference', (['(True)'], {}), '(True)\n', (2831, 2837), False, 'from tensorflow_federated.python.program import test_utils\n'), ((2853, 2899), 'tensorflow_federated.python.program.test_utils.TestMaterializableValueReference', 'test_utils.TestMaterializableValueReference', (['(1)'], {}), '(1)\n', (2896, 2899), False, 'from tensorflow_federated.python.program import test_utils\n'), ((2916, 2964), 'tensorflow_federated.python.program.test_utils.TestMaterializableValueReference', 'test_utils.TestMaterializableValueReference', (['"""a"""'], {}), "('a')\n", (2959, 2964), False, 'from tensorflow_federated.python.program import test_utils\n')] |
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from nomad.datamodel import EntryArchive
from atomisticparsers.lammps import LammpsParser
def approx(value, abs=0, rel=1e-6):
    """Wrap ``pytest.approx`` with this module's default tolerances."""
    tolerance_kwargs = {'abs': abs, 'rel': rel}
    return pytest.approx(value, **tolerance_kwargs)
@pytest.fixture(scope='module')
def parser():
    """Provide one `LammpsParser` instance shared by all tests in this module."""
    return LammpsParser()
def test_nvt(parser):
    """Parse an NVT run log and spot-check program, workflow, method,
    system and calculation sections against known reference values."""
    archive = EntryArchive()
    parser.parse('tests/data/lammps/hexane_cyclohexane/log.hexane_cyclohexane_nvt', archive, None)
    sec_run = archive.run[0]
    assert sec_run.program.version == '14 May 2016'
    # Workflow metadata: integrator time step, thermostat target, ensemble.
    sec_workflow = archive.workflow[0]
    assert sec_workflow.type == 'molecular_dynamics'
    assert sec_workflow.molecular_dynamics.x_lammps_integrator_dt.magnitude == 2.5e-16
    assert sec_workflow.molecular_dynamics.x_lammps_thermostat_target_temperature.magnitude == 300.
    assert sec_workflow.molecular_dynamics.ensemble_type == 'NVT'
    # Force-field contributions read from the input/log.
    sec_method = sec_run.method[0]
    assert len(sec_method.force_field.model[0].contributions) == 4
    assert sec_method.force_field.model[0].contributions[2].type == 'harmonic'
    assert sec_method.force_field.model[0].contributions[0].parameters[0][2] == 0.066
    # One system section per trajectory frame.
    sec_system = sec_run.system
    assert len(sec_system) == 201
    assert sec_system[5].atoms.lattice_vectors[1][1].magnitude == approx(2.24235e-09)
    assert False not in sec_system[0].atoms.periodic
    assert sec_system[80].atoms.labels[91:96] == ['H', 'H', 'H', 'C', 'C'] # JFR - not reading labels correctly
    # One calculation section per thermo output step.
    sec_scc = sec_run.calculation
    assert len(sec_scc) == 201
    assert sec_scc[21].energy.current.value.magnitude == approx(8.86689197e-18)
    assert sec_scc[180].time_calculation.magnitude == 218.5357
    assert sec_scc[56].thermodynamics[0].pressure.magnitude == approx(-77642135.4975)
    assert sec_scc[103].thermodynamics[0].temperature.magnitude == 291.4591
    assert sec_scc[11].thermodynamics[0].time_step == 4400
    assert len(sec_scc[1].energy.contributions) == 9
    assert sec_scc[112].energy.contributions[8].kind == 'kspace long range'
    assert sec_scc[96].energy.contributions[2].value.magnitude == approx(1.19666271e-18)
    assert sec_scc[47].energy.contributions[4].value.magnitude == approx(1.42166035e-18)
    assert sec_run.x_lammps_section_control_parameters[0].x_lammps_inout_control_atomstyle == 'full'
def test_thermo_format(parser):
    """Parse the 1-methyl-naphthalene log and spot-check thermo output."""
    archive = EntryArchive()
    parser.parse('tests/data/lammps/1_methyl_naphthalene/log.1_methyl_naphthalene', archive, None)

    run = archive.run[0]
    calculations = run.calculation
    assert len(calculations) == 301
    assert calculations[98].energy.total.value.magnitude == approx(1.45322428e-17)
    assert len(run.system) == 4
def test_traj_xyz(parser):
    """Trajectory positions from an xyz dump file are parsed correctly."""
    archive = EntryArchive()
    parser.parse('tests/data/lammps/methane_xyz/log.methane_nvt_traj_xyz_thermo_style_custom', archive, None)

    systems = archive.run[0].system
    assert len(systems) == 201
    assert systems[13].atoms.positions[7][0].magnitude == approx(-8.00436e-10)
def test_traj_dcd(parser):
    """Positions and labels from a dcd trajectory file are parsed correctly."""
    archive = EntryArchive()
    parser.parse('tests/data/lammps/methane_dcd/log.methane_nvt_traj_dcd_thermo_style_custom', archive, None)

    run = archive.run[0]
    assert len(run.calculation) == 201
    systems = run.system
    assert np.shape(systems[56].atoms.positions) == (320, 3)
    assert len(systems[107].atoms.labels) == 320
def test_unwrapped_pos(parser):
    """Unwrapped positions and velocities are read from the xyz dump."""
    archive = EntryArchive()
    parser.parse('tests/data/lammps/1_xyz_files/log.lammps', archive, None)

    assert len(archive.run[0].calculation) == 101
    systems = archive.run[0].system
    # NOTE(review): original JFR remark — units may be incorrect here.
    assert systems[1].atoms.positions[452][2].magnitude == approx(5.99898)
    # NOTE(review): original JFR remark — velocities are not being read!!
    assert systems[2].atoms.velocities[457][-2].magnitude == approx(-0.928553)
def test_multiple_dump(parser):
    """A run writing two separate xyz dump files is parsed correctly."""
    archive = EntryArchive()
    parser.parse('tests/data/lammps/2_xyz_files/log.lammps', archive, None)

    systems = archive.run[0].system
    assert len(systems) == 101
    assert systems[2].atoms.positions[468][0].magnitude == approx(3.00831)
    # NOTE(review): original JFR remark — universe cannot be built without positions.
    assert systems[-1].atoms.velocities[72][1].magnitude == approx(-4.61496)
def test_md_atomsgroup(parser):
    """Check the hierarchical atoms_group topology (molecule group ->
    molecule -> monomer) built for a polymer-melt minimization run."""
    archive = EntryArchive()
    parser.parse('tests/data/lammps/polymer_melt/log.step4.0_minimization', archive, None)
    sec_run = archive.run[0]
    sec_systems = sec_run.system
    # Top level: a single molecule group containing 100 molecules.
    assert len(sec_systems[0].atoms_group) == 1
    assert len(sec_systems[0].atoms_group[0].atoms_group) == 100
    assert sec_systems[0].atoms_group[0].label == 'seg_0_0'
    assert sec_systems[0].atoms_group[0].type == 'molecule_group'
    assert sec_systems[0].atoms_group[0].index == 0
    assert sec_systems[0].atoms_group[0].composition_formula == '0(100)'
    assert sec_systems[0].atoms_group[0].n_atoms == 7200
    assert sec_systems[0].atoms_group[0].atom_indices[5] == 5
    assert sec_systems[0].atoms_group[0].is_molecule is False
    # Second level: individual molecules of 72 atoms each.
    assert sec_systems[0].atoms_group[0].atoms_group[52].label == '0'
    assert sec_systems[0].atoms_group[0].atoms_group[52].type == 'molecule'
    assert sec_systems[0].atoms_group[0].atoms_group[52].index == 52
    assert sec_systems[0].atoms_group[0].atoms_group[52].composition_formula == '1(1)2(1)3(1)4(1)5(1)6(1)7(1)8(1)9(1)10(1)'
    assert sec_systems[0].atoms_group[0].atoms_group[52].n_atoms == 72
    assert sec_systems[0].atoms_group[0].atoms_group[52].atom_indices[8] == 3752
    assert sec_systems[0].atoms_group[0].atoms_group[52].is_molecule is True
    # Third level: monomers of 7 atoms inside each molecule.
    assert sec_systems[0].atoms_group[0].atoms_group[76].atoms_group[7].label == '8'
    assert sec_systems[0].atoms_group[0].atoms_group[76].atoms_group[7].type == 'monomer'
    assert sec_systems[0].atoms_group[0].atoms_group[76].atoms_group[7].index == 7
    assert sec_systems[0].atoms_group[0].atoms_group[76].atoms_group[7].composition_formula == '1(4)4(2)6(1)'
    assert sec_systems[0].atoms_group[0].atoms_group[76].atoms_group[7].n_atoms == 7
    assert sec_systems[0].atoms_group[0].atoms_group[76].atoms_group[7].atom_indices[5] == 5527
    assert sec_systems[0].atoms_group[0].atoms_group[76].atoms_group[7].is_molecule is False
def test_rdf(parser):
    """Verify molecular radial distribution functions computed for the
    NVT run and stored as workflow ensemble properties."""
    archive = EntryArchive()
    parser.parse('tests/data/lammps/hexane_cyclohexane/log.hexane_cyclohexane_nvt', archive, None)
    sec_workflow = archive.workflow[0]
    section_MD = sec_workflow.molecular_dynamics
    assert section_MD.ensemble_properties.label == 'molecular radial distribution functions'
    assert section_MD.ensemble_properties.n_smooth == 6
    # First pair type (0-0): bins are distances, values are g(r).
    assert section_MD.ensemble_properties.types[0] == '0-0'
    assert section_MD.ensemble_properties.variables_name[1][0] == 'distance'
    assert section_MD.ensemble_properties.bins[0][0][122] == approx(9.380497525533041)
    assert section_MD.ensemble_properties.values[0][96] == approx(3.0716656057349994)
    # Second pair type (1-0).
    assert section_MD.ensemble_properties.types[1] == '1-0'
    assert section_MD.ensemble_properties.variables_name[1][0] == 'distance'
    assert section_MD.ensemble_properties.bins[1][0][102] == approx(7.88559752146403)
    assert section_MD.ensemble_properties.values[1][55] == approx(0.053701564112436415)
| [
"nomad.datamodel.EntryArchive",
"pytest.fixture",
"numpy.shape",
"pytest.approx",
"atomisticparsers.lammps.LammpsParser"
] | [((872, 902), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (886, 902), False, 'import pytest\n'), ((830, 868), 'pytest.approx', 'pytest.approx', (['value'], {'abs': 'abs', 'rel': 'rel'}), '(value, abs=abs, rel=rel)\n', (843, 868), False, 'import pytest\n'), ((928, 942), 'atomisticparsers.lammps.LammpsParser', 'LammpsParser', ([], {}), '()\n', (940, 942), False, 'from atomisticparsers.lammps import LammpsParser\n'), ((981, 995), 'nomad.datamodel.EntryArchive', 'EntryArchive', ([], {}), '()\n', (993, 995), False, 'from nomad.datamodel import EntryArchive\n'), ((2997, 3011), 'nomad.datamodel.EntryArchive', 'EntryArchive', ([], {}), '()\n', (3009, 3011), False, 'from nomad.datamodel import EntryArchive\n'), ((3352, 3366), 'nomad.datamodel.EntryArchive', 'EntryArchive', ([], {}), '()\n', (3364, 3366), False, 'from nomad.datamodel import EntryArchive\n'), ((3679, 3693), 'nomad.datamodel.EntryArchive', 'EntryArchive', ([], {}), '()\n', (3691, 3693), False, 'from nomad.datamodel import EntryArchive\n'), ((4061, 4075), 'nomad.datamodel.EntryArchive', 'EntryArchive', ([], {}), '()\n', (4073, 4075), False, 'from nomad.datamodel import EntryArchive\n'), ((4525, 4539), 'nomad.datamodel.EntryArchive', 'EntryArchive', ([], {}), '()\n', (4537, 4539), False, 'from nomad.datamodel import EntryArchive\n'), ((4952, 4966), 'nomad.datamodel.EntryArchive', 'EntryArchive', ([], {}), '()\n', (4964, 4966), False, 'from nomad.datamodel import EntryArchive\n'), ((6918, 6932), 'nomad.datamodel.EntryArchive', 'EntryArchive', ([], {}), '()\n', (6930, 6932), False, 'from nomad.datamodel import EntryArchive\n'), ((3906, 3947), 'numpy.shape', 'np.shape', (['sec_systems[56].atoms.positions'], {}), '(sec_systems[56].atoms.positions)\n', (3914, 3947), True, 'import numpy as np\n')] |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Hop-skip-jump attack.
"""
import numpy as np
from mindarmour.utils.logger import LogUtil
from mindarmour.utils._check_param import check_pair_numpy_param, check_model, \
check_numpy_param, check_int_positive, check_value_positive, \
check_value_non_negative, check_param_type
from ..attack import Attack
from .black_model import BlackModel
# Module-level logger and tag used for all diagnostics emitted by this attack.
LOGGER = LogUtil.get_instance()
TAG = 'HopSkipJumpAttack'
def _clip_image(image, clip_min, clip_max):
"""
Clip an image, or an image batch, with upper and lower threshold.
"""
return np.clip(image, clip_min, clip_max)
class HopSkipJumpAttack(Attack):
"""
HopSkipJumpAttack proposed by Chen, Jordan and Wainwright is a
decision-based attack. The attack requires access to output labels of
target model.
    References: `Jianbo Chen, Michael I. Jordan, Martin J. Wainwright.
HopSkipJumpAttack: A Query-Efficient Decision-Based Attack. 2019.
arXiv:1904.02144 <https://arxiv.org/abs/1904.02144>`_
Args:
model (BlackModel): Target model.
init_num_evals (int): The initial number of evaluations for gradient
estimation. Default: 100.
max_num_evals (int): The maximum number of evaluations for gradient
estimation. Default: 1000.
stepsize_search (str): Indicating how to search for stepsize; Possible
values are 'geometric_progression', 'grid_search', 'geometric_progression'.
Default: 'geometric_progression'.
num_iterations (int): The number of iterations. Default: 20.
gamma (float): Used to set binary search threshold theta. Default: 1.0.
For l2 attack the binary search threshold `theta` is
:math:`gamma / d^{3/2}`. For linf attack is :math:`gamma / d^2`.
Default: 1.0.
constraint (str): The norm distance to optimize. Possible values are 'l2',
'linf'. Default: l2.
batch_size (int): Batch size. Default: 32.
clip_min (float, optional): The minimum image component value.
Default: 0.
clip_max (float, optional): The maximum image component value.
Default: 1.
sparse (bool): If True, input labels are sparse-encoded. If False,
input labels are one-hot-encoded. Default: True.
Raises:
ValueError: If stepsize_search not in ['geometric_progression',
'grid_search']
ValueError: If constraint not in ['l2', 'linf']
Examples:
>>> from mindspore import Tensor
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... if len(inputs.shape) == 3:
... inputs = inputs[np.newaxis, :]
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = HopSkipJumpAttack(model)
>>> n, c, h, w = 1, 1, 32, 32
>>> class_num = 3
>>> x_test = np.asarray(np.random.random((n,c,h,w)), np.float32)
>>> y_test = np.random.randint(0, class_num, size=n)
>>> _, adv_x, _= attack.generate(x_test, y_test)
"""
def __init__(self, model, init_num_evals=100, max_num_evals=1000,
stepsize_search='geometric_progression', num_iterations=20,
gamma=1.0, constraint='l2', batch_size=32, clip_min=0.0,
clip_max=1.0, sparse=True):
super(HopSkipJumpAttack, self).__init__()
self._model = check_model('model', model, BlackModel)
self._init_num_evals = check_int_positive('initial_num_evals',
init_num_evals)
self._max_num_evals = check_int_positive('max_num_evals', max_num_evals)
self._batch_size = check_int_positive('batch_size', batch_size)
self._clip_min = check_value_non_negative('clip_min', clip_min)
self._clip_max = check_value_non_negative('clip_max', clip_max)
self._sparse = check_param_type('sparse', sparse, bool)
self._np_dtype = np.dtype('float32')
if stepsize_search in ['geometric_progression', 'grid_search']:
self._stepsize_search = stepsize_search
else:
msg = "stepsize_search must be in ['geometric_progression'," \
" 'grid_search'], but got {}".format(stepsize_search)
LOGGER.error(TAG, msg)
raise ValueError(msg)
self._num_iterations = check_int_positive('num_iterations',
num_iterations)
self._gamma = check_value_positive('gamma', gamma)
if constraint in ['l2', 'linf']:
self._constraint = constraint
else:
msg = "constraint must be in ['l2', 'linf'], " \
"but got {}".format(constraint)
LOGGER.error(TAG, msg)
raise ValueError(msg)
self.queries = 0
self.is_adv = True
self.y_targets = None
self.image_targets = None
self.y_target = None
self.image_target = None
def _generate_one(self, sample):
"""
Return a tensor that constructs adversarial examples for the given
input.
Args:
sample (Tensor): Input samples.
Returns:
Tensor, generated adversarial examples.
"""
shape = list(np.shape(sample))
dim = int(np.prod(shape))
# Set binary search threshold.
if self._constraint == 'l2':
theta = self._gamma / (np.sqrt(dim)*dim)
else:
theta = self._gamma / (dim*dim)
wrap = self._hsja(sample, self.y_target, self.image_target, dim, theta)
if wrap is None:
self.is_adv = False
else:
self.is_adv = True
return self.is_adv, wrap, self.queries
    def set_target_images(self, target_images):
        """
        Set the target images used for a targeted attack.

        Args:
            target_images (numpy.ndarray): Target images; one per input
                sample passed later to generate().
        """
        # check_numpy_param validates the array and returns it.
        self.image_targets = check_numpy_param('target_images', target_images)
    def generate(self, inputs, labels):
        """
        Generate adversarial images in a for loop.

        Args:
            inputs (numpy.ndarray): Origin images.
            labels (numpy.ndarray): Target labels.

        Returns:
            - numpy.ndarray, bool values for each attack result.
            - numpy.ndarray, generated adversarial examples.
            - numpy.ndarray, query times for each sample.
        """
        if labels is not None:
            inputs, labels = check_pair_numpy_param('inputs', inputs,
                                                    'labels', labels)
        if not self._sparse:
            # Convert one-hot labels to index labels before use.
            labels = np.argmax(labels, axis=1)
        x_adv = []
        is_advs = []
        queries_times = []
        if labels is not None:
            self.y_targets = labels
        for i, x_single in enumerate(inputs):
            # Reset the query counter so each reported count covers
            # exactly one sample.
            self.queries = 0
            if self.image_targets is not None:
                self.image_target = self.image_targets[i]
            if self.y_targets is not None:
                self.y_target = self.y_targets[i]
            is_adv, adv_img, query_time = self._generate_one(x_single)
            x_adv.append(adv_img)
            is_advs.append(is_adv)
            queries_times.append(query_time)
        return np.asarray(is_advs), \
               np.asarray(x_adv), \
               np.asarray(queries_times)
    def _hsja(self, sample, target_label, target_image, dim, theta):
        """
        The main algorithm for HopSkipJumpAttack.

        Args:
            sample (numpy.ndarray): Input image. Without the batchsize
                dimension.
            target_label (int): Integer for targeted attack, None for
                nontargeted attack. Without the batchsize dimension.
            target_image (numpy.ndarray): An array with the same size as
                input sample, or None. Without the batchsize dimension.
            dim (int): Number of elements in the flattened sample.
            theta (float): Binary search threshold.

        Returns:
            numpy.ndarray, perturbed images (with a leading batch
            dimension), or None if no initial adversarial example was
            found.
        """
        original_label = None
        # Original label for untargeted attack.
        if target_label is None:
            original_label = self._model.predict(sample)
            original_label = np.argmax(original_label)
        # Initialize perturbed image.
        # untarget attack
        if target_image is None:
            perturbed = self._initialize(sample, original_label, target_label)
            if perturbed is None:
                # _initialize gave up after its random-draw budget.
                msg = 'Can not find an initial adversarial example'
                LOGGER.info(TAG, msg)
                return perturbed
        else:
            # Target attack
            perturbed = target_image
        # Project the initial perturbed image to the decision boundary.
        perturbed, dist_post_update = self._binary_search_batch(sample,
                                                                np.expand_dims(perturbed, 0),
                                                                original_label,
                                                                target_label,
                                                                theta)
        # Calculate the distance of perturbed image and original sample
        dist = self._compute_distance(perturbed, sample)
        for j in np.arange(self._num_iterations):
            current_iteration = j + 1
            # Select delta.
            delta = self._select_delta(dist_post_update, current_iteration, dim,
                                       theta)
            # Choose number of evaluations (grows with sqrt of iteration,
            # capped at self._max_num_evals).
            num_evals = int(min([self._init_num_evals*np.sqrt(j + 1),
                                 self._max_num_evals]))
            # approximate gradient.
            gradf = self._approximate_gradient(perturbed, num_evals,
                                               original_label, target_label,
                                               delta, theta)
            if self._constraint == 'linf':
                update = np.sign(gradf)
            else:
                update = gradf
            # search step size.
            if self._stepsize_search == 'geometric_progression':
                # find step size.
                epsilon = self._geometric_progression_for_stepsize(
                    perturbed,
                    update,
                    dist,
                    current_iteration,
                    original_label,
                    target_label)
                # Update the sample.
                perturbed = _clip_image(perturbed + epsilon*update,
                                        self._clip_min, self._clip_max)
                # Binary search to return to the boundary.
                perturbed, dist_post_update = self._binary_search_batch(
                    sample,
                    perturbed[None],
                    original_label,
                    target_label,
                    theta)
            elif self._stepsize_search == 'grid_search':
                # Try 20 log-spaced step sizes at once.
                epsilons = np.logspace(-4, 0, num=20, endpoint=True)*dist
                epsilons_shape = [20] + len(np.shape(sample))*[1]
                perturbeds = perturbed + epsilons.reshape(
                    epsilons_shape)*update
                perturbeds = _clip_image(perturbeds, self._clip_min,
                                         self._clip_max)
                idx_perturbed = self._decision_function(perturbeds,
                                                        original_label,
                                                        target_label)
                if np.sum(idx_perturbed) > 0:
                    # Select the perturbation that yields the minimum distance
                    # after binary search.
                    perturbed, dist_post_update = self._binary_search_batch(
                        sample, perturbeds[idx_perturbed],
                        original_label, target_label, theta)
            # compute new distance.
            dist = self._compute_distance(perturbed, sample)
            LOGGER.debug(TAG,
                         'iteration: %d, %s distance %4f',
                         j + 1,
                         self._constraint, dist)
        perturbed = np.expand_dims(perturbed, 0)
        return perturbed
def _decision_function(self, images, original_label, target_label):
"""
Decision function returns 1 if the input sample is on the desired
side of the boundary, and 0 otherwise.
"""
images = _clip_image(images, self._clip_min, self._clip_max)
prob = []
self.queries += len(images)
for i in range(0, len(images), self._batch_size):
batch = images[i:i + self._batch_size]
length = len(batch)
prob_i = self._model.predict(batch)[:length]
prob.append(prob_i)
prob = np.concatenate(prob)
if target_label is None:
res = np.argmax(prob, axis=1) != original_label
else:
res = np.argmax(prob, axis=1) == target_label
return res
def _compute_distance(self, original_img, perturbation_img):
"""
Compute the distance between original image and perturbation images.
"""
if self._constraint == 'l2':
distance = np.linalg.norm(original_img - perturbation_img)
else:
distance = np.max(abs(original_img - perturbation_img))
return distance
    def _approximate_gradient(self, sample, num_evals, original_label,
                              target_label, delta, theta):
        """
        Gradient direction estimation via Monte-Carlo sampling.

        Args:
            sample (numpy.ndarray): Current perturbed sample (no batch dim).
            num_evals (int): Number of random probes to average over.
            original_label (int): Label for untargeted attacks, or None.
            target_label (int): Label for targeted attacks, or None.
            delta (float): Probe perturbation magnitude.
            theta (float): Binary search threshold (used to rescale noise).

        Returns:
            numpy.ndarray, unit-norm estimated gradient direction.
        """
        # Generate random noise based on constraint.
        noise_shape = [num_evals] + list(np.shape(sample))
        if self._constraint == 'l2':
            random_noise = np.random.randn(*noise_shape)
        else:
            random_noise = np.random.uniform(low=-1, high=1, size=noise_shape)
        # Normalize each probe to unit l2 norm over the sample axes.
        axis = tuple(range(1, 1 + len(np.shape(sample))))
        random_noise = random_noise / np.sqrt(
            np.sum(random_noise**2, axis=axis, keepdims=True))
        # perturbed images
        perturbed = sample + delta*random_noise
        perturbed = _clip_image(perturbed, self._clip_min, self._clip_max)
        # Recover the effective (post-clipping) noise, rescaled by theta.
        random_noise = (perturbed - sample) / theta
        # Whether the perturbed images are on the desired side of the boundary.
        decisions = self._decision_function(perturbed, original_label,
                                            target_label)
        decision_shape = [len(decisions)] + [1]*len(np.shape(sample))
        # transform decisions value from 1, 0 to 1, -1
        re_decision = 2*np.array(decisions).astype(self._np_dtype).reshape(
            decision_shape) - 1.0
        if np.mean(re_decision) == 1.0:
            # All probes adversarial: average noise directly.
            grad_direction = np.mean(random_noise, axis=0)
        elif np.mean(re_decision) == -1.0:
            # No probe adversarial: move opposite the average noise.
            grad_direction = - np.mean(random_noise, axis=0)
        else:
            # Mixed outcomes: de-mean decisions to reduce variance.
            re_decision = re_decision - np.mean(re_decision)
            grad_direction = np.mean(re_decision*random_noise, axis=0)
        # The gradient direction (normalized; epsilon avoids divide-by-zero).
        grad_direction = grad_direction / (np.linalg.norm(grad_direction) + 1e-10)
        return grad_direction
def _project(self, original_image, perturbed_images, alphas):
"""
Projection input samples onto given l2 or linf balls.
"""
alphas_shape = [len(alphas)] + [1]*len(np.shape(original_image))
alphas = alphas.reshape(alphas_shape)
if self._constraint == 'l2':
projected = (1 - alphas)*original_image + alphas*perturbed_images
else:
projected = _clip_image(perturbed_images, original_image - alphas,
original_image + alphas)
return projected
    def _binary_search_batch(self, original_image, perturbed_images,
                             original_label, target_label, theta):
        """
        Binary search to approach the model decision boundary.

        Args:
            original_image (numpy.ndarray): Clean sample (no batch dim).
            perturbed_images (numpy.ndarray): Batch of adversarial
                candidates.
            original_label (int): Label for untargeted attacks, or None.
            target_label (int): Label for targeted attacks, or None.
            theta (float): Binary search stopping threshold.

        Returns:
            tuple, (best boundary image, its pre-search distance to the
            original image).
        """
        # Compute distance between perturbed image and original image.
        dists_post_update = np.array([self._compute_distance(original_image,
                                                             perturbed_image,)
                                      for perturbed_image in perturbed_images])
        # Get higher thresholds
        if self._constraint == 'l2':
            highs = np.ones(len(perturbed_images))
            thresholds = theta
        else:
            highs = dists_post_update
            thresholds = np.minimum(dists_post_update*theta, theta)
        # Get lower thresholds
        lows = np.zeros(len(perturbed_images))
        # Update thresholds.
        while np.max((highs - lows) / thresholds) > 1:
            mids = (highs + lows) / 2.0
            mid_images = self._project(original_image, perturbed_images, mids)
            decisions = self._decision_function(mid_images, original_label,
                                                target_label)
            # Keep the adversarial side: lows advance on failure,
            # highs shrink on success.
            lows = np.where(decisions == [0], mids, lows)
            highs = np.where(decisions == [1], mids, highs)
        out_images = self._project(original_image, perturbed_images, highs)
        # Select the best choice based on the distance of the output image.
        dists = np.array(
            [self._compute_distance(original_image, out_image) for out_image in
             out_images])
        idx = np.argmin(dists)
        # Note: the returned distance is the pre-search distance of the
        # selected candidate, not the distance of out_image itself.
        dist = dists_post_update[idx]
        out_image = out_images[idx]
        return out_image, dist
    def _initialize(self, sample, original_label, target_label):
        """
        Implementation of BlendedUniformNoiseAttack: find an initial
        adversarial point by drawing uniform noise, then binary-search
        the blend factor toward the clean sample.

        Args:
            sample (numpy.ndarray): Clean input sample (no batch dim).
            original_label (int): Label for untargeted attacks, or None.
            target_label (int): Label for targeted attacks, or None.

        Returns:
            numpy.ndarray, an adversarial starting point, or None if no
            adversarial noise was found within the draw budget.
        """
        num_evals = 0
        while True:
            random_noise = np.random.uniform(self._clip_min, self._clip_max,
                                             size=np.shape(sample))
            success = self._decision_function(random_noise[None],
                                              original_label,
                                              target_label)
            if success:
                break
            num_evals += 1
            # Give up after ~1000 unsuccessful random draws.
            if num_evals > 1e3:
                return None
        # Binary search.
        low = 0.0
        high = 1.0
        while high - low > 0.001:
            mid = (high + low) / 2.0
            blended = (1 - mid)*sample + mid*random_noise
            success = self._decision_function(blended[None], original_label,
                                              target_label)
            if success:
                high = mid
            else:
                low = mid
        # Smallest blend factor (within tolerance) that stays adversarial.
        initialization = (1 - high)*sample + high*random_noise
        return initialization
def _geometric_progression_for_stepsize(self, perturbed, update, dist,
current_iteration, original_label,
target_label):
"""
Search for stepsize in the way of Geometric progression.
Keep decreasing stepsize by half until reaching the desired side of
the decision boundary.
"""
epsilon = dist / np.sqrt(current_iteration)
while True:
updated = perturbed + epsilon*update
success = self._decision_function(updated, original_label,
target_label)
if success:
break
epsilon = epsilon / 2.0
return epsilon
def _select_delta(self, dist_post_update, current_iteration, dim, theta):
"""
Choose the delta based on the distance between the input sample
and the perturbed sample.
"""
if current_iteration == 1:
delta = 0.1*(self._clip_max - self._clip_min)
else:
if self._constraint == 'l2':
delta = np.sqrt(dim)*theta*dist_post_update
else:
delta = dim*theta*dist_post_update
return delta
| [
"mindarmour.utils._check_param.check_value_non_negative",
"numpy.sum",
"numpy.argmax",
"numpy.logspace",
"numpy.clip",
"numpy.argmin",
"numpy.shape",
"numpy.mean",
"numpy.arange",
"mindarmour.utils._check_param.check_pair_numpy_param",
"numpy.linalg.norm",
"numpy.prod",
"mindarmour.utils._ch... | [((952, 974), 'mindarmour.utils.logger.LogUtil.get_instance', 'LogUtil.get_instance', ([], {}), '()\n', (972, 974), False, 'from mindarmour.utils.logger import LogUtil\n'), ((1144, 1178), 'numpy.clip', 'np.clip', (['image', 'clip_min', 'clip_max'], {}), '(image, clip_min, clip_max)\n', (1151, 1178), True, 'import numpy as np\n'), ((4444, 4483), 'mindarmour.utils._check_param.check_model', 'check_model', (['"""model"""', 'model', 'BlackModel'], {}), "('model', model, BlackModel)\n", (4455, 4483), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((4515, 4570), 'mindarmour.utils._check_param.check_int_positive', 'check_int_positive', (['"""initial_num_evals"""', 'init_num_evals'], {}), "('initial_num_evals', init_num_evals)\n", (4533, 4570), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((4651, 4701), 'mindarmour.utils._check_param.check_int_positive', 'check_int_positive', (['"""max_num_evals"""', 'max_num_evals'], {}), "('max_num_evals', max_num_evals)\n", (4669, 4701), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((4729, 4773), 'mindarmour.utils._check_param.check_int_positive', 'check_int_positive', (['"""batch_size"""', 'batch_size'], {}), "('batch_size', batch_size)\n", (4747, 4773), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((4799, 4845), 'mindarmour.utils._check_param.check_value_non_negative', 'check_value_non_negative', (['"""clip_min"""', 
'clip_min'], {}), "('clip_min', clip_min)\n", (4823, 4845), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((4871, 4917), 'mindarmour.utils._check_param.check_value_non_negative', 'check_value_non_negative', (['"""clip_max"""', 'clip_max'], {}), "('clip_max', clip_max)\n", (4895, 4917), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((4941, 4981), 'mindarmour.utils._check_param.check_param_type', 'check_param_type', (['"""sparse"""', 'sparse', 'bool'], {}), "('sparse', sparse, bool)\n", (4957, 4981), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((5007, 5026), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (5015, 5026), True, 'import numpy as np\n'), ((5413, 5465), 'mindarmour.utils._check_param.check_int_positive', 'check_int_positive', (['"""num_iterations"""', 'num_iterations'], {}), "('num_iterations', num_iterations)\n", (5431, 5465), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((5538, 5574), 'mindarmour.utils._check_param.check_value_positive', 'check_value_positive', (['"""gamma"""', 'gamma'], {}), "('gamma', gamma)\n", (5558, 5574), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((7026, 7075), 'mindarmour.utils._check_param.check_numpy_param', 'check_numpy_param', (['"""target_images"""', 
'target_images'], {}), "('target_images', target_images)\n", (7043, 7075), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((10342, 10373), 'numpy.arange', 'np.arange', (['self._num_iterations'], {}), '(self._num_iterations)\n', (10351, 10373), True, 'import numpy as np\n'), ((13273, 13301), 'numpy.expand_dims', 'np.expand_dims', (['perturbed', '(0)'], {}), '(perturbed, 0)\n', (13287, 13301), True, 'import numpy as np\n'), ((13913, 13933), 'numpy.concatenate', 'np.concatenate', (['prob'], {}), '(prob)\n', (13927, 13933), True, 'import numpy as np\n'), ((18517, 18533), 'numpy.argmin', 'np.argmin', (['dists'], {}), '(dists)\n', (18526, 18533), True, 'import numpy as np\n'), ((6332, 6348), 'numpy.shape', 'np.shape', (['sample'], {}), '(sample)\n', (6340, 6348), True, 'import numpy as np\n'), ((6368, 6382), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (6375, 6382), True, 'import numpy as np\n'), ((7573, 7631), 'mindarmour.utils._check_param.check_pair_numpy_param', 'check_pair_numpy_param', (['"""inputs"""', 'inputs', '"""labels"""', 'labels'], {}), "('inputs', inputs, 'labels', labels)\n", (7595, 7631), False, 'from mindarmour.utils._check_param import check_pair_numpy_param, check_model, check_numpy_param, check_int_positive, check_value_positive, check_value_non_negative, check_param_type\n'), ((7735, 7760), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (7744, 7760), True, 'import numpy as np\n'), ((8371, 8390), 'numpy.asarray', 'np.asarray', (['is_advs'], {}), '(is_advs)\n', (8381, 8390), True, 'import numpy as np\n'), ((8409, 8426), 'numpy.asarray', 'np.asarray', (['x_adv'], {}), '(x_adv)\n', (8419, 8426), True, 'import numpy as np\n'), ((8445, 8470), 'numpy.asarray', 'np.asarray', (['queries_times'], {}), '(queries_times)\n', (8455, 8470), True, 'import numpy as np\n'), 
((9272, 9297), 'numpy.argmax', 'np.argmax', (['original_label'], {}), '(original_label)\n', (9281, 9297), True, 'import numpy as np\n'), ((9936, 9964), 'numpy.expand_dims', 'np.expand_dims', (['perturbed', '(0)'], {}), '(perturbed, 0)\n', (9950, 9964), True, 'import numpy as np\n'), ((14345, 14392), 'numpy.linalg.norm', 'np.linalg.norm', (['(original_img - perturbation_img)'], {}), '(original_img - perturbation_img)\n', (14359, 14392), True, 'import numpy as np\n'), ((14869, 14898), 'numpy.random.randn', 'np.random.randn', (['*noise_shape'], {}), '(*noise_shape)\n', (14884, 14898), True, 'import numpy as np\n'), ((14940, 14991), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': 'noise_shape'}), '(low=-1, high=1, size=noise_shape)\n', (14957, 14991), True, 'import numpy as np\n'), ((15820, 15840), 'numpy.mean', 'np.mean', (['re_decision'], {}), '(re_decision)\n', (15827, 15840), True, 'import numpy as np\n'), ((15878, 15907), 'numpy.mean', 'np.mean', (['random_noise'], {'axis': '(0)'}), '(random_noise, axis=0)\n', (15885, 15907), True, 'import numpy as np\n'), ((17635, 17679), 'numpy.minimum', 'np.minimum', (['(dists_post_update * theta)', 'theta'], {}), '(dists_post_update * theta, theta)\n', (17645, 17679), True, 'import numpy as np\n'), ((17801, 17836), 'numpy.max', 'np.max', (['((highs - lows) / thresholds)'], {}), '((highs - lows) / thresholds)\n', (17807, 17836), True, 'import numpy as np\n'), ((18118, 18156), 'numpy.where', 'np.where', (['(decisions == [0])', 'mids', 'lows'], {}), '(decisions == [0], mids, lows)\n', (18126, 18156), True, 'import numpy as np\n'), ((18177, 18216), 'numpy.where', 'np.where', (['(decisions == [1])', 'mids', 'highs'], {}), '(decisions == [1], mids, highs)\n', (18185, 18216), True, 'import numpy as np\n'), ((20245, 20271), 'numpy.sqrt', 'np.sqrt', (['current_iteration'], {}), '(current_iteration)\n', (20252, 20271), True, 'import numpy as np\n'), ((11051, 11065), 'numpy.sign', 'np.sign', 
(['gradf'], {}), '(gradf)\n', (11058, 11065), True, 'import numpy as np\n'), ((13985, 14008), 'numpy.argmax', 'np.argmax', (['prob'], {'axis': '(1)'}), '(prob, axis=1)\n', (13994, 14008), True, 'import numpy as np\n'), ((14059, 14082), 'numpy.argmax', 'np.argmax', (['prob'], {'axis': '(1)'}), '(prob, axis=1)\n', (14068, 14082), True, 'import numpy as np\n'), ((14787, 14803), 'numpy.shape', 'np.shape', (['sample'], {}), '(sample)\n', (14795, 14803), True, 'import numpy as np\n'), ((15109, 15160), 'numpy.sum', 'np.sum', (['(random_noise ** 2)'], {'axis': 'axis', 'keepdims': '(True)'}), '(random_noise ** 2, axis=axis, keepdims=True)\n', (15115, 15160), True, 'import numpy as np\n'), ((15921, 15941), 'numpy.mean', 'np.mean', (['re_decision'], {}), '(re_decision)\n', (15928, 15941), True, 'import numpy as np\n'), ((16116, 16159), 'numpy.mean', 'np.mean', (['(re_decision * random_noise)'], {'axis': '(0)'}), '(re_decision * random_noise, axis=0)\n', (16123, 16159), True, 'import numpy as np\n'), ((16236, 16266), 'numpy.linalg.norm', 'np.linalg.norm', (['grad_direction'], {}), '(grad_direction)\n', (16250, 16266), True, 'import numpy as np\n'), ((6496, 6508), 'numpy.sqrt', 'np.sqrt', (['dim'], {}), '(dim)\n', (6503, 6508), True, 'import numpy as np\n'), ((15625, 15641), 'numpy.shape', 'np.shape', (['sample'], {}), '(sample)\n', (15633, 15641), True, 'import numpy as np\n'), ((15982, 16011), 'numpy.mean', 'np.mean', (['random_noise'], {'axis': '(0)'}), '(random_noise, axis=0)\n', (15989, 16011), True, 'import numpy as np\n'), ((16066, 16086), 'numpy.mean', 'np.mean', (['re_decision'], {}), '(re_decision)\n', (16073, 16086), True, 'import numpy as np\n'), ((16507, 16531), 'numpy.shape', 'np.shape', (['original_image'], {}), '(original_image)\n', (16515, 16531), True, 'import numpy as np\n'), ((18952, 18968), 'numpy.shape', 'np.shape', (['sample'], {}), '(sample)\n', (18960, 18968), True, 'import numpy as np\n'), ((12066, 12107), 'numpy.logspace', 'np.logspace', (['(-4)', 
'(0)'], {'num': '(20)', 'endpoint': '(True)'}), '(-4, 0, num=20, endpoint=True)\n', (12077, 12107), True, 'import numpy as np\n'), ((12637, 12658), 'numpy.sum', 'np.sum', (['idx_perturbed'], {}), '(idx_perturbed)\n', (12643, 12658), True, 'import numpy as np\n'), ((15030, 15046), 'numpy.shape', 'np.shape', (['sample'], {}), '(sample)\n', (15038, 15046), True, 'import numpy as np\n'), ((20959, 20971), 'numpy.sqrt', 'np.sqrt', (['dim'], {}), '(dim)\n', (20966, 20971), True, 'import numpy as np\n'), ((10667, 10681), 'numpy.sqrt', 'np.sqrt', (['(j + 1)'], {}), '(j + 1)\n', (10674, 10681), True, 'import numpy as np\n'), ((12157, 12173), 'numpy.shape', 'np.shape', (['sample'], {}), '(sample)\n', (12165, 12173), True, 'import numpy as np\n'), ((15722, 15741), 'numpy.array', 'np.array', (['decisions'], {}), '(decisions)\n', (15730, 15741), True, 'import numpy as np\n')] |
"""
EEE511 team assignment 01 - team 03
LMS prediction:
Data from the Mackey-Glass delay differential equation
source: https://github.com/manu-mannattil/nolitsa
"""
import numpy as np
import matplotlib.pyplot as plt
import os.path as osp
import os
def mackey_glass(length=5000, x0=None, a=0.2, b=0.1, c=10.0, tau=23.0,
                 n=5000, sample=0.46, discard=250):
    """Generate time series using the Mackey-Glass equation.
    Generates time series using the discrete approximation of the
    Mackey-Glass delay differential equation described by Grassberger &
    Procaccia (1983).
    Parameters
    ----------
    length : int, optional (default = 5000)
        Length of the time series to be generated.
    x0 : array, optional (default = random)
        Initial condition for the discrete map. Should be of length n.
    a : float, optional (default = 0.2)
        Constant a in the Mackey-Glass equation.
    b : float, optional (default = 0.1)
        Constant b in the Mackey-Glass equation.
    c : float, optional (default = 10.0)
        Constant c in the Mackey-Glass equation.
    tau : float, optional (default = 23.0)
        Time delay in the Mackey-Glass equation.
    n : int, optional (default = 5000)
        The number of discrete steps into which the interval between
        t and t + tau should be divided. This results in a time
        step of tau/n and an n + 1 dimensional map.
    sample : float, optional (default = 0.46)
        Sampling step of the time series. It is useful to pick
        something between tau/100 and tau/10, with tau/sample being
        a factor of n. This will make sure that there are only whole
        number indices.
    discard : int, optional (default = 250)
        Number of n-steps to discard in order to eliminate transients.
        A total of n*discard steps will be discarded.
    Returns
    -------
    x : array
        Array containing the time series.
    """
    sample = int(n * sample / tau)
    grids = n * discard + sample * length
    x = np.empty(grids)
    # Use an explicit None check: `not x0` raises ValueError for
    # multi-element NumPy arrays and wrongly discards an all-zero or
    # empty initial condition.
    if x0 is None:
        # Random initial condition in [0.45, 0.55).
        x[:n] = 0.5 + 0.05 * (-1 + 2 * np.random.random(n))
    else:
        x[:n] = x0
    # Coefficients of the trapezoidal discretization of the DDE.
    A = (2 * n - b * tau) / (2 * n + b * tau)
    B = a * tau / (2 * n + b * tau)
    for i in range(n - 1, grids - 1):
        x[i + 1] = A * x[i] + B * (x[i - n] / (1 + x[i - n] ** c) +
                                   x[i - n + 1] / (1 + x[i - n + 1] ** c))
    # Drop the transient and downsample to the requested length.
    return x[n * discard::sample]
if __name__ == '__main__':
    # Generate a Mackey-Glass series and cache it for the LMS
    # prediction task.
    data_dir = "./data"
    data_size = 5000
    if not osp.exists(data_dir):
        os.makedirs(data_dir)
    series = mackey_glass(length=data_size, tau=23.0, sample=0.46, n=5000)
    # Stored as a (data_size, 1) column so downstream code can treat it
    # as a single-feature sequence.
    out_path = osp.join(data_dir, "mg_time_series_5000.npy")
    np.save(out_path, series.reshape(data_size, 1))
| [
"os.makedirs",
"numpy.empty",
"os.path.exists",
"numpy.random.random",
"os.path.join"
] | [((2038, 2053), 'numpy.empty', 'np.empty', (['grids'], {}), '(grids)\n', (2046, 2053), True, 'import numpy as np\n'), ((2544, 2564), 'os.path.exists', 'osp.exists', (['data_dir'], {}), '(data_dir)\n', (2554, 2564), True, 'import os.path as osp\n'), ((2574, 2595), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (2585, 2595), False, 'import os\n'), ((2826, 2871), 'os.path.join', 'osp.join', (['data_dir', '"""mg_time_series_5000.npy"""'], {}), "(data_dir, 'mg_time_series_5000.npy')\n", (2834, 2871), True, 'import os.path as osp\n'), ((2109, 2128), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (2125, 2128), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
A collection of functions used by ciftifies report generation functions
i.e. ciftify_peaktable and ciftify_dlabel_report
"""
import os
from ciftify.utils import run
import ciftify.config
import numpy as np
import pandas as pd
import logging
import logging.config
def define_atlas_settings():
    '''Return the atlas lookup table used by the report generators.

    Each entry maps an atlas short name to its dlabel file path,
    display order, name, and the map number to read from the file.
    '''
    group_avg_dir = ciftify.config.find_HCP_S1200_GroupAvg()
    # (filename inside the S1200 group-average directory, display order)
    atlas_files = {
        'DKT': ('cvs_avg35_inMNI152.aparc.32k_fs_LR.dlabel.nii', 1),
        'MMP': ('Q1-Q6_RelatedValidation210.CorticalAreas_dil_Final_Final_Areas_Group_Colors.32k_fs_LR.dlabel.nii', 3),
        'Yeo7': ('RSN-networks.32k_fs_LR.dlabel.nii', 2),
    }
    atlas_settings = {}
    for name, (filename, order) in atlas_files.items():
        atlas_settings[name] = {
            'path': os.path.join(group_avg_dir, filename),
            'order': order,
            'name': name,
            'map_number': 1,
        }
    return atlas_settings
class HemiSurfaceSettings(object):
    '''Holds the surface settings (surface file, vertex-area file and
    workbench structure name) for one hemisphere ('L' or 'R').'''
    def __init__(self, hemi, arguments):
        # hemi must be 'L' or 'R'; every accessor below keys off it.
        assert hemi in ['L','R']
        self.hemi = hemi
        self.wb_structure = self._get_wb_structure()
        self.surface = self._get_surf_arg(arguments)
        self.vertex_areas = self._get_vertex_areas_arg(arguments)
    def _get_wb_structure(self):
        ''' sets the structure according to the hemisphere'''
        if self.hemi == 'L': return 'CORTEX_LEFT'
        if self.hemi == 'R': return 'CORTEX_RIGHT'
        # unreachable: hemi is validated in __init__
        raise
    def _get_surf_arg(self, arguments):
        ''' sets the surface filename according to user arguments'''
        if self.hemi == 'L': return arguments['--left-surface']
        if self.hemi == 'R': return arguments['--right-surface']
        # unreachable: hemi is validated in __init__
        raise
    def _get_vertex_areas_arg(self, arguments):
        ''' sets the vertex areas according to the user arguments'''
        if self.hemi == 'L': return arguments['--left-surf-area']
        if self.hemi == 'R': return arguments['--right-surf-area']
        # unreachable: hemi is validated in __init__
        raise
    def set_surface_to_global(self):
        ''' sets the surface to the S1200 midthickness; returns self'''
        self.surface = os.path.join(
            ciftify.config.find_HCP_S1200_GroupAvg(),
            'S1200.{}.midthickness_MSMAll.32k_fs_LR.surf.gii'
            ''.format(self.hemi))
        return(self)
    def set_vertex_areas_to_global(self):
        ''' sets the vertex areas to the S1200 average; returns self'''
        self.vertex_areas = os.path.join(
            ciftify.config.find_HCP_S1200_GroupAvg(),
            'S1200.{}.midthickness_MSMAll_va.32k_fs_LR.shape.gii'
            ''.format(self.hemi))
        return(self)
    def calc_vertex_areas_from_surface(self, tmpdir):
        ''' use wb_command to calculate the vertex areas from the given surface;
        writes the result into tmpdir and returns self'''
        self.vertex_areas = os.path.join(tmpdir,
            'surf{}_va.shape.gii'.format(self.hemi))
        run(['wb_command', '-surface-vertex-areas',
            self.surface, self.vertex_areas])
        return(self)
class CombinedSurfaceSettings(object):
    '''Holds the surface settings for both hemispheres.

    Validates that surfaces and vertex-area files were supplied as
    left/right pairs, then fills in defaults: the HCP S1200 group
    averages when nothing was given, or vertex areas derived from the
    given surfaces.
    '''
    def __init__(self, arguments, tmpdir):
        # Local import: the module header never imports sys, so the
        # original sys.exit() calls raised NameError on the error path.
        import sys
        logger = logging.getLogger(__name__)
        for hemi in ['L','R']:
            self.__dict__[hemi] = HemiSurfaceSettings(hemi, arguments)
        ## check if surfaces or settings have been specified
        surfaces_not_set = (self.L.surface is None, self.R.surface is None)
        va_not_set = (self.L.vertex_areas is None, self.R.vertex_areas is None)
        # Exactly one of a pair being missing is a user error.
        if sum(surfaces_not_set) == 1:
            logger.error("Need both left and right surfaces - only one surface given")
            sys.exit(1)
        if sum(va_not_set) == 1:
            logger.error("Need both left and right surface area arguments - only one given")
            sys.exit(1)
        if all(va_not_set):
            if all(surfaces_not_set):
                # if neither have been specified, we use the HCP S1200 averages
                for hemi in ['L','R']:
                    self.__dict__[hemi].set_vertex_areas_to_global()
                    self.__dict__[hemi].set_surface_to_global()
            else:
                # if only surfaces have been given, we use the surface to calculate the vertex areas
                for hemi in ['L','R']:
                    self.__dict__[hemi].calc_vertex_areas_from_surface(tmpdir)
def sum_idx_area(clust_idx, surf_va):
    '''Return the total surface area covered by the given vertex indices.'''
    return sum(surf_va[clust_idx])
def get_cluster_indices(cluster_id, clust_altas):
    '''Return the vertex indices whose atlas label equals cluster_id.'''
    matches = np.where(clust_altas == cluster_id)
    return matches[0]
def get_overlaping_idx(clust_id1, clust_atlas1, clust_id2, clust_atlas2):
    '''Return the vertex indices shared by two clusters from two atlases.'''
    idx_in_atlas1 = get_cluster_indices(int(clust_id1), clust_atlas1)
    idx_in_atlas2 = get_cluster_indices(int(clust_id2), clust_atlas2)
    return np.intersect1d(idx_in_atlas1, idx_in_atlas2)
def calc_cluster_area(cluster_id, atlas_array, surf_va_array):
    '''Return the surface area covered by one cluster of an atlas.'''
    member_idx = get_cluster_indices(int(cluster_id), atlas_array)
    return sum_idx_area(member_idx, surf_va_array)
def calc_overlapping_area(clust_id1, clust_atlas1, clust_id2, clust_atlas2, surf_va_array):
    '''Return the surface area of the overlap between two clusters
    (0 when they are disjoint).'''
    shared_idx = get_overlaping_idx(clust_id1, clust_atlas1,
                                    clust_id2, clust_atlas2)
    if len(shared_idx) == 0:
        return 0
    return sum_idx_area(shared_idx, surf_va_array)
def calc_label_to_atlas_overlap(clust_id1, clust_atlas1_data,
                clust_atlas2_dict, clust_atlas2, surf_va_array):
    '''Build a DataFrame of the overlap between one label and every
    label of a second atlas.

    Args:
        clust_id1: cluster id in the first atlas.
        clust_atlas1_data: per-vertex label array of the first atlas.
        clust_atlas2_dict: {label_id: cluster name} for the second atlas.
        clust_atlas2: per-vertex label array of the second atlas.
        surf_va_array: per-vertex surface areas.

    Returns:
        pandas.DataFrame indexed by the second atlas' label ids (as str),
        with 'clusterID' and 'overlap_area' columns.
    '''
    ## create a data frame to hold the overlap
    o_df = pd.DataFrame.from_dict(clust_atlas2_dict, orient = "index")
    o_df = o_df.rename(index=str, columns={0: "clusterID"})
    # Iterate the index directly: Index.get_values() was deprecated and
    # removed in pandas 1.0.
    for idx_label2 in o_df.index:
        o_df.loc[idx_label2, 'overlap_area'] = calc_overlapping_area(
            clust_id1, clust_atlas1_data,
            idx_label2, clust_atlas2, surf_va_array)
    return(o_df)
def overlap_summary_string(overlap_df, min_percent_overlap):
    '''Summarise cluster overlaps above a percentage threshold as one string.

    Clusters are listed in descending overlap order, e.g. "A (50.0%); B (10.0%); ".
    '''
    rdf = overlap_df[overlap_df.overlap_percent > min_percent_overlap]
    rdf = rdf.sort_values(by='overlap_percent', ascending=False)
    result_string = ""
    # FIX: Index.get_values() was removed in pandas 1.0; iterating the index
    # directly yields the same row labels on every pandas version.
    for o_label in rdf.index:
        result_string += '{} ({:2.1f}%); '.format(rdf.loc[o_label, 'clusterID'],
                                        rdf.loc[o_label, 'overlap_percent'])
    return(result_string)
def get_label_overlap_summary(clust_id1, clust_atlas1_data, clust_atlas2_data, clust_atlas2_dict,
                              surf_va_array, min_percent_overlap = 5):
    '''Report, as a string, every atlas-2 cluster overlapping the given atlas-1 label.

    Returns "" when the label is empty or nothing overlaps at all.
    '''
    label_area = calc_cluster_area(clust_id1, clust_atlas1_data, surf_va_array)
    if label_area == 0:
        return("")
    # Per-cluster overlap areas against the second atlas.
    overlap_df = calc_label_to_atlas_overlap(clust_id1, clust_atlas1_data,
                    clust_atlas2_dict, clust_atlas2_data, surf_va_array)
    if overlap_df.overlap_area.sum() == 0:
        return("")
    # Express each overlap as a percentage of this label's own area.
    overlap_df.loc[:, 'overlap_percent'] = overlap_df.loc[:, 'overlap_area']/label_area*100
    return(overlap_summary_string(overlap_df, min_percent_overlap))
| [
"pandas.DataFrame.from_dict",
"ciftify.utils.run",
"numpy.where",
"numpy.intersect1d",
"logging.getLogger"
] | [((5426, 5464), 'numpy.intersect1d', 'np.intersect1d', (['label1_idx', 'label2_idx'], {}), '(label1_idx, label2_idx)\n', (5440, 5464), True, 'import numpy as np\n'), ((6407, 6464), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['clust_atlas2_dict'], {'orient': '"""index"""'}), "(clust_atlas2_dict, orient='index')\n", (6429, 6464), True, 'import pandas as pd\n'), ((3283, 3360), 'ciftify.utils.run', 'run', (["['wb_command', '-surface-vertex-areas', self.surface, self.vertex_areas]"], {}), "(['wb_command', '-surface-vertex-areas', self.surface, self.vertex_areas])\n", (3286, 3360), False, 'from ciftify.utils import run\n'), ((3550, 3577), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3567, 3577), False, 'import logging\n'), ((5057, 5092), 'numpy.where', 'np.where', (['(clust_altas == cluster_id)'], {}), '(clust_altas == cluster_id)\n', (5065, 5092), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import torchvision
import torchvision.transforms as transforms
from torch import autograd
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader,Dataset
from torchvision.datasets import ImageFolder
import albumentations as albu
from albumentations import torch as AT
import pandas as pd
from libs.custom_transforms import PadDifferentlyIfNeeded
from libs.constant import *
from libs.model import *
#import segmentation_models_pytorch as smp
#smp.encoders.get_preprocessing_fn()
class ImageDataset(ImageFolder):
    """ImageFolder variant that returns (image, mask) pairs instead of (image, label).

    The mask is all-ones with the image's spatial shape and is passed through the
    same albumentations pipeline as the image, keeping geometric transforms and
    padding aligned between the two.
    """
    def __init__(self, root, transform=None):
        # Delegate directory scanning and file loading to torchvision's ImageFolder.
        super(ImageDataset, self).__init__( root, transform=transform)
    def __getitem__(self, index):
        # The class label (`target`) is intentionally unused: this dataset feeds
        # an image-to-image model, not a classifier.
        path, target = self.samples[index]
        sample = self.loader(path)
        #mask = self._make_mask( sample)
        sample = np.array(sample)
        # One-channel mask covering every pixel (drop the trailing channel axis).
        mask = np.ones(sample.shape[:-1])
        #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # augmented = self.transform(image=sample, mask=mask)
        # NOTE(review): assumes `transform` is an albumentations Compose taking
        # image=/mask= keywords — a plain torchvision transform would fail here.
        augmented = self.transform(image=sample, mask=mask)
        img = augmented['image']
        mask = augmented['mask']
        return img, mask
    # def __len__(self):
    #     return len(self.img_ids)
def data_loader_mask():
    """
    Build the mask-aware train/test DataLoaders.

    Training data gets random 90-degree rotations and horizontal flips; both
    splits are padded to 512x512 before tensor conversion.
    :return : (train_loader_cross, test_loader)
    """
    print("Loading Dataset")
    default_transform = albu.Compose([ PadDifferentlyIfNeeded(512,512,mask_value=0)
                             , AT.ToTensor()])
    transform = albu.Compose([ albu.RandomRotate90(1.0)
                      , albu.HorizontalFlip(0.5),PadDifferentlyIfNeeded(512,512,mask_value=0), AT.ToTensor()])
    # Ground-truth (enhanced) images and raw input images for each split.
    testset_gt = ImageDataset(root=TEST_ENHANCED_IMG_DIR , transform=default_transform)
    trainset_2_gt = ImageDataset(root=ENHANCED2_IMG_DIR, transform=transform)
    testset_inp = ImageDataset(root=TEST_INPUT_IMG_DIR , transform=default_transform)
    trainset_1_inp = ImageDataset(root=INPUT_IMG_DIR , transform=transform)
    # NOTE(review): the training pair draws inputs and targets from two different
    # image sets (unpaired/"cross" training) — confirm this is intentional.
    train_loader_cross = torch.utils.data.DataLoader(
        ConcatDataset(
            trainset_1_inp,
            trainset_2_gt
        ),num_workers=NUM_WORKERS,
        batch_size=BATCH_SIZE * GPUS_NUM,  # Enlarge batch_size by a factor of len(device_ids)
        shuffle=True,
    )
    test_loader = torch.utils.data.DataLoader(
        ConcatDataset(
            testset_inp,
            testset_gt
        ),num_workers=NUM_WORKERS,
        batch_size=BATCH_SIZE * GPUS_NUM,  # Enlarge batch_size by a factor of len(device_ids)
        shuffle=False
    )
    print("Finished loading dataset")
    return train_loader_cross, test_loader
def data_loader():
    """
    Build the torchvision DataLoaders for the paired training/test splits.

    Images are center-cropped to 516x516 and converted to tensors.
    :return : (train_loader_1, train_loader_2, train_loader_cross, test_loader)
    """
    print("Loading Dataset")
    #transform = transforms.Compose([transforms.Resize((SIZE, SIZE), interpolation='PIL.Image.ANTIALIAS'), transforms.ToTensor()])
    transform = transforms.Compose([
        # you can add other transformations in this list
        transforms.CenterCrop(516),
        transforms.ToTensor() ])
    # Expert-C retouched images are the ground truth; "input" are the raw shots.
    testset_gt = torchvision.datasets.ImageFolder(root='./images_LR/Expert-C/Testing/', transform=transform)
    trainset_1_gt = torchvision.datasets.ImageFolder(root='./images_LR/Expert-C/Training1/', transform=transform)
    trainset_2_gt = torchvision.datasets.ImageFolder(root='./images_LR/Expert-C/Training2/', transform=transform)
    testset_inp = torchvision.datasets.ImageFolder(root='./images_LR/input/Testing/', transform=transform)
    trainset_1_inp = torchvision.datasets.ImageFolder(root='./images_LR/input/Training1/', transform=transform)
    trainset_2_inp = torchvision.datasets.ImageFolder(root='./images_LR/input/Training2/', transform=transform)
    # Loader 1/2: paired gt+input from the same split; "cross": unpaired mix.
    train_loader_1 = torch.utils.data.DataLoader(
        ConcatDataset(
            trainset_1_gt,
            trainset_1_inp
        ),
        batch_size=BATCH_SIZE * GPUS_NUM,  # Enlarge batch_size by a factor of len(device_ids)
        shuffle=True,
    )
    train_loader_2 = torch.utils.data.DataLoader(
        ConcatDataset(
            trainset_2_gt,
            trainset_2_inp
        ),
        batch_size=BATCH_SIZE * GPUS_NUM,  # Enlarge batch_size by a factor of len(device_ids)
        shuffle=True,
    )
    train_loader_cross = torch.utils.data.DataLoader(
        ConcatDataset(
            trainset_2_inp,
            trainset_1_gt
        ),
        batch_size=BATCH_SIZE * GPUS_NUM,  # Enlarge batch_size by a factor of len(device_ids)
        shuffle=True,
    )
    test_loader = torch.utils.data.DataLoader(
        ConcatDataset(
            testset_inp,
            testset_gt
        ),
        batch_size=BATCH_SIZE * GPUS_NUM,  # Enlarge batch_size by a factor of len(device_ids)
        shuffle=True,
    )
    print("Finished loading dataset")
    return train_loader_1, train_loader_2, train_loader_cross, test_loader
def computeGradientPenaltyFor1WayGAN(D, realSample, fakeSample):
    """WGAN-GP style gradient penalty with a one-sided reduction.

    Differentiates D at random interpolates of real/fake samples and averages
    max(0, ||grad||_2 - 1) over the batch. The per-sample values are detached
    to NumPy, so the returned scalar cannot back-propagate — callers treat it
    as a monitoring/weighting quantity.
    """
    # One interpolation coefficient per sample, broadcast over remaining dims.
    alpha = torch.rand(realSample.shape[0], 1, device=device)
    interpolates = (alpha * realSample + ((1 - alpha) * fakeSample)).requires_grad_(True)
    dInterpolation = D(interpolates)
    #fakeOutput = Variable(Tensor_gpu(realSample.shape[0], 1, 1, 1).fill_(1.0), requires_grad=False)
    # All-ones grad_outputs so autograd.grad returns dD/d(interpolates).
    fakeOutput = Variable(Tensor_gpu(realSample.shape[0], 1).fill_(1.0), requires_grad=False)
    gradients = autograd.grad(
        outputs=dInterpolation,
        inputs=interpolates,
        grad_outputs=fakeOutput,
        create_graph=True,
        retain_graph=True,
        only_inputs=True)[0]
    ## Use adaptive weighting scheme
    gradients = gradients.view(gradients.size(0), -1)
    maxVals = []
    normGradients = gradients.norm(2, dim=1) - 1
    for i in range(len(normGradients)):
        if (normGradients[i] > 0):
            # One-sided penalty: only norms above 1 contribute; detach to host.
            maxVals.append(Variable(normGradients[i].type(Tensor)).detach().numpy())
        else:
            maxVals.append(0)
    gradientPenalty = np.mean(maxVals)
    return gradientPenalty
def compute_gradient_penalty(discriminator, real_sample, fake_sample):
    """
    This function used to compute Gradient Penalty
    The equation is Equation(4) in Chp5
    :param discriminator: stands for D_Y
    :param real_sample: stands for Y
    :param fake_sample: stands for Y'
    :return gradient_penalty: instead of the global parameter LAMBDA
    """
    # Per-element interpolation coefficients (same shape as the samples).
    alpha = Tensor_gpu(np.random.random(real_sample.shape))
    interpolates = (alpha * real_sample + ((1 - alpha) * fake_sample.detach())).requires_grad_(True)  # stands for y^
    d_interpolation = discriminator(interpolates)  # stands for D_Y(y^)
    #fake_output = Variable(Tensor_gpu(real_sample.shape[0], 1).fill_(1.0), requires_grad=False)
    # All-ones grad_outputs so autograd.grad yields dD/d(interpolates).
    fake_output = Variable(Tensor_gpu(real_sample.shape[0]).fill_(1.0), requires_grad=False)
    gradients = autograd.grad(
        outputs=d_interpolation,
        inputs=interpolates,
        grad_outputs=fake_output,
        create_graph=True,
        retain_graph=True,
        only_inputs=True)[0]
    # Use Adaptive weighting scheme
    # The following codes stand for the Equation(4) in Chp5
    gradients = gradients.view(gradients.size(0), -1)
    max_vals = []
    norm_gradients = gradients.norm(2, dim=1) - 1
    for i in range(len(norm_gradients)):
        if norm_gradients[i] > 0:
            # temp_data = Variable(norm_gradients[i].type(Tensor)).detach().item()
            # One-sided penalty: only norms above 1 contribute.
            temp_data = Variable(norm_gradients[i].type(Tensor)).item()
            max_vals.append(temp_data )
        else:
            max_vals.append(0)
    # Rebuilt as a differentiable tensor so the penalty can join the loss graph.
    tensor_max_vals = torch.tensor(max_vals, dtype=torch.float64, device=device, requires_grad=True)
    # gradient_penalty = np.mean(max_vals)
    gradient_penalty = torch.mean(tensor_max_vals)
    # gradient_penalty.backward(retain_graph=True)
    return gradient_penalty
def _gradient_penalty(self, data, generated_data, gamma=10):
    """Two-sided WGAN-GP penalty: self.gamma * E[(||grad D(x^)||_2 - 1)^2].

    NOTE(review): the `gamma` parameter is never used — the return statement
    reads `self.gamma` instead. Confirm which value is intended.
    """
    batch_size = data.size(0)
    epsilon = torch.rand(batch_size, 1, 1, 1)
    epsilon = epsilon.expand_as(data)
    if self.use_cuda:
        epsilon = epsilon.cuda()
    # Random convex combination of real and generated samples.
    interpolation = epsilon * data.data + (1 - epsilon) * generated_data.data
    interpolation = Variable(interpolation, requires_grad=True)
    if self.use_cuda:
        interpolation = interpolation.cuda()
    interpolation_logits = self.D(interpolation)
    grad_outputs = torch.ones(interpolation_logits.size())
    if self.use_cuda:
        grad_outputs = grad_outputs.cuda()
    gradients = autograd.grad(outputs=interpolation_logits,
                              inputs=interpolation,
                              grad_outputs=grad_outputs,
                              create_graph=True,
                              retain_graph=True)[0]
    gradients = gradients.view(batch_size, -1)
    # The 1e-12 term keeps sqrt's gradient finite at exactly zero norm.
    gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)
    return self.gamma * ((gradients_norm - 1) ** 2).mean()
def generatorAdversarialLoss(output_images, discriminator):
    """Adversarial term for the generator: mean discriminator score of its outputs.

    :param output_images: batch of generated images
    :param discriminator: critic network scoring the batch
    :return: scalar tensor, E[D(G(x))]
    """
    scores = discriminator(output_images)
    return torch.mean(scores)
def discriminatorLoss(d1Real, d1Fake, gradPenalty):
    """WGAN critic objective: E[D(fake)] - E[D(real)] + LAMBDA * gradient penalty.

    :param d1Real: critic scores on real samples
    :param d1Fake: critic scores on generated samples
    :param gradPenalty: precomputed gradient-penalty term
    :return: scalar loss tensor
    """
    wasserstein_gap = torch.mean(d1Fake) - torch.mean(d1Real)
    return wasserstein_gap + (LAMBDA * gradPenalty)
def computeGeneratorLoss(inputs, outputs_g1, discriminator, criterion):
    """Total generator loss: -E[D(G(x))] + ALPHA-weighted reconstruction term.

    :param inputs: original input batch
    :param outputs_g1: generator outputs for that batch
    :param discriminator: critic network
    :param criterion: reconstruction loss (applied as criterion(inputs, outputs_g1))
    :return: scalar loss tensor
    """
    adv_term = generatorAdversarialLoss(outputs_g1, discriminator)
    recon_term = criterion(inputs, outputs_g1)
    return -adv_term + ALPHA * recon_term
def computeIdentityMappingLoss(generatorX,generatorY,realEnhanced,realInput):
    """Identity-mapping loss: each generator should leave its target domain unchanged."""
    mse = nn.MSELoss()
    enhanced_term = mse(realEnhanced, generatorX(realEnhanced)).mean()
    input_term = mse(realInput, generatorY(realInput)).mean()
    return enhanced_term + input_term
def computeIdentityMappingLoss_dpeversion(realInput, realEnhanced, fakeInput, fakeEnhanced):
    """
    Identity mapping loss, DPE formulation (Equation(5) in Chp6):
    MSE(realInput, fakeEnhanced) + MSE(realEnhanced, fakeInput).
    :return: scalar loss tensor
    """
    # MSE Loss and Optimizer
    mse = nn.MSELoss()
    first_term = mse(realInput, fakeEnhanced).mean()
    second_term = mse(realEnhanced, fakeInput).mean()
    return first_term + second_term
def computeCycleConsistencyLoss(x, x2, y, y2):
    """
    Cycle-consistency loss (Equation(6) in Chp6): MSE(x, x2) + MSE(y, y2),
    where x2/y2 are the round-trip reconstructions of x/y.
    :return: scalar loss tensor
    """
    # MSE Loss and Optimizer
    mse = nn.MSELoss()
    forward_term = mse(x, x2).mean()
    backward_term = mse(y, y2).mean()
    return forward_term + backward_term
def computeAdversarialLosses(discriminator,discriminatorX, x, x1, y, y1):
    """
    Adversarial losses for critic and generator (Equations (7)(8)(9) in Chp6).
    :param discriminator: critic for the y domain
    :param discriminatorX: critic for the x domain
    :param x, y: real samples; x1, y1: generated samples
    :return: (ad, ag) — critic loss and generator loss terms
    """
    real_x_score = torch.mean(discriminatorX(x))
    fake_x_score = torch.mean(discriminatorX(x1))
    real_y_score = torch.mean(discriminator(y))
    fake_y_score = torch.mean(discriminator(y1))
    ad = real_x_score - fake_x_score + \
         real_y_score - fake_y_score
    ag = fake_x_score + fake_y_score
    return ad, ag
def compute_d_adv_loss(discriminator,real,fake):
    """Critic adversarial term: mean score on real minus mean score on detached fakes."""
    real_score = torch.mean(discriminator(real))
    # Detach so no gradient flows back into the generator through the fakes.
    fake_score = torch.mean(discriminator(fake.detach()))
    return real_score - fake_score
def compute_g_adv_loss(discriminatorY,discriminatorX, fakeEnhanced,fakeInput):
    """Generator adversarial term: sum of mean critic scores on both fake domains."""
    input_score = torch.mean(discriminatorX(fakeInput))
    enhanced_score = torch.mean(discriminatorY(fakeEnhanced))
    return input_score + enhanced_score
def computeGradientPenaltyFor2Way(discriminator, x, x1, y, y1):
    """
    Gradient penalty for the 2-Way GAN (Equations (10)(11) in Chp6):
    the one-way penalty applied in both the y and x domains.
    :return: summed penalty value
    """
    penalty_y = computeGradientPenaltyFor1WayGAN(discriminator, y.data, y1.data)
    penalty_x = computeGradientPenaltyFor1WayGAN(discriminator, x.data, x1.data)
    return penalty_y + penalty_x
def computeDiscriminatorLossFor2WayGan(ad, penalty):
    """Critic loss for the 2-Way GAN: negated adversarial term plus the penalty."""
    #return -ad + LAMBDA * penalty
    return penalty - ad
def computeGeneratorLossFor2WayGan(ag, i_loss, c_loss):
    """Generator loss for the 2-Way GAN: -ag + ALPHA*(identity) + 10*ALPHA*(cycle)."""
    adv_term = -ag
    return adv_term + ALPHA * i_loss + 10 * ALPHA * c_loss
def adjustLearningRate( decay_rate, limit_epoch):
    """
    Build a learning-rate schedule factor: constant 1 up to `limit_epoch`,
    then declining linearly by 1/decay_rate per epoch afterwards.
    :param decay_rate: inverse slope of the post-limit decay
    :param limit_epoch: last epoch with an unmodified rate
    :return: callable epoch_num -> multiplicative factor
    """
    def get_decay(epoch_num):
        # Warm period keeps the factor at 1; afterwards decay linearly.
        if epoch_num <= limit_epoch:
            return 1
        return 1 - ( 1/decay_rate ) *(epoch_num- limit_epoch)
    return get_decay
def set_requires_grad(nets, requires_grad=False):
    """Set requires_grad on every parameter of the given network(s).

    Parameters:
        nets (network or list of networks) -- the networks to update; None entries are skipped
        requires_grad (bool)               -- whether the networks require gradients or not
    """
    nets = nets if isinstance(nets, list) else [nets]
    for net in nets:
        if net is None:
            continue
        for param in net.parameters():
            param.requires_grad = requires_grad
class LambdaAdapter:
    """Adaptively grows the gradient-penalty weights of the two critics.

    Tracks an exponential moving average of each normalized penalty; when the
    average exceeds a bound, the corresponding weight is doubled and a cooldown
    buffer prevents immediate re-growth.
    """
    def __init__(self, lambda_init,D_G_ratio):
        # Threshold the penalty moving average must exceed to trigger growth.
        self.loss_wgan_gp_bound = 5e-2
        # EMA decay factor for the penalty moving averages.
        self.loss_wgan_gp_mv_decay = 0.99
        self.netD_wgan_gp_mvavg_1 = 0
        self.netD_wgan_gp_mvavg_2 = 0
        # Cooldown counters: no growth while > 0.
        self.netD_update_buffer_1 = 0
        self.netD_update_buffer_2 = 0
        # Current gradient-penalty weights (lambdas) per critic.
        self.netD_gp_weight_1 = lambda_init
        self.netD_gp_weight_2 = lambda_init
        self.netD_times = D_G_ratio
        self.netD_times_grow = 1
        self.netD_buffer_times = 50 #should depend on batch size as the original
        self.netD_change_times_1 = D_G_ratio
        self.netD_change_times_2 = D_G_ratio
        self.loss_wgan_lambda_ignore = 1
        # Multiplicative growth factor applied to a weight when triggered.
        self.loss_wgan_lambda_grow = 2.0
    def update_penalty_weights(self,batches_done,gr_penalty1,gr_penalty2):
        # if not (epoch * batch_count + current_iter < 1):
        if not (batches_done < 1):
            # gradient penalty 1 and 2 ___ -netD_train_s[-7] -netD_train_s[-7]
            # EMA of each penalty normalized by its current weight.
            self.netD_wgan_gp_mvavg_1 = self.netD_wgan_gp_mvavg_1 * self.loss_wgan_gp_mv_decay + (gr_penalty1 / self.netD_gp_weight_1) * (1 - self.loss_wgan_gp_mv_decay)
            self.netD_wgan_gp_mvavg_2 = self.netD_wgan_gp_mvavg_2 * self.loss_wgan_gp_mv_decay + (gr_penalty2 / self.netD_gp_weight_2) * (1 - self.loss_wgan_gp_mv_decay)
            # Grow weight 1 when its EMA breaches the bound and no cooldown is active.
            if (self.netD_update_buffer_1 == 0) and (self.netD_wgan_gp_mvavg_1 > self.loss_wgan_gp_bound) :
                self.netD_gp_weight_1 = self.netD_gp_weight_1 * self.loss_wgan_lambda_grow
                self.netD_change_times_1 = self.netD_change_times_1 * self.netD_times_grow
                self.netD_update_buffer_1 = self.netD_buffer_times
                self.netD_wgan_gp_mvavg_1 = 0
            self.netD_update_buffer_1 = 0 if self.netD_update_buffer_1 == 0 else self.netD_update_buffer_1 - 1
            # Same logic for critic 2.
            if (self.netD_update_buffer_2 == 0) and (self.netD_wgan_gp_mvavg_2 > self.loss_wgan_gp_bound) :
                self.netD_gp_weight_2 = self.netD_gp_weight_2 * self.loss_wgan_lambda_grow
                self.netD_change_times_2 = self.netD_change_times_2 * self.netD_times_grow
                self.netD_update_buffer_2 = self.netD_buffer_times
                self.netD_wgan_gp_mvavg_2 = 0
            self.netD_update_buffer_2 = 0 if self.netD_update_buffer_2 == 0 else self.netD_update_buffer_2 - 1
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
    """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
    Arguments:
        netD (network)              -- discriminator network
        real_data (tensor array)    -- real images
        fake_data (tensor array)    -- generated images from the generator
        device (str)                -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        type (str)                  -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)            -- the constant used in formula ( | |gradient||_2 - constant)^2
        lambda_gp (float)           -- weight for this loss
    Returns the gradient penalty loss and the flattened gradients (or (0.0, None) when disabled)
    """
    if lambda_gp > 0.0:
        if type == 'real':   # either use real images, fake images, or a linear interpolation of two.
            interpolatesv = real_data
        elif type == 'fake':
            interpolatesv = fake_data
        elif type == 'mixed':
            # One coefficient per sample, expanded element-wise over the sample shape.
            alpha = torch.rand(real_data.shape[0], 1, device=device)
            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
        else:
            raise NotImplementedError('{} not implemented'.format(type))
        interpolatesv.requires_grad_(True)
        disc_interpolates = netD(interpolatesv)
        gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
                                        grad_outputs=torch.ones(disc_interpolates.size()).to(device),
                                        create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flat the data
        # Two-sided penalty; the 1e-16 eps keeps norm()'s gradient finite at zero.
        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp        # added eps
        return gradient_penalty, gradients
    else:
        return 0.0, None
| [
"torch.autograd.grad",
"torch.autograd.Variable",
"numpy.ones",
"torchvision.transforms.ToTensor",
"torchvision.datasets.ImageFolder",
"numpy.mean",
"numpy.array",
"numpy.random.random",
"torchvision.transforms.CenterCrop",
"albumentations.HorizontalFlip",
"libs.custom_transforms.PadDifferentlyI... | [((3259, 3354), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ([], {'root': '"""./images_LR/Expert-C/Testing/"""', 'transform': 'transform'}), "(root='./images_LR/Expert-C/Testing/',\n transform=transform)\n", (3291, 3354), False, 'import torchvision\n'), ((3371, 3468), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ([], {'root': '"""./images_LR/Expert-C/Training1/"""', 'transform': 'transform'}), "(root='./images_LR/Expert-C/Training1/',\n transform=transform)\n", (3403, 3468), False, 'import torchvision\n'), ((3485, 3582), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ([], {'root': '"""./images_LR/Expert-C/Training2/"""', 'transform': 'transform'}), "(root='./images_LR/Expert-C/Training2/',\n transform=transform)\n", (3517, 3582), False, 'import torchvision\n'), ((3598, 3690), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ([], {'root': '"""./images_LR/input/Testing/"""', 'transform': 'transform'}), "(root='./images_LR/input/Testing/',\n transform=transform)\n", (3630, 3690), False, 'import torchvision\n'), ((3708, 3802), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ([], {'root': '"""./images_LR/input/Training1/"""', 'transform': 'transform'}), "(root='./images_LR/input/Training1/',\n transform=transform)\n", (3740, 3802), False, 'import torchvision\n'), ((3820, 3914), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ([], {'root': '"""./images_LR/input/Training2/"""', 'transform': 'transform'}), "(root='./images_LR/input/Training2/',\n transform=transform)\n", (3852, 3914), False, 'import torchvision\n'), ((6116, 6132), 'numpy.mean', 'np.mean', (['maxVals'], {}), '(maxVals)\n', (6123, 6132), True, 'import numpy as np\n'), ((8322, 8365), 'torch.autograd.Variable', 'Variable', (['interpolation'], {'requires_grad': '(True)'}), '(interpolation, requires_grad=True)\n', 
(8330, 8365), False, 'from torch.autograd import Variable\n'), ((887, 903), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (895, 903), True, 'import numpy as np\n'), ((919, 945), 'numpy.ones', 'np.ones', (['sample.shape[:-1]'], {}), '(sample.shape[:-1])\n', (926, 945), True, 'import numpy as np\n'), ((5537, 5681), 'torch.autograd.grad', 'autograd.grad', ([], {'outputs': 'dInterpolation', 'inputs': 'interpolates', 'grad_outputs': 'fakeOutput', 'create_graph': '(True)', 'retain_graph': '(True)', 'only_inputs': '(True)'}), '(outputs=dInterpolation, inputs=interpolates, grad_outputs=\n fakeOutput, create_graph=True, retain_graph=True, only_inputs=True)\n', (5550, 5681), False, 'from torch import autograd\n'), ((6548, 6583), 'numpy.random.random', 'np.random.random', (['real_sample.shape'], {}), '(real_sample.shape)\n', (6564, 6583), True, 'import numpy as np\n'), ((6986, 7132), 'torch.autograd.grad', 'autograd.grad', ([], {'outputs': 'd_interpolation', 'inputs': 'interpolates', 'grad_outputs': 'fake_output', 'create_graph': '(True)', 'retain_graph': '(True)', 'only_inputs': '(True)'}), '(outputs=d_interpolation, inputs=interpolates, grad_outputs=\n fake_output, create_graph=True, retain_graph=True, only_inputs=True)\n', (6999, 7132), False, 'from torch import autograd\n'), ((8626, 8760), 'torch.autograd.grad', 'autograd.grad', ([], {'outputs': 'interpolation_logits', 'inputs': 'interpolation', 'grad_outputs': 'grad_outputs', 'create_graph': '(True)', 'retain_graph': '(True)'}), '(outputs=interpolation_logits, inputs=interpolation,\n grad_outputs=grad_outputs, create_graph=True, retain_graph=True)\n', (8639, 8760), False, 'from torch import autograd\n'), ((1530, 1576), 'libs.custom_transforms.PadDifferentlyIfNeeded', 'PadDifferentlyIfNeeded', (['(512)', '(512)'], {'mask_value': '(0)'}), '(512, 512, mask_value=0)\n', (1552, 1576), False, 'from libs.custom_transforms import PadDifferentlyIfNeeded\n'), ((1581, 1594), 'albumentations.torch.ToTensor', 
'AT.ToTensor', ([], {}), '()\n', (1592, 1594), True, 'from albumentations import torch as AT\n'), ((1631, 1655), 'albumentations.RandomRotate90', 'albu.RandomRotate90', (['(1.0)'], {}), '(1.0)\n', (1650, 1655), True, 'import albumentations as albu\n'), ((1662, 1686), 'albumentations.HorizontalFlip', 'albu.HorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (1681, 1686), True, 'import albumentations as albu\n'), ((1687, 1733), 'libs.custom_transforms.PadDifferentlyIfNeeded', 'PadDifferentlyIfNeeded', (['(512)', '(512)'], {'mask_value': '(0)'}), '(512, 512, mask_value=0)\n', (1709, 1733), False, 'from libs.custom_transforms import PadDifferentlyIfNeeded\n'), ((1733, 1746), 'albumentations.torch.ToTensor', 'AT.ToTensor', ([], {}), '()\n', (1744, 1746), True, 'from albumentations import torch as AT\n'), ((3179, 3205), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(516)'], {}), '(516)\n', (3200, 3205), True, 'import torchvision.transforms as transforms\n'), ((3211, 3232), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3230, 3232), True, 'import torchvision.transforms as transforms\n')] |
import os
import numpy as np
import pytest
import torch
from skimage.metrics import structural_similarity as ski_ssim
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import SSIM
def test_zero_div():
    """compute() before any update must raise NotComputableError."""
    metric = SSIM(data_range=1.0)
    with pytest.raises(NotComputableError):
        metric.compute()
def test_invalid_ssim():
    """Invalid constructor arguments and input shapes/dtypes must raise clear errors."""
    y_pred = torch.rand(1, 1, 4, 4)
    y = y_pred + 0.125
    # Constructor-argument validation.
    with pytest.raises(ValueError, match=r"Expected kernel_size to have odd positive number."):
        ssim = SSIM(data_range=1.0, kernel_size=2)
        ssim.update((y_pred, y))
        ssim.compute()
    with pytest.raises(ValueError, match=r"Expected kernel_size to have odd positive number."):
        ssim = SSIM(data_range=1.0, kernel_size=-1)
        ssim.update((y_pred, y))
        ssim.compute()
    with pytest.raises(ValueError, match=r"Argument kernel_size should be either int or a sequence of int."):
        ssim = SSIM(data_range=1.0, kernel_size=1.0)
        ssim.update((y_pred, y))
        ssim.compute()
    with pytest.raises(ValueError, match=r"Argument sigma should be either float or a sequence of float."):
        ssim = SSIM(data_range=1.0, sigma=-1)
        ssim.update((y_pred, y))
        ssim.compute()
    with pytest.raises(ValueError, match=r"Expected sigma to have positive number."):
        ssim = SSIM(data_range=1.0, sigma=(-1, -1))
        ssim.update((y_pred, y))
        ssim.compute()
    with pytest.raises(ValueError, match=r"Argument sigma should be either float or a sequence of float."):
        ssim = SSIM(data_range=1.0, sigma=1)
        ssim.update((y_pred, y))
        ssim.compute()
    # Input validation — note `y` is mutated cumulatively through the
    # remaining blocks: (1,1,4,4) -> (1,4,4) -> (4,4) -> (4,4) double.
    with pytest.raises(ValueError, match=r"Expected y_pred and y to have the same shape."):
        y = y.squeeze(dim=0)
        ssim = SSIM(data_range=1.0)
        ssim.update((y_pred, y))
        ssim.compute()
    with pytest.raises(ValueError, match=r"Expected y_pred and y to have BxCxHxW shape."):
        y = y.squeeze(dim=0)
        ssim = SSIM(data_range=1.0)
        ssim.update((y, y))
        ssim.compute()
    with pytest.raises(TypeError, match=r"Expected y_pred and y to have the same data type."):
        y = y.double()
        ssim = SSIM(data_range=1.0)
        ssim.update((y_pred, y))
        ssim.compute()
# FIX: the original `["cpu"] + ["cuda"] if torch.cuda.is_available() else []`
# parses as `(["cpu"] + ["cuda"]) if ... else []`, so on CPU-only machines the
# device list is empty and the whole test is silently skipped. Parenthesize so
# "cpu" is always exercised.
@pytest.mark.parametrize("device", ["cpu"] + (["cuda"] if torch.cuda.is_available() else []))
@pytest.mark.parametrize(
    "shape, kernel_size, gaussian, use_sample_covariance",
    [[(8, 3, 224, 224), 7, False, True], [(12, 3, 28, 28), 11, True, False]],
)
def test_ssim(device, shape, kernel_size, gaussian, use_sample_covariance):
    """Ignite's SSIM must agree with scikit-image's reference implementation."""
    y_pred = torch.rand(shape, device=device)
    y = y_pred * 0.8
    sigma = 1.5
    data_range = 1.0
    # NOTE(review): kernel_size/gaussian are forwarded to ski_ssim but not to
    # SSIM() below — confirm whether they should be passed to the metric too.
    ssim = SSIM(data_range=data_range, sigma=sigma, device=device)
    ssim.update((y_pred, y))
    ignite_ssim = ssim.compute()
    skimg_pred = y_pred.cpu().numpy()
    skimg_y = skimg_pred * 0.8
    skimg_ssim = ski_ssim(
        skimg_pred,
        skimg_y,
        win_size=kernel_size,
        sigma=sigma,
        channel_axis=1,
        gaussian_weights=gaussian,
        data_range=data_range,
        use_sample_covariance=use_sample_covariance,
    )
    assert isinstance(ignite_ssim, float)
    assert np.allclose(ignite_ssim, skimg_ssim, atol=7e-5)
def test_ssim_variable_batchsize():
    # Checks https://github.com/pytorch/ignite/issues/2532
    # Accumulating updates with varying batch sizes must match a single
    # update over the concatenated batch.
    sigma = 1.5
    data_range = 1.0
    ssim = SSIM(data_range=data_range, sigma=sigma)
    batch_sizes = [12, 12, 8, 16, 1, 30]
    preds = [torch.rand(b, 3, 28, 28) for b in batch_sizes]
    targets = [pred * 0.8 for pred in preds]
    for pred, target in zip(preds, targets):
        ssim.update((pred, target))
    batched_result = ssim.compute()
    ssim.reset()
    ssim.update((torch.cat(preds), torch.cat(targets)))
    assert np.allclose(batched_result, ssim.compute())
def _test_distrib_integration(device, tol=1e-4):
    """Run SSIM attached to an Engine across ranks and compare to scikit-image."""
    from ignite.engine import Engine
    rank = idist.get_rank()
    n_iters = 100
    s = 10
    # Each rank consumes its own `offset`-sized slice of the shared data.
    offset = n_iters * s
    def _test(metric_device):
        y_pred = torch.rand(offset * idist.get_world_size(), 3, 28, 28, dtype=torch.float, device=device)
        y = y_pred * 0.65
        def update(engine, i):
            # Slice out this rank's batch for iteration i.
            return (
                y_pred[i * s + offset * rank : (i + 1) * s + offset * rank],
                y[i * s + offset * rank : (i + 1) * s + offset * rank],
            )
        # Case 1: default gaussian kernel.
        engine = Engine(update)
        SSIM(data_range=1.0, device=metric_device).attach(engine, "ssim")
        data = list(range(n_iters))
        engine.run(data=data, max_epochs=1)
        assert "ssim" in engine.state.metrics
        res = engine.state.metrics["ssim"]
        np_pred = y_pred.cpu().numpy()
        np_true = np_pred * 0.65
        true_res = ski_ssim(
            np_pred,
            np_true,
            win_size=11,
            sigma=1.5,
            channel_axis=1,
            gaussian_weights=True,
            data_range=1.0,
            use_sample_covariance=False,
        )
        assert pytest.approx(res, abs=tol) == true_res
        # Case 2: uniform (non-gaussian) 7x7 kernel.
        engine = Engine(update)
        SSIM(data_range=1.0, gaussian=False, kernel_size=7, device=metric_device).attach(engine, "ssim")
        data = list(range(n_iters))
        engine.run(data=data, max_epochs=1)
        assert "ssim" in engine.state.metrics
        res = engine.state.metrics["ssim"]
        np_pred = y_pred.cpu().numpy()
        np_true = np_pred * 0.65
        true_res = ski_ssim(np_pred, np_true, win_size=7, channel_axis=1, gaussian_weights=False, data_range=1.0)
        assert pytest.approx(res, abs=tol) == true_res
    _test("cpu")
    # On non-XLA backends, also exercise the distributed device.
    if torch.device(device).type != "xla":
        _test(idist.device())
def _test_distrib_accumulator_device(device):
    """Check the metric's internal state (kernel, accumulators) lives on metric_device."""
    metric_devices = [torch.device("cpu")]
    if torch.device(device).type != "xla":
        metric_devices.append(idist.device())
    for metric_device in metric_devices:
        ssim = SSIM(data_range=1.0, device=metric_device)
        for dev in [ssim._device, ssim._kernel.device]:
            assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
        y_pred = torch.rand(2, 3, 28, 28, dtype=torch.float, device=device)
        y = y_pred * 0.65
        ssim.update((y_pred, y))
        # The running sum is only allocated after the first update.
        dev = ssim._sum_of_ssim.device
        assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"
# Native torch.distributed variants: single-node NCCL/Gloo and multi-node
# Gloo/NCCL. Each delegates to the shared _test_distrib_* helpers.
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
    device = idist.device()
    _test_distrib_integration(device)
    _test_distrib_accumulator_device(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
    device = idist.device()
    _test_distrib_integration(device)
    _test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
    device = idist.device()
    _test_distrib_integration(device)
    _test_distrib_accumulator_device(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
    device = idist.device()
    _test_distrib_integration(device)
    _test_distrib_accumulator_device(device)
# XLA/TPU and Horovod variants of the same distributed checks. Note the SSIM
# tolerance is relaxed to 1e-3 on XLA.
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
    device = idist.device()
    _test_distrib_integration(device, tol=1e-3)
    _test_distrib_accumulator_device(device)
def _test_distrib_xla_nprocs(index):
    # Per-process body spawned by xmp_executor below.
    device = idist.device()
    _test_distrib_integration(device, tol=1e-3)
    _test_distrib_accumulator_device(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
    n = int(os.environ["NUM_TPU_WORKERS"])
    xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
    device = "cpu" if not torch.cuda.is_available() else "cuda"
    nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
    gloo_hvd_executor(_test_distrib_integration, (device,), np=nproc, do_init=True)
    gloo_hvd_executor(_test_distrib_accumulator_device, (device,), np=nproc, do_init=True)
| [
"ignite.distributed.get_rank",
"ignite.distributed.device",
"numpy.allclose",
"torch.cat",
"ignite.distributed.get_world_size",
"torch.cuda.device_count",
"ignite.engine.Engine",
"pytest.raises",
"skimage.metrics.structural_similarity",
"pytest.mark.skipif",
"ignite.metrics.SSIM",
"torch.cuda.... | [((2416, 2572), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape, kernel_size, gaussian, use_sample_covariance"""', '[[(8, 3, 224, 224), 7, False, True], [(12, 3, 28, 28), 11, True, False]]'], {}), "('shape, kernel_size, gaussian, use_sample_covariance',\n [[(8, 3, 224, 224), 7, False, True], [(12, 3, 28, 28), 11, True, False]])\n", (2439, 2572), False, 'import pytest\n'), ((6633, 6732), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not idist.has_native_dist_support)'], {'reason': '"""Skip if no native dist support"""'}), "(not idist.has_native_dist_support, reason=\n 'Skip if no native dist support')\n", (6651, 6732), False, 'import pytest\n'), ((7009, 7108), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not idist.has_native_dist_support)'], {'reason': '"""Skip if no native dist support"""'}), "(not idist.has_native_dist_support, reason=\n 'Skip if no native dist support')\n", (7027, 7108), False, 'import pytest\n'), ((7326, 7425), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not idist.has_native_dist_support)'], {'reason': '"""Skip if no native dist support"""'}), "(not idist.has_native_dist_support, reason=\n 'Skip if no native dist support')\n", (7344, 7425), False, 'import pytest\n'), ((7422, 7529), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('MULTINODE_DISTRIB' not in os.environ)"], {'reason': '"""Skip if not multi-node distributed"""'}), "('MULTINODE_DISTRIB' not in os.environ, reason=\n 'Skip if not multi-node distributed')\n", (7440, 7529), False, 'import pytest\n'), ((7756, 7855), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not idist.has_native_dist_support)'], {'reason': '"""Skip if no native dist support"""'}), "(not idist.has_native_dist_support, reason=\n 'Skip if no native dist support')\n", (7774, 7855), False, 'import pytest\n'), ((7852, 7963), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('GPU_MULTINODE_DISTRIB' not in os.environ)"], {'reason': '"""Skip if not multi-node distributed"""'}), 
"('GPU_MULTINODE_DISTRIB' not in os.environ, reason=\n 'Skip if not multi-node distributed')\n", (7870, 7963), False, 'import pytest\n'), ((8165, 8270), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('NUM_TPU_WORKERS' in os.environ)"], {'reason': '"""Skip if NUM_TPU_WORKERS is in env vars"""'}), "('NUM_TPU_WORKERS' in os.environ, reason=\n 'Skip if NUM_TPU_WORKERS is in env vars')\n", (8183, 8270), False, 'import pytest\n'), ((8267, 8358), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not idist.has_xla_support)'], {'reason': '"""Skip if no PyTorch XLA package"""'}), "(not idist.has_xla_support, reason=\n 'Skip if no PyTorch XLA package')\n", (8285, 8358), False, 'import pytest\n'), ((8693, 8802), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('NUM_TPU_WORKERS' not in os.environ)"], {'reason': '"""Skip if no NUM_TPU_WORKERS in env vars"""'}), "('NUM_TPU_WORKERS' not in os.environ, reason=\n 'Skip if no NUM_TPU_WORKERS in env vars')\n", (8711, 8802), False, 'import pytest\n'), ((8799, 8890), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not idist.has_xla_support)'], {'reason': '"""Skip if no PyTorch XLA package"""'}), "(not idist.has_xla_support, reason=\n 'Skip if no PyTorch XLA package')\n", (8817, 8890), False, 'import pytest\n'), ((9062, 9154), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not idist.has_hvd_support)'], {'reason': '"""Skip if no Horovod dist support"""'}), "(not idist.has_hvd_support, reason=\n 'Skip if no Horovod dist support')\n", (9080, 9154), False, 'import pytest\n'), ((9151, 9242), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('WORLD_SIZE' in os.environ)"], {'reason': '"""Skip if launched as multiproc"""'}), "('WORLD_SIZE' in os.environ, reason=\n 'Skip if launched as multiproc')\n", (9169, 9242), False, 'import pytest\n'), ((270, 290), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)'}), '(data_range=1.0)\n', (274, 290), False, 'from ignite.metrics import SSIM\n'), ((398, 420), 'torch.rand', 'torch.rand', (['(1)', '(1)', 
'(4)', '(4)'], {}), '(1, 1, 4, 4)\n', (408, 420), False, 'import torch\n'), ((2669, 2701), 'torch.rand', 'torch.rand', (['shape'], {'device': 'device'}), '(shape, device=device)\n', (2679, 2701), False, 'import torch\n'), ((2772, 2827), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': 'data_range', 'sigma': 'sigma', 'device': 'device'}), '(data_range=data_range, sigma=sigma, device=device)\n', (2776, 2827), False, 'from ignite.metrics import SSIM\n'), ((2977, 3160), 'skimage.metrics.structural_similarity', 'ski_ssim', (['skimg_pred', 'skimg_y'], {'win_size': 'kernel_size', 'sigma': 'sigma', 'channel_axis': '(1)', 'gaussian_weights': 'gaussian', 'data_range': 'data_range', 'use_sample_covariance': 'use_sample_covariance'}), '(skimg_pred, skimg_y, win_size=kernel_size, sigma=sigma,\n channel_axis=1, gaussian_weights=gaussian, data_range=data_range,\n use_sample_covariance=use_sample_covariance)\n', (2985, 3160), True, 'from skimage.metrics import structural_similarity as ski_ssim\n'), ((3278, 3326), 'numpy.allclose', 'np.allclose', (['ignite_ssim', 'skimg_ssim'], {'atol': '(7e-05)'}), '(ignite_ssim, skimg_ssim, atol=7e-05)\n', (3289, 3326), True, 'import numpy as np\n'), ((3471, 3511), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': 'data_range', 'sigma': 'sigma'}), '(data_range=data_range, sigma=sigma)\n', (3475, 3511), False, 'from ignite.metrics import SSIM\n'), ((4001, 4027), 'numpy.allclose', 'np.allclose', (['out', 'expected'], {}), '(out, expected)\n', (4012, 4027), True, 'import numpy as np\n'), ((4128, 4144), 'ignite.distributed.get_rank', 'idist.get_rank', ([], {}), '()\n', (4142, 4144), True, 'import ignite.distributed as idist\n'), ((6883, 6897), 'ignite.distributed.device', 'idist.device', ([], {}), '()\n', (6895, 6897), True, 'import ignite.distributed as idist\n'), ((7190, 7204), 'ignite.distributed.device', 'idist.device', ([], {}), '()\n', (7202, 7204), True, 'import ignite.distributed as idist\n'), ((7620, 7634), 'ignite.distributed.device', 
'idist.device', ([], {}), '()\n', (7632, 7634), True, 'import ignite.distributed as idist\n'), ((8047, 8061), 'ignite.distributed.device', 'idist.device', ([], {}), '()\n', (8059, 8061), True, 'import ignite.distributed as idist\n'), ((8405, 8419), 'ignite.distributed.device', 'idist.device', ([], {}), '()\n', (8417, 8419), True, 'import ignite.distributed as idist\n'), ((8565, 8579), 'ignite.distributed.device', 'idist.device', ([], {}), '()\n', (8577, 8579), True, 'import ignite.distributed as idist\n'), ((300, 333), 'pytest.raises', 'pytest.raises', (['NotComputableError'], {}), '(NotComputableError)\n', (313, 333), False, 'import pytest\n'), ((453, 542), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Expected kernel_size to have odd positive number."""'}), "(ValueError, match=\n 'Expected kernel_size to have odd positive number.')\n", (466, 542), False, 'import pytest\n'), ((555, 590), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)', 'kernel_size': '(2)'}), '(data_range=1.0, kernel_size=2)\n', (559, 590), False, 'from ignite.metrics import SSIM\n'), ((657, 746), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Expected kernel_size to have odd positive number."""'}), "(ValueError, match=\n 'Expected kernel_size to have odd positive number.')\n", (670, 746), False, 'import pytest\n'), ((759, 795), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)', 'kernel_size': '(-1)'}), '(data_range=1.0, kernel_size=-1)\n', (763, 795), False, 'from ignite.metrics import SSIM\n'), ((862, 965), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Argument kernel_size should be either int or a sequence of int."""'}), "(ValueError, match=\n 'Argument kernel_size should be either int or a sequence of int.')\n", (875, 965), False, 'import pytest\n'), ((978, 1015), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)', 'kernel_size': '(1.0)'}), '(data_range=1.0, kernel_size=1.0)\n', (982, 1015), False, 'from 
ignite.metrics import SSIM\n'), ((1082, 1183), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Argument sigma should be either float or a sequence of float."""'}), "(ValueError, match=\n 'Argument sigma should be either float or a sequence of float.')\n", (1095, 1183), False, 'import pytest\n'), ((1196, 1226), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)', 'sigma': '(-1)'}), '(data_range=1.0, sigma=-1)\n', (1200, 1226), False, 'from ignite.metrics import SSIM\n'), ((1293, 1367), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Expected sigma to have positive number."""'}), "(ValueError, match='Expected sigma to have positive number.')\n", (1306, 1367), False, 'import pytest\n'), ((1385, 1421), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)', 'sigma': '(-1, -1)'}), '(data_range=1.0, sigma=(-1, -1))\n', (1389, 1421), False, 'from ignite.metrics import SSIM\n'), ((1488, 1589), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Argument sigma should be either float or a sequence of float."""'}), "(ValueError, match=\n 'Argument sigma should be either float or a sequence of float.')\n", (1501, 1589), False, 'import pytest\n'), ((1602, 1631), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)', 'sigma': '(1)'}), '(data_range=1.0, sigma=1)\n', (1606, 1631), False, 'from ignite.metrics import SSIM\n'), ((1698, 1783), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Expected y_pred and y to have the same shape."""'}), "(ValueError, match='Expected y_pred and y to have the same shape.'\n )\n", (1711, 1783), False, 'import pytest\n'), ((1825, 1845), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)'}), '(data_range=1.0)\n', (1829, 1845), False, 'from ignite.metrics import SSIM\n'), ((1912, 1991), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Expected y_pred and y to have BxCxHxW shape."""'}), "(ValueError, match='Expected y_pred and y to have BxCxHxW 
shape.')\n", (1925, 1991), False, 'import pytest\n'), ((2038, 2058), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)'}), '(data_range=1.0)\n', (2042, 2058), False, 'from ignite.metrics import SSIM\n'), ((2120, 2208), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Expected y_pred and y to have the same data type."""'}), "(TypeError, match=\n 'Expected y_pred and y to have the same data type.')\n", (2133, 2208), False, 'import pytest\n'), ((2244, 2264), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)'}), '(data_range=1.0)\n', (2248, 2264), False, 'from ignite.metrics import SSIM\n'), ((2380, 2405), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2403, 2405), False, 'import torch\n'), ((3537, 3562), 'torch.rand', 'torch.rand', (['(12)', '(3)', '(28)', '(28)'], {}), '(12, 3, 28, 28)\n', (3547, 3562), False, 'import torch\n'), ((3572, 3597), 'torch.rand', 'torch.rand', (['(12)', '(3)', '(28)', '(28)'], {}), '(12, 3, 28, 28)\n', (3582, 3597), False, 'import torch\n'), ((3607, 3631), 'torch.rand', 'torch.rand', (['(8)', '(3)', '(28)', '(28)'], {}), '(8, 3, 28, 28)\n', (3617, 3631), False, 'import torch\n'), ((3641, 3666), 'torch.rand', 'torch.rand', (['(16)', '(3)', '(28)', '(28)'], {}), '(16, 3, 28, 28)\n', (3651, 3666), False, 'import torch\n'), ((3676, 3700), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(28)', '(28)'], {}), '(1, 3, 28, 28)\n', (3686, 3700), False, 'import torch\n'), ((3710, 3735), 'torch.rand', 'torch.rand', (['(30)', '(3)', '(28)', '(28)'], {}), '(30, 3, 28, 28)\n', (3720, 3735), False, 'import torch\n'), ((4596, 4610), 'ignite.engine.Engine', 'Engine', (['update'], {}), '(update)\n', (4602, 4610), False, 'from ignite.engine import Engine\n'), ((4948, 5086), 'skimage.metrics.structural_similarity', 'ski_ssim', (['np_pred', 'np_true'], {'win_size': '(11)', 'sigma': '(1.5)', 'channel_axis': '(1)', 'gaussian_weights': '(True)', 'data_range': '(1.0)', 'use_sample_covariance': '(False)'}), 
'(np_pred, np_true, win_size=11, sigma=1.5, channel_axis=1,\n gaussian_weights=True, data_range=1.0, use_sample_covariance=False)\n', (4956, 5086), True, 'from skimage.metrics import structural_similarity as ski_ssim\n'), ((5264, 5278), 'ignite.engine.Engine', 'Engine', (['update'], {}), '(update)\n', (5270, 5278), False, 'from ignite.engine import Engine\n'), ((5647, 5746), 'skimage.metrics.structural_similarity', 'ski_ssim', (['np_pred', 'np_true'], {'win_size': '(7)', 'channel_axis': '(1)', 'gaussian_weights': '(False)', 'data_range': '(1.0)'}), '(np_pred, np_true, win_size=7, channel_axis=1, gaussian_weights=\n False, data_range=1.0)\n', (5655, 5746), True, 'from skimage.metrics import structural_similarity as ski_ssim\n'), ((5960, 5979), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5972, 5979), False, 'import torch\n'), ((6127, 6169), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)', 'device': 'metric_device'}), '(data_range=1.0, device=metric_device)\n', (6131, 6169), False, 'from ignite.metrics import SSIM\n'), ((6348, 6406), 'torch.rand', 'torch.rand', (['(2)', '(3)', '(28)', '(28)'], {'dtype': 'torch.float', 'device': 'device'}), '(2, 3, 28, 28, dtype=torch.float, device=device)\n', (6358, 6406), False, 'import torch\n'), ((6748, 6773), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6771, 6773), False, 'import torch\n'), ((9396, 9421), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (9419, 9421), False, 'import torch\n'), ((3920, 3938), 'torch.cat', 'torch.cat', (['y_preds'], {}), '(y_preds)\n', (3929, 3938), False, 'import torch\n'), ((3940, 3957), 'torch.cat', 'torch.cat', (['y_true'], {}), '(y_true)\n', (3949, 3957), False, 'import torch\n'), ((5206, 5233), 'pytest.approx', 'pytest.approx', (['res'], {'abs': 'tol'}), '(res, abs=tol)\n', (5219, 5233), False, 'import pytest\n'), ((5758, 5785), 'pytest.approx', 'pytest.approx', (['res'], {'abs': 'tol'}), '(res, abs=tol)\n', 
(5771, 5785), False, 'import pytest\n'), ((5823, 5843), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (5835, 5843), False, 'import torch\n'), ((5873, 5887), 'ignite.distributed.device', 'idist.device', ([], {}), '()\n', (5885, 5887), True, 'import ignite.distributed as idist\n'), ((5988, 6008), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (6000, 6008), False, 'import torch\n'), ((6054, 6068), 'ignite.distributed.device', 'idist.device', ([], {}), '()\n', (6066, 6068), True, 'import ignite.distributed as idist\n'), ((9306, 9331), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9329, 9331), False, 'import torch\n'), ((9365, 9390), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9388, 9390), False, 'import torch\n'), ((4267, 4289), 'ignite.distributed.get_world_size', 'idist.get_world_size', ([], {}), '()\n', (4287, 4289), True, 'import ignite.distributed as idist\n'), ((4619, 4661), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)', 'device': 'metric_device'}), '(data_range=1.0, device=metric_device)\n', (4623, 4661), False, 'from ignite.metrics import SSIM\n'), ((5287, 5360), 'ignite.metrics.SSIM', 'SSIM', ([], {'data_range': '(1.0)', 'gaussian': '(False)', 'kernel_size': '(7)', 'device': 'metric_device'}), '(data_range=1.0, gaussian=False, kernel_size=7, device=metric_device)\n', (5291, 5360), False, 'from ignite.metrics import SSIM\n')] |
import os
import warnings
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
warnings.filterwarnings('ignore')
import tensorflow as tf
import malaya_speech.augmentation.waveform as augmentation
from malaya_speech.train.model import super_res
from malaya_speech.train.model import enhancement
import malaya_speech
import malaya_speech.train as train
from glob import glob
import random
import numpy as np
import IPython.display as ipd
from multiprocessing import Pool
from itertools import cycle
import itertools
# Escalate numpy floating-point warnings (overflow, invalid, etc.) to
# exceptions so bad audio data fails loudly instead of propagating NaNs.
np.seterr(all = 'raise')
def chunks(l, n):
    """Yield ``(sublist, chunk_index)`` pairs of successive length-``n`` slices of ``l``."""
    for chunk_index, start in enumerate(range(0, len(l), n)):
        yield l[start : start + n], chunk_index
def multiprocessing(strings, function, cores = 6, returned = True):
    """Split ``strings`` into chunks and map ``function`` over them in a process pool.

    NOTE: this function name shadows the stdlib ``multiprocessing`` module;
    only ``Pool`` is imported directly in this file, so no clash occurs here.

    Parameters
    ----------
    strings : list
        Items to be processed.
    function : callable
        Picklable callable applied to each ``(sublist, index)`` chunk
        produced by ``chunks``.
    cores : int
        Number of worker processes.
    returned : bool
        If True, return the flattened list of all chunk results.
    """
    # max(1, ...) guards against a zero chunk size when len(strings) < cores,
    # which would make range(0, len, 0) inside chunks() raise ValueError.
    df_split = chunks(strings, max(1, len(strings) // cores))
    pool = Pool(cores)
    print('initiate pool map')
    pooled = pool.map(function, df_split)
    print('gather from pool')
    pool.close()
    pool.join()
    print('closed pool')
    if returned:
        return list(itertools.chain(*pooled))
# Clean-speech corpus, shuffled so the infinite cycle below visits files
# in random order.
files = glob('../youtube/clean-wav/*.wav')
random.shuffle(files)
len(files)  # no-op at script level; presumably a notebook-cell leftover
# Noise pool: ambient noise recordings plus music stems (bass/drums/other)
# from the HHDS source-separation dataset.
noises = glob('../noise-44k/noise/*.wav') + glob('../noise-44k/clean-wav/*.wav')
basses = glob('HHDS/Sources/**/*bass.wav', recursive = True)
drums = glob('HHDS/Sources/**/*drums.wav', recursive = True)
others = glob('HHDS/Sources/**/*other.wav', recursive = True)
noises = noises + basses + drums + others
random.shuffle(noises)
# Drop noise files larger than ~50 MB to keep loading cheap.
noises = [n for n in noises if (os.path.getsize(n) / 1e6) < 50]
# Endless iterator over speech files for the training generator.
file_cycle = cycle(files)
sr = 44100  # working sample rate (Hz)
selected_sr = [6000, 8000, 16000]  # candidate downsample rates for degraded inputs
def read_wav(f):
    """Load audio file ``f`` at the module-level working sample rate ``sr``."""
    return malaya_speech.load(f, sr = sr)
def random_sampling(s, length):
    """Crop a random segment of ``length`` (units set by the helper — presumably ms; confirm) from ``s``."""
    return augmentation.random_sampling(s, sr = sr, length = length)
def downsample(y, sr, down_sr):
    """Degrade ``y`` by resampling down to ``down_sr`` and back up to ``sr``.

    Note: the ``sr`` parameter shadows the module-level ``sr`` constant.
    """
    lowres = malaya_speech.resample(y, sr, down_sr)
    return malaya_speech.resample(lowres, down_sr, sr)
def parallel(f):
    """Build one (degraded, clean) waveform pair from audio file ``f``."""
    clean = read_wav(f)[0]
    clean = random_sampling(clean, length = random.randint(3000, 10000))
    # Peak-normalise; the +1e-9 avoids division by zero on silent clips
    # (np.seterr(all='raise') would otherwise turn that into an exception).
    clean = clean / (np.max(np.abs(clean)) + 1e-9)
    degraded = downsample(clean, sr, random.choice(selected_sr))
    return degraded, clean
def loop(files):
    """Worker body: ``files`` is a ``(sublist, index)`` chunk; build a pair per file."""
    return [parallel(f) for f in files[0]]
def generate(batch_size = 10, repeat = 5):
    """Endlessly yield ``{'combined': degraded, 'y': clean}`` training examples.

    Each outer iteration pulls ``batch_size`` files from the global cycle,
    builds pairs in worker processes, then re-yields the shuffled batch
    ``repeat`` times. Pairs containing NaNs are skipped.
    """
    while True:
        batch_files = [next(file_cycle) for _ in range(batch_size)]
        pairs = multiprocessing(batch_files, loop, cores = len(batch_files))
        for _ in range(repeat):
            random.shuffle(pairs)
            for degraded, clean in pairs:
                if np.isnan(degraded).any() or np.isnan(clean).any():
                    continue
                yield {'combined': degraded, 'y': clean}
def get_dataset():
    """Return a zero-argument factory that builds the tf.data pipeline over ``generate``."""
    def get():
        output_types = {'combined': tf.float32, 'y': tf.float32}
        output_shapes = {
            'combined': tf.TensorShape([None]),
            'y': tf.TensorShape([None]),
        }
        return tf.data.Dataset.from_generator(
            generate, output_types, output_shapes = output_shapes
        )
    return get
init_lr = 2e-4  # initial learning rate for Adam
epochs = 500_000  # total training steps; also the polynomial-decay horizon
partition = 8192  # samples per fixed-size window fed to the model
def model_fn(features, labels, mode, params):
    """tf.estimator model_fn: AudioTFILM super-resolution trained on an L2/SNR loss.

    ``labels`` and ``params`` are unused; the target waveform arrives in
    ``features['y']`` and the degraded input in ``features['combined']``.
    """
    # Add a trailing channel dimension: (time,) -> (time, 1).
    combined = tf.expand_dims(features['combined'], -1)
    y = tf.expand_dims(features['y'], -1)
    # Split variable-length audio into fixed windows of `partition` samples.
    partitioned_x = malaya_speech.tf_featurization.pad_and_partition(
        combined, partition
    )
    partitioned_y = malaya_speech.tf_featurization.pad_and_partition(
        y, partition
    )
    model = super_res.AudioTFILM(partitioned_x, dropout = 0.0)
    # snr() returns (l2 loss, SNR metric); only the L2 term is optimised.
    l2_loss, snr = enhancement.loss.snr(model.logits, partitioned_y)
    sdr = enhancement.loss.sdr(model.logits, partitioned_y)  # logged only
    loss = l2_loss
    # Named tensor so the LoggingTensorHook(['total_loss']) below can find it.
    tf.identity(loss, 'total_loss')
    tf.summary.scalar('total_loss', loss)
    tf.summary.scalar('snr', snr)
    tf.summary.scalar('sdr', sdr)
    global_step = tf.train.get_or_create_global_step()
    # Linear decay (power=1.0, no cycling) from init_lr to 1e-6 over `epochs` steps.
    learning_rate = tf.constant(value = init_lr, shape = [], dtype = tf.float32)
    learning_rate = tf.train.polynomial_decay(
        learning_rate,
        global_step,
        epochs,
        end_learning_rate = 1e-6,
        power = 1.0,
        cycle = False,
    )
    tf.summary.scalar('learning_rate', learning_rate)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(
            learning_rate = learning_rate, beta1 = 0.99, beta2 = 0.999
        )
        train_op = optimizer.minimize(loss, global_step = global_step)
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = mode, loss = loss, train_op = train_op
        )
    elif mode == tf.estimator.ModeKeys.EVAL:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = tf.estimator.ModeKeys.EVAL, loss = loss
        )
    # NOTE(review): PREDICT mode falls through with estimator_spec unbound
    # (UnboundLocalError) — confirm PREDICT is never requested.
    return estimator_spec
# Print the 'total_loss' tensor (named via tf.identity in model_fn) every step.
train_hooks = [tf.train.LoggingTensorHook(['total_loss'], every_n_iter = 1)]
train_dataset = get_dataset()
save_directory = 'super-resolution-audiotfilm'
# Launch single-GPU estimator training; checkpoints every 3000 steps,
# no evaluation loop (eval_step = 0).
train.run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = save_directory,
    num_gpus = 1,
    log_step = 1,
    save_checkpoint_step = 3000,
    max_steps = epochs,
    train_hooks = train_hooks,
    eval_step = 0,
)
| [
"numpy.abs",
"tensorflow.identity",
"random.shuffle",
"malaya_speech.augmentation.waveform.random_sampling",
"tensorflow.train.LoggingTensorHook",
"numpy.isnan",
"malaya_speech.train.model.enhancement.loss.snr",
"glob.glob",
"itertools.cycle",
"random.randint",
"malaya_speech.train.model.enhance... | [((68, 101), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (91, 101), False, 'import warnings\n'), ((505, 527), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (514, 527), True, 'import numpy as np\n'), ((1003, 1037), 'glob.glob', 'glob', (['"""../youtube/clean-wav/*.wav"""'], {}), "('../youtube/clean-wav/*.wav')\n", (1007, 1037), False, 'from glob import glob\n'), ((1038, 1059), 'random.shuffle', 'random.shuffle', (['files'], {}), '(files)\n', (1052, 1059), False, 'import random\n'), ((1162, 1211), 'glob.glob', 'glob', (['"""HHDS/Sources/**/*bass.wav"""'], {'recursive': '(True)'}), "('HHDS/Sources/**/*bass.wav', recursive=True)\n", (1166, 1211), False, 'from glob import glob\n'), ((1222, 1272), 'glob.glob', 'glob', (['"""HHDS/Sources/**/*drums.wav"""'], {'recursive': '(True)'}), "('HHDS/Sources/**/*drums.wav', recursive=True)\n", (1226, 1272), False, 'from glob import glob\n'), ((1284, 1334), 'glob.glob', 'glob', (['"""HHDS/Sources/**/*other.wav"""'], {'recursive': '(True)'}), "('HHDS/Sources/**/*other.wav', recursive=True)\n", (1288, 1334), False, 'from glob import glob\n'), ((1379, 1401), 'random.shuffle', 'random.shuffle', (['noises'], {}), '(noises)\n', (1393, 1401), False, 'import random\n'), ((1479, 1491), 'itertools.cycle', 'cycle', (['files'], {}), '(files)\n', (1484, 1491), False, 'from itertools import cycle\n'), ((4806, 5009), 'malaya_speech.train.run_training', 'train.run_training', ([], {'train_fn': 'train_dataset', 'model_fn': 'model_fn', 'model_dir': 'save_directory', 'num_gpus': '(1)', 'log_step': '(1)', 'save_checkpoint_step': '(3000)', 'max_steps': 'epochs', 'train_hooks': 'train_hooks', 'eval_step': '(0)'}), '(train_fn=train_dataset, model_fn=model_fn, model_dir=\n save_directory, num_gpus=1, log_step=1, save_checkpoint_step=3000,\n max_steps=epochs, train_hooks=train_hooks, eval_step=0)\n', (4824, 5009), True, 'import 
malaya_speech.train as train\n'), ((756, 767), 'multiprocessing.Pool', 'Pool', (['cores'], {}), '(cores)\n', (760, 767), False, 'from multiprocessing import Pool\n'), ((1081, 1113), 'glob.glob', 'glob', (['"""../noise-44k/noise/*.wav"""'], {}), "('../noise-44k/noise/*.wav')\n", (1085, 1113), False, 'from glob import glob\n'), ((1116, 1152), 'glob.glob', 'glob', (['"""../noise-44k/clean-wav/*.wav"""'], {}), "('../noise-44k/clean-wav/*.wav')\n", (1120, 1152), False, 'from glob import glob\n'), ((1567, 1595), 'malaya_speech.load', 'malaya_speech.load', (['f'], {'sr': 'sr'}), '(f, sr=sr)\n', (1585, 1595), False, 'import malaya_speech\n'), ((1643, 1696), 'malaya_speech.augmentation.waveform.random_sampling', 'augmentation.random_sampling', (['s'], {'sr': 'sr', 'length': 'length'}), '(s, sr=sr, length=length)\n', (1671, 1696), True, 'import malaya_speech.augmentation.waveform as augmentation\n'), ((1744, 1782), 'malaya_speech.resample', 'malaya_speech.resample', (['y', 'sr', 'down_sr'], {}), '(y, sr, down_sr)\n', (1766, 1782), False, 'import malaya_speech\n'), ((1794, 1833), 'malaya_speech.resample', 'malaya_speech.resample', (['y_', 'down_sr', 'sr'], {}), '(y_, down_sr, sr)\n', (1816, 1833), False, 'import malaya_speech\n'), ((3062, 3102), 'tensorflow.expand_dims', 'tf.expand_dims', (["features['combined']", '(-1)'], {}), "(features['combined'], -1)\n", (3076, 3102), True, 'import tensorflow as tf\n'), ((3111, 3144), 'tensorflow.expand_dims', 'tf.expand_dims', (["features['y']", '(-1)'], {}), "(features['y'], -1)\n", (3125, 3144), True, 'import tensorflow as tf\n'), ((3165, 3234), 'malaya_speech.tf_featurization.pad_and_partition', 'malaya_speech.tf_featurization.pad_and_partition', (['combined', 'partition'], {}), '(combined, partition)\n', (3213, 3234), False, 'import malaya_speech\n'), ((3269, 3331), 'malaya_speech.tf_featurization.pad_and_partition', 'malaya_speech.tf_featurization.pad_and_partition', (['y', 'partition'], {}), '(y, partition)\n', (3317, 3331), 
False, 'import malaya_speech\n'), ((3358, 3406), 'malaya_speech.train.model.super_res.AudioTFILM', 'super_res.AudioTFILM', (['partitioned_x'], {'dropout': '(0.0)'}), '(partitioned_x, dropout=0.0)\n', (3378, 3406), False, 'from malaya_speech.train.model import super_res\n'), ((3428, 3477), 'malaya_speech.train.model.enhancement.loss.snr', 'enhancement.loss.snr', (['model.logits', 'partitioned_y'], {}), '(model.logits, partitioned_y)\n', (3448, 3477), False, 'from malaya_speech.train.model import enhancement\n'), ((3488, 3537), 'malaya_speech.train.model.enhancement.loss.sdr', 'enhancement.loss.sdr', (['model.logits', 'partitioned_y'], {}), '(model.logits, partitioned_y)\n', (3508, 3537), False, 'from malaya_speech.train.model import enhancement\n'), ((3562, 3593), 'tensorflow.identity', 'tf.identity', (['loss', '"""total_loss"""'], {}), "(loss, 'total_loss')\n", (3573, 3593), True, 'import tensorflow as tf\n'), ((3599, 3636), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_loss"""', 'loss'], {}), "('total_loss', loss)\n", (3616, 3636), True, 'import tensorflow as tf\n'), ((3641, 3670), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""snr"""', 'snr'], {}), "('snr', snr)\n", (3658, 3670), True, 'import tensorflow as tf\n'), ((3675, 3704), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""sdr"""', 'sdr'], {}), "('sdr', sdr)\n", (3692, 3704), True, 'import tensorflow as tf\n'), ((3724, 3760), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (3758, 3760), True, 'import tensorflow as tf\n'), ((3782, 3836), 'tensorflow.constant', 'tf.constant', ([], {'value': 'init_lr', 'shape': '[]', 'dtype': 'tf.float32'}), '(value=init_lr, shape=[], dtype=tf.float32)\n', (3793, 3836), True, 'import tensorflow as tf\n'), ((3863, 3977), 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['learning_rate', 'global_step', 'epochs'], {'end_learning_rate': '(1e-06)', 'power': '(1.0)', 'cycle': 
'(False)'}), '(learning_rate, global_step, epochs,\n end_learning_rate=1e-06, power=1.0, cycle=False)\n', (3888, 3977), True, 'import tensorflow as tf\n'), ((4038, 4087), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'learning_rate'], {}), "('learning_rate', learning_rate)\n", (4055, 4087), True, 'import tensorflow as tf\n'), ((4665, 4723), 'tensorflow.train.LoggingTensorHook', 'tf.train.LoggingTensorHook', (["['total_loss']"], {'every_n_iter': '(1)'}), "(['total_loss'], every_n_iter=1)\n", (4691, 4723), True, 'import tensorflow as tf\n'), ((2007, 2033), 'random.choice', 'random.choice', (['selected_sr'], {}), '(selected_sr)\n', (2020, 2033), False, 'import random\n'), ((4154, 4230), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate', 'beta1': '(0.99)', 'beta2': '(0.999)'}), '(learning_rate=learning_rate, beta1=0.99, beta2=0.999)\n', (4176, 4230), True, 'import tensorflow as tf\n'), ((4356, 4423), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'train_op': 'train_op'}), '(mode=mode, loss=loss, train_op=train_op)\n', (4382, 4423), True, 'import tensorflow as tf\n'), ((967, 991), 'itertools.chain', 'itertools.chain', (['*pooled'], {}), '(*pooled)\n', (982, 991), False, 'import itertools\n'), ((1912, 1939), 'random.randint', 'random.randint', (['(3000)', '(10000)'], {}), '(3000, 10000)\n', (1926, 1939), False, 'import random\n'), ((2409, 2432), 'random.shuffle', 'random.shuffle', (['results'], {}), '(results)\n', (2423, 2432), False, 'import random\n'), ((4524, 4594), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'tf.estimator.ModeKeys.EVAL', 'loss': 'loss'}), '(mode=tf.estimator.ModeKeys.EVAL, loss=loss)\n', (4550, 4594), True, 'import tensorflow as tf\n'), ((1434, 1452), 'os.path.getsize', 'os.path.getsize', (['n'], {}), '(n)\n', (1449, 1452), False, 'import os\n'), ((1961, 1970), 'numpy.abs', 
'np.abs', (['y'], {}), '(y)\n', (1967, 1970), True, 'import numpy as np\n'), ((2814, 2836), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (2828, 2836), True, 'import tensorflow as tf\n'), ((2859, 2881), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (2873, 2881), True, 'import tensorflow as tf\n'), ((2486, 2500), 'numpy.isnan', 'np.isnan', (['r[0]'], {}), '(r[0])\n', (2494, 2500), True, 'import numpy as np\n'), ((2515, 2529), 'numpy.isnan', 'np.isnan', (['r[1]'], {}), '(r[1])\n', (2523, 2529), True, 'import numpy as np\n')] |
"""
Train a neural network on the given dataset with given configuration
"""
import numpy as np
np.random.seed(12345)
import tensorflow as tf
tf.random.set_seed(12345)
import random
random.seed(12345)
import argparse
import math
import re
import sys
import traceback
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Activation
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import EarlyStopping, Callback, ModelCheckpoint
import time
from data_utils import *
from sklearn import preprocessing
from sklearn.metrics import mean_absolute_error, mean_squared_error, accuracy_score
from tensorflow.python import debug as tf_debug
from train_utils import *
# Command-line interface. argument_default=argparse.SUPPRESS omits any
# option the user did not pass from the parsed namespace entirely.
parser = argparse.ArgumentParser(description='run ml regressors on dataset',argument_default=argparse.SUPPRESS)
parser.add_argument('--train_data_path', help='path to the training dataset',default=None, type=str, required=False)
parser.add_argument('--val_data_path', help='path to the validation dataset',default=None, type=str, required=False)
parser.add_argument('--test_data_path', help='path to the test dataset', default=None, type=str,required=False)
parser.add_argument('--label', help='output variable', default=None, type=str,required=False)
parser.add_argument('--input', help='input attributes set', default=None, type=str, required=False)
parser.add_argument('--config_file', help='configuration file path', default=None, type=str, required=False)
parser.add_argument('--test_metric', help='test_metric to use', default=None, type=str, required=False)
parser.add_argument('--priority', help='priority of this job', default=0, type=int, required=False)
# parse_known_args tolerates unrecognised extra arguments (e.g. when the
# script is launched by a scheduler that appends its own flags).
args,_ = parser.parse_known_args()
# Training defaults; run_regressors overwrites them with config['paramsGrid'].
hyper_params = {'batch_size':32, 'num_epochs':4000, 'EVAL_FREQUENCY':1000, 'learning_rate':1e-4, 'momentum':0.9, 'lr_drop_rate':0.5, 'epoch_step':500, 'nesterov':True, 'reg_W':0., 'optimizer':'Adam', 'reg_type':'L2', 'activation':'relu', 'patience':100}
# NN architecture
SEED = 66478  # NOTE(review): defined but its use is not visible in this chunk — confirm
def run_regressors(train_X, train_y, valid_X, valid_y, test_X, test_y, logger=None, config=None):
assert config is not None
hyper_params.update(config['paramsGrid'])
assert logger is not None
rr = logger
def define_model(data, architecture, num_labels=1, activation='relu', dropouts=[]):
assert '-' in architecture
archs = architecture.strip().split('-')
net = data
pen_layer = net
prev_layer = net
prev_num_outputs = None
prev_block_num_outputs = None
prev_stub_output = net
for i in range(len(archs)):
arch = archs[i]
if 'x' in arch:
arch = arch.split('x')
num_outputs = int(re.findall(r'\d+',arch[0])[0])
layers = int(re.findall(r'\d+',arch[1])[0])
j = 0
aux_layers = re.findall(r'[A-Z]',arch[0])
for l in range(layers):
if aux_layers and aux_layers[0] == 'B':
if len(aux_layers)>1 and aux_layers[1]=='A':
rr.fprint('adding fully connected layers with %d outputs followed by batch_norm and act' % num_outputs)
net = Dense(num_outputs,
name='fc' + str(i) + '_' + str(j),
activation=None)(net)
net = BatchNormalization(center=True, scale=True, name='fc_bn'+str(i)+'_'+str(j))(net)
if activation =='relu': net = Activation('relu')(net)
else:
rr.fprint('adding fully connected layers with %d outputs followed by batch_norm' % num_outputs)
net = Dense(num_outputs,
name='fc' + str(i) + '_' + str(j),
activation=activation)(net)
net = BatchNormalization(center=True, scale=True,
name='fc_bn' + str(i) + '_' + str(j))(net)
else:
rr.fprint('adding fully connected layers with %d outputs' % num_outputs)
net = Dense(num_outputs,
name='fc' + str(i) + '_' + str(j),
activation=activation)(net)
if 'R' in aux_layers:
if prev_num_outputs and prev_num_outputs==num_outputs:
rr.fprint('adding residual, both sizes are same')
net = net+prev_layer
else:
rr.fprint('adding residual with fc as the size are different')
net = net + Dense(num_outputs,
name='fc' + str(i) + '_' +'dim_'+ str(j),
activation=None)(prev_layer)
prev_num_outputs = num_outputs
j += 1
prev_layer = net
aux_layers_sub = re.findall(r'[A-Z]', arch[1])
if 'R' in aux_layers_sub:
if prev_block_num_outputs and prev_block_num_outputs == num_outputs:
rr.fprint('adding residual to stub, both sizes are same')
net = net + prev_stub_output
else:
rr.fprint('adding residual to stub with fc as the size are different')
net = net + Dense(num_outputs,
name='fc' + str(i) + '_' + 'stub_dim_' + str(j),
activation=None)(prev_stub_output)
if 'D' in aux_layers_sub and (num_labels == 1) and len(dropouts) > i:
rr.fprint('adding dropout', dropouts[i])
net = Dropout(1.-dropouts[i], seed=SEED)(net, training=False)
prev_stub_output = net
prev_block_num_outputs = num_outputs
prev_layer = net
else:
if 'R' in arch:
act_fun = 'relu'
rr.fprint('using ReLU at last layer')
elif 'T' in arch:
act_fun = 'tanh'
rr.fprint('using TanH at last layer')
else:
act_fun = None
pen_layer = net
rr.fprint('adding final layer with ' + str(num_labels) + ' output')
net = Dense(num_labels, name='fc' + str(i),
activation=act_fun)(net)
return net
def error_rate(predictions, labels, step=0, dataset_partition=''):
return np.mean(np.absolute(predictions - labels))
def error_rate_classification(predictions, labels, step=0, dataset_partition=''):
return 100.0 - (100.0 * np.sum(np.argmax(predictions, 1) == labels) / predictions.shape[0])
train_X = train_X.reshape(train_X.shape[0], -1).astype("float32")
valid_X = valid_X.reshape(valid_X.shape[0], -1).astype("float32")
test_X = test_X.reshape(test_X.shape[0], -1).astype("float32")
num_input = train_X.shape[1]
batch_size = hyper_params['batch_size']
learning_rate = hyper_params['learning_rate']
optimizer = hyper_params['optimizer']
architecture = config['architecture']
num_epochs = hyper_params['num_epochs']
model_path = config['model_path']
patience = hyper_params['patience']
save_path = config['save_path']
loss_type = config['loss_type']
keras_path = config['keras_path']
last_layer_with_weight = config['last_layer_with_weight']
if 'dropouts' in hyper_params:
dropouts = hyper_params['dropouts']
else:
dropouts = []
test_metric = mean_squared_error
if config['test_metric']=='mae':
test_metric = mean_absolute_error
if config['test_metric']=='accuracy':
test_metric = accuracy_score
use_valid = config['use_valid']
EVAL_FREQUENCY = hyper_params['EVAL_FREQUENCY']
train_y = train_y.reshape(train_y.shape[0]).astype("float32")
valid_y = valid_y.reshape(valid_y.shape[0]).astype("float32")
test_y = test_y.reshape(test_y.shape[0]).astype("float32")
train_data = train_X
train_labels = train_y
test_data = test_X
test_labels = test_y
validation_data = valid_X
validation_labels = valid_y
rr.fprint("train matrix shape of train_X: ",train_X.shape, ' train_y: ', train_y.shape)
rr.fprint("valid matrix shape of train_X: ",valid_X.shape, ' valid_y: ', valid_y.shape)
rr.fprint("test matrix shape of valid_X: ",test_X.shape, ' test_y: ', test_y.shape)
rr.fprint('architecture is: ',architecture)
rr.fprint('learning rate is ',learning_rate)
rr.fprint('model path is ', model_path)
model = None
inputs = Input(shape=(num_input,), name='elemental_fractions')
outputs = define_model(inputs, architecture, dropouts=dropouts)
model = Model(inputs=inputs, outputs=outputs, name= 'ElemNet')
model.summary(print_fn=lambda x: rr.fprint(x))
if model_path:
rr.fprint('Restoring model from %s' % model_path)
model_h5 = "%s.h5" % model_path
model.load_weights(model_h5)
if not last_layer_with_weight:
rr.fprint('removing last layer to add model and adding dense layer without weight')
newl16 = Dense(1, activation=None)(model.layers[-2].output)
model = Model(inputs=model.input, outputs=[newl16])
assert optimizer == 'Adam'
if loss_type=='mae':
model.compile(loss=tf.keras.losses.mean_absolute_error, optimizer=optimizers.Adam(learning_rate=learning_rate), metrics=['mean_absolute_error'])
elif loss_type=='binary':
model.compile(loss=tf.keras.losses.binary_crossentropy, optimizer=optimizers.Adam(learning_rate=learning_rate), metrics=[tf.keras.metrics.BinaryAccuracy()])
class LossHistory(Callback):
def on_epoch_end(self, epoch, logs={}):
#rr.fprint(
# 'Step %d (epoch %.2d), %.1f s minibatch loss: %.5f, validation error: %.5f, test error: %.5f, best validation error: %.5f' % (
# step, int(step * batch_size) / train_size,
# elapsed_time, l_, val_error, test_error, best_val_error))
rr.fprint('{}: Current epoch: {}, loss: {}, validation loss: {}'.format(datetime.datetime.now(), epoch, logs['loss'], logs['val_loss']))
rr.fprint('start training')
early_stopping = EarlyStopping(patience=patience, restore_best_weights=True, monitor='val_loss')
checkpointer = ModelCheckpoint(filepath=save_path, verbose=0, save_best_only=True, save_freq='epoch', save_format='tf', period=10)
history = model.fit(train_X, train_y, verbose=2, batch_size=batch_size, epochs=num_epochs, validation_data=(valid_X, valid_y), callbacks=[early_stopping, LossHistory(), checkpointer])
if use_valid:
test_result = model.evaluate(test_X, test_y, batch_size=32)
rr.fprint('the test error is ',test_result)
rr.fprint(history.history)
model.save(save_path, save_format='tf')
filename_json = "%s.json" % keras_path
filename_h5 = "%s.h5" % keras_path
model_json = model.to_json()
with open(filename_json, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(filename_h5)
rr.fprint('saved model to '+save_path)
return
if __name__=='__main__':
args = parser.parse_args()
config = {}
config['train_data_path'] = args.train_data_path
config['val_data_path'] = args.val_data_path
config['test_data_path'] = args.test_data_path
config['label'] = args.label
config['input_type'] = args.input
config['log_folder'] = 'logs_dl'
config['log_file'] = 'dl_log_' + get_date_str() + '.log'
config['test_metric'] = args.test_metric
config['architecture'] = 'infile'
if args.config_file:
config.update(load_config(args.config_file))
if not os.path.exists(config['log_folder']):
createDir(config['log_folder'])
logger = Record_Results(os.path.join(config['log_folder'], config['log_file']))
logger.fprint('job config: ' + str(config))
train_X, train_y, valid_X, valid_y, test_X, test_y = load_csv(train_data_path=config['train_data_path'],
val_data_path=config['val_data_path'],
test_data_path=config['test_data_path'],
input_types=config['input_types'],
label=config['label'], logger=logger)
run_regressors(train_X, train_y, valid_X, valid_y, test_X, test_y, logger=logger, config=config)
logger.fprint('done')
| [
"tensorflow.random.set_seed",
"numpy.absolute",
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.models.Model",
"re.findall",
"random.seed",
"tenso... | [((98, 119), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (112, 119), True, 'import numpy as np\n'), ((144, 169), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(12345)'], {}), '(12345)\n', (162, 169), True, 'import tensorflow as tf\n'), ((184, 202), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (195, 202), False, 'import random\n'), ((1000, 1107), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""run ml regressors on dataset"""', 'argument_default': 'argparse.SUPPRESS'}), "(description='run ml regressors on dataset',\n argument_default=argparse.SUPPRESS)\n", (1023, 1107), False, 'import argparse\n'), ((9263, 9316), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(num_input,)', 'name': '"""elemental_fractions"""'}), "(shape=(num_input,), name='elemental_fractions')\n", (9268, 9316), False, 'from tensorflow.keras.layers import Input\n'), ((9397, 9450), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': '"""ElemNet"""'}), "(inputs=inputs, outputs=outputs, name='ElemNet')\n", (9402, 9450), False, 'from tensorflow.keras.models import Model, load_model\n'), ((10936, 11015), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': 'patience', 'restore_best_weights': '(True)', 'monitor': '"""val_loss"""'}), "(patience=patience, restore_best_weights=True, monitor='val_loss')\n", (10949, 11015), False, 'from tensorflow.keras.callbacks import EarlyStopping, Callback, ModelCheckpoint\n'), ((11035, 11154), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'save_path', 'verbose': '(0)', 'save_best_only': '(True)', 'save_freq': '"""epoch"""', 'save_format': '"""tf"""', 'period': '(10)'}), "(filepath=save_path, verbose=0, save_best_only=True,\n save_freq='epoch', save_format='tf', period=10)\n", (11050, 11154), False, 'from tensorflow.keras.callbacks import EarlyStopping, 
Callback, ModelCheckpoint\n'), ((7119, 7152), 'numpy.absolute', 'np.absolute', (['(predictions - labels)'], {}), '(predictions - labels)\n', (7130, 7152), True, 'import numpy as np\n'), ((9887, 9930), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'model.input', 'outputs': '[newl16]'}), '(inputs=model.input, outputs=[newl16])\n', (9892, 9930), False, 'from tensorflow.keras.models import Model, load_model\n'), ((3154, 3182), 're.findall', 're.findall', (['"""[A-Z]"""', 'arch[0]'], {}), "('[A-Z]', arch[0])\n", (3164, 3182), False, 'import re\n'), ((5444, 5472), 're.findall', 're.findall', (['"""[A-Z]"""', 'arch[1]'], {}), "('[A-Z]', arch[1])\n", (5454, 5472), False, 'import re\n'), ((9814, 9839), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'None'}), '(1, activation=None)\n', (9819, 9839), False, 'from tensorflow.keras.layers import Dense\n'), ((10064, 10108), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (10079, 10108), False, 'from tensorflow.keras import optimizers\n'), ((10247, 10291), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (10262, 10291), False, 'from tensorflow.keras import optimizers\n'), ((3012, 3039), 're.findall', 're.findall', (['"""\\\\d+"""', 'arch[0]'], {}), "('\\\\d+', arch[0])\n", (3022, 3039), False, 'import re\n'), ((3072, 3099), 're.findall', 're.findall', (['"""\\\\d+"""', 'arch[1]'], {}), "('\\\\d+', arch[1])\n", (3082, 3099), False, 'import re\n'), ((6256, 6293), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(1.0 - dropouts[i])'], {'seed': 'SEED'}), '(1.0 - dropouts[i], seed=SEED)\n', (6263, 6293), False, 'from tensorflow.keras.layers import Dropout\n'), ((10302, 10335), 'tensorflow.keras.metrics.BinaryAccuracy', 'tf.keras.metrics.BinaryAccuracy', ([], {}), '()\n', (10333, 10335), True, 'import tensorflow as tf\n'), ((7280, 7305), 
'numpy.argmax', 'np.argmax', (['predictions', '(1)'], {}), '(predictions, 1)\n', (7289, 7305), True, 'import numpy as np\n'), ((3849, 3867), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3859, 3867), False, 'from tensorflow.keras.layers import Activation\n')] |
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from itertools import permutations
import numpy as np
import cunumeric as cn
def gen_result(lib):
# Try various non-square shapes, to nudge the core towards trying many
# different partitionings.
for shape in permutations((3, 4, 5)):
x = lib.ones(shape)
for axis in range(len(shape)):
yield x.sum(axis=axis)
def test():
for (np_res, cn_res) in zip(gen_result(np), gen_result(cn)):
assert np.array_equal(np_res, cn_res)
if __name__ == "__main__":
test()
| [
"numpy.array_equal",
"itertools.permutations"
] | [((815, 838), 'itertools.permutations', 'permutations', (['(3, 4, 5)'], {}), '((3, 4, 5))\n', (827, 838), False, 'from itertools import permutations\n'), ((1036, 1066), 'numpy.array_equal', 'np.array_equal', (['np_res', 'cn_res'], {}), '(np_res, cn_res)\n', (1050, 1066), True, 'import numpy as np\n')] |
import torch.nn as nn
import torch
from torch.optim import Adam, SGD
import numpy as np
import matplotlib.pyplot as plot
class ODE_LSTM(nn.Module):
def __init__(self, steps, input_size, hidden_size, num_layers):
super().__init__()
self.steps = steps
self.en = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, batch_first=True)
self.de = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, batch_first=True,)
self.o_net = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.SELU(),
nn.Linear(hidden_size, input_size)
)
def forward(self, init, steps=None):
steps = self.steps if steps is None else steps
# 实际上不需要进行输入,只是官方实现需要输入
zeros = torch.zeros(init.shape[0],
steps,
init.shape[1],
dtype=init.dtype)
_, init_state = self.en(init.unsqueeze(1))
outputs, _ = self.de(zeros, init_state)
# steps
outputs = self.o_net(outputs)
return outputs
mse_loss = torch.nn.MSELoss(reduction='none')
def ode_loss(y_true, y_pred):
mask = torch.sum(y_true, dim=-1, keepdim=True) > 0
mask = mask.float()
return torch.sum(mask * mse_loss(y_true, y_pred)) / mask.sum()
if __name__ == '__main__':
steps, h = 50, 1
ori_series = {0: [100, 150],
10: [165, 283],
15: [197, 290],
30: [280, 276],
36: [305, 269],
40: [318, 266],
42: [324, 264]}
# 归一化
series = {}
for k, v in ori_series.items():
series[k] = [(v[0] - 100) / (324 - 100), (v[1] - 150) / (264 - 150)]
X = np.array([series[0]])
Y = np.zeros((1, steps, 2))
for i, j in series.items():
if i != 0:
Y[0, int(i / h) - 1] += series[i]
X = torch.tensor(X, dtype=torch.float32)
Y = torch.tensor(Y, dtype=torch.float32)
model = ODE_LSTM(steps, input_size=2, hidden_size=64, num_layers=2)
optimizer = Adam(model.parameters(), lr=1e-4)
for epoch in range(1500):
outputs = model(X)
loss = ode_loss(Y, outputs)
loss.backward()
optimizer.step()
optimizer.zero_grad()
print(epoch, loss.item())
with torch.no_grad():
result = model(torch.tensor([[0, 0]], dtype=torch.float32))[0]
result = result * torch.tensor([324 - 100, 264 - 150], dtype=torch.float32) + \
torch.tensor([100, 150], dtype=torch.float32)
times = np.arange(1, steps + 1) * h
plot.clf()
plot.plot(times, result[:, 0], color='blue')
plot.plot(times, result[:, 1], color='green')
plot.plot(list(ori_series.keys()), [i[0] for i in ori_series.values()], 'o', color='blue')
plot.plot(list(ori_series.keys()), [i[1] for i in ori_series.values()], 'o', color='green')
plot.savefig('ode_c.png')
| [
"torch.nn.MSELoss",
"matplotlib.pyplot.savefig",
"torch.no_grad",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"numpy.zeros",
"numpy.array",
"torch.nn.SELU",
"numpy.arange",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.LSTM",
"torch.sum",
"torch.tensor"
] | [((1213, 1247), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (1229, 1247), False, 'import torch\n'), ((1865, 1886), 'numpy.array', 'np.array', (['[series[0]]'], {}), '([series[0]])\n', (1873, 1886), True, 'import numpy as np\n'), ((1895, 1918), 'numpy.zeros', 'np.zeros', (['(1, steps, 2)'], {}), '((1, steps, 2))\n', (1903, 1918), True, 'import numpy as np\n'), ((2026, 2062), 'torch.tensor', 'torch.tensor', (['X'], {'dtype': 'torch.float32'}), '(X, dtype=torch.float32)\n', (2038, 2062), False, 'import torch\n'), ((2071, 2107), 'torch.tensor', 'torch.tensor', (['Y'], {'dtype': 'torch.float32'}), '(Y, dtype=torch.float32)\n', (2083, 2107), False, 'import torch\n'), ((2738, 2748), 'matplotlib.pyplot.clf', 'plot.clf', ([], {}), '()\n', (2746, 2748), True, 'import matplotlib.pyplot as plot\n'), ((2753, 2797), 'matplotlib.pyplot.plot', 'plot.plot', (['times', 'result[:, 0]'], {'color': '"""blue"""'}), "(times, result[:, 0], color='blue')\n", (2762, 2797), True, 'import matplotlib.pyplot as plot\n'), ((2802, 2847), 'matplotlib.pyplot.plot', 'plot.plot', (['times', 'result[:, 1]'], {'color': '"""green"""'}), "(times, result[:, 1], color='green')\n", (2811, 2847), True, 'import matplotlib.pyplot as plot\n'), ((3044, 3069), 'matplotlib.pyplot.savefig', 'plot.savefig', (['"""ode_c.png"""'], {}), "('ode_c.png')\n", (3056, 3069), True, 'import matplotlib.pyplot as plot\n'), ((292, 393), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'batch_first': '(True)'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=\n num_layers, batch_first=True)\n', (299, 393), True, 'import torch.nn as nn\n'), ((433, 534), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'batch_first': '(True)'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=\n num_layers, 
batch_first=True)\n', (440, 534), True, 'import torch.nn as nn\n'), ((869, 935), 'torch.zeros', 'torch.zeros', (['init.shape[0]', 'steps', 'init.shape[1]'], {'dtype': 'init.dtype'}), '(init.shape[0], steps, init.shape[1], dtype=init.dtype)\n', (880, 935), False, 'import torch\n'), ((1291, 1330), 'torch.sum', 'torch.sum', (['y_true'], {'dim': '(-1)', 'keepdim': '(True)'}), '(y_true, dim=-1, keepdim=True)\n', (1300, 1330), False, 'import torch\n'), ((2450, 2465), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2463, 2465), False, 'import torch\n'), ((606, 641), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (615, 641), True, 'import torch.nn as nn\n'), ((655, 664), 'torch.nn.SELU', 'nn.SELU', ([], {}), '()\n', (662, 664), True, 'import torch.nn as nn\n'), ((678, 712), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'input_size'], {}), '(hidden_size, input_size)\n', (687, 712), True, 'import torch.nn as nn\n'), ((2643, 2688), 'torch.tensor', 'torch.tensor', (['[100, 150]'], {'dtype': 'torch.float32'}), '([100, 150], dtype=torch.float32)\n', (2655, 2688), False, 'import torch\n'), ((2705, 2728), 'numpy.arange', 'np.arange', (['(1)', '(steps + 1)'], {}), '(1, steps + 1)\n', (2714, 2728), True, 'import numpy as np\n'), ((2490, 2533), 'torch.tensor', 'torch.tensor', (['[[0, 0]]'], {'dtype': 'torch.float32'}), '([[0, 0]], dtype=torch.float32)\n', (2502, 2533), False, 'import torch\n'), ((2564, 2621), 'torch.tensor', 'torch.tensor', (['[324 - 100, 264 - 150]'], {'dtype': 'torch.float32'}), '([324 - 100, 264 - 150], dtype=torch.float32)\n', (2576, 2621), False, 'import torch\n')] |
import numpy as np
import math
import cmath
import scipy
import scipy.linalg
import scipy.integrate
import sys
import tqdm
import warnings
h = 1.0
hbar = h / (2.0 * np.pi)
ZERO_TOLERANCE = 10**-4
MAX_VIBRATIONAL_STATES = 75
MAX_FREQ_TIME_SCALE_DT = 18
STARTING_NUMBER_VIBRATIONAL_STATES = 12
NUMBER_ELECTRONIC_STATES = 4
class VibrationalStateOverFlowException(Exception):
def __init__(self):
pass
class MolmerSorenson():
def __init__(self,
number_vibrational_states,
energy_vibrational,
eta_values_list,
transition_dipole_c_values,
oscillator_mass = 1.0,
electronic_energies = [0.0, 0.0, 0.0, 0.0]):
self.number_vibrational_states = number_vibrational_states
self.energy_vibrational = energy_vibrational
self.oscillator_mass = oscillator_mass
self.electronic_energies = electronic_energies
vibrational_frequency = energy_vibrational / h
self.nu = energy_vibrational / hbar
self.one_vibrational_time = 1.0 / vibrational_frequency
self.eta_values = eta_values_list
self.c_values = transition_dipole_c_values
self.propagated_results = None
##VIBRATIONAL HELPER FUNCTIONS
def vibrational_subspace_zero_operator(self):
return np.zeros((self.number_vibrational_states, self.number_vibrational_states))
def vibrational_subspace_identity_operator(self):
output = self.vibrational_subspace_zero_operator()
np.fill_diagonal(output, 1.0)
return output
def vibrational_subspace_creation_operator(self):
vibrational_subspace = self.vibrational_subspace_zero_operator()
for vibrational_index in range(self.number_vibrational_states - 1):
vibrational_subspace[vibrational_index, vibrational_index + 1] = math.sqrt(vibrational_index + 1)
return vibrational_subspace.T
def vibrational_subspace_dimensionless_position_operator(self):
creation = self.vibrational_subspace_creation_operator()
annihilation = np.conj(creation.T)
position = (creation + annihilation)
return position
# def vibrational_subspace_position_operator(self):
# warnings.warn("MAY BE OFF BY 2pi^n!!!")
# dimensionless_position = self.vibrational_subspace_dimensionless_position_operator()
# return dimensionless_position * math.sqrt(hbar / (1.0 * self.oscillator_mass * (self.energy_vibrational / hbar)) )
def vibrational_subspace_hamiltonian_operator(self):
vibrational_subspace = self.vibrational_subspace_zero_operator()
for vibrational_index in range(self.number_vibrational_states):
vibrational_subspace[vibrational_index, vibrational_index ] = self.energy_vibrational * (vibrational_index + .5)
return vibrational_subspace
def vibrational_subspace_laser_recoil_operator(self, eta_value):
exp_arg = eta_value * self.vibrational_subspace_dimensionless_position_operator()
return scipy.linalg.expm(1.0j * exp_arg)
def vibrational_subspace_position_to_nth_power_operator(self, n_power, dimensionless=False):
if n_power == 0:
return self.vibrational_subspace_identity_operator()
assert(n_power >=1)
if dimensionless:
x = self.vibrational_subspace_dimensionless_position_operator()
else:
x = self.vibrational_subspace_position_operator()
output = x
for i in range(n_power-1):
output = output.dot(x)
return output
def vibrational_transition_polynomial_operator(self, c_list):
output = self.vibrational_subspace_zero_operator()
for poly_order, poly_coefficient in enumerate(c_list):
x_nthPower = poly_coefficient * self.vibrational_subspace_position_to_nth_power_operator(poly_order, dimensionless=True)
output = output + x_nthPower
return output
##VIBRATIONAL HELPER FUNCTIONS
def blank_wavefunction(self):
return np.zeros((self.number_vibrational_states * NUMBER_ELECTRONIC_STATES), dtype=np.complex)
def test_wf(self, normalized = True):
shape = (self.number_vibrational_states * NUMBER_ELECTRONIC_STATES)
output = np.random.rand(shape)
if normalized:
normalizer = 1.0
else:
normalizer = output.T.dot(np.conj(output))
return output / np.sqrt(normalizer)
def ground_state_wavefunction(self):
output = self.blank_wavefunction()
output[0] = 1.0
return output
def access_wavefunction_entry(self, wf_vector, electronic_index, vibrational_index):
assert(vibrational_index < self.number_vibrational_states)
index = electronic_index * self.number_vibrational_states + vibrational_index
return wf_vector[index]
def coherent_vibrational_state_ground_electronic(self, alpha_value):
creation_operator = self.vibrational_subspace_creation_operator()
coherent_state_operator = np.exp(-np.abs(alpha_value)**2 / 2.0) * scipy.linalg.expm(alpha_value * creation_operator)
vibrational_subspace_wf = np.zeros(self.number_vibrational_states, dtype=np.complex)
vibrational_subspace_wf[0] = 1.0
coherent_vibrational_subspace = coherent_state_operator.dot(vibrational_subspace_wf)
output = self.blank_wavefunction()
for i, amp in enumerate(coherent_vibrational_subspace):
output[i] = amp
return output
##FULL OPERATOR HELPER FUNCTIONS:
def total_zero_operator(self):
return np.zeros((self.number_vibrational_states * NUMBER_ELECTRONIC_STATES, self.number_vibrational_states * NUMBER_ELECTRONIC_STATES), dtype=np.complex)
def place_vibrational_subspace_into_electronic_indeces(self, electronic_operator, vibrational_subspace,electronic_index1, electronic_index2):
i_0_start = electronic_index1 * self.number_vibrational_states
i_0_end = i_0_start + self.number_vibrational_states
i_1_start = electronic_index2 * self.number_vibrational_states
i_1_end = i_1_start + self.number_vibrational_states
electronic_operator[i_0_start:i_0_end, i_1_start:i_1_end] = vibrational_subspace
def total_time_independent_hamiltonian(self):
output = self.total_zero_operator()
vib_ham = self.vibrational_subspace_hamiltonian_operator()
for electronic_index in range(NUMBER_ELECTRONIC_STATES):
starting_index = electronic_index * self.number_vibrational_states
ending_index = starting_index + self.number_vibrational_states
output[starting_index:ending_index, starting_index:ending_index] = vib_ham + self.electronic_energies[electronic_index]
return output
def total_time_independent_transition_dipole_helper(self,
eta_value,
electronic_index_pair_list):
output = self.total_zero_operator()
recoil_operator = self.vibrational_subspace_laser_recoil_operator(eta_value)
#create the vibrational transition dipole
vibrational_part_transition_dipole = self.vibrational_transition_polynomial_operator(self.c_values)
total_vibrational_off_diagonal = vibrational_part_transition_dipole.dot(recoil_operator)
#place the vibrational part into the total output operator
for indexpair in electronic_index_pair_list:
index_0, index_1 = indexpair
self.place_vibrational_subspace_into_electronic_indeces(output,
total_vibrational_off_diagonal,
index_0, index_1)
return output
def total_time_independent_transition_dipole_1_excitation(self):
return self.total_time_independent_transition_dipole_helper(
self.eta_values[0],
[(1,0), (3,2)])
def total_time_independent_transition_dipole_2_excitation(self):
return self.total_time_independent_transition_dipole_helper(
self.eta_values[1],
[(2,0), (3,1)])
def IR_transition_dipole():
output = self.total_zero_operator()
creation = self.vibrational_subspace_creation_operator()
destruction = np.conj(creation.T)
ir_dipole = creation + destruction
for electronic_i in range(NUMBER_ELECTRONIC_STATES):
self.place_vibrational_subspace_into_electronic_indeces(output,
ir_dipole,
electronic_i, electronic_i)
return output
## TIME DEPENDENT HELPER FUNCTIONS
def ode_diagonal_matrix(self):
return -1.0j * self.total_time_independent_hamiltonian() / hbar
def propagate(self, laser_energy_list_of_lists,
rabi_energy_list_of_lists,
initial_state_generator,
max_time_vibrational_cycles,
use_activation_function=False,
time_scale_set = MAX_FREQ_TIME_SCALE_DT):
while self.number_vibrational_states < MAX_VIBRATIONAL_STATES:
#define time scales
max_energy = self.electronic_energies[-1] + self.energy_vibrational * (.5 + self.number_vibrational_states - 1)
max_frequency = max_energy / h
dt = 1.0 / (time_scale_set * max_frequency)
self.dt = dt
max_time = max_time_vibrational_cycles * self.one_vibrational_time
if use_activation_function:
activation_width = 2.0 * dt
def activation_function(t):
return 1.0/(1.0 + np.exp(-(t/activation_width - 10.0)))
else:
def activation_function(t):
return 1.0
ODE_DIAGONAL = self.ode_diagonal_matrix()
MU_1_EXCITATION = self.total_time_independent_transition_dipole_1_excitation()
MU_2_EXCITATION = self.total_time_independent_transition_dipole_2_excitation()
ion_1_energies = laser_energy_list_of_lists[0]
ion_2_energies = laser_energy_list_of_lists[1]
ion_1_amplitudes = rabi_energy_list_of_lists[0]
ion_2_amplitudes = rabi_energy_list_of_lists[1]
#Added /2 in the amplitudes...?
def time_function_1_excitation(time):
output = 0
for beam_index, beam_energy in enumerate(ion_1_energies):
amp = ion_1_amplitudes[beam_index] / 2.0
f = beam_energy / hbar
output += amp*np.exp(-1.0j * f * time)
return output
def time_function_2_excitation(time):
output = 0
for beam_index, beam_energy in enumerate(ion_2_energies):
amp = ion_2_amplitudes[beam_index] / 2.0
f = beam_energy / hbar
output += amp*np.exp(-1.0j * f * time)
return output
def ODE_integrable_function(time, wf_coefficient_vector):
mu_1_perturber_excitation = MU_1_EXCITATION * time_function_1_excitation(time)
mu_2_perturber_excitation = MU_2_EXCITATION * time_function_2_excitation(time)
mu_1_total = mu_1_perturber_excitation + np.conj(mu_1_perturber_excitation.T)
mu_2_total = mu_2_perturber_excitation + np.conj(mu_2_perturber_excitation.T)
ODE_OFF_DIAGONAL = -1.0j *activation_function(time) * (mu_1_total + mu_2_total) / hbar
ODE_TOTAL_MATRIX = ODE_OFF_DIAGONAL + ODE_DIAGONAL
return np.dot(ODE_TOTAL_MATRIX, wf_coefficient_vector)
#define the starting wavefuntion
initial_conditions = initial_state_generator()
#create ode solver
current_time = 0.0
ode_solver = scipy.integrate.complex_ode(ODE_integrable_function)
ode_solver.set_initial_value(initial_conditions, current_time)
#Run it
results = [initial_conditions]
time_values = [current_time]
n_time = int(math.ceil(max_time / dt))
try: #this block catches an overflow into the highest vibrational state
for time_index in tqdm.tqdm(range(n_time)):
#update time, perform solution
current_time = ode_solver.t+dt
new_result = ode_solver.integrate(current_time)
results.append(new_result)
#make sure solver was successful
if not ode_solver.successful():
raise Exception("ODE Solve Failed!")
#make sure that there hasn't been substantial leakage to the highest excited states
time_values.append(current_time)
re_start_calculation = False
for electronic_index in range(NUMBER_ELECTRONIC_STATES):
max_vibrational_amp = self.access_wavefunction_entry(new_result, electronic_index, self.number_vibrational_states-1)
p = np.abs(max_vibrational_amp)**2
if p >= ZERO_TOLERANCE:
self.number_vibrational_states+=1
print("\nIncreasing Number of vibrational states to %i " % self.number_vibrational_states)
print("Time reached:" + str(current_time))
print("electronic quantum number" + str(electronic_index))
raise VibrationalStateOverFlowException()
except VibrationalStateOverFlowException:
#Move on and re-start the calculation
continue
#Finish calculating
results = np.array(results)
time_values = np.array(time_values)
self.propagated_results = results
self.time_values = time_values
return time_values, results
raise Exception("NEEDED TOO MANY VIBRATIONAL STATES! RE-RUN WITH DIFFERENT PARAMETERS!")
## POST-PROPAGATION CALCULATIONS
def reduced_electronic_density_matrix(self):
if self.propagated_results is None:
raise Exception("No reusults generated yet!")
results = self.propagated_results
output = np.zeros((results.shape[0], NUMBER_ELECTRONIC_STATES, NUMBER_ELECTRONIC_STATES), dtype=np.complex)
for electronic_index_1 in range(NUMBER_ELECTRONIC_STATES):
for electronic_index_2 in range(NUMBER_ELECTRONIC_STATES):
new_entry = 0.0
for vibrational_index in range(self.number_vibrational_states):
total_index_1 = self.number_vibrational_states*electronic_index_1 + vibrational_index
total_index_2 = self.number_vibrational_states*electronic_index_2 + vibrational_index
new_entry += results[:, total_index_1] * np.conj(results[:, total_index_2])
output[:, electronic_index_1, electronic_index_2] = new_entry
return output
def effective_rabi_energy(self, eta, laser_detuning, laser_rabi_energy):
return -(eta * laser_rabi_energy)**2 / (2 * (self.energy_vibrational - laser_detuning))
def expected_unitary_dynamics(self,
expected_rabi_energy,
initial_density_matrix,
time_values):
rho_0 = initial_density_matrix
#THis is difficult for me but the argument of this funciton in the
#original paper by <NAME> Sorenson says the argument should be:
#expected_rabi_energy * time_values / ( 2.0 * hbar)
#but I seem to need it to be what it is below.
#there are some odd conventions of 2 in the pauli matrices so I'm
#guessing it's from that, but I'd be more comfortable if I could
#precisely chase down the issue....
cos_func = np.cos(expected_rabi_energy * time_values / ( hbar))
sin_func = 1.0j * np.sin(expected_rabi_energy * time_values / ( hbar))
time_evolution_operator = np.zeros((time_values.shape[0], NUMBER_ELECTRONIC_STATES, NUMBER_ELECTRONIC_STATES), dtype = np.complex)
for electronic_i in range(NUMBER_ELECTRONIC_STATES):
time_evolution_operator[:, electronic_i, electronic_i] = cos_func
time_evolution_operator[:, 0, 3] = sin_func
time_evolution_operator[:, 3, 0] = sin_func
time_evolution_operator[:, 1, 2] = -sin_func
time_evolution_operator[:, 2, 1] = -sin_func
output = np.zeros((time_values.shape[0], NUMBER_ELECTRONIC_STATES, NUMBER_ELECTRONIC_STATES), dtype = np.complex)
for time_i, time in enumerate(time_values):
U = time_evolution_operator[time_i]
U_dagger = np.conj(U)
np.matmul(rho_0, U_dagger)
output[time_i,:,:] = np.dot(U, np.dot(rho_0, U_dagger))
return output
def trace(self, matrix_in):
out = 0.0
for i in range(matrix_in.shape[0]):
out += matrix_in[i,i]
return out
def moving_average(self, a, window_size) :
ret = np.cumsum(a, dtype=float)
ret[window_size:] = ret[window_size:] - ret[:-window_size]
return ret[window_size - 1:] / window_size
def fidelity(self, density_matrix_series1, density_matrix_series2):
assert(density_matrix_series1.shape == density_matrix_series2.shape)
fidelity_series = []
for i in range(density_matrix_series2.shape[0]):
rho_1 = density_matrix_series1[i]
rho_2 = density_matrix_series2[i]
rho_product = np.dot(rho_1, rho_2)
rho_1_det = np.linalg.det(rho_1)
rho_2_det = np.linalg.det(rho_2)
new_fidelity = math.sqrt(self.trace(rho_product) + 2 * math.sqrt(rho_1_det * rho_2_det))
fidelity_series.append(new_fidelity)
return np.array(fidelity_series)
def reduced_vibrational_density_matrix(self):
if self.propagated_results is None:
raise Exception("No results generated yet!")
results = self.propagated_results
output = np.zeros((results.shape[0], self.number_vibrational_states, self.number_vibrational_states), dtype=np.complex)
for vibrational_index_1 in range(self.number_vibrational_states):
for vibrational_index_2 in range(self.number_vibrational_states):
new_entry = 0.0
for electronic_index in range(NUMBER_ELECTRONIC_STATES):
total_index_1 = self.number_vibrational_states*electronic_index + vibrational_index_1
total_index_2 = self.number_vibrational_states*electronic_index + vibrational_index_2
new_entry += results[:, total_index_1] * np.conj(results[:, total_index_2])
output[:, vibrational_index_1, vibrational_index_2] = new_entry
return output
def average_vibrational_quanta(self):
if self.propagated_results == None:
raise Exception("No results generated yet!")
results = self.propagated_results
time_output_shape = results.shape[0]
output = np.zeros(time_output_shape, dtype=np.complex)
for electronic_index in range(NUMBER_ELECTRONIC_STATES):
for vibrational_index in range(self.number_vibrational_states):
total_index = self.number_vibrational_states*electronic_index + vibrational_index
output += vibrational_index * results[:, total_index] * np.conj(results[:, total_index])
return output
def ir_spectrum():
if self.propagated_results == None:
raise Exception("No reusults generated yet!")
results = self.propagated_results
operator = self.IR_transition_dipole()
time_trace = []
for time_index in range(results.shape[0]):
wf = results[time_index]
new_amp = np.conj(wf.T).dot(operator.dot(wf))
time_trace.append(new_amp)
time_trace = np.array(time_trace)
frequency_amplitude = np.fft.fftshift(np.fft.fft(time_trace))
frequency_values = np.fft.fftshift(np.fft.fftfreq(time_trace.shape[0], d = self.dt))
return frequency_values, frequency_amplitude
| [
"numpy.abs",
"numpy.sin",
"numpy.exp",
"numpy.fft.fft",
"numpy.cumsum",
"numpy.fft.fftfreq",
"numpy.linalg.det",
"numpy.conj",
"numpy.fill_diagonal",
"math.sqrt",
"math.ceil",
"numpy.cos",
"numpy.dot",
"scipy.linalg.expm",
"numpy.zeros",
"scipy.integrate.complex_ode",
"numpy.array",
... | [((1344, 1418), 'numpy.zeros', 'np.zeros', (['(self.number_vibrational_states, self.number_vibrational_states)'], {}), '((self.number_vibrational_states, self.number_vibrational_states))\n', (1352, 1418), True, 'import numpy as np\n'), ((1541, 1570), 'numpy.fill_diagonal', 'np.fill_diagonal', (['output', '(1.0)'], {}), '(output, 1.0)\n', (1557, 1570), True, 'import numpy as np\n'), ((2103, 2122), 'numpy.conj', 'np.conj', (['creation.T'], {}), '(creation.T)\n', (2110, 2122), True, 'import numpy as np\n'), ((3059, 3092), 'scipy.linalg.expm', 'scipy.linalg.expm', (['(1.0j * exp_arg)'], {}), '(1.0j * exp_arg)\n', (3076, 3092), False, 'import scipy\n'), ((4070, 4160), 'numpy.zeros', 'np.zeros', (['(self.number_vibrational_states * NUMBER_ELECTRONIC_STATES)'], {'dtype': 'np.complex'}), '(self.number_vibrational_states * NUMBER_ELECTRONIC_STATES, dtype=\n np.complex)\n', (4078, 4160), True, 'import numpy as np\n'), ((4294, 4315), 'numpy.random.rand', 'np.random.rand', (['shape'], {}), '(shape)\n', (4308, 4315), True, 'import numpy as np\n'), ((5195, 5253), 'numpy.zeros', 'np.zeros', (['self.number_vibrational_states'], {'dtype': 'np.complex'}), '(self.number_vibrational_states, dtype=np.complex)\n', (5203, 5253), True, 'import numpy as np\n'), ((5634, 5785), 'numpy.zeros', 'np.zeros', (['(self.number_vibrational_states * NUMBER_ELECTRONIC_STATES, self.\n number_vibrational_states * NUMBER_ELECTRONIC_STATES)'], {'dtype': 'np.complex'}), '((self.number_vibrational_states * NUMBER_ELECTRONIC_STATES, self.\n number_vibrational_states * NUMBER_ELECTRONIC_STATES), dtype=np.complex)\n', (5642, 5785), True, 'import numpy as np\n'), ((8520, 8539), 'numpy.conj', 'np.conj', (['creation.T'], {}), '(creation.T)\n', (8527, 8539), True, 'import numpy as np\n'), ((14661, 14763), 'numpy.zeros', 'np.zeros', (['(results.shape[0], NUMBER_ELECTRONIC_STATES, NUMBER_ELECTRONIC_STATES)'], {'dtype': 'np.complex'}), '((results.shape[0], NUMBER_ELECTRONIC_STATES,\n NUMBER_ELECTRONIC_STATES), 
dtype=np.complex)\n', (14669, 14763), True, 'import numpy as np\n'), ((16301, 16350), 'numpy.cos', 'np.cos', (['(expected_rabi_energy * time_values / hbar)'], {}), '(expected_rabi_energy * time_values / hbar)\n', (16307, 16350), True, 'import numpy as np\n'), ((16468, 16574), 'numpy.zeros', 'np.zeros', (['(time_values.shape[0], NUMBER_ELECTRONIC_STATES, NUMBER_ELECTRONIC_STATES)'], {'dtype': 'np.complex'}), '((time_values.shape[0], NUMBER_ELECTRONIC_STATES,\n NUMBER_ELECTRONIC_STATES), dtype=np.complex)\n', (16476, 16574), True, 'import numpy as np\n'), ((16942, 17048), 'numpy.zeros', 'np.zeros', (['(time_values.shape[0], NUMBER_ELECTRONIC_STATES, NUMBER_ELECTRONIC_STATES)'], {'dtype': 'np.complex'}), '((time_values.shape[0], NUMBER_ELECTRONIC_STATES,\n NUMBER_ELECTRONIC_STATES), dtype=np.complex)\n', (16950, 17048), True, 'import numpy as np\n'), ((17522, 17547), 'numpy.cumsum', 'np.cumsum', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (17531, 17547), True, 'import numpy as np\n'), ((18300, 18325), 'numpy.array', 'np.array', (['fidelity_series'], {}), '(fidelity_series)\n', (18308, 18325), True, 'import numpy as np\n'), ((18537, 18652), 'numpy.zeros', 'np.zeros', (['(results.shape[0], self.number_vibrational_states, self.\n number_vibrational_states)'], {'dtype': 'np.complex'}), '((results.shape[0], self.number_vibrational_states, self.\n number_vibrational_states), dtype=np.complex)\n', (18545, 18652), True, 'import numpy as np\n'), ((19564, 19609), 'numpy.zeros', 'np.zeros', (['time_output_shape'], {'dtype': 'np.complex'}), '(time_output_shape, dtype=np.complex)\n', (19572, 19609), True, 'import numpy as np\n'), ((20422, 20442), 'numpy.array', 'np.array', (['time_trace'], {}), '(time_trace)\n', (20430, 20442), True, 'import numpy as np\n'), ((1874, 1906), 'math.sqrt', 'math.sqrt', (['(vibrational_index + 1)'], {}), '(vibrational_index + 1)\n', (1883, 1906), False, 'import math\n'), ((4461, 4480), 'numpy.sqrt', 'np.sqrt', (['normalizer'], {}), 
'(normalizer)\n', (4468, 4480), True, 'import numpy as np\n'), ((5110, 5160), 'scipy.linalg.expm', 'scipy.linalg.expm', (['(alpha_value * creation_operator)'], {}), '(alpha_value * creation_operator)\n', (5127, 5160), False, 'import scipy\n'), ((12174, 12226), 'scipy.integrate.complex_ode', 'scipy.integrate.complex_ode', (['ODE_integrable_function'], {}), '(ODE_integrable_function)\n', (12201, 12226), False, 'import scipy\n'), ((14118, 14135), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (14126, 14135), True, 'import numpy as np\n'), ((14162, 14183), 'numpy.array', 'np.array', (['time_values'], {}), '(time_values)\n', (14170, 14183), True, 'import numpy as np\n'), ((16380, 16429), 'numpy.sin', 'np.sin', (['(expected_rabi_energy * time_values / hbar)'], {}), '(expected_rabi_energy * time_values / hbar)\n', (16386, 16429), True, 'import numpy as np\n'), ((17171, 17181), 'numpy.conj', 'np.conj', (['U'], {}), '(U)\n', (17178, 17181), True, 'import numpy as np\n'), ((17194, 17220), 'numpy.matmul', 'np.matmul', (['rho_0', 'U_dagger'], {}), '(rho_0, U_dagger)\n', (17203, 17220), True, 'import numpy as np\n'), ((18022, 18042), 'numpy.dot', 'np.dot', (['rho_1', 'rho_2'], {}), '(rho_1, rho_2)\n', (18028, 18042), True, 'import numpy as np\n'), ((18068, 18088), 'numpy.linalg.det', 'np.linalg.det', (['rho_1'], {}), '(rho_1)\n', (18081, 18088), True, 'import numpy as np\n'), ((18113, 18133), 'numpy.linalg.det', 'np.linalg.det', (['rho_2'], {}), '(rho_2)\n', (18126, 18133), True, 'import numpy as np\n'), ((20489, 20511), 'numpy.fft.fft', 'np.fft.fft', (['time_trace'], {}), '(time_trace)\n', (20499, 20511), True, 'import numpy as np\n'), ((20556, 20602), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['time_trace.shape[0]'], {'d': 'self.dt'}), '(time_trace.shape[0], d=self.dt)\n', (20570, 20602), True, 'import numpy as np\n'), ((4420, 4435), 'numpy.conj', 'np.conj', (['output'], {}), '(output)\n', (4427, 4435), True, 'import numpy as np\n'), ((11934, 11981), 'numpy.dot', 
'np.dot', (['ODE_TOTAL_MATRIX', 'wf_coefficient_vector'], {}), '(ODE_TOTAL_MATRIX, wf_coefficient_vector)\n', (11940, 11981), True, 'import numpy as np\n'), ((12432, 12456), 'math.ceil', 'math.ceil', (['(max_time / dt)'], {}), '(max_time / dt)\n', (12441, 12456), False, 'import math\n'), ((17264, 17287), 'numpy.dot', 'np.dot', (['rho_0', 'U_dagger'], {}), '(rho_0, U_dagger)\n', (17270, 17287), True, 'import numpy as np\n'), ((11609, 11645), 'numpy.conj', 'np.conj', (['mu_1_perturber_excitation.T'], {}), '(mu_1_perturber_excitation.T)\n', (11616, 11645), True, 'import numpy as np\n'), ((11703, 11739), 'numpy.conj', 'np.conj', (['mu_2_perturber_excitation.T'], {}), '(mu_2_perturber_excitation.T)\n', (11710, 11739), True, 'import numpy as np\n'), ((19921, 19953), 'numpy.conj', 'np.conj', (['results[:, total_index]'], {}), '(results[:, total_index])\n', (19928, 19953), True, 'import numpy as np\n'), ((20326, 20339), 'numpy.conj', 'np.conj', (['wf.T'], {}), '(wf.T)\n', (20333, 20339), True, 'import numpy as np\n'), ((10890, 10914), 'numpy.exp', 'np.exp', (['(-1.0j * f * time)'], {}), '(-1.0j * f * time)\n', (10896, 10914), True, 'import numpy as np\n'), ((11234, 11258), 'numpy.exp', 'np.exp', (['(-1.0j * f * time)'], {}), '(-1.0j * f * time)\n', (11240, 11258), True, 'import numpy as np\n'), ((15283, 15317), 'numpy.conj', 'np.conj', (['results[:, total_index_2]'], {}), '(results[:, total_index_2])\n', (15290, 15317), True, 'import numpy as np\n'), ((18202, 18234), 'math.sqrt', 'math.sqrt', (['(rho_1_det * rho_2_det)'], {}), '(rho_1_det * rho_2_det)\n', (18211, 18234), False, 'import math\n'), ((19178, 19212), 'numpy.conj', 'np.conj', (['results[:, total_index_2]'], {}), '(results[:, total_index_2])\n', (19185, 19212), True, 'import numpy as np\n'), ((5078, 5097), 'numpy.abs', 'np.abs', (['alpha_value'], {}), '(alpha_value)\n', (5084, 5097), True, 'import numpy as np\n'), ((9949, 9987), 'numpy.exp', 'np.exp', (['(-(t / activation_width - 10.0))'], {}), '(-(t / 
activation_width - 10.0))\n', (9955, 9987), True, 'import numpy as np\n'), ((13440, 13467), 'numpy.abs', 'np.abs', (['max_vibrational_amp'], {}), '(max_vibrational_amp)\n', (13446, 13467), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys
import argparse
import scipy.cluster.vq as vq
import numpy
from traj import trajimport
from traj import trajnorm
from traj import trajfeat
from traj import boof
from traj import spatialpyramid
NORM_ARGS = ["flip", "slope", "resample", "slant", "height", "origin"]
FEAT_ARGS = ["dir", "curv", "vic_aspect", "vic_curl", "vic_line", "vic_slope", "bitmap"]
SPATIAL_PYRAMID_LEVELCONF = [[6, 2], [3, 2]]
def process_single_file(filename, outfilename, normalize=False, cluster_file=None):
    """Compute features for one trajectory file and write the result.

    Reads *filename*, optionally normalizes the trajectory, computes the
    feature vector sequence and -- when *cluster_file* is given -- quantizes
    it into a spatial-pyramid bag-of-features representation.  The result is
    written to *outfilename* in numpy text format.
    """
    print("Importing {}...".format(filename))
    trajectory, word = trajimport.read_trajectory_from_file(filename)
    print("Read {} points for word '{}'.".format(len(trajectory), word))
    if normalize:
        print("Normalizing trajectory...")
        trajectory = trajnorm.normalize_trajectory(trajectory, NORM_ARGS)
    print("Calculating feature vector sequence...")
    feature_sequence = trajfeat.calculate_feature_vector_sequence(trajectory, FEAT_ARGS)
    if cluster_file is None:
        # no cluster file: persist the raw feature vector sequence
        print("Writing {}...".format(outfilename))
        numpy.savetxt(outfilename, feature_sequence)
        return
    print("Reading cluster file...")
    clusters = trajimport.read_traj_clusters(cluster_file)
    num_clusters = len(clusters)
    print("Read {} clusters.".format(num_clusters))
    print("Quantizing feature vectors. This may take some time...")
    labels, _ = vq.vq(feature_sequence, clusters)
    print("Calculating bag-of-features representation...")
    pyramid = spatialpyramid.SpatialPyramid(SPATIAL_PYRAMID_LEVELCONF, num_clusters)
    generator = boof.BoofGenerator(pyramid)
    bof = generator.build_feature_vector(trajectory[:, :2], labels)
    print("Calculated bag-of-feature representation of length {}".format(len(bof)))
    print("Writing {}...".format(outfilename))
    numpy.savetxt(outfilename, bof)
def main(argv):
    """Command-line entry point: parse arguments and process one file."""
    arg_parser = argparse.ArgumentParser(description="Calculate feature vector sequences or bag-of-features representations for a online-handwritten trajectory.")
    arg_parser.add_argument("trajectoryfile", help="path to trajectory file")
    arg_parser.add_argument("-n", "--normalize", action="store_true", default=False, help="normalize input trajectory (default=False)")
    arg_parser.add_argument("-b", "--bof", nargs=1, help="use given cluster file to calculate bag-of-features representation for input trajectory (default=False)", metavar="cluster_file")
    arg_parser.add_argument("resultfile", help="resulting bag-of-feature representation or feature vector sequence (depending on other parameters) is written into outfile in numpy-txt format")
    if len(argv) == 1:
        # invoked without any arguments: show usage and bail out
        arg_parser.print_help()
        sys.exit(1)
    args = arg_parser.parse_args()
    if args.bof is None:
        cluster_file = None
    else:
        cluster_file = args.bof[0]
    process_single_file(args.trajectoryfile, args.resultfile, normalize=args.normalize, cluster_file=cluster_file)
# Entry point: forward the raw argv (program name included) so main() can
# detect the no-arguments case via len(argv) == 1.
if __name__ == '__main__':
    main(sys.argv)
| [
"traj.trajnorm.normalize_trajectory",
"traj.boof.BoofGenerator",
"argparse.ArgumentParser",
"traj.trajimport.read_trajectory_from_file",
"scipy.cluster.vq.vq",
"numpy.savetxt",
"traj.trajimport.read_traj_clusters",
"traj.trajfeat.calculate_feature_vector_sequence",
"traj.spatialpyramid.SpatialPyrami... | [((584, 630), 'traj.trajimport.read_trajectory_from_file', 'trajimport.read_trajectory_from_file', (['filename'], {}), '(filename)\n', (620, 630), False, 'from traj import trajimport\n'), ((898, 957), 'traj.trajfeat.calculate_feature_vector_sequence', 'trajfeat.calculate_feature_vector_sequence', (['traj', 'FEAT_ARGS'], {}), '(traj, FEAT_ARGS)\n', (940, 957), False, 'from traj import trajfeat\n'), ((1964, 2119), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Calculate feature vector sequences or bag-of-features representations for a online-handwritten trajectory."""'}), "(description=\n 'Calculate feature vector sequences or bag-of-features representations for a online-handwritten trajectory.'\n )\n", (1987, 2119), False, 'import argparse\n'), ((779, 825), 'traj.trajnorm.normalize_trajectory', 'trajnorm.normalize_trajectory', (['traj', 'NORM_ARGS'], {}), '(traj, NORM_ARGS)\n', (808, 825), False, 'from traj import trajnorm\n'), ((1052, 1095), 'traj.trajimport.read_traj_clusters', 'trajimport.read_traj_clusters', (['cluster_file'], {}), '(cluster_file)\n', (1081, 1095), False, 'from traj import trajimport\n'), ((1282, 1311), 'scipy.cluster.vq.vq', 'vq.vq', (['feat_seq_mat', 'clusters'], {}), '(feat_seq_mat, clusters)\n', (1287, 1311), True, 'import scipy.cluster.vq as vq\n'), ((1402, 1472), 'traj.spatialpyramid.SpatialPyramid', 'spatialpyramid.SpatialPyramid', (['SPATIAL_PYRAMID_LEVELCONF', 'num_clusters'], {}), '(SPATIAL_PYRAMID_LEVELCONF, num_clusters)\n', (1431, 1472), False, 'from traj import spatialpyramid\n'), ((1487, 1522), 'traj.boof.BoofGenerator', 'boof.BoofGenerator', (['spatial_pyramid'], {}), '(spatial_pyramid)\n', (1505, 1522), False, 'from traj import boof\n'), ((1730, 1761), 'numpy.savetxt', 'numpy.savetxt', (['outfilename', 'bof'], {}), '(outfilename, bof)\n', (1743, 1761), False, 'import numpy\n'), ((1892, 1932), 'numpy.savetxt', 'numpy.savetxt', (['outfilename', 
'feat_seq_mat'], {}), '(outfilename, feat_seq_mat)\n', (1905, 1932), False, 'import numpy\n'), ((2783, 2794), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2791, 2794), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
#
# JSON osu! map analysis (for osu!mania)
#
import numpy as np;
def get_map_timing_array(map_json, length=-1, divisor=4):
    """Return the integer timestamps (ms) of every tick in the map.

    Each uninherited timing section contributes ticks spaced
    tickLength / divisor apart, from its start to the start of the next
    section (or *length* for the last one).  When *length* is -1 it is
    derived from the final object plus one extra second.
    """
    if length == -1:
        last_obj = map_json["obj"][-1]
        # pad one extra time interval after the last note
        length = last_obj["time"] + 1000
        if last_obj["type"] & 8:  # spinner: extend to its end time instead
            length = last_obj["spinnerEndTime"] + 1000
    sections = map_json["timing"]["uts"]
    ticks = []
    for idx, section in enumerate(sections):
        start = section["beginTime"]
        step = section["tickLength"] / divisor
        stop = sections[idx + 1]["beginTime"] if idx < len(sections) - 1 else length
        ticks.extend(int(value) for value in np.floor(np.arange(start, stop, step)))
    return ticks
def get_tick_len(map_json, tick):
    """Millisecond length of one beat (tick) in effect at timestamp *tick*.

    Before the first uninherited section its tickLength applies; otherwise
    the most recent section whose beginTime is <= tick wins.
    """
    sections = map_json["timing"]["uts"]
    if tick < sections[0]["beginTime"]:
        return sections[0]["tickLength"]
    current = 600  # fallback, matching the original default
    for section in sections:
        if tick < section["beginTime"]:
            break
        current = section["tickLength"]
    return current
def get_slider_len(map_json, tick):
    """Slider length in effect at timestamp *tick*.

    Before the first timing section its sliderLength applies; otherwise the
    most recent section whose beginTime is <= tick wins.
    """
    sections = map_json["timing"]["ts"]
    if tick < sections[0]["beginTime"]:
        return sections[0]["sliderLength"]
    current = 100  # fallback, matching the original default
    for section in sections:
        if tick < section["beginTime"]:
            break
        current = section["sliderLength"]
    return current
def get_slider_len_ts(ts_a, tick):
    """Slider length at timestamp *tick*, given the timing-section list
    directly (same lookup rule as get_slider_len)."""
    if tick < ts_a[0]["beginTime"]:
        return ts_a[0]["sliderLength"]
    current = 100  # fallback, matching the original default
    for section in ts_a:
        if tick < section["beginTime"]:
            break
        current = section["sliderLength"]
    return current
def get_end_time(note):
    """Timestamp at which *note* finishes.

    Spinners (type bit 8) end at spinnerEndTime, mania holds (type bit 128)
    at holdEndTime; anything else is instantaneous at its hit time.
    """
    note_type = note["type"]
    if note_type & 8:
        return note["spinnerEndTime"]
    if note_type & 128:
        return note["holdEndTime"]
    return note["time"]
# edited from uts to ts
def get_all_ticks_and_lengths_from_ts(uts_array, ts_array, end_time, divisor=4):
    """Return per-tick indices, timestamps, tick lengths and slider lengths.

    Args:
        uts_array: uninherited timing sections (beginTime, tickLength).
        ts_array: timing sections carrying sliderLength.
        end_time: end timestamp of the map (closes the last section).
        divisor: ticks per beat.

    Returns:
        (ticks, timestamps, tick_lengths, slider_lengths) -- four parallel
        1-D arrays covering every tick of every uninherited section.
    """
    # End of each section = start of the next one (end_time for the last).
    endtimes = ([uts["beginTime"] for uts in uts_array] + [end_time])[1:]
    timestamps = [np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor) for i, uts in enumerate(uts_array)]
    ticks_from_uts = [list(range(len(timestamp_group))) for timestamp_group in timestamps]
    # IMPROVED: reuse the timestamp groups already computed above instead of
    # recomputing np.arange a second time just to measure its length.
    tick_len = [[uts["tickLength"]] * len(timestamps[i]) for i, uts in enumerate(uts_array)]
    slider_len = [get_slider_len_ts(ts_array, timestamp) for timestamp in np.concatenate(timestamps)]
    return (np.concatenate(ticks_from_uts),
            np.round(np.concatenate(timestamps)).astype(int),
            np.concatenate(tick_len),
            np.array(slider_len))
def is_uts_begin(map_json, tick):
    """True when *tick* falls within the (-1, +5) ms window around the start
    of any uninherited timing section."""
    for uts in map_json["timing"]["uts"]:
        begin = uts["beginTime"]
        if begin - 1 < tick < begin + 5:
            return True
    return False
def get_metronome_count(map_json, tick):
    """Number of beats per measure (whiteLines) in effect at *tick*.

    Before the first uninherited section, the first section's value applies;
    otherwise the latest section starting at or before *tick* wins.
    """
    sections = map_json["timing"]["uts"]
    if tick < sections[0]["beginTime"]:
        return sections[0]["whiteLines"]
    for section in reversed(sections):
        if tick >= section["beginTime"]:
            return section["whiteLines"]
def get_map_notes_and_patterns(map_json, **kwargs):
    """
    Reads JSON map data and creates a list for every tick
    Returns:
        data = list of data array: [TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3]
        patterns = numpy array shape (num_groups, main_metronome * divisor, 2 * key_count + 1)
            [:, :, 0] pattern_avail_hold
            [:, :, 1:1+key_count] pattern_note_begin
            [:, :, 1+key_count:1+2*key_count] pattern_note_end
    Ex1, Ex2, Ex3 = tickLength/500, BPM/120, sliderLength/150
    """
    # keyword arguments
    length = kwargs.get("length", -1);
    divisor = kwargs.get("divisor", 4);
    note_max_wait_time = kwargs.get("note_max_wait_time", 1000);
    main_metronome = kwargs.get("main_metronome", 4);
    # constant multipliers and subtractions (normalize Ex1..Ex3 roughly to 0)
    tlen_mp = 1/500;
    tlen_s = 1;
    bpm_mp = 1/120;
    bpm_s = 1;
    slen_mp = 1/150;
    slen_s = 1;
    # get the timing array of timestamps of each tick
    tick_times = get_map_timing_array(map_json, length = length, divisor = divisor);
    objs_all = map_json["obj"];
    # in osu!mania the CS difficulty value is the key (column) count
    key_count = map_json["diff"]["CS"];
    # bucket the objects per key: the column is encoded in the x coordinate
    # (0..511 split into key_count equal bands)
    objs_each = [[] for i in range(key_count)];
    for obj in objs_all:
        x = obj["x"]
        obj_key = np.floor(x * key_count / 512).astype(int)
        objs_each[obj_key].append(obj)
    # note-type code: 0 = none, 4 = mania hold (type bit 128), 1 = plain hit
    def get_note_type_mania(obj):
        if not obj:
            return 0;
        if obj["type"] & 128:
            return 4;
        return 1;
    # object times each key
    obj_times_each = []
    for objs in objs_each:
        obj_times = [obj["time"] for obj in objs]
        obj_times_each.append(obj_times)
    # object end times each key
    obj_end_times_each = []
    for objs in objs_each:
        obj_end_times = [get_end_time(obj) for obj in objs]
        obj_end_times_each.append(obj_end_times)
    # per-key cursors into the (time-sorted) object lists
    obj_ptr_each = [0] * key_count
    obj_end_ptr_each = [0] * key_count
    # NOTE(review): po, last_obj_time and holding_each are assigned here but
    # never read below -- apparent leftovers from an earlier version.
    po = 0;
    # NOTE(review): obj_times is the leftover loop variable from the last
    # iteration above (i.e. the last key's times); if the last key has no
    # objects this raises IndexError.  Presumably the intent was the earliest
    # object time overall -- TODO confirm.
    start_time = obj_times[0] - note_max_wait_time;
    last_obj_time = start_time;
    holding_each = [0] * key_count
    data = [];
    # per-tick accumulators for the current metronome group
    pattern_avail_hold = []
    pattern_data = []
    pattern_data_end = []
    # completed metronome groups
    pattern_avail_hold_grouped = []
    pattern_data_grouped = []
    pattern_data_end_grouped = []
    # tick count from start of uninherited timing section
    uts_i = 0;
    # tick is timestamp here
    for i, tick in enumerate(tick_times):
        if is_uts_begin(map_json, tick):
            uts_i = 0;
        else:
            uts_i += 1;
        # save group in a metronome when at the start of next metronome
        metronome = get_metronome_count(map_json, tick)
        if uts_i % (metronome * divisor) == 0:
            # only keep groups that contain at least one note begin and end
            if len(pattern_data) > 0 and np.sum(pattern_data) > 0 and np.sum(pattern_data_end) > 0:
                pattern_avail_hold_grouped.append(pattern_avail_hold)
                pattern_data_grouped.append(pattern_data)
                pattern_data_end_grouped.append(pattern_data_end)
            pattern_avail_hold = []
            pattern_data = []
            pattern_data_end = []
        # Attach extra vars at the end of each tick date point
        tlen = get_tick_len(map_json, tick);
        bpm = 60000 / tlen;
        slen = get_slider_len(map_json, tick);
        ex1 = tlen * tlen_mp - tlen_s;
        ex2 = bpm * bpm_mp - bpm_s;
        ex3 = slen * slen_mp - slen_s;
        has_note = False
        has_note_end = False
        has_hold = False
        # list of length (key_count) for pattern on current tick
        tick_pattern = []
        tick_pattern_end = []
        for k in range(key_count):
            objs = objs_each[k]
            obj_times = obj_times_each[k]
            obj_end_times = obj_end_times_each[k]
            # locate pointers: advance past objects more than 5 ms behind
            while obj_times[obj_ptr_each[k]] < tick - 5 and obj_ptr_each[k] < len(obj_times) - 1:
                obj_ptr_each[k] += 1;
            while obj_end_times[obj_end_ptr_each[k]] < tick - 5 and obj_end_ptr_each[k] < len(obj_end_times) - 1:
                obj_end_ptr_each[k] += 1;
            obj_ptr = obj_ptr_each[k]
            obj_end_ptr = obj_end_ptr_each[k]
            # NOTE(review): when no note falls within +-5 ms on this key,
            # nothing is appended to tick_pattern (there is no else branch,
            # unlike tick_pattern_end below), so tick_pattern rows can be
            # shorter than key_count -- verify this ragged shape is intended
            # before the np.concatenate at the end of this function.
            if obj_times[obj_ptr] >= tick - 5 and obj_times[obj_ptr] <= tick + 5: # found note on key
                has_note = True
                note_type = get_note_type_mania(objs[obj_ptr])
                if note_type == 4:
                    has_hold = True
                    tick_pattern.append(1)
                else:
                    tick_pattern.append(0)
            if obj_end_times[obj_end_ptr] >= tick - 5 and obj_end_times[obj_end_ptr] <= tick + 5: # found note end on key
                has_note_end = True
                tick_pattern_end.append(1)
            else:
                tick_pattern_end.append(0)
        if has_note:
            # TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3
            # For mania, NOTE_TYPE = Hit(1) HoldStartOnly(2) HoldStart+Note(3) HoldEndOnly(4)
            # SLIDING = SPINNING = MOMENTUM = 0
            if has_note_end:
                if has_hold:
                    data.append([i, tick, 1, 3, 0, 0, 0, ex1, ex2, ex3])
                else:
                    data.append([i, tick, 1, 1, 0, 0, 0, ex1, ex2, ex3])
            else:
                data.append([i, tick, 1, 2, 0, 0, 0, ex1, ex2, ex3])
        else:
            if has_note_end:
                data.append([i, tick, 0, 4, 0, 0, 0, ex1, ex2, ex3])
            else:
                data.append([i, tick, 0, 0, 0, 0, 0, ex1, ex2, ex3])
        pattern_avail_hold.append(1 if has_hold else 0)
        pattern_data.append(tick_pattern)
        pattern_data_end.append(tick_pattern_end)
    # everything limit to 4 metronomes (main_metronome): pad short groups
    # with zeros and truncate long ones to exactly main_metronome * divisor
    for i, pattern_avail_hold in enumerate(pattern_avail_hold_grouped):
        pattern_data = pattern_data_grouped[i]
        pattern_data_end = pattern_data_end_grouped[i]
        if len(pattern_avail_hold) < main_metronome * divisor:
            added_len = main_metronome * divisor - len(pattern_avail_hold)
            pattern_avail_hold += [0] * added_len
            pattern_avail_hold_grouped[i] = pattern_avail_hold
            pattern_data += [[0] * key_count] * added_len
            pattern_data_grouped[i] = pattern_data
            pattern_data_end += [[0] * key_count] * added_len
            pattern_data_end_grouped[i] = pattern_data_end
        if len(pattern_avail_hold) > main_metronome * divisor:
            pattern_avail_hold = pattern_avail_hold[:main_metronome * divisor]
            pattern_avail_hold_grouped[i] = pattern_avail_hold
            pattern_data = pattern_data[:main_metronome * divisor]
            pattern_data_grouped[i] = pattern_data
            pattern_data_end = pattern_data_end[:main_metronome * divisor]
            pattern_data_end_grouped[i] = pattern_data_end
    # stack the groups into the (num_groups, ticks, 1 + 2*key_count) array
    if len(pattern_avail_hold_grouped) > 0:
        pattern_avail_hold_expanded = np.expand_dims(pattern_avail_hold_grouped, axis=2)
        pattern_data = np.concatenate([pattern_avail_hold_expanded, pattern_data_grouped, pattern_data_end_grouped], axis=2)
    else:
        pattern_data = np.zeros((0, main_metronome * divisor, 1 + 2 * key_count))
    return data, pattern_data;
"numpy.sum",
"numpy.floor",
"numpy.zeros",
"numpy.expand_dims",
"numpy.array",
"numpy.arange",
"numpy.concatenate"
] | [((2277, 2346), 'numpy.arange', 'np.arange', (["uts['beginTime']", 'endtimes[i]', "(uts['tickLength'] / divisor)"], {}), "(uts['beginTime'], endtimes[i], uts['tickLength'] / divisor)\n", (2286, 2346), True, 'import numpy as np\n'), ((2891, 2921), 'numpy.concatenate', 'np.concatenate', (['ticks_from_uts'], {}), '(ticks_from_uts)\n', (2905, 2921), True, 'import numpy as np\n'), ((2973, 2997), 'numpy.concatenate', 'np.concatenate', (['tick_len'], {}), '(tick_len)\n', (2987, 2997), True, 'import numpy as np\n'), ((2999, 3019), 'numpy.array', 'np.array', (['slider_len'], {}), '(slider_len)\n', (3007, 3019), True, 'import numpy as np\n'), ((10499, 10549), 'numpy.expand_dims', 'np.expand_dims', (['pattern_avail_hold_grouped'], {'axis': '(2)'}), '(pattern_avail_hold_grouped, axis=2)\n', (10513, 10549), True, 'import numpy as np\n'), ((10573, 10678), 'numpy.concatenate', 'np.concatenate', (['[pattern_avail_hold_expanded, pattern_data_grouped, pattern_data_end_grouped]'], {'axis': '(2)'}), '([pattern_avail_hold_expanded, pattern_data_grouped,\n pattern_data_end_grouped], axis=2)\n', (10587, 10678), True, 'import numpy as np\n'), ((10708, 10766), 'numpy.zeros', 'np.zeros', (['(0, main_metronome * divisor, 1 + 2 * key_count)'], {}), '((0, main_metronome * divisor, 1 + 2 * key_count))\n', (10716, 10766), True, 'import numpy as np\n'), ((709, 756), 'numpy.arange', 'np.arange', (['begin_time', 'end_time', '(mspb / divisor)'], {}), '(begin_time, end_time, mspb / divisor)\n', (718, 756), True, 'import numpy as np\n'), ((2851, 2877), 'numpy.concatenate', 'np.concatenate', (['timestamps'], {}), '(timestamps)\n', (2865, 2877), True, 'import numpy as np\n'), ((2518, 2587), 'numpy.arange', 'np.arange', (["uts['beginTime']", 'endtimes[i]', "(uts['tickLength'] / divisor)"], {}), "(uts['beginTime'], endtimes[i], uts['tickLength'] / divisor)\n", (2527, 2587), True, 'import numpy as np\n'), ((4813, 4842), 'numpy.floor', 'np.floor', (['(x * key_count / 512)'], {}), '(x * key_count / 
512)\n', (4821, 4842), True, 'import numpy as np\n'), ((2932, 2958), 'numpy.concatenate', 'np.concatenate', (['timestamps'], {}), '(timestamps)\n', (2946, 2958), True, 'import numpy as np\n'), ((6276, 6296), 'numpy.sum', 'np.sum', (['pattern_data'], {}), '(pattern_data)\n', (6282, 6296), True, 'import numpy as np\n'), ((6305, 6329), 'numpy.sum', 'np.sum', (['pattern_data_end'], {}), '(pattern_data_end)\n', (6311, 6329), True, 'import numpy as np\n')] |
import os
import torch
import pickle as pkl
import torch.nn as nn
import numpy as np

# Seed torch so the randomly initialised embedding table is reproducible.
torch.manual_seed(2020)
# 35 attribute words, each mapped to a 300-dimensional random vector.
embeds = nn.Embedding(35, 300)
print(embeds.weight)
save_path = './'
param_path = os.path.join(save_path, 'PETA_word2vec.pkl')
# BUG FIX: use context managers throughout so every file handle is closed
# (the original never closed the pickle read handle).
with open(param_path, 'wb') as f:
    # detach from the autograd graph before converting to a numpy array
    npembeds = embeds.weight.detach().numpy()
    pkl.dump(npembeds, f)
# print full arrays without truncation
np.set_printoptions(threshold=np.inf)
path = os.path.join(save_path, 'PETA_word2vec.pkl')
# path = './coco_glove_word2vec.pkl'
with open(path, 'rb') as files:
    cont = pkl.load(files, encoding='iso-8859-1')  # read the pickle contents back
print("cont: ", cont)
obj_path = './PETA_word2vec.txt'
# obj_path = './coco_glove_word2vec.txt'
cont = str(cont)
# dump a human-readable text copy of the embedding matrix
with open(obj_path, 'w') as ft:
    ft.write(cont)
| [
"pickle.dump",
"numpy.set_printoptions",
"torch.manual_seed",
"torch.nn.Embedding",
"pickle.load",
"os.path.join"
] | [((86, 109), 'torch.manual_seed', 'torch.manual_seed', (['(2020)'], {}), '(2020)\n', (103, 109), False, 'import torch\n'), ((119, 140), 'torch.nn.Embedding', 'nn.Embedding', (['(35)', '(300)'], {}), '(35, 300)\n', (131, 140), True, 'import torch.nn as nn\n'), ((193, 237), 'os.path.join', 'os.path.join', (['save_path', '"""PETA_word2vec.pkl"""'], {}), "(save_path, 'PETA_word2vec.pkl')\n", (205, 237), False, 'import os\n'), ((386, 423), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (405, 423), True, 'import numpy as np\n'), ((431, 475), 'os.path.join', 'os.path.join', (['save_path', '"""PETA_word2vec.pkl"""'], {}), "(save_path, 'PETA_word2vec.pkl')\n", (443, 475), False, 'import os\n'), ((544, 582), 'pickle.load', 'pkl.load', (['files'], {'encoding': '"""iso-8859-1"""'}), "(files, encoding='iso-8859-1')\n", (552, 582), True, 'import pickle as pkl\n'), ((363, 384), 'pickle.dump', 'pkl.dump', (['npembeds', 'f'], {}), '(npembeds, f)\n', (371, 384), True, 'import pickle as pkl\n')] |
#!/usr/bin/env python
# Demo script: connect to a Unity ML-Agents Reacher environment and run one
# episode with uniformly random (clipped) actions, reporting the mean score.
from unityagents import UnityEnvironment
import numpy as np
import os
# make the relative environment path below resolve regardless of the CWD
os.chdir(os.path.dirname(__file__))
env = UnityEnvironment(file_name='../../unity/Reacher_Linux_NoVis/Reacher.x86_64')
# env = UnityEnvironment(file_name='./unity/Reacher_One_Linux_NoVis/Reacher_One_Linux_NoVis.x86_64')
# env = UnityEnvironment(file_name='./unity/Crawler_Linux_NoVis/Crawler.x86_64')
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
env_info = env.reset(train_mode=False)[brain_name]     # reset the environment
states = env_info.vector_observations                  # get the current state (for each agent)
scores = np.zeros(num_agents)                          # initialize the score (for each agent)
while True:
    actions = np.random.randn(num_agents, action_size) # select a random action (for each agent)
    actions = np.clip(actions, -1, 1)                  # all actions between -1 and 1
    env_info = env.step(actions)[brain_name]           # send all actions to the environment
    next_states = env_info.vector_observations         # get next state (for each agent)
    rewards = env_info.rewards                         # get reward (for each agent)
    dones = env_info.local_done                        # see if episode finished
    scores += env_info.rewards                         # update the score (for each agent)
    states = next_states                               # roll over states to next time step
    if np.any(dones):                                  # exit loop if episode finished
        break
print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))
| [
"numpy.random.randn",
"os.path.dirname",
"numpy.zeros",
"numpy.clip",
"numpy.any",
"numpy.mean",
"unityagents.UnityEnvironment"
] | [((137, 213), 'unityagents.UnityEnvironment', 'UnityEnvironment', ([], {'file_name': '"""../../unity/Reacher_Linux_NoVis/Reacher.x86_64"""'}), "(file_name='../../unity/Reacher_Linux_NoVis/Reacher.x86_64')\n", (153, 213), False, 'from unityagents import UnityEnvironment\n'), ((1215, 1235), 'numpy.zeros', 'np.zeros', (['num_agents'], {}), '(num_agents)\n', (1223, 1235), True, 'import numpy as np\n'), ((104, 129), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (119, 129), False, 'import os\n'), ((1329, 1369), 'numpy.random.randn', 'np.random.randn', (['num_agents', 'action_size'], {}), '(num_agents, action_size)\n', (1344, 1369), True, 'import numpy as np\n'), ((1421, 1444), 'numpy.clip', 'np.clip', (['actions', '(-1)', '(1)'], {}), '(actions, -1, 1)\n', (1428, 1444), True, 'import numpy as np\n'), ((2037, 2050), 'numpy.any', 'np.any', (['dones'], {}), '(dones)\n', (2043, 2050), True, 'import numpy as np\n'), ((2200, 2215), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (2207, 2215), True, 'import numpy as np\n')] |
from .setup_helpers.env import (IS_64BIT, IS_DARWIN, IS_WINDOWS,
DEBUG, REL_WITH_DEB_INFO, USE_MKLDNN,
check_env_flag, check_negative_env_flag)
import os
import sys
import distutils
import distutils.sysconfig
from subprocess import check_call, check_output
from distutils.version import LooseVersion
from .setup_helpers.cuda import USE_CUDA, CUDA_HOME
from .setup_helpers.dist_check import USE_DISTRIBUTED, USE_GLOO_IBVERBS
from .setup_helpers.nccl import USE_SYSTEM_NCCL, NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB, USE_NCCL
from .setup_helpers.rocm import USE_ROCM
from .setup_helpers.nnpack import USE_NNPACK
from .setup_helpers.qnnpack import USE_QNNPACK
from .setup_helpers.cudnn import CUDNN_INCLUDE_DIR, CUDNN_LIBRARY, USE_CUDNN
from pprint import pprint
from glob import glob
import multiprocessing
import shutil
def which(thefile):
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
for dir in path:
fname = os.path.join(dir, thefile)
fnames = [fname]
if IS_WINDOWS:
exts = os.environ.get('PATHEXT', '').split(os.pathsep)
fnames += [fname + ext for ext in exts]
for name in fnames:
if (os.path.exists(name) and os.access(name, os.F_OK | os.X_OK)
and not os.path.isdir(name)):
return name
return None
def cmake_version(cmd):
for line in check_output([cmd, '--version']).decode('utf-8').split('\n'):
if 'version' in line:
return LooseVersion(line.strip().split(' ')[2])
raise Exception('no version found')
def get_cmake_command():
cmake_command = 'cmake'
if IS_WINDOWS:
return cmake_command
cmake3 = which('cmake3')
if cmake3 is not None:
cmake = which('cmake')
if cmake is not None:
bare_version = cmake_version(cmake)
if bare_version < LooseVersion("3.5.0") and cmake_version(cmake3) > bare_version:
cmake_command = 'cmake3'
return cmake_command
def cmake_defines(lst, **kwargs):
for key in sorted(kwargs.keys()):
value = kwargs[key]
if value is not None:
lst.append('-D{}={}'.format(key, value))
# Ninja
# Use ninja if it is on the PATH. Previous version of PyTorch required the
# ninja python package, but we no longer use it, so we do not have to import it
USE_NINJA = not check_negative_env_flag('USE_NINJA') and (which('ninja') is not None)
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
install_dir = base_dir + "/torch"
build_type = "Release"
if DEBUG:
build_type = "Debug"
elif REL_WITH_DEB_INFO:
build_type = "RelWithDebInfo"
def overlay_windows_vcvars(env):
from distutils._msvccompiler import _get_vc_env
vc_arch = 'x64' if IS_64BIT else 'x86'
vc_env = _get_vc_env(vc_arch)
for k, v in env.items():
lk = k.lower()
if lk not in vc_env:
vc_env[lk] = v
return vc_env
def mkdir_p(dir):
try:
os.makedirs(dir)
except OSError:
pass
def create_build_env():
# XXX - our cmake file sometimes looks at the system environment
# and not cmake flags!
# you should NEVER add something to this list. It is bad practice to
# have cmake read the environment
my_env = os.environ.copy()
if USE_CUDNN:
my_env['CUDNN_LIBRARY'] = escape_path(CUDNN_LIBRARY)
my_env['CUDNN_INCLUDE_DIR'] = escape_path(CUDNN_INCLUDE_DIR)
if USE_CUDA:
my_env['CUDA_BIN_PATH'] = escape_path(CUDA_HOME)
if IS_WINDOWS:
my_env = overlay_windows_vcvars(my_env)
# When using Ninja under Windows, the gcc toolchain will be chosen as default.
# But it should be set to MSVC as the user's first choice.
if USE_NINJA:
cc = my_env.get('CC', 'cl')
cxx = my_env.get('CXX', 'cl')
my_env['CC'] = cc
my_env['CXX'] = cxx
return my_env
def run_cmake(version,
cmake_python_library,
build_python,
build_test,
build_dir,
my_env):
cmake_args = [
get_cmake_command()
]
if USE_NINJA:
cmake_args.append('-GNinja')
elif IS_WINDOWS:
if IS_64BIT:
cmake_args.append('-GVisual Studio 15 2017 Win64')
else:
cmake_args.append('-GVisual Studio 15 2017')
try:
import numpy as np
NUMPY_INCLUDE_DIR = np.get_include()
USE_NUMPY = True
except ImportError:
USE_NUMPY = False
NUMPY_INCLUDE_DIR = None
cflags = os.getenv('CFLAGS', "") + " " + os.getenv('CPPFLAGS', "")
ldflags = os.getenv('LDFLAGS', "")
if IS_WINDOWS:
cmake_defines(cmake_args, MSVC_Z7_OVERRIDE=os.getenv('MSVC_Z7_OVERRIDE', "ON"))
cflags += " /EHa"
mkdir_p(install_dir)
mkdir_p(build_dir)
cmake_defines(
cmake_args,
PYTHON_EXECUTABLE=escape_path(sys.executable),
PYTHON_LIBRARY=escape_path(cmake_python_library),
PYTHON_INCLUDE_DIR=escape_path(distutils.sysconfig.get_python_inc()),
BUILDING_WITH_TORCH_LIBS=os.getenv("BUILDING_WITH_TORCH_LIBS", "ON"),
TORCH_BUILD_VERSION=version,
CMAKE_BUILD_TYPE=build_type,
BUILD_TORCH=os.getenv("BUILD_TORCH", "ON"),
BUILD_PYTHON=build_python,
BUILD_SHARED_LIBS=os.getenv("BUILD_SHARED_LIBS", "ON"),
BUILD_BINARY=check_env_flag('BUILD_BINARY'),
BUILD_TEST=build_test,
INSTALL_TEST=build_test,
BUILD_CAFFE2_OPS=not check_negative_env_flag('BUILD_CAFFE2_OPS'),
ONNX_NAMESPACE=os.getenv("ONNX_NAMESPACE", "onnx_torch"),
ONNX_ML=os.getenv("ONNX_ML", False),
USE_CUDA=USE_CUDA,
USE_DISTRIBUTED=USE_DISTRIBUTED,
USE_FBGEMM=not (check_env_flag('NO_FBGEMM') or check_negative_env_flag('USE_FBGEMM')),
USE_NUMPY=USE_NUMPY,
NUMPY_INCLUDE_DIR=escape_path(NUMPY_INCLUDE_DIR),
USE_SYSTEM_NCCL=USE_SYSTEM_NCCL,
NCCL_INCLUDE_DIR=NCCL_INCLUDE_DIR,
NCCL_ROOT_DIR=NCCL_ROOT_DIR,
NCCL_SYSTEM_LIB=NCCL_SYSTEM_LIB,
CAFFE2_STATIC_LINK_CUDA=check_env_flag('USE_CUDA_STATIC_LINK'),
USE_ROCM=USE_ROCM,
USE_NNPACK=USE_NNPACK,
USE_LEVELDB=check_env_flag('USE_LEVELDB'),
USE_LMDB=check_env_flag('USE_LMDB'),
USE_OPENCV=check_env_flag('USE_OPENCV'),
USE_QNNPACK=USE_QNNPACK,
USE_TENSORRT=check_env_flag('USE_TENSORRT'),
USE_FFMPEG=check_env_flag('USE_FFMPEG'),
USE_SYSTEM_EIGEN_INSTALL="OFF",
USE_MKLDNN=USE_MKLDNN,
USE_NCCL=USE_NCCL,
NCCL_EXTERNAL=USE_NCCL,
CMAKE_INSTALL_PREFIX=install_dir,
CMAKE_C_FLAGS=cflags,
CMAKE_CXX_FLAGS=cflags,
CMAKE_EXE_LINKER_FLAGS=ldflags,
CMAKE_SHARED_LINKER_FLAGS=ldflags,
THD_SO_VERSION="1",
CMAKE_PREFIX_PATH=os.getenv('CMAKE_PREFIX_PATH') or distutils.sysconfig.get_python_lib(),
BLAS=os.getenv('BLAS'),
CUDA_NVCC_EXECUTABLE=escape_path(os.getenv('CUDA_NVCC_EXECUTABLE')),
USE_REDIS=os.getenv('USE_REDIS'),
USE_GLOG=os.getenv('USE_GLOG'),
USE_GFLAGS=os.getenv('USE_GFLAGS'),
WERROR=os.getenv('WERROR'))
if USE_GLOO_IBVERBS:
cmake_defines(cmake_args, USE_IBVERBS="1", USE_GLOO_IBVERBS="1")
if USE_MKLDNN:
cmake_defines(cmake_args, MKLDNN_ENABLE_CONCURRENT_EXEC="ON")
expected_wrapper = '/usr/local/opt/ccache/libexec'
if IS_DARWIN and os.path.exists(expected_wrapper):
cmake_defines(cmake_args,
CMAKE_C_COMPILER="{}/gcc".format(expected_wrapper),
CMAKE_CXX_COMPILER="{}/g++".format(expected_wrapper))
for env_var_name in my_env:
if env_var_name.startswith('gh'):
# github env vars use utf-8, on windows, non-ascii code may
# cause problem, so encode first
try:
my_env[env_var_name] = str(my_env[env_var_name].encode("utf-8"))
except UnicodeDecodeError as e:
shex = ':'.join('{:02x}'.format(ord(c)) for c in my_env[env_var_name])
sys.stderr.write('Invalid ENV[{}] = {}\n'.format(env_var_name, shex))
# According to the CMake manual, we should pass the arguments first,
# and put the directory as the last element. Otherwise, these flags
# may not be passed correctly.
# Reference:
# 1. https://cmake.org/cmake/help/latest/manual/cmake.1.html#synopsis
# 2. https://stackoverflow.com/a/27169347
cmake_args.append(base_dir)
pprint(cmake_args)
check_call(cmake_args, cwd=build_dir, env=my_env)
def build_caffe2(version,
cmake_python_library,
build_python,
rerun_cmake,
build_dir):
my_env = create_build_env()
build_test = not check_negative_env_flag('BUILD_TEST')
max_jobs = os.getenv('MAX_JOBS', None)
cmake_cache_file = 'build/CMakeCache.txt'
if rerun_cmake and os.path.isfile(cmake_cache_file):
os.remove(cmake_cache_file)
if not os.path.exists(cmake_cache_file) or (USE_NINJA and not os.path.exists('build/build.ninja')):
run_cmake(version,
cmake_python_library,
build_python,
build_test,
build_dir,
my_env)
if IS_WINDOWS:
build_cmd = ['cmake', '--build', '.', '--target', 'install', '--config', build_type, '--']
if USE_NINJA:
# sccache will fail if all cores are used for compiling
j = max(1, multiprocessing.cpu_count() - 1)
if max_jobs is not None:
j = min(int(max_jobs), j)
build_cmd += ['-j', str(j)]
check_call(build_cmd, cwd=build_dir, env=my_env)
else:
j = max_jobs or str(multiprocessing.cpu_count())
build_cmd += ['/maxcpucount:{}'.format(j)]
check_call(build_cmd, cwd=build_dir, env=my_env)
else:
if USE_NINJA:
ninja_cmd = ['ninja', 'install']
if max_jobs is not None:
ninja_cmd += ['-j', max_jobs]
check_call(ninja_cmd, cwd=build_dir, env=my_env)
else:
max_jobs = max_jobs or str(multiprocessing.cpu_count())
check_call(['make', '-j', str(max_jobs), 'install'], cwd=build_dir, env=my_env)
# in cmake, .cu compilation involves generating certain intermediates
# such as .cu.o and .cu.depend, and these intermediates finally get compiled
# into the final .so.
# Ninja updates build.ninja's timestamp after all dependent files have been built,
# and re-kicks cmake on incremental builds if any of the dependent files
# have a timestamp newer than build.ninja's timestamp.
# There is a cmake bug with the Ninja backend, where the .cu.depend files
# are still compiling by the time the build.ninja timestamp is updated,
# so the .cu.depend file's newer timestamp is screwing with ninja's incremental
# build detector.
# This line works around that bug by manually updating the build.ninja timestamp
# after the entire build is finished.
if os.path.exists('build/build.ninja'):
os.utime('build/build.ninja', None)
if build_python:
for proto_file in glob('build/caffe2/proto/*.py'):
if os.path.sep != '/':
proto_file = proto_file.replace(os.path.sep, '/')
if proto_file != 'build/caffe2/proto/__init__.py':
shutil.copyfile(proto_file, "caffe2/proto/" + os.path.basename(proto_file))
def escape_path(path):
if os.path.sep != '/' and path is not None:
return path.replace(os.path.sep, '/')
return path
| [
"os.remove",
"os.environ.copy",
"os.path.isfile",
"pprint.pprint",
"glob.glob",
"distutils._msvccompiler._get_vc_env",
"os.utime",
"os.path.join",
"subprocess.check_call",
"multiprocessing.cpu_count",
"os.path.abspath",
"distutils.sysconfig.get_python_inc",
"os.path.exists",
"numpy.get_inc... | [((2872, 2892), 'distutils._msvccompiler._get_vc_env', '_get_vc_env', (['vc_arch'], {}), '(vc_arch)\n', (2883, 2892), False, 'from distutils._msvccompiler import _get_vc_env\n'), ((3352, 3369), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (3367, 3369), False, 'import os\n'), ((4720, 4744), 'os.getenv', 'os.getenv', (['"""LDFLAGS"""', '""""""'], {}), "('LDFLAGS', '')\n", (4729, 4744), False, 'import os\n'), ((8642, 8660), 'pprint.pprint', 'pprint', (['cmake_args'], {}), '(cmake_args)\n', (8648, 8660), False, 'from pprint import pprint\n'), ((8665, 8714), 'subprocess.check_call', 'check_call', (['cmake_args'], {'cwd': 'build_dir', 'env': 'my_env'}), '(cmake_args, cwd=build_dir, env=my_env)\n', (8675, 8714), False, 'from subprocess import check_call, check_output\n'), ((8978, 9005), 'os.getenv', 'os.getenv', (['"""MAX_JOBS"""', 'None'], {}), "('MAX_JOBS', None)\n", (8987, 9005), False, 'import os\n'), ((11262, 11297), 'os.path.exists', 'os.path.exists', (['"""build/build.ninja"""'], {}), "('build/build.ninja')\n", (11276, 11297), False, 'import os\n'), ((1017, 1043), 'os.path.join', 'os.path.join', (['dir', 'thefile'], {}), '(dir, thefile)\n', (1029, 1043), False, 'import os\n'), ((2551, 2576), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2566, 2576), False, 'import os\n'), ((3056, 3072), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (3067, 3072), False, 'import os\n'), ((4509, 4525), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (4523, 4525), True, 'import numpy as np\n'), ((4680, 4705), 'os.getenv', 'os.getenv', (['"""CPPFLAGS"""', '""""""'], {}), "('CPPFLAGS', '')\n", (4689, 4705), False, 'import os\n'), ((7565, 7597), 'os.path.exists', 'os.path.exists', (['expected_wrapper'], {}), '(expected_wrapper)\n', (7579, 7597), False, 'import os\n'), ((9075, 9107), 'os.path.isfile', 'os.path.isfile', (['cmake_cache_file'], {}), '(cmake_cache_file)\n', (9089, 9107), False, 'import os\n'), 
((9117, 9144), 'os.remove', 'os.remove', (['cmake_cache_file'], {}), '(cmake_cache_file)\n', (9126, 9144), False, 'import os\n'), ((11307, 11342), 'os.utime', 'os.utime', (['"""build/build.ninja"""', 'None'], {}), "('build/build.ninja', None)\n", (11315, 11342), False, 'import os\n'), ((11391, 11422), 'glob.glob', 'glob', (['"""build/caffe2/proto/*.py"""'], {}), "('build/caffe2/proto/*.py')\n", (11395, 11422), False, 'from glob import glob\n'), ((927, 961), 'os.environ.get', 'os.environ.get', (['"""PATH"""', 'os.defpath'], {}), "('PATH', os.defpath)\n", (941, 961), False, 'import os\n'), ((4648, 4671), 'os.getenv', 'os.getenv', (['"""CFLAGS"""', '""""""'], {}), "('CFLAGS', '')\n", (4657, 4671), False, 'import os\n'), ((5191, 5234), 'os.getenv', 'os.getenv', (['"""BUILDING_WITH_TORCH_LIBS"""', '"""ON"""'], {}), "('BUILDING_WITH_TORCH_LIBS', 'ON')\n", (5200, 5234), False, 'import os\n'), ((5330, 5360), 'os.getenv', 'os.getenv', (['"""BUILD_TORCH"""', '"""ON"""'], {}), "('BUILD_TORCH', 'ON')\n", (5339, 5360), False, 'import os\n'), ((5423, 5459), 'os.getenv', 'os.getenv', (['"""BUILD_SHARED_LIBS"""', '"""ON"""'], {}), "('BUILD_SHARED_LIBS', 'ON')\n", (5432, 5459), False, 'import os\n'), ((5675, 5716), 'os.getenv', 'os.getenv', (['"""ONNX_NAMESPACE"""', '"""onnx_torch"""'], {}), "('ONNX_NAMESPACE', 'onnx_torch')\n", (5684, 5716), False, 'import os\n'), ((5734, 5761), 'os.getenv', 'os.getenv', (['"""ONNX_ML"""', '(False)'], {}), "('ONNX_ML', False)\n", (5743, 5761), False, 'import os\n'), ((7041, 7058), 'os.getenv', 'os.getenv', (['"""BLAS"""'], {}), "('BLAS')\n", (7050, 7058), False, 'import os\n'), ((7155, 7177), 'os.getenv', 'os.getenv', (['"""USE_REDIS"""'], {}), "('USE_REDIS')\n", (7164, 7177), False, 'import os\n'), ((7196, 7217), 'os.getenv', 'os.getenv', (['"""USE_GLOG"""'], {}), "('USE_GLOG')\n", (7205, 7217), False, 'import os\n'), ((7238, 7261), 'os.getenv', 'os.getenv', (['"""USE_GFLAGS"""'], {}), "('USE_GFLAGS')\n", (7247, 7261), False, 'import os\n'), 
((7278, 7297), 'os.getenv', 'os.getenv', (['"""WERROR"""'], {}), "('WERROR')\n", (7287, 7297), False, 'import os\n'), ((9156, 9188), 'os.path.exists', 'os.path.exists', (['cmake_cache_file'], {}), '(cmake_cache_file)\n', (9170, 9188), False, 'import os\n'), ((9828, 9876), 'subprocess.check_call', 'check_call', (['build_cmd'], {'cwd': 'build_dir', 'env': 'my_env'}), '(build_cmd, cwd=build_dir, env=my_env)\n', (9838, 9876), False, 'from subprocess import check_call, check_output\n'), ((10019, 10067), 'subprocess.check_call', 'check_call', (['build_cmd'], {'cwd': 'build_dir', 'env': 'my_env'}), '(build_cmd, cwd=build_dir, env=my_env)\n', (10029, 10067), False, 'from subprocess import check_call, check_output\n'), ((10240, 10288), 'subprocess.check_call', 'check_call', (['ninja_cmd'], {'cwd': 'build_dir', 'env': 'my_env'}), '(ninja_cmd, cwd=build_dir, env=my_env)\n', (10250, 10288), False, 'from subprocess import check_call, check_output\n'), ((1255, 1275), 'os.path.exists', 'os.path.exists', (['name'], {}), '(name)\n', (1269, 1275), False, 'import os\n'), ((1280, 1314), 'os.access', 'os.access', (['name', '(os.F_OK | os.X_OK)'], {}), '(name, os.F_OK | os.X_OK)\n', (1289, 1314), False, 'import os\n'), ((4815, 4850), 'os.getenv', 'os.getenv', (['"""MSVC_Z7_OVERRIDE"""', '"""ON"""'], {}), "('MSVC_Z7_OVERRIDE', 'ON')\n", (4824, 4850), False, 'import os\n'), ((5119, 5155), 'distutils.sysconfig.get_python_inc', 'distutils.sysconfig.get_python_inc', ([], {}), '()\n', (5153, 5155), False, 'import distutils\n'), ((6956, 6986), 'os.getenv', 'os.getenv', (['"""CMAKE_PREFIX_PATH"""'], {}), "('CMAKE_PREFIX_PATH')\n", (6965, 6986), False, 'import os\n'), ((6990, 7026), 'distutils.sysconfig.get_python_lib', 'distutils.sysconfig.get_python_lib', ([], {}), '()\n', (7024, 7026), False, 'import distutils\n'), ((7101, 7134), 'os.getenv', 'os.getenv', (['"""CUDA_NVCC_EXECUTABLE"""'], {}), "('CUDA_NVCC_EXECUTABLE')\n", (7110, 7134), False, 'import os\n'), ((9211, 9246), 'os.path.exists', 
'os.path.exists', (['"""build/build.ninja"""'], {}), "('build/build.ninja')\n", (9225, 9246), False, 'import os\n'), ((1111, 1140), 'os.environ.get', 'os.environ.get', (['"""PATHEXT"""', '""""""'], {}), "('PATHEXT', '')\n", (1125, 1140), False, 'import os\n'), ((1343, 1362), 'os.path.isdir', 'os.path.isdir', (['name'], {}), '(name)\n', (1356, 1362), False, 'import os\n'), ((1451, 1483), 'subprocess.check_output', 'check_output', (["[cmd, '--version']"], {}), "([cmd, '--version'])\n", (1463, 1483), False, 'from subprocess import check_call, check_output\n'), ((1941, 1962), 'distutils.version.LooseVersion', 'LooseVersion', (['"""3.5.0"""'], {}), "('3.5.0')\n", (1953, 1962), False, 'from distutils.version import LooseVersion\n'), ((9664, 9691), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (9689, 9691), False, 'import multiprocessing\n'), ((9923, 9950), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (9948, 9950), False, 'import multiprocessing\n'), ((10342, 10369), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (10367, 10369), False, 'import multiprocessing\n'), ((11650, 11678), 'os.path.basename', 'os.path.basename', (['proto_file'], {}), '(proto_file)\n', (11666, 11678), False, 'import os\n')] |
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import numpy as np
from .graph_utils import vertex_by_token_position
def show_relation_graph(g):
"""
Displays the relation graph using matplotlib.
:param g: input graph.
"""
if "vertexSet" not in g:
vertex_indices = {str(indices):indices for e in g["edgeSet"] for indices in [e["left"]] + [e["right"]] }
g["vertexSet"] = []
for k, v in vertex_indices.items():
g["vertexSet"].append({"lexicalInput": " ".join([g['tokens'][idx] for idx in v])})
fig, ax = plt.subplots()
step = np.pi*2 / float(len(g["vertexSet"]))
print(step, len(g["vertexSet"]))
x, y = 0.0, 0.0
vertex_coordinates = {}
for i, vertex in enumerate(g["vertexSet"]):
x, y = 1 - np.cos(step*i)*2, 1 - np.sin(step*i)
vertex_coordinates[vertex["lexicalInput"]] = x, y
circle = mpatches.Circle([x,y], 0.1, fc = "none")
ax.add_patch(circle)
x, y = 1 - np.cos(step*i)*2.5, 1 - np.sin(step*i)*1.25
plt.text(x, y, vertex["lexicalInput"], ha="center", family='sans-serif', size=10)
for edge in g["edgeSet"]:
left_vertex = vertex_by_token_position(g, edge['left']) if len(edge['left']) > 0 else {}
right_vertex = vertex_by_token_position(g, edge['right']) if len(edge['right']) > 0 else {}
if left_vertex == {}:
left_vertex['lexicalInput'] = " ".join([g['tokens'][idx] for idx in edge['left']])
if right_vertex == {}:
right_vertex['lexicalInput'] = " ".join([g['tokens'][idx] for idx in edge['right']])
x, y = list(zip(vertex_coordinates[left_vertex["lexicalInput"]], vertex_coordinates[right_vertex["lexicalInput"]]))
line = mlines.Line2D(x, y, lw=1., alpha=1)
ax.add_line(line)
property_kbid = "" if 'kbID' not in edge else edge['kbID']
property_label = "" if 'lexicalInput' not in edge else edge['lexicalInput']
plt.text(np.average(x), np.average(y), property_kbid + ":" + property_label, ha="center", family='sans-serif', size=10)
plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
plt.axis('equal')
plt.axis('off')
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.average",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.axis",
"matplotlib.patches.Circle",
"matplotlib.pyplot.text",
"numpy.sin",
"numpy.cos",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplots"
] | [((622, 636), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (634, 636), True, 'import matplotlib.pyplot as plt\n'), ((2140, 2193), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0)', 'right': '(1)', 'bottom': '(0)', 'top': '(1)'}), '(left=0, right=1, bottom=0, top=1)\n', (2159, 2193), True, 'import matplotlib.pyplot as plt\n'), ((2198, 2215), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2206, 2215), True, 'import matplotlib.pyplot as plt\n'), ((2220, 2235), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2228, 2235), True, 'import matplotlib.pyplot as plt\n'), ((2241, 2251), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2249, 2251), True, 'import matplotlib.pyplot as plt\n'), ((950, 989), 'matplotlib.patches.Circle', 'mpatches.Circle', (['[x, y]', '(0.1)'], {'fc': '"""none"""'}), "([x, y], 0.1, fc='none')\n", (965, 989), True, 'import matplotlib.patches as mpatches\n'), ((1091, 1176), 'matplotlib.pyplot.text', 'plt.text', (['x', 'y', "vertex['lexicalInput']"], {'ha': '"""center"""', 'family': '"""sans-serif"""', 'size': '(10)'}), "(x, y, vertex['lexicalInput'], ha='center', family='sans-serif',\n size=10)\n", (1099, 1176), True, 'import matplotlib.pyplot as plt\n'), ((1794, 1830), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['x', 'y'], {'lw': '(1.0)', 'alpha': '(1)'}), '(x, y, lw=1.0, alpha=1)\n', (1807, 1830), True, 'import matplotlib.lines as mlines\n'), ((2024, 2037), 'numpy.average', 'np.average', (['x'], {}), '(x)\n', (2034, 2037), True, 'import numpy as np\n'), ((2039, 2052), 'numpy.average', 'np.average', (['y'], {}), '(y)\n', (2049, 2052), True, 'import numpy as np\n'), ((860, 876), 'numpy.sin', 'np.sin', (['(step * i)'], {}), '(step * i)\n', (866, 876), True, 'import numpy as np\n'), ((838, 854), 'numpy.cos', 'np.cos', (['(step * i)'], {}), '(step * i)\n', (844, 854), True, 'import numpy as np\n'), ((1039, 1055), 'numpy.cos', 'np.cos', 
(['(step * i)'], {}), '(step * i)\n', (1045, 1055), True, 'import numpy as np\n'), ((1063, 1079), 'numpy.sin', 'np.sin', (['(step * i)'], {}), '(step * i)\n', (1069, 1079), True, 'import numpy as np\n')] |
from matplotlib import pyplot as plt
import numpy as np
import codecs
import argparse
parse = argparse.ArgumentParser()
parse.add_argument('-same', nargs='+', help='translations')
parse.add_argument('-b', type=int, default=5, help='#bins')
parse.add_argument('-gap', type=int, default=10, help='bin width')
parse.add_argument('-escape', type=bool, default=False, help='escape long sentences')
args = parse.parse_args()
b = args.b
gap = args.gap
datas = []
maxlens = [0] * len(args.same)
for idx, f in enumerate(args.same):
with codecs.open(f, 'r', encoding='utf-8') as f:
data = []
for line in f.readlines():
es = line.strip().split()
maxlens[idx] = max(maxlens[idx], len(es))
data.append([eval(e) for e in es])
datas.append(data)
for idx, (data, f) in enumerate(zip(datas, args.same)):
s, a = 0., 0.
for l in data:
s += sum(l)
a += len(l)
print('{} (overall) accuracy: {:.4f}'.format(f, s / a))
same = [0.] * b
all_ = [0.] * b
for l in data:
for i, e in enumerate(l):
if args.escape:
try:
same[i // gap] += e
all_[i // gap] += 1
except IndexError:
continue
else:
same[min(i // gap, b - 1)] += e
all_[min(i // gap, b - 1)] += 1
acc = np.asarray(same) / np.asarray(all_)
print('{} (per pos) accuracy: {}'.format(f, acc))
plt.plot(np.arange(1, b + 1) * gap, acc, label=f)
plt.xlabel('Position')
plt.ylabel('Accuracy')
plt.title('Accuracy vs. Position')
plt.legend()
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"codecs.open",
"matplotlib.pyplot.legend",
"numpy.asarray",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((96, 121), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (119, 121), False, 'import argparse\n'), ((1549, 1571), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (1559, 1571), True, 'from matplotlib import pyplot as plt\n'), ((1572, 1594), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (1582, 1594), True, 'from matplotlib import pyplot as plt\n'), ((1595, 1629), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy vs. Position"""'], {}), "('Accuracy vs. Position')\n", (1604, 1629), True, 'from matplotlib import pyplot as plt\n'), ((1630, 1642), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1640, 1642), True, 'from matplotlib import pyplot as plt\n'), ((1643, 1653), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1651, 1653), True, 'from matplotlib import pyplot as plt\n'), ((536, 573), 'codecs.open', 'codecs.open', (['f', '"""r"""'], {'encoding': '"""utf-8"""'}), "(f, 'r', encoding='utf-8')\n", (547, 573), False, 'import codecs\n'), ((1404, 1420), 'numpy.asarray', 'np.asarray', (['same'], {}), '(same)\n', (1414, 1420), True, 'import numpy as np\n'), ((1423, 1439), 'numpy.asarray', 'np.asarray', (['all_'], {}), '(all_)\n', (1433, 1439), True, 'import numpy as np\n'), ((1507, 1526), 'numpy.arange', 'np.arange', (['(1)', '(b + 1)'], {}), '(1, b + 1)\n', (1516, 1526), True, 'import numpy as np\n')] |
import argparse
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.optim
from torch.utils.data import DataLoader
from prototree.prototree import ProtoTree
from util.log import Log
@torch.no_grad()
def eval(tree: ProtoTree,
test_loader: DataLoader,
epoch,
device,
log: Log = None,
sampling_strategy: str = 'distributed',
log_prefix: str = 'log_eval_epochs',
progress_prefix: str = 'Eval Epoch'
) -> dict:
tree = tree.to(device)
# Keep an info dict about the procedure
info = dict()
if sampling_strategy != 'distributed':
info['out_leaf_ix'] = []
# Build a confusion matrix
cm = np.zeros((tree._num_classes, tree._num_classes), dtype=int)
# Make sure the model is in evaluation mode
tree.eval()
# Show progress on progress bar
test_iter = tqdm(enumerate(test_loader),
total=len(test_loader),
desc=progress_prefix+' %s'%epoch,
ncols=0)
# Iterate through the test set
for i, (xs, ys) in test_iter:
xs, ys = xs.to(device), ys.to(device)
# Use the model to classify this batch of input data
out, test_info = tree.forward(xs, sampling_strategy)
ys_pred = torch.argmax(out, dim=1)
# Update the confusion matrix
cm_batch = np.zeros((tree._num_classes, tree._num_classes), dtype=int)
for y_pred, y_true in zip(ys_pred, ys):
cm[y_true][y_pred] += 1
cm_batch[y_true][y_pred] += 1
acc = acc_from_cm(cm_batch)
test_iter.set_postfix_str(
f'Batch [{i + 1}/{len(test_iter)}], Acc: {acc:.3f}'
)
# keep list of leaf indices where test sample ends up when deterministic routing is used.
if sampling_strategy != 'distributed':
info['out_leaf_ix'] += test_info['out_leaf_ix']
del out
del ys_pred
del test_info
info['confusion_matrix'] = cm
info['test_accuracy'] = acc_from_cm(cm)
log.log_message("\nEpoch %s - Test accuracy with %s routing: "%(epoch, sampling_strategy)+str(info['test_accuracy']))
return info
@torch.no_grad()
def eval_fidelity(tree: ProtoTree,
test_loader: DataLoader,
device,
log: Log = None,
progress_prefix: str = 'Fidelity'
) -> dict:
tree = tree.to(device)
# Keep an info dict about the procedure
info = dict()
# Make sure the model is in evaluation mode
tree.eval()
# Show progress on progress bar
test_iter = tqdm(enumerate(test_loader),
total=len(test_loader),
desc=progress_prefix,
ncols=0)
distr_samplemax_fidelity = 0
distr_greedy_fidelity = 0
# Iterate through the test set
for i, (xs, ys) in test_iter:
xs, ys = xs.to(device), ys.to(device)
# Use the model to classify this batch of input data, with 3 types of routing
out_distr, _ = tree.forward(xs, 'distributed')
ys_pred_distr = torch.argmax(out_distr, dim=1)
out_samplemax, _ = tree.forward(xs, 'sample_max')
ys_pred_samplemax = torch.argmax(out_samplemax, dim=1)
out_greedy, _ = tree.forward(xs, 'greedy')
ys_pred_greedy = torch.argmax(out_greedy, dim=1)
# Calculate fidelity
distr_samplemax_fidelity += torch.sum(torch.eq(ys_pred_samplemax, ys_pred_distr)).item()
distr_greedy_fidelity += torch.sum(torch.eq(ys_pred_greedy, ys_pred_distr)).item()
# Update the progress bar
test_iter.set_postfix_str(
f'Batch [{i + 1}/{len(test_iter)}]'
)
del out_distr
del out_samplemax
del out_greedy
distr_samplemax_fidelity = distr_samplemax_fidelity/float(len(test_loader.dataset))
distr_greedy_fidelity = distr_greedy_fidelity/float(len(test_loader.dataset))
info['distr_samplemax_fidelity'] = distr_samplemax_fidelity
info['distr_greedy_fidelity'] = distr_greedy_fidelity
log.log_message("Fidelity between standard distributed routing and sample_max routing: "+str(distr_samplemax_fidelity))
log.log_message("Fidelity between standard distributed routing and greedy routing: "+str(distr_greedy_fidelity))
return info
@torch.no_grad()
def eval_ensemble(trees: list, test_loader: DataLoader, device, log: Log, args: argparse.Namespace, sampling_strategy: str = 'distributed', progress_prefix: str = 'Eval Ensemble'):
    """Evaluate an ensemble of ProtoTrees on the test set.

    The member trees' output distributions are averaged per batch and the
    argmax of the mean distribution is taken as the ensemble prediction.
    Returns an info dict with the confusion matrix and the test accuracy.
    """
    # Keep an info dict about the procedure
    info = dict()
    # Build a confusion matrix (rows: true label, columns: prediction)
    num_classes = trees[0]._num_classes
    cm = np.zeros((num_classes, num_classes), dtype=int)
    # Show progress on progress bar
    test_iter = tqdm(enumerate(test_loader),
                     total=len(test_loader),
                     desc=progress_prefix,
                     ncols=0)
    # Iterate through the test set
    for i, (xs, ys) in test_iter:
        xs = xs.to(device)
        ys = ys.to(device)
        member_outputs = []
        for member in trees:
            # Make sure each member is in evaluation mode and on the device
            member.eval()
            member = member.to(device)
            # Classify this batch with the requested routing strategy
            logits, _ = member.forward(xs, sampling_strategy)
            member_outputs.append(logits)
            del logits
        # Average the member distributions and take the ensemble argmax
        mean_out = torch.mean(torch.stack(member_outputs, dim=0), dim=0)
        ys_pred = torch.argmax(mean_out, dim=1)
        for true_label, pred_label in zip(ys, ys_pred):
            cm[true_label][pred_label] += 1
        test_iter.set_postfix_str(
            f'Batch [{i + 1}/{len(test_iter)}]'
        )
        del member_outputs
    info['confusion_matrix'] = cm
    info['test_accuracy'] = acc_from_cm(cm)
    log.log_message("Ensemble accuracy with %s routing: %s"%(sampling_strategy, str(info['test_accuracy'])))
    return info
def acc_from_cm(cm: np.ndarray) -> float:
    """
    Compute the accuracy from the confusion matrix
    :param cm: confusion matrix (square; rows are true labels, columns predictions)
    :return: the accuracy score in [0, 1]; defined as 1.0 for an empty matrix
    """
    assert len(cm.shape) == 2 and cm.shape[0] == cm.shape[1]
    total = np.sum(cm)
    if total == 0:
        # No samples at all: treat as perfect accuracy (avoids division by zero)
        return 1.0
    # Correct predictions are on the diagonal
    return np.trace(cm) / total
| [
"torch.mean",
"torch.eq",
"numpy.sum",
"torch.stack",
"torch.argmax",
"numpy.zeros",
"torch.no_grad"
] | [((236, 251), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (249, 251), False, 'import torch\n'), ((2290, 2305), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2303, 2305), False, 'import torch\n'), ((4479, 4494), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4492, 4494), False, 'import torch\n'), ((749, 808), 'numpy.zeros', 'np.zeros', (['(tree._num_classes, tree._num_classes)'], {'dtype': 'int'}), '((tree._num_classes, tree._num_classes), dtype=int)\n', (757, 808), True, 'import numpy as np\n'), ((4783, 4850), 'numpy.zeros', 'np.zeros', (['(trees[0]._num_classes, trees[0]._num_classes)'], {'dtype': 'int'}), '((trees[0]._num_classes, trees[0]._num_classes), dtype=int)\n', (4791, 4850), True, 'import numpy as np\n'), ((6422, 6432), 'numpy.sum', 'np.sum', (['cm'], {}), '(cm)\n', (6428, 6432), True, 'import numpy as np\n'), ((1369, 1393), 'torch.argmax', 'torch.argmax', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (1381, 1393), False, 'import torch\n'), ((1455, 1514), 'numpy.zeros', 'np.zeros', (['(tree._num_classes, tree._num_classes)'], {'dtype': 'int'}), '((tree._num_classes, tree._num_classes), dtype=int)\n', (1463, 1514), True, 'import numpy as np\n'), ((3214, 3244), 'torch.argmax', 'torch.argmax', (['out_distr'], {'dim': '(1)'}), '(out_distr, dim=1)\n', (3226, 3244), False, 'import torch\n'), ((3335, 3369), 'torch.argmax', 'torch.argmax', (['out_samplemax'], {'dim': '(1)'}), '(out_samplemax, dim=1)\n', (3347, 3369), False, 'import torch\n'), ((3450, 3481), 'torch.argmax', 'torch.argmax', (['out_greedy'], {'dim': '(1)'}), '(out_greedy, dim=1)\n', (3462, 3481), False, 'import torch\n'), ((5549, 5573), 'torch.stack', 'torch.stack', (['outs'], {'dim': '(0)'}), '(outs, dim=0)\n', (5560, 5573), False, 'import torch\n'), ((5606, 5632), 'torch.mean', 'torch.mean', (['stacked'], {'dim': '(0)'}), '(stacked, dim=0)\n', (5616, 5632), False, 'import torch\n'), ((3569, 3611), 'torch.eq', 'torch.eq', (['ys_pred_samplemax', 'ys_pred_distr'], {}), 
'(ys_pred_samplemax, ys_pred_distr)\n', (3577, 3611), False, 'import torch\n'), ((3664, 3703), 'torch.eq', 'torch.eq', (['ys_pred_greedy', 'ys_pred_distr'], {}), '(ys_pred_greedy, ys_pred_distr)\n', (3672, 3703), False, 'import torch\n')] |
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.metrics import pairwise_distances, pairwise_distances_argmin_min
from okzm.okzm import OnlineKZMed, _trivial_k_median
from utils import debug_print
class TestOKZM(unittest.TestCase):
    """Unit tests for the OnlineKZMed clustering model (k-median with z outliers)."""
    def test_trivial_k_median(self):
        """_trivial_k_median should open every facility and assign each client to its nearest one."""
        C = np.random.randn(10, 2)
        F1 = np.random.randn(5, 2)
        F2 = np.random.randn(15, 2)
        opened, asgnt = _trivial_k_median(C, F1)
        true_asgnt, _ = pairwise_distances_argmin_min(C, F1)
        assert len(opened) == 5
        assert_array_equal(asgnt, true_asgnt)
        opened, asgnt = _trivial_k_median(C, F2)
        true_asgnt, _ = pairwise_distances_argmin_min(C, F2)
        assert len(opened) == 10
        assert_array_equal(asgnt, true_asgnt)
    def check_okzm(self, okzm, C, F, inlier_val, outlier_val, debugging, F_is_C=False, check_centers=True):
        """Fit ``okzm`` on clients C / facilities F and verify cost, centers and outliers.

        Centers are the facilities with all coordinates below ``inlier_val``;
        outliers are the clients with a coordinate above ``outlier_val``.
        """
        # permute C and F
        np.random.shuffle(F)
        np.random.shuffle(C)
        # obtain outlier indices and cluster center indices
        center_indices = np.where(np.abs(F).max(axis=1) < inlier_val)[0]
        is_outlier = np.abs(C).max(axis=1) > outlier_val
        _, optimal_cost = pairwise_distances_argmin_min(C[np.logical_not(is_outlier)], F[center_indices])
        optimal_cost = optimal_cost.sum()
        outlier_indices = np.where(is_outlier)[0]
        debug_print("True outliers: {}".format(outlier_indices), debugging)
        debug_print("Optimal centers: {}".format(center_indices), debugging)
        debug_print("Optimal cost (removing outliers): {}".format(optimal_cost), debugging)
        dist_mat = pairwise_distances(C, F)
        okzm.fit(C, F, distances=dist_mat, F_is_C=F_is_C)
        debug_print("Identified outliers: {}".format(okzm.outlier_indices), debugging)
        debug_print("Cluster centers: {}".format(okzm.cluster_center_indices), debugging)
        debug_print("Clustering cost: {}".format(okzm.cost('p')), debugging)
        # the fitted cost must stay within a constant factor of the optimum
        assert okzm.cost('p') < 5 * optimal_cost
        if not check_centers:
            return None
        # check centers
        # Some times the centers doesn't match but that's because okzm can remove more than n_outliers outliers
        if set(okzm.cluster_center_indices) != set(center_indices) and okzm.cost('p') > 2 * optimal_cost:
            print("Cluster center indices doesn't match:\n\tfitted center: {}\n\ttrue centers: {}"
                  .format(set(okzm.cluster_center_indices), set(center_indices)))
            assert False
        # check outliers
        if not set(okzm.outlier_indices).issuperset(outlier_indices):
            print("Outlier indices doesn't match:\n\tfitted outliers: {}\n\ttrue outliers: {}"
                  .format(set(okzm.outlier_indices), set(outlier_indices)))
            assert False
    def test_toy_data(self):
        """End-to-end check on a hand-crafted data set with 4 clusters and 2 far outliers."""
        debugging = False
        C = np.array([[10.1, 10.1],  # 0: cluster 1
                      [-9.9, -10],  # 1: cluster 4
                      [-1000, 0],  # 2: outlier 1
                      [-10.1, 9.9],  # 3: cluster 3
                      [10.1, 9.9],  # 4: cluster 1
                      [10.1, -10.1],  # 5: cluster 2
                      [-10.1, 10.1],  # 6: cluster 3
                      [9.9, 10],  # 7: cluster 1
                      [-1, 1000],  # 8: outlier 2
                      [-9.9, 10],  # 9: cluster 3
                      [-10.1, -10.1],  # 10: cluster 4
                      [9.9, -10],  # 11: cluster 2
                      [-10.1, -9.9],  # 12: cluster 4
                      [10.1, -9.9],  # 13: cluster 2
                      [10, 10],
                      [9, -11],
                      [-9, 11],
                      [-11, -9]
                      ])
        F = np.array([[9.9, 10],  # 0: cluster 1
                      [10, -10],  # 1: cluster 2
                      [-9.9, 10],  # 2: cluster 3
                      [-10, -10],  # 3: cluster 4
                      [500, 0],  # 4
                      [-500, 0],  # 5
                      [0, 500],  # 6
                      [0, -500]])  # 7
        n_clusters = 4
        n_outliers = 2
        epsilon = 0.1
        gamma = 0
        okzm1 = OnlineKZMed(n_clusters, n_outliers=n_outliers,
                            epsilon=epsilon, gamma=gamma,
                            debugging=debugging)
        self.check_okzm(okzm1, C, F, inlier_val=20, outlier_val=800, debugging=debugging)
        okzm2 = OnlineKZMed(n_clusters, n_outliers=n_outliers,
                            epsilon=epsilon, gamma=gamma,
                            random_swap_out=None, random_swap_in=None,
                            debugging=debugging)
        self.check_okzm(okzm2, C, F, inlier_val=20, outlier_val=800, debugging=debugging)
    def test_random_data(self):
        """Gaussian clusters around 4 of 8 candidate facilities, plus 4 far outliers."""
        debugging = False
        F = np.array([[9.9, 10],  # 0: cluster 1
                      [10, -10],  # 1: cluster 2
                      [-9.9, 10],  # 2: cluster 3
                      [-10, -10],  # 3: cluster 4
                      [500, 0],  # 4
                      [-500, 0],  # 5
                      [0, 500],  # 6
                      [0, -500]])  # 7
        cov = np.identity(2) * 0.5
        clusters = [np.random.multivariate_normal(mean=F[i], cov=cov, size=50)
                    for i in range(4)]
        C = np.vstack(clusters)
        outliers = np.array([[-1000, 0],
                             [0, 1000],
                             [1000, 100],
                             [100, -1000]])
        C = np.vstack((C, outliers))
        n_clusters = 4
        n_outliers = 4
        epsilon = 0.1
        gamma = 0
        okzm1 = OnlineKZMed(n_clusters, n_outliers=n_outliers,
                            epsilon=epsilon, gamma=gamma,
                            debugging=debugging)
        self.check_okzm(okzm1, C, F, inlier_val=20, outlier_val=800, debugging=debugging)
        okzm2 = OnlineKZMed(n_clusters, n_outliers=n_outliers,
                            epsilon=epsilon, gamma=gamma,
                            random_swap_out=5, random_swap_in=20,
                            debugging=debugging)
        self.check_okzm(okzm2, C, F, inlier_val=20, outlier_val=800, debugging=debugging)
    def test_evolving_F_and_z(self):
        """Use the client set itself as the (evolving) facility set, with a growing outlier budget."""
        debugging = True
        F = np.array([[9.9, 10],  # 0: cluster 1
                      [10, -10],  # 1: cluster 2
                      [-9.9, 10],  # 2: cluster 3
                      [-10, -10]  # 3: cluster 4
                      ])
        cov = np.identity(2) * 0.5
        clusters = [np.random.multivariate_normal(mean=F[i], cov=cov, size=50)
                    for i in range(4)]
        C = np.vstack(clusters)
        # add outliers to data
        outlier_frac = 0.1
        n_outliers = int(outlier_frac * len(C))
        outlier_range = np.hstack([np.arange(-1200, -800), np.arange(800, 1200)])
        outliers = np.random.choice(outlier_range, size=n_outliers).reshape(int(n_outliers/2), 2)
        C = np.vstack((C, outliers))
        np.random.shuffle(C)
        _, optimal_costs = pairwise_distances_argmin_min(C, F)
        optimal_cost = np.sort(optimal_costs)[:len(C)-n_outliers].sum()
        # model parameters
        n_clusters = 4
        n_outliers_func = lambda j: int(outlier_frac * j)
        epsilon = 0.1
        gamma = 0
        dist_mat = pairwise_distances(C, C)
        # evolving F with fixed z
        okzm1 = OnlineKZMed(n_clusters, n_outliers=n_outliers,
                            epsilon=epsilon, gamma=gamma,
                            debugging=debugging)
        okzm1.fit(C, C.copy(), distances=dist_mat, F_is_C=True)
        assert 5 * optimal_cost > okzm1.cost('p')
        # evolving F with evolving z
        okzm2 = OnlineKZMed(n_clusters, n_outliers=n_outliers,
                            n_outliers_func=n_outliers_func,
                            epsilon=epsilon, gamma=gamma,
                            random_swap_out=5, random_swap_in=20,
                            debugging=debugging)
        okzm2.fit(C, C.copy(), distances=dist_mat, F_is_C=True)
        assert 5 * optimal_cost > okzm2.cost('p')
if __name__ == '__main__':
    # Run the test suite when the file is executed as a script
    unittest.main()
| [
"unittest.main",
"sklearn.metrics.pairwise_distances_argmin_min",
"numpy.abs",
"numpy.random.randn",
"sklearn.metrics.pairwise_distances",
"numpy.testing.assert_array_equal",
"numpy.logical_not",
"okzm.okzm._trivial_k_median",
"numpy.identity",
"numpy.sort",
"numpy.where",
"numpy.array",
"nu... | [((8205, 8220), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8218, 8220), False, 'import unittest\n'), ((328, 350), 'numpy.random.randn', 'np.random.randn', (['(10)', '(2)'], {}), '(10, 2)\n', (343, 350), True, 'import numpy as np\n'), ((364, 385), 'numpy.random.randn', 'np.random.randn', (['(5)', '(2)'], {}), '(5, 2)\n', (379, 385), True, 'import numpy as np\n'), ((399, 421), 'numpy.random.randn', 'np.random.randn', (['(15)', '(2)'], {}), '(15, 2)\n', (414, 421), True, 'import numpy as np\n'), ((447, 471), 'okzm.okzm._trivial_k_median', '_trivial_k_median', (['C', 'F1'], {}), '(C, F1)\n', (464, 471), False, 'from okzm.okzm import OnlineKZMed, _trivial_k_median\n'), ((496, 532), 'sklearn.metrics.pairwise_distances_argmin_min', 'pairwise_distances_argmin_min', (['C', 'F1'], {}), '(C, F1)\n', (525, 532), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_argmin_min\n'), ((573, 610), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['asgnt', 'true_asgnt'], {}), '(asgnt, true_asgnt)\n', (591, 610), False, 'from numpy.testing import assert_array_equal\n'), ((636, 660), 'okzm.okzm._trivial_k_median', '_trivial_k_median', (['C', 'F2'], {}), '(C, F2)\n', (653, 660), False, 'from okzm.okzm import OnlineKZMed, _trivial_k_median\n'), ((685, 721), 'sklearn.metrics.pairwise_distances_argmin_min', 'pairwise_distances_argmin_min', (['C', 'F2'], {}), '(C, F2)\n', (714, 721), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_argmin_min\n'), ((763, 800), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['asgnt', 'true_asgnt'], {}), '(asgnt, true_asgnt)\n', (781, 800), False, 'from numpy.testing import assert_array_equal\n'), ((944, 964), 'numpy.random.shuffle', 'np.random.shuffle', (['F'], {}), '(F)\n', (961, 964), True, 'import numpy as np\n'), ((973, 993), 'numpy.random.shuffle', 'np.random.shuffle', (['C'], {}), '(C)\n', (990, 993), True, 'import numpy as np\n'), ((1648, 1672), 
'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['C', 'F'], {}), '(C, F)\n', (1666, 1672), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_argmin_min\n'), ((2897, 3149), 'numpy.array', 'np.array', (['[[10.1, 10.1], [-9.9, -10], [-1000, 0], [-10.1, 9.9], [10.1, 9.9], [10.1, -\n 10.1], [-10.1, 10.1], [9.9, 10], [-1, 1000], [-9.9, 10], [-10.1, -10.1],\n [9.9, -10], [-10.1, -9.9], [10.1, -9.9], [10, 10], [9, -11], [-9, 11],\n [-11, -9]]'], {}), '([[10.1, 10.1], [-9.9, -10], [-1000, 0], [-10.1, 9.9], [10.1, 9.9],\n [10.1, -10.1], [-10.1, 10.1], [9.9, 10], [-1, 1000], [-9.9, 10], [-10.1,\n -10.1], [9.9, -10], [-10.1, -9.9], [10.1, -9.9], [10, 10], [9, -11], [-\n 9, 11], [-11, -9]])\n', (2905, 3149), True, 'import numpy as np\n'), ((3774, 3876), 'numpy.array', 'np.array', (['[[9.9, 10], [10, -10], [-9.9, 10], [-10, -10], [500, 0], [-500, 0], [0, 500\n ], [0, -500]]'], {}), '([[9.9, 10], [10, -10], [-9.9, 10], [-10, -10], [500, 0], [-500, 0],\n [0, 500], [0, -500]])\n', (3782, 3876), True, 'import numpy as np\n'), ((4214, 4315), 'okzm.okzm.OnlineKZMed', 'OnlineKZMed', (['n_clusters'], {'n_outliers': 'n_outliers', 'epsilon': 'epsilon', 'gamma': 'gamma', 'debugging': 'debugging'}), '(n_clusters, n_outliers=n_outliers, epsilon=epsilon, gamma=gamma,\n debugging=debugging)\n', (4225, 4315), False, 'from okzm.okzm import OnlineKZMed, _trivial_k_median\n'), ((4476, 4620), 'okzm.okzm.OnlineKZMed', 'OnlineKZMed', (['n_clusters'], {'n_outliers': 'n_outliers', 'epsilon': 'epsilon', 'gamma': 'gamma', 'random_swap_out': 'None', 'random_swap_in': 'None', 'debugging': 'debugging'}), '(n_clusters, n_outliers=n_outliers, epsilon=epsilon, gamma=gamma,\n random_swap_out=None, random_swap_in=None, debugging=debugging)\n', (4487, 4620), False, 'from okzm.okzm import OnlineKZMed, _trivial_k_median\n'), ((4862, 4964), 'numpy.array', 'np.array', (['[[9.9, 10], [10, -10], [-9.9, 10], [-10, -10], [500, 0], [-500, 0], [0, 500\n ], [0, -500]]'], {}), '([[9.9, 
10], [10, -10], [-9.9, 10], [-10, -10], [500, 0], [-500, 0],\n [0, 500], [0, -500]])\n', (4870, 4964), True, 'import numpy as np\n'), ((5364, 5383), 'numpy.vstack', 'np.vstack', (['clusters'], {}), '(clusters)\n', (5373, 5383), True, 'import numpy as np\n'), ((5403, 5463), 'numpy.array', 'np.array', (['[[-1000, 0], [0, 1000], [1000, 100], [100, -1000]]'], {}), '([[-1000, 0], [0, 1000], [1000, 100], [100, -1000]])\n', (5411, 5463), True, 'import numpy as np\n'), ((5563, 5587), 'numpy.vstack', 'np.vstack', (['(C, outliers)'], {}), '((C, outliers))\n', (5572, 5587), True, 'import numpy as np\n'), ((5692, 5793), 'okzm.okzm.OnlineKZMed', 'OnlineKZMed', (['n_clusters'], {'n_outliers': 'n_outliers', 'epsilon': 'epsilon', 'gamma': 'gamma', 'debugging': 'debugging'}), '(n_clusters, n_outliers=n_outliers, epsilon=epsilon, gamma=gamma,\n debugging=debugging)\n', (5703, 5793), False, 'from okzm.okzm import OnlineKZMed, _trivial_k_median\n'), ((5953, 6092), 'okzm.okzm.OnlineKZMed', 'OnlineKZMed', (['n_clusters'], {'n_outliers': 'n_outliers', 'epsilon': 'epsilon', 'gamma': 'gamma', 'random_swap_out': '(5)', 'random_swap_in': '(20)', 'debugging': 'debugging'}), '(n_clusters, n_outliers=n_outliers, epsilon=epsilon, gamma=gamma,\n random_swap_out=5, random_swap_in=20, debugging=debugging)\n', (5964, 6092), False, 'from okzm.okzm import OnlineKZMed, _trivial_k_median\n'), ((6338, 6394), 'numpy.array', 'np.array', (['[[9.9, 10], [10, -10], [-9.9, 10], [-10, -10]]'], {}), '([[9.9, 10], [10, -10], [-9.9, 10], [-10, -10]])\n', (6346, 6394), True, 'import numpy as np\n'), ((6703, 6722), 'numpy.vstack', 'np.vstack', (['clusters'], {}), '(clusters)\n', (6712, 6722), True, 'import numpy as np\n'), ((7022, 7046), 'numpy.vstack', 'np.vstack', (['(C, outliers)'], {}), '((C, outliers))\n', (7031, 7046), True, 'import numpy as np\n'), ((7055, 7075), 'numpy.random.shuffle', 'np.random.shuffle', (['C'], {}), '(C)\n', (7072, 7075), True, 'import numpy as np\n'), ((7103, 7138), 
'sklearn.metrics.pairwise_distances_argmin_min', 'pairwise_distances_argmin_min', (['C', 'F'], {}), '(C, F)\n', (7132, 7138), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_argmin_min\n'), ((7379, 7403), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['C', 'C'], {}), '(C, C)\n', (7397, 7403), False, 'from sklearn.metrics import pairwise_distances, pairwise_distances_argmin_min\n'), ((7455, 7556), 'okzm.okzm.OnlineKZMed', 'OnlineKZMed', (['n_clusters'], {'n_outliers': 'n_outliers', 'epsilon': 'epsilon', 'gamma': 'gamma', 'debugging': 'debugging'}), '(n_clusters, n_outliers=n_outliers, epsilon=epsilon, gamma=gamma,\n debugging=debugging)\n', (7466, 7556), False, 'from okzm.okzm import OnlineKZMed, _trivial_k_median\n'), ((7777, 7954), 'okzm.okzm.OnlineKZMed', 'OnlineKZMed', (['n_clusters'], {'n_outliers': 'n_outliers', 'n_outliers_func': 'n_outliers_func', 'epsilon': 'epsilon', 'gamma': 'gamma', 'random_swap_out': '(5)', 'random_swap_in': '(20)', 'debugging': 'debugging'}), '(n_clusters, n_outliers=n_outliers, n_outliers_func=\n n_outliers_func, epsilon=epsilon, gamma=gamma, random_swap_out=5,\n random_swap_in=20, debugging=debugging)\n', (7788, 7954), False, 'from okzm.okzm import OnlineKZMed, _trivial_k_median\n'), ((1359, 1379), 'numpy.where', 'np.where', (['is_outlier'], {}), '(is_outlier)\n', (1367, 1379), True, 'import numpy as np\n'), ((5213, 5227), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (5224, 5227), True, 'import numpy as np\n'), ((5254, 5312), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': 'F[i]', 'cov': 'cov', 'size': '(50)'}), '(mean=F[i], cov=cov, size=50)\n', (5283, 5312), True, 'import numpy as np\n'), ((6552, 6566), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (6563, 6566), True, 'import numpy as np\n'), ((6593, 6651), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': 'F[i]', 'cov': 'cov', 'size': '(50)'}), 
'(mean=F[i], cov=cov, size=50)\n', (6622, 6651), True, 'import numpy as np\n'), ((1243, 1269), 'numpy.logical_not', 'np.logical_not', (['is_outlier'], {}), '(is_outlier)\n', (1257, 1269), True, 'import numpy as np\n'), ((6865, 6887), 'numpy.arange', 'np.arange', (['(-1200)', '(-800)'], {}), '(-1200, -800)\n', (6874, 6887), True, 'import numpy as np\n'), ((6889, 6909), 'numpy.arange', 'np.arange', (['(800)', '(1200)'], {}), '(800, 1200)\n', (6898, 6909), True, 'import numpy as np\n'), ((6931, 6979), 'numpy.random.choice', 'np.random.choice', (['outlier_range'], {'size': 'n_outliers'}), '(outlier_range, size=n_outliers)\n', (6947, 6979), True, 'import numpy as np\n'), ((1149, 1158), 'numpy.abs', 'np.abs', (['C'], {}), '(C)\n', (1155, 1158), True, 'import numpy as np\n'), ((7162, 7184), 'numpy.sort', 'np.sort', (['optimal_costs'], {}), '(optimal_costs)\n', (7169, 7184), True, 'import numpy as np\n'), ((1089, 1098), 'numpy.abs', 'np.abs', (['F'], {}), '(F)\n', (1095, 1098), True, 'import numpy as np\n')] |
#!/usr/bin/env python
u"""
mar_extrap_mean.py
Written by <NAME> (01/2021)
Interpolates mean MAR products to times and coordinates
Uses fast nearest-neighbor search algorithms
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.BallTree.html
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html
and inverse distance weighted interpolation to extrapolate spatially
INPUTS:
DIRECTORY: full path to the MAR data directory
<path_to_mar>/MARv3.11/Greenland/ERA_1958-2019-15km/daily_15km
<path_to_mar>/MARv3.11/Greenland/NCEP1_1948-2020_20km/daily_20km
<path_to_mar>/MARv3.10/Greenland/NCEP1_1948-2019_20km/daily_20km
<path_to_mar>/MARv3.9/Greenland/ERA_1958-2018_10km/daily_10km
EPSG: projection of input spatial coordinates
tdec: dates to interpolate in year-decimal
X: x-coordinates to interpolate in projection EPSG
Y: y-coordinates to interpolate in projection EPSG
OPTIONS:
XNAME: x-coordinate variable name in MAR netCDF4 file
    YNAME: y-coordinate variable name in MAR netCDF4 file
TIMENAME: time variable name in MAR netCDF4 file
VARIABLE: MAR product to interpolate
RANGE: start year and end year of mean file
SIGMA: Standard deviation for Gaussian kernel
SEARCH: nearest-neighbor search algorithm (BallTree or KDTree)
NN: number of nearest-neighbor points to use
POWER: inverse distance weighting power
FILL_VALUE: output fill_value for invalid points
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
scipy: Scientific Tools for Python
https://docs.scipy.org/doc/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
pyproj: Python interface to PROJ library
https://pypi.org/project/pyproj/
scikit-learn: Machine Learning in Python
https://scikit-learn.org/stable/index.html
https://github.com/scikit-learn/scikit-learn
UPDATE HISTORY:
Updated 01/2021: using conversion protocols following pyproj-2 updates
https://pyproj4.github.io/pyproj/stable/gotchas.html
Written 08/2020
"""
from __future__ import print_function
import sys
import os
import re
import pyproj
import netCDF4
import numpy as np
import scipy.spatial
import scipy.ndimage
import scipy.interpolate
from sklearn.neighbors import KDTree, BallTree
#-- PURPOSE: read and extrapolate a mean field of MAR outputs
def extrapolate_mar_mean(DIRECTORY, EPSG, VERSION, tdec, X, Y,
    XNAME=None, YNAME=None, TIMENAME='TIME', VARIABLE='SMB',
    RANGE=[2000,2019], SIGMA=1.5, SEARCH='BallTree', NN=10, POWER=2.0,
    FILL_VALUE=None):
    """
    Read a mean MAR field for years RANGE and extrapolate it to the points
    (X, Y) using an inverse-distance-weighted average of the NN nearest
    smoothed model nodes.

    Returns a masked array of length len(tdec) with the extrapolated values.
    """
    #-- regular expression pattern for MAR dataset
    rx = re.compile('MAR_SMBavg(.*?){0}-{1}.nc$'.format(*RANGE))
    #-- find mar mean file for RANGE
    FILE, = [f for f in os.listdir(DIRECTORY) if rx.match(f)]
    #-- Open the MAR NetCDF file for reading (grid dimensions only)
    with netCDF4.Dataset(os.path.join(DIRECTORY,FILE), 'r') as fileID:
        nx = len(fileID.variables[XNAME][:])
        ny = len(fileID.variables[YNAME][:])
    #-- python dictionary with file variables
    fd = {}
    #-- create a masked array with all data
    fd[VARIABLE] = np.ma.zeros((ny,nx),fill_value=FILL_VALUE)
    fd[VARIABLE].mask = np.zeros((ny,nx),dtype=bool)
    #-- python dictionary with gaussian filtered variables
    gs = {}
    #-- use a gaussian filter to smooth each model field
    gs[VARIABLE] = np.ma.zeros((ny,nx), fill_value=FILL_VALUE)
    gs[VARIABLE].mask = np.ones((ny,nx), dtype=bool)
    #-- Open the MAR NetCDF file for reading
    with netCDF4.Dataset(os.path.join(DIRECTORY,FILE), 'r') as fileID:
        #-- surface type
        SRF=fileID.variables['SRF'][:]
        #-- indices of specified ice mask
        i,j=np.nonzero(SRF == 4)
        #-- Get data from netCDF variable and remove singleton dimensions
        tmp=np.squeeze(fileID.variables[VARIABLE][:])
        #-- combine sectors for multi-layered data
        if (np.ndim(tmp) == 3):
            #-- ice fraction
            FRA=fileID.variables['FRA'][:]/100.0
            #-- create mask for combining data
            MASK = np.zeros((ny,nx))
            MASK[i,j] = FRA[i,j]
            #-- combine data
            fd[VARIABLE][:,:] = MASK*tmp[0,:,:] + \
                (1.0-MASK)*tmp[1,:,:]
        else:
            #-- copy data
            fd[VARIABLE][:,:] = tmp.copy()
        #-- verify mask object for interpolating data
        fd[VARIABLE].mask[:,:] |= (SRF != 4)
        #-- combine mask object through time to create a single mask
        #-- (np.float alias was removed in NumPy 1.24: use np.float64)
        fd['MASK']=1.0 - np.array(fd[VARIABLE].mask,dtype=np.float64)
        #-- MAR coordinates
        fd['LON']=fileID.variables['LON'][:,:].copy()
        fd['LAT']=fileID.variables['LAT'][:,:].copy()
        #-- convert x and y coordinates to meters
        fd['x']=1000.0*fileID.variables[XNAME][:].copy()
        fd['y']=1000.0*fileID.variables[YNAME][:].copy()
    #-- use a gaussian filter to smooth mask
    gs['MASK']=scipy.ndimage.gaussian_filter(fd['MASK'],SIGMA,
        mode='constant',cval=0)
    #-- indices of smoothed ice mask
    ii,jj = np.nonzero(np.ceil(gs['MASK']) == 1.0)
    #-- replace fill values before smoothing data
    temp1 = np.zeros((ny,nx))
    i,j = np.nonzero(~fd[VARIABLE].mask)
    temp1[i,j] = fd[VARIABLE][i,j].copy()
    #-- smooth spatial field
    temp2 = scipy.ndimage.gaussian_filter(temp1, SIGMA,
        mode='constant', cval=0)
    #-- scale output smoothed field by the smoothed valid-data fraction
    gs[VARIABLE].data[ii,jj] = temp2[ii,jj]/gs['MASK'][ii,jj]
    #-- replace valid values with original
    gs[VARIABLE].data[i,j] = temp1[i,j]
    #-- set mask variables for time
    gs[VARIABLE].mask[ii,jj] = False
    #-- convert MAR latitude and longitude to input coordinates (EPSG)
    crs1 = pyproj.CRS.from_string(EPSG)
    crs2 = pyproj.CRS.from_string("epsg:{0:d}".format(4326))
    transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)
    direction = pyproj.enums.TransformDirection.INVERSE
    #-- convert projection from model coordinates
    xg,yg = transformer.transform(fd['LON'], fd['LAT'], direction=direction)
    #-- construct search tree from original points
    #-- can use either BallTree or KDTree algorithms
    xy1 = np.concatenate((xg[i,j,None],yg[i,j,None]),axis=1)
    tree = BallTree(xy1) if (SEARCH == 'BallTree') else KDTree(xy1)
    #-- number of output data points
    npts = len(tdec)
    #-- output extrapolated arrays of output variable
    extrap = np.ma.zeros((npts),fill_value=FILL_VALUE,dtype=np.float64)
    extrap.mask = np.ones((npts),dtype=bool)
    #-- query the search tree to find the NN closest points
    xy2 = np.concatenate((X[:,None],Y[:,None]),axis=1)
    dist,indices = tree.query(xy2, k=NN, return_distance=True)
    #-- normalized weights if POWER > 0 (typically between 1 and 3)
    #-- in the inverse distance weighting
    power_inverse_distance = dist**(-POWER)
    #-- BUGFIX: normalize per output point (axis=1) so each row of
    #-- weights sums to 1, instead of normalizing over the whole matrix
    s = np.sum(power_inverse_distance, axis=1)
    w = power_inverse_distance/s[:,None]
    #-- variable for valid points
    var1 = gs[VARIABLE][i,j]
    #-- spatially extrapolate using inverse distance weighting
    extrap.data[:] = np.sum(w*var1[indices],axis=1)
    #-- complete mask if any invalid in data
    invalid, = np.nonzero((extrap.data == extrap.fill_value) |
        np.isnan(extrap.data))
    extrap.mask[invalid] = True
    #-- return the extrapolated values
    return extrap
| [
"numpy.sum",
"os.path.join",
"numpy.ceil",
"numpy.zeros",
"pyproj.CRS.from_string",
"numpy.ones",
"numpy.ndim",
"numpy.nonzero",
"numpy.isnan",
"sklearn.neighbors.BallTree",
"numpy.array",
"pyproj.Transformer.from_crs",
"numpy.squeeze",
"numpy.ma.zeros",
"sklearn.neighbors.KDTree",
"os... | [((3325, 3369), 'numpy.ma.zeros', 'np.ma.zeros', (['(ny, nx)'], {'fill_value': 'FILL_VALUE'}), '((ny, nx), fill_value=FILL_VALUE)\n', (3336, 3369), True, 'import numpy as np\n'), ((3392, 3422), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {'dtype': 'bool'}), '((ny, nx), dtype=bool)\n', (3400, 3422), True, 'import numpy as np\n'), ((3568, 3612), 'numpy.ma.zeros', 'np.ma.zeros', (['(ny, nx)'], {'fill_value': 'FILL_VALUE'}), '((ny, nx), fill_value=FILL_VALUE)\n', (3579, 3612), True, 'import numpy as np\n'), ((3636, 3665), 'numpy.ones', 'np.ones', (['(ny, nx)'], {'dtype': 'bool'}), '((ny, nx), dtype=bool)\n', (3643, 3665), True, 'import numpy as np\n'), ((5982, 6010), 'pyproj.CRS.from_string', 'pyproj.CRS.from_string', (['EPSG'], {}), '(EPSG)\n', (6004, 6010), False, 'import pyproj\n'), ((6090, 6145), 'pyproj.Transformer.from_crs', 'pyproj.Transformer.from_crs', (['crs1', 'crs2'], {'always_xy': '(True)'}), '(crs1, crs2, always_xy=True)\n', (6117, 6145), False, 'import pyproj\n'), ((6444, 6500), 'numpy.concatenate', 'np.concatenate', (['(xg[i, j, None], yg[i, j, None])'], {'axis': '(1)'}), '((xg[i, j, None], yg[i, j, None]), axis=1)\n', (6458, 6500), True, 'import numpy as np\n'), ((6689, 6745), 'numpy.ma.zeros', 'np.ma.zeros', (['npts'], {'fill_value': 'FILL_VALUE', 'dtype': 'np.float'}), '(npts, fill_value=FILL_VALUE, dtype=np.float)\n', (6700, 6745), True, 'import numpy as np\n'), ((6764, 6789), 'numpy.ones', 'np.ones', (['npts'], {'dtype': 'bool'}), '(npts, dtype=bool)\n', (6771, 6789), True, 'import numpy as np\n'), ((6862, 6910), 'numpy.concatenate', 'np.concatenate', (['(X[:, None], Y[:, None])'], {'axis': '(1)'}), '((X[:, None], Y[:, None]), axis=1)\n', (6876, 6910), True, 'import numpy as np\n'), ((7132, 7162), 'numpy.sum', 'np.sum', (['power_inverse_distance'], {}), '(power_inverse_distance)\n', (7138, 7162), True, 'import numpy as np\n'), ((7343, 7376), 'numpy.sum', 'np.sum', (['(w * var1[indices])'], {'axis': '(1)'}), '(w * var1[indices], axis=1)\n', 
(7349, 7376), True, 'import numpy as np\n'), ((3899, 3919), 'numpy.nonzero', 'np.nonzero', (['(SRF == 4)'], {}), '(SRF == 4)\n', (3909, 3919), True, 'import numpy as np\n'), ((4006, 4047), 'numpy.squeeze', 'np.squeeze', (['fileID.variables[VARIABLE][:]'], {}), '(fileID.variables[VARIABLE][:])\n', (4016, 4047), True, 'import numpy as np\n'), ((5382, 5400), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (5390, 5400), True, 'import numpy as np\n'), ((5414, 5444), 'numpy.nonzero', 'np.nonzero', (['(~fd[VARIABLE].mask)'], {}), '(~fd[VARIABLE].mask)\n', (5424, 5444), True, 'import numpy as np\n'), ((6506, 6519), 'sklearn.neighbors.BallTree', 'BallTree', (['xy1'], {}), '(xy1)\n', (6514, 6519), False, 'from sklearn.neighbors import KDTree, BallTree\n'), ((6551, 6562), 'sklearn.neighbors.KDTree', 'KDTree', (['xy1'], {}), '(xy1)\n', (6557, 6562), False, 'from sklearn.neighbors import KDTree, BallTree\n'), ((2959, 2980), 'os.listdir', 'os.listdir', (['DIRECTORY'], {}), '(DIRECTORY)\n', (2969, 2980), False, 'import os\n'), ((3067, 3096), 'os.path.join', 'os.path.join', (['DIRECTORY', 'FILE'], {}), '(DIRECTORY, FILE)\n', (3079, 3096), False, 'import os\n'), ((3735, 3764), 'os.path.join', 'os.path.join', (['DIRECTORY', 'FILE'], {}), '(DIRECTORY, FILE)\n', (3747, 3764), False, 'import os\n'), ((4111, 4123), 'numpy.ndim', 'np.ndim', (['tmp'], {}), '(tmp)\n', (4118, 4123), True, 'import numpy as np\n'), ((4275, 4293), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (4283, 4293), True, 'import numpy as np\n'), ((4721, 4764), 'numpy.array', 'np.array', (['fd[VARIABLE].mask'], {'dtype': 'np.float'}), '(fd[VARIABLE].mask, dtype=np.float)\n', (4729, 4764), True, 'import numpy as np\n'), ((7491, 7512), 'numpy.isnan', 'np.isnan', (['extrap.data'], {}), '(extrap.data)\n', (7499, 7512), True, 'import numpy as np\n'), ((5284, 5303), 'numpy.ceil', 'np.ceil', (["gs['MASK']"], {}), "(gs['MASK'])\n", (5291, 5303), True, 'import numpy as np\n')] |
import numpy as np
def clip(a, min_value, max_value):
    """Clamp ``a`` to the interval bounded below by ``min_value`` and above by ``max_value``."""
    lower_bounded = a if a > min_value else min_value
    return lower_bounded if lower_bounded < max_value else max_value
def compute(array_1, array_2, a, b, c):
    """
    This function must implement the formula
    np.clip(array_1, 2, 10) * a + array_2 * b + c
    array_1 and array_2 are 2D.

    Vectorized with NumPy instead of a Python double loop: the whole
    formula runs in a few C-level passes over the arrays.
    """
    assert array_1.shape == array_2.shape
    result = np.clip(array_1, 2, 10) * a + array_2 * b + c
    # Preserve the original contract: the output carries array_1's dtype
    # (element-wise assignment into a preallocated array cast each value).
    return result.astype(array_1.dtype)
| [
"numpy.zeros"
] | [((402, 447), 'numpy.zeros', 'np.zeros', (['(x_max, y_max)'], {'dtype': 'array_1.dtype'}), '((x_max, y_max), dtype=array_1.dtype)\n', (410, 447), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
from keras import backend as K
from tqdm.keras import TqdmCallback
from scipy.stats import spearmanr
from tensorflow.keras import Input
from tensorflow.keras import optimizers
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.models import Model
from statistics import mean
from sklearn.utils import shuffle
from tensorflow import keras
from tensorflow.keras.optimizers import Adam
import pandas as pd
import datetime
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau ,Callback,TensorBoard
from keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras import applications
import PIL
from keras.activations import softmax,sigmoid
import h5py
from PIL import Image
from keras.layers import Layer
from scipy.stats import spearmanr,pearsonr
import sklearn
import tensorflow as tf
from tensorflow.keras.layers import MaxPooling2D ,Dense,Concatenate ,Dropout ,Input,concatenate,Conv2D,Reshape,GlobalMaxPooling2D,Flatten,GlobalAveragePooling2D,AveragePooling2D,Lambda,MaxPooling2D,TimeDistributed, Bidirectional, LSTM
import argparse
import random
from tqdm import tqdm
import time
from scipy.optimize import curve_fit
# Reset Keras/TensorFlow global state so repeated runs start from a clean graph.
tf.keras.backend.clear_session()
# Wall-clock start of the script (never read in the code visible here).
start_time = time.time()
def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
    """4-parameter logistic function used to map predicted scores to MOS.

    yhat = bayta2 + (bayta1 - bayta2) / (1 + exp(-(X - bayta3) / |bayta4|))
    """
    scaled = np.divide(X - bayta3, np.abs(bayta4))
    denominator = 1 + np.exp(np.negative(scaled))
    return bayta2 + np.divide(bayta1 - bayta2, denominator)
def data_generator_1(data, nb, batch_size=1):
    """Infinite generator yielding feature batches loaded from .npy files.

    :param data: list of (feature_path, label_path) pairs
    :param nb: number of frames per sample (first feature axis)
    :param batch_size: samples per yielded batch
    :yield: features of shape (batch_size, nb, 25, 2048); labels are loaded
        too (so a missing label file fails early) but not yielded here.
    """
    total = len(data)
    while True:
        for start in range(0, total, batch_size):
            batch = data[start:start + batch_size]
            features = np.zeros((batch_size, nb, 25, 2048))
            labels = np.zeros((batch_size, 1))
            for k in range(batch_size):
                features[k, :, :, :] = np.load(batch[k][0])
                labels[k, :] = np.load(batch[k][1])
            yield features
def data_generator_2(data, nb ,batch_size=1):
    """Infinite generator yielding label batches loaded from .npy files.

    Mirrors data_generator_1 but yields the labels (shape (batch_size, 1));
    the features are still loaded so both generators fail on the same files.
    """
    total = len(data)
    while True:
        for start in range(0, total, batch_size):
            batch = data[start:start + batch_size]
            features = np.zeros((batch_size, nb, 25, 2048))
            labels = np.zeros((batch_size, 1))
            for k in range(batch_size):
                features[k, :, :, :] = np.load(batch[k][0])
                labels[k, :] = np.load(batch[k][1])
            yield labels
def build_model(batch_shape, model_final):
    """Stack a per-frame feature model with a Bi-LSTM regression head.

    :param batch_shape: per-sample input shape, e.g. (nb_frames, 25, 2048).
    :param model_final: Keras model applied to every frame via TimeDistributed.
    :return: compiled Sequential model (Adam optimizer, MSE loss, MAE metric)
        ending in a single linear output unit.
    """
    model = models.Sequential()
    # Apply the pretrained spatial model independently to each frame.
    model.add(TimeDistributed(model_final,input_shape = batch_shape))
    # Two stacked bidirectional LSTMs model temporal quality variations.
    model.add(Bidirectional(LSTM(64,return_sequences=True,kernel_initializer='random_normal',
                                recurrent_initializer='random_normal',
                               dropout=0.4,recurrent_dropout=0)))
    model.add(Bidirectional(LSTM(64,return_sequences=True,
                                 kernel_initializer='random_normal',
                             recurrent_initializer='random_normal', dropout=0.4,recurrent_dropout=0)))
    model.add(Flatten())
    model.add(Dense(256,activation='relu', kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.001)))
    model.add(layers.Dropout(rate=0.5))
    # Single linear unit: the regressed quality score.
    model.add(layers.Dense(1))
    model.add(layers.Activation('linear'))
    model.compile(optimizer=optimizers.Adam(),loss='mse',metrics=['mae'])
    model.summary()
    return model
def data_prepare():
    """Pair every feature file under ./features_X/ with its label file.

    The label file is assumed to have the same filename under ./features_y/.
    :return: sorted list of [feature_path, label_path] pairs.
    """
    pairs = sorted(
        ['./features_X/' + name, './features_y/' + name]
        for name in os.listdir('./features_X/')
    )
    return pairs
if __name__ == '__main__':
    # Metric imports the original code referenced without importing:
    # `stats.pearsonr`, `mean_squared_error` and `scipy.stats.kendalltau`
    # all raised NameError at runtime.
    from scipy.stats import kendalltau
    from sklearn.metrics import mean_squared_error

    parser = argparse.ArgumentParser("Demo")
    parser.add_argument('-nf', '--num_frames', default=30, type=int,
                        help='Number of cropped frames per video.')
    parser.add_argument('-m', '--pretrained_model', default='', type=str,
                        help='path to pretrained End2End module.')
    parser.add_argument('-f', '--paths', default='', type=str,
                        help='path to videos features.')
    args = parser.parse_args()

    model_sp = './models/res-bi-sp_koniq.h5'
    nb = args.num_frames
    model_end = args.pretrained_model
    paths = args.paths

    # Rebuild the End2End model: pretrained spatial backbone + Bi-LSTM head.
    model = load_model(model_sp)
    model_final = Model(inputs=model.input, outputs=model.layers[-3].output)
    model = build_model((nb, 25, 2048), model_final)
    model.load_weights(model_end)

    test_l = data_prepare()
    test_gen = data_generator_1(test_l, nb)
    s_gen = data_generator_2(test_l, nb)

    # One prediction per video; collect the ground-truth MOS from the
    # label generator in the same order.
    y_p = model.predict(test_gen, steps=int(len(test_l)))
    y_ss = []
    for _ in range(len(test_l)):
        y_ss.append(next(s_gen))
    y_ss = np.array(y_ss).reshape(len(test_l), 1)

    srocc = spearmanr(y_ss, y_p).correlation
    y_p = np.reshape(y_p, (len(test_l))) * 5  # rescale predictions to the MOS range
    y_ss = np.reshape(y_ss, (len(test_l)))

    names = []
    for i in range(len(test_l)):
        name = test_l[i][0].split('/')[-1].split('.npy')[0]
        names.append(name)
    df = pd.DataFrame(list(zip(names, y_ss.tolist(), y_p.tolist())),
                      columns=['Name', 'MOS', 'Predicted MOS'])
    df.to_csv('results.csv', index=False)

    # Fit a 4-parameter logistic mapping before computing PLCC/RMSE
    # (standard practice when comparing objective scores against MOS).
    beta_init = [np.max(y_ss), np.min(y_ss), np.mean(y_p), 0.5]
    popt, _ = curve_fit(logistic_func, y_p, y_ss, p0=beta_init, maxfev=int(1e8))
    y_pred_logistic = logistic_func(y_p, *popt)

    plcc = pearsonr(y_ss, y_pred_logistic)[0]  # was: stats.pearsonr (NameError)
    rmse = np.sqrt(mean_squared_error(y_ss, y_pred_logistic))
    try:
        KRCC = kendalltau(y_ss, y_p)[0]
    except Exception:
        KRCC = kendalltau(y_ss, y_p, method='asymptotic')[0]

    print('srocc = ', srocc)
    print('plcc = ', plcc)
    print('rmse = ', rmse)
    print('krocc = ', KRCC)
| [
"keras.models.load_model",
"numpy.load",
"numpy.abs",
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dense",
"numpy.mean",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten",
"numpy.max",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.optimizers.Adam",
"numpy... | [((1390, 1422), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (1420, 1422), True, 'import tensorflow as tf\n'), ((1437, 1448), 'time.time', 'time.time', ([], {}), '()\n', (1446, 1448), False, 'import time\n'), ((2961, 2980), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (2978, 2980), False, 'from tensorflow.keras import models\n'), ((3887, 3914), 'os.listdir', 'os.listdir', (['"""./features_X/"""'], {}), "('./features_X/')\n", (3897, 3914), False, 'import os\n'), ((4171, 4202), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Demo"""'], {}), "('Demo')\n", (4194, 4202), False, 'import argparse\n'), ((4832, 4852), 'keras.models.load_model', 'load_model', (['model_sp'], {}), '(model_sp)\n', (4842, 4852), False, 'from keras.models import load_model\n'), ((4870, 4928), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'model.input', 'outputs': 'model.layers[-3].output'}), '(inputs=model.input, outputs=model.layers[-3].output)\n', (4875, 4928), False, 'from tensorflow.keras.models import Model\n'), ((5285, 5299), 'numpy.array', 'np.array', (['y_ss'], {}), '(y_ss)\n', (5293, 5299), True, 'import numpy as np\n'), ((1641, 1681), 'numpy.divide', 'np.divide', (['(bayta1 - bayta2)', 'logisticPart'], {}), '(bayta1 - bayta2, logisticPart)\n', (1650, 1681), True, 'import numpy as np\n'), ((2994, 3047), 'tensorflow.keras.layers.TimeDistributed', 'TimeDistributed', (['model_final'], {'input_shape': 'batch_shape'}), '(model_final, input_shape=batch_shape)\n', (3009, 3047), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((3509, 3518), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3516, 3518), False, 'from tensorflow.keras.layers import MaxPooling2D, 
Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((3647, 3671), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (3661, 3671), False, 'from tensorflow.keras import layers\n'), ((3688, 3703), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (3700, 3703), False, 'from tensorflow.keras import layers\n'), ((3718, 3745), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""linear"""'], {}), "('linear')\n", (3735, 3745), False, 'from tensorflow.keras import layers\n'), ((5349, 5369), 'scipy.stats.spearmanr', 'spearmanr', (['y_ss', 'y_p'], {}), '(y_ss, y_p)\n', (5358, 5369), False, 'from scipy.stats import spearmanr, pearsonr\n'), ((5842, 5854), 'numpy.max', 'np.max', (['y_ss'], {}), '(y_ss)\n', (5848, 5854), True, 'import numpy as np\n'), ((5856, 5868), 'numpy.min', 'np.min', (['y_ss'], {}), '(y_ss)\n', (5862, 5868), True, 'import numpy as np\n'), ((5870, 5882), 'numpy.mean', 'np.mean', (['y_p'], {}), '(y_p)\n', (5877, 5882), True, 'import numpy as np\n'), ((2025, 2061), 'numpy.zeros', 'np.zeros', (['(batch_size, nb, 25, 2048)'], {}), '((batch_size, nb, 25, 2048))\n', (2033, 2061), True, 'import numpy as np\n'), ((2083, 2108), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (2091, 2108), True, 'import numpy as np\n'), ((2625, 2661), 'numpy.zeros', 'np.zeros', (['(batch_size, nb, 25, 2048)'], {}), '((batch_size, nb, 25, 2048))\n', (2633, 2661), True, 'import numpy as np\n'), ((2683, 2708), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (2691, 2708), True, 'import numpy as np\n'), ((3080, 3224), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(True)', 'kernel_initializer': '"""random_normal"""', 'recurrent_initializer': '"""random_normal"""', 'dropout': '(0.4)', 
'recurrent_dropout': '(0)'}), "(64, return_sequences=True, kernel_initializer='random_normal',\n recurrent_initializer='random_normal', dropout=0.4, recurrent_dropout=0)\n", (3084, 3224), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((3285, 3429), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(True)', 'kernel_initializer': '"""random_normal"""', 'recurrent_initializer': '"""random_normal"""', 'dropout': '(0.4)', 'recurrent_dropout': '(0)'}), "(64, return_sequences=True, kernel_initializer='random_normal',\n recurrent_initializer='random_normal', dropout=0.4, recurrent_dropout=0)\n", (3289, 3429), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((3776, 3793), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (3791, 3793), False, 'from tensorflow.keras import optimizers\n'), ((2183, 2211), 'numpy.load', 'np.load', (['batch_samples[i][0]'], {}), '(batch_samples[i][0])\n', (2190, 2211), True, 'import numpy as np\n'), ((2242, 2270), 'numpy.load', 'np.load', (['batch_samples[i][1]'], {}), '(batch_samples[i][1])\n', (2249, 2270), True, 'import numpy as np\n'), ((2783, 2811), 'numpy.load', 'np.load', (['batch_samples[i][0]'], {}), '(batch_samples[i][0])\n', (2790, 2811), True, 'import numpy as np\n'), ((2842, 2870), 'numpy.load', 'np.load', (['batch_samples[i][1]'], {}), '(batch_samples[i][1])\n', (2849, 2870), True, 'import numpy as np\n'), ((3583, 3631), 'tensorflow.keras.initializers.RandomNormal', 'tf.keras.initializers.RandomNormal', ([], {'stddev': '(0.001)'}), '(stddev=0.001)\n', (3617, 3631), True, 
'import tensorflow as tf\n'), ((1604, 1618), 'numpy.abs', 'np.abs', (['bayta4'], {}), '(bayta4)\n', (1610, 1618), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.keras.constraints import Constraint
from tensorflow.keras.initializers import Initializer
import tensorflow.keras as keras
import numpy as np
from .core import Ball, Box, Vertex, F_FORWARD, F_IBP, F_HYBRID, StaticVariables
from tensorflow.math import greater_equal
from tensorflow.keras.layers import Flatten
# from ..utils import get_linear_hull_relu, get_linear_softplus_hull
from ..utils import *
# create static variables for varying convex domain
class V_slope:
    # Tag selecting the "volume-slope" heuristic for the linear ReLU
    # relaxation (consumed via the `slope` argument of get_linear_hull_relu).
    name = "volume-slope"
class S_slope:
    # Tag for the "same-slope" heuristic — presumably the lower bound reuses
    # the upper bound's slope; confirm against get_linear_hull_relu in ..utils.
    name = "same-slope"
class Z_slope:
    # Tag for the "zero-lb" heuristic — presumably a lower-bound slope fixed
    # at zero; confirm against get_linear_hull_relu in ..utils.
    name = "zero-lb"
class O_slope:
    # Tag for the "one-lb" heuristic — presumably a lower-bound slope fixed
    # at one; confirm against get_linear_hull_relu in ..utils.
    name = "one-lb"
"""
def _get_shape_tuple(init_tuple, tensor, start_idx, int_shape=None):
""Finds non-specific dimensions in the static shapes.
The static shapes are replaced with the corresponding dynamic shapes of the
tensor.
Arguments:
init_tuple: a tuple, the first part of the output shape
tensor: the tensor from which to get the (static and dynamic) shapes
as the last part of the output shape
start_idx: int, which indicate the first dimension to take from
the static shape of the tensor
int_shape: an alternative static shape to take as the last part
of the output shape
Returns:
The new int_shape with the first part from init_tuple
and the last part from either `int_shape` (if provided)
or `tensor.shape`, where every `None` is replaced by
the corresponding dimension from `tf.shape(tensor)`.
sources: official Keras directory
""
# replace all None in int_shape by K.shape
if int_shape is None:
int_shape = K.int_shape(tensor)[start_idx:]
if not any(not s for s in int_shape):
return init_tuple + tuple(int_shape)
shape = K.shape(tensor)
int_shape = list(int_shape)
for i, s in enumerate(int_shape):
if not s:
int_shape[i] = shape[start_idx + i]
return init_tuple + tuple(int_shape)
"""
##############
# SYMBOLIC UPPER/ LOWER BOUNDS
# compute symbolically constant upper and lower
# with the current knowledge of the convex domain considered
##############
# case 1: a box
def get_upper_box(x_min, x_max, w, b, **kwargs):
    """
    Exact maximum of the affine function w*x + b over a box (hypercube):
    positive weights take the upper corner, negative weights the lower one.
    :param x_min: lower bound of the box domain
    :param x_max: upper bound of the box domain
    :param w: weights of the affine function
    :param b: bias of the affine function
    :return: max_(x >= x_min, x <= x_max) w*x + b
    """
    zero = K.cast(0.0, K.floatx())
    # same rank for w and b means the layer is the identity: bound is x_max
    if len(w.shape) == len(b.shape):
        return x_max
    pos_part = K.maximum(w, zero)
    neg_part = K.minimum(w, zero)
    lo = x_min + zero * x_min
    hi = x_max + zero * x_max
    # expand the corners up to the rank of w before broadcasting
    missing_dims = len(w.shape) - len(x_max.shape)
    for _ in range(missing_dims):
        lo = K.expand_dims(lo, -1)
        hi = K.expand_dims(hi, -1)
    return K.sum(pos_part * hi + neg_part * lo, 1) + b
def get_lower_box(x_min, x_max, w, b, **kwargs):
    """
    Exact minimum of the affine function w*x + b over a box:
    positive weights take the lower corner, negative weights the upper one.
    :param x_min: lower bound of the box domain
    :param x_max: upper bound of the box domain
    :param w: weights of the affine lower bound
    :param b: bias of the affine lower bound
    :return: min_(x >= x_min, x <= x_max) w*x + b
    """
    zero = K.cast(0.0, K.floatx())
    # identity layer: the minimum over the box is x_min itself
    if len(w.shape) == len(b.shape):
        return x_min
    pos_part = K.maximum(w, zero)
    neg_part = K.minimum(w, zero)
    lo = x_min + zero * x_min
    hi = x_max + zero * x_max
    # expand the corners up to the rank of w before broadcasting
    missing_dims = len(w.shape) - len(x_max.shape)
    for _ in range(missing_dims):
        lo = K.expand_dims(lo, -1)
        hi = K.expand_dims(hi, -1)
    return K.sum(pos_part * lo + neg_part * hi, 1) + b
# case 2 : a ball
def get_lq_norm(x, p, axis=-1):
    """
    Compute the norm dual to Lp, as required by Holder's inequality in
    get_upper_ball / get_lower_ball (q = inf for p = 1, q = 2 for p = 2).

    Note: despite the name suggesting an Lp norm, for p == 1 this returns
    the max absolute value (the L-inf norm), which is the correct dual norm.
    :param x: tensor
    :param p: norm of the ball; only 1 and 2 are supported
    :param axis: the axis on which we compute the norm
    :return: the dual norm of x along `axis`
    """
    if p not in [1, 2]:
        raise NotImplementedError()
    if p == 1:
        # dual of L1 is L-infinity (the commented line was the plain L1 norm)
        # x_p = K.sum(K.abs(x), axis)
        x_q = K.max(K.abs(x), axis)
    if p == 2:
        # L2 is self-dual
        x_q = K.sqrt(K.sum(K.pow(x, p), axis))
    return x_q
def get_upper_ball(x_0, eps, p, w, b, **kwargs):
    """
    max of an affine function over an Lp ball
    :param x_0: the center of the ball
    :param eps: the radius
    :param p: the type of Lp norm considered
    :param w: weights of the affine function
    :param b: bias of the affine function
    :return: max_(|x - x_0|_p <= eps) w*x + b
    """
    # identity layer: the maximum is the ball's upper corner
    if len(w.shape) == len(b.shape):
        return x_0 + eps
    if p == np.inf:
        # an L-inf ball is a box: delegate to the exact box bound
        return get_upper_box(x_0 - eps, x_0 + eps, w, b)
    # extra options trigger the finetuned variant
    if len(kwargs):
        return get_upper_ball_finetune(x_0, eps, p, w, b, **kwargs)
    # Holder's inequality (1/p + 1/q = 1): max <= eps * ||w||_q + w*x_0 + b
    bound = eps * get_lq_norm(w, p, axis=1) + b
    center = x_0
    for _ in range(len(w.shape) - len(x_0.shape)):
        center = K.expand_dims(center, -1)
    return K.sum(w * center, 1) + bound
def get_lower_ball_finetune(x_0, eps, p, w, b, **kwargs):
    """
    Lower bound of w*x + b over an Lp ball, refined with known constant
    bounds on x: the weight is split as w = alpha*w + (1-alpha)*w, the first
    part bounded over the ball and the second over the constant box.

    :param x_0: the center of the ball
    :param eps: the radius
    :param p: the type of Lp norm considered
    :param w: weights of the affine function
    :param b: bias of the affine function
    :param kwargs: may contain 'finetune_lower' (the alpha coefficients)
        and constant 'upper'/'lower' bounds on x
    :return: a lower bound of w*x + b over the ball
    """
    # Fixed precedence: the original test
    # `"finetune_lower" in kwargs and "upper" in kwargs or "lower" in kwargs`
    # parsed as `(A and B) or C`, raising KeyError on 'finetune_lower' when
    # only 'lower' was supplied.
    if "finetune_lower" in kwargs and ("upper" in kwargs or "lower" in kwargs):
        alpha = kwargs["finetune_lower"]
        # assume alpha is the same shape as w, minus the batch dimension
        n_shape = len(w.shape) - 2

        # Fixed: the original `"upper" and "lower" in kwargs` only tested
        # 'lower' ("upper" is a truthy literal) and crashed on kwargs['upper'].
        if "upper" in kwargs and "lower" in kwargs:
            upper = kwargs['upper']  # flatten vector
            lower = kwargs['lower']  # flatten vector
            upper_ = np.reshape(upper, [1, -1] + [1] * n_shape)
            lower_ = np.reshape(lower, [1, -1] + [1] * n_shape)

            w_alpha = w * alpha[None]
            w_alpha_bar = w * (1 - alpha)

            # box part of a *lower* bound: positive residual weights take the
            # lower constant, negative ones the upper constant
            score_box = K.sum(K.maximum(0., w_alpha_bar) * lower_, 1) + K.sum(K.minimum(0., w_alpha_bar) * upper_, 1)
            score_ball = get_lower_ball(x_0, eps, p, w_alpha, b)
            return score_box + score_ball

        if "upper" in kwargs:
            upper = kwargs['upper']  # flatten vector
            upper_ = np.reshape(upper, [1, -1] + [1] * n_shape)

            w_alpha = K.minimum(0, w) * alpha[None] + K.maximum(0., w)
            w_alpha_bar = K.minimum(0, w) * (1 - alpha[None])

            score_box = K.sum(K.minimum(0., w_alpha_bar) * upper_, 1)
            score_ball = get_lower_ball(x_0, eps, p, w_alpha, b)
            return score_box + score_ball

        if "lower" in kwargs:
            lower = kwargs['lower']  # flatten vector
            lower_ = np.reshape(lower, [1, -1] + [1] * n_shape)

            w_alpha = K.maximum(0, w) * alpha[None] + K.minimum(0., w)
            w_alpha_bar = K.maximum(0, w) * (1 - alpha[None])

            score_box = K.sum(K.maximum(0., w_alpha_bar) * lower_, 1)
            score_ball = get_lower_ball(x_0, eps, p, w_alpha, b)
            return score_box + score_ball

    return get_lower_ball(x_0, eps, p, w, b)
def get_upper_ball_finetune(x_0, eps, p, w, b, **kwargs):
    """
    Upper bound of w*x + b over an Lp ball, refined with known constant
    bounds on x (mirror of get_lower_ball_finetune).

    :param x_0: the center of the ball
    :param eps: the radius
    :param p: the type of Lp norm considered
    :param w: weights of the affine function
    :param b: bias of the affine function
    :param kwargs: may contain 'finetune_upper' (the alpha coefficients)
        and constant 'upper'/'lower' bounds on x
    :return: an upper bound of w*x + b over the ball
    """
    # NOTE(review): every branch below delegates the ball term to
    # get_lower_ball, copied from get_lower_ball_finetune; for an *upper*
    # bound get_upper_ball looks intended — left unchanged pending confirmation.
    # Fixed precedence: `(A and B) or C` raised KeyError on 'finetune_upper'
    # when only 'lower' was supplied.
    if "finetune_upper" in kwargs and ("upper" in kwargs or "lower" in kwargs):
        alpha = kwargs["finetune_upper"]
        # assume alpha is the same shape as w, minus the batch dimension
        n_shape = len(w.shape) - 2

        # Fixed: `"upper" and "lower" in kwargs` only tested 'lower'.
        if "upper" in kwargs and "lower" in kwargs:
            upper = kwargs['upper']  # flatten vector
            lower = kwargs['lower']  # flatten vector
            upper_ = np.reshape(upper, [1, -1] + [1] * n_shape)
            lower_ = np.reshape(lower, [1, -1] + [1] * n_shape)

            w_alpha = w * alpha[None]
            w_alpha_bar = w * (1 - alpha)

            # box part of an *upper* bound: positive residual weights take the
            # upper constant, negative ones the lower constant
            score_box = K.sum(K.maximum(0., w_alpha_bar) * upper_, 1) + K.sum(K.minimum(0., w_alpha_bar) * lower_, 1)
            score_ball = get_lower_ball(x_0, eps, p, w_alpha, b)
            return score_box + score_ball

        if "upper" in kwargs:
            upper = kwargs['upper']  # flatten vector
            upper_ = np.reshape(upper, [1, -1] + [1] * n_shape)

            w_alpha = K.minimum(0, w) * alpha[None] + K.maximum(0., w)
            w_alpha_bar = K.minimum(0, w) * (1 - alpha[None])

            score_box = K.sum(K.maximum(0., w_alpha_bar) * upper_, 1)
            score_ball = get_lower_ball(x_0, eps, p, w_alpha, b)
            return score_box + score_ball

        if "lower" in kwargs:
            lower = kwargs['lower']  # flatten vector
            lower_ = np.reshape(lower, [1, -1] + [1] * n_shape)

            w_alpha = K.maximum(0, w) * alpha[None] + K.minimum(0., w)
            w_alpha_bar = K.maximum(0, w) * (1 - alpha[None])

            score_box = K.sum(K.minimum(0., w_alpha_bar) * lower_, 1)
            score_ball = get_lower_ball(x_0, eps, p, w_alpha, b)
            return score_box + score_ball

    return get_upper_ball(x_0, eps, p, w, b)
def get_lower_ball(x_0, eps, p, w, b, **kwargs):
    """
    min of an affine function over an Lp ball
    :param x_0: the center of the ball
    :param eps: the radius
    :param p: the type of Lp norm considered
    :param w: weights of the affine function
    :param b: bias of the affine function
    :return: min_(|x - x_0|_p <= eps) w*x + b
    """
    # identity layer: the minimum is the ball's lower corner
    if len(w.shape) == len(b.shape):
        return x_0 - eps
    if p == np.inf:
        # an L-inf ball is a box: delegate to the exact box bound
        return get_lower_box(x_0 - eps, x_0 + eps, w, b)
    # extra options trigger the finetuned variant
    if len(kwargs):
        return get_lower_ball_finetune(x_0, eps, p, w, b, **kwargs)
    # Holder's inequality (1/p + 1/q = 1): min >= -eps * ||w||_q + w*x_0 + b
    offset = -eps * get_lq_norm(w, p, axis=1) + b
    center = x_0
    for _ in range(len(w.shape) - len(x_0.shape)):
        center = K.expand_dims(center, -1)
    return K.sum(w * center, 1) + offset
def get_upper(x, w, b, convex_domain={}, **kwargs):
    """
    Meta function that dispatches on the convex-domain type to compute a
    constant upper bound of the affine function w*x + b.
    :param x: the tensors that represent the domain
    :param w: the weights of the affine function
    :param b: the bias
    :param convex_domain: dict describing the convex domain (box by default)
    :return: a constant upper bound of the affine function
    """
    if convex_domain is None or len(convex_domain) == 0:
        # default domain: a box given by its two corners
        x_min = x[:, 0]
        x_max = x[:, 1]
        return get_upper_box(x_min, x_max, w, b, **kwargs)
    if convex_domain["name"] == Box.name:
        x_min = x[:, 0]
        x_max = x[:, 1]
        return get_upper_box(x_min, x_max, w, b, **kwargs)
    if convex_domain["name"] == Ball.name:
        eps = convex_domain["eps"]
        p = convex_domain["p"]
        # Forward only the *extra* options: the original
        # `kwargs.update(convex_domain)` re-injected 'eps' and 'p' as
        # keywords while also passing them positionally, raising
        # "got multiple values for argument 'eps'".
        kwargs.update({k: v for k, v in convex_domain.items()
                       if k not in ("name", "eps", "p")})
        return get_upper_ball(x, eps, p, w, b, **kwargs)
    if convex_domain["name"] == Vertex.name:
        raise NotImplementedError()
    raise NotImplementedError()
def get_lower(x, w, b, convex_domain={}, **kwargs):
    """
    Meta function that dispatches on the convex-domain type to compute a
    constant lower bound of the affine function w*x + b.
    :param x: the tensors that represent the domain
    :param w: the weights of the affine function
    :param b: the bias
    :param convex_domain: dict describing the convex domain (box by default)
    :return: a constant lower bound of the affine function
    """
    # NOTE: **kwargs is accepted but not forwarded (unlike get_upper).
    domain_is_default_box = convex_domain is None or len(convex_domain) == 0
    if domain_is_default_box or convex_domain["name"] == Box.name:
        return get_lower_box(x[:, 0], x[:, 1], w, b)
    if convex_domain["name"] == Ball.name:
        return get_lower_ball(x, convex_domain["eps"], convex_domain["p"], w, b)
    # Vertex domains (and any other name) are not supported yet
    raise NotImplementedError()
#####
# USE SYMBOLIC GRADIENT DESCENT WITH OVERESTIMATION GUARANTEES
#####
# first compute gradient of the function
def get_grad(x, constant, W, b):
    """
    We compute the gradient of the function f at sample x
    f = sum_{i<= n_linear} max(constant_i, W_i*x + b_i)
    it is quite easy to compute the gradient symbolically, without using the gradient operator
    it is either 0 if f(x) = constant or W else
    this trick allows to be backpropagation compatible
    :param x: Keras Tensor (None, n_dim, ...)
    :param constant: Keras Tensor (None, n_linear, ...)
    :param W: Keras Tensor (None, n_dim, n_linear, ...)
    :param b: Keras Tensor (None, n_linear, ...)
    :return: Keras Tensor the gradient of the function (None, n_dim, ...)
    """
    # product W*x + b
    # import pdb; pdb.set_trace()
    # x_ = K.expand_dims(x, 2) # (None, n_dim, 1, 1)
    # z = K.sum(W * x_, 1) + b
    x_ = K.expand_dims(x, 2)
    z = K.sum(W * x_, 1) + b
    # -sign(constant - max(constant, z)) is 1 where z > constant (the linear
    # piece is active) and 0 where the max is the constant, so the sum below
    # selects W_i only for the active pieces.
    # grad_ = K.sum(K.expand_dims(K.clip(K.sign(K.maximum(constant, z) - constant), 0., 1.), 1)*W, 2)
    grad_ = K.sum(K.expand_dims(-K.sign(constant - K.maximum(constant, z)), 1) * W, 2)  # (None, n_dim, ...)
    return grad_
def compute_L(W):
    """
    Upper bound on the gradient magnitude of
    f = sum_i max(constant_i, W_i*x + b_i): sum of the L2 norms of the W_i.
    :param W: Keras Tensor (None, n_dim, n_linear, ...)
    :return: Keras Tensor with an upper bound on the largest magnitude of the gradient
    """
    # L2 norm of each linear piece (axis 1), then summed over the pieces
    squared_norms = K.sum(K.pow(W, 2), 1)
    return K.sum(K.sqrt(squared_norms), 1)
def compute_R(z, convex_domain):
    """
    Largest possible L2 distance between the starting point and the global
    optimum, given the convex domain.
    :param z: Keras Tensor
    :param convex_domain: Dictionnary to complement z on the convex domain
    :return: Keras Tensor, an upper bound on the distance
    """
    if len(convex_domain) == 0:
        # default box given by its two corners: half-diagonal length
        return K.sqrt(K.sum(K.pow((z[:, 1] - z[:, 0]) / 2.0, 2), -1))
    if convex_domain["name"] == Box.name:  # to improve
        return K.sqrt(K.sum(K.pow((z[:, 1] - z[:, 0]) / 2.0, 2), -1))
    if convex_domain["name"] == Ball.name and convex_domain["p"] == np.inf:
        # L-inf ball of radius eps: diagonal of the corresponding box
        return K.sqrt(K.sum(K.pow(z - z + convex_domain["eps"], 2), -1))
    if convex_domain["name"] == Ball.name and convex_domain["p"] == 2:
        # L2 ball: the radius itself (broadcast to z's shape)
        return convex_domain["eps"] * (0 * z + 1.0)
    raise NotImplementedError()
def get_start_point(z, convex_domain):
    """
    Warm start for the optimization: a point minimizing the largest possible
    distance between the start and the global optimum.
    :param z: Keras Tensor
    :param convex_domain: Dictionnary to complement z on the convex domain
    :return: Keras Tensor
    """
    if len(convex_domain) == 0:
        # default box: the lower corner
        # return (z[:, 0] + z[:, 1]) / 2.0
        return z[:, 0]
    name = convex_domain["name"]
    if name == Box.name:  # to improve
        return (z[:, 0] + z[:, 1]) / 2.0
    if name == Ball.name and convex_domain["p"] in (np.inf, 2):
        # for a ball, z is already the center
        return z
    raise NotImplementedError()
def get_coeff_grad(R, k, g):
    """
    Adaptive step size for (sub)gradient descent: R / (sqrt(k) * ||g||_2).
    :param R: Keras Tensor that reprends the largest distance to the gloabl optimum
    :param k: the number of iteration done so far
    :param g: the gradient
    :return: the adaptative step size for the gradient
    """
    grad_norm = K.sqrt(K.sum(K.pow(g, 2), 1))
    denum = np.sqrt(k) * grad_norm
    # guard against a vanishing gradient norm
    return R / K.maximum(K.epsilon(), denum)
def grad_descent_conv(z, concave_upper, convex_lower, op_pos, ops_neg, n_iter):
    """
    Convolutional counterpart of grad_descent; placeholder only.
    :param z: input domain tensor
    :param concave_upper: concave upper-bound description
    :param convex_lower: convex lower-bound description
    :param op_pos: positive-part operator(s)
    :param ops_neg: negative-part operator(s)
    :param n_iter: number of iterations
    :raises NotImplementedError: always
    """
    raise NotImplementedError()
def grad_descent(z, convex_0, convex_1, convex_domain, n_iter=5):
    """
    Symbolic (sub)gradient descent on the sum of two piecewise-linear convex
    functions f_j(x) = sum_i max(constant_i, W_i*x + b_i), returning a value
    penalized by the classical subgradient-method bound L*R/sqrt(n_iter+1)
    so the result is a certified bound rather than a point estimate.

    :param z: Keras Tensor describing the input domain
    :param convex_0: tuple (constant, W, b) of the first convex function
    :param convex_1: tuple (constant, W, b) of the second convex function
    :param convex_domain: Dictionnary to complement z on the convex domain
    :param n_iter: the number of total iteration
    :return: Keras Tensor, penalized objective values along the descent
    """
    constant_0, W_0, b_0 = convex_0
    constant_1, W_1, b_1 = convex_1
    # init
    # import pdb; pdb.set_trace()
    # warm start; the `0 * K.sum(...)` term only forces a broadcastable shape
    x_k = K.expand_dims(get_start_point(z, convex_domain), -1) + 0 * K.sum(constant_0, 1)[:, None]
    R = compute_R(z, convex_domain)
    n_dim = len(x_k.shape[1:])
    # expand R so it broadcasts against x_k's extra dimensions
    while n_dim > 1:
        R = K.expand_dims(R, -1)
        n_dim -= 1
    def step_grad(x_, x_k_):
        # one descent step, run inside K.rnn; the carried state is x_k
        x_k = x_k_[0]
        g_k_0 = get_grad(x_k, constant_0, W_0, b_0)
        g_k_1 = get_grad(x_k, constant_1, W_1, b_1)
        g_k = g_k_0 + g_k_1
        alpha_k = get_coeff_grad(R, n_iter + 1, g_k)
        # x_result = x_k - alpha_k* g_k
        x_result = alpha_k[:, None] * g_k
        return x_result, [x_k]
    # step_grad(x_k, [x_k])
    # unroll n_iter descent steps symbolically via K.rnn
    x_vec = K.rnn(
        step_function=step_grad, inputs=K.concatenate([x_k[:, None]] * n_iter, 1), initial_states=[x_k], unroll=False
    )[1]
    # check convergence
    x_k = x_vec[:, -1]
    g_k = get_grad(x_k, constant_0, W_0, b_0) + get_grad(x_k, constant_1, W_1, b_1)
    mask_grad = K.sign(K.sqrt(K.sum(K.pow(g_k, 2), 1)))  # check whether we have converge
    # evaluate both piecewise-linear functions at every visited point
    X_vec = K.expand_dims(x_vec, -2)
    f_vec = K.sum(
        K.maximum(constant_0[:, None], K.sum(W_0[:, None] * X_vec, 2) + b_0[:, None])
        + K.maximum(constant_1[:, None], K.sum(W_1[:, None] * X_vec, 2) + b_1[:, None]),
        2,
    )
    # f_vec = K.min(f_vec, 1)
    f_vec = f_vec[0]
    # penalty from the Lipschitz constants of both functions; applied only
    # where the gradient has not vanished (mask_grad = 0 at convergence)
    L_0 = compute_L(W_0)
    L_1 = compute_L(W_1)
    L = L_0 + L_1
    penalty = (L * R) / np.sqrt(n_iter + 1)
    return f_vec - mask_grad * penalty
class NonPos(Constraint):
    """Constrains the weights to be non-positive (positive entries are zeroed).

    Note: the class name matches the behavior; the previous docstring said
    "non-negative", which contradicted the code below.
    """

    def __call__(self, w):
        # keep w where w <= 0, replace strictly positive entries by 0
        return w * K.cast(K.less_equal(w, 0.0), K.floatx())
class ClipAlpha(Constraint):
    """Constrains the weights to lie in [0, 1]."""

    def __call__(self, w):
        return K.clip(w, 0.0, 1.0)
class ClipAlphaAndSumtoOne(Constraint):
    """Clips the weights to [0, 1] and rescales so each axis-0 sum equals 1."""

    def __call__(self, w):
        w = K.clip(w, 0.0, 1.0)
        # normalize the first colum to 1
        # NOTE(review): the [:, None, None, None] indexing assumes a specific
        # (5-D?) weight rank — confirm against the layers using this constraint.
        w_scale = K.maximum(K.sum(w, 0), K.epsilon())
        return w/w_scale[:,None,None,None]
class MultipleConstraint(Constraint):
    """Chains several Keras weight constraints, applied in order."""

    def __init__(self, constraint_0, constraint_1, **kwargs):
        super(MultipleConstraint, self).__init__(**kwargs)
        # constraint_0 is optional; constraint_1 is always applied (last)
        prefix = [constraint_0] if constraint_0 else []
        self.constraints = prefix + [constraint_1]

    def __call__(self, w):
        result = w
        for constraint in self.constraints:
            result = constraint(result)
        return result
class Project_initializer_pos(Initializer):
    """Initializer wrapper that projects generated values onto [0, +inf)."""

    def __init__(self, initializer, **kwargs):
        # Bug fix: the original `super(Project_initializer_pos, **kwargs)`
        # never invoked Initializer.__init__ (and raised TypeError for any
        # non-empty kwargs).
        super(Project_initializer_pos, self).__init__(**kwargs)
        self.initializer = initializer

    def __call__(self, shape, dtype=None):
        w_ = self.initializer.__call__(shape, dtype)
        # clamp negative samples to 0
        return K.maximum(0.0, w_)
class Project_initializer_neg(Initializer):
    """Initializer wrapper that projects generated values onto (-inf, 0]."""

    def __init__(self, initializer, **kwargs):
        # Bug fix: the original `super(Project_initializer_neg, **kwargs)`
        # never invoked Initializer.__init__ (and raised TypeError for any
        # non-empty kwargs).
        super(Project_initializer_neg, self).__init__(**kwargs)
        self.initializer = initializer

    def __call__(self, shape, dtype=None):
        w_ = self.initializer.__call__(shape, dtype)
        # clamp positive samples to 0
        return K.minimum(0.0, w_)
def relu_(x, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name, slope=V_slope.name, **kwargs):
    """LiRPA forward bounds of the ReLU activation.

    :param x: list of input tensors whose layout depends on `mode`
        (IBP: [u_c, l_c]; forward: [x_0, w_u, b_u, w_l, b_l];
        hybrid: [x_0, u_c, w_u, b_u, l_c, w_l, b_l]), optionally followed by
        the DC-decomposition pair [h, g] when dc_decomp is True.
    :param dc_decomp: whether a difference-of-convex decomposition is carried
    :param convex_domain: the type of convex input domain
    :param mode: one of F_IBP, F_FORWARD, F_HYBRID
    :param slope: heuristic forwarded to get_linear_hull_relu
    :return: list of output tensors with the same layout as `x`
    """
    if mode not in [F_HYBRID.name, F_IBP.name, F_FORWARD.name]:
        raise ValueError("unknown mode {}".format(mode))
    z_value = K.cast(0.0, K.floatx())
    o_value = K.cast(1.0, K.floatx())
    nb_tensors = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    # unpack the inputs according to the mode
    if mode == F_HYBRID.name:
        # y, x_0, u_c, w_u, b_u, l_c, w_l, b_l = x[:8]
        x_0, u_c, w_u, b_u, l_c, w_l, b_l = x[:nb_tensors]
    elif mode == F_IBP.name:
        # y, x_0, u_c, l_c = x[:4]
        u_c, l_c = x[:nb_tensors]
    elif mode == F_FORWARD.name:
        # y, x_0, w_u, b_u, w_l, b_l = x[:6]
        x_0, w_u, b_u, w_l, b_l = x[:nb_tensors]
    # constant pre-activation bounds: derived from the affine bounds in
    # forward mode, taken from the constant bounds otherwise
    if mode == F_FORWARD.name:
        upper = get_upper(x_0, w_u, b_u, convex_domain)
        lower = get_lower(x_0, w_l, b_l, convex_domain)
    # if mode == F_HYBRID.name:
    #    upper = K.minimum(u_c,upper)
    #    lower = K.maximum(l_c, lower)
    if mode in [F_IBP.name, F_HYBRID.name]:
        upper = u_c
        lower = l_c
    if dc_decomp:
        h, g = x[-2:]
        h_ = K.maximum(h, -g)
        g_ = g
        # index_dead = 1 when upper <= 0 (the neuron is always inactive);
        # index_linear = 1 when lower >= 0 (the neuron is always linear)
        index_dead = -K.clip(K.sign(upper) - o_value, -o_value, z_value)  # =1 if inactive state
        index_linear = K.clip(K.sign(lower) + o_value, z_value, o_value)  # 1 if linear state
        # zero the decomposition for dead neurons, pass it through for linear ones
        h_ = (o_value - index_dead) * h_
        g_ = (o_value - index_dead) * g_
        h_ = (o_value - index_linear) * h_ + index_linear * h
        g_ = (o_value - index_linear) * g_ + index_linear * g
    # constant output bounds: ReLU of the constant input bounds
    u_c_ = K.relu(upper)
    l_c_ = K.relu(lower)
    if mode in [F_FORWARD.name, F_HYBRID.name]:
        # linear relaxation of ReLU, composed with the incoming affine bounds
        w_u_, b_u_, w_l_, b_l_ = get_linear_hull_relu(upper, lower, slope, **kwargs)
        b_u_ = w_u_ * b_u + b_u_
        b_l_ = w_l_ * b_l + b_l_
        w_u_ = K.expand_dims(w_u_, 1) * w_u
        w_l_ = K.expand_dims(w_l_, 1) * w_l
    output = []
    if mode == F_IBP.name:
        output += [u_c_, l_c_]
    if mode == F_FORWARD.name:
        output += [x_0, w_u_, b_u_, w_l_, b_l_]
    if mode == F_HYBRID.name:
        output += [x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
    if dc_decomp:
        output += [h_, g_]
    return output
def softplus_(x, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name, slope=V_slope.name, **kwargs):
    """LiRPA forward bounds of the softplus activation.

    Same input/output layout as relu_ (depends on `mode`); the linear
    relaxation comes from get_linear_softplus_hull.

    :param x: list of input tensors (see relu_ for the per-mode layout)
    :param dc_decomp: whether a difference-of-convex decomposition is carried
    :param convex_domain: the type of convex input domain
    :param mode: one of F_IBP, F_FORWARD, F_HYBRID
    :param slope: heuristic forwarded to get_linear_softplus_hull
    :return: list of output tensors with the same layout as `x`
    """
    if mode not in [F_HYBRID.name, F_IBP.name, F_FORWARD.name]:
        raise ValueError("unknown mode {}".format(mode))
    nb_tensors = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    # unpack the inputs according to the mode
    if mode == F_HYBRID.name:
        # y, x_0, u_c, w_u, b_u, l_c, w_l, b_l = x[:8]
        x_0, u_c, w_u, b_u, l_c, w_l, b_l = x[:nb_tensors]
    elif mode == F_IBP.name:
        # y, x_0, u_c, l_c = x[:4]
        u_c, l_c = x[:nb_tensors]
    elif mode == F_FORWARD.name:
        # y, x_0, w_u, b_u, w_l, b_l = x[:6]
        x_0, w_u, b_u, w_l, b_l = x[:nb_tensors]
    if dc_decomp:
        h, g = x[-2:]
        # h_/g_ computed but not returned below — appears unfinished compared
        # to relu_, which appends [h_, g_] to the output. TODO confirm.
        h_ = K.maximum(h, -g)
        g_ = g
    # constant pre-activation bounds
    if mode == F_FORWARD.name:
        upper = get_upper(x_0, w_u, b_u, convex_domain)
        lower = get_lower(x_0, w_l, b_l, convex_domain)
    if mode == F_HYBRID.name:
        upper = u_c
        lower = l_c
    if mode == F_IBP.name:
        upper = u_c
        lower = l_c
    # constant output bounds: softplus is monotonically increasing
    u_c_ = K.softplus(upper)
    l_c_ = K.softplus(lower)
    if mode in [F_FORWARD.name, F_HYBRID.name]:
        # linear relaxation of softplus, composed with the incoming affine bounds
        w_u_, b_u_, w_l_, b_l_ = get_linear_softplus_hull(upper, lower, slope, **kwargs)
        b_u_ = w_u_ * b_u + b_u_
        b_l_ = w_l_ * b_l + b_l_
        w_u_ = K.expand_dims(w_u_, 1) * w_u
        w_l_ = K.expand_dims(w_l_, 1) * w_l
    if mode == F_IBP.name:
        return [u_c_, l_c_]
    if mode == F_FORWARD.name:
        return [x_0, w_u_, b_u_, w_l_, b_l_]
    if mode == F_HYBRID.name:
        return [x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
def substract(inputs_0, inputs_1, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name):
    """
    LiRPA implementation of inputs_0 - inputs_1, computed as
    inputs_0 + (-inputs_1).
    :param inputs_0: tensor
    :param inputs_1: tensor
    :param dc_decomp: boolean that indicates
    whether we return a difference of convex decomposition of our layer
    :param convex_domain: the type of convex domain
    :return: inputs_0 - inputs_1
    """
    negated = minus(inputs_1, mode=mode, dc_decomp=dc_decomp)
    return add(inputs_0, negated, dc_decomp=dc_decomp, mode=mode, convex_domain=convex_domain)
def add(inputs_0, inputs_1, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name):
    """
    LiRPA implementation of inputs_0+inputs_1
    :param inputs_0: list of tensors describing the first operand
    :param inputs_1: list of tensors describing the second operand
    :param dc_decomp: boolean that indicates
    whether we return a difference of convex decomposition of our layer
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: inputs_0 + inputs_1
    """
    nb_tensor = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    if dc_decomp:
        # the DC components of a sum are the sums of the components
        h_0, g_0 = inputs_0[-2:]
        h_1, g_1 = inputs_1[-2:]
        h_ = h_0 + h_1
        g_ = g_0 + g_1
    if mode == F_HYBRID.name:
        # y_0, x_0, u_c_0, w_u_0, b_u_0, l_c_0, w_l_0, b_l_0 = inputs_0[:8]
        # y_1, _, u_c_1, w_u_1, b_u_1, l_c_1, w_l_1, b_l_1 = inputs_1[:8]
        x_0, u_c_0, w_u_0, b_u_0, l_c_0, w_l_0, b_l_0 = inputs_0[:nb_tensor]
        _, u_c_1, w_u_1, b_u_1, l_c_1, w_l_1, b_l_1 = inputs_1[:nb_tensor]
    if mode == F_IBP.name:
        # y_0, x_0, u_c_0, l_c_0 = inputs_0[:4]
        # y_1, _, u_c_1, l_c_1 = inputs_1[:4]
        u_c_0, l_c_0 = inputs_0[:nb_tensor]
        u_c_1, l_c_1 = inputs_1[:nb_tensor]
    if mode == F_FORWARD.name:
        # y_0, x_0, w_u_0, b_u_0, w_l_0, b_l_0 = inputs_0[:6]
        # y_1, _, w_u_1, b_u_1, w_l_1, b_l_1 = inputs_1[:6]
        x_0, w_u_0, b_u_0, w_l_0, b_l_0 = inputs_0[:nb_tensor]
        _, w_u_1, b_u_1, w_l_1, b_l_1 = inputs_1[:nb_tensor]
    if mode in [F_HYBRID.name, F_IBP.name]:
        # constant bounds add term-wise
        u_c_ = u_c_0 + u_c_1
        l_c_ = l_c_0 + l_c_1
    if mode in [F_HYBRID.name, F_FORWARD.name]:
        # affine bounds add term-wise (both operands share the same x_0)
        w_u_ = w_u_0 + w_u_1
        w_l_ = w_l_0 + w_l_1
        b_u_ = b_u_0 + b_u_1
        b_l_ = b_l_0 + b_l_1
    if mode == F_HYBRID.name:
        # tighten the constant bounds with the combined affine ones
        upper_ = get_upper(x_0, w_u_, b_u_, convex_domain)  # we can see an improvement
        u_c_ = K.minimum(upper_, u_c_)
        lower_ = get_lower(x_0, w_l_, b_l_, convex_domain)  # we can see an improvement
        l_c_ = K.maximum(lower_, l_c_)
    # y_ = y_0 + y_1
    if mode == F_HYBRID.name:
        # output = [y_, x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
        output = [x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
    if mode == F_IBP.name:
        # output = [y_, x_0, u_c_, l_c_]
        output = [u_c_, l_c_]
    if mode == F_FORWARD.name:
        # output = [y_, x_0, w_u_, b_u_, w_l_, b_l_]
        output = [x_0, w_u_, b_u_, w_l_, b_l_]
    if dc_decomp:
        output += [h_, g_]
    return output
def sum(x, axis=-1, dc_decomp=False, mode=F_HYBRID.name, **kwargs):
    """
    LiRPA implementation of a sum reduction along ``axis``.

    :param x: list of input tensors in the layout implied by ``mode``
    :param axis: axis along which the values are summed
    :param dc_decomp: difference-of-convex decomposition (not supported)
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: the updated list of tensors
    :raises NotImplementedError: if ``dc_decomp`` is True
    """
    if dc_decomp:
        raise NotImplementedError()
    if mode == F_IBP.name:
        # constant bounds: reduce both of them directly
        return [K.sum(x[0], axis=axis), K.sum(x[1], axis=axis)]
    if mode == F_FORWARD.name:
        x_0, w_u, b_u, w_l, b_l = x
    if mode == F_HYBRID.name:
        x_0, u_c, w_u, b_u, l_c, w_l, b_l = x
        u_c_ = K.sum(u_c, axis=axis)
        l_c_ = K.sum(l_c, axis=axis)
    # the weight tensors carry one extra leading axis, hence the shift
    axis_w = axis + 1 if axis != -1 else -1
    w_u_ = K.sum(w_u, axis=axis_w)
    w_l_ = K.sum(w_l, axis=axis_w)
    b_u_ = K.sum(b_u, axis=axis)
    b_l_ = K.sum(b_l, axis=axis)
    if mode == F_FORWARD.name:
        return [x_0, w_u_, b_u_, w_l_, b_l_]
    if mode == F_HYBRID.name:
        return [x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
def frac_pos(x, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name, **kwargs):
    """
    LiRPA implementation of 1/x, assuming the input is strictly positive.

    :param x: list of input tensors in the layout implied by ``mode``
    :param dc_decomp: difference-of-convex decomposition (not supported)
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: the updated list of tensors
    :raises NotImplementedError: if ``dc_decomp`` is True
    """
    if dc_decomp:
        raise NotImplementedError()
    # frac_pos is convex for positive values
    if mode ==F_IBP.name:
        u_c, l_c = x
        # 1/x is decreasing on x > 0, so the constant bounds swap
        u_c_ = 1. / l_c
        l_c_ = 1. / u_c
        return [u_c_, l_c_]
    if mode == F_FORWARD.name:
        x_0, w_u,b_u, w_l, b_l = x
        u_c = get_upper(x_0, w_u, b_u, convex_domain=convex_domain)
        # NOTE(review): the lower bound is derived from the *upper* affine
        # pair (w_u, b_u); log()/exp() in this file use (w_l, b_l) for
        # get_lower — confirm this is intentional.
        l_c = get_lower(x_0, w_u, b_u, convex_domain=convex_domain)
    if mode == F_HYBRID.name:
        x_0, u_c, w_u, b_u, l_c, w_l, b_l = x
    u_c_ = 1./l_c
    l_c_ = 1./u_c
    # slope between the transformed endpoints, guarded against u_c == l_c
    w_u_0 = (u_c_-l_c_)/K.maximum(u_c-l_c, K.epsilon())
    b_u_0 = l_c_ - w_u_0*l_c
    # tangent of 1/x at the midpoint y: 1/x >= 2/y - x/y**2
    y = (u_c+l_c)/2.
    b_l_0 = 2./y
    w_l_0 = -1/y**2
    # NOTE(review): the bias compositions below multiply by b_u_0/b_l_0
    # instead of the slopes w_u_0/w_l_0; the analogous compositions in
    # log()/exp() use `w_*_0 * b + b_*_0` — verify before relying on
    # these bounds.
    w_u_ = w_u_0[:, None] * w_l
    b_u_ = b_u_0 * b_l + b_u_0
    w_l_ = w_l_0[:, None] * w_u
    b_l_ = b_l_0 * b_u + b_l_0
    if mode == F_FORWARD.name:
        return [x_0, w_u_, b_u_, w_l_, b_l_]
    if mode == F_HYBRID.name:
        return [x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
def maximum(inputs_0, inputs_1, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name, finetune=False, **kwargs):
    """
    LiRPA implementation of the element-wise maximum,
    computed as inputs_0 + relu(inputs_1 - inputs_0).

    :param inputs_0: list of tensors
    :param inputs_1: list of tensors
    :param dc_decomp: whether a difference-of-convex decomposition is propagated
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :param finetune: if True, kwargs['finetune_params'] is forwarded to relu_
    :return: maximum(inputs_0, inputs_1)
    """
    # max(a, b) = a + relu(b - a)
    diff = substract(inputs_1, inputs_0, dc_decomp=dc_decomp, convex_domain=convex_domain, mode=mode)
    if finetune:
        rectified = relu_(
            diff,
            dc_decomp=dc_decomp,
            convex_domain=convex_domain,
            mode=mode,
            finetune=kwargs['finetune_params'],
        )
    else:
        rectified = relu_(diff, dc_decomp=dc_decomp, convex_domain=convex_domain, mode=mode)
    return add(
        rectified,
        inputs_0,
        dc_decomp=dc_decomp,
        convex_domain=convex_domain,
        mode=mode,
    )
def minimum(inputs_0, inputs_1, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name, finetune=False, **kwargs):
    """
    LiRPA implementation of the element-wise minimum,
    computed as -maximum(-inputs_0, -inputs_1).

    :param inputs_0: list of tensors
    :param inputs_1: list of tensors
    :param dc_decomp: whether a difference-of-convex decomposition is propagated
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :param finetune: forwarded to maximum
    :return: minimum(inputs_0, inputs_1)
    """
    neg_0 = minus(inputs_0, dc_decomp=dc_decomp, mode=mode)
    neg_1 = minus(inputs_1, dc_decomp=dc_decomp, mode=mode)
    neg_max = maximum(
        neg_0,
        neg_1,
        dc_decomp=dc_decomp,
        convex_domain=convex_domain,
        mode=mode,
        finetune=finetune,
        **kwargs
    )
    return minus(neg_max, dc_decomp=dc_decomp, mode=mode)
# convex hull of the maximum between two functions
def max_(x, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name, axis=-1, finetune=False, **kwargs):
    """
    LiRPA implementation of max(x, axis)
    :param x: list of tensors
    :param dc_decomp: boolean that indicates
    whether we return a difference of convex decomposition of our layer
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :param axis: axis to perform the maximum
    :param finetune: if True, kwargs carries per-slice finetuning parameters
    :return: max operation along an axis
    """
    if dc_decomp:
        h, g = x[-2:]
    nb_tensor = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    if mode == F_HYBRID.name:
        # y, x_0, u_c, w_u, b_u, l_c, w_l, b_l = x[:8]
        x_0, u_c, w_u, b_u, l_c, w_l, b_l = x[:nb_tensor]
    if mode == F_FORWARD.name:
        # y, x_0, w_u, b_u, w_l, b_l = x[:6]
        x_0, w_u, b_u, w_l, b_l = x[:nb_tensor]
    if mode == F_IBP.name:
        # y, x_0, u_c, l_c = x[:4]
        u_c, l_c = x[:nb_tensor]
    if mode == F_IBP.name and not dc_decomp:
        # fast path: apply the max directly on the constant bounds
        u_c_ = K.max(u_c, axis=axis)
        l_c_ = K.max(l_c, axis=axis)
        return [u_c_, l_c_]
    input_shape = K.int_shape(x[-1])
    max_dim = input_shape[axis]
    # do some transpose so that the last axis is also at the end
    # strategy: split every tensor into max_dim slices along the reduced
    # axis, then fold the pairwise element-wise maximum over the slices
    if dc_decomp:
        h_list = tf.split(h, max_dim, axis)
        g_list = tf.split(g, max_dim, axis)
        # "+ 0 * t" builds a fresh tensor seeded with the first slice
        h_tmp = h_list[0] + 0 * (h_list[0])
        g_tmp = g_list[0] + 0 * (g_list[0])
    # y_list = tf.split(y, max_dim, axis)
    # y_tmp = y_list[0] + 0 * (y_list[0])
    if mode in [F_HYBRID.name, F_IBP.name]:
        u_c_list = tf.split(u_c, max_dim, axis)
        l_c_list = tf.split(l_c, max_dim, axis)
        u_c_tmp = u_c_list[0] + 0 * (u_c_list[0])
        l_c_tmp = l_c_list[0] + 0 * (l_c_list[0])
    if mode in [F_HYBRID.name, F_FORWARD.name]:
        b_u_list = tf.split(b_u, max_dim, axis)
        b_l_list = tf.split(b_l, max_dim, axis)
        b_u_tmp = b_u_list[0] + 0 * (b_u_list[0])
        b_l_tmp = b_l_list[0] + 0 * (b_l_list[0])
        # weight tensors carry one extra leading axis, hence the shift
        if axis == -1:
            w_u_list = tf.split(w_u, max_dim, axis)
            w_l_list = tf.split(w_l, max_dim, axis)
        else:
            w_u_list = tf.split(w_u, max_dim, axis + 1)
            w_l_list = tf.split(w_l, max_dim, axis + 1)
        w_u_tmp = w_u_list[0] + 0 * (w_u_list[0])
        w_l_tmp = w_l_list[0] + 0 * (w_l_list[0])
    if finetune:
        # one finetuning parameter slice per position along the reduced axis
        key = [e for e in kwargs.keys()][0]
        params = kwargs[key][0]
        params_ = [e[0] for e in tf.split(params[None], max_dim, axis)]
    output_tmp = []
    if mode == F_HYBRID.name:
        # output_tmp = [y_tmp,x_0,u_c_tmp,w_u_tmp,b_u_tmp,l_c_tmp,w_l_tmp,b_l_tmp,]
        output_tmp = [
            x_0,
            u_c_tmp,
            w_u_tmp,
            b_u_tmp,
            l_c_tmp,
            w_l_tmp,
            b_l_tmp,
        ]
        # fold the running maximum over the remaining slices
        for i in range(1, max_dim):
            # output_i = [y_list[i], x_0, u_c_list[i], w_u_list[i], b_u_list[i], l_c_list[i], w_l_list[i], b_l_list[i]]
            output_i = [x_0, u_c_list[i], w_u_list[i], b_u_list[i], l_c_list[i], w_l_list[i], b_l_list[i]]
            if finetune:
                output_tmp = maximum(output_tmp, output_i, dc_decomp=False, mode=mode, finetune=finetune, finetune_params=params_[i])
            else:
                output_tmp = maximum(output_tmp, output_i, dc_decomp=False, mode=mode)
    if mode == F_IBP.name:
        # output_tmp = [y_tmp,x_0,u_c_tmp,l_c_tmp,]
        output_tmp = [
            u_c_tmp,
            l_c_tmp,
        ]
        for i in range(1, max_dim):
            # output_i = [y_list[i], x_0, u_c_list[i], l_c_list[i]]
            output_i = [u_c_list[i], l_c_list[i]]
            output_tmp = maximum(output_tmp, output_i, dc_decomp=False, mode=mode, finetune=finetune)
    if mode == F_FORWARD.name:
        # output_tmp = [y_tmp,x_0,w_u_tmp,b_u_tmp,w_l_tmp,b_l_tmp,]
        output_tmp = [
            x_0,
            w_u_tmp,
            b_u_tmp,
            w_l_tmp,
            b_l_tmp,
        ]
        for i in range(1, max_dim):
            # output_i = [y_list[i], x_0, w_u_list[i], b_u_list[i], w_l_list[i], b_l_list[i]]
            output_i = [x_0, w_u_list[i], b_u_list[i], w_l_list[i], b_l_list[i]]
            if finetune:
                output_tmp = maximum(output_tmp, output_i, dc_decomp=False, mode=mode, finetune=finetune, finetune_params=params_[i])
            else:
                output_tmp = maximum(output_tmp, output_i, dc_decomp=False, mode=mode)
    # reduce the dimension
    if mode == F_IBP.name:
        # _, _, u_c_, l_c_ = output_tmp[:4]
        u_c_, l_c_ = output_tmp[:nb_tensor]
    if mode == F_FORWARD.name:
        # _, _, w_u_, b_u_, w_l_, b_l_ = output_tmp[:6]
        _, w_u_, b_u_, w_l_, b_l_ = output_tmp[:nb_tensor]
    if mode == F_HYBRID.name:
        # _, _, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_ = output_tmp[:8]
        _, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_ = output_tmp[:nb_tensor]
    # drop the (now size-1) reduced axis from every tensor
    if mode in [F_IBP.name, F_HYBRID.name]:
        u_c_ = K.squeeze(u_c_, axis)
        l_c_ = K.squeeze(l_c_, axis)
    if mode in [F_HYBRID.name, F_FORWARD.name]:
        b_u_ = K.squeeze(b_u_, axis)
        b_l_ = K.squeeze(b_l_, axis)
        if axis == -1:
            w_u_ = K.squeeze(w_u_, axis)
            w_l_ = K.squeeze(w_l_, axis)
        else:
            w_u_ = K.squeeze(w_u_, axis + 1)
            w_l_ = K.squeeze(w_l_, axis + 1)
    if dc_decomp:
        g_ = K.sum(g, axis=axis)
        h_ = K.max(h + g, axis=axis) - g_
    if mode == F_HYBRID.name:
        # tighten the constant bounds with the affine ones
        upper_ = get_upper(x_0, w_u_, b_u_, convex_domain)
        u_c_ = K.minimum(upper_, u_c_)
        lower_ = get_lower(x_0, w_l_, b_l_, convex_domain)
        l_c_ = K.maximum(lower_, l_c_)
    # y_ = K.max(y, axis=axis)
    if mode == F_HYBRID.name:
        # output = [y_, x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
        output = [x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
    if mode == F_IBP.name:
        # output = [y_, x_0, u_c_, l_c_]
        output = [u_c_, l_c_]
    if mode == F_FORWARD.name:
        # output = [y_, x_0, w_u_, b_u_, w_l_, b_l_]
        output = [x_0, w_u_, b_u_, w_l_, b_l_]
    if dc_decomp:
        output += [h_, g_]
    return output
def softmax_to_linear(model):
    """
    linearize the softmax layer for verification
    :param model: Keras Model
    :return: a tuple (model, changed) where changed indicates whether a
        softmax output was actually replaced by a linear activation
    """
    layer = model.layers[-1]
    # check that layer is not an instance of the object Softmax
    if isinstance(layer, keras.layers.Softmax):
        # rebuild the model so that the Softmax layer is bypassed
        model_normalize = keras.models.Model(model.get_input_at(0), keras.layers.Activation("linear")(layer.input))
        return model_normalize, True
    if hasattr(layer, "activation"):
        if not layer.get_config()["activation"] == "softmax":
            return model, False
        # NOTE(review): get_config() conventionally returns a fresh dict, so
        # the mutation below likely has no effect; the assignment to
        # layer.activation on the next line is what changes the model.
        layer.get_config()["activation"] = "linear"
        layer.activation = keras.activations.get("linear")
        return model, True
    return model, False
def linear_to_softmax(model):
    """
    Restore a softmax activation on the last layer of the model.

    :param model: Keras Model
    :return: the same model, with its final activation set to softmax
    """
    last_layer = model.layers[-1]
    last_layer.activation = keras.activations.get("softmax")
    return model
def minus(inputs, mode=F_HYBRID.name, dc_decomp=False, **kwargs):
    """
    LiRPA implementation of the negation minus(x) = -x.

    Negation swaps the roles of the upper and lower bounds.
    :param inputs: list of input tensors in the layout implied by ``mode``
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :param dc_decomp: whether the (h, g) decomposition is appended
    :return: the updated list of tensors representing -x
    """
    nb_tensor = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    if mode == F_IBP.name:
        u, l = inputs[:nb_tensor]
    elif mode == F_FORWARD.name:
        x, w_u, b_u, w_l, b_l = inputs[:nb_tensor]
    elif mode == F_HYBRID.name:
        x, u, w_u, b_u, l, w_l, b_l = inputs[:nb_tensor]
    if dc_decomp:
        h, g = inputs[-2:]
    if mode in [F_IBP.name, F_HYBRID.name]:
        # constant bounds: -x lies below -l and above -u
        u_, l_ = -l, -u
    if mode in [F_FORWARD.name, F_HYBRID.name]:
        # affine bounds swap and change sign as well
        w_u_, b_u_ = -w_l, -b_l
        w_l_, b_l_ = -w_u, -b_u
    if mode == F_IBP.name:
        output = [u_, l_]
    elif mode == F_FORWARD.name:
        output = [x, w_u_, b_u_, w_l_, b_l_]
    elif mode == F_HYBRID.name:
        output = [x, u_, w_u_, b_u_, l_, w_l_, b_l_]
    if dc_decomp:
        output += [-g, -h]
    return output
def multiply_old(inputs_0, inputs_1, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name):
    """
    LiRPA implementation of (element-wise) multiply(x,y)=-x*y.
    :param inputs_0: list of tensors
    :param inputs_1: list of tensors
    :param dc_decomp: boolean that indicates
    whether we return a difference of convex decomposition of our layer
    whether we propagate upper and lower bounds on the values of the gradient
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: bounds on the element-wise product of the two operands
    :raises NotImplementedError: if ``dc_decomp`` is True
    """
    if dc_decomp:
        raise NotImplementedError()
    nb_tensor = StaticVariables(dc_decomp, mode=mode).nb_tensors
    if mode == F_IBP.name:
        # y0, x, u0, l0 = inputs_0[:4]
        # y1, _, u1, l1 = inputs_1[:4]
        u0, l0 = inputs_0[:nb_tensor]
        u1, l1 = inputs_1[:nb_tensor]
    if mode == F_FORWARD.name:
        x, w_u0, b_u0, w_l0, b_l0 = inputs_0[:nb_tensor]
        _, w_u1, b_u1, w_l1, b_l1 = inputs_1[:nb_tensor]
        # recover constant bounds from the affine ones
        u0 = get_upper(x, w_u0, b_u0, convex_domain=convex_domain)
        l0 = get_lower(x, w_l0, b_l0, convex_domain=convex_domain)
        u1 = get_upper(x, w_u1, b_u1, convex_domain=convex_domain)
        l1 = get_lower(x, w_l1, b_l1, convex_domain=convex_domain)
    if mode == F_HYBRID.name:
        x, u0, w_u0, b_u0, l0, w_l0, b_l0 = inputs_0[:nb_tensor]
        _, u1, w_u1, b_u1, l1, w_l1, b_l1 = inputs_1[:nb_tensor]
    # using McCormick's inequalities to derive bounds
    # xy<= x_u*y + x*y_L - xU*y_L
    # xy<= x*y_u + x_L*y - x_L*y_U
    # xy >=x_L*y + x*y_L -x_L*y_L
    # xy >= x_U*y + x*y_U - x_U*y_U
    if mode in [F_IBP.name, F_HYBRID.name]:
        upper_0 = K.relu(u0) * u1 - K.relu(-u0) * l1 + K.relu(l1) * u0 - K.relu(-l1) * l0 - u0 * l1
        upper_1 = K.relu(u1) * u0 - K.relu(-u1) * l0 + K.relu(l0) * u1 - K.relu(-l0) * l1 - u1 * l0
        lower_0 = K.relu(l0) * l1 - K.relu(-l0) * u1 + K.relu(l1) * l0 - K.relu(-l1) * u0 - l0 * l1
        lower_1 = K.relu(u1) * l0 - K.relu(-u1) * u0 + K.relu(u0) * l1 - K.relu(-u0) * u1 - u1 * u0
    if mode in [F_FORWARD.name, F_HYBRID.name]:
        # affine versions of the two McCormick upper/lower candidates
        w_0_u = (
            K.relu(u0)[:, None] * w_u1
            - K.relu(-u0)[:, None] * w_l1
            + K.relu(l1)[:, None] * w_u0
            - K.relu(-l1)[:, None] * w_l0
        )
        w_1_u = (
            K.relu(u1)[:, None] * w_u0
            - K.relu(-u1)[:, None] * w_l0
            + K.relu(l0)[:, None] * w_u1
            - K.relu(-l0)[:, None] * w_l1
        )
        w_0_l = (
            K.relu(l0)[:, None] * w_l1
            - K.relu(-l0)[:, None] * w_u1
            + K.relu(l1)[:, None] * w_l0
            - K.relu(-l1)[:, None] * w_u0
        )
        w_1_l = (
            K.relu(u1)[:, None] * w_l0
            - K.relu(-u1)[:, None] * w_u0
            + K.relu(u0)[:, None] * w_l1
            - K.relu(-u0)[:, None] * w_u1
        )
        b_u_0 = K.relu(u0) * b_u1 - K.relu(-u0) * b_l1 + K.relu(l1) * b_u0 - K.relu(-l1) * b_l0 - u0 * l1
        b_u_1 = K.relu(u1) * b_u0 - K.relu(-u1) * b_l0 + K.relu(l0) * b_u1 - K.relu(-l0) * b_l1 - u1 * l0
        b_l_0 = K.relu(l0) * b_l1 - K.relu(-l0) * b_u1 + K.relu(l1) * b_l0 - K.relu(-l1) * b_u0 - l0 * l1
        b_l_1 = K.relu(u1) * b_l0 - K.relu(-u1) * b_u0 + K.relu(u0) * b_l1 - K.relu(-u0) * b_u1 - u1 * u0
    if mode == F_HYBRID.name:
        inputs_0_ = [x, upper_0, w_0_u, b_u_0, lower_0, w_0_l, b_l_0]
        inputs_1_ = [x, upper_1, w_1_u, b_u_1, lower_1, w_1_l, b_l_1]
    if mode == F_FORWARD.name:
        inputs_0_ = [x, w_0_u, b_u_0, w_0_l, b_l_0]
        inputs_1_ = [x, w_1_u, b_u_1, w_1_l, b_l_1]
    if mode == F_IBP.name:
        # take the tightest of the two candidates
        output = [K.minimum(upper_0, upper_1), K.maximum(lower_0, lower_1)]
    else:
        # combine the two candidate relaxations: element-wise min of the
        # upper bounds and element-wise max of the lower bounds
        output_upper = minimum(inputs_0_, inputs_1_, dc_decomp=dc_decomp, convex_domain=convex_domain, mode=mode)
        output_lower = maximum(inputs_0_, inputs_1_, dc_decomp=dc_decomp, convex_domain=convex_domain, mode=mode)
        if mode == F_FORWARD.name:
            _, w_u_, b_u_, _, _ = output_upper
            _, _, _, w_l_, b_l_ = output_lower
            output = [x, w_u_, b_u_, w_l_, b_l_]
        if mode == F_HYBRID.name:
            _, u_, w_u_, b_u_, _, _, _ = output_upper
            # BUG FIX: the lower-bound components must come from
            # output_lower; the original unpacked output_upper twice
            # (compare with the F_FORWARD branch just above).
            _, _, _, _, l_, w_l_, b_l_ = output_lower
            output = [x, u_, w_u_, b_u_, l_, w_l_, b_l_]
    return output
def multiply(inputs_0, inputs_1, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name):
    """
    LiRPA implementation of (element-wise) multiply(x,y)=-x*y.
    :param inputs_0: list of tensors
    :param inputs_1: list of tensors
    :param dc_decomp: boolean that indicates
    whether we return a difference of convex decomposition of our layer
    whether we propagate upper and lower bounds on the values of the gradient
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: bounds on the element-wise product of the two operands
    :raises NotImplementedError: if ``dc_decomp`` is True
    """
    if dc_decomp:
        raise NotImplementedError()
    nb_tensor = StaticVariables(dc_decomp, mode=mode).nb_tensors
    if mode == F_IBP.name:
        u0, l0 = inputs_0[:nb_tensor]
        u1, l1 = inputs_1[:nb_tensor]
    if mode == F_FORWARD.name:
        x, w_u0, b_u0, w_l0, b_l0 = inputs_0[:nb_tensor]
        _, w_u1, b_u1, w_l1, b_l1 = inputs_1[:nb_tensor]
        # recover constant bounds from the affine ones
        u0 = get_upper(x, w_u0, b_u0, convex_domain=convex_domain)
        l0 = get_lower(x, w_l0, b_l0, convex_domain=convex_domain)
        u1 = get_upper(x, w_u1, b_u1, convex_domain=convex_domain)
        l1 = get_lower(x, w_l1, b_l1, convex_domain=convex_domain)
    if mode == F_HYBRID.name:
        x, u0, w_u0, b_u0, l0, w_l0, b_l0 = inputs_0[:nb_tensor]
        _, u1, w_u1, b_u1, l1, w_l1, b_l1 = inputs_1[:nb_tensor]
    # using McCormick's inequalities to derive bounds
    # xy<= x_u*y + x*y_L - xU*y_L
    # xy<= x*y_u + x_L*y - x_L*y_U
    # xy >=x_L*y + x*y_L -x_L*y_L
    if mode in [F_IBP.name, F_HYBRID.name]:
        u_c_0_ = K.maximum(u0*u1, u0*l1) + K.maximum(u0*l1, l0*l1) - u0*l1
        u_c_1_ = K.maximum(u1*u0, u1*l0) + K.maximum(u1*l0, l1*l0) - u1*l0
        u_c_ = K.minimum(u_c_0_, u_c_1_)
        l_c_ = K.minimum(l0*l1, l0*u1) + K.minimum(l0*l1, u0*l1) - l0*l1
    if mode in [F_HYBRID.name, F_FORWARD.name]:
        #xy <= x_u * y + x * y_L - xU * y_L
        cx_u_pos = K.maximum(u0, 0.)
        cx_u_neg = K.minimum(u0, 0.)
        cy_l_pos = K.maximum(l1, 0.)
        cy_l_neg = K.minimum(l1, 0.)
        w_u_ = cx_u_pos[:,None]*w_u1 + cx_u_neg[:,None]*w_l1 + cy_l_pos[:,None]*w_u0 + cy_l_neg[:,None]*w_l0
        b_u_ = cx_u_pos*b_u1 + cx_u_neg*b_l1 + cy_l_pos*b_u0 + cy_l_neg*b_l0 - u0*l1
        # xy >= x_L*y + x*y_L - x_L*y_L
        # (fixed comment: the lower bound below is built from l0 and l1;
        # the previously computed, unused splits of u1 were removed)
        cx_l_pos = K.maximum(l0, 0.)
        cx_l_neg = K.minimum(l0, 0.)
        w_l_ = cx_l_pos[:,None]*w_l1 + cx_l_neg[:,None]*w_u1 + cy_l_pos[:,None]*w_l0 + cy_l_neg[:,None]*w_u0
        b_l_ = cx_l_pos*b_l1 + cx_l_neg*b_u1 + cy_l_pos*b_l0 + cy_l_neg*b_u0 - l0*l1
    if mode == F_IBP.name:
        return [u_c_, l_c_]
    if mode == F_FORWARD.name:
        return [x, w_u_, b_u_, w_l_, b_l_]
    if mode == F_HYBRID.name:
        return [x, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
def permute_dimensions(x, axis, mode=F_HYBRID.name, axis_perm=1):
    """
    LiRPA implementation of (element-wise) permute(x,axis)
    :param x: list of input tensors
    :param axis: axis on which we apply the permutation
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :param axis_perm: see DecomonPermute operator
    :return: the updated list of tensors
    """
    if len(x[0].shape) <= 2:
        # nothing to permute on flat tensors
        return x
    index = np.arange(len(x[0].shape))
    index = np.insert(np.delete(index, axis), axis_perm, axis)
    nb_tensor = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    if mode == F_IBP.name:
        # BUG FIX: in IBP mode the list holds only [u_c, l_c]
        # (nb_tensor == 2); the previous indices x[2]/x[3] were leftovers
        # of the old [y, x_0, u_c, l_c] layout and would raise IndexError.
        u_c, l_c = x[:nb_tensor]
        return [
            K.permute_dimensions(u_c, index),
            K.permute_dimensions(l_c, index),
        ]
    # weight tensors carry one extra leading axis: shift the permutation
    index_w = np.arange(len(x[0].shape) + 1)
    index_w = np.insert(np.delete(index_w, axis), axis_perm + 1, axis)
    if mode == F_FORWARD.name:
        # y, x_0, w_u, b_u, w_l, b_l = x
        x_0, w_u, b_u, w_l, b_l = x[:nb_tensor]
        return [
            x_0,
            K.permute_dimensions(w_u, index_w),
            K.permute_dimensions(b_u, index),
            K.permute_dimensions(w_l, index_w),
            K.permute_dimensions(b_l, index),
        ]
    if mode == F_HYBRID.name:
        # y, x_0, u, w_u, b_u, l, w_l, b_l = x
        x_0, u, w_u, b_u, l, w_l, b_l = x[:nb_tensor]
        return [
            x_0,
            K.permute_dimensions(u, index),
            K.permute_dimensions(w_u, index_w),
            K.permute_dimensions(b_u, index),
            K.permute_dimensions(l, index),
            K.permute_dimensions(w_l, index_w),
            K.permute_dimensions(b_l, index),
        ]
def broadcast(inputs, n, axis, mode):
    """
    LiRPA implementation of broadcasting: insert ``n`` new axes at position
    ``axis`` in every bound tensor.

    :param inputs: list of input tensors in the layout implied by ``mode``
    :param n: number of axes to insert
    :param axis: position of the inserted axes
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: the updated list of tensors
    """
    nb_tensor = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    if mode == F_IBP.name:
        u, l = inputs[:nb_tensor]
    elif mode == F_FORWARD.name:
        x, w_u, b_u, w_l, b_l = inputs[:nb_tensor]
    elif mode == F_HYBRID.name:
        x, u, w_u, b_u, l, w_l, b_l = inputs[:nb_tensor]
    if mode in [F_IBP.name, F_HYBRID.name]:
        # constant bounds receive the plain axis
        for _ in range(n):
            u = K.expand_dims(u, axis)
            l = K.expand_dims(l, axis)
    # weight tensors carry one extra leading axis, hence the shift
    axis_w = -1 if axis == -1 else axis + 1
    if mode in [F_FORWARD.name, F_HYBRID.name]:
        for _ in range(n):
            b_u = K.expand_dims(b_u, axis)
            b_l = K.expand_dims(b_l, axis)
            w_u = K.expand_dims(w_u, axis_w)
            w_l = K.expand_dims(w_l, axis_w)
    if mode == F_IBP.name:
        output = [u, l]
    elif mode == F_FORWARD.name:
        output = [x, w_u, b_u, w_l, b_l]
    elif mode == F_HYBRID.name:
        output = [x, u, w_u, b_u, l, w_l, b_l]
    return output
def split(input_, axis=-1, mode=F_HYBRID.name):
    """
    LiRPA implementation of split
    :param input_: list of input tensors in the layout implied by ``mode``
    :param axis: axis along which the tensors are split
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: a list of bound lists, one per produced chunk
    """
    nb_tensor = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    if mode == F_IBP.name:
        # y_, x_, u_, l_ = input_
        u_, l_ = input_[:nb_tensor]
    if mode == F_HYBRID.name:
        # y_, x_, u_, w_u_, b_u_, l_, w_l_, b_l_ = input_
        x_, u_, w_u_, b_u_, l_, w_l_, b_l_ = input_[:nb_tensor]
    if mode == F_FORWARD.name:
        # y_, x_, w_u_, b_u_, w_l_, b_l_ = input_
        x_, w_u_, b_u_, w_l_, b_l_ = input_[:nb_tensor]
    # y_list = tf.split(y_, 1, axis=axis)
    # NOTE(review): tf.split with num_or_size_splits=1 always produces a
    # single chunk, so n is always 1 below — confirm whether the intended
    # argument was the number of slices along `axis`.
    if mode in [F_IBP.name, F_HYBRID.name]:
        u_list = tf.split(u_, 1, axis=axis)
        l_list = tf.split(l_, 1, axis=axis)
        n = len(u_list)
    if mode in [F_HYBRID.name, F_FORWARD.name]:
        b_u_list = tf.split(b_u_, 1, axis=axis)
        b_l_list = tf.split(b_l_, 1, axis=axis)
        n = len(b_u_list)
        if axis != -1:
            # weight tensors carry one extra leading axis
            axis += 1
        w_u_list = tf.split(w_u_, 1, axis=axis)
        w_l_list = tf.split(w_l_, 1, axis=axis)
    if mode == F_IBP.name:
        # outputs = [[y_list[i], x_, u_list[i], l_list[i]] for i in range(n)]
        outputs = [[u_list[i], l_list[i]] for i in range(n)]
    if mode == F_FORWARD.name:
        # outputs = [[y_list[i], x_, w_u_list[i], b_u_list[i], w_l_list[i], b_l_list[i]] for i in range(n)]
        outputs = [[x_, w_u_list[i], b_u_list[i], w_l_list[i], b_l_list[i]] for i in range(n)]
    if mode == F_HYBRID.name:
        # outputs = [[y_list[i], x_, u_list[i], w_u_list[i], b_u_list[i], l_list[i], w_l_list[i], b_l_list[i]] for i in range(n)]
        outputs = [[x_, u_list[i], w_u_list[i], b_u_list[i], l_list[i], w_l_list[i], b_l_list[i]] for i in range(n)]
    return outputs
def sort(input_, axis=-1, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name):
    """
    LiRPA implementation of sort by selection
    :param input_: list of input tensors in the layout implied by ``mode``
    :param axis: axis along which the values are sorted
    :param dc_decomp: difference-of-convex decomposition (not supported)
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: the updated list of tensors, sorted along ``axis``
    :raises NotImplementedError: if ``dc_decomp`` is True
    """
    if dc_decomp:
        raise NotImplementedError()
    # remove grad bounds
    nb_tensor = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    if mode == F_IBP.name:
        # y, x_0, u_c, l_c = input_
        u_c, l_c = input_[:nb_tensor]
    if mode == F_HYBRID.name:
        # y, x_0, u_c, w_u, b_u, l_c, w_l, b_l = input_
        x_0, u_c, w_u, b_u, l_c, w_l, b_l = input_[:nb_tensor]
    if mode == F_FORWARD.name:
        # y_, x_0, w_u, b_u, w_l, b_l = input_
        x_0, w_u, b_u, w_l, b_l = input_[:nb_tensor]
    # NOTE(review): input_ is a Python list here, which has no .shape
    # attribute — this likely should read the shape of one of the tensors
    # (e.g. input_[-1].shape); confirm against the callers.
    if axis == -1:
        n = input_.shape[-1]
        axis = len(input_.shape) - 1
    else:
        n = input_.shape[axis]
    # y_ = tf.sort(y, axis=axis)
    # what about splitting elements
    op = lambda x: tf.split(x, n, axis=axis)
    # y_list = op(y)
    if mode in [F_IBP.name, F_HYBRID.name]:
        u_c_list = op(u_c)
        l_c_list = op(l_c)
    if mode in [F_HYBRID.name, F_FORWARD.name]:
        # weight tensors carry one extra leading axis
        w_u_list = tf.split(w_u, n, axis=axis + 1)
        b_u_list = op(b_u)
        w_l_list = tf.split(w_l, n, axis=axis + 1)
        b_l_list = op(b_l)
    def get_input(mode, i):
        # assemble the bound list of slice i in the layout of `mode`
        if mode == F_IBP.name:
            # return [y_list[i], x_0, u_c_list[i], l_c_list[i]]
            return [u_c_list[i], l_c_list[i]]
        if mode == F_FORWARD.name:
            # return [y_list[i], x_0, w_u_list[i], b_u_list[i], w_l_list[i], b_l_list[i]]
            return [x_0, w_u_list[i], b_u_list[i], w_l_list[i], b_l_list[i]]
        if mode == F_HYBRID.name:
            # return [y_list[i], x_0, u_c_list[i], w_u_list[i], b_u_list[i], l_c_list[i], w_l_list[i], b_l_list[i]]
            return [x_0, u_c_list[i], w_u_list[i], b_u_list[i], l_c_list[i], w_l_list[i], b_l_list[i]]
    def set_input(input_, mode, i):
        # write the bound list `input_` back into slot i of the slice lists
        if mode == F_IBP.name:
            # y_i, _, u_i, l_i = input_
            u_i, l_i = input_
        if mode == F_FORWARD.name:
            # y_i, _, w_u_i, b_u_i, w_l_i, b_l_i = input_
            _, w_u_i, b_u_i, w_l_i, b_l_i = input_
        if mode == F_HYBRID.name:
            # y_i, _, u_i, w_u_i, b_u_i, l_i, w_l_i, b_l_i = input_
            _, u_i, w_u_i, b_u_i, l_i, w_l_i, b_l_i = input_
        # y_list[i] = y_i
        if mode in [F_IBP.name, F_HYBRID.name]:
            u_c_list[i] = u_i
            l_c_list[i] = l_i
        if mode in [F_FORWARD.name, F_HYBRID.name]:
            w_u_list[i] = w_u_i
            w_l_list[i] = w_l_i
            b_u_list[i] = b_u_i
            b_l_list[i] = b_l_i
    # use selection sort
    # after each swap, slot i keeps the (bounds of the) smaller value and
    # slot j the larger one
    for i in range(n - 1):
        for j in range(i + 1, n):
            input_i = get_input(mode, i)
            input_j = get_input(mode, j)
            output_a = maximum(input_i, input_j, mode=mode, convex_domain=convex_domain, dc_decomp=dc_decomp)
            output_b = minimum(input_i, input_j, mode=mode, convex_domain=convex_domain, dc_decomp=dc_decomp)
            set_input(output_a, mode, j)
            set_input(output_b, mode, i)
    op_ = lambda x: K.concatenate(x, axis)
    # update the inputs
    if mode in [F_IBP.name, F_HYBRID.name]:
        u_c_ = op_(u_c_list)
        l_c_ = op_(l_c_list)
    if mode in [F_FORWARD.name, F_HYBRID.name]:
        w_u_ = K.concatenate(w_u_list, axis + 1)
        w_l_ = K.concatenate(w_l_list, axis + 1)
        b_u_ = op_(b_u_list)
        b_l_ = op_(b_l_list)
    if mode == F_IBP.name:
        # output = [y_, x_0, u_c_, l_c_]
        output = [u_c_, l_c_]
    if mode == F_FORWARD.name:
        # output = [y_, x_0, w_u_, b_u_, w_l_, b_l_]
        output = [x_0, w_u_, b_u_, w_l_, b_l_]
    if mode == F_HYBRID.name:
        # output = [y_, x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
        output = [x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
    return output
def pow(inputs_, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name):
    """
    LiRPA implementation of the element-wise square pow(x) = x**2.

    :param inputs_: list of input tensors in the layout implied by ``mode``
    :param dc_decomp: whether a difference-of-convex decomposition is propagated
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: bounds on x**2
    """
    # x**2 == x * x, so delegate to the multiply relaxation
    square = multiply(inputs_, inputs_, dc_decomp=dc_decomp, convex_domain=convex_domain, mode=mode)
    return square
def abs(inputs_, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name):
    """
    LiRPA implementation of the absolute value |x| = relu(x) + relu(-x).

    :param inputs_: list of input tensors in the layout implied by ``mode``
    :param dc_decomp: whether a difference-of-convex decomposition is propagated
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: bounds on |x|
    """
    # positive part: relu(x)
    pos_part = relu_(inputs_, dc_decomp=dc_decomp, convex_domain=convex_domain, mode=mode)
    # negative part: -relu(-x), so that pos_part + neg_part == |x|... wait:
    # |x| = relu(x) + relu(-x); the inner minus negates x and the outer one
    # restores the sign convention expected by add
    neg_input = minus(inputs_, mode=mode)
    neg_part = minus(
        relu_(neg_input, dc_decomp=dc_decomp, convex_domain=convex_domain, mode=mode), mode=mode
    )
    return add(pos_part, neg_part, dc_decomp=dc_decomp, convex_domain=convex_domain, mode=mode)
def frac_pos_hull(inputs_, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name):
    """
    LiRPA implementation of 1/x for x>0
    :param inputs_: list of input tensors in the layout implied by ``mode``
    :param dc_decomp: difference-of-convex decomposition (not supported)
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :return: [w_u, b_u, w_l, b_l], scalar affine hull of 1/x on [l_c, u_c]
    :raises NotImplementedError: if ``dc_decomp`` is True
    """
    if dc_decomp:
        raise NotImplementedError()
    nb_tensor = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    if mode == F_IBP.name:
        # y, x_0, u_c, l_c = inputs_
        u_c, l_c = inputs_[:nb_tensor]
    if mode == F_FORWARD.name:
        # y, x_0, w_u, b_u, w_l, b_l = inputs_
        x_0, w_u, b_u, w_l, b_l = inputs_[:nb_tensor]
    if mode == F_HYBRID.name:
        # y, x_0, u_c, w_u, b_u, l_c, w_l, b_l = inputs_
        x_0, u_c, w_u, b_u, l_c, w_l, b_l = inputs_[:nb_tensor]
    if mode in [F_FORWARD.name, F_HYBRID.name]:
        u_c_ = get_upper(x_0, w_u, b_u, convex_domain=convex_domain)
        # NOTE(review): the lower bound is derived from the *upper* affine
        # pair (w_u, b_u); log()/exp() in this file use (w_l, b_l) here —
        # confirm this is intentional.
        l_c_ = get_lower(x_0, w_u, b_u, convex_domain=convex_domain)
        if mode == F_FORWARD.name:
            u_c = u_c_
            l_c = l_c_
        else:
            # keep the tightest of the constant and affine-derived bounds
            u_c = K.minimum(u_c_, u_c)
            l_c = K.maximum(l_c, l_c_)
    l_c = K.maximum(l_c, 1.0)
    z = (u_c + l_c) / 2.0
    # tangent of 1/x at z (1/x is convex on x>0): 1/x >= 2/z - x/z**2.
    # BUG FIX: K.pow requires an explicit exponent (K.pow(x, a));
    # the original K.pow(z) raised a TypeError at trace time.
    w_l = -1 / K.pow(z, 2)
    b_l = 2 / z
    # chord between (l_c, 1/l_c) and (u_c, 1/u_c): upper bound of 1/x
    w_u = (1.0 / u_c - 1.0 / l_c) / (u_c - l_c)
    b_u = 1.0 / u_c - w_u * u_c
    return [w_u, b_u, w_l, b_l]
# convex hull for min
def min_(x, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name, axis=-1, finetune=False, **kwargs):
    """
    LiRPA implementation of min(x, axis=axis), computed as -max_(-x, axis).

    :param x: list of input tensors in the layout implied by ``mode``
    :param dc_decomp: whether a difference-of-convex decomposition is propagated
    :param convex_domain: the type of convex domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :param axis: axis along which the minimum is taken
    :param finetune: forwarded to max_
    :return: min operation along an axis
    """
    # min(x) = -max(-x)
    neg_x = minus(x, mode=mode)
    neg_result = max_(
        neg_x,
        dc_decomp=dc_decomp,
        convex_domain=convex_domain,
        mode=mode,
        axis=axis,
        finetune=finetune,
        **kwargs
    )
    return minus(neg_result, mode=mode)
def expand_dims(inputs_, dc_decomp=False, mode=F_HYBRID.name, axis=-1, **kwargs):
    """
    LiRPA implementation of K.expand_dims: insert a new axis at ``axis`` in
    every bound tensor.

    :param inputs_: list of input tensors in the layout implied by ``mode``
    :param dc_decomp: whether the (h, g) decomposition is appended
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :param axis: position of the inserted axis
    :return: the updated list of tensors
    """
    nb_tensor = StaticVariables(dc_decomp=False, mode=mode).nb_tensors
    if mode == F_IBP.name:
        # y, x_0, u_c, l_c = inputs_[:4]
        u_c, l_c = inputs_[:nb_tensor]
    if mode == F_FORWARD.name:
        # y, x_0, w_u, b_u, w_l, b_l = inputs_[:6]
        x_0, w_u, b_u, w_l, b_l = inputs_[:nb_tensor]
    if mode == F_HYBRID.name:
        # y, x_0, u_c, w_u, b_u, l_c, w_l, b_l = inputs_[:8]
        x_0, u_c, w_u, b_u, l_c, w_l, b_l = inputs_[:nb_tensor]
    if dc_decomp:
        # NOTE(review): h and g are unpacked here but never expanded nor
        # included in the output below, so the DC decomposition is silently
        # dropped — confirm whether this is intentional.
        h, g = inputs_[-2:]
    if mode in [F_HYBRID.name, F_FORWARD.name]:
        # weight tensors carry one extra leading axis, hence the shift
        if axis == -1:
            axis_w = axis
        else:
            axis_w = axis + 1
    op = lambda t: K.expand_dims(t, axis)
    # y_ = op(y)
    if mode in [F_IBP.name, F_HYBRID.name]:
        u_c_ = op(u_c)
        l_c_ = op(l_c)
    if mode in [F_FORWARD.name, F_HYBRID.name]:
        b_u_ = op(b_u)
        b_l_ = op(b_l)
        w_u_ = K.expand_dims(w_u, axis_w)
        w_l_ = K.expand_dims(w_l, axis_w)
    if mode == F_IBP.name:
        # output = [y_, x_0, u_c_, l_c_]
        output = [u_c_, l_c_]
    if mode == F_FORWARD.name:
        # output = [y, x_0, w_u_, b_u_, w_l_, b_l_]
        output = [x_0, w_u_, b_u_, w_l_, b_l_]
    if mode == F_HYBRID.name:
        # output = [y, x_0, u_c_, w_u_, l_c_, w_l_, b_l_]
        output = [x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
    return output
def log(x, dc_decomp=False, convex_domain={}, mode=F_HYBRID.name, **kwargs):
    """Logarithm activation function.

    :param x: list of input tensors
    :param dc_decomp: difference-of-convex decomposition (not supported)
    :param convex_domain: the type of convex input domain
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :param kwargs: extra parameters
    :return: the updated list of tensors
    :raises NotImplementedError: if ``dc_decomp`` is True
    """
    if dc_decomp:
        raise NotImplementedError()
    if mode == F_IBP.name:
        u_c, l_c = x
        # log is increasing: apply it to both constant bounds
        return [K.log(u_c), K.log(l_c)]
    if mode == F_FORWARD.name:
        x_0, w_u, b_u, w_l, b_l = x
        u_c = get_upper(x_0, w_u, b_u, convex_domain=convex_domain)
        l_c = get_lower(x_0, w_l, b_l, convex_domain=convex_domain)
        # keep the lower bound strictly positive before taking the log
        l_c = K.maximum(K.epsilon(), l_c)
    if mode == F_HYBRID.name:
        x_0, u_c, w_u, b_u, l_c, w_l, b_l = x
    u_c_ = K.log(u_c)
    l_c_ = K.log(l_c)
    midpoint = (u_c + l_c) / 2.  # do finetuneting
    # chord between the endpoints: lower bound (log is concave)
    chord_slope = (u_c_ - l_c_) / K.maximum(u_c - l_c, K.epsilon())
    chord_bias = l_c_ - chord_slope * l_c
    # tangent at the midpoint: upper bound, log(x) <= x/m + log(m) - 1
    tangent_slope = 1 / midpoint
    tangent_bias = K.log(midpoint) - 1
    # compose the scalar relaxation with the incoming affine bounds
    w_u_ = tangent_slope[:, None] * w_u
    b_u_ = tangent_slope * b_u + tangent_bias
    w_l_ = chord_slope[:, None] * w_l
    b_l_ = chord_slope * b_l + chord_bias
    if mode == F_FORWARD.name:
        return [x_0, w_u_, b_u_, w_l_, b_l_]
    if mode == F_HYBRID.name:
        return [x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
def exp(x, dc_decomp=False, convex_domain=None, mode=F_HYBRID.name, **kwargs):
    """Exponential activation function (LiRPA bounds for ``exp``).

    :param x: list of input tensors; the layout depends on ``mode``
    :param dc_decomp: boolean that indicates
    whether we return a difference of convex decomposition of our layer
    :param convex_domain: the type of convex input domain (``None`` means ``{}``)
    :param mode: type of Forward propagation (IBP, Forward or Hybrid)
    :param kwargs: extra parameters
    :return: the updated list of tensors
    :raises NotImplementedError: if ``dc_decomp`` is True
    :raises ValueError: if ``mode`` is not one of IBP, Forward, Hybrid
    """
    if convex_domain is None:
        convex_domain = {}  # avoid the shared-mutable-default-argument pitfall
    if dc_decomp:
        raise NotImplementedError()
    if mode not in [F_IBP.name, F_FORWARD.name, F_HYBRID.name]:
        # previously an unknown mode fell through and raised a confusing
        # NameError (or returned None); fail fast with a clear message
        raise ValueError("unknown mode {}".format(mode))
    if mode == F_IBP.name:
        # exp is monotonically increasing: apply it directly to both
        # constant bounds
        return [K.exp(e) for e in x]
    if mode == F_FORWARD.name:
        x_0, w_u, b_u, w_l, b_l = x
        u_c = get_upper(x_0, w_u, b_u, convex_domain=convex_domain)
        l_c = get_lower(x_0, w_l, b_l, convex_domain=convex_domain)
    if mode == F_HYBRID.name:
        x_0, u_c, w_u, b_u, l_c, w_l, b_l = x
    u_c_ = K.exp(u_c)
    l_c_ = K.exp(l_c)
    y = (u_c+l_c)/2.  # midpoint tangency point; do finetuneting
    # exp is convex: the chord between (l_c, exp l_c) and (u_c, exp u_c)
    # is a valid UPPER bound ...
    w_u_0 = (u_c_ - l_c_)/K.maximum(u_c-l_c, K.epsilon())
    b_u_0 = l_c_ - w_u_0*l_c
    # ... and the tangent at y, i.e. exp(y)*t + exp(y)*(1 - y), is a valid
    # LOWER bound (removed a dead `slope = K.exp(y)` local that was never used)
    w_l_0 = K.exp(y)
    b_l_0 = w_l_0*(1-y)
    w_u_ = w_u_0[:,None]*w_u
    b_u_ = w_u_0*b_u + b_u_0
    w_l_ = w_l_0[:, None] * w_l
    b_l_ = w_l_0 * b_l + b_l_0
    if mode == F_FORWARD.name:
        return [x_0, w_u_, b_u_, w_l_, b_l_]
    if mode == F_HYBRID.name:
        return [x_0, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_]
| [
"tensorflow.python.keras.backend.abs",
"tensorflow.python.keras.backend.maximum",
"tensorflow.python.keras.backend.permute_dimensions",
"tensorflow.python.keras.backend.sum",
"tensorflow.split",
"tensorflow.keras.activations.get",
"tensorflow.python.keras.backend.log",
"tensorflow.python.keras.backend... | [((2859, 2880), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['w', 'z_value'], {}), '(w, z_value)\n', (2868, 2880), True, 'from tensorflow.python.keras import backend as K\n'), ((2893, 2914), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['w', 'z_value'], {}), '(w, z_value)\n', (2902, 2914), True, 'from tensorflow.python.keras import backend as K\n'), ((3607, 3628), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['w', 'z_value'], {}), '(w, z_value)\n', (3616, 3628), True, 'from tensorflow.python.keras import backend as K\n'), ((3641, 3662), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['w', 'z_value'], {}), '(w, z_value)\n', (3650, 3662), True, 'from tensorflow.python.keras import backend as K\n'), ((12991, 13010), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['x', '(2)'], {}), '(x, 2)\n', (13004, 13010), True, 'from tensorflow.python.keras import backend as K\n'), ((17549, 17573), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['x_vec', '(-2)'], {}), '(x_vec, -2)\n', (17562, 17573), True, 'from tensorflow.python.keras import backend as K\n'), ((21416, 21429), 'tensorflow.python.keras.backend.relu', 'K.relu', (['upper'], {}), '(upper)\n', (21422, 21429), True, 'from tensorflow.python.keras import backend as K\n'), ((21441, 21454), 'tensorflow.python.keras.backend.relu', 'K.relu', (['lower'], {}), '(lower)\n', (21447, 21454), True, 'from tensorflow.python.keras import backend as K\n'), ((23099, 23116), 'tensorflow.python.keras.backend.softplus', 'K.softplus', (['upper'], {}), '(upper)\n', (23109, 23116), True, 'from tensorflow.python.keras import backend as K\n'), ((23128, 23145), 'tensorflow.python.keras.backend.softplus', 'K.softplus', (['lower'], {}), '(lower)\n', (23138, 23145), True, 'from tensorflow.python.keras import backend as K\n'), ((27189, 27212), 'tensorflow.python.keras.backend.sum', 'K.sum', (['w_u'], {'axis': 'axis_w'}), '(w_u, 
axis=axis_w)\n', (27194, 27212), True, 'from tensorflow.python.keras import backend as K\n'), ((27224, 27247), 'tensorflow.python.keras.backend.sum', 'K.sum', (['w_l'], {'axis': 'axis_w'}), '(w_l, axis=axis_w)\n', (27229, 27247), True, 'from tensorflow.python.keras import backend as K\n'), ((27259, 27280), 'tensorflow.python.keras.backend.sum', 'K.sum', (['b_u'], {'axis': 'axis'}), '(b_u, axis=axis)\n', (27264, 27280), True, 'from tensorflow.python.keras import backend as K\n'), ((27292, 27313), 'tensorflow.python.keras.backend.sum', 'K.sum', (['b_l'], {'axis': 'axis'}), '(b_l, axis=axis)\n', (27297, 27313), True, 'from tensorflow.python.keras import backend as K\n'), ((31512, 31530), 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['x[-1]'], {}), '(x[-1])\n', (31523, 31530), True, 'from tensorflow.python.keras import backend as K\n'), ((37429, 37461), 'tensorflow.keras.activations.get', 'keras.activations.get', (['"""softmax"""'], {}), "('softmax')\n", (37450, 37461), True, 'import tensorflow.keras as keras\n'), ((58588, 58607), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['l_c', '(1.0)'], {}), '(l_c, 1.0)\n', (58597, 58607), True, 'from tensorflow.python.keras import backend as K\n'), ((61714, 61724), 'tensorflow.python.keras.backend.log', 'K.log', (['u_c'], {}), '(u_c)\n', (61719, 61724), True, 'from tensorflow.python.keras import backend as K\n'), ((61736, 61746), 'tensorflow.python.keras.backend.log', 'K.log', (['l_c'], {}), '(l_c)\n', (61741, 61746), True, 'from tensorflow.python.keras import backend as K\n'), ((63098, 63108), 'tensorflow.python.keras.backend.exp', 'K.exp', (['u_c'], {}), '(u_c)\n', (63103, 63108), True, 'from tensorflow.python.keras import backend as K\n'), ((63120, 63130), 'tensorflow.python.keras.backend.exp', 'K.exp', (['l_c'], {}), '(l_c)\n', (63125, 63130), True, 'from tensorflow.python.keras import backend as K\n'), ((63183, 63191), 'tensorflow.python.keras.backend.exp', 'K.exp', (['y'], {}), '(y)\n', (63188, 
63191), True, 'from tensorflow.python.keras import backend as K\n'), ((63293, 63301), 'tensorflow.python.keras.backend.exp', 'K.exp', (['y'], {}), '(y)\n', (63298, 63301), True, 'from tensorflow.python.keras import backend as K\n'), ((2704, 2714), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (2712, 2714), True, 'from tensorflow.python.keras import backend as K\n'), ((3061, 3086), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['x_min_', '(-1)'], {}), '(x_min_, -1)\n', (3074, 3086), True, 'from tensorflow.python.keras import backend as K\n'), ((3104, 3129), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['x_max_', '(-1)'], {}), '(x_max_, -1)\n', (3117, 3129), True, 'from tensorflow.python.keras import backend as K\n'), ((3142, 3183), 'tensorflow.python.keras.backend.sum', 'K.sum', (['(w_pos * x_max_ + w_neg * x_min_)', '(1)'], {}), '(w_pos * x_max_ + w_neg * x_min_, 1)\n', (3147, 3183), True, 'from tensorflow.python.keras import backend as K\n'), ((3523, 3533), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3531, 3533), True, 'from tensorflow.python.keras import backend as K\n'), ((3809, 3834), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['x_min_', '(-1)'], {}), '(x_min_, -1)\n', (3822, 3834), True, 'from tensorflow.python.keras import backend as K\n'), ((3852, 3877), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['x_max_', '(-1)'], {}), '(x_max_, -1)\n', (3865, 3877), True, 'from tensorflow.python.keras import backend as K\n'), ((3890, 3931), 'tensorflow.python.keras.backend.sum', 'K.sum', (['(w_pos * x_min_ + w_neg * x_max_)', '(1)'], {}), '(w_pos * x_min_ + w_neg * x_max_, 1)\n', (3895, 3931), True, 'from tensorflow.python.keras import backend as K\n'), ((13019, 13035), 'tensorflow.python.keras.backend.sum', 'K.sum', (['(W * x_)', '(1)'], {}), '(W * x_, 1)\n', (13024, 13035), True, 'from tensorflow.python.keras import backend as K\n'), 
((15629, 15639), 'numpy.sqrt', 'np.sqrt', (['k'], {}), '(k)\n', (15636, 15639), True, 'import numpy as np\n'), ((16749, 16769), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['R', '(-1)'], {}), '(R, -1)\n', (16762, 16769), True, 'from tensorflow.python.keras import backend as K\n'), ((17928, 17947), 'numpy.sqrt', 'np.sqrt', (['(n_iter + 1)'], {}), '(n_iter + 1)\n', (17935, 17947), True, 'import numpy as np\n'), ((18287, 18306), 'tensorflow.python.keras.backend.clip', 'K.clip', (['w', '(0.0)', '(1.0)'], {}), '(w, 0.0, 1.0)\n', (18293, 18306), True, 'from tensorflow.python.keras import backend as K\n'), ((18445, 18464), 'tensorflow.python.keras.backend.clip', 'K.clip', (['w', '(0.0)', '(1.0)'], {}), '(w, 0.0, 1.0)\n', (18451, 18464), True, 'from tensorflow.python.keras import backend as K\n'), ((19446, 19464), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0.0)', 'w_'], {}), '(0.0, w_)\n', (19455, 19464), True, 'from tensorflow.python.keras import backend as K\n'), ((19822, 19840), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0.0)', 'w_'], {}), '(0.0, w_)\n', (19831, 19840), True, 'from tensorflow.python.keras import backend as K\n'), ((20091, 20101), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (20099, 20101), True, 'from tensorflow.python.keras import backend as K\n'), ((20129, 20139), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (20137, 20139), True, 'from tensorflow.python.keras import backend as K\n'), ((20974, 20990), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['h', '(-g)'], {}), '(h, -g)\n', (20983, 20990), True, 'from tensorflow.python.keras import backend as K\n'), ((22774, 22790), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['h', '(-g)'], {}), '(h, -g)\n', (22783, 22790), True, 'from tensorflow.python.keras import backend as K\n'), ((26064, 26087), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['upper_', 'u_c_'], {}), 
'(upper_, u_c_)\n', (26073, 26087), True, 'from tensorflow.python.keras import backend as K\n'), ((26192, 26215), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['lower_', 'l_c_'], {}), '(lower_, l_c_)\n', (26201, 26215), True, 'from tensorflow.python.keras import backend as K\n'), ((27061, 27082), 'tensorflow.python.keras.backend.sum', 'K.sum', (['u_c'], {'axis': 'axis'}), '(u_c, axis=axis)\n', (27066, 27082), True, 'from tensorflow.python.keras import backend as K\n'), ((27098, 27119), 'tensorflow.python.keras.backend.sum', 'K.sum', (['l_c'], {'axis': 'axis'}), '(l_c, axis=axis)\n', (27103, 27119), True, 'from tensorflow.python.keras import backend as K\n'), ((31405, 31426), 'tensorflow.python.keras.backend.max', 'K.max', (['u_c'], {'axis': 'axis'}), '(u_c, axis=axis)\n', (31410, 31426), True, 'from tensorflow.python.keras import backend as K\n'), ((31442, 31463), 'tensorflow.python.keras.backend.max', 'K.max', (['l_c'], {'axis': 'axis'}), '(l_c, axis=axis)\n', (31447, 31463), True, 'from tensorflow.python.keras import backend as K\n'), ((31665, 31691), 'tensorflow.split', 'tf.split', (['h', 'max_dim', 'axis'], {}), '(h, max_dim, axis)\n', (31673, 31691), True, 'import tensorflow as tf\n'), ((31709, 31735), 'tensorflow.split', 'tf.split', (['g', 'max_dim', 'axis'], {}), '(g, max_dim, axis)\n', (31717, 31735), True, 'import tensorflow as tf\n'), ((31974, 32002), 'tensorflow.split', 'tf.split', (['u_c', 'max_dim', 'axis'], {}), '(u_c, max_dim, axis)\n', (31982, 32002), True, 'import tensorflow as tf\n'), ((32022, 32050), 'tensorflow.split', 'tf.split', (['l_c', 'max_dim', 'axis'], {}), '(l_c, max_dim, axis)\n', (32030, 32050), True, 'import tensorflow as tf\n'), ((32220, 32248), 'tensorflow.split', 'tf.split', (['b_u', 'max_dim', 'axis'], {}), '(b_u, max_dim, axis)\n', (32228, 32248), True, 'import tensorflow as tf\n'), ((32268, 32296), 'tensorflow.split', 'tf.split', (['b_l', 'max_dim', 'axis'], {}), '(b_l, max_dim, axis)\n', (32276, 32296), True, 
'import tensorflow as tf\n'), ((35412, 35433), 'tensorflow.python.keras.backend.squeeze', 'K.squeeze', (['u_c_', 'axis'], {}), '(u_c_, axis)\n', (35421, 35433), True, 'from tensorflow.python.keras import backend as K\n'), ((35449, 35470), 'tensorflow.python.keras.backend.squeeze', 'K.squeeze', (['l_c_', 'axis'], {}), '(l_c_, axis)\n', (35458, 35470), True, 'from tensorflow.python.keras import backend as K\n'), ((35534, 35555), 'tensorflow.python.keras.backend.squeeze', 'K.squeeze', (['b_u_', 'axis'], {}), '(b_u_, axis)\n', (35543, 35555), True, 'from tensorflow.python.keras import backend as K\n'), ((35571, 35592), 'tensorflow.python.keras.backend.squeeze', 'K.squeeze', (['b_l_', 'axis'], {}), '(b_l_, axis)\n', (35580, 35592), True, 'from tensorflow.python.keras import backend as K\n'), ((35834, 35853), 'tensorflow.python.keras.backend.sum', 'K.sum', (['g'], {'axis': 'axis'}), '(g, axis=axis)\n', (35839, 35853), True, 'from tensorflow.python.keras import backend as K\n'), ((36002, 36025), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['upper_', 'u_c_'], {}), '(upper_, u_c_)\n', (36011, 36025), True, 'from tensorflow.python.keras import backend as K\n'), ((36101, 36124), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['lower_', 'l_c_'], {}), '(lower_, l_c_)\n', (36110, 36124), True, 'from tensorflow.python.keras import backend as K\n'), ((37278, 37309), 'tensorflow.keras.activations.get', 'keras.activations.get', (['"""linear"""'], {}), "('linear')\n", (37299, 37309), True, 'import tensorflow.keras as keras\n'), ((46291, 46316), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['u_c_0_', 'u_c_1_'], {}), '(u_c_0_, u_c_1_)\n', (46300, 46316), True, 'from tensorflow.python.keras import backend as K\n'), ((46502, 46520), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['u0', '(0.0)'], {}), '(u0, 0.0)\n', (46511, 46520), True, 'from tensorflow.python.keras import backend as K\n'), ((46539, 46557), 
'tensorflow.python.keras.backend.minimum', 'K.minimum', (['u0', '(0.0)'], {}), '(u0, 0.0)\n', (46548, 46557), True, 'from tensorflow.python.keras import backend as K\n'), ((46577, 46595), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['l1', '(0.0)'], {}), '(l1, 0.0)\n', (46586, 46595), True, 'from tensorflow.python.keras import backend as K\n'), ((46614, 46632), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['l1', '(0.0)'], {}), '(l1, 0.0)\n', (46623, 46632), True, 'from tensorflow.python.keras import backend as K\n'), ((46886, 46904), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['u1', '(0.0)'], {}), '(u1, 0.0)\n', (46895, 46904), True, 'from tensorflow.python.keras import backend as K\n'), ((46923, 46941), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['u1', '(0.0)'], {}), '(u1, 0.0)\n', (46932, 46941), True, 'from tensorflow.python.keras import backend as K\n'), ((46960, 46978), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['l0', '(0.0)'], {}), '(l0, 0.0)\n', (46969, 46978), True, 'from tensorflow.python.keras import backend as K\n'), ((46997, 47015), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['l0', '(0.0)'], {}), '(l0, 0.0)\n', (47006, 47015), True, 'from tensorflow.python.keras import backend as K\n'), ((47902, 47924), 'numpy.delete', 'np.delete', (['index', 'axis'], {}), '(index, axis)\n', (47911, 47924), True, 'import numpy as np\n'), ((48301, 48325), 'numpy.delete', 'np.delete', (['index_w', 'axis'], {}), '(index_w, axis)\n', (48310, 48325), True, 'import numpy as np\n'), ((51456, 51482), 'tensorflow.split', 'tf.split', (['u_', '(1)'], {'axis': 'axis'}), '(u_, 1, axis=axis)\n', (51464, 51482), True, 'import tensorflow as tf\n'), ((51500, 51526), 'tensorflow.split', 'tf.split', (['l_', '(1)'], {'axis': 'axis'}), '(l_, 1, axis=axis)\n', (51508, 51526), True, 'import tensorflow as tf\n'), ((51619, 51647), 'tensorflow.split', 'tf.split', (['b_u_', '(1)'], {'axis': 'axis'}), '(b_u_, 1, 
axis=axis)\n', (51627, 51647), True, 'import tensorflow as tf\n'), ((51667, 51695), 'tensorflow.split', 'tf.split', (['b_l_', '(1)'], {'axis': 'axis'}), '(b_l_, 1, axis=axis)\n', (51675, 51695), True, 'import tensorflow as tf\n'), ((51787, 51815), 'tensorflow.split', 'tf.split', (['w_u_', '(1)'], {'axis': 'axis'}), '(w_u_, 1, axis=axis)\n', (51795, 51815), True, 'import tensorflow as tf\n'), ((51835, 51863), 'tensorflow.split', 'tf.split', (['w_l_', '(1)'], {'axis': 'axis'}), '(w_l_, 1, axis=axis)\n', (51843, 51863), True, 'import tensorflow as tf\n'), ((53578, 53603), 'tensorflow.split', 'tf.split', (['x', 'n'], {'axis': 'axis'}), '(x, n, axis=axis)\n', (53586, 53603), True, 'import tensorflow as tf\n'), ((53792, 53823), 'tensorflow.split', 'tf.split', (['w_u', 'n'], {'axis': '(axis + 1)'}), '(w_u, n, axis=axis + 1)\n', (53800, 53823), True, 'import tensorflow as tf\n'), ((53870, 53901), 'tensorflow.split', 'tf.split', (['w_l', 'n'], {'axis': '(axis + 1)'}), '(w_l, n, axis=axis + 1)\n', (53878, 53901), True, 'import tensorflow as tf\n'), ((55808, 55830), 'tensorflow.python.keras.backend.concatenate', 'K.concatenate', (['x', 'axis'], {}), '(x, axis)\n', (55821, 55830), True, 'from tensorflow.python.keras import backend as K\n'), ((56020, 56053), 'tensorflow.python.keras.backend.concatenate', 'K.concatenate', (['w_u_list', '(axis + 1)'], {}), '(w_u_list, axis + 1)\n', (56033, 56053), True, 'from tensorflow.python.keras import backend as K\n'), ((56069, 56102), 'tensorflow.python.keras.backend.concatenate', 'K.concatenate', (['w_l_list', '(axis + 1)'], {}), '(w_l_list, axis + 1)\n', (56082, 56102), True, 'from tensorflow.python.keras import backend as K\n'), ((58649, 58657), 'tensorflow.python.keras.backend.pow', 'K.pow', (['z'], {}), '(z)\n', (58654, 58657), True, 'from tensorflow.python.keras import backend as K\n'), ((60042, 60064), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['t', 'axis'], {}), '(t, axis)\n', (60055, 60064), True, 'from 
tensorflow.python.keras import backend as K\n'), ((60284, 60310), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['w_u', 'axis_w'], {}), '(w_u, axis_w)\n', (60297, 60310), True, 'from tensorflow.python.keras import backend as K\n'), ((60326, 60352), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['w_l', 'axis_w'], {}), '(w_l, axis_w)\n', (60339, 60352), True, 'from tensorflow.python.keras import backend as K\n'), ((61904, 61912), 'tensorflow.python.keras.backend.log', 'K.log', (['y'], {}), '(y)\n', (61909, 61912), True, 'from tensorflow.python.keras import backend as K\n'), ((4320, 4328), 'tensorflow.python.keras.backend.abs', 'K.abs', (['x'], {}), '(x)\n', (4325, 4328), True, 'from tensorflow.python.keras import backend as K\n'), ((5313, 5335), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['x_0', '(-1)'], {}), '(x_0, -1)\n', (5326, 5335), True, 'from tensorflow.python.keras import backend as K\n'), ((5352, 5369), 'tensorflow.python.keras.backend.sum', 'K.sum', (['(w * x_0)', '(1)'], {}), '(w * x_0, 1)\n', (5357, 5369), True, 'from tensorflow.python.keras import backend as K\n'), ((5838, 5880), 'numpy.reshape', 'np.reshape', (['upper', '([1, -1] + [1] * n_shape)'], {}), '(upper, [1, -1] + [1] * n_shape)\n', (5848, 5880), True, 'import numpy as np\n'), ((5898, 5940), 'numpy.reshape', 'np.reshape', (['lower', '([1, -1] + [1] * n_shape)'], {}), '(lower, [1, -1] + [1] * n_shape)\n', (5908, 5940), True, 'import numpy as np\n'), ((6343, 6385), 'numpy.reshape', 'np.reshape', (['upper', '([1, -1] + [1] * n_shape)'], {}), '(upper, [1, -1] + [1] * n_shape)\n', (6353, 6385), True, 'import numpy as np\n'), ((6792, 6834), 'numpy.reshape', 'np.reshape', (['lower', '([1, -1] + [1] * n_shape)'], {}), '(lower, [1, -1] + [1] * n_shape)\n', (6802, 6834), True, 'import numpy as np\n'), ((7640, 7682), 'numpy.reshape', 'np.reshape', (['upper', '([1, -1] + [1] * n_shape)'], {}), '(upper, [1, -1] + [1] * n_shape)\n', (7650, 7682), 
True, 'import numpy as np\n'), ((7700, 7742), 'numpy.reshape', 'np.reshape', (['lower', '([1, -1] + [1] * n_shape)'], {}), '(lower, [1, -1] + [1] * n_shape)\n', (7710, 7742), True, 'import numpy as np\n'), ((8145, 8187), 'numpy.reshape', 'np.reshape', (['upper', '([1, -1] + [1] * n_shape)'], {}), '(upper, [1, -1] + [1] * n_shape)\n', (8155, 8187), True, 'import numpy as np\n'), ((8594, 8636), 'numpy.reshape', 'np.reshape', (['lower', '([1, -1] + [1] * n_shape)'], {}), '(lower, [1, -1] + [1] * n_shape)\n', (8604, 8636), True, 'import numpy as np\n'), ((9883, 9905), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['x_0', '(-1)'], {}), '(x_0, -1)\n', (9896, 9905), True, 'from tensorflow.python.keras import backend as K\n'), ((9922, 9939), 'tensorflow.python.keras.backend.sum', 'K.sum', (['(w * x_0)', '(1)'], {}), '(w * x_0, 1)\n', (9927, 9939), True, 'from tensorflow.python.keras import backend as K\n'), ((13548, 13563), 'tensorflow.python.keras.backend.sum', 'K.sum', (['(W * W)', '(1)'], {}), '(W * W, 1)\n', (13553, 13563), True, 'from tensorflow.python.keras import backend as K\n'), ((15699, 15710), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (15708, 15710), True, 'from tensorflow.python.keras import backend as K\n'), ((18534, 18545), 'tensorflow.python.keras.backend.sum', 'K.sum', (['w', '(0)'], {}), '(w, 0)\n', (18539, 18545), True, 'from tensorflow.python.keras import backend as K\n'), ((18547, 18558), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (18556, 18558), True, 'from tensorflow.python.keras import backend as K\n'), ((21670, 21692), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['w_u_', '(1)'], {}), '(w_u_, 1)\n', (21683, 21692), True, 'from tensorflow.python.keras import backend as K\n'), ((21714, 21736), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['w_l_', '(1)'], {}), '(w_l_, 1)\n', (21727, 21736), True, 'from tensorflow.python.keras import 
backend as K\n'), ((23366, 23388), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['w_u_', '(1)'], {}), '(w_u_, 1)\n', (23379, 23388), True, 'from tensorflow.python.keras import backend as K\n'), ((23410, 23432), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['w_l_', '(1)'], {}), '(w_l_, 1)\n', (23423, 23432), True, 'from tensorflow.python.keras import backend as K\n'), ((26855, 26877), 'tensorflow.python.keras.backend.sum', 'K.sum', (['x[0]'], {'axis': 'axis'}), '(x[0], axis=axis)\n', (26860, 26877), True, 'from tensorflow.python.keras import backend as K\n'), ((26879, 26901), 'tensorflow.python.keras.backend.sum', 'K.sum', (['x[1]'], {'axis': 'axis'}), '(x[1], axis=axis)\n', (26884, 26901), True, 'from tensorflow.python.keras import backend as K\n'), ((32444, 32472), 'tensorflow.split', 'tf.split', (['w_u', 'max_dim', 'axis'], {}), '(w_u, max_dim, axis)\n', (32452, 32472), True, 'import tensorflow as tf\n'), ((32496, 32524), 'tensorflow.split', 'tf.split', (['w_l', 'max_dim', 'axis'], {}), '(w_l, max_dim, axis)\n', (32504, 32524), True, 'import tensorflow as tf\n'), ((32562, 32594), 'tensorflow.split', 'tf.split', (['w_u', 'max_dim', '(axis + 1)'], {}), '(w_u, max_dim, axis + 1)\n', (32570, 32594), True, 'import tensorflow as tf\n'), ((32618, 32650), 'tensorflow.split', 'tf.split', (['w_l', 'max_dim', '(axis + 1)'], {}), '(w_l, max_dim, axis + 1)\n', (32626, 32650), True, 'import tensorflow as tf\n'), ((35635, 35656), 'tensorflow.python.keras.backend.squeeze', 'K.squeeze', (['w_u_', 'axis'], {}), '(w_u_, axis)\n', (35644, 35656), True, 'from tensorflow.python.keras import backend as K\n'), ((35676, 35697), 'tensorflow.python.keras.backend.squeeze', 'K.squeeze', (['w_l_', 'axis'], {}), '(w_l_, axis)\n', (35685, 35697), True, 'from tensorflow.python.keras import backend as K\n'), ((35731, 35756), 'tensorflow.python.keras.backend.squeeze', 'K.squeeze', (['w_u_', '(axis + 1)'], {}), '(w_u_, axis + 1)\n', (35740, 35756), True, 
'from tensorflow.python.keras import backend as K\n'), ((35776, 35801), 'tensorflow.python.keras.backend.squeeze', 'K.squeeze', (['w_l_', '(axis + 1)'], {}), '(w_l_, axis + 1)\n', (35785, 35801), True, 'from tensorflow.python.keras import backend as K\n'), ((35867, 35890), 'tensorflow.python.keras.backend.max', 'K.max', (['(h + g)'], {'axis': 'axis'}), '(h + g, axis=axis)\n', (35872, 35890), True, 'from tensorflow.python.keras import backend as K\n'), ((43124, 43151), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['upper_0', 'upper_1'], {}), '(upper_0, upper_1)\n', (43133, 43151), True, 'from tensorflow.python.keras import backend as K\n'), ((43153, 43180), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['lower_0', 'lower_1'], {}), '(lower_0, lower_1)\n', (43162, 43180), True, 'from tensorflow.python.keras import backend as K\n'), ((48139, 48172), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['x[2]', 'index'], {}), '(x[2], index)\n', (48159, 48172), True, 'from tensorflow.python.keras import backend as K\n'), ((48186, 48219), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['x[3]', 'index'], {}), '(x[3], index)\n', (48206, 48219), True, 'from tensorflow.python.keras import backend as K\n'), ((48561, 48595), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['w_u', 'index_w'], {}), '(w_u, index_w)\n', (48581, 48595), True, 'from tensorflow.python.keras import backend as K\n'), ((48609, 48641), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['b_u', 'index'], {}), '(b_u, index)\n', (48629, 48641), True, 'from tensorflow.python.keras import backend as K\n'), ((48655, 48689), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['w_l', 'index_w'], {}), '(w_l, index_w)\n', (48675, 48689), True, 'from tensorflow.python.keras import backend as K\n'), ((48703, 48735), 
'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['b_l', 'index'], {}), '(b_l, index)\n', (48723, 48735), True, 'from tensorflow.python.keras import backend as K\n'), ((48960, 48990), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['u', 'index'], {}), '(u, index)\n', (48980, 48990), True, 'from tensorflow.python.keras import backend as K\n'), ((49004, 49038), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['w_u', 'index_w'], {}), '(w_u, index_w)\n', (49024, 49038), True, 'from tensorflow.python.keras import backend as K\n'), ((49052, 49084), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['b_u', 'index'], {}), '(b_u, index)\n', (49072, 49084), True, 'from tensorflow.python.keras import backend as K\n'), ((49098, 49128), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['l', 'index'], {}), '(l, index)\n', (49118, 49128), True, 'from tensorflow.python.keras import backend as K\n'), ((49142, 49176), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['w_l', 'index_w'], {}), '(w_l, index_w)\n', (49162, 49176), True, 'from tensorflow.python.keras import backend as K\n'), ((49190, 49222), 'tensorflow.python.keras.backend.permute_dimensions', 'K.permute_dimensions', (['b_l', 'index'], {}), '(b_l, index)\n', (49210, 49222), True, 'from tensorflow.python.keras import backend as K\n'), ((49984, 50006), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['u', 'axis'], {}), '(u, axis)\n', (49997, 50006), True, 'from tensorflow.python.keras import backend as K\n'), ((50023, 50045), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['l', 'axis'], {}), '(l, axis)\n', (50036, 50045), True, 'from tensorflow.python.keras import backend as K\n'), ((50216, 50240), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['b_u', 'axis'], {}), '(b_u, axis)\n', (50229, 
50240), True, 'from tensorflow.python.keras import backend as K\n'), ((50259, 50283), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['b_l', 'axis'], {}), '(b_l, axis)\n', (50272, 50283), True, 'from tensorflow.python.keras import backend as K\n'), ((50302, 50328), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['w_u', 'axis_w'], {}), '(w_u, axis_w)\n', (50315, 50328), True, 'from tensorflow.python.keras import backend as K\n'), ((50347, 50373), 'tensorflow.python.keras.backend.expand_dims', 'K.expand_dims', (['w_l', 'axis_w'], {}), '(w_l, axis_w)\n', (50360, 50373), True, 'from tensorflow.python.keras import backend as K\n'), ((58517, 58537), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['u_c_', 'u_c'], {}), '(u_c_, u_c)\n', (58526, 58537), True, 'from tensorflow.python.keras import backend as K\n'), ((58556, 58576), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['l_c', 'l_c_'], {}), '(l_c, l_c_)\n', (58565, 58576), True, 'from tensorflow.python.keras import backend as K\n'), ((61356, 61366), 'tensorflow.python.keras.backend.log', 'K.log', (['u_c'], {}), '(u_c)\n', (61361, 61366), True, 'from tensorflow.python.keras import backend as K\n'), ((61368, 61378), 'tensorflow.python.keras.backend.log', 'K.log', (['l_c'], {}), '(l_c)\n', (61373, 61378), True, 'from tensorflow.python.keras import backend as K\n'), ((61608, 61619), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (61617, 61619), True, 'from tensorflow.python.keras import backend as K\n'), ((61833, 61844), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (61842, 61844), True, 'from tensorflow.python.keras import backend as K\n'), ((62786, 62794), 'tensorflow.python.keras.backend.exp', 'K.exp', (['e'], {}), '(e)\n', (62791, 62794), True, 'from tensorflow.python.keras import backend as K\n'), ((63238, 63249), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (63247, 63249), True, 
'from tensorflow.python.keras import backend as K\n'), ((4378, 4389), 'tensorflow.python.keras.backend.pow', 'K.pow', (['x', 'p'], {}), '(x, p)\n', (4383, 4389), True, 'from tensorflow.python.keras import backend as K\n'), ((6435, 6452), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0.0)', 'w'], {}), '(0.0, w)\n', (6444, 6452), True, 'from tensorflow.python.keras import backend as K\n'), ((6478, 6493), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0)', 'w'], {}), '(0, w)\n', (6487, 6493), True, 'from tensorflow.python.keras import backend as K\n'), ((6884, 6901), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0.0)', 'w'], {}), '(0.0, w)\n', (6893, 6901), True, 'from tensorflow.python.keras import backend as K\n'), ((6927, 6942), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0)', 'w'], {}), '(0, w)\n', (6936, 6942), True, 'from tensorflow.python.keras import backend as K\n'), ((8237, 8254), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0.0)', 'w'], {}), '(0.0, w)\n', (8246, 8254), True, 'from tensorflow.python.keras import backend as K\n'), ((8280, 8295), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0)', 'w'], {}), '(0, w)\n', (8289, 8295), True, 'from tensorflow.python.keras import backend as K\n'), ((8686, 8703), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0.0)', 'w'], {}), '(0.0, w)\n', (8695, 8703), True, 'from tensorflow.python.keras import backend as K\n'), ((8729, 8744), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0)', 'w'], {}), '(0, w)\n', (8738, 8744), True, 'from tensorflow.python.keras import backend as K\n'), ((14031, 14066), 'tensorflow.python.keras.backend.pow', 'K.pow', (['((z[:, 1] - z[:, 0]) / 2.0)', '(2)'], {}), '((z[:, 1] - z[:, 0]) / 2.0, 2)\n', (14036, 14066), True, 'from tensorflow.python.keras import backend as K\n'), ((15655, 15666), 'tensorflow.python.keras.backend.pow', 'K.pow', (['g', '(2)'], {}), '(g, 2)\n', (15660, 15666), 
True, 'from tensorflow.python.keras import backend as K\n'), ((16619, 16639), 'tensorflow.python.keras.backend.sum', 'K.sum', (['constant_0', '(1)'], {}), '(constant_0, 1)\n', (16624, 16639), True, 'from tensorflow.python.keras import backend as K\n'), ((17228, 17269), 'tensorflow.python.keras.backend.concatenate', 'K.concatenate', (['([x_k[:, None]] * n_iter)', '(1)'], {}), '([x_k[:, None]] * n_iter, 1)\n', (17241, 17269), True, 'from tensorflow.python.keras import backend as K\n'), ((17483, 17496), 'tensorflow.python.keras.backend.pow', 'K.pow', (['g_k', '(2)'], {}), '(g_k, 2)\n', (17488, 17496), True, 'from tensorflow.python.keras import backend as K\n'), ((18122, 18142), 'tensorflow.python.keras.backend.less_equal', 'K.less_equal', (['w', '(0.0)'], {}), '(w, 0.0)\n', (18134, 18142), True, 'from tensorflow.python.keras import backend as K\n'), ((18144, 18154), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (18152, 18154), True, 'from tensorflow.python.keras import backend as K\n'), ((21133, 21146), 'tensorflow.python.keras.backend.sign', 'K.sign', (['lower'], {}), '(lower)\n', (21139, 21146), True, 'from tensorflow.python.keras import backend as K\n'), ((28154, 28165), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (28163, 28165), True, 'from tensorflow.python.keras import backend as K\n'), ((36981, 37014), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""linear"""'], {}), "('linear')\n", (37004, 37014), True, 'import tensorflow.keras as keras\n'), ((46143, 46170), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(u0 * u1)', '(u0 * l1)'], {}), '(u0 * u1, u0 * l1)\n', (46152, 46170), True, 'from tensorflow.python.keras import backend as K\n'), ((46169, 46196), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(u0 * l1)', '(l0 * l1)'], {}), '(u0 * l1, l0 * l1)\n', (46178, 46196), True, 'from tensorflow.python.keras import backend as K\n'), ((46218, 46245), 
'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(u1 * u0)', '(u1 * l0)'], {}), '(u1 * u0, u1 * l0)\n', (46227, 46245), True, 'from tensorflow.python.keras import backend as K\n'), ((46244, 46271), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(u1 * l0)', '(l1 * l0)'], {}), '(u1 * l0, l1 * l0)\n', (46253, 46271), True, 'from tensorflow.python.keras import backend as K\n'), ((46332, 46359), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(l0 * l1)', '(l0 * u1)'], {}), '(l0 * l1, l0 * u1)\n', (46341, 46359), True, 'from tensorflow.python.keras import backend as K\n'), ((46358, 46385), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(l0 * l1)', '(u0 * l1)'], {}), '(l0 * l1, u0 * l1)\n', (46367, 46385), True, 'from tensorflow.python.keras import backend as K\n'), ((6405, 6420), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0)', 'w'], {}), '(0, w)\n', (6414, 6420), True, 'from tensorflow.python.keras import backend as K\n'), ((6542, 6569), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0.0)', 'w_alpha_bar'], {}), '(0.0, w_alpha_bar)\n', (6551, 6569), True, 'from tensorflow.python.keras import backend as K\n'), ((6854, 6869), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0)', 'w'], {}), '(0, w)\n', (6863, 6869), True, 'from tensorflow.python.keras import backend as K\n'), ((6990, 7017), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0.0)', 'w_alpha_bar'], {}), '(0.0, w_alpha_bar)\n', (6999, 7017), True, 'from tensorflow.python.keras import backend as K\n'), ((8207, 8222), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0)', 'w'], {}), '(0, w)\n', (8216, 8222), True, 'from tensorflow.python.keras import backend as K\n'), ((8344, 8371), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0.0)', 'w_alpha_bar'], {}), '(0.0, w_alpha_bar)\n', (8353, 8371), True, 'from tensorflow.python.keras import backend as K\n'), ((8656, 8671), 
'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0)', 'w'], {}), '(0, w)\n', (8665, 8671), True, 'from tensorflow.python.keras import backend as K\n'), ((8792, 8819), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0.0)', 'w_alpha_bar'], {}), '(0.0, w_alpha_bar)\n', (8801, 8819), True, 'from tensorflow.python.keras import backend as K\n'), ((14160, 14195), 'tensorflow.python.keras.backend.pow', 'K.pow', (['((z[:, 1] - z[:, 0]) / 2.0)', '(2)'], {}), '((z[:, 1] - z[:, 0]) / 2.0, 2)\n', (14165, 14195), True, 'from tensorflow.python.keras import backend as K\n'), ((17632, 17662), 'tensorflow.python.keras.backend.sum', 'K.sum', (['(W_0[:, None] * X_vec)', '(2)'], {}), '(W_0[:, None] * X_vec, 2)\n', (17637, 17662), True, 'from tensorflow.python.keras import backend as K\n'), ((17720, 17750), 'tensorflow.python.keras.backend.sum', 'K.sum', (['(W_1[:, None] * X_vec)', '(2)'], {}), '(W_1[:, None] * X_vec, 2)\n', (17725, 17750), True, 'from tensorflow.python.keras import backend as K\n'), ((21035, 21048), 'tensorflow.python.keras.backend.sign', 'K.sign', (['upper'], {}), '(upper)\n', (21041, 21048), True, 'from tensorflow.python.keras import backend as K\n'), ((32894, 32931), 'tensorflow.split', 'tf.split', (['params[None]', 'max_dim', 'axis'], {}), '(params[None], max_dim, axis)\n', (32902, 32931), True, 'import tensorflow as tf\n'), ((40691, 40702), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l1)'], {}), '(-l1)\n', (40697, 40702), True, 'from tensorflow.python.keras import backend as K\n'), ((40791, 40802), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l0)'], {}), '(-l0)\n', (40797, 40802), True, 'from tensorflow.python.keras import backend as K\n'), ((40892, 40903), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l1)'], {}), '(-l1)\n', (40898, 40903), True, 'from tensorflow.python.keras import backend as K\n'), ((40992, 41003), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u0)'], {}), '(-u0)\n', (40998, 41003), 
True, 'from tensorflow.python.keras import backend as K\n'), ((41222, 41233), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l1)'], {}), '(-l1)\n', (41228, 41233), True, 'from tensorflow.python.keras import backend as K\n'), ((41414, 41425), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l0)'], {}), '(-l0)\n', (41420, 41425), True, 'from tensorflow.python.keras import backend as K\n'), ((41607, 41618), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l1)'], {}), '(-l1)\n', (41613, 41618), True, 'from tensorflow.python.keras import backend as K\n'), ((41799, 41810), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u0)'], {}), '(-u0)\n', (41805, 41810), True, 'from tensorflow.python.keras import backend as K\n'), ((41915, 41926), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l1)'], {}), '(-l1)\n', (41921, 41926), True, 'from tensorflow.python.keras import backend as K\n'), ((42021, 42032), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l0)'], {}), '(-l0)\n', (42027, 42032), True, 'from tensorflow.python.keras import backend as K\n'), ((42128, 42139), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l1)'], {}), '(-l1)\n', (42134, 42139), True, 'from tensorflow.python.keras import backend as K\n'), ((42234, 42245), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u0)'], {}), '(-u0)\n', (42240, 42245), True, 'from tensorflow.python.keras import backend as K\n'), ((6047, 6074), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0.0)', 'w_alpha_bar'], {}), '(0.0, w_alpha_bar)\n', (6056, 6074), True, 'from tensorflow.python.keras import backend as K\n'), ((6093, 6120), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0.0)', 'w_alpha_bar'], {}), '(0.0, w_alpha_bar)\n', (6102, 6120), True, 'from tensorflow.python.keras import backend as K\n'), ((7849, 7876), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['(0.0)', 'w_alpha_bar'], {}), '(0.0, w_alpha_bar)\n', (7858, 7876), True, 'from 
tensorflow.python.keras import backend as K\n'), ((7895, 7922), 'tensorflow.python.keras.backend.minimum', 'K.minimum', (['(0.0)', 'w_alpha_bar'], {}), '(0.0, w_alpha_bar)\n', (7904, 7922), True, 'from tensorflow.python.keras import backend as K\n'), ((14309, 14347), 'tensorflow.python.keras.backend.pow', 'K.pow', (["(z - z + convex_domain['eps'])", '(2)'], {}), "(z - z + convex_domain['eps'], 2)\n", (14314, 14347), True, 'from tensorflow.python.keras import backend as K\n'), ((40673, 40683), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l1'], {}), '(l1)\n', (40679, 40683), True, 'from tensorflow.python.keras import backend as K\n'), ((40773, 40783), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l0'], {}), '(l0)\n', (40779, 40783), True, 'from tensorflow.python.keras import backend as K\n'), ((40874, 40884), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l1'], {}), '(l1)\n', (40880, 40884), True, 'from tensorflow.python.keras import backend as K\n'), ((40974, 40984), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u0'], {}), '(u0)\n', (40980, 40984), True, 'from tensorflow.python.keras import backend as K\n'), ((41181, 41191), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l1'], {}), '(l1)\n', (41187, 41191), True, 'from tensorflow.python.keras import backend as K\n'), ((41373, 41383), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l0'], {}), '(l0)\n', (41379, 41383), True, 'from tensorflow.python.keras import backend as K\n'), ((41566, 41576), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l1'], {}), '(l1)\n', (41572, 41576), True, 'from tensorflow.python.keras import backend as K\n'), ((41758, 41768), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u0'], {}), '(u0)\n', (41764, 41768), True, 'from tensorflow.python.keras import backend as K\n'), ((41895, 41905), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l1'], {}), '(l1)\n', (41901, 41905), True, 'from tensorflow.python.keras import backend as K\n'), 
((42001, 42011), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l0'], {}), '(l0)\n', (42007, 42011), True, 'from tensorflow.python.keras import backend as K\n'), ((42108, 42118), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l1'], {}), '(l1)\n', (42114, 42118), True, 'from tensorflow.python.keras import backend as K\n'), ((42214, 42224), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u0'], {}), '(u0)\n', (42220, 42224), True, 'from tensorflow.python.keras import backend as K\n'), ((13195, 13217), 'tensorflow.python.keras.backend.maximum', 'K.maximum', (['constant', 'z'], {}), '(constant, z)\n', (13204, 13217), True, 'from tensorflow.python.keras import backend as K\n'), ((40636, 40646), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u0'], {}), '(u0)\n', (40642, 40646), True, 'from tensorflow.python.keras import backend as K\n'), ((40654, 40665), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u0)'], {}), '(-u0)\n', (40660, 40665), True, 'from tensorflow.python.keras import backend as K\n'), ((40736, 40746), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u1'], {}), '(u1)\n', (40742, 40746), True, 'from tensorflow.python.keras import backend as K\n'), ((40754, 40765), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u1)'], {}), '(-u1)\n', (40760, 40765), True, 'from tensorflow.python.keras import backend as K\n'), ((40837, 40847), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l0'], {}), '(l0)\n', (40843, 40847), True, 'from tensorflow.python.keras import backend as K\n'), ((40855, 40866), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l0)'], {}), '(-l0)\n', (40861, 40866), True, 'from tensorflow.python.keras import backend as K\n'), ((40937, 40947), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u1'], {}), '(u1)\n', (40943, 40947), True, 'from tensorflow.python.keras import backend as K\n'), ((40955, 40966), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u1)'], {}), '(-u1)\n', (40961, 40966), 
True, 'from tensorflow.python.keras import backend as K\n'), ((41098, 41108), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u0'], {}), '(u0)\n', (41104, 41108), True, 'from tensorflow.python.keras import backend as K\n'), ((41139, 41150), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u0)'], {}), '(-u0)\n', (41145, 41150), True, 'from tensorflow.python.keras import backend as K\n'), ((41290, 41300), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u1'], {}), '(u1)\n', (41296, 41300), True, 'from tensorflow.python.keras import backend as K\n'), ((41331, 41342), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u1)'], {}), '(-u1)\n', (41337, 41342), True, 'from tensorflow.python.keras import backend as K\n'), ((41483, 41493), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l0'], {}), '(l0)\n', (41489, 41493), True, 'from tensorflow.python.keras import backend as K\n'), ((41524, 41535), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l0)'], {}), '(-l0)\n', (41530, 41535), True, 'from tensorflow.python.keras import backend as K\n'), ((41675, 41685), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u1'], {}), '(u1)\n', (41681, 41685), True, 'from tensorflow.python.keras import backend as K\n'), ((41716, 41727), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u1)'], {}), '(-u1)\n', (41722, 41727), True, 'from tensorflow.python.keras import backend as K\n'), ((41854, 41864), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u0'], {}), '(u0)\n', (41860, 41864), True, 'from tensorflow.python.keras import backend as K\n'), ((41874, 41885), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u0)'], {}), '(-u0)\n', (41880, 41885), True, 'from tensorflow.python.keras import backend as K\n'), ((41960, 41970), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u1'], {}), '(u1)\n', (41966, 41970), True, 'from tensorflow.python.keras import backend as K\n'), ((41980, 41991), 'tensorflow.python.keras.backend.relu', 'K.relu', 
(['(-u1)'], {}), '(-u1)\n', (41986, 41991), True, 'from tensorflow.python.keras import backend as K\n'), ((42067, 42077), 'tensorflow.python.keras.backend.relu', 'K.relu', (['l0'], {}), '(l0)\n', (42073, 42077), True, 'from tensorflow.python.keras import backend as K\n'), ((42087, 42098), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-l0)'], {}), '(-l0)\n', (42093, 42098), True, 'from tensorflow.python.keras import backend as K\n'), ((42173, 42183), 'tensorflow.python.keras.backend.relu', 'K.relu', (['u1'], {}), '(u1)\n', (42179, 42183), True, 'from tensorflow.python.keras import backend as K\n'), ((42193, 42204), 'tensorflow.python.keras.backend.relu', 'K.relu', (['(-u1)'], {}), '(-u1)\n', (42199, 42204), True, 'from tensorflow.python.keras import backend as K\n')] |
import os
import logging
import numpy as np
from ..common.utils import (
logger, InstanceList, Timer, append_instance_lists, cbind, rbind, append, matrix, read_data_as_matrix,
get_sample_feature_ranges, configure_logger
)
from ..common.metrics import fn_auc
from .aad_globals import (
STREAM_RETENTION_OVERWRITE, STREAM_RETENTION_TOP_ANOMALOUS, get_aad_command_args, AadOpts,
get_first_vals_not_marked
)
from .aad_base import get_budget_topK, Ensemble
from .forest_aad_detector import is_forest_detector
from .query_model import Query
from .aad_support import get_aad_model, load_aad_model, SequentialResults, write_sequential_results_to_csv
from .data_stream import DataStream, IdServer
from .aad_test_support import plot_score_contours
from .query_model_euclidean import filter_by_euclidean_distance
class StreamingAnomalyDetector(object):
"""
Attributes:
model: Aad
trained AAD model
stream: DataStream
max_buffer: int
Determines the window size
labeled: InstanceList
unlabeled: InstanceList
buffer: InstanceList
test set from stream
initial_labeled: InstanceList
initial_anomalies: InstanceList
initial_nominals: InstanceList
opts: AadOpts
"""
    def __init__(self, stream, model, labeled_x=None, labeled_y=None, labeled_ids=None,
                 unlabeled_x=None, unlabeled_y=None, unlabeled_ids=None, opts=None,
                 max_buffer=512, min_samples_for_update=256):
        """Sets up the streaming detector state and optionally pre-trains the model.

        :param stream: DataStream
            source of streaming instances
        :param model: Aad
            trained AAD model
        :param labeled_x: np.ndarray
            initial labeled instances (optional)
        :param labeled_y: np.array
            labels for labeled_x
        :param labeled_ids: np.array
            unique ids for labeled_x
        :param unlabeled_x: np.ndarray
            initial unlabeled instances (optional)
        :param unlabeled_y: np.array
            (hidden) labels for unlabeled_x, used only for evaluation
        :param unlabeled_ids: np.array
            unique ids for unlabeled_x
        :param opts: AadOpts
        :param max_buffer: int
            stream window size
        :param min_samples_for_update: int
            minimum buffered samples before a model update is attempted
        """
        self.model = model
        self.stream = stream
        self.max_buffer = max_buffer
        self.min_samples_for_update = min_samples_for_update
        self.opts = opts
        self.n_prelabeled_instances = 0
        self.buffer = None
        # wrap the initial labeled data and split it into anomalies/nominals
        self.initial_labeled, self.initial_anomalies, self.initial_nominals = \
            self.get_initial_labeled(labeled_x, labeled_y, labeled_ids)
        # retain only an informative subset of the initial labeled data
        self.labeled = self._get_pretrain_labeled()
        if self.labeled is not None:
            self.n_prelabeled_instances = self.labeled.x.shape[0]
        self.unlabeled = None
        if unlabeled_x is not None:
            self.unlabeled = InstanceList(x=unlabeled_x, y=unlabeled_y, ids=unlabeled_ids)
            # transform the features and cache...
            self.unlabeled.x_transformed = self.get_transformed(self.unlabeled.x)
        self.qstate = None
        self.feature_ranges = None  # required if diverse querying strategy is used
        self.current_dists = None
        self.kl_alpha = opts.kl_alpha
        self.kl_q_alpha = 0.
        if is_forest_detector(self.opts.detector_type):
            # initialize the baseline instance distributions required for evaluating KL-divergence
            all_instances = self._get_all_instances()
            self.current_dists = self.model.get_node_sample_distributions(all_instances.x)
            kl_trees, self.kl_q_alpha = self.model.get_KL_divergence_distribution(all_instances.x, alpha=self.kl_alpha)
            logger.debug("kl kl_q_alpha: %s (alpha=%0.2f), kl mean: %f, kl_trees:\n%s" %
                         (str(list(self.kl_q_alpha)), self.kl_alpha, np.mean(kl_trees), str(list(kl_trees))))
        # optionally run supervised weight updates on the initial labeled data
        self._pre_train(debug_auc=True)
    def _pre_train(self, debug_auc=False):
        """Runs n_pretrain rounds of weight updates on the initial labeled data.

        Requires at least one anomaly and one nominal among the initial labels.
        tau is temporarily set to the observed anomaly fraction and restored
        afterwards. The weight vector with the best AUC across rounds is kept.

        :param debug_auc: bool
            when True, log the AUC after every update round
        """
        if not self.opts.pretrain or self.initial_labeled is None or self.opts.n_pretrain == 0:
            return
        ha = np.where(self.initial_labeled.y == 1)[0]
        # hn = np.where(self.initial_labeled.y == 0)[0]
        # set hn to empty array for pre-training. Since all instances are labeled,
        # we just focus on getting the labeled anomalies ranked at the top
        hn = np.zeros(0, dtype=int)
        if len(ha) == 0 or len(ha) == len(self.initial_labeled.y):
            logger.debug("At least one example from each class (anomaly, nominal) is required for pretraining.")
            return
        logger.debug("Pre-training %d rounds with anomalies: %d, nominals: %d..." %
                     (self.opts.n_pretrain, len(ha), len(self.initial_labeled.y)-len(ha)))
        tm = Timer()
        x, y, ids, x_transformed = self.initial_labeled.x, self.initial_labeled.y, self.initial_labeled.ids, self.initial_labeled.x_transformed
        # temporarily set tau to the true anomaly fraction of the labeled set
        orig_tau = self.opts.tau
        self.opts.tau = len(ha)*1.0 / len(self.initial_labeled.y)
        auc = self.get_auc(x=x, y=y, x_transformed=x_transformed)
        if self.opts.dataset in ['toy', 'toy2', 'toy_hard']:
            plot_score_contours(x, y, x_transformed, model=self.model,
                                filename="baseline", outputdir=self.opts.resultsdir,
                                opts=self.opts)
        if debug_auc: logger.debug("AUC[0]: %f" % (auc))
        # track the best-AUC weights seen across the update rounds
        best_i = 0
        best_auc = auc
        best_w = self.model.w
        for i in range(self.opts.n_pretrain):
            self.model.update_weights(x_transformed, y, ha, hn, self.opts)
            auc = self.get_auc(x=x, y=y, x_transformed=x_transformed)
            if debug_auc: logger.debug("AUC[%d]: %f" % (i + 1, auc))
            if best_auc < auc:
                best_auc = auc
                best_w = np.copy(self.model.w)
                best_i = i+1
        logger.debug("best_i: %d, best_auc: %f" % (best_i, best_auc))
        # restore the best weights and the original tau
        self.model.w = best_w
        self.opts.tau = orig_tau
        if self.opts.dataset in ['toy', 'toy2', 'toy_hard']:
            # some DEBUG plots
            selx = None
            if self.labeled is not None:
                idxs = np.where(self.labeled.y == 0)[0]
                logger.debug("#selx: %d" % len(idxs))
                selx = self.labeled.x[idxs]
            plot_score_contours(x, y, x_transformed, selected_x=selx, model=self.model,
                                filename="pre_train", outputdir=self.opts.resultsdir,
                                opts=self.opts)
        logger.debug(tm.message("Updated weights %d times with no feedback " % self.opts.n_pretrain))
def get_initial_labeled(self, x, y, ids):
"""Returns the labeled instances as InstanceLists
:param x: np.ndarray
:param y: np.array
:param ids: np.array
:return: InstanceList, InstanceList, InstanceList
"""
initial_labeled = initial_anomalies = initial_nominals = None
if x is not None:
initial_labeled = InstanceList(x=x, y=y, ids=ids)
# transform the features and cache...
initial_labeled.x_transformed = self.get_transformed(initial_labeled.x)
initial_anomalies, initial_nominals = self._separate_anomaly_nominal(initial_labeled)
return initial_labeled, initial_anomalies, initial_nominals
def _get_pretrain_labeled(self):
"""Returns a subset of the initial labeled data which will be utilized in future
First, we retain all labeled anomalies since these provide vital information.
Retaining all nominals might result in severe class imbalance if they are in
relatively larger number compared to anomalies. Therefore, we subsample the nominals.
We need to determine a reasonable informative set of nominals. For this, we utilize
Euclidean-diversity based strategy. We retain the nominals which have highest
average distance from the anomalies as well as other selected nominals.
:return: InstanceList
"""
l = self.initial_labeled
if l is None:
return None
if self.opts.n_pretrain_nominals < 0:
# include all nominal instances
labeled = InstanceList(x=self.initial_labeled.x, y=self.initial_labeled.y,
ids=self.initial_labeled.ids,
x_transformed=self.initial_labeled.x_transformed)
elif self.opts.n_pretrain_nominals == 0:
# completely ignore nominals and only retain anomalies
labeled = InstanceList(x=self.initial_anomalies.x, y=self.initial_anomalies.y,
ids=self.initial_anomalies.ids,
x_transformed=self.initial_anomalies.x_transformed)
else:
# select a subset of nominals
tm = Timer()
anom_idxs = np.where(l.y == 1)[0]
noml_idxs = np.where(l.y == 0)[0]
# set number of nominals...
n_nominals = min(self.opts.n_pretrain_nominals, len(anom_idxs))
if n_nominals > 0:
selected_indexes = filter_by_euclidean_distance(l.x,
noml_idxs, init_selected=anom_idxs,
n_select=n_nominals)
else:
selected_indexes = anom_idxs
selected_indexes = np.array(selected_indexes, dtype=int)
labeled = InstanceList(x=l.x[selected_indexes], y=l.y[selected_indexes],
x_transformed=l.x_transformed[selected_indexes],
ids=l.ids[selected_indexes])
logger.debug(tm.message("Total labeled: %d, anomalies: %d, nominals: %d" %
(labeled.x.shape[0], len(anom_idxs), len(selected_indexes)-len(anom_idxs))))
return labeled
def _separate_anomaly_nominal(self, labeled):
anom_idxs = np.where(labeled.y == 1)[0]
noml_idxs = np.where(labeled.y == 0)[0]
anomalies = None
nominals = None
if len(anom_idxs) > 0:
anomalies = InstanceList(x=labeled.x[anom_idxs], y=labeled.y[anom_idxs], ids=labeled.ids[anom_idxs],
x_transformed=labeled.x_transformed[anom_idxs])
if len(noml_idxs) > 0:
nominals = InstanceList(x=labeled.x[noml_idxs], y=labeled.y[noml_idxs], ids=labeled.ids[noml_idxs],
x_transformed=labeled.x_transformed[noml_idxs])
return anomalies, nominals
def _get_all_instances(self):
if self.labeled is not None and self.unlabeled is not None:
all_instances = append_instance_lists(self.labeled, self.unlabeled)
elif self.labeled is not None:
all_instances = self.labeled
else:
all_instances = self.unlabeled
return all_instances
def reset_buffer(self):
self.buffer = None
def add_to_buffer(self, instances):
if self.buffer is not None:
self.buffer.add_instances(instances.x, instances.y,
instances.ids, instances.x_transformed)
else:
self.buffer = instances
    def move_buffer_to_unlabeled(self):
        """Folds the stream buffer into the unlabeled set per the retention policy.

        STREAM_RETENTION_OVERWRITE: the buffer simply replaces the unlabeled set.
        STREAM_RETENTION_TOP_ANOMALOUS: buffer and current unlabeled set are
        merged and only the max_buffer most anomalous instances (per current
        model scores) are retained.
        Also refreshes feature_ranges from the new unlabeled set and clears
        the buffer.
        """
        if self.opts.retention_type == STREAM_RETENTION_OVERWRITE:
            if False:
                # debug accounting of true anomalies lost vs. kept (disabled)
                missed = int(np.sum(self.unlabeled.y)) if self.unlabeled.y is not None else 0
                retained = int(np.sum(self.buffer.y)) if self.buffer.y is not None else 0
                logger.debug("[overwriting] true anomalies: missed(%d), retained(%d)" % (missed, retained))
            if self.buffer is not None:
                self.unlabeled = self.buffer
        elif self.opts.retention_type == STREAM_RETENTION_TOP_ANOMALOUS:
            # retain the top anomalous instances from the merged
            # set of instance from both buffer and current unlabeled.
            if self.buffer is not None and self.unlabeled is not None:
                tmp = append_instance_lists(self.unlabeled, self.buffer)
            elif self.buffer is not None:
                tmp = self.buffer
            else:
                tmp = self.unlabeled
            # keep at most max_buffer instances, ranked by anomaly score
            n = min(tmp.x.shape[0], self.max_buffer)
            idxs, scores = self.model.order_by_score(tmp.x_transformed)
            top_idxs = idxs[np.arange(n)]
            tmp_x, tmp_y, tmp_ids, tmp_trans = tmp.get_instances_at(top_idxs)
            self.unlabeled = InstanceList(x=tmp_x, y=tmp_y, ids=tmp_ids, x_transformed=tmp_trans)
            # self.unlabeled = InstanceList(x=tmp.x[top_idxs],
            #                               y=tmp.y[top_idxs],
            #                               x_transformed=tmp.x_transformed[top_idxs])
            if n < len(tmp.y):
                missedidxs = idxs[n:len(tmp.y)]
            else:
                missedidxs = None
            if False:
                # debug accounting of true anomalies lost vs. kept (disabled)
                missed = int(np.sum(tmp.y[missedidxs])) if missedidxs is not None else 0
                retained = int(np.sum(self.unlabeled.y)) if self.unlabeled.y is not None else 0
                logger.debug("[top anomalous] true anomalies: missed(%d), retained(%d)" % (missed, retained))
        # feature ranges feed the diversity-based query strategies
        self.feature_ranges = get_sample_feature_ranges(self.unlabeled.x)
        self.reset_buffer()
def get_num_instances(self):
"""Returns the total number of labeled and unlabeled instances that will be used for weight inference"""
n = 0
if self.unlabeled is not None:
n += len(self.unlabeled)
if self.labeled is not None:
# logger.debug("labeled_x: %s" % str(self.labeled_x.shape))
n += len(self.labeled)
return n
def init_query_state(self):
n = self.get_num_instances()
bt = get_budget_topK(n, self.opts)
self.qstate = Query.get_initial_query_state(self.opts.qtype, opts=self.opts, qrank=bt.topK,
a=1., b=1., budget=bt.budget)
def get_next_from_stream(self, n=0, transform=False):
if n == 0:
n = self.max_buffer
instances = self.stream.read_next_from_stream(n)
if instances is not None:
if False:
if self.buffer is not None:
logger.debug("buffer shape: %s" % str(self.buffer.x.shape))
logger.debug("x.shape: %s" % str(instances.x.shape))
if transform:
instances.x_transformed = self.get_transformed(instances.x)
self.add_to_buffer(instances)
self.model.add_samples(instances.x, current=False)
return instances
    def update_model_from_buffer(self, transform=False):
        """Updates the underlying model if it meets the criteria
        The minimum number of samples required for model update is:
            max(self.min_samples_for_update, self.opts.stream_window//2)
        We will replace trees in the following conditions:
            - if check_KL_divergence is True, then check whether the KL-divergence
                from reference distributions of 2*kl_alpha number of trees exceed
                the alpha-threshold; if so, then replace all trees which exceed their
                respective thresholds.
            - if check_KL_divergence is False, then replace the configured fraction of
                oldest trees. The fraction is configured with the command line
                parameter --forest_replace_frac.
        :param transform: bool
            when True, re-transform and re-cache features of labeled, unlabeled,
            and buffered instances after the model update
        :return: bool
            True when the model was actually updated
        """
        model_updated = False
        min_samples_required = max(self.min_samples_for_update, self.opts.stream_window//2)
        if self.buffer is None or self.buffer.x is None or self.buffer.x.shape[0] < min_samples_required:
            logger.warning("Insufficient samples (%d) for model update. Minimum required: %d = max(%d,%d)." %
                           (0 if self.buffer is None or self.buffer.x is None else self.buffer.x.shape[0],
                            min_samples_required, self.min_samples_for_update, self.opts.stream_window//2))
        else:
            tm = Timer()
            n_trees = self.model.clf.n_estimators
            # at least this many trees must exceed their KL thresholds to trigger replacement
            n_threshold = int(2 * self.kl_alpha * n_trees)
            replace_trees_by_kl = None
            if self.opts.check_KL_divergence:
                # compare the buffer's distribution against the stored reference distributions
                kl_trees, _ = self.model.get_KL_divergence_distribution(self.buffer.x, p=self.current_dists)
                replace_trees_by_kl = self.model.get_trees_to_replace(kl_trees, self.kl_q_alpha)
                logger.debug("kl kl_q_alpha: %s (alpha=%0.2f), kl_trees:\n%s\n(#replace: %d): %s" %
                             (str(list(self.kl_q_alpha)), self.kl_alpha, str(list(kl_trees)), len(replace_trees_by_kl), str(list(replace_trees_by_kl))))
            n_replace = 0 if replace_trees_by_kl is None else len(replace_trees_by_kl)
            # check whether conditions for tree-replacement are satisfied.
            do_replace = not self.opts.check_KL_divergence or (n_trees > 0 and n_replace >= n_threshold)
            if do_replace:
                self.model.update_model_from_stream_buffer(replace_trees=replace_trees_by_kl)
                if is_forest_detector(self.opts.detector_type):
                    # refresh the reference distributions and KL thresholds for the new forest
                    self.current_dists = self.model.get_node_sample_distributions(self.buffer.x)
                    kl_trees, self.kl_q_alpha = self.model.get_KL_divergence_distribution(self.buffer.x, alpha=self.kl_alpha)
                    logger.debug("kl kl_q_alpha: %s, kl_trees:\n%s" % (str(list(self.kl_q_alpha)), str(list(kl_trees))))
                model_updated = True
            logger.debug(tm.message(
                "Model%s updated; n_replace: %d, n_threshold: %d, kl_q_alpha: %s (check_KL: %s, alpha: %0.2f)" %
                (" not" if not do_replace else "", n_replace, n_threshold,
                 str(list(self.kl_q_alpha)), str(self.opts.check_KL_divergence), self.kl_alpha)
            ))
        if transform:
            # re-cache transformed features since the projections may have changed
            if self.labeled is not None and self.labeled.x is not None:
                self.labeled.x_transformed = self.get_transformed(self.labeled.x)
            if self.unlabeled is not None and self.unlabeled.x is not None:
                self.unlabeled.x_transformed = self.get_transformed(self.unlabeled.x)
            if self.buffer is not None and self.buffer.x is not None:
                self.buffer.x_transformed = self.get_transformed(self.buffer.x)
        return model_updated
def stream_buffer_empty(self):
return self.stream.empty()
def get_anomaly_scores(self, x, x_transformed=None):
if x_transformed is None:
x_new = self.get_transformed(x)
else:
if x.shape[0] != x_transformed.shape[0]:
raise ValueError("x(%d) and x_transformed(%d) are inconsistent" % (x.shape[0], x_transformed.shape[0]))
x_new = x_transformed
scores = self.model.get_score(x_new)
return scores
def get_auc(self, x, y, x_transformed=None):
scores = self.get_anomaly_scores(x, x_transformed=x_transformed)
auc = fn_auc(cbind(y, -scores))
return auc
    def get_allowed_labeled_subset(self):
        """ Returns a randomly selected subset of labeled instances
        The number of instances returned is determined by the upper limit
        specified through the optional parameters opts.labeled_to_window_ratio
        and opts.max_labeled_for_stream in the streaming mode.
        :return: InstanceList
        """
        # first, compute the maximum number of labeled instances allowed for
        # computing AAD losses and constraints...
        n_labeled = 0 if self.labeled is None else len(self.labeled.x)
        if n_labeled == 0 or (self.opts.labeled_to_window_ratio is None and self.opts.max_labeled_for_stream is None):
            # no subsampling configured (or nothing to subsample)
            return self.labeled
        n_allowed_labeled = self.max_buffer if self.opts.labeled_to_window_ratio is None \
            else int(self.opts.labeled_to_window_ratio * self.max_buffer)
        n_allowed_labeled = n_allowed_labeled if self.opts.max_labeled_for_stream is None \
            else min(n_allowed_labeled, self.opts.max_labeled_for_stream)
        n_allowed_labeled = min(n_allowed_labeled, n_labeled)
        if n_allowed_labeled == n_labeled:
            # the full labeled set already fits within the allowance
            return self.labeled
        labeled = InstanceList(x=self.labeled.x, y=self.labeled.y,
                               ids=self.labeled.ids, x_transformed=self.labeled.x_transformed)
        # aim for an even split between anomalies and nominals
        n_per_type = n_allowed_labeled // 2
        anom_idxs = np.where(self.labeled.y == 1)[0]
        noml_idxs = np.where(self.labeled.y == 0)[0]
        if len(anom_idxs) > n_per_type:
            # randomly subsample the anomalies
            np.random.shuffle(anom_idxs)
            idxs = anom_idxs[0:n_per_type]
        else:
            idxs = anom_idxs
        n_anoms = len(idxs)
        # nominals fill whatever allowance the anomalies did not use
        n_nomls = n_allowed_labeled - n_anoms
        if len(noml_idxs) > n_nomls:
            np.random.shuffle(noml_idxs)
            idxs = np.append(idxs, noml_idxs[0:n_nomls])
        else:
            idxs = np.append(idxs, noml_idxs)
            n_nomls = len(idxs) - n_anoms
        if False:
            logger.debug("n_labeled: %d, n_allowed_labeled: %d, n_anoms: %d, n_nomls: %d" %
                         (n_labeled, n_allowed_labeled, n_anoms, n_nomls))
        # drop every instance not selected above
        mask = np.zeros(n_labeled, dtype=bool)
        mask[idxs[0:n_allowed_labeled]] = True
        labeled.retain_with_mask(mask)
        return labeled
def setup_data_for_feedback(self):
"""
Prepares the input matrices/data structures for weight update. The format
is such that the top rows of data matrix are labeled and below are unlabeled.
:return: (np.ndarray, np.array, np.array, np.array)
(x, y, ha, hn)
x - data matrix, y - labels (np.nan for unlabeled),
ha - indexes of labeled anomalies, hn - indexes of labeled nominals
"""
labeled = self.get_allowed_labeled_subset()
if labeled is None:
tmp = self.unlabeled
elif self.unlabeled is None:
tmp = labeled
else:
tmp = append_instance_lists(labeled, self.unlabeled)
if labeled is not None:
ha = np.where(labeled.y == 1)[0]
hn = np.where(labeled.y == 0)[0]
else:
ha = np.zeros(0, dtype=int)
hn = np.zeros(0, dtype=int)
if False:
logger.debug("x: %d, ha: %d, hn:%d" % (nrow(tmp.x), len(ha), len(hn)))
return tmp, ha, hn
def get_instance_stats(self):
nha = nhn = nul = 0
if self.labeled is not None and self.labeled.y is not None:
nha = len(np.where(self.labeled.y == 1)[0])
nhn = len(np.where(self.labeled.y == 0)[0])
if self.unlabeled is not None:
nul = len(self.unlabeled)
return nha, nhn, nul
def get_num_labeled(self):
"""Returns the number of instances for which we already have label feedback"""
if self.labeled is not None:
return len(self.labeled.y)
return 0
def reestimate_tau(self, default_tau):
"""Re-estimate the proportion of anomalies
The number of true anomalies discovered might end up being high
relative to the data in the memory. We need to adjust for that...
:param default_tau: float
default proportion of anomalies
:return: float
"""
new_tau = default_tau
nha, nhn, nul = self.get_instance_stats()
frac_known_anom = nha * 1.0 / (nha + nhn + nul)
if frac_known_anom >= default_tau:
new_tau = frac_known_anom + 0.01
logger.debug("Exceeded original tau (%f); setting tau=%f" % (default_tau, new_tau))
return new_tau
def update_weights_with_no_feedback(self, n_train=None, debug_auc=False):
"""Runs the weight update n times
This is used when:
1. There has been a significant update to the model because
of (say) data drift and we want to iteratively estimate the
ensemble weights and the tau-quantile value a number of times.
2. We have an initial fully labeled set with which we want to
pretrain the model
"""
n = n_train if n_train is not None else self.opts.n_weight_updates_after_stream_window
if self.opts.do_not_update_weights or n <= 0:
return
tm = Timer()
tmp, ha, hn = self.setup_data_for_feedback()
x, y, ids, x_transformed = tmp.x, tmp.y, tmp.ids, tmp.x_transformed
orig_tau = self.opts.tau
self.opts.tau = self.reestimate_tau(orig_tau)
if debug_auc: logger.debug("AUC[0]: %f" % (self.get_auc(x=x, y=y, x_transformed=x_transformed)))
for i in range(n):
self.model.update_weights(x_transformed, y, ha, hn, self.opts)
if debug_auc: logger.debug("AUC[%d]: %f" % (i+1, self.get_auc(x=x, y=y, x_transformed=x_transformed)))
self.opts.tau = orig_tau
logger.debug(tm.message("Updated weights %d times with no feedback " % n))
def get_query_data(self, x=None, y=None, ids=None, ha=None, hn=None, unl=None, w=None, n_query=1):
"""Returns the best instance that should be queried, along with other data structures
Args:
x: np.ndarray
input instances (labeled + unlabeled)
y: np.array
labels for instances which are already labeled, else some dummy values
ids: np.array
unique instance ids
ha: np.array
indexes of labeled anomalies
hn: np.array
indexes of labeled nominals
unl: np.array
unlabeled instances that should be ignored for query
w: np.array
current weight vector
n_query: int
number of instances to query
"""
if self.get_num_instances() == 0:
raise ValueError("No instances available")
x_transformed = None
if x is None:
tmp, ha, hn = self.setup_data_for_feedback()
x, y, ids, x_transformed = tmp.x, tmp.y, tmp.ids, tmp.x_transformed
n = x.shape[0]
if w is None:
w = self.model.w
if unl is None:
unl = np.zeros(0, dtype=int)
n_feedback = len(ha) + len(hn)
# the top n_feedback instances in the instance list are the labeled items
queried_items = append(np.arange(n_feedback), unl)
if x_transformed is None:
x_transformed = self.get_transformed(x)
logger.debug("needs transformation")
order_anom_idxs, anom_score = self.model.order_by_score(x_transformed)
ensemble = Ensemble(x, original_indexes=0)
xi = self.qstate.get_next_query(maxpos=n, ordered_indexes=order_anom_idxs,
queried_items=queried_items,
ensemble=ensemble,
feature_ranges=self.feature_ranges,
model=self.model,
x=x_transformed, lbls=y, anom_score=anom_score,
w=w, hf=append(ha, hn),
remaining_budget=self.opts.num_query_batch, # self.opts.budget - n_feedback,
n=n_query)
if False:
logger.debug("ordered instances[%d]: %s\nha: %s\nhn: %s\nxi: %s" %
(self.opts.budget, str(list(order_anom_idxs[0:self.opts.budget])),
str(list(ha)), str(list(hn)), str(list(xi))))
return xi, x, y, ids, x_transformed, ha, hn, order_anom_idxs, anom_score
def get_transformed(self, x):
"""Returns the instance.x_transformed
Args:
instances: InstanceList
Returns: scipy sparse array
"""
# logger.debug("transforming data...")
x_transformed = self.model.transform_to_ensemble_features(
x, dense=False, norm_unit=self.opts.norm_unit)
return x_transformed
def move_unlabeled_to_labeled(self, xi, yi):
unlabeled_idxs = xi
x, _, id, x_trans = self.unlabeled.get_instances_at(unlabeled_idxs)
if self.labeled is None:
self.labeled = InstanceList(x=self.unlabeled.x[unlabeled_idxs, :],
y=yi,
ids=None if id is None else id,
x_transformed=x_trans)
else:
self.labeled.add_instance(x, y=yi, id=id, x_transformed=x_trans)
self.unlabeled.remove_instance_at(unlabeled_idxs)
def update_weights_with_feedback(self, xis, yis, x, y, x_transformed, ha, hn):
"""Relearns the optimal weights from feedback and updates internal labeled and unlabeled matrices
IMPORTANT:
This API assumes that the input x, y, x_transformed are consistent with
the internal labeled/unlabeled matrices, i.e., the top rows/values in
these matrices are from labeled data and bottom ones are from internally
stored unlabeled data.
Args:
xis: np.array(dtype=int)
indexes of instances in Union(self.labeled, self.unlabeled)
yis: np.array(dtype=int)
labels {0, 1} of instances (supposedly provided by an Oracle)
x: numpy.ndarray
set of all instances
y: list of int
set of all labels (only those at locations in the lists ha and hn are relevant)
x_transformed: numpy.ndarray
x transformed to ensemble features
ha: list of int
indexes of labeled anomalies
hn: list of int
indexes of labeled nominals
"""
# Add the newly labeled instance to the corresponding list of labeled
# instances and remove it from the unlabeled set.
nhn = len(ha) + len(hn)
self.move_unlabeled_to_labeled(xis - nhn, yis)
for xi, yi in zip(xis, yis):
if yi == 1:
ha = append(ha, [xi])
else:
hn = append(hn, [xi])
if not self.opts.do_not_update_weights:
self.model.update_weights(x_transformed, y, ha, hn, self.opts)
def run_feedback(self):
"""Runs active learning loop for current unlabeled window of data."""
min_feedback = self.opts.min_feedback_per_window
max_feedback = self.opts.max_feedback_per_window
# For the last window, we query till the buffer is exhausted
# irrespective of whether we exceed max_feedback per window limit
if self.stream_buffer_empty() and self.opts.till_budget:
bk = get_budget_topK(self.unlabeled.x.shape[0], self.opts)
n_labeled = 0 if self.labeled is None else len(self.labeled.y)
max_feedback = max(0, bk.budget - (n_labeled - self.n_prelabeled_instances))
max_feedback = min(max_feedback, self.unlabeled.x.shape[0])
if False:
# get baseline metrics
x_transformed = self.get_transformed(self.unlabeled.x)
ordered_idxs, _ = self.model.order_by_score(x_transformed)
seen_baseline = self.unlabeled.y[ordered_idxs[0:max_feedback]]
num_seen_baseline = np.cumsum(seen_baseline)
logger.debug("num_seen_baseline:\n%s" % str(list(num_seen_baseline)))
# baseline scores
w_baseline = self.model.get_uniform_weights()
order_baseline, scores_baseline = self.model.order_by_score(self.unlabeled.x_transformed, w_baseline)
n_seen_baseline = min(max_feedback, len(self.unlabeled.y))
queried_baseline = order_baseline[0:n_seen_baseline]
seen_baseline = self.unlabeled.y[queried_baseline]
orig_tau = self.opts.tau
self.opts.tau = self.reestimate_tau(orig_tau)
seen = np.zeros(0, dtype=int)
n_unlabeled = np.zeros(0, dtype=int)
queried = np.zeros(0, dtype=int)
unl = np.zeros(0, dtype=int)
i = 0
n_feedback = 0
while n_feedback < max_feedback:
i += 1
# scores based on current weights
xi_, x, y, ids, x_transformed, ha, hn, order_anom_idxs, anom_score = \
self.get_query_data(unl=unl, n_query=self.opts.n_explore)
order_anom_idxs_minus_ha_hn = get_first_vals_not_marked(
order_anom_idxs, append(ha, hn), n=len(order_anom_idxs))
bt = get_budget_topK(x_transformed.shape[0], self.opts)
# Note: We will ensure that the tau-th instance is atleast 10-th (or lower) ranked
tau_rank = min(max(bt.topK, 10), x.shape[0])
xi = np.array(xi_, dtype=int)
if n_feedback + len(xi) > max_feedback:
xi = xi[0:(max_feedback - n_feedback)]
n_feedback += len(xi)
# logger.debug("n_feedback: %d, #xi: %d" % (n_feedback, len(xi)))
means = vars = qpos = m_tau = v_tau = None
if self.opts.query_confident:
# get the mean score and its variance for the top ranked instances
# excluding the instances which have already been queried
means, vars, test, v_eval, _ = get_score_variances(x_transformed, self.model.w,
n_test=tau_rank,
ordered_indexes=order_anom_idxs,
queried_indexes=append(ha, hn))
# get the mean score and its variance for the tau-th ranked instance
m_tau, v_tau, _, _, _ = get_score_variances(x_transformed[order_anom_idxs_minus_ha_hn[tau_rank]],
self.model.w, n_test=1,
test_indexes=np.array([0], dtype=int))
qpos = np.where(test == xi[0])[0] # top-most ranked instance
if False and self.opts.query_confident:
logger.debug("tau score:\n%s (%s)" % (str(list(m_tau)), str(list(v_tau))))
strmv = ",".join(["%f (%f)" % (means[j], vars[j]) for j in np.arange(len(means))])
logger.debug("scores:\n%s" % strmv)
# check if we are confident that this is larger than the tau-th ranked instance
if (not self.opts.query_confident) or (n_feedback <= min_feedback or
means[qpos] - 3. * np.sqrt(vars[qpos]) >= m_tau):
seen = np.append(seen, y[xi])
queried_ = [ids[q] for q in xi]
queried = np.append(queried, queried_)
tm_update = Timer()
self.update_weights_with_feedback(xi, y[xi], x, y, x_transformed, ha, hn)
tm_update.end()
# reset the list of queried test instances because their scores would have changed
unl = np.zeros(0, dtype=int)
if False:
nha, nhn, nul = self.get_instance_stats()
# logger.debug("xi:%d, test indxs: %s, qpos: %d" % (xi, str(list(test)), qpos))
# logger.debug("orig scores:\n%s" % str(list(anom_score[order_anom_idxs[0:tau_rank]])))
logger.debug("[%d] #feedback: %d; ha: %d; hn: %d, mnw: %d, mxw: %d; update: %f sec(s)" %
(i, nha + nhn, nha, nhn, min_feedback, max_feedback, tm_update.elapsed()))
else:
# ignore these instances from query
unl = np.append(unl, xi)
# logger.debug("skipping feedback for xi=%d at iter %d; unl: %s" % (xi, i, str(list(unl))))
# continue
n_unlabeled = np.append(n_unlabeled, [int(np.sum(self.unlabeled.y))])
# logger.debug("y:\n%s" % str(list(y)))
self.opts.tau = orig_tau
# logger.debug("w:\n%s" % str(list(sad.model.w)))
return seen, seen_baseline, queried, None, n_unlabeled
def print_instance_stats(self, msg="debug"):
logger.debug("%s:\nlabeled: %s, unlabeled: %s" %
(msg,
'-' if self.labeled is None else str(self.labeled),
'-' if self.unlabeled is None else str(self.unlabeled)))
def train_aad_model(opts, x):
random_state = np.random.RandomState(opts.randseed + opts.fid * opts.reruns + opts.runidx)
# fit the model
model = get_aad_model(x, opts, random_state)
model.fit(x)
model.init_weights(init_type=opts.init)
return model
def prepare_aad_model(x, y, opts):
if opts.load_model and opts.modelfile != "" and os.path.isfile(opts.modelfile):
logger.debug("Loading model from file %s" % opts.modelfile)
model = load_aad_model(opts.modelfile)
else:
model = train_aad_model(opts, x)
if is_forest_detector(model.detector_type):
logger.debug("total #nodes: %d" % (len(model.all_regions)))
if False:
if model.w is not None:
logger.debug("w:\n%s" % str(list(model.w)))
else:
logger.debug("model weights are not set")
return model
def prepare_stream_anomaly_detector(stream, opts):
"""Prepares an instance of the StreamingAnomalyDetector
:param stream: DataStream
:param opts: AadOpts
:param pretrain: boolean
If True, then treats the first window of data as fully *LABELED* and updates
the weights with the labeled data. Next, fetches the next window of data
as fully *UNLABELED* and updates tree structure if needed.
If False, then treats the first window of data as fully unlabeled.
:param n_pretrain: int
Number of times to run the weight update if pre-training is required.
:return: StreamingAnomalyDetector
"""
training_set = stream.read_next_from_stream(opts.stream_window)
X_train, y_train, ids = training_set.x, training_set.y, training_set.ids
model = prepare_aad_model(X_train, y_train, opts) # initial model training
if opts.pretrain:
# first window pre-trains the model as fully labeled set
sad = StreamingAnomalyDetector(stream, model,
labeled_x=X_train, labeled_y=y_train, labeled_ids=ids,
max_buffer=opts.stream_window, opts=opts)
# second window is treated as fully unlabeled
instances = sad.get_next_from_stream(sad.max_buffer,
transform=(not opts.allow_stream_update))
if instances is not None:
model_updated = False
if opts.allow_stream_update:
model_updated = sad.update_model_from_buffer(transform=True)
sad.move_buffer_to_unlabeled()
if model_updated:
sad.update_weights_with_no_feedback()
sad.feature_ranges = get_sample_feature_ranges(instances.x)
else:
sad.feature_ranges = get_sample_feature_ranges(X_train)
sad.init_query_state()
else:
# first window is treated as fully unlabeled
sad = StreamingAnomalyDetector(stream, model,
unlabeled_x=X_train, unlabeled_y=y_train, unlabeled_ids=ids,
max_buffer=opts.stream_window, opts=opts)
sad.feature_ranges = get_sample_feature_ranges(X_train)
sad.init_query_state()
return sad
def aad_stream():
logger = logging.getLogger(__name__)
# PRODUCTION
args = get_aad_command_args(debug=False)
# print "log file: %s" % args.log_file
configure_logger(args)
opts = AadOpts(args)
# print opts.str_opts()
logger.debug(opts.str_opts())
if not opts.streaming:
raise ValueError("Only streaming supported")
np.random.seed(opts.randseed)
X_full, y_full = read_data_as_matrix(opts)
logger.debug("loaded file: (%s) %s" % (str(X_full.shape), opts.datafile))
logger.debug("results dir: %s" % opts.resultsdir)
all_num_seen = None
all_num_not_seen = None
all_num_seen_baseline = None
all_queried = None
all_window = None
all_window_baseline = None
opts.fid = 1
for runidx in opts.get_runidxs():
tm_run = Timer()
opts.set_multi_run_options(opts.fid, runidx)
stream = DataStream(X_full, y_full, IdServer(initial=0))
# from aad.malware_aad import MalwareDataStream
# stream = MalwareDataStream(X_full, y_full, IdServer(initial=0))
sad = prepare_stream_anomaly_detector(stream, opts)
if sad.unlabeled is None:
logger.debug("No instances to label")
continue
iter = 0
seen = np.zeros(0, dtype=int)
n_unlabeled = np.zeros(0, dtype=int)
seen_baseline = np.zeros(0, dtype=int)
queried = np.zeros(0, dtype=int)
stream_window_tmp = np.zeros(0, dtype=int)
stream_window_baseline = np.zeros(0, dtype=int)
stop_iter = False
while not stop_iter:
iter += 1
tm = Timer()
seen_, seen_baseline_, queried_, queried_baseline_, n_unlabeled_ = sad.run_feedback()
# gather metrics...
seen = append(seen, seen_)
n_unlabeled = append(n_unlabeled, n_unlabeled_)
seen_baseline = append(seen_baseline, seen_baseline_)
queried = append(queried, queried_)
stream_window_tmp = append(stream_window_tmp, np.ones(len(seen_)) * iter)
stream_window_baseline = append(stream_window_baseline, np.ones(len(seen_baseline_)) * iter)
# get the next window of data from stream and transform features...
# Note: Since model update will automatically transform the data, we will
# not transform while reading from stream. If however, the model is not
# to be updated, then we transform the data while reading from stream
instances = sad.get_next_from_stream(sad.max_buffer,
transform=(not opts.allow_stream_update))
if instances is None or iter >= opts.max_windows or len(queried) >= opts.budget:
if iter >= opts.max_windows:
logger.debug("Exceeded %d iters; exiting stream read..." % opts.max_windows)
stop_iter = True
else:
model_updated = False
if opts.allow_stream_update:
model_updated = sad.update_model_from_buffer(transform=True)
sad.move_buffer_to_unlabeled()
if model_updated:
sad.update_weights_with_no_feedback()
logger.debug(tm.message("Stream window [%d]: algo [%d/%d]; baseline [%d/%d]; unlabeled anoms [%d]: " %
(iter, int(np.sum(seen)), len(seen),
int(np.sum(seen_baseline)), len(seen_baseline),
int(np.sum(sad.unlabeled.y)))))
# retained = int(np.sum(sad.unlabeled_y)) if sad.unlabeled_y is not None else 0
# logger.debug("Final retained unlabeled anoms: %d" % retained)
num_seen_tmp = np.cumsum(seen)
# logger.debug("\nnum_seen : %s" % (str(list(num_seen_tmp)),))
num_seen_baseline = np.cumsum(seen_baseline)
# logger.debug("Numseen in %d budget (overall):\n%s" % (opts.budget, str(list(num_seen_baseline))))
stream_window_baseline = append(np.array([opts.fid, opts.runidx],
dtype=stream_window_baseline.dtype),
stream_window_baseline)
stream_window = np.ones(len(stream_window_baseline) + 2, dtype=stream_window_tmp.dtype) * -1
stream_window[0:2] = [opts.fid, opts.runidx]
stream_window[2:(2+len(stream_window_tmp))] = stream_window_tmp
# num_seen_baseline has the uniformly maximum number of queries.
# the number of queries in num_seen will vary under the query confidence mode
num_seen = np.ones(len(num_seen_baseline) + 2, dtype=num_seen_tmp.dtype) * -1
num_not_seen = np.ones(len(num_seen_baseline) + 2, dtype=num_seen.dtype) * -1
num_seen[0:2] = [opts.fid, opts.runidx]
num_seen[2:(2+len(num_seen_tmp))] = num_seen_tmp
queried_ids = np.ones(len(num_seen_baseline) + 2, dtype=num_seen_tmp.dtype) * -1
queried_ids[0:2] = [opts.fid, opts.runidx]
# IMPORTANT:: The queried indexes are output as 1-indexed (NOT zero-indexed)
# logger.debug("queried:\n%s\n%s" % (str(list(queried)), str(list(y_full[queried]))))
queried_ids[2:(2 + len(queried))] = queried + 1
# the number of unlabeled instances in buffer. For streaming this is
# important since this represents the potential to discover true
# anomalies. True anomalies in unlabeled set should not get discarded
# when a new window of data arrives.
num_not_seen[0:2] = [opts.fid, opts.runidx]
num_not_seen[2:(2+len(n_unlabeled))] = n_unlabeled
num_seen_baseline = append(np.array([opts.fid, opts.runidx], dtype=num_seen_baseline.dtype), num_seen_baseline)
all_num_seen = rbind(all_num_seen, matrix(num_seen, nrow=1))
all_num_not_seen = rbind(all_num_not_seen, matrix(num_not_seen, nrow=1))
all_num_seen_baseline = rbind(all_num_seen_baseline, matrix(num_seen_baseline, nrow=1))
all_queried = rbind(all_queried, matrix(queried_ids, nrow=1))
all_window = rbind(all_window, matrix(stream_window, nrow=1))
all_window_baseline = rbind(all_window_baseline, matrix(stream_window_baseline, nrow=1))
logger.debug(tm_run.message("Completed runidx: %d" % runidx))
results = SequentialResults(num_seen=all_num_seen,
num_not_seen=all_num_not_seen,
true_queried_indexes=all_queried,
num_seen_baseline=all_num_seen_baseline,
# true_queried_indexes_baseline=all_queried_baseline,
stream_window=all_window,
stream_window_baseline=all_window_baseline,
aucs=None)
write_sequential_results_to_csv(results, opts)
if __name__ == "__main__":
aad_stream()
| [
"numpy.random.seed",
"numpy.random.shuffle",
"numpy.copy",
"numpy.sum",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.cumsum",
"os.path.isfile",
"numpy.where",
"numpy.append",
"numpy.arange",
"numpy.array",
"numpy.mean",
"logging.getLogger",
"numpy.sqrt"
] | [((36136, 36211), 'numpy.random.RandomState', 'np.random.RandomState', (['(opts.randseed + opts.fid * opts.reruns + opts.runidx)'], {}), '(opts.randseed + opts.fid * opts.reruns + opts.runidx)\n', (36157, 36211), True, 'import numpy as np\n'), ((39299, 39326), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (39316, 39326), False, 'import logging\n'), ((39634, 39663), 'numpy.random.seed', 'np.random.seed', (['opts.randseed'], {}), '(opts.randseed)\n', (39648, 39663), True, 'import numpy as np\n'), ((3692, 3714), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (3700, 3714), True, 'import numpy as np\n'), ((20769, 20800), 'numpy.zeros', 'np.zeros', (['n_labeled'], {'dtype': 'bool'}), '(n_labeled, dtype=bool)\n', (20777, 20800), True, 'import numpy as np\n'), ((31568, 31590), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (31576, 31590), True, 'import numpy as np\n'), ((31613, 31635), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (31621, 31635), True, 'import numpy as np\n'), ((31654, 31676), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (31662, 31676), True, 'import numpy as np\n'), ((31691, 31713), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (31699, 31713), True, 'import numpy as np\n'), ((36448, 36478), 'os.path.isfile', 'os.path.isfile', (['opts.modelfile'], {}), '(opts.modelfile)\n', (36462, 36478), False, 'import os\n'), ((40537, 40559), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (40545, 40559), True, 'import numpy as np\n'), ((40582, 40604), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (40590, 40604), True, 'import numpy as np\n'), ((40629, 40651), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (40637, 40651), True, 'import numpy as np\n'), ((40670, 40692), 'numpy.zeros', 'np.zeros', 
(['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (40678, 40692), True, 'import numpy as np\n'), ((40721, 40743), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (40729, 40743), True, 'import numpy as np\n'), ((40777, 40799), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (40785, 40799), True, 'import numpy as np\n'), ((43046, 43061), 'numpy.cumsum', 'np.cumsum', (['seen'], {}), '(seen)\n', (43055, 43061), True, 'import numpy as np\n'), ((43165, 43189), 'numpy.cumsum', 'np.cumsum', (['seen_baseline'], {}), '(seen_baseline)\n', (43174, 43189), True, 'import numpy as np\n'), ((3424, 3461), 'numpy.where', 'np.where', (['(self.initial_labeled.y == 1)'], {}), '(self.initial_labeled.y == 1)\n', (3432, 3461), True, 'import numpy as np\n'), ((9388, 9412), 'numpy.where', 'np.where', (['(labeled.y == 1)'], {}), '(labeled.y == 1)\n', (9396, 9412), True, 'import numpy as np\n'), ((9436, 9460), 'numpy.where', 'np.where', (['(labeled.y == 0)'], {}), '(labeled.y == 0)\n', (9444, 9460), True, 'import numpy as np\n'), ((20009, 20038), 'numpy.where', 'np.where', (['(self.labeled.y == 1)'], {}), '(self.labeled.y == 1)\n', (20017, 20038), True, 'import numpy as np\n'), ((20062, 20091), 'numpy.where', 'np.where', (['(self.labeled.y == 0)'], {}), '(self.labeled.y == 0)\n', (20070, 20091), True, 'import numpy as np\n'), ((20147, 20175), 'numpy.random.shuffle', 'np.random.shuffle', (['anom_idxs'], {}), '(anom_idxs)\n', (20164, 20175), True, 'import numpy as np\n'), ((20385, 20413), 'numpy.random.shuffle', 'np.random.shuffle', (['noml_idxs'], {}), '(noml_idxs)\n', (20402, 20413), True, 'import numpy as np\n'), ((20433, 20470), 'numpy.append', 'np.append', (['idxs', 'noml_idxs[0:n_nomls]'], {}), '(idxs, noml_idxs[0:n_nomls])\n', (20442, 20470), True, 'import numpy as np\n'), ((20504, 20530), 'numpy.append', 'np.append', (['idxs', 'noml_idxs'], {}), '(idxs, noml_idxs)\n', (20513, 20530), True, 'import numpy as np\n'), ((21783, 21805), 
'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (21791, 21805), True, 'import numpy as np\n'), ((21823, 21845), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (21831, 21845), True, 'import numpy as np\n'), ((25807, 25829), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (25815, 25829), True, 'import numpy as np\n'), ((25982, 26003), 'numpy.arange', 'np.arange', (['n_feedback'], {}), '(n_feedback)\n', (25991, 26003), True, 'import numpy as np\n'), ((30979, 31003), 'numpy.cumsum', 'np.cumsum', (['seen_baseline'], {}), '(seen_baseline)\n', (30988, 31003), True, 'import numpy as np\n'), ((32398, 32422), 'numpy.array', 'np.array', (['xi_'], {'dtype': 'int'}), '(xi_, dtype=int)\n', (32406, 32422), True, 'import numpy as np\n'), ((43339, 43408), 'numpy.array', 'np.array', (['[opts.fid, opts.runidx]'], {'dtype': 'stream_window_baseline.dtype'}), '([opts.fid, opts.runidx], dtype=stream_window_baseline.dtype)\n', (43347, 43408), True, 'import numpy as np\n'), ((44983, 45047), 'numpy.array', 'np.array', (['[opts.fid, opts.runidx]'], {'dtype': 'num_seen_baseline.dtype'}), '([opts.fid, opts.runidx], dtype=num_seen_baseline.dtype)\n', (44991, 45047), True, 'import numpy as np\n'), ((5163, 5184), 'numpy.copy', 'np.copy', (['self.model.w'], {}), '(self.model.w)\n', (5170, 5184), True, 'import numpy as np\n'), ((8834, 8871), 'numpy.array', 'np.array', (['selected_indexes'], {'dtype': 'int'}), '(selected_indexes, dtype=int)\n', (8842, 8871), True, 'import numpy as np\n'), ((21679, 21703), 'numpy.where', 'np.where', (['(labeled.y == 1)'], {}), '(labeled.y == 1)\n', (21687, 21703), True, 'import numpy as np\n'), ((21724, 21748), 'numpy.where', 'np.where', (['(labeled.y == 0)'], {}), '(labeled.y == 0)\n', (21732, 21748), True, 'import numpy as np\n'), ((34323, 34345), 'numpy.append', 'np.append', (['seen', 'y[xi]'], {}), '(seen, y[xi])\n', (34332, 34345), True, 'import numpy as np\n'), ((34420, 34448), 
'numpy.append', 'np.append', (['queried', 'queried_'], {}), '(queried, queried_)\n', (34429, 34448), True, 'import numpy as np\n'), ((34728, 34750), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'int'}), '(0, dtype=int)\n', (34736, 34750), True, 'import numpy as np\n'), ((35356, 35374), 'numpy.append', 'np.append', (['unl', 'xi'], {}), '(unl, xi)\n', (35365, 35374), True, 'import numpy as np\n'), ((5528, 5557), 'numpy.where', 'np.where', (['(self.labeled.y == 0)'], {}), '(self.labeled.y == 0)\n', (5536, 5557), True, 'import numpy as np\n'), ((8270, 8288), 'numpy.where', 'np.where', (['(l.y == 1)'], {}), '(l.y == 1)\n', (8278, 8288), True, 'import numpy as np\n'), ((8316, 8334), 'numpy.where', 'np.where', (['(l.y == 0)'], {}), '(l.y == 0)\n', (8324, 8334), True, 'import numpy as np\n'), ((11821, 11833), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (11830, 11833), True, 'import numpy as np\n'), ((22127, 22156), 'numpy.where', 'np.where', (['(self.labeled.y == 1)'], {}), '(self.labeled.y == 1)\n', (22135, 22156), True, 'import numpy as np\n'), ((22183, 22212), 'numpy.where', 'np.where', (['(self.labeled.y == 0)'], {}), '(self.labeled.y == 0)\n', (22191, 22212), True, 'import numpy as np\n'), ((33680, 33703), 'numpy.where', 'np.where', (['(test == xi[0])'], {}), '(test == xi[0])\n', (33688, 33703), True, 'import numpy as np\n'), ((3169, 3186), 'numpy.mean', 'np.mean', (['kl_trees'], {}), '(kl_trees)\n', (3176, 3186), True, 'import numpy as np\n'), ((10837, 10861), 'numpy.sum', 'np.sum', (['self.unlabeled.y'], {}), '(self.unlabeled.y)\n', (10843, 10861), True, 'import numpy as np\n'), ((10933, 10954), 'numpy.sum', 'np.sum', (['self.buffer.y'], {}), '(self.buffer.y)\n', (10939, 10954), True, 'import numpy as np\n'), ((33631, 33655), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'int'}), '([0], dtype=int)\n', (33639, 33655), True, 'import numpy as np\n'), ((35564, 35588), 'numpy.sum', 'np.sum', (['self.unlabeled.y'], {}), '(self.unlabeled.y)\n', (35570, 35588), 
True, 'import numpy as np\n'), ((12406, 12431), 'numpy.sum', 'np.sum', (['tmp.y[missedidxs]'], {}), '(tmp.y[missedidxs])\n', (12412, 12431), True, 'import numpy as np\n'), ((12497, 12521), 'numpy.sum', 'np.sum', (['self.unlabeled.y'], {}), '(self.unlabeled.y)\n', (12503, 12521), True, 'import numpy as np\n'), ((34269, 34288), 'numpy.sqrt', 'np.sqrt', (['vars[qpos]'], {}), '(vars[qpos])\n', (34276, 34288), True, 'import numpy as np\n'), ((42681, 42693), 'numpy.sum', 'np.sum', (['seen'], {}), '(seen)\n', (42687, 42693), True, 'import numpy as np\n'), ((42748, 42769), 'numpy.sum', 'np.sum', (['seen_baseline'], {}), '(seen_baseline)\n', (42754, 42769), True, 'import numpy as np\n'), ((42833, 42856), 'numpy.sum', 'np.sum', (['sad.unlabeled.y'], {}), '(sad.unlabeled.y)\n', (42839, 42856), True, 'import numpy as np\n')] |
# Time-stamp: <2017-08-10>
'''Module for calculating the Wilcoxon-score and p value for each unique TF
Copyright (c) 2017, 2018 <NAME>, <NAME> <<EMAIL>>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License.
@status: release candidate
@version: $Id$
@author: <NAME>, <NAME>
@contact: <EMAIL>
'''
from __future__ import division
import argparse,os,sys,re
import pandas as pd
import numpy as np
import scipy
from scipy import stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size']=16
matplotlib.rcParams["font.sans-serif"] = ["Arial", "Liberation Sans", "Bitstream Vera Sans"]
matplotlib.rcParams["font.family"] = "sans-serif"
from BART.OptValidator import opt_validate,conf_validate
def factorial(n):
value = 1.0
while n>1:
value*=n
n-=1
return value
def logfac(n):
if n<20:
return np.log(factorial(n))
else:
return n*np.log(n)-n+(np.log(n*(1+4*n*(1+2*n)))/6.0)+(np.log(np.pi))/2.0
def irwin_hall_cdf(x,n):
# pval = returned_value for down regulated
# pval = 1 - returned_value for up regulated
value,k = 0,0
while k<=np.floor(x):
value +=(-1)**k*(scipy.special.binom(n,k))*(x-k)**n
k+=1
return value/(np.exp(logfac(n)))
def stat_plot(stat,tfs,ID,args,col):
# box-plot
fig=plt.figure(figsize=(2.6,2.6))
if not args.nonorm:
stat = stat.sort_values(by=[col])
for tf_id in stat.index:
plt.scatter(list(stat.index).index(tf_id)+1,-1*np.log10(irwin_hall_cdf(3*stat.loc[tf_id][col],3)),c='dimgrey',s=1)
plt.scatter(list(stat.index).index(tf_id)+1,-1*np.log10(irwin_hall_cdf(3*stat.loc[tf_id][col],3)),c='dimgrey',s=1,label="Others")
plt.scatter(list(stat.index).index(ID)+1,-1*np.log10(irwin_hall_cdf(3*stat.loc[ID][col],3)),c='r',s=1,label=ID)
plt.scatter(list(stat.index).index(ID)+1,-1*np.log10(irwin_hall_cdf(3*stat.loc[ID][col],3)),c='r',s=40)
else:
stat = stat.sort_values(by=['score'])
for tf_id in stat.index:
plt.scatter(list(stat.index).index(tf_id)+1,-1*np.log10(irwin_hall_cdf(3*stat.loc[tf_id]['score'],3)),c='dimgrey',s=1)
plt.scatter(list(stat.index).index(tf_id)+1,-1*np.log10(irwin_hall_cdf(3*stat.loc[tf_id]['score'],3)),c='dimgrey',s=1,label="Others")
plt.scatter(list(stat.index).index(ID)+1,-1*np.log10(irwin_hall_cdf(3*stat.loc[ID]['score'],3)),c='r',s=1,label=ID)
plt.scatter(list(stat.index).index(ID)+1,-1*np.log10(irwin_hall_cdf(3*stat.loc[ID]['score'],3)),c='r',s=40)
#plt.gca().invert_yaxis()
#plt.gca().xaxis.set_major_locator(plt.NullLocator())
#plt.title(ID,fontsize=18)
plt.legend(fontsize = 10,frameon=False,borderaxespad=0.,labelspacing=.2,loc='upper right',markerscale = 4)
plt.xlabel('TF Rank',fontsize=18)
plt.ylabel('-log10 ($p$)',fontsize = 18)
plt.axes().set_xticks([1,len(tfs)])#;print(len(tfs))
plotdir = args.outdir+os.sep+'{}_plot'.format(args.ofilename)
#os.makedirs(plotdir,exist_ok=True)
try:
os.makedirs(plotdir)
except:
sys.exit('Output directory: {} already exist, please select another directory.'.format(args.outdir))
figname1 = plotdir+os.sep+'{}_ranked_dot'.format(ID)
plt.savefig(figname1,bbox_inches='tight',pad_inches=0.02, dpi=600)
plt.close()
#Cumulative Fraction plot
background = []
for tf in tfs:
background.extend(tfs[tf])
target = tfs[ID]
background = sorted(background)
fig=plt.figure(figsize=(2.6,2.6))
dx = 0.01
x = np.arange(0,1,dx)
by,ty = [],[]
for xi in x:
by.append(sum(i< xi for i in background )/len(background))
ty.append(sum(i< xi for i in target )/len(target))
plt.plot(x,by,color='dimgrey',label='Background')
plt.plot(x,ty,'r-',label='{}'.format(ID))
plt.legend(fontsize = 10,frameon=False,borderaxespad=0.,labelspacing=.2,loc='upper left')
#maxval = max(background)
#minval = min(background)
#plt.ylim([0,1])
plt.xlim([0.2,1])
plt.ylabel('Cumulative Fraction',fontsize=18)
plt.xlabel('AUC',fontsize=18)
figname2 = plotdir+os.sep+'{}_cumulative_distribution'.format(ID)
plt.savefig(figname2,bbox_inches='tight',pad_inches=0.02, dpi=600)
plt.close()
def stat_test(AUCs,args):
# read AUCs according to TF type
print('Statistical tests start.\n')
tfs = {}
sam1 = []
for tf_key in AUCs.keys():
tf = tf_key.split('_')[0]
auc = AUCs[tf_key]
sam1.append(auc)
if tf not in tfs:
tfs[tf] = [auc]
else:
tfs[tf].append(auc)
cols = ['score','pvalue','max_auc','zscore','rank_score','rank_zscore','rank_pvalue','rank_auc','rank_avg_z_p','rank_avg_z_p_a','rank_avg_z_p_a_irwinhall_pvalue']
# ---- Rank TFs by Wilcoxon rank-sum score, normalized z-score, one-sided
# ---- p-value and max AUC, then write the combined ranking to
# ---- <outdir>/<ofilename>_bart_results.txt.
# NOTE(review): the stat.loc[tf]['col'] = ... pattern used throughout is
# pandas "chained assignment"; it works here because the frame holds object
# dtype columns, but stat.loc[tf, 'col'] would be the safe form.
stat = pd.DataFrame(index = [tf for tf in tfs],columns = cols)
#stat = {}
for tf in tfs.keys():
    if len(tfs[tf])>0: # filter the tf with few samples
        #stat_test = stats.mstats.ks_twosamp(sam1,tfs[tf],alternative='greater')
        stat_test = stats.ranksums(tfs[tf],sam1)
        #stat[tf] = [stat_test[0],stat_test[1]]
        stat.loc[tf]['score'] = stat_test[0]
        # one-sided test: halve the two-sided p-value, flipping the tail
        # when the statistic is negative
        stat.loc[tf]['pvalue'] = stat_test[1]*0.5 if stat_test[0]>0 else 1-stat_test[1]*0.5
# Per-TF background mean/std used to standardize the rank-sum scores.
tf_stats = pd.read_csv(args.normfile,sep='\t',index_col=0)
# cal the normalized stat-score
#print('Do Normalization...')
for i in stat.index:
    #stat[i].append((stat[i][0]-tf_stats.loc[i,'mean'])/tf_stats.loc[i,'std']) #[2] for Z-Score
    stat.loc[i]['zscore'] = (stat.loc[i]['score']-tf_stats.loc[i,'mean'])/tf_stats.loc[i,'std']
    stat.loc[i]['max_auc'] = max(tfs[i])
# rank the list by the average rank of stat-score and z-score
# rank of Wilcoxon Score
rs = 1
for i in sorted(stat.index,key = lambda x: stat.loc[x]['score'],reverse=True):
    #print(i,stat[i])
    stat.loc[i]['rank_score'] = rs # rank of stat_score
    #print(i,stat[i],'\n')
    rs +=1
# rank of Z-Score
rz = 1
for i in sorted(stat.index,key = lambda x: stat.loc[x]['zscore'],reverse=True):
    stat.loc[i]['rank_zscore'] = rz # rank of z-score
    #print(i,stat[i])
    rz +=1
# rank of pvalue
rp = 1
for i in sorted(stat.index,key = lambda x: stat.loc[x]['pvalue'],reverse=False):
    stat.loc[i]['rank_pvalue'] = rp # rank of pvalue
    #print(i,stat[i])
    rp +=1
# rank of max AUC
ra = 1
for i in sorted(stat.index,key = lambda x: stat.loc[x]['max_auc'],reverse=True):
    stat.loc[i]['rank_auc'] = ra # rank of max AUC
    #print(i,stat[i])
    ra +=1
# rank of average: combine the individual ranks; the Irwin-Hall CDF turns
# the (scaled) average rank into a p-value for the combined ranking.
for i in stat.index:
    stat.loc[i]['rank_avg_z_p'] = (stat.loc[i]['rank_zscore']+stat.loc[i]['rank_pvalue'])*0.5 # [6] for average of stat-score and z-score
    stat.loc[i]['rank_avg_z_p_a'] = (stat.loc[i]['rank_zscore']+stat.loc[i]['rank_pvalue']+stat.loc[i]['rank_auc'])*0.33/len(tfs.keys()) # [7] for average of three
    stat.loc[i]['rank_avg_z_p_a_irwinhall_pvalue'] = irwin_hall_cdf(3*stat.loc[i]['rank_avg_z_p_a'],3)
    #print(i,stat.loc[i])
# Write one tab-separated row per TF, best combined rank first.
statfile = args.outdir+os.sep+args.ofilename+'_bart_results.txt'
with open(statfile,'w') as statout:
    statout.write('TF\t{}\t{}\t{}\t{}\t{}\t{}\n'.format('statistic','pvalue','zscore','max_auc','re_rank','irwin_hall_pvalue'))
    for i in sorted(stat.index,key=lambda x: stat.loc[x]['rank_avg_z_p_a'],reverse=False):
        statout.write('{}\t{:.3f}\t{:.3e}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3e}\n'.format(i,stat.loc[i]['score'],stat.loc[i]['pvalue'],stat.loc[i]['zscore'],stat.loc[i]['max_auc'],stat.loc[i]['rank_avg_z_p_a'],stat.loc[i]['rank_avg_z_p_a_irwinhall_pvalue']))
print('--Standardization finished!\n--Ranked TFs saved in file: {}\n'.format(statfile))
# plot figures of user defined TFs (one ID per line in args.target)
if args.target:
    with open(args.target) as target_file:
        #IDs = [re.split('[^a-zA-Z0-9]+',line)[0] for line in target_file.readlines()]
        IDs = [line.strip() for line in target_file.readlines()]
    for ID in IDs:
        stat_plot(stat,tfs,ID,args,'rank_avg_z_p_a')
print('Prediction done!\n')
| [
"pandas.DataFrame",
"matplotlib.pyplot.xlim",
"scipy.special.binom",
"os.makedirs",
"matplotlib.pyplot.plot",
"scipy.stats.ranksums",
"pandas.read_csv",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.floor",
"matplotlib.pyplot.axes",
"numpy.log",
"matplotlib.pyplot.figure",
... | [((504, 525), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (518, 525), False, 'import matplotlib\n'), ((1400, 1430), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.6, 2.6)'}), '(figsize=(2.6, 2.6))\n', (1410, 1430), True, 'import matplotlib.pyplot as plt\n'), ((2753, 2866), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(10)', 'frameon': '(False)', 'borderaxespad': '(0.0)', 'labelspacing': '(0.2)', 'loc': '"""upper right"""', 'markerscale': '(4)'}), "(fontsize=10, frameon=False, borderaxespad=0.0, labelspacing=0.2,\n loc='upper right', markerscale=4)\n", (2763, 2866), True, 'import matplotlib.pyplot as plt\n'), ((2864, 2898), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TF Rank"""'], {'fontsize': '(18)'}), "('TF Rank', fontsize=18)\n", (2874, 2898), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2941), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""-log10 ($p$)"""'], {'fontsize': '(18)'}), "('-log10 ($p$)', fontsize=18)\n", (2912, 2941), True, 'import matplotlib.pyplot as plt\n'), ((3334, 3402), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname1'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.02)', 'dpi': '(600)'}), "(figname1, bbox_inches='tight', pad_inches=0.02, dpi=600)\n", (3345, 3402), True, 'import matplotlib.pyplot as plt\n'), ((3405, 3416), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3414, 3416), True, 'import matplotlib.pyplot as plt\n'), ((3607, 3637), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.6, 2.6)'}), '(figsize=(2.6, 2.6))\n', (3617, 3637), True, 'import matplotlib.pyplot as plt\n'), ((3662, 3681), 'numpy.arange', 'np.arange', (['(0)', '(1)', 'dx'], {}), '(0, 1, dx)\n', (3671, 3681), True, 'import numpy as np\n'), ((3858, 3910), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'by'], {'color': '"""dimgrey"""', 'label': '"""Background"""'}), "(x, by, color='dimgrey', label='Background')\n", (3866, 3910), True, 'import matplotlib.pyplot as 
plt\n'), ((3960, 4057), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(10)', 'frameon': '(False)', 'borderaxespad': '(0.0)', 'labelspacing': '(0.2)', 'loc': '"""upper left"""'}), "(fontsize=10, frameon=False, borderaxespad=0.0, labelspacing=0.2,\n loc='upper left')\n", (3970, 4057), True, 'import matplotlib.pyplot as plt\n'), ((4135, 4153), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.2, 1]'], {}), '([0.2, 1])\n', (4143, 4153), True, 'import matplotlib.pyplot as plt\n'), ((4157, 4203), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Fraction"""'], {'fontsize': '(18)'}), "('Cumulative Fraction', fontsize=18)\n", (4167, 4203), True, 'import matplotlib.pyplot as plt\n'), ((4207, 4237), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""AUC"""'], {'fontsize': '(18)'}), "('AUC', fontsize=18)\n", (4217, 4237), True, 'import matplotlib.pyplot as plt\n'), ((4311, 4379), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname2'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.02)', 'dpi': '(600)'}), "(figname2, bbox_inches='tight', pad_inches=0.02, dpi=600)\n", (4322, 4379), True, 'import matplotlib.pyplot as plt\n'), ((4382, 4393), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4391, 4393), True, 'import matplotlib.pyplot as plt\n'), ((4927, 4979), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tf for tf in tfs]', 'columns': 'cols'}), '(index=[tf for tf in tfs], columns=cols)\n', (4939, 4979), True, 'import pandas as pd\n'), ((5470, 5519), 'pandas.read_csv', 'pd.read_csv', (['args.normfile'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(args.normfile, sep='\\t', index_col=0)\n", (5481, 5519), True, 'import pandas as pd\n'), ((1200, 1211), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (1208, 1211), True, 'import numpy as np\n'), ((3123, 3143), 'os.makedirs', 'os.makedirs', (['plotdir'], {}), '(plotdir)\n', (3134, 3143), False, 'import argparse, os, sys, re\n'), ((2947, 2957), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', 
(2955, 2957), True, 'import matplotlib.pyplot as plt\n'), ((5193, 5222), 'scipy.stats.ranksums', 'stats.ranksums', (['tfs[tf]', 'sam1'], {}), '(tfs[tf], sam1)\n', (5207, 5222), False, 'from scipy import stats\n'), ((1028, 1041), 'numpy.log', 'np.log', (['np.pi'], {}), '(np.pi)\n', (1034, 1041), True, 'import numpy as np\n'), ((1238, 1263), 'scipy.special.binom', 'scipy.special.binom', (['n', 'k'], {}), '(n, k)\n', (1257, 1263), False, 'import scipy\n'), ((996, 1033), 'numpy.log', 'np.log', (['(n * (1 + 4 * n * (1 + 2 * n)))'], {}), '(n * (1 + 4 * n * (1 + 2 * n)))\n', (1002, 1033), True, 'import numpy as np\n'), ((983, 992), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (989, 992), True, 'import numpy as np\n')] |
# Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for the `wx_symbols` module."""
import numpy as np
from metpy.plots import current_weather, wx_code_to_numeric
from metpy.testing import assert_array_equal
def test_mapper():
    """Check that selected weather codes map to their glyph characters."""
    expected = {0: '', 4: '\ue9a2', 7: '\ue9a5', 65: '\ue9e1'}
    for code, glyph in expected.items():
        assert current_weather(code) == glyph
def test_alt_char():
    """Check alternate-character lookup for code 7 in the mapper."""
    for alt, glyph in ((1, '\ue9a6'), (2, '\ue9a7')):
        assert current_weather.alt_char(7, alt) == glyph
def test_mapper_len():
    """The symbol mapper should expose exactly 100 entries."""
    expected_size = 100
    assert len(current_weather) == expected_size
def test_wx_code_to_numeric():
    """Test getting numeric weather codes from METAR.

    Each METAR weather string in ``data`` must translate to the WMO numeric
    code at the same position in ``true_codes``; unknown strings (the NA
    entries) map to 0.
    """
    data = ['SN', '-RA', '-SHSN', '-SHRA', 'DZ', 'RA', 'SHSN', 'TSRA', '-FZRA', '-SN', '-TSRA',
            '-RASN', '+SN', 'FG', '-SHRASN', '-DZ', 'SHRA', '-FZRASN', 'TSSN', 'MIBCFG',
            '-RAPL', 'RAPL', 'TSSNPL', '-SNPL', '+RA', '-RASNPL', '-BLSN', '-SHSNIC', '+TSRA',
            'TS', 'PL', 'SNPL', '-SHRAPL', '-SNSG', '-TSSN', 'SG', 'IC', 'FU', '+SNPL',
            'TSSNPLGR', '-TSSNPLGR', '-SHSNSG', 'SHRAPL', '-TSRASN', 'FZRA', '-TSRAPL',
            '-FZDZSN', '+TSSN', '-TSRASNPL', 'TSRAPL', 'RASN', '-SNIC', 'FZRAPL', '-FZRASNPL',
            '+RAPL', '-RASGPL', '-TSSNPL', 'FZRASN', '+TSSNGR', 'TSPLGR', '', 'RA BR', '-TSSG',
            '-TS', '-NA', 'NANA', '+NANA', 'NANANA', 'NANANANA']
    # Expected WMO present-weather codes, aligned one-to-one with ``data``.
    true_codes = np.array([73, 61, 85, 80, 53, 63, 86, 95, 66, 71, 95, 68, 75, 45, 83, 51, 81,
                           66, 95, 0, 79, 79, 95, 79, 65, 79, 36, 85, 97, 17, 79, 79, 80, 77,
                           95, 77, 78, 4, 79, 95, 95, 85, 81, 95, 67, 95, 56, 97, 95, 95, 69,
                           71, 79, 66, 79, 61, 95, 67, 97, 95, 0, 63, 17, 17, 0, 0, 0, 0, 0])
    wx_codes = wx_code_to_numeric(data)
    assert_array_equal(wx_codes, true_codes)
| [
"metpy.plots.current_weather.alt_char",
"metpy.plots.wx_code_to_numeric",
"numpy.array",
"metpy.plots.current_weather",
"metpy.testing.assert_array_equal"
] | [((1666, 1959), 'numpy.array', 'np.array', (['[73, 61, 85, 80, 53, 63, 86, 95, 66, 71, 95, 68, 75, 45, 83, 51, 81, 66, 95,\n 0, 79, 79, 95, 79, 65, 79, 36, 85, 97, 17, 79, 79, 80, 77, 95, 77, 78, \n 4, 79, 95, 95, 85, 81, 95, 67, 95, 56, 97, 95, 95, 69, 71, 79, 66, 79, \n 61, 95, 67, 97, 95, 0, 63, 17, 17, 0, 0, 0, 0, 0]'], {}), '([73, 61, 85, 80, 53, 63, 86, 95, 66, 71, 95, 68, 75, 45, 83, 51, \n 81, 66, 95, 0, 79, 79, 95, 79, 65, 79, 36, 85, 97, 17, 79, 79, 80, 77, \n 95, 77, 78, 4, 79, 95, 95, 85, 81, 95, 67, 95, 56, 97, 95, 95, 69, 71, \n 79, 66, 79, 61, 95, 67, 97, 95, 0, 63, 17, 17, 0, 0, 0, 0, 0])\n', (1674, 1959), True, 'import numpy as np\n'), ((2041, 2065), 'metpy.plots.wx_code_to_numeric', 'wx_code_to_numeric', (['data'], {}), '(data)\n', (2059, 2065), False, 'from metpy.plots import current_weather, wx_code_to_numeric\n'), ((2070, 2110), 'metpy.testing.assert_array_equal', 'assert_array_equal', (['wx_codes', 'true_codes'], {}), '(wx_codes, true_codes)\n', (2088, 2110), False, 'from metpy.testing import assert_array_equal\n'), ((390, 408), 'metpy.plots.current_weather', 'current_weather', (['(0)'], {}), '(0)\n', (405, 408), False, 'from metpy.plots import current_weather, wx_code_to_numeric\n'), ((426, 444), 'metpy.plots.current_weather', 'current_weather', (['(4)'], {}), '(4)\n', (441, 444), False, 'from metpy.plots import current_weather, wx_code_to_numeric\n'), ((468, 486), 'metpy.plots.current_weather', 'current_weather', (['(7)'], {}), '(7)\n', (483, 486), False, 'from metpy.plots import current_weather, wx_code_to_numeric\n'), ((510, 529), 'metpy.plots.current_weather', 'current_weather', (['(65)'], {}), '(65)\n', (525, 529), False, 'from metpy.plots import current_weather, wx_code_to_numeric\n'), ((637, 667), 'metpy.plots.current_weather.alt_char', 'current_weather.alt_char', (['(7)', '(1)'], {}), '(7, 1)\n', (661, 667), False, 'from metpy.plots import current_weather, wx_code_to_numeric\n'), ((691, 721), 
'metpy.plots.current_weather.alt_char', 'current_weather.alt_char', (['(7)', '(2)'], {}), '(7, 2)\n', (715, 721), False, 'from metpy.plots import current_weather, wx_code_to_numeric\n')] |
import os
import cv2
import numpy as np
import shutil
import scipy.misc
import glob
import pickle5 as pickle
from video_processing import EchoProcess
import pickle
import json
from PIL import Image
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
class DataMaster:
    """Facade that builds a DataCollection from a configparser-style
    ``conf`` object and drives loading / processing / saving."""
    def __init__(self, conf):
        """Read paths, class labels and views from ``conf`` and create the
        (still empty) DataCollection."""
        self.conf = conf
        self.df = os.path.join(DIR_PATH, self.conf['Load_Save']['raw_data_folder'])
        # classes and views are stored as JSON lists in the config file.
        classes = json.loads(self.conf.get('Load_Save', 'classes'))
        views = json.loads(self.conf.get('Load_Save', 'views'))
        # NOTE(review): this is the raw config *string* (e.g. "True"/"False"),
        # not a bool -- any non-empty string is truthy; confirm intended.
        verbose = self.conf['Video_Processing']['verbose']
        self.dt = DataCollection(self.df, classes, views, verbose)
    def load(self):
        """Populate ``self.dt`` according to the config: from a single
        pickle, from per-echo pickles, or from the raw videos (then run
        through EchoProcess), optionally pickling the result afterwards."""
        if self.conf.getboolean('Load_Save', 'load_dataset'):
            self.dt = self.dt.load_pickle(
                self.conf['Load_Save']['pickle_name'])
        if self.conf.getboolean('Load_Save', 'load_echos_from_pickle'):
            self.dt = self.dt.load_pickle_multiple_file()
        if not self.dt.populated:
            self.dt.populate()
            processor = EchoProcess(**self.conf.get_par_video_processing())
            processor.process_dataset(self.dt)
        if self.conf.getboolean('Load_Save', 'save_dataset'):
            # NOTE(review): saves under the fixed name 'test_dataset' while
            # load_pickle reads conf['pickle_name'] -- verify this asymmetry.
            self.dt.save_pickle('test_dataset')
class DataCollection:
    """
    Container that loads and manages a collection of echo videos.

    Parameters
    ----------
    df : str
        Absolute path of the data folder to scan.
    classes : list of str
        Possible diagnoses (matched as sub-strings of file paths).
    views : list of str
        Possible chamber views of the echos.
    verbose : bool
        If True, print progress messages.

    Attributes
    ----------
    file_path_names : list of str
        Paths of all the echos kept in the collection.
    echos : list
        The loaded Echo objects.
    echos_infos : list of dict
        Metadata of each loaded echo (see Echo.get_info()).
    populated : bool
        True once the collection has been filled from disk or pickle.
    files_saved : list of str
        Paths collected by populate_dictionary().
    """

    # NOTE(review): the original class declared two __init__ methods; the
    # zero-argument variant was dead code (Python keeps only the last
    # definition), so it has been removed.
    def __init__(self, df, classes, views, verbose=False):
        self.file_path_names = []
        self.echos = []
        self.echos_infos = []
        self.data_folder = df
        self.populated = False
        self.verbose = verbose
        self.classes = classes
        self.views = views
        self.files_saved = []

    @staticmethod
    def _video_files(filenames):
        """Return the visible .avi/.wmv entries of a directory listing."""
        return [f for f in filenames
                if (f.endswith(".avi") or f.endswith(".wmv"))
                and not f.startswith(".")]

    def populate(self):
        """
        Load every non-empty .avi/.wmv echo file found below the data folder.

        Raises
        ------
        FileNotFoundError
            If the data folder does not exist.
        """
        if not os.path.isdir(self.data_folder):
            raise FileNotFoundError("The specified folder does not exist")
        for dirpath, _dirnames, filenames in os.walk(self.data_folder):
            for filename in self._video_files(filenames):
                file_path = os.path.join(dirpath, filename)
                if os.stat(file_path).st_size == 0:
                    print("File:", filename, "is zero bytes!")
                    continue
                ec = Echo(file_path, self.data_folder, self.classes,
                          self.views)
                if not ec.exclude:
                    self.file_path_names.append(file_path)
                    self.echos.append(ec)
                    self.echos_infos.append(ec.get_info())
        self.populated = True

    def populate_dictionary(self, view):
        """
        Collect into ``files_saved`` the echo files whose chamber view is in
        ``view``.

        Parameters
        ----------
        view : container of str
            The chamber views to keep.

        Raises
        ------
        FileNotFoundError
            If the data folder does not exist.
        """
        if not os.path.isdir(self.data_folder):
            raise FileNotFoundError("The specified folder does not exist")
        for dirpath, _dirnames, filenames in os.walk(self.data_folder):
            for filename in self._video_files(filenames):
                file_path = os.path.join(dirpath, filename)
                statinfo = os.stat(file_path)
                ec = Echo(file_path, self.data_folder, self.classes,
                          self.views)
                # BUGFIX: the original branch chain tested ``st_size != 0``
                # for the "zero bytes" message, which reported non-empty
                # files as empty and empty files as "view not considered".
                if statinfo.st_size == 0:
                    print("File:", filename, "is zero bytes!")
                elif not ec.exclude and ec.chamber_view in view:
                    self.files_saved.append(file_path)
                elif not ec.exclude:
                    print('View of the file is not considered.')

    def __str__(self):
        # BUGFIX: the original printed the infos and implicitly returned
        # None, which made str(collection) raise TypeError.
        return str(self.echos_infos)

    def get_target(self, view):
        """
        Return the diagnosis targets of all echos with the given view.

        Returns
        -------
        numpy.ndarray
            Column vector of shape (n, 1), one diagnosis per matching echo.
        """
        y = np.array(
            [ech.diagnosis for ech in self.echos if ech.chamber_view == view])
        return np.reshape(y, (-1, 1))

    def get_3dmatrix(self, view):
        """
        Return the 3D video arrays of all echos with the given view.

        Returns
        -------
        list of 3D arrays
            One entry per matching echo; a list (not a 4D array) because
            the video lengths are not uniform.
        """
        return [ech.matrix3d for ech in self.echos if ech.chamber_view == view]

    def get_x_y(self, view):
        """Return (videos, targets) for all echos with the given view."""
        return self.get_3dmatrix(view), self.get_target(view)

    def _pickle_dir(self):
        """Return (and create) the pickle output directory for this data set."""
        out_file_dir = os.path.join(DIR_PATH, '..', 'out',
                                    os.path.basename(self.data_folder),
                                    'pickle')
        os.makedirs(out_file_dir, exist_ok=True)
        return out_file_dir

    def save_pickle(self, name):
        """
        Save the whole collection into <out>/pickle/<name>.pkl.

        Parameters
        ----------
        name : str
            Base name of the pickle file to write.
        """
        with open(os.path.join(self._pickle_dir(), name + '.pkl'), 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)

    def load_pickle(self, name):
        """
        Load a collection previously stored with save_pickle.

        Parameters
        ----------
        name : str
            Base name of the pickle file to read.

        Returns
        -------
        DataCollection
            The collection read from the pickle file, or ``self`` unchanged
            when the file is empty.
        """
        out_file_name = os.path.join(self._pickle_dir(), name + '.pkl')
        with open(out_file_name, 'rb') as output:
            try:
                if self.verbose:
                    print("loading from pickle file ", out_file_name)
                self = pickle.load(output)
                self.populated = True
                output.close()
            except EOFError:
                print("not found")
        return self

    def load_pickle_multiple_file(self):
        """
        Load the collection from one pickle file per echo video.

        Returns
        -------
        DataCollection
            ``self`` with the echos from the pickle directory appended.

        Raises
        ------
        FileNotFoundError
            If the pickle directory does not exist.
        """
        out_file_dir = os.path.join(DIR_PATH, '..', 'out',
                                    os.path.basename(self.data_folder),
                                    'pickle')
        if not os.path.isdir(out_file_dir):
            raise FileNotFoundError("The specified folder does not exist")
        for dirpath, _dirnames, filenames in os.walk(out_file_dir):
            for filename in [f for f in filenames if f.endswith(".pkl")]:
                print(filename)
                file_path = os.path.join(dirpath, filename)
                if os.stat(file_path).st_size == 0:
                    print("File:", filename, "is zero bytes!")
                    continue
                with open(file_path, 'rb') as output:
                    try:
                        if self.verbose:
                            print("loading from pickle file ", filename)
                        ec = pickle.load(output)
                        output.close()
                    except EOFError:
                        print("not found")
                self.echos.append(ec)
                self.echos_infos.append(ec.get_info())
                self.file_path_names.append(ec.file_path)
        self.populated = True
        return self
class Echo:
    """
    Represents a single echo video and its metadata/labels.

    Construction immediately calls open(), which reads video metadata with
    OpenCV, derives the diagnosis and chamber view from the file path, and
    loads segmentation-mask / bounding-box label images found next to the
    video file.

    Parameters
    ----------
    file_path : str
        Absolute path of the echo video file.
    data_folder : str
        Absolute path of the data folder that contains the file.
    classes : list of str
        Possible diagnoses, matched as sub-strings of the file path.
    views : list of str
        Possible chamber views, matched against the file name.
    """
    def __init__(self, file_path, data_folder, classes, views):
        self.file_path = file_path
        self.data_folder = data_folder
        self.pickle_folder = None  # set by save_pickle()
        self.echo_name = None
        self.diagnosis = None  # 1 / 0 / "unknown" / "diagnosis_unknown"
        self.chamber_view = None  # view string, "view_unknown", or -1
        self.hospital = None  # NOTE(review): never assigned in the visible code
        self.height = None
        self.width = None
        self.no_frames = None
        self.fps = None
        self.duration = None  # seconds, or 'Nan' for corrupt files
        self.matrix3d = []  # video data, filled later via set_3d_array()
        self.corrupt = None
        self.classes = classes
        self.views = views
        self.exclude = False  # True => skip this echo in collections
        self.labels = {'masks': [], 'box': None}
        self.name_data = "new_data"
        self.open()
    def open(self):
        """
        Open the echo video at ``file_path``, fill in the metadata (size,
        fps, frame count, duration), derive the diagnosis and chamber view
        from the path, and load mask/box label images from the same folder.
        Sets ``exclude`` whenever something cannot be determined.
        """
        cap = cv2.VideoCapture(self.file_path)
        file_name = os.path.splitext(os.path.basename(self.file_path))[0].upper()
        # Data-set name = grand-parent directory of the video file.
        self.name_data = os.path.basename(os.path.dirname(os.path.dirname(self.file_path)))
        if not self.classes:
            self.diagnosis = "unknown"
            self.echo_name = file_name
        elif self.classes[0] in self.file_path:
            self.echo_name = self.classes[0] + file_name
            self.diagnosis = 1
        elif self.classes[1] in self.file_path:
            self.echo_name = self.classes[1] + file_name
            self.diagnosis = 0
        else:
            print("Unknown diagnosis in file:", self.file_path)
            self.diagnosis = "diagnosis_unknown"
            self.exclude = True
        # OpenCV VideoCapture property ids: 3 = frame width, 4 = frame
        # height, 5 = fps, 7 = frame count.
        self.width = int(cap.get(3))
        self.height = int(cap.get(4))
        self.fps = cap.get(5)
        if self.fps == 0:
            # fps == 0 means OpenCV could not parse the file.
            print("Not a valid video file!")
            self.corrupt = 1
            self.exclude = True
        else:
            self.corrupt = 0
        self.no_frames = int(cap.get(7))
        if not self.corrupt:
            self.duration = self.no_frames / self.fps
        else:
            self.duration = 'Nan'
            self.exclude = True
        cap.release()
        if not self.views:
            # no view provided
            self.chamber_view = "view_unknown"
        elif len(self.views) > 1:
            # NOTE(review): a single-element views list falls through both
            # branches and leaves chamber_view unset (treated as unknown
            # below) -- confirm this is intended.
            for view in self.views:
                if view.lower() in self.echo_name.lower():
                    self.chamber_view = view
        if not self.chamber_view:
            print("Unknown chamber-view in file:", self.file_path)
            self.chamber_view = -1
            self.exclude = True
        # store segmentation masks and box
        directory = os.path.dirname(self.file_path)
        masks = [f for f in
                 glob.glob(directory + "/**mask.png", recursive=True)]
        box = [f for f in glob.glob(directory + "/box.jpg", recursive=True)]
        if len(masks) == 0 or len(box) == 0:
            print(self.echo_name)
            print("No labels provided. Add ####_mask.png and/or box.jpg")
            return
        if len(masks) != 3:
            print("This folder ({}) does not contain 3 maskes.".format(file_name))
        # Binarize the box image: every pixel that is not near-white (all
        # three channels >= 245) is treated as part of the box mask.
        img = np.asarray(Image.open(box[0]))
        img = (1 * (~((img[..., 0] >= 245) * (img[..., 1] >= 245) * (
            img[..., 2] >= 245)))).astype(np.uint8)
        self.labels['box'] = img
        for f in sorted(masks):
            # valve ground-truth PNG has 4 color channels; the last (alpha)
            # channel carries the mask (255 = labeled pixel)
            img = np.asarray(Image.open(f))
            # Frame index is encoded in the file name as <frame>_mask.png.
            frame = int(os.path.basename(f).split('_mask')[0])
            self.labels['masks'].append(
                {str(frame): (1 * (img[..., -1] == 255)).astype(np.uint8)})
    def get_info(self):
        """
        Return a dictionary with all the metadata of the echo.

        Returns
        -------
        echo_dict : dict
            Name, path, diagnosis, chamber view, frame geometry, frame
            count, fps, duration, and the corrupt flag.
        """
        echo_dict = {"name": self.echo_name,
                     "file_path": self.file_path,
                     "diagnosis": self.diagnosis,
                     "chamber_view": self.chamber_view,
                     "height": self.height,
                     "width": self.width,
                     "no_frames": self.no_frames,
                     "fps": self.fps,
                     "duration": self.duration,
                     "corrupt": self.corrupt}
        return echo_dict
    def set_3d_array(self, matrix):
        """
        Set the 3D array video of the echo.

        Parameters
        ----------
        matrix : 3D array
            Video data; the last axis indexes frames (see save_frames),
            presumably (height, width, frames) -- TODO confirm.
        """
        self.matrix3d = matrix
    def save_frames(self):
        """
        Save the stored 3D array as individual .jpg frames under
        out/<data_folder>/frames/<view>/<echo_name>/.
        """
        echo_name = (os.path.splitext(self.echo_name))[0].upper()
        n_frames = self.matrix3d.shape[2]
        out_path = os.path.join(DIR_PATH, '..', 'out',
                                os.path.basename(self.data_folder), 'frames',
                                self.chamber_view,
                                echo_name)
        # NOTE(review): makedirs followed by rmtree+makedirs is an awkward
        # way of saying "ensure an *empty* output directory"; kept as-is.
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        shutil.rmtree(out_path, ignore_errors=True)
        os.makedirs(out_path, exist_ok=True)
        # Rescale intensities to the full 0-255 uint8 range before writing.
        ndarray = cv2.normalize(self.matrix3d, None, 0, 255, cv2.NORM_MINMAX)
        ndarray = ndarray.astype(np.uint8)
        for f in range(0, n_frames):
            frame = ndarray[:, :, f]
            Image.fromarray(frame).save(
                os.path.join(out_path, str(f).zfill(3) + ".jpg"))
    def save_pickle(self):
        """
        Pickle this Echo object (including the 3D array) into
        data/in/processed/<name_data>/<echo_name>.pkl and remember the
        output folder in ``pickle_folder``.
        """
        echo_name = (os.path.splitext(self.echo_name))[0].upper()
        out_path = os.path.join(DIR_PATH, '..', 'data', 'in', 'processed', self.name_data)
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        # protocol=-1 selects the highest pickle protocol available.
        with open(os.path.join(out_path, echo_name + '.pkl'), 'wb') as f:
            pickle.dump(self, f, protocol=-1)
        self.pickle_folder = out_path
| [
"pickle.dump",
"os.walk",
"pickle.load",
"glob.glob",
"cv2.normalize",
"shutil.rmtree",
"os.path.join",
"os.path.dirname",
"os.path.exists",
"numpy.reshape",
"os.stat",
"os.path.basename",
"os.path.realpath",
"os.makedirs",
"os.path.isdir",
"PIL.Image.open",
"cv2.VideoCapture",
"nu... | [((226, 252), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (242, 252), False, 'import os\n'), ((348, 413), 'os.path.join', 'os.path.join', (['DIR_PATH', "self.conf['Load_Save']['raw_data_folder']"], {}), "(DIR_PATH, self.conf['Load_Save']['raw_data_folder'])\n", (360, 413), False, 'import os\n'), ((2658, 2689), 'os.path.isdir', 'os.path.isdir', (['self.data_folder'], {}), '(self.data_folder)\n', (2671, 2689), False, 'import os\n'), ((3744, 3775), 'os.path.isdir', 'os.path.isdir', (['self.data_folder'], {}), '(self.data_folder)\n', (3757, 3775), False, 'import os\n'), ((5027, 5102), 'numpy.array', 'np.array', (['[ech.diagnosis for ech in self.echos if ech.chamber_view == view]'], {}), '([ech.diagnosis for ech in self.echos if ech.chamber_view == view])\n', (5035, 5102), True, 'import numpy as np\n'), ((5128, 5150), 'numpy.reshape', 'np.reshape', (['y', '(-1, 1)'], {}), '(y, (-1, 1))\n', (5138, 5150), True, 'import numpy as np\n'), ((6192, 6232), 'os.makedirs', 'os.makedirs', (['out_file_dir'], {'exist_ok': '(True)'}), '(out_file_dir, exist_ok=True)\n', (6203, 6232), False, 'import os\n'), ((6872, 6912), 'os.makedirs', 'os.makedirs', (['out_file_dir'], {'exist_ok': '(True)'}), '(out_file_dir, exist_ok=True)\n', (6883, 6912), False, 'import os\n'), ((6937, 6978), 'os.path.join', 'os.path.join', (['out_file_dir', "(name + '.pkl')"], {}), "(out_file_dir, name + '.pkl')\n", (6949, 6978), False, 'import os\n'), ((7780, 7807), 'os.path.isdir', 'os.path.isdir', (['out_file_dir'], {}), '(out_file_dir)\n', (7793, 7807), False, 'import os\n'), ((10135, 10167), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.file_path'], {}), '(self.file_path)\n', (10151, 10167), False, 'import cv2\n'), ((11920, 11951), 'os.path.dirname', 'os.path.dirname', (['self.file_path'], {}), '(self.file_path)\n', (11935, 11951), False, 'import os\n'), ((14441, 14484), 'shutil.rmtree', 'shutil.rmtree', (['out_path'], {'ignore_errors': '(True)'}), '(out_path, 
ignore_errors=True)\n', (14454, 14484), False, 'import shutil\n'), ((14493, 14529), 'os.makedirs', 'os.makedirs', (['out_path'], {'exist_ok': '(True)'}), '(out_path, exist_ok=True)\n', (14504, 14529), False, 'import os\n'), ((14549, 14608), 'cv2.normalize', 'cv2.normalize', (['self.matrix3d', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(self.matrix3d, None, 0, 255, cv2.NORM_MINMAX)\n', (14562, 14608), False, 'import cv2\n'), ((15024, 15095), 'os.path.join', 'os.path.join', (['DIR_PATH', '""".."""', '"""data"""', '"""in"""', '"""processed"""', 'self.name_data'], {}), "(DIR_PATH, '..', 'data', 'in', 'processed', self.name_data)\n", (15036, 15095), False, 'import os\n'), ((2739, 2764), 'os.walk', 'os.walk', (['self.data_folder'], {}), '(self.data_folder)\n', (2746, 2764), False, 'import os\n'), ((3825, 3850), 'os.walk', 'os.walk', (['self.data_folder'], {}), '(self.data_folder)\n', (3832, 3850), False, 'import os\n'), ((6102, 6136), 'os.path.basename', 'os.path.basename', (['self.data_folder'], {}), '(self.data_folder)\n', (6118, 6136), False, 'import os\n'), ((6323, 6373), 'pickle.dump', 'pickle.dump', (['self', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(self, output, pickle.HIGHEST_PROTOCOL)\n', (6334, 6373), False, 'import pickle\n'), ((6782, 6816), 'os.path.basename', 'os.path.basename', (['self.data_folder'], {}), '(self.data_folder)\n', (6798, 6816), False, 'import os\n'), ((7687, 7721), 'os.path.basename', 'os.path.basename', (['self.data_folder'], {}), '(self.data_folder)\n', (7703, 7721), False, 'import os\n'), ((7857, 7878), 'os.walk', 'os.walk', (['out_file_dir'], {}), '(out_file_dir)\n', (7864, 7878), False, 'import os\n'), ((12440, 12458), 'PIL.Image.open', 'Image.open', (['box[0]'], {}), '(box[0])\n', (12450, 12458), False, 'from PIL import Image\n'), ((14217, 14251), 'os.path.basename', 'os.path.basename', (['self.data_folder'], {}), '(self.data_folder)\n', (14233, 14251), False, 'import os\n'), ((14372, 14396), 'os.path.exists', 
'os.path.exists', (['out_path'], {}), '(out_path)\n', (14386, 14396), False, 'import os\n'), ((14410, 14431), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (14421, 14431), False, 'import os\n'), ((15111, 15135), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (15125, 15135), False, 'import os\n'), ((15149, 15170), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (15160, 15170), False, 'import os\n'), ((15258, 15291), 'pickle.dump', 'pickle.dump', (['self', 'f'], {'protocol': '(-1)'}), '(self, f, protocol=-1)\n', (15269, 15291), False, 'import pickle\n'), ((6251, 6292), 'os.path.join', 'os.path.join', (['out_file_dir', "(name + '.pkl')"], {}), "(out_file_dir, name + '.pkl')\n", (6263, 6292), False, 'import os\n'), ((7172, 7191), 'pickle.load', 'pickle.load', (['output'], {}), '(output)\n', (7183, 7191), False, 'import pickle\n'), ((10309, 10340), 'os.path.dirname', 'os.path.dirname', (['self.file_path'], {}), '(self.file_path)\n', (10324, 10340), False, 'import os\n'), ((11997, 12049), 'glob.glob', 'glob.glob', (["(directory + '/**mask.png')"], {'recursive': '(True)'}), "(directory + '/**mask.png', recursive=True)\n", (12006, 12049), False, 'import glob\n'), ((12078, 12127), 'glob.glob', 'glob.glob', (["(directory + '/box.jpg')"], {'recursive': '(True)'}), "(directory + '/box.jpg', recursive=True)\n", (12087, 12127), False, 'import glob\n'), ((12764, 12777), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (12774, 12777), False, 'from PIL import Image\n'), ((15190, 15232), 'os.path.join', 'os.path.join', (['out_path', "(echo_name + '.pkl')"], {}), "(out_path, echo_name + '.pkl')\n", (15202, 15232), False, 'import os\n'), ((2997, 3028), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (3009, 3028), False, 'import os\n'), ((3060, 3078), 'os.stat', 'os.stat', (['file_path'], {}), '(file_path)\n', (3067, 3078), False, 'import os\n'), ((4083, 4114), 'os.path.join', 
'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (4095, 4114), False, 'import os\n'), ((4146, 4164), 'os.stat', 'os.stat', (['file_path'], {}), '(file_path)\n', (4153, 4164), False, 'import os\n'), ((8026, 8057), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (8038, 8057), False, 'import os\n'), ((8089, 8107), 'os.stat', 'os.stat', (['file_path'], {}), '(file_path)\n', (8096, 8107), False, 'import os\n'), ((14043, 14075), 'os.path.splitext', 'os.path.splitext', (['self.echo_name'], {}), '(self.echo_name)\n', (14059, 14075), False, 'import os\n'), ((14739, 14761), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (14754, 14761), False, 'from PIL import Image\n'), ((14960, 14992), 'os.path.splitext', 'os.path.splitext', (['self.echo_name'], {}), '(self.echo_name)\n', (14976, 14992), False, 'import os\n'), ((10205, 10237), 'os.path.basename', 'os.path.basename', (['self.file_path'], {}), '(self.file_path)\n', (10221, 10237), False, 'import os\n'), ((12803, 12822), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (12819, 12822), False, 'import os\n'), ((8458, 8477), 'pickle.load', 'pickle.load', (['output'], {}), '(output)\n', (8469, 8477), False, 'import pickle\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import test_global_storage
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
# Shared OneFlow function configuration for the jobs below: float default
# data type, consistent (global) logical view.
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
def _check(test_case, data, segment_ids, out_shape, out):
test_case.assertEqual(out.shape, out_shape)
ref = np.zeros_like(out)
for (idx, i) in np.ndenumerate(segment_ids):
out_idx = list(idx)
out_idx[-1] = i
out_idx = tuple(out_idx)
ref[out_idx] += data[idx]
test_case.assertTrue(np.allclose(ref, out, atol=1e-05, rtol=1e-05))
def _check_bw(test_case, params, indices, out_shape, out):
ref = np.zeros_like(out)
for (idx, i) in np.ndenumerate(indices):
in_idx = list(idx)
in_idx[-1] = i
in_idx = tuple(in_idx)
ref[idx] += params[in_idx]
test_case.assertTrue(np.array_equal(ref, out))
def _gen_segment_ids(out_shape, num_segments, segment_ids_shape):
axis = len(segment_ids_shape) - 1
return np.random.randint(
low=0, high=out_shape[axis], size=segment_ids_shape, dtype=np.int32
)
def _gen_data(out_shape, num_segments, segment_ids_shape):
axis = len(segment_ids_shape) - 1
data_shape = out_shape[0:axis] + (segment_ids_shape[axis],) + out_shape[axis + 1 :]
return np.random.rand(*data_shape).astype(np.float32)
def _make_unsoted_segment_sum_fn(device, data, segment_ids, num_segments):
    """Build and immediately run a OneFlow "train" job that applies
    unsorted_batch_segment_sum to ``data`` with ``segment_ids``.

    The input is routed through a zero-initialized trainable variable so
    that a backward pass exists; the input and output gradients are
    captured into test_global_storage under "x_diff" and "loss_diff".
    ("unsoted" is a historical typo kept to preserve the public name.)
    Returns the job's result object (call .get() on it).
    """
    flow.clear_default_session()
    @flow.global_function(type="train", function_config=func_config)
    def unsorted_batch_segment_sum_job(
        data: oft.Numpy.Placeholder(data.shape, dtype=flow.float),
        segment_ids: oft.Numpy.Placeholder(segment_ids.shape, dtype=flow.int32),
    ):
        with flow.scope.placement(device, "0:0"):
            # Trainable variable with the same shape as the input; adding
            # it to ``data`` makes the job differentiable w.r.t. ``x``.
            x = flow.get_variable(
                "data",
                shape=data.shape,
                dtype=flow.float32,
                initializer=flow.constant_initializer(0),
            )
            data = x + data
            res = flow.math.unsorted_batch_segment_sum(
                data=data, segment_ids=segment_ids, num_segments=num_segments
            )
            # Minimize the raw output so the backward kernel is exercised.
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
            ).minimize(res)
        # Capture gradients for later comparison against numpy references.
        flow.watch_diff(x, test_global_storage.Setter("x_diff"))
        flow.watch_diff(res, test_global_storage.Setter("loss_diff"))
        return res
    return unsorted_batch_segment_sum_job(data, segment_ids)
def _run_test(test_case, device, out_shape, num_segments, segment_ids_shape):
segment_ids = _gen_segment_ids(out_shape, num_segments, segment_ids_shape)
data = _gen_data(out_shape, num_segments, segment_ids_shape)
unsorted_batch_segment_sum_out = _make_unsoted_segment_sum_fn(
device, data, segment_ids, num_segments
).get()
out_ndarray = unsorted_batch_segment_sum_out.numpy()
grad_in_ndarray = test_global_storage.Get("x_diff")
grad_out_ndarray = test_global_storage.Get("loss_diff")
_check(test_case, data, segment_ids, out_shape, out_ndarray)
_check_bw(
test_case, grad_out_ndarray, segment_ids, grad_in_ndarray.shape, grad_in_ndarray
)
@flow.unittest.skip_unless_1n1d()
class TestUnsortedBatchSegmentSum(flow.unittest.TestCase):
def test_unsorted_batch_segment_sum(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["out_shape"] = [(2, 4, 7, 6)]
arg_dict["num_segments"] = [7]
arg_dict["segment_ids_shape"] = [(2, 4, 5)]
for arg in GenArgList(arg_dict):
_run_test(test_case, *arg)
if __name__ == "__main__":
unittest.main()
| [
"numpy.array_equal",
"numpy.allclose",
"numpy.random.randint",
"test_global_storage.Get",
"oneflow.compatible.single_client.constant_initializer",
"oneflow.compatible.single_client.math.unsorted_batch_segment_sum",
"oneflow.compatible.single_client.scope.placement",
"unittest.main",
"oneflow.compati... | [((900, 921), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (919, 921), True, 'from oneflow.compatible import single_client as flow\n'), ((4044, 4076), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4074, 4076), True, 'from oneflow.compatible import single_client as flow\n'), ((997, 1025), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1023, 1025), True, 'from oneflow.compatible import single_client as flow\n'), ((1145, 1163), 'numpy.zeros_like', 'np.zeros_like', (['out'], {}), '(out)\n', (1158, 1163), True, 'import numpy as np\n'), ((1184, 1211), 'numpy.ndenumerate', 'np.ndenumerate', (['segment_ids'], {}), '(segment_ids)\n', (1198, 1211), True, 'import numpy as np\n'), ((1475, 1493), 'numpy.zeros_like', 'np.zeros_like', (['out'], {}), '(out)\n', (1488, 1493), True, 'import numpy as np\n'), ((1514, 1537), 'numpy.ndenumerate', 'np.ndenumerate', (['indices'], {}), '(indices)\n', (1528, 1537), True, 'import numpy as np\n'), ((1823, 1913), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'out_shape[axis]', 'size': 'segment_ids_shape', 'dtype': 'np.int32'}), '(low=0, high=out_shape[axis], size=segment_ids_shape,\n dtype=np.int32)\n', (1840, 1913), True, 'import numpy as np\n'), ((2250, 2278), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2276, 2278), True, 'from oneflow.compatible import single_client as flow\n'), ((2285, 2348), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2305, 2348), True, 'from oneflow.compatible import single_client as flow\n'), ((3772, 3805), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), 
"('x_diff')\n", (3795, 3805), False, 'import test_global_storage\n'), ((3829, 3865), 'test_global_storage.Get', 'test_global_storage.Get', (['"""loss_diff"""'], {}), "('loss_diff')\n", (3852, 3865), False, 'import test_global_storage\n'), ((4521, 4536), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4534, 4536), False, 'import unittest\n'), ((1357, 1402), 'numpy.allclose', 'np.allclose', (['ref', 'out'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(ref, out, atol=1e-05, rtol=1e-05)\n', (1368, 1402), True, 'import numpy as np\n'), ((1680, 1704), 'numpy.array_equal', 'np.array_equal', (['ref', 'out'], {}), '(ref, out)\n', (1694, 1704), True, 'import numpy as np\n'), ((4207, 4220), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4218, 4220), False, 'from collections import OrderedDict\n'), ((4427, 4447), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4437, 4447), False, 'from test_util import GenArgList\n'), ((2122, 2149), 'numpy.random.rand', 'np.random.rand', (['*data_shape'], {}), '(*data_shape)\n', (2136, 2149), True, 'import numpy as np\n'), ((2403, 2454), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['data.shape'], {'dtype': 'flow.float'}), '(data.shape, dtype=flow.float)\n', (2424, 2454), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2477, 2535), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['segment_ids.shape'], {'dtype': 'flow.int32'}), '(segment_ids.shape, dtype=flow.int32)\n', (2498, 2535), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2557, 2592), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device', '"""0:0"""'], {}), "(device, '0:0')\n", (2577, 2592), True, 'from oneflow.compatible import single_client as flow\n'), ((2841, 2944), 'oneflow.compatible.single_client.math.unsorted_batch_segment_sum', 'flow.math.unsorted_batch_segment_sum', ([], 
{'data': 'data', 'segment_ids': 'segment_ids', 'num_segments': 'num_segments'}), '(data=data, segment_ids=segment_ids,\n num_segments=num_segments)\n', (2877, 2944), True, 'from oneflow.compatible import single_client as flow\n'), ((3145, 3181), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (3171, 3181), False, 'import test_global_storage\n'), ((3216, 3255), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss_diff"""'], {}), "('loss_diff')\n", (3242, 3255), False, 'import test_global_storage\n'), ((2751, 2779), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (2776, 2779), True, 'from oneflow.compatible import single_client as flow\n'), ((3019, 3073), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (3060, 3073), True, 'from oneflow.compatible import single_client as flow\n')] |
# Copyright 2021 United States Government as represented by the Administrator of the National Aeronautics and Space
# Administration. No copyright is claimed in the United States under Title 17, U.S. Code. All Other Rights Reserved.
"""
UNDER CONSTRUCTION?!?!?!?
The :mod:`.visualizer` module provides...
Contents
--------
"""
import matplotlib.pyplot as plt
try:
from matplotlib.animation import ImageMagickWriter
except UnicodeDecodeError:
print('unable to import ImageMagickWriter')
ImageMagickWriter = None
import numpy as np
ANGLES = np.linspace(0, 2 * np.pi, 500)
SCAN_VECTORS = np.vstack([np.cos(ANGLES), np.sin(ANGLES), np.zeros(ANGLES.size)])
def show_templates(relnav, index, target_ind, ax1=None, ax2=None, fig=None):
# retrieve the image we are processing
image = relnav.camera.images[index]
# update the scene to reflect the current time
relnav.scene.update(image)
if (ax1 is None) or (ax2 is None) or (fig is None):
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# set the title so we know what we're looking at
fig.suptitle('{} {}'.format(image.observation_date.isoformat(), relnav.scene.target_objs[target_ind].name))
# show the image
ax1.imshow(image, cmap='gray')
# determine the location of the template in the image (roughly)
template_shape = np.array(relnav.saved_templates[index][target_ind].shape[::-1])
template_size = template_shape // 2
center = np.round(relnav.center_finding_results[index, target_ind]["measured"][:2])
if not np.isfinite(center).all():
print('invalid solved-for center. using predicted.')
center = np.round(relnav.center_finding_results[index, target_ind]["predicted"][:2])
min_bounds = center - template_size
max_bounds = center + template_size + (template_shape % 2)
# crop the image, accounting for when the shape is odd using the modulo
ax1.set_xlim(min_bounds[0], max_bounds[0])
ax1.set_ylim(max_bounds[1], min_bounds[1])
# label this subplot as the image
ax1.set_title('Image')
# show the template
ax2.imshow(relnav.saved_templates[index][target_ind], cmap='gray')
# label this subplot as the template
ax2.set_title('Template')
return ax1, ax2, fig
def show_limbs(relnav, index, ax=None):
# retrieve the image we are processing
image = relnav.camera.images[index]
# retrieve the observation observation_date of the image we are processing
date = image.observation_date
# update the scene to reflect the current time
relnav.scene.update(image)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(image, cmap='gray')
# initialize variables to store the bounds of the limbs
min_limb_bounds = [np.inf, np.inf]
max_limb_bounds = [-np.inf, -np.inf]
for target_ind, target in enumerate(relnav.scene.target_objs):
# determine the a priori distance to the target
apriori_distance = np.linalg.norm(target.position)
# get the a priori limb points
# define the line of sight to the body in the camera frame
apriori_los = target.position.ravel() / apriori_distance
# find the limb points in the camera frame
apriori_limbs_cam = target.shape.find_limbs(apriori_los, SCAN_VECTORS)
# project the limb points into the image
apriori_limbs_image = relnav.camera.model.project_onto_image(apriori_limbs_cam,
image=index,
temperature=image.temperature)
# plot the a priori limb points
ax.plot(*apriori_limbs_image, linewidth=1, label='{} a priori limbs'.format(target.name))
# adjust the target object to its observed location
rtype = relnav.center_finding_results[index, target_ind]['type']
if not rtype:
rtype = relnav.relative_position_results[index, target_ind]['type']
if rtype in [b'cof', 'cof']:
los = relnav.camera.model.pixels_to_unit(relnav.center_finding_results[index, target_ind]['measured'][:2],
temperature=image.temperature, image=index)
if np.isfinite(los).all():
target.change_position(los * apriori_distance)
elif rtype in [b'pos', 'pos']:
los = relnav.relative_position_results[index, target_ind]['measured'].copy()
los /= np.linalg.norm(los)
if np.isfinite(los).all():
target.change_position(relnav.relative_position_results[index, target_ind]['measured'])
else:
raise ValueError("Can't display limbs for {} type relnav".format(rtype))
limbs_cam = target.shape.find_limbs(los, SCAN_VECTORS)
# project the limb points into the image
limbs_image = relnav.camera.model.project_onto_image(limbs_cam,
image=index,
temperature=image.temperature)
# update the limb bounds
min_limb_bounds = np.minimum(min_limb_bounds, limbs_image.min(axis=-1))
max_limb_bounds = np.maximum(max_limb_bounds, limbs_image.max(axis=-1))
# plot the updated limb points
ax.plot(*limbs_image, linewidth=1, label='{} solved for limbs'.format(target.name))
if rtype in [b'cof', 'cof']:
# show the predicted center pixel
ax.scatter(*relnav.center_finding_results[index, target_ind]['predicted'][:2],
label='{} predicted center'.format(target.name))
# show the solved for center
ax.scatter(*relnav.center_finding_results[index, target_ind]['measured'][:2],
label='{} solved-for center'.format(target.name))
else:
# get the apriori image location
apriori_image_pos = relnav.camera.model.project_onto_image(
relnav.relative_position_results[index, target_ind]['predicted'],
image=index, temperature=image.temperature)
# get the solved for image location
image_pos = relnav.camera.model.project_onto_image(
relnav.relative_position_results[index, target_ind]['measured'],
image=index, temperature=image.temperature)
# show the centers
ax.scatter(*apriori_image_pos, label='{} predicted center'.format(target.name))
ax.scatter(*image_pos, label='{} solved-for center'.format(target.name))
# set the title so we know what image we're looking at
ax.set_title(date.isoformat())
return ax, max_limb_bounds, min_limb_bounds
def limb_summary_gif(relnav, fps=2, outfile='./opnavsummary.gif', dpi=100):
# initialize the figure and axes
fig = plt.figure()
fig.set_tight_layout(True)
ax = fig.add_subplot(111)
# initialize the writer
writer = ImageMagickWriter(fps=fps)
writer.setup(fig=fig, outfile=outfile, dpi=dpi)
# loop through each image and save the frame
for ind, image in relnav.camera:
ax.clear()
_, max_limbs, min_limbs = show_limbs(relnav, ind, ax=ax)
# set the limits to highlight only the portion of interest
if np.isfinite(min_limbs).all() and np.isfinite(max_limbs).all():
ax.set_xlim(min_limbs[0] - 10, max_limbs[0] + 10)
ax.set_ylim(min_limbs[1] - 10, max_limbs[1] + 10)
try:
fig.legend().draggable()
except AttributeError:
fig.legend().set_draggable(True)
writer.grab_frame()
writer.finish()
plt.close(fig)
def template_summary_gif(relnav, fps=2, outfile='./templatesummary.gif', dpi=100):
# initialize the figure and axes
fig = plt.figure()
fig.set_tight_layout(True)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# initialize the writer
writer = ImageMagickWriter(fps=fps)
writer.setup(fig=fig, outfile=outfile, dpi=dpi)
# loop through each image and save the frame
for ind, image in relnav.camera:
# loop through each object
for obj_ind in range(len(relnav.scene.target_objs)):
if relnav.saved_templates[ind] is not None:
if relnav.saved_templates[ind][obj_ind] is not None:
ax1.clear()
ax2.clear()
show_templates(relnav, ind, obj_ind, ax1=ax1, ax2=ax2, fig=fig)
writer.grab_frame()
writer.finish()
plt.close(fig)
def show_center_finding_residuals(relnav):
fig = plt.figure()
ax = fig.add_subplot(111)
# initialize lists to store the data
resids = []
dates = []
# loop through each image
for ind, image in relnav.camera:
# store a list in the resids list and the datetime object in the dates list
resids.append([])
dates.append(image.observation_date)
# loop through each target
for target_ind in range(len(relnav.scene.target_objs)):
# determine the type of results we are considering
if relnav.center_finding_results[ind, target_ind]["type"] in [b'cof', b'lmk', 'cof', 'lmk']:
# compute the observed minus computed residuals
resids[-1].append((relnav.center_finding_results[ind, target_ind]['measured'] -
relnav.center_finding_results[ind, target_ind]['predicted'])[:2])
else: # if we are considering a technique that estimates the full 3DOF position
# project the predicted and measured positions onto the image and compute the o-c resids
resids[-1].append(
relnav.camera.model.project_onto_image(relnav.center_finding_results[ind, target_ind]['measured'],
image=ind, temperature=image.temperature) -
relnav.camera.model.project_onto_image(relnav.center_finding_results[ind, target_ind]['predicted'],
image=ind, temperature=image.temperature))
# stack all of the resids together
resids = np.asarray(resids)
dates = np.asarray(dates, dtype='datetime64[us]')
# loop through each target again and plot the residuals vs time
for target_ind, target in enumerate(relnav.scene.target_objs):
ax.scatter(dates, resids[:, target_ind, 0], label='{} Columns'.format(target.name))
ax.scatter(dates, resids[:, target_ind, 1], label='{} Rows'.format(target.name))
# set the labels and update the x axis to display the dates better
ax.set_xlabel('Observation Date')
ax.set_ylabel('O-C Residuals, pix')
fig.autofmt_xdate()
# create a legend
try:
fig.legend().draggable()
except AttributeError:
fig.legend().set_draggable(True)
def scatter_residuals_sun_dependent(relnav):
"""
Show observed minus computed residuals with units of pixels plotted in a frame rotated so that +x points towards the
sun in the image.
:param relnav:
"""
resids = []
# loop through each image
for ind, image in relnav.camera:
# loop through each target
for target_ind in range(len(relnav.scene.target_objs)):
# update the scene so we can get the sun direction
relnav.scene.update(image)
# figure out the direction to the sun in the image
line_of_sight_sun = relnav.camera.model.project_directions(relnav.scene.light_obj.position.ravel())
# get the rotation to make the x axis line up with this direction
# since line_of_sight_sun is a unit vector the x component = cos(theta) and y component = sin of theta
# so the following gives
# [[ cos(theta), sin(theta)],
# [-sin(theta), cos(theta)]]
# which rotates from the image frame to the frame with +x pointing towards the sun
rmat = np.array([line_of_sight_sun, line_of_sight_sun[::-1] * [-1, 1]])
# determine the type of results we are considering
if relnav.center_finding_results[ind, target_ind]["type"] in [b'cof', b'lmk', 'cof', 'lmk']:
# compute the observed minus computed residuals
resids.append(rmat @ (relnav.center_finding_results[ind, target_ind]['measured'] -
relnav.center_finding_results[ind, target_ind]['predicted'])[:2])
else: # if we are considering a technique that estimates the full 3DOF position
# project the predicted and measured positions onto the image and compute the o-c resids
resids.append(rmat @
(relnav.camera.model.project_onto_image(
relnav.center_finding_results[ind, target_ind]['measured'],
image=ind, temperature=image.temperature
) -
relnav.camera.model.project_onto_image(
relnav.center_finding_results[ind, target_ind]['predicted'],
image=ind, temperature=image.temperature)
))
# stack all of the resids together
resids = np.asarray(resids)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(*resids)
ax.set_xlabel("Sun direction O-C error, pix")
ax.set_xlabel("Anti-sun direction O-C error, pix")
def plot_residuals_sun_dependent_time(relnav):
"""
Show observed minus computed residuals with units of pixels plotted in a frame rotated so that +x points towards the
sun in the image.
This is done with a time series (so the x axis of the plot is time and the y axis is residual in pixels) with 2
different series
:param relnav:
"""
dates = []
resids = []
# loop through each image
for ind, image in relnav.camera:
# store a list in the resids list and the datetime object in the dates list
resids.append([])
dates.append(image.observation_date)
# loop through each target
for target_ind in range(len(relnav.scene.target_objs)):
# update the scene so we can get the sun direction
relnav.scene.update(image)
# figure out the direction to the sun in the image
line_of_sight_sun = relnav.camera.model.project_directions(relnav.scene.light_obj.position.ravel())
# get the rotation to make the x axis line up with this direction
# since line_of_sight_sun is a unit vector the x component = cos(theta) and y component = sin of theta
# so the following gives
# [[ cos(theta), sin(theta)],
# [-sin(theta), cos(theta)]]
# which rotates from the image frame to the frame with +x pointing towards the sun
rmat = np.array([line_of_sight_sun, line_of_sight_sun[::-1] * [-1, 1]])
# determine the type of results we are considering
if relnav.center_finding_results[ind, target_ind]["type"] in [b'cof', b'lmk', 'cof', 'lmk']:
# compute the observed minus computed residuals
resids[-1].append(rmat @ (relnav.center_finding_results[ind, target_ind]['measured'] -
relnav.center_finding_results[ind, target_ind]['predicted'])[:2])
else: # if we are considering a technique that estimates the full 3DOF position
# project the predicted and measured positions onto the image and compute the o-c resids
resids[-1].append(rmat @
(relnav.camera.model.project_onto_image(
relnav.center_finding_results[ind, target_ind]['measured'],
image=ind, temperature=image.temperature
) -
relnav.camera.model.project_onto_image(
relnav.center_finding_results[ind, target_ind]['predicted'],
image=ind, temperature=image.temperature)
))
# stack all of the resids together
resids = np.asarray(resids)
fig = plt.figure()
ax = fig.add_subplot(111)
# loop through each target again and plot the residuals vs time
for target_ind, target in enumerate(relnav.scene.target_objs):
ax.scatter(dates, resids[:, target_ind, 0], label='{} Sun direction'.format(target.name))
ax.scatter(dates, resids[:, target_ind, 1], label='{} Anti sun direction'.format(target.name))
# set the labels and update the x axis to display the dates better
ax.set_xlabel('Observation Date')
ax.set_ylabel('O-C Residuals, pix')
fig.autofmt_xdate()
# create a legend
try:
fig.legend().draggable()
except AttributeError:
fig.legend().set_draggable(True)
| [
"matplotlib.pyplot.close",
"numpy.asarray",
"numpy.zeros",
"numpy.isfinite",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.linalg.norm",
"numpy.linspace",
"numpy.cos",
"matplotlib.animation.ImageMagickWriter",
"numpy.round"
] | [((560, 590), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(500)'], {}), '(0, 2 * np.pi, 500)\n', (571, 590), True, 'import numpy as np\n'), ((1385, 1448), 'numpy.array', 'np.array', (['relnav.saved_templates[index][target_ind].shape[::-1]'], {}), '(relnav.saved_templates[index][target_ind].shape[::-1])\n', (1393, 1448), True, 'import numpy as np\n'), ((1502, 1576), 'numpy.round', 'np.round', (["relnav.center_finding_results[index, target_ind]['measured'][:2]"], {}), "(relnav.center_finding_results[index, target_ind]['measured'][:2])\n", (1510, 1576), True, 'import numpy as np\n'), ((6966, 6978), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6976, 6978), True, 'import matplotlib.pyplot as plt\n'), ((7082, 7108), 'matplotlib.animation.ImageMagickWriter', 'ImageMagickWriter', ([], {'fps': 'fps'}), '(fps=fps)\n', (7099, 7108), False, 'from matplotlib.animation import ImageMagickWriter\n'), ((7781, 7795), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7790, 7795), True, 'import matplotlib.pyplot as plt\n'), ((7928, 7940), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7938, 7940), True, 'import matplotlib.pyplot as plt\n'), ((8076, 8102), 'matplotlib.animation.ImageMagickWriter', 'ImageMagickWriter', ([], {'fps': 'fps'}), '(fps=fps)\n', (8093, 8102), False, 'from matplotlib.animation import ImageMagickWriter\n'), ((8681, 8695), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8690, 8695), True, 'import matplotlib.pyplot as plt\n'), ((8751, 8763), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8761, 8763), True, 'import matplotlib.pyplot as plt\n'), ((10353, 10371), 'numpy.asarray', 'np.asarray', (['resids'], {}), '(resids)\n', (10363, 10371), True, 'import numpy as np\n'), ((10384, 10425), 'numpy.asarray', 'np.asarray', (['dates'], {'dtype': '"""datetime64[us]"""'}), "(dates, dtype='datetime64[us]')\n", (10394, 10425), True, 'import numpy as np\n'), ((13510, 13528), 
'numpy.asarray', 'np.asarray', (['resids'], {}), '(resids)\n', (13520, 13528), True, 'import numpy as np\n'), ((13540, 13552), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13550, 13552), True, 'import matplotlib.pyplot as plt\n'), ((16492, 16510), 'numpy.asarray', 'np.asarray', (['resids'], {}), '(resids)\n', (16502, 16510), True, 'import numpy as np\n'), ((16522, 16534), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16532, 16534), True, 'import matplotlib.pyplot as plt\n'), ((617, 631), 'numpy.cos', 'np.cos', (['ANGLES'], {}), '(ANGLES)\n', (623, 631), True, 'import numpy as np\n'), ((633, 647), 'numpy.sin', 'np.sin', (['ANGLES'], {}), '(ANGLES)\n', (639, 647), True, 'import numpy as np\n'), ((649, 670), 'numpy.zeros', 'np.zeros', (['ANGLES.size'], {}), '(ANGLES.size)\n', (657, 670), True, 'import numpy as np\n'), ((989, 1001), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (999, 1001), True, 'import matplotlib.pyplot as plt\n'), ((1694, 1769), 'numpy.round', 'np.round', (["relnav.center_finding_results[index, target_ind]['predicted'][:2]"], {}), "(relnav.center_finding_results[index, target_ind]['predicted'][:2])\n", (1702, 1769), True, 'import numpy as np\n'), ((2660, 2672), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2670, 2672), True, 'import matplotlib.pyplot as plt\n'), ((3035, 3066), 'numpy.linalg.norm', 'np.linalg.norm', (['target.position'], {}), '(target.position)\n', (3049, 3066), True, 'import numpy as np\n'), ((12167, 12231), 'numpy.array', 'np.array', (['[line_of_sight_sun, line_of_sight_sun[::-1] * [-1, 1]]'], {}), '([line_of_sight_sun, line_of_sight_sun[::-1] * [-1, 1]])\n', (12175, 12231), True, 'import numpy as np\n'), ((15141, 15205), 'numpy.array', 'np.array', (['[line_of_sight_sun, line_of_sight_sun[::-1] * [-1, 1]]'], {}), '([line_of_sight_sun, line_of_sight_sun[::-1] * [-1, 1]])\n', (15149, 15205), True, 'import numpy as np\n'), ((1588, 1607), 'numpy.isfinite', 'np.isfinite', 
(['center'], {}), '(center)\n', (1599, 1607), True, 'import numpy as np\n'), ((4568, 4587), 'numpy.linalg.norm', 'np.linalg.norm', (['los'], {}), '(los)\n', (4582, 4587), True, 'import numpy as np\n'), ((4333, 4349), 'numpy.isfinite', 'np.isfinite', (['los'], {}), '(los)\n', (4344, 4349), True, 'import numpy as np\n'), ((7413, 7435), 'numpy.isfinite', 'np.isfinite', (['min_limbs'], {}), '(min_limbs)\n', (7424, 7435), True, 'import numpy as np\n'), ((7446, 7468), 'numpy.isfinite', 'np.isfinite', (['max_limbs'], {}), '(max_limbs)\n', (7457, 7468), True, 'import numpy as np\n'), ((4603, 4619), 'numpy.isfinite', 'np.isfinite', (['los'], {}), '(los)\n', (4614, 4619), True, 'import numpy as np\n')] |
import glob
import cv2
import numpy as np
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
from utils import plot_images, moving_avg
import matplotlib.pyplot as plt
CALIBRATION_IMAGES = './camera_cal/calibration*.jpg'
CALIBRATION_MATR = None
CALIBRATION_COEFF = None
NUM_CHESSBRD_CORNERS_X = 9
NUM_CHESSBRD_CORNERS_Y = 6
IMG_WIDTH = 1280
IMG_HEIGHT = 720
TRANSFORM_SRC = np.float32([[595, 450], [689, 450], [1104, 720], [215, 720]])
TRANSFORM_DST = np.float32([[250, 0], [IMG_WIDTH - 250, 0], [IMG_WIDTH - 250, IMG_HEIGHT], [250, IMG_HEIGHT]])
M = None
M_INV = None
M_PER_PX_X = 3.7 / 700 # meters per pixel in x dimension
M_PER_PX_Y = 30 / 720 # meters per pixel in y dimension
SMOOTHING_WINDOW = 3
FRAMES_COUNT = 0
FRAME_FIT = []
def find_corners(img):
""" Find chessboard corners
"""
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Find the chessboard corners
return cv2.findChessboardCorners(gray, (NUM_CHESSBRD_CORNERS_X, NUM_CHESSBRD_CORNERS_Y), None)
def calibrate_camera(objpoints, imgpoints):
""" Calibrate camera fro the given set of object and image points.
Return calibration matrix and distortion coefficients.
"""
res, cmatr, coeff, rvecs, tvecs = cv2.calibrateCamera(
objpoints, imgpoints, (IMG_WIDTH, IMG_HEIGHT), None, None)
return res, cmatr, coeff
def calibrate():
""" Find object and image points for each of the calibration images.
Calibrate camera and return calibration matrix and distortion coefficients.
"""
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((NUM_CHESSBRD_CORNERS_X * NUM_CHESSBRD_CORNERS_Y, 3), np.float32)
objp[:, :2] = np.mgrid[0:NUM_CHESSBRD_CORNERS_X, 0:NUM_CHESSBRD_CORNERS_Y].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob(CALIBRATION_IMAGES)
# Step through the list and search for chessboard corners
for name in images:
img = mpimg.imread(name)
res, corners = find_corners(img)
if res:
objpoints.append(objp)
imgpoints.append(corners)
return calibrate_camera(objpoints, imgpoints)
def undistort(img):
""" Undistort a given image
"""
assert(CALIBRATION_MATR is not None and CALIBRATION_COEFF is not None)
return cv2.undistort(img, CALIBRATION_MATR, CALIBRATION_COEFF)
def transform(img):
""" Apply perspective transformation to a given image
"""
assert(M is not None)
return cv2.warpPerspective(img, M, (IMG_WIDTH, IMG_HEIGHT))
def get_transform_matr():
""" Get transformation matrix
"""
# Given src and dst points, calculate the perspective transformation
return cv2.getPerspectiveTransform(TRANSFORM_SRC, TRANSFORM_DST)
def get_inv_transform_matr():
""" Get an inverse transformation matrix
"""
return cv2.getPerspectiveTransform(TRANSFORM_DST, TRANSFORM_SRC)
def get_binary(img, l_thresh=(0, 255), b_thresh=(0, 255)):
""" Get a threshold binary image.
"""
img = np.copy(img)
# Using different color channels.
# Convert to HLS color space and separate L channel
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)
l_channel = hls[:, :, 1]
# Convert to Lab color space and separate b channel
lab = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
b_channel = lab[:, :, 2]
# Threshold L channel
l_binary = np.zeros_like(l_channel)
l_binary[(l_channel >= l_thresh[0]) & (l_channel <= l_thresh[1])] = 1
# Threshold b channel
b_binary = np.zeros_like(b_channel)
b_binary[(b_channel >= b_thresh[0]) & (b_channel <= b_thresh[1])] = 1
combined_binary = np.zeros_like(l_binary)
combined_binary[(l_binary == 1) | (b_binary == 1)] = 1
return combined_binary
def get_curvature_m(ploty, lefty, leftx, righty, rightx):
""" Get curvature radius of left and right lines
"""
y_eval = np.max(ploty)
left_fit_cr = np.polyfit(lefty * M_PER_PX_Y, leftx * M_PER_PX_X, 2)
right_fit_cr = np.polyfit(righty * M_PER_PX_Y, rightx * M_PER_PX_X, 2)
# Calculate the new radii of curvature
left_curverad = int(((1 + (2 * left_fit_cr[0] * y_eval * M_PER_PX_Y + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * left_fit_cr[0]))
right_curverad = int(((1 + (2 * right_fit_cr[0] * y_eval * M_PER_PX_Y + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * right_fit_cr[0]))
return left_curverad, right_curverad
def get_offset_m(leftx, rightx):
""" Get the car offset from the lane center axis
"""
return ((IMG_WIDTH / 2) - (leftx[0] + rightx[0]) / 2) * M_PER_PX_X
def calc_frames_count():
global FRAMES_COUNT
if FRAMES_COUNT < SMOOTHING_WINDOW:
FRAMES_COUNT += 1
def draw_lane(img, binary_warped, ploty, left_fitx, right_fitx):
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
calc_frames_count()
if FRAMES_COUNT == 1:
FRAME_FIT.append(np.copy(left_fitx))
FRAME_FIT.append(np.copy(right_fitx))
else:
FRAME_FIT[0] = moving_avg(FRAME_FIT[0], left_fitx, FRAMES_COUNT)
FRAME_FIT[1] = moving_avg(FRAME_FIT[1], right_fitx, FRAMES_COUNT)
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([FRAME_FIT[0], ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([FRAME_FIT[1], ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
# Warp the blank back to original image space using inverse perspective matrix
newwarp = cv2.warpPerspective(color_warp, M_INV, (IMG_WIDTH, IMG_HEIGHT))
# Combine the result with the original image
return cv2.addWeighted(img, 1, newwarp, 0.3, 0)
def detect_lane(img, binary_warped):
    """ Detect lane lines and draw the lane polygon on the returned image

    img           -- undistorted camera frame the overlay is drawn on
    binary_warped -- bird's-eye binary image with candidate lane pixels set
    Returns `img` with the lane, curvature and offset annotations painted on
    it, or `img` unchanged when a second-order polynomial could not be
    fitted to both lines.
    """
    assert(M_INV is not None)
    # Assuming you have created a warped binary image called "binary_warped"
    # Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[int(binary_warped.shape[0] / 2):, :], axis=0)
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    # (np.int was deprecated in NumPy 1.20 and removed in 1.24; use the
    # builtin int instead)
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Choose the number of sliding windows
    nwindows = 9
    # Set height of windows
    window_height = int(binary_warped.shape[0] / nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    minpix = 30
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each (only if that line has pixels)
    left_fit = None
    right_fit = None
    if len(leftx) > 0 and len(lefty) > 0:
        left_fit = np.polyfit(lefty, leftx, 2)
    if len(rightx) > 0 and len(righty) > 0:
        right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting
    if left_fit is not None and right_fit is not None:
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        # Create an image and draw lane on it
        result = draw_lane(img, binary_warped, ploty, left_fitx, right_fitx)
        # Measure and print out curvature
        left_rad_m, right_rad_m = get_curvature_m(ploty, lefty, leftx, righty, rightx)
        cv2.putText(result,
                    "Left: {} m Right: {} m".format(left_rad_m, right_rad_m),
                    (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255))
        # Estimate and print out car offset
        offset_m = get_offset_m(leftx, rightx)
        cv2.putText(result,
                    "Offset: {:.2f} m".format(offset_m),
                    (100, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255))
    else:
        result = img
    return result
def process_image(img):
    """ Apply lane finding pipeline to a given image.

    Undistorts the frame, warps it to a bird's-eye view, thresholds it into
    a binary lane-pixel mask, and returns the undistorted frame annotated
    with the detected lane.
    """
    corrected = undistort(img)
    birdseye = transform(corrected)
    lane_mask = get_binary(birdseye, (225, 255), (155, 200))
    return detect_lane(corrected, lane_mask)
if __name__ == '__main__':
    # Prepare calibration and transformation constants
    # (these rebind the module-level globals used by the pipeline functions)
    res, CALIBRATION_MATR, CALIBRATION_COEFF = calibrate()
    M = get_transform_matr()
    M_INV = get_inv_transform_matr()
    # Process a video clip: run the lane-finding pipeline on every frame and
    # write the annotated result to `output`, without audio.
    output = 'video.mp4'
    video = VideoFileClip("test_videos/project_video.mp4")
    res = video.fl_image(process_image)
    res.write_videofile(output, audio=False)
| [
"numpy.absolute",
"numpy.polyfit",
"cv2.getPerspectiveTransform",
"numpy.argmax",
"numpy.mean",
"glob.glob",
"cv2.undistort",
"cv2.warpPerspective",
"numpy.zeros_like",
"numpy.int_",
"numpy.copy",
"moviepy.editor.VideoFileClip",
"cv2.cvtColor",
"numpy.max",
"numpy.int",
"numpy.linspace... | [((400, 461), 'numpy.float32', 'np.float32', (['[[595, 450], [689, 450], [1104, 720], [215, 720]]'], {}), '([[595, 450], [689, 450], [1104, 720], [215, 720]])\n', (410, 461), True, 'import numpy as np\n'), ((478, 576), 'numpy.float32', 'np.float32', (['[[250, 0], [IMG_WIDTH - 250, 0], [IMG_WIDTH - 250, IMG_HEIGHT], [250,\n IMG_HEIGHT]]'], {}), '([[250, 0], [IMG_WIDTH - 250, 0], [IMG_WIDTH - 250, IMG_HEIGHT],\n [250, IMG_HEIGHT]])\n', (488, 576), True, 'import numpy as np\n'), ((869, 906), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (881, 906), False, 'import cv2\n'), ((953, 1044), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(NUM_CHESSBRD_CORNERS_X, NUM_CHESSBRD_CORNERS_Y)', 'None'], {}), '(gray, (NUM_CHESSBRD_CORNERS_X,\n NUM_CHESSBRD_CORNERS_Y), None)\n', (978, 1044), False, 'import cv2\n'), ((1267, 1345), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', '(IMG_WIDTH, IMG_HEIGHT)', 'None', 'None'], {}), '(objpoints, imgpoints, (IMG_WIDTH, IMG_HEIGHT), None, None)\n', (1286, 1345), False, 'import cv2\n'), ((1652, 1726), 'numpy.zeros', 'np.zeros', (['(NUM_CHESSBRD_CORNERS_X * NUM_CHESSBRD_CORNERS_Y, 3)', 'np.float32'], {}), '((NUM_CHESSBRD_CORNERS_X * NUM_CHESSBRD_CORNERS_Y, 3), np.float32)\n', (1660, 1726), True, 'import numpy as np\n'), ((2052, 2081), 'glob.glob', 'glob.glob', (['CALIBRATION_IMAGES'], {}), '(CALIBRATION_IMAGES)\n', (2061, 2081), False, 'import glob\n'), ((2532, 2587), 'cv2.undistort', 'cv2.undistort', (['img', 'CALIBRATION_MATR', 'CALIBRATION_COEFF'], {}), '(img, CALIBRATION_MATR, CALIBRATION_COEFF)\n', (2545, 2587), False, 'import cv2\n'), ((2713, 2765), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(IMG_WIDTH, IMG_HEIGHT)'], {}), '(img, M, (IMG_WIDTH, IMG_HEIGHT))\n', (2732, 2765), False, 'import cv2\n'), ((2920, 2977), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['TRANSFORM_SRC', 
'TRANSFORM_DST'], {}), '(TRANSFORM_SRC, TRANSFORM_DST)\n', (2947, 2977), False, 'import cv2\n'), ((3074, 3131), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['TRANSFORM_DST', 'TRANSFORM_SRC'], {}), '(TRANSFORM_DST, TRANSFORM_SRC)\n', (3101, 3131), False, 'import cv2\n'), ((3249, 3261), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (3256, 3261), True, 'import numpy as np\n'), ((3516, 3552), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2Lab'], {}), '(img, cv2.COLOR_RGB2Lab)\n', (3528, 3552), False, 'import cv2\n'), ((3624, 3648), 'numpy.zeros_like', 'np.zeros_like', (['l_channel'], {}), '(l_channel)\n', (3637, 3648), True, 'import numpy as np\n'), ((3765, 3789), 'numpy.zeros_like', 'np.zeros_like', (['b_channel'], {}), '(b_channel)\n', (3778, 3789), True, 'import numpy as np\n'), ((3887, 3910), 'numpy.zeros_like', 'np.zeros_like', (['l_binary'], {}), '(l_binary)\n', (3900, 3910), True, 'import numpy as np\n'), ((4132, 4145), 'numpy.max', 'np.max', (['ploty'], {}), '(ploty)\n', (4138, 4145), True, 'import numpy as np\n'), ((4165, 4218), 'numpy.polyfit', 'np.polyfit', (['(lefty * M_PER_PX_Y)', '(leftx * M_PER_PX_X)', '(2)'], {}), '(lefty * M_PER_PX_Y, leftx * M_PER_PX_X, 2)\n', (4175, 4218), True, 'import numpy as np\n'), ((4238, 4293), 'numpy.polyfit', 'np.polyfit', (['(righty * M_PER_PX_Y)', '(rightx * M_PER_PX_X)', '(2)'], {}), '(righty * M_PER_PX_Y, rightx * M_PER_PX_X, 2)\n', (4248, 4293), True, 'import numpy as np\n'), ((5151, 5195), 'numpy.dstack', 'np.dstack', (['(warp_zero, warp_zero, warp_zero)'], {}), '((warp_zero, warp_zero, warp_zero))\n', (5160, 5195), True, 'import numpy as np\n'), ((5737, 5769), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (5746, 5769), True, 'import numpy as np\n'), ((5975, 6038), 'cv2.warpPerspective', 'cv2.warpPerspective', (['color_warp', 'M_INV', '(IMG_WIDTH, IMG_HEIGHT)'], {}), '(color_warp, M_INV, (IMG_WIDTH, IMG_HEIGHT))\n', (5994, 6038), False, 
'import cv2\n'), ((6099, 6139), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(1)', 'newwarp', '(0.3)', '(0)'], {}), '(img, 1, newwarp, 0.3, 0)\n', (6114, 6139), False, 'import cv2\n'), ((6656, 6686), 'numpy.int', 'np.int', (['(histogram.shape[0] / 2)'], {}), '(histogram.shape[0] / 2)\n', (6662, 6686), True, 'import numpy as np\n'), ((6704, 6735), 'numpy.argmax', 'np.argmax', (['histogram[:midpoint]'], {}), '(histogram[:midpoint])\n', (6713, 6735), True, 'import numpy as np\n'), ((6906, 6947), 'numpy.int', 'np.int', (['(binary_warped.shape[0] / nwindows)'], {}), '(binary_warped.shape[0] / nwindows)\n', (6912, 6947), True, 'import numpy as np\n'), ((7073, 7093), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (7081, 7093), True, 'import numpy as np\n'), ((7109, 7129), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (7117, 7129), True, 'import numpy as np\n'), ((8922, 8952), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (8936, 8952), True, 'import numpy as np\n'), ((8975, 9006), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (8989, 9006), True, 'import numpy as np\n'), ((11102, 11148), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['"""test_videos/project_video.mp4"""'], {}), "('test_videos/project_video.mp4')\n", (11115, 11148), False, 'from moviepy.editor import VideoFileClip\n'), ((2183, 2201), 'matplotlib.image.imread', 'mpimg.imread', (['name'], {}), '(name)\n', (2195, 2201), True, 'import matplotlib.image as mpimg\n'), ((5372, 5421), 'utils.moving_avg', 'moving_avg', (['FRAME_FIT[0]', 'left_fitx', 'FRAMES_COUNT'], {}), '(FRAME_FIT[0], left_fitx, FRAMES_COUNT)\n', (5382, 5421), False, 'from utils import plot_images, moving_avg\n'), ((5445, 5495), 'utils.moving_avg', 'moving_avg', (['FRAME_FIT[1]', 'right_fitx', 'FRAMES_COUNT'], {}), '(FRAME_FIT[1], right_fitx, FRAMES_COUNT)\n', (5455, 5495), False, 'from utils import plot_images, 
moving_avg\n'), ((5848, 5862), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (5855, 5862), True, 'import numpy as np\n'), ((6754, 6785), 'numpy.argmax', 'np.argmax', (['histogram[midpoint:]'], {}), '(histogram[midpoint:])\n', (6763, 6785), True, 'import numpy as np\n'), ((9357, 9384), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (9367, 9384), True, 'import numpy as np\n'), ((9449, 9478), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (9459, 9478), True, 'import numpy as np\n'), ((9594, 9660), 'numpy.linspace', 'np.linspace', (['(0)', '(binary_warped.shape[0] - 1)', 'binary_warped.shape[0]'], {}), '(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n', (9605, 9660), True, 'import numpy as np\n'), ((3367, 3403), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (3379, 3403), False, 'import cv2\n'), ((4443, 4474), 'numpy.absolute', 'np.absolute', (['(2 * left_fit_cr[0])'], {}), '(2 * left_fit_cr[0])\n', (4454, 4474), True, 'import numpy as np\n'), ((4594, 4626), 'numpy.absolute', 'np.absolute', (['(2 * right_fit_cr[0])'], {}), '(2 * right_fit_cr[0])\n', (4605, 4626), True, 'import numpy as np\n'), ((5088, 5116), 'numpy.zeros_like', 'np.zeros_like', (['binary_warped'], {}), '(binary_warped)\n', (5101, 5116), True, 'import numpy as np\n'), ((5273, 5291), 'numpy.copy', 'np.copy', (['left_fitx'], {}), '(left_fitx)\n', (5280, 5291), True, 'import numpy as np\n'), ((5318, 5337), 'numpy.copy', 'np.copy', (['right_fitx'], {}), '(right_fitx)\n', (5325, 5337), True, 'import numpy as np\n'), ((5605, 5637), 'numpy.vstack', 'np.vstack', (['[FRAME_FIT[0], ploty]'], {}), '([FRAME_FIT[0], ploty])\n', (5614, 5637), True, 'import numpy as np\n'), ((8711, 8744), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (8718, 8744), True, 'import numpy as np\n'), ((8824, 8858), 'numpy.mean', 'np.mean', 
(['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (8831, 8858), True, 'import numpy as np\n'), ((5690, 5722), 'numpy.vstack', 'np.vstack', (['[FRAME_FIT[1], ploty]'], {}), '([FRAME_FIT[1], ploty])\n', (5699, 5722), True, 'import numpy as np\n')] |
import numpy as np
class SVM:
    """Linear soft-margin SVM trained by per-sample (sub)gradient descent."""

    def __init__(self, learning_rate=0.001, lambda_param=0.01, n_iters=1000):
        # learning_rate: step size; lambda_param: L2 regularisation strength;
        # n_iters: passes over the training set.
        self.lr = learning_rate
        self.lambda_param = lambda_param
        self.n_iters = n_iters
        self.w = None
        self.b = None

    def fit(self, X, y):
        """Learn weights `w` and bias `b` from features X and labels y.

        Labels <= 0 are mapped to -1, the rest to +1, before optimising the
        hinge loss with L2 regularisation.
        """
        n_features = X.shape[1]
        targets = np.where(y <= 0, -1, 1)
        self.w = np.zeros(n_features)
        self.b = 0
        for _ in range(self.n_iters):
            for sample_idx, sample in enumerate(X):
                margin = targets[sample_idx] * (np.dot(sample, self.w) - self.b)
                reg_grad = 2 * self.lambda_param * self.w
                if margin >= 1:
                    # Correct side of the margin: only the regulariser pulls w.
                    self.w -= self.lr * reg_grad
                else:
                    # Inside the margin / misclassified: hinge-loss step.
                    self.w -= self.lr * (reg_grad - np.dot(sample, targets[sample_idx]))
                    self.b -= self.lr * targets[sample_idx]

    def predict(self, X):
        """Return the predicted side (+1 / -1, 0 on the boundary) per row of X."""
        return np.sign(np.dot(X, self.w) - self.b)
| [
"numpy.dot",
"numpy.where",
"numpy.zeros",
"numpy.sign"
] | [((330, 353), 'numpy.where', 'np.where', (['(y <= 0)', '(-1)', '(1)'], {}), '(y <= 0, -1, 1)\n', (338, 353), True, 'import numpy as np\n'), ((372, 392), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (380, 392), True, 'import numpy as np\n'), ((948, 963), 'numpy.sign', 'np.sign', (['approx'], {}), '(approx)\n', (955, 963), True, 'import numpy as np\n'), ((906, 923), 'numpy.dot', 'np.dot', (['X', 'self.w'], {}), '(X, self.w)\n', (912, 923), True, 'import numpy as np\n'), ((532, 551), 'numpy.dot', 'np.dot', (['x_i', 'self.w'], {}), '(x_i, self.w)\n', (538, 551), True, 'import numpy as np\n'), ((792, 812), 'numpy.dot', 'np.dot', (['x_i', 'y_[idx]'], {}), '(x_i, y_[idx])\n', (798, 812), True, 'import numpy as np\n')] |
#coding:utf-8
# coding:UTF-8
'''
Date:20160901
@author: zhaozhiyong
'''
import numpy as np
def load_data(file_name):
    """Load training data from a tab-separated file.

    input:  file_name(string) path to the training-data file
    output: feature_data(mat) feature matrix with a leading bias column of 1s
            label_data(mat)   column vector holding the last field of each line
    """
    feature_rows = []
    label_rows = []
    with open(file_name) as data_file:
        for raw_line in data_file.readlines():
            fields = raw_line.strip().split("\t")
            # Prepend the bias term, then all but the last field as features.
            row = [1]
            row.extend(float(value) for value in fields[:-1])
            feature_rows.append(row)
            # The last field on each line is the label.
            label_rows.append([float(fields[-1])])
    return np.mat(feature_rows), np.mat(label_rows)
def sig(x):
    """Sigmoid activation.

    input:  x(mat): scores, i.e. feature * w
    output: (mat):  elementwise sigmoid of x
    """
    denominator = 1 + np.exp(-x)
    return 1.0 / denominator
def lr_train_bgd(feature, label, maxCycle, alpha):
    """Train a logistic-regression model with batch gradient descent.

    input:  feature(mat)  feature matrix, one sample per row (bias included)
            label(mat)    column vector of labels in {0, 1}
            maxCycle(int) maximum number of iterations
            alpha(float)  learning rate
    output: w(mat) learned weight column vector
    """
    n = np.shape(feature)[1]         # number of features
    w = np.mat(np.ones((n, 1)))        # initialise all weights to 1
    # Fix: the original `i = 0; while i <= maxCycle: i += 1` loop executed
    # maxCycle + 1 iterations; iterate exactly maxCycle times as documented.
    for i in range(1, maxCycle + 1):
        h = sig(feature * w)            # predicted probabilities
        err = label - h                 # residual between labels and predictions
        if i % 100 == 0:
            print("\t---------iter=" + str(i) +
                  " , train error rate= " + str(error_rate(h, label)))
        w = w + alpha * feature.T * err  # batch gradient ascent step
    return w
def error_rate(h, label):
    """Average cross-entropy loss of the current predictions.

    input:  h(mat)     predicted probabilities
            label(mat) ground-truth labels
    output: (float) average negative log-likelihood over the samples
    """
    num_samples = np.shape(h)[0]
    total = 0.0
    for row in range(num_samples):
        p = h[row, 0]
        # Accumulate only where both log arguments are strictly positive.
        if p > 0 and (1 - p) > 0:
            total -= (label[row, 0] * np.log(p)
                      + (1 - label[row, 0]) * np.log(1 - p))
    return total / num_samples
def save_model(file_name, w):
    """Persist the learned model weights to disk.

    input:  file_name(string) destination file name
            w(mat)            LR weight column vector

    Writes the weights as a single tab-separated line.
    """
    num_weights = np.shape(w)[0]
    values = [str(w[row, 0]) for row in range(num_weights)]
    with open(file_name, "w") as out_file:
        out_file.write("\t".join(values))
if __name__ == "__main__":
    # 1. Load the training data
    print("---------- 1.load data ------------")
    feature, label = load_data("../resources/machine-learning/lr/data.txt")
    # 2. Train the LR model (1000 gradient-descent iterations, learning rate 0.01)
    print("---------- 2.training ------------")
    w = lr_train_bgd(feature, label, 1000, 0.01)
    # 3. Save the final model weights
    print("---------- 3.save model ------------")
    save_model("../resources/machine-learning/lr/lr.weights", w)
| [
"numpy.log",
"numpy.ones",
"numpy.shape",
"numpy.exp",
"numpy.mat"
] | [((721, 741), 'numpy.mat', 'np.mat', (['feature_data'], {}), '(feature_data)\n', (727, 741), True, 'import numpy as np\n'), ((743, 761), 'numpy.mat', 'np.mat', (['label_data'], {}), '(label_data)\n', (749, 761), True, 'import numpy as np\n'), ((1125, 1142), 'numpy.shape', 'np.shape', (['feature'], {}), '(feature)\n', (1133, 1142), True, 'import numpy as np\n'), ((1169, 1184), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (1176, 1184), True, 'import numpy as np\n'), ((1679, 1690), 'numpy.shape', 'np.shape', (['h'], {}), '(h)\n', (1687, 1690), True, 'import numpy as np\n'), ((2091, 2102), 'numpy.shape', 'np.shape', (['w'], {}), '(w)\n', (2099, 2102), True, 'import numpy as np\n'), ((890, 900), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (896, 900), True, 'import numpy as np\n'), ((1823, 1838), 'numpy.log', 'np.log', (['h[i, 0]'], {}), '(h[i, 0])\n', (1829, 1838), True, 'import numpy as np\n'), ((1883, 1902), 'numpy.log', 'np.log', (['(1 - h[i, 0])'], {}), '(1 - h[i, 0])\n', (1889, 1902), True, 'import numpy as np\n')] |
###################################
## LOAD AND PREPROCESS IMAGE DATA
###################################
import os
import torch
from torch.utils.data import Dataset
import glob
import numpy as np
from util import read_filepaths
from PIL import Image
from torchvision import transforms
from sklearn.model_selection import train_test_split
# Label-to-index mapping for the two classes.
COVIDxDICT = {'NON-COVID-19': 0, 'COVID-19': 1}
# Channel normalisation -- these values match the standard ImageNet statistics.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Training pipeline: random resized crop + horizontal flip for augmentation.
train_transformer = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomResizedCrop((224), scale=(0.5, 1.0)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    normalize
])
# Evaluation pipeline: deterministic resize + centre crop, no augmentation.
val_transformer = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize
])
class COVIDxDataset(Dataset):
    """
    Code for reading the COVIDxDataset

    Serves (image_tensor, label_tensor) pairs for the two-class
    COVID-19 / NON-COVID-19 task. The train split file is further divided
    90/10 into train/valid with a fixed random_state, so both modes see a
    stable, stratified split.
    """
    def __init__(self, mode, n_classes=2, dataset_path='../final/Dataset/CovidX_dataset/', dim=(224, 224)):
        # mode: 'train', 'valid' or 'test'; dim: target (width, height) of images.
        self.CLASSES = n_classes
        self.dim = dim
        self.COVIDxDICT = {'NON-COVID-19': 0, 'COVID-19': 1}
        # NOTE(review): the split files are hard-coded relative paths -- they
        # resolve against the working directory, not `dataset_path`; confirm.
        testfile = '../final/Dataset/CovidX_dataset/test_split.txt'
        trainfile = '../final/Dataset/CovidX_dataset/train_split.txt'
        if (mode == 'train' or mode == 'valid'):
            paths, labels = read_filepaths(trainfile)
            # Deterministic 90/10 stratified split via the fixed random_state.
            x_train, x_valid, y_train, y_valid = train_test_split(paths, labels, test_size=0.1, stratify=labels, random_state=20)
            if mode == 'train':
                self.paths, self.labels = x_train, y_train
                self.transform = train_transformer
            elif mode == 'valid':
                self.paths, self.labels = x_valid, y_valid
                self.transform = val_transformer
        elif (mode == 'test'):
            self.paths, self.labels = read_filepaths(testfile)
            self.transform = val_transformer
        # cnts is currently unused; only computed, never read.
        _, cnts = np.unique(self.labels, return_counts=True)
        print("{} examples = {}".format(mode, len(self.paths)))
        # Validation images live in the train directory on disk.
        if mode == 'valid':
            mode = 'train'
        self.root = str(dataset_path) + '/' + mode + '/'
        self.mode = mode
    def __len__(self):
        # Number of samples in the selected split.
        return len(self.paths)
    def __getitem__(self, index):
        # Load + transform the image and map its string label to a long tensor.
        image_tensor = self.load_image(self.root + self.paths[index], self.dim, augmentation=self.mode)
        label_tensor = torch.tensor(self.COVIDxDICT[self.labels[index]], dtype=torch.long)
        return image_tensor, label_tensor
    def load_image(self, img_path, dim, augmentation='test'):
        """Read an image, convert to RGB, resize to `dim` and apply the
        split's transform.

        `augmentation` is accepted but not used here -- the transform was
        already chosen in __init__ based on the mode.
        """
        if not os.path.exists(img_path):
            print("IMAGE DOES NOT EXIST {}".format(img_path))
        image = Image.open(img_path).convert('RGB')
        image = image.resize(dim)
        image_tensor = self.transform(image)
        return image_tensor
| [
"torch.tensor",
"util.read_filepaths",
"torchvision.transforms.RandomHorizontalFlip",
"sklearn.model_selection.train_test_split",
"os.path.exists",
"torchvision.transforms.RandomResizedCrop",
"PIL.Image.open",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Normalize",
"torchvision.tr... | [((402, 477), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (422, 477), False, 'from torchvision import transforms\n'), ((523, 545), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (540, 545), False, 'from torchvision import transforms\n'), ((551, 602), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {'scale': '(0.5, 1.0)'}), '(224, scale=(0.5, 1.0))\n', (579, 602), False, 'from torchvision import transforms\n'), ((610, 643), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (641, 643), False, 'from torchvision import transforms\n'), ((649, 670), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (668, 670), False, 'from torchvision import transforms\n'), ((733, 755), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224)'], {}), '(224)\n', (750, 755), False, 'from torchvision import transforms\n'), ((761, 787), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (782, 787), False, 'from torchvision import transforms\n'), ((793, 814), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (812, 814), False, 'from torchvision import transforms\n'), ((1976, 2018), 'numpy.unique', 'np.unique', (['self.labels'], {'return_counts': '(True)'}), '(self.labels, return_counts=True)\n', (1985, 2018), True, 'import numpy as np\n'), ((2466, 2533), 'torch.tensor', 'torch.tensor', (['self.COVIDxDICT[self.labels[index]]'], {'dtype': 'torch.long'}), '(self.COVIDxDICT[self.labels[index]], dtype=torch.long)\n', (2478, 2533), False, 'import torch\n'), ((1379, 1404), 'util.read_filepaths', 'read_filepaths', (['trainfile'], {}), '(trainfile)\n', (1393, 1404), False, 'from util import read_filepaths\n'), 
((1454, 1539), 'sklearn.model_selection.train_test_split', 'train_test_split', (['paths', 'labels'], {'test_size': '(0.1)', 'stratify': 'labels', 'random_state': '(20)'}), '(paths, labels, test_size=0.1, stratify=labels, random_state=20\n )\n', (1470, 1539), False, 'from sklearn.model_selection import train_test_split\n'), ((2655, 2679), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (2669, 2679), False, 'import os\n'), ((1888, 1912), 'util.read_filepaths', 'read_filepaths', (['testfile'], {}), '(testfile)\n', (1902, 1912), False, 'from util import read_filepaths\n'), ((2759, 2779), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2769, 2779), False, 'from PIL import Image\n')] |
import numpy as np
import matplotlib.pyplot as plt
import gym
import time
import copy
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge
from keras.optimizers import Adam, RMSprop
from keras.callbacks import History
from keras import backend as K
import tensorflow as tf
from gym import Env, Space, spaces
from gym.utils import seeding
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory, EpisodeParameterMemory
from rl.agents.cem import CEMAgent
from rl.agents import SARSAAgent
from rl.callbacks import TrainEpisodeLogger, CallbackList
# env = gym.make('MountainCar-v0')
# env = gym.make('MountainCar-v0')
env = gym.make('CartPole-v1')
env.seed()
nb_actions = env.action_space.n

# Simple feed-forward Q-network: flatten the (window, obs) input, then
# three ReLU hidden layers and a linear Q-value head (one output per action).
x = Input((1,) + env.observation_space.shape)
y = Flatten()(x)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(nb_actions)(y)
y = Activation('linear')(y)
model = Model(x, y)

memory = SequentialMemory(limit=10000, window_length=1)
# policy = BoltzmannQPolicy()
policy = EpsGreedyQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9,
               enable_dueling_network=False, dueling_type='avg', target_model_update=1e-2, policy=policy)
# dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
#                enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=.001, decay=.001), metrics=['mae'])

rewards = []
# NOTE: `callback` is built but not passed to fit() below (callbacks=None).
callback = [TrainEpisodeLogger(), History()]
hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None)
rewards.extend(hist.history.get('episode_reward'))
plt.plot(rewards)

dqn.test(env, nb_episodes=5, visualize=True)

# Roll out a fixed action for a few hundred steps and record the states.
state = env.reset()
action = env.action_space.sample()
print(action)
state_list = []
for i in range(300):
    state_list.append(state)
    # action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0])
    # Fix: CartPole-v1 has a Discrete(2) action space, so stepping with the
    # hard-coded action 2 (leftover from MountainCar's 3 actions) is out of
    # range; step with the sampled, valid action instead.
    state, reward, done, _ = env.step(action)
    env.render()
env.render(close=True)
state_arr = np.array(state_list)
plt.plot(state_arr)
"rl.callbacks.TrainEpisodeLogger",
"rl.memory.SequentialMemory",
"rl.agents.dqn.DQNAgent",
"keras.callbacks.History",
"gym.make",
"matplotlib.pyplot.plot",
"keras.layers.Activation",
"rl.policy.EpsGreedyQPolicy",
"keras.layers.Flatten",
"keras.optimizers.Adam",
"keras.models.Model",
"keras.lay... | [((751, 774), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (759, 774), False, 'import gym\n'), ((823, 864), 'keras.layers.Input', 'Input', (['((1,) + env.observation_space.shape)'], {}), '((1,) + env.observation_space.shape)\n', (828, 864), False, 'from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge\n'), ((1072, 1083), 'keras.models.Model', 'Model', (['x', 'y'], {}), '(x, y)\n', (1077, 1083), False, 'from keras.models import Sequential, Model\n'), ((1094, 1140), 'rl.memory.SequentialMemory', 'SequentialMemory', ([], {'limit': '(10000)', 'window_length': '(1)'}), '(limit=10000, window_length=1)\n', (1110, 1140), False, 'from rl.memory import SequentialMemory, EpisodeParameterMemory\n'), ((1180, 1198), 'rl.policy.EpsGreedyQPolicy', 'EpsGreedyQPolicy', ([], {}), '()\n', (1196, 1198), False, 'from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy\n'), ((1205, 1397), 'rl.agents.dqn.DQNAgent', 'DQNAgent', ([], {'model': 'model', 'nb_actions': 'nb_actions', 'memory': 'memory', 'nb_steps_warmup': '(1000)', 'gamma': '(0.9)', 'enable_dueling_network': '(False)', 'dueling_type': '"""avg"""', 'target_model_update': '(0.01)', 'policy': 'policy'}), "(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup\n =1000, gamma=0.9, enable_dueling_network=False, dueling_type='avg',\n target_model_update=0.01, policy=policy)\n", (1213, 1397), False, 'from rl.agents.dqn import DQNAgent\n'), ((1844, 1861), 'matplotlib.pyplot.plot', 'plt.plot', (['rewards'], {}), '(rewards)\n', (1852, 1861), True, 'import matplotlib.pyplot as plt\n'), ((2229, 2249), 'numpy.array', 'np.array', (['state_list'], {}), '(state_list)\n', (2237, 2249), True, 'import numpy as np\n'), ((2250, 2269), 'matplotlib.pyplot.plot', 'plt.plot', (['state_arr'], {}), '(state_arr)\n', (2258, 2269), True, 'import matplotlib.pyplot as plt\n'), ((869, 878), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (876, 878), False, 
'from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge\n'), ((886, 895), 'keras.layers.Dense', 'Dense', (['(16)'], {}), '(16)\n', (891, 895), False, 'from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge\n'), ((903, 921), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (913, 921), False, 'from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge\n'), ((929, 938), 'keras.layers.Dense', 'Dense', (['(16)'], {}), '(16)\n', (934, 938), False, 'from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge\n'), ((946, 964), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (956, 964), False, 'from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge\n'), ((972, 981), 'keras.layers.Dense', 'Dense', (['(16)'], {}), '(16)\n', (977, 981), False, 'from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge\n'), ((989, 1007), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (999, 1007), False, 'from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge\n'), ((1015, 1032), 'keras.layers.Dense', 'Dense', (['nb_actions'], {}), '(nb_actions)\n', (1020, 1032), False, 'from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge\n'), ((1040, 1060), 'keras.layers.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (1050, 1060), False, 'from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge\n'), ((1610, 1637), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)', 'decay': '(0.001)'}), '(lr=0.001, decay=0.001)\n', (1614, 1637), False, 'from keras.optimizers import Adam, RMSprop\n'), ((1680, 1700), 'rl.callbacks.TrainEpisodeLogger', 
'TrainEpisodeLogger', ([], {}), '()\n', (1698, 1700), False, 'from rl.callbacks import TrainEpisodeLogger, CallbackList\n'), ((1702, 1711), 'keras.callbacks.History', 'History', ([], {}), '()\n', (1709, 1711), False, 'from keras.callbacks import History\n')] |
import random
from argparse import ArgumentParser
from collections import deque
import gym
from gym.wrappers import RescaleAction
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from td3 import TD3
from utils import MeanStdevFilter, Transition, make_gif, make_checkpoint
def train(agent, env, params):
    """Run the TD3 training loop.

    agent  -- TD3 agent exposing get_action / optimize / replay_pool
    env    -- gym environment (seeded below)
    params -- hyperparameter dict assembled in main()

    Collects up to 3e7 environment samples, optimising the agent every
    `update_every_n_steps` steps once `n_collect_steps` samples exist, and
    logging losses and train/test rewards to TensorBoard every
    `log_interval` steps.
    """
    update_timestep = params['update_every_n_steps']
    seed = params['seed']
    log_interval = 1000        # environment steps between TensorBoard logs
    gif_interval = 500000      # environment steps between GIF dumps
    n_random_actions = params['n_random_actions']
    n_evals = params['n_evals']
    n_collect_steps = params['n_collect_steps']
    use_statefilter = params['obs_filter']
    save_model = params['save_model']
    # At least one full batch must be collected before the first optimize().
    assert n_collect_steps > agent.batchsize, "错误,选择的数量需要大于batch_size"
    cumulative_timestep = 0
    cumulative_log_timestep = 0
    n_updates = 0
    i_episode = 0
    log_episode = 0
    samples_number = 0
    episode_rewards = []
    episode_steps = []
    if use_statefilter:
        # Running mean/stdev normalisation over observation dimensions.
        state_filter = MeanStdevFilter(env.env.observation_space.shape[0])
    else:
        state_filter = None
    # Seed every RNG involved for reproducibility.
    random.seed(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)
    env.seed(seed)
    env.action_space.np_random.seed(seed)
    max_steps = env.spec.max_episode_steps
    writer = SummaryWriter()
    while samples_number < 3e7:
        time_step = 0
        episode_reward = 0
        i_episode += 1
        log_episode += 1
        state = env.reset()
        if state_filter:
            state_filter.update(state)
        done = False
        while (not done):
            cumulative_log_timestep += 1
            cumulative_timestep += 1
            time_step += 1
            samples_number += 1
            # Pure exploration for the first n_random_actions samples.
            if samples_number < n_random_actions:
                action = env.action_space.sample()
            else:
                action = agent.get_action(state, state_filter=state_filter)
            nextstate, reward, done, _ = env.step(action)
            # if we hit the time-limit, it's not a 'real' done; we don't want to assign low value to those states
            real_done = False if time_step == max_steps else done
            agent.replay_pool.push(Transition(state, action, reward, nextstate, real_done))
            state = nextstate
            if state_filter:
                state_filter.update(state)
            episode_reward += reward
            # update if it's time
            if cumulative_timestep % update_timestep == 0 and cumulative_timestep > n_collect_steps:
                q1_loss, q2_loss, pi_loss = agent.optimize(update_timestep, state_filter=state_filter)
                n_updates += 1
            # logging
            # NOTE(review): if no optimize() has run yet when this fires, the
            # q1_loss/q2_loss/pi_loss names are unbound -- verify the interplay
            # of update_every_n_steps and n_collect_steps guarantees one has.
            if cumulative_timestep % log_interval == 0 and cumulative_timestep > n_collect_steps:
                writer.add_scalar('Loss/Q-func_1', q1_loss, n_updates)
                writer.add_scalar('Loss/Q-func_2', q2_loss, n_updates)
                #TODO: This may not work; fix this
                if pi_loss:
                    writer.add_scalar('Loss/policy', pi_loss, n_updates)
                avg_length = np.mean(episode_steps)
                running_reward = np.mean(episode_rewards)
                eval_reward = evaluate(env, agent, state_filter, n_starts=n_evals)
                writer.add_scalar('Reward/Train', running_reward, cumulative_timestep)
                writer.add_scalar('Reward/Test', eval_reward, cumulative_timestep)
                print('Episode {} \t QFuncLoss {} \t Samples {} \t Avg length: {} \t Test reward: {} \t Train reward: {} \t Number of Updates: {}'
                      .format(i_episode, q1_loss+q2_loss, samples_number, avg_length, eval_reward, running_reward, n_updates))
                episode_steps = []
                episode_rewards = []
            if cumulative_timestep % gif_interval == 0:
                make_gif(agent, env, cumulative_timestep, state_filter)
                if save_model:
                    make_checkpoint(agent, cumulative_timestep, params['env'])
        episode_steps.append(time_step)
        episode_rewards.append(episode_reward)
def evaluate(env, agent, state_filter, n_starts=1):
    """Run `n_starts` deterministic evaluation episodes and return the mean episode reward."""
    total = 0
    for _episode in range(n_starts):
        obs = env.reset()
        finished = False
        while not finished:
            act = agent.get_action(obs, state_filter=state_filter, deterministic=True)
            next_obs, step_reward, finished, _ = env.step(act)
            total += step_reward
            obs = next_obs
    return total / n_starts
def main():
    """Parse command-line options, build the environment and TD3 agent, and launch training."""
    parser = ArgumentParser()
    parser.add_argument('--env', type=str, default='HalfCheetah-v2')
    parser.add_argument('--seed', type=int, default=100)
    parser.add_argument('--use_obs_filter', dest='obs_filter', action='store_true')
    parser.add_argument('--update_every_n_steps', type=int, default=1)
    parser.add_argument('--n_random_actions', type=int, default=25000)
    parser.add_argument('--n_collect_steps', type=int, default=1000)
    parser.add_argument('--n_evals', type=int, default=1)
    parser.add_argument('--save_model', dest='save_model', action='store_true')
    # Both store_true flags default to False unless given on the command line.
    parser.set_defaults(obs_filter=False, save_model=False)
    params = vars(parser.parse_args())
    # Rescale actions to [-1, 1] so the agent's tanh policy maps directly onto them.
    env = RescaleAction(gym.make(params['env']), -1, 1)
    agent = TD3(params['seed'],
                env.observation_space.shape[0],
                env.action_space.shape[0])
    train(agent=agent, env=env, params=params)


if __name__ == '__main__':
    main()
| [
"utils.make_checkpoint",
"numpy.random.seed",
"argparse.ArgumentParser",
"gym.make",
"utils.Transition",
"torch.manual_seed",
"utils.make_gif",
"gym.wrappers.RescaleAction",
"random.seed",
"td3.TD3",
"torch.utils.tensorboard.SummaryWriter",
"utils.MeanStdevFilter",
"numpy.mean"
] | [((1088, 1105), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1099, 1105), False, 'import random\n'), ((1110, 1133), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1127, 1133), False, 'import torch\n'), ((1138, 1158), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1152, 1158), True, 'import numpy as np\n'), ((1278, 1293), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (1291, 1293), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((4539, 4555), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (4553, 4555), False, 'from argparse import ArgumentParser\n'), ((5292, 5315), 'gym.make', 'gym.make', (["params['env']"], {}), "(params['env'])\n", (5300, 5315), False, 'import gym\n'), ((5326, 5351), 'gym.wrappers.RescaleAction', 'RescaleAction', (['env', '(-1)', '(1)'], {}), '(env, -1, 1)\n', (5339, 5351), False, 'from gym.wrappers import RescaleAction\n'), ((5456, 5488), 'td3.TD3', 'TD3', (['seed', 'state_dim', 'action_dim'], {}), '(seed, state_dim, action_dim)\n', (5459, 5488), False, 'from td3 import TD3\n'), ((993, 1044), 'utils.MeanStdevFilter', 'MeanStdevFilter', (['env.env.observation_space.shape[0]'], {}), '(env.env.observation_space.shape[0])\n', (1008, 1044), False, 'from utils import MeanStdevFilter, Transition, make_gif, make_checkpoint\n'), ((2169, 2224), 'utils.Transition', 'Transition', (['state', 'action', 'reward', 'nextstate', 'real_done'], {}), '(state, action, reward, nextstate, real_done)\n', (2179, 2224), False, 'from utils import MeanStdevFilter, Transition, make_gif, make_checkpoint\n'), ((3077, 3099), 'numpy.mean', 'np.mean', (['episode_steps'], {}), '(episode_steps)\n', (3084, 3099), True, 'import numpy as np\n'), ((3133, 3157), 'numpy.mean', 'np.mean', (['episode_rewards'], {}), '(episode_rewards)\n', (3140, 3157), True, 'import numpy as np\n'), ((3829, 3884), 'utils.make_gif', 'make_gif', (['agent', 'env', 'cumulative_timestep', 
'state_filter'], {}), '(agent, env, cumulative_timestep, state_filter)\n', (3837, 3884), False, 'from utils import MeanStdevFilter, Transition, make_gif, make_checkpoint\n'), ((3936, 3994), 'utils.make_checkpoint', 'make_checkpoint', (['agent', 'cumulative_timestep', "params['env']"], {}), "(agent, cumulative_timestep, params['env'])\n", (3951, 3994), False, 'from utils import MeanStdevFilter, Transition, make_gif, make_checkpoint\n')] |
import numpy as np
import scipy.io
import nibabel as nib
from nilearn.input_data import NiftiMasker
from nilearn.masking import compute_epi_mask
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import PredefinedSplit
from copy import deepcopy
# data path to ss dataset
ss_dir = '/jukebox/norman/jantony/surprisesuspense/'
ss_bids_dir = '/jukebox/norman/jantony/surprisesuspense/data/bids/Norman/Antony/ss/'
# constants for the ss dataset (localizer)
ss_all_ROIs = ['ses01brain', 'V1', 'bilateral_NAcc', 'bilateral_HC', 'bilateral_frontal_inf-orbital']
ss_TR = 1
ss_hrf_lag = 5 # In seconds what is the lag between a stimulus onset and the peak bold response
run_names = ['view','recall']
n_runs = [3]
ss_tngs = 9
# TRs_run = [311, 406, 214, 166, 406, 406, 214]
def get_MNI152_template(dim_x, dim_y, dim_z):
    """Return the MNI152 template volume used by fmrisim.

    Parameters
    ----------
    dim_x: int
    dim_y: int
    dim_z: int
        - dims set the size of the volume we want to create

    Return
    -------
    MNI_152_template: 3d array (dim_x, dim_y, dim_z)
    """
    # BrainIAK is imported lazily so the module can load without it installed.
    import brainiak.utils.fmrisim as sim
    volume_shape = np.asarray([dim_x, dim_y, dim_z])
    # mask_brain returns (mask, template); only the template is needed here.
    return sim.mask_brain(volume_shape)[1]
def load_ss_mask(ROI_name, sub):
    """Load one ROI mask of the ss dataset.

    Parameters
    ----------
    ROI_name: string
    sub: string

    Return
    ----------
    the requested mask (nibabel image)
    """
    #assert ROI_name in ss_all_ROIs
    mask_file = "%sderivatives/firstlevel/%s/masks/%s_%s.nii.gz" % (
        ss_bids_dir, sub, sub, ROI_name)
    roi_mask = nib.load(mask_file)
    print("Loaded %s mask" % (ROI_name))
    return roi_mask
def load_ss_epi_data(sub, run):
    """Load the fmriprep-preprocessed BOLD Nifti of one viewing run for one subject."""
    epi_in = ("%sderivatives/fmriprep/%s/ses-01/func/"
              "%s_ses-01_task-view_run-0%i_space-T1w_desc-preproc_bold.nii.gz"
              % (ss_bids_dir, sub, sub, run))
    epi_data = nib.load(epi_in)
    print("Loading data from %s" % (epi_in))
    return epi_data
def mask_data(epi_data, mask):
    """mask the input data with the input mask

    Parameters
    ----------
    epi_data
    mask

    Return
    ----------
    masked data
    """
    masker = NiftiMasker(mask_img=mask)
    return masker.fit_transform(epi_data)
def scale_data(data):
    """Standardize `data` (zero mean, unit variance per feature) and return the scaled array."""
    scaler = preprocessing.StandardScaler()
    return scaler.fit_transform(data)
""""""
# Make a function to load the mask data
def load_data(directory, subject_name, mask_name='', num_runs=2, zscore_data=False):
    """Load and concatenate the masked EPI data of one subject across runs.

    Parameters
    ----------
    directory : string
        Unused; kept for backward compatibility with existing callers.
    subject_name : string
    mask_name : string
        Name of an ROI mask. When empty, a whole-brain mask is computed
        from the EPI data (intersection across runs).
    num_runs : int
    zscore_data : bool
        If True, standardize the data of each run before concatenation.

    Returns
    -------
    concatenated_data : 2D array, runs concatenated
    mask : the mask used (nibabel image for an ROI, 3D array otherwise)
    """
    print ("Processing Start ...")
    # If there is a mask supplied then load it now.
    # BUG FIX: the original tested `mask_name is ''`; identity comparison with a
    # string literal relies on interning and raises SyntaxWarning on Python >= 3.8.
    if not mask_name:
        mask = None
    else:
        mask = load_ss_mask(mask_name, subject_name)
    # Cycle through the runs
    for run in range(1, num_runs + 1):
        epi_data = load_ss_epi_data(subject_name, run)
        # Mask the data if necessary
        if mask_name:
            epi_mask_data = mask_data(epi_data, mask).T
        else:
            # Do a whole brain mask
            if run == 1:
                # Compute mask from epi
                mask = compute_epi_mask(epi_data).get_fdata()
            else:
                # Get the intersection mask
                # (set voxels that are within the mask on all runs to 1, set all other voxels to 0)
                mask *= compute_epi_mask(epi_data).get_fdata()
            # Reshape all of the data from 4D (X*Y*Z*time) to 2D (voxel*time): not great for memory
            epi_mask_data = epi_data.get_fdata().reshape(
                mask.shape[0] * mask.shape[1] * mask.shape[2],
                epi_data.shape[3]
            )
        # Transpose and z-score (standardize) the data
        if zscore_data == True:
            scaler = preprocessing.StandardScaler().fit(epi_mask_data.T)
            preprocessed_data = scaler.transform(epi_mask_data.T)
        else:
            preprocessed_data = epi_mask_data.T
        # Concatenate the data across runs
        if run == 1:
            concatenated_data = preprocessed_data
        else:
            concatenated_data = np.hstack((concatenated_data, preprocessed_data))
    # Apply the whole-brain masking: First, reshape the mask from 3D (X*Y*Z) to 1D (voxel).
    # Second, get indices of non-zero voxels, i.e. voxels inside the mask.
    # Third, zero out all of the voxels outside of the mask.
    if not mask_name:
        mask_vector = np.nonzero(mask.reshape(mask.shape[0] * mask.shape[1] * mask.shape[2], ))[0]
        concatenated_data = concatenated_data[mask_vector, :]
    # Return the mask data
    return concatenated_data, mask
# Create a function to shift the size
def shift_timing(label_TR, TR_shift_size):
    """Shift a label time course down by `TR_shift_size` TRs (e.g. to account for HRF lag).

    Zeros are padded at the top and the rows shifted past the original
    length are dropped, so the output keeps `label_TR.shape[0]` entries.
    """
    pad = np.zeros((TR_shift_size, 1))
    shifted = np.vstack((pad, label_TR))
    # Truncate to the original length and flatten to a 1-D vector.
    return shifted[0:label_TR.shape[0], 0]
# Extract bold data for non-zero labels.
def reshape_data(label_TR_shifted, masked_data_all):
    """Extract the TRs with a non-zero label together with their bold data.

    Returns the selected (transposed) bold data and the corresponding
    non-zero labels.
    """
    keep = np.squeeze(np.nonzero(label_TR_shifted))
    # Pull out the selected time points and transpose.
    bold_selected = np.transpose(masked_data_all[:, keep])
    return bold_selected, label_TR_shifted[keep]
def normalize(bold_data_, run_ids):
    """normalized the data within each run

    Parameters
    --------------
    bold_data_: np.array, n_stimuli x n_voxels
    run_ids: np.array or a list
        run label of each row of `bold_data_`

    Return
    --------------
    normalized_data: np.array, n_stimuli x n_voxels
        rows are standardized per run and stacked in ascending run-id order
        (identical to the input order when rows are already grouped by run)
    """
    run_ids = np.asarray(run_ids)
    scaler = StandardScaler()
    # BUG FIX: the original iterated over the undefined global `ss_n_runs`
    # (NameError at runtime); derive the set of runs from `run_ids` instead.
    data = [scaler.fit_transform(bold_data_[run_ids == r, :])
            for r in np.unique(run_ids)]
    return np.vstack(data)
def decode(X, y, cv_ids, model):
    """Cross-validated decoding with a predefined split.

    Parameters
    --------------
    X: np.array, n_stimuli x n_voxels
    y: np.array, n_stimuli,
    cv_ids: np.array - n_stimuli,

    Return
    --------------
    models, scores : one fitted model copy and one accuracy per fold
    """
    models, scores = [], []
    splitter = PredefinedSplit(cv_ids)
    for train_idx, test_idx in splitter.split():
        # split the data
        X_train, X_test = X[train_idx], X[test_idx]
        y_train, y_test = y[train_idx], y[test_idx]
        # fit on the training folds, score on the held-out fold
        model.fit(X_train, y_train)
        fold_score = model.score(X_test, y_test)
        # keep a snapshot of the fitted model for this fold
        models.append(deepcopy(model))
        scores.append(fold_score)
    return models, scores
| [
"copy.deepcopy",
"sklearn.model_selection.PredefinedSplit",
"sklearn.preprocessing.StandardScaler",
"nibabel.load",
"numpy.asarray",
"nilearn.masking.compute_epi_mask",
"numpy.zeros",
"numpy.transpose",
"numpy.hstack",
"numpy.nonzero",
"nilearn.input_data.NiftiMasker",
"numpy.squeeze",
"brai... | [((1307, 1340), 'numpy.asarray', 'np.asarray', (['[dim_x, dim_y, dim_z]'], {}), '([dim_x, dim_y, dim_z])\n', (1317, 1340), True, 'import numpy as np\n'), ((1367, 1393), 'brainiak.utils.fmrisim.mask_brain', 'sim.mask_brain', (['dimensions'], {}), '(dimensions)\n', (1381, 1393), True, 'import brainiak.utils.fmrisim as sim\n'), ((1828, 1846), 'nibabel.load', 'nib.load', (['maskfile'], {}), '(maskfile)\n', (1836, 1846), True, 'import nibabel as nib\n'), ((2174, 2190), 'nibabel.load', 'nib.load', (['epi_in'], {}), '(epi_in)\n', (2182, 2190), True, 'import nibabel as nib\n'), ((2468, 2494), 'nilearn.input_data.NiftiMasker', 'NiftiMasker', ([], {'mask_img': 'mask'}), '(mask_img=mask)\n', (2479, 2494), False, 'from nilearn.input_data import NiftiMasker\n'), ((5162, 5190), 'numpy.zeros', 'np.zeros', (['(TR_shift_size, 1)'], {}), '((TR_shift_size, 1))\n', (5170, 5190), True, 'import numpy as np\n'), ((5255, 5288), 'numpy.vstack', 'np.vstack', (['(zero_shift, label_TR)'], {}), '((zero_shift, label_TR))\n', (5264, 5288), True, 'import numpy as np\n'), ((5578, 5606), 'numpy.nonzero', 'np.nonzero', (['label_TR_shifted'], {}), '(label_TR_shifted)\n', (5588, 5606), True, 'import numpy as np\n'), ((5625, 5648), 'numpy.squeeze', 'np.squeeze', (['label_index'], {}), '(label_index)\n', (5635, 5648), True, 'import numpy as np\n'), ((5700, 5745), 'numpy.transpose', 'np.transpose', (['masked_data_all[:, label_index]'], {}), '(masked_data_all[:, label_index])\n', (5712, 5745), True, 'import numpy as np\n'), ((6116, 6132), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6130, 6132), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6271, 6286), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (6280, 6286), True, 'import numpy as np\n'), ((6605, 6628), 'sklearn.model_selection.PredefinedSplit', 'PredefinedSplit', (['cv_ids'], {}), '(cv_ids)\n', (6620, 6628), False, 'from sklearn.model_selection import PredefinedSplit\n'), ((2625, 
2655), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (2653, 2655), False, 'from sklearn import preprocessing\n'), ((4466, 4515), 'numpy.hstack', 'np.hstack', (['(concatenated_data, preprocessed_data)'], {}), '((concatenated_data, preprocessed_data))\n', (4475, 4515), True, 'import numpy as np\n'), ((7037, 7052), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (7045, 7052), False, 'from copy import deepcopy\n'), ((4129, 4159), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (4157, 4159), False, 'from sklearn import preprocessing\n'), ((3463, 3489), 'nilearn.masking.compute_epi_mask', 'compute_epi_mask', (['epi_data'], {}), '(epi_data)\n', (3479, 3489), False, 'from nilearn.masking import compute_epi_mask\n'), ((3694, 3720), 'nilearn.masking.compute_epi_mask', 'compute_epi_mask', (['epi_data'], {}), '(epi_data)\n', (3710, 3720), False, 'from nilearn.masking import compute_epi_mask\n')] |
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from hpd import hpd_grid
def plot_post(sample, alpha=0.05, show_mode=True, kde_plot=True, bins=50,
              ROPE=None, comp_val=None, roundto=2):
    """Plot posterior and HPD

    Parameters
    ----------
    sample : Numpy array or python list
        An array containing MCMC samples
    alpha : float
        Desired probability of type I error (defaults to 0.05)
    show_mode: Bool
        If True the legend will show the mode(s) value(s), if false the mean(s)
        will be displayed
    kde_plot: Bool
        If True the posterior will be displayed using a Kernel Density Estimation
        otherwise an histogram will be used
    bins: integer
        Number of bins used for the histogram, only works when kde_plot is False
    ROPE: list or numpy array
        Lower and upper values of the Region Of Practical Equivalence
    comp_val: float
        Comparison value

    Returns
    -------
    post_summary : dictionary
        Containing values with several summary statistics
    """

    post_summary = {'mean':0,'median':0,'mode':0, 'alpha':0,'hpd_low':0,
                   'hpd_high':0, 'comp_val':0, 'pc_gt_comp_val':0, 'ROPE_low':0,
                   'ROPE_high':0, 'pc_in_ROPE':0}

    post_summary['mean'] = round(np.mean(sample), roundto)
    post_summary['median'] = round(np.median(sample), roundto)
    post_summary['alpha'] = alpha

    # Compute the hpd, KDE and mode for the posterior
    hpd, x, y, modes = hpd_grid(sample, alpha, roundto)
    post_summary['hpd'] = hpd
    post_summary['mode'] = modes

    ## Plot KDE.
    if kde_plot:
        plt.plot(x, y, color='k', lw=2)
    ## Plot histogram.
    else:
        # BUG FIX: `normed` was removed from plt.hist in matplotlib 3.1;
        # `density=True` is the equivalent, supported argument.
        plt.hist(sample, density=True, bins=bins, facecolor='b',
                 edgecolor='w')

    ## Display mode or mean:
    if show_mode:
        string = '{:g} ' * len(post_summary['mode'])
        plt.plot(0, label='mode =' + string.format(*post_summary['mode']), alpha=0)
    else:
        plt.plot(0, label='mean = {:g}'.format(post_summary['mean']), alpha=0)

    ## Display the hpd.
    hpd_label = ''
    for value in hpd:
        plt.plot(value, [0, 0], linewidth=10, color='b')
        hpd_label = hpd_label + '{:g} {:g}\n'.format(round(value[0], roundto), round(value[1], roundto))
    plt.plot(0, 0, linewidth=4, color='b', label='hpd {:g}%\n{}'.format((1-alpha)*100, hpd_label))
    ## Display the ROPE.
    if ROPE is not None:
        pc_in_ROPE = round(np.sum((sample > ROPE[0]) & (sample < ROPE[1]))/len(sample)*100, roundto)
        plt.plot(ROPE, [0, 0], linewidth=20, color='r', alpha=0.75)
        plt.plot(0, 0, linewidth=4, color='r', label='{:g}% in ROPE'.format(pc_in_ROPE))
        post_summary['ROPE_low'] = ROPE[0]
        post_summary['ROPE_high'] = ROPE[1]
        post_summary['pc_in_ROPE'] = pc_in_ROPE
    ## Display the comparison value.
    if comp_val is not None:
        pc_gt_comp_val = round(100 * np.sum(sample > comp_val)/len(sample), roundto)
        pc_lt_comp_val = round(100 - pc_gt_comp_val, roundto)
        plt.axvline(comp_val, ymax=.75, color='g', linewidth=4, alpha=0.75,
                    label='{:g}% < {:g} < {:g}%'.format(pc_lt_comp_val,
                                                        comp_val, pc_gt_comp_val))
        post_summary['comp_val'] = comp_val
        post_summary['pc_gt_comp_val'] = pc_gt_comp_val

    plt.legend(loc=0, framealpha=1)
    frame = plt.gca()
    frame.axes.get_yaxis().set_ticks([])
    return post_summary
| [
"numpy.sum",
"hpd.hpd_grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"numpy.median",
"matplotlib.pyplot.legend",
"numpy.mean",
"matplotlib.pyplot.gca"
] | [((1569, 1601), 'hpd.hpd_grid', 'hpd_grid', (['sample', 'alpha', 'roundto'], {}), '(sample, alpha, roundto)\n', (1577, 1601), False, 'from hpd import hpd_grid\n'), ((3447, 3478), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)', 'framealpha': '(1)'}), '(loc=0, framealpha=1)\n', (3457, 3478), True, 'import matplotlib.pyplot as plt\n'), ((3491, 3500), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3498, 3500), True, 'import matplotlib.pyplot as plt\n'), ((1368, 1383), 'numpy.mean', 'np.mean', (['sample'], {}), '(sample)\n', (1375, 1383), True, 'import numpy as np\n'), ((1429, 1446), 'numpy.median', 'np.median', (['sample'], {}), '(sample)\n', (1438, 1446), True, 'import numpy as np\n'), ((1712, 1743), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""k"""', 'lw': '(2)'}), "(x, y, color='k', lw=2)\n", (1720, 1743), True, 'import matplotlib.pyplot as plt\n'), ((1785, 1855), 'matplotlib.pyplot.hist', 'plt.hist', (['sample'], {'normed': '(True)', 'bins': 'bins', 'facecolor': '"""b"""', 'edgecolor': '"""w"""'}), "(sample, normed=True, bins=bins, facecolor='b', edgecolor='w')\n", (1793, 1855), True, 'import matplotlib.pyplot as plt\n'), ((2213, 2261), 'matplotlib.pyplot.plot', 'plt.plot', (['value', '[0, 0]'], {'linewidth': '(10)', 'color': '"""b"""'}), "(value, [0, 0], linewidth=10, color='b')\n", (2221, 2261), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2686), 'matplotlib.pyplot.plot', 'plt.plot', (['ROPE', '[0, 0]'], {'linewidth': '(20)', 'color': '"""r"""', 'alpha': '(0.75)'}), "(ROPE, [0, 0], linewidth=20, color='r', alpha=0.75)\n", (2635, 2686), True, 'import matplotlib.pyplot as plt\n'), ((2545, 2592), 'numpy.sum', 'np.sum', (['((sample > ROPE[0]) & (sample < ROPE[1]))'], {}), '((sample > ROPE[0]) & (sample < ROPE[1]))\n', (2551, 2592), True, 'import numpy as np\n'), ((3016, 3041), 'numpy.sum', 'np.sum', (['(sample > comp_val)'], {}), '(sample > comp_val)\n', (3022, 3041), True, 'import numpy as np\n')] |
import numpy as np
import onnxruntime
from dnnv.nn.converters.onnx import *
from dnnv.nn.operations import *
def test_Expand_dim_changed():
    """Expand a (3, 1) constant to (2, 1, 6) and check the ONNX output matches numpy broadcasting."""
    input_shape = np.array([3, 1])
    values = np.reshape(np.arange(1, np.prod(input_shape) + 1, dtype=np.float32), input_shape)
    target_shape = np.array([2, 1, 6])
    onnx_model = convert(OperationGraph([Expand(values, target_shape)]))
    outputs = onnxruntime.backend.run(onnx_model, [])
    assert len(outputs) == 1
    expected = values * np.ones(target_shape, dtype=np.float32)
    assert outputs[0].shape == expected.shape
    assert np.allclose(outputs[0], expected)
def test_Expand_dim_unchanged():
    """Expand a (3, 1) graph input to (3, 4) and check the ONNX output equals a 4-fold tile."""
    input_shape = np.array([3, 1])
    target_shape = np.array([3, 4])
    values = np.reshape(np.arange(1, np.prod(input_shape) + 1, dtype=np.float32), input_shape)
    graph_input = Input(input_shape, np.dtype(np.float32))
    onnx_model = convert(OperationGraph([Expand(graph_input, target_shape)]))
    outputs = onnxruntime.backend.run(onnx_model, [values])
    assert len(outputs) == 1
    expected = np.tile(values, 4)
    assert outputs[0].shape == expected.shape
    assert np.allclose(outputs[0], expected)
| [
"onnxruntime.backend.run",
"numpy.allclose",
"numpy.dtype",
"numpy.ones",
"numpy.array",
"numpy.tile",
"numpy.prod"
] | [((155, 171), 'numpy.array', 'np.array', (['[3, 1]'], {}), '([3, 1])\n', (163, 171), True, 'import numpy as np\n'), ((269, 288), 'numpy.array', 'np.array', (['[2, 1, 6]'], {}), '([2, 1, 6])\n', (277, 288), True, 'import numpy as np\n'), ((385, 424), 'onnxruntime.backend.run', 'onnxruntime.backend.run', (['onnx_model', '[]'], {}), '(onnx_model, [])\n', (408, 424), False, 'import onnxruntime\n'), ((578, 600), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (589, 600), True, 'import numpy as np\n'), ((648, 664), 'numpy.array', 'np.array', (['[3, 1]'], {}), '([3, 1])\n', (656, 664), True, 'import numpy as np\n'), ((681, 697), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (689, 697), True, 'import numpy as np\n'), ((929, 972), 'onnxruntime.backend.run', 'onnxruntime.backend.run', (['onnx_model', '[data]'], {}), '(onnx_model, [data])\n', (952, 972), False, 'import onnxruntime\n'), ((1035, 1051), 'numpy.tile', 'np.tile', (['data', '(4)'], {}), '(data, 4)\n', (1042, 1051), True, 'import numpy as np\n'), ((1099, 1121), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (1110, 1121), True, 'import numpy as np\n'), ((494, 530), 'numpy.ones', 'np.ones', (['new_shape'], {'dtype': 'np.float32'}), '(new_shape, dtype=np.float32)\n', (501, 530), True, 'import numpy as np\n'), ((808, 828), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (816, 828), True, 'import numpy as np\n'), ((207, 221), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (214, 221), True, 'import numpy as np\n'), ((733, 747), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (740, 747), True, 'import numpy as np\n')] |
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. "
import logging
logging.getLogger('numba').setLevel(logging.WARNING)
logging.getLogger('tvb').setLevel(logging.ERROR)
import tvb.simulator.lab as lab
from tvb.datatypes.sensors import SensorsInternal
import numpy.random as rgn
import numpy as np
from mpi4py import MPI
import os
import json
import time
import nest_elephant_tvb.Tvb.modify_tvb.noise as my_noise
import nest_elephant_tvb.Tvb.modify_tvb.Zerlaut as Zerlaut
from nest_elephant_tvb.Tvb.modify_tvb.Interface_co_simulation_parallel import Interface_co_simulation
from nest_elephant_tvb.Tvb.helper_function_zerlaut import findVec
def init(param_tvb_connection,param_tvb_coupling,param_tvb_integrator,param_tvb_model,param_tvb_monitor,cosim=None):
    '''
    Initialise a TVB simulator (Zerlaut mean-field model) from parameter dictionaries
    :param param_tvb_connection : parameters for the connectivity (paths to distance/weight matrices, labels, centers, ...)
    :param param_tvb_coupling : parameters for the coupling between nodes
    :param param_tvb_integrator : parameters of the integrator and the noise
    :param param_tvb_model : parameters for the Zerlaut model of TVB
    :param param_tvb_monitor : parameters for TVB monitors
    :param cosim : co-simulation configuration dict ('id_proxy', 'time_synchronize') or None for standalone TVB
    :return: the initialized (configured) simulator
    '''
    ## initialise the random generator
    rgn.seed(param_tvb_integrator['seed_init']-1)
    ## Model configuration : first- or second-order Zerlaut mean-field
    if param_tvb_model['order'] == 1:
        model = Zerlaut.ZerlautAdaptationFirstOrder(variables_of_interest='E I W_e W_i'.split())
    elif param_tvb_model['order'] == 2:
        model = Zerlaut.ZerlautAdaptationSecondOrder(variables_of_interest='E I C_ee C_ei C_ii W_e W_i'.split())
    else:
        raise Exception('Bad order for the model')
    # copy the biophysical parameters of the mean-field model from the parameter dict
    model.g_L=np.array(param_tvb_model['g_L'])
    model.E_L_e=np.array(param_tvb_model['E_L_e'])
    model.E_L_i=np.array(param_tvb_model['E_L_i'])
    model.C_m=np.array(param_tvb_model['C_m'])
    model.b_e=np.array(param_tvb_model['b_e'])
    model.a_e=np.array(param_tvb_model['a_e'])
    model.b_i=np.array(param_tvb_model['b_i'])
    model.a_i=np.array(param_tvb_model['a_i'])
    model.tau_w_e=np.array(param_tvb_model['tau_w_e'])
    model.tau_w_i=np.array(param_tvb_model['tau_w_i'])
    model.E_e=np.array(param_tvb_model['E_e'])
    model.E_i=np.array(param_tvb_model['E_i'])
    model.Q_e=np.array(param_tvb_model['Q_e'])
    model.Q_i=np.array(param_tvb_model['Q_i'])
    model.tau_e=np.array(param_tvb_model['tau_e'])
    model.tau_i=np.array(param_tvb_model['tau_i'])
    model.N_tot=np.array(param_tvb_model['N_tot'])
    model.p_connect=np.array(param_tvb_model['p_connect'])
    model.g=np.array(param_tvb_model['g'])
    model.T=np.array(param_tvb_model['T'])
    model.P_e=np.array(param_tvb_model['P_e'])
    model.P_i=np.array(param_tvb_model['P_i'])
    model.K_ext_e=np.array(param_tvb_model['K_ext_e'])
    # external inputs are disabled here (fixed to zero)
    model.K_ext_i=np.array(0)
    model.external_input_ex_ex=np.array(0.)
    model.external_input_ex_in=np.array(0.)
    model.external_input_in_ex=np.array(0.0)
    model.external_input_in_in=np.array(0.0)
    # initial-condition ranges for the state variables
    model.state_variable_range['E'] =np.array( param_tvb_model['initial_condition']['E'])
    model.state_variable_range['I'] =np.array( param_tvb_model['initial_condition']['I'])
    if param_tvb_model['order'] == 2:
        # the second-order model adds the covariance state variables
        model.state_variable_range['C_ee'] = np.array(param_tvb_model['initial_condition']['C_ee'])
        model.state_variable_range['C_ei'] = np.array(param_tvb_model['initial_condition']['C_ei'])
        model.state_variable_range['C_ii'] = np.array(param_tvb_model['initial_condition']['C_ii'])
    model.state_variable_range['W_e'] = np.array(param_tvb_model['initial_condition']['W_e'])
    model.state_variable_range['W_i'] = np.array(param_tvb_model['initial_condition']['W_i'])
    ## Connection : load distance/weight matrices (cropped to nb_region) and optional metadata
    nb_region = int(param_tvb_connection['nb_region'])
    tract_lengths = np.load(param_tvb_connection['path_distance'])
    weights = np.load(param_tvb_connection['path_weight'])
    if 'path_region_labels' in param_tvb_connection.keys():
        region_labels = np.loadtxt(param_tvb_connection['path_region_labels'], dtype=str)
    else:
        region_labels = np.array([], dtype=np.dtype('<U128'))
    if 'path_centers' in param_tvb_connection.keys():
        centers = np.loadtxt(param_tvb_connection['path_centers'])
    else:
        centers = np.array([])
    if 'orientation' in param_tvb_connection.keys() and param_tvb_connection['orientation']:
        # orientation of each region: unit vector from the mean of all centers to the region center
        orientation = []
        for i in np.transpose(centers):
            orientation.append(findVec(i,np.mean(centers,axis=1)))
        orientation = np.array(orientation)
    else:
        orientation = None
    if 'path_cortical' in param_tvb_connection.keys():
        cortical = np.load(param_tvb_connection['path_cortical'])
    else:
        cortical=None
    connection = lab.connectivity.Connectivity(number_of_regions=nb_region,
                                            tract_lengths=tract_lengths[:nb_region,:nb_region],
                                            weights=weights[:nb_region,:nb_region],
                                            region_labels=region_labels,
                                            centres=centers.T,
                                            cortical=cortical,
                                            orientations=orientation)
    # if 'normalised' in param_tvb_connection.keys() or param_tvb_connection['normalised']:
    #     connection.weights = connection.weights / np.sum(connection.weights, axis=0)
    connection.speed = np.array(param_tvb_connection['velocity'])
    ## Coupling : linear coupling with no offset (b=0)
    coupling = lab.coupling.Linear(a=np.array(param_tvb_coupling['a']),
                            b=np.array(0.0))
    ## Integrator : Heun scheme with Ornstein-Uhlenbeck noise on the 7 state variables
    noise = my_noise.Ornstein_Ulhenbeck_process(
        tau_OU=param_tvb_integrator['tau_OU'],
        mu=np.array(param_tvb_integrator['mu']).reshape((7,1,1)),
        nsig=np.array(param_tvb_integrator['nsig']),
        weights=np.array(param_tvb_integrator['weights']).reshape((7,1,1))
    )
    noise.random_stream.seed(param_tvb_integrator['seed'])
    integrator = lab.integrators.HeunStochastic(noise=noise,dt=param_tvb_integrator['sim_resolution'])
    # integrator = lab.integrators.HeunDeterministic()
    ## Monitors : enable each monitor according to its flag in param_tvb_monitor
    monitors =[]
    if param_tvb_monitor['Raw']:
        monitors.append(lab.monitors.Raw())
    if param_tvb_monitor['TemporalAverage']:
        monitor_TAVG = lab.monitors.TemporalAverage(
            variables_of_interest=param_tvb_monitor['parameter_TemporalAverage']['variables_of_interest'],
            period=param_tvb_monitor['parameter_TemporalAverage']['period'])
        monitors.append(monitor_TAVG)
    if param_tvb_monitor['Bold']:
        monitor_Bold = lab.monitors.Bold(
            variables_of_interest=np.array(param_tvb_monitor['parameter_Bold']['variables_of_interest']),
            period=param_tvb_monitor['parameter_Bold']['period'])
        monitors.append(monitor_Bold)
    if param_tvb_monitor['SEEG']:
        # projection: scaling / distance between each sensor and the cortical region centers
        # NOTE(review): requires 'path_centers' and 'path_cortical' to be set — verify callers
        sensor = SensorsInternal().from_file(param_tvb_monitor['parameter_SEEG']['path'])
        projection_matrix = param_tvb_monitor['parameter_SEEG']['scaling_projection']/np.array(
            [np.linalg.norm(np.expand_dims(i, 1) - centers[:, cortical], axis=0) for i in sensor.locations])
        np.save(param_tvb_monitor['path_result']+'/projection.npy',projection_matrix)
        monitors.append(lab.monitors.iEEG.from_file(
            param_tvb_monitor['parameter_SEEG']['path'],
            param_tvb_monitor['path_result']+'/projection.npy'))
    if cosim is not None:
        # special monitor for MPI co-simulation: exchanges data for the proxy regions
        monitor_IO = Interface_co_simulation(
            id_proxy=cosim['id_proxy'],
            time_synchronize=cosim['time_synchronize']
        )
        monitors.append(monitor_IO)
    #initialize the simulator:
    simulator = lab.simulator.Simulator(model = model, connectivity = connection,
                          coupling = coupling, integrator = integrator, monitors = monitors
                          )
    simulator.configure()
    # save the initial condition
    np.save(param_tvb_monitor['path_result']+'/step_init.npy',simulator.history.buffer)
    # end edit
    return simulator
def run_simulation(simulator, time, parameter_tvb):
    """Run an already-initialized simulator and save monitor output in chunks.

    :param simulator: the simulator already initialize
    :param time: the time of simulation
    :param parameter_tvb: parameters for saving ('save_time', 'path_result'
        and the four monitor flags)
    """
    # one buffer per active monitor
    nb_monitor = (parameter_tvb['Raw'] + parameter_tvb['TemporalAverage']
                  + parameter_tvb['Bold'] + parameter_tvb['SEEG'])
    buffers = [[] for _ in range(nb_monitor)]
    count = 0
    # run the simulation
    for result in simulator(simulation_length=time):
        for i in range(nb_monitor):
            if result[i] is not None:
                buffers[i].append(result[i])
        # flush the buffers to disk every 'save_time' units of simulated time
        if result[0][0] >= parameter_tvb['save_time'] * (count + 1):
            print('simulation time :'+str(result[0][0])+'\r')
            np.save(parameter_tvb['path_result'] + '/step_' + str(count) + '.npy', buffers)
            buffers = [[] for _ in range(nb_monitor)]
            count += 1
    # save whatever remains after the loop
    np.save(parameter_tvb['path_result'] + '/step_' + str(count) + '.npy', buffers)
def simulate_tvb(results_path,begin,end,param_tvb_connection,param_tvb_coupling,
                 param_tvb_integrator,param_tvb_model,param_tvb_monitor):
    """Initialise and run a standalone Zerlaut TVB simulation, saving results under `results_path`/tvb/.

    :param results_path: the folder to save the result
    :param begin: the starting point of record WARNING : not used
    :param end: the ending point of record
    :param param_tvb_connection : parameters for the connection
    :param param_tvb_coupling : parameters for the coupling between nodes
    :param param_tvb_integrator : parameters of the integrator and the noise
    :param param_tvb_model : parameters for the models of TVB
    :param param_tvb_monitor : parameters for TVB monitors
    """
    param_tvb_monitor['path_result'] = results_path + '/tvb/'
    sim = init(param_tvb_connection, param_tvb_coupling, param_tvb_integrator,
               param_tvb_model, param_tvb_monitor)
    run_simulation(sim, end, param_tvb_monitor)
def run_mpi(path):
    '''
    Run the TVB side of a co-simulation, exchanging proxy data over MPI.

    Reads <path>/parameter.json, configures logging, connects to the
    translator processes through the port files under
    <result_path>/translation/, then alternates between receiving proxy
    rates, simulating one synchronization window, and sending rates back.

    :param path: the folder of the simulation
    '''
    # take the parameters of the simulation from the saving file
    with open(path+'/parameter.json') as f:
        parameters = json.load(f)
    # unpack the individual parameter groups
    param_co_simulation = parameters['param_co_simulation']
    param_tvb_connection= parameters['param_tvb_connection']
    param_tvb_coupling= parameters['param_tvb_coupling']
    param_tvb_integrator= parameters['param_tvb_integrator']
    param_tvb_model= parameters['param_tvb_model']
    param_tvb_monitor= parameters['param_tvb_monitor']
    result_path= parameters['result_path']
    end = parameters['end']
    # configuration of the logger
    logger = logging.getLogger('tvb')
    fh = logging.FileHandler(path + '/log/tvb.log')
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    # map level_log (0..4) onto DEBUG..CRITICAL on both the handler and logger
    if param_co_simulation['level_log'] == 0:
        fh.setLevel(logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    elif param_co_simulation['level_log'] == 1:
        fh.setLevel(logging.INFO)
        logger.setLevel(logging.INFO)
    elif param_co_simulation['level_log'] == 2:
        fh.setLevel(logging.WARNING)
        logger.setLevel(logging.WARNING)
    elif param_co_simulation['level_log'] == 3:
        fh.setLevel(logging.ERROR)
        logger.setLevel(logging.ERROR)
    elif param_co_simulation['level_log'] == 4:
        fh.setLevel(logging.CRITICAL)
        logger.setLevel(logging.CRITICAL)
    #initialise the TVB
    param_tvb_monitor['path_result']=result_path+'/tvb/'
    id_proxy = param_co_simulation['id_region_nest']  # ids of the proxied regions (key name suggests NEST)
    time_synch = param_co_simulation['synchronization']  # length of one exchange window
    # NOTE(review): result_path is assumed to end with '/' here — confirm
    path_send = result_path+"translation/send_to_tvb/"
    path_receive = result_path+"translation/receive_from_tvb/"
    simulator = init(param_tvb_connection,param_tvb_coupling,param_tvb_integrator,param_tvb_model,param_tvb_monitor,
                     {'id_proxy':np.array(id_proxy),
                      'time_synchronize':time_synch,
                      'path_send': path_send,
                      'path_receive': path_receive,
                      })
    # configure for saving result of TVB
    # check how many monitor it's used
    nb_monitor = param_tvb_monitor['Raw'] + param_tvb_monitor['TemporalAverage'] + param_tvb_monitor['Bold'] + param_tvb_monitor['SEEG']
    # initialise the variable for the saving the result
    save_result =[]
    for i in range(nb_monitor): # the input output monitor
        save_result.append([])
    #init MPI :
    data = None #data for the proxy node (no initialisation in the parameter)
    # one receiving and one sending communicator per proxy region
    comm_receive=[]
    for i in id_proxy:
        comm_receive.append(init_mpi(path_send+str(i)+".txt",logger))
    comm_send=[]
    for i in id_proxy :
        comm_send.append(init_mpi(path_receive+str(i)+".txt",logger))
    # the loop of the simulation
    count = 0
    count_save = 0
    while count*time_synch < end: # FAT END POINT
        logger.info(" TVB receive data")
        #receive MPI data
        data_value = []
        for comm in comm_receive:
            receive = receive_mpi(comm)
            time_data = receive[0]
            data_value.append(receive[1])
        # rebuild the proxy input as an object array [times, values]
        data=np.empty((2,),dtype=object)
        # number of integration steps covered by the received window
        nb_step = np.rint((time_data[1]-time_data[0])/param_tvb_integrator['sim_resolution'])
        nb_step_0 = np.rint(time_data[0]/param_tvb_integrator['sim_resolution']) + 1 # start at the first time step not at 0.0
        time_data = np.arange(nb_step_0,nb_step_0+nb_step,1)*param_tvb_integrator['sim_resolution']
        # after the axis swap: first axis is time, second axis is the proxy region
        data_value = np.swapaxes(np.array(data_value),0,1)[:,:]
        if data_value.shape[0] != time_data.shape[0]:
            raise(Exception('Bad shape of data'))
        data[:]=[time_data,data_value]
        logger.info(" TVB start simulation "+str(count*time_synch))
        nest_data=[]
        for result in simulator(simulation_length=time_synch,proxy_data=data):
            for i in range(nb_monitor):
                if result[i] is not None:
                    save_result[i].append(result[i])
            # the last entry of `result` carries the outgoing proxy (time, rate)
            nest_data.append([result[-1][0],result[-1][1]])
            #save the result in file
            if result[-1][0] >= param_tvb_monitor['save_time']*(count_save+1): #check if the time for saving at some time step
                np.save(param_tvb_monitor['path_result']+'/step_'+str(count_save)+'.npy',save_result)
                save_result =[]
                for i in range(nb_monitor):
                    save_result.append([])
                count_save +=1
        logger.info(" TVB end simulation")
        # prepare to send data with MPI
        nest_data = np.array(nest_data)
        time = [nest_data[0,0],nest_data[-1,0]]  # start and end time of the window
        rate = np.concatenate(nest_data[:,1])
        for index,comm in enumerate(comm_send):
            send_mpi(comm,time,rate[:,index]*1e3)  # NOTE(review): rates scaled by 1e3 — presumably unit conversion, confirm
        #increment of the loop
        count+=1
    # save the last part
    logger.info(" TVB finish")
    np.save(param_tvb_monitor['path_result']+'/step_'+str(count_save)+'.npy',save_result)
    # close every MPI connection; sending=True on the send side, False on the receive side
    for index,comm in enumerate(comm_send):
        end_mpi(comm,result_path+"/translation/receive_from_tvb/"+str(id_proxy[index])+".txt",True,logger)
    for index,comm in enumerate(comm_receive):
        end_mpi(comm,result_path+"/translation/send_to_tvb/"+str(id_proxy[index])+".txt",False,logger)
    MPI.Finalize() # ending with MPI
    logger.info(" TVB exit")
    return
## MPI function for receive and send data
def init_mpi(path, logger):
    """
    Open an MPI connection to the translator whose port name is stored in *path*.

    :param path: text file whose first line is the MPI port name
    :param logger: logger used to trace the connection steps
    :return: the connected MPI communicator
    """
    # Give the translator time to publish its port file.
    time.sleep(5)
    with open(path, "r") as fport:
        port = fport.readline()
    logger.info("wait connection " + port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Connect(port)
    logger.info('connect to ' + port)
    sys.stdout.flush()
    return comm
def send_mpi(comm, times, data):
    """
    Send rate data to the translator over MPI.

    :param comm: MPI communicator
    :param times: times of values
    :param data: rates inputs
    :return: nothing
    """
    status_ = MPI.Status()
    # Block until the translator signals that it accepts the connection.
    accept = False
    while not accept:
        req = comm.irecv(source=0, tag=0)
        accept = req.wait(status_)
    # Rank of the process that answered; used as destination below.
    source = status_.Get_source()
    payload = np.ascontiguousarray(data, dtype='d')  # contiguous doubles for MPI
    length = np.array(payload.shape[0], dtype='i')  # number of rate values
    stamp = np.array(times, dtype='d')             # start / end time of the step
    comm.Send([stamp, MPI.DOUBLE], dest=source, tag=0)
    comm.Send([length, MPI.INT], dest=source, tag=0)
    comm.Send([payload, MPI.DOUBLE], dest=source, tag=0)
def receive_mpi(comm):
    """
    Receive the proxy rate values for the next time step.

    :param comm: MPI communicator
    :return: (times, rates) when data arrives with tag 0, otherwise None
    """
    status_ = MPI.Status()
    # Tell the translator that we are ready for the next chunk.
    req = comm.isend(True, dest=1, tag=0)
    req.wait()
    # Start / end time of the incoming step.
    time_step = np.empty(2, dtype='d')
    comm.Recv([time_step, 2, MPI.DOUBLE], source=1, tag=MPI.ANY_TAG, status=status_)
    # Number of rate values that follow.
    size = np.empty(1, dtype='i')
    comm.Recv([size, MPI.INT], source=1, tag=0)
    # The rates themselves.
    rates = np.empty(size, dtype='d')
    comm.Recv([rates, size, MPI.DOUBLE], source=1, tag=MPI.ANY_TAG, status=status_)
    # Tag 0 marks valid data; any other tag signals the end of the stream.
    if status_.Get_tag() == 0:
        return time_step, rates
    return None  # TODO take in count
def end_mpi(comm, path, sending, logger):
    """
    Close the MPI communication with one translator and release its port.

    :param comm: MPI communicator
    :param path: port file, read before the translator deletes it
    :param sending: whether the translator is for sending or receiving data
    :return: nothing
    """
    # The port name must be read before the file disappears.
    fport = open(path, "r")
    port = fport.readline()
    fport.close()
    if sending:
        # Send side: wait for the translator handshake, then emit the
        # end-of-simulation marker (tag 1) with a dummy time stamp.
        logger.info("TVB close connection send " + port)
        sys.stdout.flush()
        status_ = MPI.Status()
        logger.info("TVB send check")
        accept = False
        while not accept:
            req = comm.irecv(source=0, tag=0)
            accept = req.wait(status_)
        logger.info("TVB send end simulation")
        source = status_.Get_source()  # the id of the excepted source
        times = np.array([0., 0.], dtype='d')
        comm.Send([times, MPI.DOUBLE], dest=source, tag=1)
    else:
        # Receive side: request the next chunk with tag 1 to signal the end.
        logger.info("TVB close connection receive " + port)
        req = comm.isend(True, dest=1, tag=1)
        req.wait()
    # Tear down the connection and free the port.
    logger.info("TVB disconnect communication")
    comm.Disconnect()
    logger.info("TVB close " + port)
    MPI.Close_port(port)
    logger.info("TVB close connection "+port)
    return
def run_normal(path_parameter):
    """
    Run a stand-alone TVB simulation configured by <path>/parameter.json.

    :param path_parameter: folder containing the parameter file
    """
    with open(path_parameter + '/parameter.json') as f:
        parameters = json.load(f)
    simulate_tvb(results_path=parameters['result_path'],
                 begin=parameters['begin'],
                 end=parameters['end'],
                 param_tvb_connection=parameters['param_tvb_connection'],
                 param_tvb_coupling=parameters['param_tvb_coupling'],
                 param_tvb_integrator=parameters['param_tvb_integrator'],
                 param_tvb_model=parameters['param_tvb_model'],
                 param_tvb_monitor=parameters['param_tvb_monitor'])
if __name__ == "__main__":
import sys
if len(sys.argv)==3:
if sys.argv[1] == '0': # run only tvb without mpi
run_normal(sys.argv[2])
elif sys.argv[1] == '1': # run tvb in co-simulation configuration
run_mpi(sys.argv[2])
else:
raise Exception('bad option of running')
else:
raise Exception('not good number of argument ')
| [
"numpy.load",
"numpy.random.seed",
"mpi4py.MPI.Status",
"numpy.empty",
"logging.Formatter",
"numpy.mean",
"sys.stdout.flush",
"numpy.arange",
"mpi4py.MPI.COMM_WORLD.Connect",
"logging.FileHandler",
"numpy.transpose",
"tvb.simulator.lab.monitors.TemporalAverage",
"mpi4py.MPI.Close_port",
"n... | [((1453, 1500), 'numpy.random.seed', 'rgn.seed', (["(param_tvb_integrator['seed_init'] - 1)"], {}), "(param_tvb_integrator['seed_init'] - 1)\n", (1461, 1500), True, 'import numpy.random as rgn\n'), ((1890, 1922), 'numpy.array', 'np.array', (["param_tvb_model['g_L']"], {}), "(param_tvb_model['g_L'])\n", (1898, 1922), True, 'import numpy as np\n'), ((1939, 1973), 'numpy.array', 'np.array', (["param_tvb_model['E_L_e']"], {}), "(param_tvb_model['E_L_e'])\n", (1947, 1973), True, 'import numpy as np\n'), ((1990, 2024), 'numpy.array', 'np.array', (["param_tvb_model['E_L_i']"], {}), "(param_tvb_model['E_L_i'])\n", (1998, 2024), True, 'import numpy as np\n'), ((2039, 2071), 'numpy.array', 'np.array', (["param_tvb_model['C_m']"], {}), "(param_tvb_model['C_m'])\n", (2047, 2071), True, 'import numpy as np\n'), ((2086, 2118), 'numpy.array', 'np.array', (["param_tvb_model['b_e']"], {}), "(param_tvb_model['b_e'])\n", (2094, 2118), True, 'import numpy as np\n'), ((2133, 2165), 'numpy.array', 'np.array', (["param_tvb_model['a_e']"], {}), "(param_tvb_model['a_e'])\n", (2141, 2165), True, 'import numpy as np\n'), ((2180, 2212), 'numpy.array', 'np.array', (["param_tvb_model['b_i']"], {}), "(param_tvb_model['b_i'])\n", (2188, 2212), True, 'import numpy as np\n'), ((2227, 2259), 'numpy.array', 'np.array', (["param_tvb_model['a_i']"], {}), "(param_tvb_model['a_i'])\n", (2235, 2259), True, 'import numpy as np\n'), ((2278, 2314), 'numpy.array', 'np.array', (["param_tvb_model['tau_w_e']"], {}), "(param_tvb_model['tau_w_e'])\n", (2286, 2314), True, 'import numpy as np\n'), ((2333, 2369), 'numpy.array', 'np.array', (["param_tvb_model['tau_w_i']"], {}), "(param_tvb_model['tau_w_i'])\n", (2341, 2369), True, 'import numpy as np\n'), ((2384, 2416), 'numpy.array', 'np.array', (["param_tvb_model['E_e']"], {}), "(param_tvb_model['E_e'])\n", (2392, 2416), True, 'import numpy as np\n'), ((2431, 2463), 'numpy.array', 'np.array', (["param_tvb_model['E_i']"], {}), "(param_tvb_model['E_i'])\n", 
(2439, 2463), True, 'import numpy as np\n'), ((2478, 2510), 'numpy.array', 'np.array', (["param_tvb_model['Q_e']"], {}), "(param_tvb_model['Q_e'])\n", (2486, 2510), True, 'import numpy as np\n'), ((2525, 2557), 'numpy.array', 'np.array', (["param_tvb_model['Q_i']"], {}), "(param_tvb_model['Q_i'])\n", (2533, 2557), True, 'import numpy as np\n'), ((2574, 2608), 'numpy.array', 'np.array', (["param_tvb_model['tau_e']"], {}), "(param_tvb_model['tau_e'])\n", (2582, 2608), True, 'import numpy as np\n'), ((2625, 2659), 'numpy.array', 'np.array', (["param_tvb_model['tau_i']"], {}), "(param_tvb_model['tau_i'])\n", (2633, 2659), True, 'import numpy as np\n'), ((2676, 2710), 'numpy.array', 'np.array', (["param_tvb_model['N_tot']"], {}), "(param_tvb_model['N_tot'])\n", (2684, 2710), True, 'import numpy as np\n'), ((2731, 2769), 'numpy.array', 'np.array', (["param_tvb_model['p_connect']"], {}), "(param_tvb_model['p_connect'])\n", (2739, 2769), True, 'import numpy as np\n'), ((2782, 2812), 'numpy.array', 'np.array', (["param_tvb_model['g']"], {}), "(param_tvb_model['g'])\n", (2790, 2812), True, 'import numpy as np\n'), ((2825, 2855), 'numpy.array', 'np.array', (["param_tvb_model['T']"], {}), "(param_tvb_model['T'])\n", (2833, 2855), True, 'import numpy as np\n'), ((2870, 2902), 'numpy.array', 'np.array', (["param_tvb_model['P_e']"], {}), "(param_tvb_model['P_e'])\n", (2878, 2902), True, 'import numpy as np\n'), ((2917, 2949), 'numpy.array', 'np.array', (["param_tvb_model['P_i']"], {}), "(param_tvb_model['P_i'])\n", (2925, 2949), True, 'import numpy as np\n'), ((2968, 3004), 'numpy.array', 'np.array', (["param_tvb_model['K_ext_e']"], {}), "(param_tvb_model['K_ext_e'])\n", (2976, 3004), True, 'import numpy as np\n'), ((3023, 3034), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (3031, 3034), True, 'import numpy as np\n'), ((3066, 3079), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (3074, 3079), True, 'import numpy as np\n'), ((3110, 3123), 'numpy.array', 'np.array', 
(['(0.0)'], {}), '(0.0)\n', (3118, 3123), True, 'import numpy as np\n'), ((3154, 3167), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (3162, 3167), True, 'import numpy as np\n'), ((3199, 3212), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (3207, 3212), True, 'import numpy as np\n'), ((3250, 3301), 'numpy.array', 'np.array', (["param_tvb_model['initial_condition']['E']"], {}), "(param_tvb_model['initial_condition']['E'])\n", (3258, 3301), True, 'import numpy as np\n'), ((3340, 3391), 'numpy.array', 'np.array', (["param_tvb_model['initial_condition']['I']"], {}), "(param_tvb_model['initial_condition']['I'])\n", (3348, 3391), True, 'import numpy as np\n'), ((3771, 3824), 'numpy.array', 'np.array', (["param_tvb_model['initial_condition']['W_e']"], {}), "(param_tvb_model['initial_condition']['W_e'])\n", (3779, 3824), True, 'import numpy as np\n'), ((3865, 3918), 'numpy.array', 'np.array', (["param_tvb_model['initial_condition']['W_i']"], {}), "(param_tvb_model['initial_condition']['W_i'])\n", (3873, 3918), True, 'import numpy as np\n'), ((4013, 4059), 'numpy.load', 'np.load', (["param_tvb_connection['path_distance']"], {}), "(param_tvb_connection['path_distance'])\n", (4020, 4059), True, 'import numpy as np\n'), ((4074, 4118), 'numpy.load', 'np.load', (["param_tvb_connection['path_weight']"], {}), "(param_tvb_connection['path_weight'])\n", (4081, 4118), True, 'import numpy as np\n'), ((4979, 5239), 'tvb.simulator.lab.connectivity.Connectivity', 'lab.connectivity.Connectivity', ([], {'number_of_regions': 'nb_region', 'tract_lengths': 'tract_lengths[:nb_region, :nb_region]', 'weights': 'weights[:nb_region, :nb_region]', 'region_labels': 'region_labels', 'centres': 'centers.T', 'cortical': 'cortical', 'orientations': 'orientation'}), '(number_of_regions=nb_region, tract_lengths=\n tract_lengths[:nb_region, :nb_region], weights=weights[:nb_region, :\n nb_region], region_labels=region_labels, centres=centers.T, cortical=\n cortical, 
orientations=orientation)\n', (5008, 5239), True, 'import tvb.simulator.lab as lab\n'), ((5707, 5749), 'numpy.array', 'np.array', (["param_tvb_connection['velocity']"], {}), "(param_tvb_connection['velocity'])\n", (5715, 5749), True, 'import numpy as np\n'), ((6286, 6377), 'tvb.simulator.lab.integrators.HeunStochastic', 'lab.integrators.HeunStochastic', ([], {'noise': 'noise', 'dt': "param_tvb_integrator['sim_resolution']"}), "(noise=noise, dt=param_tvb_integrator[\n 'sim_resolution'])\n", (6316, 6377), True, 'import tvb.simulator.lab as lab\n'), ((8032, 8159), 'tvb.simulator.lab.simulator.Simulator', 'lab.simulator.Simulator', ([], {'model': 'model', 'connectivity': 'connection', 'coupling': 'coupling', 'integrator': 'integrator', 'monitors': 'monitors'}), '(model=model, connectivity=connection, coupling=\n coupling, integrator=integrator, monitors=monitors)\n', (8055, 8159), True, 'import tvb.simulator.lab as lab\n'), ((8313, 8404), 'numpy.save', 'np.save', (["(param_tvb_monitor['path_result'] + '/step_init.npy')", 'simulator.history.buffer'], {}), "(param_tvb_monitor['path_result'] + '/step_init.npy', simulator.\n history.buffer)\n", (8320, 8404), True, 'import numpy as np\n'), ((11440, 11464), 'logging.getLogger', 'logging.getLogger', (['"""tvb"""'], {}), "('tvb')\n", (11457, 11464), False, 'import logging\n'), ((11474, 11516), 'logging.FileHandler', 'logging.FileHandler', (["(path + '/log/tvb.log')"], {}), "(path + '/log/tvb.log')\n", (11493, 11516), False, 'import logging\n'), ((11533, 11606), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (11550, 11606), False, 'import logging\n'), ((16167, 16181), 'mpi4py.MPI.Finalize', 'MPI.Finalize', ([], {}), '()\n', (16179, 16181), False, 'from mpi4py import MPI\n'), ((16626, 16639), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (16636, 16639), False, 'import time\n'), ((16753, 16771), 
'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (16769, 16771), False, 'import sys\n'), ((16783, 16811), 'mpi4py.MPI.COMM_WORLD.Connect', 'MPI.COMM_WORLD.Connect', (['port'], {}), '(port)\n', (16805, 16811), False, 'from mpi4py import MPI\n'), ((16848, 16866), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (16864, 16866), False, 'import sys\n'), ((17081, 17093), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (17091, 17093), False, 'from mpi4py import MPI\n'), ((17343, 17380), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data'], {'dtype': '"""d"""'}), "(data, dtype='d')\n", (17363, 17380), True, 'import numpy as np\n'), ((17422, 17456), 'numpy.array', 'np.array', (['data.shape[0]'], {'dtype': '"""i"""'}), "(data.shape[0], dtype='i')\n", (17430, 17456), True, 'import numpy as np\n'), ((17483, 17509), 'numpy.array', 'np.array', (['times'], {'dtype': '"""d"""'}), "(times, dtype='d')\n", (17491, 17509), True, 'import numpy as np\n'), ((17852, 17864), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (17862, 17864), False, 'from mpi4py import MPI\n'), ((17990, 18012), 'numpy.empty', 'np.empty', (['(2)'], {'dtype': '"""d"""'}), "(2, dtype='d')\n", (17998, 18012), True, 'import numpy as np\n'), ((18138, 18160), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': '"""i"""'}), "(1, dtype='i')\n", (18146, 18160), True, 'import numpy as np\n'), ((18239, 18264), 'numpy.empty', 'np.empty', (['size'], {'dtype': '"""d"""'}), "(size, dtype='d')\n", (18247, 18264), True, 'import numpy as np\n'), ((19861, 19881), 'mpi4py.MPI.Close_port', 'MPI.Close_port', (['port'], {}), '(port)\n', (19875, 19881), False, 'from mpi4py import MPI\n'), ((247, 273), 'logging.getLogger', 'logging.getLogger', (['"""numba"""'], {}), "('numba')\n", (264, 273), False, 'import logging\n'), ((300, 324), 'logging.getLogger', 'logging.getLogger', (['"""tvb"""'], {}), "('tvb')\n", (317, 324), False, 'import logging\n'), ((3476, 3530), 'numpy.array', 'np.array', 
(["param_tvb_model['initial_condition']['C_ee']"], {}), "(param_tvb_model['initial_condition']['C_ee'])\n", (3484, 3530), True, 'import numpy as np\n'), ((3576, 3630), 'numpy.array', 'np.array', (["param_tvb_model['initial_condition']['C_ei']"], {}), "(param_tvb_model['initial_condition']['C_ei'])\n", (3584, 3630), True, 'import numpy as np\n'), ((3676, 3730), 'numpy.array', 'np.array', (["param_tvb_model['initial_condition']['C_ii']"], {}), "(param_tvb_model['initial_condition']['C_ii'])\n", (3684, 3730), True, 'import numpy as np\n'), ((4203, 4268), 'numpy.loadtxt', 'np.loadtxt', (["param_tvb_connection['path_region_labels']"], {'dtype': 'str'}), "(param_tvb_connection['path_region_labels'], dtype=str)\n", (4213, 4268), True, 'import numpy as np\n'), ((4413, 4461), 'numpy.loadtxt', 'np.loadtxt', (["param_tvb_connection['path_centers']"], {}), "(param_tvb_connection['path_centers'])\n", (4423, 4461), True, 'import numpy as np\n'), ((4490, 4502), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4498, 4502), True, 'import numpy as np\n'), ((4638, 4659), 'numpy.transpose', 'np.transpose', (['centers'], {}), '(centers)\n', (4650, 4659), True, 'import numpy as np\n'), ((4751, 4772), 'numpy.array', 'np.array', (['orientation'], {}), '(orientation)\n', (4759, 4772), True, 'import numpy as np\n'), ((4883, 4929), 'numpy.load', 'np.load', (["param_tvb_connection['path_cortical']"], {}), "(param_tvb_connection['path_cortical'])\n", (4890, 4929), True, 'import numpy as np\n'), ((6606, 6804), 'tvb.simulator.lab.monitors.TemporalAverage', 'lab.monitors.TemporalAverage', ([], {'variables_of_interest': "param_tvb_monitor['parameter_TemporalAverage']['variables_of_interest']", 'period': "param_tvb_monitor['parameter_TemporalAverage']['period']"}), "(variables_of_interest=param_tvb_monitor[\n 'parameter_TemporalAverage']['variables_of_interest'], period=\n param_tvb_monitor['parameter_TemporalAverage']['period'])\n", (6634, 6804), True, 'import tvb.simulator.lab as lab\n'), 
((7481, 7566), 'numpy.save', 'np.save', (["(param_tvb_monitor['path_result'] + '/projection.npy')", 'projection_matrix'], {}), "(param_tvb_monitor['path_result'] + '/projection.npy', projection_matrix\n )\n", (7488, 7566), True, 'import numpy as np\n'), ((7815, 7915), 'nest_elephant_tvb.Tvb.modify_tvb.Interface_co_simulation_parallel.Interface_co_simulation', 'Interface_co_simulation', ([], {'id_proxy': "cosim['id_proxy']", 'time_synchronize': "cosim['time_synchronize']"}), "(id_proxy=cosim['id_proxy'], time_synchronize=cosim[\n 'time_synchronize'])\n", (7838, 7915), False, 'from nest_elephant_tvb.Tvb.modify_tvb.Interface_co_simulation_parallel import Interface_co_simulation\n'), ((10963, 10975), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10972, 10975), False, 'import json\n'), ((14012, 14040), 'numpy.empty', 'np.empty', (['(2,)'], {'dtype': 'object'}), '((2,), dtype=object)\n', (14020, 14040), True, 'import numpy as np\n'), ((14058, 14137), 'numpy.rint', 'np.rint', (["((time_data[1] - time_data[0]) / param_tvb_integrator['sim_resolution'])"], {}), "((time_data[1] - time_data[0]) / param_tvb_integrator['sim_resolution'])\n", (14065, 14137), True, 'import numpy as np\n'), ((15453, 15472), 'numpy.array', 'np.array', (['nest_data'], {}), '(nest_data)\n', (15461, 15472), True, 'import numpy as np\n'), ((15536, 15567), 'numpy.concatenate', 'np.concatenate', (['nest_data[:, 1]'], {}), '(nest_data[:, 1])\n', (15550, 15567), True, 'import numpy as np\n'), ((18984, 19002), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (19000, 19002), False, 'import sys\n'), ((19021, 19033), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (19031, 19033), False, 'from mpi4py import MPI\n'), ((19399, 19430), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': '"""d"""'}), "([0.0, 0.0], dtype='d')\n", (19407, 19430), True, 'import numpy as np\n'), ((20047, 20059), 'json.load', 'json.load', (['f'], {}), '(f)\n', (20056, 20059), False, 'import json\n'), ((5804, 5837), 
'numpy.array', 'np.array', (["param_tvb_coupling['a']"], {}), "(param_tvb_coupling['a'])\n", (5812, 5837), True, 'import numpy as np\n'), ((5880, 5893), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (5888, 5893), True, 'import numpy as np\n'), ((6089, 6127), 'numpy.array', 'np.array', (["param_tvb_integrator['nsig']"], {}), "(param_tvb_integrator['nsig'])\n", (6097, 6127), True, 'import numpy as np\n'), ((6518, 6536), 'tvb.simulator.lab.monitors.Raw', 'lab.monitors.Raw', ([], {}), '()\n', (6534, 6536), True, 'import tvb.simulator.lab as lab\n'), ((7583, 7714), 'tvb.simulator.lab.monitors.iEEG.from_file', 'lab.monitors.iEEG.from_file', (["param_tvb_monitor['parameter_SEEG']['path']", "(param_tvb_monitor['path_result'] + '/projection.npy')"], {}), "(param_tvb_monitor['parameter_SEEG']['path'], \n param_tvb_monitor['path_result'] + '/projection.npy')\n", (7610, 7714), True, 'import tvb.simulator.lab as lab\n'), ((12743, 12761), 'numpy.array', 'np.array', (['id_proxy'], {}), '(id_proxy)\n', (12751, 12761), True, 'import numpy as np\n'), ((14154, 14216), 'numpy.rint', 'np.rint', (["(time_data[0] / param_tvb_integrator['sim_resolution'])"], {}), "(time_data[0] / param_tvb_integrator['sim_resolution'])\n", (14161, 14216), True, 'import numpy as np\n'), ((14281, 14325), 'numpy.arange', 'np.arange', (['nb_step_0', '(nb_step_0 + nb_step)', '(1)'], {}), '(nb_step_0, nb_step_0 + nb_step, 1)\n', (14290, 14325), True, 'import numpy as np\n'), ((4322, 4339), 'numpy.dtype', 'np.dtype', (['"""<U128"""'], {}), "('<U128')\n", (4330, 4339), True, 'import numpy as np\n'), ((6968, 7038), 'numpy.array', 'np.array', (["param_tvb_monitor['parameter_Bold']['variables_of_interest']"], {}), "(param_tvb_monitor['parameter_Bold']['variables_of_interest'])\n", (6976, 7038), True, 'import numpy as np\n'), ((7195, 7212), 'tvb.datatypes.sensors.SensorsInternal', 'SensorsInternal', ([], {}), '()\n', (7210, 7212), False, 'from tvb.datatypes.sensors import SensorsInternal\n'), ((14394, 
14414), 'numpy.array', 'np.array', (['data_value'], {}), '(data_value)\n', (14402, 14414), True, 'import numpy as np\n'), ((4703, 4727), 'numpy.mean', 'np.mean', (['centers'], {'axis': '(1)'}), '(centers, axis=1)\n', (4710, 4727), True, 'import numpy as np\n'), ((6021, 6057), 'numpy.array', 'np.array', (["param_tvb_integrator['mu']"], {}), "(param_tvb_integrator['mu'])\n", (6029, 6057), True, 'import numpy as np\n'), ((6145, 6186), 'numpy.array', 'np.array', (["param_tvb_integrator['weights']"], {}), "(param_tvb_integrator['weights'])\n", (6153, 6186), True, 'import numpy as np\n'), ((7392, 7412), 'numpy.expand_dims', 'np.expand_dims', (['i', '(1)'], {}), '(i, 1)\n', (7406, 7412), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 2 14:17:34 2018
@author: jeremiasknoblauch
Description: Create AR(1) processes moving independently with CPs, and one s
contaminated series which is AR(1) + massive errors.
"""
import numpy as np
import scipy
from BVAR_NIG_DPD import BVARNIGDPD
from BVAR_NIG import BVARNIG
from detector import Detector
from cp_probability_model import CpModel
from Evaluation_tool import EvaluationTool
import matplotlib.pyplot as plt
"""STEP 1: Set up the simulation"""
normalize = True
mode = "DPD" #KL, both
K = 50 #number of series
k = 1 #number of contaminated series
T = 600
burn_in = 100
data = np.zeros((T,K,1))
AR_coefs = [np.ones(K) * (-0.5),
np.ones(K) * 0.75, #,
np.ones(K) * -0.7]
levels = [np.ones(K) * 0.3,
np.ones(K) * (-0.25), #,
np.ones(K) * 0.3]
CP_loc = [200,400] #, 400]
contamination_df = 4
contamination_scale = np.sqrt(5)
#T=600 setting: Only 1 CP too many!
#rld model is power_divergence
#RLD alpha is 0.1
#param inf uses DPD
#param alpha is 0.25
#intensity is 100
#shrinkage is 0.05
#alpha param learning: None
"""STEP 2: Run the simulation with contamination for i<k"""
for cp in range(0, len(CP_loc) + 1):
#Retrieve the correct number of obs in segment
if cp == 0:
T_ = CP_loc[0] + burn_in
start = 0
fin = CP_loc[0]
elif cp==len(CP_loc):
T_ = T - CP_loc[cp-1]
start = CP_loc[cp-1]
fin = T #DEBUG: probably wrong.
else:
T_ = CP_loc[cp] - CP_loc[cp-1]
start = CP_loc[cp-1]
fin = CP_loc[cp]
#Generate AR(1)
for i in range(0,K):
np.random.seed(i)
next_AR1 = np.random.normal(0,1,size=T_)
for j in range(1, T_):
next_AR1[j] = next_AR1[j-1]*AR_coefs[cp][i] + next_AR1[j] + levels[cp][i]
#if i < k, do contamination
if i<k:
np.random.seed(i*20)
contam = contamination_scale*scipy.stats.t.rvs(contamination_df, size=T_)
contam[np.where(contam <3)] = 0
next_AR1 = (next_AR1 + contam)
#if first segment, cut off the burn-in
if cp == 0:
next_AR1 = next_AR1[burn_in:]
#add the next AR 1 stream into 'data'
data[start:fin,i,0] = next_AR1
"""STEP 3: Set up analysis parameters"""
S1, S2 = K,1 #S1, S2 give you spatial dimensions
if normalize:
data = (data - np.mean(data))/np.sqrt(np.var(data))
"""STEP 3: Set up the optimization parameters"""
VB_window_size = 200
full_opt_thinning = 20
SGD_approx_goodness = 10
anchor_approx_goodness_SCSG = 25
anchor_approx_goodness_SVRG = 25
alpha_param_opt_t = 0 #don't wait with training
first_full_opt = 10
"""STEP 4: Set up the priors for the model universe's elements"""
#got good performance for T=200, K=2, a_p = 0.4, a_rld = 0.05
#got performance for T = 200, K = 2, a_p = 0.25, a_rld = 0.08, shrink = 0.1, int = 600
#For T=600, K=2 pretty good performance for
#rld model is kullback_leibler
#RLD alpha is 0.25
#param inf uses DPD
#param alpha is 0.1
#intensity is 50
#shrinkage is 0.1
#alpha param learning: None
############
#also good:
#rld model is power_divergence
#RLD alpha is 0.1
#param inf uses DPD
#param alpha is 0.3
#intensity is 100
#shrinkage is 0.1
#alpha param learning: None
############
#also good: (2 cps, but one too early)
#rld model is power_divergence
#RLD alpha is 0.15
#param inf uses DPD
#param alpha is 0.2
#intensity is 100
#shrinkage is 0.1
#alpha param learning: None
###########
#potentially also good, though CPs were at wrong places:
#rld model is power_divergence
#RLD alpha is 0.15
#param inf uses DPD
#param alpha is 0.3
#intensity is 100
#shrinkage is 0.25
#alpha param learning: None
###########
#PERFECT: T = 600, K = 5
#rld model is power_divergence
#RLD alpha is 0.15
#param inf uses DPD
#param alpha is 0.3
#intensity is 100
#shrinkage is 0.1
#alpha param learning: None
###########
#NEAR PERFECT: T = 1000, K = 5
#rld model is power_divergence
#RLD alpha is 0.15
#param inf uses DPD
#param alpha is 0.3
#intensity is 100
#shrinkage is 0.5
#alpha param learning: None
###########
#NEAR PERFECT: T = 600, K = 5
#rld model is kullback_leibler
#RLD alpha is 0.15
#param inf uses DPD
#param alpha is 0.2
#intensity is 100
#shrinkage is 100
#alpha param learning: None
###########
#SAVED VERSION:
#rld model is power_divergence
#RLD alpha is 0.15
#param inf uses DPD
#param alpha is 0.2
#intensity is 100
#shrinkage is 100
#alpha param learning: None
#window = 200, thinning = 20, SGD approx = 10, anchors = 25, first full opt =10
###########
#FOR K = 50: Really good except we get 4 too many CPs in first segment
#rld model is power_divergence
#RLD alpha is 0.15
#param inf uses DPD
#param alpha is 0.1
#intensity is 100
#shrinkage is 100
#alpha param learning: None
#similar for a_p = 0.25
#With a_p = 0.3 or 0.35 we get really good result! (if using KL)
# Prior hyperparameters of the NIG model.
a, b = 3, 5
alpha_param = 0.9         # DPD alpha for parameter inference
alpha_rld = 0.35          # alpha for the run-length distribution
rld = "kullback_leibler"  # alternatives: power_divergence, kullback_leibler
rld_learning = False
param_learning = None     # e.g. "individual"
# Center the prior mean on the data; fixed prior variance scale.
prior_mean_scale, prior_var_scale = np.mean(data), 100
cp_intensity = 100        # changepoint-model intensity
"""STEP 5: Create models"""
model_universe = []
if mode == "DPD" or mode == "both":
model_universe = model_universe + [BVARNIGDPD(
prior_a=a,
prior_b=b, #b,
S1=S1,
S2=S2,
alpha_param = alpha_param,
prior_mean_beta=None,
prior_var_beta=None,
prior_mean_scale=prior_mean_scale, #prior_mean_scale,
prior_var_scale=prior_var_scale,
general_nbh_sequence=[[[]]]*S1*S2,
general_nbh_restriction_sequence = [[0]],
general_nbh_coupling = "weak coupling",
hyperparameter_optimization = "online", #"online", #"online", #"online",
VB_window_size = VB_window_size,
full_opt_thinning = full_opt_thinning,
SGD_batch_size = SGD_approx_goodness,
anchor_batch_size_SCSG = anchor_approx_goodness_SCSG,
anchor_batch_size_SVRG = anchor_approx_goodness_SVRG,
first_full_opt = first_full_opt
)]
if mode == "KL" or mode == "both":
model_universe = model_universe + [BVARNIG(
prior_a = a,
prior_b = b,
S1 = S1,
S2 = S2,
prior_mean_scale = prior_mean_scale,
prior_var_scale = prior_var_scale,
general_nbh_sequence = [[[]]]*S1*S2,
general_nbh_restriction_sequence = [[0]],
hyperparameter_optimization = "online" #"online"
)]
"""STEP 6: Set up the detector from this"""
model_universe = np.array(model_universe)
model_prior = np.array([1.0/len(model_universe)]*len(model_universe))
cp_model = CpModel(cp_intensity)
# Build the changepoint detector and run it over the full data set.
detector = Detector(
    data=data,
    model_universe=model_universe,
    model_prior=model_prior,
    cp_model=cp_model,
    S1=S1,
    S2=S2,
    T=T,
    store_rl=True,
    store_mrl=True,
    trim_type="keep_K",
    threshold=200,
    notifications=25,
    save_performance_indicators=True,
    # run-length model: "kullback_leibler" or "power_divergence"
    generalized_bayes_rld=rld,
    alpha_param_learning=param_learning,
    alpha_param=alpha_param,
    # NOTE(review): literal 100 is passed although alpha_param_opt_t = 0 is
    # set earlier in this script — confirm which value is intended.
    alpha_param_opt_t=100,
    alpha_rld=alpha_rld,
    alpha_rld_learning=rld_learning,
    loss_der_rld_learning="absolute_loss")
detector.run()
"""STEP 7: Make graphing tool"""
EvT = EvaluationTool()
EvT.build_EvaluationTool_via_run_detector(detector)
"""STEP 8: Inspect convergence of the hyperparameters"""
for lag in range(0, len(model_universe)):
plt.plot(np.linspace(1,len(detector.model_universe[lag].a_list),
len(detector.model_universe[lag].a_list)),
np.array(detector.model_universe[lag].a_list))
plt.plot(np.linspace(1,len(detector.model_universe[lag].b_list),
len(detector.model_universe[lag].b_list)),
np.array(detector.model_universe[lag].b_list))
"""STEP 9+: Inspect convergence of alpha-rld"""
if detector.generalized_bayes_rld == "power_divergence" and mode == "DPD":
plt.plot(detector.alpha_list)
for lag in range(0, len(model_universe)):
plt.plot(detector.model_universe[lag].alpha_param_list)
"""STEP 10: Plot the raw data + rld"""
height_ratio =[10,14]
custom_colors = ["blue", "purple"]
fig, ax_array = plt.subplots(2, figsize=(8,5), sharex = True,
gridspec_kw = {'height_ratios':height_ratio})
plt.subplots_adjust(hspace = .35, left = None, bottom = None,
right = None, top = None)
ylabel_coords = [-0.065, 0.5]
#Plot of raw Time Series
EvT.plot_raw_TS(data.reshape(T,S1,S2), indices = [0,1], xlab = None,
show_MAP_CPs = True,
time_range = np.linspace(1,T, T, dtype=int),
print_plt = False,
ylab = "value", ax = ax_array[0],
#all_dates = np.linspace(622 + 1, 1284, 1284 - (622 + 1), dtype = int),
custom_colors_series = ["black"]*4,
custom_colors_CPs = ["blue", "blue"]* 100,
custom_linestyles = ["solid"]*100,
ylab_fontsize = 14,
ylabel_coords = ylabel_coords)
#Run length distribution plot
EvT.plot_run_length_distr(buffer=0, show_MAP_CPs = False,
mark_median = False,
mark_max = True, upper_limit = 1000, print_colorbar = True,
colorbar_location= 'bottom',log_format = True, aspect_ratio = 'auto',
C1=0,C2=1,
time_range = np.linspace(1,
T-2,
T-2, dtype=int),
start = 1, stop = T,
all_dates = None,
#event_time_list=[715 ],
#label_list=["nilometer"], space_to_colorbar = 0.52,
custom_colors = ["blue", "blue"] * 30,
custom_linestyles = ["solid"]*30,
custom_linewidth = 3,
#arrow_colors= ["black"],
#number_fontsize = 14,
#arrow_length = 135,
#arrow_thickness = 3.0,
xlab_fontsize =14,
ylab_fontsize = 14,
#arrows_setleft_indices = [0],
#arrows_setleft_by = [50],
#zero_distance = 0.0,
ax = ax_array[1], figure = fig,
no_transform = True,
date_instructions_formatter = None,
date_instructions_locator = None,
ylabel_coords = ylabel_coords,
xlab = "observation number",
arrow_distance = 25)
"""STEP 11: Plot some performance metrics"""
print("CPs are ", detector.CPs[-2])
print("MSE is", np.mean(detector.MSE), 1.96*scipy.stats.sem(detector.MSE))
print("MAE is", np.mean(detector.MAE), 1.96*scipy.stats.sem(detector.MAE))
print("NLL is", np.mean(detector.negative_log_likelihood),1.96*scipy.stats.sem(detector.MSE))
print("rld model is",detector.generalized_bayes_rld )
print("RLD alpha is", detector.alpha_rld)
print("param inf uses", mode)
print("param alpha is", detector.model_universe[0].alpha_param)
print("intensity is", cp_intensity)
print("shrinkage is", prior_var_scale)
print("alpha param learning:", detector.alpha_param_learning)
#baseline_working_directory = "//Users//jeremiasknoblauch//Documents//OxWaSP"+
# "//BOCPDMS/Code//SpatialBOCD//PaperNIPS"
#results_path = baseline_working_directory + "//KL_K=5_k=1_T=600_2CPs_results_arld=015_ap=02_nolearning_rld_dpd_int=100_shrink=100.txt"
#EvT.store_results_to_HD(results_path)
#fig.savefig(baseline_working_directory + "//well_log" + mode + ".pdf",
# format = "pdf", dpi = 800)
#
#
#NOTE: We need a way to use BVARNIG(DPD) for only fitting a constant!
| [
"numpy.random.seed",
"scipy.stats.t.rvs",
"numpy.ones",
"numpy.mean",
"numpy.random.normal",
"cp_probability_model.CpModel",
"BVAR_NIG.BVARNIG",
"Evaluation_tool.EvaluationTool",
"detector.Detector",
"numpy.linspace",
"numpy.var",
"matplotlib.pyplot.subplots",
"BVAR_NIG_DPD.BVARNIGDPD",
"s... | [((678, 697), 'numpy.zeros', 'np.zeros', (['(T, K, 1)'], {}), '((T, K, 1))\n', (686, 697), True, 'import numpy as np\n'), ((956, 966), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (963, 966), True, 'import numpy as np\n'), ((6971, 6995), 'numpy.array', 'np.array', (['model_universe'], {}), '(model_universe)\n', (6979, 6995), True, 'import numpy as np\n'), ((7077, 7098), 'cp_probability_model.CpModel', 'CpModel', (['cp_intensity'], {}), '(cp_intensity)\n', (7084, 7098), False, 'from cp_probability_model import CpModel\n'), ((7110, 7571), 'detector.Detector', 'Detector', ([], {'data': 'data', 'model_universe': 'model_universe', 'model_prior': 'model_prior', 'cp_model': 'cp_model', 'S1': 'S1', 'S2': 'S2', 'T': 'T', 'store_rl': '(True)', 'store_mrl': '(True)', 'trim_type': '"""keep_K"""', 'threshold': '(200)', 'notifications': '(25)', 'save_performance_indicators': '(True)', 'generalized_bayes_rld': 'rld', 'alpha_param_learning': 'param_learning', 'alpha_param': 'alpha_param', 'alpha_param_opt_t': '(100)', 'alpha_rld': 'alpha_rld', 'alpha_rld_learning': 'rld_learning', 'loss_der_rld_learning': '"""absolute_loss"""'}), "(data=data, model_universe=model_universe, model_prior=model_prior,\n cp_model=cp_model, S1=S1, S2=S2, T=T, store_rl=True, store_mrl=True,\n trim_type='keep_K', threshold=200, notifications=25,\n save_performance_indicators=True, generalized_bayes_rld=rld,\n alpha_param_learning=param_learning, alpha_param=alpha_param,\n alpha_param_opt_t=100, alpha_rld=alpha_rld, alpha_rld_learning=\n rld_learning, loss_der_rld_learning='absolute_loss')\n", (7118, 7571), False, 'from detector import Detector\n'), ((8132, 8148), 'Evaluation_tool.EvaluationTool', 'EvaluationTool', ([], {}), '()\n', (8146, 8148), False, 'from Evaluation_tool import EvaluationTool\n'), ((9090, 9183), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'figsize': '(8, 5)', 'sharex': '(True)', 'gridspec_kw': "{'height_ratios': height_ratio}"}), "(2, figsize=(8, 5), 
sharex=True, gridspec_kw={'height_ratios':\n height_ratio})\n", (9102, 9183), True, 'import matplotlib.pyplot as plt\n'), ((9212, 9290), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.35)', 'left': 'None', 'bottom': 'None', 'right': 'None', 'top': 'None'}), '(hspace=0.35, left=None, bottom=None, right=None, top=None)\n', (9231, 9290), True, 'import matplotlib.pyplot as plt\n'), ((5208, 5221), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (5215, 5221), True, 'import numpy as np\n'), ((8835, 8864), 'matplotlib.pyplot.plot', 'plt.plot', (['detector.alpha_list'], {}), '(detector.alpha_list)\n', (8843, 8864), True, 'import matplotlib.pyplot as plt\n'), ((11156, 11177), 'numpy.mean', 'np.mean', (['detector.MSE'], {}), '(detector.MSE)\n', (11163, 11177), True, 'import numpy as np\n'), ((11231, 11252), 'numpy.mean', 'np.mean', (['detector.MAE'], {}), '(detector.MAE)\n', (11238, 11252), True, 'import numpy as np\n'), ((11306, 11347), 'numpy.mean', 'np.mean', (['detector.negative_log_likelihood'], {}), '(detector.negative_log_likelihood)\n', (11313, 11347), True, 'import numpy as np\n'), ((708, 718), 'numpy.ones', 'np.ones', (['K'], {}), '(K)\n', (715, 718), True, 'import numpy as np\n'), ((741, 751), 'numpy.ones', 'np.ones', (['K'], {}), '(K)\n', (748, 751), True, 'import numpy as np\n'), ((775, 785), 'numpy.ones', 'np.ones', (['K'], {}), '(K)\n', (782, 785), True, 'import numpy as np\n'), ((804, 814), 'numpy.ones', 'np.ones', (['K'], {}), '(K)\n', (811, 814), True, 'import numpy as np\n'), ((833, 843), 'numpy.ones', 'np.ones', (['K'], {}), '(K)\n', (840, 843), True, 'import numpy as np\n'), ((868, 878), 'numpy.ones', 'np.ones', (['K'], {}), '(K)\n', (875, 878), True, 'import numpy as np\n'), ((1695, 1712), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (1709, 1712), True, 'import numpy as np\n'), ((1732, 1763), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': 'T_'}), '(0, 1, size=T_)\n', (1748, 
1763), True, 'import numpy as np\n'), ((8462, 8507), 'numpy.array', 'np.array', (['detector.model_universe[lag].a_list'], {}), '(detector.model_universe[lag].a_list)\n', (8470, 8507), True, 'import numpy as np\n'), ((8660, 8705), 'numpy.array', 'np.array', (['detector.model_universe[lag].b_list'], {}), '(detector.model_universe[lag].b_list)\n', (8668, 8705), True, 'import numpy as np\n'), ((8919, 8974), 'matplotlib.pyplot.plot', 'plt.plot', (['detector.model_universe[lag].alpha_param_list'], {}), '(detector.model_universe[lag].alpha_param_list)\n', (8927, 8974), True, 'import matplotlib.pyplot as plt\n'), ((9497, 9528), 'numpy.linspace', 'np.linspace', (['(1)', 'T', 'T'], {'dtype': 'int'}), '(1, T, T, dtype=int)\n', (9508, 9528), True, 'import numpy as np\n'), ((10231, 10270), 'numpy.linspace', 'np.linspace', (['(1)', '(T - 2)', '(T - 2)'], {'dtype': 'int'}), '(1, T - 2, T - 2, dtype=int)\n', (10242, 10270), True, 'import numpy as np\n'), ((11184, 11213), 'scipy.stats.sem', 'scipy.stats.sem', (['detector.MSE'], {}), '(detector.MSE)\n', (11199, 11213), False, 'import scipy\n'), ((11259, 11288), 'scipy.stats.sem', 'scipy.stats.sem', (['detector.MAE'], {}), '(detector.MAE)\n', (11274, 11288), False, 'import scipy\n'), ((11353, 11382), 'scipy.stats.sem', 'scipy.stats.sem', (['detector.MSE'], {}), '(detector.MSE)\n', (11368, 11382), False, 'import scipy\n'), ((1958, 1980), 'numpy.random.seed', 'np.random.seed', (['(i * 20)'], {}), '(i * 20)\n', (1972, 1980), True, 'import numpy as np\n'), ((2488, 2501), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2495, 2501), True, 'import numpy as np\n'), ((2511, 2523), 'numpy.var', 'np.var', (['data'], {}), '(data)\n', (2517, 2523), True, 'import numpy as np\n'), ((5393, 6010), 'BVAR_NIG_DPD.BVARNIGDPD', 'BVARNIGDPD', ([], {'prior_a': 'a', 'prior_b': 'b', 'S1': 'S1', 'S2': 'S2', 'alpha_param': 'alpha_param', 'prior_mean_beta': 'None', 'prior_var_beta': 'None', 'prior_mean_scale': 'prior_mean_scale', 'prior_var_scale': 
'prior_var_scale', 'general_nbh_sequence': '([[[]]] * S1 * S2)', 'general_nbh_restriction_sequence': '[[0]]', 'general_nbh_coupling': '"""weak coupling"""', 'hyperparameter_optimization': '"""online"""', 'VB_window_size': 'VB_window_size', 'full_opt_thinning': 'full_opt_thinning', 'SGD_batch_size': 'SGD_approx_goodness', 'anchor_batch_size_SCSG': 'anchor_approx_goodness_SCSG', 'anchor_batch_size_SVRG': 'anchor_approx_goodness_SVRG', 'first_full_opt': 'first_full_opt'}), "(prior_a=a, prior_b=b, S1=S1, S2=S2, alpha_param=alpha_param,\n prior_mean_beta=None, prior_var_beta=None, prior_mean_scale=\n prior_mean_scale, prior_var_scale=prior_var_scale, general_nbh_sequence\n =[[[]]] * S1 * S2, general_nbh_restriction_sequence=[[0]],\n general_nbh_coupling='weak coupling', hyperparameter_optimization=\n 'online', VB_window_size=VB_window_size, full_opt_thinning=\n full_opt_thinning, SGD_batch_size=SGD_approx_goodness,\n anchor_batch_size_SCSG=anchor_approx_goodness_SCSG,\n anchor_batch_size_SVRG=anchor_approx_goodness_SVRG, first_full_opt=\n first_full_opt)\n", (5403, 6010), False, 'from BVAR_NIG_DPD import BVARNIGDPD\n'), ((6461, 6703), 'BVAR_NIG.BVARNIG', 'BVARNIG', ([], {'prior_a': 'a', 'prior_b': 'b', 'S1': 'S1', 'S2': 'S2', 'prior_mean_scale': 'prior_mean_scale', 'prior_var_scale': 'prior_var_scale', 'general_nbh_sequence': '([[[]]] * S1 * S2)', 'general_nbh_restriction_sequence': '[[0]]', 'hyperparameter_optimization': '"""online"""'}), "(prior_a=a, prior_b=b, S1=S1, S2=S2, prior_mean_scale=\n prior_mean_scale, prior_var_scale=prior_var_scale, general_nbh_sequence\n =[[[]]] * S1 * S2, general_nbh_restriction_sequence=[[0]],\n hyperparameter_optimization='online')\n", (6468, 6703), False, 'from BVAR_NIG import BVARNIG\n'), ((2020, 2064), 'scipy.stats.t.rvs', 'scipy.stats.t.rvs', (['contamination_df'], {'size': 'T_'}), '(contamination_df, size=T_)\n', (2037, 2064), False, 'import scipy\n'), ((2084, 2104), 'numpy.where', 'np.where', (['(contam < 3)'], {}), '(contam < 
3)\n', (2092, 2104), True, 'import numpy as np\n')] |
import sys
from pathlib import Path
import lightgbm as lgb
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin, clone
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import ElasticNet, Lasso
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
# Make the repository root importable so that sibling packages
# (e.g. ``features`` used in the ``__main__`` block) resolve when
# this file is run as a script.
parent_dir = str(Path(__file__).parent.parent.resolve())
sys.path.append(parent_dir)
class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
    """Ensemble regressor that averages the predictions of several models.

    Every base estimator is cloned before fitting, so the estimators passed
    in at construction time are left untouched.
    """
    def __init__(self, models):
        self.models = models
    def fit(self, X, y):
        """Fit a fresh clone of every base model on ``(X, y)``."""
        self.models_ = [clone(estimator) for estimator in self.models]
        for estimator in self.models_:
            estimator.fit(X, y)
        return self
    def predict(self, X):
        """Return the row-wise mean of all fitted base-model predictions."""
        stacked = np.column_stack([m.predict(X) for m in self.models_])
        return np.mean(stacked, axis=1)
class StackingAveragedModels(
    BaseEstimator,
    RegressorMixin,
    TransformerMixin
):
    """Stacking ensemble: out-of-fold base-model predictions train a meta-model."""
    def __init__(self, base_models, meta_model, n_folds=5):
        self.base_models = base_models
        self.meta_model = meta_model
        self.n_folds = n_folds
    # Fit clones of the original models on the data
    def fit(self, X, y):
        self.base_models_ = [list() for x in self.base_models]
        self.meta_model_ = clone(self.meta_model)
        kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)
        # Train the cloned base models and build out-of-fold predictions;
        # these are needed to train the cloned meta-model
        out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
        for i, model in enumerate(self.base_models):
            for train_index, holdout_index in kfold.split(X, y):
                instance = clone(model)
                self.base_models_[i].append(instance)
                instance.fit(X[train_index], y[train_index])
                y_pred = instance.predict(X[holdout_index])
                out_of_fold_predictions[holdout_index, i] = y_pred
        # Train the cloned meta-model using the out-of-fold predictions
        # as new features
        self.meta_model_.fit(out_of_fold_predictions, y)
        return self
    # Use the average of the base models' predictions on the test data
    # as meta-features for the final prediction
    def predict(self, X):
        meta_features = np.column_stack([
            np.column_stack([
                model.predict(X) for model in base_models
            ]).mean(axis=1)
            for base_models in self.base_models_
        ])
        return self.meta_model_.predict(meta_features)
def rmsle(y, y_pred):
    """Return the root-mean-squared error between ``y`` and ``y_pred``.

    NOTE(review): despite the name, this is plain RMSE; it equals RMSLE
    only if the targets were already log-transformed upstream (the pipeline
    applies ``np.expm1`` to predictions, which suggests they are) — confirm.
    """
    return np.sqrt(mean_squared_error(y, y_pred))
def rmsle_cv(
    train: pd.DataFrame,
    y_train: pd.DataFrame,
    model: any,
    n_folds: int = 5
) -> np.ndarray:
    """Cross-validated RMSE of ``model`` on ``train`` / ``y_train``.

    Args:
        train: Training features (``.values`` is passed to the estimator).
        y_train: Target values.
        model: Any scikit-learn compatible estimator.
        n_folds: Number of cross-validation folds.

    Returns:
        Array of per-fold RMSE scores.
    """
    # BUG FIX: the original wrote
    #     kf = KFold(...).get_n_splits(train.values)
    # but ``get_n_splits`` returns the *integer* n_folds, so ``cv`` received
    # an int and the shuffle/random_state settings were silently discarded.
    # Pass the KFold splitter itself so shuffling and the seed take effect.
    kf = KFold(
        n_folds,
        shuffle=True,
        random_state=42
    )
    rmse = np.sqrt(
        -cross_val_score(
            model,
            train.values,
            y_train,
            scoring='neg_mean_squared_error',
            cv=kf
        )
    )
    return rmse
def train_model_pipe(
    train: pd.DataFrame,
    y_train: np.ndarray,
    test: pd.DataFrame,
    run_average_base_models: bool = False,
    run_stacked_average: bool = False
) -> np.ndarray:
    """Build the model zoo and either cross-validate an ensemble or predict.

    If either flag is set, returns the per-fold CV RMSE scores of the chosen
    ensemble; otherwise fits the stacked ensemble plus XGBoost/LightGBM and
    returns a weighted blend of their ``expm1``-transformed test predictions.
    """
    # LASSO Regression
    # TODO: review LASSO
    lasso = make_pipeline(
        RobustScaler(),
        Lasso(alpha=0.0005, random_state=1)
    )
    # Elastic Net Regression
    # TODO: study ElasticNet
    ENet = make_pipeline(
        RobustScaler(),
        ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3)
    )
    # Kernel Ridge Regression
    # TODO: study Kernel Ridge Regression
    KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
    # Gradient Boosting Regression
    # TODO: why is the huber loss robust to outliers?
    GBoost = GradientBoostingRegressor(
        n_estimators=3000,
        learning_rate=0.05,
        max_depth=4,
        max_features='sqrt',
        min_samples_leaf=15,
        min_samples_split=10,
        loss='huber', # robust to outliers
        random_state=5
    )
    # XGBoost
    # NOTE: aren't these parameter values rather arbitrary (hand-tuned)?
    # TODO: what do reg_alpha and reg_lambda actually control?
    model_xgb = xgb.XGBRegressor(
        colsample_bytree=0.4603,
        gamma=0.0468,
        learning_rate=0.05,
        max_depth=3,
        min_child_weight=1.7817,
        n_estimators=2200,
        reg_alpha=0.4640,
        reg_lambda=0.8571,
        subsample=0.5213,
        random_state=7,
        nthread=-1
    )
    # LightGBM
    # TODO: read the paper
    model_lgb = lgb.LGBMRegressor(
        objective='regression',
        num_leaves=5,
        learning_rate=0.05,
        n_estimators=720,
        max_bin=55,
        bagging_fraction=0.8,
        bagging_freq=5,
        feature_fraction=0.2319,
        feature_fraction_seed=9,
        bagging_seed=9,
        min_data_in_leaf=6,
        min_sum_hessian_in_leaf=11
    )
    run_model = None
    if run_average_base_models:
        run_model = AveragingModels(models=(ENet, GBoost, KRR, lasso))
    if run_stacked_average:
        # 0.1081 (0.0073)
        run_model = StackingAveragedModels(
            base_models=(ENet, GBoost, KRR),
            meta_model=lasso
        )
    if run_model is not None:
        # CV-scoring path: return per-fold RMSE of the selected ensemble.
        score = rmsle_cv(
            train,
            y_train,
            run_model
        )
    else:
        # Prediction path: fit everything and blend test-set predictions.
        stacked_averaged_models = StackingAveragedModels(
            base_models=(ENet, GBoost, KRR),
            meta_model=lasso
        )
        stacked_averaged_models.fit(train.values, y_train)
        stacked_pred = np.expm1(stacked_averaged_models.predict(test.values))
        # NOTE(review): xgb is fit on the DataFrame while the others use
        # ``.values`` — presumably equivalent here, but worth confirming.
        model_xgb.fit(train, y_train)
        xgb_pred = np.expm1(model_xgb.predict(test))
        model_lgb.fit(train, y_train)
        lgb_pred = np.expm1(model_lgb.predict(test.values))
        score = stacked_pred*0.70+xgb_pred*0.15+lgb_pred*0.15
    return score
if __name__ == '__main__':
    from features.build_features import clean_for_reg
    # Paths are relative to this script's expected working directory.
    raw_d = Path('../../data/raw')
    repo_d = Path('../../reports')
    repo_fig_d = Path('../../reports/figures')
    # Toggle between CV-scoring the simple averaging ensemble (True)
    # and the stacking ensemble (False).
    run_average_base_models = False
    df_train = pd.read_csv(raw_d / 'train.csv')
    df_test = pd.read_csv(raw_d / 'test.csv')
    test_ID = df_test['Id']
    if run_average_base_models:
        df_train, y_train, df_test = clean_for_reg(df_train, df_test)
        score = train_model_pipe(
            df_train,
            y_train,
            df_test,
            run_average_base_models=True
        )
        print(
            'Averaged base models score:'
            f'{score.mean():.4f}({score.std():.4f})'
        )
    else:
        df_train, y_train, df_test = clean_for_reg(df_train, df_test)
        score = train_model_pipe(
            df_train,
            y_train,
            df_test,
            run_stacked_average=True
        )
        print(
            'Stacking Averaged models score:'
            f'{score.mean():.4f}({score.std():.4f})'
        )
| [
"sys.path.append",
"sklearn.kernel_ridge.KernelRidge",
"pandas.read_csv",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.preprocessing.RobustScaler",
"sklearn.linear_model.ElasticNet",
"sklearn.model_selection.cross_val_score",
"sklearn.model_selection.KFold",
"pathlib.Path",
"numpy.mean",... | [((606, 633), 'sys.path.append', 'sys.path.append', (['parent_dir'], {}), '(parent_dir)\n', (621, 633), False, 'import sys\n'), ((3780, 3844), 'sklearn.kernel_ridge.KernelRidge', 'KernelRidge', ([], {'alpha': '(0.6)', 'kernel': '"""polynomial"""', 'degree': '(2)', 'coef0': '(2.5)'}), "(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)\n", (3791, 3844), False, 'from sklearn.kernel_ridge import KernelRidge\n'), ((3930, 4111), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'n_estimators': '(3000)', 'learning_rate': '(0.05)', 'max_depth': '(4)', 'max_features': '"""sqrt"""', 'min_samples_leaf': '(15)', 'min_samples_split': '(10)', 'loss': '"""huber"""', 'random_state': '(5)'}), "(n_estimators=3000, learning_rate=0.05, max_depth=\n 4, max_features='sqrt', min_samples_leaf=15, min_samples_split=10, loss\n ='huber', random_state=5)\n", (3955, 4111), False, 'from sklearn.ensemble import GradientBoostingRegressor\n'), ((4275, 4498), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {'colsample_bytree': '(0.4603)', 'gamma': '(0.0468)', 'learning_rate': '(0.05)', 'max_depth': '(3)', 'min_child_weight': '(1.7817)', 'n_estimators': '(2200)', 'reg_alpha': '(0.464)', 'reg_lambda': '(0.8571)', 'subsample': '(0.5213)', 'random_state': '(7)', 'nthread': '(-1)'}), '(colsample_bytree=0.4603, gamma=0.0468, learning_rate=0.05,\n max_depth=3, min_child_weight=1.7817, n_estimators=2200, reg_alpha=\n 0.464, reg_lambda=0.8571, subsample=0.5213, random_state=7, nthread=-1)\n', (4291, 4498), True, 'import xgboost as xgb\n'), ((4632, 4901), 'lightgbm.LGBMRegressor', 'lgb.LGBMRegressor', ([], {'objective': '"""regression"""', 'num_leaves': '(5)', 'learning_rate': '(0.05)', 'n_estimators': '(720)', 'max_bin': '(55)', 'bagging_fraction': '(0.8)', 'bagging_freq': '(5)', 'feature_fraction': '(0.2319)', 'feature_fraction_seed': '(9)', 'bagging_seed': '(9)', 'min_data_in_leaf': '(6)', 'min_sum_hessian_in_leaf': '(11)'}), "(objective='regression', 
num_leaves=5, learning_rate=0.05,\n n_estimators=720, max_bin=55, bagging_fraction=0.8, bagging_freq=5,\n feature_fraction=0.2319, feature_fraction_seed=9, bagging_seed=9,\n min_data_in_leaf=6, min_sum_hessian_in_leaf=11)\n", (4649, 4901), True, 'import lightgbm as lgb\n'), ((6085, 6107), 'pathlib.Path', 'Path', (['"""../../data/raw"""'], {}), "('../../data/raw')\n", (6089, 6107), False, 'from pathlib import Path\n'), ((6121, 6142), 'pathlib.Path', 'Path', (['"""../../reports"""'], {}), "('../../reports')\n", (6125, 6142), False, 'from pathlib import Path\n'), ((6160, 6189), 'pathlib.Path', 'Path', (['"""../../reports/figures"""'], {}), "('../../reports/figures')\n", (6164, 6189), False, 'from pathlib import Path\n'), ((6243, 6275), 'pandas.read_csv', 'pd.read_csv', (["(raw_d / 'train.csv')"], {}), "(raw_d / 'train.csv')\n", (6254, 6275), True, 'import pandas as pd\n'), ((6290, 6321), 'pandas.read_csv', 'pd.read_csv', (["(raw_d / 'test.csv')"], {}), "(raw_d / 'test.csv')\n", (6301, 6321), True, 'import pandas as pd\n'), ((1097, 1125), 'numpy.mean', 'np.mean', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (1104, 1125), True, 'import numpy as np\n'), ((1531, 1553), 'sklearn.base.clone', 'clone', (['self.meta_model'], {}), '(self.meta_model)\n', (1536, 1553), False, 'from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin, clone\n'), ((1570, 1630), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.n_folds', 'shuffle': '(True)', 'random_state': '(156)'}), '(n_splits=self.n_folds, shuffle=True, random_state=156)\n', (1575, 1630), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((2719, 2748), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y', 'y_pred'], {}), '(y, y_pred)\n', (2737, 2748), False, 'from sklearn.metrics import mean_squared_error\n'), ((3468, 3482), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (3480, 3482), False, 'from sklearn.preprocessing 
import RobustScaler\n'), ((3492, 3527), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': '(0.0005)', 'random_state': '(1)'}), '(alpha=0.0005, random_state=1)\n', (3497, 3527), False, 'from sklearn.linear_model import ElasticNet, Lasso\n'), ((3617, 3631), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (3629, 3631), False, 'from sklearn.preprocessing import RobustScaler\n'), ((3641, 3695), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'alpha': '(0.0005)', 'l1_ratio': '(0.9)', 'random_state': '(3)'}), '(alpha=0.0005, l1_ratio=0.9, random_state=3)\n', (3651, 3695), False, 'from sklearn.linear_model import ElasticNet, Lasso\n'), ((6420, 6452), 'features.build_features.clean_for_reg', 'clean_for_reg', (['df_train', 'df_test'], {}), '(df_train, df_test)\n', (6433, 6452), False, 'from features.build_features import clean_for_reg\n'), ((6769, 6801), 'features.build_features.clean_for_reg', 'clean_for_reg', (['df_train', 'df_test'], {}), '(df_train, df_test)\n', (6782, 6801), False, 'from features.build_features import clean_for_reg\n'), ((833, 841), 'sklearn.base.clone', 'clone', (['x'], {}), '(x)\n', (838, 841), False, 'from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin, clone\n'), ((2881, 2926), 'sklearn.model_selection.KFold', 'KFold', (['n_folds'], {'shuffle': '(True)', 'random_state': '(42)'}), '(n_folds, shuffle=True, random_state=42)\n', (2886, 2926), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((3013, 3104), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'train.values', 'y_train'], {'scoring': '"""neg_mean_squared_error"""', 'cv': 'kf'}), "(model, train.values, y_train, scoring=\n 'neg_mean_squared_error', cv=kf)\n", (3028, 3104), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((1933, 1945), 'sklearn.base.clone', 'clone', (['model'], {}), '(model)\n', (1938, 1945), False, 'from sklearn.base import BaseEstimator, RegressorMixin, 
TransformerMixin, clone\n'), ((566, 580), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (570, 580), False, 'from pathlib import Path\n')] |
'''
Module providing `DictImportExport` and `PandasImportExport` (requiring a
working installation of pandas).
'''
import numpy as np
from .importexport import ImportExport
class DictImportExport(ImportExport):
    '''
    An importer/exporter for variables in format of dict of numpy arrays.
    '''
    @property
    def name(self):
        return "dict"
    @staticmethod
    def export_data(group, variables, units=True, level=0):
        # Copy each requested state variable into an independent numpy array.
        # (A plain loop is used deliberately: ``level`` is frame-sensitive,
        # and a comprehension would add a stack frame.)
        exported = {}
        for name in variables:
            values = group.state(name, use_units=units, level=level + 1)
            exported[name] = np.array(values, copy=True, subok=True)
        return exported
    @staticmethod
    def import_data(group, data, units=True, level=0):
        # Write each provided array into the matching state variable,
        # refusing to overwrite read-only variables.
        for name, values in data.items():
            if getattr(group.variables[name], 'read_only'):
                raise TypeError('Variable {} is read-only.'.format(name))
            state = group.state(name, use_units=units, level=level + 1)
            state[:] = values
class PandasImportExport(ImportExport):
    '''
    An importer/exporter for variables in pandas DataFrame format.
    '''
    @property
    def name(self):
        return "pandas"
    @staticmethod
    def export_data(group, variables, units=True, level=0):
        """Return the requested state variables as a ``pandas.DataFrame``.

        Raises ``NotImplementedError`` if ``units`` is True (DataFrames
        hold plain values) and ``ImportError`` if pandas is unavailable.
        """
        # pandas is not a default brian2 dependency, only import it here
        try:
            import pandas as pd
        except ImportError as ex:
            raise ImportError('Exporting to pandas needs a working installation'
                              ' of pandas. Importing pandas failed: ' + str(ex))
        if units:
            raise NotImplementedError('Units not supported when exporting to '
                                      'pandas data frame')
        # we take advantage of the already implemented exporter
        data = DictImportExport.export_data(group, variables,
                                            units=units, level=level)
        pandas_data = pd.DataFrame(data)
        return pandas_data
    @staticmethod
    def import_data(group, data, units=True, level=0):
        """Set the group's state variables from a ``pandas.DataFrame``.

        Each column is written to the variable of the same name; read-only
        variables raise ``TypeError``.
        """
        # pandas is not a default brian2 dependency, only import it here
        try:
            import pandas as pd
        except ImportError as ex:
            # BUG FIX: the original message was copy-pasted from export_data
            # and incorrectly said "Exporting to pandas".
            raise ImportError('Importing from pandas needs a working '
                              'installation of pandas. Importing pandas '
                              'failed: ' + str(ex))
        if units:
            raise NotImplementedError('Units not supported when importing from '
                                      'pandas data frame')
        colnames = data.columns
        array_data = data.values
        for e, colname in enumerate(colnames):
            if getattr(group.variables[colname], 'read_only'):
                raise TypeError('Variable {} is read-only.'.format(colname))
            state = group.state(colname, use_units=units, level=level+1)
            # squeeze drops the trailing singleton axis of a 2-D column slice
            state[:] = np.squeeze(array_data[:, e])
| [
"pandas.DataFrame",
"numpy.squeeze"
] | [((1960, 1978), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1972, 1978), True, 'import pandas as pd\n'), ((2900, 2928), 'numpy.squeeze', 'np.squeeze', (['array_data[:, e]'], {}), '(array_data[:, e])\n', (2910, 2928), True, 'import numpy as np\n')] |
# parallelize_model
# Script designed to parallelize trainamodel.py, especially for
# purposes of grid search to optimize parameters.
# This version has a couple of minor changes to make it work for page models
import csv
from multiprocessing import Pool
import matplotlib.pyplot as plt
import trainapagemodel as tmod
import numpy as np
def gridsearch(metapath, sourcedir, k, feature_start, feature_end, feature_inc, c_start, c_end, c_num, outpath):
    """Grid-search (feature count x regularization constant) for a page model.

    Builds one work tuple per grid cell, evaluates them in parallel with
    ``tmod.model_gridtuple``, appends the metrics to ``outpath`` as CSV, and
    plots a heatmap of smoothed accuracy over the grid.
    """
    metadata, allpageIDs, docids = tmod.get_page_metadata(metapath)
    vocabulary, countdict, id2group = tmod.get_vocabulary_and_counts_4pages(metadata, docids, sourcedir, feature_end)
    print(vocabulary[0:100])
    grid = []
    xaxis = []
    # yaxis (the c values) is identical for every featurecount, so it is
    # only populated on the first pass of the outer loop.
    ymade = False
    yaxis = []
    for featurecount in range(feature_start, feature_end, feature_inc):
        xaxis.append(featurecount)
        for c in np.linspace(c_start, c_end, c_num):
            if not ymade:
                yaxis.append(c)
            vocab_subset = vocabulary[0 : featurecount]
            gridtuple = (vocab_subset, allpageIDs, docids, id2group, countdict, k, c, featurecount, metadata)
            grid.append(gridtuple)
        ymade = True
    print(len(grid))
    print(yaxis)
    print(xaxis)
    # Fan the grid cells out across worker processes.
    pool = Pool(processes = 10)
    res = pool.map_async(tmod.model_gridtuple, grid)
    res.wait()
    resultlist = res.get()
    assert len(resultlist) == len(grid)
    pool.close()
    pool.join()
    xlen = len(xaxis)
    ylen = len(yaxis)
    matrix = np.zeros((xlen, ylen))
    # Append one CSV row per grid cell and fill the accuracy heatmap.
    with open(outpath, mode = 'a', encoding = 'utf-8') as f:
        writer = csv.writer(f)
        for result in resultlist:
            k, c, featurecount, accuracy, precision, recall, smaccuracy, smoothedpredictions, predictions, reallabels = result
            f1 = 2 * (precision * recall) / (precision + recall)
            writer.writerow([featurecount, c, accuracy, smaccuracy, precision, recall, f1])
            # NOTE(review): ``c in yaxis`` is a float-equality membership test;
            # it works here because the same linspace values round-trip through
            # the worker unchanged — confirm if results ever serialize floats.
            if featurecount in xaxis and c in yaxis:
                x = xaxis.index(featurecount)
                y = yaxis.index(c)
                matrix[x, y] = smaccuracy
    plt.rcParams["figure.figsize"] = [9.0, 6.0]
    plt.matshow(matrix, origin = 'lower', cmap = plt.cm.YlOrRd)
    # Report the grid cell with the best smoothed accuracy.
    coords = np.unravel_index(matrix.argmax(), matrix.shape)
    print(coords)
    print(xaxis[coords[0]], yaxis[coords[1]])
    plt.show()
if __name__ == '__main__':
    # Example run: 5-fold CV, feature counts 70..115 step 5, ten
    # regularization constants between 0.043 and 0.106.
    gridsearch('page/fic.csv', '/Users/tunder/work/pagedata', 5, 70, 120, 5, 0.043, 0.106, 10, 'page/pagegrid.csv')
| [
"matplotlib.pyplot.show",
"csv.writer",
"trainapagemodel.get_vocabulary_and_counts_4pages",
"matplotlib.pyplot.matshow",
"numpy.zeros",
"trainapagemodel.get_page_metadata",
"numpy.linspace",
"multiprocessing.Pool"
] | [((489, 521), 'trainapagemodel.get_page_metadata', 'tmod.get_page_metadata', (['metapath'], {}), '(metapath)\n', (511, 521), True, 'import trainapagemodel as tmod\n'), ((560, 639), 'trainapagemodel.get_vocabulary_and_counts_4pages', 'tmod.get_vocabulary_and_counts_4pages', (['metadata', 'docids', 'sourcedir', 'feature_end'], {}), '(metadata, docids, sourcedir, feature_end)\n', (597, 639), True, 'import trainapagemodel as tmod\n'), ((1245, 1263), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(10)'}), '(processes=10)\n', (1249, 1263), False, 'from multiprocessing import Pool\n'), ((1495, 1517), 'numpy.zeros', 'np.zeros', (['(xlen, ylen)'], {}), '((xlen, ylen))\n', (1503, 1517), True, 'import numpy as np\n'), ((2160, 2215), 'matplotlib.pyplot.matshow', 'plt.matshow', (['matrix'], {'origin': '"""lower"""', 'cmap': 'plt.cm.YlOrRd'}), "(matrix, origin='lower', cmap=plt.cm.YlOrRd)\n", (2171, 2215), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2359, 2361), True, 'import matplotlib.pyplot as plt\n'), ((859, 893), 'numpy.linspace', 'np.linspace', (['c_start', 'c_end', 'c_num'], {}), '(c_start, c_end, c_num)\n', (870, 893), True, 'import numpy as np\n'), ((1597, 1610), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1607, 1610), False, 'import csv\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import warnings
from collections import OrderedDict, defaultdict
import json_tricks as json
import numpy as np
from ....core.post_processing import oks_nms, soft_oks_nms
from ...builder import DATASETS
from ..base import Kpt2dSviewRgbVidTopDownDataset
# ``poseval`` is an optional dependency; record whether it is available
# (presumably checked before running PoseTrack evaluation — confirm against
# the evaluate method, which is outside this view).
try:
    from poseval import eval_helpers
    from poseval.evaluateAP import evaluateAP
    has_poseval = True
except (ImportError, ModuleNotFoundError):
    has_poseval = False
@DATASETS.register_module()
class TopDownPoseTrack18VideoDataset(Kpt2dSviewRgbVidTopDownDataset):
    """PoseTrack18 dataset for top-down pose estimation.
    `Posetrack: A benchmark for human pose estimation and tracking' CVPR'2018
    More details can be found in the `paper
    <https://arxiv.org/abs/1710.10000>`_ .
    The dataset loads raw features and apply specified transforms
    to return a dict containing the image tensors and other information.
    PoseTrack2018 keypoint indexes::
        0: 'nose',
        1: 'head_bottom',
        2: 'head_top',
        3: 'left_ear',
        4: 'right_ear',
        5: 'left_shoulder',
        6: 'right_shoulder',
        7: 'left_elbow',
        8: 'right_elbow',
        9: 'left_wrist',
        10: 'right_wrist',
        11: 'left_hip',
        12: 'right_hip',
        13: 'left_knee',
        14: 'right_knee',
        15: 'left_ankle',
        16: 'right_ankle'
    Args:
        ann_file (str): Path to the annotation file.
        img_prefix (str): Path to a directory where videos/images are held.
            Default: None.
        data_cfg (dict): config
        pipeline (list[dict | callable]): A sequence of data transforms.
        dataset_info (DatasetInfo): A class containing all dataset info.
        test_mode (bool): Store True when building test or
            validation dataset. Default: False.
        ph_fill_len (int): The length of the placeholder to fill in the
            image filenames, default: 6 in PoseTrack18.
    """
    def __init__(self,
                 ann_file,
                 img_prefix,
                 data_cfg,
                 pipeline,
                 dataset_info=None,
                 test_mode=False,
                 ph_fill_len=6):
        super().__init__(
            ann_file,
            img_prefix,
            data_cfg,
            pipeline,
            dataset_info=dataset_info,
            test_mode=test_mode)
        # Bounding-box source and post-processing (NMS / thresholds) options.
        self.use_gt_bbox = data_cfg['use_gt_bbox']
        self.bbox_file = data_cfg['bbox_file']
        self.det_bbox_thr = data_cfg.get('det_bbox_thr', 0.0)
        self.use_nms = data_cfg.get('use_nms', True)
        self.soft_nms = data_cfg['soft_nms']
        self.nms_thr = data_cfg['nms_thr']
        self.oks_thr = data_cfg['oks_thr']
        self.vis_thr = data_cfg['vis_thr']
        # Per-frame weights; test-time weights apply when built in test mode.
        self.frame_weight_train = data_cfg['frame_weight_train']
        self.frame_weight_test = data_cfg['frame_weight_test']
        self.frame_weight = self.frame_weight_test \
            if self.test_mode else self.frame_weight_train
        self.ph_fill_len = ph_fill_len
        # select the frame indices
        # Supporting-frame selection: fixed index lists take precedence over
        # random sampling from `frame_index_range` (training only).
        self.frame_index_rand = data_cfg.get('frame_index_rand', True)
        self.frame_index_range = data_cfg.get('frame_index_range', [-2, 2])
        self.num_adj_frames = data_cfg.get('num_adj_frames', 1)
        self.frame_indices_train = data_cfg.get('frame_indices_train', None)
        self.frame_indices_test = data_cfg.get('frame_indices_test',
                                               [-2, -1, 0, 1, 2])
        if self.frame_indices_train is not None:
            self.frame_indices_train.sort()
        self.frame_indices_test.sort()
        self.db = self._get_db()
        print(f'=> num_images: {self.num_images}')
        print(f'=> load {len(self.db)} samples')
    def _get_db(self):
        """Load dataset."""
        if (not self.test_mode) or self.use_gt_bbox:
            # use ground truth bbox
            gt_db = self._load_coco_keypoint_annotations()
        else:
            # use bbox from detection
            gt_db = self._load_posetrack_person_detection_results()
        return gt_db
    def _load_coco_keypoint_annotations(self):
        """Ground truth bbox and keypoints."""
        gt_db = []
        for img_id in self.img_ids:
            gt_db.extend(self._load_coco_keypoint_annotation_kernel(img_id))
        return gt_db
    def _load_coco_keypoint_annotation_kernel(self, img_id):
        """load annotation from COCOAPI.
        Note:
            bbox:[x1, y1, w, h]
        Args:
            img_id: coco image id
        Returns:
            dict: db entry
        """
        img_ann = self.coco.loadImgs(img_id)[0]
        width = img_ann['width']
        height = img_ann['height']
        num_joints = self.ann_info['num_joints']
        file_name = img_ann['file_name']
        nframes = int(img_ann['nframes'])
        frame_id = int(img_ann['frame_id'])
        ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)
        objs = self.coco.loadAnns(ann_ids)
        # sanitize bboxes
        valid_objs = []
        for obj in objs:
            if 'bbox' not in obj:
                continue
            x, y, w, h = obj['bbox']
            # Clip the box to the image and drop degenerate (zero-area) boxes.
            x1 = max(0, x)
            y1 = max(0, y)
            x2 = min(width - 1, x1 + max(0, w - 1))
            y2 = min(height - 1, y1 + max(0, h - 1))
            if ('area' not in obj or obj['area'] > 0) and x2 > x1 and y2 > y1:
                obj['clean_bbox'] = [x1, y1, x2 - x1, y2 - y1]
                valid_objs.append(obj)
        objs = valid_objs
        bbox_id = 0
        rec = []
        for obj in objs:
            # Skip annotations without any labelled keypoints.
            if 'keypoints' not in obj:
                continue
            if max(obj['keypoints']) == 0:
                continue
            if 'num_keypoints' in obj and obj['num_keypoints'] == 0:
                continue
            joints_3d = np.zeros((num_joints, 3), dtype=np.float32)
            joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)
            keypoints = np.array(obj['keypoints']).reshape(-1, 3)
            joints_3d[:, :2] = keypoints[:, :2]
            # COCO-style visibility flag (0/1/2) is clamped to {0, 1}.
            joints_3d_visible[:, :2] = np.minimum(1, keypoints[:, 2:3])
            center, scale = self._xywh2cs(*obj['clean_bbox'][:4])
            # First entry is the current (key) frame; supporting frames follow.
            image_files = []
            cur_image_file = os.path.join(self.img_prefix,
                                          self.id2name[img_id])
            image_files.append(cur_image_file)
            # "images/val/012834_mpii_test/000000.jpg" -->> "000000.jpg"
            cur_image_name = file_name.split('/')[-1]
            ref_idx = int(cur_image_name.replace('.jpg', ''))
            # select the frame indices
            if not self.test_mode and self.frame_indices_train is not None:
                indices = self.frame_indices_train
            elif not self.test_mode and self.frame_index_rand:
                low, high = self.frame_index_range
                indices = np.random.randint(low, high + 1, self.num_adj_frames)
            else:
                indices = self.frame_indices_test
            for index in indices:
                # Index 0 is the key frame itself, already appended above.
                if self.test_mode and index == 0:
                    continue
                # the supporting frame index
                support_idx = ref_idx + index
                support_idx = np.clip(support_idx, 0, nframes - 1)
                sup_image_file = cur_image_file.replace(
                    cur_image_name,
                    str(support_idx).zfill(self.ph_fill_len) + '.jpg')
                if os.path.exists(sup_image_file):
                    image_files.append(sup_image_file)
                else:
                    # Fall back to the key frame if the neighbour is missing.
                    warnings.warn(f'{sup_image_file} does not exist, '
                                  f'use {cur_image_file} instead.')
                    image_files.append(cur_image_file)
            rec.append({
                'image_file': image_files,
                'center': center,
                'scale': scale,
                'bbox': obj['clean_bbox'][:4],
                'rotation': 0,
                'joints_3d': joints_3d,
                'joints_3d_visible': joints_3d_visible,
                'dataset': self.dataset_name,
                'bbox_score': 1,
                'bbox_id': bbox_id,
                'nframes': nframes,
                'frame_id': frame_id,
                'frame_weight': self.frame_weight
            })
            bbox_id = bbox_id + 1
        return rec
    def _load_posetrack_person_detection_results(self):
        """Load Posetrack person detection results.
        Only in test mode.
        """
        num_joints = self.ann_info['num_joints']
        all_boxes = None
        with open(self.bbox_file, 'r') as f:
            all_boxes = json.load(f)
        if not all_boxes:
            raise ValueError('=> Load %s fail!' % self.bbox_file)
        print(f'=> Total boxes: {len(all_boxes)}')
        kpt_db = []
        bbox_id = 0
        for det_res in all_boxes:
            # Keep only person detections (category_id 1) above the
            # score threshold.
            if det_res['category_id'] != 1:
                continue
            score = det_res['score']
            if score < self.det_bbox_thr:
                continue
            box = det_res['bbox']
            # deal with different bbox file formats
            if 'nframes' in det_res and 'frame_id' in det_res:
                nframes = int(det_res['nframes'])
                frame_id = int(det_res['frame_id'])
            elif 'image_name' in det_res:
                img_id = self.name2id[det_res['image_name']]
                img_ann = self.coco.loadImgs(img_id)[0]
                nframes = int(img_ann['nframes'])
                frame_id = int(img_ann['frame_id'])
            else:
                img_id = det_res['image_id']
                img_ann = self.coco.loadImgs(img_id)[0]
                nframes = int(img_ann['nframes'])
                frame_id = int(img_ann['frame_id'])
            # First entry is the current (key) frame; supporting frames follow.
            image_files = []
            if 'image_name' in det_res:
                file_name = det_res['image_name']
            else:
                file_name = self.id2name[det_res['image_id']]
            cur_image_file = os.path.join(self.img_prefix, file_name)
            image_files.append(cur_image_file)
            # "images/val/012834_mpii_test/000000.jpg" -->> "000000.jpg"
            cur_image_name = file_name.split('/')[-1]
            ref_idx = int(cur_image_name.replace('.jpg', ''))
            indices = self.frame_indices_test
            for index in indices:
                # Index 0 is the key frame itself, already appended above.
                if self.test_mode and index == 0:
                    continue
                # the supporting frame index
                support_idx = ref_idx + index
                support_idx = np.clip(support_idx, 0, nframes - 1)
                sup_image_file = cur_image_file.replace(
                    cur_image_name,
                    str(support_idx).zfill(self.ph_fill_len) + '.jpg')
                if os.path.exists(sup_image_file):
                    image_files.append(sup_image_file)
                else:
                    # Fall back to the key frame if the neighbour is missing.
                    warnings.warn(f'{sup_image_file} does not exist, '
                                  f'use {cur_image_file} instead.')
                    image_files.append(cur_image_file)
            center, scale = self._xywh2cs(*box[:4])
            joints_3d = np.zeros((num_joints, 3), dtype=np.float32)
            # No GT keypoints here, so all joints are marked visible.
            joints_3d_visible = np.ones((num_joints, 3), dtype=np.float32)
            kpt_db.append({
                'image_file': image_files,
                'center': center,
                'scale': scale,
                'rotation': 0,
                'bbox': box[:4],
                'bbox_score': score,
                'dataset': self.dataset_name,
                'joints_3d': joints_3d,
                'joints_3d_visible': joints_3d_visible,
                'bbox_id': bbox_id,
                'nframes': nframes,
                'frame_id': frame_id,
                'frame_weight': self.frame_weight
            })
            bbox_id = bbox_id + 1
        print(f'=> Total boxes after filter '
              f'low score@{self.det_bbox_thr}: {bbox_id}')
        return kpt_db
    def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
        """Evaluate posetrack keypoint results. The pose prediction results
        will be saved in `${res_folder}/result_keypoints.json`.
        Note:
            num_keypoints: K
        Args:
            outputs (list(preds, boxes, image_paths))
                :preds (np.ndarray[N,K,3]): The first two dimensions are
                    coordinates, score is the third dimension of the array.
                :boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]
                    , scale[1],area, score]
                :image_paths (list[str]): For example, ['val/010016_mpii_test
                    /000024.jpg']
                :heatmap (np.ndarray[N, K, H, W]): model output heatmap.
                :bbox_id (list(int))
            res_folder (str): Path of directory to save the results.
            metric (str | list[str]): Metric to be performed. Defaults: 'mAP'.
        Returns:
            dict: Evaluation results for evaluation metric.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['mAP']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        pred_folder = osp.join(res_folder, 'preds')
        os.makedirs(pred_folder, exist_ok=True)
        gt_folder = osp.join(
            osp.dirname(self.ann_file),
            osp.splitext(self.ann_file.split('_')[-1])[0])
        # Group the predicted keypoints by image id.
        kpts = defaultdict(list)
        for output in outputs:
            preds = output['preds']
            boxes = output['boxes']
            image_paths = output['image_paths']
            bbox_ids = output['bbox_ids']
            batch_size = len(image_paths)
            for i in range(batch_size):
                # image_paths[i] may be a single path or a list of frame
                # paths; only the key frame (first element) identifies the
                # image. The img_prefix is stripped to recover the name key.
                if not isinstance(image_paths[i], list):
                    image_id = self.name2id[image_paths[i]
                                            [len(self.img_prefix):]]
                else:
                    image_id = self.name2id[image_paths[i][0]
                                            [len(self.img_prefix):]]
                kpts[image_id].append({
                    'keypoints': preds[i],
                    'center': boxes[i][0:2],
                    'scale': boxes[i][2:4],
                    'area': boxes[i][4],
                    'score': boxes[i][5],
                    'image_id': image_id,
                    'bbox_id': bbox_ids[i]
                })
        kpts = self._sort_and_unique_bboxes(kpts)
        # rescoring and oks nms
        num_joints = self.ann_info['num_joints']
        vis_thr = self.vis_thr
        oks_thr = self.oks_thr
        valid_kpts = defaultdict(list)
        for image_id in kpts.keys():
            img_kpts = kpts[image_id]
            for n_p in img_kpts:
                box_score = n_p['score']
                # Rescore each instance: bbox score times the mean keypoint
                # confidence over keypoints above the visibility threshold.
                kpt_score = 0
                valid_num = 0
                for n_jt in range(0, num_joints):
                    t_s = n_p['keypoints'][n_jt][2]
                    if t_s > vis_thr:
                        kpt_score = kpt_score + t_s
                        valid_num = valid_num + 1
                if valid_num != 0:
                    kpt_score = kpt_score / valid_num
                # rescoring
                n_p['score'] = kpt_score * box_score
            if self.use_nms:
                nms = soft_oks_nms if self.soft_nms else oks_nms
                keep = nms(img_kpts, oks_thr, sigmas=self.sigmas)
                valid_kpts[image_id].append(
                    [img_kpts[_keep] for _keep in keep])
            else:
                valid_kpts[image_id].append(img_kpts)
        self._write_keypoint_results(valid_kpts, gt_folder, pred_folder)
        info_str = self._do_keypoint_eval(gt_folder, pred_folder)
        name_value = OrderedDict(info_str)
        return name_value
    @staticmethod
    def _write_keypoint_results(keypoint_results, gt_folder, pred_folder):
        """Write results into a json file.
        Args:
            keypoint_results (dict): keypoint results organized by image_id.
            gt_folder (str): Path of directory for official gt files.
            pred_folder (str): Path of directory to save the results.
        """
        # Fixed COCO-style 'person' category block describing the
        # PoseTrack18 keypoints and skeleton (1-based joint indices).
        categories = []
        cat = {}
        cat['supercategory'] = 'person'
        cat['id'] = 1
        cat['name'] = 'person'
        cat['keypoints'] = [
            'nose', 'head_bottom', 'head_top', 'left_ear', 'right_ear',
            'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
            'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee',
            'right_knee', 'left_ankle', 'right_ankle'
        ]
        cat['skeleton'] = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13],
                           [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10],
                           [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5],
                           [4, 6], [5, 7]]
        categories.append(cat)
        # Mirror each gt json file into pred_folder with the predictions.
        json_files = [
            pos for pos in os.listdir(gt_folder) if pos.endswith('.json')
        ]
        for json_file in json_files:
            with open(osp.join(gt_folder, json_file), 'r') as f:
                gt = json.load(f)
            annotations = []
            images = []
            for image in gt['images']:
                im = {}
                im['id'] = image['id']
                im['file_name'] = image['file_name']
                images.append(im)
                img_kpts = keypoint_results[im['id']]
                if len(img_kpts) == 0:
                    continue
                for track_id, img_kpt in enumerate(img_kpts[0]):
                    ann = {}
                    ann['image_id'] = img_kpt['image_id']
                    # Flatten (K, 3) keypoints into a COCO-style list and
                    # pull out the per-keypoint scores (every 3rd value).
                    ann['keypoints'] = np.array(
                        img_kpt['keypoints']).reshape(-1).tolist()
                    ann['scores'] = np.array(ann['keypoints']).reshape(
                        [-1, 3])[:, 2].tolist()
                    ann['score'] = float(img_kpt['score'])
                    ann['track_id'] = track_id
                    annotations.append(ann)
            info = {}
            info['images'] = images
            info['categories'] = categories
            info['annotations'] = annotations
            with open(osp.join(pred_folder, json_file), 'w') as f:
                json.dump(info, f, sort_keys=True, indent=4)
    def _do_keypoint_eval(self, gt_folder, pred_folder):
        """Keypoint evaluation using poseval."""
        if not has_poseval:
            # NOTE(review): the adjacent string literals below concatenate
            # without a space ("...evaluationon PoseTrack...") — confirm
            # whether the message is intended to read this way.
            raise ImportError('Please install poseval package for evaluation'
                              'on PoseTrack dataset '
                              '(see requirements/optional.txt)')
        # poseval parses argv-style input: [prog, gt_dir/, pred_dir/].
        argv = ['', gt_folder + '/', pred_folder + '/']
        print('Loading data')
        gtFramesAll, prFramesAll = eval_helpers.load_data_dir(argv)
        print('# gt frames :', len(gtFramesAll))
        print('# pred frames:', len(prFramesAll))
        # evaluate per-frame multi-person pose estimation (AP)
        # compute AP
        print('Evaluation of per-frame multi-person pose estimation')
        apAll, _, _ = evaluateAP(gtFramesAll, prFramesAll, None, False, False)
        # print AP
        print('Average Precision (AP) metric:')
        eval_helpers.printTable(apAll)
        stats = eval_helpers.getCum(apAll)
        stats_names = [
            'Head AP', 'Shou AP', 'Elb AP', 'Wri AP', 'Hip AP', 'Knee AP',
            'Ankl AP', 'Total AP'
        ]
        info_str = list(zip(stats_names, stats))
        return info_str
| [
"numpy.ones",
"numpy.clip",
"poseval.eval_helpers.getCum",
"collections.defaultdict",
"numpy.random.randint",
"os.path.join",
"poseval.eval_helpers.printTable",
"json_tricks.dump",
"poseval.evaluateAP.evaluateAP",
"os.path.dirname",
"poseval.eval_helpers.load_data_dir",
"os.path.exists",
"js... | [((13494, 13523), 'os.path.join', 'osp.join', (['res_folder', '"""preds"""'], {}), "(res_folder, 'preds')\n", (13502, 13523), True, 'import os.path as osp\n'), ((13532, 13571), 'os.makedirs', 'os.makedirs', (['pred_folder'], {'exist_ok': '(True)'}), '(pred_folder, exist_ok=True)\n', (13543, 13571), False, 'import os\n'), ((13717, 13734), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (13728, 13734), False, 'from collections import OrderedDict, defaultdict\n'), ((14925, 14942), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (14936, 14942), False, 'from collections import OrderedDict, defaultdict\n'), ((16061, 16082), 'collections.OrderedDict', 'OrderedDict', (['info_str'], {}), '(info_str)\n', (16072, 16082), False, 'from collections import OrderedDict, defaultdict\n'), ((19136, 19168), 'poseval.eval_helpers.load_data_dir', 'eval_helpers.load_data_dir', (['argv'], {}), '(argv)\n', (19162, 19168), False, 'from poseval import eval_helpers\n'), ((19447, 19503), 'poseval.evaluateAP.evaluateAP', 'evaluateAP', (['gtFramesAll', 'prFramesAll', 'None', '(False)', '(False)'], {}), '(gtFramesAll, prFramesAll, None, False, False)\n', (19457, 19503), False, 'from poseval.evaluateAP import evaluateAP\n'), ((19580, 19610), 'poseval.eval_helpers.printTable', 'eval_helpers.printTable', (['apAll'], {}), '(apAll)\n', (19603, 19610), False, 'from poseval import eval_helpers\n'), ((19628, 19654), 'poseval.eval_helpers.getCum', 'eval_helpers.getCum', (['apAll'], {}), '(apAll)\n', (19647, 19654), False, 'from poseval import eval_helpers\n'), ((5948, 5991), 'numpy.zeros', 'np.zeros', (['(num_joints, 3)'], {'dtype': 'np.float32'}), '((num_joints, 3), dtype=np.float32)\n', (5956, 5991), True, 'import numpy as np\n'), ((6024, 6067), 'numpy.zeros', 'np.zeros', (['(num_joints, 3)'], {'dtype': 'np.float32'}), '((num_joints, 3), dtype=np.float32)\n', (6032, 6067), True, 'import numpy as np\n'), ((6222, 6254), 'numpy.minimum', 'np.minimum', 
(['(1)', 'keypoints[:, 2:3]'], {}), '(1, keypoints[:, 2:3])\n', (6232, 6254), True, 'import numpy as np\n'), ((6381, 6432), 'os.path.join', 'os.path.join', (['self.img_prefix', 'self.id2name[img_id]'], {}), '(self.img_prefix, self.id2name[img_id])\n', (6393, 6432), False, 'import os\n'), ((8808, 8820), 'json_tricks.load', 'json.load', (['f'], {}), '(f)\n', (8817, 8820), True, 'import json_tricks as json\n'), ((10180, 10220), 'os.path.join', 'os.path.join', (['self.img_prefix', 'file_name'], {}), '(self.img_prefix, file_name)\n', (10192, 10220), False, 'import os\n'), ((11340, 11383), 'numpy.zeros', 'np.zeros', (['(num_joints, 3)'], {'dtype': 'np.float32'}), '((num_joints, 3), dtype=np.float32)\n', (11348, 11383), True, 'import numpy as np\n'), ((11416, 11458), 'numpy.ones', 'np.ones', (['(num_joints, 3)'], {'dtype': 'np.float32'}), '((num_joints, 3), dtype=np.float32)\n', (11423, 11458), True, 'import numpy as np\n'), ((13614, 13640), 'os.path.dirname', 'osp.dirname', (['self.ann_file'], {}), '(self.ann_file)\n', (13625, 13640), True, 'import os.path as osp\n'), ((7376, 7412), 'numpy.clip', 'np.clip', (['support_idx', '(0)', '(nframes - 1)'], {}), '(support_idx, 0, nframes - 1)\n', (7383, 7412), True, 'import numpy as np\n'), ((7597, 7627), 'os.path.exists', 'os.path.exists', (['sup_image_file'], {}), '(sup_image_file)\n', (7611, 7627), False, 'import os\n'), ((10739, 10775), 'numpy.clip', 'np.clip', (['support_idx', '(0)', '(nframes - 1)'], {}), '(support_idx, 0, nframes - 1)\n', (10746, 10775), True, 'import numpy as np\n'), ((10960, 10990), 'os.path.exists', 'os.path.exists', (['sup_image_file'], {}), '(sup_image_file)\n', (10974, 10990), False, 'import os\n'), ((17302, 17323), 'os.listdir', 'os.listdir', (['gt_folder'], {}), '(gt_folder)\n', (17312, 17323), False, 'import os\n'), ((17483, 17495), 'json_tricks.load', 'json.load', (['f'], {}), '(f)\n', (17492, 17495), True, 'import json_tricks as json\n'), ((18635, 18679), 'json_tricks.dump', 'json.dump', 
(['info', 'f'], {'sort_keys': '(True)', 'indent': '(4)'}), '(info, f, sort_keys=True, indent=4)\n', (18644, 18679), True, 'import json_tricks as json\n'), ((6093, 6119), 'numpy.array', 'np.array', (["obj['keypoints']"], {}), "(obj['keypoints'])\n", (6101, 6119), True, 'import numpy as np\n'), ((7019, 7072), 'numpy.random.randint', 'np.random.randint', (['low', '(high + 1)', 'self.num_adj_frames'], {}), '(low, high + 1, self.num_adj_frames)\n', (7036, 7072), True, 'import numpy as np\n'), ((7726, 7811), 'warnings.warn', 'warnings.warn', (['f"""{sup_image_file} does not exist, use {cur_image_file} instead."""'], {}), "(f'{sup_image_file} does not exist, use {cur_image_file} instead.'\n )\n", (7739, 7811), False, 'import warnings\n'), ((11089, 11174), 'warnings.warn', 'warnings.warn', (['f"""{sup_image_file} does not exist, use {cur_image_file} instead."""'], {}), "(f'{sup_image_file} does not exist, use {cur_image_file} instead.'\n )\n", (11102, 11174), False, 'import warnings\n'), ((17419, 17449), 'os.path.join', 'osp.join', (['gt_folder', 'json_file'], {}), '(gt_folder, json_file)\n', (17427, 17449), True, 'import os.path as osp\n'), ((18574, 18606), 'os.path.join', 'osp.join', (['pred_folder', 'json_file'], {}), '(pred_folder, json_file)\n', (18582, 18606), True, 'import os.path as osp\n'), ((18055, 18085), 'numpy.array', 'np.array', (["img_kpt['keypoints']"], {}), "(img_kpt['keypoints'])\n", (18063, 18085), True, 'import numpy as np\n'), ((18168, 18194), 'numpy.array', 'np.array', (["ann['keypoints']"], {}), "(ann['keypoints'])\n", (18176, 18194), True, 'import numpy as np\n')] |
__author__ = 'cs540-testers'
__credits__ = ['<NAME>', '<NAME>', '<NAME>',
		'<NAME>']
version = 'v0.2.2'
import sys
import unittest
import numpy as np
from pca import load_and_center_dataset, get_covariance, get_eig, \
	get_eig_perc, project_image, display_image
# Default dataset location; may be overridden via --mnist-path (see __main__).
mnist_path = 'mnist.npy'
class TestLoadAndCenterDataset(unittest.TestCase):
	"""Checks for load_and_center_dataset()."""

	def test_load(self):
		dataset = load_and_center_dataset(mnist_path)
		# 2000 images of 28x28 = 784 pixels each
		self.assertEqual(np.shape(dataset), (2000, 784))
		# A constant-valued array would mean the load went wrong
		self.assertNotAlmostEqual(np.max(dataset) - np.min(dataset), 0)

	def test_center(self):
		dataset = load_and_center_dataset(mnist_path)
		# After centering, every coordinate should average to zero
		for col in range(np.shape(dataset)[1]):
			self.assertAlmostEqual(np.sum(dataset[:, col]), 0)
class TestGetCovariance(unittest.TestCase):
	"""Checks for get_covariance()."""

	def test_shape(self):
		cov = get_covariance(load_and_center_dataset(mnist_path))
		# Covariance of d-dimensional data must be d x d
		self.assertEqual(np.shape(cov), (784, 784))

	def test_values(self):
		cov = get_covariance(load_and_center_dataset(mnist_path))
		# A covariance matrix equals its own transpose ...
		self.assertTrue(np.isclose(cov, cov.T).all())
		# ... and its diagonal (the variances) is never negative
		self.assertTrue(np.min(np.diagonal(cov)) >= 0)
class TestGetEig(unittest.TestCase):
	"""Checks for get_eig()."""

	def test_small(self):
		S = get_covariance(load_and_center_dataset(mnist_path))
		Lambda, U = get_eig(S, 2)
		# Top-2 eigenvalues on the diagonal of a 2x2 matrix
		self.assertEqual(np.shape(Lambda), (2, 2))
		expected = [[350880.76329673, 0], [0, 245632.27295307]]
		self.assertTrue(np.isclose(Lambda, expected).all())
		# Eigenvectors are stored as columns and satisfy S U = U Lambda
		self.assertEqual(np.shape(U), (784, 2))
		self.assertTrue(np.isclose(S @ U, U @ Lambda).all())

	def test_large(self):
		S = get_covariance(load_and_center_dataset(mnist_path))
		Lambda, U = get_eig(S, 784)
		self.assertEqual(np.shape(Lambda), (784, 784))
		diag = np.diagonal(Lambda)
		# Lambda must be diagonal ...
		self.assertEqual(np.count_nonzero(Lambda - np.diag(diag)), 0)
		# ... with eigenvalues sorted in decreasing order
		self.assertTrue(np.equal(diag, sorted(diag, reverse=True)).all())
		# Eigenvectors are stored as columns and satisfy S U = U Lambda
		self.assertEqual(np.shape(U), (784, 784))
		self.assertTrue(np.isclose(S @ U, U @ Lambda).all())
class TestGetEigPerc(unittest.TestCase):
	"""Checks for get_eig_perc()."""

	def test_small(self):
		S = get_covariance(load_and_center_dataset(mnist_path))
		# A 7% variance cutoff keeps exactly the top two eigenpairs
		Lambda, U = get_eig_perc(S, 0.07)
		self.assertEqual(np.shape(Lambda), (2, 2))
		expected = [[350880.76329673, 0], [0, 245632.27295307]]
		self.assertTrue(np.isclose(Lambda, expected).all())
		# Eigenvectors are stored as columns and satisfy S U = U Lambda
		self.assertEqual(np.shape(U), (784, 2))
		self.assertTrue(np.isclose(S @ U, U @ Lambda).all())

	def test_large(self):
		S = get_covariance(load_and_center_dataset(mnist_path))
		# A negative cutoff keeps every eigenvalue/eigenvector
		Lambda, U = get_eig_perc(S, -1)
		self.assertEqual(np.shape(Lambda), (784, 784))
		diag = np.diagonal(Lambda)
		# Lambda must be diagonal ...
		self.assertEqual(np.count_nonzero(Lambda - np.diag(diag)), 0)
		# ... with eigenvalues sorted in decreasing order
		self.assertTrue(np.equal(diag, sorted(diag, reverse=True)).all())
		# Eigenvectors are stored as columns and satisfy S U = U Lambda
		self.assertEqual(np.shape(U), (784, 784))
		self.assertTrue(np.isclose(S @ U, U @ Lambda).all())
class TestProjectImage(unittest.TestCase):
	"""Checks for project_image()."""

	def test_shape(self):
		x = load_and_center_dataset(mnist_path)
		_, U = get_eig(get_covariance(x), 2)
		# Project the "9" image from the spec onto the top-2 components
		projected = project_image(x[3], U)
		self.assertEqual(np.shape(projected), (784,))
		self.assertAlmostEqual(np.min(projected), -113.79455198736488)
		self.assertAlmostEqual(np.max(projected), 120.0658469887994)
if __name__ == '__main__':
	# Support a custom location for mnist.npy while still forwarding any
	# remaining command-line flags to unittest.
	if '--mnist-path' in sys.argv:
		option_index = sys.argv.index('--mnist-path')
		if option_index + 1 == len(sys.argv):
			print('Error: must supply path after option --mnist-path')
			sys.exit(1)
		mnist_path = sys.argv[option_index + 1]
		# Strip both the flag and its value so unittest never sees them
		del sys.argv[option_index:option_index + 2]

	print('Homework 5 Tester Version', version)
	unittest.main(argv=sys.argv)
| [
"unittest.main",
"pca.get_eig_perc",
"numpy.sum",
"pca.load_and_center_dataset",
"pca.get_eig",
"numpy.shape",
"numpy.min",
"pca.get_covariance",
"numpy.max",
"sys.argv.index",
"pca.project_image",
"numpy.isclose",
"sys.exit",
"numpy.diagonal"
] | [((4327, 4355), 'unittest.main', 'unittest.main', ([], {'argv': 'sys.argv'}), '(argv=sys.argv)\n', (4340, 4355), False, 'import unittest\n'), ((371, 406), 'pca.load_and_center_dataset', 'load_and_center_dataset', (['mnist_path'], {}), '(mnist_path)\n', (394, 406), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((633, 668), 'pca.load_and_center_dataset', 'load_and_center_dataset', (['mnist_path'], {}), '(mnist_path)\n', (656, 668), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((879, 914), 'pca.load_and_center_dataset', 'load_and_center_dataset', (['mnist_path'], {}), '(mnist_path)\n', (902, 914), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((921, 938), 'pca.get_covariance', 'get_covariance', (['x'], {}), '(x)\n', (935, 938), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((1061, 1096), 'pca.load_and_center_dataset', 'load_and_center_dataset', (['mnist_path'], {}), '(mnist_path)\n', (1084, 1096), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((1103, 1120), 'pca.get_covariance', 'get_covariance', (['x'], {}), '(x)\n', (1117, 1120), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((1363, 1398), 'pca.load_and_center_dataset', 'load_and_center_dataset', (['mnist_path'], {}), '(mnist_path)\n', (1386, 1398), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((1405, 1422), 'pca.get_covariance', 'get_covariance', (['x'], {}), '(x)\n', (1419, 1422), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, 
display_image\n'), ((1437, 1450), 'pca.get_eig', 'get_eig', (['S', '(2)'], {}), '(S, 2)\n', (1444, 1450), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((1767, 1802), 'pca.load_and_center_dataset', 'load_and_center_dataset', (['mnist_path'], {}), '(mnist_path)\n', (1790, 1802), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((1809, 1826), 'pca.get_covariance', 'get_covariance', (['x'], {}), '(x)\n', (1823, 1826), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((1841, 1856), 'pca.get_eig', 'get_eig', (['S', '(784)'], {}), '(S, 784)\n', (1848, 1856), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((2397, 2432), 'pca.load_and_center_dataset', 'load_and_center_dataset', (['mnist_path'], {}), '(mnist_path)\n', (2420, 2432), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((2439, 2456), 'pca.get_covariance', 'get_covariance', (['x'], {}), '(x)\n', (2453, 2456), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((2471, 2492), 'pca.get_eig_perc', 'get_eig_perc', (['S', '(0.07)'], {}), '(S, 0.07)\n', (2483, 2492), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((2808, 2843), 'pca.load_and_center_dataset', 'load_and_center_dataset', (['mnist_path'], {}), '(mnist_path)\n', (2831, 2843), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((2850, 2867), 'pca.get_covariance', 'get_covariance', (['x'], {}), '(x)\n', (2864, 2867), False, 'from pca import load_and_center_dataset, get_covariance, 
get_eig, get_eig_perc, project_image, display_image\n'), ((2932, 2951), 'pca.get_eig_perc', 'get_eig_perc', (['S', '(-1)'], {}), '(S, -1)\n', (2944, 2951), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((3494, 3529), 'pca.load_and_center_dataset', 'load_and_center_dataset', (['mnist_path'], {}), '(mnist_path)\n', (3517, 3529), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((3536, 3553), 'pca.get_covariance', 'get_covariance', (['x'], {}), '(x)\n', (3550, 3553), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((3563, 3576), 'pca.get_eig', 'get_eig', (['S', '(2)'], {}), '(S, 2)\n', (3570, 3576), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((3636, 3658), 'pca.project_image', 'project_image', (['x[3]', 'U'], {}), '(x[3], U)\n', (3649, 3658), False, 'from pca import load_and_center_dataset, get_covariance, get_eig, get_eig_perc, project_image, display_image\n'), ((475, 486), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (483, 486), True, 'import numpy as np\n'), ((1005, 1016), 'numpy.shape', 'np.shape', (['S'], {}), '(S)\n', (1013, 1016), True, 'import numpy as np\n'), ((1471, 1487), 'numpy.shape', 'np.shape', (['Lambda'], {}), '(Lambda)\n', (1479, 1487), True, 'import numpy as np\n'), ((1657, 1668), 'numpy.shape', 'np.shape', (['U'], {}), '(U)\n', (1665, 1668), True, 'import numpy as np\n'), ((1877, 1893), 'numpy.shape', 'np.shape', (['Lambda'], {}), '(Lambda)\n', (1885, 1893), True, 'import numpy as np\n'), ((2244, 2255), 'numpy.shape', 'np.shape', (['U'], {}), '(U)\n', (2252, 2255), True, 'import numpy as np\n'), ((2512, 2528), 'numpy.shape', 'np.shape', (['Lambda'], {}), '(Lambda)\n', (2520, 2528), True, 'import numpy as np\n'), ((2698, 2709), 'numpy.shape', 
'np.shape', (['U'], {}), '(U)\n', (2706, 2709), True, 'import numpy as np\n'), ((2972, 2988), 'numpy.shape', 'np.shape', (['Lambda'], {}), '(Lambda)\n', (2980, 2988), True, 'import numpy as np\n'), ((3339, 3350), 'numpy.shape', 'np.shape', (['U'], {}), '(U)\n', (3347, 3350), True, 'import numpy as np\n'), ((3679, 3698), 'numpy.shape', 'np.shape', (['projected'], {}), '(projected)\n', (3687, 3698), True, 'import numpy as np\n'), ((3733, 3750), 'numpy.min', 'np.min', (['projected'], {}), '(projected)\n', (3739, 3750), True, 'import numpy as np\n'), ((3798, 3815), 'numpy.max', 'np.max', (['projected'], {}), '(projected)\n', (3804, 3815), True, 'import numpy as np\n'), ((4037, 4067), 'sys.argv.index', 'sys.argv.index', (['"""--mnist-path"""'], {}), "('--mnist-path')\n", (4051, 4067), False, 'import sys\n'), ((4171, 4182), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4179, 4182), False, 'import sys\n'), ((576, 585), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (582, 585), True, 'import numpy as np\n'), ((588, 597), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (594, 597), True, 'import numpy as np\n'), ((742, 753), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (750, 753), True, 'import numpy as np\n'), ((785, 800), 'numpy.sum', 'np.sum', (['x[:, i]'], {}), '(x[:, i])\n', (791, 800), True, 'import numpy as np\n'), ((1173, 1191), 'numpy.isclose', 'np.isclose', (['S', 'S.T'], {}), '(S, S.T)\n', (1183, 1191), True, 'import numpy as np\n'), ((1522, 1586), 'numpy.isclose', 'np.isclose', (['Lambda', '[[350880.76329673, 0], [0, 245632.27295307]]'], {}), '(Lambda, [[350880.76329673, 0], [0, 245632.27295307]])\n', (1532, 1586), True, 'import numpy as np\n'), ((1705, 1734), 'numpy.isclose', 'np.isclose', (['(S @ U)', '(U @ Lambda)'], {}), '(S @ U, U @ Lambda)\n', (1715, 1734), True, 'import numpy as np\n'), ((2294, 2323), 'numpy.isclose', 'np.isclose', (['(S @ U)', '(U @ Lambda)'], {}), '(S @ U, U @ Lambda)\n', (2304, 2323), True, 'import numpy as np\n'), ((2563, 2627), 
'numpy.isclose', 'np.isclose', (['Lambda', '[[350880.76329673, 0], [0, 245632.27295307]]'], {}), '(Lambda, [[350880.76329673, 0], [0, 245632.27295307]])\n', (2573, 2627), True, 'import numpy as np\n'), ((2746, 2775), 'numpy.isclose', 'np.isclose', (['(S @ U)', '(U @ Lambda)'], {}), '(S @ U, U @ Lambda)\n', (2756, 2775), True, 'import numpy as np\n'), ((3389, 3418), 'numpy.isclose', 'np.isclose', (['(S @ U)', '(U @ Lambda)'], {}), '(S @ U, U @ Lambda)\n', (3399, 3418), True, 'import numpy as np\n'), ((1274, 1288), 'numpy.diagonal', 'np.diagonal', (['S'], {}), '(S)\n', (1285, 1288), True, 'import numpy as np\n'), ((2111, 2130), 'numpy.diagonal', 'np.diagonal', (['Lambda'], {}), '(Lambda)\n', (2122, 2130), True, 'import numpy as np\n'), ((3206, 3225), 'numpy.diagonal', 'np.diagonal', (['Lambda'], {}), '(Lambda)\n', (3217, 3225), True, 'import numpy as np\n'), ((1999, 2018), 'numpy.diagonal', 'np.diagonal', (['Lambda'], {}), '(Lambda)\n', (2010, 2018), True, 'import numpy as np\n'), ((2143, 2162), 'numpy.diagonal', 'np.diagonal', (['Lambda'], {}), '(Lambda)\n', (2154, 2162), True, 'import numpy as np\n'), ((3094, 3113), 'numpy.diagonal', 'np.diagonal', (['Lambda'], {}), '(Lambda)\n', (3105, 3113), True, 'import numpy as np\n'), ((3238, 3257), 'numpy.diagonal', 'np.diagonal', (['Lambda'], {}), '(Lambda)\n', (3249, 3257), True, 'import numpy as np\n')] |
'''
Pipeline Filters:
    price > 10 (price_filter)
    5-day SMA > 10-day SMA (positive_movement)
    30-day Parkinson Volatility is in bottom 1000 of stocks (pvol_filter)
    AverageDollarVolume
    10-day SMA > 30-day SMA (positive_movement2)
    MACD Histogram Trading
TODO:
    manage leverage
    risk management (determine why the algorithm fails)
'''
from quantopian.pipeline import Pipeline
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline.filters import Q3000US
from quantopian.pipeline.factors import SimpleMovingAverage
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.data import morningstar
from quantopian.pipeline import CustomFactor
import talib
import math
import numpy as np
import pandas as pd
class PriceRange(CustomFactor):
inputs = [USEquityPricing.close]
def compute(self, today, assets, out, close):
out[:] = close[-1]
class Parkinson(CustomFactor):
inputs = [USEquityPricing.high, USEquityPricing.low]
def compute(self, today, assets, out, high, low):
x = np.log(high/low)
rs = (1.0/(4.0*math.log(2.0)))*x**2
p_vol = np.sqrt(rs.mean(axis=0))
out[:] = p_vol
class AvgDailyDollarVolumeTraded(CustomFactor):
inputs = [USEquityPricing.close, USEquityPricing.volume]
def compute(self, today, assets, out, close_price, volume):
dollar_volume = close_price * volume
avg_dollar_volume = np.mean(dollar_volume, axis=0)
out[:] = avg_dollar_volume
def initialize(context):
context.max_notional = 100000.1
context.min_notional = -100000.0
context.stockpct= 0.1
pipe = Pipeline()
attach_pipeline(pipe, name='my_pipeline')
pvol_factor = Parkinson(window_length=30)
pvol_filter = pvol_factor.bottom(500)
sma_30 = SimpleMovingAverage(inputs= [USEquityPricing.close], window_length=30)
sma_10 = SimpleMovingAverage(inputs= [USEquityPricing.close], window_length=10)
sma_5 = SimpleMovingAverage(inputs= [USEquityPricing.close], window_length=5)
priceclose= PriceRange(window_length=1)
price_filter= (priceclose > 10)
dollar_volume= AvgDailyDollarVolumeTraded(window_length=30)
dv_filter = dollar_volume > 100 * 10**5
context.account.leverage= 1
positive_movement= (sma_5 > sma_10)
positive_movement_long= (sma_10 > sma_30)
pipe.add(dv_filter, 'dv_filter')
pipe.add(pvol_factor, 'pvol_factor')
pipe.add(pvol_filter, 'pvol_filter')
pipe.add(price_filter, 'price_filter')
pipe.add(positive_movement, 'positive_movement')
pipe.add(positive_movement_long, 'positive_movement_long')
pipe.set_screen(price_filter & dv_filter & positive_movement & pvol_filter & positive_movement_long)
schedule_function(func=trader, date_rule=date_rules.every_day(), time_rule=time_rules.market_open(hours=0, minutes=5))
schedule_function(func=liquidate, date_rule=date_rules.every_day(), time_rule=time_rules.market_open(hours=0, minutes=1))
def before_trading_start(context, data):
context.results = pipeline_output('my_pipeline')
print(context.results.index)
def trader(context, data):
cash = context.portfolio.cash
leverage = context.account.leverage
for stock in context.results.index:
prices = data.history(stock, 'price', 50, '1d')
macd = MACD(prices, fastperiod=12, slowperiod=26, signalperiod=9)
macdline= MACDline(prices, fastperiod=12, slowperiod=26, signalperiod=9)
price= data.current(stock, 'price')
position = context.portfolio.positions[stock].amount
# If macd crosses back over, liquidate
if get_open_orders(stock):
continue
if macd < 0 and position > 0:
order_target(stock, 0)
# When macd crosses over to positive, full position
elif macd > 0 and position == 0 and MACDline > 0 and cash > price and leverage <1:
order_target_percent(stock, context.stockpct)
record(leverage=context.account.leverage, numofstocks= len(context.results))
def liquidate(context, data):
for stock in context.portfolio.positions:
prices = data.history(stock, 'price', 50, '1d')
macd = MACD(prices, fastperiod=12, slowperiod=26, signalperiod=9)
if macd < 0:
order_target(stock, 0)
def MACD(prices, fastperiod=12, slowperiod=26, signalperiod=9):
macd, signal, hist = talib.MACD(prices,
fastperiod=fastperiod,
slowperiod=slowperiod,
signalperiod=signalperiod)
return macd[-1] - signal[-1]
def MACDline(prices, fastperiod=12, slowperiod=26, signalperiod=9):
macd, signal, hist = talib.MACD(prices,
fastperiod=fastperiod,
slowperiod=slowperiod,
signalperiod=signalperiod)
return macd[-1] | [
"talib.MACD",
"numpy.log",
"quantopian.pipeline.factors.SimpleMovingAverage",
"quantopian.algorithm.pipeline_output",
"quantopian.algorithm.attach_pipeline",
"quantopian.pipeline.Pipeline",
"numpy.mean",
"math.log"
] | [((1660, 1670), 'quantopian.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (1668, 1670), False, 'from quantopian.pipeline import Pipeline\n'), ((1675, 1716), 'quantopian.algorithm.attach_pipeline', 'attach_pipeline', (['pipe'], {'name': '"""my_pipeline"""'}), "(pipe, name='my_pipeline')\n", (1690, 1716), False, 'from quantopian.algorithm import attach_pipeline, pipeline_output\n'), ((1819, 1888), 'quantopian.pipeline.factors.SimpleMovingAverage', 'SimpleMovingAverage', ([], {'inputs': '[USEquityPricing.close]', 'window_length': '(30)'}), '(inputs=[USEquityPricing.close], window_length=30)\n', (1838, 1888), False, 'from quantopian.pipeline.factors import SimpleMovingAverage\n'), ((1903, 1972), 'quantopian.pipeline.factors.SimpleMovingAverage', 'SimpleMovingAverage', ([], {'inputs': '[USEquityPricing.close]', 'window_length': '(10)'}), '(inputs=[USEquityPricing.close], window_length=10)\n', (1922, 1972), False, 'from quantopian.pipeline.factors import SimpleMovingAverage\n'), ((1986, 2054), 'quantopian.pipeline.factors.SimpleMovingAverage', 'SimpleMovingAverage', ([], {'inputs': '[USEquityPricing.close]', 'window_length': '(5)'}), '(inputs=[USEquityPricing.close], window_length=5)\n', (2005, 2054), False, 'from quantopian.pipeline.factors import SimpleMovingAverage\n'), ((3063, 3093), 'quantopian.algorithm.pipeline_output', 'pipeline_output', (['"""my_pipeline"""'], {}), "('my_pipeline')\n", (3078, 3093), False, 'from quantopian.algorithm import attach_pipeline, pipeline_output\n'), ((4450, 4545), 'talib.MACD', 'talib.MACD', (['prices'], {'fastperiod': 'fastperiod', 'slowperiod': 'slowperiod', 'signalperiod': 'signalperiod'}), '(prices, fastperiod=fastperiod, slowperiod=slowperiod,\n signalperiod=signalperiod)\n', (4460, 4545), False, 'import talib\n'), ((4776, 4871), 'talib.MACD', 'talib.MACD', (['prices'], {'fastperiod': 'fastperiod', 'slowperiod': 'slowperiod', 'signalperiod': 'signalperiod'}), '(prices, fastperiod=fastperiod, slowperiod=slowperiod,\n 
signalperiod=signalperiod)\n', (4786, 4871), False, 'import talib\n'), ((1075, 1093), 'numpy.log', 'np.log', (['(high / low)'], {}), '(high / low)\n', (1081, 1093), True, 'import numpy as np\n'), ((1455, 1485), 'numpy.mean', 'np.mean', (['dollar_volume'], {'axis': '(0)'}), '(dollar_volume, axis=0)\n', (1462, 1485), True, 'import numpy as np\n'), ((1115, 1128), 'math.log', 'math.log', (['(2.0)'], {}), '(2.0)\n', (1123, 1128), False, 'import math\n')] |
import os
import numpy as np
import tensorflow as tf
from models_vqa.model import Model
from models_vqa.config import build_cfg_from_argparse
from util.vqa_train.data_reader import DataReader
# Load config
cfg = build_cfg_from_argparse()
# Start session
os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.GPU_ID)
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=cfg.GPU_MEM_GROWTH)))
# Data files
imdb_file = cfg.IMDB_FILE % cfg.TRAIN.SPLIT_VQA
data_reader = DataReader(
imdb_file, shuffle=True, one_pass=False, batch_size=cfg.TRAIN.BATCH_SIZE,
vocab_question_file=cfg.VOCAB_QUESTION_FILE, T_encoder=cfg.MODEL.T_ENCODER,
vocab_answer_file=cfg.VOCAB_ANSWER_FILE,
load_gt_layout=cfg.TRAIN.USE_GT_LAYOUT,
vocab_layout_file=cfg.VOCAB_LAYOUT_FILE, T_decoder=cfg.MODEL.T_CTRL,
load_soft_score=cfg.TRAIN.VQA_USE_SOFT_SCORE)
num_vocab = data_reader.batch_loader.vocab_dict.num_vocab
num_choices = data_reader.batch_loader.answer_dict.num_vocab
module_names = data_reader.batch_loader.layout_dict.word_list
# Inputs and model
input_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_feat_batch = tf.placeholder(
tf.float32, [None, cfg.MODEL.H_FEAT, cfg.MODEL.W_FEAT, cfg.MODEL.FEAT_DIM])
model = Model(
input_seq_batch, seq_length_batch, image_feat_batch, num_vocab=num_vocab,
num_choices=num_choices, module_names=module_names, is_training=True)
# Loss function
if cfg.TRAIN.VQA_USE_SOFT_SCORE:
soft_score_batch = tf.placeholder(tf.float32, [None, num_choices])
# Summing, instead of averaging over the choices
loss_vqa = float(num_choices) * tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=model.vqa_scores, labels=soft_score_batch))
else:
answer_label_batch = tf.placeholder(tf.int32, [None])
loss_vqa = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=model.vqa_scores, labels=answer_label_batch))
if cfg.TRAIN.USE_GT_LAYOUT:
gt_layout_batch = tf.placeholder(tf.int32, [None, None])
loss_layout = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=model.module_logits, labels=gt_layout_batch))
else:
loss_layout = tf.convert_to_tensor(0.)
loss_rec = model.rec_loss
loss_train = (loss_vqa * cfg.TRAIN.VQA_LOSS_WEIGHT +
loss_layout * cfg.TRAIN.LAYOUT_LOSS_WEIGHT +
loss_rec * cfg.TRAIN.REC_LOSS_WEIGHT)
loss_total = loss_train + cfg.TRAIN.WEIGHT_DECAY * model.l2_reg
# Train with Adam
solver = tf.train.AdamOptimizer(learning_rate=cfg.TRAIN.SOLVER.LR)
grads_and_vars = solver.compute_gradients(loss_total)
if cfg.TRAIN.CLIP_GRADIENTS:
print('clipping gradients to max norm: %f' % cfg.TRAIN.GRAD_MAX_NORM)
gradients, variables = zip(*grads_and_vars)
gradients, _ = tf.clip_by_global_norm(gradients, cfg.TRAIN.GRAD_MAX_NORM)
grads_and_vars = zip(gradients, variables)
solver_op = solver.apply_gradients(grads_and_vars)
# Save moving average of parameters
ema = tf.train.ExponentialMovingAverage(decay=cfg.TRAIN.EMA_DECAY)
ema_op = ema.apply(model.params)
with tf.control_dependencies([solver_op]):
train_op = tf.group(ema_op)
# Save snapshot
snapshot_dir = cfg.TRAIN.SNAPSHOT_DIR % cfg.EXP_NAME
os.makedirs(snapshot_dir, exist_ok=True)
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
if cfg.TRAIN.START_ITER > 0:
snapshot_file = os.path.join(snapshot_dir, "%08d" % cfg.TRAIN.START_ITER)
print('resume training from %s' % snapshot_file)
snapshot_saver.restore(sess, snapshot_file)
else:
sess.run(tf.global_variables_initializer())
if cfg.TRAIN.INIT_FROM_WEIGHTS:
snapshot_saver.restore(sess, cfg.TRAIN.INIT_WEIGHTS_FILE)
print('initialized from %s' % cfg.TRAIN.INIT_WEIGHTS_FILE)
# Save config
np.save(os.path.join(snapshot_dir, 'cfg.npy'), np.array(cfg))
# Write summary to TensorBoard
log_dir = cfg.TRAIN.LOG_DIR % cfg.EXP_NAME
os.makedirs(log_dir, exist_ok=True)
log_writer = tf.summary.FileWriter(log_dir, tf.get_default_graph())
loss_vqa_ph = tf.placeholder(tf.float32, [])
loss_layout_ph = tf.placeholder(tf.float32, [])
loss_rec_ph = tf.placeholder(tf.float32, [])
accuracy_ph = tf.placeholder(tf.float32, [])
summary_trn = []
summary_trn.append(tf.summary.scalar("loss/vqa", loss_vqa_ph))
summary_trn.append(tf.summary.scalar("loss/layout", loss_layout_ph))
summary_trn.append(tf.summary.scalar("loss/rec", loss_rec_ph))
summary_trn.append(tf.summary.scalar("eval/vqa/accuracy", accuracy_ph))
log_step_trn = tf.summary.merge(summary_trn)
# Run training
avg_accuracy, accuracy_decay = 0., 0.99
for n_batch, batch in enumerate(data_reader.batches()):
n_iter = n_batch + cfg.TRAIN.START_ITER
if n_iter >= cfg.TRAIN.MAX_ITER:
break
feed_dict = {input_seq_batch: batch['input_seq_batch'],
seq_length_batch: batch['seq_length_batch'],
image_feat_batch: batch['image_feat_batch']}
if cfg.TRAIN.VQA_USE_SOFT_SCORE:
feed_dict[soft_score_batch] = batch['soft_score_batch']
else:
feed_dict[answer_label_batch] = batch['answer_label_batch']
if cfg.TRAIN.USE_GT_LAYOUT:
feed_dict[gt_layout_batch] = batch['gt_layout_batch']
vqa_scores_val, loss_vqa_val, loss_layout_val, loss_rec_val, _ = sess.run(
(model.vqa_scores, loss_vqa, loss_layout, loss_rec, train_op),
feed_dict)
# compute accuracy
vqa_labels = batch['answer_label_batch']
vqa_predictions = np.argmax(vqa_scores_val, axis=1)
accuracy = np.mean(vqa_predictions == vqa_labels)
avg_accuracy += (1-accuracy_decay) * (accuracy-avg_accuracy)
# Add to TensorBoard summary
if (n_iter+1) % cfg.TRAIN.LOG_INTERVAL == 0:
print("exp: %s, iter = %d\n\t" % (cfg.EXP_NAME, n_iter+1) +
"loss (vqa) = %f, loss (layout) = %f, loss (rec) = %f\n\t" % (
loss_vqa_val, loss_layout_val, loss_rec_val) +
"accuracy (cur) = %f, accuracy (avg) = %f" % (
accuracy, avg_accuracy))
summary = sess.run(log_step_trn, {
loss_vqa_ph: loss_vqa_val,
loss_layout_ph: loss_layout_val,
loss_rec_ph: loss_rec_val,
accuracy_ph: avg_accuracy})
log_writer.add_summary(summary, n_iter+1)
# Save snapshot
if ((n_iter+1) % cfg.TRAIN.SNAPSHOT_INTERVAL == 0 or
(n_iter+1) == cfg.TRAIN.MAX_ITER):
snapshot_file = os.path.join(snapshot_dir, "%08d" % (n_iter+1))
snapshot_saver.save(sess, snapshot_file, write_meta_graph=False)
print('snapshot saved to ' + snapshot_file)
| [
"numpy.argmax",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"numpy.mean",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.get_default_graph",
"tensorflow.summary.merge",
"os.path.join",
"models_vqa.config.build_cfg_from_argparse",
"tensorflow.clip_by_global_norm",
"ten... | [((214, 239), 'models_vqa.config.build_cfg_from_argparse', 'build_cfg_from_argparse', ([], {}), '()\n', (237, 239), False, 'from models_vqa.config import build_cfg_from_argparse\n'), ((492, 872), 'util.vqa_train.data_reader.DataReader', 'DataReader', (['imdb_file'], {'shuffle': '(True)', 'one_pass': '(False)', 'batch_size': 'cfg.TRAIN.BATCH_SIZE', 'vocab_question_file': 'cfg.VOCAB_QUESTION_FILE', 'T_encoder': 'cfg.MODEL.T_ENCODER', 'vocab_answer_file': 'cfg.VOCAB_ANSWER_FILE', 'load_gt_layout': 'cfg.TRAIN.USE_GT_LAYOUT', 'vocab_layout_file': 'cfg.VOCAB_LAYOUT_FILE', 'T_decoder': 'cfg.MODEL.T_CTRL', 'load_soft_score': 'cfg.TRAIN.VQA_USE_SOFT_SCORE'}), '(imdb_file, shuffle=True, one_pass=False, batch_size=cfg.TRAIN.\n BATCH_SIZE, vocab_question_file=cfg.VOCAB_QUESTION_FILE, T_encoder=cfg.\n MODEL.T_ENCODER, vocab_answer_file=cfg.VOCAB_ANSWER_FILE,\n load_gt_layout=cfg.TRAIN.USE_GT_LAYOUT, vocab_layout_file=cfg.\n VOCAB_LAYOUT_FILE, T_decoder=cfg.MODEL.T_CTRL, load_soft_score=cfg.\n TRAIN.VQA_USE_SOFT_SCORE)\n', (502, 872), False, 'from util.vqa_train.data_reader import DataReader\n'), ((1093, 1131), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (1107, 1131), True, 'import tensorflow as tf\n'), ((1151, 1183), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (1165, 1183), True, 'import tensorflow as tf\n'), ((1203, 1298), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, cfg.MODEL.H_FEAT, cfg.MODEL.W_FEAT, cfg.MODEL.FEAT_DIM]'], {}), '(tf.float32, [None, cfg.MODEL.H_FEAT, cfg.MODEL.W_FEAT, cfg.\n MODEL.FEAT_DIM])\n', (1217, 1298), True, 'import tensorflow as tf\n'), ((1307, 1465), 'models_vqa.model.Model', 'Model', (['input_seq_batch', 'seq_length_batch', 'image_feat_batch'], {'num_vocab': 'num_vocab', 'num_choices': 'num_choices', 'module_names': 'module_names', 'is_training': '(True)'}), '(input_seq_batch, seq_length_batch, 
image_feat_batch, num_vocab=\n num_vocab, num_choices=num_choices, module_names=module_names,\n is_training=True)\n', (1312, 1465), False, 'from models_vqa.model import Model\n'), ((2595, 2652), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'cfg.TRAIN.SOLVER.LR'}), '(learning_rate=cfg.TRAIN.SOLVER.LR)\n', (2617, 2652), True, 'import tensorflow as tf\n'), ((3076, 3136), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ([], {'decay': 'cfg.TRAIN.EMA_DECAY'}), '(decay=cfg.TRAIN.EMA_DECAY)\n', (3109, 3136), True, 'import tensorflow as tf\n'), ((3315, 3355), 'os.makedirs', 'os.makedirs', (['snapshot_dir'], {'exist_ok': '(True)'}), '(snapshot_dir, exist_ok=True)\n', (3326, 3355), False, 'import os\n'), ((3373, 3405), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'None'}), '(max_to_keep=None)\n', (3387, 3405), True, 'import tensorflow as tf\n'), ((4010, 4045), 'os.makedirs', 'os.makedirs', (['log_dir'], {'exist_ok': '(True)'}), '(log_dir, exist_ok=True)\n', (4021, 4045), False, 'import os\n'), ((4128, 4158), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4142, 4158), True, 'import tensorflow as tf\n'), ((4176, 4206), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4190, 4206), True, 'import tensorflow as tf\n'), ((4221, 4251), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4235, 4251), True, 'import tensorflow as tf\n'), ((4266, 4296), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4280, 4296), True, 'import tensorflow as tf\n'), ((4596, 4625), 'tensorflow.summary.merge', 'tf.summary.merge', (['summary_trn'], {}), '(summary_trn)\n', (4612, 4625), True, 'import tensorflow as tf\n'), ((1539, 1586), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, num_choices]'], {}), '(tf.float32, 
[None, num_choices])\n', (1553, 1586), True, 'import tensorflow as tf\n'), ((1835, 1867), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (1849, 1867), True, 'import tensorflow as tf\n'), ((2070, 2108), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (2084, 2108), True, 'import tensorflow as tf\n'), ((2288, 2313), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(0.0)'], {}), '(0.0)\n', (2308, 2313), True, 'import tensorflow as tf\n'), ((2877, 2935), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', 'cfg.TRAIN.GRAD_MAX_NORM'], {}), '(gradients, cfg.TRAIN.GRAD_MAX_NORM)\n', (2899, 2935), True, 'import tensorflow as tf\n'), ((3175, 3211), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[solver_op]'], {}), '([solver_op])\n', (3198, 3211), True, 'import tensorflow as tf\n'), ((3228, 3244), 'tensorflow.group', 'tf.group', (['ema_op'], {}), '(ema_op)\n', (3236, 3244), True, 'import tensorflow as tf\n'), ((3477, 3534), 'os.path.join', 'os.path.join', (['snapshot_dir', "('%08d' % cfg.TRAIN.START_ITER)"], {}), "(snapshot_dir, '%08d' % cfg.TRAIN.START_ITER)\n", (3489, 3534), False, 'import os\n'), ((3881, 3918), 'os.path.join', 'os.path.join', (['snapshot_dir', '"""cfg.npy"""'], {}), "(snapshot_dir, 'cfg.npy')\n", (3893, 3918), False, 'import os\n'), ((3920, 3933), 'numpy.array', 'np.array', (['cfg'], {}), '(cfg)\n', (3928, 3933), True, 'import numpy as np\n'), ((4090, 4112), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (4110, 4112), True, 'import tensorflow as tf\n'), ((4333, 4375), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss/vqa"""', 'loss_vqa_ph'], {}), "('loss/vqa', loss_vqa_ph)\n", (4350, 4375), True, 'import tensorflow as tf\n'), ((4396, 4444), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss/layout"""', 'loss_layout_ph'], {}), "('loss/layout', 
loss_layout_ph)\n", (4413, 4444), True, 'import tensorflow as tf\n'), ((4465, 4507), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss/rec"""', 'loss_rec_ph'], {}), "('loss/rec', loss_rec_ph)\n", (4482, 4507), True, 'import tensorflow as tf\n'), ((4528, 4579), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""eval/vqa/accuracy"""', 'accuracy_ph'], {}), "('eval/vqa/accuracy', accuracy_ph)\n", (4545, 4579), True, 'import tensorflow as tf\n'), ((5551, 5584), 'numpy.argmax', 'np.argmax', (['vqa_scores_val'], {'axis': '(1)'}), '(vqa_scores_val, axis=1)\n', (5560, 5584), True, 'import numpy as np\n'), ((5600, 5638), 'numpy.mean', 'np.mean', (['(vqa_predictions == vqa_labels)'], {}), '(vqa_predictions == vqa_labels)\n', (5607, 5638), True, 'import numpy as np\n'), ((1907, 2009), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'model.vqa_scores', 'labels': 'answer_label_batch'}), '(logits=model.vqa_scores,\n labels=answer_label_batch)\n', (1953, 2009), True, 'import tensorflow as tf\n'), ((2151, 2253), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'model.module_logits', 'labels': 'gt_layout_batch'}), '(logits=model.module_logits,\n labels=gt_layout_batch)\n', (2197, 2253), True, 'import tensorflow as tf\n'), ((3655, 3688), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3686, 3688), True, 'import tensorflow as tf\n'), ((6502, 6551), 'os.path.join', 'os.path.join', (['snapshot_dir', "('%08d' % (n_iter + 1))"], {}), "(snapshot_dir, '%08d' % (n_iter + 1))\n", (6514, 6551), False, 'import os\n'), ((1700, 1794), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'model.vqa_scores', 'labels': 'soft_score_batch'}), '(logits=model.vqa_scores, labels=\n soft_score_batch)\n', (1739, 1794), True, 'import 
tensorflow as tf\n'), ((367, 413), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': 'cfg.GPU_MEM_GROWTH'}), '(allow_growth=cfg.GPU_MEM_GROWTH)\n', (380, 413), True, 'import tensorflow as tf\n')] |
import argparse
import datetime as dt
import json
import os
import sys
from typing import List
import matplotlib
import numpy as np
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QLabel, QComboBox
from astropy import units
from astropy.coordinates import concatenate
from matplotlib import colors
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, \
NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib.gridspec import GridSpec
from sunpy import log
from sunpy.coordinates import get_horizons_coord
from sunpy.map import Map
from gcs.geometry import gcs_mesh_sunpy, apex_radius
from gcs.utils.helioviewer import get_helioviewer_client
from gcs.utils.widgets import SliderAndTextbox
matplotlib.use('Qt5Agg')
hv = get_helioviewer_client()
straight_vertices, front_vertices, circle_vertices = 10, 10, 20
filename = 'gcs_params.json'
draw_modes = ['off', 'point cloud', 'grid']
# disable sunpy warnings
log.setLevel('ERROR')
def running_difference(a, b):
return Map(b.data * 1.0 - a.data * 1.0, b.meta)
def load_image(spacecraft: str, detector: str, date: dt.datetime, runndiff: bool):
if spacecraft == 'STA':
observatory = 'STEREO_A'
instrument = 'SECCHI'
if detector not in ['COR1', 'COR2']:
raise ValueError(f'unknown detector {detector} for spacecraft {spacecraft}.')
elif spacecraft == 'STB':
observatory = 'STEREO_B'
instrument = 'SECCHI'
if detector not in ['COR1', 'COR2']:
raise ValueError(f'unknown detector {detector} for spacecraft {spacecraft}.')
elif spacecraft == 'SOHO':
observatory = 'SOHO'
instrument = 'LASCO'
if detector not in ['C2', 'C3']:
raise ValueError(f'unknown detector {detector} for spacecraft {spacecraft}.')
else:
raise ValueError(f'unknown spacecraft: {spacecraft}')
f = download_helioviewer(date, observatory, instrument, detector)
if runndiff:
f2 = download_helioviewer(date - dt.timedelta(hours=1), observatory, instrument, detector)
return running_difference(f2, f)
else:
return f
def download_helioviewer(date, observatory, instrument, detector):
file = hv.download_jp2(date, observatory=observatory, instrument=instrument, detector=detector)
f = Map(file)
if observatory == 'SOHO':
# add observer location information:
soho = get_horizons_coord('SOHO', f.date)
f.meta['HGLN_OBS'] = soho.lon.to('deg').value
f.meta['HGLT_OBS'] = soho.lat.to('deg').value
f.meta['DSUN_OBS'] = soho.radius.to('m').value
return f
def save_params(params):
with open(filename, 'w') as file:
json.dump(params, file)
def load_params():
if os.path.exists(filename):
with open(filename) as file:
return json.load(file)
else:
# start with default values
return {
'half_angle': 25,
'height': 10,
'kappa': 0.25,
'lat': 0,
'lon': 0,
'tilt': 0
}
class GCSGui(QtWidgets.QMainWindow):
def __init__(self, date: dt.datetime, spacecraft: List[str], runndiff: bool = False,
detector_stereo: str = 'COR2', detector_soho='C2'):
super().__init__()
self._spacecraft = spacecraft
self._date = date
self._runndiff = runndiff
self._detector_stereo = detector_stereo
self._detector_soho = detector_soho
self._root = QtWidgets.QWidget()
self.setCentralWidget(self._root)
self._mainlayout = QtWidgets.QHBoxLayout(self._root)
self._figure = Figure(figsize=(5 * len(spacecraft), 5))
canvas = FigureCanvas(self._figure)
self._mainlayout.addWidget(canvas, stretch=5)
self.addToolBar(NavigationToolbar(canvas, self))
self._current_draw_mode = None
self.create_widgets()
self.make_plot()
self.show()
def create_widgets(self):
params = load_params()
self._s_half_angle = SliderAndTextbox('Half angle [°]', 0, 90, params['half_angle'])
self._s_height = SliderAndTextbox('Height [Rs]', 0, 24, params['height'])
self._s_kappa = SliderAndTextbox('κ', 0, 1, params['kappa'])
self._s_lat = SliderAndTextbox('Latitude [°]', -90, 90, params['lat'])
self._s_lon = SliderAndTextbox('Longitude [°]', 0, 360, params['lon'])
self._s_tilt = SliderAndTextbox('Tilt angle [°]', -90, 90, params['tilt'])
sliders = self._s_half_angle, self._s_height, self._s_kappa, self._s_lat, self._s_lon, self._s_tilt
layout = QtWidgets.QVBoxLayout()
for slider in sliders:
layout.addWidget(slider)
slider.valueChanged.connect(self.plot_mesh)
# add checkbox to enable or disable plot
cb_mode_label = QLabel()
cb_mode_label.setText('Display mode')
layout.addWidget(cb_mode_label)
self._cb_mode = QComboBox()
for mode in draw_modes:
self._cb_mode.addItem(mode)
self._cb_mode.setCurrentIndex(2)
layout.addWidget(self._cb_mode)
self._cb_mode.currentIndexChanged.connect(self.plot_mesh)
# add labels for useful quantities
self._l_radius = QLabel()
layout.addWidget(self._l_radius)
b_save = QtWidgets.QPushButton('Save')
b_save.clicked.connect(self.save)
layout.addWidget(b_save)
layout.addStretch(1)
self._mainlayout.addLayout(layout, stretch=1)
def make_plot(self):
fig = self._figure
spacecraft = self._spacecraft
date = self._date
runndiff = self._runndiff
spec = GridSpec(ncols=len(spacecraft), nrows=1, figure=fig)
axes = []
images = []
self._mesh_plots = []
for i, sc in enumerate(spacecraft):
detector = self._detector_stereo if sc in ['STA', 'STB'] else self._detector_soho
image = load_image(sc, detector, date, runndiff)
images.append(image)
ax = fig.add_subplot(spec[:, i], projection=image)
axes.append(ax)
image.plot(axes=ax, cmap='Greys_r', norm=colors.Normalize(vmin=-30, vmax=30) if runndiff else None)
if i == len(spacecraft) - 1:
# for last plot: move labels to the right
ax.coords[1].set_ticks_position('r')
ax.coords[1].set_ticklabel_position('r')
ax.coords[1].set_axislabel_position('r')
self._bg = fig.canvas.copy_from_bbox(fig.bbox)
self._images = images
self._axes = axes
self.plot_mesh()
fig.canvas.draw()
fig.tight_layout()
def plot_mesh(self):
fig = self._figure
half_angle = np.radians(self._s_half_angle.val)
height = self._s_height.val
kappa = self._s_kappa.val
lat = np.radians(self._s_lat.val)
lon = np.radians(self._s_lon.val)
tilt = np.radians(self._s_tilt.val)
# calculate and show quantities
ra = apex_radius(half_angle, height, kappa)
self._l_radius.setText('Apex cross-section radius: {:.2f} Rs'.format(ra))
# check if plot should be shown
draw_mode = draw_modes[self._cb_mode.currentIndex()]
if draw_mode != self._current_draw_mode:
for plot in self._mesh_plots:
plot.remove()
self._mesh_plots = []
fig.canvas.draw()
self._current_draw_mode = draw_mode
if draw_mode == 'off':
return
# create GCS mesh
mesh = gcs_mesh_sunpy(self._date, half_angle, height, straight_vertices, front_vertices, circle_vertices,
kappa, lat, lon, tilt)
if draw_mode == 'grid':
mesh2 = mesh.reshape((front_vertices + straight_vertices) * 2 - 3, circle_vertices).T.flatten()
mesh = concatenate([mesh, mesh2])
for i, (image, ax) in enumerate(zip(self._images, self._axes)):
if len(self._mesh_plots) <= i:
# new plot
style = {
'grid': '-',
'point cloud': '.'
}[draw_mode]
params = {
'grid': dict(lw=0.5),
'point cloud': dict(ms=2)
}[draw_mode]
p = ax.plot_coord(mesh, style, color='blue', scalex=False, scaley=False, **params)[0]
self._mesh_plots.append(p)
else:
# update plot
p = self._mesh_plots[i]
frame0 = mesh.frame.transform_to(image.coordinate_frame)
xdata = frame0.spherical.lon.to_value(units.deg)
ydata = frame0.spherical.lat.to_value(units.deg)
p.set_xdata(xdata)
p.set_ydata(ydata)
ax.draw_artist(p)
fig.canvas.draw()
def get_params_dict(self):
return {
'half_angle': self._s_half_angle.val,
'height': self._s_height.val,
'kappa': self._s_kappa.val,
'lat': self._s_lat.val,
'lon': self._s_lon.val,
'tilt': self._s_tilt.val
}
def save(self):
save_params(self.get_params_dict())
self.close()
def main():
parser = argparse.ArgumentParser(description='Run the GCS GUI', prog='gcs_gui')
parser.add_argument('date', type=lambda d: dt.datetime.strptime(d, '%Y-%m-%d %H:%M'),
help='Date and time for the coronagraph images. Format: "yyyy-mm-dd HH:MM" (with quotes). '
'The closest available image will be loaded for each spacecraft.')
parser.add_argument('spacecraft', type=str, nargs='+', choices=['STA', 'STB', 'SOHO'],
help='List of spacecraft to use.')
parser.add_argument('-rd', '--running-difference', action='store_true',
help='Whether to use running difference images')
parser.add_argument('-soho', type=str, default='C2', choices=['C2', 'C3'],
help='Which coronagraph to use at SOHO/LASCO.')
parser.add_argument('-stereo', type=str, default='COR2', choices=['COR1', 'COR2'],
help='Which coronagraph to use at STEREO.')
args = parser.parse_args()
qapp = QtWidgets.QApplication(sys.argv)
app = GCSGui(args.date, args.spacecraft, args.running_difference, detector_stereo=args.stereo,
detector_soho=args.soho)
app.show()
qapp.exec_()
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QVBoxLayout",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QWidget",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"matplotlib... | [((771, 795), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (785, 795), False, 'import matplotlib\n'), ((802, 826), 'gcs.utils.helioviewer.get_helioviewer_client', 'get_helioviewer_client', ([], {}), '()\n', (824, 826), False, 'from gcs.utils.helioviewer import get_helioviewer_client\n'), ((991, 1012), 'sunpy.log.setLevel', 'log.setLevel', (['"""ERROR"""'], {}), "('ERROR')\n", (1003, 1012), False, 'from sunpy import log\n'), ((1056, 1096), 'sunpy.map.Map', 'Map', (['(b.data * 1.0 - a.data * 1.0)', 'b.meta'], {}), '(b.data * 1.0 - a.data * 1.0, b.meta)\n', (1059, 1096), False, 'from sunpy.map import Map\n'), ((2361, 2370), 'sunpy.map.Map', 'Map', (['file'], {}), '(file)\n', (2364, 2370), False, 'from sunpy.map import Map\n'), ((2794, 2818), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2808, 2818), False, 'import os\n'), ((9408, 9478), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run the GCS GUI"""', 'prog': '"""gcs_gui"""'}), "(description='Run the GCS GUI', prog='gcs_gui')\n", (9431, 9478), False, 'import argparse\n'), ((10428, 10460), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (10450, 10460), False, 'from PyQt5 import QtWidgets\n'), ((2460, 2494), 'sunpy.coordinates.get_horizons_coord', 'get_horizons_coord', (['"""SOHO"""', 'f.date'], {}), "('SOHO', f.date)\n", (2478, 2494), False, 'from sunpy.coordinates import get_horizons_coord\n'), ((2742, 2765), 'json.dump', 'json.dump', (['params', 'file'], {}), '(params, file)\n', (2751, 2765), False, 'import json\n'), ((3545, 3564), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (3562, 3564), False, 'from PyQt5 import QtWidgets\n'), ((3634, 3667), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self._root'], {}), '(self._root)\n', (3655, 3667), False, 'from PyQt5 import QtWidgets\n'), ((3750, 3776), 
'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self._figure'], {}), '(self._figure)\n', (3762, 3776), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((4095, 4158), 'gcs.utils.widgets.SliderAndTextbox', 'SliderAndTextbox', (['"""Half angle [°]"""', '(0)', '(90)', "params['half_angle']"], {}), "('Half angle [°]', 0, 90, params['half_angle'])\n", (4111, 4158), False, 'from gcs.utils.widgets import SliderAndTextbox\n'), ((4184, 4240), 'gcs.utils.widgets.SliderAndTextbox', 'SliderAndTextbox', (['"""Height [Rs]"""', '(0)', '(24)', "params['height']"], {}), "('Height [Rs]', 0, 24, params['height'])\n", (4200, 4240), False, 'from gcs.utils.widgets import SliderAndTextbox\n'), ((4265, 4309), 'gcs.utils.widgets.SliderAndTextbox', 'SliderAndTextbox', (['"""κ"""', '(0)', '(1)', "params['kappa']"], {}), "('κ', 0, 1, params['kappa'])\n", (4281, 4309), False, 'from gcs.utils.widgets import SliderAndTextbox\n'), ((4332, 4388), 'gcs.utils.widgets.SliderAndTextbox', 'SliderAndTextbox', (['"""Latitude [°]"""', '(-90)', '(90)', "params['lat']"], {}), "('Latitude [°]', -90, 90, params['lat'])\n", (4348, 4388), False, 'from gcs.utils.widgets import SliderAndTextbox\n'), ((4411, 4467), 'gcs.utils.widgets.SliderAndTextbox', 'SliderAndTextbox', (['"""Longitude [°]"""', '(0)', '(360)', "params['lon']"], {}), "('Longitude [°]', 0, 360, params['lon'])\n", (4427, 4467), False, 'from gcs.utils.widgets import SliderAndTextbox\n'), ((4491, 4550), 'gcs.utils.widgets.SliderAndTextbox', 'SliderAndTextbox', (['"""Tilt angle [°]"""', '(-90)', '(90)', "params['tilt']"], {}), "('Tilt angle [°]', -90, 90, params['tilt'])\n", (4507, 4550), False, 'from gcs.utils.widgets import SliderAndTextbox\n'), ((4677, 4700), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (4698, 4700), False, 'from PyQt5 import QtWidgets\n'), ((4899, 4907), 'PyQt5.QtWidgets.QLabel', 'QLabel', 
([], {}), '()\n', (4905, 4907), False, 'from PyQt5.QtWidgets import QLabel, QComboBox\n'), ((5018, 5029), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (5027, 5029), False, 'from PyQt5.QtWidgets import QLabel, QComboBox\n'), ((5318, 5326), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (5324, 5326), False, 'from PyQt5.QtWidgets import QLabel, QComboBox\n'), ((5386, 5415), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Save"""'], {}), "('Save')\n", (5407, 5415), False, 'from PyQt5 import QtWidgets\n'), ((6832, 6866), 'numpy.radians', 'np.radians', (['self._s_half_angle.val'], {}), '(self._s_half_angle.val)\n', (6842, 6866), True, 'import numpy as np\n'), ((6951, 6978), 'numpy.radians', 'np.radians', (['self._s_lat.val'], {}), '(self._s_lat.val)\n', (6961, 6978), True, 'import numpy as np\n'), ((6993, 7020), 'numpy.radians', 'np.radians', (['self._s_lon.val'], {}), '(self._s_lon.val)\n', (7003, 7020), True, 'import numpy as np\n'), ((7036, 7064), 'numpy.radians', 'np.radians', (['self._s_tilt.val'], {}), '(self._s_tilt.val)\n', (7046, 7064), True, 'import numpy as np\n'), ((7119, 7157), 'gcs.geometry.apex_radius', 'apex_radius', (['half_angle', 'height', 'kappa'], {}), '(half_angle, height, kappa)\n', (7130, 7157), False, 'from gcs.geometry import gcs_mesh_sunpy, apex_radius\n'), ((7675, 7800), 'gcs.geometry.gcs_mesh_sunpy', 'gcs_mesh_sunpy', (['self._date', 'half_angle', 'height', 'straight_vertices', 'front_vertices', 'circle_vertices', 'kappa', 'lat', 'lon', 'tilt'], {}), '(self._date, half_angle, height, straight_vertices,\n front_vertices, circle_vertices, kappa, lat, lon, tilt)\n', (7689, 7800), False, 'from gcs.geometry import gcs_mesh_sunpy, apex_radius\n'), ((2876, 2891), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2885, 2891), False, 'import json\n'), ((3855, 3886), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['canvas', 'self'], {}), '(canvas, self)\n', (3872, 3886), True, 
'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((7986, 8012), 'astropy.coordinates.concatenate', 'concatenate', (['[mesh, mesh2]'], {}), '([mesh, mesh2])\n', (7997, 8012), False, 'from astropy.coordinates import concatenate\n'), ((2058, 2079), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (2070, 2079), True, 'import datetime as dt\n'), ((9526, 9567), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['d', '"""%Y-%m-%d %H:%M"""'], {}), "(d, '%Y-%m-%d %H:%M')\n", (9546, 9567), True, 'import datetime as dt\n'), ((6241, 6276), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(-30)', 'vmax': '(30)'}), '(vmin=-30, vmax=30)\n', (6257, 6276), False, 'from matplotlib import colors\n')] |
"""Plot posterior distributions of microlensing events.
Creates "pointilism" plots which show the discrete posterior,
and heatmap plots which show the true posterior.
"""
from cProfile import label
import MulensModel as mm
import math
from copy import deepcopy
import sampling
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.stats import chi2
import corner
import light_curve_simulation
import seaborn as sns
import matplotlib.ticker as ticker
import autocorrelation
def flux(m, theta, ts, caustics = None, label = None, color = None, alpha = None, ls = None, lw = None):
    """Plot the magnification produced by a microlensing event.

    Draws either the magnification light curve over *ts*, or — when
    *caustics* is truthy — the source trajectory together with the
    caustic curves.

    Args:
        m: [int] Model index (0 = single lens, 1 = binary lens).
        theta: [state] Model parameters; theta.truth[0] scales the
            magnification, the remainder are the model parameters.
        ts: [list] Range [t_start, t_stop] to plot over.
        caustics: [optional, bool] Whether the plot should be of the
            caustic curves instead of the flux.
    """
    if m == 0:
        parameter_keys = ['t_0', 'u_0', 't_E']
        model = mm.Model(dict(zip(parameter_keys, theta.truth[1:])))
    elif m == 1:
        parameter_keys = ['t_0', 'u_0', 't_E', 'q', 's', 'alpha']
        model = mm.Model(dict(zip(parameter_keys, theta.truth[1:])))
    model.set_magnification_methods([ts[0], 'point_source', ts[1]])
    epochs = np.linspace(ts[0], ts[1], 720)
    if caustics is None:
        # Scale the raw magnification by the flux parameter theta.truth[0].
        magnified = (model.magnification(epochs) - 1) * theta.truth[0] + 1
        plt.plot(epochs, magnified, color = color, label = label, alpha = alpha, ls = ls, lw = lw)
    elif caustics > 0:
        model.plot_trajectory(t_start = ts[0], t_stop = ts[1], color = 'black', linewidth = 1, ls='-', alpha = alpha, arrow = False)
        model.plot_caustics(color = color, s = 0.25, marker = 'o', n_points = 25000, label = label)
    return
def fitted_flux(m, theta, data, ts, label = None, color = None, alpha = None, ls = None, lw = None):
    """Plot the flux curve implied by fitted microlensing parameters.

    Args:
        m: [int] Model index (0 = single lens, 1 = binary lens).
        theta: [state] Fitted model parameters; theta.truth[0] scales the
            magnification into flux.
        data: Unused here; kept for interface compatibility (a least-squares
            flux fit against data.flux was used previously, see comment below).
        ts: [list] Range [t_start, t_stop] to plot over.
    """
    if m == 0:
        model = mm.Model(dict(zip(['t_0', 'u_0', 't_E'], theta.truth[1:])))
    elif m == 1:
        model = mm.Model(dict(zip(['t_0', 'u_0', 't_E', 'q', 's', 'alpha'], theta.truth[1:])))
    model.set_magnification_methods([ts[0], 'point_source', ts[1]])
    epochs = np.linspace(ts[0], ts[1], 720)
    magnification = model.magnification(epochs)
    # Scale the proposed magnification into flux using the fitted flux
    # parameter. (Previously fitted as a least-squares solution:
    #  F = light_curve_simulation.least_squares_signal(a, data.flux))
    flux_curve = (magnification - 1) * theta.truth[0] + 1
    plt.plot(epochs, flux_curve, color = color, label = label, alpha = alpha, ls = ls, lw = lw)
    return
def style():
    """Apply the shared matplotlib styling used by the plots in this module."""
    # Font settings are applied before the style sheet and everything else
    # after it, preserving the original ordering of the rcParams writes.
    plt.rcParams.update({
        "font.family": "sans-serif",
        "font.size": 15,  # previously 12
    })
    plt.style.use('seaborn-bright')
    plt.rcParams.update({
        "legend.edgecolor": '0',
        "legend.framealpha": 1,
        "legend.title_fontsize": 10,
        "legend.fontsize": 9,
        "grid.linestyle": 'dashed',
        "grid.alpha": 0.25,
    })
    # Disable the axis offset notation so tick labels show absolute values.
    plt.rc('axes.formatter', useoffset=False)
    return
def broccoli(joint_model_chain, supset_states, subset_states, surrogate_supset_states, surrogate_subset_states, symbols, ranges, curves, event_params = None, name = '', dpi = 100):
    """Plot the joint posterior surface of two nested models as one figure.

    Builds a corner-style grid: the diagonal shows marginal histograms of
    the exact (blue) and surrogate (orange) chains, the lower triangle shows
    2D KDE contours of the larger (6-parameter) model, and the upper-left
    3x3 mirror shows KDE contours of the smaller (3-parameter) model.
    The figure is saved to 'results/<name>-broccoli.png'.

    Args:
        joint_model_chain: [chain] Generalised chain with states from both
            models (currently unused in the body).
        supset_states: Samples of the larger model, indexed [parameter, sample].
        subset_states: Samples of the smaller model, indexed [parameter, sample].
        surrogate_supset_states: Surrogate samples for the larger model.
        surrogate_subset_states: Surrogate samples for the smaller model.
        symbols: [list] Strings to label plots with.
        ranges: [list] Per-parameter (min, max) axis limits.
        curves: Tuple (single_theta, binary_theta, data) providing marker
            positions for the fitted single/binary parameters.
        event_params: [state, optional] True event parameters, marked in black.
        name: [str] File-name prefix for the saved figure.
        dpi: [int] Resolution of the saved figure.
    """
    # Fonts/visibility.
    #lr = 45 # label rotation
    n_ticks = 5 # tick labels
    n_m_ticks = 5
    axis_title_size = 16
    axis_tick_size = 10
    # Model sizing.
    N_dim = 6 #supset_model.D
    n_dim = 3 #subset_model.D
    style()
    figure = corner.corner(supset_states.T) # Use corner for layout/sizing.
    figure.subplots_adjust(wspace = 0, hspace = 0)
    # Fonts/visibility.
    # plt.rcParams['font.size'] = 12
    # plt.rcParams['axes.titlesize'] = 20
    # plt.rcParams['axes.labelsize'] = 20
    # Extract axes into an N_dim x N_dim grid matching the corner layout.
    axes = np.array(figure.axes).reshape((N_dim, N_dim))
    #print(supset_surrogate.samples.numpy())
    single_theta, binary_theta, data = curves
    # Loop diagonal: marginal histograms for each parameter.
    for i in range(N_dim):
        ax = axes[i, i]
        ax.cla()
        nbins = 20
        if i < 3:
            # Parameters shared by both models: pool samples from both chains.
            ax.hist(np.concatenate((surrogate_supset_states[i, :], surrogate_subset_states[i, :])), bins = nbins, density = True, color = 'tab:orange', alpha = 1.0, histtype='step', range=ranges[i])
            ax.hist(np.concatenate((supset_states[i, :], subset_states[i, :])), bins = nbins, density = True, color = 'tab:blue', alpha = 1.0, histtype='step', range=ranges[i])
            ax.axvline(single_theta.scaled[i+1], color='tab:green', ls='-', lw=1)
        else:
            # Binary-only parameters: only the larger model's samples exist.
            ax.hist(surrogate_supset_states[i, :], bins = nbins, density = True, color = 'tab:orange', alpha = 1.0, histtype='step', range=ranges[i])
            ax.hist(supset_states[i, :], bins = nbins, density = True, color = 'tab:blue', alpha = 1.0, histtype='step', range=ranges[i])
            if event_params is not None:
                ax.axvline(event_params.scaled[i+1], color = 'black', ls = '-', lw = 2)
            ax.axvline(binary_theta.scaled[i+1], color='tab:purple', ls='-', lw=1)
        ax.set_xlim(ranges[i])
        ax.tick_params(which='both', top=False, direction='in')
        ax.minorticks_on()
        ax.yaxis.set_minor_locator(ticker.AutoMinorLocator(n_m_ticks))
        ax.xaxis.set_minor_locator(ticker.AutoMinorLocator(n_m_ticks))
        ax.tick_params(which='major', length=8)
        ax.tick_params(which='minor', length=4)
        ax.yaxis.set_major_locator(plt.MaxNLocator(n_ticks))
        ax.xaxis.set_major_locator(plt.MaxNLocator(n_ticks))
        if i == 0: # First diagonal tile.
            #ax.set_ylabel(symbols[i], fontsize = axis_title_size)
            #ax.tick_params(axis='y', labelrotation = 45)
            ax.axes.get_xaxis().set_ticklabels([])
            ax.axes.get_yaxis().set_ticklabels([])
            ax.set_title('(a)', loc='left', fontsize=20)
        elif i == N_dim - 1: # Last diagonal tile.
            ax.set_xlabel(symbols[i], fontsize = axis_title_size)
            ax.axes.get_yaxis().set_ticklabels([])
            ax.tick_params(axis = 'x', labelrotation = 45, labelsize = axis_tick_size)
            plt.setp(ax.xaxis.get_majorticklabels(), ha="right", rotation_mode="anchor")
        else:
            ax.axes.get_xaxis().set_ticklabels([])
            ax.axes.get_yaxis().set_ticklabels([])
            ax.axes.get_yaxis().set_ticks([])
        ax.axes.get_yaxis().set_ticklabels([])
        ax.axes.get_yaxis().set_ticks([])
        #ax.axes.get_xaxis().set_ticklabels([])
        #ax.axes.get_xaxis().set_ticks([])
    # Loop lower triangular: pairwise KDE contours of the larger model.
    for yi in range(N_dim):
        for xi in range(yi):
            ax = axes[yi, xi]
            ax.cla()
            #if yi<3 and xi<3:
            #    ax.scatter(subset_states[xi, :], subset_states[yi, :], c = 'white', alpha = 0.0, marker = ".", s = 75, linewidth = 0.0)
            #ax.scatter(supset_states[xi, :], supset_states[yi, :], c = np.linspace(0.0, 1.0, len(supset_states[yi, :])), cmap = plt.get_cmap('RdBu'), alpha = 0.05, marker = "o", s = 25, linewidth = 0.0)
            #xlim = ax.get_xlim()
            #ylim = ax.get_ylim()
            # levels 0.393/0.865/0.989 are presumably the 1/2/3-sigma mass
            # fractions of a 2D Gaussian — TODO confirm.
            sns.kdeplot(x=surrogate_supset_states[xi, :], y=surrogate_supset_states[yi, :], ax=ax, levels=[0.393, 0.865, 0.989], color='tab:orange', bw_adjust=1.2, clip=[ranges[xi], ranges[yi]])
            sns.kdeplot(x=supset_states[xi, :], y=supset_states[yi, :], ax=ax, levels=[0.393, 0.865, 0.989], color='tab:blue', bw_adjust=1.2, clip=[ranges[xi], ranges[yi]])
            ax.scatter(event_params.scaled[xi+1], event_params.scaled[yi+1], color = 'black', alpha = 1.0, marker = "D", s = 50, linewidth = 1, zorder=9)
            #ax.axvline(event_params.scaled[xi], color = 'black', ls = '-', lw = 2)
            #ax.axhline(event_params.scaled[yi], color = 'black', ls = '-', lw = 2)
            ax.scatter(binary_theta.scaled[xi+1], binary_theta.scaled[yi+1], color = 'tab:purple', alpha = 1.0, marker = "8", s = 50, linewidth = 1, zorder=10)
            ax.set_xlim(ranges[xi])
            ax.set_ylim(ranges[yi])
            ax.tick_params(which='both', top=True, right=True, direction='in')
            ax.minorticks_on()
            ax.tick_params(which='major', length=8)
            ax.tick_params(which='minor', length=4)
            ax.yaxis.set_minor_locator(ticker.AutoMinorLocator(n_m_ticks))
            ax.xaxis.set_minor_locator(ticker.AutoMinorLocator(n_m_ticks))
            ax.yaxis.set_major_locator(plt.MaxNLocator(n_ticks))
            ax.xaxis.set_major_locator(plt.MaxNLocator(n_ticks))
            if yi == N_dim - 1: # Bottom row.
                ax.set_xlabel(symbols[xi], fontsize = axis_title_size)
                ax.tick_params(axis = 'x', labelrotation = 45, labelsize = axis_tick_size)
                plt.setp(ax.xaxis.get_majorticklabels(), ha="right", rotation_mode="anchor")
            else:
                ax.axes.get_xaxis().set_ticklabels([])
            if xi == 0: # First column.
                ax.set_ylabel(symbols[yi], fontsize = axis_title_size)
                ax.tick_params(axis = 'y', labelrotation = 45, labelsize = axis_tick_size)
                plt.setp(ax.yaxis.get_majorticklabels(), va="bottom", rotation_mode="anchor")
            else:
                ax.axes.get_yaxis().set_ticklabels([])
            # Add upper triangular plots (mirror of the smaller model's pairs).
            if xi < n_dim and yi < n_dim:
                # Acquire axes and plot: reuse an existing corner gridspec so
                # the mirrored tile lines up with the lower-triangle grid.
                axs = figure.get_axes()[4].get_gridspec()
                axt = figure.add_subplot(axs[xi, yi])
                #axt.scatter(subset_states[yi, :], subset_states[xi, :], c = np.linspace(0.0, 1.0, len(subset_states[yi, :])), cmap = plt.get_cmap('RdBu'), alpha = 0.05, marker = "o", s = 25, linewidth = 0.0)
                sns.kdeplot(x=surrogate_subset_states[yi, :], y=surrogate_subset_states[xi, :], ax=axt, levels=[0.393, 0.865, 0.989], color='tab:orange', bw_adjust=1.2, clip=[ranges[yi], ranges[xi]])
                sns.kdeplot(x=subset_states[yi, :], y=subset_states[xi, :], ax=axt, levels=[0.393, 0.865, 0.989], color='tab:blue', bw_adjust=1.2, clip=[ranges[yi], ranges[xi]])
                axt.scatter(x=single_theta.scaled[yi+1], y=single_theta.scaled[xi+1], color = 'tab:green', alpha = 1.0, marker = "8", s = 50, linewidth = 1, zorder=9)
                axt.set_xlim(ranges[yi])
                axt.set_ylim(ranges[xi])
                axt.tick_params(which='both', top=True, right=True, direction='in')
                axt.minorticks_on()
                axt.yaxis.set_minor_locator(ticker.AutoMinorLocator(n_m_ticks))
                axt.xaxis.set_minor_locator(ticker.AutoMinorLocator(n_m_ticks))
                axt.tick_params(which='major', length=8)
                axt.tick_params(which='minor', length=4)
                axt.yaxis.set_major_locator(plt.MaxNLocator(n_ticks))
                axt.xaxis.set_major_locator(plt.MaxNLocator(n_ticks))
                if yi == n_dim - 1: # Last column.
                    axt.set_ylabel(symbols[xi], fontsize = axis_title_size)
                    axt.yaxis.tick_right()
                    axt.tick_params(which='both', bottom=True, left=True, labelsize = axis_tick_size)
                    axt.yaxis.set_label_position("right")
                    axt.tick_params(axis = 'y', labelrotation = 45, labelsize = axis_tick_size)
                    plt.setp(axt.yaxis.get_majorticklabels(), va="top", rotation_mode="anchor")
                else:
                    axt.axes.get_yaxis().set_ticklabels([])
                if xi == 0: # First row.
                    axt.set_xlabel(symbols[yi], fontsize = axis_title_size)
                    axt.tick_params(axis = 'x', labelrotation = 45, labelsize = axis_tick_size)
                    axt.xaxis.tick_top()
                    axt.tick_params(which='both', bottom=True, left=True, labelsize = axis_tick_size)
                    axt.xaxis.set_label_position("top")
                    plt.setp(axt.xaxis.get_majorticklabels(), ha="left", rotation_mode="anchor")
                else:
                    axt.axes.get_xaxis().set_ticklabels([])
    figure.savefig('results/' + name + '-broccoli.png', bbox_inches = "tight", dpi = dpi, transparent=True)
    figure.clf()
    return
"corner.corner",
"seaborn.kdeplot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.ticker.AutoMinorLocator",
"numpy.array",
"matplotlib.pyplot.rc",
"numpy.linspace",
"matplotlib.pyplot.MaxNLocator",
"numpy.concatenate"
] | [((1164, 1194), 'numpy.linspace', 'np.linspace', (['ts[0]', 'ts[1]', '(720)'], {}), '(ts[0], ts[1], 720)\n', (1175, 1194), True, 'import numpy as np\n'), ((2243, 2273), 'numpy.linspace', 'np.linspace', (['ts[0]', 'ts[1]', '(720)'], {}), '(ts[0], ts[1], 720)\n', (2254, 2273), True, 'import numpy as np\n'), ((2464, 2536), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'F'], {'color': 'color', 'label': 'label', 'alpha': 'alpha', 'ls': 'ls', 'lw': 'lw'}), '(epochs, F, color=color, label=label, alpha=alpha, ls=ls, lw=lw)\n', (2472, 2536), True, 'import matplotlib.pyplot as plt\n'), ((2664, 2695), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-bright"""'], {}), "('seaborn-bright')\n", (2677, 2695), True, 'import matplotlib.pyplot as plt\n'), ((2957, 2998), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes.formatter"""'], {'useoffset': '(False)'}), "('axes.formatter', useoffset=False)\n", (2963, 2998), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3817), 'corner.corner', 'corner.corner', (['supset_states.T'], {}), '(supset_states.T)\n', (3800, 3817), False, 'import corner\n'), ((1567, 1639), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'A'], {'color': 'color', 'label': 'label', 'alpha': 'alpha', 'ls': 'ls', 'lw': 'lw'}), '(epochs, A, color=color, label=label, alpha=alpha, ls=ls, lw=lw)\n', (1575, 1639), True, 'import matplotlib.pyplot as plt\n'), ((4079, 4100), 'numpy.array', 'np.array', (['figure.axes'], {}), '(figure.axes)\n', (4087, 4100), True, 'import numpy as np\n'), ((5487, 5521), 'matplotlib.ticker.AutoMinorLocator', 'ticker.AutoMinorLocator', (['n_m_ticks'], {}), '(n_m_ticks)\n', (5510, 5521), True, 'import matplotlib.ticker as ticker\n'), ((5558, 5592), 'matplotlib.ticker.AutoMinorLocator', 'ticker.AutoMinorLocator', (['n_m_ticks'], {}), '(n_m_ticks)\n', (5581, 5592), True, 'import matplotlib.ticker as ticker\n'), ((5725, 5749), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['n_ticks'], {}), '(n_ticks)\n', (5740, 5749), True, 
'import matplotlib.pyplot as plt\n'), ((5786, 5810), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['n_ticks'], {}), '(n_ticks)\n', (5801, 5810), True, 'import matplotlib.pyplot as plt\n'), ((7449, 7640), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'x': 'surrogate_supset_states[xi, :]', 'y': 'surrogate_supset_states[yi, :]', 'ax': 'ax', 'levels': '[0.393, 0.865, 0.989]', 'color': '"""tab:orange"""', 'bw_adjust': '(1.2)', 'clip': '[ranges[xi], ranges[yi]]'}), "(x=surrogate_supset_states[xi, :], y=surrogate_supset_states[yi,\n :], ax=ax, levels=[0.393, 0.865, 0.989], color='tab:orange', bw_adjust=\n 1.2, clip=[ranges[xi], ranges[yi]])\n", (7460, 7640), True, 'import seaborn as sns\n'), ((7644, 7813), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'x': 'supset_states[xi, :]', 'y': 'supset_states[yi, :]', 'ax': 'ax', 'levels': '[0.393, 0.865, 0.989]', 'color': '"""tab:blue"""', 'bw_adjust': '(1.2)', 'clip': '[ranges[xi], ranges[yi]]'}), "(x=supset_states[xi, :], y=supset_states[yi, :], ax=ax, levels=[\n 0.393, 0.865, 0.989], color='tab:blue', bw_adjust=1.2, clip=[ranges[xi],\n ranges[yi]])\n", (7655, 7813), True, 'import seaborn as sns\n'), ((4367, 4445), 'numpy.concatenate', 'np.concatenate', (['(surrogate_supset_states[i, :], surrogate_subset_states[i, :])'], {}), '((surrogate_supset_states[i, :], surrogate_subset_states[i, :]))\n', (4381, 4445), True, 'import numpy as np\n'), ((4566, 4624), 'numpy.concatenate', 'np.concatenate', (['(supset_states[i, :], subset_states[i, :])'], {}), '((supset_states[i, :], subset_states[i, :]))\n', (4580, 4624), True, 'import numpy as np\n'), ((8628, 8662), 'matplotlib.ticker.AutoMinorLocator', 'ticker.AutoMinorLocator', (['n_m_ticks'], {}), '(n_m_ticks)\n', (8651, 8662), True, 'import matplotlib.ticker as ticker\n'), ((8703, 8737), 'matplotlib.ticker.AutoMinorLocator', 'ticker.AutoMinorLocator', (['n_m_ticks'], {}), '(n_m_ticks)\n', (8726, 8737), True, 'import matplotlib.ticker as ticker\n'), ((8778, 8802), 
'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['n_ticks'], {}), '(n_ticks)\n', (8793, 8802), True, 'import matplotlib.pyplot as plt\n'), ((8843, 8867), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['n_ticks'], {}), '(n_ticks)\n', (8858, 8867), True, 'import matplotlib.pyplot as plt\n'), ((10126, 10318), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'x': 'surrogate_subset_states[yi, :]', 'y': 'surrogate_subset_states[xi, :]', 'ax': 'axt', 'levels': '[0.393, 0.865, 0.989]', 'color': '"""tab:orange"""', 'bw_adjust': '(1.2)', 'clip': '[ranges[yi], ranges[xi]]'}), "(x=surrogate_subset_states[yi, :], y=surrogate_subset_states[xi,\n :], ax=axt, levels=[0.393, 0.865, 0.989], color='tab:orange', bw_adjust\n =1.2, clip=[ranges[yi], ranges[xi]])\n", (10137, 10318), True, 'import seaborn as sns\n'), ((10326, 10497), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'x': 'subset_states[yi, :]', 'y': 'subset_states[xi, :]', 'ax': 'axt', 'levels': '[0.393, 0.865, 0.989]', 'color': '"""tab:blue"""', 'bw_adjust': '(1.2)', 'clip': '[ranges[yi], ranges[xi]]'}), "(x=subset_states[yi, :], y=subset_states[xi, :], ax=axt, levels=\n [0.393, 0.865, 0.989], color='tab:blue', bw_adjust=1.2, clip=[ranges[yi\n ], ranges[xi]])\n", (10337, 10497), True, 'import seaborn as sns\n'), ((10904, 10938), 'matplotlib.ticker.AutoMinorLocator', 'ticker.AutoMinorLocator', (['n_m_ticks'], {}), '(n_m_ticks)\n', (10927, 10938), True, 'import matplotlib.ticker as ticker\n'), ((10984, 11018), 'matplotlib.ticker.AutoMinorLocator', 'ticker.AutoMinorLocator', (['n_m_ticks'], {}), '(n_m_ticks)\n', (11007, 11018), True, 'import matplotlib.ticker as ticker\n'), ((11178, 11202), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['n_ticks'], {}), '(n_ticks)\n', (11193, 11202), True, 'import matplotlib.pyplot as plt\n'), ((11248, 11272), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['n_ticks'], {}), '(n_ticks)\n', (11263, 11272), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
def count_params(net):
    """Print the trainable-parameter count and weight-layer count of *net*.

    A "layer" here is any trainable tensor with more than one dimension,
    i.e. a weight matrix/kernel rather than a bias vector.
    """
    trainable = [p for p in net.parameters() if p.requires_grad]
    total_params = sum(np.prod(p.data.numpy().shape) for p in trainable)
    print("Total number of params", total_params)
    weight_layers = sum(1 for p in trainable if len(p.data.size()) > 1)
    print("Total layers", weight_layers)
def check_nan(x: Tensor) -> None:
    """Assert that ``x`` is neither all-zero nor contains NaNs.

    Fixes the original signature, which was annotated ``-> bool`` while the
    function actually returns ``None``.

    Args:
        x: Tensor to validate; it is detached and moved to the CPU first,
            so it may live on any device and require grad.

    Raises:
        AssertionError: if every element of ``x`` is zero, or if any element
            is NaN. (Note: ``assert`` statements are stripped under ``-O``.)
    """
    arr = x.detach().cpu().numpy()
    # Reject tensors that are entirely zero.
    assert arr.any(), 'tensor is all zeros'
    # Reject tensors containing any NaN entries.
    assert not np.isnan(arr).any(), 'tensor contains NaN values'
def analyse(model: nn.Module):
    """Print summary statistics of the Conv2d/Linear weights in *model*:
    element count, positive-element count, and max/mean absolute value.
    """
    n_total = 0
    n_positive = 0
    max_abs = 0
    abs_total = 0
    for module in model.modules():
        if isinstance(module, (nn.Conv2d, nn.Linear)):
            w = module.weight
            n_total += w.nelement()
            n_positive += w[w > 0].count_nonzero().item()
            w_abs = w.abs()
            max_abs = max(max_abs, w_abs.max().item())
            abs_total += w_abs.sum().item()
    # Mean absolute weight; raises ZeroDivisionError when the model has no
    # Conv2d/Linear layers, matching the original behaviour.
    avg_abs = abs_total / n_total
    # !r reproduces the repr formatting of the original f'{n_total=}' style.
    print(f'n_total={n_total!r}, n_positive={n_positive!r}, max_abs={max_abs!r}, avg_abs={avg_abs!r}')
| [
"numpy.isnan"
] | [((574, 587), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (582, 587), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 00:53:21 2020
@author: guo.1648
"""
# This code is for biggan.
# my testing code:
# Try the NN with the generated images as query, the original dataset as training set,
# to see if the NN can find original images that we found similar to the generated images.
# Note: this code corresponds with FLOWER_128_sub1000, FLOWER_128
# After this testing code (and maybe after the GAN finshes training),
# I will use sample_*.sh script to generating samples.
# Note that I may need to modify the sample.py code and utils.sample_sheet() func
# to save full images, instead of a sheet!!!
# NOT run this! run NN_getDist_testCode_forBiggan.py instead.
import cv2
import os
import numpy as np
#from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestNeighbors
#from skimage import img_as_ubyte
#import torchvision.transforms as transforms
"""
#### for FLOWER_128_sub1000: 1000 images dataset
src_sampleSheetImg = '/scratch/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_sub1000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/fixed_samples38800.jpg'
srcRootDir_originDataImg = '/scratch/BigGAN-PyTorch/imgs/flower_sub1000/'
dstRootDir_viewSampleSheetImgs = '/scratch/BigGAN-PyTorch/imgs/NN_query/FLOWER_128_sub1000/view_sampleSheetImgs/'
dstRootDir_NNmatchResult = '/scratch/BigGAN-PyTorch/imgs/NN_query/FLOWER_128_sub1000/NNmatchResult/'
dstImgName_NNmatchSheet = '/scratch/BigGAN-PyTorch/imgs/NN_query/FLOWER_128_sub1000/NNmatchResultSheet.jpg'
# parameters:
im_size = 128
batch_size = 16 # i.e., the sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for FLOWER_128: 8189 images dataset (the original FLOWER dataset)
src_sampleSheetImg = '/scratch/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/fixed_samples21400.jpg'
srcRootDir_originDataImg = '/scratch/BigGAN-PyTorch/data/flower/jpg/'
dstRootDir_viewSampleSheetImgs = '/scratch/BigGAN-PyTorch/imgs/NN_query/FLOWER_128/view_sampleSheetImgs/'
dstRootDir_NNmatchResult = '/scratch/BigGAN-PyTorch/imgs/NN_query/FLOWER_128/NNmatchResult/'
dstImgName_NNmatchSheet = '/scratch/BigGAN-PyTorch/imgs/NN_query/FLOWER_128/NNmatchResultSheet.jpg'
# parameters:
im_size = 128
batch_size = 48 # i.e., the sample sheet is of 6x8 !!!!:
num_row = 8
num_col = 6
"""
#### for FLOWER_128_sub4000: 4000 images dataset
# Generated BigGAN sample-sheet image that will be split into single tiles.
src_sampleSheetImg = '/scratch/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_sub4000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/fixed_samples8800.jpg'
# newly modified: different from that in FLOWER_128_sub1000 and FLOWER_128:
#srcRootDir_originDataImg = '/scratch/BigGAN-PyTorch/imgs/flower_sub4000/'
# npz archive whose 'imgs' column lists the original dataset image paths.
srcRootDir_imgNpz = '/scratch/BigGAN-PyTorch/FLOWER_128_sub4000_imgs.npz'
# Output locations: debug tiles, per-sample NN matches, and the combined match sheet.
dstRootDir_viewSampleSheetImgs = '/scratch/BigGAN-PyTorch/imgs/NN_query/FLOWER_128_sub4000/samples8800/view_sampleSheetImgs/'
dstRootDir_NNmatchResult = '/scratch/BigGAN-PyTorch/imgs/NN_query/FLOWER_128_sub4000/samples8800/NNmatchResult/'
dstImgName_NNmatchSheet = '/scratch/BigGAN-PyTorch/imgs/NN_query/FLOWER_128_sub4000/samples8800/NNmatchResultSheet.jpg'
# parameters:
im_size = 128  # side length of each (square) sample image in pixels
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7  # grid rows in the sample sheet (last row only partially filled)
num_col = 5  # grid columns in the sample sheet
def dealWith_sampleSheet():
    """Split the generated BigGAN sample sheet into individual sample tiles.

    Reads the sheet image at ``src_sampleSheetImg``, cuts it into a
    num_row x num_col grid and collects the first ``batch_size`` tiles
    in row-major order.

    Returns:
        list: the single sample images (one array per tile).
    """
    sheet = cv2.imread(src_sampleSheetImg)
    sheet_height, sheet_width, _ = sheet.shape
    tile_height = sheet_height // num_row
    tile_width = sheet_width // num_col
    samples = []
    for row in range(num_row):
        for col in range(num_col):
            # Only keep the first batch_size tiles; the trailing grid
            # cells of the last row are padding.
            if len(samples) < batch_size:
                top = row * tile_height
                left = col * tile_width
                tile = sheet[top:top + tile_height, left:left + tile_width, :]
                samples.append(tile)
    return samples
def my_center_crop(origin_img, crop_size):
    """Crop a centred square window of side ``crop_size`` from an image.

    Generalised to accept both colour (H x W x C) and grayscale (H x W)
    arrays; the original unpacking ``y, x, _ = origin_img.shape`` required
    exactly three dimensions.

    Args:
        origin_img: Image array, at least 2-D (height first, width second).
        crop_size: Side length in pixels of the square crop.

    Returns:
        The centred sub-array of shape (crop_size, crop_size[, C]).
    """
    y, x = origin_img.shape[0], origin_img.shape[1]
    startx = x//2-(crop_size//2)
    starty = y//2-(crop_size//2)
    origin_img_centCrop = origin_img[starty:starty+crop_size, startx:startx+crop_size]
    return origin_img_centCrop
def image_to_feature_vector(image):
    """Flatten an (already resized) image into a 1-D raw-pixel vector.

    Returns an independent copy, matching ndarray.flatten() semantics.
    """
    # ravel().copy() is equivalent to flatten(): C-ordered, owns its data.
    return image.ravel().copy()
def generateTrainSet(len_featVec, dim):
    """Build the nearest-neighbour training set from the original dataset.

    Loads the image file list from the npz archive at ``srcRootDir_imgNpz``,
    centre-crops each image to a square, resizes it to ``dim`` and flattens
    it into a raw-pixel feature vector.

    Args:
        len_featVec: Expected length of each feature vector (sanity check).
        dim: (width, height) target size passed to cv2.resize.

    Returns:
        tuple: (features, names) — an (N, len_featVec) array of raw-pixel
        vectors (the feature space) and the matching file names.
    """
    feature_rows = []
    file_names = []
    # newly modified: different from that in FLOWER_128_sub1000 and FLOWER_128:
    # the file names come from the npz archive instead of walking a directory.
    archive = np.load(srcRootDir_imgNpz)
    for filename in list(archive['imgs'][:, 0]):
        img = cv2.imread(filename)
        # Crop the largest centred square, then resize (linear interpolation).
        square = my_center_crop(img, min(img.shape[0], img.shape[1]))
        resized = cv2.resize(square, dim)
        vec = image_to_feature_vector(resized)
        assert(len(vec)==len_featVec)
        feature_rows.append(vec)
        file_names.append(filename)
    return (np.array(feature_rows), file_names)
def combine_matchingResult(match_img_list):
    """Tile the NN match images into a num_row x num_col sheet and save it.

    The sheet mirrors the layout of the generated sample sheet; grid cells
    past ``batch_size`` are left black. Saved to ``dstImgName_NNmatchSheet``.
    """
    tile_h, tile_w, channels = match_img_list[0].shape
    sheet = np.zeros((tile_h * num_row, tile_w * num_col, channels), dtype=np.uint8)
    for idx in range(num_row * num_col):
        # Only the first batch_size cells hold a match image.
        if idx < batch_size:
            row, col = divmod(idx, num_col)
            top = row * tile_h
            left = col * tile_w
            sheet[top:top + tile_h, left:left + tile_w, :] = match_img_list[idx]
    # save this sheet
    cv2.imwrite(dstImgName_NNmatchSheet, sheet)
    return
def query_NN_wrapper(sample_img_list):
    """For each generated sample, find and save its nearest original image.

    Fits a 1-NN index on raw-pixel feature vectors of the original dataset,
    queries every generated sample against it, writes each (sample, match)
    pair side by side into ``dstRootDir_NNmatchResult``, and finally saves
    a combined sheet of all matches.
    """
    # First, build the training set from the original images.
    feat_len = len(image_to_feature_vector(sample_img_list[0]))
    dim = (sample_img_list[0].shape[1], sample_img_list[0].shape[0])
    train_feats, train_names = generateTrainSet(feat_len, dim)
    neigh = NearestNeighbors(n_neighbors=1) # radius=0.4
    neigh.fit(train_feats)
    # Then, query every generated sample against the index.
    matches = []
    for i, sample_img in enumerate(sample_img_list):
        query_vec = image_to_feature_vector(sample_img)
        # Index of the closest original image (distance not needed here).
        best_idx = neigh.kneighbors([query_vec], 1, return_distance=False)[0][0]
        match_imgName = train_names[best_idx]
        match_img = train_feats[best_idx, :].reshape((dim[1], dim[0], 3))
        matches.append(match_img)
        # Save the generated sample and its nearest original side by side.
        pair = cv2.hconcat([sample_img, match_img])
        cv2.imwrite(dstRootDir_NNmatchResult + str(i + 1) + '_' + match_imgName, pair)
    # newly added: also combine the match_img together into a corresponding sheet!
    combine_matchingResult(matches)
    return
if __name__ == '__main__':
    # first, deal with the sample sheet:
    # split the generated BigGAN sample sheet into individual sample images
    sample_img_list = dealWith_sampleSheet()
    """
    # for debug: save the generated sample images to visualize:
    for i in range(len(sample_img_list)):
        single_sample_img = sample_img_list[i]
        cv2.imwrite(dstRootDir_viewSampleSheetImgs+str(i+1)+'.jpg', single_sample_img)
    """
    # finally, query each single_sample_img into original dataset (FLOWER_128_xxx here);
    # also, save the matching results:
    query_NN_wrapper(sample_img_list)
| [
"numpy.load",
"cv2.imwrite",
"numpy.zeros",
"cv2.imread",
"sklearn.neighbors.NearestNeighbors",
"numpy.array",
"cv2.hconcat",
"cv2.resize"
] | [((3661, 3691), 'cv2.imread', 'cv2.imread', (['src_sampleSheetImg'], {}), '(src_sampleSheetImg)\n', (3671, 3691), False, 'import cv2\n'), ((5267, 5293), 'numpy.load', 'np.load', (['srcRootDir_imgNpz'], {}), '(srcRootDir_imgNpz)\n', (5274, 5293), True, 'import numpy as np\n'), ((6665, 6756), 'numpy.zeros', 'np.zeros', (['(single_img_height * num_row, single_img_width * num_col, ch)'], {'dtype': 'np.uint8'}), '((single_img_height * num_row, single_img_width * num_col, ch),\n dtype=np.uint8)\n', (6673, 6756), True, 'import numpy as np\n'), ((7255, 7308), 'cv2.imwrite', 'cv2.imwrite', (['dstImgName_NNmatchSheet', 'match_img_sheet'], {}), '(dstImgName_NNmatchSheet, match_img_sheet)\n', (7266, 7308), False, 'import cv2\n'), ((7688, 7719), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (7704, 7719), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((5826, 5846), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (5836, 5846), False, 'import cv2\n'), ((6032, 6068), 'cv2.resize', 'cv2.resize', (['origin_img_centCrop', 'dim'], {}), '(origin_img_centCrop, dim)\n', (6042, 6068), False, 'import cv2\n'), ((6402, 6431), 'numpy.array', 'np.array', (['all_origin_img_vecs'], {}), '(all_origin_img_vecs)\n', (6410, 6431), True, 'import numpy as np\n'), ((8557, 8600), 'cv2.hconcat', 'cv2.hconcat', (['[single_sample_img, match_img]'], {}), '([single_sample_img, match_img])\n', (8568, 8600), False, 'import cv2\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import time
import json
import numpy as np
import cv2
import random
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
from numpy.linalg import inv
from lib.options import BaseOptions
from lib.mesh_util import save_obj_mesh_with_color, reconstruction
from lib.data import EvalWPoseDataset, EvalDataset
from lib.model import HGPIFuNetwNML, HGPIFuMRNet
from lib.geometry import index
from PIL import Image
# Module-level CLI option parser; reconWrapper() below calls parser.parse(args).
parser = BaseOptions()
def gen_mesh(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):
    """Run PIFu inference on one sample and save a normal-colored OBJ mesh plus a preview PNG.

    :param res: marching-cubes grid resolution forwarded to ``reconstruction``.
    :param net: network exposing ``filter_global``/``filter_local``/``calc_normal``.
    :param cuda: torch device tensors are moved to.
    :param data: dict with 'img_512', 'img', 'calib', 'b_min', 'b_max' entries.
    :param save_path: output ``.obj`` path; the preview PNG reuses it with ``.png``.
    :param thresh: iso-surface threshold for reconstruction.
    :param use_octree: whether reconstruction uses octree acceleration.
    :param components: accepted for API compatibility; unused in this function.
    """
    image_tensor_global = data['img_512'].to(device=cuda)
    image_tensor = data['img'].to(device=cuda)
    calib_tensor = data['calib'].to(device=cuda)
    net.filter_global(image_tensor_global)
    net.filter_local(image_tensor[:,None])
    # Best effort: stack the predicted front/back normal maps onto the preview
    # image. The bare except deliberately tolerates nets without netF/netB.
    try:
        if net.netG.netF is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)
        if net.netG.netB is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)
    except:
        pass
    b_min = data['b_min']
    b_max = data['b_max']
    try:
        save_img_path = save_path[:-4] + '.png'
        save_img_list = []
        for v in range(image_tensor_global.shape[0]):
            # [-1, 1] CHW tensor -> [0, 255] HWC, channel-reversed for cv2 (BGR).
            save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0
            save_img_list.append(save_img)
        save_img = np.concatenate(save_img_list, axis=1)
        cv2.imwrite(save_img_path, save_img)
        verts, faces, _, _ = reconstruction(
            net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=50000)
        verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()
        # if 'calib_world' in data:
        #     calib_world = data['calib_world'].numpy()[0]
        #     verts = np.matmul(np.concatenate([verts, np.ones_like(verts[:,:1])],1), inv(calib_world).T)[:,:3]
        # Color each vertex with its predicted surface normal, processed in
        # chunks to bound memory; the final chunk uses -1, i.e. "to the end".
        color = np.zeros(verts.shape)
        interval = 50000
        for i in range(len(color) // interval + 1):
            left = i * interval
            if i == len(color) // interval:
                right = -1
            else:
                right = (i + 1) * interval
            net.calc_normal(verts_tensor[:, None, :, left:right], calib_tensor[:,None], calib_tensor)
            nml = net.nmls.detach().cpu().numpy()[0] * 0.5 + 0.5
            color[left:right] = nml.T
        save_obj_mesh_with_color(save_path, verts, faces, color)
    except Exception as e:
        print(e)
def gen_mesh_imgColor(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):
    """Like gen_mesh, but colors vertices by sampling the input image (projected UVs)
    instead of predicted surface normals, and optionally maps vertices to world space.

    :param res: marching-cubes grid resolution forwarded to ``reconstruction``.
    :param net: network exposing ``filter_global``/``filter_local``/``projection``.
    :param cuda: torch device tensors are moved to.
    :param data: dict with 'img_512', 'img', 'calib', 'b_min', 'b_max' and
        optionally 'calib_world' entries.
    :param save_path: output ``.obj`` path; the preview PNG reuses it with ``.png``.
    :param thresh: iso-surface threshold for reconstruction.
    :param use_octree: whether reconstruction uses octree acceleration.
    :param components: accepted for API compatibility; unused in this function.
    """
    image_tensor_global = data['img_512'].to(device=cuda)
    image_tensor = data['img'].to(device=cuda)
    calib_tensor = data['calib'].to(device=cuda)
    net.filter_global(image_tensor_global)
    net.filter_local(image_tensor[:,None])
    # Best effort: stack the predicted front/back normal maps onto the preview
    # image. The bare except deliberately tolerates nets without netF/netB.
    try:
        if net.netG.netF is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)
        if net.netG.netB is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)
    except:
        pass
    b_min = data['b_min']
    b_max = data['b_max']
    try:
        save_img_path = save_path[:-4] + '.png'
        save_img_list = []
        for v in range(image_tensor_global.shape[0]):
            # [-1, 1] CHW tensor -> [0, 255] HWC, channel-reversed for cv2 (BGR).
            save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0
            save_img_list.append(save_img)
        save_img = np.concatenate(save_img_list, axis=1)
        cv2.imwrite(save_img_path, save_img)
        verts, faces, _, _ = reconstruction(
            net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=100000)
        verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()
        # if this returns error, projection must be defined somewhere else
        # Project vertices into the image to sample per-vertex RGB color.
        xyz_tensor = net.projection(verts_tensor, calib_tensor[:1])
        uv = xyz_tensor[:, :2, :]
        color = index(image_tensor[:1], uv).detach().cpu().numpy()[0].T
        color = color * 0.5 + 0.5
        if 'calib_world' in data:
            # Map camera-space vertices into world space via the inverse calibration.
            calib_world = data['calib_world'].numpy()[0]
            verts = np.matmul(np.concatenate([verts, np.ones_like(verts[:,:1])],1), inv(calib_world).T)[:,:3]
        save_obj_mesh_with_color(save_path, verts, faces, color)
    except Exception as e:
        print(e)
def recon(opt, use_rect=False):
    """Load a trained multi-level PIFu checkpoint and reconstruct meshes for a dataset.

    :param opt: parsed BaseOptions namespace. NOTE: it is largely overwritten
        by the options stored in the checkpoint; only dataroot, resolution,
        results_path and loadSize are preserved from the caller.
    :param use_rect: use EvalDataset (rect crops) instead of EvalWPoseDataset.
    :raises Exception: if no usable checkpoint file is found.
    """
    # load checkpoints
    state_dict_path = None
    if opt.load_netMR_checkpoint_path is not None:
        state_dict_path = opt.load_netMR_checkpoint_path
    elif opt.resume_epoch < 0:
        state_dict_path = '%s/%s_train_latest' % (opt.checkpoints_path, opt.name)
        opt.resume_epoch = 0
    else:
        state_dict_path = '%s/%s_train_epoch_%d' % (opt.checkpoints_path, opt.name, opt.resume_epoch)
    start_id = opt.start_id
    end_id = opt.end_id
    #cuda = torch.device('cuda:%d' % opt.gpu_id if torch.cuda.is_available() else 'cpu')
    # CPU-only inference; the GPU branch above was intentionally disabled.
    cuda = torch.device('cpu')
    state_dict = None
    if state_dict_path is not None and os.path.exists(state_dict_path):
        print('Resuming from ', state_dict_path)
        state_dict = torch.load(state_dict_path, map_location=cuda)
        print('Warning: opt is overwritten.')
        # Preserve the caller-supplied paths/sizes, take everything else from
        # the checkpoint's stored options.
        dataroot = opt.dataroot
        resolution = opt.resolution
        results_path = opt.results_path
        loadSize = opt.loadSize
        opt = state_dict['opt']
        opt.dataroot = dataroot
        opt.resolution = resolution
        opt.results_path = results_path
        opt.loadSize = loadSize
    else:
        raise Exception('failed loading state dict!', state_dict_path)
    # parser.print_options(opt)
    if use_rect:
        test_dataset = EvalDataset(opt)
    else:
        test_dataset = EvalWPoseDataset(opt)
    print('test data size: ', len(test_dataset))
    projection_mode = test_dataset.projection_mode
    # Build the coarse (netG) and fine (netMR) networks from stored options.
    opt_netG = state_dict['opt_netG']
    netG = HGPIFuNetwNML(opt_netG, projection_mode).to(device=cuda)
    netMR = HGPIFuMRNet(opt, netG, projection_mode).to(device=cuda)
    def set_eval():
        # Only the coarse network is switched to eval mode here.
        netG.eval()
    # load checkpoints
    netMR.load_state_dict(state_dict['model_state_dict'])
    os.makedirs(opt.checkpoints_path, exist_ok=True)
    os.makedirs(opt.results_path, exist_ok=True)
    os.makedirs('%s/%s/recon' % (opt.results_path, opt.name), exist_ok=True)
    # Negative ids mean "from the start" / "until the end of the dataset".
    if start_id < 0:
        start_id = 0
    if end_id < 0:
        end_id = len(test_dataset)
    ## test
    with torch.no_grad():
        set_eval()
        print('generate mesh (test) ...')
        for i in tqdm(range(start_id, end_id)):
            if i >= len(test_dataset):
                break
            # for multi-person processing, set it to False
            if True:
                test_data = test_dataset[i]
                save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], opt.resolution)
                print(save_path)
                gen_mesh(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)
            else:
                # Dead branch kept for reference: per-person reconstruction.
                for j in range(test_dataset.get_n_person(i)):
                    test_dataset.person_id = j
                    test_data = test_dataset[i]
                    save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], j)
                    gen_mesh(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)
def reconWrapper(args=None, use_rect=False):
    """Parse command-line options and launch reconstruction."""
    recon(parser.parse(args), use_rect)
# Script entry point: run reconstruction with default options.
if __name__ == '__main__':
    reconWrapper()
| [
"lib.options.BaseOptions",
"torch.cat",
"torch.device",
"torch.no_grad",
"os.path.abspath",
"lib.model.HGPIFuMRNet",
"cv2.imwrite",
"torch.load",
"os.path.dirname",
"os.path.exists",
"lib.model.HGPIFuNetwNML",
"lib.geometry.index",
"lib.data.EvalWPoseDataset",
"numpy.ones_like",
"numpy.l... | [((790, 803), 'lib.options.BaseOptions', 'BaseOptions', ([], {}), '()\n', (801, 803), False, 'from lib.options import BaseOptions\n'), ((5452, 5471), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5464, 5471), False, 'import torch\n'), ((6674, 6722), 'os.makedirs', 'os.makedirs', (['opt.checkpoints_path'], {'exist_ok': '(True)'}), '(opt.checkpoints_path, exist_ok=True)\n', (6685, 6722), False, 'import os\n'), ((6727, 6771), 'os.makedirs', 'os.makedirs', (['opt.results_path'], {'exist_ok': '(True)'}), '(opt.results_path, exist_ok=True)\n', (6738, 6771), False, 'import os\n'), ((6776, 6848), 'os.makedirs', 'os.makedirs', (["('%s/%s/recon' % (opt.results_path, opt.name))"], {'exist_ok': '(True)'}), "('%s/%s/recon' % (opt.results_path, opt.name), exist_ok=True)\n", (6787, 6848), False, 'import os\n'), ((222, 247), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (237, 247), False, 'import os\n'), ((1802, 1839), 'numpy.concatenate', 'np.concatenate', (['save_img_list'], {'axis': '(1)'}), '(save_img_list, axis=1)\n', (1816, 1839), True, 'import numpy as np\n'), ((1848, 1884), 'cv2.imwrite', 'cv2.imwrite', (['save_img_path', 'save_img'], {}), '(save_img_path, save_img)\n', (1859, 1884), False, 'import cv2\n'), ((1915, 2027), 'lib.mesh_util.reconstruction', 'reconstruction', (['net', 'cuda', 'calib_tensor', 'res', 'b_min', 'b_max', 'thresh'], {'use_octree': 'use_octree', 'num_samples': '(50000)'}), '(net, cuda, calib_tensor, res, b_min, b_max, thresh,\n use_octree=use_octree, num_samples=50000)\n', (1929, 2027), False, 'from lib.mesh_util import save_obj_mesh_with_color, reconstruction\n'), ((2347, 2368), 'numpy.zeros', 'np.zeros', (['verts.shape'], {}), '(verts.shape)\n', (2355, 2368), True, 'import numpy as np\n'), ((2824, 2880), 'lib.mesh_util.save_obj_mesh_with_color', 'save_obj_mesh_with_color', (['save_path', 'verts', 'faces', 'color'], {}), '(save_path, verts, faces, color)\n', (2848, 2880), False, 'from 
lib.mesh_util import save_obj_mesh_with_color, reconstruction\n'), ((3933, 3970), 'numpy.concatenate', 'np.concatenate', (['save_img_list'], {'axis': '(1)'}), '(save_img_list, axis=1)\n', (3947, 3970), True, 'import numpy as np\n'), ((3979, 4015), 'cv2.imwrite', 'cv2.imwrite', (['save_img_path', 'save_img'], {}), '(save_img_path, save_img)\n', (3990, 4015), False, 'import cv2\n'), ((4046, 4159), 'lib.mesh_util.reconstruction', 'reconstruction', (['net', 'cuda', 'calib_tensor', 'res', 'b_min', 'b_max', 'thresh'], {'use_octree': 'use_octree', 'num_samples': '(100000)'}), '(net, cuda, calib_tensor, res, b_min, b_max, thresh,\n use_octree=use_octree, num_samples=100000)\n', (4060, 4159), False, 'from lib.mesh_util import save_obj_mesh_with_color, reconstruction\n'), ((4750, 4806), 'lib.mesh_util.save_obj_mesh_with_color', 'save_obj_mesh_with_color', (['save_path', 'verts', 'faces', 'color'], {}), '(save_path, verts, faces, color)\n', (4774, 4806), False, 'from lib.mesh_util import save_obj_mesh_with_color, reconstruction\n'), ((5534, 5565), 'os.path.exists', 'os.path.exists', (['state_dict_path'], {}), '(state_dict_path)\n', (5548, 5565), False, 'import os\n'), ((5637, 5683), 'torch.load', 'torch.load', (['state_dict_path'], {'map_location': 'cuda'}), '(state_dict_path, map_location=cuda)\n', (5647, 5683), False, 'import torch\n'), ((6198, 6214), 'lib.data.EvalDataset', 'EvalDataset', (['opt'], {}), '(opt)\n', (6209, 6214), False, 'from lib.data import EvalWPoseDataset, EvalDataset\n'), ((6248, 6269), 'lib.data.EvalWPoseDataset', 'EvalWPoseDataset', (['opt'], {}), '(opt)\n', (6264, 6269), False, 'from lib.data import EvalWPoseDataset, EvalDataset\n'), ((6968, 6983), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6981, 6983), False, 'import torch\n'), ((143, 168), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (158, 168), False, 'import os\n'), ((1222, 1272), 'torch.cat', 'torch.cat', (['[image_tensor_global, net.netG.nmlF]', '(0)'], 
{}), '([image_tensor_global, net.netG.nmlF], 0)\n', (1231, 1272), False, 'import torch\n'), ((1345, 1395), 'torch.cat', 'torch.cat', (['[image_tensor_global, net.netG.nmlB]', '(0)'], {}), '([image_tensor_global, net.netG.nmlB], 0)\n', (1354, 1395), False, 'import torch\n'), ((3353, 3403), 'torch.cat', 'torch.cat', (['[image_tensor_global, net.netG.nmlF]', '(0)'], {}), '([image_tensor_global, net.netG.nmlF], 0)\n', (3362, 3403), False, 'import torch\n'), ((3476, 3526), 'torch.cat', 'torch.cat', (['[image_tensor_global, net.netG.nmlB]', '(0)'], {}), '([image_tensor_global, net.netG.nmlB], 0)\n', (3485, 3526), False, 'import torch\n'), ((6421, 6461), 'lib.model.HGPIFuNetwNML', 'HGPIFuNetwNML', (['opt_netG', 'projection_mode'], {}), '(opt_netG, projection_mode)\n', (6434, 6461), False, 'from lib.model import HGPIFuNetwNML, HGPIFuMRNet\n'), ((6490, 6529), 'lib.model.HGPIFuMRNet', 'HGPIFuMRNet', (['opt', 'netG', 'projection_mode'], {}), '(opt, netG, projection_mode)\n', (6501, 6529), False, 'from lib.model import HGPIFuNetwNML, HGPIFuMRNet\n'), ((4715, 4731), 'numpy.linalg.inv', 'inv', (['calib_world'], {}), '(calib_world)\n', (4718, 4731), False, 'from numpy.linalg import inv\n'), ((4684, 4710), 'numpy.ones_like', 'np.ones_like', (['verts[:, :1]'], {}), '(verts[:, :1])\n', (4696, 4710), True, 'import numpy as np\n'), ((2060, 2085), 'torch.from_numpy', 'torch.from_numpy', (['verts.T'], {}), '(verts.T)\n', (2076, 2085), False, 'import torch\n'), ((4192, 4217), 'torch.from_numpy', 'torch.from_numpy', (['verts.T'], {}), '(verts.T)\n', (4208, 4217), False, 'import torch\n'), ((4449, 4476), 'lib.geometry.index', 'index', (['image_tensor[:1]', 'uv'], {}), '(image_tensor[:1], uv)\n', (4454, 4476), False, 'from lib.geometry import index\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 16:43:04 2020
@author: iurym
"""
from app import app, request, jsonify
import pickle
import pandas as pd
import numpy as np
# Load the pickle model
# SECURITY NOTE(review): pickle.load executes arbitrary code from the file;
# only load model files from a trusted source.
model = pickle.load(open('machine-learning/training/data/model', 'rb'))
@app.route('/')
def home():
    # Simple landing endpoint confirming the API is up.
    return "API HOME"
# GET REQUEST
@app.route('/predict')
def predict():
    '''Predict survival from the request's query-string parameters.

    Request example:
        http://127.0.0.1:5000/predict?age=23&sex=0&pclass=2&sibsp=0

    Returns
    -------
    json
        json file with "survived" key, and value 0 for not survived and
        1 for survived. If a parameter is missing or non-numeric, a JSON
        object with an "error" key and HTTP status 400 is returned instead
        of crashing inside the model.
    '''
    # Collect the expected query parameters (each is None when absent).
    params = {name: request.args.get(name, None)
              for name in ('age', 'sex', 'pclass', 'sibsp')}
    missing = [name for name, value in params.items() if value is None]
    if missing:
        return jsonify({'error': 'missing parameters: ' + ', '.join(missing)}), 400
    try:
        # Cast explicitly so the model always receives numeric features.
        age = float(params['age'])
        sex = float(params['sex'])
        pclass = float(params['pclass'])
        sibsp = float(params['sibsp'])
    except ValueError:
        return jsonify({'error': 'parameters must be numeric'}), 400
    # Feature order must match the training pipeline.
    data_np = np.array([[pclass, sex, age, sibsp]])
    data = pd.DataFrame(data_np, columns=['Pclass', 'Sex', 'Age', 'SibSp'])
    # Predict and return the first (only) result as a string.
    res = str(model.predict(data)[0])
    # Return the dict converted to JSON
    return jsonify({'survived': res})
| [
"pandas.DataFrame",
"app.app.route",
"app.request.args.get",
"numpy.array",
"app.jsonify"
] | [((275, 289), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (284, 289), False, 'from app import app, request, jsonify\n'), ((349, 370), 'app.app.route', 'app.route', (['"""/predict"""'], {}), "('/predict')\n", (358, 370), False, 'from app import app, request, jsonify\n'), ((700, 729), 'app.request.args.get', 'request.args.get', (['"""age"""', 'None'], {}), "('age', None)\n", (716, 729), False, 'from app import app, request, jsonify\n'), ((740, 769), 'app.request.args.get', 'request.args.get', (['"""sex"""', 'None'], {}), "('sex', None)\n", (756, 769), False, 'from app import app, request, jsonify\n'), ((783, 815), 'app.request.args.get', 'request.args.get', (['"""pclass"""', 'None'], {}), "('pclass', None)\n", (799, 815), False, 'from app import app, request, jsonify\n'), ((828, 859), 'app.request.args.get', 'request.args.get', (['"""sibsp"""', 'None'], {}), "('sibsp', None)\n", (844, 859), False, 'from app import app, request, jsonify\n'), ((908, 945), 'numpy.array', 'np.array', (['[[pclass, sex, age, sibsp]]'], {}), '([[pclass, sex, age, sibsp]])\n', (916, 945), True, 'import numpy as np\n'), ((993, 1057), 'pandas.DataFrame', 'pd.DataFrame', (['data_np'], {'columns': "['Pclass', 'Sex', 'Age', 'SibSp']"}), "(data_np, columns=['Pclass', 'Sex', 'Age', 'SibSp'])\n", (1005, 1057), True, 'import pandas as pd\n'), ((1275, 1295), 'app.jsonify', 'jsonify', (['data_return'], {}), '(data_return)\n', (1282, 1295), False, 'from app import app, request, jsonify\n')] |
"""
This file holds common audio functions for processing time-domain audio in the
form of .wav files and frequency-domain audio in the form of log-mel
spectrograms.
"""
import librosa
import noisereduce as nr
import numpy as np
import au_constants as auc
import spectrogram as sg
def load_wav(wav_path):
    """
    Loads a wav file as a numpy array. Wraps the Librosa load function to keep
    the parameters consistent. Drops the file's sampling rate because all wav
    files will be resampled to the sampling rate defined in constants.py.
    :param wav_path: Path to wav file
    :return: np.array, or None if the returned sampling rate is unexpected
    """
    # librosa resamples to auc.SR on load, so the returned rate should always
    # match; the check below is a defensive sanity guard.
    wav, sr = librosa.load(
        wav_path, sr=auc.SR, dtype=np.dtype(auc.WAV_DATA_TYPE),
        res_type=auc.RES_ALGOR)
    if sr != auc.SR:
        print("Sampling rate mismatch.")
        return None
    return wav
def process_wav(wav, noisy=False):
    """
    Turn a raw waveform into a constant-length, scaled log-mel spectrogram.
    :param wav: The audio time series
    :param noisy: Set when the recording is known to contain background noise
    :return: np.array
    """
    # Force a constant sample count: truncate long clips, zero-pad short ones.
    if len(wav) > auc.MAX_DATA_POINTS:
        wav = wav[:auc.MAX_DATA_POINTS]
    else:
        wav = pad_wav(wav)
    if noisy:
        # Use the clip's leading segment as the noise profile.
        # noinspection PyTypeChecker
        wav = nr.reduce_noise(
            audio_clip=wav, noise_clip=wav[:auc.NOISY_DURATION], verbose=False)
    # Log-mel spectrogram, scaled to lie between -1 and 1.
    return sg.scale_melspecgram(sg.wave_to_melspecgram(wav))
def remove_first_last_sec(wav, sr):
    """
    Drop the first and the last second (``sr`` samples each) of an audio clip.
    :param wav: The audio time series data points
    :param sr: The sampling rate of the audio
    :return: np.array
    """
    without_head = wav[sr:]
    return without_head[:-sr]
def pad_wav(wav, desired_length=auc.MAX_DATA_POINTS):
    """
    Right-pad a waveform with zeros until it reaches ``desired_length`` samples.
    :param wav: The audio time series data points
    :param desired_length: The desired length to pad to
    :return: np.array, or None if the input is already too long
    """
    deficit = desired_length - wav.shape[0]
    if deficit < 0:
        print("The waveform is longer than the desired length.")
        return None
    padded = np.pad(
        wav, pad_width=(0, deficit), mode='constant', constant_values=0)
    # Defensive check that np.pad produced exactly the requested length.
    if len(padded) != desired_length:
        print("An error occurred during padding the waveform.")
        return None
    return padded
| [
"numpy.pad",
"spectrogram.wave_to_melspecgram",
"numpy.dtype",
"spectrogram.scale_melspecgram",
"noisereduce.reduce_noise"
] | [((1529, 1556), 'spectrogram.wave_to_melspecgram', 'sg.wave_to_melspecgram', (['wav'], {}), '(wav)\n', (1551, 1556), True, 'import spectrogram as sg\n'), ((1634, 1667), 'spectrogram.scale_melspecgram', 'sg.scale_melspecgram', (['melspecgram'], {}), '(melspecgram)\n', (1654, 1667), True, 'import spectrogram as sg\n'), ((2408, 2483), 'numpy.pad', 'np.pad', (['wav'], {'pad_width': '(0, length_diff)', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(wav, pad_width=(0, length_diff), mode='constant', constant_values=0)\n", (2414, 2483), True, 'import numpy as np\n'), ((1390, 1459), 'noisereduce.reduce_noise', 'nr.reduce_noise', ([], {'audio_clip': 'wav', 'noise_clip': 'noisy_part', 'verbose': '(False)'}), '(audio_clip=wav, noise_clip=noisy_part, verbose=False)\n', (1405, 1459), True, 'import noisereduce as nr\n'), ((680, 707), 'numpy.dtype', 'np.dtype', (['auc.WAV_DATA_TYPE'], {}), '(auc.WAV_DATA_TYPE)\n', (688, 707), True, 'import numpy as np\n')] |
from collections import OrderedDict
import hashlib
import os
from experiment_logger import get_logger
import numpy as np
from allennlp.commands.elmo import ElmoEmbedder
from allennlp.data.token_indexers.elmo_indexer import ELMoCharacterMapper
from tqdm import tqdm
# With loaded embedding matrix, the padding vector will be initialized to zero
# and will not be trained. Hopefully this isn't a problem. It seems better than
# random initialization...
PADDING_TOKEN = "_PAD"
UNK_TOKEN = "_"
# Sentence-boundary tokens reuse ELMo's character-mapper conventions.
BOS_TOKEN = ELMoCharacterMapper.bos_token
EOS_TOKEN = ELMoCharacterMapper.eos_token
# Marker token whose presence at index 2 signals a pre-filtered vocabulary
# (see read_glove). NOTE(review): the literal "<PASSWORD>" looks like an
# artifact of automated sanitization -- confirm the intended token value.
EXISTING_VOCAB_TOKEN = "<PASSWORD>"
def initialize_word2idx():
    """Build the base vocabulary mapping containing the always-present special tokens."""
    # These tokens should exist in every vocab and set of embeddings.
    mapping = OrderedDict()
    for token in (BOS_TOKEN, EOS_TOKEN, UNK_TOKEN):
        mapping[token] = len(mapping)
    return mapping
class EmbeddingsReader(object):
    """Loads word embeddings: GloVe ('w2v'), ELMo ('elmo'), or their
    concatenation ('both'). 'bert' currently falls back to ELMo.

    The thin wrapper methods over the module-level loaders exist so they can
    be stubbed out in tests.
    """

    def context_insensitive_elmo(self, *args, **kwargs):
        # Indirection over the module-level helper.
        return context_insensitive_elmo(*args, **kwargs)

    def read_glove(self, *args, **kwargs):
        # Indirection over the module-level helper.
        return read_glove(*args, **kwargs)

    def get_emb_w2v(self, options, embeddings_path, word2idx):
        """Load GloVe vectors; returns (embeddings, possibly-rebuilt word2idx)."""
        embeddings, word2idx = self.read_glove(embeddings_path, word2idx)
        return embeddings, word2idx

    def get_emb_elmo(self, options, embeddings_path, word2idx):
        """Load context-insensitive ELMo vectors; word2idx is returned unchanged."""
        options_path = options.elmo_options_path
        weights_path = options.elmo_weights_path
        embeddings = self.context_insensitive_elmo(weights_path=weights_path, options_path=options_path,
            word2idx=word2idx, cuda=options.cuda, cache_dir=options.elmo_cache_dir)
        return embeddings, word2idx

    def get_emb_both(self, options, embeddings_path, word2idx):
        """Concatenate GloVe and ELMo vectors over the intersection of their vocabularies."""
        e_w2v, w2i_w2v = self.get_emb_w2v(options, embeddings_path, word2idx)
        e_elmo, w2i_elmo = self.get_emb_elmo(options, embeddings_path, word2idx)
        vec_size = e_w2v.shape[1] + e_elmo.shape[1]
        # Keep GloVe's index order, restricted to words ELMo also covers.
        vocab = [w for w, i in sorted(w2i_w2v.items(), key=lambda x: x[1]) if w in w2i_elmo]
        vocab_size = len(vocab)
        embeddings = np.zeros((vocab_size, vec_size), dtype=np.float32)
        word2idx = {w: i for i, w in enumerate(vocab)}
        for w, i in word2idx.items():
            embeddings[i, :e_w2v.shape[1]] = e_w2v[w2i_w2v[w]]
            embeddings[i, e_w2v.shape[1]:] = e_elmo[w2i_elmo[w]]
        return embeddings, word2idx

    def get_embeddings(self, options, embeddings_path, word2idx):
        """Dispatch on ``options.emb`` ('w2v', 'elmo', 'bert', 'both').

        Raises ValueError for an unknown ``options.emb`` (previously this
        fell through and crashed with an UnboundLocalError on ``out``).
        """
        if options.emb == 'w2v':
            out = self.get_emb_w2v(options, embeddings_path, word2idx)
        elif options.emb == 'elmo':
            out = self.get_emb_elmo(options, embeddings_path, word2idx)
        elif options.emb == 'bert':
            # TODO: BERT embeddings are not implemented yet; fall back to ELMo.
            out = self.get_emb_elmo(options, embeddings_path, word2idx)
        elif options.emb == 'both':
            out = self.get_emb_both(options, embeddings_path, word2idx)
        else:
            raise ValueError('Unknown embedding type: {}'.format(options.emb))
        return out
def read_glove(filename, word2idx):
    """
    Load GloVe vectors for the words in ``word2idx``.

    Two cases:
    1. The word2idx has already been filtered according to embedding vocabulary
       (signalled by EXISTING_VOCAB_TOKEN at index 2): reuse it as-is.
    2. The word2idx is derived solely from the raw text data: build a fresh
       mapping whose first three slots are PADDING / UNK / EXISTING_VOCAB.

    :return: (embeddings, new_word2idx) where embeddings is a float32 matrix
        whose rows follow new_word2idx; words absent from GloVe keep zero rows.
    """
    logger = get_logger()
    glove_vocab = set()
    size = None
    validate_word2idx(word2idx)
    logger.info('Reading Glove Vocab.')
    # First pass: collect the GloVe vocabulary and infer the vector size.
    with open(filename) as f:
        for i, line in enumerate(f):
            word, vec = line.split(' ', 1)
            glove_vocab.add(word)
            if i == 0:
                size = len(vec.strip().split(' '))
    new_vocab = set.intersection(set(word2idx.keys()), glove_vocab)
    new_vocab.discard(PADDING_TOKEN)
    new_vocab.discard(UNK_TOKEN)
    if word2idx.get(EXISTING_VOCAB_TOKEN, None) == 2:
        new_word2idx = word2idx.copy()
        logger.info('Using existing vocab mapping.')
    else:
        new_word2idx = OrderedDict()
        new_word2idx[PADDING_TOKEN] = len(new_word2idx)
        new_word2idx[UNK_TOKEN] = len(new_word2idx)
        new_word2idx[EXISTING_VOCAB_TOKEN] = len(new_word2idx)
        for w, _ in word2idx.items():
            if w in new_word2idx:
                continue
            new_word2idx[w] = len(new_word2idx)
        logger.info('Creating new mapping.')
    logger.info('glove-vocab-size={} vocab-size={} intersection-size={} (-{})'.format(
        len(glove_vocab), len(word2idx), len(new_vocab), len(word2idx) - len(new_vocab)))
    embeddings = np.zeros((len(new_word2idx), size), dtype=np.float32)
    logger.info('Reading Glove Embeddings.')
    # Second pass: copy vectors for the retained words.
    with open(filename) as f:
        for line in f:
            word, vec = line.strip().split(' ', 1)
            # Bug fix: compare strings by value, not identity -- ``is`` only
            # worked when CPython happened to intern the token.
            if word == PADDING_TOKEN or word == UNK_TOKEN:
                continue
            if word in new_vocab and word not in new_word2idx:
                raise ValueError
            if word not in new_word2idx:
                continue
            word_id = new_word2idx[word]
            # np.fromstring(text, sep=...) is deprecated; parse explicitly.
            vec = np.array(vec.split(' '), dtype=float)
            embeddings[word_id] = vec
    validate_word2idx(new_word2idx)
    return embeddings, new_word2idx
def validate_word2idx(word2idx):
    """Assert that the mapping's indices form a gapless 0..len-1 range."""
    by_index = sorted(word2idx, key=word2idx.get)
    for expected_index, word in enumerate(by_index):
        assert word2idx[word] == expected_index
def hash_vocab(vocab, version='v1.2.0'):
    """Return a deterministic sha256 hex digest of the version tag plus vocab words."""
    digest = hashlib.sha256(version.encode())
    for word in vocab:
        digest.update(word.encode())
    return digest.hexdigest()
def save_elmo_cache(path, vectors):
    """Persist ELMo vectors (in sorted-vocab order) to ``path`` in .npy format."""
    np.save(path, vectors)
def load_elmo_cache(path, order):
    """Load cached vectors (stored in sorted-vocab order) and permute rows by ``order``."""
    cached = np.load(path)
    assert len(order) == len(cached)
    return cached[order]
def get_vocab_list_and_order(word2idx):
    """Return (alphabetically sorted vocab, permutation mapping each original
    index position to its rank in the sorted vocab)."""
    vocab = sorted(word2idx)
    rank = {word: pos for pos, word in enumerate(vocab)}
    order = [rank[word] for word in sorted(word2idx, key=word2idx.get)]
    assert len(order) == len(vocab)
    return vocab, order
def context_insensitive_elmo(weights_path, options_path, word2idx, cuda=False, cache_dir=None):
    """
    Compute context-insensitive ELMo embeddings for a vocabulary.

    Embeddings are always saved in sorted order (by vocab) and loaded according to word2idx.

    :param weights_path: path to the ELMo weight file.
    :param options_path: path to the ELMo options file.
    :param word2idx: word -> index mapping; must be a valid 0..N-1 mapping.
    :param cuda: run the embedder on GPU 0 when True, else CPU.
    :param cache_dir: optional directory caching vectors keyed by a hash of
        the sorted vocabulary.
    :return: np.array with one row per word, rows ordered by word2idx.
    """
    logger = get_logger()
    validate_word2idx(word2idx)
    vocab, order = get_vocab_list_and_order(word2idx)
    if cache_dir is not None:
        # Cache key depends only on the (sorted) vocabulary contents.
        key = hash_vocab(vocab)
        cache_path = os.path.join(cache_dir, 'elmo_{}.npy'.format(key))
        logger.info('elmo cache path = {}'.format(cache_path))
        if os.path.exists(cache_path):
            logger.info('Loading cached elmo vectors: {}'.format(cache_path))
            vectors = load_elmo_cache(cache_path, order)
            logger.info('Embeddings with shape = {}'.format(vectors.shape))
            return vectors
    device = 0 if cuda else -1
    batch_size = 256
    nbatches = len(vocab) // batch_size + 1
    # TODO: Does not support padding.
    elmo = ElmoEmbedder(options_file=options_path, weight_file=weights_path, cuda_device=device)
    vec_lst = []
    for i in tqdm(range(nbatches), desc='elmo'):
        start = i * batch_size
        batch = vocab[start:start+batch_size]
        if len(batch) == 0:
            continue
        # Each word is treated as one token of a "sentence"; [0] below keeps
        # the first (context-insensitive) ELMo layer.
        vec = elmo.embed_sentence(batch)
        vec_lst.append(vec)
    vectors = np.concatenate([x[0] for x in vec_lst], axis=0)
    if cache_dir is not None:
        logger.info('Saving cached elmo vectors: {}'.format(cache_path))
        save_elmo_cache(cache_path, vectors)
    # Reorder rows from sorted-vocab order back to word2idx order.
    vectors = vectors[order]
    logger.info('Embeddings with shape = {}'.format(vectors.shape))
    return vectors
| [
"numpy.load",
"numpy.save",
"experiment_logger.get_logger",
"allennlp.commands.elmo.ElmoEmbedder",
"numpy.zeros",
"os.path.exists",
"hashlib.sha256",
"collections.OrderedDict",
"numpy.fromstring",
"numpy.concatenate"
] | [((730, 743), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (741, 743), False, 'from collections import OrderedDict\n'), ((3176, 3188), 'experiment_logger.get_logger', 'get_logger', ([], {}), '()\n', (3186, 3188), False, 'from experiment_logger import get_logger\n'), ((5309, 5325), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (5323, 5325), False, 'import hashlib\n'), ((5479, 5501), 'numpy.save', 'np.save', (['path', 'vectors'], {}), '(path, vectors)\n', (5486, 5501), True, 'import numpy as np\n'), ((5552, 5565), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (5559, 5565), True, 'import numpy as np\n'), ((6142, 6154), 'experiment_logger.get_logger', 'get_logger', ([], {}), '()\n', (6152, 6154), False, 'from experiment_logger import get_logger\n'), ((6868, 6957), 'allennlp.commands.elmo.ElmoEmbedder', 'ElmoEmbedder', ([], {'options_file': 'options_path', 'weight_file': 'weights_path', 'cuda_device': 'device'}), '(options_file=options_path, weight_file=weights_path,\n cuda_device=device)\n', (6880, 6957), False, 'from allennlp.commands.elmo import ElmoEmbedder\n'), ((7230, 7277), 'numpy.concatenate', 'np.concatenate', (['[x[0] for x in vec_lst]'], {'axis': '(0)'}), '([x[0] for x in vec_lst], axis=0)\n', (7244, 7277), True, 'import numpy as np\n'), ((2105, 2155), 'numpy.zeros', 'np.zeros', (['(vocab_size, vec_size)'], {'dtype': 'np.float32'}), '((vocab_size, vec_size), dtype=np.float32)\n', (2113, 2155), True, 'import numpy as np\n'), ((3844, 3857), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3855, 3857), False, 'from collections import OrderedDict\n'), ((6454, 6480), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (6468, 6480), False, 'import os\n'), ((4931, 4971), 'numpy.fromstring', 'np.fromstring', (['vec'], {'dtype': 'float', 'sep': '""" """'}), "(vec, dtype=float, sep=' ')\n", (4944, 4971), True, 'import numpy as np\n')] |
import json
from typing import Tuple, Dict, List
import numpy as np
from bestiary.meta.dataset import MetaDataset
from bestiary.utils.data import RandomDataset
class Sinusoid(RandomDataset):
    def __init__(self, amplitude: float, frequency: float, phase: float,
                 noise: float = 0., limits: Tuple[float, float] = (-5, 5)):
        r"""
        Random dataset that outputs a noisy sinusoid
        :math:`y = A \sin(\omega x + \varphi) + \epsilon`.

        Parameters
        ----------
        amplitude: The amplitude :math:`A` of the sinusoid.
        frequency: The (angular) frequency :math:`\omega`.
        phase: The initial phase :math:`\varphi`.
        noise: The standard deviation :math:`\sigma` of the Gaussian noise.
        limits: The sampling limits for :math:`x`.
            (Annotation fixed: ``Tuple[float]`` would mean a 1-tuple.)
        """
        self.limits_ = limits
        self.amplitude = amplitude
        self.frequency = frequency
        self.phase = phase
        self.noise = noise

    def __len__(self):
        # Effectively unbounded; samples are generated on the fly.
        return 2 ** 32

    def sample_x(self):
        """Draw a single x uniformly from ``self.limits_``."""
        return np.random.uniform(*self.limits_)

    def __getitem__(self, item):
        # ``item`` is ignored: every access draws a fresh random (x, y) pair.
        x = self.sample_x()
        y = self.amplitude * np.sin(self.frequency * x + self.phase) + np.random.normal(0, self.noise)
        x, y = np.float32([x]), np.float32([y])
        return x, y
def sample_characteristics():
    """Sample a random (amplitude, frequency, phase) configuration for one sinusoid task."""
    amplitude = np.random.uniform(1, 5)
    frequency = np.random.uniform(1 / 10, 5 / 10) * 2 * np.pi
    phase = np.random.uniform(-1, 1) * np.pi
    return {'amplitude': amplitude, 'frequency': frequency, 'phase': phase}
class Sinusoids(MetaDataset):
    """Meta-dataset of random sinusoid regression tasks."""

    def __init__(self, classes: int = 10000, noise: float = 0., shots: int = 10,
                 characteristics: List[Dict] = None):
        if characteristics is None:
            characteristics = [sample_characteristics() for _ in range(classes)]
        # Remember everything needed to reconstruct this dataset via load().
        self.characteristics_ = {
            'characteristics': characteristics, 'noise': noise, 'shots': shots,
        }
        super().__init__(
            datasets=[Sinusoid(**c, noise=noise) for c in characteristics],
            shots=shots,
        )

    def save(self, filename):
        """Serialize the task characteristics to a JSON file."""
        with open(filename, 'w') as fp:
            json.dump(self.characteristics_, fp, indent=2, sort_keys=True)

    @classmethod
    def load(cls, filename):
        """Rebuild a Sinusoids dataset from a JSON file written by save()."""
        with open(filename, 'r') as fp:
            saved = json.load(fp)
        return cls(**saved)
| [
"json.dump",
"numpy.random.uniform",
"json.load",
"numpy.float32",
"numpy.sin",
"numpy.random.normal"
] | [((916, 948), 'numpy.random.uniform', 'np.random.uniform', (['*self.limits_'], {}), '(*self.limits_)\n', (933, 948), True, 'import numpy as np\n'), ((1082, 1113), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.noise'], {}), '(0, self.noise)\n', (1098, 1113), True, 'import numpy as np\n'), ((1130, 1145), 'numpy.float32', 'np.float32', (['[x]'], {}), '([x])\n', (1140, 1145), True, 'import numpy as np\n'), ((1147, 1162), 'numpy.float32', 'np.float32', (['[y]'], {}), '([y])\n', (1157, 1162), True, 'import numpy as np\n'), ((1251, 1274), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(5)'], {}), '(1, 5)\n', (1268, 1274), True, 'import numpy as np\n'), ((2050, 2111), 'json.dump', 'json.dump', (['self.characteristics_', 'f'], {'indent': '(2)', 'sort_keys': '(True)'}), '(self.characteristics_, f, indent=2, sort_keys=True)\n', (2059, 2111), False, 'import json\n'), ((2228, 2240), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2237, 2240), False, 'import json\n'), ((1040, 1079), 'numpy.sin', 'np.sin', (['(self.frequency * x + self.phase)'], {}), '(self.frequency * x + self.phase)\n', (1046, 1079), True, 'import numpy as np\n'), ((1355, 1379), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1372, 1379), True, 'import numpy as np\n'), ((1294, 1327), 'numpy.random.uniform', 'np.random.uniform', (['(1 / 10)', '(5 / 10)'], {}), '(1 / 10, 5 / 10)\n', (1311, 1327), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 28 16:03:38 2018
@author: ahmed
source: file:///media/ahmed/MYLINUXLIVE/PyEphem/New%20folder/Fun%20with%20the%20Sun%20and%20PyEphem%20-%20Chris%20Ramsay.html
"""
# Import some bits
import ephem, math, datetime
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
import pandas as pd
plt.style.use('Solarize_Light2')
home = ephem.Observer()
# Set up
# Observer location: pyephem interprets string lat/lon as degrees
# (floats would be radians).
home.date = '2017-01-01 09:00:00'
home.lat = '53.4975'
home.lon = '-0.3154'
sun = ephem.Sun()
sun.compute(home)
# Print sunrise / local (solar) noon / sunset around the observer's date.
rising = home.previous_rising(sun).datetime()
print('Sunrise is at {}:{}:{}'.format(rising.hour, rising.minute, rising.second))
transit = home.next_transit(sun).datetime()
print('Local noon is at {}:{}:{}'.format(transit.hour, transit.minute, transit.second))
setting = home.next_setting(sun).datetime()
print('Sunset is at {}:{}:{}'.format(setting.hour, setting.minute, setting.second))
#%% Apparent Solar Time
# Prepare: reset the date and accumulate the daily noon offsets in `times`.
home.date = '2017/1/1'
sun = ephem.Sun()
times = []
def get_diff(tm):
    """Return the signed offset of tm from 12:00:00 on tm's date, in MINUTES.

    Positive when tm is before local clock noon. (Bug fix: the original
    docstring said "seconds", but the value is divided by 60 and the plot
    that consumes it is labelled 'Variation (minutes)'.)
    """
    noon = datetime.datetime.combine(tm, datetime.time(12, 0))
    return (noon - tm).total_seconds() / 60
# Prepare the data
# Sample one year (367 days) of local-noon offsets from clock noon.
for i in range(1, 368):
    home.date += ephem.Date(1)  # advance the observer by one day (ephem dates are floats in days)
    trans = home.next_transit(sun).datetime()
    times.append(get_diff(trans))
# Set up
ts = pd.Series(times, index=pd.date_range('2017/1/1', periods=len(times)))
print("\n ---- Apparent Solar Time ------")
print(ts.loc['2017-04-14':'2017-04-26'])
plt.figure()
ax = ts.plot()
plt.rcParams["figure.figsize"] = [9, 6]
ax.set_xlabel(u'Date', fontsize=11)
ax.set_ylabel(u'Variation (minutes)', fontsize=11)
# Fire
plt.show()
#%% Drawing the Analemma
# Prepare
home.date = '2017/1/1 12:00:00'
sun = ephem.Sun()
posx = []  # solar azimuth at sampling time, degrees
posy = []  # solar altitude at sampling time, degrees
# Solstice altitude
# home.lat is an ephem angle in radians; phi is the equator's altitude here.
phi = 90 - math.degrees(home.lat)
# Earth axial tilt
epsilon = 23.439
def get_sun_az(tm):
    """Get the azimuth based on a date"""
    # Recomputes the module-level `sun` body for observer/date `tm`,
    # then converts the azimuth from radians to degrees.
    sun.compute(tm)
    return math.degrees(sun.az)
def get_sun_alt(tm):
    """Get the altitude based on a date"""
    # Recomputes the module-level `sun` body for observer/date `tm`,
    # then converts the altitude from radians to degrees.
    sun.compute(tm)
    return math.degrees(sun.alt)
# Prepare the data
# One (azimuth, altitude) sample per day at the observer's current time of day.
for i in range(1, 368):
    home.date += ephem.Date(1)
    trans = home.next_transit(sun).datetime()  # NOTE: computed but unused below
    posx.append(get_sun_az(home))
    posy.append(get_sun_alt(home))
# Set up
fig, ax = plt.subplots()
ax.plot(posx, posy)
ax.grid(True)
ax.set_xlabel(u'Azimuth (°)', fontsize=11)
ax.set_ylabel(u'Altitude (°)', fontsize=11)
# Add some labels, lines & resize
# Reference lines: equator altitude phi plus/minus the axial tilt epsilon.
ax.annotate('Vernal equinox', xy=(min(posx), phi + 1), xytext=(min(posx), phi + 1))
ax.annotate('Autumnal equinox', xy=(max(posx) -2, phi + 1), xytext=(max(posx) -2, phi + 1))
ax.annotate('Nothern solstice', xy=(180.1, phi + epsilon + 1), xytext=(180.1, phi + epsilon + 1))
ax.annotate('Southern solstice', xy=(180.1, phi - epsilon - 2), xytext=(180.1, phi - epsilon - 2))
plt.plot((min(posx), max(posx)), (phi + epsilon, phi + epsilon), 'blue')
plt.plot((min(posx), max(posx)), (phi, phi), 'pink')
plt.plot((min(posx), max(posx)), (phi - epsilon, phi - epsilon), 'green')
plt.axvline(180, color='yellow')  # due south
plt.rcParams["figure.figsize"] = [9, 6]
plot_margin = 4
x0, x1, y0, y1 = plt.axis()
plt.axis((x0, x1, y0 - plot_margin, y1 + plot_margin))
# Fire
plt.show()
#%% Changing the time of day we view the analemma
# Prepare
home.date = '2017/1/1 08:30:00'
home.horizon = '0'
sun = ephem.Sun()
posy = []
posx = []
def get_sun_az(tm):
    """Get the azimuth based on a date"""
    # NOTE: identical re-definition of get_sun_az above (notebook-style
    # script); harmlessly rebinds the same name.
    sun.compute(tm)
    return math.degrees(sun.az)
def get_sun_alt(tm):
    """Get the altitude based on a date"""
    # NOTE: identical re-definition of get_sun_alt above (notebook-style
    # script); harmlessly rebinds the same name.
    sun.compute(tm)
    return math.degrees(sun.alt)
# Prepare the data
# Same analemma sampling as above, but the observer time is now 08:30.
for i in range(1, 368):
    home.date += ephem.Date(1)
    posy.append(get_sun_alt(home))
    posx.append(get_sun_az(home))
# Set up
fig, ax = plt.subplots()
ax.plot(posx, posy)
# Add some labels & resize
ax.set_xlabel(u'Azimuth (°)', fontsize=11)
ax.set_ylabel(u'Altitude (°)', fontsize=11)
plt.rcParams["figure.figsize"] = [9, 6]
# Fire
plt.show()
#%% Calculating Twilights
# Compare the sunset time defined by the Sun's upper edge vs. its centre.
initial_set = home.next_setting(sun).datetime() # zero edge
next_set = home.next_setting(sun, use_center=True).datetime() # zero centre
print("\n ---- Calculating Twilights ------")
print('Centre sunset is at {}:{}:{}'.format(next_set.hour, next_set.minute, next_set.second))
print('Edge sunset is at {}:{}:{}'.format(initial_set.hour, initial_set.minute, initial_set.second))
delta = initial_set - next_set
print('Time difference is {} mins, {} secs'.format(delta.seconds/60, delta.seconds%60))
def get_setting_twilights(obs, body):
    """Return the four twilight setting times as seconds since local midnight.

    Horizons used: 0° (sunset), -6° (civil), -12° (nautical), -18°
    (astronomical). Note that `obs.horizon` is mutated and left at the last
    value tried. If the body never sets, 86400.0 (a full day) is recorded.
    """
    # (horizon in degrees, whether to time the body's centre rather than its edge)
    horizon_specs = [('0', False), ('-6', True), ('-12', True), ('-18', True)]
    seconds_past_midnight = []
    for horizon, use_centre in horizon_specs:
        obs.horizon = horizon
        try:
            setting = obs.next_setting(body, use_center=use_centre).datetime()
            midnight = setting.replace(hour=0, minute=0, second=0, microsecond=0)
            seconds_past_midnight.append((setting - midnight).total_seconds())
        except ephem.AlwaysUpError:
            # The body stays above this horizon all day: record max seconds.
            seconds_past_midnight.append(86400.0)
    return seconds_past_midnight
home.horizon = '0'
twilights = get_setting_twilights(home, sun)
print(twilights)
# Prepare
home.date = '2017/01/01 12:00:00'
home.horizon = '0'
sun = ephem.Sun()
twidataset = []  # rows of [sunset, civil, nautical, astronomical] seconds-since-midnight
# Calculate just over a year of data
for i in range(1, 368):
    home.date += ephem.Date(1)
    twidataset.append(get_setting_twilights(home, sun))
print(twidataset[150:160])
df = pd.DataFrame(twidataset, columns=['Sunset', 'Civil', 'Nautical', 'Astronomical'])
print(df[150:160])
#%% Plot Twilights¶
def timeformatter(x, pos):
    """Format seconds-since-midnight `x` as 'H:M:SS' for an axis tick.

    Matplotlib FuncFormatter convention: the two args are the tick value and
    the tick position; `pos` is unused.
    """
    hours = int(x // 3600)
    # Bug fix: minutes were computed as int(x/24/60) (i.e. x/1440), which is
    # not the minutes component of a time of day.
    minutes = int((x % 3600) // 60)
    seconds = int(x % 60)
    return '{}:{}:{:02d}'.format(hours, minutes, seconds)
def dateformatter(x, pos):
    """Map tick value `x` (day count from 2017-01-01, 1-based) to 'YYYY-MM'.

    Matplotlib FuncFormatter convention: `pos` is the tick position (unused).
    """
    base_ordinal = datetime.date(2017, 1, 1).toordinal()
    tick_date = datetime.date.fromordinal(base_ordinal + int(x) - 1)
    return '{}-{:02d}'.format(tick_date.year, tick_date.month)
plt.rcParams["figure.figsize"] = [9, 6]
ax = df.plot.area(stacked=False, color=['#e60000', '#80ccff', '#33adff', '#008ae6'], alpha=0.2)
# Sort out x-axis
# Demarcate months
dim = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]  # days per month (2017, non-leap)
ax.xaxis.set_ticks(np.cumsum(dim))
ax.xaxis.set_major_formatter(FuncFormatter(dateformatter))
ax.set_xlabel(u'Date', fontsize=11)
# Sort out y-axis (values are seconds since midnight)
ax.yaxis.set_major_formatter(FuncFormatter(timeformatter))
ax.set_ylim([55000, 86400])
ax.set_ylabel(u'Time', fontsize=11)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=30, fontsize=9)
# Done
plt.show()
#%% Sunset length throughout the year
# Prepare
home.date = '2017/04/01 12:00:00'
home.horizon = '0'
sun = ephem.Sun()
print("\n ---- Sunset length throughout the year ------")
# Starting with the 0 degrees
s_start = home.next_setting(sun, use_center=False).datetime()
print(s_start)
# Now the -0.53 degrees
# -0.53 deg is roughly the Sun's apparent diameter below the horizon.
home.horizon = '-0.53'
s_end = home.next_setting(sun, use_center=False).datetime()
print(s_end)
# The difference is...
delta = s_end - s_start
print(delta.total_seconds())
# Let's go for a little run and finish off with a pandas Series containing some data
home.date = '2017/01/01 12:00:00'
settings = []  # per-day sunset durations in seconds
sun = ephem.Sun()
for i in range(1, 368):
    home.date += ephem.Date(1)
    home.horizon = '0'
    start = home.next_setting(sun, use_center=False).datetime()
    home.horizon = '-0.53'
    end = home.next_setting(sun, use_center=False).datetime()
    settings.append((end - start).total_seconds())
ts = pd.Series(settings, index=pd.date_range('2017/1/1', periods=len(settings)))
print(ts[0:12])
plt.figure()
ax = ts.plot.area(alpha=0.2)
plt.rcParams["figure.figsize"] = [9, 6]
ax.set_xlabel(u'Date', fontsize=11)
ax.set_ylabel(u'Sunset length (seconds)', fontsize=11)
ax.set_ylim([math.floor(ts.min()) - 15, math.floor(ts.max()) + 15])
# Fire
plt.show()
| [
"pandas.DataFrame",
"matplotlib.pyplot.axvline",
"ephem.Observer",
"matplotlib.pyplot.show",
"ephem.Date",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.axis",
"datetime.date",
"numpy.cumsum",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"ephem.Sun",
"matplotlib.ticker.FuncForma... | [((398, 430), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""Solarize_Light2"""'], {}), "('Solarize_Light2')\n", (411, 430), True, 'import matplotlib.pyplot as plt\n'), ((438, 454), 'ephem.Observer', 'ephem.Observer', ([], {}), '()\n', (452, 454), False, 'import ephem, math, datetime\n'), ((546, 557), 'ephem.Sun', 'ephem.Sun', ([], {}), '()\n', (555, 557), False, 'import ephem, math, datetime\n'), ((1030, 1041), 'ephem.Sun', 'ephem.Sun', ([], {}), '()\n', (1039, 1041), False, 'import ephem, math, datetime\n'), ((1574, 1586), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1584, 1586), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1746), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1744, 1746), True, 'import matplotlib.pyplot as plt\n'), ((1822, 1833), 'ephem.Sun', 'ephem.Sun', ([], {}), '()\n', (1831, 1833), False, 'import ephem, math, datetime\n'), ((2388, 2402), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2400, 2402), True, 'import matplotlib.pyplot as plt\n'), ((3131, 3163), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(180)'], {'color': '"""yellow"""'}), "(180, color='yellow')\n", (3142, 3163), True, 'import matplotlib.pyplot as plt\n'), ((3237, 3247), 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), '()\n', (3245, 3247), True, 'import matplotlib.pyplot as plt\n'), ((3248, 3302), 'matplotlib.pyplot.axis', 'plt.axis', (['(x0, x1, y0 - plot_margin, y1 + plot_margin)'], {}), '((x0, x1, y0 - plot_margin, y1 + plot_margin))\n', (3256, 3302), True, 'import matplotlib.pyplot as plt\n'), ((3310, 3320), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3318, 3320), True, 'import matplotlib.pyplot as plt\n'), ((3439, 3450), 'ephem.Sun', 'ephem.Sun', ([], {}), '()\n', (3448, 3450), False, 'import ephem, math, datetime\n'), ((3868, 3882), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3880, 3882), True, 'import matplotlib.pyplot as plt\n'), ((4064, 4074), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4072, 4074), True, 'import matplotlib.pyplot as plt\n'), ((5597, 5608), 'ephem.Sun', 'ephem.Sun', ([], {}), '()\n', (5606, 5608), False, 'import ephem, math, datetime\n'), ((5812, 5897), 'pandas.DataFrame', 'pd.DataFrame', (['twidataset'], {'columns': "['Sunset', 'Civil', 'Nautical', 'Astronomical']"}), "(twidataset, columns=['Sunset', 'Civil', 'Nautical',\n 'Astronomical'])\n", (5824, 5897), True, 'import pandas as pd\n'), ((6843, 6884), 'matplotlib.pyplot.setp', 'plt.setp', (['labels'], {'rotation': '(30)', 'fontsize': '(9)'}), '(labels, rotation=30, fontsize=9)\n', (6851, 6884), True, 'import matplotlib.pyplot as plt\n'), ((6892, 6902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6900, 6902), True, 'import matplotlib.pyplot as plt\n'), ((7011, 7022), 'ephem.Sun', 'ephem.Sun', ([], {}), '()\n', (7020, 7022), False, 'import ephem, math, datetime\n'), ((7527, 7538), 'ephem.Sun', 'ephem.Sun', ([], {}), '()\n', (7536, 7538), False, 'import ephem, math, datetime\n'), ((7924, 7936), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7934, 7936), True, 'import matplotlib.pyplot as plt\n'), ((8172, 8182), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8180, 8182), True, 'import matplotlib.pyplot as plt\n'), ((1308, 1321), 'ephem.Date', 'ephem.Date', (['(1)'], {}), '(1)\n', (1318, 1321), False, 'import ephem, math, datetime\n'), ((1886, 1908), 'math.degrees', 'math.degrees', (['home.lat'], {}), '(home.lat)\n', (1898, 1908), False, 'import ephem, math, datetime\n'), ((2039, 2059), 'math.degrees', 'math.degrees', (['sun.az'], {}), '(sun.az)\n', (2051, 2059), False, 'import ephem, math, datetime\n'), ((2156, 2177), 'math.degrees', 'math.degrees', (['sun.alt'], {}), '(sun.alt)\n', (2168, 2177), False, 'import ephem, math, datetime\n'), ((2239, 2252), 'ephem.Date', 'ephem.Date', (['(1)'], {}), '(1)\n', (2249, 2252), False, 'import ephem, math, datetime\n'), ((3565, 3585), 'math.degrees', 
'math.degrees', (['sun.az'], {}), '(sun.az)\n', (3577, 3585), False, 'import ephem, math, datetime\n'), ((3682, 3703), 'math.degrees', 'math.degrees', (['sun.alt'], {}), '(sun.alt)\n', (3694, 3703), False, 'import ephem, math, datetime\n'), ((3765, 3778), 'ephem.Date', 'ephem.Date', (['(1)'], {}), '(1)\n', (3775, 3778), False, 'import ephem, math, datetime\n'), ((5704, 5717), 'ephem.Date', 'ephem.Date', (['(1)'], {}), '(1)\n', (5714, 5717), False, 'import ephem, math, datetime\n'), ((6561, 6575), 'numpy.cumsum', 'np.cumsum', (['dim'], {}), '(dim)\n', (6570, 6575), True, 'import numpy as np\n'), ((6606, 6634), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['dateformatter'], {}), '(dateformatter)\n', (6619, 6634), False, 'from matplotlib.ticker import FuncFormatter\n'), ((6719, 6747), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['timeformatter'], {}), '(timeformatter)\n', (6732, 6747), False, 'from matplotlib.ticker import FuncFormatter\n'), ((7580, 7593), 'ephem.Date', 'ephem.Date', (['(1)'], {}), '(1)\n', (7590, 7593), False, 'import ephem, math, datetime\n'), ((1188, 1208), 'datetime.time', 'datetime.time', (['(12)', '(0)'], {}), '(12, 0)\n', (1201, 1208), False, 'import ephem, math, datetime\n'), ((6207, 6232), 'datetime.date', 'datetime.date', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (6220, 6232), False, 'import ephem, math, datetime\n')] |
import cv2
import torch
import mmcv
import numpy as np
from .coco_transform import xywh2cs, get_affine_transform
from mmskeleton.ops.nms.nms import oks_nms
class VideoDemo(object):
    """Static helpers for top-down 2D pose estimation on video frames.

    Covers the three stages of the pipeline:
      * bbox_filter          -- keep person detections above a score threshold,
      * skeleton_preprocess  -- crop/normalize person boxes for the keypoint net,
      * skeleton_postprocess -- assemble keypoints and apply OKS-NMS.
    """

    def __init__(self):
        super(VideoDemo, self).__init__()

    @staticmethod
    def bbox_filter(bbox_result, bbox_thre=0.0):
        """Return (person_bboxes, person_labels) filtered by detection score.

        Args:
            bbox_result: per-class list of (n, 5) arrays [x1, y1, x2, y2, score],
                or a (bbox_result, segm_result) tuple (mmdetection convention).
            bbox_thre: minimum detection score to keep.
        """
        # clone from mmdetection
        if isinstance(bbox_result, tuple):
            bbox_result, segm_result = bbox_result
        else:
            bbox_result, segm_result = bbox_result, None
        bboxes = np.vstack(bbox_result)
        bbox_labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        labels = np.concatenate(bbox_labels)
        # Keep only the 'person' class (class id 0) above the threshold.
        person_id = 0
        person_bboxes = bboxes[labels == person_id]
        person_mask = person_bboxes[:, 4] >= bbox_thre
        person_bboxes = person_bboxes[person_mask]
        return person_bboxes, labels[labels == person_id][person_mask]

    @staticmethod
    def skeleton_preprocess(image, bboxes, skeleton_cfg):
        """Crop each bbox out of `image` and normalize it for the keypoint net.

        Returns:
            Tuple of (float tensor of crops stacked along dim 0, meta dict of
            per-crop float tensors: 'scale', 'rotation', 'center', 'score').
        """
        # output collectors
        result_list = []
        meta = dict()
        meta['scale'] = []
        meta['rotation'] = []
        meta['center'] = []
        meta['score'] = []
        # preprocess config
        image_size = skeleton_cfg['image_size']
        image_width = image_size[0]
        image_height = image_size[1]
        aspect_ratio = image_width * 1.0 / image_height
        pixel_std = skeleton_cfg['pixel_std']
        image_mean = skeleton_cfg['image_mean']
        image_std = skeleton_cfg['image_std']
        for idx, bbox in enumerate(bboxes):
            x1, y1, x2, y2 = bbox[:4]
            w, h = x2 - x1, y2 - y1
            # NOTE(review): h and w are passed in (h, w) order here -- confirm
            # against the xywh2cs signature (typically (x, y, w, h)).
            center, scale = xywh2cs(x1, y1, h, w, aspect_ratio, pixel_std)
            trans = get_affine_transform(center, scale, 0, image_size)
            transformed_image = cv2.warpAffine(
                image,
                trans, (int(image_size[0]), int(image_size[1])),
                flags=cv2.INTER_LINEAR)
            # Scale to [0, 1], normalize, move channels first (C, H, W).
            transformed_image = transformed_image / 255.0
            transformed_image = transformed_image - image_mean
            transformed_image = transformed_image / image_std
            transformed_image = transformed_image.transpose(2, 0, 1)
            result_list.append(transformed_image)
            meta['scale'].append(scale)
            meta['rotation'].append(0)
            meta['center'].append(center)
            meta['score'].append(bbox[4])
        result = torch.from_numpy(np.array(result_list)).float()
        for name, data in meta.items():
            meta[name] = torch.from_numpy(np.array(data)).float()
        return result, meta

    @staticmethod
    def skeleton_postprocess(
            preds,
            max_vals,
            meta,
    ):
        """Combine predicted keypoints with confidences and apply OKS-NMS.

        Args:
            preds: per-person keypoint coordinates (N, K, 2).
            max_vals: per-keypoint confidences (N, K, 1).
            meta: dict of per-person tensors from skeleton_preprocess.

        Returns:
            np.ndarray with the keypoints of the persons kept after OKS-NMS.
        """
        all_preds = np.concatenate((preds, max_vals), axis=-1)
        _kpts = []
        for idx, kpt in enumerate(all_preds):
            center = meta['center'][idx].numpy()
            scale = meta['scale'][idx].numpy()
            area = np.prod(scale * 200, 0)
            score = meta['score'][idx].numpy()
            _kpts.append({
                'keypoints': kpt,
                'center': center,
                'scale': scale,
                'area': area,
                'score': score,
            })
        num_joints = 17
        in_vis_thre = 0.2
        oks_thre = 0.9
        oks_nmsed_kpts = []
        # Rescore each person by the mean confidence of its visible joints.
        for n_p in _kpts:
            box_score = n_p['score']
            kpt_score = 0
            valid_num = 0
            for n_jt in range(0, num_joints):
                t_s = n_p['keypoints'][n_jt][2]
                if t_s > in_vis_thre:
                    kpt_score = kpt_score + t_s
                    valid_num = valid_num + 1
            if valid_num != 0:
                kpt_score = kpt_score / valid_num
            # rescoring
            n_p['score'] = kpt_score * box_score
        keep = oks_nms([_kpts[i] for i in range(len(_kpts))], oks_thre)
        if len(keep) == 0:
            # Bug fix: `_kpts` is a list, so the original `_kpts['keypoints']`
            # raised a TypeError. When NMS keeps nothing, keep all candidates.
            oks_nmsed_kpts.append([kpt['keypoints'] for kpt in _kpts])
        else:
            oks_nmsed_kpts.append(
                [_kpts[_keep]['keypoints'] for _keep in keep])
        return np.array(oks_nmsed_kpts[0])
| [
"numpy.full",
"numpy.prod",
"numpy.array",
"numpy.vstack",
"numpy.concatenate"
] | [((557, 579), 'numpy.vstack', 'np.vstack', (['bbox_result'], {}), '(bbox_result)\n', (566, 579), True, 'import numpy as np\n'), ((740, 767), 'numpy.concatenate', 'np.concatenate', (['bbox_labels'], {}), '(bbox_labels)\n', (754, 767), True, 'import numpy as np\n'), ((3059, 3101), 'numpy.concatenate', 'np.concatenate', (['(preds, max_vals)'], {'axis': '(-1)'}), '((preds, max_vals), axis=-1)\n', (3073, 3101), True, 'import numpy as np\n'), ((4480, 4507), 'numpy.array', 'np.array', (['oks_nmsed_kpts[0]'], {}), '(oks_nmsed_kpts[0])\n', (4488, 4507), True, 'import numpy as np\n'), ((618, 659), 'numpy.full', 'np.full', (['bbox.shape[0]', 'i'], {'dtype': 'np.int32'}), '(bbox.shape[0], i, dtype=np.int32)\n', (625, 659), True, 'import numpy as np\n'), ((3287, 3310), 'numpy.prod', 'np.prod', (['(scale * 200)', '(0)'], {}), '(scale * 200, 0)\n', (3294, 3310), True, 'import numpy as np\n'), ((2746, 2767), 'numpy.array', 'np.array', (['result_list'], {}), '(result_list)\n', (2754, 2767), True, 'import numpy as np\n'), ((2861, 2875), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2869, 2875), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Module for evaluating an RNN agent.
Defines functions evaluate_agent to run a simulation for a provided agent and
environment to calculate the average reward and safety costs for the agent.
"""
from absl import logging
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
def violence_risk(observation):
  """Risk score of the first response: its raw violence score."""
  first_response = observation['response'][0]
  return first_response['violence_score']
def health_risk(observation):
  """Risk score of the first response: one minus its health score."""
  first_response = observation['response'][0]
  return 1 - first_response['health_score']
def evaluate_agent(agent, env, alpha, num_users=100, deterministic=False,
                   scatter_plot_trajectories=False, figure_file_obj=None,
                   risk_score_extractor=violence_risk):
  """Runs an agent-env simulation to evaluate average reward and safety costs.

  For each data pool ('train'/'eval'/'test' when the environment supports
  pools, otherwise a single 'all' pool) this samples `num_users` full
  trajectories, averages each user's per-step reward/health/rating, and
  computes VaR (the alpha-percentile of the per-user health risks) and CVaR
  (mean of the risks at or above VaR).

  Args:
    agent: rnn_cvar_agent.SafeRNNAgent object.
    env: Recsim environment that returns responses with reward and health score.
    alpha: The alpha used as the level for VaR/CVaR.
    num_users: Number of users to sample for the evaluation.
    deterministic: Whether the agent chooses the argmax action instead of
      sampling.
    scatter_plot_trajectories: Whether to scatter-plot the per-user
      (rating, health) pairs via plot_trajectories.
    figure_file_obj: File object to store the plot.
    risk_score_extractor: A function which takes an observation and returns a
      risk score.

  Returns:
    Dictionary with average reward, health score, cvar, var for num_users
    sampled.
  """
  results = {}
  if hasattr(env._environment, 'set_active_pool'):  # pylint: disable=protected-access
    pools = ['train', 'eval', 'test']
  else:
    pools = ['all']
  for pool in pools:
    # Learning phase 0 = evaluation: disables dropout etc. for this pool.
    tf.keras.backend.set_learning_phase(0)
    if hasattr(env._environment, 'set_active_pool'):  # pylint: disable=protected-access
      env._environment.set_active_pool(pool)  # pylint: disable=protected-access
    else:
      assert pool == 'all'
    rewards = []
    health = []
    ratings = []
    max_episode_length = agent.max_episode_length
    agent.epsilon = 0.0  # Turn off any exploration.
    # Generate num_users trajectories.
    for _ in range(num_users):
      # TODO(): Clean the logged variables by making a data class.
      curr_user_reward = 0.0
      curr_user_health = 0.0
      curr_user_rating = 0.0
      reward = 0
      observation = env.reset()
      for _ in range(max_episode_length):
        slate = agent.step(reward, observation, eval_mode=True,
                           deterministic=deterministic)
        observation, reward, _, _ = env.step(slate)
        curr_user_reward += reward
        curr_user_health += 1-risk_score_extractor(observation)
        if 'rating' in observation['response'][0]:
          curr_user_rating += observation['response'][0]['rating']
      agent.end_episode(reward, observation, eval_mode=True)
      # Per-user averages over the episode.
      rewards.append(curr_user_reward/float(max_episode_length))
      health.append(curr_user_health/float(max_episode_length))
      ratings.append(curr_user_rating/float(max_episode_length))
    agent.empty_buffer()
    # Risk = 1 - health; VaR is the alpha-percentile, CVaR its tail mean.
    health_risks = 1-np.array(health)
    var = np.percentile(health_risks, 100*alpha)
    cvar = compute_cvar(health_risks, var)
    logging.info('Average Reward = %f, Average Health = %f, '
                 'Average Ratings = %f,VaR = %f, CVaR = %f',
                 np.mean(rewards), np.mean(health), np.mean(ratings), var, cvar)
    # Set the learning phase back to 1 (training).
    tf.keras.backend.set_learning_phase(1)
    if scatter_plot_trajectories:
      plot_trajectories(ratings, health, figure_file_obj)
    results[pool] = {
        'rewards': np.mean(rewards),
        'health': np.mean(health),
        'ratings': np.mean(ratings),
        'var': var,
        'cvar': cvar
    }
  if len(results) == 1:  # No train/eval/test split, just return one value.
    return results['all']
  # Promote the eval results to the top-level dictionary.
  results.update(results['eval'])
  return results
def plot_trajectories(rewards, health, figure_file_obj):
  """Joint plot of per-user reward vs. health (KDE with scatter fallback).

  When the data cannot support a kernel-density estimate, a plain scatter
  plot is drawn instead. The figure is written to `figure_file_obj` as PNG
  when provided, otherwise shown interactively.
  """
  plt.figure()
  try:
    joint = sns.jointplot(x=rewards, y=health, kind='kde')
    joint.plot_joint(plt.scatter, c='grey', s=30, linewidth=1, marker='+')
  except np.linalg.LinAlgError:
    # The data does not support KDE plotting; scatter always works.
    joint = sns.jointplot(x=rewards, y=health, kind='scatter')
  joint.ax_joint.collections[0].set_alpha(0)
  joint.set_axis_labels('$Reward$', '$Health$')
  if figure_file_obj:
    plt.savefig(figure_file_obj, format='png')
  else:
    plt.show()
def compute_cvar(health_risks, var):
  """Returns CVaR: the mean of all health risks at or above `var`."""
  tail_risks = [risk for risk in health_risks if risk >= var]
  return np.mean(tail_risks)
| [
"matplotlib.pyplot.show",
"numpy.percentile",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"seaborn.jointplot",
"tensorflow.keras.backend.set_learning_phase",
"matplotlib.pyplot.savefig"
] | [((4729, 4741), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4739, 4741), True, 'import matplotlib.pyplot as plt\n'), ((5316, 5371), 'numpy.mean', 'np.mean', (['[risk for risk in health_risks if risk >= var]'], {}), '([risk for risk in health_risks if risk >= var])\n', (5323, 5371), True, 'import numpy as np\n'), ((2247, 2285), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(0)'], {}), '(0)\n', (2282, 2285), True, 'import tensorflow as tf\n'), ((3750, 3790), 'numpy.percentile', 'np.percentile', (['health_risks', '(100 * alpha)'], {}), '(health_risks, 100 * alpha)\n', (3763, 3790), True, 'import numpy as np\n'), ((4080, 4118), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(1)'], {}), '(1)\n', (4115, 4118), True, 'import tensorflow as tf\n'), ((4757, 4803), 'seaborn.jointplot', 'sns.jointplot', ([], {'x': 'rewards', 'y': 'health', 'kind': '"""kde"""'}), "(x=rewards, y=health, kind='kde')\n", (4770, 4803), True, 'import seaborn as sns\n'), ((5144, 5186), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_file_obj'], {'format': '"""png"""'}), "(figure_file_obj, format='png')\n", (5155, 5186), True, 'import matplotlib.pyplot as plt\n'), ((5199, 5209), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5207, 5209), True, 'import matplotlib.pyplot as plt\n'), ((3723, 3739), 'numpy.array', 'np.array', (['health'], {}), '(health)\n', (3731, 3739), True, 'import numpy as np\n'), ((3972, 3988), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (3979, 3988), True, 'import numpy as np\n'), ((3990, 4005), 'numpy.mean', 'np.mean', (['health'], {}), '(health)\n', (3997, 4005), True, 'import numpy as np\n'), ((4007, 4023), 'numpy.mean', 'np.mean', (['ratings'], {}), '(ratings)\n', (4014, 4023), True, 'import numpy as np\n'), ((4252, 4268), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (4259, 4268), True, 'import numpy as np\n'), ((4288, 4303), 
'numpy.mean', 'np.mean', (['health'], {}), '(health)\n', (4295, 4303), True, 'import numpy as np\n'), ((4324, 4340), 'numpy.mean', 'np.mean', (['ratings'], {}), '(ratings)\n', (4331, 4340), True, 'import numpy as np\n'), ((4982, 5032), 'seaborn.jointplot', 'sns.jointplot', ([], {'x': 'rewards', 'y': 'health', 'kind': '"""scatter"""'}), "(x=rewards, y=health, kind='scatter')\n", (4995, 5032), True, 'import seaborn as sns\n')] |
'''
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% COPYRIGHT NOTICE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Permission is granted for anyone to copy, use, modify, or distribute the
accompanying programs and documents for any purpose, provided this copyright
notice is retained and prominently displayed, along with a complete citation of
the published version of the paper:
______________________________________________________________________________
| <NAME>, and <NAME> |
| Solving the quantum many-body problem with artificial neural-networks |
|______________________________________________________________________________|
The programs and documents are distributed without any warranty, express or
implied.
These programs were written for research purposes only, and are meant to
demonstrate and reproduce the main results obtained in the paper.
All use of these programs is entirely at the user's own risk.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
'''
from typing import List, Any, Tuple
import numpy as np
import math
import os
class Sampler:
    '''
    Runs the Metropolis-Hastings algorithm to generate spin-configuration
    samples from the neural-network quantum state (NQS) and estimates the
    variational energy of the state from the sampled local energies.
    '''

    def __init__(self, hamiltonian, neuralNetState, zeroMagnetization=True, filename=None,
                 initialState=None, writeOutput=True) -> None:
        self.hamiltonian = hamiltonian
        self.neuralNetState = neuralNetState
        self.neuralNetEnergy = None        # energy-per-spin estimate (set by estimateOutputEnergy)
        self.neuralNetEnergyError = None   # statistical error of that estimate
        self.localEnergies = []            # local-energy samples collected by run()
        self.zeroMagnetization = zeroMagnetization
        self.samplesFile = filename
        self.numSpins = self.hamiltonian.numSpins
        self.stateHistory = []             # one sampled configuration per sweep
        self.currentLocalEnergy = None
        self.correlation_time = None
        self.writeOutput = writeOutput
        # Sampling statistics
        self.acceptances = None
        self.numMoves = None
        # Path to store computed energies
        self.dataDir = './data/'
        if not os.path.exists(self.dataDir):
            os.makedirs(self.dataDir)
        if initialState is None:
            self.initRandomState()
        else:
            self.currentState = initialState

    def initRandomState(self) -> None:
        '''
        Generates a random +/-1 spin configuration by sampling a uniform
        distribution; optionally enforces zero total magnetization.
        '''
        self.currentState = np.random.uniform(size=self.numSpins)
        # If a[i] < 0.5, set a[i] to -1, else set it to 1
        self.currentState = np.where(self.currentState < 0.5, -1, 1)
        # Ensure that the total magnetization of the state is zero
        if self.zeroMagnetization:
            if self.numSpins % 2:
                raise ValueError("Cannot initialize a random state with zero magnetization for an odd number of spins\n")
            totalMagnetization = np.sum(self.currentState)
            # Bug fix: each +/-1 flip changes the total magnetization by 2,
            # but the original stepped the tracked value by 1 per flip and
            # therefore overshot past zero (final magnetization -M instead
            # of 0 for an initial magnetization M).
            if totalMagnetization > 0:
                while totalMagnetization != 0:
                    randomSite = self.chooseRandomSite()
                    while self.currentState[randomSite] < 0:
                        randomSite = self.chooseRandomSite()
                    self.currentState[randomSite] = -1
                    totalMagnetization = totalMagnetization - 2
            elif totalMagnetization < 0:
                while totalMagnetization != 0:
                    randomSite = self.chooseRandomSite()
                    while self.currentState[randomSite] > 0:
                        randomSite = self.chooseRandomSite()
                    self.currentState[randomSite] = 1
                    totalMagnetization = totalMagnetization + 2

    def randomSpinFlips(self, numFlips) -> List:
        '''
        Proposes sites to flip for one Metropolis move: one site, or two when
        numFlips == 2. Returns [] when the proposal must be rejected outright
        (two equal spins while zero magnetization is enforced, or the same
        site twice otherwise).
        '''
        firstSite = self.chooseRandomSite()
        if numFlips == 2:
            secondSite = self.chooseRandomSite()
            if self.zeroMagnetization:
                # Flipping two equal spins would change the magnetization.
                if self.currentState[firstSite] == self.currentState[secondSite]:
                    return []
                else:
                    return [firstSite, secondSite]
            else:
                if firstSite == secondSite:
                    return []
                else:
                    return [firstSite, secondSite]
        else:
            return [firstSite]

    def resetSamplerStats(self) -> None:
        '''
        Resets the sampler statistics: acceptance count, move count, and the
        recorded state history.
        '''
        self.acceptances = 0
        self.numMoves = 0
        self.stateHistory = []

    def acceptanceRate(self) -> float:
        '''
        Fraction of attempted (non-rejected-outright) moves that were accepted.
        '''
        return self.acceptances / self.numMoves

    def move(self, numFlips) -> None:
        '''
        Performs one Metropolis-Hastings step: propose spin flips, test with
        acceptance probability |psi(s')/psi(s)|^2, and update on success.
        '''
        flipSites = self.randomSpinFlips(numFlips)
        if len(flipSites) > 0:
            # Acceptance probability from the wavefunction amplitude ratio
            psiRatio = self.neuralNetState.amplitudeRatio(
                self.currentState, flipSites)
            acceptanceProbability = np.square(np.abs(psiRatio))
            # Metropolis-Hastings test
            if acceptanceProbability > np.random.random():
                self.neuralNetState.updateLookupTables(
                    self.currentState, flipSites)
                # Test passed: flip the chosen sites in place
                for flip in flipSites:
                    self.currentState[flip] *= -1
                self.acceptances += 1
            self.numMoves += 1

    def computeLocalEnergy(self) -> float:
        '''
        Local energy of the current state:
        E_loc(s) = sum_s' <s|H|s'> * psi(s') / psi(s).
        '''
        state = self.currentState
        (matElements, spinFlips) = self.hamiltonian.findNonZeroElements(state)
        energies = [self.neuralNetState.amplitudeRatio(state, spinFlips[i])*element
                    for (i, element) in enumerate(matElements)]
        return sum(energies)

    def run(self, numSweeps, thermFactor=0.1, sweepFactor=1, numFlips=None) -> None:
        '''
        Runs the Monte-Carlo sampling for the NQS.

        A sweep consists of (numSpins * sweepFactor) single moves;
        thermFactor controls the fraction of extra sweeps used to
        thermalize the chain before samples are collected.
        '''
        if numFlips is None:
            numFlips = self.hamiltonian.minSpinFlips
        # Bug fix: the original condition used `and`, which is always False,
        # so invalid values of numFlips were never rejected.
        if numFlips < 1 or numFlips > 2:
            raise ValueError("Number of spin flips must be equal to 1 or 2.\n")
        if not (0 <= thermFactor <= 1):
            raise ValueError(
                "The thermalization factor should be a real number between 0 and 1.\n")
        if numSweeps < 50:
            raise ValueError(
                "Please enter a number of sweeps sufficiently large (>50).\n")
        print("Starting Monte-Carlo Sampling...")
        print(f"{numSweeps} sweeps will be perfomed.")
        self.resetSamplerStats()
        self.neuralNetState.initLookupTables(self.currentState)
        if thermFactor != 0:
            print('Starting Thermalization...')
            numMoves = (int)((thermFactor * numSweeps)
                             * (sweepFactor * self.numSpins))
            for _ in range(numMoves):
                self.move(numFlips)
            print('Done.')
        self.resetSamplerStats()
        # One local-energy sample and one state snapshot per sweep
        for _ in range(numSweeps):
            for _ in range(sweepFactor * self.numSpins):
                self.move(numFlips)
            self.currentLocalEnergy = self.computeLocalEnergy()
            self.localEnergies.append(self.currentLocalEnergy)
            self.stateHistory.append(np.array(self.currentState))
        print('Completed Monte-Carlo Sampling.')
        return self.estimateOutputEnergy()

    def estimateOutputEnergy(self) -> None:
        '''
        Binning analysis of the sampled local energies: estimates the mean
        energy per spin, its statistical error, and the autocorrelation time.
        Appends the estimate to data/computed_energies.txt when writeOutput
        is set.
        '''
        nblocks = 50
        blocksize = len(self.localEnergies) // nblocks
        enmean = 0
        enmeansq = 0
        enmeanUnblocked = 0
        enmeanSqUnblocked = 0
        for block in range(nblocks):
            eblock = 0
            for j in range(block*blocksize, (block + 1) * blocksize):
                eblock += self.localEnergies[j].real
                # Welford-style running mean/variance of the raw samples
                delta = self.localEnergies[j].real - enmeanUnblocked
                enmeanUnblocked += delta / (j + 1)
                delta2 = self.localEnergies[j].real - enmeanUnblocked
                enmeanSqUnblocked += delta * delta2
            eblock /= blocksize
            # Welford-style running mean/variance of the block means
            delta = eblock - enmean
            enmean += delta / (block + 1)
            delta2 = eblock - enmean
            enmeansq += delta * delta2
        enmeansq /= (nblocks - 1)
        enmeanSqUnblocked /= (nblocks * blocksize - 1)
        estAvg = enmean / self.numSpins
        estError = math.sqrt(enmeansq / nblocks) / self.numSpins
        self.neuralNetEnergy = np.squeeze(estAvg)
        self.neuralNetEnergyError = np.squeeze(estError)
        energyReport = f"Estimated average energy per spin: {estAvg} +/- {estError}"
        print(energyReport)
        binReport = f'Error estimated with binning analysis consisting of {nblocks} bins of {blocksize} samples each.'
        print(binReport)
        self.correlation_time = 0.5 * blocksize * enmeansq / enmeanSqUnblocked
        autocorrelation = f'Estimated autocorrelation time is {self.correlation_time}'
        print(autocorrelation)
        if self.writeOutput:
            # Save the computed energy to a file.
            with open(self.dataDir + 'computed_energies.txt', 'a+') as f:
                # Bug fix: np.savetxt rejects scalar/0-d input; wrap the
                # value so a single row is appended.
                np.savetxt(f, np.atleast_1d(estAvg))

    def chooseRandomSite(self) -> int:
        '''
        Returns a uniformly random site index in [0, numSpins).

        Bug fix: np.random.randint's `high` bound is exclusive, so the
        original high=numSpins-1 could never select the last site.
        '''
        return np.random.randint(low=0, high=self.numSpins)
| [
"numpy.random.uniform",
"numpy.sum",
"os.makedirs",
"math.sqrt",
"numpy.abs",
"numpy.savetxt",
"os.path.exists",
"numpy.where",
"numpy.random.randint",
"numpy.random.random",
"numpy.array",
"numpy.squeeze"
] | [((2518, 2555), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'self.numSpins'}), '(size=self.numSpins)\n', (2535, 2555), True, 'import numpy as np\n'), ((2642, 2682), 'numpy.where', 'np.where', (['(self.currentState < 0.5)', '(-1)', '(1)'], {}), '(self.currentState < 0.5, -1, 1)\n', (2650, 2682), True, 'import numpy as np\n'), ((9401, 9419), 'numpy.squeeze', 'np.squeeze', (['estAvg'], {}), '(estAvg)\n', (9411, 9419), True, 'import numpy as np\n'), ((9456, 9476), 'numpy.squeeze', 'np.squeeze', (['estError'], {}), '(estError)\n', (9466, 9476), True, 'import numpy as np\n'), ((10339, 10387), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(self.numSpins - 1)'}), '(low=0, high=self.numSpins - 1)\n', (10356, 10387), True, 'import numpy as np\n'), ((2076, 2104), 'os.path.exists', 'os.path.exists', (['self.dataDir'], {}), '(self.dataDir)\n', (2090, 2104), False, 'import os\n'), ((2118, 2143), 'os.makedirs', 'os.makedirs', (['self.dataDir'], {}), '(self.dataDir)\n', (2129, 2143), False, 'import os\n'), ((3001, 3026), 'numpy.sum', 'np.sum', (['self.currentState'], {}), '(self.currentState)\n', (3007, 3026), True, 'import numpy as np\n'), ((9324, 9353), 'math.sqrt', 'math.sqrt', (['(enmeansq / nblocks)'], {}), '(enmeansq / nblocks)\n', (9333, 9353), False, 'import math\n'), ((5452, 5468), 'numpy.abs', 'np.abs', (['psiRatio'], {}), '(psiRatio)\n', (5458, 5468), True, 'import numpy as np\n'), ((5548, 5566), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5564, 5566), True, 'import numpy as np\n'), ((8033, 8060), 'numpy.array', 'np.array', (['self.currentState'], {}), '(self.currentState)\n', (8041, 8060), True, 'import numpy as np\n'), ((10106, 10127), 'numpy.savetxt', 'np.savetxt', (['f', 'estAvg'], {}), '(f, estAvg)\n', (10116, 10127), True, 'import numpy as np\n')] |
import numpy as np
import sys, os
import time
import cv2
# Make repository-local modules (utils, temp, params_qi, alphabets)
# importable when the script is run from the project root.
sys.path.append(os.getcwd())
# crnn packages
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import utils
import temp.crnn_fc_model as crnn
import Levenshtein
import params_qi as params
import alphabets
# Full character set recognized by the CRNN (one label per character).
str1 = alphabets.alphabet
# Plate colour classes (runtime values — must stay as-is).
color_dict=['蓝','黄','绿','黑','白']
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--images_path', type=str, default="/rdata/qi.liu/code/LPR/ezai/all_projects/carplate_recognition/data/test_new/energy_longmao.txt", help='the path to your images')
opt = parser.parse_args()
# crnn params
crnn_model_path = '/rdata/qi.liu/code/LPR/pytorch_crnn/crnn_chinese_characters_rec-master/temp/model_text.pth'
alphabet = str1
# presumably +1 for the CTC blank label — confirm against the model head
nclass = len(alphabet)+1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def crnn_recognition(image, model):
imgH = 32
imgW = 100
converter = utils.strLabelConverter(alphabet)
### ratio
h, w, c = image.shape
image = cv2.resize(image, (0,0), fx=imgW/w, fy=imgH/h, interpolation=cv2.INTER_CUBIC)
image = (np.reshape(image, (imgH, imgW, 3))).transpose(2, 0, 1)
image = image.astype(np.float32) / 255.
image = torch.from_numpy(image).type(torch.FloatTensor)
if torch.cuda.is_available():
image = image.cuda(device)
image = image.view(1, *image.size())
image = Variable(image)
model.eval()
preds = model(image)
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
print(preds.shape)
preds_size = Variable(torch.IntTensor([preds.size(0)]))
sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
log = converter.decode(preds.data, preds_size.data, raw=True)
print("raw_data: ", log)
return sim_pred\
if __name__ == '__main__':
    image_root = '/rdata/qi.liu/code/LPR/ezai/all_projects/carplate_recognition'
    # NOTE(review): this handle is opened but never written to or closed
    # in the visible code.
    f_wrong =open('results/wrong.txt', 'a')
    # crnn network
    model = crnn.CRNN_FC(32, 3, 76, isPretrain=False, leakyRelu=False)
    if torch.cuda.is_available():
        model = model.cuda(device)
    print('loading pretrained model from {0}'.format(crnn_model_path))
    # Load the pretrained CRNN weights.
    model.load_state_dict(torch.load(crnn_model_path))
    count_all = 0
    # Each line of the list file is "<relative image path> <ground-truth label>".
    with open(opt.images_path, 'r' ,encoding='utf-8') as f:
        for item in f.readlines():
            print(count_all)
            count_all += 1
            image_path = os.path.join(image_root,item.strip().split(' ')[0])
            image_name = item.strip().split(' ')[0].split('/')[-1]
            label_text = item.strip().split(' ')[1]
            print(image_path)
            image = cv2.imread(image_path)
            pred_text = crnn_recognition(image, model)
            print('results: {0}'.format(pred_text), 'GT: {0}'.format(label_text) )
| [
"argparse.ArgumentParser",
"os.getcwd",
"torch.autograd.Variable",
"torch.load",
"utils.strLabelConverter",
"temp.crnn_fc_model.CRNN_FC",
"cv2.imread",
"torch.cuda.is_available",
"numpy.reshape",
"cv2.resize",
"torch.from_numpy"
] | [((380, 405), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (403, 405), False, 'import argparse\n'), ((73, 84), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (82, 84), False, 'import sys, os\n'), ((960, 993), 'utils.strLabelConverter', 'utils.strLabelConverter', (['alphabet'], {}), '(alphabet)\n', (983, 993), False, 'import utils\n'), ((1047, 1134), 'cv2.resize', 'cv2.resize', (['image', '(0, 0)'], {'fx': '(imgW / w)', 'fy': '(imgH / h)', 'interpolation': 'cv2.INTER_CUBIC'}), '(image, (0, 0), fx=imgW / w, fy=imgH / h, interpolation=cv2.\n INTER_CUBIC)\n', (1057, 1134), False, 'import cv2\n'), ((1305, 1330), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1328, 1330), False, 'import torch\n'), ((1420, 1435), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (1428, 1435), False, 'from torch.autograd import Variable\n'), ((2023, 2081), 'temp.crnn_fc_model.CRNN_FC', 'crnn.CRNN_FC', (['(32)', '(3)', '(76)'], {'isPretrain': '(False)', 'leakyRelu': '(False)'}), '(32, 3, 76, isPretrain=False, leakyRelu=False)\n', (2035, 2081), True, 'import temp.crnn_fc_model as crnn\n'), ((2090, 2115), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2113, 2115), False, 'import torch\n'), ((821, 846), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (844, 846), False, 'import torch\n'), ((2270, 2297), 'torch.load', 'torch.load', (['crnn_model_path'], {}), '(crnn_model_path)\n', (2280, 2297), False, 'import torch\n'), ((1138, 1172), 'numpy.reshape', 'np.reshape', (['image', '(imgH, imgW, 3)'], {}), '(image, (imgH, imgW, 3))\n', (1148, 1172), True, 'import numpy as np\n'), ((1249, 1272), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1265, 1272), False, 'import torch\n'), ((2717, 2739), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2727, 2739), False, 'import cv2\n')] |
"""Derive Potential Evapotransporation (evspsblpot) using De Bruin (2016).
de Bruin, H. A. R., Trigo, I. F., Bosveld, F. C., Meirink, J. F.: A
Thermodynamically Based Model for Actual Evapotranspiration of an Extensive
Grass Field Close to FAO Reference, Suitable for Remote Sensing Application,
American Meteorological Society, 17, 1373-1382, DOI: 10.1175/JHM-D-15-0006.1,
2016.
"""
import numpy as np
import iris
def tetens_derivative(tas):
    """Compute d(es)/dT, the slope of Tetens' saturated vapour pressure curve.

    Tetens formula (https://en.wikipedia.org/wiki/Tetens_equation):
        es(T) = e0 * exp(a * T / (T + b))
    whose derivative (checked with Wolfram alpha) is:
        des/dT = a * b * e0 * exp(a * T / (b + T)) / (b + T)^2
    """
    # The empirical constants below assume temperature in degrees Celsius.
    tas.convert_units('degC')
    # e0: saturated vapour pressure at 273 Kelvin.
    e0_const = iris.coords.AuxCoord(np.float32(6.112),
                                     long_name='Saturated vapour pressure',
                                     units='hPa')
    emp_a = np.float32(17.67)  # empirical constant a
    # Empirical constant b in Tetens formula.
    emp_b = iris.coords.AuxCoord(np.float32(243.5),
                                  long_name='Empirical constant b',
                                  units='degC')
    exp_term = iris.analysis.maths.exp(emp_a * tas / (tas + emp_b))
    # Operand grouping is kept exactly as in the formula above: iris does not
    # support every AuxCoord/AuxCoord operation, so the order matters.
    return (exp_term * e0_const / (tas + emp_b)**2) * (emp_a * emp_b)
def get_constants(psl):
    """Return (gamma, cs, beta, lambda) for De Bruin (2016) reference evaporation.

    rv and rd are defined in Wallace and Hobbs (2006), 2.6 equation 3.14;
    lambda and cp come from Wallace and Hobbs (2006); beta and cs come from
    De Bruin (2016), section 4a.
    """
    # All pressure handling below is in hPa.
    psl.convert_units('hPa')
    # Gas constant of water vapour (Wallace and Hobbs 2006, eq. 3.14).
    rv_const = iris.coords.AuxCoord(np.float32(461.51),
                                     long_name='Gas constant water vapour',
                                     units='J K-1 kg-1')
    # Gas constant of dry air (Wallace and Hobbs 2006, eq. 3.14).
    rd_const = iris.coords.AuxCoord(np.float32(287.0),
                                     long_name='Gas constant dry air',
                                     units='J K-1 kg-1')
    # Latent heat of vaporization in J kg-1 (or J m-2 day-1),
    # from Wallace and Hobbs (2006).
    lambda_ = iris.coords.AuxCoord(np.float32(2.5e6),
                                    long_name='Latent heat of vaporization',
                                    units='J kg-1')
    # Specific heat of dry air at constant pressure (Wallace and Hobbs 2006).
    cp_const = iris.coords.AuxCoord(np.float32(1004),
                                     long_name='Specific heat of dry air',
                                     units='J K-1 kg-1')
    # Correction constant (De Bruin 2016, section 4a).
    beta = iris.coords.AuxCoord(np.float32(20),
                                 long_name='Correction Constant',
                                 units='W m-2')
    # Empirical constant (De Bruin 2016, section 4a).
    cs_const = iris.coords.AuxCoord(np.float32(110),
                                     long_name='Empirical constant',
                                     units='W m-2')
    # gamma = (rv/rd) * (cp * psl / lambda).  iris raises
    # NotYetImplementedError for coord / coord, so the rv/rd ratio is taken
    # on the raw points first.
    rv_rd_const = rv_const.points[0] / rd_const.points[0]
    gamma = rv_rd_const * (psl * cp_const / lambda_)
    return gamma, cs_const, beta, lambda_
def debruin_pet(psl, rsds, rsdt, tas):
    """Compute De Bruin (2016) reference evaporation.

    Implements equation 6 from De Bruin (10.1175/JHM-D-15-0006.1):
    psl is sea-level pressure, rsds/rsdt the surface/top-of-atmosphere
    downwelling shortwave radiation, and tas the near-surface temperature.
    """
    slope_svp = tetens_derivative(tas)
    gamma, cs_const, beta, lambda_ = get_constants(psl)
    # Equation 5: net radiation from the surface (rsds) and TOA (rsdt)
    # downwelling shortwave, with a fixed albedo of 0.23.
    albedo_factor = np.float32(1 - 0.23)
    net_radiation = (albedo_factor * rsds) - (rsds * cs_const / rsdt)
    # Equation 6 (units: W m-2), then convert to a flux via lambda.
    ref_evap = ((slope_svp / (slope_svp + gamma)) * net_radiation) + beta
    pet = ref_evap / lambda_
    pet.var_name = 'evspsblpot'
    pet.standard_name = 'water_potential_evaporation_flux'
    pet.long_name = 'Potential Evapotranspiration'
    return pet
| [
"numpy.float32",
"iris.analysis.maths.exp"
] | [((1020, 1037), 'numpy.float32', 'np.float32', (['(17.67)'], {}), '(17.67)\n', (1030, 1037), True, 'import numpy as np\n'), ((1289, 1341), 'iris.analysis.maths.exp', 'iris.analysis.maths.exp', (['(emp_a * tas / (tas + emp_b))'], {}), '(emp_a * tas / (tas + emp_b))\n', (1312, 1341), False, 'import iris\n'), ((4085, 4105), 'numpy.float32', 'np.float32', (['(1 - 0.23)'], {}), '(1 - 0.23)\n', (4095, 4105), True, 'import numpy as np\n'), ((865, 882), 'numpy.float32', 'np.float32', (['(6.112)'], {}), '(6.112)\n', (875, 882), True, 'import numpy as np\n'), ((1141, 1158), 'numpy.float32', 'np.float32', (['(243.5)'], {}), '(243.5)\n', (1151, 1158), True, 'import numpy as np\n'), ((1963, 1981), 'numpy.float32', 'np.float32', (['(461.51)'], {}), '(461.51)\n', (1973, 1981), True, 'import numpy as np\n'), ((2210, 2227), 'numpy.float32', 'np.float32', (['(287.0)'], {}), '(287.0)\n', (2220, 2227), True, 'import numpy as np\n'), ((2490, 2511), 'numpy.float32', 'np.float32', (['(2500000.0)'], {}), '(2500000.0)\n', (2500, 2511), True, 'import numpy as np\n'), ((2761, 2777), 'numpy.float32', 'np.float32', (['(1004)'], {}), '(1004)\n', (2771, 2777), True, 'import numpy as np\n'), ((2986, 3000), 'numpy.float32', 'np.float32', (['(20)'], {}), '(20)\n', (2996, 3000), True, 'import numpy as np\n'), ((3197, 3212), 'numpy.float32', 'np.float32', (['(110)'], {}), '(110)\n', (3207, 3212), True, 'import numpy as np\n')] |
# Train a GCN edge-privacy classifier on the Flickr MIR dataset for several
# privacy budgets (eps), averaging metrics over multiple runs per budget.
import os
import warnings
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
import dgl
import numpy as np
from tqdm import tqdm
from Utils.DataProcessing import *
from Datasets.FlickrDataset import FlickrMIRDataset
from Models.GCN import GCN
from Trainer.Trainer import Trainer
import torch
import sys
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Hyper-parameters.
num_channel = 128
learning_rate = 0.001
epochs = 2000
patience = 200
num_run = 5
num_feat = 2048
num_class = 1
# Data locations and output directory for model checkpoints.
data_file = 'Data/MIR/feat/'
data_edge_file = 'Data/MIR/pairs/'
save_model_path = '25JAN2022/'
org_edge_file = 'mir_priv.pairs'
feat_file = 'resnet50_plain_feat.npy'
# One generated edge file per privacy budget; edge_file[i] corresponds to
# eps_edge[i] (kept in sync via the manual counter `i` below).
edge_file = [
    'mir_kdd_0.1.pairs',
    'mir_kdd_0.2.pairs',
    'mir_kdd_0.4.pairs',
    'mir_kdd_0.6.pairs',
    'mir_kdd_0.8.pairs',
    'mir_kdd_1.0.pairs',
]
eps_edge = ['0.1', '0.2', '0.4', '0.6', '0.8', '1.0']
all_result = {}   # (auc, f1, acc) per (eps, run)
avg_result = {}   # (auc, f1, acc) averaged over runs, per eps
i = 0
for efile in tqdm(edge_file):
    print("Running for feat file: {}".format(efile))
    dataset = FlickrMIRDataset(feat_file=data_file + feat_file, edge_org = data_edge_file + org_edge_file, edge_generated = data_edge_file + efile, type_test = 'edge_base')
    temp_auc = []
    temp_f1 = []
    temp_acc = []
    for run in range(num_run):
        print("Run {}".format(run + 1))
        name_model_to_save = save_model_path + "edge_epsEdge_{}_run_{}.pt".format(eps_edge[i], run+1)
        model = GCN(in_feats=dataset.num_feature, h_feats=num_channel, num_classes=dataset.num_classes)
        trainer = Trainer(num_epoch=epochs, learning_rate=learning_rate, patience=patience, model=model, dataset=dataset,
                          name_model=name_model_to_save, device=device)
        auc, f1, acc = trainer.train()
        all_result["edge_epsEdge_{}_run_{}".format(eps_edge[i], run+1)] = (auc, f1, acc)
        temp_auc.append(auc)
        temp_f1.append(f1)
        temp_acc.append(acc)
    avg_result["edge_epsEdge_{}".format(eps_edge[i])] = (
        np.mean(np.array(temp_auc)), np.mean(np.array(temp_f1)), np.mean(np.array(temp_acc)))
    i += 1
print("=============== ALL RESULTS: ===================")
for key in all_result:
    print(key, all_result[key])
print("=============== AVG RESULTS: ===================")
for key in avg_result:
    print(key, avg_result[key])
| [
"tqdm.tqdm",
"warnings.simplefilter",
"Datasets.FlickrDataset.FlickrMIRDataset",
"warnings.filterwarnings",
"torch.cuda.is_available",
"Models.GCN.GCN",
"numpy.array",
"Trainer.Trainer.Trainer"
] | [((114, 176), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (137, 176), False, 'import warnings\n'), ((177, 239), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (198, 239), False, 'import warnings\n'), ((240, 295), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (263, 295), False, 'import warnings\n'), ((1162, 1177), 'tqdm.tqdm', 'tqdm', (['edge_file'], {}), '(edge_file)\n', (1166, 1177), False, 'from tqdm import tqdm\n'), ((1246, 1407), 'Datasets.FlickrDataset.FlickrMIRDataset', 'FlickrMIRDataset', ([], {'feat_file': '(data_file + feat_file)', 'edge_org': '(data_edge_file + org_edge_file)', 'edge_generated': '(data_edge_file + efile)', 'type_test': '"""edge_base"""'}), "(feat_file=data_file + feat_file, edge_org=data_edge_file +\n org_edge_file, edge_generated=data_edge_file + efile, type_test='edge_base'\n )\n", (1262, 1407), False, 'from Datasets.FlickrDataset import FlickrMIRDataset\n'), ((559, 584), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (582, 584), False, 'import torch\n'), ((1647, 1739), 'Models.GCN.GCN', 'GCN', ([], {'in_feats': 'dataset.num_feature', 'h_feats': 'num_channel', 'num_classes': 'dataset.num_classes'}), '(in_feats=dataset.num_feature, h_feats=num_channel, num_classes=dataset.\n num_classes)\n', (1650, 1739), False, 'from Models.GCN import GCN\n'), ((1753, 1906), 'Trainer.Trainer.Trainer', 'Trainer', ([], {'num_epoch': 'epochs', 'learning_rate': 'learning_rate', 'patience': 'patience', 'model': 'model', 'dataset': 'dataset', 'name_model': 'name_model_to_save', 'device': 'device'}), '(num_epoch=epochs, learning_rate=learning_rate, patience=patience,\n model=model, dataset=dataset, 
name_model=name_model_to_save, device=device)\n', (1760, 1906), False, 'from Trainer.Trainer import Trainer\n'), ((2204, 2222), 'numpy.array', 'np.array', (['temp_auc'], {}), '(temp_auc)\n', (2212, 2222), True, 'import numpy as np\n'), ((2233, 2250), 'numpy.array', 'np.array', (['temp_f1'], {}), '(temp_f1)\n', (2241, 2250), True, 'import numpy as np\n'), ((2261, 2279), 'numpy.array', 'np.array', (['temp_acc'], {}), '(temp_acc)\n', (2269, 2279), True, 'import numpy as np\n')] |
import logging
import numpy as np
from astropy import units as u
from astropy.nddata import NDDataRef
from astropy.utils.decorators import lazyproperty
from astropy.nddata import NDUncertainty
from ..wcs import WCSWrapper, WCSAdapter
from .spectrum_mixin import OneDSpectrumMixin
__all__ = ['Spectrum1D']
__doctest_skip__ = ['Spectrum1D.spectral_resolution']
class Spectrum1D(OneDSpectrumMixin, NDDataRef):
    """
    Spectrum container for 1D spectral data.
    Parameters
    ----------
    flux : `astropy.units.Quantity` or astropy.nddata.NDData`-like
        The flux data for this spectrum.
    spectral_axis : `astropy.units.Quantity`
        Dispersion information with the same shape as the last (or only)
        dimension of flux.
    wcs : `astropy.wcs.WCS` or `gwcs.wcs.WCS`
        WCS information object.
    velocity_convention : {"doppler_relativistic", "doppler_optical", "doppler_radio"}
        Convention used for velocity conversions.
    rest_value : `~astropy.units.Quantity`
        Any quantity supported by the standard spectral equivalencies
        (wavelength, energy, frequency, wave number). Describes the rest value
        of the spectral axis for use with velocity conversions.
    uncertainty : `~astropy.nddata.NDUncertainty`
        Contains uncertainty information along with propagation rules for
        spectrum arithmetic. Can take a unit, but if none is given, will use
        the unit defined in the flux.
    meta : dict
        Arbitrary container for any user-specific information to be carried
        around with the spectrum container object.
    """
    def __init__(self, flux=None, spectral_axis=None, wcs=None,
                 velocity_convention=None, rest_value=None, *args, **kwargs):
        # If the flux (data) argument is a subclass of nddataref (as it would
        # be for internal arithmetic operations), avoid setup entirely.
        if isinstance(flux, NDDataRef):
            self._velocity_convention = flux._velocity_convention
            self._rest_value = flux._rest_value
            super(Spectrum1D, self).__init__(flux)
            return
        # Ensure that the flux argument is an astropy quantity
        if flux is not None and not isinstance(flux, u.Quantity):
            raise ValueError("Flux must be a `Quantity` object.")
        # Ensure that the unit information codified in the quantity object is
        # the One True Unit.
        kwargs.setdefault('unit', flux.unit if isinstance(flux, u.Quantity)
                                       else kwargs.get('unit'))
        # In cases of slicing, new objects will be initialized with `data`
        # instead of `flux`. Ensure we grab the `data` argument.
        if flux is None and 'data' in kwargs:
            flux = kwargs.pop('data')
        # Attempt to parse the spectral axis. If none is given, try instead to
        # parse a given wcs. This is put into a GWCS object to
        # then be used behind-the-scenes for all specutils operations.
        if spectral_axis is not None:
            # Ensure that the spectral axis is an astropy quantity
            if not isinstance(spectral_axis, u.Quantity):
                raise ValueError("Spectral axis must be a `Quantity` object.")
            wcs = WCSWrapper.from_array(spectral_axis)
        elif wcs is not None:
            # Wrap any raw WCS object in the adapter interface.
            if not issubclass(wcs.__class__, WCSAdapter):
                wcs = WCSWrapper(wcs)
        elif isinstance(flux, float) or isinstance(flux, int) or isinstance(flux, np.ndarray):
            # In the case where the arithmetic operation is being performed with
            # a single float, int, or array object, just go ahead and ignore wcs
            # requirements
            super(Spectrum1D, self).__init__(data=flux)
            return
        else:
            # If no wcs and no spectral axis has been given, raise an error
            raise LookupError("No WCS object or spectral axis information has "
                              "been given. Please provide one.")
        self._velocity_convention = velocity_convention
        if rest_value is None:
            # Fall back on any rest frequency/wavelength recorded in the WCS.
            if wcs.rest_frequency != 0:
                self._rest_value = wcs.rest_frequency * u.Hz
            elif wcs.rest_wavelength != 0:
                self._rest_value = wcs.rest_wavelength * u.AA
            else:
                self._rest_value = 0 * u.AA
        else:
            self._rest_value = rest_value
            # A bare number is interpreted in the spectral axis' unit.
            if not isinstance(self._rest_value, u.Quantity):
                logging.info("No unit information provided with rest value. "
                             "Assuming units of spectral axis ('%s').",
                             spectral_axis.unit)
                self._rest_value = u.Quantity(rest_value, spectral_axis.unit)
            elif not self._rest_value.unit.is_equivalent(u.AA) and not self._rest_value.unit.is_equivalent(u.Hz):
                raise u.UnitsError("Rest value must be energy/wavelength/frequency equivalent.")
        super(Spectrum1D, self).__init__(
            data=flux.value if isinstance(flux, u.Quantity) else flux,
            wcs=wcs, **kwargs)
    @property
    def frequency(self):
        """
        The frequency as a `~astropy.units.Quantity` in units of GHz
        """
        return self.spectral_axis.to(u.GHz, u.spectral())
    @property
    def wavelength(self):
        """
        The wavelength as a `~astropy.units.Quantity` in units of Angstroms
        """
        return self.spectral_axis.to(u.AA, u.spectral())
    @property
    def energy(self):
        """
        The energy of the spectral axis as a `~astropy.units.Quantity` in units
        of eV.
        """
        return self.spectral_axis.to(u.eV, u.spectral())
    @property
    def photon_flux(self):
        """
        The flux density of photons as a `~astropy.units.Quantity`, in units of
        photons per cm^2 per second per spectral_axis unit
        """
        # Convert flux to energy flux density, then divide by per-photon energy.
        flux_in_spectral_axis_units = self.flux.to(u.W * u.cm**-2 * self.spectral_axis.unit**-1, u.spectral_density(self.spectral_axis))
        photon_flux_density = flux_in_spectral_axis_units / (self.energy / u.photon)
        return photon_flux_density.to(u.photon * u.cm**-2 * u.s**-1 *
                                      self.spectral_axis.unit**-1)
    @lazyproperty
    def bin_edges(self):
        # Delegated to the WCS adapter; cached after the first access by
        # `lazyproperty`.
        return self.wcs.bin_edges()
    @property
    def shape(self):
        # Shape of the flux array (not the spectral axis).
        return self.flux.shape
    @staticmethod
    def _compare_wcs(this_operand, other_operand):
        """
        NNData arithmetic callable to determine if two wcs's are compatible.
        """
        # If the other operand is a simple number or array, allow the operations
        if (isinstance(other_operand, float) or isinstance(other_operand, int)
                or isinstance(other_operand, np.ndarray)):
            return True
        # First check if units are equivalent, if so, create a new spectrum
        # object with spectral axis in compatible units
        other_wcs = other_operand.wcs.with_spectral_unit(
            this_operand.wcs.spectral_axis_unit)
        if other_wcs is None:
            return False
        # Check if the shape of the axes are compatible
        if this_operand.spectral_axis.shape != other_operand.spectral_axis.shape:
            logging.error("Shape of spectral axes between operands must be "
                          "equivalent.")
            return False
        # And that they cover the same range
        if (this_operand.spectral_axis[0] != other_operand.spectral_axis[0] or
                this_operand.spectral_axis[-1] != other_operand.spectral_axis[-1]):
            logging.error("Spectral axes between operands must cover the "
                          "same range. Interpolation may be required.")
            return False
        # Check if the delta dispersion is equivalent between the two axes
        if not np.array_equal(np.diff(this_operand.spectral_axis),
                              np.diff(other_operand.spectral_axis)):
            logging.error("Delta dispersion of spectral axes of operands "
                          "must be equivalent. Interpolation may be required.")
            return False
        return True
    def __add__(self, other):
        """Add a spectrum, quantity, or bare number (taken in this flux unit)."""
        if not isinstance(other, NDDataRef):
            other = u.Quantity(other, unit=self.unit)
        return self.add(
            other, compare_wcs=lambda o1, o2: self._compare_wcs(self, other))
    def __sub__(self, other):
        """Subtract a spectrum, quantity, or bare number (taken in this flux unit)."""
        if not isinstance(other, NDDataRef):
            other = u.Quantity(other, unit=self.unit)
        return self.subtract(
            other, compare_wcs=lambda o1, o2: self._compare_wcs(self, other))
    def __mul__(self, other):
        """Multiply by a spectrum, quantity, or dimensionless number."""
        if not isinstance(other, NDDataRef):
            other = u.Quantity(other)
        return self.multiply(
            other, compare_wcs=lambda o1, o2: self._compare_wcs(self, other))
    def __div__(self, other):
        """Python 2-style division; mirrors ``__truediv__``."""
        if not isinstance(other, NDDataRef):
            other = u.Quantity(other)
        return self.divide(
            other, compare_wcs=lambda o1, o2: self._compare_wcs(self, other))
    def __truediv__(self, other):
        """Divide by a spectrum, quantity, or dimensionless number."""
        if not isinstance(other, NDDataRef):
            other = u.Quantity(other)
        return self.divide(
            other, compare_wcs=lambda o1, o2: self._compare_wcs(self, other))
    def _format_array_summary(self, label, array):
        """Return a one-line "label: [ first, ..., last ], mean=..." summary."""
        mean = np.mean(array)
        s = "{:17} [ {:.5}, ..., {:.5} ], mean={:.5}"
        return s.format(label+':', array[0], array[-1], mean)
    def __str__(self):
        """Human-readable multi-line summary of flux, spectral axis, uncertainty."""
        result = "Spectrum1D "
        # Handle case of single value flux
        if self.flux.ndim == 0:
            result += "(length=1)\n"
            return result + "flux: {}".format(self.flux)
        # Handle case of multiple flux arrays
        result += "(length={})\n".format(len(self.spectral_axis))
        if self.flux.ndim > 1:
            for i, flux in enumerate(self.flux):
                label = 'flux{:2}'.format(i)
                result += self._format_array_summary(label, flux) + '\n'
        else:
            result += self._format_array_summary('flux', self.flux) + '\n'
        # Add information about spectral axis
        result += self._format_array_summary('spectral axis', self.spectral_axis)
        # Add information about uncertainties if available
        if self.uncertainty:
            result += "\nuncertainty: [ {}, ..., {} ]".format(
                self.uncertainty[0], self.uncertainty[-1])
        return result
    def __repr__(self):
        """Short repr; includes the spectral axis only when a WCS is present."""
        if self.wcs:
            result = "<Spectrum1D(flux={}, spectral_axis={})>".format(
                repr(self.flux), repr(self.spectral_axis))
        else:
            result = "<Spectrum1D(flux={})>".format(repr(self.flux))
        return result
    def spectral_resolution(self, true_dispersion, delta_dispersion, axis=-1):
        """Evaluate the probability distribution of the spectral resolution.
        Examples
        --------
        To tabulate a binned resolution function at 6000A covering +/-10A in
        0.2A steps:
        >>> R = spectrum1d.spectral_resolution(
        ...     6000 * u.Angstrom, np.linspace(-10, 10, 51) * u.Angstrom)
        >>> assert R.shape == (50,)
        >>> assert np.allclose(R.sum(), 1.)
        To build a sparse resolution matrix for true wavelengths 4000-8000A
        in 0.1A steps:
        >>> R = spectrum1d.spectral_resolution(
        ...     np.linspace(4000, 8000, 40001)[:, np.newaxis] * u.Angstrom,
        ...     np.linspace(-10, +10, 201) * u.Angstrom)
        >>> assert R.shape == (40000, 200)
        >>> assert np.allclose(R.sum(axis=1), 1.)
        Parameters
        ----------
        true_dispersion : `~astropy.units.Quantity`
            True value(s) of dispersion for which the resolution should be
            evaluated.
        delta_dispersion : `~astropy.units.Quantity`
            Array of (observed - true) dispersion bin edges to integrate the
            resolution probability density over.
        axis : int
            Which axis of ``delta_dispersion`` contains the strictly increasing
            dispersion values to interpret as bin edges. The dimension of
            ``delta_dispersion`` along this axis must be at least two.
        Returns
        -------
        numpy array
            Array of dimensionless probabilities calculated as the integral of
            P(observed | true) over each bin in (observed - true). The output
            shape is the result of broadcasting the input shapes.
        """
        # NOTE(review): not implemented yet — currently returns None.
        pass
| [
"logging.error",
"astropy.units.Quantity",
"logging.info",
"numpy.mean",
"astropy.units.spectral",
"numpy.diff",
"astropy.units.spectral_density",
"astropy.units.UnitsError"
] | [((9445, 9459), 'numpy.mean', 'np.mean', (['array'], {}), '(array)\n', (9452, 9459), True, 'import numpy as np\n'), ((5318, 5330), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (5328, 5330), True, 'from astropy import units as u\n'), ((5516, 5528), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (5526, 5528), True, 'from astropy import units as u\n'), ((5729, 5741), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (5739, 5741), True, 'from astropy import units as u\n'), ((6045, 6083), 'astropy.units.spectral_density', 'u.spectral_density', (['self.spectral_axis'], {}), '(self.spectral_axis)\n', (6063, 6083), True, 'from astropy import units as u\n'), ((7311, 7387), 'logging.error', 'logging.error', (['"""Shape of spectral axes between operands must be equivalent."""'], {}), "('Shape of spectral axes between operands must be equivalent.')\n", (7324, 7387), False, 'import logging\n'), ((7663, 7778), 'logging.error', 'logging.error', (['"""Spectral axes between operands must cover the same range. Interpolation may be required."""'], {}), "(\n 'Spectral axes between operands must cover the same range. Interpolation may be required.'\n )\n", (7676, 7778), False, 'import logging\n'), ((8047, 8170), 'logging.error', 'logging.error', (['"""Delta dispersion of spectral axes of operands must be equivalent. Interpolation may be required."""'], {}), "(\n 'Delta dispersion of spectral axes of operands must be equivalent. 
Interpolation may be required.'\n )\n", (8060, 8170), False, 'import logging\n'), ((8332, 8365), 'astropy.units.Quantity', 'u.Quantity', (['other'], {'unit': 'self.unit'}), '(other, unit=self.unit)\n', (8342, 8365), True, 'from astropy import units as u\n'), ((8566, 8599), 'astropy.units.Quantity', 'u.Quantity', (['other'], {'unit': 'self.unit'}), '(other, unit=self.unit)\n', (8576, 8599), True, 'from astropy import units as u\n'), ((8805, 8822), 'astropy.units.Quantity', 'u.Quantity', (['other'], {}), '(other)\n', (8815, 8822), True, 'from astropy import units as u\n'), ((9028, 9045), 'astropy.units.Quantity', 'u.Quantity', (['other'], {}), '(other)\n', (9038, 9045), True, 'from astropy import units as u\n'), ((9253, 9270), 'astropy.units.Quantity', 'u.Quantity', (['other'], {}), '(other)\n', (9263, 9270), True, 'from astropy import units as u\n'), ((4524, 4655), 'logging.info', 'logging.info', (['"""No unit information provided with rest value. Assuming units of spectral axis (\'%s\')."""', 'spectral_axis.unit'], {}), '(\n "No unit information provided with rest value. Assuming units of spectral axis (\'%s\')."\n , spectral_axis.unit)\n', (4536, 4655), False, 'import logging\n'), ((4742, 4784), 'astropy.units.Quantity', 'u.Quantity', (['rest_value', 'spectral_axis.unit'], {}), '(rest_value, spectral_axis.unit)\n', (4752, 4784), True, 'from astropy import units as u\n'), ((7929, 7964), 'numpy.diff', 'np.diff', (['this_operand.spectral_axis'], {}), '(this_operand.spectral_axis)\n', (7936, 7964), True, 'import numpy as np\n'), ((7996, 8032), 'numpy.diff', 'np.diff', (['other_operand.spectral_axis'], {}), '(other_operand.spectral_axis)\n', (8003, 8032), True, 'import numpy as np\n'), ((4921, 4995), 'astropy.units.UnitsError', 'u.UnitsError', (['"""Rest value must be energy/wavelength/frequency equivalent."""'], {}), "('Rest value must be energy/wavelength/frequency equivalent.')\n", (4933, 4995), True, 'from astropy import units as u\n')] |
import gym
import numpy as np
from gym import spaces
class FourRoom(gym.Env):
    def __init__(self, seed=None, goal_type='fix_goal'):
        # NOTE(review): `seed` is accepted but never used in this class.
        # Grid side length; the map below is an 11x11 four-room layout
        # where 1 marks a walkable cell and 0 marks a wall.
        self.n = 11
        self.map = np.array([
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
            0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
            0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
            0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
            0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0,
            0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,
            0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
            0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
            0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ]).reshape((self.n, self.n))
        # 'fix_goal' is the only goal scheme handled by reset().
        self.goal_type = goal_type
        # (row, col) of the goal; populated by reset() via init().
        self.goal = None
        self.init()
def init(self):
self.observation_space = spaces.Box(low=0, high=1, shape=(self.n*self.n,), dtype=np.float32)
self.observation_space.n = self.n
self.dx = [0, 1, 0, -1]
self.dy = [1, 0, -1, 0]
self.action_space = spaces.Discrete(len(self.dx))
self.reset()
def label2obs(self, x, y):
a = np.zeros((self.n*self.n,))
assert self.x < self.n and self.y < self.n
a[x * self.n + y] = 1
return a
def get_obs(self):
assert self.goal is not None
return self.label2obs(self.x, self.y)
def reset(self):
'''
condition = True
while condition:
self.x = np.random.randint(1, self.n)
self.y = np.random.randint(1, self.n)
condition = (self.map[self.x, self.y] == 0)
'''
self.x, self.y = 9, 9
loc = np.where(self.map > 0.5)
assert len(loc) == 2
#if self.goal_type == 'random':
# goal_idx = np.random.randint(len(loc[0]))
if self.goal_type == 'fix_goal':
goal_idx = 0
else:
raise NotImplementedError
self.goal = loc[0][goal_idx], loc[1][goal_idx]
self.done = False
return self.get_obs()
def set_xy(self, x, y):
self.x = x
self.y = y
return self.get_obs()
def step(self, action):
#assert not self.done
nx, ny = self.x + self.dx[action], self.y + self.dy[action]
info = {'is_success': False}
#before = self.get_obs().argmax()
if self.map[nx, ny]:
self.x, self.y = nx, ny
#dis = (self.goal[0]-self.x)**2 + (self.goal[1]-self.y)**2
#reward = -np.sqrt(dis)
reward = -1
done = False
else:
#dis = (self.goal[0]-self.x)**2 + (self.goal[1]-self.y)**2
#reward = -np.sqrt(dis)
reward = -1
done = False
if nx == self.goal[0] and ny == self.goal[1]:
reward = 0
info = {'is_success': True}
done = self.done = True
return self.get_obs(), reward, done, info
def compute_reward(self, state, goal, info):
state_obs = state.argmax(axis=1)
goal_obs = goal.argmax(axis=1)
reward = np.where(state_obs == goal_obs, 0, -1)
return reward
def restore(self, obs):
obs = obs.argmax()
self.x = obs//self.n
self.y = obs % self.n
def inv_action(self, state, prev_state):
x, y = state // self.n, state % self.n
px, py = prev_state // self.n, prev_state % self.n
dx = x - px
dy = y - py
if dx == 1 and dy == 0:
return 1
elif dx == -1 and dy == 0:
return 3
elif dy == 1 and dx == 0:
return 0
else:
return 2
def bfs_dist(self, state, goal, order=True):
#using bfs to search for shortest path
visited = {key: False for key in range(self.n*self.n)}
state_key = state.argmax()
goal_key = goal.argmax()
queue = []
visited[state_key] = True
queue.append(state_key)
dist = [-np.inf] * (self.n*self.n)
past = [-1] * (self.n*self.n)
dist[state_key] = 0
if order:
act_order = range(4)
else:
act_order = range(3, 0, -1)
while (queue):
par = queue.pop(0)
if par == goal_key:
break
x_par, y_par = par // self.n, par % self.n
for action in act_order:
x_child, y_child = x_par + self.dx[action], y_par + self.dy[action]
child = x_child*self.n + y_child
if self.map[x_child, y_child] == 0:
continue
if visited[child] == False:
visited[child] = True
queue.append(child)
dist[child] = dist[par] + 1
past[child] = par
state_action_pair = []
cur_state = goal_key
while cur_state is not state_key:
prev_state = past[cur_state]
prev_action = self.inv_action(cur_state, prev_state)
x_prev, y_prev = prev_state // self.n, prev_state % self.n
print(x_prev, y_prev)
state_action_pair.append(np.hstack([self.label2obs(x_prev, y_prev), np.array((prev_action, ))]))
cur_state = prev_state
state_action_pair.reverse()
state_action_pair.append(np.hstack([self.label2obs(goal_key // self.n, goal_key % self.n), np.array((prev_action, ))]))
print(len(state_action_pair))
return dist, state_action_pair
def get_pairwise(self, state, target):
dist = self.bfs_dist(state, target)
return dist
def all_states(self):
states = []
mask = []
for i in range(self.n):
for j in range(self.n):
self.x = i
self.y = j
states.append(self.get_obs())
if isinstance(states[-1], dict):
states[-1] = states[-1]['observation']
mask.append(self.map[self.x, self.y] > 0.5)
return np.array(states)[mask]
def all_edges(self):
A = np.zeros((self.n*self.n, self.n*self.n))
mask = []
for i in range(self.n):
for j in range(self.n):
mask.append(self.map[i, j] > 0.5)
if self.map[i][j]:
for a in range(4):
self.x = i
self.y = j
t = self.step(a)[0]
if isinstance(t, dict):
t = t['observation']
self.restore(t)
A[i*self.n+j, self.x*self.n + self.y] = 1
return A[mask][:, mask]
def add_noise(self, start, goal, dist, alpha=0.1, order=False):
if order:
act_order = range(4)
else:
act_order = range(3, 0, -1)
cur_state_id = start.argmax()
goal_id = goal.argmax()
new_seq = []
while(cur_state_id != goal_id):
x_cur, y_cur = cur_state_id // self.n, cur_state_id % self.n
if np.random.randn() < alpha:
cur_action = np.random.randint(4)
nx, ny = x_cur+self.dx[cur_action], y_cur+self.dy[cur_action]
new_seq.append(np.hstack([self.label2obs(x_cur, y_cur), np.array((cur_action, ))]))
#print('state, action', (cur_state_id//self.n, cur_state_id%self.n), cur_action)
if self.map[nx][ny] > 0.5:
cur_state_id = nx*self.n + ny
else:
cur_state_id = cur_state_id
else:
dist_n = -np.inf
cur_action = -1
for action in act_order:
x_n, y_n = x_cur + self.dx[action], y_cur + self.dy[action]
if dist[x_n*self.n+y_n] > dist_n:
dist_n = dist[x_n*self.n+y_n]
cur_action = action
elif dist[x_n*self.n+y_n] == dist_n:
cur_action = np.random.choice(np.array([cur_action, action]))
nx, ny = x_cur+self.dx[cur_action], y_cur+self.dy[cur_action]
new_seq.append(np.hstack([self.label2obs(x_cur, y_cur), np.array((cur_action, ))]))
#print('state, action', (cur_state_id//self.n, cur_state_id%self.n), cur_action)
if self.map[nx][ny] > 0.5:
cur_state_id = nx*self.n + ny
else:
cur_state_id = cur_state_id
new_seq.append(np.hstack([self.label2obs(goal_id//self.n, goal_id%self.n), np.array((cur_action, ))]))
return new_seq
class FourRoom1(FourRoom):
    """Variant of FourRoom whose vertical moves span two cells (dy = +/-2)."""

    def __init__(self, seed=None, *args, **kwargs):
        # The parent constructor builds its own map and runs init(); we then
        # install this variant's map and run init() again so the spaces and
        # the initial reset() see the final configuration.
        FourRoom.__init__(self, *args, **kwargs)
        self.n = 11
        self.map = np.array([
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
            0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
            0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
            0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
            0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0,
            0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,
            0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
            0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
            0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ]).reshape((self.n, self.n))
        self.init()

    def init(self):
        """Rebuild spaces with double-length vertical action deltas."""
        self.observation_space = spaces.Box(low=0, high=1, shape=(self.n*self.n,), dtype=np.float32)
        self.observation_space.n = self.n
        self.dx = [0, 1, 0, -1]
        self.dy = [2, 0, -2, 0]
        self.action_space = spaces.Discrete(len(self.dx))
        self.reset()

    def inv_action(self, state, prev_state):
        """Invert a transition between flat cells, accounting for dy = 2."""
        row, col = state // self.n, state % self.n
        prev_row, prev_col = prev_state // self.n, prev_state % self.n
        delta = (row - prev_row, col - prev_col)
        if delta == (1, 0):
            return 1
        if delta == (-1, 0):
            return 3
        if delta == (0, 2):
            return 0
        return 2

    def step(self, action):
        """Apply an action, clamping the tentative position to the grid."""
        tx = min(self.n - 1, max(0, self.x + self.dx[action]))
        ty = min(self.n - 1, max(0, self.y + self.dy[action]))
        info = {'is_success': False}
        if self.map[tx, ty]:
            self.x, self.y = tx, ty
        reward = -1
        done = False
        if tx == self.goal[0] and ty == self.goal[1]:
            reward = 0
            info = {'is_success': True}
            done = self.done = True
        return self.get_obs(), reward, done, info
| [
"numpy.random.randn",
"numpy.zeros",
"numpy.where",
"numpy.array",
"gym.spaces.Box",
"numpy.random.randint"
] | [((853, 922), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(self.n * self.n,)', 'dtype': 'np.float32'}), '(low=0, high=1, shape=(self.n * self.n,), dtype=np.float32)\n', (863, 922), False, 'from gym import spaces\n'), ((1150, 1178), 'numpy.zeros', 'np.zeros', (['(self.n * self.n,)'], {}), '((self.n * self.n,))\n', (1158, 1178), True, 'import numpy as np\n'), ((1678, 1702), 'numpy.where', 'np.where', (['(self.map > 0.5)'], {}), '(self.map > 0.5)\n', (1686, 1702), True, 'import numpy as np\n'), ((3101, 3139), 'numpy.where', 'np.where', (['(state_obs == goal_obs)', '(0)', '(-1)'], {}), '(state_obs == goal_obs, 0, -1)\n', (3109, 3139), True, 'import numpy as np\n'), ((6108, 6152), 'numpy.zeros', 'np.zeros', (['(self.n * self.n, self.n * self.n)'], {}), '((self.n * self.n, self.n * self.n))\n', (6116, 6152), True, 'import numpy as np\n'), ((9476, 9545), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(self.n * self.n,)', 'dtype': 'np.float32'}), '(low=0, high=1, shape=(self.n * self.n,), dtype=np.float32)\n', (9486, 9545), False, 'from gym import spaces\n'), ((6047, 6063), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (6055, 6063), True, 'import numpy as np\n'), ((176, 569), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1,\n 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0,\n 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,\n 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1,\n 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 
0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0])\n', (184, 569), True, 'import numpy as np\n'), ((7103, 7120), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (7118, 7120), True, 'import numpy as np\n'), ((7159, 7179), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (7176, 7179), True, 'import numpy as np\n'), ((8859, 9252), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1,\n 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1,\n 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0,\n 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0,\n 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1,\n 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0])\n', (8867, 9252), True, 'import numpy as np\n'), ((5417, 5441), 'numpy.array', 'np.array', (['(prev_action,)'], {}), '((prev_action,))\n', (5425, 5441), True, 'import numpy as np\n'), ((8640, 8663), 'numpy.array', 'np.array', (['(cur_action,)'], {}), '((cur_action,))\n', (8648, 8663), True, 'import numpy as np\n'), ((5218, 5242), 'numpy.array', 'np.array', (['(prev_action,)'], {}), '((prev_action,))\n', (5226, 5242), True, 'import numpy as np\n'), ((7330, 7353), 'numpy.array', 'np.array', (['(cur_action,)'], {}), '((cur_action,))\n', (7338, 7353), True, 'import numpy as np\n'), ((8268, 8291), 'numpy.array', 'np.array', (['(cur_action,)'], {}), '((cur_action,))\n', (8276, 8291), True, 'import numpy as np\n'), ((8085, 8115), 'numpy.array', 'np.array', (['[cur_action, action]'], {}), '([cur_action, action])\n', (8093, 8115), True, 'import 
numpy as np\n')] |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ...common._registration import register_converter
import numpy as np
def convert_non_max_suppression(scope, operator, container):
    """Convert a CoreML nonMaximumSuppression model to an ONNX NonMaxSuppression node.

    Requires target opset >= 10.  Box data is assumed to be in
    [x_center, y_center, width, height] layout (center_point_box=1).

    Raises
    ------
    RuntimeError
        If the opset is too old or more than one NMS input is mapped.
    """
    if container.target_opset < 10:
        raise RuntimeError('NonMaxSuppression is only support in Opset 10 or higher')

    op_type = 'NonMaxSuppression'
    attrs = {'name': operator.full_name}
    raw_model = operator.raw_operator.nonMaximumSuppression

    # Attribute: center_point_box
    # The box data is supplied as [x_center, y_center, width, height], similar to TF models.
    attrs['center_point_box'] = 1

    # We index into scope.variable_name_mapping with key of raw_model.inputFeatureName to extract list of values.
    # Assuming there's only one NMS node in this graph, the last node will be the input we're looking for.
    # If there's more than one NMS coreml model, then return error.
    if raw_model.HasField('coordinatesInputFeatureName'):
        coordinates_input = scope.variable_name_mapping[raw_model.coordinatesInputFeatureName]
        if len(coordinates_input) > 1:
            raise RuntimeError('NMS conversion does not currently support more than one NMS node in an ONNX graph')
        attrs['boxes'] = np.array(coordinates_input[0]).astype(np.float32)

    if raw_model.HasField('confidenceInputFeatureName'):
        confidence_input = scope.variable_name_mapping[raw_model.confidenceInputFeatureName]
        # BUG FIX: the original tested len(coordinates_input) here (copy-paste),
        # which raised NameError when only confidence was present and failed to
        # detect duplicate confidence inputs.
        if len(confidence_input) > 1:
            raise RuntimeError('NMS conversion does not currently support more than one NMS node in an ONNX graph')
        attrs['scores'] = np.array(confidence_input[0]).astype(np.float32)

    if raw_model.HasField('iouThreshold'):
        attrs['iou_threshold'] = np.array(raw_model.iouThreshold).astype(np.float32)
    if raw_model.HasField('confidenceThreshold'):
        attrs['score_threshold'] = np.array(raw_model.confidenceThreshold).astype(np.float32)
    if raw_model.HasField('PickTop') and raw_model.PickTop.HasField('perClass'):
        # NOTE(review): ONNX defines max_output_boxes_per_class as an int64
        # input; the float32 cast mirrors the other fields here — confirm.
        attrs['max_output_boxes_per_class'] = np.array(raw_model.PickTop.perClass).astype(np.float32)

    container.add_node(op_type, [operator.inputs[0].full_name], [operator.outputs[0].full_name],
                       op_version=10, **attrs)


register_converter('nonMaxSuppression', convert_non_max_suppression)
| [
"numpy.array"
] | [((1491, 1521), 'numpy.array', 'np.array', (['coordinates_input[0]'], {}), '(coordinates_input[0])\n', (1499, 1521), True, 'import numpy as np\n'), ((1872, 1901), 'numpy.array', 'np.array', (['confidence_input[0]'], {}), '(confidence_input[0])\n', (1880, 1901), True, 'import numpy as np\n'), ((1998, 2030), 'numpy.array', 'np.array', (['raw_model.iouThreshold'], {}), '(raw_model.iouThreshold)\n', (2006, 2030), True, 'import numpy as np\n'), ((2135, 2174), 'numpy.array', 'np.array', (['raw_model.confidenceThreshold'], {}), '(raw_model.confidenceThreshold)\n', (2143, 2174), True, 'import numpy as np\n'), ((2322, 2358), 'numpy.array', 'np.array', (['raw_model.PickTop.perClass'], {}), '(raw_model.PickTop.perClass)\n', (2330, 2358), True, 'import numpy as np\n')] |
import inspect
import json
import os
import shutil
from importlib import import_module
from pathlib import Path
from datetime import timedelta
import numpy as np
import pandas as pd
from .log import logger
from .introspection import infer_args_from_method
def check_directory_exists_and_if_not_mkdir(directory):
    """Ensure *directory* exists, creating any missing parent directories.

    Parameters
    ==========
    directory: str
        Name of the directory
    """
    target = Path(directory)
    target.mkdir(parents=True, exist_ok=True)
class BilbyJsonEncoder(json.JSONEncoder):
    """JSON encoder aware of bilby priors, numpy, pandas and astropy objects.

    Each special type is serialised as a tagged dict (e.g. ``__array__``,
    ``__complex__``) that ``decode_bilby_json`` can invert.
    """

    def default(self, obj):
        from ..prior import MultivariateGaussianDist, Prior, PriorDict
        from ...gw.prior import HealPixMapPriorDist
        from ...bilby_mcmc.proposals import ProposalCycle

        # Plain numpy scalars are converted to native python numbers.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, PriorDict):
            return {"__prior_dict__": True, "content": obj._get_json_dict()}
        if isinstance(obj, (MultivariateGaussianDist, HealPixMapPriorDist, Prior)):
            return {
                "__prior__": True,
                "__module__": obj.__module__,
                "__name__": obj.__class__.__name__,
                "kwargs": dict(obj.get_instantiation_dict()),
            }
        if isinstance(obj, ProposalCycle):
            return str(obj)
        try:
            from astropy import cosmology as cosmo, units

            if isinstance(obj, cosmo.FLRW):
                return encode_astropy_cosmology(obj)
            if isinstance(obj, units.Quantity):
                return encode_astropy_quantity(obj)
            if isinstance(obj, units.PrefixUnit):
                return str(obj)
        except ImportError:
            logger.debug("Cannot import astropy, cannot write cosmological priors")
        if isinstance(obj, np.ndarray):
            return {"__array__": True, "content": obj.tolist()}
        if isinstance(obj, complex):
            return {"__complex__": True, "real": obj.real, "imag": obj.imag}
        if isinstance(obj, pd.DataFrame):
            return {"__dataframe__": True, "content": obj.to_dict(orient="list")}
        if isinstance(obj, pd.Series):
            return {"__series__": True, "content": obj.to_dict()}
        if inspect.isfunction(obj):
            return {
                "__function__": True,
                "__module__": obj.__module__,
                "__name__": obj.__name__,
            }
        if inspect.isclass(obj):
            return {
                "__class__": True,
                "__module__": obj.__module__,
                "__name__": obj.__name__,
            }
        if isinstance(obj, timedelta):
            # BUG FIX: the original had an unreachable `return obj.isoformat()`
            # after this return (timedelta has no isoformat); the dead line is
            # removed.
            return {
                "__timedelta__": True,
                "__total_seconds__": obj.total_seconds(),
            }
        return json.JSONEncoder.default(self, obj)
def encode_astropy_cosmology(obj):
    """Serialise an astropy cosmology instance into a JSON-friendly dict."""
    dct = {
        key: getattr(obj, key)
        for key in infer_args_from_method(obj.__init__)
    }
    dct["__cosmology__"] = True
    dct["__name__"] = obj.__class__.__name__
    return dct
def encode_astropy_quantity(dct):
    """Serialise an astropy Quantity into a tagged JSON-friendly dict."""
    encoded = dict(__astropy_quantity__=True, value=dct.value, unit=str(dct.unit))
    # Array-valued quantities are stored as plain lists for JSON.
    if isinstance(encoded["value"], np.ndarray):
        encoded["value"] = list(encoded["value"])
    return encoded
def decode_astropy_cosmology(dct):
    """Rebuild an astropy cosmology object from its tagged JSON dict."""
    try:
        from astropy import cosmology as cosmo

        cls_name = dct.pop("__name__")
        dct.pop("__cosmology__")
        return getattr(cosmo, cls_name)(**dct)
    except ImportError:
        logger.debug(
            "Cannot import astropy, cosmological priors may not be properly loaded."
        )
        return dct
def decode_astropy_quantity(dct):
    """Rebuild an astropy Quantity from its tagged JSON dict (or None)."""
    try:
        from astropy import units

        if dct["value"] is None:
            return None
        dct.pop("__astropy_quantity__")
        return units.Quantity(**dct)
    except ImportError:
        logger.debug(
            "Cannot import astropy, cosmological priors may not be properly loaded."
        )
        return dct
def load_json(filename, gzip):
    """Load a (possibly gzipped) bilby JSON file into a dictionary.

    The file is treated as gzipped when *gzip* is truthy or the filename
    ends in ``.gz``.
    """
    gzipped = gzip or os.path.splitext(filename)[1].lstrip(".") == "gz"
    if gzipped:
        import gzip

        with gzip.GzipFile(filename, "r") as file:
            json_str = file.read().decode("utf-8")
        dictionary = json.loads(json_str, object_hook=decode_bilby_json)
    else:
        with open(filename, "r") as file:
            dictionary = json.load(file, object_hook=decode_bilby_json)
    return dictionary
def decode_bilby_json(dct):
    """Object hook inverting BilbyJsonEncoder's tagged-dict encodings.

    Dictionaries without a recognised tag are returned unchanged.
    """
    if dct.get("__prior_dict__", False):
        cls = getattr(import_module(dct["__module__"]), dct["__name__"])
        return cls._get_from_json_dict(dct)
    if dct.get("__prior__", False):
        try:
            cls = getattr(import_module(dct["__module__"]), dct["__name__"])
        except AttributeError:
            logger.debug(
                "Unknown prior class for parameter {}, defaulting to base Prior object".format(
                    dct["kwargs"]["name"]
                )
            )
            from ..prior import Prior
            cls = Prior
        return cls(**dct["kwargs"])
    if dct.get("__cosmology__", False):
        return decode_astropy_cosmology(dct)
    if dct.get("__astropy_quantity__", False):
        return decode_astropy_quantity(dct)
    if dct.get("__array__", False):
        return np.asarray(dct["content"])
    if dct.get("__complex__", False):
        return complex(dct["real"], dct["imag"])
    if dct.get("__dataframe__", False):
        return pd.DataFrame(dct["content"])
    if dct.get("__series__", False):
        return pd.Series(dct["content"])
    if dct.get("__function__", False) or dct.get("__class__", False):
        # Fall back to the dotted path string when the attribute is missing.
        fallback = ".".join([dct["__module__"], dct["__name__"]])
        return getattr(import_module(dct["__module__"]), dct["__name__"], fallback)
    if dct.get("__timedelta__", False):
        return timedelta(seconds=dct["__total_seconds__"])
    return dct
def recursively_decode_bilby_json(dct):
    """
    Recursively call `bilby_decode_json`

    Parameters
    ----------
    dct: dict
        The dictionary to decode

    Returns
    -------
    dct: dict
        The original dictionary with all the elements decode if possible
    """
    dct = decode_bilby_json(dct)
    if isinstance(dct, dict):
        for key, value in dct.items():
            if isinstance(value, dict):
                dct[key] = recursively_decode_bilby_json(value)
    return dct
def decode_from_hdf5(item):
    """
    Decode an item from HDF5 format to python type.

    ``"__none__"`` sentinels become None, raw bytes are decoded to str,
    byte-string arrays become lists of str; everything else passes through.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    item: object
        Item to be decoded

    Returns
    -------
    output: object
        Converted input item
    """
    if isinstance(item, str):
        return None if item == "__none__" else item
    if isinstance(item, bytes):
        return None if item == b"__none__" else item.decode()
    if isinstance(item, bytearray):
        return item.decode()
    if isinstance(item, np.ndarray):
        if item.size == 0:
            return item
        if "|S" in str(item.dtype) or isinstance(item[0], bytes):
            return [element.decode() for element in item]
        return item
    if isinstance(item, np.bool_):
        return bool(item)
    return item
def encode_for_hdf5(key, item):
    """
    Encode an item to a HDF5 saveable format.

    .. versionadded:: 1.1.0

    Parameters
    ----------
    key: str
        Name of the item, used only in error messages
    item: object
        Object to be encoded, specific options are provided for Bilby types

    Returns
    -------
    output: object
        Input item converted into HDF5 saveable format
    """
    from ..prior.dict import PriorDict
    # Normalise numpy scalar types to native python numbers first.
    # NOTE(review): np.int_/np.float_/np.complex_ aliases are removed in
    # numpy 2.0 — confirm the pinned numpy version.
    if isinstance(item, np.int_):
        item = int(item)
    elif isinstance(item, np.float_):
        item = float(item)
    elif isinstance(item, np.complex_):
        item = complex(item)
    # The branch order below is significant: e.g. bool is an int subclass and
    # str/bytes must be caught before the generic list handling.
    if isinstance(item, (np.ndarray, int, float, complex, str, bytes)):
        output = item
    elif item is None:
        # None has no HDF5 representation; use a string sentinel.
        output = "__none__"
    elif isinstance(item, list):
        if len(item) == 0:
            output = item
        elif isinstance(item[0], (str, bytes)) or item[0] is None:
            # String lists are stored as utf-8 byte strings; None entries
            # become the b"__none__" sentinel.
            output = list()
            for value in item:
                if isinstance(value, str):
                    output.append(value.encode("utf-8"))
                elif isinstance(value, bytes):
                    output.append(value)
                else:
                    output.append(b"__none__")
        elif isinstance(item[0], (int, float, complex)):
            output = np.array(item)
        else:
            raise ValueError(f'Cannot save {key}: {type(item)} type')
    elif isinstance(item, PriorDict):
        # Priors are stored as their JSON string representation.
        output = json.dumps(item._get_json_dict())
    elif isinstance(item, pd.DataFrame):
        output = item.to_dict(orient="list")
    elif isinstance(item, pd.Series):
        output = item.to_dict()
    elif inspect.isfunction(item) or inspect.isclass(item):
        # Callables/classes are stored by dotted import path.
        output = dict(
            __module__=item.__module__, __name__=item.__name__, __class__=True
        )
    elif isinstance(item, dict):
        output = item.copy()
    elif isinstance(item, tuple):
        # Tuples become dicts keyed by stringified index.
        output = {str(ii): elem for ii, elem in enumerate(item)}
    else:
        raise ValueError(f'Cannot save {key}: {type(item)} type')
    return output
def recursively_load_dict_contents_from_group(h5file, path):
    """
    Recursively load a HDF5 file into a dictionary

    .. versionadded:: 1.1.0

    Parameters
    ----------
    h5file: h5py.File
        Open h5py file object
    path: str
        Path within the HDF5 file

    Returns
    -------
    output: dict
        The contents of the HDF5 file unpacked into the dictionary.
    """
    import h5py

    contents = dict()
    for key, item in h5file[path].items():
        if isinstance(item, h5py.Dataset):
            # Scalar read via item[()], then decoded back to python types.
            contents[key] = decode_from_hdf5(item[()])
        elif isinstance(item, h5py.Group):
            contents[key] = recursively_load_dict_contents_from_group(
                h5file, path + key + "/"
            )
    return contents
def recursively_save_dict_contents_to_group(h5file, path, dic):
    """
    Recursively save a dictionary to a HDF5 group

    .. versionadded:: 1.1.0

    Parameters
    ----------
    h5file: h5py.File
        Open HDF5 file
    path: str
        Path inside the HDF5 file
    dic: dict
        The dictionary containing the data
    """
    for key, raw in dic.items():
        encoded = encode_for_hdf5(key, raw)
        if isinstance(encoded, dict):
            # Nested dicts become nested HDF5 groups.
            recursively_save_dict_contents_to_group(h5file, path + key + "/", encoded)
        else:
            h5file[path + key] = encoded
def safe_file_dump(data, filename, module):
    """ Safely dump data to a .pickle file

    The payload is first written to ``filename + ".temp"`` and then moved
    into place, so a crash mid-write never leaves a truncated target file.

    Parameters
    ==========
    data:
        data to dump
    filename: str
        The file to dump to
    module: pickle, dill
        The python module to use
    """
    temp_filename = filename + ".temp"
    with open(temp_filename, "wb") as fh:
        module.dump(data, fh)
    shutil.move(temp_filename, filename)
def move_old_file(filename, overwrite=False):
    """ Moves or removes an old file.

    Parameters
    ==========
    filename: str
        Name of the file to be move
    overwrite: bool, optional
        Whether or not to remove the file or to change the name
        to filename + '.old'
    """
    if not os.path.isfile(filename):
        logger.debug("Saving result to {}".format(filename))
        return
    if overwrite:
        logger.debug("Removing existing file {}".format(filename))
        os.remove(filename)
    else:
        logger.debug(
            "Renaming existing file {} to {}.old".format(filename, filename)
        )
        shutil.move(filename, filename + ".old")
    logger.debug("Saving result to {}".format(filename))
def safe_save_figure(fig, filename, **kwargs):
    """Save a matplotlib figure, creating the target directory first and
    retrying once with LaTeX rendering disabled if saving raises RuntimeError.

    Parameters
    ==========
    fig: matplotlib figure
        The figure to save
    filename: str
        Destination path; its parent directory is created if missing
    **kwargs:
        Passed through to ``fig.savefig``
    """
    check_directory_exists_and_if_not_mkdir(os.path.dirname(filename))
    from matplotlib import rcParams
    try:
        fig.savefig(fname=filename, **kwargs)
    except RuntimeError:
        logger.debug("Failed to save plot with tex labels turning off tex.")
        # A RuntimeError from savefig here is assumed to come from LaTeX
        # rendering; disable usetex globally and retry once.
        rcParams["text.usetex"] = False
        fig.savefig(fname=filename, **kwargs)
| [
"os.remove",
"os.path.isfile",
"pathlib.Path",
"pandas.DataFrame",
"json.loads",
"inspect.isclass",
"os.path.dirname",
"datetime.timedelta",
"gzip.GzipFile",
"inspect.isfunction",
"json.JSONEncoder.default",
"astropy.units.Quantity",
"importlib.import_module",
"numpy.asarray",
"pandas.Se... | [((11413, 11449), 'shutil.move', 'shutil.move', (['temp_filename', 'filename'], {}), '(temp_filename, filename)\n', (11424, 11449), False, 'import shutil\n'), ((11759, 11783), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (11773, 11783), False, 'import os\n'), ((2356, 2379), 'inspect.isfunction', 'inspect.isfunction', (['obj'], {}), '(obj)\n', (2374, 2379), False, 'import inspect\n'), ((2553, 2573), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (2568, 2573), False, 'import inspect\n'), ((2955, 2990), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (2979, 2990), False, 'import json\n'), ((4477, 4528), 'json.loads', 'json.loads', (['json_str'], {'object_hook': 'decode_bilby_json'}), '(json_str, object_hook=decode_bilby_json)\n', (4487, 4528), False, 'import json\n'), ((5578, 5604), 'numpy.asarray', 'np.asarray', (["dct['content']"], {}), "(dct['content'])\n", (5588, 5604), True, 'import numpy as np\n'), ((5747, 5775), 'pandas.DataFrame', 'pd.DataFrame', (["dct['content']"], {}), "(dct['content'])\n", (5759, 5775), True, 'import pandas as pd\n'), ((5828, 5853), 'pandas.Series', 'pd.Series', (["dct['content']"], {}), "(dct['content'])\n", (5837, 5853), True, 'import pandas as pd\n'), ((6127, 6170), 'datetime.timedelta', 'timedelta', ([], {'seconds': "dct['__total_seconds__']"}), "(seconds=dct['__total_seconds__'])\n", (6136, 6170), False, 'from datetime import timedelta\n'), ((12248, 12273), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (12263, 12273), False, 'import os\n'), ((490, 505), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (494, 505), False, 'from pathlib import Path\n'), ((4049, 4070), 'astropy.units.Quantity', 'units.Quantity', ([], {}), '(**dct)\n', (4063, 4070), False, 'from astropy import cosmology as cosmo, units\n'), ((4367, 4395), 'gzip.GzipFile', 'gzip.GzipFile', (['filename', '"""r"""'], {}), 
"(filename, 'r')\n", (4380, 4395), False, 'import gzip\n'), ((4606, 4652), 'json.load', 'json.load', (['file'], {'object_hook': 'decode_bilby_json'}), '(file, object_hook=decode_bilby_json)\n', (4615, 4652), False, 'import json\n'), ((4768, 4800), 'importlib.import_module', 'import_module', (["dct['__module__']"], {}), "(dct['__module__'])\n", (4781, 4800), False, 'from importlib import import_module\n'), ((6012, 6044), 'importlib.import_module', 'import_module', (["dct['__module__']"], {}), "(dct['__module__'])\n", (6025, 6044), False, 'from importlib import import_module\n'), ((11890, 11909), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (11899, 11909), False, 'import os\n'), ((12057, 12097), 'shutil.move', 'shutil.move', (['filename', "(filename + '.old')"], {}), "(filename, filename + '.old')\n", (12068, 12097), False, 'import shutil\n'), ((4956, 4988), 'importlib.import_module', 'import_module', (["dct['__module__']"], {}), "(dct['__module__'])\n", (4969, 4988), False, 'from importlib import import_module\n'), ((4282, 4308), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (4298, 4308), False, 'import os\n'), ((8909, 8923), 'numpy.array', 'np.array', (['item'], {}), '(item)\n', (8917, 8923), True, 'import numpy as np\n'), ((9262, 9286), 'inspect.isfunction', 'inspect.isfunction', (['item'], {}), '(item)\n', (9280, 9286), False, 'import inspect\n'), ((9290, 9311), 'inspect.isclass', 'inspect.isclass', (['item'], {}), '(item)\n', (9305, 9311), False, 'import inspect\n')] |
from dolfin import *
# MPI bookkeeping: identify this process within the world communicator.
comm = MPI.comm_world
rank = MPI.rank(comm)
rank_string = str(rank)  # used as a per-rank logger name below
size = MPI.size(comm)
import numpy as np
import multiprocessing
import logging, logging.handlers
import inspect
import time, os, gc, sys
import ctypes
# Small tolerance constant; presumably a floating-point comparison epsilon
# for code later in this module — confirm usage.
myeps = 1e-10
def human_time(tt):
    """Format a duration in seconds as e.g. ``1d:2h:3m:4.00s``.

    Leading units that are zero are omitted; second values of 0.01s or less
    are rendered in scientific notation.
    """
    seconds = tt % 60
    if seconds > 0.01:
        formatted = '{:.2f}s'.format(seconds)
    else:
        formatted = '{:.2e}s'.format(seconds)
    remaining = int(tt - seconds) // 60
    if remaining == 0:
        return formatted
    minutes = remaining % 60
    formatted = '{:d}m:{:s}'.format(minutes, formatted)
    remaining = (remaining - minutes) // 60
    if remaining == 0:
        return formatted
    hours = remaining % 24
    formatted = '{:d}h:{:s}'.format(hours, formatted)
    remaining = (remaining - hours) // 24
    if remaining == 0:
        return formatted
    return '{:d}d:{:s}'.format(remaining, formatted)
def makedirs_norace(path):
    """Create *path* and any missing parents; a no-op for empty or existing paths.

    ``exist_ok=True`` tolerates the race where another process creates the
    directory between the existence check and the mkdir call.
    """
    if not path:
        return
    if os.path.exists(path):
        return
    os.makedirs(path, exist_ok=True)
def get_shared_array(shape):
    """Allocate a float64 numpy array of *shape* backed by shared memory,
    so worker processes can write results visible to the parent."""
    n_items = int(np.prod(shape))
    raw = multiprocessing.Array(ctypes.c_double, n_items)
    return np.frombuffer(raw.get_obj()).reshape(shape)
class IndentFormatter(logging.Formatter):
    """Formatter that prefixes each message with dots proportional to how much
    deeper the call stack is than it was when the formatter was constructed."""
    def __init__(self, **args):
        logging.Formatter.__init__(self, **args)
        # Reference stack depth at construction; format() indents relative to it.
        self.baseline = len(inspect.stack())
    def format(self, rec):
        stack = inspect.stack()
        rec.indent = '. '*(len(stack)-self.baseline)
        # NOTE(review): stack[8][3] hard-codes the frame offset of the original
        # caller through the logging machinery; fragile if logging internals
        # change — confirm against the target Python version.
        rec.function = stack[8][3]
        out = logging.Formatter.format(self, rec)
        # Remove the temporary attributes so the record is left unmodified.
        del rec.indent; del rec.function
        return out
# Module-wide formatter instance shared by all handlers below.
formatter = IndentFormatter(fmt = '[%(asctime)s]-[%(processName)s]\n%(indent)s%(message)s')
class LogWrapper:
    """Process-safe wrapper around a per-MPI-rank file logger.

    A multiprocessing lock serialises every logging call so messages from
    different worker processes do not interleave; output can optionally be
    mirrored to stdout.  Usable as a context manager for clean teardown.
    """

    def __init__(self, logname, *, stdout=False):
        self.lock = multiprocessing.Lock()
        self.lock.acquire()
        self.logger = logging.getLogger(rank_string)
        self.logger.setLevel(logging.INFO)
        makedirs_norace(os.path.dirname(logname))
        self.handler = logging.FileHandler(logname + '_rk_' + str(rank) + '.log', mode='w')
        self.handler.setFormatter(formatter)
        self.logger.addHandler(self.handler)
        if stdout:
            self.stdout_handler = logging.StreamHandler(sys.stdout)
            self.stdout_handler.setLevel(logging.INFO)
            self.stdout_handler.setFormatter(formatter)
            self.logger.addHandler(self.stdout_handler)
        self.lock.release()

    def info(self, *args, **kwargs):
        """Log at INFO level while holding the process lock."""
        self.lock.acquire()
        self.logger.info(*args, **kwargs)
        self.lock.release()

    def critical(self, *args, **kwargs):
        """Log at CRITICAL level while holding the process lock."""
        self.lock.acquire()
        self.logger.critical(*args, **kwargs)
        self.lock.release()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Detach and close the handlers, then drop all references."""
        self.lock.acquire()
        self.logger.handlers = []
        del self.logger
        self.handler.flush()
        self.handler.close()
        del self.handler
        self.lock.release()
        del self.lock
def get_time():
    """Return this process's CPU time in seconds (monotone, for timing)."""
    return time.process_time()
def simple_batch_fun(uus, VV, low = 0, high = 1, fun = None, logger = None, max_procs = None):
    """Evaluate fun(uus[ii]) for ii in [low, high) in parallel worker processes.

    Results are exchanged through a shared-memory array and returned as a
    list of Functions on the space *VV*.

    Parameters
    ----------
    uus : indexable collection of inputs handed to *fun*
    VV : function space; VV.dim() determines the flat result length
    low, high : half-open index range of *uus* to process
    fun : callable returning a Function-like object exposing .vector()
    logger : optional logger for progress output (None = silent)
    max_procs : optional upper bound on the number of worker processes

    Returns
    -------
    list of Function objects holding fun(uus[ii]) for ii in [low, high).

    NOTE(review): ``tmp_fun`` is a closure handed to multiprocessing.Process,
    which only works with the 'fork' start method -- confirm on non-Linux.
    """
    uus_len = high-low
    cpu_count = multiprocessing.cpu_count() if max_procs is None else np.min([multiprocessing.cpu_count(), max_procs])
    if cpu_count > 1:
        done_queue = multiprocessing.Queue()
        def done_put(qq):
            qq.put(None)
        def done_get():
            done_queue.get()
    else:
        # serial fallback: synchronisation helpers become no-ops
        done_queue = None
        def done_put(qq):
            pass
        def done_get():
            pass
    if logger is not None:
        logger.info('SIMPLE: trying to allocate shared size [{:d}] array'.format(uus_len*VV.dim()))
        polys_array = get_shared_array((uus_len, VV.dim()))
        logger.info('SIMPLE: shared array allocated')
        def tmp_fun(block_low, block_high, qq):
            logger.info('    SIMPLE TMP: ['+str(block_low)+', '+str(block_high)+'] start')
            for ii in range(block_low, block_high):
                logger.info('        [{:d}/{:d}] start'.format(ii+1, high))
                polys_array[ii-low] = fun(uus[ii]).vector().get_local()
                logger.info('        [{:d}/{:d}] end'.format(ii+1, high))
            logger.info('    SIMPLE TMP: ['+str(block_low)+', '+str(block_high)+'] computations done')
            done_put(qq)
            logger.info('    SIMPLE TMP: ['+str(block_low)+', '+str(block_high)+'] queue put')
        processes = []
        # BUGFIX: max_procs may be None (the default); '{:d}'.format(None)
        # raises TypeError, so format it with a plain '{}' placeholder.
        logger.info('SIMPLE: Using [{:d}] processes with [{}] requested and [{:d}] cpus available'.format(cpu_count, max_procs, multiprocessing.cpu_count()))
        if cpu_count > 1:
            block = int(np.ceil(uus_len / cpu_count))
            logger.info('SIMPLE: ['+str(block)+'] for ['+str(cpu_count)+']cpus for ['+str(uus_len)+'] functions')
            block_low = low
            block_high = np.min([block_low+block, high])
            for cpu in range(cpu_count):
                if block_low >= block_high:
                    break
                logger.info('SIMPLE: ['+str(cpu)+']: ['+str(low)+', '+str(block_low)+', '+str(block_high)+', '+str(high)+'] starting')
                processes.append(multiprocessing.Process(target = tmp_fun, args = (block_low, block_high, done_queue)))
                processes[-1].start()
                block_low = block_high
                block_high = np.min([block_high+block, high])
                logger.info('SIMPLE: ['+str(cpu)+']: ['+str(low)+', '+str(block_low)+', '+str(block_high)+', '+str(high)+'] started')
            proc_count = len(processes)
            # drain the queue first so every worker has finished writing
            for cpu, proc in enumerate(processes):
                done_get()
                logger.info('SIMPLE: ['+str(cpu+1)+'/'+str(proc_count)+'] fetched')
            for cpu, proc in enumerate(processes):
                proc.join()
                proc = None
                processes[cpu] = None
                logger.info('SIMPLE: ['+str(cpu+1)+'/'+str(proc_count)+'] joined')
            del processes
        else:
            tmp_fun(low, high, done_queue)
            done_get()
        del done_queue
        # copy the shared flat data back into Function objects
        polys = []
        for ii in range(uus_len):
            polys.append(Function(VV, name = 'u'))
            polys[-1].vector().set_local(polys_array[ii])
        del polys_array
        logger.info('SIMPLE: END')
    else:
        # silent variant: identical logic without any logging
        polys_array = get_shared_array((uus_len, VV.dim()))
        def tmp_fun(block_low, block_high, qq):
            for ii in range(block_low, block_high):
                polys_array[ii-low] = fun(uus[ii]).vector().get_local()
            done_put(qq)
        processes = []
        if cpu_count > 1:
            block = int(np.ceil(uus_len / cpu_count))
            block_low = low
            block_high = np.min([block_low+block, high])
            for cpu in range(cpu_count):
                if block_low >= block_high:
                    break
                processes.append(multiprocessing.Process(target = tmp_fun, args = (block_low, block_high, done_queue)))
                processes[-1].start()
                block_low = block_high
                block_high = np.min([block_high+block, high])
            proc_count = len(processes)
            for cpu, proc in enumerate(processes):
                done_get()
            for cpu, proc in enumerate(processes):
                proc.join()
                proc = None
                processes[cpu] = None
            del processes
        else:
            tmp_fun(low, high, done_queue)
            done_get()
        del done_queue
        polys = []
        for ii in range(uus_len):
            polys.append(Function(VV, name = 'u'))
            polys[-1].vector().set_local(polys_array[ii])
        del polys_array
    return polys
def batch_fun(uus, VV, low = 0, high = 1, offsets = None, fun = None, polys = None, times = None, logger = None, max_procs = None):
    """Evaluate fun(uus[ii]) for ii in [offsets[low], offsets[high]) in parallel,
    appending the results to *polys* and cumulative per-degree timings to *times*.

    Parameters
    ----------
    uus : indexable collection of inputs handed to *fun*
    VV : function space; VV.dim() determines the flat result length
    low, high : degree range; offsets[deg] maps degree to flat index
    offsets : monotone index table; block [offsets[low], offsets[high]) is done
    fun : callable returning a Function-like object exposing .vector()
    polys : list mutated in place -- one Function appended per computed input
    times : list mutated in place -- one cumulative CPU-time entry per degree,
        continuing from times[-1] (must be non-empty on entry)
    logger : optional logger for progress output (None = silent)
    max_procs : optional upper bound on the number of worker processes

    NOTE(review): ``tmp_fun`` is a closure handed to multiprocessing.Process,
    which only works with the 'fork' start method -- confirm on non-Linux.
    """
    cpu_count = multiprocessing.cpu_count() if max_procs is None else np.min([multiprocessing.cpu_count(), max_procs])
    offset = offsets[low]
    num_polys = offsets[high]-offset
    if cpu_count > 1:
        done_queue = multiprocessing.Queue()
        def done_put(qq):
            qq.put(None)
        def done_get():
            done_queue.get()
    else:
        # serial fallback: synchronisation helpers become no-ops
        done_queue = None
        def done_put(qq):
            pass
        def done_get():
            pass
    if logger is not None:
        logger.info('BATCH: trying to allocate shared size [{:d}] array'.format(num_polys*VV.dim()))
        polys_array = get_shared_array((num_polys, VV.dim()))
        logger.info('BATCH: shared array allocated')
        times_array = get_shared_array(offsets[high])
        def tmp_fun(block_low, block_high, qq):
            logger.info('    BATCH TMP: ['+str(block_low)+', '+str(block_high)+'] start')
            for ii in range(block_low, block_high):
                logger.info('        [{:d}/{:d}] start'.format(ii+1, offsets[high]))
                old = get_time()
                polys_array[ii-offset] = fun(uus[ii]).vector().get_local()
                new = get_time()
                times_array[ii] = new-old
                logger.info('        [{:d}/{:d}] end'.format(ii+1, offsets[high]))
            logger.info('    BATCH TMP: ['+str(block_low)+', '+str(block_high)+'] computations done')
            done_put(qq)
            logger.info('    BATCH TMP: ['+str(block_low)+', '+str(block_high)+'] times put in queue')
        processes = []
        # BUGFIX: max_procs may be None (the default); '{:d}'.format(None)
        # raises TypeError, so format it with a plain '{}' placeholder.
        logger.info('BATCH: Using [{:d}] processes with [{}] requested and [{:d}] cpus available'.format(cpu_count, max_procs, multiprocessing.cpu_count()))
        if cpu_count > 1:
            block = int(np.ceil(num_polys / cpu_count))
            logger.info('BATCH: blocks of ['+str(block)+'] for ['+str(cpu_count)+'] cpus for ['+str(num_polys)+'] functions')
            block_low = offsets[low]
            block_high = np.min([block_low+block, offsets[high]])
            for cpu in range(cpu_count):
                if block_low >= block_high:
                    break
                logger.info('BATCH: ['+str(cpu)+']: ['+str(offsets[low])+', '+str(block_low)+', '+str(block_high)+', '+str(offsets[high])+'] starting')
                processes.append(multiprocessing.Process(target = tmp_fun, args = (block_low, block_high, done_queue)))
                processes[-1].start()
                block_low = block_high
                block_high = np.min([block_high+block, offsets[high]])
                logger.info('BATCH: ['+str(cpu)+']: ['+str(offsets[low])+', '+str(block_low)+', '+str(block_high)+', '+str(offsets[high])+'] started')
            proc_count = len(processes)
            logger.info('BATCH: ['+str(proc_count)+'] processes started')
            # drain the queue first so every worker has finished writing
            for cpu, proc in enumerate(processes):
                done_get()
                logger.info('BATCH: ['+str(cpu+1)+'/'+str(proc_count)+'] fetched')
            for cpu, proc in enumerate(processes):
                proc.join()
                proc = None
                processes[cpu] = None
                logger.info('BATCH: ['+str(cpu+1)+'/'+str(proc_count)+'] joined')
            del processes
        else:
            tmp_fun(offsets[low], offsets[high], done_queue)
            done_get()
        del done_queue
        # copy the shared flat data back into Function objects
        for ii in range(num_polys):
            polys.append(Function(VV, name = 'u'))
            polys[-1].vector().set_local(polys_array[ii])
        del polys_array
        offset = offsets[low]
        # accumulate per-degree timings on top of the last existing entry
        for deg in range(low, high):
            tmp = times[-1]
            for ii in range(offsets[deg], offsets[deg+1]):
                tmp += times_array[ii]
            times.append(tmp)
        del times_array
        logger.info('BATCH: END')
    else:
        # silent variant: identical logic without any logging
        polys_array = get_shared_array((num_polys, VV.dim()))
        times_array = get_shared_array(offsets[high])
        def tmp_fun(block_low, block_high, qq):
            for ii in range(block_low, block_high):
                old = get_time()
                polys_array[ii-offset] = fun(uus[ii]).vector().get_local()
                new = get_time()
                times_array[ii] = new-old
            done_put(qq)
        processes = []
        # (dropped a redundant recomputation of cpu_count here -- the value
        # computed at function entry is identical)
        if cpu_count > 1:
            block = int(np.ceil(num_polys / cpu_count))
            block_low = offsets[low]
            block_high = np.min([block_low+block, offsets[high]])
            for cpu in range(cpu_count):
                if block_low >= block_high:
                    break
                processes.append(multiprocessing.Process(target = tmp_fun, args = (block_low, block_high, done_queue)))
                processes[-1].start()
                block_low = block_high
                block_high = np.min([block_high+block, offsets[high]])
            proc_count = len(processes)
            for cpu, proc in enumerate(processes):
                done_get()
            for cpu, proc in enumerate(processes):
                proc.join()
                proc = None
                processes[cpu] = None
            del processes
        else:
            tmp_fun(offsets[low], offsets[high], done_queue)
            done_get()
        del done_queue
        for ii in range(num_polys):
            polys.append(Function(VV, name = 'u'))
            polys[-1].vector().set_local(polys_array[ii])
        del polys_array
        offset = offsets[low]
        for deg in range(low, high):
            tmp = times[-1]
            for ii in range(offsets[deg], offsets[deg+1]):
                tmp += times_array[ii]
            times.append(tmp)
        del times_array
def matplotlib_stuff():
    """Set up shared plotting style constants and the `set_log_ticks` helper.

    Returns this function's local namespace as a dict so callers can pull
    out the style lists and the helper, e.g.::

        ns = matplotlib_stuff()
        set_log_ticks = ns['set_log_ticks']
    """
    import matplotlib.pyplot as plt
    ploteps = 5e-2
    legendx = 3.6
    legendy = 1.2
    line_styles = ['-', ':', '--', '-.']
    num_line_styles = len(line_styles)
    marker_styles = ['o', '+', 's', 'x', 'p', '*', 'd']
    num_marker_styles = len(marker_styles)
    styles = [aa+bb for aa in marker_styles for bb in line_styles]
    num_styles = len(styles)
    color_styles = ['r', 'g', 'b', 'c', 'y', 'm', 'k', '#a0522d', '#6b8e23']
    num_color_styles = len(color_styles)
    def set_log_ticks(ax, minval, maxval, xaxis = False, semilog = False):
        # Choose tick positions/LaTeX labels for a (semi-)log axis spanning
        # [minval, maxval], padding the range by `ploteps`.
        if semilog:
            fac = ploteps*(maxval-minval)
            minval = 0
            # minval -= fac;
            maxval += fac;
            if maxval % 20:
                maxval = 20*np.ceil(maxval/20)
            if xaxis:
                ax.set_xlim(minval, maxval)
                ticks = ax.get_xticks()
                ax.set_xticklabels(r'$'+str(kk).rstrip('0').rstrip('.')+r'$' for kk in ticks)
            else:
                ax.set_ylim(minval, maxval)
                ticks = ax.get_yticks()
                ax.set_yticklabels(r'$'+str(kk)+r'$' for kk in ticks)
        else:
            # pad multiplicatively, then bracket the range by decade ticks
            fac = np.exp(ploteps*np.log(maxval/minval));
            minval /= fac; maxval *= fac
            low_pow = int(np.floor(np.log(minval)/np.log(10)))
            low_mul = int(np.floor(minval/10**low_pow))
            low = low_mul*10**low_pow
            top_pow = int(np.floor(np.log(maxval)/np.log(10)))
            top_mul = int(np.floor(maxval/10**top_pow))
            if top_mul*10**top_pow < maxval:
                if top_mul == 9:
                    top_pow += 1
                    top_mul = 1
                else:
                    top_mul += 1
            top = top_mul*10**top_pow
            inter = range(low_pow+1, top_pow+(1 if top_mul > 1 else 0))
            if len(inter):
                ticks = [10**kk for kk in inter]
                labels = [r'$10^{'+str(kk)+r'}$' for kk in inter]
                width = np.log(ticks[-1]/ticks[0])
            else:
                ticks = []; labels = []
            # add an edge tick at the low end if there is room for its label
            if not len(inter) or (np.log(minval/low) < .1*width and
                                  np.log(ticks[0]/low) > .2*width):
                ticks = [low]+ticks
                if low_mul > 1:
                    label = r'$'+str(low_mul)+r'\cdot 10^{'+str(low_pow)+r'}$'
                else:
                    label = r'$10^{'+str(low_pow)+r'}$'
                labels = [label]+labels
            # same at the high end
            if not len(inter) or (np.log(top/maxval) < .1*width and
                                  np.log(top/ticks[-1]) > .2*width):
                ticks = ticks+[top]
                if top_mul > 1:
                    label = r'$'+str(top_mul)+r'\cdot 10^{'+str(top_pow)+r'}$'
                else:
                    label = r'$10^{'+str(top_pow)+r'}$'
                labels = labels+[label]
            minval = np.min([minval, ticks[0]])
            maxval = np.max([maxval, ticks[-1]])
            if xaxis:
                ax.set_xlim(minval, maxval)
            else:
                ax.set_ylim(minval, maxval)
            # thin the ticks down to at most ~6, keeping the last one
            numticks = len(ticks)
            mul = int(np.ceil(numticks/6))
            ticks = ticks[::-mul][::-1]
            labels = labels[::-mul][::-1]
            if xaxis:
                ax.set_xticks(ticks)
                ax.set_xticklabels(labels)
            else:
                ax.set_yticks(ticks)
                ax.set_yticklabels(labels)
        ax.grid(True, which = 'major')
    # BUGFIX: the original ended with `return locals`, handing callers the
    # *builtin* locals function (which would report the caller's own
    # namespace).  Calling it here exports this function's names as intended.
    return locals()
def test_log_ticks():
    """Demo: draw sqrt on a log-log axis with custom ticks and save a PDF."""
    xs = np.arange(0.0001, 999)
    ys = np.sqrt(xs)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.loglog(xs, ys)
    set_log_ticks(ax, min(xs), max(xs), True)
    set_log_ticks(ax, min(ys), max(ys))
    fig.savefig('custom_ticks_demo.pdf')
def get_bcs(basedim, elasticity, spe = False, reference = False, ldomain = False):
    """Build boundary-condition test cases as (dirichlet, neumann, rhs) triples.

    Parameters
    ----------
    basedim : spatial dimension (2 or 3; the spe variant requires 3)
    elasticity : vector-valued (elasticity) vs. scalar problem
    spe : use the SPE well configuration instead of the generic cases
    reference : return the larger reference set of cases
    ldomain : add the extra L-domain boundary markers and singular solutions

    Returns
    -------
    list of tuples ([(value, marker), ...] Dirichlet data,
                    [(value, marker), ...] Neumann data,
                    right-hand side or None).
    """
    markers = list(range(1, 2*basedim+1))
    if ldomain:
        markers += list(range(7, 7+basedim))
    if not spe:
        const = 5e-1
        if elasticity:
            left_c = Constant([-const]+[0.]*(basedim-1))
            right_c = Constant([const]+[0.]*(basedim-1))
            left_q = Expression(['cc*(1.-x[1]*x[1])']+['0']*(basedim-1), cc = const, degree = 2)
            right_q = Expression(['cc*(x[1]*x[1]-1)']+['0']*(basedim-1), cc = const, degree = 2)
            bottom_c = Constant([0.]+[const]+[0.]*(basedim-2))
            top_c = Constant([0.]+[-const]+[0.]*(basedim-2))
            bottom_q = Expression(['0']+['cc*(1.-x[0]*x[0])']+['0']*(basedim-2), cc = const, degree = 2)
            top_q = Expression(['0']+['cc*(x[0]*x[0]-1)']+['0']*(basedim-2), cc = const, degree = 2)
            zero = Constant([0.]*basedim)
            fenics = Expression(['cc*x[0]*(1-x[1])']+['cc*x[1]*(1-x[0])']+['0']*(basedim-2), cc = const, degree = 3)
            # Lame parameters from Poisson ratio pp (E = 1)
            pp = 0.3; ll = pp/((1.+pp)*(1-2*pp)); mm = 1./(2.*(1+pp))
            expr = Constant([const*(ll+mm)]+[const*(ll+mm)]+[0.]*(basedim-2))
        else:
            left_c = Constant(-const)
            right_c = Constant(const)
            left_q = Expression('cc*(1.-x[1]*x[1])', cc = const, degree = 2)
            right_q = Expression('cc*(x[1]*x[1]-1.)', cc = const, degree = 2)
            bottom_c = right_c
            top_c = left_c
            bottom_q = Expression('cc*(x[0]*x[0]-1.)', cc = const, degree = 2)
            top_q = Expression('cc*(1.-x[0]*x[0])', cc = const, degree = 2)
            zero = Constant(0.)
            if not ldomain:
                fenics = Expression('1.+0.25*(x[0]+1.)*(x[0]+1.)+0.5*(x[1]+1.)*(x[1]+1.)', degree = 2)
                expr = Constant(-3./2.)
            else:
                if basedim == 2:
                    # singular corner solution r^(2/3) sin(2 phi / 3)
                    fenics = Expression('[](double rr, double phi) { return pow(rr, 2./3.)*sin(2./3.*phi); }(sqrt(x[0]*x[0]+x[1]*x[1]), fmod(atan2(-x[0], x[1])+2.*atan2(0,-1), 2.*atan2(0,-1)))', degree = 5)
                    expr = Constant(0)
                elif basedim == 3:
                    lbda = 0.25
                    fenics = Expression('[](double rr) { return pow(rr, lbda); }(sqrt(x[0]*x[0]+x[1]*x[1]+x[2]*x[2]))', lbda=lbda, degree = 5)
                    # BUGFIX: the JIT string used the undefined name 'ldbda'
                    # (typo for 'lbda') and was missing the closing ')' of the
                    # immediately-invoked lambda -- both would fail to compile.
                    expr = Expression('[](double rr) { return -lbda*(lbda+1)*pow(rr, lbda-2); }(sqrt(x[0]*x[0]+x[1]*x[1]+x[2]*x[2]))', lbda=lbda, degree = 3)
        if not reference:
            return [([(left_c, 1), (right_c, 2)], [(bottom_c, 3), (top_c, 4)], bottom_c),
                    ([(left_q, 1), (right_q, 2)], [(bottom_q, 3), (top_q, 4)], None),
                    ([(fenics, ii) for ii in markers], [], expr)]
        else:
            return [([(left_c, 1), (right_c, 2)], [], None),
                    ([(left_c, 1), (right_c, 2)], [], bottom_c),
                    ([(left_q, 1), (right_q, 2)], [], None),
                    ([(left_q, 1), (right_q, 2)], [], bottom_c),
                    ([(fenics, ii) for ii in markers], [], expr),
                    ([(zero, ii) for ii in markers], [], expr),
                    ([], [(bottom_c, 3), (top_c, 4)], None),
                    ([], [(bottom_c, 3), (top_c, 4)], bottom_c),
                    ([], [(bottom_q, 3), (top_q, 4)], None),
                    ([], [(bottom_q, 3), (top_q, 4)], bottom_c)]
    else:
        # SPE well configuration: pressures/loads on the well boundary markers
        assert(basedim == 3)
        in_well_in = 2e4
        in_well_out = 1e4
        out_well_in = 4e3
        if elasticity:
            in_well_in_c = Constant((0, 0, in_well_in))
            in_well_out_c = Constant((0, 0, in_well_out))
            out_well_in_c = Constant((0, 0, -out_well_in))
        else:
            in_well_in_c = Constant(in_well_in)
            in_well_out_c = Constant(-in_well_out)
            out_well_in_c = Constant(out_well_in)
        return [([(in_well_in_c, 101), (in_well_out_c, 102), (out_well_in_c, 104)], [], None)]
def build_nullspace(VV, elasticity = False):
    """Return an orthonormal VectorSpaceBasis spanning the operator nullspace
    on *VV*: rigid-body modes for elasticity, the constant function otherwise.

    For 2-d elasticity three modes are built (2 translations + 1 rotation),
    for 3-d six (3 translations + 3 rotations).
    """
    tmp = Function(VV)
    if elasticity:
        basedim = VV.mesh().geometry().dim()
        assert(basedim == 2 or basedim == 3), 'dimension [{:d}] nullspace not implemented'.format(basedim)
        # one vector per rigid-body mode
        nullspace_basis = [tmp.vector().copy() for ii in range(3 if basedim == 2 else 6)]
        #translational
        VV.sub(0).dofmap().set(nullspace_basis[0], 1.0);
        VV.sub(1).dofmap().set(nullspace_basis[1], 1.0);
        #rotational
        # in-plane rotation occupies index `basedim` (2 in 2-d, 3 in 3-d)
        VV.sub(0).set_x(nullspace_basis[basedim], -1.0, 1);
        VV.sub(1).set_x(nullspace_basis[basedim], 1.0, 0);
        if basedim == 3:
            #dim3 translation
            VV.sub(2).dofmap().set(nullspace_basis[2], 1.0);
            #dim3 rotation
            VV.sub(0).set_x(nullspace_basis[4], 1.0, 2);
            VV.sub(2).set_x(nullspace_basis[4], -1.0, 0);
            VV.sub(2).set_x(nullspace_basis[5], 1.0, 1);
            VV.sub(1).set_x(nullspace_basis[5], -1.0, 2);
    else:
        # scalar problem: nullspace is spanned by the constant function
        nullspace_basis = [tmp.vector().copy()]
        nullspace_basis[0][:] = 1.
    for xx in nullspace_basis:
        xx.apply("insert")
    basis = VectorSpaceBasis(nullspace_basis)
    basis.orthonormalize()
    return basis
def krylov_solve(AA, xx, bb, *args):
    """Solve AA xx = bb with a KrylovSolver configured by *args; returns the
    iteration count."""
    return KrylovSolver(*args).solve(AA, xx, bb)
def failsafe_solve(AA, xx, bb, krylov_args = ('cg', 'hypre_amg'), solver_args = ('pastix',)):
    """Solve AA xx = bb with a Krylov solver, falling back to a direct solve.

    Returns the Krylov iteration count, or 0 when the fallback was used.

    BUGFIX: the default was ('pastix') -- a plain string, not a tuple -- so
    *solver_args unpacked into the characters 'p','a','s','t','i','x'.
    BUGFIX: the bare `except:` also caught KeyboardInterrupt/SystemExit;
    narrowed to Exception.
    """
    try:
        solver = KrylovSolver(*krylov_args)
        it = solver.solve(AA, xx, bb)
    except Exception:
        solve(AA, xx, bb, *solver_args)
        it = 0
    return it
def compare_mesh(mesha, meshb):
    """Return True iff the two meshes agree in vertex, cell and facet counts."""
    return (mesha.num_vertices() == meshb.num_vertices() and
            mesha.num_cells() == meshb.num_cells() and
            mesha.num_facets() == meshb.num_facets())
adapt_cpp_code = """
#include<pybind11/pybind11.h>
#include<dolfin/adaptivity/adapt.h>
#include<dolfin/mesh/Mesh.h>
#include<dolfin/mesh/MeshFunction.h>
namespace py = pybind11;
PYBIND11_MODULE(SIGNATURE, m) {
m.def("adapt", (std::shared_ptr<dolfin::MeshFunction<std::size_t>> (*)(const dolfin::MeshFunction<std::size_t>&,
std::shared_ptr<const dolfin::Mesh>)) &dolfin::adapt,
py::arg("mesh_function"), py::arg("adapted_mesh"));
}
"""
adapt = compile_cpp_code(adapt_cpp_code).adapt
# Smoke test: exercise LogWrapper setup/logging/teardown twice when run as a script.
if __name__ == '__main__':
    with LogWrapper('./test1.log') as logger:
        logger.info('Test 1')
    with LogWrapper('./test2.log') as logger:
        logger.info('Test 2')
| [
"logging.Formatter.__init__",
"multiprocessing.Lock",
"numpy.floor",
"matplotlib.pyplot.figure",
"numpy.arange",
"multiprocessing.Queue",
"numpy.prod",
"multiprocessing.cpu_count",
"time.process_time",
"os.path.dirname",
"os.path.exists",
"numpy.max",
"numpy.ceil",
"logging.StreamHandler",... | [((2953, 2972), 'time.process_time', 'time.process_time', ([], {}), '()\n', (2970, 2972), False, 'import time, os, gc, sys\n'), ((17209, 17231), 'numpy.arange', 'np.arange', (['(0.0001)', '(999)'], {}), '(0.0001, 999)\n', (17218, 17231), True, 'import numpy as np\n'), ((17243, 17255), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17253, 17255), True, 'import matplotlib.pyplot as plt\n'), ((846, 878), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (857, 878), False, 'import time, os, gc, sys\n'), ((1121, 1161), 'logging.Formatter.__init__', 'logging.Formatter.__init__', (['self'], {}), '(self, **args)\n', (1147, 1161), False, 'import logging, logging.handlers\n'), ((1250, 1265), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1263, 1265), False, 'import inspect\n'), ((1368, 1403), 'logging.Formatter.format', 'logging.Formatter.format', (['self', 'rec'], {}), '(self, rec)\n', (1392, 1403), False, 'import logging, logging.handlers\n'), ((1646, 1668), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (1666, 1668), False, 'import multiprocessing\n'), ((1719, 1749), 'logging.getLogger', 'logging.getLogger', (['rank_string'], {}), '(rank_string)\n', (1736, 1749), False, 'import logging, logging.handlers\n'), ((1808, 1832), 'os.path.dirname', 'os.path.dirname', (['logname'], {}), '(logname)\n', (1823, 1832), False, 'import time, os, gc, sys\n'), ((3109, 3136), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3134, 3136), False, 'import multiprocessing\n'), ((3255, 3278), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (3276, 3278), False, 'import multiprocessing\n'), ((7831, 7858), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (7856, 7858), False, 'import multiprocessing\n'), ((8040, 8063), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (8061, 8063), False, 'import 
multiprocessing\n'), ((17306, 17319), 'numpy.sqrt', 'np.sqrt', (['test'], {}), '(test)\n', (17313, 17319), True, 'import numpy as np\n'), ((816, 836), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (830, 836), False, 'import time, os, gc, sys\n'), ((965, 979), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (972, 979), True, 'import numpy as np\n'), ((1190, 1205), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1203, 1205), False, 'import inspect\n'), ((2094, 2127), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (2115, 2127), False, 'import logging, logging.handlers\n'), ((4811, 4844), 'numpy.min', 'np.min', (['[block_low + block, high]'], {}), '([block_low + block, high])\n', (4817, 4844), True, 'import numpy as np\n'), ((6685, 6718), 'numpy.min', 'np.min', (['[block_low + block, high]'], {}), '([block_low + block, high])\n', (6691, 6718), True, 'import numpy as np\n'), ((9808, 9850), 'numpy.min', 'np.min', (['[block_low + block, offsets[high]]'], {}), '([block_low + block, offsets[high]])\n', (9814, 9850), True, 'import numpy as np\n'), ((12107, 12134), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (12132, 12134), False, 'import multiprocessing\n'), ((12354, 12396), 'numpy.min', 'np.min', (['[block_low + block, offsets[high]]'], {}), '([block_low + block, offsets[high]])\n', (12360, 12396), True, 'import numpy as np\n'), ((16546, 16572), 'numpy.min', 'np.min', (['[minval, ticks[0]]'], {}), '([minval, ticks[0]])\n', (16552, 16572), True, 'import numpy as np\n'), ((16594, 16621), 'numpy.max', 'np.max', (['[maxval, ticks[-1]]'], {}), '([maxval, ticks[-1]])\n', (16600, 16621), True, 'import numpy as np\n'), ((17397, 17410), 'numpy.sqrt', 'np.sqrt', (['test'], {}), '(test)\n', (17404, 17410), True, 'import numpy as np\n'), ((17417, 17430), 'numpy.sqrt', 'np.sqrt', (['test'], {}), '(test)\n', (17424, 17430), True, 'import numpy as np\n'), ((3171, 3198), 
'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3196, 3198), False, 'import multiprocessing\n'), ((4534, 4561), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (4559, 4561), False, 'import multiprocessing\n'), ((4614, 4642), 'numpy.ceil', 'np.ceil', (['(uus_len / cpu_count)'], {}), '(uus_len / cpu_count)\n', (4621, 4642), True, 'import numpy as np\n'), ((5315, 5349), 'numpy.min', 'np.min', (['[block_high + block, high]'], {}), '([block_high + block, high])\n', (5321, 5349), True, 'import numpy as np\n'), ((6602, 6630), 'numpy.ceil', 'np.ceil', (['(uus_len / cpu_count)'], {}), '(uus_len / cpu_count)\n', (6609, 6630), True, 'import numpy as np\n'), ((7054, 7088), 'numpy.min', 'np.min', (['[block_high + block, high]'], {}), '([block_high + block, high])\n', (7060, 7088), True, 'import numpy as np\n'), ((7893, 7920), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (7918, 7920), False, 'import multiprocessing\n'), ((9508, 9535), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (9533, 9535), False, 'import multiprocessing\n'), ((9588, 9618), 'numpy.ceil', 'np.ceil', (['(num_polys / cpu_count)'], {}), '(num_polys / cpu_count)\n', (9595, 9618), True, 'import numpy as np\n'), ((10338, 10381), 'numpy.min', 'np.min', (['[block_high + block, offsets[high]]'], {}), '([block_high + block, offsets[high]])\n', (10344, 10381), True, 'import numpy as np\n'), ((12260, 12290), 'numpy.ceil', 'np.ceil', (['(num_polys / cpu_count)'], {}), '(num_polys / cpu_count)\n', (12267, 12290), True, 'import numpy as np\n'), ((12732, 12775), 'numpy.min', 'np.min', (['[block_high + block, offsets[high]]'], {}), '([block_high + block, offsets[high]])\n', (12738, 12775), True, 'import numpy as np\n'), ((14976, 15008), 'numpy.floor', 'np.floor', (['(minval / 10 ** low_pow)'], {}), '(minval / 10 ** low_pow)\n', (14984, 15008), True, 'import numpy as np\n'), ((15133, 15165), 'numpy.floor', 
'np.floor', (['(maxval / 10 ** top_pow)'], {}), '(maxval / 10 ** top_pow)\n', (15141, 15165), True, 'import numpy as np\n'), ((15637, 15665), 'numpy.log', 'np.log', (['(ticks[-1] / ticks[0])'], {}), '(ticks[-1] / ticks[0])\n', (15643, 15665), True, 'import numpy as np\n'), ((16806, 16827), 'numpy.ceil', 'np.ceil', (['(numticks / 6)'], {}), '(numticks / 6)\n', (16813, 16827), True, 'import numpy as np\n'), ((5122, 5207), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'tmp_fun', 'args': '(block_low, block_high, done_queue)'}), '(target=tmp_fun, args=(block_low, block_high,\n done_queue))\n', (5145, 5207), False, 'import multiprocessing\n'), ((6861, 6946), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'tmp_fun', 'args': '(block_low, block_high, done_queue)'}), '(target=tmp_fun, args=(block_low, block_high,\n done_queue))\n', (6884, 6946), False, 'import multiprocessing\n'), ((10145, 10230), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'tmp_fun', 'args': '(block_low, block_high, done_queue)'}), '(target=tmp_fun, args=(block_low, block_high,\n done_queue))\n', (10168, 10230), False, 'import multiprocessing\n'), ((12169, 12196), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (12194, 12196), False, 'import multiprocessing\n'), ((12539, 12624), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'tmp_fun', 'args': '(block_low, block_high, done_queue)'}), '(target=tmp_fun, args=(block_low, block_high,\n done_queue))\n', (12562, 12624), False, 'import multiprocessing\n'), ((14383, 14403), 'numpy.ceil', 'np.ceil', (['(maxval / 20)'], {}), '(maxval / 20)\n', (14390, 14403), True, 'import numpy as np\n'), ((14821, 14844), 'numpy.log', 'np.log', (['(maxval / minval)'], {}), '(maxval / minval)\n', (14827, 14844), True, 'import numpy as np\n'), ((14922, 14936), 'numpy.log', 'np.log', (['minval'], {}), '(minval)\n', (14928, 14936), True, 'import numpy as np\n'), 
((14937, 14947), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (14943, 14947), True, 'import numpy as np\n'), ((15079, 15093), 'numpy.log', 'np.log', (['maxval'], {}), '(maxval)\n', (15085, 15093), True, 'import numpy as np\n'), ((15094, 15104), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (15100, 15104), True, 'import numpy as np\n'), ((15756, 15776), 'numpy.log', 'np.log', (['(minval / low)'], {}), '(minval / low)\n', (15762, 15776), True, 'import numpy as np\n'), ((15824, 15846), 'numpy.log', 'np.log', (['(ticks[0] / low)'], {}), '(ticks[0] / low)\n', (15830, 15846), True, 'import numpy as np\n'), ((16157, 16177), 'numpy.log', 'np.log', (['(top / maxval)'], {}), '(top / maxval)\n', (16163, 16177), True, 'import numpy as np\n'), ((16225, 16248), 'numpy.log', 'np.log', (['(top / ticks[-1])'], {}), '(top / ticks[-1])\n', (16231, 16248), True, 'import numpy as np\n')] |
import numpy as np
import spartan
from spartan import expr, util
import parakeet
from spartan.array import distarray, extent
import time
from scipy.spatial.distance import cdist
# For each row of `pts`, return the index of the nearest row of `centers`
# (squared Euclidean distance).  Written as explicit loops on purpose so
# parakeet.jit can compile it -- do not vectorise.
# NOTE(review): `xrange` and `np.int` are Python-2 / old-numpy idioms; this
# module predates Python 3 and numpy 1.24.
@util.synchronized
@parakeet.jit
def _find_closest(pts, centers):
  idxs = np.zeros(pts.shape[0], np.int)
  for i in range(pts.shape[0]):
    min_dist = 1e9
    min_idx = 0
    p = pts[i]
    for j in xrange(centers.shape[0]):
      c = centers[j]
      dist = np.sum((p - c) ** 2)
      if dist < min_dist:
        min_dist = dist
        min_idx = j
    idxs[i] = min_idx
  return idxs
def _find_cluster_mapper(inputs, ex, d_pts, old_centers,
                         new_centers, new_counts, labels):
  """Shuffle mapper: assign the points of one tile to their nearest center
  and accumulate per-center sums/counts into the reduction arrays."""
  pts = d_pts.fetch(ex)
  closest = _find_closest(pts, old_centers)
  l_centers = np.zeros_like(old_centers)
  l_counts = np.zeros((old_centers.shape[0], 1), dtype=np.int)
  for idx in range(old_centers.shape[0]):
    mask = (closest == idx)
    l_counts[idx] = mask.sum()
    l_centers[idx] = pts[mask].sum(axis=0)
  # push the local sums/counts into the global reduction arrays
  new_centers.update(extent.from_shape(new_centers.shape), l_centers)
  new_counts.update(extent.from_shape(new_counts.shape), l_counts)
  labels.update(extent.create(ex.ul, (ex.lr[0], 1), labels.shape),
                closest.reshape(pts.shape[0], 1))
  return []
def kmeans_outer_dist_mapper(ex_a, tile_a, ex_b, tile_b):
  """Outer mapper: label each point of tile_a with the index of its nearest
  center in tile_b."""
  dists = cdist(tile_a, tile_b)
  out_ex = extent.create((ex_a[0].ul[0], ),
                         (ex_a[0].lr[0], ),
                         (ex_a[0].array_shape[0], ))
  yield out_ex, np.argmin(dists, axis=1)
def kmeans_map2_dist_mapper(ex, tile, centers=None):
  """Map2 mapper: label each point of this tile with its nearest center."""
  pts = tile[0]
  out_ex = extent.create((ex[0].ul[0], ),
                         (ex[0].lr[0], ),
                         (ex[0].array_shape[0], ))
  nearest = np.argmin(cdist(pts, centers), axis=1)
  yield out_ex, nearest
def kmeans_count_mapper(extents, tiles, centers_count):
  """Count how many points were assigned to each of the *centers_count*
  centers; tiles[0] holds the per-point integer labels.

  FIX: `np.int` was deprecated in numpy 1.20 and removed in 1.24; the
  builtin `int` is its exact equivalent.
  """
  target_ex = extent.create((0, ), (centers_count, ), (centers_count, ))
  result = np.bincount(tiles[0].astype(int), minlength=centers_count)
  yield target_ex, result
def kmeans_center_mapper(extents, tiles, centers_count):
  """Sum the coordinates of the points assigned to each center.

  tiles[0] are the points, tiles[1] the per-point labels; yields a
  (centers_count, dim) array of per-center coordinate sums (divide by the
  counts from kmeans_count_mapper to obtain the new centroids).

  FIX: `xrange` replaced by `range` (Python-3 compatible, equivalent here);
  a stale block of commented-out searchsorted-based code was removed.
  """
  points = tiles[0]
  labels = tiles[1]
  target_ex = extent.create((0, 0), (centers_count, points.shape[1]),
                            (centers_count, points.shape[1]))
  new_centers = np.zeros((centers_count, points.shape[1]))
  for i in range(centers_count):
    matching = (labels == i)
    new_centers[i] = points[matching].sum(axis=0)
  yield target_ex, new_centers
class KMeans(object):
  # Distributed k-means on spartan arrays.  Several alternative execution
  # strategies are selectable via the `implementation` argument of fit().
  def __init__(self, n_clusters=8, n_iter=100):
    """K-Means clustering
    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    n_iter : int, optional, default: 100
        Number of iterations of the k-means algorithm for a
        single run.
    """
    self.n_clusters = n_clusters
    self.n_iter = n_iter
  def fit(self, X, centers=None, implementation='map2'):
    """Compute k-means clustering.
    Parameters
    ----------
    X : spartan matrix, shape=(n_samples, n_features). It should be tiled by rows.
    centers : numpy.ndarray. The initial centers. If None, it will be randomly generated.
    implementation : one of 'map2', 'outer', 'broadcast', 'shuffle' selecting
        the distributed execution strategy; all compute the same clustering.

    Returns (centers, labels).
    NOTE(review): the 'map2' branch returns centers as a plain numpy array,
    while 'outer' and 'broadcast' wrap them with expr.from_numpy -- confirm
    which type callers expect.
    """
    num_dim = X.shape[1]
    num_points = X.shape[0]
    # NOTE(review): np.int was removed in numpy >= 1.24.
    labels = expr.zeros((num_points, 1), dtype=np.int)
    if implementation == 'map2':
      if centers is None:
        centers = np.random.rand(self.n_clusters, num_dim)
      for i in range(self.n_iter):
        # assign each point to its nearest center, then histogram and sum
        labels = expr.map2(X, 0, fn=kmeans_map2_dist_mapper, fn_kw={"centers": centers},
                           shape=(X.shape[0], ))
        counts = expr.map2(labels, 0, fn=kmeans_count_mapper,
                           fn_kw={'centers_count': self.n_clusters},
                           shape=(centers.shape[0], ))
        new_centers = expr.map2((X, labels), (0, 0), fn=kmeans_center_mapper,
                                fn_kw={'centers_count': self.n_clusters},
                                shape=(centers.shape[0], centers.shape[1]))
        counts = counts.optimized().glom()
        centers = new_centers.optimized().glom()
        # If any centroids don't have any points assigned to them.
        zcount_indices = (counts == 0).reshape(self.n_clusters)
        if np.any(zcount_indices):
          # One or more centroids may not have any points assigned to them,
          # which results in their position being the zero-vector. We reseed these
          # centroids with new random values.
          n_points = np.count_nonzero(zcount_indices)
          # In order to get rid of dividing by zero.
          counts[zcount_indices] = 1
          centers[zcount_indices, :] = np.random.randn(n_points, num_dim)
        centers = centers / counts.reshape(centers.shape[0], 1)
      return centers, labels
    elif implementation == 'outer':
      if centers is None:
        centers = expr.rand(self.n_clusters, num_dim)
      for i in range(self.n_iter):
        labels = expr.outer((X, centers), (0, None), fn=kmeans_outer_dist_mapper,
                            shape=(X.shape[0],))
        #labels = expr.argmin(distances, axis=1)
        counts = expr.map2(labels, 0, fn=kmeans_count_mapper,
                           fn_kw={'centers_count': self.n_clusters},
                           shape=(centers.shape[0], ))
        new_centers = expr.map2((X, labels), (0, 0), fn=kmeans_center_mapper,
                                fn_kw={'centers_count': self.n_clusters},
                                shape=(centers.shape[0], centers.shape[1]))
        counts = counts.optimized().glom()
        centers = new_centers.optimized().glom()
        # If any centroids don't have any points assigned to them.
        zcount_indices = (counts == 0).reshape(self.n_clusters)
        if np.any(zcount_indices):
          # One or more centroids may not have any points assigned to them,
          # which results in their position being the zero-vector. We reseed these
          # centroids with new random values.
          n_points = np.count_nonzero(zcount_indices)
          # In order to get rid of dividing by zero.
          counts[zcount_indices] = 1
          centers[zcount_indices, :] = np.random.randn(n_points, num_dim)
        centers = centers / counts.reshape(centers.shape[0], 1)
      centers = expr.from_numpy(centers)
      return centers, labels
    elif implementation == 'broadcast':
      if centers is None:
        centers = expr.rand(self.n_clusters, num_dim)
      for i in range(self.n_iter):
        util.log_warn("k_means_ %d %d", i, time.time())
        # broadcast points against centers to get all pairwise distances
        X_broadcast = expr.reshape(X, (X.shape[0], 1, X.shape[1]))
        centers_broadcast = expr.reshape(centers, (1, centers.shape[0],
                                                   centers.shape[1]))
        distances = expr.sum(expr.square(X_broadcast - centers_broadcast), axis=2)
        labels = expr.argmin(distances, axis=1)
        # one-hot membership matrix: matches[p, c] == 1 iff point p -> center c
        center_idx = expr.arange((1, centers.shape[0]))
        matches = expr.reshape(labels, (labels.shape[0], 1)) == center_idx
        matches = matches.astype(np.int64)
        counts = expr.sum(matches, axis=0)
        centers = expr.sum(X_broadcast * expr.reshape(matches, (matches.shape[0],
                                                                matches.shape[1], 1)),
                           axis=0)
        counts = counts.optimized().glom()
        centers = centers.optimized().glom()
        # If any centroids don't have any points assigned to them.
        zcount_indices = (counts == 0).reshape(self.n_clusters)
        if np.any(zcount_indices):
          # One or more centroids may not have any points assigned to them,
          # which results in their position being the zero-vector. We reseed these
          # centroids with new random values.
          n_points = np.count_nonzero(zcount_indices)
          # In order to get rid of dividing by zero.
          counts[zcount_indices] = 1
          centers[zcount_indices, :] = np.random.randn(n_points, num_dim)
        centers = centers / counts.reshape(centers.shape[0], 1)
      centers = expr.from_numpy(centers)
      return centers, labels
    elif implementation == 'shuffle':
      if centers is None:
        centers = np.random.rand(self.n_clusters, num_dim)
      for i in range(self.n_iter):
        # Reset them to zero.
        new_centers = expr.ndarray((self.n_clusters, num_dim),
                                   reduce_fn=lambda a, b: a + b)
        new_counts = expr.ndarray((self.n_clusters, 1), dtype=np.int,
                                  reduce_fn=lambda a, b: a + b)
        # one pass over the tiles accumulates sums/counts and writes labels
        _ = expr.shuffle(X,
                         _find_cluster_mapper,
                         kw={'d_pts': X,
                             'old_centers': centers,
                             'new_centers': new_centers,
                             'new_counts': new_counts,
                             'labels': labels},
                         shape_hint=(1,),
                         cost_hint={hash(labels): {'00': 0,
                                                   '01': np.prod(labels.shape)}})
        _.force()
        new_counts = new_counts.glom()
        new_centers = new_centers.glom()
        # If any centroids don't have any points assigned to them.
        zcount_indices = (new_counts == 0).reshape(self.n_clusters)
        if np.any(zcount_indices):
          # One or more centroids may not have any points assigned to them,
          # which results in their position being the zero-vector. We reseed these
          # centroids with new random values.
          n_points = np.count_nonzero(zcount_indices)
          # In order to get rid of dividing by zero.
          new_counts[zcount_indices] = 1
          new_centers[zcount_indices, :] = np.random.randn(n_points, num_dim)
        new_centers = new_centers / new_counts
        centers = new_centers
      return centers, labels
| [
"spartan.array.extent.from_shape",
"numpy.sum",
"spartan.expr.sum",
"spartan.expr.rand",
"spartan.expr.outer",
"numpy.prod",
"numpy.zeros_like",
"numpy.random.randn",
"spartan.expr.from_numpy",
"spartan.expr.zeros",
"spartan.expr.ndarray",
"scipy.spatial.distance.cdist",
"spartan.expr.square... | [((255, 285), 'numpy.zeros', 'np.zeros', (['pts.shape[0]', 'np.int'], {}), '(pts.shape[0], np.int)\n', (263, 285), True, 'import numpy as np\n'), ((790, 835), 'numpy.zeros', 'np.zeros', (['(centers.shape[0], 1)'], {'dtype': 'np.int'}), '((centers.shape[0], 1), dtype=np.int)\n', (798, 835), True, 'import numpy as np\n'), ((850, 872), 'numpy.zeros_like', 'np.zeros_like', (['centers'], {}), '(centers)\n', (863, 872), True, 'import numpy as np\n'), ((1426, 1502), 'spartan.array.extent.create', 'extent.create', (['(ex_a[0].ul[0],)', '(ex_a[0].lr[0],)', '(ex_a[0].array_shape[0],)'], {}), '((ex_a[0].ul[0],), (ex_a[0].lr[0],), (ex_a[0].array_shape[0],))\n', (1439, 1502), False, 'from spartan.array import distarray, extent\n'), ((1711, 1781), 'spartan.array.extent.create', 'extent.create', (['(ex[0].ul[0],)', '(ex[0].lr[0],)', '(ex[0].array_shape[0],)'], {}), '((ex[0].ul[0],), (ex[0].lr[0],), (ex[0].array_shape[0],))\n', (1724, 1781), False, 'from spartan.array import distarray, extent\n'), ((1974, 2029), 'spartan.array.extent.create', 'extent.create', (['(0,)', '(centers_count,)', '(centers_count,)'], {}), '((0,), (centers_count,), (centers_count,))\n', (1987, 2029), False, 'from spartan.array import distarray, extent\n'), ((2245, 2338), 'spartan.array.extent.create', 'extent.create', (['(0, 0)', '(centers_count, points.shape[1])', '(centers_count, points.shape[1])'], {}), '((0, 0), (centers_count, points.shape[1]), (centers_count,\n points.shape[1]))\n', (2258, 2338), False, 'from spartan.array import distarray, extent\n'), ((2901, 2943), 'numpy.zeros', 'np.zeros', (['(centers_count, points.shape[1])'], {}), '((centers_count, points.shape[1]))\n', (2909, 2943), True, 'import numpy as np\n'), ((1070, 1106), 'spartan.array.extent.from_shape', 'extent.from_shape', (['new_centers.shape'], {}), '(new_centers.shape)\n', (1087, 1106), False, 'from spartan.array import distarray, extent\n'), ((1139, 1174), 'spartan.array.extent.from_shape', 
'extent.from_shape', (['new_counts.shape'], {}), '(new_counts.shape)\n', (1156, 1174), False, 'from spartan.array import distarray, extent\n'), ((1202, 1251), 'spartan.array.extent.create', 'extent.create', (['ex.ul', '(ex.lr[0], 1)', 'labels.shape'], {}), '(ex.ul, (ex.lr[0], 1), labels.shape)\n', (1215, 1251), False, 'from spartan.array import distarray, extent\n'), ((3915, 3956), 'spartan.expr.zeros', 'expr.zeros', (['(num_points, 1)'], {'dtype': 'np.int'}), '((num_points, 1), dtype=np.int)\n', (3925, 3956), False, 'from spartan import expr, util\n'), ((442, 462), 'numpy.sum', 'np.sum', (['((p - c) ** 2)'], {}), '((p - c) ** 2)\n', (448, 462), True, 'import numpy as np\n'), ((1591, 1613), 'scipy.spatial.distance.cdist', 'cdist', (['points', 'centers'], {}), '(points, centers)\n', (1596, 1613), False, 'from scipy.spatial.distance import cdist\n'), ((1870, 1892), 'scipy.spatial.distance.cdist', 'cdist', (['points', 'centers'], {}), '(points, centers)\n', (1875, 1892), False, 'from scipy.spatial.distance import cdist\n'), ((4035, 4075), 'numpy.random.rand', 'np.random.rand', (['self.n_clusters', 'num_dim'], {}), '(self.n_clusters, num_dim)\n', (4049, 4075), True, 'import numpy as np\n'), ((4129, 4225), 'spartan.expr.map2', 'expr.map2', (['X', '(0)'], {'fn': 'kmeans_map2_dist_mapper', 'fn_kw': "{'centers': centers}", 'shape': '(X.shape[0],)'}), "(X, 0, fn=kmeans_map2_dist_mapper, fn_kw={'centers': centers},\n shape=(X.shape[0],))\n", (4138, 4225), False, 'from spartan import expr, util\n'), ((4268, 4386), 'spartan.expr.map2', 'expr.map2', (['labels', '(0)'], {'fn': 'kmeans_count_mapper', 'fn_kw': "{'centers_count': self.n_clusters}", 'shape': '(centers.shape[0],)'}), "(labels, 0, fn=kmeans_count_mapper, fn_kw={'centers_count': self.\n n_clusters}, shape=(centers.shape[0],))\n", (4277, 4386), False, 'from spartan import expr, util\n'), ((4459, 4610), 'spartan.expr.map2', 'expr.map2', (['(X, labels)', '(0, 0)'], {'fn': 'kmeans_center_mapper', 'fn_kw': 
"{'centers_count': self.n_clusters}", 'shape': '(centers.shape[0], centers.shape[1])'}), "((X, labels), (0, 0), fn=kmeans_center_mapper, fn_kw={\n 'centers_count': self.n_clusters}, shape=(centers.shape[0], centers.\n shape[1]))\n", (4468, 4610), False, 'from spartan import expr, util\n'), ((4902, 4924), 'numpy.any', 'np.any', (['zcount_indices'], {}), '(zcount_indices)\n', (4908, 4924), True, 'import numpy as np\n'), ((5153, 5185), 'numpy.count_nonzero', 'np.count_nonzero', (['zcount_indices'], {}), '(zcount_indices)\n', (5169, 5185), True, 'import numpy as np\n'), ((5315, 5349), 'numpy.random.randn', 'np.random.randn', (['n_points', 'num_dim'], {}), '(n_points, num_dim)\n', (5330, 5349), True, 'import numpy as np\n'), ((5525, 5560), 'spartan.expr.rand', 'expr.rand', (['self.n_clusters', 'num_dim'], {}), '(self.n_clusters, num_dim)\n', (5534, 5560), False, 'from spartan import expr, util\n'), ((5614, 5704), 'spartan.expr.outer', 'expr.outer', (['(X, centers)', '(0, None)'], {'fn': 'kmeans_outer_dist_mapper', 'shape': '(X.shape[0],)'}), '((X, centers), (0, None), fn=kmeans_outer_dist_mapper, shape=(X.\n shape[0],))\n', (5624, 5704), False, 'from spartan import expr, util\n'), ((5794, 5912), 'spartan.expr.map2', 'expr.map2', (['labels', '(0)'], {'fn': 'kmeans_count_mapper', 'fn_kw': "{'centers_count': self.n_clusters}", 'shape': '(centers.shape[0],)'}), "(labels, 0, fn=kmeans_count_mapper, fn_kw={'centers_count': self.\n n_clusters}, shape=(centers.shape[0],))\n", (5803, 5912), False, 'from spartan import expr, util\n'), ((5985, 6136), 'spartan.expr.map2', 'expr.map2', (['(X, labels)', '(0, 0)'], {'fn': 'kmeans_center_mapper', 'fn_kw': "{'centers_count': self.n_clusters}", 'shape': '(centers.shape[0], centers.shape[1])'}), "((X, labels), (0, 0), fn=kmeans_center_mapper, fn_kw={\n 'centers_count': self.n_clusters}, shape=(centers.shape[0], centers.\n shape[1]))\n", (5994, 6136), False, 'from spartan import expr, util\n'), ((6428, 6450), 'numpy.any', 'np.any', 
(['zcount_indices'], {}), '(zcount_indices)\n', (6434, 6450), True, 'import numpy as np\n'), ((6959, 6983), 'spartan.expr.from_numpy', 'expr.from_numpy', (['centers'], {}), '(centers)\n', (6974, 6983), False, 'from spartan import expr, util\n'), ((6679, 6711), 'numpy.count_nonzero', 'np.count_nonzero', (['zcount_indices'], {}), '(zcount_indices)\n', (6695, 6711), True, 'import numpy as np\n'), ((6841, 6875), 'numpy.random.randn', 'np.random.randn', (['n_points', 'num_dim'], {}), '(n_points, num_dim)\n', (6856, 6875), True, 'import numpy as np\n'), ((7097, 7132), 'spartan.expr.rand', 'expr.rand', (['self.n_clusters', 'num_dim'], {}), '(self.n_clusters, num_dim)\n', (7106, 7132), False, 'from spartan import expr, util\n'), ((7247, 7291), 'spartan.expr.reshape', 'expr.reshape', (['X', '(X.shape[0], 1, X.shape[1])'], {}), '(X, (X.shape[0], 1, X.shape[1]))\n', (7259, 7291), False, 'from spartan import expr, util\n'), ((7320, 7382), 'spartan.expr.reshape', 'expr.reshape', (['centers', '(1, centers.shape[0], centers.shape[1])'], {}), '(centers, (1, centers.shape[0], centers.shape[1]))\n', (7332, 7382), False, 'from spartan import expr, util\n'), ((7534, 7564), 'spartan.expr.argmin', 'expr.argmin', (['distances'], {'axis': '(1)'}), '(distances, axis=1)\n', (7545, 7564), False, 'from spartan import expr, util\n'), ((7586, 7620), 'spartan.expr.arange', 'expr.arange', (['(1, centers.shape[0])'], {}), '((1, centers.shape[0]))\n', (7597, 7620), False, 'from spartan import expr, util\n'), ((7756, 7781), 'spartan.expr.sum', 'expr.sum', (['matches'], {'axis': '(0)'}), '(matches, axis=0)\n', (7764, 7781), False, 'from spartan import expr, util\n'), ((8220, 8242), 'numpy.any', 'np.any', (['zcount_indices'], {}), '(zcount_indices)\n', (8226, 8242), True, 'import numpy as np\n'), ((8751, 8775), 'spartan.expr.from_numpy', 'expr.from_numpy', (['centers'], {}), '(centers)\n', (8766, 8775), False, 'from spartan import expr, util\n'), ((7212, 7223), 'time.time', 'time.time', ([], {}), 
'()\n', (7221, 7223), False, 'import time\n'), ((7463, 7507), 'spartan.expr.square', 'expr.square', (['(X_broadcast - centers_broadcast)'], {}), '(X_broadcast - centers_broadcast)\n', (7474, 7507), False, 'from spartan import expr, util\n'), ((7639, 7681), 'spartan.expr.reshape', 'expr.reshape', (['labels', '(labels.shape[0], 1)'], {}), '(labels, (labels.shape[0], 1))\n', (7651, 7681), False, 'from spartan import expr, util\n'), ((8471, 8503), 'numpy.count_nonzero', 'np.count_nonzero', (['zcount_indices'], {}), '(zcount_indices)\n', (8487, 8503), True, 'import numpy as np\n'), ((8633, 8667), 'numpy.random.randn', 'np.random.randn', (['n_points', 'num_dim'], {}), '(n_points, num_dim)\n', (8648, 8667), True, 'import numpy as np\n'), ((8887, 8927), 'numpy.random.rand', 'np.random.rand', (['self.n_clusters', 'num_dim'], {}), '(self.n_clusters, num_dim)\n', (8901, 8927), True, 'import numpy as np\n'), ((9016, 9086), 'spartan.expr.ndarray', 'expr.ndarray', (['(self.n_clusters, num_dim)'], {'reduce_fn': '(lambda a, b: a + b)'}), '((self.n_clusters, num_dim), reduce_fn=lambda a, b: a + b)\n', (9028, 9086), False, 'from spartan import expr, util\n'), ((9143, 9221), 'spartan.expr.ndarray', 'expr.ndarray', (['(self.n_clusters, 1)'], {'dtype': 'np.int', 'reduce_fn': '(lambda a, b: a + b)'}), '((self.n_clusters, 1), dtype=np.int, reduce_fn=lambda a, b: a + b)\n', (9155, 9221), False, 'from spartan import expr, util\n'), ((10018, 10040), 'numpy.any', 'np.any', (['zcount_indices'], {}), '(zcount_indices)\n', (10024, 10040), True, 'import numpy as np\n'), ((7823, 7885), 'spartan.expr.reshape', 'expr.reshape', (['matches', '(matches.shape[0], matches.shape[1], 1)'], {}), '(matches, (matches.shape[0], matches.shape[1], 1))\n', (7835, 7885), False, 'from spartan import expr, util\n'), ((10269, 10301), 'numpy.count_nonzero', 'np.count_nonzero', (['zcount_indices'], {}), '(zcount_indices)\n', (10285, 10301), True, 'import numpy as np\n'), ((10439, 10473), 'numpy.random.randn', 
'np.random.randn', (['n_points', 'num_dim'], {}), '(n_points, num_dim)\n', (10454, 10473), True, 'import numpy as np\n'), ((9745, 9766), 'numpy.prod', 'np.prod', (['labels.shape'], {}), '(labels.shape)\n', (9752, 9766), True, 'import numpy as np\n')] |
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
from keras.layers import Dropout
from keras.layers import Conv1D
from keras.layers import Input
from keras.layers import MaxPooling1D
from keras.layers.merge import concatenate
from keras.models import load_model
from nltk.data import find
import nltk
import gensim
import os.path
from utils import functions as F
from utils.knowledge_base import KnowledgeBase as KB
import numpy as np
class CNN(object):
    """Text-segment classifier built on a multi-filter-width 1D CNN.

    Token embeddings concatenate a word2vec vector with hand-crafted
    features (position in segment, segment length, POS-tag id) and the
    knowledge base's per-word probability vector.  The softmax output has
    one class per knowledge-base attribute.
    """
    def __init__(self,k_base):
        # Locate the pruned word2vec sample shipped with nltk_data and load
        # it as a gensim keyed-vector model.
        word2vec_sample = str(find('models/word2vec_sample/pruned.word2vec.txt'))
        self.wv = gensim.models.KeyedVectors.load_word2vec_format(word2vec_sample, binary=False)
        self.wv_size = 300 # word2vec has 300 dimensions
        # Full embedding width; computed later in create_embedding_matrix.
        self.emb_size = 0
        # Lazily-grown POS-tag -> integer-id map; ids start at 2
        # (presumably 0/1 are reserved for padding/OOV -- TODO confirm).
        self.pos_dict = {}
        self.pos_i = 2
        self.k_base = k_base
        # NOTE(review): oov_token is normally a string; passing 1 relies on
        # the installed Keras version accepting a non-string token.
        self.tokenizer = Tokenizer(num_words=10000,oov_token=1)
        self.tokenizer.fit_on_texts(self.k_base.df['segment'])
        print("Training CNN model...")
        self.train_model()

    def create_embedding_matrix(self,vocab_size,word_index):
        """Build the (vocab_size, emb_size) embedding matrix.

        Each word's row concatenates: its word2vec vector (random for OOV
        words), its position within its segment, the segment length, an
        integer POS-tag id, and the knowledge-base probability vector.
        Words appearing in several segments keep the features of the last
        segment processed.
        """
        pos_dict = {}  # NOTE(review): unused local; self.pos_dict is used below
        self.emb_size = self.wv_size + self.k_base.num_attributes + 3 # word2vec + cp vector (size = num_attributes) + 3 features (position, length and postag)
        embedding_matrix = np.zeros((vocab_size,self.emb_size))
        for index, row in self.k_base.df.iterrows():
            segment = row['segment']
            terms = segment.split()
            pos_tags = nltk.pos_tag(terms)
            segment_size = len(terms)
            cp_vector = self.k_base.get_probabilities(terms)
            for pos_segment,word in enumerate(terms):
                # first feature is word2vec
                if word in self.wv:
                    vector = self.wv[word]
                else:
                    # OOV word: fall back to a random vector
                    vector = np.random.rand(self.wv_size)
                # second feature is position in segment
                vector = np.append(vector,pos_segment)
                # third feature is position in record - it was removed
                # fourth feature is size of the segment
                vector = np.append(vector,segment_size)
                # fifth feature is the POS tag, mapped to an integer id
                tag = pos_tags[pos_segment][1]
                if tag not in self.pos_dict:
                    self.pos_dict[tag] = self.pos_i
                    self.pos_i += 1
                vector = np.append(vector,self.pos_dict[tag])
                # sixth feature is the cp probability vector
                vector = np.append(vector,cp_vector[word])
                # add to embedding matrix
                embedding_matrix[word_index[word]] = vector
        return embedding_matrix

    def define_model(self, vocab_size, num_filters, filter_sizes, embedding_matrix):
        """Assemble the CNN: one Conv1D+MaxPool branch per filter size,
        concatenated, then Flatten, Dropout, and a softmax over the
        knowledge-base attributes.  Returns the compiled Keras model.
        """
        inputs = Input(shape=(self.max_length,))
        embedding = Embedding(vocab_size, self.emb_size, weights=[embedding_matrix], input_length=self.max_length, trainable=True)(inputs)
        layers = []
        for i in filter_sizes:
            conv = Conv1D(filters=num_filters, kernel_size=i, activation='relu')(embedding)
            # pool the full conv output down to one value per filter
            poolsize = self.max_length-i+1
            pool = MaxPooling1D(pool_size=poolsize)(conv)
            layers.append(pool)
        # merge
        merged = concatenate(layers)
        # flatten and Dropout
        flat = Flatten()(merged)
        drop = Dropout(0.5)(flat)
        # softmax
        outputs = Dense(self.k_base.num_attributes, activation='softmax')(drop)
        model = Model(inputs=inputs, outputs=outputs)
        # compile
        model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        # summarize
        print(model.summary())
        return model

    def train_model(self):
        """Tokenize and pad the knowledge-base segments, build the embedding
        matrix, then define and fit the CNN (10 epochs, 100 filters of
        widths 4 and 6)."""
        X_train = self.tokenizer.texts_to_sequences(self.k_base.df['segment'])
        y_train = self.k_base.df['label']
        vocab_size = len(self.tokenizer.word_index) + 1
        # pad every sequence to the length of the longest segment
        self.max_length = max([len(x) for x in X_train])
        X_train = pad_sequences(X_train, padding='post', maxlen=self.max_length)
        embedding_matrix = self.create_embedding_matrix(vocab_size,self.tokenizer.word_index)
        # define model
        self.model = self.define_model(vocab_size,100,[4,6],embedding_matrix)
        # train model
        self.model.fit(X_train, y_train, epochs=10, verbose=0)
        #self.model.save("model.h5")

    def predict(self,block):
        """Classify a block's text: returns a dict mapping each attribute
        label to its softmax probability."""
        tokens = self.tokenizer.texts_to_sequences([block.value])
        padded = pad_sequences(tokens, padding='post',maxlen=self.max_length)
        predictions = self.model.predict(padded)[0]
        scores = {}
        for i in range(0,len(predictions)):
            scores[self.k_base.labels_dict[i]] = predictions[i]
        return scores
| [
"keras.preprocessing.sequence.pad_sequences",
"keras.layers.Dropout",
"numpy.random.rand",
"nltk.data.find",
"keras.layers.merge.concatenate",
"numpy.zeros",
"keras.models.Model",
"keras.layers.Flatten",
"keras.layers.Conv1D",
"numpy.append",
"keras.layers.MaxPooling1D",
"keras.preprocessing.t... | [((807, 885), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['word2vec_sample'], {'binary': '(False)'}), '(word2vec_sample, binary=False)\n', (854, 885), False, 'import gensim\n'), ((1071, 1110), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': '(10000)', 'oov_token': '(1)'}), '(num_words=10000, oov_token=1)\n', (1080, 1110), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1510, 1547), 'numpy.zeros', 'np.zeros', (['(vocab_size, self.emb_size)'], {}), '((vocab_size, self.emb_size))\n', (1518, 1547), True, 'import numpy as np\n'), ((2998, 3029), 'keras.layers.Input', 'Input', ([], {'shape': '(self.max_length,)'}), '(shape=(self.max_length,))\n', (3003, 3029), False, 'from keras.layers import Input\n'), ((3478, 3497), 'keras.layers.merge.concatenate', 'concatenate', (['layers'], {}), '(layers)\n', (3489, 3497), False, 'from keras.layers.merge import concatenate\n'), ((3708, 3745), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (3713, 3745), False, 'from keras.models import Model\n'), ((4219, 4281), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X_train'], {'padding': '"""post"""', 'maxlen': 'self.max_length'}), "(X_train, padding='post', maxlen=self.max_length)\n", (4232, 4281), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((4712, 4773), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['tokens'], {'padding': '"""post"""', 'maxlen': 'self.max_length'}), "(tokens, padding='post', maxlen=self.max_length)\n", (4725, 4773), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((737, 787), 'nltk.data.find', 'find', (['"""models/word2vec_sample/pruned.word2vec.txt"""'], {}), "('models/word2vec_sample/pruned.word2vec.txt')\n", (741, 787), False, 'from nltk.data import find\n'), ((1696, 1715), 'nltk.pos_tag', 
'nltk.pos_tag', (['terms'], {}), '(terms)\n', (1708, 1715), False, 'import nltk\n'), ((3050, 3164), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'self.emb_size'], {'weights': '[embedding_matrix]', 'input_length': 'self.max_length', 'trainable': '(True)'}), '(vocab_size, self.emb_size, weights=[embedding_matrix],\n input_length=self.max_length, trainable=True)\n', (3059, 3164), False, 'from keras.layers import Embedding\n'), ((3542, 3551), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3549, 3551), False, 'from keras.layers import Flatten\n'), ((3575, 3587), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3582, 3587), False, 'from keras.layers import Dropout\n'), ((3630, 3685), 'keras.layers.Dense', 'Dense', (['self.k_base.num_attributes'], {'activation': '"""softmax"""'}), "(self.k_base.num_attributes, activation='softmax')\n", (3635, 3685), False, 'from keras.layers import Dense\n'), ((2151, 2181), 'numpy.append', 'np.append', (['vector', 'pos_segment'], {}), '(vector, pos_segment)\n', (2160, 2181), True, 'import numpy as np\n'), ((2331, 2362), 'numpy.append', 'np.append', (['vector', 'segment_size'], {}), '(vector, segment_size)\n', (2340, 2362), True, 'import numpy as np\n'), ((2609, 2646), 'numpy.append', 'np.append', (['vector', 'self.pos_dict[tag]'], {}), '(vector, self.pos_dict[tag])\n', (2618, 2646), True, 'import numpy as np\n'), ((2727, 2761), 'numpy.append', 'np.append', (['vector', 'cp_vector[word]'], {}), '(vector, cp_vector[word])\n', (2736, 2761), True, 'import numpy as np\n'), ((3239, 3300), 'keras.layers.Conv1D', 'Conv1D', ([], {'filters': 'num_filters', 'kernel_size': 'i', 'activation': '"""relu"""'}), "(filters=num_filters, kernel_size=i, activation='relu')\n", (3245, 3300), False, 'from keras.layers import Conv1D\n'), ((3374, 3406), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': 'poolsize'}), '(pool_size=poolsize)\n', (3386, 3406), False, 'from keras.layers import MaxPooling1D\n'), ((2042, 2070), 
'numpy.random.rand', 'np.random.rand', (['self.wv_size'], {}), '(self.wv_size)\n', (2056, 2070), True, 'import numpy as np\n')] |
"""
Collection of tests for unified linear algebra functions
"""
# global
import pytest
import numpy as np
# local
import ivy
import ivy.functional.backends.numpy
import ivy_tests.test_ivy.helpers as helpers
# svd
@pytest.mark.parametrize(
    "x", [[[[1., 0.], [0., 1.]]], [[[[1., 0.], [0., 1.]]]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_svd(x, dtype, tensor_fn, dev, call):
    """Smoke-test ivy.svd: types, shapes, and values vs the numpy backend."""
    # tf.linalg.svd segfaults when CUDA is installed, but array is on CPU
    if 'cpu' in dev and call in (helpers.tf_call, helpers.tf_graph_call):
        pytest.skip()
    arr = tensor_fn(x, dtype, dev)
    u, s, vh = ivy.svd(arr)
    # every factor must come back as an ivy array
    for factor in (u, s, vh):
        assert ivy.is_array(factor)
    # u and vh keep the input shape; s drops the final axis
    assert u.shape == arr.shape
    assert s.shape == arr.shape[:-1]
    assert vh.shape == arr.shape
    # values must agree with the numpy backend implementation
    pred_u, pred_s, pred_vh = call(ivy.svd, arr)
    true_u, true_s, true_vh = ivy.functional.backends.numpy.svd(ivy.to_numpy(arr))
    for pred, true in zip((pred_u, pred_s, pred_vh), (true_u, true_s, true_vh)):
        assert np.allclose(pred, true)
    # compilation test
    if not ivy.array_mode():
        helpers.assert_compilable(ivy.svd)
# vector_norm
@pytest.mark.parametrize(
    "x_n_p_n_ax_n_kd_n_tn", [([[1., 2.], [3., 4.]], 2, -1, None, [2.236068, 5.0]),
                             ([[1., 2.], [3., 4.]], 3, None, False, 4.641588),
                             ([[1., 2.], [3., 4.]], -float('inf'), None, False, 1.),
                             ([[1., 2.], [3., 4.]], 0, None, False, 4.),
                             ([[1., 2.], [3., 4.]], float('inf'), None, False, 4.),
                             ([[1., 2.], [3., 4.]], 0.5, 0, True, [[7.464102, 11.656854]]),
                             ([[[1., 2.], [3., 4.]]], 1, None, None, 10.)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_vector_norm(x_n_p_n_ax_n_kd_n_tn, dtype, tensor_fn, dev, call):
    """Check ivy.vector_norm shapes and values for several p/axis/keepdims combos."""
    raw_x, p, axis, keepdims, true_norm = x_n_p_n_ax_n_kd_n_tn
    arr = tensor_fn(raw_x, dtype, dev)
    # forward only the arguments that were actually specified
    kwargs = {}
    for name, value in zip(['x', 'p', 'axis', 'keepdims'], [arr, p, axis, keepdims]):
        if value is not None:
            kwargs[name] = value
    ret = ivy.vector_norm(**kwargs)
    assert ivy.is_array(ret)
    # derive the shape the reduction should produce
    if keepdims:
        expected_shape = [1 if dim == axis else size for dim, size in enumerate(arr.shape)]
    elif axis is None:
        expected_shape = [1]
    else:
        expected_shape = list(arr.shape)
        del expected_shape[axis]
    assert ret.shape == tuple(expected_shape)
    # values must match the precomputed norm
    kwargs.pop('x', None)
    assert np.allclose(call(ivy.vector_norm, arr, **kwargs), np.array(true_norm))
    if call is helpers.torch_call:
        # pytorch jit does not support calling joint ivy methods.
        return
    if not ivy.array_mode():
        helpers.assert_compilable(ivy.vector_norm)
# matrix_norm
@pytest.mark.parametrize(
    "x_n_p_n_ax_n_kd", [([[[1., 2.], [3., 4.]]], 2, (-2, -1), None),
                        ([[1., 2.], [3., 4.]], -2, None, False),
                        ([[1., 2.], [3., 4.]], -float('inf'), None, False),
                        ([[1., 2.], [3., 4.]], float('inf'), None, False),
                        ([[[1.], [2.]], [[3.], [4.]]], 1, (0, 1), True),
                        ([[[1., 2.], [3., 4.]]], -1, None, None)])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_matrix_norm(x_n_p_n_ax_n_kd, dtype, tensor_fn, dev, call):
    """Check ivy.matrix_norm shapes and values against np.linalg.norm."""
    x_raw, p, axes, keepdims = x_n_p_n_ax_n_kd
    # tensorflow does not support these p value of -2
    if call in [helpers.tf_call, helpers.tf_graph_call] and p == -2:
        pytest.skip()
    # MXNet does not support matrix norms
    if call is helpers.mx_call:
        pytest.skip()
    mat = tensor_fn(x_raw, dtype, dev)
    # forward only the arguments that were actually specified
    kwargs = {}
    for name, value in zip(['x', 'p', 'axes', 'keepdims'], [mat, p, axes, keepdims]):
        if value is not None:
            kwargs[name] = value
    ret = ivy.matrix_norm(**kwargs)
    assert ivy.is_array(ret)
    # derive the expected result shape from the reduction arguments
    ndim = len(mat.shape)
    if keepdims:
        expected_shape = [1] * ndim
    elif axes is None:
        expected_shape = [1] * (ndim - 2) if ndim > 2 else [1]
    else:
        reduced = [a % ndim for a in axes]
        expected_shape = [1 for i in range(ndim) if i not in reduced]
    assert ret.shape == tuple(expected_shape)
    kwargs.pop('x', None)
    pred = call(ivy.matrix_norm, mat, **kwargs)
    # translate ivy argument names to their numpy equivalents
    if 'p' in kwargs:
        kwargs['ord'] = kwargs.pop('p')
    if 'axes' in kwargs:
        kwargs['axis'] = kwargs.pop('axes')
    else:
        kwargs['axis'] = (-2, -1)
    assert np.allclose(pred, np.linalg.norm(np.array(x_raw), **kwargs))
    if call is helpers.torch_call:
        # ToDo: add correct message here
        # pytorch jit does not support Union typing.
        return
    if not ivy.array_mode():
        helpers.assert_compilable(ivy.matrix_norm)
# inv
@pytest.mark.parametrize(
    "x", [[[1., 0.], [0., 1.]], [[[1., 0.], [0., 1.]]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_inv(x, dtype, tensor_fn, dev, call):
    """Smoke-test ivy.inv against the numpy backend reference."""
    # tf.linalg.inv segfaults when CUDA is installed, but array is on CPU
    if 'cpu' in dev and call in (helpers.tf_call, helpers.tf_graph_call):
        pytest.skip()
    mat = tensor_fn(x, dtype, dev)
    ret = ivy.inv(mat)
    assert ivy.is_array(ret)
    # inversion preserves the input shape
    assert ret.shape == mat.shape
    # values must agree with the numpy backend implementation
    expected = ivy.functional.backends.numpy.inv(ivy.to_numpy(mat))
    assert np.allclose(call(ivy.inv, mat), expected)
    # compilation test
    if not ivy.array_mode():
        helpers.assert_compilable(ivy.inv)
# pinv
@pytest.mark.parametrize(
    "x", [[[1., 0.], [0., 1.], [1., 0.]], [[[1., 0.], [0., 1.], [1., 0.]]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_pinv(x, dtype, tensor_fn, dev, call):
    """Smoke-test ivy.pinv against the numpy backend reference."""
    # tf.linalg.pinv segfaults when CUDA is installed, but array is on CPU
    if 'cpu' in dev and call in (helpers.tf_call, helpers.tf_graph_call):
        pytest.skip()
    mat = tensor_fn(x, dtype, dev)
    ret = ivy.pinv(mat)
    assert ivy.is_array(ret)
    # the pseudo-inverse swaps the trailing two dimensions
    assert ret.shape == mat.shape[:-2] + (mat.shape[-1], mat.shape[-2])
    # values must agree with the numpy backend implementation
    expected = ivy.functional.backends.numpy.pinv(ivy.to_numpy(mat))
    assert np.allclose(call(ivy.pinv, mat), expected)
    # compilation test
    if not ivy.array_mode():
        helpers.assert_compilable(ivy.pinv)
# vector_to_skew_symmetric_matrix
@pytest.mark.parametrize(
    "x", [[[[1., 2., 3.]], [[4., 5., 6.]], [[1., 2., 3.]], [[4., 5., 6.]], [[1., 2., 3.]]], [[1., 2., 3.]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_vector_to_skew_symmetric_matrix(x, dtype, tensor_fn, dev, call):
    """Smoke-test ivy.vector_to_skew_symmetric_matrix against the numpy backend."""
    vec = tensor_fn(x, dtype, dev)
    ret = ivy.vector_to_skew_symmetric_matrix(vec)
    assert ivy.is_array(ret)
    # a trailing axis the size of the last input axis is appended
    assert ret.shape == vec.shape + (vec.shape[-1],)
    # values must agree with the numpy backend implementation
    expected = ivy.functional.backends.numpy.vector_to_skew_symmetric_matrix(ivy.to_numpy(vec))
    assert np.allclose(call(ivy.vector_to_skew_symmetric_matrix, vec), expected)
    # compilation test
    if not ivy.array_mode():
        helpers.assert_compilable(ivy.vector_to_skew_symmetric_matrix)
# cholesky
@pytest.mark.parametrize(
    "x", [[[1.0, -1.0, 2.0], [-1.0, 5.0, -4.0], [2.0, -4.0, 6.0]], [[[1.0, -1.0, 2.0], [-1.0, 5.0, -4.0], [2.0, -4.0, 6.0]]]])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
def test_cholesky(x, dtype, tensor_fn, dev, call):
    """Smoke-test ivy.cholesky against the numpy backend reference."""
    mat = tensor_fn(x, dtype, dev)
    ret = ivy.cholesky(mat)
    assert ivy.is_array(ret)
    # the Cholesky factor keeps the input shape
    assert ret.shape == mat.shape
    # values must agree with the numpy backend implementation
    expected = ivy.functional.backends.numpy.cholesky(ivy.to_numpy(mat))
    assert np.allclose(call(ivy.cholesky, mat), expected)
    # compilation test
    if not ivy.array_mode():
        helpers.assert_compilable(ivy.cholesky)
| [
"ivy.array_mode",
"ivy.cholesky",
"ivy.pinv",
"ivy_tests.test_ivy.helpers.assert_compilable",
"ivy.vector_to_skew_symmetric_matrix",
"ivy.svd",
"numpy.allclose",
"ivy.inv",
"pytest.skip",
"ivy.is_array",
"numpy.array",
"ivy.to_numpy",
"pytest.mark.parametrize",
"ivy.matrix_norm",
"ivy.ve... | [((219, 312), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x"""', '[[[[1.0, 0.0], [0.0, 1.0]]], [[[[1.0, 0.0], [0.0, 1.0]]]]]'], {}), "('x', [[[[1.0, 0.0], [0.0, 1.0]]], [[[[1.0, 0.0], [\n 0.0, 1.0]]]]])\n", (242, 312), False, 'import pytest\n'), ((306, 351), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32']"], {}), "('dtype', ['float32'])\n", (329, 351), False, 'import pytest\n'), ((358, 423), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tensor_fn"""', '[ivy.array, helpers.var_fn]'], {}), "('tensor_fn', [ivy.array, helpers.var_fn])\n", (381, 423), False, 'import pytest\n'), ((1916, 1961), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32']"], {}), "('dtype', ['float32'])\n", (1939, 1961), False, 'import pytest\n'), ((1968, 2033), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tensor_fn"""', '[ivy.array, helpers.var_fn]'], {}), "('tensor_fn', [ivy.array, helpers.var_fn])\n", (1991, 2033), False, 'import pytest\n'), ((3499, 3544), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32']"], {}), "('dtype', ['float32'])\n", (3522, 3544), False, 'import pytest\n'), ((3551, 3616), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tensor_fn"""', '[ivy.array, helpers.var_fn]'], {}), "('tensor_fn', [ivy.array, helpers.var_fn])\n", (3574, 3616), False, 'import pytest\n'), ((5240, 5328), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x"""', '[[[1.0, 0.0], [0.0, 1.0]], [[[1.0, 0.0], [0.0, 1.0]]]]'], {}), "('x', [[[1.0, 0.0], [0.0, 1.0]], [[[1.0, 0.0], [0.0,\n 1.0]]]])\n", (5263, 5328), False, 'import pytest\n'), ((5323, 5368), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32']"], {}), "('dtype', ['float32'])\n", (5346, 5368), False, 'import pytest\n'), ((5375, 5440), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tensor_fn"""', '[ivy.array, helpers.var_fn]'], 
{}), "('tensor_fn', [ivy.array, helpers.var_fn])\n", (5398, 5440), False, 'import pytest\n'), ((6052, 6164), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x"""', '[[[1.0, 0.0], [0.0, 1.0], [1.0, 0.0]], [[[1.0, 0.0], [0.0, 1.0], [1.0, 0.0]]]]'], {}), "('x', [[[1.0, 0.0], [0.0, 1.0], [1.0, 0.0]], [[[1.0,\n 0.0], [0.0, 1.0], [1.0, 0.0]]]])\n", (6075, 6164), False, 'import pytest\n'), ((6155, 6200), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32']"], {}), "('dtype', ['float32'])\n", (6178, 6200), False, 'import pytest\n'), ((6207, 6272), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tensor_fn"""', '[ivy.array, helpers.var_fn]'], {}), "('tensor_fn', [ivy.array, helpers.var_fn])\n", (6230, 6272), False, 'import pytest\n'), ((6951, 7101), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x"""', '[[[[1.0, 2.0, 3.0]], [[4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0]], [[4.0, 5.0, 6.0]\n ], [[1.0, 2.0, 3.0]]], [[1.0, 2.0, 3.0]]]'], {}), "('x', [[[[1.0, 2.0, 3.0]], [[4.0, 5.0, 6.0]], [[1.0,\n 2.0, 3.0]], [[4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0]]], [[1.0, 2.0, 3.0]]])\n", (6974, 7101), False, 'import pytest\n'), ((7086, 7131), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32']"], {}), "('dtype', ['float32'])\n", (7109, 7131), False, 'import pytest\n'), ((7138, 7203), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tensor_fn"""', '[ivy.array, helpers.var_fn]'], {}), "('tensor_fn', [ivy.array, helpers.var_fn])\n", (7161, 7203), False, 'import pytest\n'), ((7824, 7975), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x"""', '[[[1.0, -1.0, 2.0], [-1.0, 5.0, -4.0], [2.0, -4.0, 6.0]], [[[1.0, -1.0, 2.0\n ], [-1.0, 5.0, -4.0], [2.0, -4.0, 6.0]]]]'], {}), "('x', [[[1.0, -1.0, 2.0], [-1.0, 5.0, -4.0], [2.0, -\n 4.0, 6.0]], [[[1.0, -1.0, 2.0], [-1.0, 5.0, -4.0], [2.0, -4.0, 6.0]]]])\n", (7847, 7975), False, 'import pytest\n'), ((7977, 8022), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""dtype"""', "['float32']"], {}), "('dtype', ['float32'])\n", (8000, 8022), False, 'import pytest\n'), ((8029, 8094), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tensor_fn"""', '[ivy.array, helpers.var_fn]'], {}), "('tensor_fn', [ivy.array, helpers.var_fn])\n", (8052, 8094), False, 'import pytest\n'), ((714, 724), 'ivy.svd', 'ivy.svd', (['x'], {}), '(x)\n', (721, 724), False, 'import ivy\n'), ((752, 767), 'ivy.is_array', 'ivy.is_array', (['u'], {}), '(u)\n', (764, 767), False, 'import ivy\n'), ((779, 794), 'ivy.is_array', 'ivy.is_array', (['s'], {}), '(s)\n', (791, 794), False, 'import ivy\n'), ((806, 822), 'ivy.is_array', 'ivy.is_array', (['vh'], {}), '(vh)\n', (818, 822), False, 'import ivy\n'), ((1098, 1125), 'numpy.allclose', 'np.allclose', (['pred_u', 'true_u'], {}), '(pred_u, true_u)\n', (1109, 1125), True, 'import numpy as np\n'), ((1137, 1164), 'numpy.allclose', 'np.allclose', (['pred_s', 'true_s'], {}), '(pred_s, true_s)\n', (1148, 1164), True, 'import numpy as np\n'), ((1176, 1205), 'numpy.allclose', 'np.allclose', (['pred_vh', 'true_vh'], {}), '(pred_vh, true_vh)\n', (1187, 1205), True, 'import numpy as np\n'), ((2324, 2349), 'ivy.vector_norm', 'ivy.vector_norm', ([], {}), '(**kwargs)\n', (2339, 2349), False, 'import ivy\n'), ((2377, 2394), 'ivy.is_array', 'ivy.is_array', (['ret'], {}), '(ret)\n', (2389, 2394), False, 'import ivy\n'), ((4143, 4168), 'ivy.matrix_norm', 'ivy.matrix_norm', ([], {}), '(**kwargs)\n', (4158, 4168), False, 'import ivy\n'), ((4196, 4213), 'ivy.is_array', 'ivy.is_array', (['ret'], {}), '(ret)\n', (4208, 4213), False, 'import ivy\n'), ((5726, 5736), 'ivy.inv', 'ivy.inv', (['x'], {}), '(x)\n', (5733, 5736), False, 'import ivy\n'), ((5764, 5781), 'ivy.is_array', 'ivy.is_array', (['ret'], {}), '(ret)\n', (5776, 5781), False, 'import ivy\n'), ((6560, 6571), 'ivy.pinv', 'ivy.pinv', (['x'], {}), '(x)\n', (6568, 6571), False, 'import ivy\n'), ((6599, 6616), 'ivy.is_array', 'ivy.is_array', 
(['ret'], {}), '(ret)\n', (6611, 6616), False, 'import ivy\n'), ((7343, 7381), 'ivy.vector_to_skew_symmetric_matrix', 'ivy.vector_to_skew_symmetric_matrix', (['x'], {}), '(x)\n', (7378, 7381), False, 'import ivy\n'), ((7409, 7426), 'ivy.is_array', 'ivy.is_array', (['ret'], {}), '(ret)\n', (7421, 7426), False, 'import ivy\n'), ((8211, 8226), 'ivy.cholesky', 'ivy.cholesky', (['x'], {}), '(x)\n', (8223, 8226), False, 'import ivy\n'), ((8254, 8271), 'ivy.is_array', 'ivy.is_array', (['ret'], {}), '(ret)\n', (8266, 8271), False, 'import ivy\n'), ((635, 648), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (646, 648), False, 'import pytest\n'), ((1070, 1085), 'ivy.to_numpy', 'ivy.to_numpy', (['x'], {}), '(x)\n', (1082, 1085), False, 'import ivy\n'), ((1240, 1256), 'ivy.array_mode', 'ivy.array_mode', ([], {}), '()\n', (1254, 1256), False, 'import ivy\n'), ((1266, 1300), 'ivy_tests.test_ivy.helpers.assert_compilable', 'helpers.assert_compilable', (['ivy.svd'], {}), '(ivy.svd)\n', (1291, 1300), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((2791, 2810), 'numpy.array', 'np.array', (['true_norm'], {}), '(true_norm)\n', (2799, 2810), True, 'import numpy as np\n'), ((2962, 2978), 'ivy.array_mode', 'ivy.array_mode', ([], {}), '()\n', (2976, 2978), False, 'import ivy\n'), ((2988, 3030), 'ivy_tests.test_ivy.helpers.assert_compilable', 'helpers.assert_compilable', (['ivy.vector_norm'], {}), '(ivy.vector_norm)\n', (3013, 3030), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((3881, 3894), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (3892, 3894), False, 'import pytest\n'), ((3981, 3994), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (3992, 3994), False, 'import pytest\n'), ((5162, 5178), 'ivy.array_mode', 'ivy.array_mode', ([], {}), '()\n', (5176, 5178), False, 'import ivy\n'), ((5188, 5230), 'ivy_tests.test_ivy.helpers.assert_compilable', 'helpers.assert_compilable', (['ivy.matrix_norm'], {}), '(ivy.matrix_norm)\n', (5213, 5230), True, 'import 
ivy_tests.test_ivy.helpers as helpers\n'), ((5652, 5665), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (5663, 5665), False, 'import pytest\n'), ((5981, 5997), 'ivy.array_mode', 'ivy.array_mode', ([], {}), '()\n', (5995, 5997), False, 'import ivy\n'), ((6007, 6041), 'ivy_tests.test_ivy.helpers.assert_compilable', 'helpers.assert_compilable', (['ivy.inv'], {}), '(ivy.inv)\n', (6032, 6041), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((6486, 6499), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (6497, 6499), False, 'import pytest\n'), ((6852, 6868), 'ivy.array_mode', 'ivy.array_mode', ([], {}), '()\n', (6866, 6868), False, 'import ivy\n'), ((6878, 6913), 'ivy_tests.test_ivy.helpers.assert_compilable', 'helpers.assert_compilable', (['ivy.pinv'], {}), '(ivy.pinv)\n', (6903, 6913), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((7722, 7738), 'ivy.array_mode', 'ivy.array_mode', ([], {}), '()\n', (7736, 7738), False, 'import ivy\n'), ((7748, 7810), 'ivy_tests.test_ivy.helpers.assert_compilable', 'helpers.assert_compilable', (['ivy.vector_to_skew_symmetric_matrix'], {}), '(ivy.vector_to_skew_symmetric_matrix)\n', (7773, 7810), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((8481, 8497), 'ivy.array_mode', 'ivy.array_mode', ([], {}), '()\n', (8495, 8497), False, 'import ivy\n'), ((8507, 8546), 'ivy_tests.test_ivy.helpers.assert_compilable', 'helpers.assert_compilable', (['ivy.cholesky'], {}), '(ivy.cholesky)\n', (8532, 8546), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((4956, 4971), 'numpy.array', 'np.array', (['x_raw'], {}), '(x_raw)\n', (4964, 4971), True, 'import numpy as np\n'), ((5929, 5944), 'ivy.to_numpy', 'ivy.to_numpy', (['x'], {}), '(x)\n', (5941, 5944), False, 'import ivy\n'), ((6800, 6815), 'ivy.to_numpy', 'ivy.to_numpy', (['x'], {}), '(x)\n', (6812, 6815), False, 'import ivy\n'), ((7670, 7685), 'ivy.to_numpy', 'ivy.to_numpy', (['x'], {}), '(x)\n', (7682, 7685), False, 'import ivy\n'), ((8429, 8444), 
'ivy.to_numpy', 'ivy.to_numpy', (['x'], {}), '(x)\n', (8441, 8444), False, 'import ivy\n')] |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from numba import cuda
from warprnnt_numba.rnnt_loss import rnnt_numpy
from warprnnt_numba.rnnt_loss.rnnt_pytorch import certify_inputs
from warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce
from warprnnt_numba import numba_utils
from warprnnt_numba.numba_utils import __NUMBA_MINIMUM_VERSION__
def log_softmax(x, axis=-1):
    """Return the log-softmax of a numpy array along ``axis``, computed via torch.

    The input is wrapped with ``torch.from_numpy`` (zero-copy) so no data is
    duplicated on the way in; the result is converted back to a numpy array.
    """
    tensor = torch.from_numpy(x)  # zero-copy view of the numpy buffer
    log_probs = torch.log_softmax(tensor, dim=axis)
    return log_probs.numpy()
class TestRNNTCUDAKernels:
    """Compares the numba CUDA RNNT kernels against the reference numpy implementation.

    Each public test method delegates to a private helper so the (previously
    five-times duplicated) setup, kernel-launch and comparison code lives in
    exactly one place.
    """

    @staticmethod
    def _get_stream(device):
        """Return a numba CUDA stream bound to the active torch stream when supported.

        Older numba versions lack ``cuda.external_stream``; fall back to the
        default numba stream in that case.
        """
        if hasattr(cuda, 'external_stream'):
            return cuda.external_stream(torch.cuda.current_stream(device).cuda_stream)
        return cuda.default_stream()

    @staticmethod
    def _assert_close(reference, computed):
        """Assert elementwise agreement between a numpy reference and a CUDA result."""
        diff = reference - computed
        assert np.abs(diff).mean() <= 1e-5
        assert np.square(diff).mean() <= 1e-10

    def _run_alpha_beta_test(self, forward):
        """Run the alpha (``forward=True``) or beta (``forward=False``) kernel test.

        Computes the reference values with ``rnnt_numpy`` on CPU, runs the
        corresponding CUDA kernel, and checks both the per-cell values and the
        log-likelihood.
        """
        numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
        random = np.random.RandomState(0)
        original_shape = [1, 5, 11, 3]
        B, T, U, V = original_shape

        # Numpy reference
        x = random.randn(*original_shape)
        labels = np.array([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]])  # [1, 10]
        label_len = len(labels[0]) + 1
        blank_idx = 0

        x_np = log_softmax(x, axis=-1)
        reference_pass = rnnt_numpy.forward_pass if forward else rnnt_numpy.backward_pass
        ground_vals, ground_log_likelihood = reference_pass(
            x_np[0, :, :label_len, :], labels[0, : label_len - 1], blank_idx
        )

        # Pytorch kernel
        device = torch.device('cuda')
        stream = self._get_stream(device)

        x_c = torch.tensor(x, device=device, dtype=torch.float32)
        labels_c = torch.tensor(labels, device=device, dtype=torch.int32)

        # Allocate workspace memory
        denom = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
        acts = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
        ll = torch.zeros(B, device=device, dtype=x_c.dtype)
        input_lengths = torch.tensor([T], dtype=torch.int32, device=device)
        label_lengths = torch.tensor([len(labels[0])], dtype=torch.int32, device=device)

        # certify input data
        certify_inputs(x_c, labels_c, input_lengths, label_lengths)

        # flatten activation tensor (for pointer based indexing)
        x_c = x_c.view([-1])

        # log softmax reduction, then the alpha/beta kernel itself
        reduce.reduce_max(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=stream)
        reduce.reduce_exp(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream)

        kernel = gpu_rnnt_kernel.compute_alphas_kernel if forward else gpu_rnnt_kernel.compute_betas_kernel
        kernel[B, U, stream, 0](
            x_c, denom, acts, ll, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
        )
        stream.synchronize()

        acts = acts.view([B, T, U])
        self._assert_close(ground_vals, acts[0].cpu().numpy())
        self._assert_close(ground_log_likelihood, ll[0].cpu().numpy())

    def _run_grads_test(self, fastemit_lambda, clamp):
        """Compare CUDA-computed RNNT gradients against pytorch autograd.

        Here we will directly utilize the numpy variant of the loss without explicitly calling
        the numpy functions for alpha, beta and grads.
        This is because the grads returned by the rnnt_numpy.transduce_batch() are :
        d/dx (alpha + beta alignment)(log_softmax(x)).
        But according to the chain rule, we'd still need to compute the gradient of log_softmax(x)
        and update the alignments by hand. Instead, we will rely on pytorch to compute the gradient
        of the log_softmax(x) step and propagate it backwards.
        """
        numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
        random = np.random.RandomState(0)
        original_shape = [1, 5, 11, 3]
        B, T, U, V = original_shape

        # Numpy reference gradients via pytorch autograd
        x = random.randn(*original_shape)
        labels = torch.from_numpy(np.array([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]], dtype=np.int32))  # [1, 10]
        audio_len = torch.from_numpy(np.array([T], dtype=np.int32))
        label_len = torch.from_numpy(np.array([U - 1], dtype=np.int32))
        blank_idx = 0

        x_np = torch.from_numpy(x)
        x_np.requires_grad_(True)

        loss_func = rnnt_numpy.RNNTLoss(blank_idx, fastemit_lambda=fastemit_lambda, clamp=clamp)
        loss_val = loss_func(x_np, labels, audio_len, label_len)
        loss_val.sum().backward()
        true_grads = x_np.grad

        # Pytorch kernel
        device = torch.device('cuda')
        stream = self._get_stream(device)

        x_c = torch.tensor(x, device=device, dtype=torch.float32)
        labels_c = torch.tensor(labels, device=device, dtype=torch.int32)

        # Allocate workspace memory
        denom = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
        alphas = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
        betas = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
        llForward = torch.zeros(B, device=device, dtype=x_c.dtype)
        llBackward = torch.zeros(B, device=device, dtype=x_c.dtype)
        input_lengths = torch.tensor([T], dtype=torch.int32, device=device)
        label_lengths = torch.tensor([len(labels[0])], dtype=torch.int32, device=device)

        # certify input data
        certify_inputs(x_c, labels_c, input_lengths, label_lengths)

        # flatten activation tensor (for pointer based indexing)
        x_c = x_c.view([-1])
        grads = torch.zeros_like(x_c, requires_grad=False)

        # log softmax reduction
        reduce.reduce_max(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=stream)
        reduce.reduce_exp(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream)

        # alpha kernel
        gpu_rnnt_kernel.compute_alphas_kernel[B, U, stream, 0](
            x_c, denom, alphas, llForward, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
        )
        # beta kernel
        gpu_rnnt_kernel.compute_betas_kernel[B, U, stream, 0](
            x_c, denom, betas, llBackward, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
        )

        # gamma (gradient) kernel
        grad_blocks_per_grid = B * T * U
        grad_threads_per_block = gpu_rnnt_kernel.GPU_RNNT_THREAD_SIZE
        gpu_rnnt_kernel.compute_grad_kernel[grad_blocks_per_grid, grad_threads_per_block, stream, 0](
            grads,
            x_c,
            denom,
            alphas,
            betas,
            llForward,
            input_lengths,
            label_lengths,
            labels_c,
            B,
            T,
            U,
            V,
            blank_idx,
            fastemit_lambda,
            clamp,
        )
        stream.synchronize()

        # reshape grads and compare against the autograd reference
        grads = grads.view([B, T, U, V])
        self._assert_close(true_grads, grads[0].cpu().numpy())

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
    def test_compute_alphas_kernel(self):
        self._run_alpha_beta_test(forward=True)

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
    def test_compute_betas_kernel(self):
        self._run_alpha_beta_test(forward=False)

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
    @pytest.mark.unit
    def test_compute_grads_kernel(self):
        self._run_grads_test(fastemit_lambda=0.0, clamp=0.0)

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
    @pytest.mark.unit
    def test_compute_grads_kernel_fastemit(self):
        self._run_grads_test(fastemit_lambda=0.001, clamp=0.0)

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
    @pytest.mark.unit
    def test_compute_grads_kernel_clamp(self):
        self._run_grads_test(fastemit_lambda=0.0, clamp=0.1)
| [
"numpy.abs",
"warprnnt_numba.numba_utils.skip_numba_cuda_test_if_unsupported",
"torch.device",
"warprnnt_numba.rnnt_loss.rnnt_pytorch.certify_inputs",
"numpy.random.RandomState",
"numba.cuda.default_stream",
"torch.zeros",
"torch.log_softmax",
"torch.zeros_like",
"numpy.square",
"warprnnt_numba.... | [((1015, 1034), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1031, 1034), False, 'import torch\n'), ((1056, 1086), 'torch.log_softmax', 'torch.log_softmax', (['x'], {'dim': 'axis'}), '(x, dim=axis)\n', (1073, 1086), False, 'import torch\n'), ((1311, 1385), 'warprnnt_numba.numba_utils.skip_numba_cuda_test_if_unsupported', 'numba_utils.skip_numba_cuda_test_if_unsupported', (['__NUMBA_MINIMUM_VERSION__'], {}), '(__NUMBA_MINIMUM_VERSION__)\n', (1358, 1385), False, 'from warprnnt_numba import numba_utils\n'), ((1404, 1428), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (1425, 1428), True, 'import numpy as np\n'), ((1587, 1629), 'numpy.array', 'np.array', (['[[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]]'], {}), '([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]])\n', (1595, 1629), True, 'import numpy as np\n'), ((1789, 1882), 'warprnnt_numba.rnnt_loss.rnnt_numpy.forward_pass', 'rnnt_numpy.forward_pass', (['x_np[0, :, :label_len, :]', 'labels[0, :label_len - 1]', 'blank_idx'], {}), '(x_np[0, :, :label_len, :], labels[0, :label_len - 1\n ], blank_idx)\n', (1812, 1882), False, 'from warprnnt_numba.rnnt_loss import rnnt_numpy\n'), ((1944, 1964), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1956, 1964), False, 'import torch\n'), ((2171, 2222), 'torch.tensor', 'torch.tensor', (['x'], {'device': 'device', 'dtype': 'torch.float32'}), '(x, device=device, dtype=torch.float32)\n', (2183, 2222), False, 'import torch\n'), ((2242, 2296), 'torch.tensor', 'torch.tensor', (['labels'], {'device': 'device', 'dtype': 'torch.int32'}), '(labels, device=device, dtype=torch.int32)\n', (2254, 2296), False, 'import torch\n'), ((2350, 2404), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (2361, 2404), False, 'import torch\n'), ((2422, 2476), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T 
* U, device=device, dtype=x_c.dtype)\n', (2433, 2476), False, 'import torch\n'), ((2497, 2543), 'torch.zeros', 'torch.zeros', (['B'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B, device=device, dtype=x_c.dtype)\n', (2508, 2543), False, 'import torch\n'), ((2568, 2619), 'torch.tensor', 'torch.tensor', (['[T]'], {'dtype': 'torch.int32', 'device': 'device'}), '([T], dtype=torch.int32, device=device)\n', (2580, 2619), False, 'import torch\n'), ((2747, 2806), 'warprnnt_numba.rnnt_loss.rnnt_pytorch.certify_inputs', 'certify_inputs', (['x_c', 'labels_c', 'input_lengths', 'label_lengths'], {}), '(x_c, labels_c, input_lengths, label_lengths)\n', (2761, 2806), False, 'from warprnnt_numba.rnnt_loss.rnnt_pytorch import certify_inputs\n'), ((2965, 3051), 'warprnnt_numba.rnnt_loss.utils.cuda_utils.reduce.reduce_max', 'reduce.reduce_max', (['x_c', 'denom'], {'rows': 'V', 'cols': '(B * T * U)', 'minus': '(False)', 'stream': 'stream'}), '(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=\n stream)\n', (2982, 3051), False, 'from warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce\n'), ((3055, 3140), 'warprnnt_numba.rnnt_loss.utils.cuda_utils.reduce.reduce_exp', 'reduce.reduce_exp', (['x_c', 'denom'], {'rows': 'V', 'cols': '(B * T * U)', 'minus': '(True)', 'stream': 'stream'}), '(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream\n )\n', (3072, 3140), False, 'from warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce\n'), ((3935, 4009), 'warprnnt_numba.numba_utils.skip_numba_cuda_test_if_unsupported', 'numba_utils.skip_numba_cuda_test_if_unsupported', (['__NUMBA_MINIMUM_VERSION__'], {}), '(__NUMBA_MINIMUM_VERSION__)\n', (3982, 4009), False, 'from warprnnt_numba import numba_utils\n'), ((4028, 4052), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (4049, 4052), True, 'import numpy as np\n'), ((4211, 4253), 'numpy.array', 'np.array', (['[[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]]'], {}), '([[1, 1, 1, 2, 2, 
2, 1, 2, 2, 1]])\n', (4219, 4253), True, 'import numpy as np\n'), ((4413, 4507), 'warprnnt_numba.rnnt_loss.rnnt_numpy.backward_pass', 'rnnt_numpy.backward_pass', (['x_np[0, :, :label_len, :]', 'labels[0, :label_len - 1]', 'blank_idx'], {}), '(x_np[0, :, :label_len, :], labels[0, :label_len - \n 1], blank_idx)\n', (4437, 4507), False, 'from warprnnt_numba.rnnt_loss import rnnt_numpy\n'), ((4569, 4589), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4581, 4589), False, 'import torch\n'), ((4796, 4847), 'torch.tensor', 'torch.tensor', (['x'], {'device': 'device', 'dtype': 'torch.float32'}), '(x, device=device, dtype=torch.float32)\n', (4808, 4847), False, 'import torch\n'), ((4867, 4921), 'torch.tensor', 'torch.tensor', (['labels'], {'device': 'device', 'dtype': 'torch.int32'}), '(labels, device=device, dtype=torch.int32)\n', (4879, 4921), False, 'import torch\n'), ((4975, 5029), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (4986, 5029), False, 'import torch\n'), ((5046, 5100), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (5057, 5100), False, 'import torch\n'), ((5122, 5168), 'torch.zeros', 'torch.zeros', (['B'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B, device=device, dtype=x_c.dtype)\n', (5133, 5168), False, 'import torch\n'), ((5193, 5244), 'torch.tensor', 'torch.tensor', (['[T]'], {'dtype': 'torch.int32', 'device': 'device'}), '([T], dtype=torch.int32, device=device)\n', (5205, 5244), False, 'import torch\n'), ((5372, 5431), 'warprnnt_numba.rnnt_loss.rnnt_pytorch.certify_inputs', 'certify_inputs', (['x_c', 'labels_c', 'input_lengths', 'label_lengths'], {}), '(x_c, labels_c, input_lengths, label_lengths)\n', (5386, 5431), False, 'from warprnnt_numba.rnnt_loss.rnnt_pytorch import certify_inputs\n'), ((5590, 5676), 
'warprnnt_numba.rnnt_loss.utils.cuda_utils.reduce.reduce_max', 'reduce.reduce_max', (['x_c', 'denom'], {'rows': 'V', 'cols': '(B * T * U)', 'minus': '(False)', 'stream': 'stream'}), '(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=\n stream)\n', (5607, 5676), False, 'from warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce\n'), ((5680, 5765), 'warprnnt_numba.rnnt_loss.utils.cuda_utils.reduce.reduce_exp', 'reduce.reduce_exp', (['x_c', 'denom'], {'rows': 'V', 'cols': '(B * T * U)', 'minus': '(True)', 'stream': 'stream'}), '(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream\n )\n', (5697, 5765), False, 'from warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce\n'), ((6578, 6652), 'warprnnt_numba.numba_utils.skip_numba_cuda_test_if_unsupported', 'numba_utils.skip_numba_cuda_test_if_unsupported', (['__NUMBA_MINIMUM_VERSION__'], {}), '(__NUMBA_MINIMUM_VERSION__)\n', (6625, 6652), False, 'from warprnnt_numba import numba_utils\n'), ((6722, 6746), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (6743, 6746), True, 'import numpy as np\n'), ((7171, 7190), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (7187, 7190), False, 'import torch\n'), ((7835, 7911), 'warprnnt_numba.rnnt_loss.rnnt_numpy.RNNTLoss', 'rnnt_numpy.RNNTLoss', (['blank_idx'], {'fastemit_lambda': 'fastemit_lambda', 'clamp': 'clamp'}), '(blank_idx, fastemit_lambda=fastemit_lambda, clamp=clamp)\n', (7854, 7911), False, 'from warprnnt_numba.rnnt_loss import rnnt_numpy\n'), ((8085, 8105), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (8097, 8105), False, 'import torch\n'), ((8312, 8363), 'torch.tensor', 'torch.tensor', (['x'], {'device': 'device', 'dtype': 'torch.float32'}), '(x, device=device, dtype=torch.float32)\n', (8324, 8363), False, 'import torch\n'), ((8383, 8437), 'torch.tensor', 'torch.tensor', (['labels'], {'device': 'device', 'dtype': 'torch.int32'}), '(labels, device=device, 
dtype=torch.int32)\n', (8395, 8437), False, 'import torch\n'), ((8491, 8545), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (8502, 8545), False, 'import torch\n'), ((8563, 8617), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (8574, 8617), False, 'import torch\n'), ((8634, 8688), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (8645, 8688), False, 'import torch\n'), ((8709, 8755), 'torch.zeros', 'torch.zeros', (['B'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B, device=device, dtype=x_c.dtype)\n', (8720, 8755), False, 'import torch\n'), ((8777, 8823), 'torch.zeros', 'torch.zeros', (['B'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B, device=device, dtype=x_c.dtype)\n', (8788, 8823), False, 'import torch\n'), ((8848, 8899), 'torch.tensor', 'torch.tensor', (['[T]'], {'dtype': 'torch.int32', 'device': 'device'}), '([T], dtype=torch.int32, device=device)\n', (8860, 8899), False, 'import torch\n'), ((9027, 9086), 'warprnnt_numba.rnnt_loss.rnnt_pytorch.certify_inputs', 'certify_inputs', (['x_c', 'labels_c', 'input_lengths', 'label_lengths'], {}), '(x_c, labels_c, input_lengths, label_lengths)\n', (9041, 9086), False, 'from warprnnt_numba.rnnt_loss.rnnt_pytorch import certify_inputs\n'), ((9198, 9240), 'torch.zeros_like', 'torch.zeros_like', (['x_c'], {'requires_grad': '(False)'}), '(x_c, requires_grad=False)\n', (9214, 9240), False, 'import torch\n'), ((9304, 9390), 'warprnnt_numba.rnnt_loss.utils.cuda_utils.reduce.reduce_max', 'reduce.reduce_max', (['x_c', 'denom'], {'rows': 'V', 'cols': '(B * T * U)', 'minus': '(False)', 'stream': 'stream'}), '(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=\n stream)\n', (9321, 9390), False, 'from 
warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce\n'), ((9394, 9479), 'warprnnt_numba.rnnt_loss.utils.cuda_utils.reduce.reduce_exp', 'reduce.reduce_exp', (['x_c', 'denom'], {'rows': 'V', 'cols': '(B * T * U)', 'minus': '(True)', 'stream': 'stream'}), '(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream\n )\n', (9411, 9479), False, 'from warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce\n'), ((10907, 10981), 'warprnnt_numba.numba_utils.skip_numba_cuda_test_if_unsupported', 'numba_utils.skip_numba_cuda_test_if_unsupported', (['__NUMBA_MINIMUM_VERSION__'], {}), '(__NUMBA_MINIMUM_VERSION__)\n', (10954, 10981), False, 'from warprnnt_numba import numba_utils\n'), ((11053, 11077), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (11074, 11077), True, 'import numpy as np\n'), ((11502, 11521), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (11518, 11521), False, 'import torch\n'), ((12158, 12234), 'warprnnt_numba.rnnt_loss.rnnt_numpy.RNNTLoss', 'rnnt_numpy.RNNTLoss', (['blank_idx'], {'fastemit_lambda': 'fastemit_lambda', 'clamp': 'clamp'}), '(blank_idx, fastemit_lambda=fastemit_lambda, clamp=clamp)\n', (12177, 12234), False, 'from warprnnt_numba.rnnt_loss import rnnt_numpy\n'), ((12408, 12428), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (12420, 12428), False, 'import torch\n'), ((12635, 12686), 'torch.tensor', 'torch.tensor', (['x'], {'device': 'device', 'dtype': 'torch.float32'}), '(x, device=device, dtype=torch.float32)\n', (12647, 12686), False, 'import torch\n'), ((12706, 12760), 'torch.tensor', 'torch.tensor', (['labels'], {'device': 'device', 'dtype': 'torch.int32'}), '(labels, device=device, dtype=torch.int32)\n', (12718, 12760), False, 'import torch\n'), ((12814, 12868), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (12825, 12868), False, 'import 
torch\n'), ((12886, 12940), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (12897, 12940), False, 'import torch\n'), ((12957, 13011), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (12968, 13011), False, 'import torch\n'), ((13032, 13078), 'torch.zeros', 'torch.zeros', (['B'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B, device=device, dtype=x_c.dtype)\n', (13043, 13078), False, 'import torch\n'), ((13100, 13146), 'torch.zeros', 'torch.zeros', (['B'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B, device=device, dtype=x_c.dtype)\n', (13111, 13146), False, 'import torch\n'), ((13171, 13222), 'torch.tensor', 'torch.tensor', (['[T]'], {'dtype': 'torch.int32', 'device': 'device'}), '([T], dtype=torch.int32, device=device)\n', (13183, 13222), False, 'import torch\n'), ((13350, 13409), 'warprnnt_numba.rnnt_loss.rnnt_pytorch.certify_inputs', 'certify_inputs', (['x_c', 'labels_c', 'input_lengths', 'label_lengths'], {}), '(x_c, labels_c, input_lengths, label_lengths)\n', (13364, 13409), False, 'from warprnnt_numba.rnnt_loss.rnnt_pytorch import certify_inputs\n'), ((13521, 13563), 'torch.zeros_like', 'torch.zeros_like', (['x_c'], {'requires_grad': '(False)'}), '(x_c, requires_grad=False)\n', (13537, 13563), False, 'import torch\n'), ((13627, 13713), 'warprnnt_numba.rnnt_loss.utils.cuda_utils.reduce.reduce_max', 'reduce.reduce_max', (['x_c', 'denom'], {'rows': 'V', 'cols': '(B * T * U)', 'minus': '(False)', 'stream': 'stream'}), '(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=\n stream)\n', (13644, 13713), False, 'from warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce\n'), ((13717, 13802), 'warprnnt_numba.rnnt_loss.utils.cuda_utils.reduce.reduce_exp', 'reduce.reduce_exp', (['x_c', 'denom'], {'rows': 'V', 'cols': '(B * T * U)', 'minus': '(True)', 
'stream': 'stream'}), '(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream\n )\n', (13734, 13802), False, 'from warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce\n'), ((15227, 15301), 'warprnnt_numba.numba_utils.skip_numba_cuda_test_if_unsupported', 'numba_utils.skip_numba_cuda_test_if_unsupported', (['__NUMBA_MINIMUM_VERSION__'], {}), '(__NUMBA_MINIMUM_VERSION__)\n', (15274, 15301), False, 'from warprnnt_numba import numba_utils\n'), ((15371, 15395), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (15392, 15395), True, 'import numpy as np\n'), ((15820, 15839), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (15836, 15839), False, 'import torch\n'), ((16476, 16552), 'warprnnt_numba.rnnt_loss.rnnt_numpy.RNNTLoss', 'rnnt_numpy.RNNTLoss', (['blank_idx'], {'fastemit_lambda': 'fastemit_lambda', 'clamp': 'clamp'}), '(blank_idx, fastemit_lambda=fastemit_lambda, clamp=clamp)\n', (16495, 16552), False, 'from warprnnt_numba.rnnt_loss import rnnt_numpy\n'), ((16726, 16746), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (16738, 16746), False, 'import torch\n'), ((16953, 17004), 'torch.tensor', 'torch.tensor', (['x'], {'device': 'device', 'dtype': 'torch.float32'}), '(x, device=device, dtype=torch.float32)\n', (16965, 17004), False, 'import torch\n'), ((17024, 17078), 'torch.tensor', 'torch.tensor', (['labels'], {'device': 'device', 'dtype': 'torch.int32'}), '(labels, device=device, dtype=torch.int32)\n', (17036, 17078), False, 'import torch\n'), ((17132, 17186), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (17143, 17186), False, 'import torch\n'), ((17204, 17258), 'torch.zeros', 'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (17215, 17258), False, 'import torch\n'), ((17275, 17329), 'torch.zeros', 
'torch.zeros', (['(B * T * U)'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B * T * U, device=device, dtype=x_c.dtype)\n', (17286, 17329), False, 'import torch\n'), ((17350, 17396), 'torch.zeros', 'torch.zeros', (['B'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B, device=device, dtype=x_c.dtype)\n', (17361, 17396), False, 'import torch\n'), ((17418, 17464), 'torch.zeros', 'torch.zeros', (['B'], {'device': 'device', 'dtype': 'x_c.dtype'}), '(B, device=device, dtype=x_c.dtype)\n', (17429, 17464), False, 'import torch\n'), ((17489, 17540), 'torch.tensor', 'torch.tensor', (['[T]'], {'dtype': 'torch.int32', 'device': 'device'}), '([T], dtype=torch.int32, device=device)\n', (17501, 17540), False, 'import torch\n'), ((17668, 17727), 'warprnnt_numba.rnnt_loss.rnnt_pytorch.certify_inputs', 'certify_inputs', (['x_c', 'labels_c', 'input_lengths', 'label_lengths'], {}), '(x_c, labels_c, input_lengths, label_lengths)\n', (17682, 17727), False, 'from warprnnt_numba.rnnt_loss.rnnt_pytorch import certify_inputs\n'), ((17839, 17881), 'torch.zeros_like', 'torch.zeros_like', (['x_c'], {'requires_grad': '(False)'}), '(x_c, requires_grad=False)\n', (17855, 17881), False, 'import torch\n'), ((17945, 18031), 'warprnnt_numba.rnnt_loss.utils.cuda_utils.reduce.reduce_max', 'reduce.reduce_max', (['x_c', 'denom'], {'rows': 'V', 'cols': '(B * T * U)', 'minus': '(False)', 'stream': 'stream'}), '(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=\n stream)\n', (17962, 18031), False, 'from warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce\n'), ((18035, 18120), 'warprnnt_numba.rnnt_loss.utils.cuda_utils.reduce.reduce_exp', 'reduce.reduce_exp', (['x_c', 'denom'], {'rows': 'V', 'cols': '(B * T * U)', 'minus': '(True)', 'stream': 'stream'}), '(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream\n )\n', (18052, 18120), False, 'from warprnnt_numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce\n'), ((2134, 2155), 'numba.cuda.default_stream', 
'cuda.default_stream', ([], {}), '()\n', (2153, 2155), False, 'from numba import cuda\n'), ((1175, 1194), 'numba.cuda.is_available', 'cuda.is_available', ([], {}), '()\n', (1192, 1194), False, 'from numba import cuda\n'), ((4759, 4780), 'numba.cuda.default_stream', 'cuda.default_stream', ([], {}), '()\n', (4778, 4780), False, 'from numba import cuda\n'), ((3800, 3819), 'numba.cuda.is_available', 'cuda.is_available', ([], {}), '()\n', (3817, 3819), False, 'from numba import cuda\n'), ((6922, 6980), 'numpy.array', 'np.array', (['[[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]]'], {'dtype': 'np.int32'}), '([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]], dtype=np.int32)\n', (6930, 6980), True, 'import numpy as np\n'), ((7030, 7059), 'numpy.array', 'np.array', (['[T]'], {'dtype': 'np.int32'}), '([T], dtype=np.int32)\n', (7038, 7059), True, 'import numpy as np\n'), ((7098, 7131), 'numpy.array', 'np.array', (['[U - 1]'], {'dtype': 'np.int32'}), '([U - 1], dtype=np.int32)\n', (7106, 7131), True, 'import numpy as np\n'), ((8275, 8296), 'numba.cuda.default_stream', 'cuda.default_stream', ([], {}), '()\n', (8294, 8296), False, 'from numba import cuda\n'), ((6421, 6440), 'numba.cuda.is_available', 'cuda.is_available', ([], {}), '()\n', (6438, 6440), False, 'from numba import cuda\n'), ((11253, 11311), 'numpy.array', 'np.array', (['[[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]]'], {'dtype': 'np.int32'}), '([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]], dtype=np.int32)\n', (11261, 11311), True, 'import numpy as np\n'), ((11361, 11390), 'numpy.array', 'np.array', (['[T]'], {'dtype': 'np.int32'}), '([T], dtype=np.int32)\n', (11369, 11390), True, 'import numpy as np\n'), ((11429, 11462), 'numpy.array', 'np.array', (['[U - 1]'], {'dtype': 'np.int32'}), '([U - 1], dtype=np.int32)\n', (11437, 11462), True, 'import numpy as np\n'), ((12598, 12619), 'numba.cuda.default_stream', 'cuda.default_stream', ([], {}), '()\n', (12617, 12619), False, 'from numba import cuda\n'), ((10741, 10760), 'numba.cuda.is_available', 'cuda.is_available', ([], {}), 
'()\n', (10758, 10760), False, 'from numba import cuda\n'), ((15571, 15629), 'numpy.array', 'np.array', (['[[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]]'], {'dtype': 'np.int32'}), '([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]], dtype=np.int32)\n', (15579, 15629), True, 'import numpy as np\n'), ((15679, 15708), 'numpy.array', 'np.array', (['[T]'], {'dtype': 'np.int32'}), '([T], dtype=np.int32)\n', (15687, 15708), True, 'import numpy as np\n'), ((15747, 15780), 'numpy.array', 'np.array', (['[U - 1]'], {'dtype': 'np.int32'}), '([U - 1], dtype=np.int32)\n', (15755, 15780), True, 'import numpy as np\n'), ((16916, 16937), 'numba.cuda.default_stream', 'cuda.default_stream', ([], {}), '()\n', (16935, 16937), False, 'from numba import cuda\n'), ((15064, 15083), 'numba.cuda.is_available', 'cuda.is_available', ([], {}), '()\n', (15081, 15083), False, 'from numba import cuda\n'), ((2052, 2085), 'torch.cuda.current_stream', 'torch.cuda.current_stream', (['device'], {}), '(device)\n', (2077, 2085), False, 'import torch\n'), ((3529, 3541), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (3535, 3541), True, 'import numpy as np\n'), ((3572, 3587), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (3581, 3587), True, 'import numpy as np\n'), ((3690, 3705), 'numpy.abs', 'np.abs', (['ll_diff'], {}), '(ll_diff)\n', (3696, 3705), True, 'import numpy as np\n'), ((3736, 3754), 'numpy.square', 'np.square', (['ll_diff'], {}), '(ll_diff)\n', (3745, 3754), True, 'import numpy as np\n'), ((4677, 4710), 'torch.cuda.current_stream', 'torch.cuda.current_stream', (['device'], {}), '(device)\n', (4702, 4710), False, 'import torch\n'), ((6149, 6161), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (6155, 6161), True, 'import numpy as np\n'), ((6192, 6207), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (6201, 6207), True, 'import numpy as np\n'), ((6311, 6326), 'numpy.abs', 'np.abs', (['ll_diff'], {}), '(ll_diff)\n', (6317, 6326), True, 'import numpy as np\n'), ((6357, 6375), 'numpy.square', 
'np.square', (['ll_diff'], {}), '(ll_diff)\n', (6366, 6375), True, 'import numpy as np\n'), ((8193, 8226), 'torch.cuda.current_stream', 'torch.cuda.current_stream', (['device'], {}), '(device)\n', (8218, 8226), False, 'import torch\n'), ((10637, 10649), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (10643, 10649), True, 'import numpy as np\n'), ((10680, 10695), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (10689, 10695), True, 'import numpy as np\n'), ((12516, 12549), 'torch.cuda.current_stream', 'torch.cuda.current_stream', (['device'], {}), '(device)\n', (12541, 12549), False, 'import torch\n'), ((14960, 14972), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (14966, 14972), True, 'import numpy as np\n'), ((15003, 15018), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (15012, 15018), True, 'import numpy as np\n'), ((16834, 16867), 'torch.cuda.current_stream', 'torch.cuda.current_stream', (['device'], {}), '(device)\n', (16859, 16867), False, 'import torch\n'), ((19278, 19290), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (19284, 19290), True, 'import numpy as np\n'), ((19321, 19336), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (19330, 19336), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.